gen/ir: Move function body codegen state into separate class

Previously, the transitory state that is only needed and valid during
generation of the LLVM IR for a function body was conflated with the
general codegen metadata for the function declaration in IrFunction.

There is further potential for cleanup regarding the use of
gIR->func() and so on all over the code base, but that is outside the
scope of this commit, which is only concerned with the IrFunction
members moved to FuncGenState.

GitHub: Fixes #1661.
David Nadlinger 2016-08-01 20:16:20 +01:00
parent 358253f6a6
commit 6cc93bc8ba
18 changed files with 1417 additions and 1355 deletions
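
For orientation, the change at the call sites below boils down to the following
pattern (a before/after sketch distilled from the hunks in this commit, using
the DtoArrayEqCmp_impl call as the example):

    // Before: per-body codegen state was reached through the long-lived IrFunction:
    gIR->func()->scopes->callOrInvoke(fn, args).getInstruction();

    // After: the same state lives in the per-body FuncGenState kept on gIR:
    gIR->funcGen().scopes.callOrInvoke(fn, args).getInstruction();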


@@ -16,6 +16,7 @@
 #include "module.h"
 #include "mtype.h"
 #include "gen/dvalue.h"
+#include "gen/funcgenstate.h"
 #include "gen/irstate.h"
 #include "gen/llvm.h"
 #include "gen/llvmhelpers.h"
@@ -863,9 +864,9 @@ DSliceValue *DtoCatArrays(Loc &loc, Type *arrayType, Expression *exp1,
     args.push_back(val);
   }
-  LLValue *newArray = gIR->func()
-                          ->scopes->callOrInvoke(fn, args, ".appendedArray")
-                          .getInstruction();
+  auto newArray = gIR->funcGen()
+                      .scopes.callOrInvoke(fn, args, ".appendedArray")
+                      .getInstruction();
   return getSlice(arrayType, newArray);
 }
@@ -937,7 +938,7 @@ static LLValue *DtoArrayEqCmp_impl(Loc &loc, const char *func, DValue *l,
     args.push_back(DtoBitCast(tival, fn->getFunctionType()->getParamType(2)));
   }
-  return gIR->func()->scopes->callOrInvoke(fn, args).getInstruction();
+  return gIR->funcGen().scopes.callOrInvoke(fn, args).getInstruction();
 }
 ////////////////////////////////////////////////////////////////////////////////
@@ -994,9 +995,9 @@ LLValue *DtoArrayCastLength(Loc &loc, LLValue *len, LLType *elemty,
   }
   LLFunction *fn = getRuntimeFunction(loc, gIR->module, "_d_array_cast_len");
-  return gIR->CreateCallOrInvoke(fn, len,
-                                 LLConstantInt::get(DtoSize_t(), esz, false),
-                                 LLConstantInt::get(DtoSize_t(), nsz, false))
+  return gIR
+      ->CreateCallOrInvoke(fn, len, LLConstantInt::get(DtoSize_t(), esz, false),
+                           LLConstantInt::get(DtoSize_t(), nsz, false))
       .getInstruction();
 }

gen/funcgenstate.cpp Normal file

@@ -0,0 +1,636 @@
//===-- funcgenstate.cpp --------------------------------------------------===//
//
// LDC the LLVM D compiler
//
// This file is distributed under the BSD-style LDC license. See the LICENSE
// file for details.
//
//===----------------------------------------------------------------------===//
#include "gen/funcgenstate.h"
#include "gen/llvm.h"
#include "gen/llvmhelpers.h"
#include "gen/ms-cxx-helper.h"
#include "gen/runtime.h"
#include "ir/irfunction.h"
JumpTarget::JumpTarget(llvm::BasicBlock *targetBlock,
CleanupCursor cleanupScope, Statement *targetStatement)
: targetBlock(targetBlock), cleanupScope(cleanupScope),
targetStatement(targetStatement) {}
GotoJump::GotoJump(Loc loc, llvm::BasicBlock *sourceBlock,
llvm::BasicBlock *tentativeTarget, Identifier *targetLabel)
: sourceLoc(std::move(loc)), sourceBlock(sourceBlock),
tentativeTarget(tentativeTarget), targetLabel(targetLabel) {}
CatchScope::CatchScope(llvm::Constant *classInfoPtr,
llvm::BasicBlock *bodyBlock, CleanupCursor cleanupScope,
llvm::MDNode *branchWeights)
: classInfoPtr(classInfoPtr), bodyBlock(bodyBlock),
cleanupScope(cleanupScope), branchWeights(branchWeights) {}
namespace {
#if LDC_LLVM_VER >= 308
// MSVC/x86 uses C++ exception handling that puts cleanup blocks into funclets.
// This means that we cannot use a branch selector and conditional branches
// at cleanup exit to continue with different targets.
// Instead we make a full copy of the cleanup code for every target
//
// Return the beginning basic block of the cleanup code
llvm::BasicBlock *executeCleanupCopying(IRState &irs, CleanupScope &scope,
llvm::BasicBlock *sourceBlock,
llvm::BasicBlock *continueWith,
llvm::BasicBlock *unwindTo,
llvm::Value *funclet) {
if (isCatchSwitchBlock(scope.beginBlock))
return continueWith;
if (scope.cleanupBlocks.empty()) {
// figure out the list of blocks used by this cleanup step
findSuccessors(scope.cleanupBlocks, scope.beginBlock, scope.endBlock);
if (!scope.endBlock->getTerminator())
// Set up the unconditional branch at the end of the cleanup
llvm::BranchInst::Create(continueWith, scope.endBlock);
} else {
// check whether we have an exit target with the same continuation
for (CleanupExitTarget &tgt : scope.exitTargets)
if (tgt.branchTarget == continueWith) {
tgt.sourceBlocks.push_back(sourceBlock);
return tgt.cleanupBlocks.front();
}
}
// reuse the original IR if not unwinding and not already used
bool useOriginal = unwindTo == nullptr && funclet == nullptr;
for (CleanupExitTarget &tgt : scope.exitTargets)
useOriginal = useOriginal && tgt.cleanupBlocks.front() != scope.beginBlock;
// append new target
scope.exitTargets.push_back(CleanupExitTarget(continueWith));
scope.exitTargets.back().sourceBlocks.push_back(sourceBlock);
if (useOriginal) {
// change the continuation target if the initial branch was created
// by another instance with unwinding
if (continueWith)
if (auto term = scope.endBlock->getTerminator())
if (auto succ = term->getSuccessor(0))
if (succ != continueWith) {
remapBlocksValue(scope.cleanupBlocks, succ, continueWith);
}
scope.exitTargets.back().cleanupBlocks = scope.cleanupBlocks;
} else {
// clone the code
cloneBlocks(scope.cleanupBlocks, scope.exitTargets.back().cleanupBlocks,
continueWith, unwindTo, funclet);
}
return scope.exitTargets.back().cleanupBlocks.front();
}
#endif // LDC_LLVM_VER >= 308
void executeCleanup(IRState &irs, CleanupScope &scope,
llvm::BasicBlock *sourceBlock,
llvm::BasicBlock *continueWith) {
assert(!useMSVCEH()); // should always use executeCleanupCopying
if (scope.exitTargets.empty() ||
(scope.exitTargets.size() == 1 &&
scope.exitTargets[0].branchTarget == continueWith)) {
// We didn't need a branch selector before and still don't need one.
assert(!scope.branchSelector);
// Set up the unconditional branch at the end of the cleanup if we have
// not done so already.
if (scope.exitTargets.empty()) {
scope.exitTargets.push_back(CleanupExitTarget(continueWith));
llvm::BranchInst::Create(continueWith, scope.endBlock);
}
scope.exitTargets.front().sourceBlocks.push_back(sourceBlock);
return;
}
// We need a branch selector if we are here...
if (!scope.branchSelector) {
// ... and have not created one yet, so do so now.
scope.branchSelector = new llvm::AllocaInst(
llvm::Type::getInt32Ty(irs.context()),
llvm::Twine("branchsel.") + scope.beginBlock->getName(),
irs.topallocapoint());
// Now we also need to store 0 to it to keep the paths that go to the
// only existing branch target the same.
auto &v = scope.exitTargets.front().sourceBlocks;
for (auto bb : v) {
new llvm::StoreInst(DtoConstUint(0), scope.branchSelector,
bb->getTerminator());
}
// And convert the BranchInst to the existing branch target to a
// SelectInst so we can append the other cases to it.
scope.endBlock->getTerminator()->eraseFromParent();
llvm::Value *sel =
new llvm::LoadInst(scope.branchSelector, "", scope.endBlock);
llvm::SwitchInst::Create(
sel, scope.exitTargets[0].branchTarget,
1, // Expected number of branches, only for pre-allocating.
scope.endBlock);
}
// If we already know this branch target, figure out the branch selector
// value and simply insert the store into the source block (prior to the
// last instruction, which is the branch to the first cleanup).
for (unsigned i = 0; i < scope.exitTargets.size(); ++i) {
CleanupExitTarget &t = scope.exitTargets[i];
if (t.branchTarget == continueWith) {
new llvm::StoreInst(DtoConstUint(i), scope.branchSelector,
sourceBlock->getTerminator());
// Note: Strictly speaking, keeping this up to date would not be
// needed right now, because we never do any optimizations that
// require changes to the source blocks after the initial conversion
// from one to two branch targets. Keeping this around for now to
// ease future development, but may be removed to save some work.
t.sourceBlocks.push_back(sourceBlock);
return;
}
}
// We don't know this branch target yet, so add it to the SwitchInst...
llvm::ConstantInt *const selectorVal = DtoConstUint(scope.exitTargets.size());
llvm::cast<llvm::SwitchInst>(scope.endBlock->getTerminator())
->addCase(selectorVal, continueWith);
// ... insert the store into the source block...
new llvm::StoreInst(selectorVal, scope.branchSelector,
sourceBlock->getTerminator());
// ... and keep track of it (again, this is unnecessary right now as
// discussed in the above note).
scope.exitTargets.push_back(CleanupExitTarget(continueWith));
scope.exitTargets.back().sourceBlocks.push_back(sourceBlock);
}
}
ScopeStack::~ScopeStack() {
// If there are still unresolved gotos left, it means that they were either
// down or "sideways" (i.e. down another branch) of the tree of all
// cleanup scopes, both of which are not allowed in D.
if (!topLevelUnresolvedGotos.empty()) {
for (const auto &i : topLevelUnresolvedGotos) {
error(i.sourceLoc, "goto into try/finally scope is not allowed");
}
fatal();
}
}
void ScopeStack::pushCleanup(llvm::BasicBlock *beginBlock,
llvm::BasicBlock *endBlock) {
cleanupScopes.push_back(CleanupScope(beginBlock, endBlock));
}
void ScopeStack::runCleanups(CleanupCursor sourceScope,
CleanupCursor targetScope,
llvm::BasicBlock *continueWith) {
#if LDC_LLVM_VER >= 308
if (useMSVCEH()) {
runCleanupCopies(sourceScope, targetScope, continueWith);
return;
}
#endif
assert(targetScope <= sourceScope);
if (targetScope == sourceScope) {
// No cleanups to run, just branch to the next block.
irs.ir->CreateBr(continueWith);
return;
}
// Insert the unconditional branch to the first cleanup block.
irs.ir->CreateBr(cleanupScopes[sourceScope - 1].beginBlock);
// Update all the control flow in the cleanups to make sure we end up where
// we want.
for (CleanupCursor i = sourceScope; i-- > targetScope;) {
llvm::BasicBlock *nextBlock =
(i > targetScope) ? cleanupScopes[i - 1].beginBlock : continueWith;
executeCleanup(irs, cleanupScopes[i], irs.scopebb(), nextBlock);
}
}
#if LDC_LLVM_VER >= 308
void ScopeStack::runCleanupCopies(CleanupCursor sourceScope,
CleanupCursor targetScope,
llvm::BasicBlock *continueWith) {
assert(targetScope <= sourceScope);
// work through the blocks in reverse execution order, so we
// can merge cleanups that end up at the same continuation target
for (CleanupCursor i = targetScope; i < sourceScope; ++i)
continueWith = executeCleanupCopying(irs, cleanupScopes[i], irs.scopebb(),
continueWith, nullptr, nullptr);
// Insert the unconditional branch to the first cleanup block.
irs.ir->CreateBr(continueWith);
}
llvm::BasicBlock *ScopeStack::runCleanupPad(CleanupCursor scope,
llvm::BasicBlock *unwindTo) {
// a catch switch never needs to be cloned and is an unwind target itself
if (isCatchSwitchBlock(cleanupScopes[scope].beginBlock))
return cleanupScopes[scope].beginBlock;
// each cleanup block is bracketed by a pair of cleanuppad/cleanupret
// instructions, any unwinding should also just continue at the next
// cleanup block, e.g.:
//
// cleanuppad:
// %0 = cleanuppad within %funclet[]
// %frame = nullptr
// if (!_d_enter_cleanup(%frame)) br label %cleanupret
// else br label %copy
//
// copy:
// invoke _dtor to %cleanupret unwind %unwindTo [ "funclet"(token %0) ]
//
// cleanupret:
// _d_leave_cleanup(%frame)
// cleanupret %0 unwind %unwindTo
//
llvm::BasicBlock *cleanupbb =
llvm::BasicBlock::Create(irs.context(), "cleanuppad", irs.topfunc());
auto cleanuppad =
llvm::CleanupPadInst::Create(getFuncletToken(), {}, "", cleanupbb);
llvm::BasicBlock *cleanupret =
llvm::BasicBlock::Create(irs.context(), "cleanupret", irs.topfunc());
// preparation to allocate some space on the stack where _d_enter_cleanup
// can place an exception frame (but not done here)
auto frame = getNullPtr(getVoidPtrType());
auto savedInsertBlock = irs.ir->GetInsertBlock();
auto savedInsertPoint = irs.ir->GetInsertPoint();
auto savedDbgLoc = irs.DBuilder.GetCurrentLoc();
auto endFn = getRuntimeFunction(Loc(), irs.module, "_d_leave_cleanup");
irs.ir->SetInsertPoint(cleanupret);
irs.DBuilder.EmitStopPoint(irs.func()->decl->loc);
irs.ir->CreateCall(endFn, frame,
{llvm::OperandBundleDef("funclet", cleanuppad)}, "");
llvm::CleanupReturnInst::Create(cleanuppad, unwindTo, cleanupret);
auto copybb = executeCleanupCopying(irs, cleanupScopes[scope], cleanupbb,
cleanupret, unwindTo, cleanuppad);
auto beginFn = getRuntimeFunction(Loc(), irs.module, "_d_enter_cleanup");
irs.ir->SetInsertPoint(cleanupbb);
irs.DBuilder.EmitStopPoint(irs.func()->decl->loc);
auto exec = irs.ir->CreateCall(
beginFn, frame, {llvm::OperandBundleDef("funclet", cleanuppad)}, "");
llvm::BranchInst::Create(copybb, cleanupret, exec, cleanupbb);
irs.ir->SetInsertPoint(savedInsertBlock, savedInsertPoint);
irs.DBuilder.EmitStopPoint(savedDbgLoc);
return cleanupbb;
}
#endif
void ScopeStack::runAllCleanups(llvm::BasicBlock *continueWith) {
runCleanups(0, continueWith);
}
void ScopeStack::popCleanups(CleanupCursor targetScope) {
assert(targetScope <= currentCleanupScope());
if (targetScope == currentCleanupScope()) {
return;
}
for (CleanupCursor i = currentCleanupScope(); i-- > targetScope;) {
// Any gotos that are still unresolved necessarily leave this scope.
// Thus, the cleanup needs to be executed.
for (const auto &gotoJump : currentUnresolvedGotos()) {
// Make the source resp. last cleanup branch to this one.
llvm::BasicBlock *tentative = gotoJump.tentativeTarget;
#if LDC_LLVM_VER >= 308
if (useMSVCEH()) {
llvm::BasicBlock *continueWith = llvm::BasicBlock::Create(
irs.context(), "jumpcleanup", irs.topfunc());
auto startCleanup =
executeCleanupCopying(irs, cleanupScopes[i], gotoJump.sourceBlock,
continueWith, nullptr, nullptr);
tentative->replaceAllUsesWith(startCleanup);
llvm::BranchInst::Create(tentative, continueWith);
} else
#endif
{
tentative->replaceAllUsesWith(cleanupScopes[i].beginBlock);
// And continue execution with the tentative target (we simply reuse
// it because there is no reason not to).
executeCleanup(irs, cleanupScopes[i], gotoJump.sourceBlock, tentative);
}
}
std::vector<GotoJump> &nextUnresolved =
(i == 0) ? topLevelUnresolvedGotos
: cleanupScopes[i - 1].unresolvedGotos;
nextUnresolved.insert(nextUnresolved.end(),
currentUnresolvedGotos().begin(),
currentUnresolvedGotos().end());
cleanupScopes.pop_back();
}
}
void ScopeStack::pushCatch(llvm::Constant *classInfoPtr,
llvm::BasicBlock *bodyBlock,
llvm::MDNode *matchWeights) {
if (useMSVCEH()) {
#if LDC_LLVM_VER >= 308
assert(isCatchSwitchBlock(bodyBlock));
pushCleanup(bodyBlock, bodyBlock);
#endif
} else {
catchScopes.emplace_back(classInfoPtr, bodyBlock, currentCleanupScope(),
matchWeights);
currentLandingPads().push_back(nullptr);
}
}
void ScopeStack::popCatch() {
if (useMSVCEH()) {
#if LDC_LLVM_VER >= 308
assert(isCatchSwitchBlock(cleanupScopes.back().beginBlock));
popCleanups(currentCleanupScope() - 1);
#endif
} else {
catchScopes.pop_back();
currentLandingPads().pop_back();
}
}
void ScopeStack::pushLoopTarget(Statement *loopStatement,
llvm::BasicBlock *continueTarget,
llvm::BasicBlock *breakTarget) {
continueTargets.emplace_back(continueTarget, currentCleanupScope(),
loopStatement);
breakTargets.emplace_back(breakTarget, currentCleanupScope(), loopStatement);
}
void ScopeStack::popLoopTarget() {
continueTargets.pop_back();
breakTargets.pop_back();
}
void ScopeStack::pushBreakTarget(Statement *switchStatement,
llvm::BasicBlock *targetBlock) {
breakTargets.push_back({targetBlock, currentCleanupScope(), switchStatement});
}
void ScopeStack::popBreakTarget() { breakTargets.pop_back(); }
void ScopeStack::addLabelTarget(Identifier *labelName,
llvm::BasicBlock *targetBlock) {
labelTargets[labelName] = {targetBlock, currentCleanupScope(), nullptr};
// See whether any of the unresolved gotos target this label, and resolve
// those that do.
std::vector<GotoJump> &unresolved = currentUnresolvedGotos();
size_t i = 0;
while (i < unresolved.size()) {
if (unresolved[i].targetLabel != labelName) {
++i;
continue;
}
unresolved[i].tentativeTarget->replaceAllUsesWith(targetBlock);
unresolved[i].tentativeTarget->eraseFromParent();
unresolved.erase(unresolved.begin() + i);
}
}
void ScopeStack::jumpToLabel(Loc loc, Identifier *labelName) {
// If we have already seen that label, branch to it, executing any cleanups
// as necessary.
auto it = labelTargets.find(labelName);
if (it != labelTargets.end()) {
runCleanups(it->second.cleanupScope, it->second.targetBlock);
return;
}
llvm::BasicBlock *target =
llvm::BasicBlock::Create(irs.context(), "goto.unresolved", irs.topfunc());
irs.ir->CreateBr(target);
currentUnresolvedGotos().emplace_back(loc, irs.scopebb(), target, labelName);
}
void ScopeStack::jumpToStatement(std::vector<JumpTarget> &targets,
Statement *loopOrSwitchStatement) {
for (auto it = targets.rbegin(), end = targets.rend(); it != end; ++it) {
if (it->targetStatement == loopOrSwitchStatement) {
runCleanups(it->cleanupScope, it->targetBlock);
return;
}
}
assert(false && "Target for labeled break not found.");
}
void ScopeStack::jumpToClosest(std::vector<JumpTarget> &targets) {
assert(!targets.empty() &&
"Encountered break/continue but no loop in scope.");
JumpTarget &t = targets.back();
runCleanups(t.cleanupScope, t.targetBlock);
}
std::vector<GotoJump> &ScopeStack::currentUnresolvedGotos() {
return cleanupScopes.empty() ? topLevelUnresolvedGotos
: cleanupScopes.back().unresolvedGotos;
}
std::vector<llvm::BasicBlock *> &ScopeStack::currentLandingPads() {
return cleanupScopes.empty() ? topLevelLandingPads
: cleanupScopes.back().landingPads;
}
llvm::BasicBlock *&ScopeStack::getLandingPadRef(CleanupCursor scope) {
auto &pads = cleanupScopes.empty() ? topLevelLandingPads
: cleanupScopes[scope].landingPads;
if (pads.empty()) {
// Have not encountered any catches (for which we would push a scope) or
// calls to throwing functions (where we would have already executed
// this if) in this cleanup scope yet.
pads.push_back(nullptr);
}
return pads.back();
}
llvm::BasicBlock *ScopeStack::getLandingPad() {
llvm::BasicBlock *&landingPad = getLandingPadRef(currentCleanupScope() - 1);
if (!landingPad) {
#if LDC_LLVM_VER >= 308
if (useMSVCEH()) {
assert(currentCleanupScope() > 0);
landingPad = emitLandingPadMSVCEH(currentCleanupScope() - 1);
} else
#endif
landingPad = emitLandingPad();
}
return landingPad;
}
namespace {
llvm::LandingPadInst *createLandingPadInst(IRState &irs) {
LLType *retType =
LLStructType::get(LLType::getInt8PtrTy(irs.context()),
LLType::getInt32Ty(irs.context()), nullptr);
#if LDC_LLVM_VER >= 307
LLFunction *currentFunction = irs.func()->func;
if (!currentFunction->hasPersonalityFn()) {
LLFunction *personalityFn =
getRuntimeFunction(Loc(), irs.module, "_d_eh_personality");
currentFunction->setPersonalityFn(personalityFn);
}
return irs.ir->CreateLandingPad(retType, 0);
#else
LLFunction *personalityFn =
getRuntimeFunction(Loc(), irs.module, "_d_eh_personality");
return irs.ir->CreateLandingPad(retType, personalityFn, 0);
#endif
}
}
#if LDC_LLVM_VER >= 308
llvm::BasicBlock *ScopeStack::emitLandingPadMSVCEH(CleanupCursor scope) {
LLFunction *currentFunction = irs.func()->func;
if (!currentFunction->hasPersonalityFn()) {
const char *personality = "__CxxFrameHandler3";
LLFunction *personalityFn =
getRuntimeFunction(Loc(), irs.module, personality);
currentFunction->setPersonalityFn(personalityFn);
}
if (scope == 0)
return runCleanupPad(scope, nullptr);
llvm::BasicBlock *&pad = getLandingPadRef(scope - 1);
if (!pad)
pad = emitLandingPadMSVCEH(scope - 1);
return runCleanupPad(scope, pad);
}
#endif
llvm::BasicBlock *ScopeStack::emitLandingPad() {
// save and rewrite scope
IRScope savedIRScope = irs.scope();
llvm::BasicBlock *beginBB =
llvm::BasicBlock::Create(irs.context(), "landingPad", irs.topfunc());
irs.scope() = IRScope(beginBB);
llvm::LandingPadInst *landingPad = createLandingPadInst(irs);
// Stash away the exception object pointer and selector value into their
// stack slots.
llvm::Value *ehPtr = DtoExtractValue(landingPad, 0);
irs.ir->CreateStore(ehPtr, irs.funcGen().getOrCreateEhPtrSlot());
llvm::Value *ehSelector = DtoExtractValue(landingPad, 1);
if (!irs.funcGen().ehSelectorSlot) {
irs.funcGen().ehSelectorSlot =
DtoRawAlloca(ehSelector->getType(), 0, "eh.selector");
}
irs.ir->CreateStore(ehSelector, irs.funcGen().ehSelectorSlot);
// Add landingpad clauses, emit finallys and 'if' chain to catch the
// exception.
CleanupCursor lastCleanup = currentCleanupScope();
for (auto it = catchScopes.rbegin(), end = catchScopes.rend(); it != end;
++it) {
// Insert any cleanups in between the last catch we ran (i.e. tested for
// and found that the type does not match) and this one.
assert(lastCleanup >= it->cleanupScope);
if (lastCleanup > it->cleanupScope) {
landingPad->setCleanup(true);
llvm::BasicBlock *afterCleanupBB = llvm::BasicBlock::Create(
irs.context(), beginBB->getName() + llvm::Twine(".after.cleanup"),
irs.topfunc());
runCleanups(lastCleanup, it->cleanupScope, afterCleanupBB);
irs.scope() = IRScope(afterCleanupBB);
lastCleanup = it->cleanupScope;
}
// Add the ClassInfo reference to the landingpad instruction so it is
// emitted to the EH tables.
landingPad->addClause(it->classInfoPtr);
llvm::BasicBlock *mismatchBB = llvm::BasicBlock::Create(
irs.context(), beginBB->getName() + llvm::Twine(".mismatch"),
irs.topfunc());
// "Call" llvm.eh.typeid.for, which gives us the eh selector value to
// compare the landing pad selector value with.
llvm::Value *ehTypeId =
irs.ir->CreateCall(GET_INTRINSIC_DECL(eh_typeid_for),
DtoBitCast(it->classInfoPtr, getVoidPtrType()));
// Compare the selector value from the unwinder against the expected
// one and branch accordingly.
irs.ir->CreateCondBr(
irs.ir->CreateICmpEQ(irs.ir->CreateLoad(irs.funcGen().ehSelectorSlot),
ehTypeId),
it->bodyBlock, mismatchBB, it->branchWeights);
irs.scope() = IRScope(mismatchBB);
}
// No catch matched. Execute all finallys and resume unwinding.
if (lastCleanup > 0) {
landingPad->setCleanup(true);
runCleanups(lastCleanup, 0, irs.funcGen().getOrCreateResumeUnwindBlock());
} else if (!catchScopes.empty()) {
// Directly convert the last mismatch branch into a branch to the
// unwind resume block.
irs.scopebb()->replaceAllUsesWith(
irs.funcGen().getOrCreateResumeUnwindBlock());
irs.scopebb()->eraseFromParent();
} else {
irs.ir->CreateBr(irs.funcGen().getOrCreateResumeUnwindBlock());
}
irs.scope() = savedIRScope;
return beginBB;
}
llvm::AllocaInst *FuncGenState::getOrCreateEhPtrSlot() {
if (!ehPtrSlot) {
ehPtrSlot = DtoRawAlloca(getVoidPtrType(), 0, "eh.ptr");
}
return ehPtrSlot;
}
llvm::BasicBlock *FuncGenState::getOrCreateResumeUnwindBlock() {
assert(irFunc.func == irs.topfunc() &&
"Should only access unwind resume block while emitting function.");
if (!resumeUnwindBlock) {
resumeUnwindBlock =
llvm::BasicBlock::Create(irs.context(), "eh.resume", irFunc.func);
llvm::BasicBlock *oldBB = irs.scopebb();
irs.scope() = IRScope(resumeUnwindBlock);
llvm::Function *resumeFn =
getRuntimeFunction(Loc(), irs.module, "_d_eh_resume_unwind");
irs.ir->CreateCall(resumeFn, DtoLoad(getOrCreateEhPtrSlot()));
irs.ir->CreateUnreachable();
irs.scope() = IRScope(oldBB);
}
return resumeUnwindBlock;
}

gen/funcgenstate.h Normal file

@@ -0,0 +1,515 @@
//===-- gen/funcgenstate.h - Function code generation state -----*- C++ -*-===//
//
// LDC the LLVM D compiler
//
// This file is distributed under the BSD-style LDC license. See the LICENSE
// file for details.
//
//===----------------------------------------------------------------------===//
//
// "Global" transitory state kept while emitting LLVM IR for the body of a
// single function, with FuncGenState being the top-level such entity.
//
//===----------------------------------------------------------------------===//
#ifndef LDC_GEN_FUNCGENSTATE_H
#define LDC_GEN_FUNCGENSTATE_H
#include "gen/irstate.h"
#include "gen/pgo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/CallSite.h"
#include <vector>
class Identifier;
struct IRState;
class Statement;
namespace llvm {
class AllocaInst;
class BasicBlock;
class Constant;
class MDNode;
class Value;
}
/// Represents a position on the stack of currently active cleanup scopes.
///
/// Since we always need to run a contiguous part of the stack (or all) in
/// order, two cursors (one of which is usually the currently top of the stack)
/// are enough to identify a sequence of cleanups to run.
using CleanupCursor = size_t;
/// Stores information needed to correctly jump to a given label or loop/switch
/// statement (break/continue can be labeled, but are not necessarily).
struct JumpTarget {
/// The basic block to ultimately branch to.
llvm::BasicBlock *targetBlock = nullptr;
/// The index of the target in the stack of active cleanup scopes.
///
/// When generating code for a jump to this label, the cleanups between
/// the current depth and that of the level will be emitted. Note that
/// we need to handle only one direction (towards the root of the stack)
/// because D forbids gotos into try or finally blocks.
// TODO: We might not be able to detect illegal jumps across try-finally
// blocks by only storing the index.
CleanupCursor cleanupScope;
/// Keeps target of the associated loop or switch statement so we can
/// handle both unlabeled and labeled jumps.
Statement *targetStatement = nullptr;
JumpTarget() = default;
JumpTarget(llvm::BasicBlock *targetBlock, CleanupCursor cleanupScope,
Statement *targetStatement);
};
/// Keeps track of source and target label of a goto.
///
/// Used if we cannot immediately emit all the code for a jump because we have
/// not generated code for the target yet.
struct GotoJump {
// The location of the goto instruction, for error reporting.
Loc sourceLoc;
/// The basic block which contains the goto as its terminator.
llvm::BasicBlock *sourceBlock = nullptr;
/// While we have not found the actual branch target, we might need to
/// create a "fake" basic block in order to be able to execute the cleanups
/// (we do not keep branching information around after leaving the scope).
llvm::BasicBlock *tentativeTarget = nullptr;
/// The label to target with the goto.
Identifier *targetLabel = nullptr;
GotoJump(Loc loc, llvm::BasicBlock *sourceBlock,
llvm::BasicBlock *tentativeTarget, Identifier *targetLabel);
};
/// Describes a particular way to leave a cleanup scope and continue execution
/// with another one.
///
/// In general, there can be multiple ones (normal exit, early returns,
/// breaks/continues, exceptions, and so on).
struct CleanupExitTarget {
explicit CleanupExitTarget(llvm::BasicBlock *t) : branchTarget(t) {}
/// The target basic block to branch to after running the cleanup.
llvm::BasicBlock *branchTarget = nullptr;
/// The basic blocks that want to continue with this target after running
/// the cleanup. We need to keep this information around so we can insert
/// stores to the branch selector variable when converting from one to two
/// targets.
std::vector<llvm::BasicBlock *> sourceBlocks;
/// MSVC: The basic blocks that are executed when going this route
std::vector<llvm::BasicBlock *> cleanupBlocks;
};
/// Represents a scope (in abstract terms, not curly braces) that requires a
/// piece of cleanup code to be run whenever it is left, whether as part of
/// normal control flow or exception unwinding.
///
/// This includes finally blocks (which are also generated by the frontend for
/// running the destructors of non-temporary variables) and the destructors of
/// temporaries (which are unfortunately not lowered by the frontend).
///
/// Our goal is to only emit each cleanup once such as to avoid generating an
/// exponential number of basic blocks/landing pads for handling all the
/// different ways of exiting a deeply nested scope (consider e.g. ten
/// local variables with destructors, each of which might throw itself).
class CleanupScope {
public:
CleanupScope(llvm::BasicBlock *beginBlock, llvm::BasicBlock *endBlock)
: beginBlock(beginBlock), endBlock(endBlock) {}
/// The basic block to branch to for running the cleanup.
llvm::BasicBlock *beginBlock = nullptr;
/// The basic block that contains the end of the cleanup code (is different
/// from beginBlock if the cleanup contains control flow).
llvm::BasicBlock *endBlock = nullptr;
/// The branch selector variable, or null if not created yet.
llvm::AllocaInst *branchSelector = nullptr;
/// Stores all possible targets blocks after running this cleanup, along
/// with what predecessors want to continue at that target. The index in
/// the vector corresponds to the branch selector value for that target.
// Note: This is of course a bad choice of data structure for many targets
// complexity-wise. However, situations where this matters should be
// exceedingly rare in both hand-written as well as generated code.
std::vector<CleanupExitTarget> exitTargets;
/// Keeps track of all the gotos originating from somewhere inside this
/// scope for which we have not found the label yet (because it occurs
/// lexically later in the function).
// Note: Should also be a dense map from source block to the rest of the
// data if we expect many gotos.
std::vector<GotoJump> unresolvedGotos;
/// Caches landing pads generated for catches at this cleanup scope level.
///
/// One element is pushed to the back on each time a catch block is entered,
/// and popped again once it is left. If the corresponding landing pad has
/// not been generated yet (this is done lazily), the pointer is null.
std::vector<llvm::BasicBlock *> landingPads;
/// MSVC: The original basic blocks that are executed for beginBlock to
/// endBlock
std::vector<llvm::BasicBlock *> cleanupBlocks;
};
/// Stores information to be able to branch to a catch clause if it matches.
///
/// Each catch body is emitted only once, but may be targeted from many landing
/// pads (in case of nested catch or cleanup scopes).
struct CatchScope {
/// The ClassInfo reference corresponding to the type to match the
/// exception object against.
llvm::Constant *classInfoPtr = nullptr;
/// The block to branch to if the exception type matches.
llvm::BasicBlock *bodyBlock = nullptr;
/// The cleanup scope stack level corresponding to this catch.
CleanupCursor cleanupScope;
// PGO branch weights for the exception type match branch.
// (first weight is for match, second is for mismatch)
llvm::MDNode *branchWeights = nullptr;
CatchScope(llvm::Constant *classInfoPtr, llvm::BasicBlock *bodyBlock,
CleanupCursor cleanupScope, llvm::MDNode *branchWeights = nullptr);
};
/// Keeps track of active (abstract) scopes in a function that influence code
/// generation of their contents. This includes cleanups (finally blocks,
/// destructors), try/catch blocks and labels for goto/break/continue.
///
/// Note that the entire code generation process, and this class in particular,
/// depends heavily on the fact that we visit the statement/expression tree in
/// its natural order, i.e. depth-first and in lexical order. In other words,
/// the code here expects that after a cleanup/catch/loop/etc. has been pushed,
/// the contents of the block are generated, and it is then popped again
/// afterwards. This is also encoded in the fact that none of the methods for
/// branching/running cleanups take a cursor for describing the "source" scope,
/// it is always assumed to be the current one.
///
/// Handling of break/continue could be moved into a separate layer that uses
/// the rest of the ScopeStack API, as it (in contrast to goto) never requires
/// resolving forward references across cleanup scopes.
class ScopeStack {
public:
explicit ScopeStack(IRState &irs) : irs(irs) {}
~ScopeStack();
/// Registers a piece of cleanup code to be run.
///
/// The end block is expected not to contain a terminator yet. It will be
/// added by ScopeStack as needed, based on what follow-up blocks code from
/// within this scope will branch to.
void pushCleanup(llvm::BasicBlock *beginBlock, llvm::BasicBlock *endBlock);
/// Terminates the current basic block with a branch to the cleanups needed
/// for leaving the current scope and continuing execution at the target
/// scope stack level.
///
/// After running them, execution will branch to the given basic block.
void runCleanups(CleanupCursor targetScope, llvm::BasicBlock *continueWith) {
runCleanups(currentCleanupScope(), targetScope, continueWith);
}
/// Like #runCleanups(), but runs all of them until the top-level scope is
/// reached.
void runAllCleanups(llvm::BasicBlock *continueWith);
#if LDC_LLVM_VER >= 308
void runCleanupCopies(CleanupCursor sourceScope, CleanupCursor targetScope,
llvm::BasicBlock *continueWith);
llvm::BasicBlock *runCleanupPad(CleanupCursor scope,
llvm::BasicBlock *unwindTo);
#endif
/// Pops all the cleanups between the current scope and the target cursor.
///
/// This does not insert any cleanup calls, use #runCleanups() beforehand.
void popCleanups(CleanupCursor targetScope);
/// Returns a cursor that identifies the current cleanup scope, to be later
/// used with #runCleanups() et al.
///
/// Note that this cursor is only valid as long as the current scope is not
/// popped.
CleanupCursor currentCleanupScope() { return cleanupScopes.size(); }
/// Registers a catch block to be taken into consideration when an exception
/// is thrown within the current scope.
///
/// When a potentially throwing function call is emitted, a landing pad will
/// be emitted to compare the dynamic type info of the exception against the
/// given ClassInfo constant and to branch to the given body block if it
/// matches. The registered catch blocks are maintained on a stack, with the
/// top-most (i.e. last pushed, innermost) taking precedence.
void pushCatch(llvm::Constant *classInfoPtr, llvm::BasicBlock *bodyBlock,
llvm::MDNode *matchWeights = nullptr);
/// Unregisters the last registered catch block.
void popCatch();
size_t currentCatchScope() { return catchScopes.size(); }
#if LDC_LLVM_VER >= 308
/// MSVC: catch and cleanup code is emitted as funclets and needs
/// to be referenced from inner pads and calls
void pushFunclet(llvm::Value *funclet) { funclets.push_back(funclet); }
void popFunclet() { funclets.pop_back(); }
llvm::Value *getFunclet() {
return funclets.empty() ? nullptr : funclets.back();
}
llvm::Value *getFuncletToken() {
return funclets.empty() ? llvm::ConstantTokenNone::get(irs.context())
: funclets.back();
}
#endif
/// Registers a loop statement to be used as a target for break/continue
/// statements in the current scope.
void pushLoopTarget(Statement *loopStatement,
llvm::BasicBlock *continueTarget,
llvm::BasicBlock *breakTarget);
/// Pops the last pushed loop target, so it is no longer taken into
/// consideration for resolving breaks/continues.
void popLoopTarget();
/// Registers a statement to be used as a target for break statements in the
/// current scope (currently applies only to switch statements).
void pushBreakTarget(Statement *switchStatement,
llvm::BasicBlock *targetBlock);
/// Unregisters the last registered break target.
void popBreakTarget();
/// Adds a label to serve as a target for goto statements.
///
/// Also causes in-flight forward references to this label to be resolved.
void addLabelTarget(Identifier *labelName, llvm::BasicBlock *targetBlock);
/// Emits a call or invoke to the given callee, depending on whether there
/// are catches/cleanups active or not.
template <typename T>
llvm::CallSite callOrInvoke(llvm::Value *callee, const T &args,
const char *name = "");
/// Terminates the current basic block with an unconditional branch to the
/// given label, along with the cleanups to execute on the way there.
///
/// Legal forward references (i.e. within the same function, and not into
/// a cleanup scope) will be resolved.
void jumpToLabel(Loc loc, Identifier *labelName);
/// Terminates the current basic block with an unconditional branch to the
/// continue target generated by the given loop statement, along with
/// the cleanups to execute on the way there.
void continueWithLoop(Statement *loopStatement) {
jumpToStatement(continueTargets, loopStatement);
}
/// Terminates the current basic block with an unconditional branch to the
/// closest loop continue target, along with the cleanups to execute on
/// the way there.
void continueWithClosest() { jumpToClosest(continueTargets); }
/// Terminates the current basic block with an unconditional branch to the
/// break target generated by the given loop or switch statement, along with
/// the cleanups to execute on the way there.
void breakToStatement(Statement *loopOrSwitchStatement) {
jumpToStatement(breakTargets, loopOrSwitchStatement);
}
/// Terminates the current basic block with an unconditional branch to the
/// closest break statement target, along with the cleanups to execute on
/// the way there.
void breakToClosest() { jumpToClosest(breakTargets); }
/// get existing or emit new landing pad
llvm::BasicBlock *getLandingPad();
private:
/// Internal version that allows specifying the scope at which to start
/// emitting the cleanups.
void runCleanups(CleanupCursor sourceScope, CleanupCursor targetScope,
llvm::BasicBlock *continueWith);
std::vector<GotoJump> &currentUnresolvedGotos();
std::vector<llvm::BasicBlock *> &currentLandingPads();
llvm::BasicBlock *&getLandingPadRef(CleanupCursor scope);
/// Emits a landing pad to honor all the active cleanups and catches.
llvm::BasicBlock *emitLandingPad();
#if LDC_LLVM_VER >= 308
llvm::BasicBlock *emitLandingPadMSVCEH(CleanupCursor scope);
#endif
/// Unified implementation for labeled break/continue.
void jumpToStatement(std::vector<JumpTarget> &targets,
Statement *loopOrSwitchStatement);
/// Unified implementation for unlabeled break/continue.
void jumpToClosest(std::vector<JumpTarget> &targets);
/// The ambient IRState. For legacy reasons, there is currently a cyclic
/// dependency between the two.
IRState &irs;
using LabelTargetMap = llvm::DenseMap<Identifier *, JumpTarget>;
/// The labels we have encountered in this function so far, accessed by
/// their associated identifier (i.e. the name of the label).
LabelTargetMap labelTargets;
///
std::vector<JumpTarget> breakTargets;
///
std::vector<JumpTarget> continueTargets;
/// cleanupScopes[i] contains the information to go from
/// currentCleanupScope() == i + 1 to currentCleanupScope() == i.
std::vector<CleanupScope> cleanupScopes;
///
std::vector<CatchScope> catchScopes;
/// Gotos which we were not able to resolve to any cleanup scope, but which
/// might still be defined later in the function at top level. If there are
/// any left on function exit, it is an error (e.g. because the user tried
/// to goto into a finally block, etc.).
std::vector<GotoJump> topLevelUnresolvedGotos;
/// Caches landing pads generated for catches without any cleanups to run
/// (null if not yet emitted, one element is pushed to/popped from the back
/// on entering/leaving a catch block).
std::vector<llvm::BasicBlock *> topLevelLandingPads;
/// MSVC: stack of currently built catch/cleanup funclets
std::vector<llvm::Value *> funclets;
};
template <typename T>
llvm::CallSite ScopeStack::callOrInvoke(llvm::Value *callee, const T &args,
const char *name) {
// If this is a direct call, we might be able to use the callee attributes
// to our advantage.
llvm::Function *calleeFn = llvm::dyn_cast<llvm::Function>(callee);
// Intrinsics don't support invoking and 'nounwind' functions don't need it.
const bool doesNotThrow =
calleeFn && (calleeFn->isIntrinsic() || calleeFn->doesNotThrow());
#if LDC_LLVM_VER >= 308
// calls inside a funclet must be annotated with its value
llvm::SmallVector<llvm::OperandBundleDef, 2> BundleList;
if (auto funclet = getFunclet())
BundleList.push_back(llvm::OperandBundleDef("funclet", funclet));
#endif
if (doesNotThrow || (cleanupScopes.empty() && catchScopes.empty())) {
llvm::CallInst *call = irs.ir->CreateCall(callee, args,
#if LDC_LLVM_VER >= 308
BundleList,
#endif
name);
if (calleeFn) {
call->setAttributes(calleeFn->getAttributes());
}
return call;
}
llvm::BasicBlock *landingPad = getLandingPad();
llvm::BasicBlock *postinvoke = llvm::BasicBlock::Create(
irs.context(), "postinvoke", irs.topfunc(), landingPad);
llvm::InvokeInst *invoke =
irs.ir->CreateInvoke(callee, postinvoke, landingPad, args,
#if LDC_LLVM_VER >= 308
BundleList,
#endif
name);
if (calleeFn) {
invoke->setAttributes(calleeFn->getAttributes());
}
irs.scope() = IRScope(postinvoke);
return invoke;
}
/// The "global" transitory state necessary for emitting the body of a certain
/// function.
///
/// For general metadata associated with a function that persists for the entire
/// IRState lifetime (i.e. llvm::Module emission process) see IrFunction.
class FuncGenState {
public:
explicit FuncGenState(IrFunction &irFunc, IRState &irs)
: irFunc(irFunc), scopes(irs), irs(irs) {}
FuncGenState(FuncGenState const &) = delete;
FuncGenState &operator=(FuncGenState const &) = delete;
/// Returns the stack slot that contains the exception object pointer while a
/// landing pad is active, lazily creating it as needed.
///
/// This value must dominate all uses; first storing it, and then loading it
/// when calling _d_eh_resume_unwind. If we take a select at the end of any
/// cleanups on the way to the latter, the value must also dominate all other
/// predecessors of the cleanup. Thus, we just use a single alloca in the
/// entry BB of the function.
llvm::AllocaInst *getOrCreateEhPtrSlot();
/// Returns the basic block with the call to the unwind resume function.
///
/// Because of ehPtrSlot, we do not need more than one, so might as well
/// save on code size and reuse it.
llvm::BasicBlock *getOrCreateResumeUnwindBlock();
// The function that code is being generated for.
IrFunction &irFunc;
/// The stack of scopes inside the function.
ScopeStack scopes;
// PGO information
CodeGenPGO pgo;
/// The marker at which to insert `alloca`s in the function entry bb.
llvm::Instruction *allocapoint = nullptr;
/// alloca for the nested context of this function
llvm::Value *nestedVar = nullptr;
/// The basic block with the return instruction.
llvm::BasicBlock *retBlock = nullptr;
/// A stack slot containing the return value, for functions that return by
/// value.
llvm::AllocaInst *retValSlot = nullptr;
/// Similar story to ehPtrSlot, but for the selector value.
llvm::AllocaInst *ehSelectorSlot = nullptr;
private:
IRState &irs;
llvm::AllocaInst *ehPtrSlot = nullptr;
llvm::BasicBlock *resumeUnwindBlock = nullptr;
};
#endif
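
The push/pop lifecycle that the changed call sites below rely on looks roughly
like this (a condensed sketch of the pattern used in DtoDefineFunction and
DtoDefineNakedFunction, not a verbatim excerpt):

    // Entering body emission: push a fresh per-function state...
    gIR->funcGenStates.emplace_back(new FuncGenState(*irFunc, *gIR));
    auto &funcGen = gIR->funcGen(); // convenience accessor for the top-most entry

    // ...emit the body against funcGen.scopes, funcGen.pgo, funcGen.allocapoint, ...

    // ...and pop the state again once the function is done.
    assert(&gIR->funcGen() == &funcGen);
    gIR->funcGenStates.pop_back();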


@@ -22,6 +22,7 @@
 #include "gen/arrays.h"
 #include "gen/classes.h"
 #include "gen/dvalue.h"
+#include "gen/funcgenstate.h"
 #include "gen/function-inlining.h"
 #include "gen/inlineir.h"
 #include "gen/irstate.h"
@@ -863,24 +864,21 @@ void DtoDefineFunction(FuncDeclaration *fd, bool linkageAvailableExternally) {
   }
   IrFunction *irFunc = getIrFunc(fd);
-  IrFuncTy &irFty = irFunc->irFty;
   // debug info
   irFunc->diSubprogram = gIR->DBuilder.EmitSubProgram(fd);
-  Type *t = fd->type->toBasetype();
-  TypeFunction *f = static_cast<TypeFunction *>(t);
-  // assert(f->ctype);
-  llvm::Function *func = irFunc->func;
-  // is there a body?
-  if (fd->fbody == nullptr) {
+  if (!fd->fbody) {
     return;
   }
   IF_LOG Logger::println("Doing function body for: %s", fd->toChars());
-  gIR->functions.push_back(irFunc);
+  gIR->funcGenStates.emplace_back(new FuncGenState(*irFunc, *gIR));
+  auto &funcGen = gIR->funcGen();
+  const auto f = static_cast<TypeFunction *>(fd->type->toBasetype());
+  IrFuncTy &irFty = irFunc->irFty;
+  llvm::Function *func = irFunc->func;;
   const auto lwc = lowerFuncLinkage(fd);
   if (linkageAvailableExternally) {
@@ -917,7 +915,6 @@ void DtoDefineFunction(FuncDeclaration *fd, bool linkageAvailableExternally) {
   llvm::BasicBlock *beginbb =
       llvm::BasicBlock::Create(gIR->context(), "", func);
-  // assert(gIR->scopes.empty());
   gIR->scopes.push_back(IRScope(beginbb));
   // Set the FastMath options for this function scope.
@@ -932,7 +929,7 @@ void DtoDefineFunction(FuncDeclaration *fd, bool linkageAvailableExternally) {
   // matter at all
   llvm::Instruction *allocaPoint = new llvm::AllocaInst(
       LLType::getInt32Ty(gIR->context()), "alloca point", beginbb);
-  irFunc->allocapoint = allocaPoint;
+  funcGen.allocapoint = allocaPoint;
   // debug info - after all allocas, but before any llvm.dbg.declare etc
   gIR->DBuilder.EmitFuncStart(fd);
@@ -986,46 +983,39 @@ void DtoDefineFunction(FuncDeclaration *fd, bool linkageAvailableExternally) {
   defineParameters(irFty, *fd->parameters);
   // Initialize PGO state for this function
-  irFunc->pgo.assignRegionCounters(fd, irFunc->func);
+  funcGen.pgo.assignRegionCounters(fd, irFunc->func);
-  {
-    ScopeStack scopeStack(gIR);
-    irFunc->scopes = &scopeStack;
-    DtoCreateNestedContext(fd);
-    if (fd->vresult && !fd->vresult->nestedrefs.dim) // FIXME: not sure here :/
-    {
-      DtoVarDeclaration(fd->vresult);
-    }
-    // D varargs: prepare _argptr and _arguments
-    if (f->linkage == LINKd && f->varargs == 1) {
-      // allocate _argptr (of type core.stdc.stdarg.va_list)
-      Type *const argptrType = Type::tvalist->semantic(fd->loc, fd->_scope);
-      LLValue *argptrMem = DtoAlloca(argptrType, "_argptr_mem");
-      irFunc->_argptr = argptrMem;
-      // initialize _argptr with a call to the va_start intrinsic
-      DLValue argptrVal(argptrType, argptrMem);
-      LLValue *llAp = gABI->prepareVaStart(&argptrVal);
-      llvm::CallInst::Create(GET_INTRINSIC_DECL(vastart), llAp, "",
-                             gIR->scopebb());
-      // copy _arguments to a memory location
-      irFunc->_arguments =
-          DtoAllocaDump(irFunc->_arguments, 0, "_arguments_mem");
-    }
-    irFunc->pgo.emitCounterIncrement(fd->fbody);
-    irFunc->pgo.setCurrentStmt(fd->fbody);
-    // output function body
-    Statement_toIR(fd->fbody, gIR);
-    irFunc->scopes = nullptr;
-  }
+  DtoCreateNestedContext(funcGen);
+  if (fd->vresult && !fd->vresult->nestedrefs.dim) // FIXME: not sure here :/
+  {
+    DtoVarDeclaration(fd->vresult);
+  }
+  // D varargs: prepare _argptr and _arguments
+  if (f->linkage == LINKd && f->varargs == 1) {
+    // allocate _argptr (of type core.stdc.stdarg.va_list)
+    Type *const argptrType = Type::tvalist->semantic(fd->loc, fd->_scope);
+    LLValue *argptrMem = DtoAlloca(argptrType, "_argptr_mem");
+    irFunc->_argptr = argptrMem;
+    // initialize _argptr with a call to the va_start intrinsic
+    DLValue argptrVal(argptrType, argptrMem);
+    LLValue *llAp = gABI->prepareVaStart(&argptrVal);
+    llvm::CallInst::Create(GET_INTRINSIC_DECL(vastart), llAp, "",
+                           gIR->scopebb());
+    // copy _arguments to a memory location
+    irFunc->_arguments =
+        DtoAllocaDump(irFunc->_arguments, 0, "_arguments_mem");
+  }
+  funcGen.pgo.emitCounterIncrement(fd->fbody);
+  funcGen.pgo.setCurrentStmt(fd->fbody);
+  // output function body
+  Statement_toIR(fd->fbody, gIR);
   llvm::BasicBlock *bb = gIR->scopebb();
   if (pred_begin(bb) == pred_end(bb) &&
       bb != &bb->getParent()->getEntryBlock()) {
@@ -1057,14 +1047,15 @@ void DtoDefineFunction(FuncDeclaration *fd, bool linkageAvailableExternally) {
   // erase alloca point
   if (allocaPoint->getParent()) {
+    funcGen.allocapoint = nullptr;
     allocaPoint->eraseFromParent();
+    allocaPoint = nullptr;
   }
-  allocaPoint = nullptr;
-  gIR->func()->allocapoint = nullptr;
   gIR->scopes.pop_back();
-  gIR->functions.pop_back();
+  assert(&gIR->funcGen() == &funcGen);
+  gIR->funcGenStates.pop_back();
 }
 ////////////////////////////////////////////////////////////////////////////////


@@ -127,7 +127,7 @@ DValue *DtoInlineIRExpr(Loc &loc, FuncDeclaration *fdecl,
   // is needed e.g. when the parent function has "unsafe-fp-math"="true"
   // applied.
   {
-    assert(!gIR->functions.empty() && "Inline ir outside function");
+    assert(!gIR->funcGenStates.empty() && "Inline ir outside function");
     auto enclosingFunc = gIR->topfunc();
     assert(enclosingFunc);
     copyFnAttributes(fun, enclosingFunc);


@@ -11,6 +11,7 @@
 #include "declaration.h"
 #include "mtype.h"
 #include "statement.h"
+#include "gen/funcgenstate.h"
 #include "gen/llvm.h"
 #include "gen/tollvm.h"
 #include "ir/irfunction.h"
@@ -44,19 +45,23 @@ IRState::IRState(const char *name, llvm::LLVMContext &context)
   asmBlock = nullptr;
 }
+IRState::~IRState() {}
+FuncGenState &IRState::funcGen() {
+  assert(!funcGenStates.empty() && "Function stack is empty!");
+  return *funcGenStates.back();
+}
 IrFunction *IRState::func() {
-  assert(!functions.empty() && "Function stack is empty!");
-  return functions.back();
+  return &funcGen().irFunc;
 }
 llvm::Function *IRState::topfunc() {
-  assert(!functions.empty() && "Function stack is empty!");
-  return functions.back()->func;
+  return func()->func;
 }
 llvm::Instruction *IRState::topallocapoint() {
-  assert(!functions.empty() && "AllocaPoint stack is empty!");
-  return functions.back()->allocapoint;
+  return funcGen().allocapoint;
 }
 IRScope &IRState::scope() {
@@ -77,33 +82,33 @@ bool IRState::scopereturned() {
 LLCallSite IRState::CreateCallOrInvoke(LLValue *Callee, const char *Name) {
   LLSmallVector<LLValue *, 1> args;
-  return func()->scopes->callOrInvoke(Callee, args, Name);
+  return funcGen().scopes.callOrInvoke(Callee, args, Name);
 }
 LLCallSite IRState::CreateCallOrInvoke(LLValue *Callee, LLValue *Arg1,
                                        const char *Name) {
   LLValue *args[] = {Arg1};
-  return func()->scopes->callOrInvoke(Callee, args, Name);
+  return funcGen().scopes.callOrInvoke(Callee, args, Name);
 }
 LLCallSite IRState::CreateCallOrInvoke(LLValue *Callee, LLValue *Arg1,
                                        LLValue *Arg2, const char *Name) {
   LLValue *args[] = {Arg1, Arg2};
-  return func()->scopes->callOrInvoke(Callee, args, Name);
+  return funcGen().scopes.callOrInvoke(Callee, args, Name);
 }
 LLCallSite IRState::CreateCallOrInvoke(LLValue *Callee, LLValue *Arg1,
                                        LLValue *Arg2, LLValue *Arg3,
                                        const char *Name) {
   LLValue *args[] = {Arg1, Arg2, Arg3};
-  return func()->scopes->callOrInvoke(Callee, args, Name);
+  return funcGen().scopes.callOrInvoke(Callee, args, Name);
 }
 LLCallSite IRState::CreateCallOrInvoke(LLValue *Callee, LLValue *Arg1,
                                        LLValue *Arg2, LLValue *Arg3,
                                        LLValue *Arg4, const char *Name) {
   LLValue *args[] = {Arg1, Arg2, Arg3, Arg4};
-  return func()->scopes->callOrInvoke(Callee, args, Name);
+  return funcGen().scopes.callOrInvoke(Callee, args, Name);
 }
 bool IRState::emitArrayBoundsChecks() {
@@ -112,7 +117,7 @@ bool IRState::emitArrayBoundsChecks() {
   }
   // Safe functions only.
-  if (functions.empty()) {
+  if (funcGenStates.empty()) {
     return false;
   }
@@ -127,3 +132,13 @@ IRBuilder<> *IRBuilderHelper::operator->() {
   assert(b.GetInsertBlock() != NULL);
   return &b;
 }
+////////////////////////////////////////////////////////////////////////////////
+bool useMSVCEH() {
+#if LDC_LLVM_VER >= 308
+  return global.params.targetTriple->isWindowsMSVCEnvironment();
+#else
+  return false;
+#endif
+}


@@ -15,16 +15,16 @@
 #ifndef LDC_GEN_IRSTATE_H
 #define LDC_GEN_IRSTATE_H
-#include "aggregate.h"
-#include "root.h"
-#include "ir/iraggr.h"
-#include "ir/irvar.h"
-#include "gen/dibuilder.h"
 #include <deque>
-#include <list>
+#include <memory>
 #include <set>
 #include <sstream>
 #include <vector>
+#include "aggregate.h"
+#include "root.h"
+#include "gen/dibuilder.h"
+#include "ir/iraggr.h"
+#include "ir/irvar.h"
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ProfileData/InstrProfReader.h"
 #include "llvm/IR/CallSite.h"
@@ -35,7 +35,7 @@ class TargetMachine;
 class IndexedInstrProfReader;
 }
-// global ir state for current module
+class FuncGenState;
 struct IRState;
 struct TargetABI;
@@ -107,6 +107,7 @@ struct IRAsmBlock {
 // represents the module
 struct IRState {
   IRState(const char *name, llvm::LLVMContext &context);
+  ~IRState();
   llvm::Module module;
   llvm::LLVMContext &context() const { return module.getContext(); }
@@ -116,11 +117,12 @@ struct IRState {
   LLStructType *mutexType;
   LLStructType *moduleRefType;
-  // functions
-  typedef std::vector<IrFunction *> FunctionVector;
-  FunctionVector functions;
+  // Stack of currently codegen'd functions (more than one for lambdas or other
+  // nested functions, inlining-only codegen'ing, etc.), and some convenience
+  // accessors for the top-most one.
+  std::vector<std::unique_ptr<FuncGenState>> funcGenStates;
+  FuncGenState &funcGen();
   IrFunction *func();
   llvm::Function *topfunc();
   llvm::Instruction *topallocapoint();
@@ -198,4 +200,6 @@ struct IRState {
 void Statement_toIR(Statement *s, IRState *irs);
+bool useMSVCEH();
 #endif // LDC_GEN_IRSTATE_H


@@ -15,6 +15,7 @@
 #include "gen/classes.h"
 #include "gen/complex.h"
 #include "gen/dvalue.h"
+#include "gen/funcgenstate.h"
 #include "gen/functions.h"
 #include "gen/irstate.h"
 #include "gen/llvm.h"
@@ -262,7 +263,7 @@ void DtoAssert(Module *M, Loc &loc, DValue *msg) {
   args.push_back(DtoConstUint(loc.linnum));
   // call
-  gIR->func()->scopes->callOrInvoke(fn, args);
+  gIR->funcGen().scopes.callOrInvoke(fn, args);
   // after assert is always unreachable
   gIR->ir->CreateUnreachable();
@@ -290,7 +291,7 @@ void DtoGoto(Loc &loc, LabelDsymbol *target) {
     fatal();
   }
-  gIR->func()->scopes->jumpToLabel(loc, target->ident);
+  gIR->funcGen().scopes.jumpToLabel(loc, target->ident);
 }
 /******************************************************************************


@ -12,6 +12,7 @@
#include "statement.h" #include "statement.h"
#include "template.h" #include "template.h"
#include "gen/dvalue.h" #include "gen/dvalue.h"
#include "gen/funcgenstate.h"
#include "gen/irstate.h" #include "gen/irstate.h"
#include "gen/llvm.h" #include "gen/llvm.h"
#include "gen/llvmhelpers.h" #include "gen/llvmhelpers.h"
@ -138,7 +139,7 @@ void DtoDefineNakedFunction(FuncDeclaration *fd) {
IF_LOG Logger::println("DtoDefineNakedFunction(%s)", mangleExact(fd)); IF_LOG Logger::println("DtoDefineNakedFunction(%s)", mangleExact(fd));
LOG_SCOPE; LOG_SCOPE;
gIR->functions.push_back(getIrFunc(fd)); gIR->funcGenStates.emplace_back(new FuncGenState(*getIrFunc(fd), *gIR));
// we need to do special processing on the body, since we only want // we need to do special processing on the body, since we only want
// to allow actual inline asm blocks to reach the final asm output // to allow actual inline asm blocks to reach the final asm output
@ -233,7 +234,7 @@ void DtoDefineNakedFunction(FuncDeclaration *fd) {
gIR->module.appendModuleInlineAsm(asmstr.str()); gIR->module.appendModuleInlineAsm(asmstr.str());
asmstr.str(""); asmstr.str("");
gIR->functions.pop_back(); gIR->funcGenStates.pop_back();
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
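Note (hypothetical illustration, not part of this commit): the emplace_back / pop_back bracketing above, which reappears in the thunk codegen further down, is the pattern for entering and leaving a function's codegen. It could equally be wrapped in a small RAII guard, sketched here under the assumption of the FuncGenState constructor used elsewhere in the diff:

  // Hypothetical scope guard over IRState::funcGenStates.
  struct FuncGenStateScope {
    FuncGenStateScope(IRState &irs, IrFunction &irFunc) : irs(irs) {
      irs.funcGenStates.emplace_back(new FuncGenState(irFunc, irs));
    }
    ~FuncGenStateScope() { irs.funcGenStates.pop_back(); }
    IRState &irs;
  };
  // usage: FuncGenStateScope guard(*gIR, *getIrFunc(fd));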


@ -10,6 +10,7 @@
#include "target.h" #include "target.h"
#include "gen/nested.h" #include "gen/nested.h"
#include "gen/dvalue.h" #include "gen/dvalue.h"
#include "gen/funcgenstate.h"
#include "gen/functions.h" #include "gen/functions.h"
#include "gen/irstate.h" #include "gen/irstate.h"
#include "gen/llvmhelpers.h" #include "gen/llvmhelpers.h"
@ -65,11 +66,11 @@ DValue *DtoNestedVariable(Loc &loc, Type *astype, VarDeclaration *vd,
// get the nested context // get the nested context
LLValue *ctx = nullptr; LLValue *ctx = nullptr;
if (irfunc->nestedVar) { auto currentCtx = gIR->funcGen().nestedVar;
if (currentCtx) {
Logger::println("Using own nested context of current function"); Logger::println("Using own nested context of current function");
ctx = currentCtx;
ctx = irfunc->nestedVar; dwarfValue = currentCtx;
dwarfValue = ctx;
} else if (irfunc->decl->isMember2()) { } else if (irfunc->decl->isMember2()) {
Logger::println( Logger::println(
"Current function is member of nested class, loading vthis"); "Current function is member of nested class, loading vthis");
@ -213,23 +214,24 @@ LLValue *DtoNestedContext(Loc &loc, Dsymbol *sym) {
// The function we are currently in, and the constructed object/called // The function we are currently in, and the constructed object/called
// function might inherit a context pointer from. // function might inherit a context pointer from.
IrFunction *irfunc = gIR->func(); auto &funcGen = gIR->funcGen();
auto &irFunc = funcGen.irFunc;
bool fromParent = true; bool fromParent = true;
LLValue *val; LLValue *val;
if (irfunc->nestedVar) { if (funcGen.nestedVar) {
// if this func has its own vars that are accessed by nested funcs // if this func has its own vars that are accessed by nested funcs
// use its own context // use its own context
val = irfunc->nestedVar; val = funcGen.nestedVar;
fromParent = false; fromParent = false;
} else if (irfunc->nestArg) { } else if (irFunc.nestArg) {
// otherwise, it may have gotten a context from the caller // otherwise, it may have gotten a context from the caller
val = DtoLoad(irfunc->nestArg); val = DtoLoad(irFunc.nestArg);
} else if (irfunc->thisArg) { } else if (irFunc.thisArg) {
// or just have a this argument // or just have a this argument
AggregateDeclaration *ad = irfunc->decl->isMember2(); AggregateDeclaration *ad = irFunc.decl->isMember2();
val = ad->isClassDeclaration() ? DtoLoad(irfunc->thisArg) : irfunc->thisArg; val = ad->isClassDeclaration() ? DtoLoad(irFunc.thisArg) : irFunc.thisArg;
if (!ad->vthis) { if (!ad->vthis) {
// This is just a plain 'outer' reference of a class nested in a // This is just a plain 'outer' reference of a class nested in a
// function (but without any variables in the nested context). // function (but without any variables in the nested context).
@ -244,7 +246,7 @@ LLValue *DtoNestedContext(Loc &loc, Dsymbol *sym) {
// tries to call a nested function from the parent scope). // tries to call a nested function from the parent scope).
error(loc, error(loc,
"function %s is a nested function and cannot be accessed from %s", "function %s is a nested function and cannot be accessed from %s",
sym->toPrettyChars(), irfunc->decl->toPrettyChars()); sym->toPrettyChars(), irFunc.decl->toPrettyChars());
fatal(); fatal();
} }
return llvm::ConstantPointerNull::get(getVoidPtrType()); return llvm::ConstantPointerNull::get(getVoidPtrType());
@ -263,7 +265,7 @@ LLValue *DtoNestedContext(Loc &loc, Dsymbol *sym) {
if (frameToPass) { if (frameToPass) {
IF_LOG Logger::println("Parent frame is from %s", frameToPass->toChars()); IF_LOG Logger::println("Parent frame is from %s", frameToPass->toChars());
FuncDeclaration *ctxfd = irfunc->decl; FuncDeclaration *ctxfd = irFunc.decl;
IF_LOG Logger::println("Current function is %s", ctxfd->toChars()); IF_LOG Logger::println("Current function is %s", ctxfd->toChars());
if (fromParent) { if (fromParent) {
ctxfd = getParentFunc(ctxfd, true); ctxfd = getParentFunc(ctxfd, true);
@ -408,7 +410,8 @@ static void DtoCreateNestedContextType(FuncDeclaration *fd) {
irFunc.frameTypeAlignment = builder.overallAlignment(); irFunc.frameTypeAlignment = builder.overallAlignment();
} }
void DtoCreateNestedContext(FuncDeclaration *fd) { void DtoCreateNestedContext(FuncGenState &funcGen) {
const auto fd = funcGen.irFunc.decl;
IF_LOG Logger::println("DtoCreateNestedContext for %s", fd->toPrettyChars()); IF_LOG Logger::println("DtoCreateNestedContext for %s", fd->toPrettyChars());
LOG_SCOPE LOG_SCOPE
@ -416,9 +419,9 @@ void DtoCreateNestedContext(FuncDeclaration *fd) {
// construct nested variables array // construct nested variables array
if (fd->closureVars.dim > 0) { if (fd->closureVars.dim > 0) {
IrFunction *irfunction = getIrFunc(fd); auto &irFunc = funcGen.irFunc;
unsigned depth = irfunction->depth; unsigned depth = irFunc.depth;
LLStructType *frameType = irfunction->frameType; LLStructType *frameType = irFunc.frameType;
// Create frame for current function and append to frames list // Create frame for current function and append to frames list
LLValue *frame = nullptr; LLValue *frame = nullptr;
bool needsClosure = fd->needsClosure(); bool needsClosure = fd->needsClosure();
@ -427,17 +430,17 @@ void DtoCreateNestedContext(FuncDeclaration *fd) {
frame = DtoGcMalloc(fd->loc, frameType, ".frame"); frame = DtoGcMalloc(fd->loc, frameType, ".frame");
} else { } else {
unsigned alignment = unsigned alignment =
std::max(getABITypeAlign(frameType), irfunction->frameTypeAlignment); std::max(getABITypeAlign(frameType), irFunc.frameTypeAlignment);
frame = DtoRawAlloca(frameType, alignment, ".frame"); frame = DtoRawAlloca(frameType, alignment, ".frame");
} }
// copy parent frames into beginning // copy parent frames into beginning
if (depth != 0) { if (depth != 0) {
LLValue *src = irfunction->nestArg; LLValue *src = irFunc.nestArg;
if (!src) { if (!src) {
assert(irfunction->thisArg); assert(irFunc.thisArg);
assert(fd->isMember2()); assert(fd->isMember2());
LLValue *thisval = DtoLoad(irfunction->thisArg); LLValue *thisval = DtoLoad(irFunc.thisArg);
AggregateDeclaration *cd = fd->isMember2(); AggregateDeclaration *cd = fd->isMember2();
assert(cd); assert(cd);
assert(cd->vthis); assert(cd->vthis);
@ -463,8 +466,7 @@ void DtoCreateNestedContext(FuncDeclaration *fd) {
DtoAlignedStore(src, gep); DtoAlignedStore(src, gep);
} }
// store context in IrFunction funcGen.nestedVar = frame;
irfunction->nestedVar = frame;
// go through all nested vars and assign addresses where possible. // go through all nested vars and assign addresses where possible.
for (auto vd : fd->closureVars) { for (auto vd : fd->closureVars) {


@ -24,8 +24,11 @@
// Nested variable and context helpers // Nested variable and context helpers
/////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////
/// Creates the context value for a nested function. class FuncGenState;
void DtoCreateNestedContext(FuncDeclaration *fd);
/// Creates the nested struct alloca for the current function (if there are any
/// nested references to its variables).
void DtoCreateNestedContext(FuncGenState &funcGen);
/// Resolves the nested context for classes and structs with arbitrary nesting. /// Resolves the nested context for classes and structs with arbitrary nesting.
void DtoResolveNestedContext(Loc &loc, AggregateDeclaration *decl, void DtoResolveNestedContext(Loc &loc, AggregateDeclaration *decl,
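Note (illustrative only): with the new signature, callers hand over the active FuncGenState instead of the FuncDeclaration, so the created frame lands in funcGen.nestedVar rather than in IrFunction. A hedged sketch of a call site (the actual caller in the function-definition codegen is outside this excerpt):

  // Assumes gIR->funcGen() is the state of the function currently being defined.
  auto &funcGen = gIR->funcGen();
  DtoCreateNestedContext(funcGen); // sets funcGen.nestedVar if there are closureVars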


@ -17,6 +17,7 @@
#include "gen/classes.h" #include "gen/classes.h"
#include "gen/coverage.h" #include "gen/coverage.h"
#include "gen/dvalue.h" #include "gen/dvalue.h"
#include "gen/funcgenstate.h"
#include "gen/irstate.h" #include "gen/irstate.h"
#include "gen/llvm.h" #include "gen/llvm.h"
#include "gen/llvmhelpers.h" #include "gen/llvmhelpers.h"
@ -102,7 +103,7 @@ public:
stmt->loc.toChars()); stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
for (auto s : *stmt->statements) { for (auto s : *stmt->statements) {
@ -118,7 +119,7 @@ public:
IF_LOG Logger::println("ReturnStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("ReturnStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// emit dwarf stop point // emit dwarf stop point
@ -129,7 +130,8 @@ public:
// The LLVM value to return, or null for void returns. // The LLVM value to return, or null for void returns.
LLValue *returnValue = nullptr; LLValue *returnValue = nullptr;
IrFunction *const f = irs->func(); auto &funcGen = irs->funcGen();
IrFunction *const f = &funcGen.irFunc;
FuncDeclaration *const fd = f->decl; FuncDeclaration *const fd = f->decl;
LLFunction *const llFunc = f->func; LLFunction *const llFunc = f->func;
@ -147,15 +149,15 @@ public:
DLValue returnValue(f->type->next, sretPointer); DLValue returnValue(f->type->next, sretPointer);
// try to construct the return value in-place // try to construct the return value in-place
const auto initialCleanupScope = f->scopes->currentCleanupScope(); const auto initialCleanupScope = funcGen.scopes.currentCleanupScope();
const bool constructed = toInPlaceConstruction(&returnValue, stmt->exp); const bool constructed = toInPlaceConstruction(&returnValue, stmt->exp);
if (constructed) { if (constructed) {
// cleanup manually (otherwise done by toElemDtor()) // cleanup manually (otherwise done by toElemDtor())
if (f->scopes->currentCleanupScope() != initialCleanupScope) { if (funcGen.scopes.currentCleanupScope() != initialCleanupScope) {
auto endbb = llvm::BasicBlock::Create( auto endbb = llvm::BasicBlock::Create(
irs->context(), "inPlaceSretConstruct.success", llFunc); irs->context(), "inPlaceSretConstruct.success", llFunc);
f->scopes->runCleanups(initialCleanupScope, endbb); funcGen.scopes.runCleanups(initialCleanupScope, endbb);
f->scopes->popCleanups(initialCleanupScope); funcGen.scopes.popCleanups(initialCleanupScope);
irs->scope() = IRScope(endbb); irs->scope() = IRScope(endbb);
} }
} else { } else {
@ -177,9 +179,8 @@ public:
callPostblit(stmt->loc, stmt->exp, sretPointer); callPostblit(stmt->loc, stmt->exp, sretPointer);
} }
} }
} } else {
// the return type is not void, so this is a normal "register" return // the return type is not void, so this is a normal "register" return
else {
if (!stmt->exp && (llFunc == irs->mainFunc)) { if (!stmt->exp && (llFunc == irs->mainFunc)) {
returnValue = returnValue =
LLConstant::getNullValue(irs->mainFunc->getReturnType()); LLConstant::getNullValue(irs->mainFunc->getReturnType());
@ -241,14 +242,14 @@ public:
// just directly emit the return instruction. If there are cleanups to run // just directly emit the return instruction. If there are cleanups to run
// first, we need to store the return value to a stack slot, in which case // first, we need to store the return value to a stack slot, in which case
// we can use a shared return bb for all these cases. // we can use a shared return bb for all these cases.
const bool useRetValSlot = f->scopes->currentCleanupScope() != 0; const bool useRetValSlot = funcGen.scopes.currentCleanupScope() != 0;
const bool sharedRetBlockExists = !!f->retBlock; const bool sharedRetBlockExists = !!funcGen.retBlock;
if (useRetValSlot) { if (useRetValSlot) {
if (!sharedRetBlockExists) { if (!sharedRetBlockExists) {
f->retBlock = funcGen.retBlock =
llvm::BasicBlock::Create(irs->context(), "return", llFunc); llvm::BasicBlock::Create(irs->context(), "return", llFunc);
if (returnValue) { if (returnValue) {
f->retValSlot = funcGen.retValSlot =
DtoRawAlloca(returnValue->getType(), 0, "return.slot"); DtoRawAlloca(returnValue->getType(), 0, "return.slot");
} }
} }
@ -256,13 +257,13 @@ public:
// Create the store to the slot at the end of our current basic // Create the store to the slot at the end of our current basic
// block, before we run the cleanups. // block, before we run the cleanups.
if (returnValue) { if (returnValue) {
irs->ir->CreateStore(returnValue, f->retValSlot); irs->ir->CreateStore(returnValue, funcGen.retValSlot);
} }
// Now run the cleanups. // Now run the cleanups.
f->scopes->runAllCleanups(f->retBlock); funcGen.scopes.runAllCleanups(funcGen.retBlock);
irs->scope() = IRScope(f->retBlock); irs->scope() = IRScope(funcGen.retBlock);
} }
// If we need to emit the actual return instruction, do so. // If we need to emit the actual return instruction, do so.
@ -275,7 +276,7 @@ public:
irs->DBuilder.EmitStopPoint(fd->endloc); irs->DBuilder.EmitStopPoint(fd->endloc);
} }
irs->ir->CreateRet(useRetValSlot ? DtoLoad(f->retValSlot) irs->ir->CreateRet(useRetValSlot ? DtoLoad(funcGen.retValSlot)
: returnValue); : returnValue);
} else { } else {
irs->ir->CreateRetVoid(); irs->ir->CreateRetVoid();
@ -295,7 +296,7 @@ public:
IF_LOG Logger::println("ExpStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("ExpStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// emit dwarf stop point // emit dwarf stop point
@ -323,7 +324,7 @@ public:
IF_LOG Logger::println("IfStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("IfStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
auto truecount = PGO.getRegionCount(stmt); auto truecount = PGO.getRegionCount(stmt);
auto elsecount = PGO.getCurrentRegionCount() - truecount; auto elsecount = PGO.getCurrentRegionCount() - truecount;
@ -395,7 +396,7 @@ public:
IF_LOG Logger::println("ScopeStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("ScopeStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
if (stmt->statement) { if (stmt->statement) {
@ -411,7 +412,7 @@ public:
IF_LOG Logger::println("WhileStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("WhileStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// start a dwarf lexical block // start a dwarf lexical block
@ -452,12 +453,12 @@ public:
irs->scope() = IRScope(whilebodybb); irs->scope() = IRScope(whilebodybb);
// while body code // while body code
irs->func()->scopes->pushLoopTarget(stmt, whilebb, endbb); irs->funcGen().scopes.pushLoopTarget(stmt, whilebb, endbb);
PGO.emitCounterIncrement(stmt); PGO.emitCounterIncrement(stmt);
if (stmt->_body) { if (stmt->_body) {
stmt->_body->accept(this); stmt->_body->accept(this);
} }
irs->func()->scopes->popLoopTarget(); irs->funcGen().scopes.popLoopTarget();
// loop // loop
if (!irs->scopereturned()) { if (!irs->scopereturned()) {
@ -477,7 +478,7 @@ public:
IF_LOG Logger::println("DoStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("DoStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
auto entryCount = PGO.setCurrentStmt(stmt); auto entryCount = PGO.setCurrentStmt(stmt);
// start a dwarf lexical block // start a dwarf lexical block
@ -499,12 +500,12 @@ public:
irs->scope() = IRScope(dowhilebb); irs->scope() = IRScope(dowhilebb);
// do-while body code // do-while body code
irs->func()->scopes->pushLoopTarget(stmt, condbb, endbb); irs->funcGen().scopes.pushLoopTarget(stmt, condbb, endbb);
PGO.emitCounterIncrement(stmt); PGO.emitCounterIncrement(stmt);
if (stmt->_body) { if (stmt->_body) {
stmt->_body->accept(this); stmt->_body->accept(this);
} }
irs->func()->scopes->popLoopTarget(); irs->funcGen().scopes.popLoopTarget();
// branch to condition block // branch to condition block
llvm::BranchInst::Create(condbb, irs->scopebb()); llvm::BranchInst::Create(condbb, irs->scopebb());
@ -546,7 +547,7 @@ public:
IF_LOG Logger::println("ForStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("ForStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// start new dwarf lexical block // start new dwarf lexical block
@ -578,7 +579,7 @@ public:
while (ScopeStatement *scope = scopeStart->isScopeStatement()) { while (ScopeStatement *scope = scopeStart->isScopeStatement()) {
scopeStart = scope->statement; scopeStart = scope->statement;
} }
irs->func()->scopes->pushLoopTarget(scopeStart, forincbb, endbb); irs->funcGen().scopes.pushLoopTarget(scopeStart, forincbb, endbb);
// replace current scope // replace current scope
irs->scope() = IRScope(forbb); irs->scope() = IRScope(forbb);
@ -634,7 +635,7 @@ public:
llvm::BranchInst::Create(forbb, irs->scopebb()); llvm::BranchInst::Create(forbb, irs->scopebb());
} }
irs->func()->scopes->popLoopTarget(); irs->funcGen().scopes.popLoopTarget();
// rewrite the scope // rewrite the scope
irs->scope() = IRScope(endbb); irs->scope() = IRScope(endbb);
@ -649,7 +650,7 @@ public:
IF_LOG Logger::println("BreakStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("BreakStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// don't emit two terminators in a row // don't emit two terminators in a row
@ -674,9 +675,9 @@ public:
targetStatement = tmp->statement; targetStatement = tmp->statement;
} }
irs->func()->scopes->breakToStatement(targetStatement); irs->funcGen().scopes.breakToStatement(targetStatement);
} else { } else {
irs->func()->scopes->breakToClosest(); irs->funcGen().scopes.breakToClosest();
} }
// the break terminated this basicblock, start a new one // the break terminated this basicblock, start a new one
@ -692,7 +693,7 @@ public:
stmt->loc.toChars()); stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// emit dwarf stop point // emit dwarf stop point
@ -710,9 +711,9 @@ public:
targetLoopStatement = tmp->statement; targetLoopStatement = tmp->statement;
} }
irs->func()->scopes->continueWithLoop(targetLoopStatement); irs->funcGen().scopes.continueWithLoop(targetLoopStatement);
} else { } else {
irs->func()->scopes->continueWithClosest(); irs->funcGen().scopes.continueWithClosest();
} }
// the break terminated this basicblock, start a new one // the break terminated this basicblock, start a new one
@ -736,7 +737,7 @@ public:
stmt->loc.toChars()); stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
/*auto entryCount = */ PGO.setCurrentStmt(stmt); /*auto entryCount = */ PGO.setCurrentStmt(stmt);
// emit dwarf stop point // emit dwarf stop point
@ -770,8 +771,8 @@ public:
stmt->finalbody->accept(this); stmt->finalbody->accept(this);
irs->DBuilder.EmitBlockEnd(); irs->DBuilder.EmitBlockEnd();
CleanupCursor cleanupBefore = irs->func()->scopes->currentCleanupScope(); CleanupCursor cleanupBefore = irs->funcGen().scopes.currentCleanupScope();
irs->func()->scopes->pushCleanup(finallybb, irs->scopebb()); irs->funcGen().scopes.pushCleanup(finallybb, irs->scopebb());
// Emit the try block. // Emit the try block.
irs->scope() = IRScope(trybb); irs->scope() = IRScope(trybb);
@ -786,12 +787,12 @@ public:
if (!irs->scopereturned()) { if (!irs->scopereturned()) {
llvm::BasicBlock *successbb = llvm::BasicBlock::Create( llvm::BasicBlock *successbb = llvm::BasicBlock::Create(
irs->context(), "try.success", irs->topfunc()); irs->context(), "try.success", irs->topfunc());
irs->func()->scopes->runCleanups(cleanupBefore, successbb); irs->funcGen().scopes.runCleanups(cleanupBefore, successbb);
irs->scope() = IRScope(successbb); irs->scope() = IRScope(successbb);
// PGO counter tracks the continuation of the try-finally statement // PGO counter tracks the continuation of the try-finally statement
PGO.emitCounterIncrement(stmt); PGO.emitCounterIncrement(stmt);
} }
irs->func()->scopes->popCleanups(cleanupBefore); irs->funcGen().scopes.popCleanups(cleanupBefore);
} }
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
@ -883,7 +884,7 @@ public:
stmt->loc.toChars()); stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
auto entryCount = PGO.setCurrentStmt(stmt); auto entryCount = PGO.setCurrentStmt(stmt);
// Emit dwarf stop point // Emit dwarf stop point
@ -911,14 +912,14 @@ public:
#if LDC_LLVM_VER >= 308 #if LDC_LLVM_VER >= 308
if (useMSVCEH()) { if (useMSVCEH()) {
ScopeStack *scopes = irs->func()->scopes; auto &scopes = irs->funcGen().scopes;
auto catchSwitchBlock = llvm::BasicBlock::Create( auto catchSwitchBlock = llvm::BasicBlock::Create(
irs->context(), "catch.dispatch", irs->topfunc()); irs->context(), "catch.dispatch", irs->topfunc());
llvm::BasicBlock *unwindto = llvm::BasicBlock *unwindto =
scopes->currentCleanupScope() > 0 || scopes->currentCatchScope() > 0 scopes.currentCleanupScope() > 0 || scopes.currentCatchScope() > 0
? scopes->getLandingPad() ? scopes.getLandingPad()
: nullptr; : nullptr;
auto funclet = scopes->getFunclet(); auto funclet = scopes.getFunclet();
auto catchSwitchInst = llvm::CatchSwitchInst::Create( auto catchSwitchInst = llvm::CatchSwitchInst::Create(
funclet ? funclet : llvm::ConstantTokenNone::get(irs->context()), funclet ? funclet : llvm::ConstantTokenNone::get(irs->context()),
unwindto, stmt->catches->dim, "", catchSwitchBlock); unwindto, stmt->catches->dim, "", catchSwitchBlock);
@ -954,7 +955,7 @@ public:
CatchBlock cb = {nullptr, catchSwitchBlock, catchCount}; CatchBlock cb = {nullptr, catchSwitchBlock, catchCount};
catchBlocks.push_back(cb); // just for cleanup catchBlocks.push_back(cb); // just for cleanup
scopes->pushCatch(nullptr, catchSwitchBlock); scopes.pushCatch(nullptr, catchSwitchBlock);
// if no landing pad is created, the catch blocks are unused, but // if no landing pad is created, the catch blocks are unused, but
// the verifier complains if there are catchpads without personality // the verifier complains if there are catchpads without personality
@ -981,7 +982,7 @@ public:
const auto enterCatchFn = const auto enterCatchFn =
getRuntimeFunction(Loc(), irs->module, "_d_eh_enter_catch"); getRuntimeFunction(Loc(), irs->module, "_d_eh_enter_catch");
auto ptr = DtoLoad(irs->func()->getOrCreateEhPtrSlot()); auto ptr = DtoLoad(irs->funcGen().getOrCreateEhPtrSlot());
auto throwableObj = irs->ir->CreateCall(enterCatchFn, ptr); auto throwableObj = irs->ir->CreateCall(enterCatchFn, ptr);
// For catches that use the Throwable object, create storage for it. // For catches that use the Throwable object, create storage for it.
@ -1038,7 +1039,7 @@ public:
DtoResolveClass(cb.classdecl); DtoResolveClass(cb.classdecl);
irs->func()->scopes->pushCatch( irs->funcGen().scopes.pushCatch(
getIrAggr(cb.classdecl)->getClassInfoSymbol(), cb.BB, matchWeights); getIrAggr(cb.classdecl)->getClassInfoSymbol(), cb.BB, matchWeights);
} }
} }
@ -1058,7 +1059,7 @@ public:
// Now that we have done the try block, remove the catches and continue // Now that we have done the try block, remove the catches and continue
// codegen in the end block the try and all the catches branch to. // codegen in the end block the try and all the catches branch to.
for (size_t i = 0; i < catchBlocks.size(); ++i) { for (size_t i = 0; i < catchBlocks.size(); ++i) {
irs->func()->scopes->popCatch(); irs->funcGen().scopes.popCatch();
} }
// Move end block after all generated blocks // Move end block after all generated blocks
@ -1075,7 +1076,7 @@ public:
IF_LOG Logger::println("ThrowStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("ThrowStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// emit dwarf stop point // emit dwarf stop point
@ -1106,7 +1107,7 @@ public:
IF_LOG Logger::println("SwitchStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("SwitchStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
const auto incomingPGORegionCount = PGO.getCurrentRegionCount(); const auto incomingPGORegionCount = PGO.getCurrentRegionCount();
@ -1189,20 +1190,20 @@ public:
} }
// default // default
llvm::BasicBlock *defbb = nullptr; auto defaultTargetBB = endbb;
if (stmt->sdefault) { if (stmt->sdefault) {
Logger::println("has default"); Logger::println("has default");
defbb = defaultTargetBB =
llvm::BasicBlock::Create(irs->context(), "default", irs->topfunc()); llvm::BasicBlock::Create(irs->context(), "default", irs->topfunc());
stmt->sdefault->bodyBB = defbb; stmt->sdefault->bodyBB = defaultTargetBB;
} }
// do switch body // do switch body
assert(stmt->_body); assert(stmt->_body);
irs->scope() = IRScope(bodybb); irs->scope() = IRScope(bodybb);
irs->func()->scopes->pushBreakTarget(stmt, endbb); irs->funcGen().scopes.pushBreakTarget(stmt, endbb);
stmt->_body->accept(this); stmt->_body->accept(this);
irs->func()->scopes->popBreakTarget(); irs->funcGen().scopes.popBreakTarget();
if (!irs->scopereturned()) { if (!irs->scopereturned()) {
llvm::BranchInst::Create(endbb, irs->scopebb()); llvm::BranchInst::Create(endbb, irs->scopebb());
} }
@ -1211,7 +1212,6 @@ public:
if (useSwitchInst) { if (useSwitchInst) {
// The case index value. // The case index value.
LLValue *condVal; LLValue *condVal;
if (isStringSwitch) { if (isStringSwitch) {
condVal = call_string_switch_runtime(stringTableSlice, stmt->condition); condVal = call_string_switch_runtime(stringTableSlice, stmt->condition);
} else { } else {
@ -1224,7 +1224,7 @@ public:
// directly from the switch statement and not "goto default", etc. // directly from the switch statement and not "goto default", etc.
llvm::SwitchInst *si; llvm::SwitchInst *si;
if (!global.params.genInstrProf) { if (!global.params.genInstrProf) {
si = llvm::SwitchInst::Create(condVal, defbb ? defbb : endbb, caseCount, si = llvm::SwitchInst::Create(condVal, defaultTargetBB, caseCount,
irs->scopebb()); irs->scopebb());
for (size_t i = 0; i < caseCount; ++i) { for (size_t i = 0; i < caseCount; ++i) {
si->addCase(isaConstantInt(indices[i]), (*cases)[i]->bodyBB); si->addCase(isaConstantInt(indices[i]), (*cases)[i]->bodyBB);
@ -1238,8 +1238,8 @@ public:
irs->context(), "defaultcntr", irs->topfunc()); irs->context(), "defaultcntr", irs->topfunc());
irs->scope() = IRScope(defaultcntr); irs->scope() = IRScope(defaultcntr);
PGO.emitCounterIncrement(stmt->sdefault); PGO.emitCounterIncrement(stmt->sdefault);
llvm::BranchInst::Create(defbb ? defbb : endbb, irs->scopebb()); llvm::BranchInst::Create(defaultTargetBB, irs->scopebb());
defaultcntr->moveBefore(defbb ? defbb : endbb); defaultcntr->moveBefore(defaultTargetBB);
// Create switch // Create switch
si = llvm::SwitchInst::Create(condVal, defaultcntr, caseCount, si = llvm::SwitchInst::Create(condVal, defaultcntr, caseCount,
switchbb); switchbb);
@ -1287,16 +1287,15 @@ public:
llvm::BasicBlock::Create(irs->context(), "checkcase", irs->topfunc()); llvm::BasicBlock::Create(irs->context(), "checkcase", irs->topfunc());
llvm::BranchInst::Create(nextbb, irs->scopebb()); llvm::BranchInst::Create(nextbb, irs->scopebb());
auto defaultjumptarget = defbb ? defbb : endbb;
// Create "default:" counter for profiling
if (global.params.genInstrProf) { if (global.params.genInstrProf) {
// Prepend extra BB to "default:" to increment profiling counter.
llvm::BasicBlock *defaultcntr = llvm::BasicBlock::Create( llvm::BasicBlock *defaultcntr = llvm::BasicBlock::Create(
irs->context(), "defaultcntr", irs->topfunc()); irs->context(), "defaultcntr", irs->topfunc());
irs->scope() = IRScope(defaultcntr); irs->scope() = IRScope(defaultcntr);
PGO.emitCounterIncrement(stmt->sdefault); PGO.emitCounterIncrement(stmt->sdefault);
llvm::BranchInst::Create(defbb ? defbb : endbb, irs->scopebb()); llvm::BranchInst::Create(defaultTargetBB, irs->scopebb());
defaultcntr->moveBefore(defbb ? defbb : endbb); defaultcntr->moveBefore(defaultTargetBB);
defaultjumptarget = defaultcntr; defaultTargetBB = defaultcntr;
} }
irs->scope() = IRScope(nextbb); irs->scope() = IRScope(nextbb);
@ -1341,22 +1340,12 @@ public:
irs->scope() = IRScope(nextbb); irs->scope() = IRScope(nextbb);
} }
llvm::BranchInst::Create(defaultjumptarget, irs->scopebb()); llvm::BranchInst::Create(defaultTargetBB, irs->scopebb());
endbb->moveAfter(nextbb); endbb->moveAfter(nextbb);
} }
irs->scope() = IRScope(endbb); irs->scope() = IRScope(endbb);
// Reset backend variables to original state (to allow multiple codegen
// passes of same ast nodes)
// TODO: move the codegen state variables out of the AST.
for (CaseStatement *cs : *stmt->cases) {
cs->bodyBB = nullptr;
}
if (stmt->sdefault) {
stmt->sdefault->bodyBB = nullptr;
}
} }
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
@ -1365,7 +1354,7 @@ public:
IF_LOG Logger::println("CaseStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("CaseStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
llvm::BasicBlock *nbb = llvm::BasicBlock *nbb =
@ -1397,7 +1386,7 @@ public:
IF_LOG Logger::println("DefaultStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("DefaultStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
assert(stmt->bodyBB); assert(stmt->bodyBB);
@ -1433,7 +1422,7 @@ public:
stmt->loc.toChars()); stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// if no statements, there's nothing to do // if no statements, there's nothing to do
@ -1481,13 +1470,13 @@ public:
// push loop scope // push loop scope
// continue goes to next statement, break goes to end // continue goes to next statement, break goes to end
irs->func()->scopes->pushLoopTarget(stmt, nextbb, endbb); irs->funcGen().scopes.pushLoopTarget(stmt, nextbb, endbb);
// do statement // do statement
s->accept(this); s->accept(this);
// pop loop scope // pop loop scope
irs->func()->scopes->popLoopTarget(); irs->funcGen().scopes.popLoopTarget();
// next stmt // next stmt
if (!irs->scopereturned()) { if (!irs->scopereturned()) {
@ -1511,7 +1500,7 @@ public:
IF_LOG Logger::println("ForeachStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("ForeachStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// start a dwarf lexical block // start a dwarf lexical block
@ -1621,11 +1610,11 @@ public:
} }
// emit body // emit body
irs->func()->scopes->pushLoopTarget(stmt, nextbb, endbb); irs->funcGen().scopes.pushLoopTarget(stmt, nextbb, endbb);
if (stmt->_body) { if (stmt->_body) {
stmt->_body->accept(this); stmt->_body->accept(this);
} }
irs->func()->scopes->popLoopTarget(); irs->funcGen().scopes.popLoopTarget();
if (!irs->scopereturned()) { if (!irs->scopereturned()) {
llvm::BranchInst::Create(nextbb, irs->scopebb()); llvm::BranchInst::Create(nextbb, irs->scopebb());
@ -1654,7 +1643,7 @@ public:
stmt->loc.toChars()); stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// start a dwarf lexical block // start a dwarf lexical block
@ -1727,11 +1716,11 @@ public:
} }
// emit body // emit body
irs->func()->scopes->pushLoopTarget(stmt, nextbb, endbb); irs->funcGen().scopes.pushLoopTarget(stmt, nextbb, endbb);
if (stmt->_body) { if (stmt->_body) {
stmt->_body->accept(this); stmt->_body->accept(this);
} }
irs->func()->scopes->popLoopTarget(); irs->funcGen().scopes.popLoopTarget();
// jump to next iteration // jump to next iteration
if (!irs->scopereturned()) { if (!irs->scopereturned()) {
@ -1765,7 +1754,7 @@ public:
IF_LOG Logger::println("LabelStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("LabelStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
// if it's an inline asm label, we don't create a basicblock, just emit it // if it's an inline asm label, we don't create a basicblock, just emit it
@ -1786,7 +1775,7 @@ public:
llvm::BasicBlock *labelBB = llvm::BasicBlock::Create( llvm::BasicBlock *labelBB = llvm::BasicBlock::Create(
irs->context(), llvm::Twine("label.") + stmt->ident->toChars(), irs->context(), llvm::Twine("label.") + stmt->ident->toChars(),
irs->topfunc()); irs->topfunc());
irs->func()->scopes->addLabelTarget(stmt->ident, labelBB); irs->funcGen().scopes.addLabelTarget(stmt->ident, labelBB);
if (!irs->scopereturned()) { if (!irs->scopereturned()) {
llvm::BranchInst::Create(labelBB, irs->scopebb()); llvm::BranchInst::Create(labelBB, irs->scopebb());
@ -1808,7 +1797,7 @@ public:
IF_LOG Logger::println("GotoStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("GotoStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
irs->DBuilder.EmitStopPoint(stmt->loc); irs->DBuilder.EmitStopPoint(stmt->loc);
@ -1830,7 +1819,7 @@ public:
stmt->loc.toChars()); stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
irs->DBuilder.EmitStopPoint(stmt->loc); irs->DBuilder.EmitStopPoint(stmt->loc);
@ -1860,7 +1849,7 @@ public:
stmt->loc.toChars()); stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
irs->DBuilder.EmitStopPoint(stmt->loc); irs->DBuilder.EmitStopPoint(stmt->loc);
@ -1892,7 +1881,7 @@ public:
IF_LOG Logger::println("WithStatement::toIR(): %s", stmt->loc.toChars()); IF_LOG Logger::println("WithStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
irs->DBuilder.EmitBlockStart(stmt->loc); irs->DBuilder.EmitBlockStart(stmt->loc);
@ -1922,7 +1911,7 @@ public:
stmt->loc.toChars()); stmt->loc.toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = irs->func()->pgo; auto &PGO = irs->funcGen().pgo;
PGO.setCurrentStmt(stmt); PGO.setCurrentStmt(stmt);
llvm::Function *fn = llvm::Function *fn =


@ -15,6 +15,7 @@
#include "gen/abi.h" #include "gen/abi.h"
#include "gen/classes.h" #include "gen/classes.h"
#include "gen/dvalue.h" #include "gen/dvalue.h"
#include "gen/funcgenstate.h"
#include "gen/functions.h" #include "gen/functions.h"
#include "gen/irstate.h" #include "gen/irstate.h"
#include "gen/llvm.h" #include "gen/llvm.h"
@ -885,13 +886,13 @@ DValue *DtoCallFunction(Loc &loc, Type *resulttype, DValue *fnval,
} }
// call the function // call the function
LLCallSite call = gIR->func()->scopes->callOrInvoke(callable, args); LLCallSite call = gIR->funcGen().scopes.callOrInvoke(callable, args);
#if LDC_LLVM_VER >= 309 #if LDC_LLVM_VER >= 309
// PGO: Insert instrumentation or attach profile metadata at indirect call // PGO: Insert instrumentation or attach profile metadata at indirect call
// sites. // sites.
if (!call.getCalledFunction()) { if (!call.getCalledFunction()) {
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.emitIndirectCallPGO(call.getInstruction(), callable); PGO.emitIndirectCallPGO(call.getInstruction(), callable);
} }
#endif #endif


@ -18,6 +18,7 @@
#include "gen/coverage.h" #include "gen/coverage.h"
#include "gen/dvalue.h" #include "gen/dvalue.h"
#include "gen/functions.h" #include "gen/functions.h"
#include "gen/funcgenstate.h"
#include "gen/inlineir.h" #include "gen/inlineir.h"
#include "gen/irstate.h" #include "gen/irstate.h"
#include "gen/llvm.h" #include "gen/llvm.h"
@ -186,7 +187,7 @@ void pushVarDtorCleanup(IRState *p, VarDeclaration *vd) {
IRScope oldScope = p->scope(); IRScope oldScope = p->scope();
p->scope() = IRScope(beginBB); p->scope() = IRScope(beginBB);
toElemDtor(vd->edtor); toElemDtor(vd->edtor);
p->func()->scopes->pushCleanup(beginBB, p->scopebb()); p->funcGen().scopes.pushCleanup(beginBB, p->scopebb());
p->scope() = oldScope; p->scope() = oldScope;
} }
} }
@ -218,12 +219,12 @@ class ToElemVisitor : public Visitor {
public: public:
ToElemVisitor(IRState *p_, bool destructTemporaries_) ToElemVisitor(IRState *p_, bool destructTemporaries_)
: p(p_), destructTemporaries(destructTemporaries_), result(nullptr) { : p(p_), destructTemporaries(destructTemporaries_), result(nullptr) {
initialCleanupScope = p->func()->scopes->currentCleanupScope(); initialCleanupScope = p->funcGen().scopes.currentCleanupScope();
} }
DValue *getResult() { DValue *getResult() {
if (destructTemporaries && if (destructTemporaries &&
p->func()->scopes->currentCleanupScope() != initialCleanupScope) { p->funcGen().scopes.currentCleanupScope() != initialCleanupScope) {
// We might share the CFG edges through the below cleanup blocks with // We might share the CFG edges through the below cleanup blocks with
// other paths (e.g. exception unwinding) where the result value has not // other paths (e.g. exception unwinding) where the result value has not
// been constructed. At runtime, the branches will be chosen such that the // been constructed. At runtime, the branches will be chosen such that the
@ -247,8 +248,8 @@ public:
llvm::BasicBlock *endbb = llvm::BasicBlock::Create( llvm::BasicBlock *endbb = llvm::BasicBlock::Create(
p->context(), "toElem.success", p->topfunc()); p->context(), "toElem.success", p->topfunc());
p->func()->scopes->runCleanups(initialCleanupScope, endbb); p->funcGen().scopes.runCleanups(initialCleanupScope, endbb);
p->func()->scopes->popCleanups(initialCleanupScope); p->funcGen().scopes.popCleanups(initialCleanupScope);
p->scope() = IRScope(endbb); p->scope() = IRScope(endbb);
destructTemporaries = false; destructTemporaries = false;
@ -269,7 +270,7 @@ public:
e->type ? e->type->toChars() : "(null)"); e->type ? e->type->toChars() : "(null)");
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
result = DtoDeclarationExp(e->declaration); result = DtoDeclarationExp(e->declaration);
@ -566,7 +567,7 @@ public:
\ \
errorOnIllegalArrayOp(e, e->e1, e->e2); \ errorOnIllegalArrayOp(e, e->e1, e->e2); \
\ \
auto &PGO = gIR->func()->pgo; \ auto &PGO = gIR->funcGen().pgo; \
PGO.setCurrentStmt(e); \ PGO.setCurrentStmt(e); \
\ \
result = Func(e->loc, e->type, toElem(e->e1), e->e2); \ result = Func(e->loc, e->type, toElem(e->e1), e->e2); \
@ -618,7 +619,7 @@ public:
\ \
errorOnIllegalArrayOp(e, e->e1, e->e2); \ errorOnIllegalArrayOp(e, e->e1, e->e2); \
\ \
auto &PGO = gIR->func()->pgo; \ auto &PGO = gIR->funcGen().pgo; \
PGO.setCurrentStmt(e); \ PGO.setCurrentStmt(e); \
\ \
result = binAssign<Func, useLValTypeForBinOp>(e); \ result = binAssign<Func, useLValTypeForBinOp>(e); \
@ -645,7 +646,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
// handle magic inline asm // handle magic inline asm
@ -746,7 +747,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
// get the value to cast // get the value to cast
@ -778,7 +779,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
DValue *base = DtoSymbolAddress(e->loc, e->var->type, e->var); DValue *base = DtoSymbolAddress(e->loc, e->var->type, e->var);
@ -827,7 +828,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
// The address of a StructLiteralExp can in fact be a global variable, check // The address of a StructLiteralExp can in fact be a global variable, check
@ -880,7 +881,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
// function pointers are special // function pointers are special
@ -908,7 +909,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
DValue *l = toElem(e->e1); DValue *l = toElem(e->e1);
@ -976,7 +977,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
// special cases: `this(int) { this(); }` and `this(int) { super(); }` // special cases: `this(int) { this(); }` and `this(int) { super(); }`
@ -1014,7 +1015,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
DValue *l = toElem(e->e1); DValue *l = toElem(e->e1);
@ -1056,7 +1057,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
// value being sliced // value being sliced
@ -1168,7 +1169,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
DValue *l = toElem(e->e1); DValue *l = toElem(e->e1);
@ -1299,7 +1300,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
DValue *l = toElem(e->e1); DValue *l = toElem(e->e1);
@ -1365,7 +1366,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
LLValue *const lval = DtoLVal(e->e1); LLValue *const lval = DtoLVal(e->e1);
@ -1414,7 +1415,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
bool isArgprefixHandled = false; bool isArgprefixHandled = false;
@ -1538,7 +1539,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
DValue *dval = toElem(e->e1); DValue *dval = toElem(e->e1);
@ -1597,7 +1598,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
DValue *u = toElem(e->e1); DValue *u = toElem(e->e1);
@ -1610,7 +1611,7 @@ public:
IF_LOG Logger::print("AssertExp::toElem: %s\n", e->toChars()); IF_LOG Logger::print("AssertExp::toElem: %s\n", e->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
if (!global.params.useAssert) if (!global.params.useAssert)
@ -1694,7 +1695,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
DValue *u = toElem(e->e1); DValue *u = toElem(e->e1);
@ -1714,7 +1715,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
DValue *u = toElem(e->e1); DValue *u = toElem(e->e1);
@ -1776,7 +1777,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
DValue *u = toElem(e->e1); DValue *u = toElem(e->e1);
@ -2005,7 +2006,7 @@ public:
e->type->toChars()); e->type->toChars());
LOG_SCOPE; LOG_SCOPE;
auto &PGO = gIR->func()->pgo; auto &PGO = gIR->funcGen().pgo;
PGO.setCurrentStmt(e); PGO.setCurrentStmt(e);
Type *dtype = e->type->toBasetype(); Type *dtype = e->type->toBasetype();
@ -2181,21 +2182,22 @@ public:
LLType *dgty = DtoType(e->type); LLType *dgty = DtoType(e->type);
LLValue *cval; LLValue *cval;
IrFunction *irfn = p->func(); auto &funcGen = p->funcGen();
if (irfn->nestedVar && fd->toParent2() == irfn->decl) { auto &irfn = funcGen.irFunc;
if (funcGen.nestedVar && fd->toParent2() == irfn.decl) {
// We check fd->toParent2() because a frame allocated in one // We check fd->toParent2() because a frame allocated in one
// function cannot be used for a delegate created in another // function cannot be used for a delegate created in another
// function. Happens with anonymous functions. // function. Happens with anonymous functions.
cval = irfn->nestedVar; cval = funcGen.nestedVar;
} else if (irfn->nestArg) { } else if (irfn.nestArg) {
cval = DtoLoad(irfn->nestArg); cval = DtoLoad(irfn.nestArg);
} else if (irfn->thisArg) { } else if (irfn.thisArg) {
AggregateDeclaration *ad = irfn->decl->isMember2(); AggregateDeclaration *ad = irfn.decl->isMember2();
if (!ad || !ad->vthis) { if (!ad || !ad->vthis) {
cval = getNullPtr(getVoidPtrType()); cval = getNullPtr(getVoidPtrType());
} else { } else {
cval = cval =
ad->isClassDeclaration() ? DtoLoad(irfn->thisArg) : irfn->thisArg; ad->isClassDeclaration() ? DtoLoad(irfn.thisArg) : irfn.thisArg;
cval = DtoLoad( cval = DtoLoad(
DtoGEPi(cval, 0, getFieldGEPIndex(ad, ad->vthis), ".vthis")); DtoGEPi(cval, 0, getFieldGEPIndex(ad, ad->vthis), ".vthis"));
} }


@ -482,11 +482,7 @@ LLConstant *DtoConstString(const char *str) {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
LLValue *DtoLoad(LLValue *src, const char *name) { LLValue *DtoLoad(LLValue *src, const char *name) {
// if (Logger::enabled()) return gIR->ir->CreateLoad(src, name);
// Logger::cout() << "loading " << *src << '\n';
llvm::LoadInst *ld = gIR->ir->CreateLoad(src, name);
// ld->setVolatile(gIR->func()->inVolatile);
return ld;
} }
// Like DtoLoad, but the pointer is guaranteed to be aligned appropriately for // Like DtoLoad, but the pointer is guaranteed to be aligned appropriately for


@ -19,7 +19,7 @@
#include "hdrgen.h" // for parametersTypeToChars() #include "hdrgen.h" // for parametersTypeToChars()
#include "mtype.h" #include "mtype.h"
#include "target.h" #include "target.h"
#include "gen/funcgenstate.h"
#include "gen/irstate.h" #include "gen/irstate.h"
#include "gen/logger.h" #include "gen/logger.h"
#include "gen/tollvm.h" #include "gen/tollvm.h"
@ -351,8 +351,8 @@ llvm::GlobalVariable *IrAggr::getInterfaceVtbl(BaseClass *b, bool new_instance,
setLinkage(lwc, thunk); setLinkage(lwc, thunk);
thunk->copyAttributesFrom(irFunc->func); thunk->copyAttributesFrom(irFunc->func);
// Thunks themselves don't have an identity, only the target // Thunks themselves don't have an identity, only the target
// function has. // function has.
#if LDC_LLVM_VER >= 309 #if LDC_LLVM_VER >= 309
thunk->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); thunk->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
#else #else
@ -378,7 +378,7 @@ llvm::GlobalVariable *IrAggr::getInterfaceVtbl(BaseClass *b, bool new_instance,
auto thunkFunc = getIrFunc(thunkFd, true); // create the IrFunction auto thunkFunc = getIrFunc(thunkFd, true); // create the IrFunction
thunkFunc->func = thunk; thunkFunc->func = thunk;
thunkFunc->type = irFunc->type; thunkFunc->type = irFunc->type;
gIR->functions.push_back(thunkFunc); gIR->funcGenStates.emplace_back(new FuncGenState(*thunkFunc, *gIR));
// debug info // debug info
thunkFunc->diSubprogram = gIR->DBuilder.EmitThunk(thunk, thunkFd); thunkFunc->diSubprogram = gIR->DBuilder.EmitThunk(thunk, thunkFd);
@ -433,7 +433,7 @@ llvm::GlobalVariable *IrAggr::getInterfaceVtbl(BaseClass *b, bool new_instance,
// clean up // clean up
gIR->scopes.pop_back(); gIR->scopes.pop_back();
gIR->functions.pop_back(); gIR->funcGenStates.pop_back();
} }
constants.push_back(thunk); constants.push_back(thunk);


@ -7,616 +7,13 @@
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "ir/irfunction.h"
#include "gen/llvm.h" #include "gen/llvm.h"
#include "gen/llvmhelpers.h" #include "gen/llvmhelpers.h"
#include "gen/irstate.h" #include "gen/irstate.h"
#include "gen/runtime.h"
#include "gen/tollvm.h" #include "gen/tollvm.h"
#include "gen/ms-cxx-helper.h"
#include "ir/irdsymbol.h" #include "ir/irdsymbol.h"
#include "ir/irfunction.h"
#include <sstream>
JumpTarget::JumpTarget(llvm::BasicBlock *targetBlock,
CleanupCursor cleanupScope, Statement *targetStatement)
: targetBlock(targetBlock), cleanupScope(cleanupScope),
targetStatement(targetStatement) {}
GotoJump::GotoJump(Loc loc, llvm::BasicBlock *sourceBlock,
llvm::BasicBlock *tentativeTarget, Identifier *targetLabel)
: sourceLoc(std::move(loc)), sourceBlock(sourceBlock),
tentativeTarget(tentativeTarget), targetLabel(targetLabel) {}
CatchScope::CatchScope(llvm::Constant *classInfoPtr,
llvm::BasicBlock *bodyBlock, CleanupCursor cleanupScope,
llvm::MDNode *branchWeights)
: classInfoPtr(classInfoPtr), bodyBlock(bodyBlock),
cleanupScope(cleanupScope), branchWeights(branchWeights) {}
bool useMSVCEH() {
#if LDC_LLVM_VER >= 308
return global.params.targetTriple->isWindowsMSVCEnvironment();
#else
return false;
#endif
}
namespace {
#if LDC_LLVM_VER >= 308
// MSVC/x86 uses C++ exception handling that puts cleanup blocks into funclets.
// This means that we cannot use a branch selector and conditional branches
// at cleanup exit to continue with different targets.
// Instead we make a full copy of the cleanup code for every target
//
// Return the beginning basic block of the cleanup code
llvm::BasicBlock *executeCleanupCopying(IRState *irs, CleanupScope &scope,
llvm::BasicBlock *sourceBlock,
llvm::BasicBlock *continueWith,
llvm::BasicBlock *unwindTo,
llvm::Value *funclet) {
if (isCatchSwitchBlock(scope.beginBlock))
return continueWith;
if (scope.cleanupBlocks.empty()) {
// figure out the list of blocks used by this cleanup step
findSuccessors(scope.cleanupBlocks, scope.beginBlock, scope.endBlock);
if (!scope.endBlock->getTerminator())
// Set up the unconditional branch at the end of the cleanup
llvm::BranchInst::Create(continueWith, scope.endBlock);
} else {
// check whether we have an exit target with the same continuation
for (CleanupExitTarget &tgt : scope.exitTargets)
if (tgt.branchTarget == continueWith) {
tgt.sourceBlocks.push_back(sourceBlock);
return tgt.cleanupBlocks.front();
}
}
// reuse the original IR if not unwinding and not already used
bool useOriginal = unwindTo == nullptr && funclet == nullptr;
for (CleanupExitTarget &tgt : scope.exitTargets)
useOriginal = useOriginal && tgt.cleanupBlocks.front() != scope.beginBlock;
// append new target
scope.exitTargets.push_back(CleanupExitTarget(continueWith));
scope.exitTargets.back().sourceBlocks.push_back(sourceBlock);
if (useOriginal) {
// change the continuation target if the initial branch was created
// by another instance with unwinding
if (continueWith)
if (auto term = scope.endBlock->getTerminator())
if (auto succ = term->getSuccessor(0))
if (succ != continueWith) {
remapBlocksValue(scope.cleanupBlocks, succ, continueWith);
}
scope.exitTargets.back().cleanupBlocks = scope.cleanupBlocks;
} else {
// clone the code
cloneBlocks(scope.cleanupBlocks, scope.exitTargets.back().cleanupBlocks,
continueWith, unwindTo, funclet);
}
return scope.exitTargets.back().cleanupBlocks.front();
}
#endif // LDC_LLVM_VER >= 308
void executeCleanup(IRState *irs, CleanupScope &scope,
llvm::BasicBlock *sourceBlock,
llvm::BasicBlock *continueWith) {
assert(!useMSVCEH()); // should always use executeCleanupCopying
if (scope.exitTargets.empty() ||
(scope.exitTargets.size() == 1 &&
scope.exitTargets[0].branchTarget == continueWith)) {
// We didn't need a branch selector before and still don't need one.
assert(!scope.branchSelector);
// Set up the unconditional branch at the end of the cleanup if we have
// not done so already.
if (scope.exitTargets.empty()) {
scope.exitTargets.push_back(CleanupExitTarget(continueWith));
llvm::BranchInst::Create(continueWith, scope.endBlock);
}
scope.exitTargets.front().sourceBlocks.push_back(sourceBlock);
return;
}
// We need a branch selector if we are here...
if (!scope.branchSelector) {
// ... and have not created one yet, so do so now.
scope.branchSelector = new llvm::AllocaInst(
llvm::Type::getInt32Ty(gIR->context()),
llvm::Twine("branchsel.") + scope.beginBlock->getName(),
irs->topallocapoint());
// Now we also need to store 0 to it to keep the paths that go to the
// only existing branch target the same.
auto &v = scope.exitTargets.front().sourceBlocks;
for (auto bb : v) {
new llvm::StoreInst(DtoConstUint(0), scope.branchSelector,
bb->getTerminator());
}
// And convert the BranchInst to the existing branch target to a
// SwitchInst so we can append the other cases to it.
scope.endBlock->getTerminator()->eraseFromParent();
llvm::Value *sel =
new llvm::LoadInst(scope.branchSelector, "", scope.endBlock);
llvm::SwitchInst::Create(
sel, scope.exitTargets[0].branchTarget,
1, // Expected number of branches, only for pre-allocating.
scope.endBlock);
}
// If we already know this branch target, figure out the branch selector
// value and simply insert the store into the source block (prior to the
// last instruction, which is the branch to the first cleanup).
for (unsigned i = 0; i < scope.exitTargets.size(); ++i) {
CleanupExitTarget &t = scope.exitTargets[i];
if (t.branchTarget == continueWith) {
new llvm::StoreInst(DtoConstUint(i), scope.branchSelector,
sourceBlock->getTerminator());
// Note: Strictly speaking, keeping this up to date would not be
// needed right now, because we never do any optimizations that
// require changes to the source blocks after the initial conversion
// from one to two branch targets. Keeping this around for now to
// ease future development, but may be removed to save some work.
t.sourceBlocks.push_back(sourceBlock);
return;
}
}
// We don't know this branch target yet, so add it to the SwitchInst...
llvm::ConstantInt *const selectorVal = DtoConstUint(scope.exitTargets.size());
llvm::cast<llvm::SwitchInst>(scope.endBlock->getTerminator())
->addCase(selectorVal, continueWith);
// ... insert the store into the source block...
new llvm::StoreInst(selectorVal, scope.branchSelector,
sourceBlock->getTerminator());
// ... and keep track of it (again, this is unnecessary right now as
// discussed in the above note).
scope.exitTargets.push_back(CleanupExitTarget(continueWith));
scope.exitTargets.back().sourceBlocks.push_back(sourceBlock);
}
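// Illustrative-only sketch (block and value names are made up) of the IR
// shape executeCleanup maintains once a cleanup has more than one exit
// target: each source block stores its selector value right before branching
// into the cleanup, and the cleanup's end block dispatches on it:
//
//   source.a:                             ; continues at %target.a (selector 0)
//     store i32 0, i32* %branchsel.cleanup
//     br label %cleanup.begin
//   source.b:                             ; continues at %target.b (selector 1)
//     store i32 1, i32* %branchsel.cleanup
//     br label %cleanup.begin
//   cleanup.end:
//     %sel = load i32, i32* %branchsel.cleanup
//     switch i32 %sel, label %target.a [ i32 1, label %target.b ]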
}
ScopeStack::~ScopeStack() {
// If there are still unresolved gotos left, their targets were either further
// down or "sideways" (i.e. down another branch) in the tree of all cleanup
// scopes, neither of which is allowed in D.
if (!topLevelUnresolvedGotos.empty()) {
for (const auto &i : topLevelUnresolvedGotos) {
error(i.sourceLoc, "goto into try/finally scope is not allowed");
}
fatal();
}
}
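// A hedged illustration (D source, not part of this file) of the situation
// diagnosed above: a goto whose label lives inside a finally block can never
// be resolved against an enclosing cleanup scope, e.g.
//
//   void f() {
//     goto L;
//     try {} finally { L: {} }
//   }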
void ScopeStack::pushCleanup(llvm::BasicBlock *beginBlock,
llvm::BasicBlock *endBlock) {
cleanupScopes.push_back(CleanupScope(beginBlock, endBlock));
}
void ScopeStack::runCleanups(CleanupCursor sourceScope,
CleanupCursor targetScope,
llvm::BasicBlock *continueWith) {
#if LDC_LLVM_VER >= 308
if (useMSVCEH()) {
runCleanupCopies(sourceScope, targetScope, continueWith);
return;
}
#endif
assert(targetScope <= sourceScope);
if (targetScope == sourceScope) {
// No cleanups to run, just branch to the next block.
irs->ir->CreateBr(continueWith);
return;
}
// Insert the unconditional branch to the first cleanup block.
irs->ir->CreateBr(cleanupScopes[sourceScope - 1].beginBlock);
// Update all the control flow in the cleanups to make sure we end up where
// we want.
for (CleanupCursor i = sourceScope; i-- > targetScope;) {
llvm::BasicBlock *nextBlock =
(i > targetScope) ? cleanupScopes[i - 1].beginBlock : continueWith;
executeCleanup(irs, cleanupScopes[i], irs->scopebb(), nextBlock);
}
}
#if LDC_LLVM_VER >= 308
void ScopeStack::runCleanupCopies(CleanupCursor sourceScope,
CleanupCursor targetScope,
llvm::BasicBlock *continueWith) {
assert(targetScope <= sourceScope);
// work through the blocks in reverse execution order, so we
// can merge cleanups that end up at the same continuation target
for (CleanupCursor i = targetScope; i < sourceScope; ++i)
continueWith = executeCleanupCopying(irs, cleanupScopes[i], irs->scopebb(),
continueWith, nullptr, nullptr);
// Insert the unconditional branch to the first cleanup block.
irs->ir->CreateBr(continueWith);
}
llvm::BasicBlock *ScopeStack::runCleanupPad(CleanupCursor scope,
llvm::BasicBlock *unwindTo) {
// a catch switch never needs to be cloned and is an unwind target itself
if (isCatchSwitchBlock(cleanupScopes[scope].beginBlock))
return cleanupScopes[scope].beginBlock;
// Each cleanup block is bracketed by a pair of cleanuppad/cleanupret
// instructions; any unwinding should also just continue at the next
// cleanup block, e.g.:
//
// cleanuppad:
// %0 = cleanuppad within %funclet[]
// %frame = nullptr
// if (!_d_enter_cleanup(%frame)) br label %cleanupret
// else br label %copy
//
// copy:
// invoke _dtor to %cleanupret unwind %unwindTo [ "funclet"(token %0) ]
//
// cleanupret:
// _d_leave_cleanup(%frame)
// cleanupret %0 unwind %unwindTo
//
llvm::BasicBlock *cleanupbb =
llvm::BasicBlock::Create(irs->context(), "cleanuppad", irs->topfunc());
auto cleanuppad =
llvm::CleanupPadInst::Create(getFuncletToken(), {}, "", cleanupbb);
llvm::BasicBlock *cleanupret =
llvm::BasicBlock::Create(irs->context(), "cleanupret", irs->topfunc());
// preparation to allocate some space on the stack where _d_enter_cleanup
// can place an exception frame (but not done here)
auto frame = getNullPtr(getVoidPtrType());
auto savedInsertBlock = irs->ir->GetInsertBlock();
auto savedInsertPoint = irs->ir->GetInsertPoint();
auto savedDbgLoc = irs->DBuilder.GetCurrentLoc();
auto endFn = getRuntimeFunction(Loc(), irs->module, "_d_leave_cleanup");
irs->ir->SetInsertPoint(cleanupret);
irs->DBuilder.EmitStopPoint(irs->func()->decl->loc);
irs->ir->CreateCall(endFn, frame,
{llvm::OperandBundleDef("funclet", cleanuppad)}, "");
llvm::CleanupReturnInst::Create(cleanuppad, unwindTo, cleanupret);
auto copybb = executeCleanupCopying(irs, cleanupScopes[scope], cleanupbb,
cleanupret, unwindTo, cleanuppad);
auto beginFn = getRuntimeFunction(Loc(), irs->module, "_d_enter_cleanup");
irs->ir->SetInsertPoint(cleanupbb);
irs->DBuilder.EmitStopPoint(irs->func()->decl->loc);
auto exec = irs->ir->CreateCall(
beginFn, frame, {llvm::OperandBundleDef("funclet", cleanuppad)}, "");
llvm::BranchInst::Create(copybb, cleanupret, exec, cleanupbb);
irs->ir->SetInsertPoint(savedInsertBlock, savedInsertPoint);
irs->DBuilder.EmitStopPoint(savedDbgLoc);
return cleanupbb;
}
#endif
void ScopeStack::runAllCleanups(llvm::BasicBlock *continueWith) {
runCleanups(0, continueWith);
}
void ScopeStack::popCleanups(CleanupCursor targetScope) {
assert(targetScope <= currentCleanupScope());
if (targetScope == currentCleanupScope()) {
return;
}
for (CleanupCursor i = currentCleanupScope(); i-- > targetScope;) {
// Any gotos that are still unresolved necessarily leave this scope.
// Thus, the cleanup needs to be executed.
for (const auto &gotoJump : currentUnresolvedGotos()) {
// Make the source block (or the last cleanup, respectively) branch to this one.
llvm::BasicBlock *tentative = gotoJump.tentativeTarget;
#if LDC_LLVM_VER >= 308
if (useMSVCEH()) {
llvm::BasicBlock *continueWith = llvm::BasicBlock::Create(
irs->context(), "jumpcleanup", irs->topfunc());
auto startCleanup =
executeCleanupCopying(irs, cleanupScopes[i], gotoJump.sourceBlock,
continueWith, nullptr, nullptr);
tentative->replaceAllUsesWith(startCleanup);
llvm::BranchInst::Create(tentative, continueWith);
} else
#endif
{
tentative->replaceAllUsesWith(cleanupScopes[i].beginBlock);
// And continue execution with the tentative target (we simply reuse
// it because there is no reason not to).
executeCleanup(irs, cleanupScopes[i], gotoJump.sourceBlock, tentative);
}
}
std::vector<GotoJump> &nextUnresolved =
(i == 0) ? topLevelUnresolvedGotos
: cleanupScopes[i - 1].unresolvedGotos;
nextUnresolved.insert(nextUnresolved.end(),
currentUnresolvedGotos().begin(),
currentUnresolvedGotos().end());
cleanupScopes.pop_back();
}
}
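// Illustrative-only sketch (non-MSVC path, names made up) of how a pending
// forward goto is re-routed when its originating scope is popped: the branch
// in the source block is redirected to the cleanup, and the tentative target
// is reused as the continuation after it:
//
//   before:  goto.src:  br label %goto.unresolved
//   after:   goto.src:  br label %cleanup.begin
//            cleanup.end: ... eventually branches to %goto.unresolved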
void ScopeStack::pushCatch(llvm::Constant *classInfoPtr,
llvm::BasicBlock *bodyBlock,
llvm::MDNode *matchWeights) {
if (useMSVCEH()) {
#if LDC_LLVM_VER >= 308
assert(isCatchSwitchBlock(bodyBlock));
pushCleanup(bodyBlock, bodyBlock);
#endif
} else {
catchScopes.emplace_back(classInfoPtr, bodyBlock, currentCleanupScope(),
matchWeights);
currentLandingPads().push_back(nullptr);
}
}
void ScopeStack::popCatch() {
if (useMSVCEH()) {
#if LDC_LLVM_VER >= 308
assert(isCatchSwitchBlock(cleanupScopes.back().beginBlock));
popCleanups(currentCleanupScope() - 1);
#endif
} else {
catchScopes.pop_back();
currentLandingPads().pop_back();
}
}
void ScopeStack::pushLoopTarget(Statement *loopStatement,
llvm::BasicBlock *continueTarget,
llvm::BasicBlock *breakTarget) {
continueTargets.emplace_back(continueTarget, currentCleanupScope(),
loopStatement);
breakTargets.emplace_back(breakTarget, currentCleanupScope(), loopStatement);
}
void ScopeStack::popLoopTarget() {
continueTargets.pop_back();
breakTargets.pop_back();
}
void ScopeStack::pushBreakTarget(Statement *switchStatement,
llvm::BasicBlock *targetBlock) {
breakTargets.push_back({targetBlock, currentCleanupScope(), switchStatement});
}
void ScopeStack::popBreakTarget() { breakTargets.pop_back(); }
void ScopeStack::addLabelTarget(Identifier *labelName,
llvm::BasicBlock *targetBlock) {
labelTargets[labelName] = {targetBlock, currentCleanupScope(), nullptr};
// See whether any of the unresolved gotos target this label, and resolve
// those that do.
std::vector<GotoJump> &unresolved = currentUnresolvedGotos();
size_t i = 0;
while (i < unresolved.size()) {
if (unresolved[i].targetLabel != labelName) {
++i;
continue;
}
unresolved[i].tentativeTarget->replaceAllUsesWith(targetBlock);
unresolved[i].tentativeTarget->eraseFromParent();
unresolved.erase(unresolved.begin() + i);
}
}
void ScopeStack::jumpToLabel(Loc loc, Identifier *labelName) {
// If we have already seen that label, branch to it, executing any cleanups
// as necessary.
auto it = labelTargets.find(labelName);
if (it != labelTargets.end()) {
runCleanups(it->second.cleanupScope, it->second.targetBlock);
return;
}
llvm::BasicBlock *target = llvm::BasicBlock::Create(
irs->context(), "goto.unresolved", irs->topfunc());
irs->ir->CreateBr(target);
currentUnresolvedGotos().emplace_back(loc, irs->scopebb(), target, labelName);
}
void ScopeStack::jumpToStatement(std::vector<JumpTarget> &targets,
Statement *loopOrSwitchStatement) {
for (auto it = targets.rbegin(), end = targets.rend(); it != end; ++it) {
if (it->targetStatement == loopOrSwitchStatement) {
runCleanups(it->cleanupScope, it->targetBlock);
return;
}
}
assert(false && "Target for labeled break not found.");
}
void ScopeStack::jumpToClosest(std::vector<JumpTarget> &targets) {
assert(!targets.empty() &&
"Encountered break/continue but no loop in scope.");
JumpTarget &t = targets.back();
runCleanups(t.cleanupScope, t.targetBlock);
}
std::vector<GotoJump> &ScopeStack::currentUnresolvedGotos() {
return cleanupScopes.empty() ? topLevelUnresolvedGotos
: cleanupScopes.back().unresolvedGotos;
}
std::vector<llvm::BasicBlock *> &ScopeStack::currentLandingPads() {
return cleanupScopes.empty() ? topLevelLandingPads
: cleanupScopes.back().landingPads;
}
llvm::BasicBlock *&ScopeStack::getLandingPadRef(CleanupCursor scope) {
auto &pads = cleanupScopes.empty() ? topLevelLandingPads
: cleanupScopes[scope].landingPads;
if (pads.empty()) {
// Have not encountered any catches (for which we would push a scope) or
// calls to throwing functions (which would have already executed this
// branch) in this cleanup scope yet.
pads.push_back(nullptr);
}
return pads.back();
}
llvm::BasicBlock *ScopeStack::getLandingPad() {
llvm::BasicBlock *&landingPad = getLandingPadRef(currentCleanupScope() - 1);
if (!landingPad) {
#if LDC_LLVM_VER >= 308
if (useMSVCEH()) {
assert(currentCleanupScope() > 0);
landingPad = emitLandingPadMSVCEH(currentCleanupScope() - 1);
} else
#endif
landingPad = emitLandingPad();
}
return landingPad;
}
namespace {
llvm::LandingPadInst *createLandingPadInst(IRState *irs) {
LLType *retType =
LLStructType::get(LLType::getInt8PtrTy(irs->context()),
LLType::getInt32Ty(irs->context()), nullptr);
#if LDC_LLVM_VER >= 307
LLFunction *currentFunction = irs->func()->func;
if (!currentFunction->hasPersonalityFn()) {
LLFunction *personalityFn =
getRuntimeFunction(Loc(), irs->module, "_d_eh_personality");
currentFunction->setPersonalityFn(personalityFn);
}
return irs->ir->CreateLandingPad(retType, 0);
#else
LLFunction *personalityFn =
getRuntimeFunction(Loc(), irs->module, "_d_eh_personality");
return irs->ir->CreateLandingPad(retType, personalityFn, 0);
#endif
}
}
#if LDC_LLVM_VER >= 308
llvm::BasicBlock *ScopeStack::emitLandingPadMSVCEH(CleanupCursor scope) {
LLFunction *currentFunction = irs->func()->func;
if (!currentFunction->hasPersonalityFn()) {
const char *personality = "__CxxFrameHandler3";
LLFunction *personalityFn =
getRuntimeFunction(Loc(), irs->module, personality);
currentFunction->setPersonalityFn(personalityFn);
}
if (scope == 0)
return runCleanupPad(scope, nullptr);
llvm::BasicBlock *&pad = getLandingPadRef(scope - 1);
if (!pad)
pad = emitLandingPadMSVCEH(scope - 1);
return runCleanupPad(scope, pad);
}
#endif
llvm::BasicBlock *ScopeStack::emitLandingPad() {
// save and rewrite scope
IRScope savedIRScope = irs->scope();
llvm::BasicBlock *beginBB =
llvm::BasicBlock::Create(irs->context(), "landingPad", irs->topfunc());
irs->scope() = IRScope(beginBB);
llvm::LandingPadInst *landingPad = createLandingPadInst(irs);
// Stash away the exception object pointer and selector value into their
// stack slots.
llvm::Value *ehPtr = DtoExtractValue(landingPad, 0);
irs->ir->CreateStore(ehPtr, irs->func()->getOrCreateEhPtrSlot());
llvm::Value *ehSelector = DtoExtractValue(landingPad, 1);
if (!irs->func()->ehSelectorSlot) {
irs->func()->ehSelectorSlot =
DtoRawAlloca(ehSelector->getType(), 0, "eh.selector");
}
irs->ir->CreateStore(ehSelector, irs->func()->ehSelectorSlot);
// Add landingpad clauses, emit finallys and 'if' chain to catch the
// exception.
CleanupCursor lastCleanup = currentCleanupScope();
for (auto it = catchScopes.rbegin(), end = catchScopes.rend(); it != end;
++it) {
// Insert any cleanups in between the last catch we ran (i.e. tested for
// and found that the type does not match) and this one.
assert(lastCleanup >= it->cleanupScope);
if (lastCleanup > it->cleanupScope) {
landingPad->setCleanup(true);
llvm::BasicBlock *afterCleanupBB = llvm::BasicBlock::Create(
irs->context(), beginBB->getName() + llvm::Twine(".after.cleanup"),
irs->topfunc());
runCleanups(lastCleanup, it->cleanupScope, afterCleanupBB);
irs->scope() = IRScope(afterCleanupBB);
lastCleanup = it->cleanupScope;
}
// Add the ClassInfo reference to the landingpad instruction so it is
// emitted to the EH tables.
landingPad->addClause(it->classInfoPtr);
llvm::BasicBlock *mismatchBB = llvm::BasicBlock::Create(
irs->context(), beginBB->getName() + llvm::Twine(".mismatch"),
irs->topfunc());
// "Call" llvm.eh.typeid.for, which gives us the eh selector value to
// compare the landing pad selector value with.
llvm::Value *ehTypeId =
irs->ir->CreateCall(GET_INTRINSIC_DECL(eh_typeid_for),
DtoBitCast(it->classInfoPtr, getVoidPtrType()));
// Compare the selector value from the unwinder against the expected
// one and branch accordingly.
irs->ir->CreateCondBr(
irs->ir->CreateICmpEQ(irs->ir->CreateLoad(irs->func()->ehSelectorSlot),
ehTypeId),
it->bodyBlock, mismatchBB, it->branchWeights);
irs->scope() = IRScope(mismatchBB);
}
// No catch matched. Execute all finallys and resume unwinding.
if (lastCleanup > 0) {
landingPad->setCleanup(true);
runCleanups(lastCleanup, 0, irs->func()->getOrCreateResumeUnwindBlock());
} else if (!catchScopes.empty()) {
// Directly convert the last mismatch branch into a branch to the
// unwind resume block.
irs->scopebb()->replaceAllUsesWith(
irs->func()->getOrCreateResumeUnwindBlock());
irs->scopebb()->eraseFromParent();
} else {
irs->ir->CreateBr(irs->func()->getOrCreateResumeUnwindBlock());
}
irs->scope() = savedIRScope;
return beginBB;
}
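// Rough, illustrative-only shape of the block emitted above for a single
// catch clause (ClassInfo constant @CI) with no intervening cleanups:
//
//   landingPad:
//     %lp   = landingpad { i8*, i32 } catch i8* @CI
//     %ptr  = extractvalue { i8*, i32 } %lp, 0
//     store i8* %ptr, i8** %eh.ptr
//     %sel  = extractvalue { i8*, i32 } %lp, 1
//     store i32 %sel, i32* %eh.selector
//     %tid  = call i32 @llvm.eh.typeid.for(i8* @CI)
//     %cur  = load i32, i32* %eh.selector
//     %eq   = icmp eq i32 %cur, %tid
//     br i1 %eq, label %catch.body, label %landingPad.mismatch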
IrFunction::IrFunction(FuncDeclaration *fd) : FMF() {
decl = fd;
@@ -640,33 +37,6 @@ void IrFunction::setAlwaysInline() {
func->addFnAttr(llvm::Attribute::AlwaysInline);
}
llvm::AllocaInst *IrFunction::getOrCreateEhPtrSlot() {
if (!ehPtrSlot) {
ehPtrSlot = DtoRawAlloca(getVoidPtrType(), 0, "eh.ptr");
}
return ehPtrSlot;
}
llvm::BasicBlock *IrFunction::getOrCreateResumeUnwindBlock() {
assert(func == gIR->topfunc() &&
"Should only access unwind resume block while emitting function.");
if (!resumeUnwindBlock) {
resumeUnwindBlock =
llvm::BasicBlock::Create(gIR->context(), "eh.resume", func);
llvm::BasicBlock *oldBB = gIR->scopebb();
gIR->scope() = IRScope(resumeUnwindBlock);
llvm::Function *resumeFn =
getRuntimeFunction(Loc(), gIR->module, "_d_eh_resume_unwind");
gIR->ir->CreateCall(resumeFn, DtoLoad(getOrCreateEhPtrSlot()));
gIR->ir->CreateUnreachable();
gIR->scope() = IRScope(oldBB);
}
return resumeUnwindBlock;
}
IrFunction *getIrFunc(FuncDeclaration *decl, bool create) {
if (!isIrFuncCreated(decl) && create) {
assert(decl->ir->irFunc == NULL);

View file

@@ -18,438 +18,12 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "gen/llvm.h"
#include "gen/irstate.h"
#include "gen/pgo.h"
#include "ir/irfuncty.h"
#include <map>
#include <stack>
#include <vector>
class Identifier;
class FuncDeclaration;
class Statement;
class TypeFunction;
class VarDeclaration;
/// Represents a position on the stack of currently active cleanup scopes.
///
/// Since we always need to run a contiguous part of the stack (or all) in
/// order, two cursors (one of which is usually the current top of the stack)
/// are enough to identify a sequence of cleanups to run.
using CleanupCursor = size_t;
/// Stores information needed to correctly jump to a given label or loop/switch
/// statement (break/continue can be labeled, but are not necessarily).
struct JumpTarget {
/// The basic block to ultimately branch to.
llvm::BasicBlock *targetBlock = nullptr;
/// The index of the target in the stack of active cleanup scopes.
///
/// When generating code for a jump to this label, the cleanups between
/// the current depth and that of this target will be emitted. Note that
/// we need to handle only one direction (towards the root of the stack)
/// because D forbids gotos into try or finally blocks.
// TODO: We might not be able to detect illegal jumps across try-finally
// blocks by only storing the index.
CleanupCursor cleanupScope;
/// Keeps the associated loop or switch statement around so we can
/// handle both unlabeled and labeled jumps.
Statement *targetStatement = nullptr;
JumpTarget() = default;
JumpTarget(llvm::BasicBlock *targetBlock, CleanupCursor cleanupScope,
Statement *targetStatement);
};
/// Keeps track of source and target label of a goto.
///
/// Used if we cannot immediately emit all the code for a jump because we have
/// not generated code for the target yet.
struct GotoJump {
// The location of the goto instruction, for error reporting.
Loc sourceLoc;
/// The basic block which contains the goto as its terminator.
llvm::BasicBlock *sourceBlock = nullptr;
/// While we have not found the actual branch target, we might need to
/// create a "fake" basic block in order to be able to execute the cleanups
/// (we do not keep branching information around after leaving the scope).
llvm::BasicBlock *tentativeTarget = nullptr;
/// The label to target with the goto.
Identifier *targetLabel = nullptr;
GotoJump(Loc loc, llvm::BasicBlock *sourceBlock,
llvm::BasicBlock *tentativeTarget, Identifier *targetLabel);
};
/// Describes a particular way to leave a cleanup scope and continue execution
/// with another one.
///
/// In general, there can be multiple ones (normal exit, early returns,
/// breaks/continues, exceptions, and so on).
struct CleanupExitTarget {
explicit CleanupExitTarget(llvm::BasicBlock *t) : branchTarget(t) {}
/// The target basic block to branch to after running the cleanup.
llvm::BasicBlock *branchTarget = nullptr;
/// The basic blocks that want to continue with this target after running
/// the cleanup. We need to keep this information around so we can insert
/// stores to the branch selector variable when converting from one to two
/// targets.
std::vector<llvm::BasicBlock *> sourceBlocks;
/// MSVC: The basic blocks that are executed when going this route
std::vector<llvm::BasicBlock *> cleanupBlocks;
};
/// Represents a scope (in abstract terms, not curly braces) that requires a
/// piece of cleanup code to be run whenever it is left, whether as part of
/// normal control flow or exception unwinding.
///
/// This includes finally blocks (which are also generated by the frontend for
/// running the destructors of non-temporary variables) and the destructors of
/// temporaries (which are unfortunately not lowered by the frontend).
///
/// Our goal is to emit each cleanup only once, so as to avoid generating an
/// exponential number of basic blocks/landing pads for handling all the
/// different ways of exiting a deeply nested scope (consider e.g. ten
/// local variables with destructors, each of which might throw itself).
class CleanupScope {
public:
CleanupScope(llvm::BasicBlock *beginBlock, llvm::BasicBlock *endBlock)
: beginBlock(beginBlock), endBlock(endBlock) {}
/// The basic block to branch to for running the cleanup.
llvm::BasicBlock *beginBlock = nullptr;
/// The basic block that contains the end of the cleanup code (is different
/// from beginBlock if the cleanup contains control flow).
llvm::BasicBlock *endBlock = nullptr;
/// The branch selector variable, or null if not created yet.
llvm::AllocaInst *branchSelector = nullptr;
/// Stores all possible targets blocks after running this cleanup, along
/// with what predecessors want to continue at that target. The index in
/// the vector corresponds to the branch selector value for that target.
// Note: This is of course a bad choice of data structure for many targets
// complexity-wise. However, situations where this matters should be
// exceedingly rare in both hand-written as well as generated code.
std::vector<CleanupExitTarget> exitTargets;
/// Keeps track of all the gotos originating from somewhere inside this
/// scope for which we have not found the label yet (because it occurs
/// lexically later in the function).
// Note: Should also be a dense map from source block to the rest of the
// data if we expect many gotos.
std::vector<GotoJump> unresolvedGotos;
/// Caches landing pads generated for catches at this cleanup scope level.
///
/// One element is pushed to the back each time a catch block is entered,
/// and popped again once it is left. If the corresponding landing pad has
/// not been generated yet (this is done lazily), the pointer is null.
std::vector<llvm::BasicBlock *> landingPads;
/// MSVC: The original basic blocks that are executed from beginBlock to
/// endBlock.
std::vector<llvm::BasicBlock *> cleanupBlocks;
};
/// Stores information to be able to branch to a catch clause if it matches.
///
/// Each catch body is emitted only once, but may be targeted from many landing
/// pads (in case of nested catch or cleanup scopes).
struct CatchScope {
/// The ClassInfo reference corresponding to the type to match the
/// exception object against.
llvm::Constant *classInfoPtr = nullptr;
/// The block to branch to if the exception type matches.
llvm::BasicBlock *bodyBlock = nullptr;
/// The cleanup scope stack level corresponding to this catch.
CleanupCursor cleanupScope;
// PGO branch weights for the exception type match branch.
// (first weight is for match, second is for mismatch)
llvm::MDNode *branchWeights = nullptr;
CatchScope(llvm::Constant *classInfoPtr, llvm::BasicBlock *bodyBlock,
CleanupCursor cleanupScope, llvm::MDNode *branchWeights = nullptr);
};
/// Keeps track of active (abstract) scopes in a function that influence code
/// generation of their contents. This includes cleanups (finally blocks,
/// destructors), try/catch blocks and labels for goto/break/continue.
///
/// Note that the entire code generation process, and this class in particular,
/// depends heavily on the fact that we visit the statement/expression tree in
/// its natural order, i.e. depth-first and in lexical order. In other words,
/// the code here expects that after a cleanup/catch/loop/etc. has been pushed,
/// the contents of the block are generated, and it is then popped again
/// afterwards. This is also encoded in the fact that none of the methods for
/// branching/running cleanups take a cursor for describing the "source" scope,
/// it is always assumed to be the current one.
///
/// Handling of break/continue could be moved into a separate layer that uses
/// the rest of the ScopeStack API, as it (in contrast to goto) never requires
/// resolving forward references across cleanup scopes.
class ScopeStack {
public:
explicit ScopeStack(IRState *irs) : irs(irs) {}
~ScopeStack();
/// Registers a piece of cleanup code to be run.
///
/// The end block is expected not to contain a terminator yet. It will be
/// added by ScopeStack as needed, based on which follow-up blocks code from
/// within this scope will branch to.
void pushCleanup(llvm::BasicBlock *beginBlock, llvm::BasicBlock *endBlock);
/// Terminates the current basic block with a branch to the cleanups needed
/// for leaving the current scope and continuing execution at the target
/// scope stack level.
///
/// After running them, execution will branch to the given basic block.
void runCleanups(CleanupCursor targetScope, llvm::BasicBlock *continueWith) {
runCleanups(currentCleanupScope(), targetScope, continueWith);
}
/// Like #runCleanups(), but runs all of them until the top-level scope is
/// reached.
void runAllCleanups(llvm::BasicBlock *continueWith);
#if LDC_LLVM_VER >= 308
void runCleanupCopies(CleanupCursor sourceScope, CleanupCursor targetScope,
llvm::BasicBlock *continueWith);
llvm::BasicBlock *runCleanupPad(CleanupCursor scope,
llvm::BasicBlock *unwindTo);
#endif
/// Pops all the cleanups between the current scope and the target cursor.
///
/// This does not insert any cleanup calls, use #runCleanups() beforehand.
void popCleanups(CleanupCursor targetScope);
/// Returns a cursor that identifies the current cleanup scope, to be later
/// used with #runCleanups() et al.
///
/// Note that this cursor is only valid as long as the current scope is not
/// popped.
CleanupCursor currentCleanupScope() { return cleanupScopes.size(); }
/// Registers a catch block to be taken into consideration when an exception
/// is thrown within the current scope.
///
/// When a potentially throwing function call is emitted, a landing pad will
/// be emitted to compare the dynamic type info of the exception against the
/// given ClassInfo constant and to branch to the given body block if it
/// matches. The registered catch blocks are maintained on a stack, with the
/// top-most (i.e. last pushed, innermost) taking precedence.
void pushCatch(llvm::Constant *classInfoPtr, llvm::BasicBlock *bodyBlock,
llvm::MDNode *matchWeights = nullptr);
/// Unregisters the last registered catch block.
void popCatch();
size_t currentCatchScope() { return catchScopes.size(); }
#if LDC_LLVM_VER >= 308
/// MSVC: catch and cleanup code is emitted as funclets and needs
/// to be referenced from inner pads and calls.
void pushFunclet(llvm::Value *funclet) {
funclets.push_back(funclet);
}
void popFunclet() {
funclets.pop_back();
}
llvm::Value *getFunclet() {
return funclets.empty() ? nullptr : funclets.back();
}
llvm::Value *getFuncletToken() {
return funclets.empty() ? llvm::ConstantTokenNone::get(irs->context())
: funclets.back();
}
#endif
/// Registers a loop statement to be used as a target for break/continue
/// statements in the current scope.
void pushLoopTarget(Statement *loopStatement,
llvm::BasicBlock *continueTarget,
llvm::BasicBlock *breakTarget);
/// Pops the last pushed loop target, so it is no longer taken into
/// consideration for resolving breaks/continues.
void popLoopTarget();
/// Registers a statement to be used as a target for break statements in the
/// current scope (currently applies only to switch statements).
void pushBreakTarget(Statement *switchStatement,
llvm::BasicBlock *targetBlock);
/// Unregisters the last registered break target.
void popBreakTarget();
/// Adds a label to serve as a target for goto statements.
///
/// Also causes in-flight forward references to this label to be resolved.
void addLabelTarget(Identifier *labelName, llvm::BasicBlock *targetBlock);
/// Emits a call or invoke to the given callee, depending on whether there
/// are catches/cleanups active or not.
template <typename T>
llvm::CallSite callOrInvoke(llvm::Value *callee, const T &args,
const char *name = "");
/// Terminates the current basic block with an unconditional branch to the
/// given label, along with the cleanups to execute on the way there.
///
/// Legal forward references (i.e. within the same function, and not into
/// a cleanup scope) will be resolved.
void jumpToLabel(Loc loc, Identifier *labelName);
/// Terminates the current basic block with an unconditional branch to the
/// continue target generated by the given loop statement, along with
/// the cleanups to execute on the way there.
void continueWithLoop(Statement *loopStatement) {
jumpToStatement(continueTargets, loopStatement);
}
/// Terminates the current basic block with an unconditional branch to the
/// closest loop continue target, along with the cleanups to execute on
/// the way there.
void continueWithClosest() { jumpToClosest(continueTargets); }
/// Terminates the current basic block with an unconditional branch to the
/// break target generated by the given loop or switch statement, along with
/// the cleanups to execute on the way there.
void breakToStatement(Statement *loopOrSwitchStatement) {
jumpToStatement(breakTargets, loopOrSwitchStatement);
}
/// Terminates the current basic block with an unconditional branch to the
/// closest break statement target, along with the cleanups to execute on
/// the way there.
void breakToClosest() { jumpToClosest(breakTargets); }
/// Gets the existing landing pad, or emits a new one.
llvm::BasicBlock *getLandingPad();
private:
/// Internal version that allows specifying the scope at which to start
/// emitting the cleanups.
void runCleanups(CleanupCursor sourceScope, CleanupCursor targetScope,
llvm::BasicBlock *continueWith);
std::vector<GotoJump> &currentUnresolvedGotos();
std::vector<llvm::BasicBlock *> &currentLandingPads();
llvm::BasicBlock * &getLandingPadRef(CleanupCursor scope);
/// Emits a landing pad to honor all the active cleanups and catches.
llvm::BasicBlock *emitLandingPad();
#if LDC_LLVM_VER >= 308
llvm::BasicBlock *emitLandingPadMSVCEH(CleanupCursor scope);
#endif
/// Unified implementation for labeled break/continue.
void jumpToStatement(std::vector<JumpTarget> &targets,
Statement *loopOrSwitchStatement);
/// Unified implementation for unlabeled break/continue.
void jumpToClosest(std::vector<JumpTarget> &targets);
/// The ambient IRState. For legacy reasons, there is currently a cyclic
/// dependency between the two.
IRState *irs = nullptr;
using LabelTargetMap = llvm::DenseMap<Identifier *, JumpTarget>;
/// The labels we have encountered in this function so far, accessed by
/// their associated identifier (i.e. the name of the label).
LabelTargetMap labelTargets;
///
std::vector<JumpTarget> breakTargets;
///
std::vector<JumpTarget> continueTargets;
/// cleanupScopes[i] contains the information to go from
/// currentCleanupScope() == i + 1 to currentCleanupScope() == i.
std::vector<CleanupScope> cleanupScopes;
///
std::vector<CatchScope> catchScopes;
/// Gotos which we were not able to resolve to any cleanup scope, but which
/// might still be defined later in the function at top level. If there are
/// any left on function exit, it is an error (e.g. because the user tried
/// to goto into a finally block, etc.).
std::vector<GotoJump> topLevelUnresolvedGotos;
/// Caches landing pads generated for catches without any cleanups to run
/// (null if not yet emitted, one element is pushed to/popped from the back
/// on entering/leaving a catch block).
std::vector<llvm::BasicBlock *> topLevelLandingPads;
/// MSVC: stack of currently built catch/cleanup funclets
std::vector<llvm::Value*> funclets;
};
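// Hedged usage sketch (the helper below is hypothetical, not part of this
// header): the push/run/pop discipline a statement visitor is expected to
// follow when lowering a scope with a cleanup, e.g. a try/finally.
inline void emitScopeWithCleanup(ScopeStack &scopes,
                                 llvm::BasicBlock *cleanupBegin,
                                 llvm::BasicBlock *cleanupEnd,
                                 llvm::BasicBlock *continueWith) {
  CleanupCursor enclosing = scopes.currentCleanupScope();
  scopes.pushCleanup(cleanupBegin, cleanupEnd);
  // ... emit the body of the scope; calls go through callOrInvoke so that
  // unwinding also runs the cleanup ...
  scopes.runCleanups(enclosing, continueWith); // normal (fall-through) exit
  scopes.popCleanups(enclosing);
}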
template <typename T>
llvm::CallSite ScopeStack::callOrInvoke(llvm::Value *callee, const T &args,
const char *name) {
// If this is a direct call, we might be able to use the callee attributes
// to our advantage.
llvm::Function *calleeFn = llvm::dyn_cast<llvm::Function>(callee);
// Intrinsics don't support invoking and 'nounwind' functions don't need it.
const bool doesNotThrow =
calleeFn && (calleeFn->isIntrinsic() || calleeFn->doesNotThrow());
#if LDC_LLVM_VER >= 308
// calls inside a funclet must be annotated with its value
llvm::SmallVector<llvm::OperandBundleDef, 2> BundleList;
if (auto funclet = getFunclet())
BundleList.push_back(llvm::OperandBundleDef("funclet", funclet));
#endif
if (doesNotThrow || (cleanupScopes.empty() && catchScopes.empty())) {
llvm::CallInst *call = irs->ir->CreateCall(callee, args,
#if LDC_LLVM_VER >= 308
BundleList,
#endif
name);
if (calleeFn) {
call->setAttributes(calleeFn->getAttributes());
}
return call;
}
llvm::BasicBlock* landingPad = getLandingPad();
llvm::BasicBlock *postinvoke = llvm::BasicBlock::Create(
irs->context(), "postinvoke", irs->topfunc(), landingPad);
llvm::InvokeInst *invoke =
irs->ir->CreateInvoke(callee, postinvoke, landingPad, args,
#if LDC_LLVM_VER >= 308
BundleList,
#endif
name);
if (calleeFn) {
invoke->setAttributes(calleeFn->getAttributes());
}
irs->scope() = IRScope(postinvoke);
return invoke;
}
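// Hedged usage sketch (the callee, arguments, and `scopes` pointer are
// illustrative): callers hand over the callee and its arguments and get back
// a CallSite wrapping either a CallInst or an InvokeInst, depending on
// whether any cleanups or catches are currently active.
//
//   llvm::SmallVector<llvm::Value *, 2> args = {lengthValue, elementSize};
//   llvm::Instruction *call =
//       scopes->callOrInvoke(runtimeFn, args, ".tmp").getInstruction();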
// represents a function
struct IrFunction {
@@ -460,36 +34,14 @@ struct IrFunction
void setNeverInline();
void setAlwaysInline();
/// Returns the stack slot that contains the exception object pointer while a
/// landing pad is active, lazily creating it as needed.
///
/// This value must dominate all uses; first storing it, and then loading it
/// when calling _d_eh_resume_unwind. If we take a select at the end of any
/// cleanups on the way to the latter, the value must also dominate all other
/// predecessors of the cleanup. Thus, we just use a single alloca in the
/// entry BB of the function.
llvm::AllocaInst *getOrCreateEhPtrSlot();
/// Returns the basic block with the call to the unwind resume function.
///
/// Because of ehPtrSlot, we do not need more than one, so might as well
/// save on code size and reuse it.
llvm::BasicBlock *getOrCreateResumeUnwindBlock();
llvm::Function *func = nullptr;
llvm::Instruction *allocapoint = nullptr;
FuncDeclaration *decl = nullptr;
TypeFunction *type = nullptr;
/// Points to the associated scope stack while emitting code for the function.
ScopeStack *scopes = nullptr;
llvm::Value *sretArg = nullptr; // sret pointer arg
llvm::Value *thisArg = nullptr; // class/struct 'this' arg
llvm::Value *nestArg = nullptr; // nested function 'this' arg
llvm::Value *nestedVar =
nullptr; // alloca for the nested context of this function
llvm::StructType *frameType = nullptr; // type of nested context
unsigned frameTypeAlignment = 0; // its alignment
// number of enclosing functions with variables accessed by nested functions
@@ -498,19 +50,10 @@ struct IrFunction {
int depth = -1;
bool nestedContextCreated = false; // holds whether nested context is created
// TODO: Move to FuncGenState?
llvm::Value *_arguments = nullptr;
llvm::Value *_argptr = nullptr;
/// A stack slot containing the return value, for functions that return by
/// value.
llvm::AllocaInst *retValSlot = nullptr;
/// The basic block with the return instruction.
llvm::BasicBlock *retBlock = nullptr;
/// Similar story to ehPtrSlot, but for the selector value.
llvm::AllocaInst *ehSelectorSlot = nullptr;
#if LDC_LLVM_VER >= 307
llvm::DISubprogram *diSubprogram = nullptr;
std::stack<llvm::DILexicalBlock *> diLexicalBlocks;
@@ -523,22 +66,14 @@ struct IrFunction {
// Debug info for all variables
VariableMap variableMap;
// PGO information
CodeGenPGO pgo;
IrFuncTy irFty;
/// Stores the FastMath options for this function.
/// These are set e.g. by math related UDA's from ldc.attributes.
llvm::FastMathFlags FMF;
private:
llvm::AllocaInst *ehPtrSlot = nullptr;
llvm::BasicBlock *resumeUnwindBlock = nullptr;
};
IrFunction *getIrFunc(FuncDeclaration *decl, bool create = false);
bool isIrFuncCreated(FuncDeclaration *decl);
bool useMSVCEH();
#endif