Removed trailing whitespace.

David Nadlinger 2012-12-15 23:59:45 +01:00
parent 9458911839
commit 1bb14c45d2
17 changed files with 123 additions and 123 deletions
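
A change like this is usually produced mechanically rather than by hand. The following standalone C++ sketch is purely illustrative and is not part of this commit or the LDC tree (the file name strip-ws.cpp and its command-line behaviour are assumptions); it shows one way trailing spaces and tabs could be stripped from a source file in place:

// strip-ws.cpp - illustrative only; rewrites a file with trailing
// spaces and tabs removed from every line.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char** argv) {
    if (argc < 2) {
        std::cerr << "usage: strip-ws <file>\n";
        return 1;
    }
    std::ifstream in(argv[1]);
    if (!in) {
        std::cerr << "cannot open " << argv[1] << '\n';
        return 1;
    }
    std::ostringstream cleaned;
    std::string line;
    while (std::getline(in, line)) {
        // Find the last character that is not a space or tab and cut there.
        std::string::size_type end = line.find_last_not_of(" \t");
        cleaned << (end == std::string::npos ? std::string() : line.substr(0, end + 1)) << '\n';
    }
    in.close();
    std::ofstream out(argv[1]);
    out << cleaned.str();  // write the cleaned contents back to the same file
    return 0;
}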


@@ -258,7 +258,7 @@ static cl::list<std::string, StringsAdapter> linkerSwitches("L",
cl::opt<std::string> moduleDepsFile("deps",
cl::desc("Write module dependencies to filename"),
cl::value_desc("filename"));
cl::opt<std::string> mArch("march",
cl::desc("Architecture to generate code for:"));


@@ -87,7 +87,7 @@ DValue* DtoAAIndex(Loc& loc, Type* type, DValue* aa, DValue* key, bool lvalue)
if (lvalue) {
// valuesize param
LLValue* valsize = DtoConstSize_t(getTypePaddedSize(DtoType(type)));
ret = gIR->CreateCallOrInvoke4(func, aaval, keyti, valsize, pkey, "aa.index").getInstruction();
} else {
ret = gIR->CreateCallOrInvoke3(func, aaval, keyti, pkey, "aa.index").getInstruction();
@@ -275,7 +275,7 @@ LLValue* DtoAAEquals(Loc& loc, TOK op, DValue* l, DValue* r)
#else
llvm::Function* func = LLVM_D_GetRuntimeFunction(gIR->module, "_aaEq");
LLFunctionType* funcTy = func->getFunctionType();
LLValue* aaval = DtoBitCast(l->getRVal(), funcTy->getParamType(0));
LLValue* abval = DtoBitCast(r->getRVal(), funcTy->getParamType(1));
LLValue* aaTypeInfo = DtoTypeInfoOf(t);


@@ -26,13 +26,13 @@ DValue* DtoBinAdd(DValue* lhs, DValue* rhs)
LLValue *l, *r;
l = lhs->getRVal();
r = rhs->getRVal();
LLValue* res;
if (t->isfloating())
res = gIR->ir->CreateFAdd(l, r, "tmp");
else
res = gIR->ir->CreateAdd(l, r, "tmp");
return new DImValue( t, res );
}
@@ -44,13 +44,13 @@ DValue* DtoBinSub(DValue* lhs, DValue* rhs)
LLValue *l, *r;
l = lhs->getRVal();
r = rhs->getRVal();
LLValue* res;
if (t->isfloating())
res = gIR->ir->CreateFSub(l, r, "tmp");
else
res = gIR->ir->CreateSub(l, r, "tmp");
return new DImValue( t, res );
}
@@ -62,7 +62,7 @@ DValue* DtoBinMul(Type* targettype, DValue* lhs, DValue* rhs)
LLValue *l, *r;
l = lhs->getRVal();
r = rhs->getRVal();
LLValue* res;
if (t->isfloating())
res = gIR->ir->CreateFMul(l, r, "tmp");
@@ -79,7 +79,7 @@ DValue* DtoBinDiv(Type* targettype, DValue* lhs, DValue* rhs)
LLValue *l, *r;
l = lhs->getRVal();
r = rhs->getRVal();
LLValue* res;
if (t->isfloating())
res = gIR->ir->CreateFDiv(l, r, "tmp");
@@ -129,7 +129,7 @@ LLValue* DtoBinNumericEquals(Loc loc, DValue* lhs, DValue* rhs, TOK op)
Logger::println("floating");
res = DtoBinFloatsEquals(loc, lhs, rhs, op);
}
assert(res);
return res;
}


@@ -305,7 +305,7 @@ DValue* DtoCastClass(DValue* val, Type* _to)
IrTypeClass* typeclass = stripModifiers(fc)->irtype->isClass();
// find interface impl
size_t i_index = typeclass->getInterfaceIndex(it);
assert(i_index != ~0 && "requesting interface that is not implemented by this class");


@@ -220,7 +220,7 @@ DValue* DtoComplexAdd(Loc& loc, Type* type, DValue* lhs, DValue* rhs)
res_re = lhs_re;
else // either rhs_re or no re at all (then use any)
res_re = rhs_re;
if(lhs_im && rhs_im)
res_im = gIR->ir->CreateFAdd(lhs_im, rhs_im, "tmp");
else if(lhs_im)
@@ -250,7 +250,7 @@ DValue* DtoComplexSub(Loc& loc, Type* type, DValue* lhs, DValue* rhs)
res_re = lhs_re;
else // either rhs_re or no re at all (then use any)
res_re = gIR->ir->CreateFNeg(rhs_re, "neg");
if(lhs_im && rhs_im)
res_im = gIR->ir->CreateFSub(lhs_im, rhs_im, "tmp");
else if(lhs_im)


@@ -171,7 +171,7 @@ void VarDeclaration::codegen(Ir* p)
#if LDC_LLVM_VER >= 302
// FIXME: clang uses a command line option for the thread model
LLGlobalVariable* gvar = new LLGlobalVariable(*gIR->module, _type, _isconst,
DtoLinkage(this), NULL, _name, 0,
isThreadlocal() ? LLGlobalVariable::GeneralDynamicTLSModel
: LLGlobalVariable::NotThreadLocal);
#else


@@ -277,22 +277,22 @@ LLFunction* DtoInlineIRFunction(FuncDeclaration* fdecl)
assert(tinst);
Objects& objs = tinst->tdtypes;
assert(objs.dim == 3);
Expression* a0 = isExpression(objs[0]);
assert(a0);
StringExp* strexp = a0->toString();
assert(strexp);
assert(strexp->sz == 1);
std::string code(static_cast<char*>(strexp->string), strexp->len);
Type* ret = isType(objs[1]);
assert(ret);
Tuple* a2 = isTuple(objs[2]);
assert(a2);
Objects& arg_types = a2->objects;
std::string str;
llvm::raw_string_ostream stream(str);
stream << "define " << *DtoType(ret) << " @" << mangled_name << "(";
@@ -303,34 +303,34 @@ LLFunction* DtoInlineIRFunction(FuncDeclaration* fdecl)
//assert(ty);
if(!ty)
{
error(tinst->loc,
"All parameters of a template defined with pragma llvm_inline_ir, except for the first one, should be types");
fatal();
}
stream << *DtoType(ty);
i++;
if(i >= arg_types.dim)
break;
stream << ", ";
}
if(ret->ty == Tvoid)
code.append("\nret void");
stream << ")\n{\n" << code << "\n}";
llvm::SMDiagnostic err;
llvm::ParseAssemblyString(stream.str().c_str(), gIR->module, err, gIR->context());
std::string errstr = err.getMessage();
if(errstr != "")
error(tinst->loc,
"can't parse inline LLVM IR:\n%s\n%s\n%s\nThe input string was: \n%s",
err.getLineContents().c_str(),
(std::string(err.getColumnNo(), ' ') + '^').c_str(),
errstr.c_str(), stream.str().c_str());
LLFunction* fun = gIR->module->getFunction(mangled_name);
fun->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage);
#if LDC_LLVM_VER >= 302
@@ -589,7 +589,7 @@ static void set_param_attrs(TypeFunction* f, llvm::Function* func, FuncDeclarati
if (attrs[j].Index == curr.Index) {
#if LDC_LLVM_VER >= 302
attrs[j].Attrs = llvm::Attributes::get(
gIR->context(),
llvm::AttrBuilder(attrs[j].Attrs).addAttributes(curr.Attrs));
#else
attrs[j].Attrs |= curr.Attrs;
@@ -665,7 +665,7 @@ void DtoDeclareFunction(FuncDeclaration* fdecl)
LLFunction* func = vafunc ? vafunc : gIR->module->getFunction(mangled_name);
if (!func) {
if(fdecl->llvmInternal == LLVMinline_ir)
func = DtoInlineIRFunction(fdecl);
else
func = LLFunction::Create(functype, DtoLinkage(fdecl), mangled_name, gIR->module);
} else if (func->getFunctionType() != functype) {


@@ -72,7 +72,7 @@ struct IRScope
IRScope();
IRScope(llvm::BasicBlock* b, llvm::BasicBlock* e);
const IRScope& operator=(const IRScope& rhs);
#if DMDV2
@@ -89,7 +89,7 @@ struct IRBuilderHelper
struct IRAsmStmt
{
IRAsmStmt()
: isBranchToLabel(NULL) {}
std::string code;
@@ -202,7 +202,7 @@ struct IRState
GatesList sharedGates;
#endif
FuncDeclList unitTests;
// all template instances that had members emitted
// currently only filled for singleobj
// used to make sure the complete template instance gets emitted in the


@@ -93,7 +93,7 @@ Triple llvm::Triple__get64BitArchVariant(const std::string& triple) {
return T;
}
static void appendToGlobalArray(const char *Array,
Module &M, Function *F, int Priority) {
IRBuilder<> IRB(M.getContext());
FunctionType *FnTy = FunctionType::get(IRB.getVoidTy(), false);


@@ -852,7 +852,7 @@ DValue* DtoCast(Loc& loc, DValue* val, Type* to)
if (fromtype->ty == Tvector) {
return DtoCastVector(loc, val, to);
}
else
#endif
if (fromtype->isintegral()) {
return DtoCastInt(loc, val, to);


@@ -32,7 +32,7 @@ struct EnclosingTryFinally : EnclosingHandler
TryFinallyStatement* tf;
llvm::BasicBlock* landingPad;
void emitCode(IRState* p);
EnclosingTryFinally(TryFinallyStatement* _tf, llvm::BasicBlock* _pad)
: tf(_tf), landingPad(_pad) {}
};
struct EnclosingVolatile : EnclosingHandler


@@ -38,9 +38,9 @@ inline MDNodeField* MD_GetElement(llvm::MDNode* N, unsigned i) {
/// (Its name will be TD_PREFIX ~ <Name of TypeInfo global>)
enum TypeDataFields {
TD_Confirm, /// The TypeInfo this node is for.
TD_Type, /// A value of the LLVM type corresponding to this D type
// Must be kept last:
TD_NumFields /// The number of fields in TypeInfo metadata
};
@@ -55,7 +55,7 @@ enum ClassDataFields {
CD_BodyType, /// A value of the LLVM type corresponding to the class body.
CD_Finalize, /// True if this class (or a base class) has a destructor.
CD_CustomDelete,/// True if this class has an overridden delete operator.
// Must be kept last
CD_NumFields /// The number of fields in ClassInfo metadata
};


@@ -52,7 +52,7 @@ namespace {
const Module& M;
CallGraph* CG;
CallGraphNode* CGNode;
const Type* getTypeFor(Value* typeinfo) const;
};
}
@@ -64,14 +64,14 @@ namespace {
void EmitMemSet(IRBuilder<>& B, Value* Dst, Value* Val, Value* Len,
const Analysis& A) {
Dst = B.CreateBitCast(Dst, PointerType::getUnqual(B.getInt8Ty()));
Module *M = B.GetInsertBlock()->getParent()->getParent();
const Type* intTy = Len->getType();
const Type *VoidPtrTy = PointerType::getUnqual(B.getInt8Ty());
const Type *Tys[2] ={VoidPtrTy, intTy};
Function *MemSet = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
Value *Align = ConstantInt::get(B.getInt32Ty(), 1);
CallSite CS = B.CreateCall5(MemSet, Dst, Val, Len, Align, B.getFalse());
if (A.CGNode)
A.CGNode->addCalledFunction(CS, A.CG->getOrInsertFunction(MemSet));
@@ -91,11 +91,11 @@ namespace {
class FunctionInfo {
protected:
const Type* Ty;
public:
unsigned TypeInfoArgNr;
bool SafeToDelete;
// Analyze the current call, filling in some fields. Returns true if
// this is an allocation we can stack-allocate.
virtual bool analyze(CallSite CS, const Analysis& A) {
@@ -103,25 +103,25 @@ namespace {
Ty = A.getTypeFor(TypeInfo);
return (Ty != NULL);
}
// Returns the alloca to replace this call.
// It will always be inserted before the call.
virtual AllocaInst* promote(CallSite CS, IRBuilder<>& B, const Analysis& A) {
NumGcToStack++;
Instruction* Begin = CS.getCaller()->getEntryBlock().begin();
return new AllocaInst(Ty, ".nongc_mem", Begin); // FIXME: align?
}
FunctionInfo(unsigned typeInfoArgNr, bool safeToDelete)
: TypeInfoArgNr(typeInfoArgNr), SafeToDelete(safeToDelete) {}
};
class ArrayFI : public FunctionInfo {
Value* arrSize;
int ArrSizeArgNr;
bool Initialized;
public:
ArrayFI(unsigned tiArgNr, bool safeToDelete, bool initialized,
unsigned arrSizeArgNr)
@@ -129,11 +129,11 @@ namespace {
ArrSizeArgNr(arrSizeArgNr),
Initialized(initialized)
{}
virtual bool analyze(CallSite CS, const Analysis& A) {
if (!FunctionInfo::analyze(CS, A))
return false;
arrSize = CS.getArgument(ArrSizeArgNr);
const IntegerType* SizeType =
dyn_cast<IntegerType>(arrSize->getType());
@@ -158,7 +158,7 @@ namespace {
Ty = PtrTy->getElementType();
return true;
}
virtual AllocaInst* promote(CallSite CS, IRBuilder<>& B, const Analysis& A) {
IRBuilder<> Builder = B;
// If the allocation is of constant size it's best to put it in the
@@ -174,11 +174,11 @@ namespace {
} else {
NumToDynSize++;
}
// Convert array size to 32 bits if necessary
Value* count = Builder.CreateIntCast(arrSize, Builder.getInt32Ty(), false);
AllocaInst* alloca = Builder.CreateAlloca(Ty, count, ".nongc_mem"); // FIXME: align?
if (Initialized) {
// For now, only zero-init is supported.
uint64_t size = A.TD.getTypeStoreSize(Ty);
@@ -188,11 +188,11 @@ namespace {
Value* Size = B.CreateMul(TypeSize, arrSize);
EmitMemZero(B, alloca, Size, A);
}
return alloca;
}
};
// FunctionInfo for _d_allocclass
class AllocClassFI : public FunctionInfo {
public:
@@ -226,17 +226,17 @@ namespace {
Constant* hasCustomDelete = dyn_cast<Constant>(MD_GetElement(node, CD_CustomDelete));
if (hasDestructor == NULL || hasCustomDelete == NULL)
return false;
if (ConstantExpr::getOr(hasDestructor, hasCustomDelete)
!= ConstantInt::getFalse(A.M.getContext()))
return false;
Ty = MD_GetElement(node, CD_BodyType)->getType();
return true;
}
// The default promote() should be fine.
AllocClassFI() : FunctionInfo(~0u, true) {}
};
}
@@ -252,27 +252,27 @@ namespace {
class LLVM_LIBRARY_VISIBILITY GarbageCollect2Stack : public FunctionPass {
StringMap<FunctionInfo*> KnownFunctions;
Module* M;
FunctionInfo AllocMemoryT;
ArrayFI NewArrayVT;
ArrayFI NewArrayT;
AllocClassFI AllocClass;
public:
static char ID; // Pass identification
GarbageCollect2Stack();
bool doInitialization(Module &M) {
this->M = &M;
return false;
}
bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetData>();
AU.addRequired<DominatorTree>();
AU.addPreserved<CallGraph>();
AU.addPreserved<DominatorTree>();
}
@@ -285,7 +285,7 @@ X("dgc2stack", "Promote (GC'ed) heap allocations to stack");
// Public interface to the pass.
FunctionPass *createGarbageCollect2Stack() {
return new GarbageCollect2Stack();
}
GarbageCollect2Stack::GarbageCollect2Stack()
@@ -305,7 +305,7 @@ static void RemoveCall(CallSite CS, const Analysis& A) {
InvokeInst* Invoke = cast<InvokeInst>(CS.getInstruction());
// If this was an invoke instruction, we need to do some extra
// work to preserve the control flow.
// Create a "conditional" branch that -simplifycfg can clean up, so we
// can keep using the DominatorTree without updating it.
BranchInst::Create(Invoke->getNormalDest(), Invoke->getUnwindDest(),
@@ -323,18 +323,18 @@ static bool isSafeToStackAllocate(Instruction* Alloc, DominatorTree& DT);
///
bool GarbageCollect2Stack::runOnFunction(Function &F) {
DEBUG(errs() << "\nRunning -dgc2stack on function " << F.getName() << '\n');
TargetData& TD = getAnalysis<TargetData>();
DominatorTree& DT = getAnalysis<DominatorTree>();
CallGraph* CG = getAnalysisIfAvailable<CallGraph>();
CallGraphNode* CGNode = CG ? (*CG)[&F] : NULL;
Analysis A = { TD, *M, CG, CGNode };
BasicBlock& Entry = F.getEntryBlock();
IRBuilder<> AllocaBuilder(&Entry, Entry.begin());
bool Changed = false;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
@@ -343,53 +343,53 @@ bool GarbageCollect2Stack::runOnFunction(Function &F) {
CallSite CS = CallSite::get(Inst);
if (!CS.getInstruction())
continue;
// Ignore indirect calls and calls to non-external functions.
Function *Callee = CS.getCalledFunction();
if (Callee == 0 || !Callee->isDeclaration() ||
!(Callee->hasExternalLinkage() || Callee->hasDLLImportLinkage()))
continue;
// Ignore unknown calls.
StringMap<FunctionInfo*>::iterator OMI =
KnownFunctions.find(Callee->getName());
if (OMI == KnownFunctions.end()) continue;
assert(isa<PointerType>(Inst->getType())
&& "GC function doesn't return a pointer?");
FunctionInfo* info = OMI->getValue();
if (Inst->use_empty() && info->SafeToDelete) {
Changed = true;
NumDeleted++;
RemoveCall(CS, A);
continue;
}
DEBUG(errs() << "GarbageCollect2Stack inspecting: " << *Inst);
if (!info->analyze(CS, A) || !isSafeToStackAllocate(Inst, DT))
continue;
// Let's alloca this!
Changed = true;
IRBuilder<> Builder(BB, Inst);
Value* newVal = info->promote(CS, Builder, A);
DEBUG(errs() << "Promoted to: " << *newVal);
// Make sure the type is the same as it was before, and replace all
// uses of the runtime call with the alloca.
if (newVal->getType() != Inst->getType())
newVal = Builder.CreateBitCast(newVal, Inst->getType());
Inst->replaceAllUsesWith(newVal);
RemoveCall(CS, A);
}
}
return Changed;
}
@@ -397,7 +397,7 @@ const Type* Analysis::getTypeFor(Value* typeinfo) const {
GlobalVariable* ti_global = dyn_cast<GlobalVariable>(typeinfo->stripPointerCasts());
if (!ti_global)
return NULL;
std::string metaname = TD_PREFIX;
metaname += ti_global->getName();
@@ -414,7 +414,7 @@ const Type* Analysis::getTypeFor(Value* typeinfo) const {
if (TD_Confirm >= 0 && (!MD_GetElement(node, TD_Confirm) ||
MD_GetElement(node, TD_Confirm)->stripPointerCasts() != ti_global))
return NULL;
return MD_GetElement(node, TD_Type)->getType();
}
@@ -422,7 +422,7 @@ const Type* Analysis::getTypeFor(Value* typeinfo) const {
/// (without executing Def again).
static bool mayBeUsedAfterRealloc(Instruction* Def, Instruction* Alloc, DominatorTree& DT) {
DEBUG(errs() << "### mayBeUsedAfterRealloc()\n" << *Def << *Alloc);
// If the definition isn't used it obviously won't be used after the
// allocation.
// If it does not dominate the allocation, there's no way for it to be used
@@ -432,12 +432,12 @@ static bool mayBeUsedAfterRealloc(Instruction* Def, Instruction* Alloc, Dominato
DEBUG(errs() << "### No uses or does not dominate allocation\n");
return false;
}
DEBUG(errs() << "### Def dominates Alloc\n");
BasicBlock* DefBlock = Def->getParent();
BasicBlock* AllocBlock = Alloc->getParent();
// Create a set of users and one of blocks containing users.
SmallSet<User*, 16> Users;
SmallSet<BasicBlock*, 16> UserBlocks;
@@ -446,7 +446,7 @@ static bool mayBeUsedAfterRealloc(Instruction* Def, Instruction* Alloc, Dominato
Instruction* User = cast<Instruction>(*UI);
DEBUG(errs() << "USER: " << *User);
BasicBlock* UserBlock = User->getParent();
// This dominance check is not performed if they're in the same block
// because it will just walk the instruction list to figure it out.
// We will instead do that ourselves in the first iteration (for all
@@ -457,34 +457,34 @@ static bool mayBeUsedAfterRealloc(Instruction* Def, Instruction* Alloc, Dominato
DEBUG(errs() << "### Alloc dominates user " << *User);
return true;
}
// Phi nodes are checked separately, so no need to enter them here.
if (!isa<PHINode>(User)) {
Users.insert(User);
UserBlocks.insert(UserBlock);
}
}
// Contains first instruction of block to inspect.
typedef std::pair<BasicBlock*, BasicBlock::iterator> StartPoint;
SmallVector<StartPoint, 16> Worklist;
// Keeps track of successors that have been added to the work list.
SmallSet<BasicBlock*, 16> Visited;
// Start just after the allocation.
// Note that we don't insert AllocBlock into the Visited set here so the
// start of the block will get inspected if it's reachable.
BasicBlock::iterator Start = Alloc;
++Start;
Worklist.push_back(StartPoint(AllocBlock, Start));
while (!Worklist.empty()) {
StartPoint sp = Worklist.pop_back_val();
BasicBlock* B = sp.first;
BasicBlock::iterator BBI = sp.second;
// BBI is either just after the allocation (in the first iteration)
// or just after the last phi node in B (in subsequent iterations) here.
// This whole 'if' is just a way to avoid performing the inner 'for'
// loop when it can be determined not to be necessary, avoiding
// potentially expensive walks of the instruction list.
@@ -517,7 +517,7 @@ static bool mayBeUsedAfterRealloc(Instruction* Def, Instruction* Alloc, Dominato
// No users and no definition or allocation after the start point,
// so just keep going.
}
// All instructions after the starting point in this block have been
// accounted for. Look for successors to add to the work list.
TerminatorInst* Term = B->getTerminator();
@@ -555,32 +555,32 @@ static bool mayBeUsedAfterRealloc(Instruction* Def, Instruction* Alloc, Dominato
/// escape from the function and no derived pointers are live at the call site
/// (i.e. if it's in a loop then the function can't use any pointer returned
/// from an earlier call after a new call has been made)
///
/// This is currently conservative where loops are involved: it can handle
/// simple loops, but returns false if any derived pointer is used in a
/// subsequent iteration.
///
/// Based on LLVM's PointerMayBeCaptured(), which only does escape analysis but
/// doesn't care about loops.
bool isSafeToStackAllocate(Instruction* Alloc, DominatorTree& DT) {
assert(isa<PointerType>(Alloc->getType()) && "Allocation is not a pointer?");
Value* V = Alloc;
SmallVector<Use*, 16> Worklist;
SmallSet<Use*, 16> Visited;
for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
UI != UE; ++UI) {
Use *U = &UI.getUse();
Visited.insert(U);
Worklist.push_back(U);
}
while (!Worklist.empty()) {
Use *U = Worklist.pop_back_val();
Instruction *I = cast<Instruction>(U->getUser());
V = U->get();
switch (I->getOpcode()) {
case Instruction::Call:
case Instruction::Invoke: {
@@ -591,7 +591,7 @@ bool isSafeToStackAllocate(Instruction* Alloc, DominatorTree& DT) {
if (CS.onlyReadsMemory() && CS.doesNotThrow() &&
I->getType() == Type::getVoidTy(I->getContext()))
break;
// Not captured if only passed via 'nocapture' arguments. Note that
// calling a function pointer does not in itself cause the pointer to
// be captured. This is a subtle point considering that (for example)
@@ -628,7 +628,7 @@ bool isSafeToStackAllocate(Instruction* Alloc, DominatorTree& DT) {
// the original allocation.
if (mayBeUsedAfterRealloc(I, Alloc, DT))
return false;
// The original value is not captured via this if the new value isn't.
for (Instruction::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI) {
@@ -642,7 +642,7 @@ bool isSafeToStackAllocate(Instruction* Alloc, DominatorTree& DT) {
return false;
}
}
// All uses examined - not captured or live across original allocation.
return true;
}


@@ -187,7 +187,7 @@ std::vector<llvm::Value*> DtoStructLiteralValues(const StructDeclaration* sd,
const std::vector<llvm::Value*>& inits,
bool isConst)
{
// get arrays
size_t nvars = sd->fields.dim;
VarDeclaration** vars = (VarDeclaration**)sd->fields.data;
@@ -347,7 +347,7 @@ std::vector<llvm::Value*> DtoStructLiteralValues(const StructDeclaration* sd,
/// Union types will get expanded into a struct, with a type for each member.
LLType* DtoUnpaddedStructType(Type* dty) {
assert(dty->ty == Tstruct);
typedef llvm::DenseMap<Type*, llvm::StructType*> CacheT;
static llvm::ManagedStatic<CacheT> cache;
CacheT::iterator it = cache->find(dty);
@@ -383,9 +383,9 @@ LLValue* DtoUnpaddedStruct(Type* dty, LLValue* v) {
assert(dty->ty == Tstruct);
TypeStruct* sty = static_cast<TypeStruct*>(dty);
Array& fields = sty->sym->fields;
LLValue* newval = llvm::UndefValue::get(DtoUnpaddedStructType(dty));
for (unsigned i = 0; i < fields.dim; i++) {
VarDeclaration* vd = static_cast<VarDeclaration*>(fields.data[i]);
LLValue* fieldptr = DtoIndexStruct(v, sty->sym, vd);
@@ -406,7 +406,7 @@ void DtoPaddedStruct(Type* dty, LLValue* v, LLValue* lval) {
assert(dty->ty == Tstruct);
TypeStruct* sty = static_cast<TypeStruct*>(dty);
Array& fields = sty->sym->fields;
for (unsigned i = 0; i < fields.dim; i++) {
VarDeclaration* vd = static_cast<VarDeclaration*>(fields.data[i]);
LLValue* fieldptr = DtoIndexStruct(lval, sty->sym, vd);


@@ -158,7 +158,7 @@ IrFunction::IrFunction(FuncDeclaration* fd)
frameType = NULL;
depth = -1;
nestedContextCreated = false;
_arguments = NULL;
_argptr = NULL;
}


@@ -33,10 +33,10 @@ struct IRTargetScope
{
// generating statement
Statement* s;
// the try of a TryFinally that encloses the loop
EnclosingHandler* enclosinghandler;
llvm::BasicBlock* breakTarget;
llvm::BasicBlock* continueTarget;
@@ -84,7 +84,7 @@ struct IrFunction : IrBase
{
// constructor
IrFunction(FuncDeclaration* fd);
// annotations
void setNeverInline();
void setAlwaysInline();
@@ -98,21 +98,21 @@ struct IrFunction : IrBase
bool queued;
bool defined;
llvm::Value* retArg; // return in ptr arg
llvm::Value* thisArg; // class/struct 'this' arg
llvm::Value* nestArg; // nested function 'this' arg
llvm::Value* nestedVar; // nested var alloca
llvm::StructType* frameType; // type of nested context (not for -nested-ctx=array)
// number of enclosing functions with variables accessed by nested functions
// (-1 if neither this function nor any enclosing ones access variables from enclosing functions)
int depth;
bool nestedContextCreated; // holds whether nested context is created
llvm::Value* _arguments;
llvm::Value* _argptr;
llvm::DISubprogram diSubprogram;
std::stack<llvm::DILexicalBlock> diLexicalBlocks;
};


@@ -42,7 +42,7 @@ struct IrStruct : IrBase
/// Aggregate D type.
Type* type;
/// true only for: align(1) struct S { ... }
bool packed;
/// Composite type debug description.