Added foreach_reverse to list of tokens used for line-of-code count

- Alphabetized constants
- Cleaned up constant names
- Fixed defects with decimal parsing
- Eliminated iteration style parameter to lexWhitespace, as it didn't really speed things up
- Added support for imaginary literals
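Aside: a minimal D sketch of the literal suffixes the lexer now has to distinguish. This is illustrative only, not code from this commit; the imaginary types shown were valid D when this commit was written and have since been deprecated.

    // Hypothetical example: how D's floating literal suffixes map to types.
    import std.stdio;

    void main()
    {
        static assert(is(typeof(1.23f)  == float));   // FloatLiteral
        static assert(is(typeof(1.23)   == double));  // DoubleLiteral
        static assert(is(typeof(1.23L)  == real));    // RealLiteral
        static assert(is(typeof(1.23fi) == ifloat));  // imaginary float literal
        static assert(is(typeof(1.23i)  == idouble)); // imaginary double literal
        static assert(is(typeof(1.23Li) == ireal));   // imaginary real literal
        writeln("all literal suffixes classified");
    }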
This commit is contained in:
  parent 023ab09a7e
  commit bbd2ec13ea
@@ -32,7 +32,31 @@ immutable string[] versions = ["AIX", "all", "Alpha", "ARM", "BigEndian", "BSD",
 */
 size_t findEndOfExpression(const Token[] tokens, size_t index)
 {
-    return index;
+    size_t i = index;
+    while (i < tokens.length)
+    {
+        switch (tokens[i].type)
+        {
+        case TokenType.RBrace:
+        case TokenType.RParen:
+        case TokenType.RBracket:
+        case TokenType.Semicolon:
+            break;
+        case TokenType.LParen:
+            skipParens(tokens, index);
+            break;
+        case TokenType.LBrace:
+            skipBraces(tokens, index);
+            break;
+        case TokenType.LBracket:
+            skipBrackets(tokens, index);
+            break;
+        default:
+            ++i;
+            break;
+        }
+    }
+    return i;
 }

 size_t findBeginningOfExpression(const Token[] tokens, size_t index)
@@ -64,19 +88,19 @@ struct AutoComplete

     switch (symbol.type)
     {
-    case TokenType.floatLiteral:
+    case TokenType.FloatLiteral:
         return "float";
-    case TokenType.doubleLiteral:
+    case TokenType.DoubleLiteral:
         return "double";
-    case TokenType.realLiteral:
+    case TokenType.RealLiteral:
         return "real";
-    case TokenType.intLiteral:
+    case TokenType.IntLiteral:
         return "int";
-    case TokenType.unsignedIntLiteral:
+    case TokenType.UnsignedIntLiteral:
         return "uint";
-    case TokenType.longLiteral:
+    case TokenType.LongLiteral:
         return "long";
-    case TokenType.unsignedLongLiteral:
+    case TokenType.UnsignedLongLiteral:
         return "ulong";
     default:
         break;
@@ -92,21 +116,21 @@ struct AutoComplete
     auto index = preceedingTokens.length - 1;
     while (true)
     {
-        if (preceedingTokens[index] == TokenType.lBrace)
+        if (preceedingTokens[index] == TokenType.LBrace)
             --depth;
-        else if (preceedingTokens[index] == TokenType.rBrace)
+        else if (preceedingTokens[index] == TokenType.RBrace)
             ++depth;
         else if (depth <= 0 && preceedingTokens[index].value == symbol)
         {
             // Found the symbol, now determine if it was declared here.
             auto p = preceedingTokens[index - 1];
-            if ((p == TokenType.tAuto || p == TokenType.tImmutable
-                || p == TokenType.tConst)
-                && preceedingTokens[index + 1] == TokenType.assign)
+            if ((p == TokenType.Auto || p == TokenType.Immutable
+                || p == TokenType.Const)
+                && preceedingTokens[index + 1] == TokenType.Assign)
             {
                 return null;
             }
-            else if (p == TokenType.identifier
+            else if (p == TokenType.Identifier
                 || (p.type > TokenType.TYPES_BEGIN
                 && p.type < TokenType.TYPES_END))
             {
@@ -153,14 +177,14 @@ struct AutoComplete
         return "";
     switch (tokens[index].type)
     {
-    case TokenType.tVersion:
+    case TokenType.Version:
         return to!string(join(map!`a ~ "?1"`(versions), " ").array());
-    case TokenType.tIf:
-    case TokenType.tCast:
-    case TokenType.tWhile:
-    case TokenType.tFor:
-    case TokenType.tForeach:
-    case TokenType.tSwitch:
+    case TokenType.If:
+    case TokenType.Cast:
+    case TokenType.While:
+    case TokenType.For:
+    case TokenType.Foreach:
+    case TokenType.Switch:
         return "";
     default:
         return "";
build.sh (4 changed lines)

--- a/build.sh
+++ b/build.sh
@@ -1,2 +1,2 @@
-dmd *.d -release -noboundscheck -O -w -wi -m64 -property -ofdscanner
-#dmd *.d -g -unittest -m64 -w -wi -property -ofdscanner
+#dmd *.d -release -noboundscheck -O -w -wi -m64 -property -ofdscanner
+dmd *.d -g -unittest -m64 -w -wi -property -ofdscanner
@@ -44,10 +44,10 @@ html { background-color: #111; color: #ccc; }
         case TokenType.TYPES_BEGIN: .. case TokenType.TYPES_END:
             writeSpan("type", t.value);
             break;
-        case TokenType.comment:
+        case TokenType.Comment:
             writeSpan("comment", t.value);
             break;
-        case TokenType.stringLiteral:
+        case TokenType.STRINGS_BEGIN: .. case TokenType.STRINGS_END:
             writeSpan("string", t.value);
             break;
         case TokenType.NUMBERS_BEGIN: .. case TokenType.NUMBERS_END:
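The hunk above leans on D's case-range statements over sentinel enum members, so a single case can match an entire category of tokens. A self-contained sketch of the pattern, with illustrative names rather than the real TokenType enum:

    enum Kind
    {
        STRINGS_BEGIN,
        StringLiteral,
        WStringLiteral,
        DStringLiteral,
        STRINGS_END,
        Identifier,
    }

    string category(Kind k)
    {
        switch (k)
        {
        case Kind.STRINGS_BEGIN: .. case Kind.STRINGS_END:
            return "string"; // matches every member between the sentinels
        default:
            return "other";
        }
    }

    unittest
    {
        assert(category(Kind.DStringLiteral) == "string");
        assert(category(Kind.Identifier) == "other");
    }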
langutils.d (623 changed lines)

--- a/langutils.d
+++ b/langutils.d
@@ -1,4 +1,3 @@
-
 // Copyright Brian Schott (Sir Alaran) 2012.
 // Distributed under the Boost Software License, Version 1.0.
 // (See accompanying file LICENSE_1_0.txt or copy at
@@ -41,7 +40,7 @@ pure nothrow TokenType lookupTokenType(const string input)
     if (type !is null)
         return *type;
     else
-        return TokenType.identifier;
+        return TokenType.Identifier;
 }


@@ -52,228 +51,226 @@ enum TokenType: uint
 {
     // Operators
     OPERATORS_BEGIN,
-    div, /// /
-    divEquals, /// /=
-    dot, /// .
-    slice, // ..
-    vararg, /// ...
-    bitAnd, /// &
-    bitAndEquals, /// &=
-    logicAnd, /// &&
-    bitOr, /// |
-    bitOrEquals, /// |=
-    logicOr, /// ||
-    minus, /// -
-    minusEquals, /// -=
-    uMinus, /// --
-    plus, /// +
-    plusEquals, /// +=
-    uPlus, /// ++
-    less, /// <
-    lessEqual, /// <=
-    shiftLeft, /// <<
-    shiftLeftEqual, /// <<=
-    lessOrGreater, /// <>
-    lessEqualGreater, // <>=
-    greater, /// >
-    greaterEqual, /// >=
-    shiftRightEqual, /// >>=
-    unsignedShiftRightEqual, /// >>>=
-    shiftRight, /// >>
-    unsignedShiftRight, /// >>>
-    not, /// !
-    notEquals, /// !=
-    notLessEqualGreater, /// !<>
-    unordered, /// !<>=
-    notLess, /// !<
-    notLessEqual, /// !<=
-    notGreater, /// !>
-    notGreaterEqual, /// !>=
-    lParen, /// $(LPAREN)
-    rParen, /// $(RPAREN)
-    lBracket, /// [
-    rBracket, /// ]
-    lBrace, /// {
-    rBrace, /// }
-    ternary, /// ?
-    comma, /// ,
-    semicolon, /// ;
-    colon, /// :
-    dollar, /// $
-    assign, /// =
-    equals, /// ==
-    star, /// *
-    mulEquals, /// *=
-    mod, /// %
-    modEquals, /// %=
-    xor, /// ^
-    xorEquals, /// ^=
-    pow, /// ^^
-    powEquals, /// ^^=
-    tilde, /// ~
-    catEquals, /// ~=
-    hash, // #
-    goesTo, // =>
+    Assign, /// =
+    BitAnd, /// &
+    BitAndEquals, /// &=
+    BitOr, /// |
+    BitOrEquals, /// |=
+    CatEquals, /// ~=
+    Colon, /// :
+    Comma, /// ,
+    Decrement, /// --
+    Div, /// /
+    DivEquals, /// /=
+    Dollar, /// $
+    Dot, /// .
+    Equals, /// ==
+    GoesTo, // =>
+    Greater, /// >
+    GreaterEqual, /// >=
+    Hash, // #
+    Increment, /// ++
+    LBrace, /// {
+    LBracket, /// [
+    Less, /// <
+    LessEqual, /// <=
+    LessEqualGreater, // <>=
+    LessOrGreater, /// <>
+    LogicAnd, /// &&
+    LogicOr, /// ||
+    LParen, /// $(LPAREN)
+    Minus, /// -
+    MinusEquals, /// -=
+    Mod, /// %
+    ModEquals, /// %=
+    MulEquals, /// *=
+    Not, /// !
+    NotEquals, /// !=
+    NotGreater, /// !>
+    NotGreaterEqual, /// !>=
+    NotLess, /// !<
+    NotLessEqual, /// !<=
+    NotLessEqualGreater, /// !<>
+    Plus, /// +
+    PlusEquals, /// +=
+    Pow, /// ^^
+    PowEquals, /// ^^=
+    RBrace, /// }
+    RBracket, /// ]
+    RParen, /// $(RPAREN)
+    Semicolon, /// ;
+    ShiftLeft, /// <<
+    ShiftLeftEqual, /// <<=
+    ShiftRight, /// >>
+    ShiftRightEqual, /// >>=
+    Slice, // ..
+    Star, /// *
+    Ternary, /// ?
+    Tilde, /// ~
+    Unordered, /// !<>=
+    UnsignedShiftRight, /// >>>
+    UnsignedShiftRightEqual, /// >>>=
+    Vararg, /// ...
+    Xor, /// ^
+    XorEquals, /// ^=
    OPERATORS_END,

     // Types
     TYPES_BEGIN,
-    tString, /// string
-    tWString, /// wstring
-    tDString, /// dstring
-    tBool, /// bool,
-    tByte, /// byte,
-    tCdouble, /// cdouble,
-    tCent, /// cent,
-    tCfloat, /// cfloat,
-    tChar, /// char,
-    tCreal, /// creal,
-    tDchar, /// dchar,
-    tDouble, /// double,
-    tFloat, /// float,
-    tUbyte, /// ubyte,
-    tUcent, /// ucent,
-    tUint, /// uint,
-    tUlong, /// ulong,
-    tShort, /// short,
-    tReal, /// real,
-    tLong, /// long,
-    tInt, /// int,
-    tFunction, /// function,
-    tIdouble, /// idouble,
-    tIreal, /// ireal,
-    tWchar, /// wchar,
-    tVoid, /// void,
-    tUshort, /// ushort,
-    tIfloat, /// ifloat,
+    Bool, /// bool,
+    Byte, /// byte,
+    Cdouble, /// cdouble,
+    Cent, /// cent,
+    Cfloat, /// cfloat,
+    Char, /// char,
+    Creal, /// creal,
+    Dchar, /// dchar,
+    Double, /// double,
+    DString, /// dstring
+    Float, /// float,
+    Function, /// function,
+    Idouble, /// idouble,
+    Ifloat, /// ifloat,
+    Int, /// int,
+    Ireal, /// ireal,
+    Long, /// long,
+    Real, /// real,
+    Short, /// short,
+    String, /// string
+    Ubyte, /// ubyte,
+    Ucent, /// ucent,
+    Uint, /// uint,
+    Ulong, /// ulong,
+    Ushort, /// ushort,
+    Void, /// void,
+    Wchar, /// wchar,
+    WString, /// wstring
     TYPES_END,
-    tTemplate, /// template,
+    Template, /// template,

     // Keywords
     KEYWORDS_BEGIN,
     ATTRIBUTES_BEGIN,
-    tExtern, /// extern,
-    tAlign, /// align,
-    tPragma, /// pragma,
-    tDeprecated, /// deprecated,
+    Align, /// align,
+    Deprecated, /// deprecated,
+    Extern, /// extern,
+    Pragma, /// pragma,
     PROTECTION_BEGIN,
-    tPackage, /// package,
-    tPrivate, /// private,
-    tProtected, /// protected,
-    tPublic, /// public,
-    tExport, /// export,
+    Export, /// export,
+    Package, /// package,
+    Private, /// private,
+    Protected, /// protected,
+    Public, /// public,
     PROTECTION_END,
-    tStatic, /// static,
-    tSynchronized, /// synchronized,
-    tFinal, /// final
-    tAbstract, /// abstract,
-    tConst, /// const,
-    tAuto, /// auto,
-    tScope, /// scope,
-    t__gshared, /// __gshared,
-    tShared, // shared,
-    tImmutable, // immutable,
-    tInout, // inout,
-    atDisable, /// @disable
+    Abstract, /// abstract,
+    AtDisable, /// @disable
+    Auto, /// auto,
+    Const, /// const,
+    Final, /// final
+    Gshared, /// __gshared,
+    Immutable, // immutable,
+    Inout, // inout,
+    Scope, /// scope,
+    Shared, // shared,
+    Static, /// static,
+    Synchronized, /// synchronized,
     ATTRIBUTES_END,
-    tAlias, /// alias,
-    tAsm, /// asm,
-    tAssert, /// assert,
-    tBody, /// body,
-    tBreak, /// break,
-    tCase, /// case,
-    tCast, /// cast,
-    tCatch, /// catch,
-    tClass, /// class,
-    tContinue, /// continue,
-    tDebug, /// debug,
-    tDefault, /// default,
-    tDelegate, /// delegate,
-    tDelete, /// delete,
-    tDo, /// do,
-    tElse, /// else,
-    tEnum, /// enum,
-    tFalse, /// false,
-    tFinally, /// finally,
-    tFor, /// for,
-    tForeach, /// foreach,
-    tForeach_reverse, /// foreach_reverse,
-    tGoto, /// goto,
-    tIf, /// if,
-    tImport, /// import,
-    tIn, /// in,
-    tInterface, /// interface,
-    tInvariant, /// invariant,
-    tIs, /// is,
-    tLazy, /// lazy,
-    tMacro, /// macro,
-    tMixin, /// mixin,
-    tModule, /// module,
-    tNew, /// new,
-    tNothrow, /// nothrow,
-    tNull, /// null,
-    tOut, /// out,
-    tOverride, /// override,
-    tPure, /// pure,
-    tRef, /// ref,
-    tReturn, /// return,
-    tStruct, /// struct,
-    tSuper, /// super,
-    tSwitch, /// switch,
-    tThis, /// this,
-    tThrow, /// throw,
-    tTrue, /// true,
-    tTry, /// try,
-    tTypedef, /// typedef,
-    tTypeid, /// typeid,
-    tTypeof, /// typeof,
-    tUnion, /// union,
-    tUnittest, /// unittest,
-    tVersion, /// version,
-    tVolatile, /// volatile,
-    tWhile, /// while,
-    tWith, /// with,
+    Alias, /// alias,
+    Asm, /// asm,
+    Assert, /// assert,
+    Body, /// body,
+    Break, /// break,
+    Case, /// case,
+    Cast, /// cast,
+    Catch, /// catch,
+    Class, /// class,
+    Continue, /// continue,
+    Debug, /// debug,
+    Default, /// default,
+    Delegate, /// delegate,
+    Delete, /// delete,
+    Do, /// do,
+    Else, /// else,
+    Enum, /// enum,
+    False, /// false,
+    Finally, /// finally,
+    Foreach, /// foreach,
+    Foreach_reverse, /// foreach_reverse,
+    For, /// for,
+    Goto, /// goto,
+    If, /// if,
+    Import, /// import,
+    In, /// in,
+    Interface, /// interface,
+    Invariant, /// invariant,
+    Is, /// is,
+    Lazy, /// lazy,
+    Macro, /// macro,
+    Mixin, /// mixin,
+    Module, /// module,
+    New, /// new,
+    Nothrow, /// nothrow,
+    Null, /// null,
+    Out, /// out,
+    Override, /// override,
+    Pure, /// pure,
+    Ref, /// ref,
+    Return, /// return,
+    Struct, /// struct,
+    Super, /// super,
+    Switch, /// switch,
+    This, /// this,
+    Throw, /// throw,
+    True, /// true,
+    Try, /// try,
+    Typedef, /// typedef,
+    Typeid, /// typeid,
+    Typeof, /// typeof,
+    Union, /// union,
+    Unittest, /// unittest,
+    Version, /// version,
+    Volatile, /// volatile,
+    While, /// while,
+    With, /// with,
     KEYWORDS_END,

     // Constants
     CONSTANTS_BEGIN,
-    t__FILE__, /// __FILE__,
-    t__LINE__, /// __LINE__,
-    t__thread, /// __thread,
-    t__traits, /// __traits,
+    File, /// __FILE__,
+    Line, /// __LINE__,
+    Thread, /// __thread,
+    Traits, /// __traits,
     CONSTANTS_END,

     // Properties
     PROPERTIES_BEGIN,
-    atProperty, /// @property
-    atSafe, /// @safe
-    atSystem, /// @system
-    atTrusted, /// @trusted
+    AtProperty, /// @property
+    AtSafe, /// @safe
+    AtSystem, /// @system
+    AtTrusted, /// @trusted
     PROPERTIES_END,

     // Misc
     MISC_BEGIN,
-    scriptLine, // Line at the beginning of source file that starts from #!
-    comment, /// /** comment */ or // comment or ///comment
+    Blank, /// unknown token type
+    Comment, /// /** comment */ or // comment or ///comment
+    Identifier, /// anything else
+    ScriptLine, // Line at the beginning of source file that starts from #!
+    Whitespace, /// whitespace
     NUMBERS_BEGIN,
-    floatLiteral, /// 123.456f or 0x123_45p-af
-    doubleLiteral, /// 123.456
-    realLiteral, /// 123.456L
-    intLiteral, /// 123 or 0b1101010101
-    unsignedIntLiteral, /// 123u
-    longLiteral, /// 123L
-    unsignedLongLiteral, /// 123uL
+    DoubleLiteral, /// 123.456
+    FloatLiteral, /// 123.456f or 0x123_45p-af
+    IntLiteral, /// 123 or 0b1101010101
+    LongLiteral, /// 123L
+    RealLiteral, /// 123.456L
+    UnsignedIntLiteral, /// 123u
+    UnsignedLongLiteral, /// 123uL
     NUMBERS_END,
     STRINGS_BEGIN,
-    stringLiteral, /// "a string"
-    wStringLiteral, /// "16-bit character string"w
-    dStringLiteral, /// "32-bit character string"d
+    DStringLiteral, /// "32-bit character string"d
+    StringLiteral, /// "a string"
+    WStringLiteral, /// "16-bit character string"w
     STRINGS_END,
-    identifier, /// anything else
-    whitespace, /// whitespace
-    blank, /// unknown token type
     MISC_END,
 }

@@ -287,121 +284,121 @@ immutable TokenType[string] tokenLookup;
 static this()
 {
     tokenLookup = [
-        "abstract" : TokenType.tAbstract,
-        "alias" : TokenType.tAlias,
-        "align" : TokenType.tAlign,
-        "asm" : TokenType.tAsm,
-        "assert" : TokenType.tAssert,
-        "auto" : TokenType.tAuto,
-        "body" : TokenType.tBody,
-        "bool" : TokenType.tBool,
-        "break" : TokenType.tBreak,
-        "byte" : TokenType.tByte,
-        "case" : TokenType.tCase,
-        "cast" : TokenType.tCast,
-        "catch" : TokenType.tCatch,
-        "cdouble" : TokenType.tCdouble,
-        "cent" : TokenType.tCent,
-        "cfloat" : TokenType.tCfloat,
-        "char" : TokenType.tChar,
-        "class" : TokenType.tClass,
-        "const" : TokenType.tConst,
-        "continue" : TokenType.tContinue,
-        "creal" : TokenType.tCreal,
-        "dchar" : TokenType.tDchar,
-        "debug" : TokenType.tDebug,
-        "default" : TokenType.tDefault,
-        "delegate" : TokenType.tDelegate,
-        "delete" : TokenType.tDelete,
-        "deprecated" : TokenType.tDeprecated,
-        "do" : TokenType.tDo,
-        "double" : TokenType.tDouble,
-        "dstring" : TokenType.tDString,
-        "else" : TokenType.tElse,
-        "enum" : TokenType.tEnum,
-        "export" : TokenType.tExport,
-        "extern" : TokenType.tExtern,
-        "false" : TokenType.tFalse,
-        "final" : TokenType.tFinal,
-        "finally" : TokenType.tFinally,
-        "float" : TokenType.tFloat,
-        "for" : TokenType.tFor,
-        "foreach" : TokenType.tForeach,
-        "foreach_reverse" : TokenType.tForeach_reverse,
-        "function" : TokenType.tFunction,
-        "goto" : TokenType.tGoto,
-        "idouble" : TokenType.tIdouble,
-        "if" : TokenType.tIf,
-        "ifloat" : TokenType.tIfloat,
-        "immutable" : TokenType.tImmutable,
-        "import" : TokenType.tImport,
-        "in" : TokenType.tIn,
-        "inout" : TokenType.tInout,
-        "int" : TokenType.tInt,
-        "interface" : TokenType.tInterface,
-        "invariant" : TokenType.tInvariant,
-        "ireal" : TokenType.tIreal,
-        "is" : TokenType.tIs,
-        "lazy" : TokenType.tLazy,
-        "long" : TokenType.tLong,
-        "macro" : TokenType.tMacro,
-        "mixin" : TokenType.tMixin,
-        "module" : TokenType.tModule,
-        "new" : TokenType.tNew,
-        "nothrow" : TokenType.tNothrow,
-        "null" : TokenType.tNull,
-        "out" : TokenType.tOut,
-        "override" : TokenType.tOverride,
-        "package" : TokenType.tPackage,
-        "pragma" : TokenType.tPragma,
-        "private" : TokenType.tPrivate,
-        "protected" : TokenType.tProtected,
-        "public" : TokenType.tPublic,
-        "pure" : TokenType.tPure,
-        "real" : TokenType.tReal,
-        "ref" : TokenType.tRef,
-        "return" : TokenType.tReturn,
-        "scope" : TokenType.tScope,
-        "shared" : TokenType.tShared,
-        "short" : TokenType.tShort,
-        "static" : TokenType.tStatic,
-        "struct" : TokenType.tStruct,
-        "string" : TokenType.tString,
-        "super" : TokenType.tSuper,
-        "switch" : TokenType.tSwitch,
-        "synchronized" : TokenType.tSynchronized,
-        "template" : TokenType.tTemplate,
-        "this" : TokenType.tThis,
-        "throw" : TokenType.tThrow,
-        "true" : TokenType.tTrue,
-        "try" : TokenType.tTry,
-        "typedef" : TokenType.tTypedef,
-        "typeid" : TokenType.tTypeid,
-        "typeof" : TokenType.tTypeof,
-        "ubyte" : TokenType.tUbyte,
-        "ucent" : TokenType.tUcent,
-        "uint" : TokenType.tUint,
-        "ulong" : TokenType.tUlong,
-        "union" : TokenType.tUnion,
-        "unittest" : TokenType.tUnittest,
-        "ushort" : TokenType.tUshort,
-        "version" : TokenType.tVersion,
-        "void" : TokenType.tVoid,
-        "volatile" : TokenType.tVolatile,
-        "wchar" : TokenType.tWchar,
-        "while" : TokenType.tWhile,
-        "with" : TokenType.tWith,
-        "wstring" : TokenType.tWString,
-        "__FILE__" : TokenType.t__FILE__,
-        "__LINE__" : TokenType.t__LINE__,
-        "__gshared" : TokenType.t__gshared,
-        "__thread" : TokenType.t__thread,
-        "__traits" : TokenType.t__traits,
-        "@disable" : TokenType.atDisable,
-        "@property" : TokenType.atProperty,
-        "@safe" : TokenType.atSafe,
-        "@system" : TokenType.atSystem,
-        "@trusted" : TokenType.atTrusted,
+        "abstract" : TokenType.Abstract,
+        "alias" : TokenType.Alias,
+        "align" : TokenType.Align,
+        "asm" : TokenType.Asm,
+        "assert" : TokenType.Assert,
+        "auto" : TokenType.Auto,
+        "body" : TokenType.Body,
+        "bool" : TokenType.Bool,
+        "break" : TokenType.Break,
+        "byte" : TokenType.Byte,
+        "case" : TokenType.Case,
+        "cast" : TokenType.Cast,
+        "catch" : TokenType.Catch,
+        "cdouble" : TokenType.Cdouble,
+        "cent" : TokenType.Cent,
+        "cfloat" : TokenType.Cfloat,
+        "char" : TokenType.Char,
+        "class" : TokenType.Class,
+        "const" : TokenType.Const,
+        "continue" : TokenType.Continue,
+        "creal" : TokenType.Creal,
+        "dchar" : TokenType.Dchar,
+        "debug" : TokenType.Debug,
+        "default" : TokenType.Default,
+        "delegate" : TokenType.Delegate,
+        "delete" : TokenType.Delete,
+        "deprecated" : TokenType.Deprecated,
+        "@disable" : TokenType.AtDisable,
+        "do" : TokenType.Do,
+        "double" : TokenType.Double,
+        "dstring" : TokenType.DString,
+        "else" : TokenType.Else,
+        "enum" : TokenType.Enum,
+        "export" : TokenType.Export,
+        "extern" : TokenType.Extern,
+        "false" : TokenType.False,
+        "__FILE__" : TokenType.File,
+        "finally" : TokenType.Finally,
+        "final" : TokenType.Final,
+        "float" : TokenType.Float,
+        "foreach_reverse" : TokenType.Foreach_reverse,
+        "foreach" : TokenType.Foreach,
+        "for" : TokenType.For,
+        "function" : TokenType.Function,
+        "goto" : TokenType.Goto,
+        "__gshared" : TokenType.Gshared,
+        "idouble" : TokenType.Idouble,
+        "ifloat" : TokenType.Ifloat,
+        "if" : TokenType.If,
+        "immutable" : TokenType.Immutable,
+        "import" : TokenType.Import,
+        "inout" : TokenType.Inout,
+        "interface" : TokenType.Interface,
+        "in" : TokenType.In,
+        "int" : TokenType.Int,
+        "invariant" : TokenType.Invariant,
+        "ireal" : TokenType.Ireal,
+        "is" : TokenType.Is,
+        "lazy" : TokenType.Lazy,
+        "__LINE__" : TokenType.Line,
+        "long" : TokenType.Long,
+        "macro" : TokenType.Macro,
+        "mixin" : TokenType.Mixin,
+        "module" : TokenType.Module,
+        "new" : TokenType.New,
+        "nothrow" : TokenType.Nothrow,
+        "null" : TokenType.Null,
+        "out" : TokenType.Out,
+        "override" : TokenType.Override,
+        "package" : TokenType.Package,
+        "pragma" : TokenType.Pragma,
+        "private" : TokenType.Private,
+        "@property" : TokenType.AtProperty,
+        "protected" : TokenType.Protected,
+        "public" : TokenType.Public,
+        "pure" : TokenType.Pure,
+        "real" : TokenType.Real,
+        "ref" : TokenType.Ref,
+        "return" : TokenType.Return,
+        "@safe" : TokenType.AtSafe,
+        "scope" : TokenType.Scope,
+        "shared" : TokenType.Shared,
+        "short" : TokenType.Short,
+        "static" : TokenType.Static,
+        "string" : TokenType.String,
+        "struct" : TokenType.Struct,
+        "super" : TokenType.Super,
+        "switch" : TokenType.Switch,
+        "synchronized" : TokenType.Synchronized,
+        "@system" : TokenType.AtSystem,
+        "template" : TokenType.Template,
+        "this" : TokenType.This,
+        "__thread" : TokenType.Thread,
+        "throw" : TokenType.Throw,
+        "__traits" : TokenType.Traits,
+        "true" : TokenType.True,
+        "@trusted" : TokenType.AtTrusted,
+        "try" : TokenType.Try,
+        "typedef" : TokenType.Typedef,
+        "typeid" : TokenType.Typeid,
+        "typeof" : TokenType.Typeof,
+        "ubyte" : TokenType.Ubyte,
+        "ucent" : TokenType.Ucent,
+        "uint" : TokenType.Uint,
+        "ulong" : TokenType.Ulong,
+        "union" : TokenType.Union,
+        "unittest" : TokenType.Unittest,
+        "ushort" : TokenType.Ushort,
+        "version" : TokenType.Version,
+        "void" : TokenType.Void,
+        "volatile" : TokenType.Volatile,
+        "wchar" : TokenType.Wchar,
+        "while" : TokenType.While,
+        "with" : TokenType.With,
+        "wstring" : TokenType.WString,
     ];
 }

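A hedged usage sketch of the table above: classifying a word by consulting the associative array with the `in` operator, which yields a pointer that is null when the key is absent. This mirrors lookupTokenType earlier in langutils.d; the names come from the diff, but the function itself is illustrative.

    TokenType classify(const string word)
    {
        immutable(TokenType)* type = word in tokenLookup;
        return type !is null ? *type : TokenType.Identifier;
    }

    unittest
    {
        assert(classify("while") == TokenType.While);       // keyword
        assert(classify("banana") == TokenType.Identifier); // anything else
    }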
main.d (13 changed lines)

--- a/main.d
+++ b/main.d
@@ -27,12 +27,13 @@ pure bool isLineOfCode(TokenType t)
 {
     switch(t)
     {
-    case TokenType.semicolon:
-    case TokenType.tWhile:
-    case TokenType.tIf:
-    case TokenType.tFor:
-    case TokenType.tForeach:
-    case TokenType.tCase:
+    case TokenType.Semicolon:
+    case TokenType.While:
+    case TokenType.If:
+    case TokenType.For:
+    case TokenType.Foreach:
+    case TokenType.Foreach_reverse:
+    case TokenType.Case:
         return true;
     default:
         return false;
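With Foreach_reverse now counted above, a line-of-code total is plausibly just a fold of isLineOfCode over the token stream. A sketch under the assumption of a Token type exposing a .type field; this is not the commit's exact counting loop:

    size_t countLinesOfCode(const Token[] tokens)
    {
        size_t count;
        foreach (t; tokens)
            if (isLineOfCode(t.type)) // semicolons, loop/branch keywords, cases
                ++count;
        return count;
    }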
282
parser.d
282
parser.d
|
@ -51,7 +51,7 @@ body
|
||||||
*/
|
*/
|
||||||
const(Token)[] betweenBalancedBraces(const Token[] tokens, ref size_t index)
|
const(Token)[] betweenBalancedBraces(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
return betweenBalanced(tokens, index, TokenType.lBrace, TokenType.rBrace);
|
return betweenBalanced(tokens, index, TokenType.LBrace, TokenType.RBrace);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -60,7 +60,7 @@ const(Token)[] betweenBalancedBraces(const Token[] tokens, ref size_t index)
|
||||||
*/
|
*/
|
||||||
const(Token)[] betweenBalancedParens(const Token[] tokens, ref size_t index)
|
const(Token)[] betweenBalancedParens(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
return betweenBalanced(tokens, index, TokenType.lParen, TokenType.rParen);
|
return betweenBalanced(tokens, index, TokenType.LParen, TokenType.RParen);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -69,20 +69,27 @@ const(Token)[] betweenBalancedParens(const Token[] tokens, ref size_t index)
|
||||||
*/
|
*/
|
||||||
const(Token)[] betweenBalancedBrackets(const Token[] tokens, ref size_t index)
|
const(Token)[] betweenBalancedBrackets(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
return betweenBalanced(tokens, index, TokenType.lBracket, TokenType.rBracket);
|
return betweenBalanced(tokens, index, TokenType.LBracket, TokenType.RBracket);
|
||||||
}
|
}
|
||||||
|
|
||||||
void skipBalanced(alias Op, alias Cl)(const Token[] tokens, ref size_t index)
|
|
||||||
|
/**
|
||||||
|
* If tokens[index] is currently openToken, advances index until it refers to a
|
||||||
|
* location in tokens directly after the balanced occurance of closeToken. If
|
||||||
|
* tokens[index] is closeToken, decrements index
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
void skipBalanced(alias openToken, alias closeToken)(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
int depth = tokens[index] == Op ? 1 : -1;
|
int depth = tokens[index] == openToken ? 1 : -1;
|
||||||
int deltaIndex = depth;
|
int deltaIndex = depth;
|
||||||
index += deltaIndex;
|
index += deltaIndex;
|
||||||
for (; index < tokens.length && index > 0 && depth != 0; index += deltaIndex)
|
for (; index < tokens.length && index > 0 && depth != 0; index += deltaIndex)
|
||||||
{
|
{
|
||||||
switch (tokens[index].type)
|
switch (tokens[index].type)
|
||||||
{
|
{
|
||||||
case Op: ++depth; break;
|
case openToken: ++depth; break;
|
||||||
case Cl: --depth; break;
|
case closeToken: --depth; break;
|
||||||
default: break;
|
default: break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -90,12 +97,17 @@ void skipBalanced(alias Op, alias Cl)(const Token[] tokens, ref size_t index)
|
||||||
|
|
||||||
void skipParens(const Token[] tokens, ref size_t index)
|
void skipParens(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
skipBalanced!(TokenType.lParen, TokenType.rParen)(tokens, index);
|
skipBalanced!(TokenType.LParen, TokenType.RParen)(tokens, index);
|
||||||
}
|
}
|
||||||
|
|
||||||
void skipBrackets(const Token[] tokens, ref size_t index)
|
void skipBrackets(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
skipBalanced!(TokenType.lBracket, TokenType.rBracket)(tokens, index);
|
skipBalanced!(TokenType.LBracket, TokenType.RBracket)(tokens, index);
|
||||||
|
}
|
||||||
|
|
||||||
|
void skipBraces(const Token[] tokens, ref size_t index)
|
||||||
|
{
|
||||||
|
skipBalanced!(TokenType.LBrace, TokenType.RBrace)(tokens, index);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -122,7 +134,7 @@ body
|
||||||
{
|
{
|
||||||
if (tokens[index] == open) ++depth;
|
if (tokens[index] == open) ++depth;
|
||||||
else if (tokens[index] == close) --depth;
|
else if (tokens[index] == close) --depth;
|
||||||
else if (tokens[index] == TokenType.comma)
|
else if (tokens[index] == TokenType.Comma)
|
||||||
{
|
{
|
||||||
app.put(", ");
|
app.put(", ");
|
||||||
}
|
}
|
||||||
|
@ -139,7 +151,7 @@ body
|
||||||
*/
|
*/
|
||||||
string parenContent(const Token[]tokens, ref size_t index)
|
string parenContent(const Token[]tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
return "(" ~ content(tokens, index, TokenType.lParen, TokenType.rParen) ~ ")";
|
return "(" ~ content(tokens, index, TokenType.LParen, TokenType.RParen) ~ ")";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -148,7 +160,7 @@ string parenContent(const Token[]tokens, ref size_t index)
|
||||||
*/
|
*/
|
||||||
string bracketContent(const Token[]tokens, ref size_t index)
|
string bracketContent(const Token[]tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
return "[" ~ content(tokens, index, TokenType.lBracket, TokenType.rBracket) ~ "]";
|
return "[" ~ content(tokens, index, TokenType.LBracket, TokenType.RBracket) ~ "]";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -159,11 +171,11 @@ string bracketContent(const Token[]tokens, ref size_t index)
|
||||||
*/
|
*/
|
||||||
void skipBlockStatement(const Token[] tokens, ref size_t index)
|
void skipBlockStatement(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
if (tokens[index] == TokenType.lBrace)
|
if (tokens[index] == TokenType.LBrace)
|
||||||
betweenBalancedBraces(tokens, index);
|
betweenBalancedBraces(tokens, index);
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
skipPastNext(tokens, TokenType.semicolon, index);
|
skipPastNext(tokens, TokenType.Semicolon, index);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -177,11 +189,11 @@ void skipPastNext(const Token[] tokens, TokenType type, ref size_t index)
|
||||||
{
|
{
|
||||||
while (index < tokens.length)
|
while (index < tokens.length)
|
||||||
{
|
{
|
||||||
if (tokens[index].type == TokenType.lBrace)
|
if (tokens[index].type == TokenType.LBrace)
|
||||||
betweenBalancedBraces(tokens, index);
|
betweenBalancedBraces(tokens, index);
|
||||||
else if (tokens[index].type == TokenType.lParen)
|
else if (tokens[index].type == TokenType.LParen)
|
||||||
betweenBalancedParens(tokens, index);
|
betweenBalancedParens(tokens, index);
|
||||||
else if (tokens[index].type == TokenType.lBracket)
|
else if (tokens[index].type == TokenType.LBracket)
|
||||||
betweenBalancedBrackets(tokens, index);
|
betweenBalancedBrackets(tokens, index);
|
||||||
else if (tokens[index].type == type)
|
else if (tokens[index].type == type)
|
||||||
{
|
{
|
||||||
|
@ -200,18 +212,18 @@ string parseTypeDeclaration(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
switch (tokens[index].type)
|
switch (tokens[index].type)
|
||||||
{
|
{
|
||||||
case TokenType.lBracket:
|
case TokenType.LBracket:
|
||||||
type ~= bracketContent(tokens, index);
|
type ~= bracketContent(tokens, index);
|
||||||
break;
|
break;
|
||||||
case TokenType.not:
|
case TokenType.Not:
|
||||||
type ~= tokens[index++].value;
|
type ~= tokens[index++].value;
|
||||||
if (tokens[index] == TokenType.lParen)
|
if (tokens[index] == TokenType.LParen)
|
||||||
type ~= parenContent(tokens, index);
|
type ~= parenContent(tokens, index);
|
||||||
else
|
else
|
||||||
type ~= tokens[index++].value;
|
type ~= tokens[index++].value;
|
||||||
break;
|
break;
|
||||||
case TokenType.star:
|
case TokenType.Star:
|
||||||
case TokenType.bitAnd:
|
case TokenType.BitAnd:
|
||||||
type ~= tokens[index++].value;
|
type ~= tokens[index++].value;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
|
@ -249,72 +261,72 @@ Module parseModule(const Token[] tokens, string protection = "public", string[]
|
||||||
{
|
{
|
||||||
switch(tokens[index].type)
|
switch(tokens[index].type)
|
||||||
{
|
{
|
||||||
case TokenType.tElse:
|
case TokenType.Else:
|
||||||
case TokenType.tMixin:
|
case TokenType.Mixin:
|
||||||
case TokenType.tAssert:
|
case TokenType.Assert:
|
||||||
++index;
|
++index;
|
||||||
tokens.skipBlockStatement(index);
|
tokens.skipBlockStatement(index);
|
||||||
break;
|
break;
|
||||||
case TokenType.tAlias:
|
case TokenType.Alias:
|
||||||
tokens.skipBlockStatement(index);
|
tokens.skipBlockStatement(index);
|
||||||
break;
|
break;
|
||||||
case TokenType.tImport:
|
case TokenType.Import:
|
||||||
mod.imports ~= parseImports(tokens, index);
|
mod.imports ~= parseImports(tokens, index);
|
||||||
resetLocals();
|
resetLocals();
|
||||||
break;
|
break;
|
||||||
case TokenType.tVersion:
|
case TokenType.Version:
|
||||||
++index;
|
++index;
|
||||||
if (tokens[index] == TokenType.lParen)
|
if (tokens[index] == TokenType.LParen)
|
||||||
{
|
{
|
||||||
tokens.betweenBalancedParens(index);
|
tokens.betweenBalancedParens(index);
|
||||||
if (tokens[index] == TokenType.lBrace)
|
if (tokens[index] == TokenType.LBrace)
|
||||||
mod.merge(parseModule(betweenBalancedBraces(tokens, index),
|
mod.merge(parseModule(betweenBalancedBraces(tokens, index),
|
||||||
localProtection.empty() ? protection : localProtection,
|
localProtection.empty() ? protection : localProtection,
|
||||||
attributes));
|
attributes));
|
||||||
}
|
}
|
||||||
else if (tokens[index] == TokenType.assign)
|
else if (tokens[index] == TokenType.Assign)
|
||||||
tokens.skipBlockStatement(index);
|
tokens.skipBlockStatement(index);
|
||||||
break;
|
break;
|
||||||
case TokenType.tDeprecated:
|
case TokenType.Deprecated:
|
||||||
case TokenType.tNothrow:
|
case TokenType.Nothrow:
|
||||||
case TokenType.tOverride:
|
case TokenType.Override:
|
||||||
case TokenType.tSynchronized:
|
case TokenType.Synchronized:
|
||||||
case TokenType.atDisable:
|
case TokenType.AtDisable:
|
||||||
case TokenType.atProperty:
|
case TokenType.AtProperty:
|
||||||
case TokenType.atSafe:
|
case TokenType.AtSafe:
|
||||||
case TokenType.atSystem:
|
case TokenType.AtSystem:
|
||||||
case TokenType.tAbstract:
|
case TokenType.Abstract:
|
||||||
case TokenType.tFinal:
|
case TokenType.Final:
|
||||||
case TokenType.t__gshared:
|
case TokenType.Gshared:
|
||||||
case TokenType.tStatic:
|
case TokenType.Static:
|
||||||
localAttributes ~= tokens[index++].value;
|
localAttributes ~= tokens[index++].value;
|
||||||
break;
|
break;
|
||||||
case TokenType.tConst:
|
case TokenType.Const:
|
||||||
case TokenType.tImmutable:
|
case TokenType.Immutable:
|
||||||
case TokenType.tInout:
|
case TokenType.Inout:
|
||||||
case TokenType.tPure:
|
case TokenType.Pure:
|
||||||
case TokenType.tScope:
|
case TokenType.Scope:
|
||||||
case TokenType.tShared:
|
case TokenType.Shared:
|
||||||
auto tmp = tokens[index++].value;
|
auto tmp = tokens[index++].value;
|
||||||
if (tokens[index] == TokenType.lParen)
|
if (tokens[index] == TokenType.LParen)
|
||||||
type = tmp ~ parenContent(tokens, index);
|
type = tmp ~ parenContent(tokens, index);
|
||||||
else if (tokens[index] == TokenType.colon)
|
else if (tokens[index] == TokenType.Colon)
|
||||||
{
|
{
|
||||||
index++;
|
index++;
|
||||||
attributes ~= tmp;
|
attributes ~= tmp;
|
||||||
}
|
}
|
||||||
localAttributes ~= tmp;
|
localAttributes ~= tmp;
|
||||||
break;
|
break;
|
||||||
case TokenType.tAlign:
|
case TokenType.Align:
|
||||||
case TokenType.tExtern:
|
case TokenType.Extern:
|
||||||
string attribute = tokens[index++].value;
|
string attribute = tokens[index++].value;
|
||||||
if (tokens[index] == TokenType.lParen)
|
if (tokens[index] == TokenType.LParen)
|
||||||
attribute ~= parenContent(tokens, index);
|
attribute ~= parenContent(tokens, index);
|
||||||
if (tokens[index] == TokenType.lBrace)
|
if (tokens[index] == TokenType.LBrace)
|
||||||
mod.merge(parseModule(betweenBalancedBraces(tokens, index),
|
mod.merge(parseModule(betweenBalancedBraces(tokens, index),
|
||||||
localProtection.empty() ? protection : localProtection,
|
localProtection.empty() ? protection : localProtection,
|
||||||
attributes ~ attribute));
|
attributes ~ attribute));
|
||||||
else if (tokens[index] == TokenType.colon)
|
else if (tokens[index] == TokenType.Colon)
|
||||||
{
|
{
|
||||||
++index;
|
++index;
|
||||||
attributes ~= attribute;
|
attributes ~= attribute;
|
||||||
|
@ -324,66 +336,66 @@ Module parseModule(const Token[] tokens, string protection = "public", string[]
|
||||||
break;
|
break;
|
||||||
case TokenType.PROTECTION_BEGIN: .. case TokenType.PROTECTION_END:
|
case TokenType.PROTECTION_BEGIN: .. case TokenType.PROTECTION_END:
|
||||||
string p = tokens[index++].value;
|
string p = tokens[index++].value;
|
||||||
if (tokens[index] == TokenType.colon)
|
if (tokens[index] == TokenType.Colon)
|
||||||
{
|
{
|
||||||
protection = p;
|
protection = p;
|
||||||
++index;
|
++index;
|
||||||
}
|
}
|
||||||
else if (tokens[index] == TokenType.lBrace)
|
else if (tokens[index] == TokenType.LBrace)
|
||||||
mod.merge(parseModule(betweenBalancedBraces(tokens, index),
|
mod.merge(parseModule(betweenBalancedBraces(tokens, index),
|
||||||
p, attributes ~ localAttributes));
|
p, attributes ~ localAttributes));
|
||||||
else
|
else
|
||||||
localProtection = p;
|
localProtection = p;
|
||||||
break;
|
break;
|
||||||
case TokenType.tModule:
|
case TokenType.Module:
|
||||||
++index;
|
++index;
|
||||||
while (index < tokens.length && tokens[index] != TokenType.semicolon)
|
while (index < tokens.length && tokens[index] != TokenType.Semicolon)
|
||||||
mod.name ~= tokens[index++].value;
|
mod.name ~= tokens[index++].value;
|
||||||
++index;
|
++index;
|
||||||
resetLocals();
|
resetLocals();
|
||||||
break;
|
break;
|
||||||
case TokenType.tUnion:
|
case TokenType.Union:
|
||||||
mod.unions ~= parseUnion(tokens, index,
|
mod.unions ~= parseUnion(tokens, index,
|
||||||
localProtection.empty() ? protection : localProtection,
|
localProtection.empty() ? protection : localProtection,
|
||||||
localAttributes ~ attributes);
|
localAttributes ~ attributes);
|
||||||
resetLocals();
|
resetLocals();
|
||||||
break;
|
break;
|
||||||
case TokenType.tClass:
|
case TokenType.Class:
|
||||||
mod.classes ~= parseClass(tokens, index,
|
mod.classes ~= parseClass(tokens, index,
|
||||||
localProtection.empty() ? protection : localProtection,
|
localProtection.empty() ? protection : localProtection,
|
||||||
localAttributes ~ attributes);
|
localAttributes ~ attributes);
|
||||||
resetLocals();
|
resetLocals();
|
||||||
break;
|
break;
|
||||||
case TokenType.tInterface:
|
case TokenType.Interface:
|
||||||
mod.interfaces ~= parseInterface(tokens, index,
|
mod.interfaces ~= parseInterface(tokens, index,
|
||||||
localProtection.empty() ? protection : localProtection,
|
localProtection.empty() ? protection : localProtection,
|
||||||
localAttributes ~ attributes);
|
localAttributes ~ attributes);
|
||||||
resetLocals();
|
resetLocals();
|
||||||
break;
|
break;
|
||||||
case TokenType.tStruct:
|
case TokenType.Struct:
|
||||||
mod.structs ~= parseStruct(tokens, index,
|
mod.structs ~= parseStruct(tokens, index,
|
||||||
localProtection.empty() ? protection : localProtection,
|
localProtection.empty() ? protection : localProtection,
|
||||||
localAttributes ~ attributes);
|
localAttributes ~ attributes);
|
||||||
resetLocals();
|
resetLocals();
|
||||||
break;
|
break;
|
||||||
case TokenType.tEnum:
|
case TokenType.Enum:
|
||||||
mod.enums ~= parseEnum(tokens, index,
|
mod.enums ~= parseEnum(tokens, index,
|
||||||
localProtection.empty() ? protection : localProtection,
|
localProtection.empty() ? protection : localProtection,
|
||||||
localAttributes ~ attributes);
|
localAttributes ~ attributes);
|
||||||
resetLocals();
|
resetLocals();
|
||||||
break;
|
break;
|
||||||
case TokenType.tTemplate:
|
case TokenType.Template:
|
||||||
++index; // template
|
++index; // template
|
||||||
++index; // name
|
++index; // name
|
||||||
if (tokens[index] == TokenType.lParen)
|
if (tokens[index] == TokenType.LParen)
|
||||||
tokens.betweenBalancedParens(index); // params
|
tokens.betweenBalancedParens(index); // params
|
||||||
if (tokens[index] == TokenType.lBrace)
|
if (tokens[index] == TokenType.LBrace)
|
||||||
tokens.betweenBalancedBraces(index); // body
|
tokens.betweenBalancedBraces(index); // body
|
||||||
resetLocals();
|
resetLocals();
|
||||||
break;
|
break;
|
||||||
case TokenType.TYPES_BEGIN: .. case TokenType.TYPES_END:
|
case TokenType.TYPES_BEGIN: .. case TokenType.TYPES_END:
|
||||||
case TokenType.tAuto:
|
case TokenType.Auto:
|
||||||
case TokenType.identifier:
|
case TokenType.Identifier:
|
||||||
if (type.empty())
|
if (type.empty())
|
||||||
{
|
{
|
||||||
type = tokens.parseTypeDeclaration(index);
|
type = tokens.parseTypeDeclaration(index);
|
||||||
|
@ -392,7 +404,7 @@ Module parseModule(const Token[] tokens, string protection = "public", string[]
|
||||||
{
|
{
|
||||||
name = tokens[index++].value;
|
name = tokens[index++].value;
|
||||||
if (index >= tokens.length) break;
|
if (index >= tokens.length) break;
|
||||||
if (tokens[index] == TokenType.lParen)
|
if (tokens[index] == TokenType.LParen)
|
||||||
{
|
{
|
||||||
mod.functions ~= parseFunction(tokens, index, type, name,
|
mod.functions ~= parseFunction(tokens, index, type, name,
|
||||||
tokens[index].lineNumber,
|
tokens[index].lineNumber,
|
||||||
|
@ -412,23 +424,23 @@ Module parseModule(const Token[] tokens, string protection = "public", string[]
|
||||||
resetLocals();
|
resetLocals();
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case TokenType.tUnittest:
|
case TokenType.Unittest:
|
||||||
++index;
|
++index;
|
||||||
if (!tokens.empty() && tokens[index] == TokenType.lBrace)
|
if (!tokens.empty() && tokens[index] == TokenType.LBrace)
|
||||||
tokens.skipBlockStatement(index);
|
tokens.skipBlockStatement(index);
|
||||||
resetLocals();
|
resetLocals();
|
||||||
break;
|
break;
|
||||||
case TokenType.tilde:
|
case TokenType.Tilde:
|
||||||
++index;
|
++index;
|
||||||
if (tokens[index] == TokenType.tThis)
|
if (tokens[index] == TokenType.This)
|
||||||
{
|
{
|
||||||
name = "~";
|
name = "~";
|
||||||
goto case;
|
goto case;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case TokenType.tThis:
|
case TokenType.This:
|
||||||
name ~= tokens[index++].value;
|
name ~= tokens[index++].value;
|
||||||
if (tokens[index] == TokenType.lParen)
|
if (tokens[index] == TokenType.LParen)
|
||||||
{
|
{
|
||||||
mod.functions ~= parseFunction(tokens, index, "", name,
|
mod.functions ~= parseFunction(tokens, index, "", name,
|
||||||
tokens[index - 1].lineNumber,
|
tokens[index - 1].lineNumber,
|
||||||
|
@ -453,7 +465,7 @@ Module parseModule(const Token[] tokens, string protection = "public", string[]
|
||||||
*/
|
*/
|
||||||
string[] parseImports(const Token[] tokens, ref size_t index)
|
string[] parseImports(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
assert(tokens[index] == TokenType.tImport);
|
assert(tokens[index] == TokenType.Import);
|
||||||
++index;
|
++index;
|
||||||
auto app = appender!(string[])();
|
auto app = appender!(string[])();
|
||||||
string im;
|
string im;
|
||||||
|
@ -461,17 +473,17 @@ string[] parseImports(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
switch(tokens[index].type)
|
switch(tokens[index].type)
|
||||||
{
|
{
|
||||||
case TokenType.comma:
|
case TokenType.Comma:
|
||||||
++index;
|
++index;
|
||||||
app.put(im);
|
app.put(im);
|
||||||
im = "";
|
im = "";
|
||||||
break;
|
break;
|
||||||
case TokenType.assign:
|
case TokenType.Assign:
|
||||||
case TokenType.semicolon:
|
case TokenType.Semicolon:
|
||||||
app.put(im);
|
app.put(im);
|
||||||
++index;
|
++index;
|
||||||
return app.data;
|
return app.data;
|
||||||
case TokenType.colon:
|
case TokenType.Colon:
|
||||||
app.put(im);
|
app.put(im);
|
||||||
tokens.skipBlockStatement(index);
|
tokens.skipBlockStatement(index);
|
||||||
return app.data;
|
return app.data;
|
||||||
|
@ -491,7 +503,7 @@ Enum parseEnum(const Token[] tokens, ref size_t index, string protection,
|
||||||
string[] attributes)
|
string[] attributes)
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
assert (tokens[index] == TokenType.tEnum);
|
assert (tokens[index] == TokenType.Enum);
|
||||||
}
|
}
|
||||||
body
|
body
|
||||||
{
|
{
|
||||||
|
@ -500,7 +512,7 @@ body
|
||||||
e.line = tokens[index].lineNumber;
|
e.line = tokens[index].lineNumber;
|
||||||
e.name = tokens[index++].value;
|
e.name = tokens[index++].value;
|
||||||
|
|
||||||
if (tokens[index] == TokenType.colon)
|
if (tokens[index] == TokenType.Colon)
|
||||||
{
|
{
|
||||||
++index;
|
++index;
|
||||||
e.type = tokens[index++].value;
|
e.type = tokens[index++].value;
|
||||||
|
@ -508,7 +520,7 @@ body
|
||||||
else
|
else
|
||||||
e.type = "uint";
|
e.type = "uint";
|
||||||
|
|
||||||
if (tokens[index] != TokenType.lBrace)
|
if (tokens[index] != TokenType.LBrace)
|
||||||
{
|
{
|
||||||
tokens.skipBlockStatement(index);
|
tokens.skipBlockStatement(index);
|
||||||
return e;
|
return e;
|
||||||
|
@ -517,13 +529,13 @@ body
|
||||||
auto r = betweenBalancedBraces(tokens, index);
|
auto r = betweenBalancedBraces(tokens, index);
|
||||||
for (size_t i = 0; i < r.length;)
|
for (size_t i = 0; i < r.length;)
|
||||||
{
|
{
|
||||||
if (r[i].type == TokenType.identifier)
|
if (r[i].type == TokenType.Identifier)
|
||||||
{
|
{
|
||||||
EnumMember member;
|
EnumMember member;
|
||||||
member.line = r[i].lineNumber;
|
member.line = r[i].lineNumber;
|
||||||
member.name = r[i].value;
|
member.name = r[i].value;
|
||||||
e.members ~= member;
|
e.members ~= member;
|
||||||
r.skipPastNext(TokenType.comma, i);
|
r.skipPastNext(TokenType.Comma, i);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
++i;
|
++i;
|
||||||
|
@ -539,7 +551,7 @@ Function parseFunction(const Token[] tokens, ref size_t index, string type,
|
||||||
string name, uint line, string protection, string[] attributes)
|
string name, uint line, string protection, string[] attributes)
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
assert (tokens[index] == TokenType.lParen);
|
assert (tokens[index] == TokenType.LParen);
|
||||||
}
|
}
|
||||||
body
|
body
|
||||||
{
|
{
|
||||||
|
@ -550,7 +562,7 @@ body
|
||||||
f.attributes.insertInPlace(f.attributes.length, attributes);
|
f.attributes.insertInPlace(f.attributes.length, attributes);
|
||||||
|
|
||||||
Variable[] vars1 = parseParameters(tokens, index);
|
Variable[] vars1 = parseParameters(tokens, index);
|
||||||
if (tokens[index] == TokenType.lParen)
|
if (tokens[index] == TokenType.LParen)
|
||||||
{
|
{
|
||||||
f.templateParameters.insertInPlace(f.templateParameters.length,
|
f.templateParameters.insertInPlace(f.templateParameters.length,
|
||||||
map!("a.type")(vars1));
|
map!("a.type")(vars1));
|
||||||
|
@ -564,14 +576,14 @@ body
|
||||||
{
|
{
|
||||||
switch (tokens[index].type)
|
switch (tokens[index].type)
|
||||||
{
|
{
|
||||||
case TokenType.tImmutable:
|
case TokenType.Immutable:
|
||||||
case TokenType.tConst:
|
case TokenType.Const:
|
||||||
case TokenType.tPure:
|
case TokenType.Pure:
|
||||||
case TokenType.atTrusted:
|
case TokenType.AtTrusted:
|
||||||
case TokenType.atProperty:
|
case TokenType.AtProperty:
|
||||||
case TokenType.tNothrow:
|
case TokenType.Nothrow:
|
||||||
case TokenType.tFinal:
|
case TokenType.Final:
|
||||||
case TokenType.tOverride:
|
case TokenType.Override:
|
||||||
f.attributes ~= tokens[index++].value;
|
f.attributes ~= tokens[index++].value;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
|
@ -579,21 +591,21 @@ body
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (tokens[index] == TokenType.tIf)
|
if (tokens[index] == TokenType.If)
|
||||||
f.constraint = parseConstraint(tokens, index);
|
f.constraint = parseConstraint(tokens, index);
|
||||||
while (index < tokens.length &&
|
while (index < tokens.length &&
|
||||||
(tokens[index] == TokenType.tIn || tokens[index] == TokenType.tOut
|
(tokens[index] == TokenType.In || tokens[index] == TokenType.Out
|
||||||
|| tokens[index] == TokenType.tBody))
|
|| tokens[index] == TokenType.Body))
|
||||||
{
|
{
|
||||||
++index;
|
++index;
|
||||||
if (index < tokens.length && tokens[index] == TokenType.lBrace)
|
if (index < tokens.length && tokens[index] == TokenType.LBrace)
|
||||||
tokens.skipBlockStatement(index);
|
tokens.skipBlockStatement(index);
|
||||||
}
|
}
|
||||||
if (index >= tokens.length)
|
if (index >= tokens.length)
|
||||||
return f;
|
return f;
|
||||||
if (tokens[index] == TokenType.lBrace)
|
if (tokens[index] == TokenType.LBrace)
|
||||||
tokens.skipBlockStatement(index);
|
tokens.skipBlockStatement(index);
|
||||||
else if (tokens[index] == TokenType.semicolon)
|
else if (tokens[index] == TokenType.Semicolon)
|
||||||
++index;
|
++index;
|
||||||
return f;
|
return f;
|
||||||
}
|
}
|
||||||
|
@ -601,16 +613,16 @@ body
|
||||||
string parseConstraint(const Token[] tokens, ref size_t index)
|
string parseConstraint(const Token[] tokens, ref size_t index)
|
||||||
{
|
{
|
||||||
auto appender = appender!(string)();
|
auto appender = appender!(string)();
|
||||||
assert(tokens[index] == TokenType.tIf);
|
assert(tokens[index] == TokenType.If);
|
||||||
appender.put(tokens[index++].value);
|
appender.put(tokens[index++].value);
|
||||||
assert(tokens[index] == TokenType.lParen);
|
assert(tokens[index] == TokenType.LParen);
|
||||||
return "if " ~ parenContent(tokens, index);
|
return "if " ~ parenContent(tokens, index);
|
||||||
}
|
}
|
||||||
|
|
||||||
Variable[] parseParameters(const Token[] tokens, ref size_t index)
|
Variable[] parseParameters(const Token[] tokens, ref size_t index)
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
assert (tokens[index] == TokenType.lParen);
|
assert (tokens[index] == TokenType.LParen);
|
||||||
}
|
}
|
||||||
body
|
body
|
||||||
{
|
{
|
||||||
|
@ -622,28 +634,28 @@ body
|
||||||
{
|
{
|
||||||
switch(r[i].type)
|
switch(r[i].type)
|
||||||
{
|
{
|
||||||
case TokenType.tIn:
|
case TokenType.In:
|
||||||
case TokenType.tOut:
|
case TokenType.Out:
|
||||||
case TokenType.tRef:
|
case TokenType.Ref:
|
||||||
case TokenType.tScope:
|
case TokenType.Scope:
|
||||||
case TokenType.tLazy:
|
case TokenType.Lazy:
|
||||||
case TokenType.tConst:
|
case TokenType.Const:
|
||||||
case TokenType.tImmutable:
|
case TokenType.Immutable:
|
||||||
case TokenType.tShared:
|
case TokenType.Shared:
|
||||||
case TokenType.tInout:
|
case TokenType.Inout:
|
||||||
auto tmp = r[i++].value;
|
auto tmp = r[i++].value;
|
||||||
if (r[i] == TokenType.lParen)
|
if (r[i] == TokenType.LParen)
|
||||||
v.type ~= tmp ~ parenContent(r, i);
|
v.type ~= tmp ~ parenContent(r, i);
|
||||||
else
|
else
|
||||||
v.attributes ~= tmp;
|
v.attributes ~= tmp;
|
||||||
break;
|
break;
|
||||||
case TokenType.colon:
|
case TokenType.Colon:
|
||||||
i++;
|
i++;
|
||||||
r.skipPastNext(TokenType.comma, i);
|
r.skipPastNext(TokenType.Comma, i);
|
||||||
appender.put(v);
|
appender.put(v);
|
||||||
v = new Variable;
|
v = new Variable;
|
||||||
break;
|
break;
|
||||||
case TokenType.comma:
|
case TokenType.Comma:
|
||||||
++i;
|
++i;
|
||||||
appender.put(v);
|
appender.put(v);
|
||||||
v = new Variable;
|
v = new Variable;
|
||||||
|
@ -660,12 +672,12 @@ body
|
||||||
v.line = r[i].lineNumber;
|
v.line = r[i].lineNumber;
|
||||||
v.name = r[i++].value;
|
v.name = r[i++].value;
|
||||||
appender.put(v);
|
appender.put(v);
|
||||||
if (i < r.length && r[i] == TokenType.vararg)
|
if (i < r.length && r[i] == TokenType.Vararg)
|
||||||
{
|
{
|
||||||
v.type ~= " ...";
|
v.type ~= " ...";
|
||||||
}
|
}
|
||||||
v = new Variable;
|
v = new Variable;
|
||||||
r.skipPastNext(TokenType.comma, i);
|
r.skipPastNext(TokenType.Comma, i);
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -676,7 +688,7 @@ body
|
||||||
string[] parseBaseClassList(const Token[] tokens, ref size_t index)
|
string[] parseBaseClassList(const Token[] tokens, ref size_t index)
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
assert(tokens[index] == TokenType.colon);
|
assert(tokens[index] == TokenType.Colon);
|
||||||
}
|
}
|
||||||
body
|
body
|
||||||
{
|
{
|
||||||
|
@ -684,11 +696,11 @@ body
|
||||||
++index;
|
++index;
|
||||||
while (index < tokens.length)
|
while (index < tokens.length)
|
||||||
{
|
{
|
||||||
if (tokens[index] == TokenType.identifier)
|
if (tokens[index] == TokenType.Identifier)
|
||||||
{
|
{
|
||||||
string base = parseTypeDeclaration(tokens, index);
|
string base = parseTypeDeclaration(tokens, index);
|
||||||
appender.put(base);
|
appender.put(base);
|
||||||
if (tokens[index] == TokenType.comma)
|
if (tokens[index] == TokenType.Comma)
|
||||||
++index;
|
++index;
|
||||||
else
|
else
|
||||||
break;
|
break;
|
||||||
|
@ -717,18 +729,18 @@ Struct parseStructOrUnion(const Token[] tokens, ref size_t index, string protect
|
||||||
s.attributes = attributes;
|
s.attributes = attributes;
|
||||||
s.protection = protection;
|
s.protection = protection;
|
||||||
s.name = tokens[index++].value;
|
s.name = tokens[index++].value;
|
||||||
if (tokens[index] == TokenType.lParen)
|
if (tokens[index] == TokenType.LParen)
|
||||||
s.templateParameters.insertInPlace(s.templateParameters.length,
|
s.templateParameters.insertInPlace(s.templateParameters.length,
|
||||||
map!("a.type")(parseParameters(tokens, index)));
|
map!("a.type")(parseParameters(tokens, index)));
|
||||||
|
|
||||||
if (index >= tokens.length) return s;
|
if (index >= tokens.length) return s;
|
||||||
|
|
||||||
if (tokens[index] == TokenType.tIf)
|
if (tokens[index] == TokenType.If)
|
||||||
s.constraint = parseConstraint(tokens, index);
|
s.constraint = parseConstraint(tokens, index);
|
||||||
|
|
||||||
if (index >= tokens.length) return s;
|
if (index >= tokens.length) return s;
|
||||||
|
|
||||||
if (tokens[index] == TokenType.lBrace)
|
if (tokens[index] == TokenType.LBrace)
|
||||||
parseStructBody(tokens, index, s);
|
parseStructBody(tokens, index, s);
|
||||||
else
|
else
|
||||||
tokens.skipBlockStatement(index);
|
tokens.skipBlockStatement(index);
|
||||||
|
@ -739,7 +751,7 @@ Struct parseStruct(const Token[] tokens, ref size_t index, string protection,
|
||||||
string[] attributes)
|
string[] attributes)
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
assert(tokens[index] == TokenType.tStruct);
|
assert(tokens[index] == TokenType.Struct);
|
||||||
}
|
}
|
||||||
body
|
body
|
||||||
{
|
{
|
||||||
|
@ -750,7 +762,7 @@ Struct parseUnion(const Token[] tokens, ref size_t index, string protection,
|
||||||
string[] attributes)
|
string[] attributes)
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
assert(tokens[index] == TokenType.tUnion);
|
assert(tokens[index] == TokenType.Union);
|
||||||
}
|
}
|
||||||
body
|
body
|
||||||
{
|
{
|
||||||
@@ -765,23 +777,23 @@ Inherits parseInherits(const Token[] tokens, ref size_t index, string protection
     i.name = tokens[index++].value;
     i.protection = protection;
     i.attributes.insertInPlace(i.attributes.length, attributes);
-    if (tokens[index] == TokenType.lParen)
+    if (tokens[index] == TokenType.LParen)
         i.templateParameters.insertInPlace(i.templateParameters.length,
             map!("a.type")(parseParameters(tokens, index)));

     if (index >= tokens.length) return i;

-    if (tokens[index] == TokenType.tIf)
+    if (tokens[index] == TokenType.If)
         i.constraint = parseConstraint(tokens, index);

     if (index >= tokens.length) return i;

-    if (tokens[index] == TokenType.colon)
+    if (tokens[index] == TokenType.Colon)
         i.baseClasses = parseBaseClassList(tokens, index);

     if (index >= tokens.length) return i;

-    if (tokens[index] == TokenType.lBrace)
+    if (tokens[index] == TokenType.LBrace)
         parseStructBody(tokens, index, i);
     else
         tokens.skipBlockStatement(index);
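The sequence of token checks in parseInherits mirrors the shape of a class declaration. An annotated, hypothetical example:

    // Hypothetical declaration, annotated with the checks above:
    class Container(T)        // name, then LParen: template parameters
        if (is(T == class))   // If: template constraint
        : Base, Printable     // Colon: base class list
    {                         // LBrace: body parsed by parseStructBody
        // ...
    }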
@@ -792,7 +804,7 @@ Inherits parseInterface(const Token[] tokens, ref size_t index, string protectio
     string[] attributes)
 in
 {
-    assert (tokens[index] == TokenType.tInterface);
+    assert (tokens[index] == TokenType.Interface);
 }
 body
 {
@@ -804,7 +816,7 @@ Inherits parseClass(const Token[] tokens, ref size_t index, string protection,
     string[] attributes)
 in
 {
-    assert(tokens[index] == TokenType.tClass);
+    assert(tokens[index] == TokenType.Class);
 }
 body
 {
tokenizer.d (282 changes)
@@ -29,7 +29,7 @@ import codegen;
  * Returns: The whitespace, or null if style was CODE_ONLY
  */
 pure nothrow string lexWhitespace(S)(S inputString, ref size_t endIndex,
-    ref uint lineNumber, IterationStyle style = IterationStyle.CODE_ONLY) // I suggest to remove the last param
+    ref uint lineNumber)
     if (isSomeString!S)
 {
     immutable startIndex = endIndex;
@@ -39,13 +39,7 @@ pure nothrow string lexWhitespace(S)(S inputString, ref size_t endIndex,
             lineNumber++;
         ++endIndex;
     }
-    final switch (style)
-    {
-    case IterationStyle.EVERYTHING:
-        return inputString[startIndex .. endIndex];
-    case IterationStyle.CODE_ONLY:
-        return null;
-    }
+    return inputString[startIndex .. endIndex];
 }

 /**
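With the style parameter gone, lexWhitespace unconditionally returns the slice it consumed, and the decision to keep or drop whitespace tokens moves to the caller (see the tokenize hunk further down). A minimal sketch of that caller-side filtering, reusing the names from this diff:

    // Sketch only, not the verbatim tokenize body: keep the token only
    // when the caller asked for whitespace.
    string ws = lexWhitespace(inputString, endIndex, lineNumber);
    if (iterationStyle == IterationStyle.EVERYTHING)
    {
        currentToken.value = ws;
        currentToken.type = TokenType.Whitespace;
        tokenAppender.put(currentToken);
    }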
@@ -257,7 +251,7 @@ pure nothrow Token lexNumber(S)(ref S inputString, ref size_t endIndex)
     endIndex++;
     if (isEoF(inputString, endIndex))
     {
-        token.type = TokenType.intLiteral;
+        token.type = TokenType.IntLiteral;
         token.value = inputString[startIndex .. endIndex];
         return token;
     }
@@ -277,7 +271,7 @@ pure nothrow Token lexNumber(S)(ref S inputString, ref size_t endIndex)
         lexHex(inputString, startIndex, ++endIndex, token);
         return token;
     default:
-        token.type = TokenType.intLiteral;
+        token.type = TokenType.IntLiteral;
         token.value = inputString[startIndex .. endIndex];
         return token;
     }
@@ -295,7 +289,7 @@ pure nothrow void lexBinary(S)(ref S inputString, size_t startIndex,
     bool lexingSuffix = false;
     bool isLong = false;
     bool isUnsigned = false;
-    token.type = TokenType.intLiteral;
+    token.type = TokenType.IntLiteral;
     binaryLoop: while (!isEoF(inputString, endIndex))
     {
         switch (inputString[endIndex])
@@ -315,11 +309,11 @@ pure nothrow void lexBinary(S)(ref S inputString, size_t startIndex,
             lexingSuffix = true;
             if (isLong)
             {
-                token.type = TokenType.unsignedLongLiteral;
+                token.type = TokenType.UnsignedLongLiteral;
                 break binaryLoop;
             }
             else
-                token.type = TokenType.unsignedIntLiteral;
+                token.type = TokenType.UnsignedIntLiteral;
             isUnsigned = true;
             break;
         case 'L':
@@ -329,11 +323,11 @@ pure nothrow void lexBinary(S)(ref S inputString, size_t startIndex,
             lexingSuffix = true;
             if (isUnsigned)
             {
-                token.type = TokenType.unsignedLongLiteral;
+                token.type = TokenType.UnsignedLongLiteral;
                 break binaryLoop;
             }
             else
-                token.type = TokenType.longLiteral;
+                token.type = TokenType.LongLiteral;
             isLong = true;
             break;
         default:
@@ -356,7 +350,7 @@ pure nothrow void lexDecimal(S)(ref S inputString, size_t startIndex,
     bool foundDot = false;
     bool foundE = false;
     bool foundPlusMinus = false;
-    token.type = TokenType.intLiteral;
+    token.type = TokenType.IntLiteral;
     decimalLoop: while (!isEoF(inputString, endIndex))
     {
         switch (inputString[endIndex])
@@ -369,10 +363,30 @@ pure nothrow void lexDecimal(S)(ref S inputString, size_t startIndex,
             break;
         case 'e':
         case 'E':
-            if (foundE)
+            // For this to be a valid exponent, the next character must be a
+            // decimal character or a sign
+            if (foundE || isEoF(inputString, endIndex + 1))
                 break decimalLoop;
+            switch (inputString[endIndex + 1])
+            {
+            case '+':
+            case '-':
+                if (isEoF(inputString, endIndex + 2)
+                    || inputString[endIndex + 2] < '0'
+                    || inputString[endIndex + 2] > '9')
+                {
+                    break decimalLoop;
+                }
+                break;
+            case '0': .. case '9':
+                break;
+            default:
+                break decimalLoop;
+            }
             ++endIndex;
             foundE = true;
+            isDouble = true;
+            token.type = TokenType.DoubleLiteral;
             break;
         case '+':
         case '-':
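The two characters of lookahead are what make this correct: an 'e' only starts an exponent if the next character is a digit, or a sign that is itself followed by a digit. Expected classifications (the first two are asserted by the unittest added further down; the third follows from the default branch):

    // "55e-4"  -> DoubleLiteral, value "55e-4"  ('-' followed by a digit)
    // "3e+f"   -> IntLiteral,    value "3"      ('+' not followed by a digit)
    // "3east"  -> IntLiteral,    value "3"      ('e' followed by neither digit nor sign)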
@@ -388,7 +402,7 @@ pure nothrow void lexDecimal(S)(ref S inputString, size_t startIndex,
                 break decimalLoop; // two dots with other characters between them
             ++endIndex;
             foundDot = true;
-            token.type = TokenType.doubleLiteral;
+            token.type = TokenType.DoubleLiteral;
             isDouble = true;
             break;
         case 'u':
@@ -398,9 +412,9 @@ pure nothrow void lexDecimal(S)(ref S inputString, size_t startIndex,
             ++endIndex;
             lexingSuffix = true;
             if (isLong)
-                token.type = TokenType.unsignedLongLiteral;
+                token.type = TokenType.UnsignedLongLiteral;
             else
-                token.type = TokenType.unsignedIntLiteral;
+                token.type = TokenType.UnsignedIntLiteral;
             isUnsigned = true;
             break;
         case 'L':
@@ -411,11 +425,11 @@ pure nothrow void lexDecimal(S)(ref S inputString, size_t startIndex,
             ++endIndex;
             lexingSuffix = true;
             if (isDouble)
-                token.type = TokenType.realLiteral;
+                token.type = TokenType.RealLiteral;
             else if (isUnsigned)
-                token.type = TokenType.unsignedLongLiteral;
+                token.type = TokenType.UnsignedLongLiteral;
             else
-                token.type = TokenType.longLiteral;
+                token.type = TokenType.LongLiteral;
             isLong = true;
             break;
         case 'f':
@@ -424,40 +438,70 @@ pure nothrow void lexDecimal(S)(ref S inputString, size_t startIndex,
             if (isUnsigned || isLong)
                 break decimalLoop;
             ++endIndex;
-            token.type = TokenType.floatLiteral;
+            token.type = TokenType.FloatLiteral;
             break decimalLoop;
+        case 'i':
+            ++endIndex;
+            // Spec says that this is the last suffix, so all cases break the
+            // loop.
+            if (isDouble)
+            {
+                token.type = TokenType.Idouble;
+                break decimalLoop;
+            }
+            else if (isFloat)
+            {
+                token.type = TokenType.Ifloat;
+                break decimalLoop;
+            }
+            else if (isReal)
+            {
+                token.type = TokenType.Ireal;
+                break decimalLoop;
+            }
+            else
+            {
+                // There is no imaginary int
+                --endIndex;
+                break decimalLoop;
+            }
         default:
             break decimalLoop;
         }
     }

-    // suggest to extract lexing integers into a separate function
-    // please see unittest below

     token.value = inputString[startIndex .. endIndex];
 }


 unittest {
-    dump!lexDecimal("55e-4"); // yeilds intLiteral, but should be float
-    dump!lexDecimal("3e+f"); // floatLiteral, but should be considered invalid
-    dump!lexDecimal("3e++f"); // intLiteral 3e+, but should be considered invalid
-    // actually, there are lots of bugs. The point is that without decomposition of integer lexing from floating-point lexing
-    // it is very hard to prove algorithm correctness
+    Token t;
+    size_t start, end;
+    lexDecimal!string("55e-4", start, end, t);
+    assert(t.value == "55e-4");
+    assert(t.type == TokenType.DoubleLiteral);
+
+    start = end = 0;
+    lexDecimal!string("123.45f", start, end, t);
+    assert(t.value == "123.45f");
+    assert(t.type == TokenType.FloatLiteral);
+
+    start = end = 0;
+    lexDecimal!string("3e+f", start, end, t);
+    assert(t.value == "3");
+    assert(t.type == TokenType.IntLiteral);
+
+    start = end = 0;
+    lexDecimal!string("3e++f", start, end, t);
+    assert(t.value == "3");
+    assert(t.type == TokenType.IntLiteral);
+
+    start = end = 0;
+    lexDecimal!string("1234..1237", start, end, t);
+    assert(t.value == "1234");
+    assert(t.type == TokenType.IntLiteral);
 }

-// Temporary function to illustrate some problems
-// Executes T and dumps results to console
-void dump(alias T)(string s) {
-    size_t start;
-    size_t end;
-    Token tok;
-    T!(string)(s, start, end, tok);
-    // dump results
-    writeln(tok.type);
-    writeln(tok.value);
-    writeln(start);
-    writeln(end);
-}

 nothrow void lexHex(S)(ref S inputString, ref size_t startIndex,
     ref size_t endIndex, ref Token token) if (isSomeString!S)
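On the imaginary suffix: D appends i to a floating literal to form ifloat/idouble/ireal values. The isFloat and isReal flags tested in the new 'i' branch are not visible in these hunks and are assumed to be maintained elsewhere in lexDecimal. Intended classifications, for illustration:

    // "123.45i"  -> Idouble (the '.' already made the literal a double)
    // "123.45fi" -> Ifloat  (assuming the 'f' suffix sets isFloat)
    // "123.45Li" -> Ireal   (assuming 'L' after a float sets isReal)
    // "123i"     -> the 'i' is not consumed; there is no imaginary int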
@@ -471,7 +515,7 @@ nothrow void lexHex(S)(ref S inputString, ref size_t startIndex,
     bool foundDot = false;
     bool foundE = false;
     bool foundPlusMinus = false;
-    token.type = TokenType.intLiteral;
+    token.type = TokenType.IntLiteral;
     hexLoop: while (!isEoF(inputString, endIndex))
     {
         switch (inputString[endIndex])
@@ -505,7 +549,7 @@ nothrow void lexHex(S)(ref S inputString, ref size_t startIndex,
                 break hexLoop; // two dots with other characters between them
             ++endIndex;
             foundDot = true;
-            token.type = TokenType.doubleLiteral;
+            token.type = TokenType.DoubleLiteral;
             isDouble = true;
             break;
         default:
@@ -566,7 +610,7 @@ Token[] tokenize(S)(S inputString, IterationStyle iterationStyle = IterationStyl
         Token currentToken;
         currentToken.lineNumber = lineNumber; // lineNumber is always 1
         currentToken.value = lexScriptLine(inputString, endIndex, lineNumber);
-        currentToken.type = TokenType.scriptLine;
+        currentToken.type = TokenType.ScriptLine;
     }

     while (!isEoF(inputString, endIndex))
@@ -580,8 +624,8 @@ Token[] tokenize(S)(S inputString, IterationStyle iterationStyle = IterationStyl
         {
             currentToken.lineNumber = lineNumber;
             currentToken.value = lexWhitespace(inputString, endIndex,
-                lineNumber, IterationStyle.EVERYTHING); // note: I suggest to remove the last parameter to simplify lexWhitespace
-            currentToken.type = TokenType.whitespace;
+                lineNumber);
+            currentToken.type = TokenType.Whitespace;
             tokenAppender.put(currentToken);
         }
         else
@@ -593,66 +637,66 @@ Token[] tokenize(S)(S inputString, IterationStyle iterationStyle = IterationStyl
         outerSwitch: switch(inputString[endIndex])
         {
         mixin(generateCaseTrie(
-            "=", "TokenType.assign",
+            "=", "TokenType.Assign",
-            "&", "TokenType.bitAnd",
+            "&", "TokenType.BitAnd",
-            "&=", "TokenType.bitAndEquals",
+            "&=", "TokenType.BitAndEquals",
-            "|", "TokenType.bitOr",
+            "|", "TokenType.BitOr",
-            "|=", "TokenType.bitOrEquals",
+            "|=", "TokenType.BitOrEquals",
-            "~=", "TokenType.catEquals",
+            "~=", "TokenType.CatEquals",
-            ":", "TokenType.colon",
+            ":", "TokenType.Colon",
-            ",", "TokenType.comma",
+            ",", "TokenType.Comma",
-            "$", "TokenType.dollar",
+            "$", "TokenType.Dollar",
-            ".", "TokenType.dot",
+            ".", "TokenType.Dot",
-            "==", "TokenType.equals",
+            "==", "TokenType.Equals",
-            "=>", "TokenType.goesTo",
+            "=>", "TokenType.GoesTo",
-            ">", "TokenType.greater",
+            ">", "TokenType.Greater",
-            ">=", "TokenType.greaterEqual",
+            ">=", "TokenType.GreaterEqual",
-            "#", "TokenType.hash",
+            "#", "TokenType.Hash",
-            "&&", "TokenType.logicAnd",
+            "&&", "TokenType.LogicAnd",
-            "{", "TokenType.lBrace",
+            "{", "TokenType.LBrace",
-            "[", "TokenType.lBracket",
+            "[", "TokenType.LBracket",
-            "<", "TokenType.less",
+            "<", "TokenType.Less",
-            "<=", "TokenType.lessEqual",
+            "<=", "TokenType.LessEqual",
-            "<>=", "TokenType.lessEqualGreater",
+            "<>=", "TokenType.LessEqualGreater",
-            "<>", "TokenType.lessOrGreater",
+            "<>", "TokenType.LessOrGreater",
-            "||", "TokenType.logicOr",
+            "||", "TokenType.LogicOr",
-            "(", "TokenType.lParen",
+            "(", "TokenType.LParen",
-            "-", "TokenType.minus",
+            "-", "TokenType.Minus",
-            "-=", "TokenType.minusEquals",
+            "-=", "TokenType.MinusEquals",
-            "%", "TokenType.mod",
+            "%", "TokenType.Mod",
-            "%=", "TokenType.modEquals",
+            "%=", "TokenType.ModEquals",
-            "*=", "TokenType.mulEquals",
+            "*=", "TokenType.MulEquals",
-            "!", "TokenType.not",
+            "!", "TokenType.Not",
-            "!=", "TokenType.notEquals",
+            "!=", "TokenType.NotEquals",
-            "!>", "TokenType.notGreater",
+            "!>", "TokenType.NotGreater",
-            "!>=", "TokenType.notGreaterEqual",
+            "!>=", "TokenType.NotGreaterEqual",
-            "!<", "TokenType.notLess",
+            "!<", "TokenType.NotLess",
-            "!<=", "TokenType.notLessEqual",
+            "!<=", "TokenType.NotLessEqual",
-            "!<>", "TokenType.notLessEqualGreater",
+            "!<>", "TokenType.NotLessEqualGreater",
-            "+", "TokenType.plus",
+            "+", "TokenType.Plus",
-            "+=", "TokenType.plusEquals",
+            "+=", "TokenType.PlusEquals",
-            "^^", "TokenType.pow",
+            "^^", "TokenType.Pow",
-            "^^=", "TokenType.powEquals",
+            "^^=", "TokenType.PowEquals",
-            "}", "TokenType.rBrace",
+            "}", "TokenType.RBrace",
-            "]", "TokenType.rBracket",
+            "]", "TokenType.RBracket",
-            ")", "TokenType.rParen",
+            ")", "TokenType.RParen",
-            ";", "TokenType.semicolon",
+            ";", "TokenType.Semicolon",
-            "<<", "TokenType.shiftLeft",
+            "<<", "TokenType.ShiftLeft",
-            "<<=", "TokenType.shiftLeftEqual",
+            "<<=", "TokenType.ShiftLeftEqual",
-            ">>", "TokenType.shiftRight",
+            ">>", "TokenType.ShiftRight",
-            ">>=", "TokenType.shiftRightEqual",
+            ">>=", "TokenType.ShiftRightEqual",
-            "..", "TokenType.slice",
+            "..", "TokenType.Slice",
-            "*", "TokenType.star",
+            "*", "TokenType.Star",
-            "?", "TokenType.ternary",
+            "?", "TokenType.Ternary",
-            "~", "TokenType.tilde",
+            "~", "TokenType.Tilde",
-            "--", "TokenType.uMinus",
+            "--", "TokenType.Decrement",
-            "!<>=", "TokenType.unordered",
+            "!<>=", "TokenType.Unordered",
-            ">>>", "TokenType.unsignedShiftRight",
+            ">>>", "TokenType.UnsignedShiftRight",
-            ">>>=", "TokenType.unsignedShiftRightEqual",
+            ">>>=", "TokenType.UnsignedShiftRightEqual",
-            "++", "TokenType.uPlus",
+            "++", "TokenType.Increment",
-            "...", "TokenType.vararg",
+            "...", "TokenType.Vararg",
-            "^", "TokenType.xor",
+            "^", "TokenType.Xor",
-            "^=", "TokenType.xorEquals",
+            "^=", "TokenType.XorEquals",
         ));
         case '0': .. case '9':
             currentToken = lexNumber(inputString, endIndex);
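generateCaseTrie comes from the codegen import and builds, at compile time, nested case labels keyed on successive characters so that the longest operator match wins. Purely as a sketch of the idea (not the generated code verbatim), the "=" / "==" / "=>" entries expand to something like:

    // Sketch of the expansion; the real generated code may differ.
    case '=':
        if (!isEoF(inputString, endIndex + 1) && inputString[endIndex + 1] == '=')
        {
            currentToken.value = "==";
            currentToken.type = TokenType.Equals;
            endIndex += 2;
        }
        else if (!isEoF(inputString, endIndex + 1) && inputString[endIndex + 1] == '>')
        {
            currentToken.value = "=>";
            currentToken.type = TokenType.GoesTo;
            endIndex += 2;
        }
        else
        {
            currentToken.value = "=";
            currentToken.type = TokenType.Assign;
            ++endIndex;
        }
        break;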
@@ -662,7 +706,7 @@ Token[] tokenize(S)(S inputString, IterationStyle iterationStyle = IterationStyl
             if (isEoF(inputString, endIndex))
             {
                 currentToken.value = "/";
-                currentToken.type = TokenType.div;
+                currentToken.type = TokenType.Div;
                 currentToken.lineNumber = lineNumber;
                 break;
             }
@@ -680,17 +724,17 @@ Token[] tokenize(S)(S inputString, IterationStyle iterationStyle = IterationStyl
             else
             {
                 currentToken.value = lexComment(inputString, endIndex, lineNumber);
-                currentToken.type = TokenType.comment;
+                currentToken.type = TokenType.Comment;
                 break;
             }
         case '=':
             currentToken.value = "/=";
-            currentToken.type = TokenType.divEquals;
+            currentToken.type = TokenType.DivEquals;
             ++endIndex;
             break;
         default:
             currentToken.value = "/";
-            currentToken.type = TokenType.div;
+            currentToken.type = TokenType.Div;
             break;
         }
         break;
@@ -701,13 +745,13 @@ Token[] tokenize(S)(S inputString, IterationStyle iterationStyle = IterationStyl
         currentToken.lineNumber = lineNumber;
         currentToken.value = lexString(inputString, endIndex,
             lineNumber, inputString[endIndex], false);
-        currentToken.type = TokenType.stringLiteral;
+        currentToken.type = TokenType.StringLiteral;
         break;
     case '`':
         currentToken.lineNumber = lineNumber;
         currentToken.value = lexString(inputString, endIndex, lineNumber,
             inputString[endIndex], false);
-        currentToken.type = TokenType.stringLiteral;
+        currentToken.type = TokenType.StringLiteral;
         break;
     case 'x':
         ++endIndex;
@@ -720,7 +764,7 @@ Token[] tokenize(S)(S inputString, IterationStyle iterationStyle = IterationStyl
         currentToken.lineNumber = lineNumber;
         currentToken.value = lexString(inputString, endIndex, lineNumber,
             inputString[endIndex]);
-        currentToken.type = TokenType.stringLiteral;
+        currentToken.type = TokenType.StringLiteral;
         break;
     case 'q':
         currentToken.value = "q";
@@ -733,13 +777,13 @@ Token[] tokenize(S)(S inputString, IterationStyle iterationStyle = IterationStyl
             currentToken.lineNumber = lineNumber;
             currentToken.value ~= lexDelimitedString(inputString,
                 endIndex, lineNumber);
-            currentToken.type = TokenType.stringLiteral;
+            currentToken.type = TokenType.StringLiteral;
             break outerSwitch;
         case '{':
             currentToken.lineNumber = lineNumber;
             currentToken.value ~= lexTokenString(inputString,
                 endIndex, lineNumber);
-            currentToken.type = TokenType.stringLiteral;
+            currentToken.type = TokenType.StringLiteral;
             break outerSwitch;
         default:
             break;