Special tokens, ddoc, better highlighter colors

Hackerpilot 2013-01-27 01:09:02 +00:00
parent c7b84ca0cc
commit 9a6e765335
3 changed files with 505 additions and 380 deletions

View File

@ -17,6 +17,8 @@ void writeSpan(string cssClass, string value)
stdout.write(`<span class="`, cssClass, `">`, value.replace("&", "&amp;").replace("<", "&lt;"), `</span>`);
}
// http://ethanschoonover.com/solarized
void highlight(R)(R tokens)
{
stdout.writeln(q"[<!DOCTYPE html>
@ -25,13 +27,14 @@ void highlight(R)(R tokens)
<meta http-equiv="content-type" content="text/html; charset=UTF-8"/>
<body>
<style type="text/css">
html { background-color: #fff; color: #222; }
.kwrd { font-weight: bold; color: blue; }
.com { color: green; font-style: italic;}
.num { color: orangered; font-weigth: bold; }
.str { color: red; font-style: italic; }
.op { color: 333; font-weight: bold; }
.type { color: magenta; font-weight: bold; }
html { background-color: #fdf6e3; color: #002b36; }
.kwrd { color: #b58900; font-weight: bold; }
.com { color: #93a1a1; font-style: italic; }
.num { color: #dc322f; font-weight: bold; }
.str { color: #2aa198; font-style: italic; }
.op { color: #586e75; font-weight: bold; }
.type { color: #268bd2; font-weight: bold; }
.cons { color: #859900; font-weight: bold; }
</style>
<pre>]");
@ -49,6 +52,8 @@ html { background-color: #fff; color: #222; }
writeSpan("num", t.value);
else if (t.type > TokenType.OPERATORS_BEGIN && t.type < TokenType.OPERATORS_END)
writeSpan("op", t.value);
else if (t.type > TokenType.CONSTANTS_BEGIN && t.type < TokenType.CONSTANTS_END)
writeSpan("cons", t.value);
else
stdout.write(t.value.replace("<", "&lt;"));
}
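A minimal sketch of driving the new "cons" class (module name, snippet, and file name are illustrative, not part of this commit): with TokenStyle.Source the special tokens keep their source text, so they land in the CONSTANTS range and get wrapped by writeSpan("cons", ...).

import highlighter;
import std.d.lexer;

void main()
{
    // __FILE__ and __DATE__ lex as constants, so writeSpan("cons", ...) wraps them.
    auto tokens = "string f = __FILE__ ~ __DATE__;".byToken("demo.d",
        IterationStyle.Everything, TokenStyle.Source);
    highlight(tokens);
}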

main.d
View File

@ -160,13 +160,13 @@ int main(string[] args)
char[] buf;
while (stdin.readln(buf))
f.put(buf);
highlighter.highlight(f.data.byToken(IterationStyle.Everything,
StringStyle.Source));
highlighter.highlight(f.data.byToken("stdin", IterationStyle.Everything,
TokenStyle.Source));
}
else
{
highlighter.highlight(args[1].readText().byToken(
IterationStyle.Everything, StringStyle.Source));
highlighter.highlight(args[1].readText().byToken(args[1],
IterationStyle.Everything, TokenStyle.Source));
}
return 0;
}

View File

@ -1,7 +1,7 @@
// Written in the D programming language
/**
* This module contains a range-based lexer for the D programming language.
* This module contains a range-based _lexer for the D programming language.
*
* Examples:
*
@ -17,6 +17,7 @@
* stdout.write(`<span class="`, cssClass, `">`, value.replace("&", "&amp;").replace("<", "&lt;"), `</span>`);
* }
*
* // http://ethanschoonover.com/solarized
* void highlight(R)(R tokens)
* {
* stdout.writeln(q"[<!DOCTYPE html>
@ -25,13 +26,14 @@
* <meta http-equiv="content-type" content="text/html; charset=UTF-8"/>
* <body>
* <style type="text/css">
* html { background-color: #fff; color: #222; }
* .kwrd { font-weight: bold; color: blue; }
* .com { color: green; font-style: italic;}
* .num { color: orangered; font-weigth: bold; }
* .str { color: red; font-style: italic; }
* .op { color: 333; font-weight: bold; }
* .type { color: magenta; font-weight: bold; }
* html { background-color: #fdf6e3; color: #002b36; }
* .kwrd { color: #b58900; font-weight: bold; }
* .com { color: #93a1a1; font-style: italic; }
* .num { color: #dc322f; font-weight: bold; }
* .str { color: #2aa198; font-style: italic; }
* .op { color: #586e75; font-weight: bold; }
* .type { color: #268bd2; font-weight: bold; }
* .cons { color: #859900; font-weight: bold; }
* </style>
* <pre>]");
*
@ -57,9 +59,29 @@
*
* void main(string[] args)
* {
* args[1].readText().byToken(IterationStyle.Everything, StringStyle.Source).highlight();
* args[1].readText().byToken(args[1], IterationStyle.Everything, TokenStyle.Source).highlight();
* }
* ---
* Iterate by tokens that would be significant to a parser
* ---
* import std.range;
* import std.d.lexer;
*
* // ...
*
* string s = "import std.stdio; // comment";
* auto tokens = byToken(s);
* // The comment and whitespace are not included
* assert (walkLength(tokens) == 5);
* ---
* Replace special tokens
* ---
* string s = "#line 5\n__VERSION__";
* auto tokens = byToken(s, "example.d", IterationStyle.CodeOnly, TokenStyle.Default, "foo", "1.0");
* assert (tokens.front.type == TokenType.StringLiteral);
* assert (tokens.front.value == "1.0");
* assert (tokens.front.lineNumber == 5);
* ---
*
* Copyright: Brian Schott 2013
* License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
@ -76,6 +98,8 @@ import std.conv;
import std.uni;
import std.ascii;
import std.exception;
import std.datetime;
import std.string;
import std.d.entities;
public:
@ -129,7 +153,8 @@ struct Token
}
/**
* Configure the behavior of the byToken() function
* Configure the behavior of the byToken() function. These flags may be
* combined using a bitwise or.
*/
enum IterationStyle
{
@ -139,21 +164,25 @@ enum IterationStyle
IncludeComments = 0b0001,
/// Includes whitespace
IncludeWhitespace = 0b0010,
/// Include $(LINK2 http://dlang.org/lex.html#specialtokens, special tokens)
/// Include $(LINK2 http://dlang.org/lex.html#Special%20Tokens%20Sequence, special token sequences)
IncludeSpecialTokens = 0b0100,
/// Include everything
Everything = IncludeComments | IncludeWhitespace
/// Do not terminate iteration upon reaching the ___EOF__ token
IgnoreEOF = 0b1000,
/// Include everything, including the __EOF__ token.
Everything = IncludeComments | IncludeWhitespace | IgnoreEOF
}
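Since the members are bit flags, hand-rolled combinations in D need a cast back to the enum type. A minimal sketch (file name, snippet, and token counts are illustrative, not from this commit):

import std.range;
import std.d.lexer;

string src = "int x; // note";
// CodeOnly (the default) drops the comment and the whitespace: int, x, ;
assert (walkLength(src.byToken("demo.d")) == 3);
// OR-ing enum members yields the base integer type, so cast when combining by hand.
auto style = cast(IterationStyle)
    (IterationStyle.IncludeComments | IterationStyle.IncludeWhitespace);
auto verbose = src.byToken("demo.d", style); // comment and whitespace tokens included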
/**
* Configuration of the string lexing style
* Configuration of the string lexing style. These flags may be combined with a
* bitwise or.
*/
enum StringStyle : uint
enum TokenStyle : uint
{
/**
* Escape sequences will be replaced with their equivalent characters,
* enclosing quote characters will not be included. Useful for creating a
* compiler or interpreter.
* enclosing quote characters will not be included. Special tokens such as
* __VENDOR__ will be replaced with their equivalent strings. Useful for
* creating a compiler or interpreter.
*/
Default = 0b0000,
@ -170,27 +199,43 @@ enum StringStyle : uint
* include the $(D_STRING 'w') character as well as the opening and closing
* quotes$(RPAREN)
*/
IncludeQuotes = 0x0010,
IncludeQuotes = 0b0010,
/**
* Do not replace the value field of the special tokens such as ___DATE__
* with their string equivalents.
*/
DoNotReplaceSpecial = 0b0100,
/**
* Strings will be read exactly as they appeared in the source, including
* their opening and closing quote characters. Useful for syntax
* highlighting.
*/
Source = NotEscaped | IncludeQuotes,
Source = NotEscaped | IncludeQuotes | DoNotReplaceSpecial,
}
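A sketch of the new DoNotReplaceSpecial flag (identifiers are illustrative; this assumes the replacement guard in TokenRange honors the flag as documented): the special token keeps its raw text and type instead of being substituted.

auto raw = "__VENDOR__".byToken("demo.d", IterationStyle.CodeOnly,
    TokenStyle.DoNotReplaceSpecial);
assert (raw.front.type == TokenType.Vendor);
assert (raw.front.value == "__VENDOR__");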
/// Default replacement for the ___VERSION__ special token
immutable string VERSION = "1.0";
/// Default replacement for the ___VENDOR__ special token
immutable string VENDOR = "std.d.lexer";
/**
* Iterate over the given range of characters by D tokens.
* Params:
* range = the range of characters
* iterationStyle = See IterationStyle
* stringStyle = see StringStyle
* stringStyle = see TokenStyle
* vendor = the string literal that should replace the ___VENDOR__ special token
* ver = the string literal that should replace the ___VERSION__ special token
* Returns:
* an input range of tokens
*/
TokenRange!(R) byToken(R)(R range, const IterationStyle iterationStyle = IterationStyle.CodeOnly,
const StringStyle stringStyle = StringStyle.Default) if (isForwardRange!(R) && isSomeChar!(ElementType!(R)))
TokenRange!(R) byToken(R)(R range, string fileName = "",
const IterationStyle iterationStyle = IterationStyle.CodeOnly,
const TokenStyle stringStyle = TokenStyle.Default, string vendor = VENDOR,
string ver = VERSION) if (isForwardRange!(R) && isSomeChar!(ElementType!(R)))
{
auto r = new TokenRange!(R)(range);
r.stringStyle = stringStyle;
@ -201,16 +246,10 @@ TokenRange!(R) byToken(R)(R range, const IterationStyle iterationStyle = Iterati
}
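A sketch of overriding the defaults (the vendor and version strings are illustrative): the last two parameters are what __VENDOR__ and __VERSION__ get rewritten to.

auto toks = "__VENDOR__".byToken("app.d", IterationStyle.CodeOnly,
    TokenStyle.Default, "MyCompiler", "2.063");
assert (toks.front.type == TokenType.StringLiteral);
assert (toks.front.value == "MyCompiler");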
/**
* Range of tokens. Avoid creating instances of this manually. Use
* $(DDOC_PSYMBOL byToken$(LPAREN)$(RPAREN)) instead, as it does some initialization work.
* Range of tokens. Use byToken$(LPAREN)$(RPAREN) to instantiate.
*/
class TokenRange(R) : InputRange!(Token)
{
this(ref R range)
{
this.range = range;
}
/**
* Returns: true if the range is empty
*/
@ -294,6 +333,11 @@ class TokenRange(R) : InputRange!(Token)
private:
this(ref R range)
{
this.range = range;
}
/*
* Advances the range to the next token
*/
@ -482,6 +526,55 @@ private:
}
current.value = to!string(app.data);
current.type = lookupTokenType(current.value);
if (!(iterStyle & IterationStyle.IgnoreEOF) && current.type == TokenType.EOF)
{
_empty = true;
return;
}
if (stringStyle & TokenStyle.DoNotReplaceSpecial)
break;
switch (current.type)
{
case TokenType.Date:
current.type = TokenType.StringLiteral;
auto time = Clock.currTime();
current.value = format("%s %02d %04d", time.month, time.day, time.year);
break;
case TokenType.Time:
auto time = Clock.currTime();
current.type = TokenType.StringLiteral;
current.value = (cast(TimeOfDay)(time)).toISOExtString();
break;
case TokenType.Timestamp:
auto time = Clock.currTime();
auto dt = cast(DateTime) time;
current.type = TokenType.StringLiteral;
current.value = format("%s %s %02d %02d:%02d:%02d %04d",
dt.dayOfWeek, dt.month, dt.day, dt.hour, dt.minute,
dt.second, dt.year);
break;
case TokenType.Vendor:
current.type = TokenType.StringLiteral;
current.value = vendor;
break;
case TokenType.CompilerVersion:
current.type = TokenType.StringLiteral;
current.value = ver;
break;
case TokenType.Line:
current.type = TokenType.IntLiteral;
current.value = format("%d", current.lineNumber);
break;
case TokenType.File:
current.type = TokenType.StringLiteral;
current.value = fileName;
break;
default:
break;
}
break;
}
}
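For reference, a worked sketch of the strings these format calls produce (the fixed date 2013-01-27 01:09:02 is illustrative; Month and DayOfWeek members print lowercase with %s):

import std.datetime;
import std.string;

auto dt = DateTime(2013, 1, 27, 1, 9, 2);
// __DATE__ replacement
assert (format("%s %02d %04d", dt.month, dt.day, dt.year) == "jan 27 2013");
// __TIME__ replacement
assert (dt.timeOfDay.toISOExtString() == "01:09:02");
// __TIMESTAMP__ replacement
assert (format("%s %s %02d %02d:%02d:%02d %04d", dt.dayOfWeek, dt.month,
    dt.day, dt.hour, dt.minute, dt.second, dt.year) == "sun jan 27 01:09:02 2013");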
@ -492,7 +585,10 @@ private:
R range;
bool _empty;
IterationStyle iterStyle;
StringStyle stringStyle;
TokenStyle stringStyle;
string ver;
string vendor;
string fileName;
}
unittest
@ -521,7 +617,7 @@ unittest
* $(TR $(TD KEYWORDS_BEGIN) $(TD KEYWORDS_END) $(TD keywords) $(TD class, if, assert))
* $(TR $(TD ATTRIBUTES_BEGIN) $(TD ATTRIBUTES_END) $(TD attributes) $(TD override, synchronized, __gshared))
* $(TR $(TD ATTRIBUTES_BEGIN) $(TD ATTRIBUTES_END) $(TD protection) $(TD public, protected))
* $(TR $(TD CONSTANTS_BEGIN) $(TD CONSTANTS_END) $(TD compile-time constants) $(TD __FILE__, __TIME__))
* $(TR $(TD CONSTANTS_BEGIN) $(TD CONSTANTS_END) $(TD compile-time constants) $(TD ___FILE__, ___TIME__))
* $(TR $(TD LITERALS_BEGIN) $(TD LITERALS_END) $(TD string and numeric literals) $(TD "str", 123))
* $(TR $(TD NUMBERS_BEGIN) $(TD NUMBERS_END) $(TD numeric literals) $(TD 0x123p+9, 0b0110))
* $(TR $(TD STRINGS_BEGIN) $(TD STRINGS_END) $(TD string literals) $(TD `123`c, q{tokens;}, "abcde"))
@ -533,69 +629,69 @@ enum TokenType: uint
{
// Operators
OPERATORS_BEGIN, ///
Assign, /// $(D_KEYWORD =)
At, /// $(D_KEYWORD @)
BitAnd, /// $(D_KEYWORD &)
BitAndEquals, /// $(D_KEYWORD &=)
BitOr, /// $(D_KEYWORD |)
BitOrEquals, /// $(D_KEYWORD |=)
CatEquals, /// $(D_KEYWORD ~=)
Colon, /// $(D_KEYWORD :)
Comma, /// $(D_KEYWORD ,)
Decrement, /// $(D_KEYWORD --)
Div, /// $(D_KEYWORD /)
DivEquals, /// $(D_KEYWORD /=)
Dollar, /// $(D_KEYWORD $)
Dot, /// $(D_KEYWORD .)
Equals, /// $(D_KEYWORD ==)
Assign, /// =
At, /// @
BitAnd, /// &
BitAndEquals, /// &=
BitOr, /// |
BitOrEquals, /// |=
CatEquals, /// ~=
Colon, /// :
Comma, /// ,
Decrement, /// --
Div, /// /
DivEquals, /// /=
Dollar, /// $
Dot, /// .
Equals, /// ==
GoesTo, // =>
Greater, /// $(D_KEYWORD >)
GreaterEqual, /// $(D_KEYWORD >=)
Hash, // $(D_KEYWORD #)
Increment, /// $(D_KEYWORD ++)
LBrace, /// $(D_KEYWORD {)
LBracket, /// $(D_KEYWORD [)
Less, /// $(D_KEYWORD <)
LessEqual, /// $(D_KEYWORD <=)
LessEqualGreater, // $(D_KEYWORD <>=)
LessOrGreater, /// $(D_KEYWORD <>)
LogicAnd, /// $(D_KEYWORD &&)
LogicOr, /// $(D_KEYWORD ||)
LParen, /// $(D_KEYWORD $(LPAREN))
Minus, /// $(D_KEYWORD -)
MinusEquals, /// $(D_KEYWORD -=)
Mod, /// $(D_KEYWORD %)
ModEquals, /// $(D_KEYWORD %=)
MulEquals, /// $(D_KEYWORD *=)
Not, /// $(D_KEYWORD !)
NotEquals, /// $(D_KEYWORD !=)
NotGreater, /// $(D_KEYWORD !>)
NotGreaterEqual, /// $(D_KEYWORD !>=)
NotLess, /// $(D_KEYWORD !<)
NotLessEqual, /// $(D_KEYWORD !<=)
NotLessEqualGreater, /// $(D_KEYWORD !<>)
Plus, /// $(D_KEYWORD +)
PlusEquals, /// $(D_KEYWORD +=)
Pow, /// $(D_KEYWORD ^^)
PowEquals, /// $(D_KEYWORD ^^=)
RBrace, /// $(D_KEYWORD })
RBracket, /// $(D_KEYWORD ])
RParen, /// $(D_KEYWORD $(RPAREN))
Semicolon, /// $(D_KEYWORD ;)
ShiftLeft, /// $(D_KEYWORD <<)
ShiftLeftEqual, /// $(D_KEYWORD <<=)
ShiftRight, /// $(D_KEYWORD >>)
ShiftRightEqual, /// $(D_KEYWORD >>=)
Greater, /// >
GreaterEqual, /// >=
Hash, // #
Increment, /// ++
LBrace, /// {
LBracket, /// [
Less, /// <
LessEqual, /// <=
LessEqualGreater, // <>=
LessOrGreater, /// <>
LogicAnd, /// &&
LogicOr, /// ||
LParen, /// $(LPAREN)
Minus, /// -
MinusEquals, /// -=
Mod, /// %
ModEquals, /// %=
MulEquals, /// *=
Not, /// !
NotEquals, /// !=
NotGreater, /// !>
NotGreaterEqual, /// !>=
NotLess, /// !<
NotLessEqual, /// !<=
NotLessEqualGreater, /// !<>
Plus, /// +
PlusEquals, /// +=
Pow, /// ^^
PowEquals, /// ^^=
RBrace, /// }
RBracket, /// ]
RParen, /// $(RPAREN)
Semicolon, /// ;
ShiftLeft, /// <<
ShiftLeftEqual, /// <<=
ShiftRight, /// >>
ShiftRightEqual, /// >>=
Slice, // ..
Star, /// $(D_KEYWORD *)
Ternary, /// $(D_KEYWORD ?)
Tilde, /// $(D_KEYWORD ~)
Unordered, /// $(D_KEYWORD !<>=)
UnsignedShiftRight, /// $(D_KEYWORD >>>)
UnsignedShiftRightEqual, /// $(D_KEYWORD >>>=)
Vararg, /// $(D_KEYWORD ...)
Xor, /// $(D_KEYWORD ^)
XorEquals, /// $(D_KEYWORD ^=)
Star, /// *
Ternary, /// ?
Tilde, /// ~
Unordered, /// !<>=
UnsignedShiftRight, /// >>>
UnsignedShiftRightEqual, /// >>>=
Vararg, /// ...
Xor, /// ^
XorEquals, /// ^=
OPERATORS_END, ///
@ -717,10 +813,16 @@ enum TokenType: uint
// Constants
CONSTANTS_BEGIN, ///
File, /// $(D_KEYWORD __FILE__)
Line, /// $(D_KEYWORD __LINE__)
Thread, /// $(D_KEYWORD __thread)
Traits, /// $(D_KEYWORD __traits)
Date, /// ___DATE__
EOF, /// ___EOF__
Time, /// ___TIME__
Timestamp, /// ___TIMESTAMP__
Vendor, /// ___VENDOR__
CompilerVersion, /// ___VERSION__
File, /// ___FILE__
Line, /// ___LINE__
Thread, /// ___thread
Traits, /// ___traits
CONSTANTS_END, ///
// Misc
@ -1119,7 +1221,7 @@ unittest
}
Token lexHexString(R, C = ElementType!R)(ref R input, ref uint index, ref uint lineNumber,
const StringStyle style = StringStyle.Default)
const TokenStyle style = TokenStyle.Default)
in
{
assert (input.front == 'x');
@ -1131,7 +1233,7 @@ body
t.startIndex = index;
t.type = TokenType.StringLiteral;
auto app = appender!(C[])();
if (style & StringStyle.IncludeQuotes)
if (style & TokenStyle.IncludeQuotes)
app.put("x\"");
input.popFront();
input.popFront();
@ -1149,7 +1251,7 @@ body
input.popFront();
++index;
}
else if (std.uni.isWhite(input.front) && (style & StringStyle.NotEscaped))
else if (std.uni.isWhite(input.front) && (style & TokenStyle.NotEscaped))
{
app.put(input.front);
input.popFront();
@ -1157,7 +1259,7 @@ body
}
else if (input.front == '"')
{
if (style & StringStyle.IncludeQuotes)
if (style & TokenStyle.IncludeQuotes)
app.put('"');
input.popFront();
++index;
@ -1179,7 +1281,7 @@ body
t.type = TokenType.DStringLiteral;
goto case 'c';
case 'c':
if (style & StringStyle.IncludeQuotes)
if (style & TokenStyle.IncludeQuotes)
app.put(input.front);
input.popFront();
++index;
@ -1188,7 +1290,7 @@ body
break;
}
}
if (style & StringStyle.NotEscaped)
if (style & TokenStyle.NotEscaped)
t.value = to!string(app.data);
else
{
@ -1218,17 +1320,17 @@ unittest
assert (br == TokenType.WStringLiteral);
auto c = `x"6d"`;
auto cr = lexHexString(c, i, l, StringStyle.NotEscaped);
auto cr = lexHexString(c, i, l, TokenStyle.NotEscaped);
assert (cr == "6d");
auto d = `x"5e5f"d`;
auto dr = lexHexString(d, i, l, StringStyle.NotEscaped | StringStyle.IncludeQuotes);
auto dr = lexHexString(d, i, l, TokenStyle.NotEscaped | TokenStyle.IncludeQuotes);
assert (dr == `x"5e5f"d`);
assert (dr == TokenType.DStringLiteral);
}
Token lexString(R)(ref R input, ref uint index, ref uint lineNumber,
const StringStyle style = StringStyle.Default)
const TokenStyle style = TokenStyle.Default)
in
{
assert (input.front == '\'' || input.front == '"' || input.front == '`' || input.front == 'r');
@ -1243,7 +1345,7 @@ body
bool isWysiwyg = input.front == 'r' || input.front == '`';
if (input.front == 'r')
{
if (style & StringStyle.IncludeQuotes)
if (style & TokenStyle.IncludeQuotes)
app.put('r');
input.popFront();
}
@ -1251,7 +1353,7 @@ body
input.popFront();
++index;
if (style & StringStyle.IncludeQuotes)
if (style & TokenStyle.IncludeQuotes)
app.put(quote);
while (!isEoF(input))
{
@ -1262,7 +1364,7 @@ body
}
else if (input.front == '\\')
{
if (style & StringStyle.NotEscaped)
if (style & TokenStyle.NotEscaped)
{
auto r = input.save();
r.popFront();
@ -1294,7 +1396,7 @@ body
}
else if (input.front == quote)
{
if (style & StringStyle.IncludeQuotes)
if (style & TokenStyle.IncludeQuotes)
app.put(quote);
input.popFront();
++index;
@ -1318,7 +1420,7 @@ body
t.type = TokenType.DStringLiteral;
goto case 'c';
case 'c':
if (style & StringStyle.IncludeQuotes)
if (style & TokenStyle.IncludeQuotes)
app.put(input.front);
input.popFront();
++index;
@ -1340,7 +1442,7 @@ unittest
auto b = "\"ab\\ncd\"";
assert (lexString(b, i, l) == "ab\ncd");
auto c = "`abc\\ndef`";
assert (lexString(c, i, l, StringStyle.NotEscaped) == "abc\\ndef");
assert (lexString(c, i, l, TokenStyle.NotEscaped) == "abc\\ndef");
auto d = `"12345"w`;
assert (lexString(d, i, l).type == TokenType.WStringLiteral);
auto e = `"abc"c`;
@ -1352,7 +1454,7 @@ unittest
}
Token lexDelimitedString(R)(ref R input, ref uint index,
ref uint lineNumber, const StringStyle stringStyle = StringStyle.Default)
ref uint lineNumber, const TokenStyle stringStyle = TokenStyle.Default)
in
{
assert(input.front == 'q');
@ -1368,7 +1470,7 @@ body
input.popFront(); // q
input.popFront(); // "
index += 2;
if (stringStyle & StringStyle.IncludeQuotes)
if (stringStyle & TokenStyle.IncludeQuotes)
{
app.put('q');
app.put('"');
@ -1414,7 +1516,7 @@ body
app.put('"');
++index;
input.popFront();
if (stringStyle & StringStyle.IncludeQuotes)
if (stringStyle & TokenStyle.IncludeQuotes)
t.value = to!string(app.data);
else
t.value = to!string(app.data[0 .. app.data.length - hereOpen.data.length - 1]);
@ -1430,7 +1532,7 @@ body
}
else
{
if (stringStyle & StringStyle.IncludeQuotes)
if (stringStyle & TokenStyle.IncludeQuotes)
app.put(input.front);
input.popFront();
int depth = 1;
@ -1445,7 +1547,7 @@ body
--depth;
if (depth == 0)
{
if (stringStyle & StringStyle.IncludeQuotes)
if (stringStyle & TokenStyle.IncludeQuotes)
{
app.put(close);
app.put('"');
@ -1474,7 +1576,7 @@ body
t.type = TokenType.DStringLiteral;
goto case 'c';
case 'c':
if (stringStyle & StringStyle.IncludeQuotes)
if (stringStyle & TokenStyle.IncludeQuotes)
app.put(input.front);
input.popFront();
++index;
@ -1503,13 +1605,13 @@ unittest
assert (br == TokenType.WStringLiteral);
auto c = `q"[<xml></xml>]");`;
auto cr = lexDelimitedString(c, i, l, StringStyle.Source);
auto cr = lexDelimitedString(c, i, l, TokenStyle.Source);
assert (cr == `q"[<xml></xml>]"`);
assert (cr == TokenType.StringLiteral);
}
Token lexTokenString(R)(ref R input, ref uint index, ref uint lineNumber,
const StringStyle stringStyle = StringStyle.Default)
const TokenStyle stringStyle = TokenStyle.Default)
in
{
assert (input.front == 'q');
@ -1524,12 +1626,12 @@ body
input.popFront(); // q
input.popFront(); // {
index += 2;
if (stringStyle & StringStyle.IncludeQuotes)
if (stringStyle & TokenStyle.IncludeQuotes)
{
app.put('q');
app.put('{');
}
auto r = byToken(input, IterationStyle.Everything, StringStyle.Source);
auto r = byToken(input, "", IterationStyle.Everything, TokenStyle.Source);
r.index = index;
int depth = 1;
while (!r.empty)
@ -1543,7 +1645,7 @@ body
--depth;
if (depth <= 0)
{
if (stringStyle & StringStyle.IncludeQuotes)
if (stringStyle & TokenStyle.IncludeQuotes)
app.put('}');
r.popFront();
break;
@ -1553,7 +1655,7 @@ body
r.popFront();
}
auto n = app.data.length - (stringStyle & StringStyle.IncludeQuotes ? 2 : 0);
auto n = app.data.length - (stringStyle & TokenStyle.IncludeQuotes ? 2 : 0);
input.popFrontN(n);
if (!input.isEoF())
{
@ -1566,7 +1668,7 @@ body
t.type = TokenType.DStringLiteral;
goto case 'c';
case 'c':
if (stringStyle & StringStyle.IncludeQuotes)
if (stringStyle & TokenStyle.IncludeQuotes)
app.put(input.front);
input.popFront();
++index;
@ -1591,7 +1693,7 @@ unittest
assert (ar == "import std.stdio;");
auto b = `q{writeln("hello world");}`;
auto br = lexTokenString(b, i, l, StringStyle.Source);
auto br = lexTokenString(b, i, l, TokenStyle.Source);
assert (br == TokenType.StringLiteral);
assert (br == `q{writeln("hello world");}`);
}
@ -2384,6 +2486,7 @@ pure nothrow TokenType lookupTokenType(const string input)
default: break;
}
break;
case 6:
switch (input)
{
@ -2413,6 +2516,7 @@ pure nothrow TokenType lookupTokenType(const string input)
case 7:
switch (input)
{
case "__EOF__": return TokenType.EOF;
case "cdouble": return TokenType.Cdouble;
case "default": return TokenType.Default;
case "dstring": return TokenType.DString;
@ -2443,6 +2547,8 @@ pure nothrow TokenType lookupTokenType(const string input)
case "function": return TokenType.Function;
case "unittest": return TokenType.Unittest;
case "__FILE__": return TokenType.File;
case "__DATE__": return TokenType.Date;
case "__TIME__": return TokenType.Date;
default: break;
}
break;
@ -2458,14 +2564,26 @@ pure nothrow TokenType lookupTokenType(const string input)
}
break;
case 10:
if (input == "deprecated")
return TokenType.Deprecated;
switch (input)
{
case "deprecated": return TokenType.Deprecated;
case "__VENDOR__": return TokenType.Vendor;
default: break;
}
break;
case 11:
if (input == "__VERSION__")
return TokenType.CompilerVersion;
break;
case 12:
if (input == "synchronized")
return TokenType.Synchronized;
break;
case 13:
if (input == "__TIMESTAMP__")
return TokenType.Timestamp;
break;
case 15:
if (input == "foreach_reverse")
return TokenType.Foreach_reverse;
break;
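A few sanity checks implied by the new lookup cases (a sketch, not one of this commit's unittests):

unittest
{
    assert (lookupTokenType("__EOF__") == TokenType.EOF);
    assert (lookupTokenType("__VENDOR__") == TokenType.Vendor);
    assert (lookupTokenType("__VERSION__") == TokenType.CompilerVersion);
    assert (lookupTokenType("__TIMESTAMP__") == TokenType.Timestamp);
}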
@ -2573,3 +2691,5 @@ string generateCaseTrie(string[] args ...)
}
return printCaseStatements(t, "");
}
//void main(string[] args) {}