unit test coverage

This commit is contained in:
Hackerpilot 2013-02-14 22:24:15 +00:00
parent 9c3cde0d94
commit 102836503f
1 changed files with 2593 additions and 2441 deletions

View File

@@ -119,6 +119,8 @@ import std.regex;
import std.string;
import std.traits;
import std.utf;
version (unittest) import std.stdio;
public:
@@ -177,10 +179,10 @@ struct Token
/**
* Comparison operator orders tokens by start index.
*/
int opCmp(size_t i) const
int opCmp(ref const(Token) other) const
{
if (startIndex < i) return -1;
if (startIndex > i) return 1;
if (startIndex < other.startIndex) return -1;
if (startIndex > other.startIndex) return 1;
return 0;
}
}
@@ -946,9 +948,13 @@ private:
case '-':
case '+':
if (foundSign || foundDigit)
{
errorMessage("Expected an exponent");
return;
}
foundSign = true;
keepNonNewlineChar();
break;
case '0': .. case '9':
case '_':
foundDigit = true;
@@ -961,6 +967,8 @@ private:
lexFloatSuffix();
return;
default:
if (!foundDigit)
errorMessage("Expected an exponent");
return;
}
}
@@ -973,7 +981,7 @@ private:
}
body
{
bool foundDot = false;
bool foundDot = buffer[0] == '.';
current.type = TokenType.intLiteral;
scope(exit) setTokenValue();
decimalLoop: while (!isEoF())
@@ -996,6 +1004,8 @@ private:
lexIntSuffix();
return;
case 'i':
lexFloatSuffix();
return;
case 'L':
if (foundDot)
{
@@ -1859,89 +1869,169 @@ private:
}
/**
* Returns: true if the token is an operator
*/
* Returns: true if the token is an operator
*/
/// True when t lies in the contiguous operator range of TokenType (assign .. xorEquals).
pure nothrow bool isOperator(const TokenType t)
{
return t >= TokenType.assign && t <= TokenType.xorEquals;
}
/**
* Returns: true if the token is a keyword
*/
* ditto
*/
/// Convenience overload for a whole Token: forwards to the TokenType-based check via t.type.
pure nothrow bool isOperator(ref const Token t)
{
return isOperator(t.type);
}
/**
* Returns: true if the token is a keyword
*/
/// True when t lies in the keyword range of TokenType (bool_ .. with_).
pure nothrow bool isKeyword(const TokenType t)
{
return t >= TokenType.bool_ && t <= TokenType.with_;
}
/**
* Returns: true if the token is a built-in type
*/
* ditto
*/
/// Token overload: checks the token's type field with the TokenType-based predicate.
pure nothrow bool isKeyword(ref const Token t)
{
return isKeyword(t.type);
}
/**
* Returns: true if the token is a built-in type
*/
/// True when t lies in the built-in type range of TokenType (bool_ .. wchar_).
/// NOTE(review): this range overlaps the keyword range used by isKeyword — intentional,
/// since built-in type names are keywords; confirm against the TokenType enum ordering.
pure nothrow bool isType(const TokenType t)
{
return t >= TokenType.bool_ && t <= TokenType.wchar_;
}
/**
* Returns: true if the token is an attribute
*/
* ditto
*/
/// Token overload: forwards to the TokenType-based check via t.type.
pure nothrow bool isType(ref const Token t)
{
return isType(t.type);
}
/**
* Returns: true if the token is an attribute
*/
/// True when t lies in the attribute range of TokenType (align_ .. static_).
pure nothrow bool isAttribute(const TokenType t)
{
return t >= TokenType.align_ && t <= TokenType.static_;
}
/**
* Returns: true if the token is a protection attribute
*/
* ditto
*/
/// Token overload: forwards to the TokenType-based check via t.type.
pure nothrow bool isAttribute(ref const Token t)
{
return isAttribute(t.type);
}
/**
* Returns: true if the token is a protection attribute
*/
/// True when t lies in the protection-attribute range of TokenType (export_ .. public_).
pure nothrow bool isProtection(const TokenType t)
{
return t >= TokenType.export_ && t <= TokenType.public_;
}
/**
* Returns: true if the token is a compile-time constant such as ___DATE__
*/
* ditto
*/
/// Token overload: forwards to the TokenType-based check via t.type.
pure nothrow bool isProtection(ref const Token t)
{
return isProtection(t.type);
}
/**
* Returns: true if the token is a compile-time constant such as ___DATE__
*/
/// True when t lies in the compile-time-constant range of TokenType (date .. traits).
pure nothrow bool isConstant(const TokenType t)
{
return t >= TokenType.date && t <= TokenType.traits;
}
/**
* Returns: true if the token is a string or number literal
*/
* ditto
*/
/// Token overload: forwards to the TokenType-based check via t.type.
pure nothrow bool isConstant(ref const Token t)
{
return isConstant(t.type);
}
/**
* Returns: true if the token is a string or number literal
*/
/// True when t lies in the literal range of TokenType (doubleLiteral .. wstringLiteral),
/// which by the enum's layout covers both number and string literals.
pure nothrow bool isLiteral(const TokenType t)
{
return t >= TokenType.doubleLiteral && t <= TokenType.wstringLiteral;
}
/**
* Returns: true if the token is a number literal
*/
* ditto
*/
/// Token overload: forwards to the TokenType-based check via t.type.
pure nothrow bool isLiteral(ref const Token t)
{
return isLiteral(t.type);
}
/**
* Returns: true if the token is a number literal
*/
/// True when t lies in the number-literal sub-range of TokenType (doubleLiteral .. ulongLiteral).
pure nothrow bool isNumberLiteral(const TokenType t)
{
return t >= TokenType.doubleLiteral && t <= TokenType.ulongLiteral;
}
/**
* Returns: true if the token is a string literal
*/
* ditto
*/
/// Token overload: forwards to the TokenType-based check via t.type.
pure nothrow bool isNumberLiteral(ref const Token t)
{
return isNumberLiteral(t.type);
}
/**
* Returns: true if the token is a string literal
*/
/// True when t lies in the string-literal sub-range of TokenType (dstringLiteral .. wstringLiteral).
pure nothrow bool isStringLiteral(const TokenType t)
{
return t >= TokenType.dstringLiteral && t <= TokenType.wstringLiteral;
}
/**
* Returns: true if the token is whitespace, a comment, a special token
* sequence, or an identifier
*/
* ditto
*/
/// Token overload: forwards to the TokenType-based check via t.type.
pure nothrow bool isStringLiteral(ref const Token t)
{
return isStringLiteral(t.type);
}
/**
* Returns: true if the token is whitespace, a comment, a special token
* sequence, or an identifier
*/
/// True when t lies in the "misc" range of TokenType (comment .. specialTokenSequence):
/// per the surrounding docs, whitespace, comments, special token sequences, and identifiers.
pure nothrow bool isMisc(const TokenType t)
{
return t >= TokenType.comment && t <= TokenType.specialTokenSequence;
}
/**
* Listing of all the tokens in the D language.
*/
* ditto
*/
/// Token overload: forwards to the TokenType-based check via t.type.
pure nothrow bool isMisc(ref const Token t)
{
return isMisc(t.type);
}
/**
* Listing of all the tokens in the D language.
*/
enum TokenType: ushort
{
assign, /// =
@@ -2852,7 +2942,6 @@ unittest
unittest
{
//import std.stdio;
auto source = cast(ubyte[]) ("=@& &=| |=~=:,--/ /=$.===>> >=++{[< <=<>=<>&&||(- -=%%=*=!!=!>!>=!<!<=!<>+ +=^^^^=}]);<< <<=>> >>=..*?~!<>=>>>>>>=...^ ^=");
auto expected = ["=", "@", "&", "&=", "|", "|=", "~=",
":", ",", "--", "/", "/=", "$", ".", "==",
@@ -2871,22 +2960,85 @@ unittest
unittest
{
import std.stdio;
auto source = cast(ubyte[]) (q{
1 1.2 1.2f 1u 1uL 0b11 0b1u 0b1 0x11001uL
});
auto expected = ["1", "1.2", "1.2f", "1u", "1uL", "0b11", "0b1u", "0b1",
"0x11001uL"];
auto source = cast(ubyte[]) (`
1 1.2 //comment
1.2f 1u 1uL 0b011 0b1uu 0b1 /+abc/+def+/+/0x11001uL
123e1L 123e+1f 123e-1i 15e++ 4ea 1.2u 4i 1337L 4.2L 1..2 4.3.5.8
`);
auto expected = ["1", "1.2", "1.2f", "1u", "1uL", "0b011", "0b1u", "u", "0b1",
"0x11001uL", "123e1L", "123e+1f", "123e-1i", "15e+", "+", "4e", "a",
"1.2", "u", "4i", "1337L", "4.2L", "1", "..", "2", "4.3", ".5", ".8"];
int errCount = 0;
void errorFunction(string file, size_t index, uint line, uint col, string msg)
{
++errCount;
}
LexerConfig config;
config.errorFunc = &errorFunction;
auto tokens = byToken(source, config);
//writeln(tokens.map!"a.value"().array());
assert (equal(map!"a.value"(tokens), expected));
assert (errCount == 3);
}
unittest
{
auto source = cast(ubyte[]) ("int #line 4\n double q{abcde}");
LexerConfig config;
auto tokens = byToken(source, config);
writeln(tokens.map!"a.value"().array());
assert (equal(map!"a.value"(tokens), expected));
assert (tokens.front.line == 1);
assert (tokens.moveFront() == TokenType.int_);
assert (tokens.front.line == 4);
assert (isType(tokens.front));
assert (tokens.front.value == "double");
tokens.popFront();
assert (tokens.front.value == "abcde");
assert (isStringLiteral(tokens.front));
}
unittest
{
auto source = cast(ubyte[]) (`"string`);
int errCount = 0;
void errorFunction(string file, size_t index, uint line, uint col, string msg)
{
++errCount;
}
LexerConfig config;
config.errorFunc = &errorFunction;
auto tokens = byToken(source, config);
assert (errCount == 1);
}
unittest
{
auto source = cast(ubyte[]) ("import foo");
LexerConfig config;
auto tokens = byToken(source, config);
Token a = tokens.moveFront();
Token b = tokens.moveFront();
assert (a != b);
assert (a != "foo");
assert (a < b);
assert (b > a);
assert (!(a > a));
assert (tokens.empty);
}
unittest
{
auto source = cast(ubyte[]) ("import std.stdio; void main(){writeln(\"hello world\");}");
LexerConfig config;
auto tokens = byToken(source, config);
int tokenCount = 0;
foreach (t; tokens)
{
++tokenCount;
}
assert (tokenCount == 16);
}
// Empty entry point — presumably present so this module links as a standalone
// executable when built for testing (unittests run before main in D); TODO confirm intent.
void main(string[] args)
{
}