Mirror of https://github.com/dlang/tools.git (synced 2025-05-05 09:30:53 +03:00)

Merge pull request #259 from wilzbach/update-dustmite

Update DustMite to 7403f0d053b96b2c21950df038923b5d335d557f

Merged-on-behalf-of: Petar Kirov <ZombineDev@users.noreply.github.com>

Parent: 599600c391
Commit: 9dde9afb4b

2 changed files with 65 additions and 5 deletions
@@ -9,6 +9,7 @@ import std.array;
 import std.ascii;
 import std.conv;
 import std.datetime;
+import std.datetime.stopwatch : StopWatch;
 import std.exception;
 import std.file;
 import std.getopt;
@@ -278,7 +279,7 @@ EOS");
 	else
 		reduce();
 
-	auto duration = cast(Duration)times.total.peek();
+	auto duration = times.total.peek();
 	duration = dur!"msecs"(duration.total!"msecs"); // truncate anything below ms, users aren't interested in that
 	if (foundAnything)
 	{
@@ -296,7 +297,7 @@ EOS");
 
 	if (showTimes)
		foreach (i, t; times.tupleof)
-			writefln("%s: %s", times.tupleof[i].stringof, cast(Duration)times.tupleof[i].peek());
+			writefln("%s: %s", times.tupleof[i].stringof, times.tupleof[i].peek());
 
 	return 0;
 }
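The two hunks above can drop their cast(Duration) calls because the import added earlier switches timing to std.datetime.stopwatch.StopWatch, whose peek() already returns a core.time.Duration (the older std.datetime StopWatch returned a TickDuration). A minimal standalone sketch of that API, not part of the patch, with a placeholder workload:

    import std.datetime.stopwatch : StopWatch, AutoStart;
    import std.stdio : writefln;

    void main()
    {
        auto sw = StopWatch(AutoStart.yes); // starts timing immediately
        foreach (i; 0 .. 1_000_000) {}      // placeholder work
        sw.stop();
        // peek() returns a core.time.Duration directly, so no cast(Duration) is needed
        writefln("elapsed: %s", sw.peek());
    }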
@@ -1424,9 +1425,17 @@ bool test(Reduction reduction)
 		{
 			if (!process.pid && !lookaheadIter.done)
 			{
+				auto initialReduction = lookaheadIter.front;
+				bool first = true;
+
 				while (true)
 				{
 					auto reduction = lookaheadIter.front;
+
+					if (!first && reduction == initialReduction)
+						break; // We've looped around using cached results
+					first = false;
+
 					auto digest = hash(reduction);
 
 					if (digest in cache || digest in lookaheadResults || lookaheadProcesses[].canFind!(p => p.digest == digest))
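This hunk guards the lookahead loop against spinning forever once every queued reduction is served from cache: it remembers the first reduction it saw and stops when that reduction comes around again. A self-contained sketch of the same guard, using std.range.cycle as a stand-in for DustMite's lookaheadIter (the values are illustrative only):

    import std.range : cycle;
    import std.stdio : writeln;

    void main()
    {
        auto iter = cycle([10, 20, 30]); // endless range, like a lookahead queue answered from cache
        auto initial = iter.front;       // remember where we started
        bool first = true;
        while (true)
        {
            auto item = iter.front;
            if (!first && item == initial)
                break;                   // we've looped around; everything was already visited
            first = false;
            writeln("processing ", item);
            iter.popFront();
        }
    }

The remaining hunks appear to belong to DustMite's splitter module (the Splitter enum, loadFile, and parseSplit symbols), where the new unified-diff support is added.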
@@ -8,6 +8,7 @@ import std.ascii;
 import std.algorithm;
 import std.array;
 import std.conv;
+import std.exception;
 import std.file;
 import std.functional;
 import std.path;
@@ -77,6 +78,7 @@ enum Splitter
 	lines,     /// Split by line ends
 	words,     /// Split by whitespace
 	D,         /// Parse D source code
+	diff,      /// Unified diffs
 }
 immutable string[] splitterNames = [EnumMembers!Splitter].map!(e => e.text().toLower()).array();
 
@@ -168,6 +170,8 @@ immutable ParseRule[] defaultRules =
 [
 	{ "*.d"    , Splitter.D     },
 	{ "*.di"   , Splitter.D     },
+	{ "*.diff" , Splitter.diff  },
+	{ "*.patch", Splitter.diff  },
 	{ "*"      , Splitter.files },
 ];
 
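With these two rules in place, inputs named like *.diff or *.patch are routed to the new diff splitter by default, while everything else keeps its previous handling. Assuming the ParseRule masks are matched with std.path.globMatch-style globbing (an assumption about the surrounding machinery, which is not shown in this diff), the selection behaves like:

    import std.path : globMatch;

    void main()
    {
        // Hypothetical stand-in for walking defaultRules until a mask matches.
        assert( globMatch("fix.patch",    "*.patch"));
        assert( globMatch("changes.diff", "*.diff"));
        assert(!globMatch("module.d",     "*.diff")); // *.d files still go to the D splitter
        assert( globMatch("anything.txt", "*"));      // the catch-all "*" rule matches last
    }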
@@ -212,6 +216,9 @@ Entity loadFile(string name, string path, ParseOptions options)
 					return result;
 				}
 			}
+			case Splitter.diff:
+				result.children = parseDiff(result.contents);
+				return result;
 		}
 	}
 	assert(false); // default * rule should match everything
@@ -1190,6 +1197,50 @@ Entity[] parseSplit(alias fun)(string text)
 alias parseToWords = parseSplit!isNotAlphaNum;
 alias parseToLines = parseSplit!isNewline;
 
+/// Split s on end~start, preserving end and start on each chunk
+private string[] split2(string end, string start)(string s)
+{
+	enum sep = end ~ start;
+	return split2Impl(s, sep, end.length);
+}
+
+private string[] split2Impl(string s, string sep, size_t endLength)
+{
+	string[] result;
+	while (true)
+	{
+		auto i = s.indexOf(sep);
+		if (i < 0)
+			return result ~ s;
+		i += endLength;
+		result ~= s[0..i];
+		s = s[i..$];
+	}
+}
+
+unittest
+{
+	assert(split2!("]", "[")(null) == [""]);
+	assert(split2!("]", "[")("[foo]") == ["[foo]"]);
+	assert(split2!("]", "[")("[foo][bar]") == ["[foo]", "[bar]"]);
+	assert(split2!("]", "[")("[foo] [bar]") == ["[foo] [bar]"]);
+}
+
+Entity[] parseDiff(string s)
+{
+	return s
+		.split2!("\n", "diff ")
+		.map!(
+			(string file)
+			{
+				auto chunks = file.split2!("\n", "@@ ");
+				return new Entity(chunks[0], chunks[1..$].map!(chunk => new Entity(chunk)).array);
+			}
+		)
+		.array
+	;
+}
+
 private:
 
 bool isNewline(char c) { return c == '\r' || c == '\n'; }
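Taken together, parseDiff builds a two-level tree from a unified diff: every chunk introduced by a "diff " header becomes a top-level entity, and every "@@ " hunk inside it becomes a child, so DustMite can discard whole files or individual hunks independently. A standalone sketch of that chunking; the splitOn helper below simply restates split2 so the example runs without the Entity type:

    import std.stdio : writeln;
    import std.string : indexOf;

    // Split s on end~start, keeping end at the tail of one chunk and start at the
    // head of the next (a restatement of split2/split2Impl above).
    string[] splitOn(string s, string end, string start)
    {
        auto sep = end ~ start;
        string[] result;
        while (true)
        {
            auto i = s.indexOf(sep);
            if (i < 0)
                return result ~ s;
            i += end.length;
            result ~= s[0 .. i];
            s = s[i .. $];
        }
    }

    void main()
    {
        auto patch =
            "diff --git a/a.txt b/a.txt\n" ~
            "@@ -1 +1 @@\n-old\n+new\n" ~
            "diff --git a/b.txt b/b.txt\n" ~
            "@@ -1 +1 @@\n-foo\n+bar\n";

        foreach (file; patch.splitOn("\n", "diff "))      // one element per file section
            writeln("file section with ",
                file.splitOn("\n", "@@ ").length - 1, " hunk(s)");
    }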