summary refs log tree commit diff stats
path: root/src/eval_env.cc
diff options
context:
space:
mode:
author     Evan Martin <martine@danga.com>  2011-12-29 21:00:27 (GMT)
committer  Evan Martin <martine@danga.com>  2011-12-29 21:14:39 (GMT)
commit     8a0c96075786c1983bdfa2f37f32b75200ea0334 (patch)
tree       95e2b0c24aedcda9ec5ed09329e69fd7a1925212  /src/eval_env.cc
parent     ad7d9f43f1bd8e04321d8fdb07ebf7b96ab525a1 (diff)
download   Ninja-8a0c96075786c1983bdfa2f37f32b75200ea0334.zip
           Ninja-8a0c96075786c1983bdfa2f37f32b75200ea0334.tar.gz
           Ninja-8a0c96075786c1983bdfa2f37f32b75200ea0334.tar.bz2
switch the core ninja parser to use re2c for the lexer
- Delete the old "Tokenizer" code.
- Write separate tests for the lexer distinct from the parser.
- Switch the parser to use the new code.
- New lexer error output has file:line numbers so e.g. Emacs can jump your editor to the syntax error.
- The EvalEnv ($-interpolation) code is now part of the lexer as well.
Diffstat (limited to 'src/eval_env.cc')
-rw-r--r--  src/eval_env.cc  80
1 files changed, 22 insertions, 58 deletions
diff --git a/src/eval_env.cc b/src/eval_env.cc
index fa5e35b..57c20c6 100644
--- a/src/eval_env.cc
+++ b/src/eval_env.cc
@@ -27,64 +27,6 @@ void BindingEnv::AddBinding(const string& key, const string& val) {
bindings_[key] = val;
}
-bool EvalString::Parse(const string& input, string* err, size_t* err_index) {
- unparsed_ = input;
-
- string::size_type start, end;
- start = 0;
- do {
- end = input.find('$', start);
- if (end == string::npos) {
- end = input.size();
- break;
- }
- if (end > start)
- parsed_.push_back(make_pair(input.substr(start, end - start), RAW));
- start = end + 1;
- if (start < input.size() && input[start] == '{') {
- ++start;
- for (end = start + 1; end < input.size(); ++end) {
- if (input[end] == '}')
- break;
- }
- if (end >= input.size()) {
- *err = "expected closing curly after ${";
- if (err_index)
- *err_index = end;
- return false;
- }
- parsed_.push_back(make_pair(input.substr(start, end - start), SPECIAL));
- ++end;
- } else if (start < input.size() && input[start] == '$') {
- parsed_.push_back(make_pair("$", RAW));
- end = start + 1;
- } else if (start < input.size() && input[start] == ' ') {
- parsed_.push_back(make_pair(" ", RAW));
- end = start + 1;
- } else {
- for (end = start; end < input.size(); ++end) {
- char c = input[end];
- if (!(('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') ||
- ('0' <= c && c <= '9') || c == '_')) {
- break;
- }
- }
- if (end == start) {
- *err = "expected variable after $";
- if (err_index)
- *err_index = start;
- return false;
- }
- parsed_.push_back(make_pair(input.substr(start, end - start), SPECIAL));
- }
- start = end;
- } while (end < input.size());
- if (end > start)
- parsed_.push_back(make_pair(input.substr(start, end - start), RAW));
-
- return true;
-}
-
string EvalString::Evaluate(Env* env) const {
string result;
for (TokenList::const_iterator i = parsed_.begin(); i != parsed_.end(); ++i) {
@@ -95,3 +37,25 @@ string EvalString::Evaluate(Env* env) const {
}
return result;
}
+
+void EvalString::Add(TokenType type, StringPiece text) {
+ // Add it to the end of an existing RAW token if possible.
+ if (type == RAW && !parsed_.empty() && parsed_.back().second == RAW) {
+ parsed_.back().first.append(text.str_, text.len_);
+ } else {
+ parsed_.push_back(make_pair(text.AsString(), type));
+ }
+}
+
+string EvalString::Serialize() const {
+ string result;
+ for (TokenList::const_iterator i = parsed_.begin();
+ i != parsed_.end(); ++i) {
+ result.append("[");
+ if (i->second == SPECIAL)
+ result.append("$");
+ result.append(i->first);
+ result.append("]");
+ }
+ return result;
+}