From 8fad7eb2b875e5eae88632f2c4773545e642cef7 Mon Sep 17 00:00:00 2001 From: Oliver Vinn Date: Mon, 13 Jan 2014 00:02:23 +0000 Subject: Added *.obj ignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 501a02d..e59e844 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ *.pyc +*.obj *.exe *.pdb *.ilk -- cgit v0.12 From 45aa2085f984b707ed3f8543317eb4952af0158f Mon Sep 17 00:00:00 2001 From: Andrey Malets Date: Sat, 6 Sep 2014 23:28:05 +0600 Subject: Document target^ syntax. Added a note into the documentation about interesting target^ syntax --- doc/manual.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc index fcf3db3..07ecfcc 100644 --- a/doc/manual.asciidoc +++ b/doc/manual.asciidoc @@ -180,6 +180,12 @@ Run `ninja`. By default, it looks for a file named `build.ninja` in the current directory and builds all out-of-date targets. You can specify which targets (files) to build as command line arguments. +There is also a special syntax `target^` for specifying a target +as the first output of some rule containing the source you put in +the command line, if one exists. For example, if you specify target as +`foo.c^` then `foo.o` will get built (assuming you have those targets +in your build files). + `ninja -h` prints help output. Many of Ninja's flags intentionally match those of Make; e.g `ninja -C build -j 20` changes into the `build` directory and runs 20 build commands in parallel. (Note that -- cgit v0.12 From 77e3f6032463a732ce4a91e02f9668eaf66b60cb Mon Sep 17 00:00:00 2001 From: kwesolowski Date: Sat, 25 Oct 2014 15:21:53 +0200 Subject: Fixed cygwin compatibility (issue #806) Fixed platform specific issues causing cygwin build to fail. --- src/util.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/util.cc b/src/util.cc index 8b69b71..4d0ceae 100644 --- a/src/util.cc +++ b/src/util.cc @@ -14,7 +14,10 @@ #include "util.h" -#ifdef _WIN32 +#ifdef __CYGWIN__ +#include +#include +#elif defined( _WIN32) #include #include #include -- cgit v0.12 From f5f4b3ba7ca7808464f9715456e921ab765a8be7 Mon Sep 17 00:00:00 2001 From: Fanael Linithien Date: Mon, 24 Nov 2014 19:09:33 +0100 Subject: Remove unneeded save-excursion. syntax-propertize-function is allowed to move the point and mark. --- misc/ninja-mode.el | 43 +++++++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/misc/ninja-mode.el b/misc/ninja-mode.el index 71825d5..639e537 100644 --- a/misc/ninja-mode.el +++ b/misc/ninja-mode.el @@ -47,28 +47,27 @@ (defun ninja-syntax-propertize (start end) (save-match-data - (save-excursion - (goto-char start) - (while (search-forward "#" end t) - (let ((match-pos (match-beginning 0))) - (when (and - ;; Is it the first non-white character on the line? - (eq match-pos (save-excursion (back-to-indentation) (point))) - (save-excursion - (goto-char (line-end-position 0)) - (or - ;; If we're continuting the previous line, it's not a - ;; comment. - (not (eq ?$ (char-before))) - ;; Except if the previous line is a comment as well, as the - ;; continuation dollar is ignored then. - (nth 4 (syntax-ppss))))) - (put-text-property match-pos (1+ match-pos) 'syntax-table '(11)) - (let ((line-end (line-end-position))) - ;; Avoid putting properties past the end of the buffer. - ;; Otherwise we get an `args-out-of-range' error. 
- (unless (= line-end (1+ (buffer-size))) - (put-text-property line-end (1+ line-end) 'syntax-table '(12)))))))))) + (goto-char start) + (while (search-forward "#" end t) + (let ((match-pos (match-beginning 0))) + (when (and + ;; Is it the first non-white character on the line? + (eq match-pos (save-excursion (back-to-indentation) (point))) + (save-excursion + (goto-char (line-end-position 0)) + (or + ;; If we're continuting the previous line, it's not a + ;; comment. + (not (eq ?$ (char-before))) + ;; Except if the previous line is a comment as well, as the + ;; continuation dollar is ignored then. + (nth 4 (syntax-ppss))))) + (put-text-property match-pos (1+ match-pos) 'syntax-table '(11)) + (let ((line-end (line-end-position))) + ;; Avoid putting properties past the end of the buffer. + ;; Otherwise we get an `args-out-of-range' error. + (unless (= line-end (1+ (buffer-size))) + (put-text-property line-end (1+ line-end) 'syntax-table '(12))))))))) ;;;###autoload (define-derived-mode ninja-mode prog-mode "ninja" -- cgit v0.12 From 97a4fe200a60718ac0a03a23999462834dd9d3b4 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Mon, 1 Dec 2014 21:05:23 -0800 Subject: win: Let the "Pause" key or Ctrl-S pause output. In cmd.exe, hitting the "Pause" key or Ctrl-S will pause programs until a key is pressed. This is apparently implemented when stdout is writing to, so use printf instead of Console functions to reset the cursor to the start of the line. Also happens to simplify the code. (This already worked in -v mode since that already prints using printf.) Based on a patch from gmisocpp@gmail.com! --- src/line_printer.cc | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/src/line_printer.cc b/src/line_printer.cc index 813f63e..8e7b8f1 100644 --- a/src/line_printer.cc +++ b/src/line_printer.cc @@ -50,22 +50,17 @@ void LinePrinter::Print(string to_print, LineType type) { return; } -#ifdef _WIN32 - CONSOLE_SCREEN_BUFFER_INFO csbi; - GetConsoleScreenBufferInfo(console_, &csbi); -#endif - if (smart_terminal_) { -#ifndef _WIN32 printf("\r"); // Print over previous line, if any. -#else - csbi.dwCursorPosition.X = 0; - SetConsoleCursorPosition(console_, csbi.dwCursorPosition); -#endif + // On Windows, calling a C library function writing to stdout also handles + // pausing the executable when the "Pause" key or Ctrl-S is pressed. } if (smart_terminal_ && type == ELIDE) { #ifdef _WIN32 + CONSOLE_SCREEN_BUFFER_INFO csbi; + GetConsoleScreenBufferInfo(console_, &csbi); + // Don't use the full width or console will move to next line. size_t width = static_cast(csbi.dwSize.X) - 1; to_print = ElideMiddle(to_print, width); -- cgit v0.12 From e28b1aaf5d379211d290cabb5d78c8bf24c6085b Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Mon, 1 Dec 2014 22:14:25 -0800 Subject: win/lineprinter: Use a vector instead of manual memory management. No behavior change. Based on a patch from gmisocpp@gmail.com! 
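For readers following this change: the diff below swaps a manually managed CHAR_INFO array for a std::vector. A minimal, self-contained sketch of that general pattern (illustration only, not Ninja's code; the buffer type, function name, and sizes here are made up) looks like this:

#include <cstdio>
#include <string>
#include <vector>

// Fill a fixed-width line buffer and write it out. With manual management the
// buffer would be new[]'d, memset to blanks, and delete[]'d by hand on every
// exit path; the vector owns the storage and blank-fills it on construction.
static void PrintPadded(const std::string& text, size_t width) {
  std::vector<char> buf(width, ' ');
  for (size_t i = 0; i < width && i < text.size(); ++i)
    buf[i] = text[i];
  fwrite(&buf[0], 1, buf.size(), stdout);
  fputc('\n', stdout);
}  // buf's memory is released here automatically, even on early return.

int main() {
  PrintPadded("hello", 16);
  return 0;
}

The vector value-initializes its elements and frees the memory when it goes out of scope, which is what lets the commit drop the memset and delete[] calls without changing behavior.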
--- src/line_printer.cc | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/line_printer.cc b/src/line_printer.cc index 8e7b8f1..e11bcc3 100644 --- a/src/line_printer.cc +++ b/src/line_printer.cc @@ -75,16 +75,12 @@ void LinePrinter::Print(string to_print, LineType type) { static_cast(csbi.dwCursorPosition.X + csbi.dwSize.X - 1), csbi.dwCursorPosition.Y }; - CHAR_INFO* char_data = new CHAR_INFO[csbi.dwSize.X]; - memset(char_data, 0, sizeof(CHAR_INFO) * csbi.dwSize.X); - for (int i = 0; i < csbi.dwSize.X; ++i) { - char_data[i].Char.AsciiChar = ' '; + vector char_data(csbi.dwSize.X); + for (size_t i = 0; i < static_cast(csbi.dwSize.X); ++i) { + char_data[i].Char.AsciiChar = i < to_print.size() ? to_print[i] : ' '; char_data[i].Attributes = csbi.wAttributes; } - for (size_t i = 0; i < to_print.size(); ++i) - char_data[i].Char.AsciiChar = to_print[i]; - WriteConsoleOutput(console_, char_data, buf_size, zero_zero, &target); - delete[] char_data; + WriteConsoleOutput(console_, &char_data[0], buf_size, zero_zero, &target); #else // Limit output to width of the terminal if provided so we don't cause // line-wrapping. -- cgit v0.12 From f2a905b9350fa07e7ec91ab3d43a362938637205 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Mon, 1 Dec 2014 22:21:54 -0800 Subject: win: Use full console width for status messages. The original overprinting code, added in 7b3d8c8e, used printf for printing the status. printf needs one column for the cursor, so the status message could only take up `width - 1` columns. fc554c22 changed Windows from printf to WriteConsoleOutput which doesn't move the cursor, so keeping one column empty is no longer needed. So stop doing that. Also remove a duplicate call to GetConsoleScreenBufferInfo. --- src/line_printer.cc | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/line_printer.cc b/src/line_printer.cc index e11bcc3..2cd3e17 100644 --- a/src/line_printer.cc +++ b/src/line_printer.cc @@ -61,13 +61,10 @@ void LinePrinter::Print(string to_print, LineType type) { CONSOLE_SCREEN_BUFFER_INFO csbi; GetConsoleScreenBufferInfo(console_, &csbi); - // Don't use the full width or console will move to next line. - size_t width = static_cast(csbi.dwSize.X) - 1; - to_print = ElideMiddle(to_print, width); - // We don't want to have the cursor spamming back and forth, so - // use WriteConsoleOutput instead which updates the contents of - // the buffer, but doesn't move the cursor position. - GetConsoleScreenBufferInfo(console_, &csbi); + to_print = ElideMiddle(to_print, static_cast(csbi.dwSize.X)); + // We don't want to have the cursor spamming back and forth, so instead of + // printf use WriteConsoleOutput which updates the contents of the buffer, + // but doesn't move the cursor position. COORD buf_size = { csbi.dwSize.X, 1 }; COORD zero_zero = { 0, 0 }; SMALL_RECT target = { -- cgit v0.12 From 475918a5130f5eca18266f907adff1036f3199fc Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 3 Dec 2014 08:10:44 -0800 Subject: Make configure.py work with Python. Fixes issue #877. Patch from @TheOneRing! --- configure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.py b/configure.py index aac4ad4..3cd1e82 100755 --- a/configure.py +++ b/configure.py @@ -164,7 +164,7 @@ class Bootstrap: """Run a subcommand, quietly. 
Prints the full command on error.""" try: subprocess.check_call(cmdline, shell=True) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError: print('when running: ', cmdline) raise -- cgit v0.12 From 8605b3daa6c68b29e4126e86193acdcfaf7cc2f1 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 3 Dec 2014 18:04:26 -0800 Subject: Revert #223, fixes #874. No test since there's still no good way to test code in ninja.cc. Going forward, either move NinjaMain to its own file with tests, or create a system that makes it possible to run integration tests on the ninja binary. Instead, add a comment that explains why the restat optimization isn't done. --- src/ninja.cc | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/ninja.cc b/src/ninja.cc index 2c890c2..3e99782 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -241,12 +241,11 @@ bool NinjaMain::RebuildManifest(const char* input_file, string* err) { if (builder.AlreadyUpToDate()) return false; // Not an error, but we didn't rebuild. - if (!builder.Build(err)) - return false; - // The manifest was only rebuilt if it is now dirty (it may have been cleaned - // by a restat). - return node->dirty(); + // Even if the manifest was cleaned by a restat rule, claim that it was + // rebuilt. Not doing so can lead to crashes, see + // https://github.com/martine/ninja/issues/874 + return builder.Build(err); } Node* NinjaMain::CollectTarget(const char* cpath, string* err) { -- cgit v0.12 From 7802fb95558125a427c29d44fe6030122363de51 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Thu, 4 Dec 2014 09:04:15 -0800 Subject: Rename num_collisions to collision_count. All other counting variables are called foo_count, not num_foos. No behavior change. --- src/hash_collision_bench.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/hash_collision_bench.cc b/src/hash_collision_bench.cc index d0eabde..5be0531 100644 --- a/src/hash_collision_bench.cc +++ b/src/hash_collision_bench.cc @@ -46,7 +46,7 @@ int main() { sort(hashes, hashes + N); - int num_collisions = 0; + int collision_count = 0; for (int i = 1; i < N; ++i) { if (hashes[i - 1].first == hashes[i].first) { if (strcmp(commands[hashes[i - 1].second], @@ -54,9 +54,9 @@ int main() { printf("collision!\n string 1: '%s'\n string 2: '%s'\n", commands[hashes[i - 1].second], commands[hashes[i].second]); - num_collisions++; + collision_count++; } } } - printf("\n\n%d collisions after %d runs\n", num_collisions, N); + printf("\n\n%d collisions after %d runs\n", collision_count, N); } -- cgit v0.12 From 3d473e737c1140a55bd804b45df998be20a758cc Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Sun, 7 Dec 2014 14:42:12 -0800 Subject: remove two unneeded `explicit`s --- src/graph.cc | 2 +- src/state.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/graph.cc b/src/graph.cc index 2829669..af7a22d 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -217,7 +217,7 @@ bool Edge::AllInputsReady() const { struct EdgeEnv : public Env { enum EscapeKind { kShellEscape, kDoNotEscape }; - explicit EdgeEnv(Edge* edge, EscapeKind escape) + EdgeEnv(Edge* edge, EscapeKind escape) : edge_(edge), escape_in_out_(escape) {} virtual string LookupVariable(const string& var); diff --git a/src/state.h b/src/state.h index 8804532..311d740 100644 --- a/src/state.h +++ b/src/state.h @@ -37,8 +37,8 @@ struct Rule; /// the total scheduled weight diminishes enough (i.e. when a scheduled edge /// completes). 
struct Pool { - explicit Pool(const string& name, int depth) - : name_(name), current_use_(0), depth_(depth), delayed_(&WeightedEdgeCmp) { } + Pool(const string& name, int depth) + : name_(name), current_use_(0), depth_(depth), delayed_(&WeightedEdgeCmp) {} // A depth of 0 is infinite bool is_valid() const { return depth_ >= 0; } -- cgit v0.12 From a9dac57ecad0b8ff4822c39446e9bf35afc0a615 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Sun, 7 Dec 2014 14:49:49 -0800 Subject: Rename a few iterators. No behavior change. It confused me that the iterator iterating over `outputs_` was called `i` -- this always made me think of "input", not "iterator". Call iterators over edge outputs "o", iterators over edge inputs "i", iterators over node input edges "oe", and general iterators over edges "e". --- src/build.cc | 116 +++++++++++++++++++++++++++++------------------------------ src/graph.cc | 18 +++++----- 2 files changed, 67 insertions(+), 67 deletions(-) diff --git a/src/build.cc b/src/build.cc index 3e74131..ba2f93d 100644 --- a/src/build.cc +++ b/src/build.cc @@ -339,9 +339,9 @@ bool Plan::CheckDependencyCycle(Node* node, vector* stack, string* err) { Edge* Plan::FindWork() { if (ready_.empty()) return NULL; - set::iterator i = ready_.begin(); - Edge* edge = *i; - ready_.erase(i); + set::iterator e = ready_.begin(); + Edge* edge = *e; + ready_.erase(e); return edge; } @@ -368,39 +368,39 @@ void Plan::ResumeDelayedJobs(Edge* edge) { } void Plan::EdgeFinished(Edge* edge) { - map::iterator i = want_.find(edge); - assert(i != want_.end()); - if (i->second) + map::iterator e = want_.find(edge); + assert(e != want_.end()); + if (e->second) --wanted_edges_; - want_.erase(i); + want_.erase(e); edge->outputs_ready_ = true; // See if this job frees up any delayed jobs ResumeDelayedJobs(edge); // Check off any nodes we were waiting for with this edge. - for (vector::iterator i = edge->outputs_.begin(); - i != edge->outputs_.end(); ++i) { - NodeFinished(*i); + for (vector::iterator o = edge->outputs_.begin(); + o != edge->outputs_.end(); ++o) { + NodeFinished(*o); } } void Plan::NodeFinished(Node* node) { // See if we we want any edges from this node. - for (vector::const_iterator i = node->out_edges().begin(); - i != node->out_edges().end(); ++i) { - map::iterator want_i = want_.find(*i); - if (want_i == want_.end()) + for (vector::const_iterator oe = node->out_edges().begin(); + oe != node->out_edges().end(); ++oe) { + map::iterator want_e = want_.find(*oe); + if (want_e == want_.end()) continue; // See if the edge is now ready. - if ((*i)->AllInputsReady()) { - if (want_i->second) { - ScheduleWork(*i); + if ((*oe)->AllInputsReady()) { + if (want_e->second) { + ScheduleWork(*oe); } else { // We do not need to build this edge, but we might need to build one of // its dependents. - EdgeFinished(*i); + EdgeFinished(*oe); } } } @@ -409,42 +409,42 @@ void Plan::NodeFinished(Node* node) { void Plan::CleanNode(DependencyScan* scan, Node* node) { node->set_dirty(false); - for (vector::const_iterator ei = node->out_edges().begin(); - ei != node->out_edges().end(); ++ei) { + for (vector::const_iterator oe = node->out_edges().begin(); + oe != node->out_edges().end(); ++oe) { // Don't process edges that we don't actually want. - map::iterator want_i = want_.find(*ei); - if (want_i == want_.end() || !want_i->second) + map::iterator want_e = want_.find(*oe); + if (want_e == want_.end() || !want_e->second) continue; // Don't attempt to clean an edge if it failed to load deps. 
- if ((*ei)->deps_missing_) + if ((*oe)->deps_missing_) continue; // If all non-order-only inputs for this edge are now clean, // we might have changed the dirty state of the outputs. vector::iterator - begin = (*ei)->inputs_.begin(), - end = (*ei)->inputs_.end() - (*ei)->order_only_deps_; + begin = (*oe)->inputs_.begin(), + end = (*oe)->inputs_.end() - (*oe)->order_only_deps_; if (find_if(begin, end, mem_fun(&Node::dirty)) == end) { // Recompute most_recent_input. Node* most_recent_input = NULL; - for (vector::iterator ni = begin; ni != end; ++ni) { - if (!most_recent_input || (*ni)->mtime() > most_recent_input->mtime()) - most_recent_input = *ni; + for (vector::iterator i = begin; i != end; ++i) { + if (!most_recent_input || (*i)->mtime() > most_recent_input->mtime()) + most_recent_input = *i; } // Now, this edge is dirty if any of the outputs are dirty. // If the edge isn't dirty, clean the outputs and mark the edge as not // wanted. - if (!scan->RecomputeOutputsDirty(*ei, most_recent_input)) { - for (vector::iterator ni = (*ei)->outputs_.begin(); - ni != (*ei)->outputs_.end(); ++ni) { - CleanNode(scan, *ni); + if (!scan->RecomputeOutputsDirty(*oe, most_recent_input)) { + for (vector::iterator o = (*oe)->outputs_.begin(); + o != (*oe)->outputs_.end(); ++o) { + CleanNode(scan, *o); } - want_i->second = false; + want_e->second = false; --wanted_edges_; - if (!(*ei)->is_phony()) + if (!(*oe)->is_phony()) --command_edges_; } } @@ -453,10 +453,10 @@ void Plan::CleanNode(DependencyScan* scan, Node* node) { void Plan::Dump() { printf("pending: %d\n", (int)want_.size()); - for (map::iterator i = want_.begin(); i != want_.end(); ++i) { - if (i->second) + for (map::iterator e = want_.begin(); e != want_.end(); ++e) { + if (e->second) printf("want "); - i->first->Dump(); + e->first->Dump(); } printf("ready: %d\n", (int)ready_.size()); } @@ -477,9 +477,9 @@ struct RealCommandRunner : public CommandRunner { vector RealCommandRunner::GetActiveEdges() { vector edges; - for (map::iterator i = subproc_to_edge_.begin(); - i != subproc_to_edge_.end(); ++i) - edges.push_back(i->second); + for (map::iterator e = subproc_to_edge_.begin(); + e != subproc_to_edge_.end(); ++e) + edges.push_back(e->second); return edges; } @@ -516,9 +516,9 @@ bool RealCommandRunner::WaitForCommand(Result* result) { result->status = subproc->Finish(); result->output = subproc->GetOutput(); - map::iterator i = subproc_to_edge_.find(subproc); - result->edge = i->second; - subproc_to_edge_.erase(i); + map::iterator e = subproc_to_edge_.find(subproc); + result->edge = e->second; + subproc_to_edge_.erase(e); delete subproc; return true; @@ -541,11 +541,11 @@ void Builder::Cleanup() { vector active_edges = command_runner_->GetActiveEdges(); command_runner_->Abort(); - for (vector::iterator i = active_edges.begin(); - i != active_edges.end(); ++i) { - string depfile = (*i)->GetUnescapedDepfile(); - for (vector::iterator ni = (*i)->outputs_.begin(); - ni != (*i)->outputs_.end(); ++ni) { + for (vector::iterator e = active_edges.begin(); + e != active_edges.end(); ++e) { + string depfile = (*e)->GetUnescapedDepfile(); + for (vector::iterator o = (*e)->outputs_.begin(); + o != (*e)->outputs_.end(); ++o) { // Only delete this output if it was actually modified. This is // important for things like the generator where we don't want to // delete the manifest file if we can avoid it. 
But if the rule @@ -554,8 +554,8 @@ void Builder::Cleanup() { // mentioned in a depfile, and the command touches its depfile // but is interrupted before it touches its output file.) if (!depfile.empty() || - (*ni)->mtime() != disk_interface_->Stat((*ni)->path())) { - disk_interface_->RemoveFile((*ni)->path()); + (*o)->mtime() != disk_interface_->Stat((*o)->path())) { + disk_interface_->RemoveFile((*o)->path()); } } if (!depfile.empty()) @@ -690,9 +690,9 @@ bool Builder::StartEdge(Edge* edge, string* err) { // Create directories necessary for outputs. // XXX: this will block; do we care? - for (vector::iterator i = edge->outputs_.begin(); - i != edge->outputs_.end(); ++i) { - if (!disk_interface_->MakeDirs((*i)->path())) + for (vector::iterator o = edge->outputs_.begin(); + o != edge->outputs_.end(); ++o) { + if (!disk_interface_->MakeDirs((*o)->path())) return false; } @@ -752,14 +752,14 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) { if (edge->GetBindingBool("restat") && !config_.dry_run) { bool node_cleaned = false; - for (vector::iterator i = edge->outputs_.begin(); - i != edge->outputs_.end(); ++i) { - TimeStamp new_mtime = disk_interface_->Stat((*i)->path()); - if ((*i)->mtime() == new_mtime) { + for (vector::iterator o = edge->outputs_.begin(); + o != edge->outputs_.end(); ++o) { + TimeStamp new_mtime = disk_interface_->Stat((*o)->path()); + if ((*o)->mtime() == new_mtime) { // The rule command did not change the output. Propagate the clean // state through the build graph. // Note that this also applies to nonexistent outputs (mtime == 0). - plan_.CleanNode(&scan_, *i); + plan_.CleanNode(&scan_, *o); node_cleaned = true; } } diff --git a/src/graph.cc b/src/graph.cc index af7a22d..57f790c 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -112,11 +112,11 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { // Finally, visit each output to mark off that we've visited it, and update // their dirty state if necessary. - for (vector::iterator i = edge->outputs_.begin(); - i != edge->outputs_.end(); ++i) { - (*i)->StatIfNecessary(disk_interface_); + for (vector::iterator o = edge->outputs_.begin(); + o != edge->outputs_.end(); ++o) { + (*o)->StatIfNecessary(disk_interface_); if (dirty) - (*i)->MarkDirty(); + (*o)->MarkDirty(); } // If an edge is dirty, its outputs are normally not ready. (It's @@ -132,11 +132,11 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { bool DependencyScan::RecomputeOutputsDirty(Edge* edge, Node* most_recent_input) { - string command = edge->EvaluateCommand(true); - for (vector::iterator i = edge->outputs_.begin(); - i != edge->outputs_.end(); ++i) { - (*i)->StatIfNecessary(disk_interface_); - if (RecomputeOutputDirty(edge, most_recent_input, command, *i)) + string command = edge->EvaluateCommand(/*incl_rsp_file=*/true); + for (vector::iterator o = edge->outputs_.begin(); + o != edge->outputs_.end(); ++o) { + (*o)->StatIfNecessary(disk_interface_); + if (RecomputeOutputDirty(edge, most_recent_input, command, *o)) return true; } return false; -- cgit v0.12 From 015c818cbe85093316115c5a510274eccdb4180b Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Sun, 7 Dec 2014 16:02:35 -0800 Subject: Let DependencyScan::RecomputeDirty() work correclty with cyclic graphs. 
RecomputDirty(edge) currently works roughly like this: RecomputeDirty(edge): LoadDeps(edge) for in in edge.inputs: if in.StatIfNecessary(): RecomputeDirty(in.in_edge) # recurse into inputs for out in edge.outputs: out.StatIfNecessary() # mark outputs as visited It uses the stat state of each node to mark nodes as visited and doesn't traverse across nodes that have been visited already. For cyclic graphs with an edge with multiple outputs on the cycle, nothing prevents an edge to be visited more than once if the cycle is entered through an output that isn't on the cycle. In other words, RecomputeDirty() for the same edge can be on the call stack more than once. This is bad for at least two reasons: 1. Deps are added multiple times, making the graph confusing to reason about. 2. LoadDeps() will insert into the inputs_ of an edge that's iterated over in a callframe higher up. This can invalidate the iterator, which causes a crash when the callframe with the loop over the now-invalidated iterator resumes. To fix this, let RecomputeDirty() mark all outputs of an edge as visited as the first thing it does. This way, even if the edge is on a cycle with several outputs, each output is already marked and no edge will have its deps loaded more than once. Fixes the crashes in #875. (In practice, it turns the crashes into "stuck [this is a bug]" messages for now, due to the example without duplicate rules in #867) --- src/build.cc | 1 - src/graph.cc | 17 ++++++++++--- src/graph_test.cc | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 4 deletions(-) diff --git a/src/build.cc b/src/build.cc index ba2f93d..5d66f4b 100644 --- a/src/build.cc +++ b/src/build.cc @@ -576,7 +576,6 @@ Node* Builder::AddTarget(const string& name, string* err) { } bool Builder::AddTarget(Node* node, string* err) { - node->StatIfNecessary(disk_interface_); if (Edge* in_edge = node->in_edge()) { if (!scan_.RecomputeDirty(in_edge, err)) return false; diff --git a/src/graph.cc b/src/graph.cc index 57f790c..44eca3c 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -62,6 +62,19 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { edge->outputs_ready_ = true; edge->deps_missing_ = false; + // RecomputeDirty() recursively walks the graph following the input nodes + // of |edge| and the in_edges of these nodes. It uses the stat state of each + // node to mark nodes as visited and doesn't traverse across nodes that have + // been visited already. To make sure that every edge is visited only once + // (important because an edge's deps are loaded every time it's visited), mark + // all outputs of |edge| visited as a first step. This ensures that edges + // with multiple inputs and outputs are visited only once, even in cyclic + // graphs. + for (vector::iterator o = edge->outputs_.begin(); + o != edge->outputs_.end(); ++o) { + (*o)->StatIfNecessary(disk_interface_); + } + if (!dep_loader_.LoadDeps(edge, err)) { if (!err->empty()) return false; @@ -110,11 +123,9 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { if (!dirty) dirty = RecomputeOutputsDirty(edge, most_recent_input); - // Finally, visit each output to mark off that we've visited it, and update - // their dirty state if necessary. + // Finally, visit each output and update their dirty state if necessary. 
for (vector::iterator o = edge->outputs_.begin(); o != edge->outputs_.end(); ++o) { - (*o)->StatIfNecessary(disk_interface_); if (dirty) (*o)->MarkDirty(); } diff --git a/src/graph_test.cc b/src/graph_test.cc index 382d352..e41f5cc 100644 --- a/src/graph_test.cc +++ b/src/graph_test.cc @@ -252,6 +252,81 @@ TEST_F(GraphTest, NestedPhonyPrintsDone) { ASSERT_FALSE(plan_.more_to_do()); } +// Verify that cycles in graphs with multiple outputs are handled correctly +// in RecomputeDirty() and don't cause deps to be loaded multiple times. +TEST_F(GraphTest, CycleWithLengthZeroFromDepfile) { + AssertParse(&state_, +"rule deprule\n" +" depfile = dep.d\n" +" command = unused\n" +"build a b: deprule\n" + ); + fs_.Create("dep.d", "a: b\n"); + + string err; + Edge* edge = GetNode("a")->in_edge(); + EXPECT_TRUE(scan_.RecomputeDirty(edge, &err)); + ASSERT_EQ("", err); + + // Despite the depfile causing edge to be a cycle (it has outputs a and b, + // but the depfile also adds b as an input), the deps should have been loaded + // only once: + EXPECT_EQ(1, edge->inputs_.size()); + EXPECT_EQ("b", edge->inputs_[0]->path()); +} + +// Like CycleWithLengthZeroFromDepfile but with a higher cycle length. +TEST_F(GraphTest, CycleWithLengthOneFromDepfile) { + AssertParse(&state_, +"rule deprule\n" +" depfile = dep.d\n" +" command = unused\n" +"rule r\n" +" command = unused\n" +"build a b: deprule\n" +"build c: r b\n" + ); + fs_.Create("dep.d", "a: c\n"); + + string err; + Edge* edge = GetNode("a")->in_edge(); + EXPECT_TRUE(scan_.RecomputeDirty(edge, &err)); + ASSERT_EQ("", err); + + // Despite the depfile causing edge to be a cycle (|edge| has outputs a and b, + // but c's in_edge has b as input but the depfile also adds |edge| as + // output)), the deps should have been loaded only once: + EXPECT_EQ(1, edge->inputs_.size()); + EXPECT_EQ("c", edge->inputs_[0]->path()); +} + +// Like CycleWithLengthOneFromDepfile but building a node one hop away from +// the cycle. +TEST_F(GraphTest, CycleWithLengthOneFromDepfileOneHopAway) { + AssertParse(&state_, +"rule deprule\n" +" depfile = dep.d\n" +" command = unused\n" +"rule r\n" +" command = unused\n" +"build a b: deprule\n" +"build c: r b\n" +"build d: r a\n" + ); + fs_.Create("dep.d", "a: c\n"); + + string err; + EXPECT_TRUE(scan_.RecomputeDirty(GetNode("d")->in_edge(), &err)); + ASSERT_EQ("", err); + + // Despite the depfile causing edge to be a cycle (|edge| has outputs a and b, + // but c's in_edge has b as input but the depfile also adds |edge| as + // output)), the deps should have been loaded only once: + Edge* edge = GetNode("a")->in_edge(); + EXPECT_EQ(1, edge->inputs_.size()); + EXPECT_EQ("c", edge->inputs_[0]->path()); +} + #ifdef _WIN32 TEST_F(GraphTest, Decanonicalize) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, -- cgit v0.12 From 54d82356d8317a62f076eefdbe1ab62c88d1e6a6 Mon Sep 17 00:00:00 2001 From: Ben Boeckel Date: Wed, 10 Dec 2014 23:09:38 -0500 Subject: configure: add a verbose mode Required for Fedora infrastructure so that the commands used to build ninja are logged. --- configure.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/configure.py b/configure.py index 3cd1e82..0f34e77 100755 --- a/configure.py +++ b/configure.py @@ -106,8 +106,9 @@ class Bootstrap: It also proxies all calls to an underlying ninja_syntax.Writer, to behave like non-bootstrap mode. 
""" - def __init__(self, writer): + def __init__(self, writer, verbose=False): self.writer = writer + self.verbose = verbose # Map of variable name => expanded variable value. self.vars = {} # Map of rule name => dict of rule attributes. @@ -163,6 +164,8 @@ class Bootstrap: def _run_command(self, cmdline): """Run a subcommand, quietly. Prints the full command on error.""" try: + if self.verbose: + print(cmdline) subprocess.check_call(cmdline, shell=True) except subprocess.CalledProcessError: print('when running: ', cmdline) @@ -173,6 +176,8 @@ parser = OptionParser() profilers = ['gmon', 'pprof'] parser.add_option('--bootstrap', action='store_true', help='bootstrap a ninja binary from nothing') +parser.add_option('--verbose', action='store_true', + help='enable verbose build') parser.add_option('--platform', help='target platform (' + '/'.join(Platform.known_platforms()) + ')', @@ -217,7 +222,7 @@ if options.bootstrap: # Wrap ninja_writer with the Bootstrapper, which also executes the # commands. print('bootstrapping ninja...') - n = Bootstrap(n) + n = Bootstrap(n, verbose=options.verbose) n.comment('This file is used to build ninja itself.') n.comment('It is generated by ' + os.path.basename(__file__) + '.') @@ -602,6 +607,10 @@ n.build('all', 'phony', all_targets) n.close() print('wrote %s.' % BUILD_FILENAME) +verbose = '' +if options.verbose: + verbose = ' -v' + if options.bootstrap: print('bootstrap complete. rebuilding...') if platform.is_windows(): @@ -609,6 +618,6 @@ if options.bootstrap: if os.path.exists(bootstrap_exe): os.unlink(bootstrap_exe) os.rename('ninja.exe', bootstrap_exe) - subprocess.check_call('ninja.bootstrap.exe', shell=True) + subprocess.check_call('ninja.bootstrap.exe%s' % verbose, shell=True) else: - subprocess.check_call('./ninja', shell=True) + subprocess.check_call('./ninja%s' % verbose, shell=True) -- cgit v0.12 From 2d4a637c3010e1c8726967c9072cf73619fcd47d Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Thu, 18 Dec 2014 11:04:14 -0800 Subject: Remove 'Recompacting...' messages. Recompacting the build log used to be slow, so it made sense to print this message. We then made recompaction much faster, but didn't remove this message back then. The deps log only has it because the build log had it. Since both steps are effectively instant in practice, remove these log messages. --- src/build_log.cc | 4 +--- src/build_log.h | 2 -- src/build_log_test.cc | 1 - src/deps_log.cc | 2 -- src/deps_log.h | 4 +--- src/deps_log_test.cc | 2 -- 6 files changed, 2 insertions(+), 13 deletions(-) diff --git a/src/build_log.cc b/src/build_log.cc index b6f9874..281b851 100644 --- a/src/build_log.cc +++ b/src/build_log.cc @@ -102,7 +102,7 @@ BuildLog::LogEntry::LogEntry(const string& output, uint64_t command_hash, {} BuildLog::BuildLog() - : log_file_(NULL), needs_recompaction_(false), quiet_(false) {} + : log_file_(NULL), needs_recompaction_(false) {} BuildLog::~BuildLog() { Close(); @@ -354,8 +354,6 @@ bool BuildLog::WriteEntry(FILE* f, const LogEntry& entry) { bool BuildLog::Recompact(const string& path, const BuildLogUser& user, string* err) { METRIC_RECORD(".ninja_log recompact"); - if (!quiet_) - printf("Recompacting log...\n"); Close(); string temp_path = path + ".recompact"; diff --git a/src/build_log.h b/src/build_log.h index db0cfe0..fe81a85 100644 --- a/src/build_log.h +++ b/src/build_log.h @@ -80,7 +80,6 @@ struct BuildLog { /// Rewrite the known log entries, throwing away old data. 
bool Recompact(const string& path, const BuildLogUser& user, string* err); - void set_quiet(bool quiet) { quiet_ = quiet; } typedef ExternalStringHashMap::Type Entries; const Entries& entries() const { return entries_; } @@ -89,7 +88,6 @@ struct BuildLog { Entries entries_; FILE* log_file_; bool needs_recompaction_; - bool quiet_; }; #endif // NINJA_BUILD_LOG_H_ diff --git a/src/build_log_test.cc b/src/build_log_test.cc index 7ea2117..2c41ba6 100644 --- a/src/build_log_test.cc +++ b/src/build_log_test.cc @@ -290,7 +290,6 @@ TEST_F(BuildLogRecompactTest, Recompact) { ASSERT_TRUE(log2.LookupByOutput("out")); ASSERT_TRUE(log2.LookupByOutput("out2")); // ...and force a recompaction. - log2.set_quiet(true); EXPECT_TRUE(log2.OpenForWrite(kTestFilename, *this, &err)); log2.Close(); diff --git a/src/deps_log.cc b/src/deps_log.cc index fc46497..aa8eb23 100644 --- a/src/deps_log.cc +++ b/src/deps_log.cc @@ -307,8 +307,6 @@ DepsLog::Deps* DepsLog::GetDeps(Node* node) { bool DepsLog::Recompact(const string& path, string* err) { METRIC_RECORD(".ninja_deps recompact"); - if (!quiet_) - printf("Recompacting deps...\n"); Close(); string temp_path = path + ".recompact"; diff --git a/src/deps_log.h b/src/deps_log.h index 9b81bc1..cec0257 100644 --- a/src/deps_log.h +++ b/src/deps_log.h @@ -64,7 +64,7 @@ struct State; /// wins, allowing updates to just be appended to the file. A separate /// repacking step can run occasionally to remove dead records. struct DepsLog { - DepsLog() : needs_recompaction_(false), quiet_(false), file_(NULL) {} + DepsLog() : needs_recompaction_(false), file_(NULL) {} ~DepsLog(); // Writing (build-time) interface. @@ -87,7 +87,6 @@ struct DepsLog { /// Rewrite the known log entries, throwing away old data. bool Recompact(const string& path, string* err); - void set_quiet(bool quiet) { quiet_ = quiet; } /// Returns if the deps entry for a node is still reachable from the manifest. /// @@ -109,7 +108,6 @@ struct DepsLog { bool RecordId(Node* node); bool needs_recompaction_; - bool quiet_; FILE* file_; /// Maps id -> Node. diff --git a/src/deps_log_test.cc b/src/deps_log_test.cc index ac2b315..cab06fb 100644 --- a/src/deps_log_test.cc +++ b/src/deps_log_test.cc @@ -252,7 +252,6 @@ TEST_F(DepsLogTest, Recompact) { ASSERT_EQ("foo.h", deps->nodes[0]->path()); ASSERT_EQ("baz.h", deps->nodes[1]->path()); - log.set_quiet(true); ASSERT_TRUE(log.Recompact(kTestFilename, &err)); // The in-memory deps graph should still be valid after recompaction. @@ -302,7 +301,6 @@ TEST_F(DepsLogTest, Recompact) { ASSERT_EQ("foo.h", deps->nodes[0]->path()); ASSERT_EQ("baz.h", deps->nodes[1]->path()); - log.set_quiet(true); ASSERT_TRUE(log.Recompact(kTestFilename, &err)); // The previous entries should have been removed. -- cgit v0.12 From d130968ecd5224fd86ce356086adf47bfe0b40ee Mon Sep 17 00:00:00 2001 From: Fraser Cormack Date: Sat, 20 Dec 2014 00:12:52 +0000 Subject: zsh-completion: remove use of 'head' with negative offset Some systems - like OSX - don't come with a version of head that supports a negative value for the -n flag. Such systems get a message such as this when tab-completing ninja's -d flag: ninja -dhead: illegal line count -- -1 Using sed instead should be more universally supported. 
--- misc/zsh-completion | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/zsh-completion b/misc/zsh-completion index af24f89..fd9b3a7 100644 --- a/misc/zsh-completion +++ b/misc/zsh-completion @@ -32,7 +32,7 @@ __get_tools() { } __get_modes() { - ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | head -n -1 + ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | sed '$d' } __modes() { -- cgit v0.12 From d1e6a29fd30337e86c346208661c00233fd223f7 Mon Sep 17 00:00:00 2001 From: Beren Minor Date: Wed, 31 Dec 2014 12:17:21 +0100 Subject: Fix compilation errors on Visual Studio 2015 (_MSC_VER 1900). --- configure.py | 2 ++ src/deps_log.cc | 4 ++-- src/hash_map.h | 21 +++++++++++++++++++-- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/configure.py b/configure.py index 0f34e77..fe71372 100755 --- a/configure.py +++ b/configure.py @@ -284,6 +284,8 @@ if platform.is_msvc(): '/wd4512', '/wd4800', '/wd4702', '/wd4819', # Disable warnings about passing "this" during initialization. '/wd4355', + # Disable warnings about ignored typedef in DbgHelp.h + '/wd4091', '/GR-', # Disable RTTI. # Disable size_t -> int truncation warning. # We never have strings or arrays larger than 2**31. diff --git a/src/deps_log.cc b/src/deps_log.cc index aa8eb23..bd47ebc 100644 --- a/src/deps_log.cc +++ b/src/deps_log.cc @@ -239,13 +239,13 @@ bool DepsLog::Load(const string& path, State* state, string* err) { if (buf[path_size - 1] == '\0') --path_size; if (buf[path_size - 1] == '\0') --path_size; if (buf[path_size - 1] == '\0') --path_size; - StringPiece path(buf, path_size); + StringPiece subpath(buf, path_size); // It is not necessary to pass in a correct slash_bits here. It will // either be a Node that's in the manifest (in which case it will already // have a correct slash_bits that GetNode will look up), or it is an // implicit dependency from a .d which does not affect the build command // (and so need not have its slashes maintained). - Node* node = state->GetNode(path, 0); + Node* node = state->GetNode(subpath, 0); // Check that the expected index matches the actual index. This can only // happen if two ninja processes write to the same deps log concurrently. diff --git a/src/hash_map.h b/src/hash_map.h index 77e7586..9b98ca8 100644 --- a/src/hash_map.h +++ b/src/hash_map.h @@ -50,7 +50,22 @@ unsigned int MurmurHash2(const void* key, size_t len) { return h; } -#ifdef _MSC_VER +#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) +#include + +namespace std { +template<> +struct hash { + typedef StringPiece argument_type; + typedef std::size_t result_type; + + result_type operator()(argument_type const& s) const { + return MurmurHash2(s.str_, s.len_); + } +}; +} + +#elif defined(_MSC_VER) #include using stdext::hash_map; @@ -102,7 +117,9 @@ struct hash { /// mapping StringPiece => Foo*. template struct ExternalStringHashMap { -#ifdef _MSC_VER +#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) + typedef std::unordered_map Type; +#elif defined(_MSC_VER) typedef hash_map Type; #else typedef hash_map Type; -- cgit v0.12 From fd688d1df388906c02d4b0f1980750748f2b45fc Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Sat, 3 Jan 2015 17:58:15 -0800 Subject: Remove unused hash. ExternalStringHashMap used to store std::strings long ago. Since it doesn't anymore, this specialization isn't needed. No behavior change. 
--- src/hash_map.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/hash_map.h b/src/hash_map.h index 9b98ca8..b57a631 100644 --- a/src/hash_map.h +++ b/src/hash_map.h @@ -88,26 +88,17 @@ struct StringPieceCmp : public hash_compare { }; #else - #include using __gnu_cxx::hash_map; namespace __gnu_cxx { template<> -struct hash { - size_t operator()(const std::string& s) const { - return hash()(s.c_str()); - } -}; - -template<> struct hash { size_t operator()(StringPiece key) const { return MurmurHash2(key.str_, key.len_); } }; - } #endif -- cgit v0.12 From 88379d989d2a57cd46b2389751590b3ddc4ebef2 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Sat, 3 Jan 2015 18:50:32 -0800 Subject: Try to simplify d1e6a29 a bit. --- src/hash_map.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/hash_map.h b/src/hash_map.h index b57a631..abdba92 100644 --- a/src/hash_map.h +++ b/src/hash_map.h @@ -57,10 +57,10 @@ namespace std { template<> struct hash { typedef StringPiece argument_type; - typedef std::size_t result_type; + typedef size_t result_type; - result_type operator()(argument_type const& s) const { - return MurmurHash2(s.str_, s.len_); + size_t operator()(StringPiece key) const { + return MurmurHash2(key.str_, key.len_); } }; } -- cgit v0.12 From 2908829b7c129b6d6f1c04e65893647d2e0e8b36 Mon Sep 17 00:00:00 2001 From: Thiago Farina Date: Sat, 10 Jan 2015 20:00:43 -0200 Subject: Cleanup: Fix 'hasIdent' variable name/style. Seems more correct to name it has_indent_token and to use the unix_hacker style. Signed-off-by: Thiago Farina --- src/manifest_parser.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index 388b5bc..7ee990b 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -298,16 +298,16 @@ bool ManifestParser::ParseEdge(string* err) { return false; // Bindings on edges are rare, so allocate per-edge envs only when needed. - bool hasIdent = lexer_.PeekToken(Lexer::INDENT); - BindingEnv* env = hasIdent ? new BindingEnv(env_) : env_; - while (hasIdent) { + bool has_indent_token = lexer_.PeekToken(Lexer::INDENT); + BindingEnv* env = has_indent_token ? new BindingEnv(env_) : env_; + while (has_indent_token) { string key; EvalString val; if (!ParseLet(&key, &val, err)) return false; env->AddBinding(key, val.Evaluate(env_)); - hasIdent = lexer_.PeekToken(Lexer::INDENT); + has_indent_token = lexer_.PeekToken(Lexer::INDENT); } Edge* edge = state_->AddEdge(rule); -- cgit v0.12 From f2bfbda312ac92de3656ff3ae284a7ec82e59e74 Mon Sep 17 00:00:00 2001 From: Taiju Tsuiki Date: Sat, 10 Jan 2015 07:19:11 +0900 Subject: Check pending SIGINT after ppoll/pselect ppoll/pselect prioritizes file descriptor events over a signal delivery. So a flood of events prevents ninja from reacting keyboard interruption by the user. This CL adds a check for pending keyboard interruptions after file descriptor events. 
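For context, a minimal, self-contained sketch of the loop shape this fix targets (illustration only, not Ninja's actual SubprocessSet code; the stdin reading and names are made up): SIGINT stays blocked except while pselect() sleeps, so an interrupt that arrives outside the wait only becomes pending, and the loop must ask for it explicitly after servicing file-descriptor events.

#include <signal.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

static volatile sig_atomic_t interrupted = 0;
static void OnInterrupt(int) { interrupted = 1; }

int main() {
  struct sigaction sa = {};
  sa.sa_handler = OnInterrupt;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGINT, &sa, NULL);

  sigset_t blocked, during_wait;
  sigemptyset(&blocked);
  sigaddset(&blocked, SIGINT);
  sigprocmask(SIG_BLOCK, &blocked, NULL);  // outside pselect, SIGINT only becomes pending
  sigemptyset(&during_wait);               // inside pselect, SIGINT is delivered normally

  while (!interrupted) {
    fd_set rfds;
    FD_ZERO(&rfds);
    FD_SET(STDIN_FILENO, &rfds);
    if (pselect(STDIN_FILENO + 1, &rfds, NULL, NULL, NULL, &during_wait) == -1)
      break;  // the handler ran while we were waiting

    // The fix: fd readiness is reported first, so explicitly check whether a
    // SIGINT arrived while it was blocked and is still pending.
    sigset_t pending;
    sigemptyset(&pending);
    if (sigpending(&pending) == 0 && sigismember(&pending, SIGINT))
      break;

    char buf[256];
    if (read(STDIN_FILENO, buf, sizeof(buf)) <= 0)
      break;  // EOF or error on the watched fd
  }
  printf("interrupted or done; exiting\n");
  return 0;
}

Without the sigpending() check, a steady stream of readable descriptors keeps pselect() returning successfully and the pending SIGINT is never noticed, which is the starvation the commit message describes.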
--- src/subprocess-posix.cc | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/subprocess-posix.cc b/src/subprocess-posix.cc index 743e406..40c9ae1 100644 --- a/src/subprocess-posix.cc +++ b/src/subprocess-posix.cc @@ -25,9 +25,24 @@ #include "util.h" +namespace { + +bool HasPendingInterruption() { + sigset_t pending; + sigemptyset(&pending); + if (sigpending(&pending) == -1) { + perror("ninja: sigpending"); + return false; + } + return sigismember(&pending, SIGINT); +} + +} // anonymous namespace + Subprocess::Subprocess(bool use_console) : fd_(-1), pid_(-1), use_console_(use_console) { } + Subprocess::~Subprocess() { if (fd_ >= 0) close(fd_); @@ -209,6 +224,9 @@ bool SubprocessSet::DoWork() { return interrupted_; } + if (HasPendingInterruption()) + return true; + nfds_t cur_nfd = 0; for (vector::iterator i = running_.begin(); i != running_.end(); ) { @@ -256,6 +274,9 @@ bool SubprocessSet::DoWork() { return interrupted_; } + if (HasPendingInterruption()) + return true; + for (vector::iterator i = running_.begin(); i != running_.end(); ) { int fd = (*i)->fd_; -- cgit v0.12 From a87516edd643e15d3a2a51048e6b507683f63ea7 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Mon, 19 Jan 2015 21:20:15 -0800 Subject: Remove an incorrect assert. The assert fires on cyclic manifests (found by afl-fuzz). Since there was explicit error handing for this case already, just remove the assert. --- src/manifest_parser_test.cc | 10 ++++++++++ src/state.cc | 1 - 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc index 6909ea9..a8f2e53 100644 --- a/src/manifest_parser_test.cc +++ b/src/manifest_parser_test.cc @@ -891,6 +891,16 @@ TEST_F(ParserTest, DefaultDefault) { EXPECT_EQ("", err); } +TEST_F(ParserTest, DefaultDefaultCycle) { + ASSERT_NO_FATAL_FAILURE(AssertParse( +"rule cat\n command = cat $in > $out\n" +"build a: cat a\n")); + + string err; + EXPECT_EQ(0u, state.DefaultNodes(&err).size()); + EXPECT_EQ("could not determine root nodes of build graph", err); +} + TEST_F(ParserTest, DefaultStatements) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n command = cat $in > $out\n" diff --git a/src/state.cc b/src/state.cc index 6e3e10d..1ceda45 100644 --- a/src/state.cc +++ b/src/state.cc @@ -187,7 +187,6 @@ vector State::RootNodes(string* err) { if (!edges_.empty() && root_nodes.empty()) *err = "could not determine root nodes of build graph"; - assert(edges_.empty() || !root_nodes.empty()); return root_nodes; } -- cgit v0.12 From 4e6f7e16c31f398828fcd417b45ea6c722f74198 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Tue, 20 Jan 2015 17:39:27 -0800 Subject: Document how to run gcov on ninja. --- HACKING.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/HACKING.md b/HACKING.md index 059a424..e3940ff 100644 --- a/HACKING.md +++ b/HACKING.md @@ -161,3 +161,19 @@ The trick is to install just the compilers, and not all of Visual Studio, by following [these instructions][win7sdk]. [win7sdk]: http://www.kegel.com/wine/cl-howto-win7sdk.html + +### Using gcov + +Do a clean debug build with the right flags: + + CFLAGS=-coverage LDFLAGS=-coverage ./configure.py --debug + ninja -t clean ninja_test && ninja ninja_test + +Run the test binary to generate `.gcda` and `.gcno` files in the build +directory, then run gcov on the .o files to generate `.gcov` files in the +root directory: + + ./ninja_test + gcov build/*.o + +Look at the generated `.gcov` files directly, or use your favorit gcov viewer. 
-- cgit v0.12 From 7b34ee99cdd350511942c2d84fc36ce1696e1686 Mon Sep 17 00:00:00 2001 From: Julien Tinnes Date: Thu, 29 Jan 2015 11:33:35 -0800 Subject: POSIX: detach background subprocesses from terminal. Put background subprocesses (i.e. subprocesses with no access to the console) in their own session and detach them from the terminal. This fixes martine/ninja#909. --- src/subprocess-posix.cc | 7 +++++-- src/subprocess_test.cc | 31 ++++++++++++++++++++++++++++--- 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/src/subprocess-posix.cc b/src/subprocess-posix.cc index 40c9ae1..cc8bff6 100644 --- a/src/subprocess-posix.cc +++ b/src/subprocess-posix.cc @@ -80,8 +80,11 @@ bool Subprocess::Start(SubprocessSet* set, const string& command) { break; if (!use_console_) { - // Put the child in its own process group, so ctrl-c won't reach it. - if (setpgid(0, 0) < 0) + // Put the child in its own session and process group. It will be + // detached from the current terminal and ctrl-c won't reach it. + // Since this process was just forked, it is not a process group leader + // and setsid() will succeed. + if (setsid() < 0) break; // Open /dev/null over stdin. diff --git a/src/subprocess_test.cc b/src/subprocess_test.cc index 8a0787c..76f5c6c 100644 --- a/src/subprocess_test.cc +++ b/src/subprocess_test.cc @@ -16,6 +16,8 @@ #include "test.h" +#include + #ifndef _WIN32 // SetWithLots need setrlimit. #include @@ -96,12 +98,23 @@ TEST_F(SubprocessTest, InterruptParent) { ASSERT_FALSE("We should have been interrupted"); } +// A shell command to check if the current process is connected to a terminal. +// This is different from having stdin/stdout/stderr be a terminal. (For +// instance consider the command "yes < /dev/null > /dev/null 2>&1". +// As "ps" will confirm, "yes" could still be connected to a terminal, despite +// not having any of the standard file descriptors be a terminal. +static const char kIsConnectedToTerminal[] = "tty < /dev/tty > /dev/null"; + TEST_F(SubprocessTest, Console) { // Skip test if we don't have the console ourselves. if (isatty(0) && isatty(1) && isatty(2)) { - Subprocess* subproc = subprocs_.Add("test -t 0 -a -t 1 -a -t 2", - /*use_console=*/true); - ASSERT_NE((Subprocess *) 0, subproc); + // Test that stdin, stdout and stderr are a terminal. + // Also check that the current process is connected to a terminal. + Subprocess* subproc = + subprocs_.Add(std::string("test -t 0 -a -t 1 -a -t 2 && ") + + std::string(kIsConnectedToTerminal), + /*use_console=*/true); + ASSERT_NE((Subprocess*)0, subproc); while (!subproc->Done()) { subprocs_.DoWork(); @@ -111,6 +124,18 @@ TEST_F(SubprocessTest, Console) { } } +TEST_F(SubprocessTest, NoConsole) { + Subprocess* subproc = + subprocs_.Add(kIsConnectedToTerminal, /*use_console=*/false); + ASSERT_NE((Subprocess*)0, subproc); + + while (!subproc->Done()) { + subprocs_.DoWork(); + } + + EXPECT_NE(ExitSuccess, subproc->Finish()); +} + #endif TEST_F(SubprocessTest, SetWithSingle) { -- cgit v0.12 From 5fbfa4ba72883cc0ecd199d17a70da3698be0402 Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Mon, 26 Jan 2015 13:52:20 -0800 Subject: Allow manifest rebuild to loop up to 100 times Ninja generators that bootstrap themselves with Ninja may need to rebuild build.ninja multiple times. Replace the 2 cycle loop with a 100 cycle loop, and print the pass number each time it restarts. 
Original-author: Jamie Gennis --- src/ninja.cc | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/ninja.cc b/src/ninja.cc index 3e99782..48c7239 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -1061,9 +1061,9 @@ int real_main(int argc, char** argv) { return (ninja.*options.tool->func)(argc, argv); } - // The build can take up to 2 passes: one to rebuild the manifest, then - // another to build the desired target. - for (int cycle = 0; cycle < 2; ++cycle) { + // Limit number of rebuilds, to prevent infinite loops. + const int kCycleLimit = 100; + for (int cycle = 1; cycle <= kCycleLimit; ++cycle) { NinjaMain ninja(ninja_command, config); RealFileReader file_reader; @@ -1086,16 +1086,13 @@ int real_main(int argc, char** argv) { if (options.tool && options.tool->when == Tool::RUN_AFTER_LOGS) return (ninja.*options.tool->func)(argc, argv); - // The first time through, attempt to rebuild the manifest before - // building anything else. - if (cycle == 0) { - if (ninja.RebuildManifest(options.input_file, &err)) { - // Start the build over with the new manifest. - continue; - } else if (!err.empty()) { - Error("rebuilding '%s': %s", options.input_file, err.c_str()); - return 1; - } + // Attempt to rebuild the manifest before building anything else + if (ninja.RebuildManifest(options.input_file, &err)) { + // Start the build over with the new manifest. + continue; + } else if (!err.empty()) { + Error("rebuilding '%s': %s", options.input_file, err.c_str()); + return 1; } int result = ninja.RunBuild(argc, argv); @@ -1104,7 +1101,9 @@ int real_main(int argc, char** argv) { return result; } - return 1; // Shouldn't be reached. + Error("manifest '%s' still dirty after %d tries\n", + options.input_file, kCycleLimit); + return 1; } } // anonymous namespace -- cgit v0.12 From 6ffb66d171a6962c2a7dad0492dffc42e65d666e Mon Sep 17 00:00:00 2001 From: tzik Date: Fri, 6 Feb 2015 19:45:42 +0900 Subject: Typo fix in graph.cc --- src/graph.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/graph.cc b/src/graph.cc index 44eca3c..6b977eb 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -450,7 +450,7 @@ bool ImplicitDepLoader::LoadDepsFromLog(Edge* edge, string* err) { // Deps are invalid if the output is newer than the deps. if (output->mtime() > deps->mtime) { - EXPLAIN("stored deps info out of date for for '%s' (%d vs %d)", + EXPLAIN("stored deps info out of date for '%s' (%d vs %d)", output->path().c_str(), deps->mtime, output->mtime()); return false; } -- cgit v0.12 From de57020bd1f2bf4781c14375ed1717a44cac8016 Mon Sep 17 00:00:00 2001 From: Pino Toscano Date: Sat, 28 Feb 2015 11:04:09 +0100 Subject: subprocess_test: gracefully handle rlim.rlim_cur < kNumProcs Instead of expecting that the number of open files is well above kNumProcs, simply "skip" the test in that case, still printing the message about the test limit (adding the current system limit too). --- src/subprocess_test.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/subprocess_test.cc b/src/subprocess_test.cc index 8a0787c..7759863 100644 --- a/src/subprocess_test.cc +++ b/src/subprocess_test.cc @@ -177,8 +177,8 @@ TEST_F(SubprocessTest, SetWithLots) { // Make sure [ulimit -n] isn't going to stop us from working. 
rlimit rlim; ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &rlim)); - if (!EXPECT_GT(rlim.rlim_cur, kNumProcs)) { - printf("Raise [ulimit -n] well above %u to make this test go\n", kNumProcs); + if (rlim.rlim_cur < kNumProcs) { + printf("Raise [ulimit -n] well above %u (currently %lu) to make this test go\n", kNumProcs, rlim.rlim_cur); return; } -- cgit v0.12 From f8f293730de2e12f6575c5d890a16504340f75fe Mon Sep 17 00:00:00 2001 From: Mohamed Bamakhrama Date: Mon, 2 Mar 2015 00:57:33 +0100 Subject: Allow scoping rules through subninja Ninja didn't support scoping rules through subninja and assumed a unique rule name in the whole namespace. With this change, this behavior is changed to allow scoping rules. Two rules can have the same name if they belong to two different scopes. However, two rules can NOT have the same name in the same scope. --- src/clean.cc | 4 ++-- src/eval_env.cc | 51 +++++++++++++++++++++++++++++++++++++++++++++ src/eval_env.h | 28 +++++++++++++++++++++++++ src/graph.cc | 29 +++++--------------------- src/graph.h | 21 ------------------- src/manifest_parser.cc | 6 +++--- src/manifest_parser_test.cc | 28 +++++++++++-------------- src/state.cc | 14 +------------ src/state.h | 8 +------ src/state_test.cc | 2 +- 10 files changed, 104 insertions(+), 87 deletions(-) diff --git a/src/clean.cc b/src/clean.cc index 98c638c..7b044c5 100644 --- a/src/clean.cc +++ b/src/clean.cc @@ -220,7 +220,7 @@ int Cleaner::CleanRule(const char* rule) { assert(rule); Reset(); - const Rule* r = state_->LookupRule(rule); + const Rule* r = state_->bindings_.LookupRule(rule); if (r) { CleanRule(r); } else { @@ -237,7 +237,7 @@ int Cleaner::CleanRules(int rule_count, char* rules[]) { PrintHeader(); for (int i = 0; i < rule_count; ++i) { const char* rule_name = rules[i]; - const Rule* rule = state_->LookupRule(rule_name); + const Rule* rule = state_->bindings_.LookupRule(rule_name); if (rule) { if (IsVerbose()) printf("Rule %s\n", rule_name); diff --git a/src/eval_env.cc b/src/eval_env.cc index 834b7e1..8f5c8ee 100644 --- a/src/eval_env.cc +++ b/src/eval_env.cc @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#include + #include "eval_env.h" string BindingEnv::LookupVariable(const string& var) { @@ -27,6 +29,55 @@ void BindingEnv::AddBinding(const string& key, const string& val) { bindings_[key] = val; } +void BindingEnv::AddRule(const Rule* rule) { + assert(LookupRule(rule->name()) == NULL); + rules_[rule->name()] = rule; +} + +const Rule* BindingEnv::LookupRuleCurrentScope(const string& rule_name) { + map::iterator i = rules_.find(rule_name); + if (i == rules_.end()) + return NULL; + return i->second; +} + +const Rule* BindingEnv::LookupRule(const string& rule_name) { + map::iterator i = rules_.find(rule_name); + if (i != rules_.end()) + return i->second; + if (parent_) + return parent_->LookupRule(rule_name); + return NULL; +} + +void Rule::AddBinding(const string& key, const EvalString& val) { + bindings_[key] = val; +} + +const EvalString* Rule::GetBinding(const string& key) const { + map::const_iterator i = bindings_.find(key); + if (i == bindings_.end()) + return NULL; + return &i->second; +} + +// static +bool Rule::IsReservedBinding(const string& var) { + return var == "command" || + var == "depfile" || + var == "description" || + var == "deps" || + var == "generator" || + var == "pool" || + var == "restat" || + var == "rspfile" || + var == "rspfile_content"; +} + +const map BindingEnv::GetRules() const { + return rules_; +} + string BindingEnv::LookupWithFallback(const string& var, const EvalString* eval, Env* env) { diff --git a/src/eval_env.h b/src/eval_env.h index f3c959a..46ea131 100644 --- a/src/eval_env.h +++ b/src/eval_env.h @@ -24,10 +24,32 @@ using namespace std; struct EvalString; +/// An invokable build command and associated metadata (description, etc.). +struct Rule { + explicit Rule(const string& name) : name_(name) {} + + const string& name() const { return name_; } + + typedef map Bindings; + void AddBinding(const string& key, const EvalString& val); + + static bool IsReservedBinding(const string& var); + + const EvalString* GetBinding(const string& key) const; + + private: + // Allow the parsers to reach into this object and fill out its fields. + friend struct ManifestParser; + + string name_; + map bindings_; +}; + /// An interface for a scope for variable (e.g. "$foo") lookups. struct Env { virtual ~Env() {} virtual string LookupVariable(const string& var) = 0; + virtual const Rule* LookupRule(const string& rule_name) = 0; }; /// An Env which contains a mapping of variables to values @@ -39,6 +61,11 @@ struct BindingEnv : public Env { virtual ~BindingEnv() {} virtual string LookupVariable(const string& var); + void AddRule(const Rule* rule); + const Rule* LookupRule(const string& rule_name); + const Rule* LookupRuleCurrentScope(const string& rule_name); + const map GetRules() const; + void AddBinding(const string& key, const string& val); /// This is tricky. 
Edges want lookup scope to go in this order: @@ -51,6 +78,7 @@ struct BindingEnv : public Env { private: map bindings_; + map rules_; Env* parent_; }; diff --git a/src/graph.cc b/src/graph.cc index 6b977eb..cbf7921 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -33,30 +33,6 @@ bool Node::Stat(DiskInterface* disk_interface) { return mtime_ > 0; } -void Rule::AddBinding(const string& key, const EvalString& val) { - bindings_[key] = val; -} - -const EvalString* Rule::GetBinding(const string& key) const { - map::const_iterator i = bindings_.find(key); - if (i == bindings_.end()) - return NULL; - return &i->second; -} - -// static -bool Rule::IsReservedBinding(const string& var) { - return var == "command" || - var == "depfile" || - var == "description" || - var == "deps" || - var == "generator" || - var == "pool" || - var == "restat" || - var == "rspfile" || - var == "rspfile_content"; -} - bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { bool dirty = false; edge->outputs_ready_ = true; @@ -231,6 +207,7 @@ struct EdgeEnv : public Env { EdgeEnv(Edge* edge, EscapeKind escape) : edge_(edge), escape_in_out_(escape) {} virtual string LookupVariable(const string& var); + virtual const Rule* LookupRule(const string& rule_name); /// Given a span of Nodes, construct a list of paths suitable for a command /// line. @@ -242,6 +219,10 @@ struct EdgeEnv : public Env { EscapeKind escape_in_out_; }; +const Rule* EdgeEnv::LookupRule(const string& rule_name) { + return NULL; +} + string EdgeEnv::LookupVariable(const string& var) { if (var == "in" || var == "in_newline") { int explicit_deps_count = edge_->inputs_.size() - edge_->implicit_deps_ - diff --git a/src/graph.h b/src/graph.h index 9aada38..9eafc05 100644 --- a/src/graph.h +++ b/src/graph.h @@ -121,27 +121,6 @@ private: int id_; }; -/// An invokable build command and associated metadata (description, etc.). -struct Rule { - explicit Rule(const string& name) : name_(name) {} - - const string& name() const { return name_; } - - typedef map Bindings; - void AddBinding(const string& key, const EvalString& val); - - static bool IsReservedBinding(const string& var); - - const EvalString* GetBinding(const string& key) const; - - private: - // Allow the parsers to reach into this object and fill out its fields. - friend struct ManifestParser; - - string name_; - map bindings_; -}; - /// An edge in the dependency graph; links between Nodes using Rules. 
struct Edge { Edge() : rule_(NULL), env_(NULL), outputs_ready_(false), deps_missing_(false), diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index 7ee990b..044b259 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -156,7 +156,7 @@ bool ManifestParser::ParseRule(string* err) { if (!ExpectToken(Lexer::NEWLINE, err)) return false; - if (state_->LookupRule(name) != NULL) + if (env_->LookupRuleCurrentScope(name) != NULL) return lexer_.Error("duplicate rule '" + name + "'", err); Rule* rule = new Rule(name); // XXX scoped_ptr @@ -185,7 +185,7 @@ bool ManifestParser::ParseRule(string* err) { if (rule->bindings_["command"].empty()) return lexer_.Error("expected 'command =' line", err); - state_->AddRule(rule); + env_->AddRule(rule); return true; } @@ -252,7 +252,7 @@ bool ManifestParser::ParseEdge(string* err) { if (!lexer_.ReadIdent(&rule_name)) return lexer_.Error("expected build command name", err); - const Rule* rule = state_->LookupRule(rule_name); + const Rule* rule = env_->LookupRule(rule_name); if (!rule) return lexer_.Error("unknown build rule '" + rule_name + "'", err); diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc index a8f2e53..301a35c 100644 --- a/src/manifest_parser_test.cc +++ b/src/manifest_parser_test.cc @@ -60,8 +60,8 @@ TEST_F(ParserTest, Rules) { "\n" "build result: cat in_1.cc in-2.O\n")); - ASSERT_EQ(3u, state.rules_.size()); - const Rule* rule = state.rules_.begin()->second; + ASSERT_EQ(3u, state.bindings_.GetRules().size()); + const Rule* rule = state.bindings_.GetRules().begin()->second; EXPECT_EQ("cat", rule->name()); EXPECT_EQ("[cat ][$in][ > ][$out]", rule->GetBinding("command")->Serialize()); @@ -93,8 +93,8 @@ TEST_F(ParserTest, IgnoreIndentedComments) { "build result: cat in_1.cc in-2.O\n" " #comment\n")); - ASSERT_EQ(2u, state.rules_.size()); - const Rule* rule = state.rules_.begin()->second; + ASSERT_EQ(2u, state.bindings_.GetRules().size()); + const Rule* rule = state.bindings_.GetRules().begin()->second; EXPECT_EQ("cat", rule->name()); Edge* edge = state.GetNode("result", 0)->in_edge(); EXPECT_TRUE(edge->GetBindingBool("restat")); @@ -126,8 +126,8 @@ TEST_F(ParserTest, ResponseFiles) { "build out: cat_rsp in\n" " rspfile=out.rsp\n")); - ASSERT_EQ(2u, state.rules_.size()); - const Rule* rule = state.rules_.begin()->second; + ASSERT_EQ(2u, state.bindings_.GetRules().size()); + const Rule* rule = state.bindings_.GetRules().begin()->second; EXPECT_EQ("cat_rsp", rule->name()); EXPECT_EQ("[cat ][$rspfile][ > ][$out]", rule->GetBinding("command")->Serialize()); @@ -143,8 +143,8 @@ TEST_F(ParserTest, InNewline) { "build out: cat_rsp in in2\n" " rspfile=out.rsp\n")); - ASSERT_EQ(2u, state.rules_.size()); - const Rule* rule = state.rules_.begin()->second; + ASSERT_EQ(2u, state.bindings_.GetRules().size()); + const Rule* rule = state.bindings_.GetRules().begin()->second; EXPECT_EQ("cat_rsp", rule->name()); EXPECT_EQ("[cat ][$in_newline][ > ][$out]", rule->GetBinding("command")->Serialize()); @@ -204,8 +204,8 @@ TEST_F(ParserTest, Continuation) { "build a: link c $\n" " d e f\n")); - ASSERT_EQ(2u, state.rules_.size()); - const Rule* rule = state.rules_.begin()->second; + ASSERT_EQ(2u, state.bindings_.GetRules().size()); + const Rule* rule = state.bindings_.GetRules().begin()->second; EXPECT_EQ("link", rule->name()); EXPECT_EQ("[foo bar baz]", rule->GetBinding("command")->Serialize()); } @@ -823,18 +823,14 @@ TEST_F(ParserTest, MissingSubNinja) { } TEST_F(ParserTest, DuplicateRuleInDifferentSubninjas) { - // Test that 
rules live in a global namespace and aren't scoped to subninjas. + // Test that rules are scoped to subninjas. files_["test.ninja"] = "rule cat\n" " command = cat\n"; ManifestParser parser(&state, this); string err; - EXPECT_FALSE(parser.ParseTest("rule cat\n" + EXPECT_TRUE(parser.ParseTest("rule cat\n" " command = cat\n" "subninja test.ninja\n", &err)); - EXPECT_EQ("test.ninja:1: duplicate rule 'cat'\n" - "rule cat\n" - " ^ near here" - , err); } TEST_F(ParserTest, Include) { diff --git a/src/state.cc b/src/state.cc index 1ceda45..89f90c0 100644 --- a/src/state.cc +++ b/src/state.cc @@ -73,23 +73,11 @@ Pool State::kConsolePool("console", 1); const Rule State::kPhonyRule("phony"); State::State() { - AddRule(&kPhonyRule); + bindings_.AddRule(&kPhonyRule); AddPool(&kDefaultPool); AddPool(&kConsolePool); } -void State::AddRule(const Rule* rule) { - assert(LookupRule(rule->name()) == NULL); - rules_[rule->name()] = rule; -} - -const Rule* State::LookupRule(const string& rule_name) { - map::iterator i = rules_.find(rule_name); - if (i == rules_.end()) - return NULL; - return i->second; -} - void State::AddPool(Pool* pool) { assert(LookupPool(pool->name()) == NULL); pools_[pool->name()] = pool; diff --git a/src/state.h b/src/state.h index 311d740..db0e3aa 100644 --- a/src/state.h +++ b/src/state.h @@ -79,7 +79,7 @@ struct Pool { DelayedEdges delayed_; }; -/// Global state (file status, loaded rules) for a single run. +/// Global state (file status) for a single run. struct State { static Pool kDefaultPool; static Pool kConsolePool; @@ -87,9 +87,6 @@ struct State { State(); - void AddRule(const Rule* rule); - const Rule* LookupRule(const string& rule_name); - void AddPool(Pool* pool); Pool* LookupPool(const string& pool_name); @@ -119,9 +116,6 @@ struct State { typedef ExternalStringHashMap::Type Paths; Paths paths_; - /// All the rules used in the graph. - map rules_; - /// All the pools used in the graph. map pools_; diff --git a/src/state_test.cc b/src/state_test.cc index bd133b6..458b519 100644 --- a/src/state_test.cc +++ b/src/state_test.cc @@ -29,7 +29,7 @@ TEST(State, Basic) { Rule* rule = new Rule("cat"); rule->AddBinding("command", command); - state.AddRule(rule); + state.bindings_.AddRule(rule); Edge* edge = state.AddEdge(rule); state.AddIn(edge, "in1", 0); -- cgit v0.12 From 70e7007b327cd26682d398562a8ccda76588f95b Mon Sep 17 00:00:00 2001 From: Pierre Schweitzer Date: Sat, 7 Mar 2015 14:23:41 +0100 Subject: Directly pass the string instead of char * to Truncate util function. It will prevent useless conversions. --- src/deps_log.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/deps_log.cc b/src/deps_log.cc index bd47ebc..89c6023 100644 --- a/src/deps_log.cc +++ b/src/deps_log.cc @@ -275,7 +275,7 @@ bool DepsLog::Load(const string& path, State* state, string* err) { } fclose(f); - if (!Truncate(path.c_str(), offset, err)) + if (!Truncate(path, offset, err)) return false; // The truncate succeeded; we'll just report the load error as a -- cgit v0.12 From 2d645be0f0ffb1ad257f72ae89182e1d153ca617 Mon Sep 17 00:00:00 2001 From: Mohamed Bamakhrama Date: Sun, 8 Mar 2015 04:13:12 +0100 Subject: Added a new test to illustrate scoped rules The new test shows the added value of scoped rules by demonstrating a multi-level build where a single rules file gets included at all the levels. By scoping rules, this is possible. 
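To make the new test concrete, here is a minimal sketch of the layout it exercises
(file and target names are illustrative, mirroring the test fixture rather than any
real project). A shared rules file can be included both at the top level and inside
a subninja without tripping the duplicate-rule check, because each subninja now gets
its own rule scope:

    # rules.ninja
    rule cat
      command = cat $in > $out

    # build.ninja
    include rules.ninja
    subninja sub.ninja
    build top.out: cat top.in

    # sub.ninja
    include rules.ninja
    build sub.out: cat sub.in

Before rule scoping, the include inside sub.ninja would have failed with
"duplicate rule 'cat'", since all rules shared one global namespace.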
--- src/manifest_parser_test.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc index 301a35c..e13d728 100644 --- a/src/manifest_parser_test.cc +++ b/src/manifest_parser_test.cc @@ -833,6 +833,19 @@ TEST_F(ParserTest, DuplicateRuleInDifferentSubninjas) { "subninja test.ninja\n", &err)); } +TEST_F(ParserTest, DuplicateRuleInDifferentSubninjasWithInclude) { + // Test that rules are scoped to subninjas even with includes. + files_["rules.ninja"] = "rule cat\n" + " command = cat\n"; + files_["test.ninja"] = "include rules.ninja\n" + "build x : cat\n"; + ManifestParser parser(&state, this); + string err; + EXPECT_TRUE(parser.ParseTest("include rules.ninja\n" + "subninja test.ninja\n" + "build y : cat\n", &err)); +} + TEST_F(ParserTest, Include) { files_["include.ninja"] = "var = inner\n"; ASSERT_NO_FATAL_FAILURE(AssertParse( -- cgit v0.12 From 160af7d034b0016409b2602cd766dd0a7a590a8e Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Sun, 8 Mar 2015 19:09:01 -0500 Subject: Test for Clang by checking --version --- configure.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/configure.py b/configure.py index fe71372..cb08c17 100755 --- a/configure.py +++ b/configure.py @@ -317,8 +317,12 @@ else: cflags.remove('-fno-rtti') # Needed for above pedanticness. else: cflags += ['-O2', '-DNDEBUG'] - if 'clang' in os.path.basename(CXX): - cflags += ['-fcolor-diagnostics'] + try: + proc = subprocess.Popen([CXX, '--version'], stdout=subprocess.PIPE) + if 'clang' in proc.communicate()[0].decode('utf-8'): + cflags += ['-fcolor-diagnostics'] + except: + pass if platform.is_mingw(): cflags += ['-D_WIN32_WINNT=0x0501'] ldflags = ['-L$builddir'] -- cgit v0.12 From 52e667fa3d77d59ed07fa136e5a19f169dc9fc0b Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Sun, 8 Mar 2015 21:46:34 -0700 Subject: Fix build with libc++ after #921. It failed with error: field has incomplete type 'EvalString' note: in instantiation of exception specification for 'map' requested here explicit Rule(const string& name) : name_(name) {} ^ --- src/eval_env.h | 58 +++++++++++++++++++++++++++++----------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/src/eval_env.h b/src/eval_env.h index 46ea131..7b9bdf5 100644 --- a/src/eval_env.h +++ b/src/eval_env.h @@ -22,7 +22,35 @@ using namespace std; #include "string_piece.h" -struct EvalString; +struct Rule; + +/// An interface for a scope for variable (e.g. "$foo") lookups. +struct Env { + virtual ~Env() {} + virtual string LookupVariable(const string& var) = 0; + virtual const Rule* LookupRule(const string& rule_name) = 0; +}; + +/// A tokenized string that contains variable references. +/// Can be evaluated relative to an Env. +struct EvalString { + string Evaluate(Env* env) const; + + void Clear() { parsed_.clear(); } + bool empty() const { return parsed_.empty(); } + + void AddText(StringPiece text); + void AddSpecial(StringPiece text); + + /// Construct a human-readable representation of the parsed state, + /// for use in tests. + string Serialize() const; + +private: + enum TokenType { RAW, SPECIAL }; + typedef vector > TokenList; + TokenList parsed_; +}; /// An invokable build command and associated metadata (description, etc.). struct Rule { @@ -45,13 +73,6 @@ struct Rule { map bindings_; }; -/// An interface for a scope for variable (e.g. "$foo") lookups. 
-struct Env { - virtual ~Env() {} - virtual string LookupVariable(const string& var) = 0; - virtual const Rule* LookupRule(const string& rule_name) = 0; -}; - /// An Env which contains a mapping of variables to values /// as well as a pointer to a parent scope. struct BindingEnv : public Env { @@ -82,25 +103,4 @@ private: Env* parent_; }; -/// A tokenized string that contains variable references. -/// Can be evaluated relative to an Env. -struct EvalString { - string Evaluate(Env* env) const; - - void Clear() { parsed_.clear(); } - bool empty() const { return parsed_.empty(); } - - void AddText(StringPiece text); - void AddSpecial(StringPiece text); - - /// Construct a human-readable representation of the parsed state, - /// for use in tests. - string Serialize() const; - -private: - enum TokenType { RAW, SPECIAL }; - typedef vector > TokenList; - TokenList parsed_; -}; - #endif // NINJA_EVAL_ENV_H_ -- cgit v0.12 From 00a061cde9acee7ceec69880439f6118a0a27fb2 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Mon, 9 Mar 2015 10:24:41 -0700 Subject: Make diagnostics colored with new gccs (4.9+) too. Both clang and gcc understand -fdiagnostics-color, so use that flag name. (This will disable colored diagnostics for clangs older than LLVM 3.3, but that is several years old by now.) --- configure.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/configure.py b/configure.py index cb08c17..661662f 100755 --- a/configure.py +++ b/configure.py @@ -318,9 +318,12 @@ else: else: cflags += ['-O2', '-DNDEBUG'] try: - proc = subprocess.Popen([CXX, '--version'], stdout=subprocess.PIPE) - if 'clang' in proc.communicate()[0].decode('utf-8'): - cflags += ['-fcolor-diagnostics'] + proc = subprocess.Popen( + [CXX, '-fdiagnostics-color', '-c', '-x', 'c++', '/dev/null'], + stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT) + proc.wait() + if proc.returncode == 0: + cflags += ['-fdiagnostics-color'] except: pass if platform.is_mingw(): -- cgit v0.12 From 51f06facf46e7a1a5338a4ca2ec9b8441c44c405 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Tue, 10 Mar 2015 09:09:49 -0700 Subject: Simplify. No behavior change. --- configure.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/configure.py b/configure.py index 661662f..48da08a 100755 --- a/configure.py +++ b/configure.py @@ -321,8 +321,7 @@ else: proc = subprocess.Popen( [CXX, '-fdiagnostics-color', '-c', '-x', 'c++', '/dev/null'], stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT) - proc.wait() - if proc.returncode == 0: + if proc.wait() == 0: cflags += ['-fdiagnostics-color'] except: pass -- cgit v0.12 From 7ed4457f3b305649a71f4759aeaa7f9847b166fe Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Tue, 10 Mar 2015 13:45:17 -0700 Subject: Remove option no longer needed now that we don't use gtest. --- configure.py | 1 - 1 file changed, 1 deletion(-) diff --git a/configure.py b/configure.py index 48da08a..2eacbfe 100755 --- a/configure.py +++ b/configure.py @@ -291,7 +291,6 @@ if platform.is_msvc(): # We never have strings or arrays larger than 2**31. '/wd4267', '/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS', - '/D_VARIADIC_MAX=10', '/DNINJA_PYTHON="%s"' % options.with_python] if options.bootstrap: # In bootstrap mode, we have no ninja process to catch /showIncludes -- cgit v0.12 From 8bab23be060ef6c79e35daa8e6deca18d00341f6 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Thu, 12 Mar 2015 11:59:45 -0400 Subject: Reject depfiles that don't contain a : after the target name. This is a prerequisite for fixing #417. 
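For context on the format involved: a well-formed Make-style depfile names the
target, then a colon, then its dependencies, roughly

    foo.o: foo.c foo.h

(names here are just placeholders). A depfile with no ':' after the target, like
the "randomtext" fixture in the updated test below, is now rejected by the depfile
parser itself with "expected ':' in depfile", instead of only being caught later by
the check that the depfile's target matches the edge's first output.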
--- src/build_test.cc | 3 +-- src/depfile_parser.cc | 4 ++++ src/depfile_parser.in.cc | 4 ++++ src/depfile_parser_test.cc | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/build_test.cc b/src/build_test.cc index bd1cd30..82da57b 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -816,8 +816,7 @@ TEST_F(BuildTest, DepFileParseError) { fs_.Create("foo.c", ""); fs_.Create("foo.o.d", "randomtext\n"); EXPECT_FALSE(builder_.AddTarget("foo.o", &err)); - EXPECT_EQ("expected depfile 'foo.o.d' to mention 'foo.o', got 'randomtext'", - err); + EXPECT_EQ("foo.o.d: expected ':' in depfile", err); } TEST_F(BuildTest, OrderOnlyDeps) { diff --git a/src/depfile_parser.cc b/src/depfile_parser.cc index 4ca3943..7268f31 100644 --- a/src/depfile_parser.cc +++ b/src/depfile_parser.cc @@ -230,5 +230,9 @@ yy16: return false; } } + if (parsing_targets) { + *err = "expected ':' in depfile"; + return false; + } return true; } diff --git a/src/depfile_parser.in.cc b/src/depfile_parser.in.cc index b59baf0..deaee5b 100644 --- a/src/depfile_parser.in.cc +++ b/src/depfile_parser.in.cc @@ -112,5 +112,9 @@ bool DepfileParser::Parse(string* content, string* err) { return false; } } + if (parsing_targets) { + *err = "expected ':' in depfile"; + return false; + } return true; } diff --git a/src/depfile_parser_test.cc b/src/depfile_parser_test.cc index e67ef79..8b57a1e 100644 --- a/src/depfile_parser_test.cc +++ b/src/depfile_parser_test.cc @@ -106,7 +106,7 @@ TEST_F(DepfileParserTest, Escapes) { // it through. string err; EXPECT_TRUE(Parse( -"\\!\\@\\#$$\\%\\^\\&\\\\", +"\\!\\@\\#$$\\%\\^\\&\\\\:", &err)); ASSERT_EQ("", err); EXPECT_EQ("\\!\\@#$\\%\\^\\&\\", -- cgit v0.12 From 157a71f39b19d4fac43c814211c2093de205adab Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Thu, 12 Mar 2015 12:06:50 -0400 Subject: On unexpected output in a .d file, rebuild instead erroring. Fixes #417. --- src/build_test.cc | 17 +++++++++++++++++ src/graph.cc | 7 ++++--- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/src/build_test.cc b/src/build_test.cc index 82da57b..65d189d 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -2042,6 +2042,23 @@ TEST_F(BuildWithDepsLogTest, RestatMissingDepfileDepslog) { ASSERT_EQ(0u, command_runner_.commands_ran_.size()); } +TEST_F(BuildTest, WrongOutputInDepfileCausesRebuild) { + string err; + const char* manifest = +"rule cc\n" +" command = cc $in\n" +" depfile = $out.d\n" +"build foo.o: cc foo.c\n"; + + fs_.Create("foo.c", ""); + fs_.Create("foo.o", ""); + fs_.Create("header.h", ""); + fs_.Create("foo.o.d", "bar.o.d: header.h\n"); + + RebuildTarget("foo.o", manifest, "build_log", "ninja_deps"); + ASSERT_EQ(1u, command_runner_.commands_ran_.size()); +} + TEST_F(BuildTest, Console) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule console\n" diff --git a/src/graph.cc b/src/graph.cc index cbf7921..76c4e9a 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -390,12 +390,13 @@ bool ImplicitDepLoader::LoadDepFile(Edge* edge, const string& path, &depfile.out_.len_, &unused, err)) return false; - // Check that this depfile matches the edge's output. + // Check that this depfile matches the edge's output, if not return false to + // mark the edge as dirty. 
Node* first_output = edge->outputs_[0]; StringPiece opath = StringPiece(first_output->path()); if (opath != depfile.out_) { - *err = "expected depfile '" + path + "' to mention '" + - first_output->path() + "', got '" + depfile.out_.AsString() + "'"; + EXPLAIN("expected depfile '%s' to mention '%s', got '%s'", path.c_str(), + first_output->path().c_str(), depfile.out_.AsString().c_str()); return false; } -- cgit v0.12 From 56e9d08c028d2c16f787041dc1f41271464a8bdc Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Sun, 15 Mar 2015 19:34:03 -0400 Subject: Build self-consistent graphs for dupe edges with multiple outputs. Fixes #867, both the crashes and "[stuck]" issues. The problem was that a duplicate edge would modify the in_edge of the outputs of the new build rule, but the edge corresponding to the old build rule would still think that the in_edge points to itself. `old_edge->outputs_[0]->in_edge()` would not return `old_edge`, which confused the scan logic. As fix, let `State::AddOut()` reject changing in_edge if it's already set. This changes behavior in a minor way: Previously, if there were multiple edges for a single output, the last edge would be kept. Now, the first edge is kept. This only had mostly-well-defined semantics if all duplicate edges are the same (which is the only case I've seen in practice), and for that case the behavior doesn't change. For testing, add a VerifyGraph() function and call that every time any test graph is parsed. That's a bit more code than just copying the test cases from the bug into build_test.cc, but it also yields better test coverage overall. --- src/manifest_parser.cc | 8 ++++++++ src/manifest_parser_test.cc | 13 +++++++++++++ src/state.cc | 3 ++- src/test.cc | 22 ++++++++++++++++++++++ src/test.h | 1 + 5 files changed, 46 insertions(+), 1 deletion(-) diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index 044b259..f0457dd 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -340,6 +340,14 @@ bool ManifestParser::ParseEdge(string* err) { edge->implicit_deps_ = implicit; edge->order_only_deps_ = order_only; + if (edge->outputs_.empty()) { + // All outputs of the edge are already created by other edges. Don't add + // this edge. + state_->edges_.pop_back(); + delete edge; + return true; + } + // Multiple outputs aren't (yet?) supported with depslog. string deps_type = edge->GetBinding("deps"); if (!deps_type.empty() && edge->outputs_.size() > 1) { diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc index e13d728..7e38fc6 100644 --- a/src/manifest_parser_test.cc +++ b/src/manifest_parser_test.cc @@ -28,6 +28,7 @@ struct ParserTest : public testing::Test, string err; EXPECT_TRUE(parser.ParseTest(input, &err)); ASSERT_EQ("", err); + VerifyGraph(state); } virtual bool ReadFile(const string& path, string* content, string* err) { @@ -340,6 +341,18 @@ TEST_F(ParserTest, CanonicalizePathsBackslashes) { } #endif +TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputs) { + ASSERT_NO_FATAL_FAILURE(AssertParse( +"rule cat\n" +" command = cat $in > $out\n" +"build out1 out2: cat in1\n" +"build out1: cat in2\n" +"build final: cat out1\n" +)); + // AssertParse() checks that the generated build graph is self-consistent. + // That's all the checking that this test needs. 
+} + TEST_F(ParserTest, ReservedWords) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule build\n" diff --git a/src/state.cc b/src/state.cc index 89f90c0..18c0c8c 100644 --- a/src/state.cc +++ b/src/state.cc @@ -141,13 +141,14 @@ void State::AddIn(Edge* edge, StringPiece path, unsigned int slash_bits) { void State::AddOut(Edge* edge, StringPiece path, unsigned int slash_bits) { Node* node = GetNode(path, slash_bits); - edge->outputs_.push_back(node); if (node->in_edge()) { Warning("multiple rules generate %s. " "builds involving this target will not be correct; " "continuing anyway", path.AsString().c_str()); + return; } + edge->outputs_.push_back(node); node->set_in_edge(edge); } diff --git a/src/test.cc b/src/test.cc index f667fef..76b8416 100644 --- a/src/test.cc +++ b/src/test.cc @@ -28,6 +28,7 @@ #endif #include "build_log.h" +#include "graph.h" #include "manifest_parser.h" #include "util.h" @@ -98,12 +99,33 @@ void AssertParse(State* state, const char* input) { string err; EXPECT_TRUE(parser.ParseTest(input, &err)); ASSERT_EQ("", err); + VerifyGraph(*state); } void AssertHash(const char* expected, uint64_t actual) { ASSERT_EQ(BuildLog::LogEntry::HashCommand(expected), actual); } +void VerifyGraph(const State& state) { + for (vector::const_iterator e = state.edges_.begin(); + e != state.edges_.end(); ++e) { + // All edges need at least one output. + EXPECT_FALSE((*e)->outputs_.empty()); + // Check that the edge's inputs have the edge as out edge. + for (vector::const_iterator in_node = (*e)->inputs_.begin(); + in_node != (*e)->inputs_.end(); ++in_node) { + const vector& out_edges = (*in_node)->out_edges(); + EXPECT_NE(std::find(out_edges.begin(), out_edges.end(), *e), + out_edges.end()); + } + // Check that the edge's outputs have the edge as in edge. + for (vector::const_iterator out_node = (*e)->outputs_.begin(); + out_node != (*e)->outputs_.end(); ++out_node) { + EXPECT_EQ((*out_node)->in_edge(), *e); + } + } +} + void VirtualFileSystem::Create(const string& path, const string& contents) { files_[path].mtime = now_; diff --git a/src/test.h b/src/test.h index 4c15203..3ce510d 100644 --- a/src/test.h +++ b/src/test.h @@ -124,6 +124,7 @@ struct StateTestWithBuiltinRules : public testing::Test { void AssertParse(State* state, const char* input); void AssertHash(const char* expected, uint64_t actual); +void VerifyGraph(const State& state); /// An implementation of DiskInterface that uses an in-memory representation /// of disk state. It also logs file accesses and directory creations -- cgit v0.12 From ffa1beb78feee904acefce74a6bba2e4cc5416cb Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 18 Mar 2015 12:35:57 -0400 Subject: Env should only be about variables. No behavior change. --- src/eval_env.h | 5 ++--- src/graph.cc | 5 ----- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/src/eval_env.h b/src/eval_env.h index 7b9bdf5..250fa55 100644 --- a/src/eval_env.h +++ b/src/eval_env.h @@ -28,7 +28,6 @@ struct Rule; struct Env { virtual ~Env() {} virtual string LookupVariable(const string& var) = 0; - virtual const Rule* LookupRule(const string& rule_name) = 0; }; /// A tokenized string that contains variable references. @@ -77,7 +76,7 @@ struct Rule { /// as well as a pointer to a parent scope. 
struct BindingEnv : public Env { BindingEnv() : parent_(NULL) {} - explicit BindingEnv(Env* parent) : parent_(parent) {} + explicit BindingEnv(BindingEnv* parent) : parent_(parent) {} virtual ~BindingEnv() {} virtual string LookupVariable(const string& var); @@ -100,7 +99,7 @@ struct BindingEnv : public Env { private: map bindings_; map rules_; - Env* parent_; + BindingEnv* parent_; }; #endif // NINJA_EVAL_ENV_H_ diff --git a/src/graph.cc b/src/graph.cc index 76c4e9a..e3253fd 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -207,7 +207,6 @@ struct EdgeEnv : public Env { EdgeEnv(Edge* edge, EscapeKind escape) : edge_(edge), escape_in_out_(escape) {} virtual string LookupVariable(const string& var); - virtual const Rule* LookupRule(const string& rule_name); /// Given a span of Nodes, construct a list of paths suitable for a command /// line. @@ -219,10 +218,6 @@ struct EdgeEnv : public Env { EscapeKind escape_in_out_; }; -const Rule* EdgeEnv::LookupRule(const string& rule_name) { - return NULL; -} - string EdgeEnv::LookupVariable(const string& var) { if (var == "in" || var == "in_newline") { int explicit_deps_count = edge_->inputs_.size() - edge_->implicit_deps_ - -- cgit v0.12 From bc38ef76ebfabe503b1b56a1e32827a851037766 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 18 Mar 2015 16:39:26 -0400 Subject: Add a missing &. (No behavior change, only used in tests.) --- src/eval_env.cc | 2 +- src/eval_env.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/eval_env.cc b/src/eval_env.cc index 8f5c8ee..e03a82e 100644 --- a/src/eval_env.cc +++ b/src/eval_env.cc @@ -74,7 +74,7 @@ bool Rule::IsReservedBinding(const string& var) { var == "rspfile_content"; } -const map BindingEnv::GetRules() const { +const map& BindingEnv::GetRules() const { return rules_; } diff --git a/src/eval_env.h b/src/eval_env.h index 250fa55..28c4d16 100644 --- a/src/eval_env.h +++ b/src/eval_env.h @@ -84,7 +84,7 @@ struct BindingEnv : public Env { void AddRule(const Rule* rule); const Rule* LookupRule(const string& rule_name); const Rule* LookupRuleCurrentScope(const string& rule_name); - const map GetRules() const; + const map& GetRules() const; void AddBinding(const string& key, const string& val); -- cgit v0.12 From b334523f1da03adfcd23b6e7e7a66c8fcbf87840 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Thu, 19 Mar 2015 08:42:46 -0700 Subject: Make failing stat() calls abort the build. Fixes #830, fixes #904. In practice, this either happens with 64-bit inodes and a 32-bit userspace when building without -D_FILE_OFFSET_BITS=64 in CFLAGS, or when a filename is longer than the system file length limit. Since DiskInterface::Stat() returns -1 on error, and Node used -1 on "stat state unknown", not aborting the build lead to ninja stat()ing the same file over and over again, until it finally ran out of stack. That's now fixed. * Change RecomputeOutputsDirty() to return success instead of dirty state (like RecomputeDirty()) and return the dirty state in a bool outparam * Node::Stat()s old return value wasn't used anywhere, change the function to return success instead and add an |err| outparam * Node::StatIfNecessary()'s old return value was used only in one place. 
Change that place to explicitly check status_known() and make StatIfNecessary() return success and add an |err| outparam * Plan::CleanNode() can now fail, make it return bool and add an |err| outparam --- src/build.cc | 21 +++++++++++++++++---- src/build.h | 3 ++- src/build_test.cc | 36 +++++++++++++++++++++++++----------- src/disk_interface_test.cc | 16 ++++++++++++---- src/graph.cc | 32 ++++++++++++++++++++++---------- src/graph.h | 20 ++++++++++---------- 6 files changed, 88 insertions(+), 40 deletions(-) diff --git a/src/build.cc b/src/build.cc index 5d66f4b..1e10c7c 100644 --- a/src/build.cc +++ b/src/build.cc @@ -406,7 +406,7 @@ void Plan::NodeFinished(Node* node) { } } -void Plan::CleanNode(DependencyScan* scan, Node* node) { +bool Plan::CleanNode(DependencyScan* scan, Node* node, string* err) { node->set_dirty(false); for (vector::const_iterator oe = node->out_edges().begin(); @@ -436,10 +436,16 @@ void Plan::CleanNode(DependencyScan* scan, Node* node) { // Now, this edge is dirty if any of the outputs are dirty. // If the edge isn't dirty, clean the outputs and mark the edge as not // wanted. - if (!scan->RecomputeOutputsDirty(*oe, most_recent_input)) { + bool outputs_dirty = false; + if (!scan->RecomputeOutputsDirty(*oe, most_recent_input, + &outputs_dirty, err)) { + return false; + } + if (!outputs_dirty) { for (vector::iterator o = (*oe)->outputs_.begin(); o != (*oe)->outputs_.end(); ++o) { - CleanNode(scan, *o); + if (!CleanNode(scan, *o, err)) + return false; } want_e->second = false; @@ -449,6 +455,7 @@ void Plan::CleanNode(DependencyScan* scan, Node* node) { } } } + return true; } void Plan::Dump() { @@ -758,7 +765,8 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) { // The rule command did not change the output. Propagate the clean // state through the build graph. // Note that this also applies to nonexistent outputs (mtime == 0). - plan_.CleanNode(&scan_, *o); + if (!plan_.CleanNode(&scan_, *o, err)) + return false; node_cleaned = true; } } @@ -805,6 +813,11 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) { assert(edge->outputs_.size() == 1 && "should have been rejected by parser"); Node* out = edge->outputs_[0]; TimeStamp deps_mtime = disk_interface_->Stat(out->path()); + if (deps_mtime == -1) { + // TODO: Let DiskInterface::Stat() take err instead of it calling Error(). + *err = "stat failed"; + return false; + } if (!scan_.deps_log()->RecordDeps(out, deps_mtime, deps_nodes)) { *err = string("Error writing to deps log: ") + strerror(errno); return false; diff --git a/src/build.h b/src/build.h index eb3636a..5d5db80 100644 --- a/src/build.h +++ b/src/build.h @@ -61,7 +61,8 @@ struct Plan { void EdgeFinished(Edge* edge); /// Clean the given node during the build. - void CleanNode(DependencyScan* scan, Node* node); + /// Return false on error. + bool CleanNode(DependencyScan* scan, Node* node, string* err); /// Number of edges with commands to run. 
int command_edge_count() const { return command_edges_; } diff --git a/src/build_test.cc b/src/build_test.cc index 65d189d..0cdcd87 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -51,9 +51,9 @@ struct PlanTest : public StateTestWithBuiltinRules { }; TEST_F(PlanTest, Basic) { - AssertParse(&state_, + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat mid\n" -"build mid: cat in\n"); +"build mid: cat in\n")); GetNode("mid")->MarkDirty(); GetNode("out")->MarkDirty(); string err; @@ -84,9 +84,9 @@ TEST_F(PlanTest, Basic) { // Test that two outputs from one rule can be handled as inputs to the next. TEST_F(PlanTest, DoubleOutputDirect) { - AssertParse(&state_, + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat mid1 mid2\n" -"build mid1 mid2: cat in\n"); +"build mid1 mid2: cat in\n")); GetNode("mid1")->MarkDirty(); GetNode("mid2")->MarkDirty(); GetNode("out")->MarkDirty(); @@ -111,11 +111,11 @@ TEST_F(PlanTest, DoubleOutputDirect) { // Test that two outputs from one rule can eventually be routed to another. TEST_F(PlanTest, DoubleOutputIndirect) { - AssertParse(&state_, + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat b1 b2\n" "build b1: cat a1\n" "build b2: cat a2\n" -"build a1 a2: cat in\n"); +"build a1 a2: cat in\n")); GetNode("a1")->MarkDirty(); GetNode("a2")->MarkDirty(); GetNode("b1")->MarkDirty(); @@ -149,11 +149,11 @@ TEST_F(PlanTest, DoubleOutputIndirect) { // Test that two edges from one output can both execute. TEST_F(PlanTest, DoubleDependent) { - AssertParse(&state_, + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat a1 a2\n" "build a1: cat mid\n" "build a2: cat mid\n" -"build mid: cat in\n"); +"build mid: cat in\n")); GetNode("mid")->MarkDirty(); GetNode("a1")->MarkDirty(); GetNode("a2")->MarkDirty(); @@ -186,11 +186,11 @@ TEST_F(PlanTest, DoubleDependent) { } TEST_F(PlanTest, DependencyCycle) { - AssertParse(&state_, + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat mid\n" "build mid: cat in\n" "build in: cat pre\n" -"build pre: cat out\n"); +"build pre: cat out\n")); GetNode("out")->MarkDirty(); GetNode("mid")->MarkDirty(); GetNode("in")->MarkDirty(); @@ -1413,7 +1413,7 @@ TEST_F(BuildTest, RspFileFailure) { ASSERT_EQ("Another very long command", fs_.files_["out.rsp"].contents); } -// Test that contens of the RSP file behaves like a regular part of +// Test that contents of the RSP file behaves like a regular part of // command line, i.e. triggers a rebuild if changed TEST_F(BuildWithLogTest, RspFileCmdLineChange) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, @@ -1495,6 +1495,20 @@ TEST_F(BuildTest, InterruptCleanup) { EXPECT_EQ(0, fs_.Stat("out2")); } +TEST_F(BuildTest, StatFailureAbortsBuild) { + const string kTooLongToStat(400, 'i'); + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, +("build " + kTooLongToStat + ": cat " + kTooLongToStat + "\n").c_str())); + // Also cyclic, for good measure. 
+ + // This simulates a stat failure: + fs_.files_[kTooLongToStat].mtime = -1; + + string err; + EXPECT_FALSE(builder_.AddTarget(kTooLongToStat, &err)); + EXPECT_EQ("stat failed", err); +} + TEST_F(BuildTest, PhonyWithNoInputs) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build nonexistent: phony\n" diff --git a/src/disk_interface_test.cc b/src/disk_interface_test.cc index 05d509c..658fffd 100644 --- a/src/disk_interface_test.cc +++ b/src/disk_interface_test.cc @@ -204,7 +204,9 @@ TEST_F(StatTest, Simple) { "build out: cat in\n")); Node* out = GetNode("out"); - out->Stat(this); + string err; + EXPECT_TRUE(out->Stat(this, &err)); + EXPECT_EQ("", err); ASSERT_EQ(1u, stats_.size()); scan_.RecomputeDirty(out->in_edge(), NULL); ASSERT_EQ(2u, stats_.size()); @@ -218,7 +220,9 @@ TEST_F(StatTest, TwoStep) { "build mid: cat in\n")); Node* out = GetNode("out"); - out->Stat(this); + string err; + EXPECT_TRUE(out->Stat(this, &err)); + EXPECT_EQ("", err); ASSERT_EQ(1u, stats_.size()); scan_.RecomputeDirty(out->in_edge(), NULL); ASSERT_EQ(3u, stats_.size()); @@ -236,7 +240,9 @@ TEST_F(StatTest, Tree) { "build mid2: cat in21 in22\n")); Node* out = GetNode("out"); - out->Stat(this); + string err; + EXPECT_TRUE(out->Stat(this, &err)); + EXPECT_EQ("", err); ASSERT_EQ(1u, stats_.size()); scan_.RecomputeDirty(out->in_edge(), NULL); ASSERT_EQ(1u + 6u, stats_.size()); @@ -255,7 +261,9 @@ TEST_F(StatTest, Middle) { mtimes_["out"] = 1; Node* out = GetNode("out"); - out->Stat(this); + string err; + EXPECT_TRUE(out->Stat(this, &err)); + EXPECT_EQ("", err); ASSERT_EQ(1u, stats_.size()); scan_.RecomputeDirty(out->in_edge(), NULL); ASSERT_FALSE(GetNode("in")->dirty()); diff --git a/src/graph.cc b/src/graph.cc index e3253fd..b19dc85 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -27,10 +27,15 @@ #include "state.h" #include "util.h" -bool Node::Stat(DiskInterface* disk_interface) { +bool Node::Stat(DiskInterface* disk_interface, string* err) { METRIC_RECORD("node stat"); mtime_ = disk_interface->Stat(path_); - return mtime_ > 0; + if (mtime_ == -1) { + // TODO: Let DiskInterface::Stat() take err instead of it calling Error(). + *err = "stat failed"; + return false; + } + return true; } bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { @@ -48,7 +53,8 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { // graphs. for (vector::iterator o = edge->outputs_.begin(); o != edge->outputs_.end(); ++o) { - (*o)->StatIfNecessary(disk_interface_); + if (!(*o)->StatIfNecessary(disk_interface_, err)) + return false; } if (!dep_loader_.LoadDeps(edge, err)) { @@ -62,7 +68,9 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { Node* most_recent_input = NULL; for (vector::iterator i = edge->inputs_.begin(); i != edge->inputs_.end(); ++i) { - if ((*i)->StatIfNecessary(disk_interface_)) { + if (!(*i)->status_known()) { + if (!(*i)->StatIfNecessary(disk_interface_, err)) + return false; if (Edge* in_edge = (*i)->in_edge()) { if (!RecomputeDirty(in_edge, err)) return false; @@ -97,7 +105,8 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { // We may also be dirty due to output state: missing outputs, out of // date outputs, etc. Visit all outputs and determine whether they're dirty. if (!dirty) - dirty = RecomputeOutputsDirty(edge, most_recent_input); + if (!RecomputeOutputsDirty(edge, most_recent_input, &dirty, err)) + return false; // Finally, visit each output and update their dirty state if necessary. 
for (vector::iterator o = edge->outputs_.begin(); @@ -117,16 +126,19 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { return true; } -bool DependencyScan::RecomputeOutputsDirty(Edge* edge, - Node* most_recent_input) { +bool DependencyScan::RecomputeOutputsDirty(Edge* edge, Node* most_recent_input, + bool* outputs_dirty, string* err) { string command = edge->EvaluateCommand(/*incl_rsp_file=*/true); for (vector::iterator o = edge->outputs_.begin(); o != edge->outputs_.end(); ++o) { - (*o)->StatIfNecessary(disk_interface_); - if (RecomputeOutputDirty(edge, most_recent_input, command, *o)) + if (!(*o)->StatIfNecessary(disk_interface_, err)) + return false; + if (RecomputeOutputDirty(edge, most_recent_input, command, *o)) { + *outputs_dirty = true; return true; + } } - return false; + return true; } bool DependencyScan::RecomputeOutputDirty(Edge* edge, diff --git a/src/graph.h b/src/graph.h index 9eafc05..9526712 100644 --- a/src/graph.h +++ b/src/graph.h @@ -41,15 +41,14 @@ struct Node { in_edge_(NULL), id_(-1) {} - /// Return true if the file exists (mtime_ got a value). - bool Stat(DiskInterface* disk_interface); + /// Return false on error. + bool Stat(DiskInterface* disk_interface, string* err); - /// Return true if we needed to stat. - bool StatIfNecessary(DiskInterface* disk_interface) { + /// Return false on error. + bool StatIfNecessary(DiskInterface* disk_interface, string* err) { if (status_known()) - return false; - Stat(disk_interface); - return true; + return true; + return Stat(disk_interface, err); } /// Mark as not-yet-stat()ed and not dirty. @@ -236,9 +235,10 @@ struct DependencyScan { /// Returns false on failure. bool RecomputeDirty(Edge* edge, string* err); - /// Recompute whether any output of the edge is dirty. - /// Returns true if so. - bool RecomputeOutputsDirty(Edge* edge, Node* most_recent_input); + /// Recompute whether any output of the edge is dirty, if so sets |*dirty|. + /// Returns false on failure. + bool RecomputeOutputsDirty(Edge* edge, Node* most_recent_input, + bool* dirty, string* err); BuildLog* build_log() const { return build_log_; -- cgit v0.12 From 5560e2e26bd9ed6968ed6971610d68846ead7a86 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Mon, 2 Mar 2015 17:11:30 -0800 Subject: Add notes on using afl-fuzz to HACKING. 
--- HACKING.md | 30 ++++++++++++++++++++++++++++++ misc/afl-fuzz-tokens/kw_build | 1 + misc/afl-fuzz-tokens/kw_default | 1 + misc/afl-fuzz-tokens/kw_include | 1 + misc/afl-fuzz-tokens/kw_pool | 1 + misc/afl-fuzz-tokens/kw_rule | 1 + misc/afl-fuzz-tokens/kw_subninja | 1 + misc/afl-fuzz-tokens/misc_a | 1 + misc/afl-fuzz-tokens/misc_b | 1 + misc/afl-fuzz-tokens/misc_colon | 1 + misc/afl-fuzz-tokens/misc_cont | 1 + misc/afl-fuzz-tokens/misc_dollar | 1 + misc/afl-fuzz-tokens/misc_eq | 1 + misc/afl-fuzz-tokens/misc_indent | 1 + misc/afl-fuzz-tokens/misc_pipe | 1 + misc/afl-fuzz-tokens/misc_pipepipe | 1 + misc/afl-fuzz-tokens/misc_space | 1 + misc/afl-fuzz/build.ninja | 5 +++++ 18 files changed, 51 insertions(+) create mode 100644 misc/afl-fuzz-tokens/kw_build create mode 100644 misc/afl-fuzz-tokens/kw_default create mode 100644 misc/afl-fuzz-tokens/kw_include create mode 100644 misc/afl-fuzz-tokens/kw_pool create mode 100644 misc/afl-fuzz-tokens/kw_rule create mode 100644 misc/afl-fuzz-tokens/kw_subninja create mode 100644 misc/afl-fuzz-tokens/misc_a create mode 100644 misc/afl-fuzz-tokens/misc_b create mode 100644 misc/afl-fuzz-tokens/misc_colon create mode 100644 misc/afl-fuzz-tokens/misc_cont create mode 100644 misc/afl-fuzz-tokens/misc_dollar create mode 100644 misc/afl-fuzz-tokens/misc_eq create mode 100644 misc/afl-fuzz-tokens/misc_indent create mode 100644 misc/afl-fuzz-tokens/misc_pipe create mode 100644 misc/afl-fuzz-tokens/misc_pipepipe create mode 100644 misc/afl-fuzz-tokens/misc_space create mode 100644 misc/afl-fuzz/build.ninja diff --git a/HACKING.md b/HACKING.md index e3940ff..9c6830f 100644 --- a/HACKING.md +++ b/HACKING.md @@ -177,3 +177,33 @@ root directory: gcov build/*.o Look at the generated `.gcov` files directly, or use your favorit gcov viewer. + +### Using afl-fuzz + +Build with afl-clang++: + + CXX=path/to/afl-1.20b/afl-clang++ ./configure.py + ninja + +Then run afl-fuzz like so: + + afl-fuzz -i misc/afl-fuzz -o /tmp/afl-fuzz-out ./ninja -n -f @@ + +You can pass `-x misc/afl-fuzz-tokens` to use the token dictionary. In my +testing, that did not seem more effective though. 
+ +#### Using afl-fuzz with asan + +If you want to use asan (the `isysroot` bit is only needed on OS X; if clang +can't find C++ standard headers make sure your LLVM checkout includes a libc++ +checkout and has libc++ installed in the build directory): + + CFLAGS="-fsanitize=address -isysroot $(xcrun -show-sdk-path)" \ + LDFLAGS=-fsanitize=address CXX=path/to/afl-1.20b/afl-clang++ \ + ./configure.py + AFL_CXX=path/to/clang++ ninja + +Make sure ninja can find the asan runtime: + + DYLD_LIBRARY_PATH=path/to//lib/clang/3.7.0/lib/darwin/ \ + afl-fuzz -i misc/afl-fuzz -o /tmp/afl-fuzz-out ./ninja -n -f @@ diff --git a/misc/afl-fuzz-tokens/kw_build b/misc/afl-fuzz-tokens/kw_build new file mode 100644 index 0000000..c795b05 --- /dev/null +++ b/misc/afl-fuzz-tokens/kw_build @@ -0,0 +1 @@ +build \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/kw_default b/misc/afl-fuzz-tokens/kw_default new file mode 100644 index 0000000..331d858 --- /dev/null +++ b/misc/afl-fuzz-tokens/kw_default @@ -0,0 +1 @@ +default \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/kw_include b/misc/afl-fuzz-tokens/kw_include new file mode 100644 index 0000000..2996fba --- /dev/null +++ b/misc/afl-fuzz-tokens/kw_include @@ -0,0 +1 @@ +include \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/kw_pool b/misc/afl-fuzz-tokens/kw_pool new file mode 100644 index 0000000..e783591 --- /dev/null +++ b/misc/afl-fuzz-tokens/kw_pool @@ -0,0 +1 @@ +pool \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/kw_rule b/misc/afl-fuzz-tokens/kw_rule new file mode 100644 index 0000000..841e840 --- /dev/null +++ b/misc/afl-fuzz-tokens/kw_rule @@ -0,0 +1 @@ +rule \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/kw_subninja b/misc/afl-fuzz-tokens/kw_subninja new file mode 100644 index 0000000..c4fe0c7 --- /dev/null +++ b/misc/afl-fuzz-tokens/kw_subninja @@ -0,0 +1 @@ +subninja \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/misc_a b/misc/afl-fuzz-tokens/misc_a new file mode 100644 index 0000000..2e65efe --- /dev/null +++ b/misc/afl-fuzz-tokens/misc_a @@ -0,0 +1 @@ +a \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/misc_b b/misc/afl-fuzz-tokens/misc_b new file mode 100644 index 0000000..63d8dbd --- /dev/null +++ b/misc/afl-fuzz-tokens/misc_b @@ -0,0 +1 @@ +b \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/misc_colon b/misc/afl-fuzz-tokens/misc_colon new file mode 100644 index 0000000..22ded55 --- /dev/null +++ b/misc/afl-fuzz-tokens/misc_colon @@ -0,0 +1 @@ +: \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/misc_cont b/misc/afl-fuzz-tokens/misc_cont new file mode 100644 index 0000000..857f13a --- /dev/null +++ b/misc/afl-fuzz-tokens/misc_cont @@ -0,0 +1 @@ +$ diff --git a/misc/afl-fuzz-tokens/misc_dollar b/misc/afl-fuzz-tokens/misc_dollar new file mode 100644 index 0000000..6f4f765 --- /dev/null +++ b/misc/afl-fuzz-tokens/misc_dollar @@ -0,0 +1 @@ +$ \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/misc_eq b/misc/afl-fuzz-tokens/misc_eq new file mode 100644 index 0000000..851c75c --- /dev/null +++ b/misc/afl-fuzz-tokens/misc_eq @@ -0,0 +1 @@ += \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/misc_indent b/misc/afl-fuzz-tokens/misc_indent new file mode 100644 index 0000000..136d063 --- /dev/null +++ b/misc/afl-fuzz-tokens/misc_indent @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/misc_pipe b/misc/afl-fuzz-tokens/misc_pipe new file mode 100644 index 0000000..a3871d4 --- /dev/null 
+++ b/misc/afl-fuzz-tokens/misc_pipe @@ -0,0 +1 @@ +| \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/misc_pipepipe b/misc/afl-fuzz-tokens/misc_pipepipe new file mode 100644 index 0000000..27cc728 --- /dev/null +++ b/misc/afl-fuzz-tokens/misc_pipepipe @@ -0,0 +1 @@ +|| \ No newline at end of file diff --git a/misc/afl-fuzz-tokens/misc_space b/misc/afl-fuzz-tokens/misc_space new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/misc/afl-fuzz-tokens/misc_space @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/misc/afl-fuzz/build.ninja b/misc/afl-fuzz/build.ninja new file mode 100644 index 0000000..52cd2f1 --- /dev/null +++ b/misc/afl-fuzz/build.ninja @@ -0,0 +1,5 @@ +rule b + command = clang -MMD -MF $out.d -o $out -c $in + description = building $out + +build a.o: b a.c -- cgit v0.12 From f5c5789aad8001e15a7e4f1ee0dee0c2b688e993 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Thu, 19 Mar 2015 16:42:06 -0700 Subject: Another crash fix for duplicate edges. Fixes #939. Patch #933 fixed a crash with duplicate edges by not adding edges to the graph if all the edge's outputs are already built by other edges. However, it added the edge to the out_edges of the edge's input nodes before deleting it, letting inputs refer to dead edges. To fix, move the check for deleting an edge above the code that adds inputs. Expand VerifyGraph() to check that nodes don't refer to edges that aren't present in the state. --- src/manifest_parser.cc | 24 ++++++++++++------------ src/manifest_parser_test.cc | 11 +++++++++++ src/state.cc | 1 + src/test.cc | 16 ++++++++++++++-- 4 files changed, 38 insertions(+), 14 deletions(-) diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index f0457dd..4e639d1 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -321,14 +321,6 @@ bool ManifestParser::ParseEdge(string* err) { edge->pool_ = pool; } - for (vector::iterator i = ins.begin(); i != ins.end(); ++i) { - string path = i->Evaluate(env); - string path_err; - unsigned int slash_bits; - if (!CanonicalizePath(&path, &slash_bits, &path_err)) - return lexer_.Error(path_err, err); - state_->AddIn(edge, path, slash_bits); - } for (vector::iterator i = outs.begin(); i != outs.end(); ++i) { string path = i->Evaluate(env); string path_err; @@ -337,17 +329,25 @@ bool ManifestParser::ParseEdge(string* err) { return lexer_.Error(path_err, err); state_->AddOut(edge, path, slash_bits); } - edge->implicit_deps_ = implicit; - edge->order_only_deps_ = order_only; - if (edge->outputs_.empty()) { // All outputs of the edge are already created by other edges. Don't add - // this edge. + // this edge. Do this check before input nodes are connected to the edge. state_->edges_.pop_back(); delete edge; return true; } + for (vector::iterator i = ins.begin(); i != ins.end(); ++i) { + string path = i->Evaluate(env); + string path_err; + unsigned int slash_bits; + if (!CanonicalizePath(&path, &slash_bits, &path_err)) + return lexer_.Error(path_err, err); + state_->AddIn(edge, path, slash_bits); + } + edge->implicit_deps_ = implicit; + edge->order_only_deps_ = order_only; + // Multiple outputs aren't (yet?) supported with depslog. 
string deps_type = edge->GetBinding("deps"); if (!deps_type.empty() && edge->outputs_.size() > 1) { diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc index 7e38fc6..7e72b34 100644 --- a/src/manifest_parser_test.cc +++ b/src/manifest_parser_test.cc @@ -353,6 +353,17 @@ TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputs) { // That's all the checking that this test needs. } +TEST_F(ParserTest, NoDeadPointerFromDuplicateEdge) { + ASSERT_NO_FATAL_FAILURE(AssertParse( +"rule cat\n" +" command = cat $in > $out\n" +"build out: cat in\n" +"build out: cat in\n" +)); + // AssertParse() checks that the generated build graph is self-consistent. + // That's all the checking that this test needs. +} + TEST_F(ParserTest, ReservedWords) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule build\n" diff --git a/src/state.cc b/src/state.cc index 18c0c8c..0426b85 100644 --- a/src/state.cc +++ b/src/state.cc @@ -61,6 +61,7 @@ void Pool::Dump() const { } } +// static bool Pool::WeightedEdgeCmp(const Edge* a, const Edge* b) { if (!a) return b; if (!b) return false; diff --git a/src/test.cc b/src/test.cc index 76b8416..ddecd3d 100644 --- a/src/test.cc +++ b/src/test.cc @@ -111,19 +111,31 @@ void VerifyGraph(const State& state) { e != state.edges_.end(); ++e) { // All edges need at least one output. EXPECT_FALSE((*e)->outputs_.empty()); - // Check that the edge's inputs have the edge as out edge. + // Check that the edge's inputs have the edge as out-edge. for (vector::const_iterator in_node = (*e)->inputs_.begin(); in_node != (*e)->inputs_.end(); ++in_node) { const vector& out_edges = (*in_node)->out_edges(); EXPECT_NE(std::find(out_edges.begin(), out_edges.end(), *e), out_edges.end()); } - // Check that the edge's outputs have the edge as in edge. + // Check that the edge's outputs have the edge as in-edge. for (vector::const_iterator out_node = (*e)->outputs_.begin(); out_node != (*e)->outputs_.end(); ++out_node) { EXPECT_EQ((*out_node)->in_edge(), *e); } } + + // The union of all in- and out-edges of each nodes should be exactly edges_. + set node_edge_set; + for (State::Paths::const_iterator p = state.paths_.begin(); + p != state.paths_.end(); ++p) { + const Node* n = p->second; + if (n->in_edge()) + node_edge_set.insert(n->in_edge()); + node_edge_set.insert(n->out_edges().begin(), n->out_edges().end()); + } + set edge_set(state.edges_.begin(), state.edges_.end()); + EXPECT_EQ(node_edge_set, edge_set); } void VirtualFileSystem::Create(const string& path, -- cgit v0.12 From eb7167d456b8ef2dad3846ca2ba6438b060518c9 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 18 Mar 2015 16:54:16 -0400 Subject: Don't crash on cyclic references between rule bindings. Fixes #902. This dynamically detects cycles. I like this approach less than detecting them statically when parsing rules [1], but it has the advantage that it doesn't break existing ninja manifest files. It has the disadvantage that it slows down manifest_parser_perftest by 3.9%. 
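As an illustration of the kind of input this guards against (the binding names are
arbitrary; any mutually recursive rule variables hit the same path), a manifest
along these lines

    rule broken
      command = echo $rspfile
      rspfile = $command
    build out: broken in

could previously crash ninja when the command was evaluated (issue #902); with this
change it aborts with a "cycle in rule variables: ..." fatal error naming the
variables in the cycle.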
1: https://github.com/martine/ninja/commit/cc6f54d6d436047 --- src/graph.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/graph.cc b/src/graph.cc index b19dc85..41055ec 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -226,6 +226,7 @@ struct EdgeEnv : public Env { vector::iterator end, char sep); + vector lookups_; Edge* edge_; EscapeKind escape_in_out_; }; @@ -243,8 +244,19 @@ string EdgeEnv::LookupVariable(const string& var) { ' '); } + vector::const_iterator it; + if ((it = find(lookups_.begin(), lookups_.end(), var)) != lookups_.end()) { + string cycle; + for (; it != lookups_.end(); ++it) + cycle.append(*it + " -> "); + cycle.append(var); + Fatal(("cycle in rule variables: " + cycle).c_str()); + } + // See notes on BindingEnv::LookupWithFallback. const EvalString* eval = edge_->rule_->GetBinding(var); + if (eval) + lookups_.push_back(var); return edge_->env_->LookupWithFallback(var, eval, this); } -- cgit v0.12 From 4d44291739222e453421a237ff77d9e5daaa3dd4 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Fri, 20 Mar 2015 21:11:31 -0700 Subject: Recover slowdown for cyclic rule bindings fix. --- src/graph.cc | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/src/graph.cc b/src/graph.cc index 41055ec..d99c8bd 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -217,7 +217,7 @@ struct EdgeEnv : public Env { enum EscapeKind { kShellEscape, kDoNotEscape }; EdgeEnv(Edge* edge, EscapeKind escape) - : edge_(edge), escape_in_out_(escape) {} + : edge_(edge), escape_in_out_(escape), recursive_(false) {} virtual string LookupVariable(const string& var); /// Given a span of Nodes, construct a list of paths suitable for a command @@ -226,9 +226,11 @@ struct EdgeEnv : public Env { vector::iterator end, char sep); + private: vector lookups_; Edge* edge_; EscapeKind escape_in_out_; + bool recursive_; }; string EdgeEnv::LookupVariable(const string& var) { @@ -244,19 +246,25 @@ string EdgeEnv::LookupVariable(const string& var) { ' '); } - vector::const_iterator it; - if ((it = find(lookups_.begin(), lookups_.end(), var)) != lookups_.end()) { - string cycle; - for (; it != lookups_.end(); ++it) - cycle.append(*it + " -> "); - cycle.append(var); - Fatal(("cycle in rule variables: " + cycle).c_str()); + if (recursive_) { + vector::const_iterator it; + if ((it = find(lookups_.begin(), lookups_.end(), var)) != lookups_.end()) { + string cycle; + for (; it != lookups_.end(); ++it) + cycle.append(*it + " -> "); + cycle.append(var); + Fatal(("cycle in rule variables: " + cycle).c_str()); + } } // See notes on BindingEnv::LookupWithFallback. const EvalString* eval = edge_->rule_->GetBinding(var); - if (eval) + if (recursive_ && eval) lookups_.push_back(var); + + // In practice, variables defined on rules never use another rule variable. + // For performance, only start checking for cycles after the first lookup. + recursive_ = true; return edge_->env_->LookupWithFallback(var, eval, this); } -- cgit v0.12 From 9aab00003c62f8d6b8142e6ecfe8f0aeefc81f74 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Fri, 20 Mar 2015 21:21:14 -0700 Subject: Preallocate edge node vectors. ~1% faster. 
--- src/manifest_parser.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index 4e639d1..b747ad4 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -321,6 +321,7 @@ bool ManifestParser::ParseEdge(string* err) { edge->pool_ = pool; } + edge->outputs_.reserve(outs.size()); for (vector::iterator i = outs.begin(); i != outs.end(); ++i) { string path = i->Evaluate(env); string path_err; @@ -337,6 +338,7 @@ bool ManifestParser::ParseEdge(string* err) { return true; } + edge->inputs_.reserve(ins.size()); for (vector::iterator i = ins.begin(); i != ins.end(); ++i) { string path = i->Evaluate(env); string path_err; -- cgit v0.12 From 96d873e6efb5da29a683ff48294c2216a52e34c1 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Mon, 23 Mar 2015 18:16:36 -0700 Subject: Move warning emission on dupe edges from State to ManifestParser. This will make it easier to optionally make this an error (because ManifestParser has a way of printing errors), and it'll also make it easier to make the tests quiet again. No behavior change. --- src/manifest_parser.cc | 7 ++++++- src/state.cc | 12 ++++-------- src/state.h | 2 +- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index b747ad4..ede5adb 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -328,7 +328,12 @@ bool ManifestParser::ParseEdge(string* err) { unsigned int slash_bits; if (!CanonicalizePath(&path, &slash_bits, &path_err)) return lexer_.Error(path_err, err); - state_->AddOut(edge, path, slash_bits); + if (!state_->AddOut(edge, path, slash_bits)) { + Warning("multiple rules generate %s. " + "builds involving this target will not be correct; " + "continuing anyway", + path.c_str()); + } } if (edge->outputs_.empty()) { // All outputs of the edge are already created by other edges. Don't add diff --git a/src/state.cc b/src/state.cc index 0426b85..a70f211 100644 --- a/src/state.cc +++ b/src/state.cc @@ -140,17 +140,13 @@ void State::AddIn(Edge* edge, StringPiece path, unsigned int slash_bits) { node->AddOutEdge(edge); } -void State::AddOut(Edge* edge, StringPiece path, unsigned int slash_bits) { +bool State::AddOut(Edge* edge, StringPiece path, unsigned int slash_bits) { Node* node = GetNode(path, slash_bits); - if (node->in_edge()) { - Warning("multiple rules generate %s. " - "builds involving this target will not be correct; " - "continuing anyway", - path.AsString().c_str()); - return; - } + if (node->in_edge()) + return false; edge->outputs_.push_back(node); node->set_in_edge(edge); + return true; } bool State::AddDefault(StringPiece path, string* err) { diff --git a/src/state.h b/src/state.h index db0e3aa..5000138 100644 --- a/src/state.h +++ b/src/state.h @@ -97,7 +97,7 @@ struct State { Node* SpellcheckNode(const string& path); void AddIn(Edge* edge, StringPiece path, unsigned int slash_bits); - void AddOut(Edge* edge, StringPiece path, unsigned int slash_bits); + bool AddOut(Edge* edge, StringPiece path, unsigned int slash_bits); bool AddDefault(StringPiece path, string* error); /// Reset state. Keeps all nodes and edges, but restores them to the -- cgit v0.12 From d209733d66b5e537e2522891c51d34b5262b4de9 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Mon, 23 Mar 2015 18:18:01 -0700 Subject: Make tests quiet again. 
--- src/manifest_parser.cc | 12 +++++++----- src/manifest_parser.h | 2 ++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index ede5adb..638d751 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -25,7 +25,7 @@ #include "version.h" ManifestParser::ManifestParser(State* state, FileReader* file_reader) - : state_(state), file_reader_(file_reader) { + : state_(state), file_reader_(file_reader), quiet_(false) { env_ = &state->bindings_; } @@ -329,10 +329,12 @@ bool ManifestParser::ParseEdge(string* err) { if (!CanonicalizePath(&path, &slash_bits, &path_err)) return lexer_.Error(path_err, err); if (!state_->AddOut(edge, path, slash_bits)) { - Warning("multiple rules generate %s. " - "builds involving this target will not be correct; " - "continuing anyway", - path.c_str()); + if (!quiet_) { + Warning("multiple rules generate %s. " + "builds involving this target will not be correct; " + "continuing anyway", + path.c_str()); + } } } if (edge->outputs_.empty()) { diff --git a/src/manifest_parser.h b/src/manifest_parser.h index 5212f72..00a711d 100644 --- a/src/manifest_parser.h +++ b/src/manifest_parser.h @@ -39,6 +39,7 @@ struct ManifestParser { /// Parse a text string of input. Used by tests. bool ParseTest(const string& input, string* err) { + quiet_ = true; return Parse("input", input, err); } @@ -64,6 +65,7 @@ private: BindingEnv* env_; FileReader* file_reader_; Lexer lexer_; + bool quiet_; }; #endif // NINJA_MANIFEST_PARSER_H_ -- cgit v0.12 From 6bac2fba25df89df748c44de1d9d3b9f546d3aac Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Tue, 24 Mar 2015 13:37:11 -0700 Subject: Swap order of -k and -l in help output, to keep the list alphabetized. --- src/ninja.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ninja.cc b/src/ninja.cc index 48c7239..4987bb2 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -192,8 +192,8 @@ void Usage(const BuildConfig& config) { " -f FILE specify input build file [default=build.ninja]\n" "\n" " -j N run N jobs in parallel [default=%d, derived from CPUs available]\n" -" -l N do not start new jobs if the load average is greater than N\n" " -k N keep going until N jobs fail [default=1]\n" +" -l N do not start new jobs if the load average is greater than N\n" " -n dry run (don't run commands but act like they succeeded)\n" " -v show all command lines while building\n" "\n" -- cgit v0.12 From 92b74c340ab6397cb728391995f86dfdb39bb186 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Tue, 24 Mar 2015 14:16:07 -0700 Subject: Add an opt-in flag to make duplicate edges an error (`-w dupbuild=err`). This is step 1 on #931. Duplicated edges will become an error by default in the future. 
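As a rough illustration (an editor's sketch mirroring the DuplicateEdgeWithMultipleOutputsError test added below, not wording from the original patch): a manifest that declares the same output twice, e.g.

    rule cat
      command = cat $in > $out
    build out1 out2: cat in1
    build out1: cat in2

still loads with the usual "multiple rules generate out1" warning by default (now tagged [-w dupbuild=warn]), but invoking ninja with `-w dupbuild=err` turns the duplicate into a parse error: "multiple rules generate out1 [-w dupbuild=err]".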
--- src/manifest_parser.cc | 14 ++++++++++---- src/manifest_parser.h | 6 ++++-- src/manifest_parser_test.cc | 13 +++++++++++++ src/ninja.cc | 41 ++++++++++++++++++++++++++++++++++++++--- 4 files changed, 65 insertions(+), 9 deletions(-) diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index 638d751..e8c0436 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -24,8 +24,10 @@ #include "util.h" #include "version.h" -ManifestParser::ManifestParser(State* state, FileReader* file_reader) - : state_(state), file_reader_(file_reader), quiet_(false) { +ManifestParser::ManifestParser(State* state, FileReader* file_reader, + bool dupe_edge_should_err) + : state_(state), file_reader_(file_reader), + dupe_edge_should_err_(dupe_edge_should_err), quiet_(false) { env_ = &state->bindings_; } @@ -329,10 +331,14 @@ bool ManifestParser::ParseEdge(string* err) { if (!CanonicalizePath(&path, &slash_bits, &path_err)) return lexer_.Error(path_err, err); if (!state_->AddOut(edge, path, slash_bits)) { - if (!quiet_) { + if (dupe_edge_should_err_) { + lexer_.Error("multiple rules generate " + path + " [-w dupbuild=err]", + err); + return false; + } else if (!quiet_) { Warning("multiple rules generate %s. " "builds involving this target will not be correct; " - "continuing anyway", + "continuing anyway [-w dupbuild=warn]", path.c_str()); } } diff --git a/src/manifest_parser.h b/src/manifest_parser.h index 00a711d..f72cd6f 100644 --- a/src/manifest_parser.h +++ b/src/manifest_parser.h @@ -32,10 +32,11 @@ struct ManifestParser { virtual bool ReadFile(const string& path, string* content, string* err) = 0; }; - ManifestParser(State* state, FileReader* file_reader); + ManifestParser(State* state, FileReader* file_reader, + bool dupe_edge_should_err = false); /// Load and parse a file. - bool Load(const string& filename, string* err, Lexer* parent=NULL); + bool Load(const string& filename, string* err, Lexer* parent = NULL); /// Parse a text string of input. Used by tests. bool ParseTest(const string& input, string* err) { @@ -65,6 +66,7 @@ private: BindingEnv* env_; FileReader* file_reader_; Lexer lexer_; + bool dupe_edge_should_err_; bool quiet_; }; diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc index 7e72b34..8f7b575 100644 --- a/src/manifest_parser_test.cc +++ b/src/manifest_parser_test.cc @@ -364,6 +364,19 @@ TEST_F(ParserTest, NoDeadPointerFromDuplicateEdge) { // That's all the checking that this test needs. } +TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputsError) { + const char kInput[] = +"rule cat\n" +" command = cat $in > $out\n" +"build out1 out2: cat in1\n" +"build out1: cat in2\n" +"build final: cat out1\n"; + ManifestParser parser(&state, this, /*dupe_edges_should_err=*/true); + string err; + EXPECT_FALSE(parser.ParseTest(kInput, &err)); + EXPECT_EQ("input:5: multiple rules generate out1 [-w dupbuild=err]\n", err); +} + TEST_F(ParserTest, ReservedWords) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule build\n" diff --git a/src/ninja.cc b/src/ninja.cc index 4987bb2..1e43137 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -64,6 +64,9 @@ struct Options { /// Tool to run rather than building. const Tool* tool; + + /// Whether duplicate rules for one target should warn or print an error. 
+ bool dupe_edges_should_err; }; /// The Ninja main() loads up a series of data structures; various tools need @@ -199,7 +202,8 @@ void Usage(const BuildConfig& config) { "\n" " -d MODE enable debugging (use -d list to list modes)\n" " -t TOOL run a subtool (use -t list to list subtools)\n" -" terminates toplevel options; further flags are passed to the tool\n", +" terminates toplevel options; further flags are passed to the tool\n" +" -w FLAG adjust warnings (use -w list to list warnings)\n", kNinjaVersion, config.parallelism); } @@ -792,6 +796,32 @@ bool DebugEnable(const string& name) { } } +/// Set a warning flag. Returns false if Ninja should exit instead of +/// continuing. +bool WarningEnable(const string& name, Options* options) { + if (name == "list") { + printf("warning flags:\n" +" dupbuild={err,warn} multiple build lines for one target\n"); + return false; + } else if (name == "dupbuild=err") { + options->dupe_edges_should_err = true; + return true; + } else if (name == "dupbuild=warn") { + options->dupe_edges_should_err = false; + return true; + } else { + const char* suggestion = + SpellcheckString(name.c_str(), "dupbuild=err", "dupbuild=warn", NULL); + if (suggestion) { + Error("unknown warning flag '%s', did you mean '%s'?", + name.c_str(), suggestion); + } else { + Error("unknown warning flag '%s'", name.c_str()); + } + return false; + } +} + bool NinjaMain::OpenBuildLog(bool recompact_only) { string log_path = ".ninja_log"; if (!build_dir_.empty()) @@ -962,7 +992,7 @@ int ReadFlags(int* argc, char*** argv, int opt; while (!options->tool && - (opt = getopt_long(*argc, *argv, "d:f:j:k:l:nt:vC:h", kLongOptions, + (opt = getopt_long(*argc, *argv, "d:f:j:k:l:nt:vw:C:h", kLongOptions, NULL)) != -1) { switch (opt) { case 'd': @@ -1011,6 +1041,10 @@ int ReadFlags(int* argc, char*** argv, case 'v': config->verbosity = BuildConfig::VERBOSE; break; + case 'w': + if (!WarningEnable(optarg, options)) + return 1; + break; case 'C': options->working_dir = optarg; break; @@ -1067,7 +1101,8 @@ int real_main(int argc, char** argv) { NinjaMain ninja(ninja_command, config); RealFileReader file_reader; - ManifestParser parser(&ninja.state_, &file_reader); + ManifestParser parser(&ninja.state_, &file_reader, + options.dupe_edges_should_err); string err; if (!parser.Load(options.input_file, &err)) { Error("%s", err.c_str()); -- cgit v0.12 From 0cfa7ce14d28bef390070b321fcccca579ba2559 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 25 Mar 2015 12:19:14 -0700 Subject: make notes on how to update docs more detailed --- RELEASING | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/RELEASING b/RELEASING index fd4affb..4d08253 100644 --- a/RELEASING +++ b/RELEASING @@ -23,6 +23,9 @@ Make announcement on mailing list: 1. copy old mail Update website: -(note to self: website is now in github.com/martine/martine.github.io) -1. rebuild manual, put in place on website -2. update home page mention of latest version. +1. Make sure your ninja checkout is on the v1.5.0 tag +2. Clone https://github.com/martine/martine.github.io +3. In that repo, `cd ninja && ./update-docs.sh` +4. Update index.html with newest version and link to release notes +5. git commit -m 'run update-docs.sh, 1.5.0 release' +6. git push origin master -- cgit v0.12 From 3beebde51a2089ecb01820f1428efe0263deaeea Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Tue, 31 Mar 2015 08:12:12 -0700 Subject: Let Stat() have an err outparam instead of writing to stderr. Also check for Stat() failure in a few more places. 
This way, ninja doesn't print two "ninja: error: " lines if stat() fails during a build. It also makes it easier to keep the stat tests quiet. Every caller of Stat() needs to explicitly log the error string if that's desired. --- src/build.cc | 27 +++++++----- src/build_test.cc | 9 ++-- src/clean.cc | 6 ++- src/clean_test.cc | 85 ++++++++++++++++++++----------------- src/disk_interface.cc | 50 ++++++++++------------ src/disk_interface.h | 11 ++--- src/disk_interface_test.cc | 93 +++++++++++++++++++++++++++-------------- src/graph.cc | 8 +--- src/manifest_parser_perftest.cc | 19 +++++---- src/ninja.cc | 13 +++++- src/test.cc | 6 ++- src/test.h | 3 +- 12 files changed, 192 insertions(+), 138 deletions(-) diff --git a/src/build.cc b/src/build.cc index 1e10c7c..d898929 100644 --- a/src/build.cc +++ b/src/build.cc @@ -560,10 +560,12 @@ void Builder::Cleanup() { // need to rebuild an output because of a modified header file // mentioned in a depfile, and the command touches its depfile // but is interrupted before it touches its output file.) - if (!depfile.empty() || - (*o)->mtime() != disk_interface_->Stat((*o)->path())) { + string err; + TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), &err); + if (new_mtime == -1) // Log and ignore Stat() errors. + Error("%s", err.c_str()); + if (!depfile.empty() || (*o)->mtime() != new_mtime) disk_interface_->RemoveFile((*o)->path()); - } } if (!depfile.empty()) disk_interface_->RemoveFile(depfile); @@ -760,7 +762,9 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) { for (vector::iterator o = edge->outputs_.begin(); o != edge->outputs_.end(); ++o) { - TimeStamp new_mtime = disk_interface_->Stat((*o)->path()); + TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err); + if (new_mtime == -1) + return false; if ((*o)->mtime() == new_mtime) { // The rule command did not change the output. Propagate the clean // state through the build graph. @@ -776,14 +780,18 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) { // (existing) non-order-only input or the depfile. for (vector::iterator i = edge->inputs_.begin(); i != edge->inputs_.end() - edge->order_only_deps_; ++i) { - TimeStamp input_mtime = disk_interface_->Stat((*i)->path()); + TimeStamp input_mtime = disk_interface_->Stat((*i)->path(), err); + if (input_mtime == -1) + return false; if (input_mtime > restat_mtime) restat_mtime = input_mtime; } string depfile = edge->GetUnescapedDepfile(); if (restat_mtime != 0 && deps_type.empty() && !depfile.empty()) { - TimeStamp depfile_mtime = disk_interface_->Stat(depfile); + TimeStamp depfile_mtime = disk_interface_->Stat(depfile, err); + if (depfile_mtime == -1) + return false; if (depfile_mtime > restat_mtime) restat_mtime = depfile_mtime; } @@ -812,12 +820,9 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) { if (!deps_type.empty() && !config_.dry_run) { assert(edge->outputs_.size() == 1 && "should have been rejected by parser"); Node* out = edge->outputs_[0]; - TimeStamp deps_mtime = disk_interface_->Stat(out->path()); - if (deps_mtime == -1) { - // TODO: Let DiskInterface::Stat() take err instead of it calling Error(). 
- *err = "stat failed"; + TimeStamp deps_mtime = disk_interface_->Stat(out->path(), err); + if (deps_mtime == -1) return false; - } if (!scan_.deps_log()->RecordDeps(out, deps_mtime, deps_nodes)) { *err = string("Error writing to deps log: ") + strerror(errno); return false; diff --git a/src/build_test.cc b/src/build_test.cc index 0cdcd87..13d1e7e 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -1483,7 +1483,7 @@ TEST_F(BuildTest, InterruptCleanup) { EXPECT_FALSE(builder_.Build(&err)); EXPECT_EQ("interrupted by user", err); builder_.Cleanup(); - EXPECT_GT(fs_.Stat("out1"), 0); + EXPECT_GT(fs_.Stat("out1", &err), 0); err = ""; // A touched output of an interrupted command should be deleted. @@ -1492,7 +1492,7 @@ TEST_F(BuildTest, InterruptCleanup) { EXPECT_FALSE(builder_.Build(&err)); EXPECT_EQ("interrupted by user", err); builder_.Cleanup(); - EXPECT_EQ(0, fs_.Stat("out2")); + EXPECT_EQ(0, fs_.Stat("out2", &err)); } TEST_F(BuildTest, StatFailureAbortsBuild) { @@ -1503,6 +1503,7 @@ TEST_F(BuildTest, StatFailureAbortsBuild) { // This simulates a stat failure: fs_.files_[kTooLongToStat].mtime = -1; + fs_.files_[kTooLongToStat].stat_error = "stat failed"; string err; EXPECT_FALSE(builder_.AddTarget(kTooLongToStat, &err)); @@ -1626,7 +1627,7 @@ TEST_F(BuildWithDepsLogTest, Straightforward) { EXPECT_EQ("", err); // The deps file should have been removed. - EXPECT_EQ(0, fs_.Stat("in1.d")); + EXPECT_EQ(0, fs_.Stat("in1.d", &err)); // Recreate it for the next step. fs_.Create("in1.d", "out: in2"); deps_log.Close(); @@ -1706,7 +1707,7 @@ TEST_F(BuildWithDepsLogTest, ObsoleteDeps) { fs_.Create("out", ""); // The deps file should have been removed, so no need to timestamp it. - EXPECT_EQ(0, fs_.Stat("in1.d")); + EXPECT_EQ(0, fs_.Stat("in1.d", &err)); { State state; diff --git a/src/clean.cc b/src/clean.cc index 7b044c5..1d6ba9e 100644 --- a/src/clean.cc +++ b/src/clean.cc @@ -49,7 +49,11 @@ int Cleaner::RemoveFile(const string& path) { } bool Cleaner::FileExists(const string& path) { - return disk_interface_->Stat(path) > 0; + string err; + TimeStamp mtime = disk_interface_->Stat(path, &err); + if (mtime == -1) + Error("%s", err.c_str()); + return mtime > 0; // Treat Stat() errors as "file does not exist". } void Cleaner::Report(const string& path) { diff --git a/src/clean_test.cc b/src/clean_test.cc index 5869bbb..395343b 100644 --- a/src/clean_test.cc +++ b/src/clean_test.cc @@ -44,10 +44,11 @@ TEST_F(CleanTest, CleanAll) { EXPECT_EQ(4u, fs_.files_removed_.size()); // Check they are removed. - EXPECT_EQ(0, fs_.Stat("in1")); - EXPECT_EQ(0, fs_.Stat("out1")); - EXPECT_EQ(0, fs_.Stat("in2")); - EXPECT_EQ(0, fs_.Stat("out2")); + string err; + EXPECT_EQ(0, fs_.Stat("in1", &err)); + EXPECT_EQ(0, fs_.Stat("out1", &err)); + EXPECT_EQ(0, fs_.Stat("in2", &err)); + EXPECT_EQ(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); EXPECT_EQ(0, cleaner.CleanAll()); @@ -75,10 +76,11 @@ TEST_F(CleanTest, CleanAllDryRun) { EXPECT_EQ(0u, fs_.files_removed_.size()); // Check they are not removed. - EXPECT_NE(0, fs_.Stat("in1")); - EXPECT_NE(0, fs_.Stat("out1")); - EXPECT_NE(0, fs_.Stat("in2")); - EXPECT_NE(0, fs_.Stat("out2")); + string err; + EXPECT_LT(0, fs_.Stat("in1", &err)); + EXPECT_LT(0, fs_.Stat("out1", &err)); + EXPECT_LT(0, fs_.Stat("in2", &err)); + EXPECT_LT(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); EXPECT_EQ(0, cleaner.CleanAll()); @@ -105,10 +107,11 @@ TEST_F(CleanTest, CleanTarget) { EXPECT_EQ(2u, fs_.files_removed_.size()); // Check they are removed. 
- EXPECT_EQ(0, fs_.Stat("in1")); - EXPECT_EQ(0, fs_.Stat("out1")); - EXPECT_NE(0, fs_.Stat("in2")); - EXPECT_NE(0, fs_.Stat("out2")); + string err; + EXPECT_EQ(0, fs_.Stat("in1", &err)); + EXPECT_EQ(0, fs_.Stat("out1", &err)); + EXPECT_LT(0, fs_.Stat("in2", &err)); + EXPECT_LT(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); ASSERT_EQ(0, cleaner.CleanTarget("out1")); @@ -135,11 +138,12 @@ TEST_F(CleanTest, CleanTargetDryRun) { EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); - // Check they are removed. - EXPECT_NE(0, fs_.Stat("in1")); - EXPECT_NE(0, fs_.Stat("out1")); - EXPECT_NE(0, fs_.Stat("in2")); - EXPECT_NE(0, fs_.Stat("out2")); + // Check they are not removed. + string err; + EXPECT_LT(0, fs_.Stat("in1", &err)); + EXPECT_LT(0, fs_.Stat("out1", &err)); + EXPECT_LT(0, fs_.Stat("in2", &err)); + EXPECT_LT(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); ASSERT_EQ(0, cleaner.CleanTarget("out1")); @@ -168,10 +172,11 @@ TEST_F(CleanTest, CleanRule) { EXPECT_EQ(2u, fs_.files_removed_.size()); // Check they are removed. - EXPECT_EQ(0, fs_.Stat("in1")); - EXPECT_NE(0, fs_.Stat("out1")); - EXPECT_EQ(0, fs_.Stat("in2")); - EXPECT_NE(0, fs_.Stat("out2")); + string err; + EXPECT_EQ(0, fs_.Stat("in1", &err)); + EXPECT_LT(0, fs_.Stat("out1", &err)); + EXPECT_EQ(0, fs_.Stat("in2", &err)); + EXPECT_LT(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); ASSERT_EQ(0, cleaner.CleanRule("cat_e")); @@ -200,11 +205,12 @@ TEST_F(CleanTest, CleanRuleDryRun) { EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); - // Check they are removed. - EXPECT_NE(0, fs_.Stat("in1")); - EXPECT_NE(0, fs_.Stat("out1")); - EXPECT_NE(0, fs_.Stat("in2")); - EXPECT_NE(0, fs_.Stat("out2")); + // Check they are not removed. + string err; + EXPECT_LT(0, fs_.Stat("in1", &err)); + EXPECT_LT(0, fs_.Stat("out1", &err)); + EXPECT_LT(0, fs_.Stat("in2", &err)); + EXPECT_LT(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); ASSERT_EQ(0, cleaner.CleanRule("cat_e")); @@ -328,12 +334,13 @@ TEST_F(CleanTest, CleanRsp) { EXPECT_EQ(6u, fs_.files_removed_.size()); // Check they are removed. - EXPECT_EQ(0, fs_.Stat("in1")); - EXPECT_EQ(0, fs_.Stat("out1")); - EXPECT_EQ(0, fs_.Stat("in2")); - EXPECT_EQ(0, fs_.Stat("out2")); - EXPECT_EQ(0, fs_.Stat("in2.rsp")); - EXPECT_EQ(0, fs_.Stat("out2.rsp")); + string err; + EXPECT_EQ(0, fs_.Stat("in1", &err)); + EXPECT_EQ(0, fs_.Stat("out1", &err)); + EXPECT_EQ(0, fs_.Stat("in2", &err)); + EXPECT_EQ(0, fs_.Stat("out2", &err)); + EXPECT_EQ(0, fs_.Stat("in2.rsp", &err)); + EXPECT_EQ(0, fs_.Stat("out2.rsp", &err)); } TEST_F(CleanTest, CleanFailure) { @@ -345,6 +352,7 @@ TEST_F(CleanTest, CleanFailure) { } TEST_F(CleanTest, CleanPhony) { + string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build phony: phony t1 t2\n" "build t1: cat\n" @@ -358,7 +366,7 @@ TEST_F(CleanTest, CleanPhony) { Cleaner cleaner(&state_, config_, &fs_); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(2, cleaner.cleaned_files_count()); - EXPECT_NE(0, fs_.Stat("phony")); + EXPECT_LT(0, fs_.Stat("phony", &err)); fs_.Create("t1", ""); fs_.Create("t2", ""); @@ -366,7 +374,7 @@ TEST_F(CleanTest, CleanPhony) { // Check that CleanTarget does not remove "phony". 
EXPECT_EQ(0, cleaner.CleanTarget("phony")); EXPECT_EQ(2, cleaner.cleaned_files_count()); - EXPECT_NE(0, fs_.Stat("phony")); + EXPECT_LT(0, fs_.Stat("phony", &err)); } TEST_F(CleanTest, CleanDepFileAndRspFileWithSpaces) { @@ -391,8 +399,9 @@ TEST_F(CleanTest, CleanDepFileAndRspFileWithSpaces) { EXPECT_EQ(4, cleaner.cleaned_files_count()); EXPECT_EQ(4u, fs_.files_removed_.size()); - EXPECT_EQ(0, fs_.Stat("out 1")); - EXPECT_EQ(0, fs_.Stat("out 2")); - EXPECT_EQ(0, fs_.Stat("out 1.d")); - EXPECT_EQ(0, fs_.Stat("out 2.rsp")); + string err; + EXPECT_EQ(0, fs_.Stat("out 1", &err)); + EXPECT_EQ(0, fs_.Stat("out 2", &err)); + EXPECT_EQ(0, fs_.Stat("out 1.d", &err)); + EXPECT_EQ(0, fs_.Stat("out 2.rsp", &err)); } diff --git a/src/disk_interface.cc b/src/disk_interface.cc index 9810708..70282c0 100644 --- a/src/disk_interface.cc +++ b/src/disk_interface.cc @@ -23,6 +23,7 @@ #include #ifdef _WIN32 +#include #include #include // _mkdir #endif @@ -67,16 +68,13 @@ TimeStamp TimeStampFromFileTime(const FILETIME& filetime) { return (TimeStamp)mtime; } -TimeStamp StatSingleFile(const string& path, bool quiet) { +TimeStamp StatSingleFile(const string& path, string* err) { WIN32_FILE_ATTRIBUTE_DATA attrs; if (!GetFileAttributesEx(path.c_str(), GetFileExInfoStandard, &attrs)) { - DWORD err = GetLastError(); - if (err == ERROR_FILE_NOT_FOUND || err == ERROR_PATH_NOT_FOUND) + DWORD win_err = GetLastError(); + if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) return 0; - if (!quiet) { - Error("GetFileAttributesEx(%s): %s", path.c_str(), - GetLastErrorString().c_str()); - } + *err = "GetFileAttributesEx(" + path + "): " + GetLastErrorString(); return -1; } return TimeStampFromFileTime(attrs.ftLastWriteTime); @@ -98,7 +96,7 @@ bool IsWindows7OrLater() { #endif bool StatAllFilesInDir(const string& dir, map* stamps, - bool quiet) { + string* err) { // FindExInfoBasic is 30% faster than FindExInfoStandard. static bool can_use_basic_info = IsWindows7OrLater(); // This is not in earlier SDKs. @@ -111,13 +109,10 @@ bool StatAllFilesInDir(const string& dir, map* stamps, FindExSearchNameMatch, NULL, 0); if (find_handle == INVALID_HANDLE_VALUE) { - DWORD err = GetLastError(); - if (err == ERROR_FILE_NOT_FOUND || err == ERROR_PATH_NOT_FOUND) + DWORD win_err = GetLastError(); + if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) return true; - if (!quiet) { - Error("FindFirstFileExA(%s): %s", dir.c_str(), - GetLastErrorString().c_str()); - } + *err = "FindFirstFileExA(" + dir + "): " + GetLastErrorString(); return false; } do { @@ -139,9 +134,12 @@ bool DiskInterface::MakeDirs(const string& path) { string dir = DirName(path); if (dir.empty()) return true; // Reached root; assume it's there. - TimeStamp mtime = Stat(dir); - if (mtime < 0) - return false; // Error. + string err; + TimeStamp mtime = Stat(dir, &err); + if (mtime < 0) { + Error("%s", err.c_str()); + return false; + } if (mtime > 0) return true; // Exists already; we're done. 
@@ -154,19 +152,19 @@ bool DiskInterface::MakeDirs(const string& path) { // RealDiskInterface ----------------------------------------------------------- -TimeStamp RealDiskInterface::Stat(const string& path) const { +TimeStamp RealDiskInterface::Stat(const string& path, string* err) const { #ifdef _WIN32 // MSDN: "Naming Files, Paths, and Namespaces" // http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx if (!path.empty() && path[0] != '\\' && path.size() > MAX_PATH) { - if (!quiet_) { - Error("Stat(%s): Filename longer than %i characters", - path.c_str(), MAX_PATH); - } + ostringstream err_stream; + err_stream << "Stat(" << path << "): Filename longer than " << MAX_PATH + << " characters"; + *err = err_stream.str(); return -1; } if (!use_cache_) - return StatSingleFile(path, quiet_); + return StatSingleFile(path, err); string dir = DirName(path); string base(path.substr(dir.size() ? dir.size() + 1 : 0)); @@ -177,7 +175,7 @@ TimeStamp RealDiskInterface::Stat(const string& path) const { Cache::iterator ci = cache_.find(dir); if (ci == cache_.end()) { ci = cache_.insert(make_pair(dir, DirCache())).first; - if (!StatAllFilesInDir(dir.empty() ? "." : dir, &ci->second, quiet_)) { + if (!StatAllFilesInDir(dir.empty() ? "." : dir, &ci->second, err)) { cache_.erase(ci); return -1; } @@ -189,9 +187,7 @@ TimeStamp RealDiskInterface::Stat(const string& path) const { if (stat(path.c_str(), &st) < 0) { if (errno == ENOENT || errno == ENOTDIR) return 0; - if (!quiet_) { - Error("stat(%s): %s", path.c_str(), strerror(errno)); - } + *err = "stat(" + path + "): " + strerror(errno); return -1; } return st.st_mtime; diff --git a/src/disk_interface.h b/src/disk_interface.h index a13bced..b61d192 100644 --- a/src/disk_interface.h +++ b/src/disk_interface.h @@ -30,7 +30,7 @@ struct DiskInterface { /// stat() a file, returning the mtime, or 0 if missing and -1 on /// other errors. - virtual TimeStamp Stat(const string& path) const = 0; + virtual TimeStamp Stat(const string& path, string* err) const = 0; /// Create a directory, returning false on failure. virtual bool MakeDir(const string& path) = 0; @@ -56,21 +56,18 @@ struct DiskInterface { /// Implementation of DiskInterface that actually hits the disk. struct RealDiskInterface : public DiskInterface { - RealDiskInterface() : quiet_(false) + RealDiskInterface() #ifdef _WIN32 - , use_cache_(false) + : use_cache_(false) #endif {} virtual ~RealDiskInterface() {} - virtual TimeStamp Stat(const string& path) const; + virtual TimeStamp Stat(const string& path, string* err) const; virtual bool MakeDir(const string& path); virtual bool WriteFile(const string& path, const string& contents); virtual string ReadFile(const string& path, string* err); virtual int RemoveFile(const string& path); - /// Whether to print on errors. Used to make a test quieter. - bool quiet_; - /// Whether stat information can be cached. Only has an effect on Windows. void AllowStatCache(bool allow); diff --git a/src/disk_interface_test.cc b/src/disk_interface_test.cc index 658fffd..9d210b4 100644 --- a/src/disk_interface_test.cc +++ b/src/disk_interface_test.cc @@ -47,49 +47,64 @@ struct DiskInterfaceTest : public testing::Test { }; TEST_F(DiskInterfaceTest, StatMissingFile) { - EXPECT_EQ(0, disk_.Stat("nosuchfile")); + string err; + EXPECT_EQ(0, disk_.Stat("nosuchfile", &err)); + EXPECT_EQ("", err); // On Windows, the errno for a file in a nonexistent directory // is different. 
- EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile")); + EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err)); + EXPECT_EQ("", err); // On POSIX systems, the errno is different if a component of the // path prefix is not a directory. ASSERT_TRUE(Touch("notadir")); - EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile")); + EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile", &err)); + EXPECT_EQ("", err); } TEST_F(DiskInterfaceTest, StatBadPath) { - disk_.quiet_ = true; + string err; #ifdef _WIN32 string bad_path("cc:\\foo"); - EXPECT_EQ(-1, disk_.Stat(bad_path)); + EXPECT_EQ(-1, disk_.Stat(bad_path, &err)); + EXPECT_NE("", err); #else string too_long_name(512, 'x'); - EXPECT_EQ(-1, disk_.Stat(too_long_name)); + EXPECT_EQ(-1, disk_.Stat(too_long_name, &err)); + EXPECT_NE("", err); #endif - disk_.quiet_ = false; } TEST_F(DiskInterfaceTest, StatExistingFile) { + string err; ASSERT_TRUE(Touch("file")); - EXPECT_GT(disk_.Stat("file"), 1); + EXPECT_GT(disk_.Stat("file", &err), 1); + EXPECT_EQ("", err); } TEST_F(DiskInterfaceTest, StatExistingDir) { + string err; ASSERT_TRUE(disk_.MakeDir("subdir")); ASSERT_TRUE(disk_.MakeDir("subdir/subsubdir")); - EXPECT_GT(disk_.Stat("."), 1); - EXPECT_GT(disk_.Stat("subdir"), 1); - EXPECT_GT(disk_.Stat("subdir/subsubdir"), 1); + EXPECT_GT(disk_.Stat(".", &err), 1); + EXPECT_EQ("", err); + EXPECT_GT(disk_.Stat("subdir", &err), 1); + EXPECT_EQ("", err); + EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1); + EXPECT_EQ("", err); - EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/.")); - EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/subsubdir/..")); - EXPECT_EQ(disk_.Stat("subdir/subsubdir"), disk_.Stat("subdir/subsubdir/.")); + EXPECT_EQ(disk_.Stat("subdir", &err), + disk_.Stat("subdir/.", &err)); + EXPECT_EQ(disk_.Stat("subdir", &err), + disk_.Stat("subdir/subsubdir/..", &err)); + EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err), + disk_.Stat("subdir/subsubdir/.", &err)); } #ifdef _WIN32 TEST_F(DiskInterfaceTest, StatCache) { + string err; disk_.AllowStatCache(true); ASSERT_TRUE(Touch("file1")); @@ -100,27 +115,43 @@ TEST_F(DiskInterfaceTest, StatCache) { ASSERT_TRUE(Touch("subdir\\SUBFILE2")); ASSERT_TRUE(Touch("subdir\\SUBFILE3")); - EXPECT_GT(disk_.Stat("FIle1"), 1); - EXPECT_GT(disk_.Stat("file1"), 1); + EXPECT_GT(disk_.Stat("FIle1", &err), 1); + EXPECT_EQ("", err); + EXPECT_GT(disk_.Stat("file1", &err), 1); + EXPECT_EQ("", err); - EXPECT_GT(disk_.Stat("subdir/subfile2"), 1); - EXPECT_GT(disk_.Stat("sUbdir\\suBFile1"), 1); + EXPECT_GT(disk_.Stat("subdir/subfile2", &err), 1); + EXPECT_EQ("", err); + EXPECT_GT(disk_.Stat("sUbdir\\suBFile1", &err), 1); + EXPECT_EQ("", err); - EXPECT_GT(disk_.Stat("."), 1); - EXPECT_GT(disk_.Stat("subdir"), 1); - EXPECT_GT(disk_.Stat("subdir/subsubdir"), 1); + EXPECT_GT(disk_.Stat(".", &err), 1); + EXPECT_EQ("", err); + EXPECT_GT(disk_.Stat("subdir", &err), 1); + EXPECT_EQ("", err); + EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1); + EXPECT_EQ("", err); - EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/.")); - EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/subsubdir/..")); - EXPECT_EQ(disk_.Stat("subdir/subsubdir"), disk_.Stat("subdir/subsubdir/.")); + EXPECT_EQ(disk_.Stat("subdir", &err), + disk_.Stat("subdir/.", &err)); + EXPECT_EQ("", err); + EXPECT_EQ(disk_.Stat("subdir", &err), + disk_.Stat("subdir/subsubdir/..", &err)); + EXPECT_EQ("", err); + EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err), + disk_.Stat("subdir/subsubdir/.", &err)); + EXPECT_EQ("", err); // Test error cases. 
- disk_.quiet_ = true; string bad_path("cc:\\foo"); - EXPECT_EQ(-1, disk_.Stat(bad_path)); - EXPECT_EQ(-1, disk_.Stat(bad_path)); - EXPECT_EQ(0, disk_.Stat("nosuchfile")); - EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile")); + EXPECT_EQ(-1, disk_.Stat(bad_path, &err)); + EXPECT_NE("", err); err.clear(); + EXPECT_EQ(-1, disk_.Stat(bad_path, &err)); + EXPECT_NE("", err); err.clear(); + EXPECT_EQ(0, disk_.Stat("nosuchfile", &err)); + EXPECT_EQ("", err); + EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err)); + EXPECT_EQ("", err); } #endif @@ -168,7 +199,7 @@ struct StatTest : public StateTestWithBuiltinRules, StatTest() : scan_(&state_, NULL, NULL, this) {} // DiskInterface implementation. - virtual TimeStamp Stat(const string& path) const; + virtual TimeStamp Stat(const string& path, string* err) const; virtual bool WriteFile(const string& path, const string& contents) { assert(false); return true; @@ -191,7 +222,7 @@ struct StatTest : public StateTestWithBuiltinRules, mutable vector stats_; }; -TimeStamp StatTest::Stat(const string& path) const { +TimeStamp StatTest::Stat(const string& path, string* err) const { stats_.push_back(path); map::const_iterator i = mtimes_.find(path); if (i == mtimes_.end()) diff --git a/src/graph.cc b/src/graph.cc index d99c8bd..fcfeba0 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -29,13 +29,7 @@ bool Node::Stat(DiskInterface* disk_interface, string* err) { METRIC_RECORD("node stat"); - mtime_ = disk_interface->Stat(path_); - if (mtime_ == -1) { - // TODO: Let DiskInterface::Stat() take err instead of it calling Error(). - *err = "stat failed"; - return false; - } - return true; + return (mtime_ = disk_interface->Stat(path_, err)) != -1; } bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { diff --git a/src/manifest_parser_perftest.cc b/src/manifest_parser_perftest.cc index ca62fb2..6b56ab0 100644 --- a/src/manifest_parser_perftest.cc +++ b/src/manifest_parser_perftest.cc @@ -42,15 +42,19 @@ struct RealFileReader : public ManifestParser::FileReader { } }; -bool WriteFakeManifests(const string& dir) { +bool WriteFakeManifests(const string& dir, string* err) { RealDiskInterface disk_interface; - if (disk_interface.Stat(dir + "/build.ninja") > 0) - return true; + TimeStamp mtime = disk_interface.Stat(dir + "/build.ninja", err); + if (mtime != 0) // 0 means that the file doesn't exist yet. 
+ return mtime != -1; + string command = "python misc/write_fake_manifests.py " + dir; printf("Creating manifest data..."); fflush(stdout); - int err = system(("python misc/write_fake_manifests.py " + dir).c_str()); + int exit_code = system(command.c_str()); printf("done.\n"); - return err == 0; + if (exit_code != 0) + *err = "Failed to run " + command; + return exit_code == 0; } int LoadManifests(bool measure_command_evaluation) { @@ -93,8 +97,9 @@ int main(int argc, char* argv[]) { const char kManifestDir[] = "build/manifest_perftest"; - if (!WriteFakeManifests(kManifestDir)) { - fprintf(stderr, "Failed to write test data\n"); + string err; + if (!WriteFakeManifests(kManifestDir, &err)) { + fprintf(stderr, "Failed to write test data: %s\n", err.c_str()); return 1; } diff --git a/src/ninja.cc b/src/ninja.cc index 1e43137..e5bf98d 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -143,6 +143,8 @@ struct NinjaMain : public BuildLogUser { virtual bool IsPathDead(StringPiece s) const { Node* n = state_.LookupNode(s); + if (!n || !n->in_edge()) + return false; // Just checking n isn't enough: If an old output is both in the build log // and in the deps log, it will have a Node object in state_. (It will also // have an in edge if one of its inputs is another output that's in the deps @@ -152,7 +154,11 @@ struct NinjaMain : public BuildLogUser { // which seems good enough for this corner case.) // Do keep entries around for files which still exist on disk, for // generators that want to use this information. - return (!n || !n->in_edge()) && disk_interface_.Stat(s.AsString()) == 0; + string err; + TimeStamp mtime = disk_interface_.Stat(s.AsString(), &err); + if (mtime == -1) + Error("%s", err.c_str()); // Log and ignore Stat() errors. + return mtime == 0; } }; @@ -481,7 +487,10 @@ int NinjaMain::ToolDeps(int argc, char** argv) { continue; } - TimeStamp mtime = disk_interface.Stat((*it)->path()); + string err; + TimeStamp mtime = disk_interface.Stat((*it)->path(), &err); + if (mtime == -1) + Error("%s", err.c_str()); // Log and ignore Stat() errors; printf("%s: #deps %d, deps mtime %d (%s)\n", (*it)->path().c_str(), deps->node_count, deps->mtime, (!mtime || mtime > deps->mtime ? "STALE":"VALID")); diff --git a/src/test.cc b/src/test.cc index ddecd3d..aed8db7 100644 --- a/src/test.cc +++ b/src/test.cc @@ -145,10 +145,12 @@ void VirtualFileSystem::Create(const string& path, files_created_.insert(path); } -TimeStamp VirtualFileSystem::Stat(const string& path) const { +TimeStamp VirtualFileSystem::Stat(const string& path, string* err) const { FileMap::const_iterator i = files_.find(path); - if (i != files_.end()) + if (i != files_.end()) { + *err = i->second.stat_error; return i->second.mtime; + } return 0; } diff --git a/src/test.h b/src/test.h index 3ce510d..156e68a 100644 --- a/src/test.h +++ b/src/test.h @@ -142,7 +142,7 @@ struct VirtualFileSystem : public DiskInterface { } // DiskInterface - virtual TimeStamp Stat(const string& path) const; + virtual TimeStamp Stat(const string& path, string* err) const; virtual bool WriteFile(const string& path, const string& contents); virtual bool MakeDir(const string& path); virtual string ReadFile(const string& path, string* err); @@ -151,6 +151,7 @@ struct VirtualFileSystem : public DiskInterface { /// An entry for a single in-memory file. struct Entry { int mtime; + string stat_error; // If mtime is -1. 
string contents; }; -- cgit v0.12 From abe52429a36e209b7fdd97c3ae87e43b07081aa8 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 1 Apr 2015 07:56:14 -0700 Subject: Cleanup: Don't modify during cycle checking. --- src/build.cc | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/build.cc b/src/build.cc index d898929..574aecc 100644 --- a/src/build.cc +++ b/src/build.cc @@ -322,17 +322,13 @@ bool Plan::CheckDependencyCycle(Node* node, vector* stack, string* err) { if (ri == stack->rend()) return false; - // Add this node onto the stack to make it clearer where the loop - // is. - stack->push_back(node); - vector::iterator start = find(stack->begin(), stack->end(), node); *err = "dependency cycle: "; for (vector::iterator i = start; i != stack->end(); ++i) { - if (i != start) - err->append(" -> "); err->append((*i)->path()); + err->append(" -> "); } + err->append(node->path()); return true; } -- cgit v0.12 From 813205b43c63239c7e37f9e296d345d55b12d565 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 1 Apr 2015 07:59:38 -0700 Subject: Cleanup: Make stack a const ref now that it's no longer modified. --- src/build.cc | 15 ++++++++------- src/build.h | 3 ++- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/build.cc b/src/build.cc index 574aecc..c51ce53 100644 --- a/src/build.cc +++ b/src/build.cc @@ -278,7 +278,7 @@ bool Plan::AddSubTarget(Node* node, vector* stack, string* err) { return false; } - if (CheckDependencyCycle(node, stack, err)) + if (CheckDependencyCycle(node, *stack, err)) return false; if (edge->outputs_ready()) @@ -316,15 +316,16 @@ bool Plan::AddSubTarget(Node* node, vector* stack, string* err) { return true; } -bool Plan::CheckDependencyCycle(Node* node, vector* stack, string* err) { - vector::reverse_iterator ri = - find(stack->rbegin(), stack->rend(), node); - if (ri == stack->rend()) +bool Plan::CheckDependencyCycle(Node* node, const vector& stack, + string* err) { + vector::const_reverse_iterator ri = + find(stack.rbegin(), stack.rend(), node); + if (ri == stack.rend()) return false; - vector::iterator start = find(stack->begin(), stack->end(), node); + vector::const_iterator start = find(stack.begin(), stack.end(), node); *err = "dependency cycle: "; - for (vector::iterator i = start; i != stack->end(); ++i) { + for (vector::const_iterator i = start; i != stack.end(); ++i) { err->append((*i)->path()); err->append(" -> "); } diff --git a/src/build.h b/src/build.h index 5d5db80..4b48c5f 100644 --- a/src/build.h +++ b/src/build.h @@ -69,7 +69,8 @@ struct Plan { private: bool AddSubTarget(Node* node, vector* stack, string* err); - bool CheckDependencyCycle(Node* node, vector* stack, string* err); + bool CheckDependencyCycle(Node* node, const vector& stack, + string* err); void NodeFinished(Node* node); /// Submits a ready edge as a candidate for execution. -- cgit v0.12 From 90621fd177660f94aa27ce5963506f814f859da2 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 1 Apr 2015 08:00:27 -0700 Subject: Cleanup: Don't search stack for cycle elements twice. The common case is that there is no cycle. In that case, CheckDependencyCycle() searched the stack for a dupe from the back, wouldn't find one, and return false. If there was a cycle, it would then search again from the front (probably because the push_back() that used to be here would invalidate the ri iterator). Since the push_back() is gone, just search from the front once. No intended behavior change. 
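(Editor's sketch, not from the original message — the standard-library rule that forced the old double search: std::vector::push_back() may reallocate the vector's storage, which invalidates every iterator into it, including a reverse_iterator obtained just before.

    #include <algorithm>
    #include <vector>

    void Sketch(std::vector<int>& stack, int node) {
      std::vector<int>::reverse_iterator ri =
          std::find(stack.rbegin(), stack.rend(), node);
      stack.push_back(node);  // may reallocate the storage...
      (void)ri;               // ...so dereferencing ri here would be undefined
    }

With the push_back() gone, one forward find() is enough.)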
--- src/build.cc | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/build.cc b/src/build.cc index c51ce53..9f40d2d 100644 --- a/src/build.cc +++ b/src/build.cc @@ -318,12 +318,10 @@ bool Plan::AddSubTarget(Node* node, vector* stack, string* err) { bool Plan::CheckDependencyCycle(Node* node, const vector& stack, string* err) { - vector::const_reverse_iterator ri = - find(stack.rbegin(), stack.rend(), node); - if (ri == stack.rend()) + vector::const_iterator start = find(stack.begin(), stack.end(), node); + if (start == stack.end()) return false; - vector::const_iterator start = find(stack.begin(), stack.end(), node); *err = "dependency cycle: "; for (vector::const_iterator i = start; i != stack.end(); ++i) { err->append((*i)->path()); -- cgit v0.12 From 8e8cee81964996981473ed4ab004e61eb6600235 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Wed, 1 Apr 2015 08:09:28 -0700 Subject: Don't get stuck on cyclic graphs where one edge has multiple outputs. Fixes #934. Plan::AddSubTarget() tracks in want_ if each edge has been visited and visits every edge only once. But Plan::CheckDependencyCycle() worked on nodes however, so if a cycle was entered through an edge with multiple outputs, ninja would fail to detect that cycle. Move cycle detection to look for duplicate edges instead of nodes to fix this. The extra jump makes CheckDependencyCycle() a bit slower: for a synthetic build file with 10000 serial build steps, `ninja -n` now takes 0.32s instead of 0.26s before on my laptop. In practice, projects have a dependency change length on the order of 50, so there shouldn't be a noticeable slowdown in practice. (If this does end up being a problem: CheckDependencyCycle() currently does O(n) work and is called O(m) times from AddSubTarget(), so I think cycle checking is O(n^2) in the build depth. So instead of worrying about constant overhead, we could use a set<> instead of a stack<>. But it doesn't seem to be a problem in practice.) --- src/build.cc | 25 +++++++++++++++++++++---- src/build_test.cc | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 4 deletions(-) diff --git a/src/build.cc b/src/build.cc index 9f40d2d..ccdb37f 100644 --- a/src/build.cc +++ b/src/build.cc @@ -318,16 +318,33 @@ bool Plan::AddSubTarget(Node* node, vector* stack, string* err) { bool Plan::CheckDependencyCycle(Node* node, const vector& stack, string* err) { - vector::const_iterator start = find(stack.begin(), stack.end(), node); + vector::const_iterator start = stack.begin(); + while (start != stack.end() && (*start)->in_edge() != node->in_edge()) + ++start; if (start == stack.end()) return false; + // Build error string for the cycle. + vector cycle(start, stack.end()); + cycle.push_back(node); + + if (cycle.front() != cycle.back()) { + // Consider + // build a b: cat c + // build c: cat a + // stack will contain [b, c], node will be a. To not print b -> c -> a, + // shift by one to get c -> a -> c which makes the cycle clear. 
+ cycle.erase(cycle.begin()); + cycle.push_back(cycle.front()); + assert(cycle.front() == cycle.back()); + } + *err = "dependency cycle: "; - for (vector::const_iterator i = start; i != stack.end(); ++i) { + for (vector::const_iterator i = cycle.begin(); i != cycle.end(); ++i) { + if (i != cycle.begin()) + err->append(" -> "); err->append((*i)->path()); - err->append(" -> "); } - err->append(node->path()); return true; } diff --git a/src/build_test.cc b/src/build_test.cc index 13d1e7e..a313693 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -201,6 +201,43 @@ TEST_F(PlanTest, DependencyCycle) { ASSERT_EQ("dependency cycle: out -> mid -> in -> pre -> out", err); } +TEST_F(PlanTest, CycleInEdgesButNotInNodes1) { + string err; + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, +"build a b: cat a\n")); + EXPECT_FALSE(plan_.AddTarget(GetNode("b"), &err)); + ASSERT_EQ("dependency cycle: a -> a", err); +} + +TEST_F(PlanTest, CycleInEdgesButNotInNodes2) { + string err; + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, +"build b a: cat a\n")); + EXPECT_FALSE(plan_.AddTarget(GetNode("b"), &err)); + ASSERT_EQ("dependency cycle: a -> a", err); +} + +TEST_F(PlanTest, CycleInEdgesButNotInNodes3) { + string err; + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, +"build a b: cat c\n" +"build c: cat a\n")); + EXPECT_FALSE(plan_.AddTarget(GetNode("b"), &err)); + ASSERT_EQ("dependency cycle: c -> a -> c", err); +} + +TEST_F(PlanTest, CycleInEdgesButNotInNodes4) { + string err; + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, +"build d: cat c\n" +"build c: cat b\n" +"build b: cat a\n" +"build a e: cat d\n" +"build f: cat e\n")); + EXPECT_FALSE(plan_.AddTarget(GetNode("f"), &err)); + ASSERT_EQ("dependency cycle: d -> c -> b -> a -> d", err); +} + void PlanTest::TestPoolWithDepthOne(const char* test_case) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, test_case)); GetNode("out1")->MarkDirty(); -- cgit v0.12 From a4448505688eeeaf165ded9f2dbb48e774ca67df Mon Sep 17 00:00:00 2001 From: Spencer Judge Date: Tue, 7 Apr 2015 14:18:43 -0700 Subject: Fix backslashes in graphviz causing incorrect rendering on windows. --- src/graphviz.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/graphviz.cc b/src/graphviz.cc index 8354a22..dce8b32 100644 --- a/src/graphviz.cc +++ b/src/graphviz.cc @@ -15,6 +15,7 @@ #include "graphviz.h" #include +#include #include "graph.h" @@ -22,7 +23,9 @@ void GraphViz::AddTarget(Node* node) { if (visited_nodes_.find(node) != visited_nodes_.end()) return; - printf("\"%p\" [label=\"%s\"]\n", node, node->path().c_str()); + string pathstr = node->path(); + replace(pathstr.begin(), pathstr.end(), '\\', '/'); + printf("\"%p\" [label=\"%s\"]\n", node, pathstr.c_str()); visited_nodes_.insert(node); Edge* edge = node->in_edge(); -- cgit v0.12 From 5ee7238bda57e711749be799e9824bfa2d791192 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Thu, 9 Apr 2015 11:41:40 -0700 Subject: Fix an assert (and tests in --debug mode) after #921. 
--- src/eval_env.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eval_env.cc b/src/eval_env.cc index e03a82e..e991d21 100644 --- a/src/eval_env.cc +++ b/src/eval_env.cc @@ -30,7 +30,7 @@ void BindingEnv::AddBinding(const string& key, const string& val) { } void BindingEnv::AddRule(const Rule* rule) { - assert(LookupRule(rule->name()) == NULL); + assert(LookupRuleCurrentScope(rule->name()) == NULL); rules_[rule->name()] = rule; } -- cgit v0.12 From 8c8834acffdc0da0d94119725929acc712c9dfad Mon Sep 17 00:00:00 2001 From: Rui Ueyama Date: Fri, 17 Apr 2015 14:14:44 -0700 Subject: Run more than 34 processes on Win32 if we have 32+ cores. For compatiblity reason, dwNumberOfProcessors in Win32 is capped at 32. So even if your machine has more than 32 cores, Ninja spawns at most 34 subprocesses. This patch fixes the issue by using GetNativeSystemInfo, which returns the system info from Wow64 point of view, instead of GetSystemInfo. --- src/util.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util.cc b/src/util.cc index 720ccbd..aa47f2f 100644 --- a/src/util.cc +++ b/src/util.cc @@ -500,7 +500,7 @@ string StripAnsiEscapeCodes(const string& in) { int GetProcessorCount() { #ifdef _WIN32 SYSTEM_INFO info; - GetSystemInfo(&info); + GetNativeSystemInfo(&info); return info.dwNumberOfProcessors; #else return sysconf(_SC_NPROCESSORS_ONLN); -- cgit v0.12 From 6652258c2fa510f043d32be95ef4c44eaa2800dc Mon Sep 17 00:00:00 2001 From: Scott Graham Date: Thu, 23 Apr 2015 15:54:28 -0700 Subject: Fix pool use count going unbalanced --- src/build.cc | 4 +++- src/build_test.cc | 44 ++++++++++++++++++++++++++++++++++++-------- src/state.h | 1 + 3 files changed, 40 insertions(+), 9 deletions(-) diff --git a/src/build.cc b/src/build.cc index ccdb37f..f14831e 100644 --- a/src/build.cc +++ b/src/build.cc @@ -411,7 +411,9 @@ void Plan::NodeFinished(Node* node) { ScheduleWork(*oe); } else { // We do not need to build this edge, but we might need to build one of - // its dependents. + // its dependents. Make sure the pool schedules it before it's finished + // otherwise the pool use count may be invalid. + (*oe)->pool()->EdgeScheduled(**oe); EdgeFinished(*oe); } } diff --git a/src/build_test.cc b/src/build_test.cc index a313693..52a17c9 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -495,8 +495,8 @@ struct BuildTest : public StateTestWithBuiltinRules, public BuildLogUser { /// State of command_runner_ and logs contents (if specified) ARE MODIFIED. /// Handy to check for NOOP builds, and higher-level rebuild tests. void RebuildTarget(const string& target, const char* manifest, - const char* log_path = NULL, - const char* deps_path = NULL); + const char* log_path = NULL, const char* deps_path = NULL, + State* state = NULL); // Mark a path dirty. 
void Dirty(const string& path); @@ -516,10 +516,13 @@ struct BuildTest : public StateTestWithBuiltinRules, public BuildLogUser { }; void BuildTest::RebuildTarget(const string& target, const char* manifest, - const char* log_path, const char* deps_path) { - State state; - ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); - AssertParse(&state, manifest); + const char* log_path, const char* deps_path, + State* state) { + State local_state, *pstate = &local_state; + if (state) + pstate = state; + ASSERT_NO_FATAL_FAILURE(AddCatRule(pstate)); + AssertParse(pstate, manifest); string err; BuildLog build_log, *pbuild_log = NULL; @@ -532,13 +535,13 @@ void BuildTest::RebuildTarget(const string& target, const char* manifest, DepsLog deps_log, *pdeps_log = NULL; if (deps_path) { - ASSERT_TRUE(deps_log.Load(deps_path, &state, &err)); + ASSERT_TRUE(deps_log.Load(deps_path, pstate, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_path, &err)); ASSERT_EQ("", err); pdeps_log = &deps_log; } - Builder builder(&state, config_, pbuild_log, pdeps_log, &fs_); + Builder builder(pstate, config_, pbuild_log, pdeps_log, &fs_); EXPECT_TRUE(builder.AddTarget(target, &err)); command_runner_.commands_ran_.clear(); @@ -1092,6 +1095,31 @@ TEST_F(BuildTest, SwallowFailuresLimit) { ASSERT_EQ("cannot make progress due to previous errors", err); } +TEST_F(BuildTest, PoolEdgesReadyButNotWanted) { + fs_.Create("x", ""); + + const char* manifest = + "pool some_pool\n" + " depth = 4\n" + "rule touch\n" + " command = touch $out\n" + " pool = some_pool\n" + "rule cc\n" + " command = touch grit\n" + "\n" + "build B.d.stamp: cc | x\n" + "build C.stamp: touch B.d.stamp\n" + "build final.stamp: touch || C.stamp\n"; + + RebuildTarget("final.stamp", manifest); + + fs_.RemoveFile("B.d.stamp"); + + State save_state; + RebuildTarget("final.stamp", manifest, NULL, NULL, &save_state); + EXPECT_GE(save_state.LookupPool("some_pool")->current_use(), 0); +} + struct BuildWithLogTest : public BuildTest { BuildWithLogTest() { builder_.SetBuildLog(&build_log_); diff --git a/src/state.h b/src/state.h index 5000138..d7987ba 100644 --- a/src/state.h +++ b/src/state.h @@ -44,6 +44,7 @@ struct Pool { bool is_valid() const { return depth_ >= 0; } int depth() const { return depth_; } const string& name() const { return name_; } + int current_use() const { return current_use_; } /// true if the Pool might delay this edge bool ShouldDelayEdge() const { return depth_ != 0; } -- cgit v0.12 From b4bc5cf7c924be251a5c8abdfca58f47c3f5c185 Mon Sep 17 00:00:00 2001 From: Nicolas Despres Date: Wed, 16 Apr 2014 10:05:25 +0200 Subject: Allow SIGTERM for interruption. Default signal sent by many other programs (mainly kill(1)) to gently terminates another one is SIGTERM. --- src/subprocess-posix.cc | 15 +++++++++++---- src/subprocess.h | 3 ++- src/subprocess_test.cc | 26 +++++++++++++++++++++++++- 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/src/subprocess-posix.cc b/src/subprocess-posix.cc index cc8bff6..c784a39 100644 --- a/src/subprocess-posix.cc +++ b/src/subprocess-posix.cc @@ -74,7 +74,9 @@ bool Subprocess::Start(SubprocessSet* set, const string& command) { // Track which fd we use to report errors on. 
int error_pipe = output_pipe[1]; do { - if (sigaction(SIGINT, &set->old_act_, 0) < 0) + if (sigaction(SIGINT, &set->old_int_act_, 0) < 0) + break; + if (sigaction(SIGTERM, &set->old_term_act_, 0) < 0) break; if (sigprocmask(SIG_SETMASK, &set->old_mask_, 0) < 0) break; @@ -148,7 +150,7 @@ ExitStatus Subprocess::Finish() { if (exit == 0) return ExitSuccess; } else if (WIFSIGNALED(status)) { - if (WTERMSIG(status) == SIGINT) + if (WTERMSIG(status) == SIGINT || WTERMSIG(status) == SIGTERM) return ExitInterrupted; } return ExitFailure; @@ -173,20 +175,25 @@ SubprocessSet::SubprocessSet() { sigset_t set; sigemptyset(&set); sigaddset(&set, SIGINT); + sigaddset(&set, SIGTERM); if (sigprocmask(SIG_BLOCK, &set, &old_mask_) < 0) Fatal("sigprocmask: %s", strerror(errno)); struct sigaction act; memset(&act, 0, sizeof(act)); act.sa_handler = SetInterruptedFlag; - if (sigaction(SIGINT, &act, &old_act_) < 0) + if (sigaction(SIGINT, &act, &old_int_act_) < 0) + Fatal("sigaction: %s", strerror(errno)); + if (sigaction(SIGTERM, &act, &old_term_act_) < 0) Fatal("sigaction: %s", strerror(errno)); } SubprocessSet::~SubprocessSet() { Clear(); - if (sigaction(SIGINT, &old_act_, 0) < 0) + if (sigaction(SIGINT, &old_int_act_, 0) < 0) + Fatal("sigaction: %s", strerror(errno)); + if (sigaction(SIGTERM, &old_term_act_, 0) < 0) Fatal("sigaction: %s", strerror(errno)); if (sigprocmask(SIG_SETMASK, &old_mask_, 0) < 0) Fatal("sigprocmask: %s", strerror(errno)); diff --git a/src/subprocess.h b/src/subprocess.h index b7a1a4c..daeeef6 100644 --- a/src/subprocess.h +++ b/src/subprocess.h @@ -91,7 +91,8 @@ struct SubprocessSet { static void SetInterruptedFlag(int signum); static bool interrupted_; - struct sigaction old_act_; + struct sigaction old_int_act_; + struct sigaction old_term_act_; sigset_t old_mask_; #endif }; diff --git a/src/subprocess_test.cc b/src/subprocess_test.cc index 1838c43..07cc52f 100644 --- a/src/subprocess_test.cc +++ b/src/subprocess_test.cc @@ -98,6 +98,30 @@ TEST_F(SubprocessTest, InterruptParent) { ASSERT_FALSE("We should have been interrupted"); } +TEST_F(SubprocessTest, InterruptChildWithSigTerm) { + Subprocess* subproc = subprocs_.Add("kill -TERM $$"); + ASSERT_NE((Subprocess *) 0, subproc); + + while (!subproc->Done()) { + subprocs_.DoWork(); + } + + EXPECT_EQ(ExitInterrupted, subproc->Finish()); +} + +TEST_F(SubprocessTest, InterruptParentWithSigTerm) { + Subprocess* subproc = subprocs_.Add("kill -TERM $PPID ; sleep 1"); + ASSERT_NE((Subprocess *) 0, subproc); + + while (!subproc->Done()) { + bool interrupted = subprocs_.DoWork(); + if (interrupted) + return; + } + + ASSERT_FALSE("We should have been interrupted"); +} + // A shell command to check if the current process is connected to a terminal. // This is different from having stdin/stdout/stderr be a terminal. (For // instance consider the command "yes < /dev/null > /dev/null 2>&1". @@ -221,7 +245,7 @@ TEST_F(SubprocessTest, SetWithLots) { } ASSERT_EQ(kNumProcs, subprocs_.finished_.size()); } -#endif // !__APPLE__ && !_WIN32 +#endif // !__APPLE__ && !_WIN32 // TODO: this test could work on Windows, just not sure how to simply // read stdin. -- cgit v0.12 From 9fff9551b2f3d3e5c994512b06832b40560d510b Mon Sep 17 00:00:00 2001 From: Nicolas Despres Date: Wed, 16 Apr 2014 10:08:42 +0200 Subject: Forward interruption signal to child processes. 
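(Editor's sketch of the effect, not wording from the original patch: SubprocessSet now records which signal interrupted ninja — SIGINT or SIGTERM — and re-sends that same signal when cleaning up still-running jobs. The existing cleanup already signals each non-console child's process group by passing a negative pid, so roughly:

    #include <signal.h>
    #include <sys/types.h>

    // 'interrupted' is the signal number recorded by the handler, 0 if none.
    void ForwardInterruption(pid_t child_pid, int interrupted) {
      if (interrupted != 0)
        kill(-child_pid, interrupted);  // negative pid => whole process group
    }

In practice this means `kill -TERM <ninja pid>` now terminates the build's children with SIGTERM too, instead of always SIGINT.)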
--- src/subprocess-posix.cc | 56 ++++++++++++++++++++++++------------------------- src/subprocess.h | 7 ++++++- 2 files changed, 34 insertions(+), 29 deletions(-) diff --git a/src/subprocess-posix.cc b/src/subprocess-posix.cc index c784a39..f3baec2 100644 --- a/src/subprocess-posix.cc +++ b/src/subprocess-posix.cc @@ -25,20 +25,6 @@ #include "util.h" -namespace { - -bool HasPendingInterruption() { - sigset_t pending; - sigemptyset(&pending); - if (sigpending(&pending) == -1) { - perror("ninja: sigpending"); - return false; - } - return sigismember(&pending, SIGINT); -} - -} // anonymous namespace - Subprocess::Subprocess(bool use_console) : fd_(-1), pid_(-1), use_console_(use_console) { } @@ -164,11 +150,23 @@ const string& Subprocess::GetOutput() const { return buf_; } -bool SubprocessSet::interrupted_; +int SubprocessSet::interrupted_; void SubprocessSet::SetInterruptedFlag(int signum) { - (void) signum; - interrupted_ = true; + interrupted_ = signum; +} + +void SubprocessSet::HandlePendingInterruption() { + sigset_t pending; + sigemptyset(&pending); + if (sigpending(&pending) == -1) { + perror("ninja: sigpending"); + return; + } + if (sigismember(&pending, SIGINT)) + interrupted_ = SIGINT; + else if (sigismember(&pending, SIGTERM)) + interrupted_ = SIGTERM; } SubprocessSet::SubprocessSet() { @@ -224,17 +222,18 @@ bool SubprocessSet::DoWork() { ++nfds; } - interrupted_ = false; + interrupted_ = 0; int ret = ppoll(&fds.front(), nfds, NULL, &old_mask_); if (ret == -1) { if (errno != EINTR) { perror("ninja: ppoll"); return false; } - return interrupted_; + return IsInterrupted(); } - if (HasPendingInterruption()) + HandlePendingInterruption(); + if (IsInterrupted()) return true; nfds_t cur_nfd = 0; @@ -255,7 +254,7 @@ bool SubprocessSet::DoWork() { ++i; } - return interrupted_; + return IsInterrupted(); } #else // !defined(USE_PPOLL) @@ -274,17 +273,18 @@ bool SubprocessSet::DoWork() { } } - interrupted_ = false; + interrupted_ = 0; int ret = pselect(nfds, &set, 0, 0, 0, &old_mask_); if (ret == -1) { if (errno != EINTR) { perror("ninja: pselect"); return false; } - return interrupted_; + return IsInterrupted(); } - if (HasPendingInterruption()) + HandlePendingInterruption(); + if (IsInterrupted()) return true; for (vector::iterator i = running_.begin(); @@ -301,7 +301,7 @@ bool SubprocessSet::DoWork() { ++i; } - return interrupted_; + return IsInterrupted(); } #endif // !defined(USE_PPOLL) @@ -316,10 +316,10 @@ Subprocess* SubprocessSet::NextFinished() { void SubprocessSet::Clear() { for (vector::iterator i = running_.begin(); i != running_.end(); ++i) - // Since the foreground process is in our process group, it will receive a - // SIGINT at the same time as us. + // Since the foreground process is in our process group, it will receive + // the interruption signal (i.e. SIGINT or SIGTERM) at the same time as us. if (!(*i)->use_console_) - kill(-(*i)->pid_, SIGINT); + kill(-(*i)->pid_, interrupted_); for (vector::iterator i = running_.begin(); i != running_.end(); ++i) delete *i; diff --git a/src/subprocess.h b/src/subprocess.h index daeeef6..a001fc9 100644 --- a/src/subprocess.h +++ b/src/subprocess.h @@ -89,7 +89,12 @@ struct SubprocessSet { static HANDLE ioport_; #else static void SetInterruptedFlag(int signum); - static bool interrupted_; + static void HandlePendingInterruption(); + /// Store the signal number that causes the interruption. + /// 0 if not interruption. 
+ static int interrupted_; + + static bool IsInterrupted() { return interrupted_ != 0; } struct sigaction old_int_act_; struct sigaction old_term_act_; -- cgit v0.12 From dcdd042ca93fd0057d7e1b0eedfbab49790b2e00 Mon Sep 17 00:00:00 2001 From: Scott Graham Date: Fri, 24 Apr 2015 14:25:17 -0700 Subject: add comment --- src/state.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/state.h b/src/state.h index d7987ba..58fac13 100644 --- a/src/state.h +++ b/src/state.h @@ -71,6 +71,9 @@ struct Pool { /// |current_use_| is the total of the weights of the edges which are /// currently scheduled in the Plan (i.e. the edges in Plan::ready_). + /// This is generally <= to depth_. It can exceed it very briefly when the + /// pool is notified about an edge that's about to be finished that will + /// not actually be started. See Plan::NodeFinished(). int current_use_; int depth_; -- cgit v0.12 From 4c4887d26011225deaa20b6752193be0293624ab Mon Sep 17 00:00:00 2001 From: Scott Graham Date: Fri, 24 Apr 2015 16:06:55 -0700 Subject: avoid calling ResumeDelayedJobs instead --- src/build.cc | 13 +++++++------ src/build.h | 2 +- src/state.h | 3 --- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/src/build.cc b/src/build.cc index f14831e..e266b5c 100644 --- a/src/build.cc +++ b/src/build.cc @@ -379,7 +379,7 @@ void Plan::ResumeDelayedJobs(Edge* edge) { edge->pool()->RetrieveReadyEdges(&ready_); } -void Plan::EdgeFinished(Edge* edge) { +void Plan::EdgeFinished(Edge* edge, bool directly_wanted) { map::iterator e = want_.find(edge); assert(e != want_.end()); if (e->second) @@ -388,7 +388,10 @@ void Plan::EdgeFinished(Edge* edge) { edge->outputs_ready_ = true; // See if this job frees up any delayed jobs - ResumeDelayedJobs(edge); + if (directly_wanted) + ResumeDelayedJobs(edge); + else + edge->pool()->RetrieveReadyEdges(&ready_); // Check off any nodes we were waiting for with this edge. for (vector::iterator o = edge->outputs_.begin(); @@ -411,10 +414,8 @@ void Plan::NodeFinished(Node* node) { ScheduleWork(*oe); } else { // We do not need to build this edge, but we might need to build one of - // its dependents. Make sure the pool schedules it before it's finished - // otherwise the pool use count may be invalid. - (*oe)->pool()->EdgeScheduled(**oe); - EdgeFinished(*oe); + // its dependents. + EdgeFinished(*oe, want_e->second); } } } diff --git a/src/build.h b/src/build.h index 4b48c5f..6eabae5 100644 --- a/src/build.h +++ b/src/build.h @@ -58,7 +58,7 @@ struct Plan { /// Mark an edge as done building. Used internally and by /// tests. - void EdgeFinished(Edge* edge); + void EdgeFinished(Edge* edge, bool directly_wanted = true); /// Clean the given node during the build. /// Return false on error. diff --git a/src/state.h b/src/state.h index 58fac13..d7987ba 100644 --- a/src/state.h +++ b/src/state.h @@ -71,9 +71,6 @@ struct Pool { /// |current_use_| is the total of the weights of the edges which are /// currently scheduled in the Plan (i.e. the edges in Plan::ready_). - /// This is generally <= to depth_. It can exceed it very briefly when the - /// pool is notified about an edge that's about to be finished that will - /// not actually be started. See Plan::NodeFinished(). 
int current_use_; int depth_; -- cgit v0.12 From bf2f4b7356c8ea918718651bae586149ab97ebee Mon Sep 17 00:00:00 2001 From: Scott Graham Date: Mon, 27 Apr 2015 11:12:43 -0700 Subject: simplify & inline --- src/build.cc | 19 +++++++------------ src/build.h | 7 +------ 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/src/build.cc b/src/build.cc index e266b5c..cdb8e0a 100644 --- a/src/build.cc +++ b/src/build.cc @@ -374,24 +374,19 @@ void Plan::ScheduleWork(Edge* edge) { } } -void Plan::ResumeDelayedJobs(Edge* edge) { - edge->pool()->EdgeFinished(*edge); - edge->pool()->RetrieveReadyEdges(&ready_); -} - -void Plan::EdgeFinished(Edge* edge, bool directly_wanted) { +void Plan::EdgeFinished(Edge* edge) { map::iterator e = want_.find(edge); assert(e != want_.end()); - if (e->second) + bool directly_wanted = e->second; + if (directly_wanted) --wanted_edges_; want_.erase(e); edge->outputs_ready_ = true; - // See if this job frees up any delayed jobs + // See if this job frees up any delayed jobs. if (directly_wanted) - ResumeDelayedJobs(edge); - else - edge->pool()->RetrieveReadyEdges(&ready_); + edge->pool()->EdgeFinished(*edge); + edge->pool()->RetrieveReadyEdges(&ready_); // Check off any nodes we were waiting for with this edge. for (vector::iterator o = edge->outputs_.begin(); @@ -415,7 +410,7 @@ void Plan::NodeFinished(Node* node) { } else { // We do not need to build this edge, but we might need to build one of // its dependents. - EdgeFinished(*oe, want_e->second); + EdgeFinished(*oe); } } } diff --git a/src/build.h b/src/build.h index 6eabae5..8106faa 100644 --- a/src/build.h +++ b/src/build.h @@ -58,7 +58,7 @@ struct Plan { /// Mark an edge as done building. Used internally and by /// tests. - void EdgeFinished(Edge* edge, bool directly_wanted = true); + void EdgeFinished(Edge* edge); /// Clean the given node during the build. /// Return false on error. @@ -78,11 +78,6 @@ private: /// currently-full pool. void ScheduleWork(Edge* edge); - /// Allows jobs blocking on |edge| to potentially resume. - /// For example, if |edge| is a member of a pool, calling this may schedule - /// previously pending jobs in that pool. - void ResumeDelayedJobs(Edge* edge); - /// Keep track of which edges we want to build in this plan. If this map does /// not contain an entry for an edge, we do not want to build the entry or its /// dependents. If an entry maps to false, we do not want to build it, but we -- cgit v0.12 From f0f36ad1c72f6100a8957f035769fda50b69919f Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Sat, 2 May 2015 14:38:43 -0700 Subject: Fix typo in comment. --- src/edit_distance.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/edit_distance.cc b/src/edit_distance.cc index 9553c6e..a6719d3 100644 --- a/src/edit_distance.cc +++ b/src/edit_distance.cc @@ -28,7 +28,7 @@ int EditDistance(const StringPiece& s1, // http://en.wikipedia.org/wiki/Levenshtein_distance // // Although the algorithm is typically described using an m x n - // array, only two rows are used at a time, so this implemenation + // array, only two rows are used at a time, so this implementation // just keeps two separate vectors for those two rows. int m = s1.len_; int n = s2.len_; -- cgit v0.12 From 1d5d08848d8b0b36a043c51b4a83531ffe3516dc Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Thu, 14 May 2015 17:37:50 -0700 Subject: Add a missing EXPLAIN() call. 
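Messages like this are only printed when ninja is run with "-d explain", which
is the usual way to find out why a target is considered dirty. For readers who
have not seen the macro, EXPLAIN() is essentially a guarded fprintf to stderr;
a minimal sketch of that pattern (illustrative only, not ninja's exact
definition, and "foo.stamp" is a made-up output name):

    #include <cstdio>

    // Sketch of an explain-style macro: a debug flag, set when the tool is
    // run with "-d explain", gates diagnostic output to stderr.
    static bool g_explaining = true;

    #define EXPLAIN(fmt, ...) \
      do { \
        if (g_explaining) \
          fprintf(stderr, "ninja explain: " fmt "\n", __VA_ARGS__); \
      } while (0)

    int main() {
      // Same shape as the call added below for phony edges with no inputs:
      EXPLAIN("output %s of phony edge with no inputs doesn't exist",
              "foo.stamp");
      return 0;
    }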
--- src/graph.cc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/graph.cc b/src/graph.cc index fcfeba0..355285c 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -55,6 +55,7 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) { if (!err->empty()) return false; // Failed to load dependency info: rebuild to regenerate it. + // LoadDeps() did EXPLAIN() already, no need to do it here. dirty = edge->deps_missing_ = true; } @@ -142,7 +143,12 @@ bool DependencyScan::RecomputeOutputDirty(Edge* edge, if (edge->is_phony()) { // Phony edges don't write any output. Outputs are only dirty if // there are no inputs and we're missing the output. - return edge->inputs_.empty() && !output->exists(); + if (edge->inputs_.empty() && !output->exists()) { + EXPLAIN("output %s of phony edge with no inputs doesn't exist", + output->path().c_str()); + return true; + } + return false; } BuildLog::LogEntry* entry = 0; -- cgit v0.12 From 78b1e52a5df429d336d4a08dd0d77fc74bb477dc Mon Sep 17 00:00:00 2001 From: Jason Haslam Date: Tue, 26 May 2015 15:19:28 -0600 Subject: Allow configure script to bootstrap out of source. --- configure.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/configure.py b/configure.py index 2eacbfe..69f6075 100755 --- a/configure.py +++ b/configure.py @@ -28,7 +28,8 @@ import string import subprocess import sys -sys.path.insert(0, 'misc') +sourcedir = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(sourcedir, 'misc')) import ninja_syntax @@ -251,11 +252,11 @@ if platform.is_msvc(): objext = '.obj' def src(filename): - return os.path.join('src', filename) + return os.path.join('$sourcedir', 'src', filename) def built(filename): return os.path.join('$builddir', filename) def doc(filename): - return os.path.join('doc', filename) + return os.path.join('$sourcedir', 'doc', filename) def cc(name, **kwargs): return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs) def cxx(name, **kwargs): @@ -267,6 +268,7 @@ def binary(name): return exe return name +n.variable('sourcedir', sourcedir) n.variable('builddir', 'build') n.variable('cxx', CXX) if platform.is_msvc(): @@ -415,10 +417,10 @@ objs = [] if platform.supports_ninja_browse(): n.comment('browse_py.h is used to inline browse.py.') n.rule('inline', - command='src/inline.sh $varname < $in > $out', + command=src('inline.sh') + ' $varname < $in > $out', description='INLINE $out') n.build(built('browse_py.h'), 'inline', src('browse.py'), - implicit='src/inline.sh', + implicit=src('inline.sh'), variables=[('varname', 'kBrowsePy')]) n.newline() @@ -591,11 +593,12 @@ n.newline() if not host.is_mingw(): n.comment('Regenerate build files if build script changes.') n.rule('configure', - command='${configure_env}%s configure.py $configure_args' % + command='${configure_env}%s $sourcedir/configure.py $configure_args' % options.with_python, generator=True) n.build('build.ninja', 'configure', - implicit=['configure.py', os.path.normpath('misc/ninja_syntax.py')]) + implicit=['$sourcedir/configure.py', + os.path.normpath('$sourcedir/misc/ninja_syntax.py')]) n.newline() n.default(ninja) -- cgit v0.12 From 681f761da16521a2c71412d86294066a5e67a34d Mon Sep 17 00:00:00 2001 From: Jason Haslam Date: Tue, 26 May 2015 16:47:18 -0600 Subject: Search for generated headers relative to build dir. 
--- configure.py | 3 +++ src/browse.cc | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/configure.py b/configure.py index 69f6075..27516b4 100755 --- a/configure.py +++ b/configure.py @@ -355,6 +355,9 @@ if platform.supports_ppoll() and not options.force_pselect: if platform.supports_ninja_browse(): cflags.append('-DNINJA_HAVE_BROWSE') +# Search for generated headers relative to build dir. +cflags.append('-I.') + def shell_escape(str): """Escape str such that it's interpreted as a single argument by the shell.""" diff --git a/src/browse.cc b/src/browse.cc index 83bfe43..8673919 100644 --- a/src/browse.cc +++ b/src/browse.cc @@ -18,7 +18,7 @@ #include #include -#include "../build/browse_py.h" +#include "build/browse_py.h" void RunBrowsePython(State* state, const char* ninja_command, const char* initial_target) { -- cgit v0.12 From 27b82ff99588bbe90bd6caa71d29b76156b65c4b Mon Sep 17 00:00:00 2001 From: Brad King Date: Tue, 16 Jun 2015 13:38:55 -0400 Subject: Add missing member initializer in Edge constructor In commit v1.1.0^2~15^2~20 (stub out an api and de-constify Pool, 2012-10-03) the Edge "pool_" member was added as a raw pointer but the initializer was accidentally left out of the constructor. Add it now. --- src/graph.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/graph.h b/src/graph.h index 9526712..5f8d41a 100644 --- a/src/graph.h +++ b/src/graph.h @@ -122,7 +122,8 @@ private: /// An edge in the dependency graph; links between Nodes using Rules. struct Edge { - Edge() : rule_(NULL), env_(NULL), outputs_ready_(false), deps_missing_(false), + Edge() : rule_(NULL), pool_(NULL), env_(NULL), + outputs_ready_(false), deps_missing_(false), implicit_deps_(0), order_only_deps_(0) {} /// Return true if all inputs' in-edges are ready. -- cgit v0.12 From ade82bd0b04a3282a538d99b3deaaf790e5bc42d Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Thu, 18 Jun 2015 17:22:48 -0700 Subject: Document the change from #921 in the manual. --- doc/manual.asciidoc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc index 217ff28..003c71e 100644 --- a/doc/manual.asciidoc +++ b/doc/manual.asciidoc @@ -920,9 +920,12 @@ Evaluation and scoping Top-level variable declarations are scoped to the file they occur in. +Rule declarations are also scoped to the file they occur in. +_(Available since Ninja 1.6)_ + The `subninja` keyword, used to include another `.ninja` file, introduces a new scope. The included `subninja` file may use the -variables from the parent file, and shadow their values for the file's +variables and rules from the parent file, and shadow their values for the file's scope, but it won't affect values of the variables in the parent. 
To include another `.ninja` file in the current scope, much like a C -- cgit v0.12 From 4256ff44d9ec0db9e68e9b56d91f681869417388 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Mon, 22 Jun 2015 13:35:37 -0700 Subject: Revert "Bootstrap out of source" --- configure.py | 20 +++++++------------- src/browse.cc | 2 +- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/configure.py b/configure.py index 27516b4..2eacbfe 100755 --- a/configure.py +++ b/configure.py @@ -28,8 +28,7 @@ import string import subprocess import sys -sourcedir = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(sourcedir, 'misc')) +sys.path.insert(0, 'misc') import ninja_syntax @@ -252,11 +251,11 @@ if platform.is_msvc(): objext = '.obj' def src(filename): - return os.path.join('$sourcedir', 'src', filename) + return os.path.join('src', filename) def built(filename): return os.path.join('$builddir', filename) def doc(filename): - return os.path.join('$sourcedir', 'doc', filename) + return os.path.join('doc', filename) def cc(name, **kwargs): return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs) def cxx(name, **kwargs): @@ -268,7 +267,6 @@ def binary(name): return exe return name -n.variable('sourcedir', sourcedir) n.variable('builddir', 'build') n.variable('cxx', CXX) if platform.is_msvc(): @@ -355,9 +353,6 @@ if platform.supports_ppoll() and not options.force_pselect: if platform.supports_ninja_browse(): cflags.append('-DNINJA_HAVE_BROWSE') -# Search for generated headers relative to build dir. -cflags.append('-I.') - def shell_escape(str): """Escape str such that it's interpreted as a single argument by the shell.""" @@ -420,10 +415,10 @@ objs = [] if platform.supports_ninja_browse(): n.comment('browse_py.h is used to inline browse.py.') n.rule('inline', - command=src('inline.sh') + ' $varname < $in > $out', + command='src/inline.sh $varname < $in > $out', description='INLINE $out') n.build(built('browse_py.h'), 'inline', src('browse.py'), - implicit=src('inline.sh'), + implicit='src/inline.sh', variables=[('varname', 'kBrowsePy')]) n.newline() @@ -596,12 +591,11 @@ n.newline() if not host.is_mingw(): n.comment('Regenerate build files if build script changes.') n.rule('configure', - command='${configure_env}%s $sourcedir/configure.py $configure_args' % + command='${configure_env}%s configure.py $configure_args' % options.with_python, generator=True) n.build('build.ninja', 'configure', - implicit=['$sourcedir/configure.py', - os.path.normpath('$sourcedir/misc/ninja_syntax.py')]) + implicit=['configure.py', os.path.normpath('misc/ninja_syntax.py')]) n.newline() n.default(ninja) diff --git a/src/browse.cc b/src/browse.cc index 8673919..83bfe43 100644 --- a/src/browse.cc +++ b/src/browse.cc @@ -18,7 +18,7 @@ #include #include -#include "build/browse_py.h" +#include "../build/browse_py.h" void RunBrowsePython(State* state, const char* ninja_command, const char* initial_target) { -- cgit v0.12 From d18eda4c3ed7d81c3f8d6d46972a0988f1ebb05e Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Mon, 29 Jun 2015 10:16:35 -0700 Subject: mark this 1.6.0.git --- src/version.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.cc b/src/version.cc index f2d6711..4c2aea1 100644 --- a/src/version.cc +++ b/src/version.cc @@ -18,7 +18,7 @@ #include "util.h" -const char* kNinjaVersion = "1.5.3.git"; +const char* kNinjaVersion = "1.6.0.git"; void ParseVersion(const string& version, int* major, int* minor) { size_t end = version.find('.'); -- cgit v0.12
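For context on the hunk above: kNinjaVersion is the string that `ninja
--version` reports, and ParseVersion() reduces such strings to a major.minor
pair for the `ninja_required_version` check, so suffixes like ".0.git"
effectively do not participate in the comparison. A small, self-contained
illustration of that kind of parsing (an approximation, not a copy of ninja's
ParseVersion):

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    // Illustrative only: reduce "1.6.0.git" to major=1, minor=6. Missing
    // components default to 0. Not ninja's exact implementation.
    void ParseMajorMinor(const std::string& version, int* major, int* minor) {
      *major = 0;
      *minor = 0;
      size_t end = version.find('.');
      *major = std::atoi(version.substr(0, end).c_str());
      if (end == std::string::npos)
        return;
      size_t start = end + 1;
      end = version.find('.', start);
      *minor = std::atoi(version.substr(start, end - start).c_str());
    }

    int main() {
      int major = 0, minor = 0;
      ParseMajorMinor("1.6.0.git", &major, &minor);
      printf("%d.%d\n", major, minor);  // prints "1.6"
      return 0;
    }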