summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid 'Digit' Turner <digit+github@google.com>2024-04-23 17:01:51 (GMT)
committerJan Niklas Hasse <jhasse@bixense.com>2024-05-11 11:40:50 (GMT)
commit436abee2faf087d80ea94dd978c1c725ab337da7 (patch)
treee4fbc5679562a40d1f53eb10e2971fbf5a055e1b
parent5239a6d66fa76a3905b9470c22635c1ccd9d13da (diff)
downloadNinja-436abee2faf087d80ea94dd978c1c725ab337da7.zip
Ninja-436abee2faf087d80ea94dd978c1c725ab337da7.tar.gz
Ninja-436abee2faf087d80ea94dd978c1c725ab337da7.tar.bz2
Simplify ComputeCriticalPath() function.
Using simple unordered sets is enough to perform the same computation with less complexity, yielding smaller and faster code. On a large Fuchsia build plan, this reduces the ComputeCriticalPath metric from 2.6s to 2.1s!
-rw-r--r--src/build.cc40
1 file changed, 11 insertions, 29 deletions
diff --git a/src/build.cc b/src/build.cc
index 7e2ccfa..fb29732 100644
--- a/src/build.cc
+++ b/src/build.cc
@@ -16,11 +16,13 @@
#include <assert.h>
#include <errno.h>
+#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
+
#include <climits>
-#include <stdint.h>
#include <functional>
+#include <unordered_set>
#if defined(__SVR4) && defined(__sun)
#include <sys/termios.h>
@@ -451,18 +453,6 @@ void Plan::UnmarkDependents(const Node* node, set<Node*>* dependents) {
namespace {
-template <typename T>
-struct SeenBefore {
- std::set<const T*>* seen_;
-
- SeenBefore(std::set<const T*>* seen) : seen_(seen) {}
-
- bool operator() (const T* item) {
- // Return true if the item has been seen before
- return !seen_->insert(item).second;
- }
-};
-
// Heuristic for edge priority weighting.
// Phony edges are free (0 cost), all other edges are weighted equally.
int64_t EdgeWeightHeuristic(Edge *edge) {
@@ -474,28 +464,22 @@ int64_t EdgeWeightHeuristic(Edge *edge) {
void Plan::ComputeCriticalPath() {
METRIC_RECORD("ComputeCriticalPath");
// Remove duplicate targets
- {
- std::set<const Node*> seen;
- SeenBefore<Node> seen_before(&seen);
- targets_.erase(std::remove_if(targets_.begin(), targets_.end(), seen_before),
- targets_.end());
- }
+ std::unordered_set<const Node*> unique_targets(targets_.begin(),
+ targets_.end());
// Use backflow algorithm to compute the critical path for all
// nodes, starting from the destination nodes.
// XXX: ignores pools
std::queue<Edge*> work_queue; // Queue, for breadth-first traversal
// The set of edges currently in work_queue, to avoid duplicates.
- std::set<const Edge*> active_edges;
- SeenBefore<Edge> seen_edge(&active_edges);
+ std::unordered_set<const Edge*> active_edges;
- for (size_t i = 0; i < targets_.size(); ++i) {
- const Node* target = targets_[i];
+ for (const Node* target : unique_targets) {
if (Edge* in = target->in_edge()) {
int64_t edge_weight = EdgeWeightHeuristic(in);
in->set_critical_path_weight(
std::max<int64_t>(edge_weight, in->critical_path_weight()));
- if (!seen_edge(in)) {
+ if (active_edges.insert(in).second) {
work_queue.push(in);
}
}
@@ -508,10 +492,8 @@ void Plan::ComputeCriticalPath() {
// edge may need to be processed again. So re-allow insertion.
active_edges.erase(e);
- for (std::vector<Node*>::iterator it = e->inputs_.begin(),
- end = e->inputs_.end();
- it != end; ++it) {
- Edge* in = (*it)->in_edge();
+ for (const Node* input : e->inputs_) {
+ Edge* in = input->in_edge();
if (!in) {
continue;
}
@@ -520,7 +502,7 @@ void Plan::ComputeCriticalPath() {
const int64_t proposed_weight = e->critical_path_weight() + edge_weight;
if (proposed_weight > in->critical_path_weight()) {
in->set_critical_path_weight(proposed_weight);
- if (!seen_edge(in)) {
+ if (active_edges.insert(in).second) {
work_queue.push(in);
}
}