def load_graph(self, root_node, check_updates, update, remotes, profile_host, graph_lock=None):
    check_updates = check_updates or update
    initial = graph_lock.initial_counter if graph_lock else None
    dep_graph = DepsGraph(initial_node_id=initial)
    # compute the conanfile entry point for this dependency graph
    name = root_node.name
    root_node.public_closure = OrderedDict([(name, root_node)])
    root_node.transitive_closure = OrderedDict([(name, root_node)])
    root_node.public_deps = {name: root_node}
    root_node.ancestors = set()
    dep_graph.add_node(root_node)

    # enter recursive computation
    t1 = time.time()
    self._expand_node(root_node, dep_graph, Requirements(), None, None, check_updates, update,
                      remotes, profile_host, graph_lock)
    logger.debug("GRAPH: Time to load deps %s" % (time.time() - t1))
    return dep_graph
def load_graph(self, root_node, check_updates, update, remotes, profile_host, profile_build,
               graph_lock=None):
    check_updates = check_updates or update
    initial = graph_lock.initial_counter if graph_lock else None
    dep_graph = DepsGraph(initial_node_id=initial)
    # compute the conanfile entry point for this dependency graph
    root_node.public_closure.add(root_node)
    root_node.public_deps.add(root_node)
    root_node.transitive_closure[root_node.name] = root_node
    if profile_build:
        root_node.conanfile.settings_build = profile_build.processed_settings.copy()
        root_node.conanfile.settings_target = None
    dep_graph.add_node(root_node)

    # enter recursive computation
    t1 = time.time()
    self._expand_node(root_node, dep_graph, Requirements(), None, None, check_updates, update,
                      remotes, profile_host, profile_build, graph_lock)
    logger.debug("GRAPH: Time to load deps %s" % (time.time() - t1))
    return dep_graph
def load_graph(self, root_node, check_updates, update, remotes, processed_profile):
    check_updates = check_updates or update
    dep_graph = DepsGraph()
    # compute the conanfile entry point for this dependency graph
    name = root_node.name
    root_node.public_closure = OrderedDict([(name, root_node)])
    root_node.public_deps = {name: root_node}
    root_node.ancestors = set()
    dep_graph.add_node(root_node)

    # enter recursive computation
    t1 = time.time()
    self._load_deps(dep_graph, root_node, Requirements(), None, None, check_updates, update,
                    remotes, processed_profile)
    logger.debug("GRAPH: Time to load deps %s" % (time.time() - t1))
    return dep_graph
def load_graph(self, conanfile, check_updates, update):
    check_updates = check_updates or update
    dep_graph = DepsGraph()
    # compute the conanfile entry point for this dependency graph
    root_node = Node(None, conanfile)
    dep_graph.add_node(root_node)
    public_deps = {}  # {name: Node} dict with public nodes, so they are not added again
    aliased = {}
    # enter recursive computation
    t1 = time.time()
    loop_ancestors = []
    self._load_deps(root_node, Requirements(), dep_graph, public_deps, None, None,
                    loop_ancestors, aliased, check_updates, update)
    logger.debug("Deps-builder: Time to load deps %s" % (time.time() - t1))
    t1 = time.time()
    dep_graph.propagate_info()
    logger.debug("Deps-builder: Propagate info %s" % (time.time() - t1))
    return dep_graph
def extend_build_requires(self, graph, node, build_requires_refs, check_updates, update,
                          remotes, processed_profile, graph_lock):
    # The options that will be defined in the node will be the real options values that have
    # been already propagated downstream from the dependency graph. This will override any
    # other possible option in the build_requires dependency graph. This means that in theory
    # an option conflict while expanding the build_requires is impossible
    node.conanfile.build_requires_options.clear_unscoped_options()
    new_options = node.conanfile.build_requires_options._reqs_options
    new_reqs = Requirements()

    conanfile = node.conanfile
    scope = conanfile.display_name
    requires = [Requirement(ref) for ref in build_requires_refs]
    if graph_lock:
        graph_lock.lock_node(node, requires, build_requires=True)
    self._resolve_ranges(graph, requires, scope, update, remotes)

    for require in requires:
        name = require.ref.name
        require.build_require = True
        self._handle_require(name, node, require, graph, check_updates, update, remotes,
                             processed_profile, new_reqs, new_options, graph_lock)

    new_nodes = set(n for n in graph.nodes if n.package_id is None)
    # This is to make sure that build_requires have precedence over the normal requires
    ordered_closure = list(node.public_closure.items())
    ordered_closure.sort(key=lambda x: x[1] not in new_nodes)
    node.public_closure = OrderedDict(ordered_closure)

    subgraph = DepsGraph()
    subgraph.aliased = graph.aliased
    subgraph.evaluated = graph.evaluated
    subgraph.nodes = new_nodes
    for n in subgraph.nodes:
        n.build_require = True
    return subgraph
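A minimal, self-contained sketch (not Conan code; the names "closure" and "new_nodes" are illustrative) of the stable-sort reordering used above: sorting the public closure with a boolean key moves the freshly added build-require nodes to the front while preserving the relative order within each group.

# Standalone illustration of sorting an OrderedDict by membership in a set.
# Entries whose value is in new_nodes get key False and, because Python's sort
# is stable, end up first in their original relative order.
from collections import OrderedDict

closure = OrderedDict([("zlib", "zlib-node"), ("cmake", "cmake-node"), ("ninja", "ninja-node")])
new_nodes = {"cmake-node", "ninja-node"}  # hypothetical nodes created while expanding build_requires

ordered = list(closure.items())
ordered.sort(key=lambda item: item[1] not in new_nodes)  # False (new node) sorts before True
closure = OrderedDict(ordered)

print(list(closure))  # ['cmake', 'ninja', 'zlib'] -> build_requires take precedence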
def load_graph(self, conanfile, check_updates, update, remote_name):
    check_updates = check_updates or update
    dep_graph = DepsGraph()
    # compute the conanfile entry point for this dependency graph
    root_node = Node(None, conanfile)
    dep_graph.add_node(root_node)
    public_deps = {}  # {name: Node} dict with public nodes, so they are not added again
    aliased = {}
    # enter recursive computation
    t1 = time.time()
    loop_ancestors = []
    self._load_deps(root_node, Requirements(), dep_graph, public_deps, None, None,
                    loop_ancestors, aliased, check_updates, update, remote_name)
    logger.debug("Deps-builder: Time to load deps %s" % (time.time() - t1))
    t1 = time.time()
    dep_graph.compute_package_ids()
    logger.debug("Deps-builder: Propagate info %s" % (time.time() - t1))
    return dep_graph