# Standard-library and colorama imports used below; project-internal names
# (BuildState, Rule, TargetLookup, BuildException, CacheMiss, MissingDependency,
# HashState, WorkspaceExecutionContext, the status-monitor/cache/log/hash helpers,
# and worker) are imported from elsewhere in the project.
import os
import traceback
from threading import Thread
from typing import Dict, List, Set

from colorama import Style


def run_build(build_state: BuildState, targets: List[str], num_threads: int, quiet: bool):
    build_state.status_monitor = create_status_monitor(num_threads, quiet)

    # Resolve each requested target to its root rule and seed the work queue.
    root_rules = []
    for target in targets:
        root_rule = build_state.target_rule_lookup.try_lookup(
            target
        ) or build_state.target_rule_lookup.lookup(build_state, ":" + target)
        root_rules.append(root_rule)
        build_state.scheduled_but_not_ready.add(root_rule)
        build_state.work_queue.put(root_rule)
        build_state.status_monitor.move(total=1)

    # Spin up the worker pool and wait for the queue to drain.
    thread_instances = []
    for i in range(num_threads):
        thread = Thread(target=worker, args=(build_state, i), daemon=True)
        thread_instances.append(thread)
        thread.start()
    build_state.work_queue.join()

    if build_state.failure is not None:
        raise build_state.failure

    # Shut the workers down by sending one sentinel per thread.
    for _ in range(num_threads):
        build_state.work_queue.put(None)
    for thread in thread_instances:
        thread.join()

    build_state.status_monitor.stop()

    if build_state.scheduled_but_not_ready:
        # there is a dependency cycle somewhere!
        for root_rule in root_rules:
            if root_rule in build_state.scheduled_but_not_ready:
                break
        else:
            raise BuildException("An internal error occurred.")

        # Walk pending dependencies from the stuck root rule until a rule repeats,
        # which pins down the cycle to report.
        chain = []
        pos = root_rule
        while True:
            if pos in chain:
                chain.append(pos)
                raise BuildException(
                    f"Circular dependency detected: Rule {pos} depends on itself "
                    f"through the path: {' -> '.join(map(str, chain))}"
                )
            else:
                chain.append(pos)
                pos = next(iter(pos.pending_rule_dependencies))
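
# For context, a minimal sketch (an assumption, not the project's actual worker
# implementation) of the queue protocol run_build relies on: each worker must call
# task_done() for every item it pulls so that work_queue.join() can return, and
# must exit when it receives the None sentinel that run_build enqueues once per
# thread during shutdown.
def _example_worker(build_state: BuildState, index: int) -> None:
    while True:
        rule = build_state.work_queue.get()
        if rule is None:
            # shutdown sentinel from run_build
            build_state.work_queue.task_done()
            return
        try:
            ...  # build the rule, then enqueue any dependents that became ready
        except Exception as e:
            build_state.failure = e  # surfaced by run_build after join()
        finally:
            build_state.work_queue.task_done()
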
def initialize_workspace(
    setup_rule_lookup: TargetLookup,
    setup_target: str,
    state_directory: str,
    quiet: bool,
):
    # we don't need the indirect lookup as we only have rule and source deps
    direct_lookup: Dict[str, Rule] = setup_rule_lookup.direct_lookup
    if setup_target not in direct_lookup:
        raise BuildException(f"Unknown or unspecified setup target {setup_target}")

    rebuilt: Set[Rule] = set()
    ready: Set[Rule] = set()
    work_queue = [direct_lookup[setup_target]]

    cache_fetcher, _ = make_cache_fetcher(state_directory)
    cache_memorize, _ = make_cache_memorize(state_directory)

    status_monitor = create_status_monitor(1, quiet)
    status_monitor.move(total=1)

    while work_queue:
        todo = work_queue.pop()
        log(f"Popping setup rule {todo} off work queue")
        hashstate = HashState()
        ctx = WorkspaceExecutionContext(hashstate)
        unchecked_rules = []

        # Hash every dependency; rule deps that are not ready yet are deferred.
        for dep in todo.deps:
            hashstate.record(dep)
            if dep.startswith(":"):
                if dep not in direct_lookup:
                    raise BuildException(f"Unable to find setup rule {dep}")
                dep_rule = direct_lookup[dep]
                if dep_rule not in ready:
                    unchecked_rules.append(dep_rule)
                    continue
                ctx.deps[dep] = dep_rule.provided_value
                setattr(ctx.deps, dep[1:], dep_rule.provided_value)
            else:
                try:
                    hashstate.update(hash_file(dep))
                except FileNotFoundError:
                    raise BuildException(f"Source file {dep} not found.")

        if unchecked_rules:
            # defer this rule until its setup-rule dependencies have run
            for dep in unchecked_rules:
                if dep not in work_queue:
                    log(f"Setup rule {todo} is enqueuing {dep}")
                    status_monitor.move(total=1)
                    work_queue.append(dep)
                else:
                    log(f"Setup rule {todo} is waiting on {dep}, which is already enqueued")
                dep.runtime_dependents.add(todo)
                todo.pending_rule_dependencies.add(dep)
        else:
            # our dependent rules are ready, now we need to see if we need to rerun
            todo.provided_value = todo.impl(ctx)
            if todo.name is None:
                raise BuildException(f"All setup rules must have names, but {todo} does not.")
            try:
                ok = cache_fetcher("workspace", todo.name) == hashstate.state()
                if not ok:
                    log(f"State mismatch for rule {todo}, need to rerun")
            except CacheMiss:
                log(f"State not found for rule {todo}, need to run for first time")
                ok = False
            for dep in todo.deps:
                if dep.startswith(":"):
                    if direct_lookup[dep] in rebuilt:
                        log(f"Dependency {dep} of setup rule {todo} was rebuilt, so we must rebuild {todo} as well")
                        ok = False
            for out in todo.outputs:
                if not os.path.exists(out):
                    log(f"Output {out} is missing for setup rule {todo}, forcing rerun")
                    ok = False
                    break

            if not ok:
                # we need to fully run
                ctx.run_shell_queue()
                rebuilt.add(todo)
                cache_memorize("workspace", todo.name, hashstate.state())

            # either way, now we can trigger our dependents
            ready.add(todo)
            for dep in todo.runtime_dependents:
                dep.pending_rule_dependencies.remove(todo)
                if not dep.pending_rule_dependencies:
                    work_queue.append(dep)
                    status_monitor.move(total=1)

        status_monitor.move(curr=1)
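
# A minimal sketch, for illustration only, of the string-cache contract that both
# versions of initialize_workspace assume: make_cache_fetcher / make_cache_load
# return a pair whose first element maps a (category, key) pair to a previously
# stored string and raises CacheMiss when nothing is stored.  The on-disk layout
# below is an assumption; the real helpers live elsewhere in the project, and the
# second element of the pair (unused here) is omitted from the sketch.
def _example_make_cache_load(state_directory: str):
    def load_string(category: str, key: str) -> str:
        path = os.path.join(state_directory, category, key)
        if not os.path.exists(path):
            raise CacheMiss
        with open(path) as f:
            return f.read()

    return load_string, None
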
def initialize_workspace(
    setup_rule_lookup: TargetLookup,
    setup_targets: List[str],
    state_directory: str,
    quiet: bool,
):
    # we don't need the indirect lookup as we only have rule and source deps
    direct_lookup: Dict[str, Rule] = setup_rule_lookup.direct_lookup

    work_queue = []
    for setup_target in setup_targets:
        if setup_target not in direct_lookup:
            raise BuildException(f"Unknown or unspecified setup target {setup_target}")
        work_queue.append(direct_lookup[setup_target])

    rebuilt: Set[Rule] = set()
    ready: Set[Rule] = set()

    cache_load_string, _ = make_cache_load(state_directory)
    cache_store_string, _ = make_cache_store(state_directory)

    if work_queue:
        status_monitor = create_status_monitor(1, quiet)
        status_monitor.move(total=len(work_queue))

    def dep_fetcher(dep):
        if dep.startswith(":"):
            if dep not in direct_lookup:
                raise BuildException(f"Unable to find setup rule {dep}")
            dep_rule = direct_lookup[dep]
            log(f"Looking up setup rule {dep}")
            if dep_rule not in ready:
                raise MissingDependency(dep)
            return dep_rule

    while work_queue:
        todo = work_queue.pop()
        log(f"Popping setup rule {todo} off work queue")
        try:
            if todo.name is None:
                raise BuildException(f"All setup rules must have names, but {todo} does not.")
            hashstate = HashState()
            ctx = WorkspaceExecutionContext(hashstate, dep_fetcher)
            unchecked_rules = []

            # Run the rule implementation; any dependency that is not ready yet
            # surfaces as a MissingDependency and defers this rule.
            try:
                todo.set_provided_value(
                    todo.impl(ctx),
                    None,
                    ctx.inputs,
                    ctx.deferred_inputs,
                    [],  # todo: implement output providers for setup rules
                )
                if ctx.out_of_date_deps:
                    raise MissingDependency(*ctx.out_of_date_deps)
            except MissingDependency as e:
                unchecked_rules = [direct_lookup[x] for x in e.paths]

            if unchecked_rules:
                for dep in unchecked_rules:
                    if dep not in work_queue:
                        log(f"Setup rule {todo} is enqueuing {dep}")
                        status_monitor.move(total=1)
                        work_queue.append(dep)
                    else:
                        log(f"Setup rule {todo} is waiting on {dep}, which is already enqueued")
                    dep.runtime_dependents.add(todo)
                    todo.pending_rule_dependencies.add(dep)
            else:
                log(f"Setup rule {todo} ran with inputs {ctx.inputs + ctx.deferred_inputs}")

                # Hash all source inputs so we can detect whether a rerun is needed.
                for dep in ctx.inputs + ctx.deferred_inputs:
                    if dep.startswith(":"):
                        continue
                    try:
                        hashstate.record(dep)
                        hashstate.update(hash_file(dep))
                    except FileNotFoundError:
                        raise BuildException(f"Source file {dep} not found.")

                try:
                    ok = cache_load_string("workspace", todo.name) == hashstate.state()
                    if not ok:
                        log(f"State mismatch for rule {todo}, need to rerun")
                except CacheMiss:
                    log(f"State not found for rule {todo}, need to run for first time")
                    ok = False
                for dep in ctx.inputs + ctx.deferred_inputs:
                    if dep.startswith(":"):
                        if direct_lookup[dep] in rebuilt:
                            log(f"Dependency {dep} of setup rule {todo} was rebuilt, so we must rebuild {todo} as well")
                            ok = False
                for out in todo.outputs:
                    if not os.path.exists(out):
                        log(f"Output {out} is missing for setup rule {todo}, forcing rerun")
                        ok = False
                        break

                if not ok:
                    # we need to fully run
                    log(f"Fully running setup rule {todo}")
                    ctx.run_shell_queue()
                    rebuilt.add(todo)
                    cache_store_string("workspace", todo.name, hashstate.state())

                # either way, now we can trigger our dependents
                ready.add(todo)
                for dep in todo.runtime_dependents:
                    dep.pending_rule_dependencies.remove(todo)
                    if not dep.pending_rule_dependencies:
                        work_queue.append(dep)
                        status_monitor.move(total=1)

            status_monitor.move(curr=1)
        except Exception as e:
            if not isinstance(e, BuildException):
                suffix = f"\n{Style.RESET_ALL}" + traceback.format_exc()
            else:
                suffix = ""
            status_monitor.stop()
            raise BuildException(f"Error while executing rule {todo}: " + str(e) + suffix)
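
# A hedged sketch of the MissingDependency exception that dep_fetcher raises and
# initialize_workspace catches above; the real class is defined elsewhere in the
# project, and the only attribute relied on here is .paths, the labels of the
# not-yet-ready setup rules.
class _ExampleMissingDependency(Exception):
    def __init__(self, *paths: str):
        super().__init__(f"Missing dependencies: {', '.join(paths)}")
        self.paths = paths
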