def run(args: argparse.Namespace) -> None:
    workspace = get_workspace_with_repos(args)
    status_collector = StatusCollector(workspace)
    repos = workspace.repos
    if not repos:
        ui.info_2("Workspace is empty")
        return
    ui.info_1(f"Collecting statuses of {len(repos)} repo(s)")
    num_jobs = get_num_jobs(args)
    process_items(repos, status_collector, num_jobs=num_jobs)
    erase_last_line()
    ui.info_2("Workspace status:")
    statuses = status_collector.statuses
    max_dest = max(len(x) for x in statuses.keys())
    for dest, status in statuses.items():
        message = [ui.green, "*", ui.reset, dest.ljust(max_dest)]
        message += describe_status(status)
        ui.info(*message)
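# For reference (standalone illustration, not part of tsrc): `dest.ljust(max_dest)`
# pads each destination name to the width of the longest one, so the status text
# lines up in a column.
names = ["foo", "bar-baz"]
width = max(len(n) for n in names)
for n in names:
    print(n.ljust(width), "| status")
# foo     | status
# bar-baz | status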
def run(args: argparse.Namespace) -> None:
    # Note:
    # we want to support both:
    #   $ tsrc foreach -c 'shell command'
    # and
    #   $ tsrc foreach -- some-cmd --some-opts
    #
    # Due to argparse limitations, `cmd` will always be a list,
    # but we need a *string* when using 'shell=True'.
    #
    # So transform the values of `cmd` and `shell` so that:
    # * `command` is suitable as an argument to pass to subprocess.run()
    # * `description` is suitable for display purposes
    command: Command = []
    if args.shell:
        if len(args.cmd) != 1:
            die("foreach -c must be followed by exactly one argument")
        command = args.cmd[0]
        description = args.cmd[0]
    else:
        if not args.cmd:
            die("needs a command to run")
        command = args.cmd
        description = " ".join(args.cmd)
    shell = args.shell
    num_jobs = get_num_jobs(args)
    workspace = get_workspace_with_repos(args)
    cmd_runner = CmdRunner(workspace.root_path, command, description, shell=shell)
    repos = workspace.repos
    ui.info_1(f"Running `{description}` on {len(repos)} repos")
    collection = process_items(repos, cmd_runner, num_jobs=num_jobs)
    errors = collection.errors
    if errors:
        ui.error(f"Command failed for {len(errors)} repo(s)")
        if cmd_runner.parallel:
            # Print the output of the failed commands, which was hidden
            # while they ran in parallel
            for item, error in errors.items():
                ui.info(item)
                ui.info("-" * len(item))
                ui.info(error)
        else:
            # Just print the repos
            for item in errors:
                ui.info(ui.green, "*", ui.reset, item)
        raise ForeachError()
    else:
        ui.info("OK", ui.check)
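# Illustration (not part of tsrc) of the constraint described in the comment
# above: with `shell=True`, subprocess.run() expects one string parsed by the
# shell, while the default mode expects an argv list. The command used here is
# just an example.
import subprocess

subprocess.run("ls -l /tmp", shell=True)   # one string, interpreted by the shell
subprocess.run(["ls", "-l", "/tmp"])       # argv list, no shell involved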
def set_remotes(self, num_jobs: int = 1) -> None:
    if self.config.singular_remote:
        return
    ui.info_2("Configuring remotes")
    remote_setter = RemoteSetter(self.root_path)
    collection = process_items(self.repos, remote_setter, num_jobs=num_jobs)
    collection.print_summary()
    if collection.errors:
        ui.error("Failed to set remotes for the following repos:")
        collection.print_errors()
        raise RemoteSetterError
def run(args: argparse.Namespace) -> None:
    workspace = get_workspace_with_repos(args)
    num_jobs = get_num_jobs(args)
    from_ref = args.from_ref
    to_ref = args.to_ref
    repos = workspace.repos
    log_collector = LogCollector(workspace.root_path, from_ref=from_ref, to_ref=to_ref)
    collection = process_items(repos, log_collector, num_jobs=num_jobs)
    collection.print_summary()
    if collection.errors:
        ui.error("Error when collecting logs")
        collection.print_errors()
        raise LogCollectorFailed
def sync(self, *, force: bool = False, num_jobs: int = 1) -> None:
    syncer = Syncer(self.root_path, force=force, remote_name=self.config.singular_remote)
    repos = self.repos
    ui.info_2("Synchronizing repos")
    collection = process_items(repos, syncer, num_jobs=num_jobs)
    if collection.summary:
        ui.info_2("Updated repos:")
        for summary in collection.summary:
            if summary:
                ui.info(summary)
    if collection.errors:
        ui.error("Failed to synchronize the following repos:")
        collection.print_errors()
        raise SyncError
def perform_filesystem_operations(self, manifest: Optional[Manifest] = None) -> None:
    repos = self.repos
    if not manifest:
        manifest = self.get_manifest()
    operator = FileSystemOperator(self.root_path, repos)
    operations = manifest.file_system_operations
    known_repos = [x.dest for x in repos]
    operations = [x for x in operations if x.get_repo() in known_repos]
    if operations:
        ui.info_2("Performing filesystem operations")
        # Not sure it's a good idea to have FileSystemOperations running in parallel
        collection = process_items(operations, operator, num_jobs=1)
        collection.print_summary()
        if collection.errors:
            ui.error("Failed to perform the following file system operations")
            collection.print_errors()
            raise FileSystemOperatorError
def clone_missing(self, *, num_jobs: int = 1) -> None:
    to_clone = []
    for repo in self.repos:
        repo_path = self.root_path / repo.dest
        if not is_git_repository(repo_path):
            to_clone.append(repo)
    cloner = Cloner(
        self.root_path,
        shallow=self.config.shallow_clones,
        remote_name=self.config.singular_remote,
    )
    ui.info_2("Cloning missing repos")
    collection = process_items(to_clone, cloner, num_jobs=num_jobs)
    if collection.summary:
        ui.info_2("Cloned repos:")
        for summary in collection.summary:
            ui.info(ui.green, "*", ui.reset, summary)
    if collection.errors:
        ui.error("Failed to clone the following repos")
        collection.print_errors()
        raise ClonerError
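# Hypothetical call site (a sketch, not taken from tsrc): the two workspace
# methods above are driven with the same num_jobs value, cloning whatever is
# missing before synchronizing everything.
workspace.clone_missing(num_jobs=4)
workspace.sync(force=False, num_jobs=4)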
def test_parallel_sad() -> None:
    task = FakeTask()
    actual = process_items(["foo", "bar", "failing", "baz", "quux"], task, num_jobs=2)
    errors = actual.errors
    assert errors["failing"].message == "Kaboom"
def test_sequence_sad() -> None:
    task = FakeTask()
    actual = process_items(["foo", "failing", "bar"], task)
    assert actual.errors["failing"].message == "Kaboom"
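# The two tests above assume a FakeTask that succeeds on every item except
# "failing", where it raises an error carrying the message "Kaboom". A minimal
# sketch of such a task follows; the class and error names are illustrative
# assumptions, not the actual tsrc task API.
class FakeTaskError(Exception):
    def __init__(self, message: str) -> None:
        super().__init__(message)
        self.message = message


class FakeTask:
    def process(self, item: str) -> str:
        if item == "failing":
            raise FakeTaskError("Kaboom")
        return f"processed {item}"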