def _run_local(job_df: tfs.TfsDataFrame, num_processes: int) -> None:
    """Run all jobs of *job_df* locally, distributed over a process pool.

    Args:
        job_df: TfsDataFrame with one row per job; each ``(index, row)``
            pair from ``iterrows()`` is handed to ``_execute_shell``.
        num_processes: Number of worker processes to spawn.

    Raises:
        RuntimeError: If any job returns a truthy (non-zero) result.
    """
    LOG.info( f"Running {len(job_df.index)} jobs locally in {num_processes:d} processes." )
    # Use the pool as a context manager so workers are always terminated
    # and reaped, even if map() raises (the original leaked the pool).
    with multiprocessing.Pool(processes=num_processes) as pool:
        res = pool.map(_execute_shell, job_df.iterrows())
    # _execute_shell presumably returns the shell exit code; any non-zero
    # value marks a failed job — TODO confirm against its definition.
    if any(res):
        LOG.error("At least one job has failed.")
        raise RuntimeError("At least one job has failed. Check output logs!")
def _drop_already_ran_jobs(job_df: tfs.TfsDataFrame, drop_jobs: bool, output_dir: str, check_files: str):
    """Filter out jobs that have already completed successfully.

    Args:
        job_df: TfsDataFrame of jobs, one row per job.
        drop_jobs: If ``True``, each job is checked via
            ``_job_was_successful`` and finished ones are removed;
            if ``False``, nothing is dropped.
        output_dir: Output directory handed to the success check.
        check_files: File specification handed to the success check.

    Returns:
        Tuple of the job dataframe without the finished jobs and the
        list of indices that were dropped.
    """
    LOG.debug("Dropping already finished jobs, if necessary.")
    finished_jobs = []
    if drop_jobs:
        for job_id, job_row in job_df.iterrows():
            if _job_was_successful(job_row, output_dir, check_files):
                finished_jobs.append(job_id)
    LOG.info(f"{len(finished_jobs):d} of {len(job_df.index):d}"
             " Jobs have already finished and will be skipped.")
    return job_df.drop(index=finished_jobs), finished_jobs