import logging
import shutil
import time
from concurrent.futures import ProcessPoolExecutor, as_completed
from itertools import groupby

# `runs`, `base_parser`, `sort_key`, `groupby_key`, `sync`, and the
# module-level `parser` are project code not shown in this section.

logger = logging.getLogger(__name__)


# Cleanup: remove local data for runs that have already been synced.
def main():
    start = time.time()
    foundruns = runs.find_runs()
    cleaned = 0
    for run in foundruns:
        # A run with a recorded sync time has been uploaded and is safe to delete.
        if run.synced is not None:
            logger.info("Cleaning {}".format(run.path))
            shutil.rmtree(run.path)
            cleaned += 1
    logger.info("Cleaned {} runs in {} seconds".format(
        cleaned, time.time() - start))
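# The loop above relies only on run objects exposing `.path` and `.synced`.
# A minimal stand-in for local testing might look like this (hypothetical;
# not the project's real run type):

from dataclasses import dataclass
from typing import Optional


@dataclass
class FakeRun:
    path: str
    synced: Optional[float] = None  # e.g. epoch timestamp of the last sync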
# Same cleanup, with CLI argument parsing and sub-second timing added.
def main():
    # Parse args. No script-specific flags are added here, but parsing
    # still validates input and provides --help.
    parser = base_parser(
        description='Removes stale run data that has already been synced.')
    parser.parse_args()

    start = time.time()
    foundruns = runs.find_runs()
    cleaned = 0
    for run in foundruns:
        if run.synced is not None:
            logger.info("Cleaning {}".format(run.path))
            shutil.rmtree(run.path)
            cleaned += 1
    logger.info("Cleaned {} runs in {:.3f} seconds".format(
        cleaned, time.time() - start))
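# `base_parser` itself is not shown in this section. A minimal sketch,
# assuming it is a thin argparse wrapper shared by these scripts (the
# `--concurrency` flag and its default are illustrative guesses based on
# the sync script below, not the real API):

import argparse


def base_parser(description=None):
    """Build an ArgumentParser with options shared by these scripts."""
    p = argparse.ArgumentParser(description=description)
    p.add_argument('--concurrency', type=int, default=4,
                   help='Number of worker processes to use.')
    return p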
# Sync: upload runs in parallel, one group of run paths per worker task.
def main():
    start = time.time()
    args = parser.parse_args()
    foundruns = runs.find_runs()
    # itertools.groupby() only merges adjacent items, so the runs must be
    # sorted with a key compatible with groupby_key before grouping.
    foundruns = sorted(foundruns, key=sort_key)
    with ProcessPoolExecutor(max_workers=args.concurrency) as executor:
        future_to_sync = set()
        for _, group in groupby(foundruns, groupby_key):
            paths = [r.path for r in group]
            future_to_sync.add(executor.submit(sync, paths))
        for future in as_completed(future_to_sync):
            try:
                future.result()
            except Exception:
                logger.exception(
                    'An exception occurred while processing a group.')
    logger.info("Finished in {} seconds".format(time.time() - start))
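# `sort_key` and `groupby_key` are defined elsewhere. One plausible shape,
# purely illustrative: batch runs by their parent directory so that each
# worker task syncs one directory's runs together.

import os


def groupby_key(run):
    # Hypothetical grouping: runs under the same parent directory.
    return os.path.dirname(run.path)


def sort_key(run):
    # Sorting by the group key first guarantees groupby() sees equal keys
    # on adjacent items; run.path is just a stable tiebreaker.
    return (groupby_key(run), run.path)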