        approximate_cost += estimateUsage(indices, groupSize, cores, total_hours)

        # figure out how many indices to expect
        size = exp.numPermutations() * runs

        # log how many are missing
        print(path, f'{len(indices)} / {size}')

    return out, approximate_cost

# ----------------
# Scheduling logic
# ----------------
slurm = Slurm.fromFile(slurm_path)

# compute how many "tasks" to clump into each job
groupSize = slurm.cores * slurm.sequential

# compute how much time the jobs are going to take
hours, minutes, seconds = slurm.time.split(':')
total_hours = int(hours) + (int(minutes) / 60) + (int(seconds) / 3600)

# gather missing and sum up cost
missing, cost = gatherMissing(experiment_paths, runs, groupSize, slurm.cores, total_hours)
print(
    f"Expected to use {cost[0]:.2f} core years, which is {cost[1]:.4f}% of our annual allocation"
)
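# `estimateUsage` is defined elsewhere in the repo. As a rough sketch of the
# arithmetic it presumably performs (an assumption, not the real
# implementation): pack the missing indices into jobs of `groupSize` tasks,
# reserve `cores` cores per job for the full walltime, and report the total
# as core-years plus a percentage of a (placeholder) annual allocation.
# Returning a numpy array lets `approximate_cost += ...` above accumulate
# both numbers element-wise, matching the `cost[0]`/`cost[1]` print.
import math

import numpy as np

# hypothetical allocation size, in core-years; placeholder value only
ANNUAL_ALLOCATION_CORE_YEARS = 50.0

def estimateUsageSketch(indices, groupSize, cores, total_hours):
    # assumes `indices` is a sized collection (e.g. a list), not a generator
    jobs = math.ceil(len(indices) / groupSize)

    # each job holds `cores` cores for the full requested walltime
    core_years = (jobs * cores * total_hours) / (24 * 365)
    percent = 100 * core_years / ANNUAL_ALLOCATION_CORE_YEARS
    return np.array([core_years, percent])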
    for i, item in enumerate(it):
        print(f'{i + 1}/{size}', end='\r')
        # after the final element, move past the carriage-returned progress line
        if i + 1 == size:
            print()
        yield item

# ----------------
# Scheduling logic
# ----------------
for path in experiment_paths:
    print(path)

    # load the experiment json file
    exp = Experiment.load(path)

    # load the slurm config file
    slurm = Slurm.fromFile(slurm_path)

    # don't clump sequential tasks for slow algorithms
    if exp.agent in SLOW_ALGS:
        slurm.sequential = 1

    # figure out how many indices to use
    size = exp.numPermutations() * runs
    paths = listResultsPaths(exp, runs)
    res_path = first(paths)

    data = []
    data_path = f'{res_path}/returns.csv'
    if os.path.exists(data_path):
        # use a context manager so the file handle is closed promptly
        with open(data_path, 'r') as f:
            data = f.readlines()
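# The script below leans on two helpers defined elsewhere: `generateMissing`,
# which walks the expected result paths and yields the indices with no saved
# results, and `group`, which chunks an iterable into fixed-size batches.
# Minimal sketches under those assumptions (the `Sketch` names and the
# returns.csv layout are illustrative, mirroring the existence check above):
import os
from itertools import islice

def generateMissingSketch(paths):
    # yield the index of every result path whose returns.csv was never written
    for i, p in enumerate(paths):
        if not os.path.exists(f'{p}/returns.csv'):
            yield i

def groupSketch(it, size):
    # lazily yield lists of at most `size` items from the iterable
    it = iter(it)
    while chunk := list(islice(it, size)):
        yield chunk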
cwd = os.getcwd()

def getJobScript(parallel):
    # job scripts cd into the working directory, then run the generated parallel command
    return f"""#!/bin/bash
cd {cwd}
{parallel}
"""

for path in experiment_paths:
    print(path)

    exp = Experiment.load(path)
    slurm = Slurm.fromFile(slurm_path)

    # wrap the expected result paths in a progress-printing iterator,
    # then collect the indices whose results are missing
    size = exp.numPermutations() * runs
    paths = listResultsPaths(exp, runs)
    paths = printProgress(size, paths)
    indices = generateMissing(paths)

    # how many tasks to pack into a single job submission
    groupSize = slurm.tasks * slurm.tasksPerNode

    for g in group(indices, groupSize):
        l = list(g)
        print("scheduling:", path, l)
        parallel = Slurm.buildParallel(executable, l, {
            'ntasks': slurm.tasks,