def test_first(self):
    """first() yields the initial element for both sequences and iterators."""
    # sequence input
    numbers = [1, 2, 3, 4]
    self.assertEqual(first(numbers), 1)

    # iterator input: iter() is the idiomatic spelling of __iter__()
    letters = iter(['a', 'b', 'c'])
    self.assertEqual(first(letters), 'a')
def getBest(results):
    """Return the result whose mean loaded value is highest.

    Parameters
    ----------
    results : iterable of result objects exposing ``load() -> tuple``;
        ``load()[0]`` is the array whose mean is compared.

    Returns
    -------
    The result with the largest ``np.mean(r.load()[0])``; ties keep the
    earliest result. Raises ``StopIteration`` on an empty iterable
    (matching the prior ``first(...)`` behavior -- TODO confirm `first`
    raised the same).

    Notes
    -----
    Single pass: the incumbent's score is cached and recomputed only
    when the incumbent changes -- ``load()`` is presumably expensive
    (disk-backed), so the original per-iteration reload was wasteful.
    """
    it = iter(results)
    best = next(it)
    best_score = np.mean(best.load()[0])
    for r in it:
        score = np.mean(r.load()[0])
        # strict > keeps the earliest result on ties (same as original)
        if score > best_score:
            best = r
            best_score = score
    return best
def getBest(results: ResultList, reducer: Callable = np.mean):
    """Return the result whose reduced mean curve scores highest.

    Parameters
    ----------
    results : ResultList
        Iterable of result objects exposing ``mean()``.
    reducer : Callable, default ``np.mean``
        Collapses a result's ``mean()`` array to a scalar score.

    Returns
    -------
    The result with the largest ``reducer(r.mean())``; ties keep the
    earliest result. Raises ``StopIteration`` when ``results`` is empty.

    Notes
    -----
    Single pass over ``results``; the incumbent's score is cached and
    recomputed only when the incumbent changes (the original re-reduced
    ``best.mean()`` on every iteration). Assumes ``mean()`` is
    deterministic -- TODO confirm.
    """
    it = iter(results)
    best = next(it)
    best_score = reducer(best.mean())
    for r in it:
        score = reducer(r.mean())
        # strict > keeps the earliest result on ties (same as original)
        if score > best_score:
            best = r
            best_score = score
    return best
def getBest(results: ResultList, steps: Optional[int] = None, percent: float = 1.0, comparator=lambda a, b: a < b):
    """Return the result whose tail-window mean wins under ``comparator``.

    Parameters
    ----------
    results : ResultList
        Iterable of result objects exposing ``mean() -> ndarray``.
    steps : Optional[int]
        Window length before scaling; defaults to the full length of the
        first result's mean curve.
    percent : float, default 1.0
        Fraction of ``steps`` to actually average over (tail of the curve).
    comparator : callable, default ``a < b``
        ``comparator(candidate, incumbent)`` returning True replaces the
        incumbent (default selects the minimum).

    Returns
    -------
    The winning result; a NaN incumbent score is always replaced.
    Raises ``StopIteration`` when ``results`` is empty.

    Notes
    -----
    NOTE(review): when ``int(steps * percent)`` is 0 the slice
    ``[-0:]`` covers the WHOLE array, not zero elements -- preserved
    from the original; confirm intent. The incumbent's score is cached
    and recomputed only when the incumbent changes (the original
    recomputed ``low.mean()`` every iteration); assumes ``mean()`` is
    deterministic -- TODO confirm.
    """
    it = iter(results)
    low = next(it)
    if steps is None:
        steps = low.mean().shape[0]
    # average only over the trailing `percent` fraction of the window
    steps = int(steps * percent)

    low_score = np.mean(low.mean()[-steps:])
    for r in it:
        score = np.mean(r.mean()[-steps:])
        # a NaN incumbent always loses, regardless of comparator
        if np.isnan(low_score) or comparator(score, low_score):
            low = r
            low_score = score
    return low
# ----------------
# For each experiment description: count how many runs are still
# missing and size the slurm job groups accordingly.
for path in experiment_paths:
    print(path)

    # load the experiment json file
    exp = Experiment.load(path)
    # load the slurm config file
    slurm = Slurm.fromFile(slurm_path)

    # slow algorithms are not batched within a job
    if exp.agent in SLOW_ALGS:
        slurm.sequential = 1

    # figure out how many indices to use
    size = exp.numPermutations() * runs

    paths = listResultsPaths(exp, runs)
    res_path = first(paths)

    # read any already-recorded returns so finished runs can be skipped
    data = []
    data_path = f'{res_path}/returns.csv'
    if os.path.exists(data_path):
        # context manager guarantees the handle is closed (was a bare
        # open()/close() pair before)
        with open(data_path, 'r') as f:
            data = f.readlines()

    indices = listIndices(exp, runs)
    # get all of the indices corresponding to missing results
    indices = generateMissing(exp, indices, data)
    indices = printProgress(size, indices)

    # compute how many "tasks" to clump into each job
    groupSize = slurm.cores * slurm.sequential