def test_Results(self):
    results = [Result('fake/path', exp, i) for i in listIndices(exp)]

    r = results[0]
    self.assertDictEqual(r.params, {
        'alpha': 1.0,
        'ratio': 1.0,
        'model': {
            'name': 'PR',
        },
    })

    r = results[1]
    self.assertDictEqual(r.params, {
        'alpha': 0.5,
        'ratio': 1.0,
        'model': {
            'name': 'PR',
        },
    })
    self.assertEqual(r.idx, 1)

    # can overload the load function
    class TestResult(Result):
        def _load(self):
            # (mean, std, runs)
            return (1, 2, 3)

    results = [TestResult('fake/path', exp, i) for i in listIndices(exp)]
    r = results[0]
    self.assertEqual(r.mean(), 1)
def test_splitOverParameter(self):
    results = (Result('fake/path', exp, i) for i in listIndices(exp))
    split_results = splitOverParameter(results, 'alpha')

    self.assertEqual(list(split_results), [1.0, 0.5, 0.25])  # check keys
    self.assertEqual(len(split_results[1.0]), 8)
    for key in split_results:
        sub_results = split_results[key]
        for res in sub_results:
            self.assertEqual(res.params['alpha'], key)

    results = (Result('fake/path', exp, i) for i in listIndices(exp))
    split_results = splitOverParameter(results, 'model.name')

    self.assertEqual(list(split_results), ['PR', 'ESARSA'])  # check keys
    self.assertEqual(len(split_results['PR']), 12)
def test_getBest(self):
    # lowest
    load_counter = 0
    class TestResult(Result):
        def _load(self):
            nonlocal load_counter
            load_counter += 1
            return (np.ones(100) * load_counter, np.ones(100), 3)

    results = (TestResult('fake/path', exp, i) for i in listIndices(exp))
    best = getBest(results)
    self.assertEqual(best.mean()[0], 1)

    # highest
    results = (TestResult('fake/path', exp, i) for i in listIndices(exp))
    best = getBest(results, comparator=lambda a, b: a > b)
    self.assertEqual(best.mean()[0], load_counter)
def test_listIndices(self):
    exp = ExperimentDescription({
        'metaParameters': {
            'alpha': [0.01, 0.02, 0.04, 0.08, 0.16],
            'lambda': [1.0, 0.99, 0.98, 0.96, 0.92],
        },
    })

    expected = list(range(25))
    got = list(listIndices(exp))
    self.assertListEqual(got, expected)
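# A minimal sketch of the index -> permutation mapping the tests above rely on.
# From test_Results, the first meta-parameter appears to vary fastest (index 0
# gives alpha=1.0, index 1 gives alpha=0.5), so for the 5x5 grid above an index
# i would decode as below. This helper is illustrative only, not part of the
# library's API.
def _permutation_from_index(i, alphas, lambdas):
    return {
        'alpha': alphas[i % len(alphas)],
        'lambda': lambdas[(i // len(alphas)) % len(lambdas)],
    }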
def test_whereParametersEqual(self):
    results = (Result('fake/path', exp, i) for i in listIndices(exp))
    results = whereParametersEqual(results, {
        'alpha': 1.0,
        'epsilon': 2,  # if a parameter in the filter list does not exist, ignore it
        'model': {
            'name': 'ESARSA',
        },
    })
    results = list(results)

    self.assertEqual(len(results), 4)

    got = [r.params['ratio'] for r in results]
    expected = [1.0, 2.0, 4.0, 8.0]
    self.assertListEqual(got, expected)
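# A hedged sketch of how the utilities under test typically compose in an
# analysis script: filter with whereParametersEqual, split with
# splitOverParameter, then pick a winner per group with getBest. This uses
# only behavior demonstrated by the tests above; the printing is illustrative.
def _example_analysis(exp):
    results = (Result('fake/path', exp, i) for i in listIndices(exp))
    results = whereParametersEqual(results, {'model': {'name': 'PR'}})
    split = splitOverParameter(results, 'alpha')
    for alpha in split:
        best = getBest(split[alpha])
        print(alpha, best.mean()[0])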
    )
    exit(0)

runs = sys.argv[1]

args = Args.ArgsModel({
    'experiment_paths': sys.argv[3:],
    'base_path': sys.argv[2],
    'runs': 1,
    'executable': "python src/runs.py",
})

for path in args.experiment_paths:
    exp = loadExperiment(path)

    # get all of the indices corresponding to missing results
    indices = listIndices(exp, args.runs) if args.retry else listMissingResults(exp, args.runs)

    # build the parallel command
    parallel_cmd = parallel.buildParallel({
        'executable': args.executable + ' ' + runs + ' ' + path,
        'tasks': indices,
        'cores': multiprocessing.cpu_count(),
    })

    try:
        parallel_cmd = parallel_cmd.insist()
    except Exception:
        continue
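# The slurm fragment below chunks indices into jobs with a `group` helper. A
# plausible implementation is sketched here for reference (the library's own
# version may differ): yield lists of at most `size` items from an iterable.
from itertools import islice

def group(it, size):
    it = iter(it)
    while True:
        chunk = list(islice(it, size))
        if not chunk:
            return
        yield chunk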
    slurm.sequential = 1

    # figure out how many indices to use
    size = exp.numPermutations() * runs

    paths = listResultsPaths(exp, runs)
    res_path = first(paths)

    data = []
    data_path = f'{res_path}/returns.csv'
    if os.path.exists(data_path):
        with open(data_path, 'r') as f:
            data = f.readlines()

    indices = listIndices(exp, runs)
    # get all of the indices corresponding to missing results
    indices = generateMissing(exp, indices, data)
    indices = printProgress(size, indices)

    # compute how many "tasks" to clump into each job
    groupSize = slurm.cores * slurm.sequential

    for g in group(indices, groupSize):
        l = list(g)
        print("scheduling:", path, l)

        # build the executable string
        runner = f'python {executable} {path} '

        # generate the gnu-parallel command for dispatching to many CPUs across server nodes
        parallel = Slurm.buildParallel(
    exp = Experiment.load(path)
    paths = listResultsPaths(exp, args.runs)
    res_path = first(paths)

    data = []
    raise NotImplementedError('Make sure to change the expected result file!!')
    data_path = f'{res_path}/TODO-CHANGE-ME.csv'
    if os.path.exists(data_path):
        with open(data_path, 'r') as f:
            data = f.readlines()

    indices = listIndices(exp, args.runs)
    # get all of the indices corresponding to missing results
    indices = generateMissing(exp, indices, data)
    indices = count(path, indices)

    for idx in indices:
        exe = f'{args.executable} {path} {idx}'
        cmds.append(exe)

print(len(cmds))
res = pool.imap_unordered(partial(subprocess.run, shell=True, stdout=subprocess.PIPE), cmds)
for i, _ in enumerate(res):
    sys.stderr.write(f'\r{i + 1}/{len(cmds)}')
sys.stderr.write('\n')
pool = Pool()

runs = sys.argv[2]

args = Args.ArgsModel({
    'experiment_paths': sys.argv[4:],
    'base_path': sys.argv[3],
    'runs': 1,
    'executable': "python " + sys.argv[1],
})

cmds = []
for path in args.experiment_paths:
    exp = Experiment.load(path)
    paths = listResultsPaths(exp, args.runs)

    # get all of the indices corresponding to missing results
    indices = listIndices(exp, args.runs) if args.retry else generateMissing(paths)

    for idx in indices:
        exe = f'{args.executable} {runs} {path} {idx}'
        cmds.append(exe)

print(len(cmds))
res = pool.imap_unordered(partial(subprocess.run, shell=True, stdout=subprocess.PIPE), cmds)
for i, _ in enumerate(res):
    sys.stderr.write(f'\r{i + 1}/{len(cmds)}')
sys.stderr.write('\n')
cmds = []
for path in args.experiment_paths:
    exp = Experiment.load(path)
    paths = listResultsPaths(exp, args.runs)
    res_path = first(paths)

    data = []
    data_path = f'{res_path}/mspbe.csv'
    if os.path.exists(data_path):
        with open(data_path, 'r') as f:
            data = f.readlines()

    indices = listIndices(exp, 1)
    # get all of the indices corresponding to missing results
    indices = generateMissing(exp, indices, data)
    indices = count(path, indices)

    for idx in indices:
        exe = f'{args.executable} {args.runs} {path} {idx}'
        cmds.append(exe)

print(len(cmds))
res = pool.imap_unordered(partial(subprocess.run, shell=True, stdout=subprocess.PIPE), cmds)
for i, _ in enumerate(res):
    sys.stderr.write(f'\r{i + 1}/{len(cmds)}')
sys.stderr.write('\n')
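# The dispatch pattern shared by the scripts above, isolated as a runnable,
# standard-library-only sketch: a process pool maps subprocess.run over shell
# commands and writes progress to stderr as commands finish. imap_unordered
# yields results in completion order rather than submission order, which is
# all the progress counter needs.
import subprocess
import sys
from functools import partial
from multiprocessing import Pool

def dispatch(cmds):
    with Pool() as pool:
        res = pool.imap_unordered(partial(subprocess.run, shell=True, stdout=subprocess.PIPE), cmds)
        for i, _ in enumerate(res):
            sys.stderr.write(f'\r{i + 1}/{len(cmds)}')
    sys.stderr.write('\n')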