def setUp(self):
    """Create a scratch dir, one Job, and the file paths the tests check."""
    self.tmpdir = tempfile.mkdtemp()
    self.job = Job(self.tmpdir, 'test', SCRIPT)
    # All job files share the same basename and differ only by extension.
    base = os.path.join(self.tmpdir, 'test')
    self.cfg = base + '.cfg'
    self.out = base + '.out'
    self.err = base + '.err'
    self.results = base + '.result'
def setUp(self):
    """Queue ten configured jobs on a 4-worker multiprocess pool."""
    self.tmpdir = tempfile.mkdtemp()
    self.jobs = [Job(self.tmpdir, 'test{}'.format(i), SCRIPT)
                 for i in range(10)]
    self.pool = MultiprocessPool(4, refresh_period=0.005)
    self.pool.extend(self.jobs)
    # Each job needs its config written before it can run.
    for job in self.jobs:
        with open(job.config, 'w+') as config_file:
            config_file.write(CONFIG)
def test_pool_status_is_failed(self):
    """One failing job (/bin/false) drives the whole pool to FAILED."""
    failing = Job(self.tmpdir, 'loser', '/bin/false')
    self.pool.append(failing)
    self.pool.run()
    self.pool.wait()
    self.assertEqual(self.pool.status, status.FAILED)
def test_fails(self):
    """A job running /bin/false ends up with status FAILED."""
    self.init_config()
    # Replace the fixture job with one whose command always fails.
    self.job = Job(self.tmpdir, 'test', '/bin/false')
    self.job.run()
    self.assertEqual(self.job.status, status.FAILED)
class TestJob(TestCase):
    """Exercise Job: file layout, config check, run outputs, and status."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()
        self.job = Job(self.tmpdir, 'test', SCRIPT)
        # All job files share the same basename, differing by extension.
        base = os.path.join(self.tmpdir, 'test')
        self.cfg = base + '.cfg'
        self.out = base + '.out'
        self.err = base + '.err'
        self.results = base + '.result'

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def init_config(self):
        """Write the job's config file so it becomes runnable."""
        with open(self.cfg, 'w+') as config_file:
            config_file.write(CONFIG)

    def _read(self, path):
        """Return the full contents of *path* (opened 'r+' as before)."""
        with open(path, 'r+') as f:
            return f.read()

    def test_files(self):
        self.assertEqual(self.job.config, self.cfg)
        self.assertEqual(self.job.out, self.out)
        self.assertEqual(self.job.err, self.err)

    def test_check_config(self):
        # Fails before the config exists, succeeds after.
        self.assertFalse(self.job.check_config())
        self.init_config()
        self.assertTrue(self.job.check_config())

    def test_run_writes_files(self):
        self.init_config()
        self.job.run()
        for path in (self.out, self.err):
            self.assertTrue(os.path.exists(path))

    def test_run_out(self):
        self.init_config()
        self.job.run()
        self.assertTrue(os.path.exists(self.out))
        self.assertEqual(self._read(self.out), RUNNER_OUT)

    def test_run_err(self):
        self.init_config()
        self.job.run()
        self.assertEqual(self._read(self.err), RUNNER_ERR)

    def test_run_writes_results(self):
        self.init_config()
        self.job.run()
        self.assertTrue(os.path.exists(self.results))

    def test_run_results(self):
        self.init_config()
        self.job.run()
        self.assertEqual(self._read(self.results), RUNNER_RESULT)

    def test_run_status(self):
        self.init_config()
        self.job.run()
        self.assertEqual(self.job.status, status.DONE)

    def test_fails(self):
        self.init_config()
        # A job whose command always fails must report FAILED.
        self.job = Job(self.tmpdir, 'test', '/bin/false')
        self.job.run()
        self.assertEqual(self.job.status, status.FAILED)
# Descriptor combinations to evaluate: each single feature on its own,
# then a few hand-picked pairs, then the full feature set.
descriptor_sets = ([[f] for f in image_features]
                   + [['SURF', 'color'],
                      ['SURF_pairs', 'color_pairs'],
                      ['SURF_pairs', 'color_triplets'],
                      image_features])
# One experiment per descriptor set and per run index.
exp_images = [("image_sound_feats_{}_{}".format('_'.join(descriptors), i),
               TwoModalitiesExperiment({'image': ObjectsLoader(descriptors),
                                        'sound': AcornsLoader(1)},
                                       50, 50, 50, **DEFAULT_PARAMS))
              for descriptors in descriptor_sets
              for i in range(N_RUN)]
exps = exps_2 + exps_3 + exp_images
# Two-modality experiments run SCRIPT2, three-modality ones SCRIPT3.
jobs = [Job(WORKDIR, n, SCRIPT2) for n, e in exps_2]
jobs += [Job(WORKDIR, n, SCRIPT3) for n, e in exps_3]
jobs += [Job(WORKDIR, n, SCRIPT2) for n, e in exp_images]
if LAUNCHER == 'process':
    pool = MultiprocessPool()
elif LAUNCHER == 'torque':
    pool = TorquePool(default_walltime=240)
else:
    # Fail fast with a clear message: previously an unknown launcher left
    # `pool` unbound and raised a NameError on the next line.
    raise ValueError("unknown launcher: {}".format(LAUNCHER))
pool.extend(jobs)
MOD_PAIRS = [('motion', 'sound'),
             ('image', 'motion'),
             ('image', 'sound')]
EXPS_BY_NAME = dict(exps)
# Plain dict comprehension; the dict(...) wrapper was redundant.
JOBS_BY_NAME = {j.name: j for j in jobs}
def test_job_inherits_walltime(self):
    """A job appended to a TorquePool picks up the pool's default walltime."""
    torque_pool = TorquePool(default_walltime=3.)
    torque_pool.append(Job('/dev/null', 'test1', SCRIPT))
    first_job = torque_pool.jobs[0]
    self.assertEqual(first_job.walltime, 3.)
def setUp(self):
    """Queue ten jobs on a fresh TorquePool."""
    self.pool = TorquePool()
    self.jobs = [Job('/dev/null', 'test{}'.format(i), SCRIPT)
                 for i in range(10)]
    self.pool.extend(self.jobs)
# Parameters shared by every experiment below.
DEFAULT_PARAMS = {
    'debug': False,
    'shuffle_labels': True,
    'run_mode': 'single',
}
# One three-modality experiment per run index; K is the coefficient count
# for the image modality (defined elsewhere in this module).
exps = [("image_motion_sound_{}_{}".format(K, i),
         ThreeModalitiesExperiment({'image': ObjectsLoader(['SURF', 'color']),
                                    'motion': Choreo2Loader(),
                                    'sound': AcornsLoader(1)},
                                   K, 50, 50, **DEFAULT_PARAMS))
        for i in range(N_RUN)]
jobs = [Job(WORKDIR, n, SCRIPT3) for n, e in exps]
MOD_PAIRS = [('motion', 'sound'),
             ('image', 'motion'),
             ('image', 'sound')]
EXPS_BY_NAME = dict(exps)
# Plain dict comprehension; the dict(...) wrapper was redundant.
JOBS_BY_NAME = {j.name: j for j in jobs}


def internal_histograms_by_label(internal_values, by_labels_idx):
    """Compute a histogram of coefficient values for the group of samples
    corresponding to each label.

    All histograms share the same 10 bins, spanning the global value range,
    so they are comparable across labels.
    """
    value_range = (internal_values.min(), internal_values.max())
    return [np.histogram(internal_values[idx], bins=10,
                         range=value_range)[0]
            for idx in by_labels_idx]
# Horizon sweeps: transition-based horizons and HTM-based horizons.
horizon_length_transitions = [3, 5, 7, 10, 15, 20, 25, 30, 35, 40, 60, 80, 100]
horizon_length_htm = list(range(1, 10))
horizon_types = (['transitions'] * len(horizon_length_transitions)
                 + ['htm'] * len(horizon_length_htm))
horizon_lengths = horizon_length_transitions + horizon_length_htm
# Each (type, length) pair is run with and without intermediate rewards
# (for subtasks); 's'/'ns' in the name marks the reward setting.
exps = [('{}-{}-{}'.format(htype, hlen, 's' if rewarded else 'ns'),
         {'horizon-type': htype,
          'horizon-length': hlen,
          'intermediate-rewards': rewarded})
        for htype, hlen in zip(horizon_types, horizon_lengths)
        for rewarded in (True, False)]
# Sweep over iteration counts and particle counts.
n_iterations = [1, 2, 5] + list(range(10, 101, 5))
n_particles = [100, 150]
exps += [('iterations-{}-{}'.format(iters, particles),
          {'iterations': iters, 'n_particles': particles})
         for iters in n_iterations
         for particles in n_particles]
jobs = {name: Job(args.path, name, SCRIPT) for name, _ in exps}
exps = dict(exps)
if args.action in ('prepare', 'run', 'status'):
    if args.launcher == 'process':
        pool = MultiprocessPool()
    elif args.launcher == 'torque':
        pool = TorquePool(default_walltime=720)
    else:
        pool = Pool()
    pool.extend(jobs.values())
# Helpers for the printing progress