def retro_analyze_experiment(predir):
    '''Retro-analyze all experiment level datas.'''
    logger.info('Retro-analyzing experiment from file')
    from slm_lab.experiment.control import Experiment
    # rebuild a mock experiment from the saved run dir, then reattach its trial data
    spec, info_space = mock_spec_info_space(predir)
    experiment = Experiment(spec, info_space)
    experiment.trial_data_dict = trial_data_dict_from_file(predir)
    return analyze_experiment(experiment)
def retro_analyze_experiment(predir):
    '''Retro-analyze all experiment level datas.'''
    logger.info('Retro-analyzing experiment from file')
    from slm_lab.experiment.control import Experiment
    # mock experiment: recover spec/info_space, then load saved trial data directly
    spec, info_space = mock_info_space_spec(predir)
    experiment = Experiment(spec, info_space)
    experiment.trial_data_dict = trial_data_dict_from_file(predir)
    return analyze_experiment(experiment)
def retro_analyze_experiment(predir):
    '''Retro-analyze all experiment level datas.'''
    logger.info('Retro-analyzing experiment from file')
    from slm_lab.experiment.control import Experiment
    # recover the spec_name component of predir to rebuild the run's prepath
    _, _, _, spec_name, _, _ = util.prepath_split(predir)
    spec, info_space = util.prepath_to_spec_info_space(f'{predir}/{spec_name}')
    experiment = Experiment(spec, info_space)
    experiment.trial_data_dict = trial_data_dict_from_file(predir)
    return analyze_experiment(experiment)
def run_by_mode(spec_file, spec_name, lab_mode):
    '''
    Run the lab in the given lab_mode.
    Dispatches to Experiment (search), Trial (train/dev), Session (enjoy@{prepath}),
    or benchmark spec generation/running.
    '''
    logger.info(f'Running lab in mode: {lab_mode}')
    spec = spec_util.get(spec_file, spec_name)
    info_space = InfoSpace()
    os.environ['PREPATH'] = util.get_prepath(spec, info_space)
    reload(logger)  # to set PREPATH properly
    # expose to runtime, '@' is reserved for 'enjoy@{prepath}'
    os.environ['lab_mode'] = lab_mode.split('@')[0]
    if lab_mode == 'search':
        info_space.tick('experiment')
        Experiment(spec, info_space).run()
    elif lab_mode == 'train':
        info_space.tick('trial')
        Trial(spec, info_space).run()
    elif lab_mode.startswith('enjoy'):
        # resume a saved session from the prepath after '@'
        prepath = lab_mode.split('@')[1]
        spec, info_space = util.prepath_to_spec_info_space(prepath)
        Session(spec, info_space).run()
    elif lab_mode == 'generate_benchmark':
        benchmarker.generate_specs(spec, const='agent')
    elif lab_mode == 'benchmark':
        # TODO allow changing const to env
        run_benchmark(spec, const='agent')
    elif lab_mode == 'dev':
        spec = util.override_dev_spec(spec)
        info_space.tick('trial')
        Trial(spec, info_space).run()
    else:
        # fix: message previously omitted generate_benchmark, which is a handled mode
        logger.warn(
            'lab_mode not recognized; must be one of `search, train, enjoy, generate_benchmark, benchmark, dev`.'
        )
def run_by_mode(spec_file, spec_name, run_mode):
    '''Dispatch a spec run according to run_mode.'''
    spec = spec_util.get(spec_file, spec_name)
    # TODO remove when analysis can save all plotly plots
    os.environ['run_mode'] = run_mode
    if run_mode == 'search':
        Experiment(spec).run()
    elif run_mode == 'train':
        Trial(spec).run()
    elif run_mode == 'enjoy':
        # TODO turn on save/load model mode
        # Session(spec).run()
        pass
    elif run_mode == 'generate_benchmark':
        benchmarker.generate_specs(spec, const='agent')
    elif run_mode == 'benchmark':
        # TODO allow changing const to env
        run_benchmark(spec, const='agent')
    elif run_mode == 'dev':
        os.environ['PY_ENV'] = 'test'  # to not save in viz
        spec = util.override_dev_spec(spec)
        Trial(spec).run()
    else:
        logger.warn(
            'run_mode not recognized; must be one of `search, train, enjoy, benchmark, dev`.'
        )
def run_benchmark(spec, const):
    '''Generate benchmark specs from spec (holding const fixed) and run each as an Experiment.'''
    benchmark_specs = benchmarker.generate_specs(spec, const)
    logger.info('Running benchmark')
    for spec_name, benchmark_spec in benchmark_specs.items():
        # run only if not already exist; benchmark mode only
        already_ran = any(spec_name in filename for filename in os.listdir('data'))
        if not already_ran:
            Experiment(benchmark_spec).run()
def test_experiment():
    '''Smoke test: a demo dqn_cartpole Experiment runs and returns a DataFrame.'''
    spec = spec_util.get('demo.json', 'dqn_cartpole')
    spec_util.save(spec, unit='experiment')
    spec = spec_util.override_spec(spec, 'test')
    spec_util.tick(spec, 'experiment')
    experiment_df = Experiment(spec).run()
    assert isinstance(experiment_df, pd.DataFrame)
def run_by_mode(spec_file, spec_name, lab_mode):
    '''
    Run the lab in the given lab_mode.
    The '@' suffix carries a prepath for resuming: 'train@{prepath}' or 'enjoy@{prepath}'.
    '''
    logger.info(f'Running lab in mode: {lab_mode}')
    spec = spec_util.get(spec_file, spec_name)
    info_space = InfoSpace()
    analysis.save_spec(spec, info_space, unit='experiment')
    # '@' is reserved for 'enjoy@{prepath}'
    os.environ['lab_mode'] = lab_mode.split('@')[0]
    os.environ['PREPATH'] = util.get_prepath(spec, info_space)
    reload(logger)  # to set PREPATH properly
    if lab_mode == 'search':
        info_space.tick('experiment')
        Experiment(spec, info_space).run()
    elif lab_mode.startswith('train'):
        if '@' in lab_mode:
            # resume training from the saved run at prepath
            prepath = lab_mode.split('@')[1]
            spec, info_space = util.prepath_to_spec_info_space(prepath)
        else:
            info_space.tick('trial')
        Trial(spec, info_space).run()
    elif lab_mode.startswith('enjoy'):
        # fix: removed an exact duplicate of this elif branch (unreachable dead code)
        prepath = lab_mode.split('@')[1]
        spec, info_space = util.prepath_to_spec_info_space(prepath)
        Session(spec, info_space).run()
    elif lab_mode == 'dev':
        spec = util.override_dev_spec(spec)
        info_space.tick('trial')
        Trial(spec, info_space).run()
    else:
        logger.warn('lab_mode not recognized; must be one of `search, train, enjoy, benchmark, dev`.')
def test_experiment(test_info_space):
    '''Smoke test: a demo dqn_cartpole Experiment under the test spec yields a DataFrame.'''
    spec = spec_util.get('demo.json', 'dqn_cartpole')
    analysis.save_spec(spec, test_info_space, unit='experiment')
    spec = spec_util.override_test_spec(spec)
    test_info_space.tick('experiment')
    experiment_data = Experiment(spec, test_info_space).run()
    assert isinstance(experiment_data, pd.DataFrame)
def run_spec(spec, lab_mode):
    '''Run a spec in lab_mode'''
    os.environ['lab_mode'] = lab_mode  # expose mode to runtime
    spec = spec_util.override_spec(spec, lab_mode)  # conditionally override spec
    if lab_mode in TRAIN_MODES:
        spec_util.save(spec)  # first save the new spec
        # search runs a full Experiment; any other train mode runs a single Trial
        is_search = lab_mode == 'search'
        spec_util.tick(spec, 'experiment' if is_search else 'trial')
        runner_cls = Experiment if is_search else Trial
        runner_cls(spec).run()
    elif lab_mode in EVAL_MODES:
        Session(spec).run()
    else:
        raise ValueError(f'Unrecognizable lab_mode not of {TRAIN_MODES} or {EVAL_MODES}')
def run_new_mode(spec_file, spec_name, lab_mode):
    '''Run to generate new data with `search, train, dev`'''
    spec = spec_util.get(spec_file, spec_name)
    info_space = InfoSpace()
    analysis.save_spec(spec, info_space, unit='experiment')  # first save the new spec
    if lab_mode == 'search':
        info_space.tick('experiment')
        Experiment(spec, info_space).run()
    elif lab_mode.startswith('train'):
        info_space.tick('trial')
        Trial(spec, info_space).run()
    elif lab_mode == 'dev':
        # dev runs a shortened trial with the dev-override spec
        spec = spec_util.override_dev_spec(spec)
        info_space.tick('trial')
        Trial(spec, info_space).run()
    else:
        raise ValueError(f'Unrecognizable lab_mode not of {TRAIN_MODES}')
def run_by_mode(spec_file, spec_name, run_mode):
    '''Dispatch a spec run according to run_mode.'''
    spec = spec_util.get(spec_file, spec_name)
    if run_mode == 'search':
        Experiment(spec).run()
    elif run_mode == 'train':
        Trial(spec).run()
    elif run_mode == 'enjoy':
        # TODO turn on save/load model mode
        # Session(spec).run()
        pass
    elif run_mode == 'benchmark':
        # TODO need to spread benchmark over spec on Experiment
        pass
    elif run_mode == 'dev':
        os.environ['PY_ENV'] = 'test'  # to not save in viz
        logger.set_level('DEBUG')
        spec = util.override_dev_spec(spec)
        Trial(spec).run()
    else:
        logger.warn(
            'run_mode not recognized; must be one of `search, train, enjoy, benchmark, dev`.'
        )
def test_experiment():
    '''Smoke test: a demo dqn_cartpole Experiment under the test spec yields a DataFrame.'''
    spec = spec_util.get('demo.json', 'dqn_cartpole')
    spec = util.override_test_spec(spec)
    experiment_data = Experiment(spec).run()
    assert isinstance(experiment_data, pd.DataFrame)
def test_experiment(test_spec):
    '''Smoke test: an Experiment built from test_spec runs and returns a DataFrame.'''
    experiment_data = Experiment(test_spec).run()
    assert isinstance(experiment_data, pd.DataFrame)
def test_experiment(test_spec):
    '''Smoke test: an Experiment built from test_spec runs and returns a DataFrame.'''
    # TODO experiment data checker method
    experiment_data = Experiment(test_spec).run()
    assert isinstance(experiment_data, pd.DataFrame)