Example #1
def _retro_analyze_trial(trial_spec_path):
    '''Method to retro analyze a single trial given only a path to its spec'''
    trial_spec = util.read(trial_spec_path)
    meta_spec = trial_spec['meta']
    info_prepath = meta_spec['info_prepath']
    session_metrics_list = [util.read(f'{info_prepath}_s{s}_session_metrics_eval.pkl') for s in range(meta_spec['max_session'])]
    analysis.analyze_trial(trial_spec, session_metrics_list)
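A minimal usage sketch for the function above, assuming SLM-Lab is installed and a finished run exists on disk; the path below is hypothetical.

# hypothetical trial spec from a completed run; point this at a real run directory
trial_spec_path = 'data/reinforce_cartpole_2020_04_13_232521/reinforce_cartpole_t0_spec.json'
_retro_analyze_trial(trial_spec_path)  # assumes the function defined above is in scope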
Example #2
def _retro_analyze_session(session_spec_path):
    '''Method to retro analyze a single session given only a path to its spec'''
    session_spec = util.read(session_spec_path)
    info_prepath = session_spec['meta']['info_prepath']
    for df_mode in ('eval', 'train'):
        session_df = util.read(f'{info_prepath}_session_df_{df_mode}.csv')
        analysis.analyze_session(session_spec, session_df, df_mode)
Example #3
def retro_analyze_experiment(predir):
    '''Retro analyze an experiment'''
    logger.info('Running retro_analyze_experiment')
    trial_spec_paths = glob(f'{predir}/*_t*_spec.json')
    # remove trial and session spec paths
    experiment_spec_paths = ps.difference(glob(f'{predir}/*_spec.json'), trial_spec_paths)
    experiment_spec_path = experiment_spec_paths[0]
    spec = util.read(experiment_spec_path)
    info_prepath = spec['meta']['info_prepath']
    if not os.path.exists(f'{info_prepath}_trial_data_dict.json'):
        return  # only run analysis if the experiment has been run
    trial_data_dict = util.read(f'{info_prepath}_trial_data_dict.json')
    analysis.analyze_experiment(spec, trial_data_dict)
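A hedged usage sketch for the variant above; the experiment directory is hypothetical and must come from a completed SLM-Lab search run, otherwise the function returns early.

predir = 'data/dqn_cartpole_2019_06_11_092512'  # hypothetical experiment directory
retro_analyze_experiment(predir)  # assumes the function defined above is in scope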
Example #4
def retro_analyze_trials(predir):
    '''Retro-analyze all trial-level data.'''
    logger.info('Retro-analyzing trials from file')
    from slm_lab.experiment.control import Trial
    for filename in os.listdir(predir):
        if filename.endswith('_trial_data.json'):
            filepath = f'{predir}/{filename}'
            tn = filename.replace('_trial_data.json', '').split('_')[-1]
            trial_index = int(tn[1:])
            # mock trial
            spec, info_space = mock_info_space_spec(predir, trial_index)
            trial = Trial(spec, info_space)
            session_data_dict = session_data_dict_from_file(
                predir, trial_index)
            trial.session_data_dict = session_data_dict
            trial_fitness_df = analyze_trial(trial)
            # update the trial_data that was written by ray search
            fitness_vec = trial_fitness_df.iloc[0].to_dict()
            fitness = calc_fitness(trial_fitness_df)
            trial_data = util.read(filepath)
            trial_data.update({
                **fitness_vec,
                'fitness': fitness,
                'trial_index': trial_index,
            })
            util.write(trial_data, filepath)
Example #5
def retro_analyze_trials(predir):
    '''Retro-analyze all trial-level data.'''
    logger.info('Retro-analyzing trials from file')
    from slm_lab.experiment.control import Trial
    filenames = ps.filter_(os.listdir(predir),
                           lambda filename: filename.endswith('_trial_df.csv'))
    for idx, filename in enumerate(filenames):
        filepath = f'{predir}/{filename}'
        prepath = filepath.replace('_trial_df.csv', '')
        spec, info_space = util.prepath_to_spec_info_space(prepath)
        trial_index, _ = util.prepath_to_idxs(prepath)
        trial = Trial(spec, info_space)
        trial.session_data_dict = session_data_dict_from_file(
            predir, trial_index, ps.get(info_space, 'ckpt'))
        # zip only on the last trial
        zip = (idx == len(filenames) - 1)
        trial_fitness_df = analysis.analyze_trial(trial, zip)

        # update the trial_data that was written by ray search
        trial_data_filepath = filepath.replace('_trial_df.csv',
                                               '_trial_data.json')
        if os.path.exists(trial_data_filepath):
            fitness_vec = trial_fitness_df.iloc[0].to_dict()
            fitness = analysis.calc_fitness(trial_fitness_df)
            trial_data = util.read(trial_data_filepath)
            trial_data.update({
                **fitness_vec,
                'fitness': fitness,
                'trial_index': trial_index,
            })
            util.write(trial_data, trial_data_filepath)
Example #6
def get_spec(spec_file, spec_name, lab_mode, pre_):
    '''Get spec using args processed from inputs'''
    if lab_mode in TRAIN_MODES:
        if pre_ is None:  # new train trial
            spec = spec_util.get(spec_file, spec_name)
        else:
            # for resuming with train@{predir}
            # e.g. train@latest (will find the latest predir)
            # e.g. train@data/reinforce_cartpole_2020_04_13_232521
            predir = pre_
            if predir == 'latest':
                # get the latest predir with spec_name
                predir = sorted(glob(f'data/{spec_name}*/'))[-1]
            # get experiment_ts to resume train spec
            _, _, _, _, experiment_ts = util.prepath_split(predir)
            logger.info(f'Resolved to train@{predir}')
            spec = spec_util.get(spec_file, spec_name, experiment_ts)
    elif lab_mode == 'enjoy' or lab_mode == 'record':
        # for enjoy@{session_spec_file}
        # e.g. enjoy@data/reinforce_cartpole_2020_04_13_232521/reinforce_cartpole_t0_s0_spec.json
        session_spec_file = pre_
        assert session_spec_file is not None, 'enjoy/record mode must specify an `enjoy/record@{session_spec_file}`'
        spec = util.read(f'{session_spec_file}')
    else:
        raise ValueError(
            f'Unrecognizable lab_mode not of {TRAIN_MODES} or {EVAL_MODES}')
    return spec
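A hedged sketch of the three call patterns the branches above handle; the spec file, spec name, and run directory are illustrative, not taken from the snippet.

# new training trial (pre_ is None)
spec = get_spec('reinforce.json', 'reinforce_cartpole', 'train', None)
# resume the latest matching run: train@latest
spec = get_spec('reinforce.json', 'reinforce_cartpole', 'train', 'latest')
# replay a finished session: enjoy@{session_spec_file}
session_spec_file = 'data/reinforce_cartpole_2020_04_13_232521/reinforce_cartpole_t0_s0_spec.json'
spec = get_spec('reinforce.json', 'reinforce_cartpole', 'enjoy', session_spec_file)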
Example #7
def plot_session_from_file(session_df_filepath):
    '''
    Method to plot session from its session_df file
    @example

    from slm_lab.experiment import analysis
    filepath = 'data/reinforce_cartpole_2018_01_22_211751/reinforce_cartpole_t0_s0_session_df.csv'
    analysis.plot_session_from_file(filepath)
    '''
    from slm_lab.experiment.monitor import InfoSpace
    spec_name = util.prepath_to_spec_name(session_df_filepath)
    session_spec = {'name': spec_name}
    session_df = util.read(session_df_filepath,
                           header=[0, 1, 2, 3],
                           index_col=0,
                           dtype=np.float32)
    session_data = util.session_df_to_data(session_df)
    tn, sn = session_df_filepath.replace('_session_df.csv', '').split('_')[-2:]
    info_space = InfoSpace()
    info_space.set('experiment', 0)
    info_space.set('trial', int(tn[1:]))
    info_space.set('session', int(sn[1:]))
    session_fig = plot_session(session_spec, info_space, session_data)
    viz.save_image(
        session_fig,
        session_df_filepath.replace('_session_df.csv', '_session_graph.png'))
Example #8
def retro_analyze_experiment(predir):
    '''Retro analyze an experiment'''
    logger.info('Running retro_analyze_experiment')
    if ps.is_empty(glob(f'{predir}/info/*_trial_data_dict.json')):
        logger.info('Skipping retro_analyze_experiment since no experiment was run.')
        return  # only run analysis if the experiment has been run
    trial_spec_paths = glob(f'{predir}/*_t*_spec.json')
    # remove trial and session spec paths
    experiment_spec_paths = ps.difference(glob(f'{predir}/*_spec.json'),
                                          trial_spec_paths)
    experiment_spec_path = experiment_spec_paths[0]
    spec = util.read(experiment_spec_path)
    info_prepath = spec['meta'].get('info_prepath')
    trial_data_dict = util.read(f'{info_prepath}_trial_data_dict.json')
    analysis.analyze_experiment(spec, trial_data_dict)
Example #9
def session_data_from_file(predir, trial_index, session_index):
    '''Build session.session_data from file'''
    for filename in os.listdir(predir):
        if filename.endswith(f'_t{trial_index}_s{session_index}_session_df.csv'):
            filepath = f'{predir}/{filename}'
            session_df = util.read(filepath, header=[0, 1, 2, 3], index_col=0)
            session_data = util.session_df_to_data(session_df)
            return session_data
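A short usage sketch with a hypothetical run directory; the function returns None when no matching `_session_df.csv` is found.

predir = 'data/a2c_cartpole_2018_06_16_214527'  # hypothetical run directory
session_data = session_data_from_file(predir, trial_index=0, session_index=0)
if session_data is None:
    print('no session_df found for t0 s0')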
Example #10
    def __init__(self, env, spec, aeb=(0, 0, 0)):
        # essential reference variables
        self.agent = None  # set later
        self.env = env
        self.spec = spec
        # agent, env, body index for multi-agent-env
        self.a, self.e, self.b = self.aeb = aeb

        # variables set during init_algorithm_params
        self.explore_var = np.nan  # action exploration: epsilon or tau
        self.entropy_coef = np.nan  # entropy for exploration

        # debugging/logging variables, set in train or loss function
        self.loss = np.nan
        self.mean_entropy = np.nan
        self.mean_grad_norm = np.nan

        # total_reward_ma from eval for model checkpoint saves
        self.best_total_reward_ma = -np.inf
        self.total_reward_ma = np.nan

        # dataframes to track data for analysis.analyze_session
        # track training data per episode
        self.train_df = pd.DataFrame(columns=[
            'epi', 't', 'wall_t', 'opt_step', 'frame', 'fps', 'total_reward',
            'total_reward_ma', 'loss', 'lr', 'explore_var', 'entropy_coef',
            'entropy', 'grad_norm'
        ])

        # in train@ mode, override from saved train_df if exists
        if util.in_train_lab_mode() and self.spec['meta']['resume']:
            train_df_filepath = util.get_session_df_path(self.spec, 'train')
            if os.path.exists(train_df_filepath):
                self.train_df = util.read(train_df_filepath)
                self.env.clock.load(self.train_df)

        # track eval data within run_eval; the same as train_df except for reward
        if self.spec['meta']['rigorous_eval']:
            self.eval_df = self.train_df.copy()
        else:
            self.eval_df = self.train_df

        # the specific agent-env interface variables for a body
        self.observation_space = self.env.observation_space
        self.action_space = self.env.action_space
        self.observable_dim = self.env.observable_dim
        self.state_dim = self.observable_dim['state']
        self.action_dim = self.env.action_dim
        self.is_discrete = self.env.is_discrete
        # set the ActionPD class for sampling action
        self.action_type = policy_util.get_action_type(self.action_space)
        self.action_pdtype = ps.get(spec,
                                    f'agent.{self.a}.algorithm.action_pdtype')
        if self.action_pdtype in (None, 'default'):
            self.action_pdtype = policy_util.ACTION_PDS[self.action_type][0]
        self.ActionPD = policy_util.get_action_pd_cls(self.action_pdtype,
                                                      self.action_type)
Example #11
def run_benchmark(spec_file):
    logger.info('Running benchmark')
    spec_dict = util.read(f'{spec_util.SPEC_DIR}/{spec_file}')
    for spec_name in spec_dict:
        # run only if results do not already exist; benchmark mode only
        if not any(spec_name in filename for filename in os.listdir('data')):
            run_by_mode(spec_file, spec_name, 'search')
        else:
            logger.info(f'{spec_name} has already been run and is present in data/')
Example #12
def trial_data_dict_from_file(predir):
    '''Build experiment.trial_data_dict from file'''
    trial_data_dict = {}
    for filename in os.listdir(predir):
        if filename.endswith('_trial_data.json'):
            filepath = f'{predir}/{filename}'
            exp_trial_data = util.read(filepath)
            trial_index = exp_trial_data.pop('trial_index')
            trial_data_dict[trial_index] = exp_trial_data
    return trial_data_dict
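A hedged usage sketch; the directory is hypothetical and should contain the `*_trial_data.json` files written by a search run.

predir = 'data/dqn_cartpole_2018_06_16_214527'  # hypothetical experiment directory
trial_data_dict = trial_data_dict_from_file(predir)
print(sorted(trial_data_dict))  # trial indices recovered from the trial_data files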
Example #13
def session_data_from_file(predir, trial_index, session_index):
    '''Build session.session_data from file'''
    ckpt_str = '_ckpt-eval' if util.get_lab_mode() in ('enjoy', 'eval') else ''
    for filename in os.listdir(predir):
        if filename.endswith(
                f'_t{trial_index}_s{session_index}{ckpt_str}_session_df.csv'):
            filepath = f'{predir}/{filename}'
            session_df = util.read(filepath, header=[0, 1, 2, 3], index_col=0)
            session_data = util.session_df_to_data(session_df)
            return session_data
Example #14
def get_trial_metrics_scalar(algo, env, data_folder):
    try:
        filepaths = glob(
            f'{data_folder}/{algo}*{env}*/{trial_metrics_scalar_path}')
        assert len(filepaths) == 1, f'{algo}, {env}, {filepaths}'
        filepath = filepaths[0]
        return util.read(filepath)
    except Exception as e:
        # blank fill
        return {'final_return_ma': ''}
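A hedged usage sketch. `trial_metrics_scalar_path` is a module-level glob suffix assumed to be defined alongside the function; the algorithm, environment, and data folder names below are illustrative.

data_folder = 'data/benchmark'  # hypothetical folder of benchmark runs
scalar = get_trial_metrics_scalar('dqn', 'pong', data_folder)
final_return = scalar.get('final_return_ma', '')  # blank when the run is missing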
Example #15
def plot_multi_trial(trial_metrics_path_list,
                     legend_list,
                     title,
                     graph_prepath,
                     ma=False,
                     name_time_pairs=None,
                     frame_scales=None,
                     palette=None,
                     showlegend=True):
    '''
    Plot multiple trial graphs together
    This method can be used in analysis and also custom plotting by specifying the arguments manually
    @example

    trial_metrics_path_list = [
        'data/dqn_cartpole_2019_06_11_092512/info/dqn_cartpole_t0_trial_metrics.pkl',
        'data/dqn_cartpole_2019_06_11_092512/info/dqn_cartpole_t1_trial_metrics.pkl',
    ]
    legend_list = [
        '0',
        '1',
    ]
    title = f'Multi trial trial graphs'
    graph_prepath = 'data/my_exp'
    viz.plot_multi_trial(trial_metrics_path_list, legend_list, title, graph_prepath)
    '''
    local_metrics_list = [
        util.read(path)['local'] for path in trial_metrics_path_list
    ]
    # for plotting with async runs to adjust frame scale
    if frame_scales is not None:
        for idx, scale in frame_scales:
            local_metrics_list[idx]['frames'] = local_metrics_list[idx]['frames'] * scale
    name_time_pairs = name_time_pairs or [
        ('mean_returns', 'frames'), ('strengths', 'frames'),
        ('sample_efficiencies', 'frames'),
        ('training_efficiencies', 'opt_steps'), ('stabilities', 'frames')
    ]
    for name, time in name_time_pairs:
        if ma:
            for local_metrics in local_metrics_list:
                sr_list = local_metrics[name]
                sr_list = [calc_sr_ma(sr) for sr in sr_list]
                local_metrics[f'{name}_ma'] = sr_list
            name = f'{name}_ma'  # for labeling
        fig = plot_multi_local_metrics(local_metrics_list, legend_list, name,
                                       time, title, palette, showlegend)
        save_image(fig,
                   f'{graph_prepath}_multi_trial_graph_{name}_vs_{time}.png')
        if name in ('mean_returns', 'mean_returns_ma'):
            # save important graphs in prepath directly
            prepath = graph_prepath.replace('/graph/', '/')
            save_image(fig, f'{prepath}_multi_trial_graph_{name}_vs_{time}.png')
Example #16
def session_data_dict_from_file(predir, trial_index):
    '''Build trial.session_data_dict from file'''
    session_data_dict = {}
    for filename in os.listdir(predir):
        if f'_t{trial_index}_' in filename and filename.endswith('_session_fitness_df.csv'):
            filepath = f'{predir}/{filename}'
            fitness_df = util.read(filepath, header=[0, 1, 2, 3], index_col=0, dtype=np.float32)
            util.fix_multiindex_dtype(fitness_df)
            session_index = fitness_df.index[0]
            session_data_dict[session_index] = fitness_df
    return session_data_dict
Example #17
def main():
    '''Main method to run jobs from scheduler or from a spec directly'''
    args = sys.argv[1:]
    if len(args) <= 1:  # use scheduler
        job_file = args[0] if len(args) == 1 else 'job/experiments.json'
        for spec_file, spec_and_mode in util.read(job_file).items():
            for spec_name, lab_mode in spec_and_mode.items():
                read_spec_and_run(spec_file, spec_name, lab_mode)
    else:  # run single spec
        assert len(args) == 3, f'To use sys args, specify spec_file, spec_name, lab_mode'
        read_spec_and_run(*args)
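The scheduler branch expects the job file to map each spec file to `{spec_name: lab_mode}` entries. A hedged sketch of such a file, written with `util.write` so it round-trips through `util.read`; the spec file names are illustrative.

from slm_lab.lib import util

# hypothetical job file content: {spec_file: {spec_name: lab_mode}}
job = {
    'benchmark/reinforce/reinforce_cartpole.json': {'reinforce_cartpole': 'train'},
    'benchmark/dqn/dqn_cartpole.json': {'dqn_cartpole': 'dev'},
}
util.write(job, 'job/experiments.json')  # then invoke main() with no CLI args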
Example #18
def get(spec_file, spec_name):
    '''
    Get an experiment spec from spec_file, spec_name.
    Auto-check spec.
    @example

    spec = spec_util.get('base.json', 'base_case_openai')
    '''
    if 'data/' in spec_file:
        assert spec_name in spec_file, 'spec_file in data/ must be lab-generated and contain spec_name'
        spec = util.read(spec_file)
    else:
        spec_file = f'{SPEC_DIR}/{spec_file}'  # allow direct filename
        spec_dict = util.read(spec_file)
        assert spec_name in spec_dict, f'spec_name {spec_name} is not in spec_file {spec_file}. Choose from:\n {ps.join(spec_dict.keys(), ",")}'
        spec = spec_dict[spec_name]
        spec['name'] = spec_name
        spec['git_SHA'] = util.get_git_sha()
    check(spec)
    return spec
Example #19
def main():
    if len(sys.argv) > 1:
        args = sys.argv[1:]
        assert len(args) == 3, f'To use sys args, specify spec_file, spec_name, lab_mode'
        run_by_mode(*args)
        return

    experiments = util.read('config/experiments.json')
    for spec_file in experiments:
        for spec_name, lab_mode in experiments[spec_file].items():
            run_by_mode(spec_file, spec_name, lab_mode)
Example #20
def get(spec_file, spec_name):
    '''
    Get an experiment spec from spec_file, spec_name.
    Auto-check spec.
    @example

    spec = spec_util.get('demo.json', 'dqn_cartpole')
    '''
    spec_file = spec_file.replace(SPEC_DIR, '')  # cleanup
    if 'data/' in spec_file:
        assert spec_name in spec_file, 'spec_file in data/ must be lab-generated and contain spec_name'
        spec = util.read(spec_file)
    else:
        spec_file = f'{SPEC_DIR}/{spec_file}'  # allow direct filename
        spec_dict = util.read(spec_file)
        assert spec_name in spec_dict, f'spec_name {spec_name} is not in spec_file {spec_file}. Choose from:\n {ps.join(spec_dict.keys(), ",")}'
        spec = spec_dict[spec_name]
        # fill-in info at runtime
        spec['name'] = spec_name
        spec = extend_meta_spec(spec)
    check(spec)
    return spec
Example #21
def check_all():
    '''Check all spec files, all specs.'''
    spec_files = _.filter_(os.listdir(SPEC_DIR), lambda f: f.endswith('.json'))
    for spec_file in spec_files:
        spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
        for spec_name, spec in spec_dict.items():
            try:
                spec['name'] = spec_name
                check(spec)
            except Exception as e:
                logger.exception(f'spec_file {spec_file} fails spec check')
                raise e
    logger.info(f'Checked all specs from: {_.join(spec_files, ",")}')
    return True
Example #22
def get(spec_file, spec_name):
    '''
    Get an experiment spec from spec_file, spec_name.
    Auto-check spec.
    @example

    spec = spec_util.get('base.json', 'base_case')
    '''
    spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
    assert spec_name in spec_dict, f'spec_name {spec_name} is not in spec_file {spec_file}. Choose from:\n {_.join(spec_dict.keys(), ",")}'
    spec = spec_dict[spec_name]
    spec['name'] = spec_name
    check(spec)
    return spec
Example #23
def session_data_from_file(predir,
                           trial_index,
                           session_index,
                           ckpt=None,
                           prefix=''):
    '''Build session.session_data from file'''
    ckpt_str = '' if ckpt is None else f'_ckpt-{ckpt}'
    for filename in os.listdir(predir):
        if filename.endswith(
                f'_t{trial_index}_s{session_index}{ckpt_str}_{prefix}session_df.csv'
        ):
            filepath = f'{predir}/{filename}'
            session_df = util.read(filepath, header=[0, 1, 2, 3], index_col=0)
            session_data = util.session_df_to_data(session_df)
            return session_data
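A hedged sketch of how `ckpt` and `prefix` shape the filename suffix matched above; the run directory is hypothetical.

predir = 'data/ppo_cartpole_2019_01_01_000000'  # hypothetical run directory
# matches *_t0_s0_session_df.csv
data = session_data_from_file(predir, 0, 0)
# matches *_t0_s0_ckpt-eval_eval_session_df.csv
eval_data = session_data_from_file(predir, 0, 0, ckpt='eval', prefix='eval_')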
Example #24
def get(spec_file, spec_name):
    '''
    Get an experiment spec from spec_file, spec_name.
    Auto-check spec.
    @example

    spec = spec_util.get('base.json', 'base_case')
    '''
    spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
    assert spec_name in spec_dict, f'spec_name {spec_name} is not in spec_file {spec_file}. Choose from:\n {ps.join(spec_dict.keys(), ",")}'
    spec = spec_dict[spec_name]
    spec['name'] = spec_name
    spec['git_SHA'] = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
    check(spec)
    return spec
Example #25
def check_all():
    '''Check all spec files, all specs.'''
    spec_files = ps.filter_(os.listdir(SPEC_DIR), lambda f: f.endswith('.json') and not f.startswith('_'))
    for spec_file in spec_files:
        spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
        for spec_name, spec in spec_dict.items():
            try:
                spec['name'] = spec_name
                spec['git_SHA'] = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
                check(spec)
            except Exception as e:
                logger.exception(f'spec_file {spec_file} fails spec check')
                raise e
    logger.info(f'Checked all specs from: {ps.join(spec_files, ",")}')
    return True
Example #26
def generic_algo_test(spec, algo_name):
    '''Need a new InfoSpace() per trial, otherwise the session id does not tick correctly'''
    trial = Trial(spec, info_space=InfoSpace())
    trial_data = trial.run()
    folders = [x for x in os.listdir('data/') if x.startswith(algo_name)]
    assert len(folders) == 1
    path = 'data/' + folders[0]
    sess_data = util.read(path + '/' + algo_name + '_t0_s0_session_df.csv')
    rewards = sess_data['0.2'].replace("reward", -1).astype(float)
    print(f'rewards: {rewards}')
    maxr = rewards.max()
    # delete test data folder and trial
    shutil.rmtree(path)
    del trial
    return maxr
Example #27
def check_all():
    '''Check all spec files, all specs.'''
    spec_files = ps.filter_(os.listdir(SPEC_DIR), lambda f: f.endswith('.json') and not f.startswith('_'))
    for spec_file in spec_files:
        spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
        for spec_name, spec in spec_dict.items():
            # fill-in info at runtime
            spec['name'] = spec_name
            spec = extend_meta_spec(spec)
            try:
                check(spec)
            except Exception as e:
                logger.exception(f'spec_file {spec_file} fails spec check')
                raise e
    logger.info(f'Checked all specs from: {ps.join(spec_files, ",")}')
    return True
Example #28
def session_data_dict_from_file(predir, trial_index):
    '''Build trial.session_data_dict from file'''
    ckpt_str = 'ckpt-eval' if util.get_lab_mode() in ('enjoy', 'eval') else ''
    session_data_dict = {}
    for filename in os.listdir(predir):
        if f'_t{trial_index}_' in filename and filename.endswith(
                f'{ckpt_str}_session_fitness_df.csv'):
            filepath = f'{predir}/{filename}'
            fitness_df = util.read(filepath,
                                   header=[0, 1, 2, 3],
                                   index_col=0,
                                   dtype=np.float32)
            util.fix_multi_index_dtype(fitness_df)
            session_index = fitness_df.index[0]
            session_data_dict[session_index] = fitness_df
    return session_data_dict
Example #29
def get(spec_file, spec_name):
    '''
    Get an experiment spec from spec_file, spec_name.
    Auto-check spec.
    @example

    spec = spec_util.get('base.json', 'base_case')
    '''
    spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
    assert spec_name in spec_dict, f'spec_name {spec_name} is not in spec_file {spec_file}. Choose from:\n {ps.join(spec_dict.keys(), ",")}'
    spec = spec_dict[spec_name]
    spec['name'] = spec_name
    spec['git_SHA'] = subprocess.check_output(['git', 'rev-parse',
                                               'HEAD']).decode().strip()
    check(spec)
    return spec
Example #30
def mock_spec_info_space(predir, trial_index=None, session_index=None):
    '''Helper for retro analysis to build mock info_space and spec'''
    from slm_lab.experiment.monitor import InfoSpace
    _, _, _, spec_name, experiment_ts, _ = util.prepath_split(predir)
    info_space = InfoSpace()
    info_space.experiment_ts = experiment_ts
    info_space.set('experiment', 0)
    if trial_index is None:
        filepath = f'{predir}/{spec_name}_spec.json'
    else:
        info_space.set('trial', trial_index)
        filepath = f'{predir}/{spec_name}_t{trial_index}_spec.json'
    if session_index is not None:
        info_space.set('session', session_index)
    spec = util.read(filepath)
    return spec, info_space
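A hedged usage sketch; the run directory is hypothetical, and the spec filenames in the comments follow the naming scheme used in the function above.

predir = 'data/reinforce_cartpole_2018_01_22_211751'  # hypothetical run directory
# trial-level mock: reads {predir}/reinforce_cartpole_t0_spec.json
spec, info_space = mock_spec_info_space(predir, trial_index=0)
# session-level mock additionally sets the session coordinate on info_space
spec, info_space = mock_spec_info_space(predir, trial_index=0, session_index=1)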
Example #31
def mock_info_space_spec(predir, trial_index=None, session_index=None):
    '''Helper for retro analysis to build mock info_space and spec'''
    from slm_lab.experiment.monitor import InfoSpace
    spec_name = util.prepath_to_spec_name(predir)
    experiment_ts = util.prepath_to_experiment_ts(predir)
    info_space = InfoSpace()
    info_space.experiment_ts = experiment_ts
    info_space.set('experiment', 0)
    if trial_index is None:
        filepath = f'{predir}/{spec_name}_spec.json'
    else:
        info_space.set('trial', trial_index)
        filepath = f'{predir}/{spec_name}_t{trial_index}_spec.json'
    if session_index is not None:
        info_space.set('session', session_index)
    spec = util.read(filepath)
    return spec, info_space
Example #32
def check_all():
    '''Check all spec files, all specs.'''
    spec_files = ps.filter_(
        os.listdir(SPEC_DIR),
        lambda f: f.endswith('.json') and not f.startswith('_'))
    for spec_file in spec_files:
        spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
        for spec_name, spec in spec_dict.items():
            try:
                spec['name'] = spec_name
                spec['git_SHA'] = subprocess.check_output(
                    ['git', 'rev-parse', 'HEAD']).decode().strip()
                check(spec)
            except Exception as e:
                logger.exception(f'spec_file {spec_file} fails spec check')
                raise e
    logger.info(f'Checked all specs from: {ps.join(spec_files, ",")}')
    return True
Example #33
def plot_experiment_from_file(experiment_df_filepath):
    '''
    Method to plot experiment from its experiment_df file
    @example

    from slm_lab.experiment import analysis

    filepath = 'data/reinforce_cartpole_2018_01_22_190720/reinforce_cartpole_experiment_df.csv'
    analysis.plot_experiment_from_file(filepath)
    '''
    spec_name = '_'.join(experiment_df_filepath.split('/')[1].split('_')[:-4])
    experiment_spec = {'name': spec_name}
    experiment_df = util.read(experiment_df_filepath)
    experiment_fig = plot_experiment(experiment_spec, experiment_df)
    viz.save_image(
        experiment_fig,
        experiment_df_filepath.replace('_experiment_df.csv',
                                       '_experiment_graph.png'))
Example #34
def plot_session_from_file(session_df_filepath):
    '''
    Method to plot session from its session_df file
    @example

    from slm_lab.experiment import analysis
    filepath = 'data/reinforce_cartpole_2018_01_22_211751/reinforce_cartpole_t0_s0_session_df.csv'
    analysis.plot_session_from_file(filepath)
    '''
    from slm_lab.experiment.monitor import InfoSpace
    spec_name = util.prepath_to_spec_name(session_df_filepath)
    session_spec = {'name': spec_name}
    session_df = util.read(session_df_filepath, header=[0, 1, 2, 3], index_col=0, dtype=np.float32)
    session_data = util.session_df_to_data(session_df)
    tn, sn = session_df_filepath.replace('_session_df.csv', '').split('_')[-2:]
    info_space = InfoSpace()
    info_space.set('experiment', 0)
    info_space.set('trial', int(tn[1:]))
    info_space.set('session', int(sn[1:]))
    session_fig = plot_session(session_spec, info_space, session_data)
    viz.save_image(session_fig, session_df_filepath.replace('_session_df.csv', '_session_graph.png'))
Example #35
def generate_specs(spec, const='agent'):
    '''
    Generate benchmark specs with compatible discrete/continuous/both types:
    - take a spec
    - for each in benchmark envs
        - use the template env spec to update spec
        - append to benchmark specs
    Interchange agent and env for the reversed benchmark.
    '''
    if const == 'agent':
        const_name = ps.get(spec, 'agent.0.algorithm.name')
        variant = 'env'
    else:
        const_name = ps.get(spec, 'env.0.name')
        variant = 'agent'

    filepath = f'{spec_util.SPEC_DIR}/benchmark_{const_name}.json'
    if os.path.exists(filepath):
        logger.info(f'Benchmark for {const_name} exists at {filepath} already, not overwriting.')
        benchmark_specs = util.read(filepath)
        return benchmark_specs

    logger.info(f'Generating benchmark for {const_name}')
    benchmark_variants = []
    benchmark_specs = {}
    for dist_cont, const_names in BENCHMARK[const].items():
        if const_name in const_names:
            benchmark_variants.extend(BENCHMARK[variant][dist_cont])
    for vary_name in benchmark_variants:
        vary_spec = ENV_TEMPLATES[vary_name]
        spec_name = f'{const_name}_{vary_name}'
        benchmark_spec = spec.copy()
        benchmark_spec['name'] = spec_name
        benchmark_spec[variant] = [vary_spec]
        benchmark_specs[spec_name] = benchmark_spec

    util.write(benchmark_specs, filepath)
    logger.info(f'Benchmark for {const_name} written to {filepath}.')
    return benchmark_specs
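A hedged sketch of driving the generator above from a lab spec; the spec file and spec name passed to `spec_util.get` are illustrative.

from slm_lab.spec import spec_util

spec = spec_util.get('dqn.json', 'dqn_cartpole')  # hypothetical source spec
benchmark_specs = generate_specs(spec, const='agent')  # hold the agent, vary the envs
reversed_specs = generate_specs(spec, const='env')     # hold the env, vary the agents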
Example #36
def retro_analyze_trials(predir):
    '''Retro-analyze all trial-level data.'''
    logger.info('Retro-analyzing trials from file')
    from slm_lab.experiment.control import Trial
    for filename in os.listdir(predir):
        if filename.endswith('_trial_data.json'):
            filepath = f'{predir}/{filename}'
            tn = filename.replace('_trial_data.json', '').split('_')[-1]
            trial_index = int(tn[1:])
            # mock trial
            spec, info_space = mock_info_space_spec(predir, trial_index)
            trial = Trial(spec, info_space)
            session_data_dict = session_data_dict_from_file(predir, trial_index)
            trial.session_data_dict = session_data_dict
            trial_fitness_df = analyze_trial(trial)
            # update the trial_data that was written by ray search
            fitness_vec = trial_fitness_df.iloc[0].to_dict()
            fitness = calc_fitness(trial_fitness_df)
            trial_data = util.read(filepath)
            trial_data.update({
                **fitness_vec, 'fitness': fitness, 'trial_index': trial_index,
            })
            util.write(trial_data, filepath)
Example #37
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, util, viz
import colorlover as cl
import numpy as np
import os
import pandas as pd
import pydash as ps

DATA_AGG_FNS = {
    't': 'sum',
    'reward': 'sum',
    'loss': 'mean',
    'explore_var': 'mean',
}
FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency']
FITNESS_STD = util.read('slm_lab/spec/_fitness_std.json')
NOISE_WINDOW = 0.05
MA_WINDOW = 100
logger = logger.get_logger(__name__)


def get_session_data(session):
    '''
    Gather data from session: MDP, Agent, Env data, hashed by aeb; then aggregate.
    @returns {dict, dict} session_mdp_data, session_data
    '''
    data_names = AGENT_DATA_NAMES + ENV_DATA_NAMES
    mdp_data_names = ['t', 'epi'] + data_names
    agg_data_names = ['epi'] + list(DATA_AGG_FNS.keys())
    data_h_v_dict = {data_name: session.aeb_space.get_history_v(data_name) for data_name in data_names}
    session_mdp_data, session_data = {}, {}
Example #38
'''
The benchmarker
Run the benchmark of agent vs environments, or environment vs agents, or both.
Generate benchmark specs like so:
- take a spec
- for each in benchmark envs
    - use the template env spec to update spec
    - append to benchmark specs
Interchange agent and env for the reversed benchmark.
'''
from slm_lab.lib import logger, util
from slm_lab.spec import spec_util
import os
import pydash as ps

AGENT_TEMPLATES = util.read(f'{spec_util.SPEC_DIR}/_agent.json')
ENV_TEMPLATES = util.read(f'{spec_util.SPEC_DIR}/_env.json')
BENCHMARK = util.read(f'{spec_util.SPEC_DIR}/_benchmark.json')
logger = logger.get_logger(__name__)


def generate_specs(spec, const='agent'):
    '''
    Generate benchmark specs with compatible discrete/continuous/both types:
    - take a spec
    - for each in benchmark envs
        - use the template env spec to update spec
        - append to benchmark specs
    Interchange agent and env for the reversed benchmark.
    '''
    if const == 'agent':
Example #39
def test_read_file_not_found():
    fake_rel_path = 'test/lib/test_util.py_fake'
    with pytest.raises(FileNotFoundError) as excinfo:
        util.read(fake_rel_path)
Example #40
def test_write_read_as_plain_list(test_str, filename, dtype):
    data_path = f'test/fixture/lib/util/{filename}'
    util.write(test_str, util.smart_path(data_path))
    assert os.path.exists(data_path)
    data_dict = util.read(util.smart_path(data_path))
    assert isinstance(data_dict, dtype)
Example #41
def test_write_read_as_df(test_df, filename, dtype):
    data_path = f'test/fixture/lib/util/{filename}'
    util.write(test_df, util.smart_path(data_path))
    assert os.path.exists(data_path)
    data_df = util.read(util.smart_path(data_path))
    assert isinstance(data_df, dtype)
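The two tests above rely on parametrized fixtures from the repo's test suite. A self-contained, hedged sketch of the same write/read round-trip for a DataFrame; the fixture path and test name are assumptions, not the repo's actual setup.

import os

import pandas as pd
from slm_lab.lib import util


def test_write_read_roundtrip_df_csv():
    # hypothetical fixture path; util.smart_path resolves it against the lab root
    data_path = 'test/fixture/lib/util/roundtrip_df.csv'
    test_df = pd.DataFrame({'integer': [1, 2, 3], 'square': [1, 4, 9]})
    util.write(test_df, util.smart_path(data_path))
    assert os.path.exists(data_path)
    data_df = util.read(util.smart_path(data_path))
    assert isinstance(data_df, pd.DataFrame)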