def calc_session_metrics(session_df, env_name, info_prepath=None, df_mode=None):
    '''
    Calculate the session metrics: strength, efficiency, stability
    @param DataFrame:session_df Dataframe containing avg_return, frame, opt_step (and avg_len, avg_success when df_mode == 'eval')
    @param str:env_name Name of the environment to get its random baseline; currently unused, kept for interface compatibility
    @param str:info_prepath Optional info_prepath to auto-save the output to
    @param str:df_mode Optional df_mode to save with info_prepath
    @returns dict:metrics Consists of scalar metrics and series local metrics
    '''
    # FIX: the original read `session_df['avg_return'] if df_mode == 'eval' else session_df['avg_return']`
    # — both branches were identical, so the conditional was a no-op; simplified to a plain lookup.
    mean_return = session_df['avg_return']
    # length and success columns are only recorded for eval dataframes
    mean_length = session_df['avg_len'] if df_mode == 'eval' else None
    mean_success = session_df['avg_success'] if df_mode == 'eval' else None
    frames = session_df['frame']
    opt_steps = session_df['opt_step']
    # all the session local metrics
    local = {
        'mean_return': mean_return,
        'mean_length': mean_length,
        'mean_success': mean_success,
        'frames': frames,
        'opt_steps': opt_steps,
    }
    metrics = {
        'local': local,
    }
    if info_prepath is not None:  # auto-save if info_prepath is given
        util.write(metrics, f'{info_prepath}_session_metrics_{df_mode}.pkl')
    return metrics
def calc_trial_metrics(session_metrics_list, info_prepath=None):
    '''
    Calculate the trial metrics: mean(strength), mean(efficiency), mean(stability), consistency
    @param list:session_metrics_list The metrics collected from each session; format: {session_index: {'scalar': {...}, 'local': {...}}}
    @param str:info_prepath Optional info_prepath to auto-save the output to
    @returns dict:metrics Consists of scalar metrics and series local metrics
    '''
    # gather each session's local series; the x-axes (frames/opt_steps) come from the first session
    session_locals = [sm['local'] for sm in session_metrics_list]
    first_local = session_locals[0]
    # for plotting: one list entry per session for each y-series
    local = {
        'mean_return': [loc['mean_return'] for loc in session_locals],
        'mean_length': [loc['mean_length'] for loc in session_locals],
        'mean_success': [loc['mean_success'] for loc in session_locals],
        'frames': first_local['frames'],
        'opt_steps': first_local['opt_steps'],
    }
    metrics = {
        'local': local,
    }
    if info_prepath is not None:  # auto-save if info_prepath is given
        util.write(metrics, f'{info_prepath}_trial_metrics.pkl')
    return metrics
def analyze_session(session_spec, session_df, df_mode):
    '''Analyze session and save data, then return metrics. Note there are 2 types of session_df: body.eval_df and body.train_df'''
    info_prepath = session_spec['meta']['info_prepath']
    # work on a copy so the caller's dataframe is untouched
    data = session_df.copy()
    assert len(data) > 1, 'Need more than 1 datapoint to calculate metrics'
    util.write(data, f'{info_prepath}_session_df_{df_mode}.csv')
    # calculate metrics, then render the session plots
    env_name = ps.get(session_spec, 'env.0.name')
    session_metrics = calc_session_metrics(data, env_name, info_prepath, df_mode)
    viz.plot_session(session_spec, session_metrics, data, df_mode)
    return session_metrics
def analyze_experiment(spec, trial_data_dict):
    '''Analyze experiment and save data'''
    info_prepath = spec['meta']['info_prepath']
    util.write(trial_data_dict, f'{info_prepath}_trial_data_dict.json')
    # calculate experiment df, then render the experiment plots
    experiment_df = calc_experiment_df(trial_data_dict, info_prepath)
    viz.plot_experiment(spec, experiment_df, METRICS_COLS)
    # zip the whole run directory for easy distribution
    predir = util.prepath_split(info_prepath)[0]
    shutil.make_archive(predir, 'zip', predir)
    logger.info(f'All experiment data zipped to {predir}.zip')
    return experiment_df
def calc_experiment_df(trial_data_dict, info_prepath=None):
    '''Collect all trial data (metrics and config) from trials into a dataframe'''
    experiment_df = pd.DataFrame(trial_data_dict).transpose()
    metric_cols = METRICS_COLS
    # config columns are whatever remains after removing the metric columns, sorted alphabetically
    config_cols = sorted(ps.difference(experiment_df.columns.tolist(), metric_cols))
    # order columns as config first, metrics last; rank rows by strength, best first
    experiment_df = experiment_df.reindex(config_cols + metric_cols, axis=1)
    experiment_df.sort_values(by=['strength'], ascending=False, inplace=True)
    if info_prepath is not None:
        util.write(experiment_df, f'{info_prepath}_experiment_df.csv')
        # save important metrics in info_prepath directly
        util.write(experiment_df, f'{info_prepath.replace("info/", "")}_experiment_df.csv')
    return experiment_df
def save(spec, unit='experiment'):
    '''Save spec to proper path. Called at Experiment or Trial init.'''
    # resolve the run's path prefix for this unit, then persist the spec next to it
    spec_path = f'{util.get_prepath(spec, unit)}_spec.json'
    util.write(spec, spec_path)