Code example #1
def models_from_config(config_idx):
    """Instantiates the models listed in the config file at index config_idx."""
    config = load_config(op.join('configs', 'config%s' % config_idx))
    model_list = []
    for m_str, params in zip(config['models'], config['hparam_list']):
        # resolve the model class by its dotted name and instantiate it with
        # the matching hyperparameters
        m = locate('graph_models.' + m_str)(*params)
        model_list.append(m)
    return model_list
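
As a point of reference, a minimal usage sketch (assuming a 'configs/config0' file exists and that graph_models defines the classes listed under config['models']):

# Hypothetical usage sketch: instantiate every model declared in config 0.
# Assumes 'configs/config0' exists and that graph_models defines the classes
# listed under config['models'].
models = models_from_config(0)
for m in models:
    print(type(m).__name__)
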
Code example #2
def hardness_dsets(expe_idx, n_test=0):
    """
    Plots, for each model, the mean accuracy (over the random seeds) over each
    dataset.
    """
    config = load_config(op.join('configs', 'config%s' % expe_idx))
    path = op.join(config['save_dir'], 'expe%s' % expe_idx)
    for m_str in config['models']:
        mpath = op.join(path, m_str, 'data')
        d_paths = os.listdir(mpath)
        mdata = []
        for p in d_paths:
            s = re.search(
                r'^{}_([0-9]+)_([0-9]+)_test_acc\.npy$'.format(n_test), p)
            if s:
                # file name, dataset number, seed number
                mdata.append((s[0], int(s[1]), int(s[2])))
        means = []
        Ndsets = max([m[1] for m in mdata])
        for h in range(Ndsets + 1):
            l = [np.load(op.join(mpath, m[0])) for m in mdata if m[1] == h]
            means.append(np.mean(np.array(l)))
        plt.bar(np.arange(len(means)), means)
        print(means)
        mean_acc = np.mean(means)
        mean_acc = str(np.around(mean_acc, 2))
        title = m_str + '; acc : {}'.format(mean_acc)
        plt.xticks(np.arange(len(means)))
        plt.title(title)
        plt.show()
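
The regular expression above expects accuracy files named '{n_test}_{dataset}_{seed}_test_acc.npy'. A standalone sketch of that parsing step, with hypothetical file names:

import re

# Hypothetical file names following the '{n_test}_{dset}_{seed}_test_acc.npy'
# convention parsed above; n_test is fixed at 0 here.
names = ['0_3_1_test_acc.npy', '0_3_2_test_acc.npy', 'notes.txt']
parsed = []
for name in names:
    m = re.search(r'^0_([0-9]+)_([0-9]+)_test_acc\.npy$', name)
    if m:
        # (file name, dataset number, seed number)
        parsed.append((m[0], int(m[1]), int(m[2])))
print(parsed)  # [('0_3_1_test_acc.npy', 3, 1), ('0_3_2_test_acc.npy', 3, 2)]
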
Code example #3
def test_on(expe_idx, tpath, midx=0, dir_prefix='', var='std'):
    """
    Loads trained models as specified by the expe_idx, and tests them on all
    test data at path specified by tpath. The model tested corresponds to the
    one at index midx of the model list specified by the configuration file
    corresponding to expe_idx.
    The variability metric reported can be the standard deviation (var='std')
    or the 95 percent confidence interval (var='conf').
    """
    test_paths = os.listdir(tpath)
    config = load_config(op.join('configs', dir_prefix, 'config%s' % expe_idx))
    path = op.join(config['save_dir'], dir_prefix, 'expe%s' % expe_idx)
    hparams = config['hparam_list'][midx]
    res_dict = {}
    for test_path in test_paths:
        print('loading dl')
        test_dl = load_dl(op.join(tpath, test_path), double=True)
        print('done')
        model_name = config['models'][midx]
        mpaths = op.join(path, model_name, 'models')
        try:
            model = getattr(gm, model_name)(*hparams)
        except AttributeError:
            model = getattr(bm, model_name)(*hparams)
        mpathlist = os.listdir(mpaths)
        accs = []
        mpathlist = [p for p in mpathlist
                     if re.search(r'^ds0_seed[0-9]+\.pt$', p)]
        for mpath in mpathlist:
            mpath = op.join(mpaths, mpath)
            model = load_model(model, mpath)
            opt = torch.optim.Adam(model.parameters(), lr=1e-3)
            l, a, i = one_step(
                model,
                opt,
                test_dl,
                criterion=criterion,
                train=False,
                report_indices=True,
            )
            accs += list(a)
        if var == 'std':
            v = np.std(accs)
        elif var == 'conf':
            v = st.t.interval(0.95,
                              len(accs) - 1,
                              loc=np.mean(accs),
                              scale=st.sem(accs))
        else:
            raise ValueError(f'Invalid value for "var": {var}, must be one '
                             + 'of "std" or "conf"')
        a = np.mean(accs)
        print(f'dset : {test_path}, mean acc {a} +- {v}')
        res_dict[test_path] = (a, v)
    return res_dict
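
A hypothetical invocation; the experiment index, test-data path and directory layout below are illustrative only:

# Hypothetical call: evaluate model 0 of experiment 2 on every dataset found
# under 'data/test_sets', reporting 95 percent confidence intervals.
results = test_on(2, 'data/test_sets', midx=0, var='conf')
for dset_name, (acc, conf_interval) in results.items():
    print(dset_name, acc, conf_interval)
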
Code example #4
def load_models_from_config(dset, seed, config_idx):
    # index of experiment is same as index of config
    config = load_config(op.join('configs', 'config%s' % config_idx))
    path = op.join(config['save_dir'], 'expe%s' % config_idx)
    model_list = models_from_config(config_idx)
    loaded_models = []
    for m_str, m in zip(config['models'], model_list):
        mpath = op.join(path, m_str, 'models',
                        'ds{}_seed{}.pt'.format(dset, seed))
        m = load_model(m, mpath)
        loaded_models.append(m)
    return loaded_models
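
A hypothetical call, assuming a 'ds0_seed1.pt' checkpoint was saved for each model during training:

# Hypothetical usage: reload the models of config 0 that were trained on
# dataset 0 with random seed 1.
models = load_models_from_config(dset=0, seed=1, config_idx=0)
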
Code example #5
def model_metrics(expe_idx, dir_prefix='', n_test=0, var='std'):
    """
    Print the mean accuracy of the model on the test data specified by the 
    expe_idx. dir_prefix allows us to use data/models in subdirectories of the
    standard datsavea and config directories.
    The variability reported can be 'std' for standard deviation or 'conf' for 
    95 percent confidence intervals.
    """
    config = load_config(op.join('configs', dir_prefix, 'config%s' % expe_idx))
    path = op.join(config['save_dir'], dir_prefix, 'expe%s' % expe_idx)
    res_dict = {}
    for m_str in config['models']:
        mpath = op.join(path, m_str, 'data')
        d_paths = os.listdir(mpath)
        mdata = []
        for p in d_paths:
            s = re.search(
                r'^{}_([0-9]+)_([0-9]+)_test_acc\.npy$'.format(n_test), p)
            if s:
                # file name, dataset number, seed number
                mdata.append((s[0], int(s[1]), int(s[2])))
        aa = [np.mean(np.load(op.join(mpath, m[0]))) for m in mdata]
        mean_acc = np.mean(aa)
        if var == 'std':
            v = np.std(aa)
        elif var == 'conf':
            v = st.t.interval(0.95,
                              len(aa) - 1,
                              loc=np.mean(aa),
                              scale=st.sem(aa))
        else:
            raise ValueError(f'Invalid value for "var": {var}, must be one '
                             + 'of "std" or "conf"')
        res_dict[m_str] = (mean_acc, v)
        v_round = str(np.around(v, 3))[:5]
        mean_acc_round = str(np.around(mean_acc, 2))[:4]
        plt.hist(aa, bins=20)
        title = m_str + '; {0} +- {1}'.format(mean_acc_round, v_round)
        plt.title(title)
        plt.show()
    return res_dict
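
As a standalone illustration of the 'conf' branch, a 95 percent Student-t confidence interval can be computed with scipy.stats as follows (toy accuracy values, not real results):

import numpy as np
import scipy.stats as st

# Toy accuracy values; a real run would load them from the *_test_acc.npy files.
accs = np.array([0.91, 0.88, 0.93, 0.90, 0.89])
low, high = st.t.interval(0.95, len(accs) - 1,
                          loc=np.mean(accs), scale=st.sem(accs))
print(np.mean(accs), (low, high))
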
Code example #6
def plot_heatmap_simple_all(config_idx):
    g = gen.SameConfigGen()
    config = load_config(op.join('configs', 'config%s' % config_idx))
    for dset in range(5):
        g.load(op.join(config['load_dir'], config['test_datasets'][dset]))
        directory = op.join('images', 'heatmaps', 'simple')
        path = op.join(directory, 'dset%s.png' % dset)
        img = g.render_ref_state(show=False)
        img = np.flip(img, 0)
        cv2.imwrite(path, img)
        for seed in range(5):
            model_list = load_models_from_config(dset, seed, config_idx)
            plot_heat_map_simple_several_models(model_list,
                                                g,
                                                save=True,
                                                show=False,
                                                seed=seed,
                                                expe=config_idx,
                                                dset=dset)
        g.reset()
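
A hypothetical call; it assumes the 'images/heatmaps/simple' directory already exists, since cv2.imwrite does not create it:

# Hypothetical call: render the reference state of each of the 5 test datasets
# of config 0 and plot the per-model heatmaps for each of the 5 seeds.
plot_heatmap_simple_all(0)
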
Code example #7
def plot_heatmap_double_all(config_idx, n_obj=5, n_draws=5):
    config = load_config(op.join('configs', 'config%s' % config_idx))
    env = Env()
    for draw in range(n_draws):
        env.random_config(n_obj)
        s = env.to_state_list(norm=True)
        directory = op.join('images', 'heatmaps', 'double')
        path = op.join(directory, 'draw%s.png' % draw)
        img = env.render(show=False, mode='envsize')
        img = np.flip(img, 0)
        cv2.imwrite(path, img)
        for seed in range(5):
            model_list = load_models_from_config(0, seed, config_idx)
            plot_heat_map_double_several_models(model_list,
                                                n_obj=n_obj,
                                                s=s,
                                                save=True,
                                                show=False,
                                                seed=seed,
                                                expe=config_idx,
                                                draw=draw)
        env.reset()
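
A hypothetical call with the default number of objects and draws:

# Hypothetical call: sample 5 random 5-object configurations and plot the
# corresponding heatmaps for the models of config 0.
plot_heatmap_double_all(0, n_obj=5, n_draws=5)
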
Code example #8
def main(setup_name, dir_name, duration):
    config = load_config(setup_name)

    iptables_data = get_iptables_data(config, dir_name, setup_name)
    summary_data = []
    flows = []
    for host in iptables_data:
        for peer in iptables_data[host]:
            try:
                flow = f'{host} -> {peer}'
                add_flow_record(summary_data, flow, flows, duration,
                                iptables_data[host][peer]['tx'],
                                iptables_data[peer][host]['rx'])
                # reverse flow
                flow = f'{peer} -> {host}'
                add_flow_record(summary_data, flow, flows, duration,
                                iptables_data[peer][host]['tx'],
                                iptables_data[host][peer]['rx'])
            except KeyError:
                print(
                    f'Failed to find flow: host={host}, peer={peer}. Check that traffic flows defined in inventory are bidirectional.'
                )
                s = (
                    "Consider adding the following to the inventory file:\n"
                    f"    {peer}:\n"
                    "      dst:\n"
                    f"        - {host}\n\n"
                    "Then run generate_config.py, ansible-playbook init.yml and rer-run the test."
                )
                print(s)
                traceback.print_exc()
                sys.exit(1)
    result = get_result(summary_data)
    print(result)
    with open(f'../tests/{setup_name}/{dir_name}/summary.log', 'w') as f:
        f.write(result)
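
A hypothetical invocation; the setup name, results directory and test duration are illustrative only:

# Hypothetical invocation: summarise the iptables counters collected for the
# 'setup0' run stored under ../tests/setup0/run1, assuming a 60-second test.
main('setup0', 'run1', 60)
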
Code example #9
import os.path as op

from argparse import ArgumentParser

from generate_config import load_config

parser = ArgumentParser()
parser.add_argument('-c',
                    '--config',
                    dest='config',
                    help='name of the config file',
                    default='')

if __name__ == '__main__':
    args = parser.parse_args()
    cfg_name = args.config
else:
    cfg_name = 'config0'

config = load_config(op.join('configs', cfg_name))

try:
    n_obj = config['hparams']['n_objects']
except KeyError:
    pass
expe_idx = config['expe_idx']
d = config['load_dir']
s = config['save_dir']
train_datasets = config['train_datasets']
train_indices = config['train_dataset_indices']
test_datasets = config['test_datasets']
test_indices = config['test_dataset_indices']
seeds = config['seeds']
log_file_output_dir = s
model_list = config['models']
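
For reference, the keys read above imply a configuration structured roughly as follows. This is a hypothetical sketch; the actual on-disk format depends on load_config and the values are placeholders:

# Hypothetical config contents implied by the keys accessed above; the actual
# file format depends on load_config and these values are placeholders.
example_config = {
    'expe_idx': 0,
    'load_dir': 'data',
    'save_dir': 'experimental_results',
    'train_datasets': ['train0'],
    'train_dataset_indices': [0],
    'test_datasets': ['test0'],
    'test_dataset_indices': [0],
    'seeds': [0, 1, 2, 3, 4],
    'models': ['ModelName'],
    'hparams': {'n_objects': 5},
}
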