Example no. 1
0
def main(args):
    """Scatter-plot produced vs. desired production intervals, per context.

    Loads the model named by ``args.model``, runs 150 test trials, and for
    every context overlays the measured (t_y, t_p) pairs on top of the
    stimulus-defined (t_y, t_go - t_set) pairs.
    """
    config = get_config(args.model, to_bunch=True)
    net = load_model_path(args.model, config)

    data, loss = test_model(net, config, n_tests=150)

    n_contexts = net.args.T
    prod_pairs = [[] for _ in range(n_contexts)]
    stim_pairs = [[] for _ in range(n_contexts)]

    for context, idx, trial, x, y, out, loss in data:
        t_ready, t_set, t_go = trial.rsg
        # first timestep where the output crosses 1 (argmax of the bool mask)
        t_prod = np.argmax(out >= 1)
        desired = t_set - t_ready
        prod_pairs[context].append((desired, t_prod - t_set))
        stim_pairs[context].append((desired, t_go - t_set))

    # transpose each context's list of pairs into ([desired...], [produced...])
    prod_pairs = [list(zip(*np.array(p))) for p in prod_pairs]
    stim_pairs = [list(zip(*np.array(p))) for p in stim_pairs]

    for ctx in range(n_contexts):
        # stimulus-defined intervals: black dots ringed in the context color
        plt.scatter(stim_pairs[ctx][0],
                    stim_pairs[ctx][1],
                    marker='o',
                    c='black',
                    s=20,
                    edgecolors=cols[ctx])
        # model-produced intervals in the context color
        plt.scatter(prod_pairs[ctx][0], prod_pairs[ctx][1], color=cols[ctx], s=15)

    plt.xlabel('desired t_p')
    plt.ylabel('produced t_p')

    plt.show()
Example no. 2
0
    # NOTE(review): fragment — the enclosing header (likely an `if` guarding
    # noise injection, as at the analogous `if args.noise != 0:` elsewhere in
    # this file) is outside this view.
    # Perturb the input weights with Gaussian noise scaled to half their std.
    J = model['W_f.weight']
    v = J.std()   # noise scale derives from the matrix's own spread
    shp = J.shape
    model['W_f.weight'] += torch.normal(0, v * .5, shp)

    # Same perturbation for the readout weights.
    J = model['W_ro.weight']
    v = J.std()
    shp = J.shape
    model['W_ro.weight'] += torch.normal(0, v * .5, shp)

# NOTE(review): script fragment — `args` and `config` are defined earlier in
# the file. Merge CLI args into the loaded config, filling undefined/None entries.
config = fill_undefined_args(args, config, overwrite_none=True)

net = load_model_path(args.model, config=config)

# Optional evaluation with no cap on the number of test trials.
if args.test_all:
    _, loss2 = test_model(net, config)
    print('avg summed loss (all):', loss2)

# Plot a small batch of trials unless plotting is disabled.
if not args.no_plot:
    data, loss = test_model(net, config, n_tests=6)
    print('avg summed loss (plotted):', loss)

    # run id = the <run>/<job> portion of the model path
    run_id = '/'.join(args.model.split('/')[-3:-1])

    fig, ax = plt.subplots(2, 3, sharex=True, sharey=True, figsize=(12, 7))

    # 'goals'-style datasets get a potential-function overlay — TODO confirm
    if 'goals' in config.dataset:
        p_fn = get_potential(config)
        for i, ax in enumerate(fig.axes):  # one test trial per subplot
            ix, x, y, z, loss = data[i]
            xr = np.arange(len(x))
            # NOTE(review): loop body continues beyond this chunk
Example no. 3
0
# NOTE(review): script fragment — `dsets`, `dt`, `run_id`, `dset_map` come from
# earlier in the file. Collects response-timing values per dataset variant.
intervals = [{}, {}, {}]
for j, dset in enumerate(dsets):
    subset = dt[dt.dset == dset]

    for iterr in range(len(subset)):

        job_id = subset.iloc[iterr].slurm_id

        # each slurm job has its own folder holding the best checkpoint
        model_folder = os.path.join('..', 'logs', run_id, str(job_id))
        model_path = os.path.join(model_folder, 'model_best.pth')
        config = get_config(model_path, ctype='model', to_bunch=True)
        config.m_noise = 0   # evaluate without m_noise
        config.dataset = dset_map[config.dataset]
        net = load_model_path(model_path, config=config)

        data, loss = test_model(net, config, n_tests=200, dset_base='../')
        # NOTE(review): rebinds the outer loop variable `dset` (a name) to the
        # loaded dataset object — intentional? verify
        dset = load_rb(os.path.join('..', config.dataset))

        distr = {}

        for k in range(len(data)):
            dset_idx, x, _, z, _ = data[k]
            # presumably (ready, set, go) timestamps — TODO confirm
            r, s, g = dset[dset_idx][2]

            # first timestep where output z crosses 1; fall back to trial length
            t_first = torch.nonzero(z >= 1)
            if len(t_first) > 0:
                t_first = t_first[0, 0]
            else:
                t_first = len(x)

            # offset by 5 — meaning not evident from this chunk; verify
            val = np.array(t_first - s - 5)
            # NOTE(review): loop continues beyond this chunk (`val` used later)
Example no. 4
0
    # NOTE(review): fragment — the opening `with open(...)` is outside this view
    model = torch.load(f)

# Optionally perturb input and readout weights with Gaussian noise scaled to
# half of each matrix's own standard deviation.
if args.noise != 0:
    J = model['W_f.weight']
    v = J.std()
    shp = J.shape
    model['W_f.weight'] += torch.normal(0, v * .5, shp)

    J = model['W_ro.weight']
    v = J.std()
    shp = J.shape
    model['W_ro.weight'] += torch.normal(0, v * .5, shp)

net = load_model_path(args.model, params={'dset': args.dataset, 'out_act': args.out_act})
dset = load_rb(args.dataset)
# n_tests=0 — presumably means "use every trial"; verify against test_model
data = test_model(net, dset, n_tests=0)

# run id = the <run>/<job> portion of the model path
run_id = '/'.join(args.model.split('/')[-3:-1])

fig, ax = plt.subplots(3,4,sharex=True, sharey=True, figsize=(12,7))

# One test trial per subplot; set up axis lines, grid, and spine styling.
for i, ax in enumerate(fig.axes):
    ix, x, y, z, loss = data[i]
    xr = np.arange(len(x))

    ax.axvline(x=0, color='dimgray', alpha = 1)
    ax.axhline(y=0, color='dimgray', alpha = 1)
    ax.grid(True, which='major', lw=1, color='lightgray', alpha=0.4)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    # NOTE(review): loop body continues beyond this chunk
Example no. 5
0
                    # NOTE(review): fragment — continues a
                    # parser.add_argument(...) call begun before this chunk
                    default=None,
                    help='path to config file if custom')
args = parser.parse_args()

# Config comes from the model folder unless an explicit path was given.
if args.config is None:
    config = get_config(args.model, ctype='model')
else:
    config = json.load(open(args.config, 'r'))
config = update_args(args, config)
dsets = config.dataset

net = load_model_path(args.model, config=config)
# assuming config is in the same folder as the model

# Optional evaluation with no cap on the number of test trials.
if args.test_all:
    _, loss = test_model(net, config)
    print('avg summed loss (all):', loss)

if not args.no_plot:
    data, t_losses = test_model(net, config, n_tests=12)
    print('avg losses:')
    # t_losses: mapping of label -> average loss (per the print below)
    for t, j in t_losses.items():
        print(t + ': ' + str(j))
    # run id = the <run>/<job> portion of the model path
    run_id = '/'.join(args.model.split('/')[-3:-1])

    fig, ax = plt.subplots(3, 4, sharex=False, sharey=False, figsize=(12, 8))
    for i, ax in enumerate(fig.axes):   # one test trial per subplot
        context, ix, trial, x, y, z, loss = data[i]
        xr = np.arange(x.shape[-1])

        ax.axvline(x=0, color='dimgray', alpha=1)
        # NOTE(review): loop body continues beyond this chunk
Example no. 6
0
# Load the saved state dict and jitter the input (W_f) and readout (W_ro)
# weight matrices with Gaussian noise at 1% of each matrix's own std.
with open(args.model, 'rb') as f:
    m_dict = torch.load(f)

for key in ('W_f.weight', 'W_ro.weight'):
    w = m_dict[key]
    # in-place += keeps the same tensor object stored in the dict
    m_dict[key] += torch.normal(0, w.std() * .01, w.shape)

dset = load_rb(args.dset)

# Evaluate the perturbed weights over 200 trials and report the mean loss.
test_data = test_model(m_dict, dset, n_tests=200)
trial_ids, xs, ys, zs, losses = list(zip(*test_data))

print(np.mean(losses))

# bunch = Bunch()
# bunch.N = 250
# bunch.D = 250
# bunch.O = 1

# bunch.res_init_type = 'gaussian'
# bunch.res_init_params = {'std': 1.5}
# bunch.reservoir_seed = 0
# net = Network(bunch)

# nums = ['146', '152', '158', '164', '170', '176', '182', '188']