Code example #1
File: bootstrap.py Project: fdamani/animal-behavior
def simulate_dataset(model_params, T, dim, num_obs_samples):
    transition_log_scale = model_params['transition_log_scale']
    init_prior = ([0.0] * dim, [math.log(1.0)] * dim)
    model = LearningDynamicsModel(init_prior, transition_log_scale, dim=dim)
    num_obs = 50  # unused here; num_obs_samples comes from the argument
    y, x, z_true = model.sample(T=T, num_obs_samples=num_obs_samples)
    return (y, x, z_true)
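A minimal usage sketch for the function above. The argument values are illustrative (drawn from the other examples on this page), and the project's LearningDynamicsModel is assumed to be importable:

import math

# Illustrative call; only 'transition_log_scale' is read from model_params here
model_params = {'transition_log_scale': [math.log(0.1)]}
y, x, z_true = simulate_dataset(model_params, T=100, dim=3, num_obs_samples=50)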
Code example #2
def simulate_datasets(model_params, model_params_grad, dim, num_obs_samples,
                      num_datasets, T):  # T added explicitly; the original read it from an enclosing scope
    # instantiate model
    model = LearningDynamicsModel(model_params, model_params_grad, dim=dim)
    num_obs = 50  # unused here; num_obs_samples comes from the argument
    datasets = []
    for i in range(num_datasets):
        y, x, z_true = model.sample(T=T,
                                    num_obs_samples=num_obs_samples,
                                    dim=dim)
        datasets.append((y, x, z_true))
    return datasets
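A sketch of how this might be called with T passed explicitly. All values are illustrative, and model_params / model_params_grad are assumed to follow the dict shapes shown in the later examples on this page:

datasets = simulate_datasets(model_params, model_params_grad,
                             dim=3, num_obs_samples=50,
                             num_datasets=10, T=200)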
Code example #3
def estimation(dataset,
               boot_index,
               model_params,
               model_params_grad,
               num_obs_samples,
               num_future_steps,
               category_tt_split,
               num_mc_samples,
               output_file,
               true_model_params=None):
    y, x = dataset
    y_complete = y.clone().detach()
    y_complete = y_complete[0:-num_future_steps]
    category_tt_split = 'session'  # note: overrides the value passed in as an argument
    y, x, y_future, x_future = train_future_split(y, x, num_future_steps)
    y_train, y_test, test_inds = train_test_split(y.cpu(),
                                                  x.cpu(),
                                                  cat=category_tt_split)
    x = x.clone().detach()
    y_train = y_train.clone().detach()
    y_test = torch.tensor(y_test, dtype=dtype, device=device)
    test_inds = torch.tensor(test_inds, dtype=torch.long, device=device)
    y_future = y_future.clone().detach()
    x_future = x_future.clone().detach()

    y_train = torch.tensor(y, device=device)  # note: overwrites the train split above with the full y
    data = [y_train, x, y_test, test_inds, y_future, x_future, y_complete]

    model = LearningDynamicsModel(dim=dim)  # dim is assumed to be defined at module scope

    boot_output_file = output_file + '/' + str(boot_index)
    os.makedirs(boot_output_file)
    os.makedirs(boot_output_file + '/model_structs')
    os.makedirs(boot_output_file + '/data')
    os.makedirs(boot_output_file + '/plots')

    inference = Inference(
        data=data,
        model=model,
        model_params=model_params,
        model_params_grad=model_params_grad,
        savedir=boot_output_file,
        num_obs_samples=num_obs_samples,
        num_future_steps=num_future_steps,
        num_mc_samples=num_mc_samples,
        ppc_window=50,
        z_true=z_true,  # z_true is not defined in this excerpt; presumably module-level
        true_model_params=true_model_params)  # pass in just for figures

    opt_params = inference.run()
    torch.save(opt_params, boot_output_file + '/model_structs/opt_params.npy')
    torch.save(dataset, boot_output_file + '/data/dataset.npy')
    torch.save(model_params,
               boot_output_file + '/model_structs/model_params.npy')
    return opt_params
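A hedged driver sketch for the function above (names and values are hypothetical): estimation unpacks each dataset as (y, x), so a bootstrap loop might look like this:

all_params = []
for boot_index, dataset in enumerate(bootstrap_datasets):  # hypothetical list of (y, x) pairs
    all_params.append(
        estimation(dataset, boot_index, model_params, model_params_grad,
                   num_obs_samples=50, num_future_steps=10,
                   category_tt_split='session', num_mc_samples=8,
                   output_file='../output/bootstrap'))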
Code example #4
File: bootstrap.py Project: fdamani/animal-behavior
def estimation(dataset, num_obs_samples, num_future_steps, num_mc_samples):
    y, x, z_true = dataset
    dim = 3  # added: `dim` was undefined in this excerpt; matches the dim=3 below
    init_prior = ([0.0] * dim, [math.log(1.0)] * dim)
    init_transition_log_scale = [math.log(1.)]
    model = LearningDynamicsModel(init_prior, init_transition_log_scale, dim=3)
    # note: `data` is not defined in this excerpt; the original presumably
    # assembled it from the unpacked dataset
    inference = Inference(data,
                          model,
                          savedir='',
                          num_obs_samples=num_obs_samples,
                          num_future_steps=num_future_steps,
                          num_mc_samples=num_mc_samples,
                          z_true=z_true)
    opt_params = inference.optimize()
    return opt_params
Code example #5
File: dirty_main.py Project: fdamani/animal-behavior
    grad_latents = True
    grad_model_params = False

    inference_types = ['map', 'mfvi', 'is', 'smc', 'vsmc']
    inference_type = inference_types[4]
    # T = 200 # 100
    T = 100
    num_particles = 20  # 200
    # time-series model
    # sim model parameters
    dim = 3
    init_prior = ([0.0]*dim, [math.log(0.1)]*dim)
    transition_scale = [math.log(0.1)] * dim
    beta = 4. # sigmoid(4.) = .9820
    log_alpha = -1.
    model = LearningDynamicsModel(init_prior, transition_scale, beta, log_alpha, dim=3)
    #model = LogReg_LDS(init_prior=(0.0, 0.02), transition_scale=1e-3)
    num_obs_samples = 2
    y, x, z_true = model.sample(T=T, num_obs_samples=num_obs_samples)

    plt.plot(to_numpy(z_true))
    # plt.show()
    # model params
    init_prior = ([0.0]*dim, [math.log(0.1)]*dim)
    transition_scale = [math.log(0.1)] * dim
    model = LearningDynamicsModel(init_prior, transition_scale, beta, dim=3)
    # proposal params
    smc_init_prior = ([0.0]*dim, [math.log(0.1)]*dim)
    smc_transition_scale = [math.log(0.1)] * dim
    q_init_latent_loc = torch.tensor([smc_init_prior[0]], 
        requires_grad=False, device=device)
Code example #6
                     dtype=dtype,
                     device=device,
                     requires_grad=model_params_grad['beta']),
        'log_alpha':
        torch.tensor([math.log(.05)],
                     dtype=dtype,
                     device=device,
                     requires_grad=model_params_grad['log_alpha'])
    }
    # [math.log(.05)]*2
    torch.save(model_params_grad,
               output_file + '/model_structs/model_params_grad.pth')
    torch.save(model_params,
               output_file + '/model_structs/init_model_params.pth')

    model = LearningDynamicsModel(dim)
    inference = Inference(datasets,
                          model,
                          model_params,
                          model_params_grad,
                          savedir=output_file,
                          num_obs_samples=num_obs_samples,
                          num_future_steps=num_future_steps,
                          num_mc_samples=num_mc_samples,
                          ppc_window=ppc_window,
                          z_true=z_true,
                          true_model_params=None)  # pass in just for figures

    opt_params = inference.run()
    torch.save(opt_params, output_file + '/model_structs/opt_params.pth')
Code example #7
File: bootstrap.py Project: fdamani/animal-behavior
        'W065.csv', 'W066.csv', 'W068.csv', 'W072.csv', 'W073.csv', 'W074.csv',
        'W075.csv', 'W078.csv', 'W080.csv', 'W081.csv', 'W082.csv', 'W083.csv',
        'W088.csv', 'W089.csv', 'W094.csv'
    ]
    if sim:
        # T = 200 # 100
        T = 200
        # time-series model
        # sim model parameters
        dim = 3
        init_prior = ([0.0] * dim, [math.log(1.0)] * dim)
        transition_scale = [math.log(.2)]  # * dim
        log_gamma = math.log(1e-0)
        beta = 10.  # sigmoid(10.) ≈ .99995
        log_alpha = math.log(1e-2)
        model = LearningDynamicsModel(init_prior, transition_scale, dim=3)
        #model = LogReg_LDS(init_prior=(0.0, 0.02), transition_scale=1e-3)
        num_obs_samples = 50
        y, x, z_true = model.sample(T=T, num_obs_samples=num_obs_samples)
        y = y.detach().cpu().numpy()
        x = x.detach().cpu().numpy()
        z_true = z_true.detach().cpu().numpy()

        plt.cla()
        plt.plot(z_true)
        #plt.show()
        plt.savefig('sim_z.png')
        # embed()
        # model params
    else:
        num_obs_samples = 200
Code example #8
                         requires_grad=model_params_grad['log_alpha']),
            'log_alpha_init_latent_log_scale':
            torch.tensor(
                [math.log(0.005)],
                dtype=dtype,
                device=device,
                requires_grad=model_params_grad['init_latent_log_scale']),
            'log_alpha_init_latent_loc':
            torch.tensor(
                [math.log(0.01)],
                dtype=dtype,
                device=device,
                # note: reuses the 'init_latent_log_scale' grad flag for this loc entry
                requires_grad=model_params_grad['init_latent_log_scale'])
        }

        model = LearningDynamicsModel(dim=dim)
        num_obs_samples = 1
        log_alpha = model.sample_gaussian_random_walk(
            T - 1, model_params['log_alpha_init_latent_loc'],
            model_params['log_alpha_init_latent_log_scale'],
            model_params['alpha_log_diffusion_prior'],
            model_params['alpha_decay_prior'])
        model_params['log_alpha'] = torch.tensor(
            log_alpha,
            dtype=dtype,
            device=device,
            requires_grad=model_params_grad['log_alpha'])
        model_params['log_alpha_var_log_scale'] = log_scale = torch.tensor(
            -5 * torch.ones(T - 1, dtype=dtype, device=device),
            requires_grad=True,
            dtype=dtype,
Code example #9
import torch
import matplotlib.pyplot as plt
import datetime
import utils
from utils import sigmoid
# LearningDynamicsModel comes from the project's model module (import not shown)
#dtype = torch.cuda.float if torch.cuda.is_available() else torch.float
#dtype = torch.float32
dtype = torch.double
# assumed: `device` is used below but not defined in this excerpt; CPU matches map_location='cpu'
device = torch.device('cpu')

f = '../output/switching_alpha_shared_model_kfold_leave_out_6/model_structs/opt_params.pth'
#f = '../output/single_alpha_shared_model_kfold_leave_out_6/model_structs/opt_params.pth'
model_params = torch.load(f, map_location='cpu')
f = '../output/switching_alpha_shared_model_kfold_leave_out_6/data/data.pth'
#f = '../output/single_alpha_shared_model_kfold_leave_out_6/data/data.pth'
data = torch.load(f, map_location='cpu')
y, x = data[0], data[1]
model = LearningDynamicsModel(dim=7)
z_switch = []
z_switch.append(model.sample_init_prior(model_params))
# set diffusion noise to small value
model_params['transition_log_scale'] = torch.tensor([-20.],
                                                    dtype=dtype,
                                                    device=device)
for i in range(1, 10000):
    z_switch.append(
        model.sample_prior(model_params,
                           z_switch[i - 1],
                           y[i - 1],
                           x[i - 1],
                           switching=True))
z_switch = torch.stack(z_switch).numpy().squeeze()
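The excerpt stops after stacking the samples; a plotting step along these lines (an assumption, mirroring the matplotlib usage elsewhere on this page) would typically follow:

plt.cla()
plt.plot(z_switch)
plt.savefig('z_switch.png')  # hypothetical filename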
Code example #10
File: new_main.py Project: fdamani/animal-behavior
                         requires_grad=model_params_grad['log_gamma']),
            'beta':
            torch.tensor([100.],
                         dtype=dtype,
                         device=device,
                         requires_grad=model_params_grad['beta']),
            'log_alpha':
            torch.tensor([math.log(.05)],
                         dtype=dtype,
                         device=device,
                         requires_grad=model_params_grad['log_alpha'])
        }

        torch.save(model_params,
                   output_file + '/model_structs/true_model_params.pth')
        model = LearningDynamicsModel(dim=dim)
        num_obs_samples = 1
        y, x, z_true = model.sample(T=T,
                                    model_params=model_params,
                                    num_obs_samples=num_obs_samples,
                                    dim=dim)

        rw = torch.mean(model.rat_reward_vec(y, x), dim=1)

        y = y.detach().cpu().numpy()
        x = x.detach().cpu().numpy()
        z_true = z_true.detach().cpu().numpy()

        plt.cla()
        plt.plot(z_true, alpha=1.)
        plt.savefig(output_file + '/plots/sim_z.png')
Code example #11
            'transition_log_scale': transition_log_scale,
            'log_gamma': log_gamma,
            'beta': beta,
            'log_alpha': log_alpha
        }
        torch.save(true_model_params,
                   output_file + '/model_structs/true_model_params.pth')
        model_params_grad = {
            'init_prior': False,
            'transition_log_scale': False,
            'log_gamma': False,
            'beta': False,
            'log_alpha': False
        }
        model = LearningDynamicsModel(true_model_params,
                                      model_params_grad,
                                      dim=3)
        num_obs_samples = 1
        y, x, z_true = model.sample(T=T, num_obs_samples=num_obs_samples)
        # sim_results = sim_data.generateSim()

        # x = sim_results['X'][:, None, :]
        # y = sim_results['all_Y'][0][:, None]
        # z_true = sim_results['W']
        # T = 10000
        # dim = 4
        # num_obs_samples = 1

        #rw = torch.mean(model.rat_reward_vec(y, x), dim=1)
        # window=100
        # rw_avg = np.convolve(rw, np.ones(window))/ float(window)
Code example #12
File: bootstrap.py Project: fdamani/animal-behavior
        z_true = z[inds]
        datasets.append((y1, x1, z_true, z1))
    return datasets


datasets = bootstrap(x, y, particles)
og = [(y[0:-1], x[0:-1], particles[1:], particles[0:-1])]

bootstrapped_params = []

for data in datasets:
    y1, x1, z, z1 = data
    model = LearningDynamicsModel(init_prior=init_prior,
                                  transition_log_scale=transition_log_scale,
                                  beta=beta,
                                  log_alpha=log_alpha,
                                  log_gamma=log_gamma,
                                  dim=dim,
                                  grad=False)
    model.init_grad_vbles()
    lr = 5e-2
    opt_params = [
        model.beta, model.log_alpha, model.transition_log_scale,
        model.log_gamma
    ]

    optimizer = torch.optim.Adam(opt_params, lr=lr)
    num_iters = 3000
    outputs = []
    for t in range(num_iters):
        output = -model.complete_data_log_likelihood_bootstrap(
Code example #13
            'transition_log_scale': transition_log_scale,
            'log_gamma': log_gamma,
            'beta': beta,
            'log_alpha': log_alpha
        }
        torch.save(true_model_params,
                   output_file + '/model_structs/true_model_params.pth')
        model_params_grad = {
            'init_prior': False,
            'transition_log_scale': False,
            'log_gamma': True,
            'beta': False,
            'log_alpha': False
        }
        model = LearningDynamicsModel(true_model_params,
                                      model_params_grad,
                                      dim=3)
        num_obs_samples = 500
        y, x, z_true = model.sample(T=T, num_obs_samples=num_obs_samples)

        rw = torch.mean(model.rat_reward_vec(y, x), dim=1)
        # window=100
        # rw_avg = np.convolve(rw, np.ones(window))/ float(window)
        #rw_avg = rw_avg[window:-window]
        # plt.plot(rw_avg)
        # plt.show()

        y = y.detach().cpu().numpy()
        x = x.detach().cpu().numpy()
        z_true = z_true.detach().cpu().numpy()
        plt.cla()