Example no. 1
import math

def simulate_dataset(model_params, T, dim, num_obs_samples):
    """Simulate a single length-T dataset from the learning-dynamics model."""
    transition_log_scale = model_params['transition_log_scale']
    # standard-normal prior on the initial latent state: loc 0, log-scale log(1) = 0
    init_prior = ([0.0] * dim, [math.log(1.0)] * dim)
    model = LearningDynamicsModel(init_prior, transition_log_scale, dim=dim)
    y, x, z_true = model.sample(T=T, num_obs_samples=num_obs_samples)
    return y, x, z_true
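# A minimal usage sketch for simulate_dataset (an assumption, not repository
# code): LearningDynamicsModel is presumed importable from the surrounding
# project, and the parameter values below are illustrative, mirroring the
# settings in Example no. 3.
params = {'transition_log_scale': [math.log(0.1)] * 3}
y, x, z_true = simulate_dataset(params, T=100, dim=3, num_obs_samples=2)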
def simulate_datasets(model_params, model_params_grad, T, dim, num_obs_samples,
                      num_datasets):
    """Simulate num_datasets independent datasets from a single model instance."""
    # instantiate the model once and reuse it for every dataset
    model = LearningDynamicsModel(model_params, model_params_grad, dim=dim)
    datasets = []
    for i in range(num_datasets):
        y, x, z_true = model.sample(T=T,
                                    num_obs_samples=num_obs_samples,
                                    dim=dim)
        datasets.append((y, x, z_true))
    return datasets
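# A matching usage sketch for simulate_datasets. The gradient-flag dict is an
# assumption, copied from the model_params_grad pattern in Example no. 5.
model_params = {'transition_log_scale': [math.log(0.1)] * 3}
model_params_grad = {'beta': False, 'log_alpha': False}
datasets = simulate_datasets(model_params, model_params_grad,
                             T=100, dim=3, num_obs_samples=2,
                             num_datasets=10)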
Example no. 3
    inference_types = ['map', 'mfvi', 'is', 'smc', 'vsmc']
    inference_type = inference_types[4]  # 'vsmc'
    T = 100  # number of time steps (alternative: 200)
    num_particles = 20  # alternative: 200
    # time-series model: simulation parameters
    dim = 3
    init_prior = ([0.0] * dim, [math.log(0.1)] * dim)
    transition_scale = [math.log(0.1)] * dim  # stored in log-space
    beta = 4.  # sigmoid(4.) = .9820
    log_alpha = -1.
    model = LearningDynamicsModel(init_prior, transition_scale, beta, log_alpha,
                                  dim=dim)
    #model = LogReg_LDS(init_prior=(0.0, 0.02), transition_scale=1e-3)
    num_obs_samples = 2
    y, x, z_true = model.sample(T=T, num_obs_samples=num_obs_samples)

    plt.plot(to_numpy(z_true))
    # plt.show()
    # model params
    init_prior = ([0.0] * dim, [math.log(0.1)] * dim)
    transition_scale = [math.log(0.1)] * dim
    model = LearningDynamicsModel(init_prior, transition_scale, beta, dim=dim)
    # proposal params
    smc_init_prior = ([0.0] * dim, [math.log(0.1)] * dim)
    smc_transition_scale = [math.log(0.1)] * dim
    q_init_latent_loc = torch.tensor([smc_init_prior[0]],
        requires_grad=False, device=device)
    q_init_latent_log_scale = torch.tensor([smc_init_prior[1]],
        requires_grad=False, device=device)
    q_transition_log_scale = torch.tensor([smc_transition_scale],
            requires_grad=True,
            dtype=dtype,
            device=device)
        torch.save(model_params,
                   output_file + '/model_structs/true_model_params.pth')

        plt.cla()
        plt.plot(np.exp(to_numpy(log_alpha)))
        plt.savefig(output_file + '/plots/sim_alpha.png')
        # plt.show()
        # embed()
        y, x, z_true = model.sample(T=T,
                                    model_params=model_params,
                                    num_obs_samples=num_obs_samples,
                                    dim=dim,
                                    x=None,
                                    switching=False,
                                    alpha_time_dep=True)
        rw = torch.mean(model.rat_reward_vec(y, x), dim=1)

        y = y.detach().cpu().numpy()
        x = x.detach().cpu().numpy()
        z_true = z_true.detach().cpu().numpy()

        plt.cla()
        plt.plot(z_true, alpha=1.)
        plt.savefig(output_file + '/plots/sim_z.png')

    else:
        num_obs_samples = 1
Example no. 5
                         dtype=dtype,
                         device=device,
                         requires_grad=model_params_grad['beta']),
            'log_alpha':
            torch.tensor([math.log(.05)],
                         dtype=dtype,
                         device=device,
                         requires_grad=model_params_grad['log_alpha'])
        }

        torch.save(model_params,
                   output_file + '/model_structs/true_model_params.pth')
        model = LearningDynamicsModel(dim=dim)
        num_obs_samples = 1
        y, x, z_true = model.sample(T=T,
                                    model_params=model_params,
                                    num_obs_samples=num_obs_samples,
                                    dim=dim)

        rw = torch.mean(model.rat_reward_vec(y, x), dim=1)

        y = y.detach().cpu().numpy()
        x = x.detach().cpu().numpy()
        z_true = z_true.detach().cpu().numpy()

        plt.cla()
        plt.plot(z_true, alpha=1.)
        plt.savefig(output_file + '/plots/sim_z.png')
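
        # Sketch (illustrative, not repository code): reload the ground-truth
        # parameters saved above to verify the torch.save round trip; the path
        # assumes the same output_file layout.
        true_params = torch.load(output_file + '/model_structs/true_model_params.pth')
        print(true_params['beta'], true_params['log_alpha'])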

    else:
        num_obs_samples = 1
        #f = '/tigress/fdamani/neuro_data/data/clean/LearningData_W066_minmaxnorm.txt'