Example #1
0
def model_inverse(test_y, test_z):
    """Run the invertible model in reverse: map latent test_z back to x,
    conditioned on the observation test_y.
    """
    # `rev=True` selects the inverse pass; `c` carries the conditioning input.
    return model([test_z], c=[test_y], rev=True)
Example #2
0
###   TRAINING CONFIGURATION AND HYPERPARAMETERS   ###
######################################################

# NOTE(review): this dict literal is truncated in this snippet — the closing
# brace (and possibly further keys) lies outside the visible excerpt.
c = {
    # GENERAL STUFF
    'suffix':
    f'{model.name}_conditional_cinn-2',  # identifier for trained models and outputs
    'device': 'cuda',  # 'cuda' for GPU, 'cpu' for CPU
    'interactive_visualization':
    True,  # requires visdom package to be installed

    # DATA
    'ndim_x': n_parameters,  # dimensionality of the parameter (x) space
    'ndim_y': n_observations,  # dimensionality of the observation (y) space
    'ndim_z': n_parameters,  # latent dim equals ndim_x (both n_parameters)
    'data_model': model(),  # instantiated data model used for training data
    'vis_y_target': (0.52466008, 0.21816375, 2.29708147),  # presumably a fixed y used for visualization — confirm

    # MODEL ARCHITECTURE
    'n_blocks': 2,  # presumably the number of invertible blocks — confirm against the model builder
    'hidden_layer_sizes': 151,  # 200k
    'init_scale': 0.005,

    # TRAINING HYPERPARAMETERS
    'n_epochs': 50,  # total number of epochs
    'max_batches_per_epoch':
    100,  # iterations per epoch (or if training data exhausted)
    'batch_size': 10000,
    'n_test': 100000,
    'n_train': 1000000,
    'lr_init': 0.01,  # initial learning rate
Example #3
0
def model_inverse(test_z):
    """Map latent samples test_z back through the invertible model (reverse pass)."""
    x_test = model(test_z, rev=True)
    return x_test
Example #4
0
def sample_conditional(y, z_x=None):
    """Sample x conditioned on observations y via the invertible model.

    A standard-normal latent z_x is drawn when none is supplied.
    """
    if z_x is None:
        z_x = torch.randn(y.shape[0], n_parameters).to(device)
    # Forward pass: encode y (paired with z_x) into its latent representation.
    latent_y, _ = model([y, z_x])
    # Reverse pass: decode the latent pair back into (y, x); keep only x.
    _, x_sample = model([latent_y, z_x], rev=True)
    return x_sample
Example #5
0
def sample_joint(n_samples):
    """Draw n_samples joint samples by pushing Gaussian noise through the
    inverse of the model.
    """
    # Draw the two latent blocks in the same order as the forward interface.
    noise_y = torch.randn(n_samples, n_observations).to(device)
    noise_x = torch.randn(n_samples, n_parameters).to(device)
    return model([noise_y, noise_x], rev=True)
Example #6
0
def model_inverse(test_y, test_z):
    """Invert the model conditioned on test_y: encode y alongside fresh
    Gaussian noise, then decode with the supplied latent test_z to recover x.
    """
    fresh_noise = torch.randn(test_y.shape[0], n_parameters).to(c.device)
    # Forward pass to obtain the latent code of test_y; the x-latent is unused.
    latent_y, _ = model([test_y, fresh_noise])
    # Reverse pass with the caller's latent; only the reconstructed x matters.
    _, x_recovered = model([latent_y, test_z], rev=True)
    return x_recovered