# Head of this snippet truncated: for each constrained node, collect the encoded
# indices of its parent features, skipping the 'Treatment' variable.
        if parent == 'Treatment':
            continue
        # Map the parent feature name to its index in the encoded feature space
        key = d.encoded_feature_names.index(parent)
        temp.append(key)

    constrained_feat_indices.append(temp)
    print(node, parents, temp)

    # Model parameters: one learnable weight per parent feature
    num_params = len(temp)
    temp_param = []
    for idx in range(num_params):
        # Leaf tensor on the GPU that tracks gradients
        temp_param.append(torch.rand(1).to(cuda).requires_grad_(True))

    model_param.append(temp_param)
    print(temp_param)
    
# Fit each per-node linear approximation by repeated gradient updates
for idx in range(len(model_param)):
    for epoch in range(200):
        model_param[idx] = linear_model_feature_approx(vae_train_dataset,
                                                       constrained_feat_indices[idx],
                                                       model_param[idx])
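
# Illustrative sketch only (hypothetical, not the repository's actual
# linear_model_feature_approx): one gradient step that fits the per-parent weights so
# that a weighted sum of a node's parent features approximates the node's own feature.
# `target_idx` is a hypothetical extra argument for the node's encoded index.
def linear_model_feature_approx_sketch(dataset, parent_indices, params, target_idx,
                                       lr=0.01):
    x = torch.tensor(dataset, dtype=torch.float32).to(cuda)
    # Linear prediction of the target feature from its parents
    pred = sum(w * x[:, i] for w, i in zip(params, parent_indices))
    loss = torch.mean((pred - x[:, target_idx]) ** 2)
    loss.backward()
    with torch.no_grad():
        for w in params:
            w -= lr * w.grad   # plain gradient-descent update on each weight
            w.grad.zero_()
    return params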

#traverse(vae_train_dataset, 1, len(vae_train_dataset))

for epoch in range(args.epoch):
    np.random.shuffle(vae_train_dataset)
    loss_val.append(
        train_constraint_loss(cf_vae, ae_vae, model_param,
                              constrained_feat_indices, vae_train_dataset,
                              cf_vae_optimizer, validity_reg, constraint_reg,
                              ae_reg, normalise_weights, margin, delta_case, 1,
                              batch_size))
    print('Done training for epoch: ', epoch)

#Saving the final model
torch.save(
    cf_vae.state_dict(), base_model_dir + dataset_name + '-delta-case-' +
    str(args.delta_case) + '-margin-' + str(margin) + '-ae-reg-' +
    str(args.ae_reg) + '-constraint_reg-' + str(args.constraint_reg) +
    '-validity_reg-' + str(args.validity_reg) + '-constrained_node-' +
    str(args.constrained_nodes) + '-epoch-' + str(args.epoch) + '-' +
    'model-approx-gen' + '.pth')
# Head of this snippet truncated: it builds the optimizer over the CF-VAE parameter
# groups, each with its own weight decay (earlier groups omitted; reconstructed here
# as Adam, which is an assumption based on the surviving lr/weight_decay arguments).
cf_vae_optimizer = optim.Adam(
    [{
        'params': filter(lambda p: p.requires_grad,
                         cf_vae.decoder_mean.parameters()),
        'weight_decay': wm3
    }],
    lr=learning_rate)

#Train CFVAE
loss_val = []
likelihood_val = []
valid_cf_count = []

validity_reg = args.validity_reg
constraint_reg = args.constraint_reg
margin = args.margin
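
# Illustrative sketch (assumption, not the repo's exact loss): how a margin-based
# validity term typically enters these CF-VAE objectives. `cf_pred` would be the
# classifier's score for the desired counterfactual class on generated samples;
# the hinge is zero once the score clears `margin`, and `validity_reg` weights it
# against the reconstruction and constraint terms.
def validity_hinge_loss_sketch(cf_pred, margin, validity_reg):
    return validity_reg * torch.mean(torch.clamp(margin - cf_pred, min=0.0))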

#traverse(vae_train_dataset, 1, len(vae_train_dataset))

for epoch in range(args.epoch):
    np.random.shuffle(vae_train_dataset)
    loss_val.append(
        train_constraint_loss(cf_vae, vae_train_dataset, cf_vae_optimizer,
                              normalise_weights, validity_reg, constraint_reg,
                              margin, 1, batch_size))

#Saving the final model
torch.save(
    cf_vae.state_dict(), base_model_dir + dataset_name + '-margin-' +
    str(args.margin) + '-constraint-reg-' + str(args.constraint_reg) +
    '-validity_reg-' + str(args.validity_reg) + '-epoch-' + str(args.epoch) +
    '-' + 'unary-ed-gen' + '.pth')
if args.htune == 0:
    base_model_dir = base_model_dir + args.oracle_data[:-5] + '/'
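
# Assumption (not in the original script): make sure the checkpoint directory exists
# before torch.save is called further down.
import os
os.makedirs(base_model_dir, exist_ok=True)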

# Include the label prediction term loss while training the VAE
loss_val = []
likelihood_val = []
valid_cf_count = []

#Evaluation Time
eval_time = 0.0
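
# `wrapper` is assumed to be the usual closure that binds arguments so that
# timeit.timeit can call the training step with no arguments; if it is not defined
# elsewhere in the script, a minimal version looks like this:
def wrapper(func, *args, **kwargs):
    def wrapped():
        return func(*args, **kwargs)
    return wrapped
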
for epoch in range(args.epoch):
    wrapped = wrapper(train_cflabel_likelihood_loss, cf_vae, cf_vae_optimizer,
                      normalise_weights, validity_reg, oracle_reg, margin, 0,
                      1, batch_size)
    eval_time += timeit.timeit(wrapped, number=1)
    print('-----------------------------------')
    print('Time taken: ', eval_time)
    print('-----------------------------------')

    print('Done training for epoch: ', epoch)

#Saving the final model
torch.save(
    cf_vae.state_dict(),
    base_model_dir + dataset_name + '-eval-case-' + str(args.eval_case) +
    '-supervision-limit-' + str(args.supervision_limit) + '-const-case-' +
    str(args.const_case) + '-margin-' + str(args.margin) + '-oracle_reg-' +
    str(args.oracle_reg) + '-validity_reg-' + str(args.validity_reg) +
    '-epoch-' + str(args.epoch) + '-' + 'oracle-gen' + '.pth')
Example #4
# Head of this snippet also truncated; reconstructed (as an assumption) as an Adam
# optimizer over the CF-VAE parameter groups, with earlier groups omitted.
cf_vae_optimizer = optim.Adam(
    [
        {'params': filter(lambda p: p.requires_grad,
                          cf_vae.decoder_mean_fc.parameters()),
         'weight_decay': wm3},
    ],
    lr=learning_rate)

#Train CFVAE
loss_val = []
likelihood_val = []
valid_cf_count = []

validity_reg = args.validity_reg

#traverse(vae_train_dataset, 1, len(vae_train_dataset))

for epoch in range(args.epoch):
    np.random.shuffle(vae_train_dataset)
    loss_val.append(
        train(cf_vae, vae_train_dataset, cf_vae_optimizer, validity_reg,
              margin, 1, batch_size))
    print('Done training for epoch: ', epoch)
    if epoch == args.epoch - 1:
#         print('Train Set Score: ')
#         likelihood, valid_cf_num= test( cf_vae, vae_train_dataset, 1, batch_size)
        print('Val Set Score: ')
        likelihood, valid_cf_num = test(cf_vae, vae_val_dataset,
                                        auto_encoder_models_cem, 1,
                                        batch_size=64)
#         likelihood_val.append( likelihood )
#         valid_cf_count.append( valid_cf_num )

print(loss_val)
print(likelihood_val)
print(valid_cf_count)

#Saving the final model
torch.save(
    cf_vae.state_dict(), base_model_dir + dataset_name + '-margin-' +
    str(args.margin) + '-validity_reg-' + str(args.validity_reg) + '-epoch-' +
    str(args.epoch) + '-' + 'base-gen' + '.pth')
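
# Sketch (assumption, not part of the original script): reloading the checkpoint
# saved above for later evaluation; the path must be rebuilt with the same
# hyperparameter string used in the save call.
checkpoint_path = (base_model_dir + dataset_name + '-margin-' + str(args.margin) +
                   '-validity_reg-' + str(args.validity_reg) + '-epoch-' +
                   str(args.epoch) + '-' + 'base-gen' + '.pth')
cf_vae.load_state_dict(torch.load(checkpoint_path))
cf_vae.eval()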
# Another truncated snippet head; reconstructed (as an assumption) as an Adam
# optimizer over the CF-VAE parameter groups, with earlier groups omitted.
cf_vae_optimizer = optim.Adam(
    [{
        'params': filter(lambda p: p.requires_grad,
                         cf_vae.decoder_mean.parameters()),
        'weight_decay': wm3
    }],
    lr=learning_rate)

#Train CFVAE
loss_val = []

validity_reg = args.validity_reg
scm_reg = args.scm_reg
delta_case = args.delta_case
margin = args.margin

#traverse(vae_train_dataset, 1, len(vae_train_dataset))

for epoch in range(args.epoch):
    np.random.shuffle(vae_train_dataset)
    loss_val.append(
        train_scm_loss(cf_vae, vae_train_dataset, cf_vae_optimizer,
                       normalise_weights, validity_reg, scm_reg, margin,
                       delta_case, 1, batch_size))
    print('Done training for epoch: ', epoch)

#Saving the final model
torch.save(
    cf_vae.state_dict(), base_model_dir + dataset_name + '-delta-case-' +
    str(args.delta_case) + '-margin-' + str(args.margin) + '-scm_reg-' +
    str(args.scm_reg) + '-validity_reg-' + str(args.validity_reg) + '-epoch-' +
    str(args.epoch) + '-' + 'scm-gen' + '.pth')