# Restore the pretrained black-box classifier and freeze it for inference.
pred_model.load_state_dict(torch.load(path))
pred_model.eval()

# Restore the pretrained auto-encoder and put it in inference mode too.
encoded_size = 10
ae_vae = AutoEncoder(data_size, encoded_size, d).to(cuda)
ae_path = base_model_dir + args.ae_path
ae_vae.load_state_dict(torch.load(ae_path))
ae_vae.eval()

# Initialise a fresh counterfactual VAE; each sub-network gets its own
# weight-decay coefficient in the optimizer below.
wm1 = 1e-2
wm2 = 1e-2
wm3 = 1e-2

encoded_size = 10
cf_vae = CF_VAE(data_size, encoded_size, d).to(cuda)
learning_rate = args.learning_rate
batch_size = args.batch_size

# One Adam parameter group per sub-network, restricted to trainable weights.
_decayed_nets = [
    (cf_vae.encoder_mean, wm1),
    (cf_vae.encoder_var, wm2),
    (cf_vae.decoder_mean, wm3),
]
cf_vae_optimizer = optim.Adam(
    [{'params': filter(lambda p: p.requires_grad, net.parameters()),
      'weight_decay': decay}
     for net, decay in _decayed_nets],
    lr=learning_rate,
)

# Bookkeeping for the CF-VAE training loop.
loss_val = []
likelihood_val = []
valid_cf_count = []


validity_reg = args.validity_reg
# --- Example 2 (scraper/extraction artifact separator; original text: "Ejemplo n.º 2") ---
# Rebuild the auto-encoder from its serialized JSON architecture and
# restore the trained weights from the matching HDF5 file, then keep it
# with the other CEM auto-encoder models.
json_file.close()
ae_model = tf.keras.models.model_from_json(loaded_model_json)
ae_model.load_weights(path + ".h5")
auto_encoder_models_cem.append(ae_model)

# Initialise a fresh convolutional CF-VAE.  Encoder and decoder are each
# split into a conv stage and a fully-connected stage; paired stages share
# one weight-decay coefficient.
wm1 = 1e-2
wm2 = 1e-2
wm3 = 1e-2
wm4 = 1e-2  # NOTE(review): defined but unused within this snippet

cf_vae = CF_VAE(data_size, encoded_size).to(cuda)
learning_rate = 2 * 1e-2
batch_size = args.batch_size
marign = args.margin  # NOTE(review): likely a typo for "margin" — confirm downstream uses before renaming


def _trainable(net):
    # Only parameters that still require gradients enter the optimizer.
    return filter(lambda p: p.requires_grad, net.parameters())


cf_vae_optimizer = optim.Adam(
    [
        {'params': _trainable(cf_vae.encoder_mean_conv), 'weight_decay': wm1},
        {'params': _trainable(cf_vae.encoder_mean_fc), 'weight_decay': wm1},
        {'params': _trainable(cf_vae.encoder_var_conv), 'weight_decay': wm2},
        {'params': _trainable(cf_vae.encoder_var_fc), 'weight_decay': wm2},
        {'params': _trainable(cf_vae.decoder_mean_conv), 'weight_decay': wm3},
        {'params': _trainable(cf_vae.decoder_mean_fc), 'weight_decay': wm3},
    ],
    lr=learning_rate,
)

# Bookkeeping for the CF-VAE training loop.
loss_val = []
likelihood_val = []
# Reload the pretrained black-box classifier for this dataset and freeze
# it for inference.
data_size = len(d.encoded_feature_names)
pred_model = BlackBox(data_size).to(cuda)
path = base_model_dir + dataset_name + '.pth'
pred_model.load_state_dict(torch.load(path))
pred_model.eval()

# Per-sub-network weight-decay coefficients for the new CF-VAE
# (wm4 is unused in this snippet but may be read further down).
wm1 = wm2 = wm3 = wm4 = 1e-2

encoded_size = 10
cf_vae = CF_VAE(data_size, encoded_size, d).to(cuda)
learning_rate = 1e-2
batch_size = args.batch_size
cf_vae_optimizer = optim.Adam([{
    'params':
    filter(lambda p: p.requires_grad, cf_vae.encoder_mean.parameters()),
    'weight_decay':
    wm1
}, {
    'params':
    filter(lambda p: p.requires_grad, cf_vae.encoder_var.parameters()),
    'weight_decay':
    wm2
}, {
    'params':
    filter(lambda p: p.requires_grad, cf_vae.decoder_mean.parameters()),
# Reload the pretrained black-box classifier and freeze it for inference.
data_size = len(d.encoded_feature_names)
pred_model = BlackBox(data_size).to(cuda)
path = base_model_dir + dataset_name + '.pth'
pred_model.load_state_dict(torch.load(path))
pred_model.eval()

# Per-sub-network weight-decay coefficients for the new CF-VAE
# (wm4 is unused in this snippet but may be read further down).
wm1 = wm2 = wm3 = wm4 = 1e-2

encoded_size = 10
cf_vae = CF_VAE(data_size, encoded_size, d).to(cuda)
learning_rate = 1e-2
batch_size = args.batch_size
margin = args.margin
cf_vae_optimizer = optim.Adam([
    {
        'params':
        filter(lambda p: p.requires_grad, cf_vae.encoder_mean.parameters()),
        'weight_decay':
        wm1
    },
    {
        'params':
        filter(lambda p: p.requires_grad, cf_vae.encoder_var.parameters()),
        'weight_decay':
        wm2