                     batch_size=batch_size, learning_rate=learning_rate,
                     n_epochs=n_epochs, dropout_rate=dropout_rate, cuda=cuda,
                     model_name=model_name_I, header=header_I, device=device)

# Swap classifiers: keep each model's own autoencoder weights, but load the
# classifier weights from the other modality's model
newB_dict = swap_model_B.state_dict()
newI_dict = swap_model_I.state_dict()
newB_dict.update(ae_dict_B)
newI_dict.update(ae_dict_I)
newB_dict.update(classifier_dict_I)
newI_dict.update(classifier_dict_B)
swap_model_B.load_state_dict(newB_dict)
swap_model_I.load_state_dict(newI_dict)

swap_model_B.to(device)
swap_model_I.to(device)
swap_model_B.is_fitted = True
swap_model_I.is_fitted = True
swap_model_B.eval()
swap_model_I.eval()

# Test new models with swapped classifiers
correct_B = 0
correct_I = 0
test_num = 0
for i, (XI, XB, y) in enumerate(test_loader):
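    # Hedged sketch of the test-loop body (not present in this fragment). It
    # assumes the VRAEC forward pass returns (x_decoded, latent, output) with
    # `output` holding class scores; adjust if the actual signature differs.
    XI, XB, y = XI.to(device), XB.to(device), y.long().to(device)
    with torch.no_grad():
        _, _, output_B = swap_model_B(XB)
        _, _, output_I = swap_model_I(XI)
    correct_B += (output_B.argmax(dim=1) == y).sum().item()
    correct_I += (output_I.argmax(dim=1) == y).sum().item()
    test_num += y.size(0)
print("swap_model_B accuracy: {:.4f}".format(correct_B / test_num))
print("swap_model_I accuracy: {:.4f}".format(correct_I / test_num))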
                  hidden_layer_depth=hidden_layer_depth, latent_length=latent_length,
                  batch_size=batch_size, learning_rate=learning_rate,
                  n_epochs=n_epochs, dropout_rate=dropout_rate, cuda=cuda,
                  print_every=print_every, clip=clip, max_grad_norm=max_grad_norm,
                  dload=logDir, model_name=model_name, header=header, device=device)
net_trained.load_state_dict(torch.load(logDir + model_name + '.pt'))
net_trained.eval()


# In[11]:

correct = 0
test_num = 0
for i, (XI, XB, y) in enumerate(test_loader):
    # A 'CNN' header expects the image input XI; otherwise use the XB input
    if model.header == 'CNN':
        x = XI
    else:
        x = XB
    x, y = x.to(device), y.long().to(device)
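    # Hedged sketch of the remaining evaluation steps (not present in this
    # fragment). It assumes the forward pass returns (x_decoded, latent, output)
    # with `output` holding class scores.
    with torch.no_grad():
        _, _, output = net_trained(x)
    correct += (output.argmax(dim=1) == y).sum().item()
    test_num += y.size(0)
print("test accuracy: {:.4f}".format(correct / test_num))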
                         batch_size=batch_size, learning_rate=learning_rate,
                         n_epochs=n_epochs, dropout_rate=dropout_rate, cuda=cuda,
                         print_every=print_every, clip=clip, max_grad_norm=max_grad_norm,
                         dload=logDir, model_name=model_name_B, header=header_B,
                         w_r=w_r, w_k=w_k, w_c=w_c, device=device)
model_B_trained.load_state_dict(torch.load(logDir + model_name_B + '.pt'))
model_B_trained.to(device)
model_B_trained.eval()

model_I_trained = VRAEC(num_class=num_class,
                        sequence_length=sequence_length_I,
                        number_of_features=number_of_features_I,
                        hidden_size=hidden_size,
                        hidden_layer_depth=hidden_layer_depth,
                        latent_length=latent_length,
                        batch_size=batch_size, learning_rate=learning_rate,
                        n_epochs=n_epochs, dropout_rate=dropout_rate, cuda=cuda,
                        print_every=print_every,
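                        # Hedged completion of the model_I_trained constructor,
                        # mirroring the model_B_trained call above (assumed; the
                        # remaining arguments are not in this fragment).
                        clip=clip, max_grad_norm=max_grad_norm, dload=logDir,
                        model_name=model_name_I, header=header_I,
                        w_r=w_r, w_k=w_k, w_c=w_c, device=device)
model_I_trained.load_state_dict(torch.load(logDir + model_name_I + '.pt'))
model_I_trained.to(device)
model_I_trained.eval()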
                            latent_length=latent_length,
                            batch_size=batch_size, learning_rate=learning_rate,
                            n_epochs=n_epochs, dropout_rate=dropout_rate, cuda=cuda,
                            print_every=print_every, clip=clip, max_grad_norm=max_grad_norm,
                            dload=logDir, model_name=model_name_B, header=header_B,
                            device=device)

# Load the pretrained B-model weights, mapping to CPU when no GPU is available
model_B_pretrained_dir = logDir + model_name_B + '.pt'
if device == torch.device('cpu'):
    model_B_pretrained.load_state_dict(
        torch.load(model_B_pretrained_dir, map_location=torch.device('cpu')))
else:
    model_B_pretrained.load_state_dict(torch.load(model_B_pretrained_dir))
model_B_pretrained.to(device)
model_B_pretrained.eval()
print("load model from")
print(model_name_B)

model_I = VRAEC(num_class=num_class,
                sequence_length=sequence_length_I,
                number_of_features=number_of_features_I,
                hidden_size=hidden_size,
                hidden_layer_depth=hidden_layer_depth,
                latent_length=latent_length,
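                # Hedged completion of the model_I constructor, mirroring the
                # model_B_pretrained call above (assumed; the remaining arguments
                # are not in this fragment).
                batch_size=batch_size, learning_rate=learning_rate,
                n_epochs=n_epochs, dropout_rate=dropout_rate, cuda=cuda,
                print_every=print_every, clip=clip, max_grad_norm=max_grad_norm,
                dload=logDir, model_name=model_name_I, header=header_I, device=device)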
# print(classifier_dict_I)

# Load only the autoencoder part of the original pretrained model
# model_B_trained.load_state_dict(model_B.state_dict())
# model_I_trained.load_state_dict(model_I.state_dict())
newB_dict = model_B_trained.state_dict()
newI_dict = model_I_trained.state_dict()
newB_dict.update(ae_dict_B)
newI_dict.update(ae_dict_I)

# Overwrite the classifier for the new models (swap B <-> I classifiers)
newB_dict.update(classifier_dict_I)
newI_dict.update(classifier_dict_B)

# Load the new state_dicts
model_B_trained.load_state_dict(newB_dict)
model_I_trained.load_state_dict(newI_dict)

classifier_dict_new_B = {
    k: v for k, v in model_B_trained.state_dict().items() if k in classifier_keys
}
# print("classifier_dict_new_B")
# print(classifier_dict_new_B)

model_B_trained.to(device)
model_I_trained.to(device)
model_B_trained.eval()
model_I_trained.eval()


# In[13]:

model_B_trained.is_fitted = True
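# Hedged symmetric completion (assumed; not in this fragment): flag the I model
# as fitted as well, so both swapped-classifier models can be evaluated with the
# same test loop used earlier.
model_I_trained.is_fitted = True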