if epoch < 20 or epoch % 200 == 0:
    print("Epoch {}: Loss: lc {:.3f}, train_B {:.3f}, val_B {:.3f}, train_I {:.3f}, val_I {:.3f}, \n\t\t Acc: train_B {:.3f}, val_B {:.3f}, train_I {:.3f}, val_I {:.3f}"
          .format(epoch, loss_C, train_loss_B, val_loss_B, train_loss_I, val_loss_I,
                  train_accuracy_B, val_accuracy_B, train_accuracy_I, val_accuracy_I))
    print("-" * 20)

# choose model
# TODO: the two checkpoints are not saved at the same epoch, so the saved
# pair may not share a good common representation
if max_val_acc_B <= val_accuracy_B:
    model_dir = logDir + model_name_B + '.pt'
    print("Saving model at epoch {} to {}".format(epoch, model_dir))
    max_val_acc_B = val_accuracy_B
    torch.save(model_B.state_dict(), model_dir)
if max_val_acc_I <= val_accuracy_I:
    model_dir = logDir + model_name_I + '.pt'
    print("Saving model at epoch {} to {}".format(epoch, model_dir))
    max_val_acc_I = val_accuracy_I
    torch.save(model_I.state_dict(), model_dir)

training_end = datetime.now()
training_time = training_end - training_start
print("RAE training took {}".format(training_time))

model_B.is_fitted = True
model_I.is_fitted = True
model_B.eval()
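# A minimal sketch (not in the original script) of one way to resolve the TODO
# above: checkpoint model_B and model_I together, at the same epoch, whenever
# their combined validation accuracy improves, so the saved pair was trained
# against the same common representation. The helper name `save_pair_if_better`
# and the summed-accuracy criterion are assumptions, not the authors' method;
# it relies on the script's existing torch import.
def save_pair_if_better(best_acc_sum, val_acc_B, val_acc_I,
                        model_B, model_I, dir_B, dir_I):
    """Save both checkpoints in one step when the summed val accuracy improves."""
    if best_acc_sum <= val_acc_B + val_acc_I:
        best_acc_sum = val_acc_B + val_acc_I
        torch.save(model_B.state_dict(), dir_B)
        torch.save(model_I.state_dict(), dir_I)
    return best_acc_sum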
val_accuracy = correct / val_num  # / len(val_loader.dataset)
val_loss /= val_num  # len(val_loader.dataset)
epoch_val_loss.append(val_loss)  # only save the last batch
epoch_val_acc.append(val_accuracy)
if epoch < 20 or epoch % 200 == 0:
    print("train_num {}, val_num {}".format(train_num, val_num))
    print('Epoch: {} Loss: train {:.3f}, valid {:.3f}. Accuracy: train {:.3f}, valid {:.3f}'.format(
        epoch, train_loss, val_loss, train_accuracy, val_accuracy))

# choose model
if max_val_acc <= val_accuracy:
    model_dir = logDir + model_name + '.pt'
    print('Saving model at epoch {} to {}'.format(epoch, model_dir))
    max_val_acc = val_accuracy
    torch.save(model.state_dict(), model_dir)

training_end = datetime.now()
training_time = training_end - training_start
print("training took {}".format(training_time))


# In[9]:


model.is_fitted = True
model.eval()


# In[10]:
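# A minimal sketch (assumption, not in the original): since the weights above
# are saved whenever validation accuracy improves, reloading that checkpoint
# before any test-set pass evaluates the selected model rather than the
# final-epoch weights. The helper name `load_best` is hypothetical.
def load_best(model, checkpoint_path, device='cpu'):
    """Restore the best saved weights and switch to inference mode."""
    model.load_state_dict(torch.load(checkpoint_path, map_location=device))
    model.eval()
    return model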
epoch_val_loss.append(val_loss)  # only save the last batch
epoch_val_acc.append(val_accuracy)
if epoch < 20 or epoch % 200 == 0:
    print("train_num {}, val_num {}".format(train_num, val_num))
    print('Epoch: {} Loss: train {}, valid {}. Accuracy: train {}, valid {}'.format(
        epoch, train_loss, val_loss, train_accuracy, val_accuracy))
    print("-" * 20)

# choose model
if max_val_acc <= val_accuracy:
    print('Saving model at epoch {}'.format(epoch))
    max_val_acc = val_accuracy
    torch.save(model.state_dict(), logDir + model_name + '.pt')

training_end = datetime.now()
training_time = training_end - training_start
print("training took {}".format(training_time))

model.is_fitted = True

# testing set check
net_trained = VRAEC(num_class=num_class,
                    sequence_length=sequence_length,
                    number_of_features=number_of_features,
                    hidden_size=hidden_size,
                    hidden_layer_depth=hidden_layer_depth,
                    latent_length=latent_length,
                    batch_size=batch_size,
if epoch < 20 or epoch % 200 == 0:
    # print("train_num {}, val_num {}".format(train_num, val_num))
    print("Epoch {}: Loss: lc {:.3f}, train_B {:.3f}, val_B {:.3f}, train_I {:.3f}, val_I {:.3f}, \n\t\t Acc: train_B {:.3f}, val_B {:.3f}, train_I {:.3f}, val_I {:.3f}"
          .format(epoch, loss_C, train_loss_B, val_loss_B, train_loss_I, val_loss_I,
                  train_accuracy_B, val_accuracy_B, train_accuracy_I, val_accuracy_I))
    print("-" * 20)

# choose model
# TODO: the two checkpoints are not saved at the same epoch, so the saved
# pair may not share a good common representation
if max_val_acc_B <= val_accuracy_B:
    model_dir = logDir + model_name_B + '.pt'
    print("Saving model at epoch {} to {}".format(epoch, model_dir))
    max_val_acc_B = val_accuracy_B
    torch.save(model_B.state_dict(), model_dir)
if max_val_acc_I <= val_accuracy_I:
    model_dir = logDir + model_name_I + '.pt'
    print("Saving model at epoch {} to {}".format(epoch, model_dir))
    max_val_acc_I = val_accuracy_I
    torch.save(model_I.state_dict(), model_dir)

training_end = datetime.now()
training_time = training_end - training_start
print("RAE training took {}".format(training_time))


# In[ ]:


model_B.is_fitted = True
model_I.is_fitted = True
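# A minimal sketch (assumption): a shared accuracy pass matching the
# correct-count bookkeeping used in these loops, usable for both model_B and
# model_I once they are in eval mode. The unpacking of the forward pass is an
# assumption; adapt it if VRAEC's forward returns more than class scores.
def evaluate(model, loader, device='cpu'):
    """Return classification accuracy of `model` over `loader`."""
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in loader:
            scores = model(x.to(device))  # assumed to yield class scores
            pred = scores.argmax(dim=1)
            correct += (pred == y.to(device)).sum().item()
            total += y.size(0)
    return correct / total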
epoch_val_loss_C.append(val_loss_C)
epoch_val_loss_tot.append(val_loss_tot)
if epoch < 20 or epoch % 200 == 0:
    print("Epoch {}: Loss: lc {:.3f}, train_B {:.3f}, val_B {:.3f}, \n\t Acc: train_B {:.3f}, val_B {:.3f}, train_I {:.3f}, val_I {:.3f}"
          .format(epoch, loss_C, train_loss_B, val_loss_B,
                  train_accuracy_B, val_accuracy_B, train_accuracy_I, val_accuracy_I))
    print("-" * 20)

# choose model
if max_val_acc_B <= val_accuracy_B:
    model_dir = logDir + model_name_B + '.pt'
    print("Saving model at epoch {} to {}".format(epoch, model_dir))
    max_val_acc_B = val_accuracy_B
    torch.save(model_B.state_dict(), model_dir)

training_end = datetime.now()
training_time = training_end - training_start
print("RAE training took {}".format(training_time))

model_B.is_fitted = True
model_I_pretrained.is_fitted = True
model_B.eval()
model_I_pretrained.eval()

# copy the classifier from B to I to examine the classification result
classifier_keys = ['classifier.0.weight', 'classifier.0.bias']
classifier_dict_B = {k: v for k, v in model_B.state_dict().items()
                     if k in classifier_keys}
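# A minimal sketch (assumption) of the natural next step, mirroring the
# standard PyTorch partial-load idiom: overwrite only the classifier
# parameters of the pretrained I-branch model with the dictionary built above,
# leaving its remaining pretrained weights untouched. `model_I_dict` is a
# hypothetical temporary name.
model_I_dict = model_I_pretrained.state_dict()
model_I_dict.update(classifier_dict_B)           # swap in B's classifier tensors
model_I_pretrained.load_state_dict(model_I_dict)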
epoch_val_loss_I.append(val_loss_I)
epoch_val_acc_I.append(val_accuracy_I)
epoch_val_loss_C.append(val_loss_C)
epoch_val_loss_tot.append(val_loss_tot)
if epoch < 20 or epoch % 200 == 0:
    print("Epoch {}: Loss: lc {:.3f}, train_I {:.3f}, val_I {:.3f}, \n\t Acc: train_B {:.3f}, val_B {:.3f}, train_I {:.3f}, val_I {:.3f}"
          .format(epoch, loss_C, train_loss_I, val_loss_I,
                  train_accuracy_B, val_accuracy_B, train_accuracy_I, val_accuracy_I))
    print("-" * 20)

# choose model
if max_val_acc_I <= val_accuracy_I:
    model_dir = logDir + model_name_I + '.pt'
    print("Saving model at epoch {} to {}".format(epoch, model_dir))
    max_val_acc_I = val_accuracy_I
    torch.save(model_I.state_dict(), model_dir)

training_end = datetime.now()
training_time = training_end - training_start
print("RAE training took {}".format(training_time))

model_B_pretrained.is_fitted = True
model_I.is_fitted = True
model_B_pretrained.eval()
model_I.eval()

# copy the classifier from B to I to examine the classification result
classifier_keys = ['classifier.0.weight', 'classifier.0.bias']
classifier_dict_B = {k: v for k, v in model_B_pretrained.state_dict().items()
                     if k in classifier_keys}
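# A minimal sketch (assumption): a sanity check to call after classifier_dict_B
# has been loaded into model_I, confirming the two branches then share identical
# classifier tensors. The helper name `check_classifier_copied` is hypothetical.
def check_classifier_copied(model_src, model_dst, keys):
    """Assert that `model_dst` holds the same classifier tensors as `model_src`."""
    for k in keys:
        assert torch.equal(model_src.state_dict()[k],
                           model_dst.state_dict()[k]), k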