# NOTE(review): this chunk arrived with all statements fused onto one physical
# line (a SyntaxError); the line breaks and indentation below are reconstructed
# from the parallel save-logic visible elsewhere in this file — confirm against
# the original source.

# Record the epoch's averaged metrics (pre-formatted as strings) and persist
# the running history to CSV.
learning_history['train_error_2_loss'].append(f'{avg_train_error_2:.4e}')
learning_history['val_acc'].append(f'{avg_val_acc:.4f}')
learning_history['val_total_loss'].append(f'{avg_val_loss:.4f}')
learning_history['val_class_loss'].append(f'{avg_val_class_error:.4e}')
learning_history['val_ae_loss'].append(f'{avg_val_ae_error:.4e}')
learning_history['val_error_1_loss'].append(f'{avg_val_error_1:.4e}')
learning_history['val_error_2_loss'].append(f'{avg_val_error_2:.4e}')
learning_history['test_acc'].append(f'{avg_test_acc:.4f}')
result_save(f'./result/csv/train_history_50.csv', learning_history)

# Save model, prototypes and autoencoder output at every `save_step`-th epoch,
# at the final epoch, and (after epoch 200) at every `final_pruning_step`-th epoch.
if epoch % save_step == 0 or epoch == num_epochs - 1 or (
        epoch > 200 and epoch % final_pruning_step == 0):
    with torch.no_grad():  # checkpointing/visualization only — no gradients needed
        parameter_save(
            f'./result/pkl/train_model_epoch{epoch + 1}_{prototype_num}.pkl',
            net)
        # Recover the square feature-map side length from the flattened
        # prototype vector length (assumes a square map — TODO confirm).
        f_width = int(
            math.sqrt(len(net.prototype_feature_vectors[0]) / class_num))
        f_height = f_width  # square feature map: height equals width
        # Decode the learned prototype vectors back to image space for inspection.
        prototype_imgs = net.decoder(
            net.prototype_feature_vectors.reshape(
                prototype_num, class_num, f_width, f_height)).cpu().numpy()
        # Arrange prototypes in a 5-wide grid; add a final partial row if needed.
        n_cols = 5
        n_rows = (prototype_num // n_cols + 1
                  if prototype_num % n_cols != 0
                  else prototype_num // n_cols)
# NOTE(review): this chunk arrived with all statements fused onto one physical
# line (a SyntaxError); the line breaks and indentation below are reconstructed
# from the parallel save-logic visible elsewhere in this file — confirm against
# the original source.

# Record the epoch's averaged fine-tuning metrics and persist the running
# history for this prune/finetune run to CSV.
learning_history['train_ae_loss'].append(f'{avg_train_ae_error:.4e}')
learning_history['train_error_1_loss'].append(f'{avg_train_error_1:.4e}')
learning_history['train_error_2_loss'].append(f'{avg_train_error_2:.4e}')
learning_history['test_acc'].append(f'{avg_test_acc:.4f}')
result_save(
    f'./result/csv/conv_prune_finetune_train_history_{prototype}.csv',
    learning_history)

# At the final epoch, checkpoint the fine-tuned model and decode its
# prototype vectors back to image space for inspection.
if epoch == num_epochs - 1:
    with torch.no_grad():  # checkpointing/visualization only — no gradients needed
        parameter_save(
            f'./result/pkl/conv_prune_finetune_train_model_prune{count}_{prototype}.pkl',
            train_net)
        # Recover the square feature-map side length from the flattened
        # prototype vector length (assumes a square map — TODO confirm).
        f_width = int(
            math.sqrt(len(train_net.prototype_feature_vectors[1]) / class_num))
        f_height = f_width  # square feature map: height equals width
        prototype_imgs = train_net.decoder(
            train_net.prototype_feature_vectors.reshape(
                int(prototype), class_num, f_width, f_height)).cpu().numpy()
        # Prototype images are laid out in a 5-wide grid.
        n_cols = 5
# NOTE(review): this chunk arrived with all statements fused onto one physical
# line (a SyntaxError); the line breaks and indentation below are reconstructed.
# The final imshow(...) call was truncated mid-argument-list in the source —
# its closing arguments are a best-effort reconstruction (see TODO below).

# Save the learning history at the final epoch of this pruning round.
if epoch == num_epochs - 1:
    learning_history['epoch'].append(epoch + 1)
    learning_history['train_acc'].append(f'{avg_train_acc:.4f}')
    learning_history['train_total_loss'].append(f'{avg_train_loss:.4f}')
    learning_history['train_class_loss'].append(f'{avg_train_class_error:.4e}')
    learning_history['train_ae_loss'].append(f'{avg_train_ae_error:.4e}')
    learning_history['train_error_1_loss'].append(f'{avg_train_error_1:.4e}')
    learning_history['train_error_2_loss'].append(f'{avg_train_error_2:.4e}')
    learning_history['test_acc'].append(f'{avg_test_acc:.4f}')
    result_save(f'./result/csv/prune_train_history_{prototype}.csv',
                learning_history)

# At the final epoch, checkpoint the pruned model and render its decoded
# prototype vectors as an image grid.
if epoch == num_epochs - 1:
    with torch.no_grad():  # checkpointing/visualization only — no gradients needed
        parameter_save(
            f'./result/pkl/prune_train_model_epoch{k}_{prototype}.pkl',
            train_net)
        # Recover the square feature-map side length from the flattened
        # prototype vector length (assumes a square map — TODO confirm).
        f_width = int(
            math.sqrt(len(train_net.prototype_feature_vectors[1]) / class_num))
        f_height = f_width  # square feature map: height equals width
        prototype_imgs = train_net.decoder(
            train_net.prototype_feature_vectors.reshape(
                int(prototype), class_num, f_width, f_height)).cpu().numpy()
        # Lay the decoded prototypes out in a 5-wide grid; add a final
        # partial row when the count is not a multiple of 5.
        n_cols = 5
        n_rows = (int(prototype) // n_cols + 1
                  if int(prototype) % n_cols != 0
                  else int(prototype) // n_cols)
        g, b = plt.subplots(n_rows, n_cols,
                            figsize=(n_cols, n_rows), squeeze=False)
        for i in range(n_rows):
            for j in range(n_cols):
                # Trailing grid cells beyond the prototype count stay empty.
                if i * n_cols + j < int(prototype):
                    b[i][j].imshow(
                        prototype_imgs[i * n_cols + j].reshape(in_height,
                                                               in_width),
                        # TODO(review): source truncated here — remaining
                        # imshow kwargs (likely cmap='gray') and any further
                        # loop body were not visible; verify against original.
                        cmap='gray')