def save_model(self): # SAVING print("MODEL SAVED AT LOCATION:", self.model_history_path) create_missing_folders(self.model_history_path) torch.save( self.state_dict(), self.model_history_path + self.flavour + "_" + self.model_file_name + '.state_dict') if self.supervised == "semi": torch.save( self.classifier.state_dict(), self.model_history_path + self.flavour + "_" + self.model_file_name + 'classifier.state_dict') torch.save( self.train_loss_history, self.model_history_path + self.flavour + "_" + self.model_file_name + '.train_loss') torch.save( self.train_rec_history, self.model_history_path + self.flavour + "_" + self.model_file_name + '.train_re') torch.save( self.train_kl_history, self.model_history_path + self.flavour + "_" + self.model_file_name + '.train_kl') torch.save( self.val_loss_history, self.model_history_path + self.flavour + "_" + self.model_file_name + '.val_loss') torch.save( self.val_rec_history, self.model_history_path + self.flavour + "_" + self.model_file_name + '.val_re') torch.save( self.val_kl_history, self.model_history_path + self.flavour + "_" + self.model_file_name + '.val_kl') torch.save( self.epoch, self.model_history_path + self.flavour + "_" + self.model_file_name + '.epoch')
def display_reconstruction(self, data, reconstruction):
    self.eval()
    print("GENERATING RECONSTRUCTION IMAGES autoencoder!")
    # Note: this local hparams_string is built but never used below; the image path
    # is derived from self.hparams_string instead.
    hparams_string = "/".join([
        "num_elements" + str(self.num_elements), "n_flows" + str(self.n_flows),
        "z_dim" + str(self.z_dim_last), "unsupervised", "lr" + str(self.lr),
        "ladder" + str(self.ladder), self.flavour
    ])
    x = data.view(-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]).data
    x_grid = tv.utils.make_grid(x)
    x_recon = reconstruction.view(-1, self.input_shape[0], self.input_shape[1],
                                  self.input_shape[2]).data
    x_recon_grid = tv.utils.make_grid(x_recon)
    images_path = self.hparams_string + "/recon/"
    print("Images location:", images_path)
    create_missing_folders(images_path)
    tv.utils.save_image(x_grid, images_path + "original_" + str(self.epoch) + ".png")
    tv.utils.save_image(x_recon_grid,
                        images_path + "reconstruction_example_" + str(self.epoch) + ".png")
def plot_losses(losses, labels, n_neurons=None, results_path="~", filename="NoName"):
    filename = "_".join([filename, "loss.png"])
    create_missing_folders(results_path + "/plots/hnet/")
    fig, ax1 = plt.subplots()
    plt.ylim([0., 1000.])
    ax1.plot(losses, 'g-.', label='train')
    ax1.set_xlabel('epochs')
    ax1.set_ylabel('Loss')
    # ax1.tick_params('y')
    handles, labels = ax1.get_legend_handles_labels()
    ax1.legend(handles, labels)
    if n_neurons is not None:
        # Plot the number of surviving neurons per hidden layer on a second y-axis.
        ax22 = ax1.twinx()
        for i, n in enumerate(n_neurons):
            ax22.plot(n, '--', label="Hidden Layer " + str(i))
        ax22.set_ylabel('#Neurons')
        handles, labels = ax22.get_legend_handles_labels()
        ax22.legend(handles, labels)
    fig.tight_layout()
    # pylab.show()
    pylab.savefig(results_path + "/plots/hnet/" + filename)
    plt.close()
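# Hedged usage sketch for plot_losses: the loss values and per-layer neuron counts
# below are invented for illustration, and "results" is a hypothetical output directory.
def _example_plot_losses():
    train_losses = [900., 500., 250., 120., 80.]    # one value per epoch
    layer_sizes = [[512, 480, 450, 430, 420],        # neurons kept in hidden layer 0
                   [256, 250, 240, 232, 230]]        # neurons kept in hidden layer 1
    plot_losses(train_losses, labels="train", n_neurons=layer_sizes,
                results_path="results", filename="example")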
def save_model(self): # SAVING print("MODEL (with classifier) SAVED AT LOCATION:", self.model_history_path) create_missing_folders(self.model_history_path) torch.save( self.state_dict(), self.model_history_path + self.model_file_name + '.state_dict')
def input_pruning(self, results_path, min_n_input_dims=20, minimum_neurons=20):
    """
    Prune input dimensions whose Hebbian value is below the threshold self.gt_input.

    :param results_path: root directory under which the input masks are saved
    :param min_n_input_dims: only prune if at least this many input dimensions are present
    :param minimum_neurons: minimum number of input dimensions to keep alive
    :return: (valid_bool, alive_inputs) - per-input keep mask and indices of surviving inputs
    """
    self.eval()
    with torch.no_grad():
        hebb_input = self.hebb_input_values.data.cpu().numpy()
        if len(hebb_input) >= min_n_input_dims:
            to_keep = hebb_input > float(self.gt_input)
            print("min_hebb_value:", self.gt_input)
            valid_indices = indices_h(to_keep)
            if len(valid_indices) < minimum_neurons:
                # TODO Replace neurons that could not be removed?
                # hebb_input is a numpy array here, so use np.argsort instead of torch.sort.
                valid_indices = indices_h(np.argsort(hebb_input) < minimum_neurons)
                print("Minimum neurons on layer 1", sep="\t", file=self.hebb_log)
            print("previous_valid_len", self.previous_valid_len)
            self.valid_bool = [1. if x in valid_indices else 0. for x in range(self.input_size)]
            self.alive_inputs = [x for x in range(len(hebb_input)) if x in valid_indices]
            alive_inputs = np.array(self.alive_inputs)
            # if len(self.alive_inputs) < self.previous_valid_len:
            masks_path = results_path + "/images/masks/" + str(self.dataset_name) + "/"
            create_missing_folders(masks_path)
            img_path = "_".join(["alive_inputs", str(len(valid_indices)), str(self.epoch), ".png"])
            print("self.n_channels", self.n_channels)
            if len(self.input_shape) == 3:
                print("SAVING MASK at", results_path)
                mask = np.reshape(self.valid_bool, newshape=(28, 28))  # TODO change hard coding
                plt.imsave(masks_path + img_path, mask)
            self.previous_valid_len = len(valid_indices)
            self.valid_bool_tensor = self.valid_bool_tensor * torch.Tensor(self.valid_bool).cuda()
    return self.valid_bool, self.alive_inputs
def histograms_hidden_layers(xs, results_path, normalized, is_mean=True, epoch=0, depth=0,
                             activated=False, mu=None, var=None, axis=0, bins=50, flat=True,
                             neuron=None):
    ax = plt.subplot(111)
    ax.set_xlabel("Hidden value")
    ax.set_ylabel("Frequency")
    plt.title("PDF of preactivation values")
    if neuron is None:
        neurons = "all"
    else:
        neurons = "single"
        xs = xs[:, neuron]
    if is_mean:
        xs = np.mean(xs, axis=axis)
    ax.hist(xs, bins=bins, alpha=0.5, density=True)
    if mu is None and var is None:
        # No reference distribution given: fit the normal curve to the empirical moments.
        mean_mean = float(np.mean(xs))
        mean_var = float(np.var(xs))
    elif mu is not None and var is not None:
        mean_mean = float(mu)
        mean_var = float(var)
    else:
        print("No images saved. mu and var must either both be None or both be provided.")
        return
    normal_curve(ax, mean_mean, mean_var)
    if activated:
        plt.axvline(x=float(np.mean(xs)), c="g", linewidth=1)
        # half_normal_curve(ax, mu, var, float(np.mean(xs)))
    destination_folder_path = "/".join((results_path, "layers_histograms", "depth_" + str(depth),
                                        "activated_" + str(activated),
                                        "normalized_" + str(normalized))) + "/"
    create_missing_folders(destination_folder_path)
    destination_file_path = (destination_folder_path + "Hidden_values_hist_" + str(epoch)
                             + "_activated" + str(activated) + "_normalized" + str(normalized)
                             + "_mean" + str(is_mean) + "_flat" + str(flat)
                             + "_" + neurons + "neurons.png")
    plt.savefig(destination_file_path)
    plt.close()
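# Hedged usage sketch for histograms_hidden_layers: random activations stand in for a
# real hidden layer, and "results" is a hypothetical output directory. It assumes the
# helpers used inside the function (normal_curve, create_missing_folders) are in scope,
# as they are elsewhere in this file.
def _example_histograms_hidden_layers():
    import numpy as np
    xs = np.random.randn(128, 64)  # batch of 128 samples, 64 hidden units
    histograms_hidden_layers(xs, results_path="results", normalized=False,
                             is_mean=True, epoch=0, depth=0, activated=False)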
def plot_z_stats(self, z, path, generate="generated", max=5000):
    fig, ax = plt.subplots()  # create figure and axis
    plt.boxplot(z)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles, labels)
    plt.tight_layout()
    fig.tight_layout()
    path = "/".join([path, "plots/vae_z_stats", generate]) + "/"
    create_missing_folders(path)
    fig.savefig(path + self.flavour + "_" + str(self.epoch) + '_lr' + str(self.lr)
                + '_bs' + str(self.batch_size) + ".png")
    plt.close(fig)
    del z, path, generate
def set_configs(self, home_path, dataset_name, extra_class, num_classes, results_folder="results",
                data_folder="data", destination_folder="annleukemia",
                meta_destination_folder="meta_pandas_dataframes", csv_filename="csv_loggers",
                lr=1e-3, mom=0, is_unlabelled=True):
    self.extra_class = extra_class
    if extra_class:
        self.num_classes = num_classes + 1
    else:
        self.num_classes = num_classes

    # Hyper-parameters
    self.lr = float(lr)
    self.mom = mom
    self.is_unlabelled = is_unlabelled

    # File names
    if type(dataset_name) == list:
        dataset_name = "_".join(dataset_name)
    self.dataset_name = dataset_name
    self.filename = dataset_name + '_history'
    self.csv_filename = csv_filename

    # Folder names
    self.results_folder = results_folder
    self.destination_folder = destination_folder
    self.data_folder = data_folder
    self.meta_destination_folder = meta_destination_folder

    # Paths
    self.home_path = home_path
    self.results_path = "/".join([self.home_path, self.destination_folder, self.results_folder])
    self.models_path = "/".join([self.results_path, "models"])
    self.model_history_path = self.models_path + "/history/"
    self.csv_logger_path = "/".join([self.results_path, csv_filename])
    self.data_folder_path = "/".join([home_path, self.destination_folder, self.data_folder])
    self.meta_data_folder_path = "/".join([self.data_folder_path, self.meta_destination_folder])

    create_missing_folders(self.csv_logger_path)
    create_missing_folders(self.models_path)
    create_missing_folders(self.meta_data_folder_path)
    create_missing_folders(self.model_history_path)
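# Hedged illustration of the folder layout that set_configs builds, shown for
# hypothetical arguments home_path="/home/user", destination_folder="annleukemia",
# results_folder="results", data_folder="data" and the remaining defaults:
#   /home/user/annleukemia/results/csv_loggers/          <- csv_logger_path
#   /home/user/annleukemia/results/models/               <- models_path
#   /home/user/annleukemia/results/models/history/       <- model_history_path
#   /home/user/annleukemia/data/meta_pandas_dataframes/  <- meta_data_folder_path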
def plot_latent(self, zs, labs, latent_type, generated, step, max_samples=1000):
    fig, ax = plt.subplots()  # create figure and axis
    if type(labs) is not list:
        if len(labs.shape) > 1:
            labs = [x.tolist().index(1) for x in labs]  # one-hot rows -> class indices
    for i, label in enumerate(self.labels_set):
        if label == "N/A":
            continue
        pos1 = np.array([l for l, x in enumerate(labs) if str(x) == str(label)])
        # np.random.shuffle(pos)
        # pos2 = np.array(pos1[:max_samples], dtype=int)
        try:
            ax.scatter(zs[pos1, 0], zs[pos1, 1], s=3, marker='.', label=str(label))
        except Exception:
            zs = np.vstack(zs)
            ax.scatter(zs[pos1, 0], zs[pos1, 1], s=3, marker='.', label=str(label))
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles, labels)
    plt.tight_layout()
    # fig.tight_layout()
    new_string = "/".join([self.hparams_string, latent_type, step,
                           "generated:" + str(generated)])
    print("Plotting", latent_type, "at:\n", new_string, "\n")
    create_missing_folders(new_string)
    fig.savefig(new_string + "/" + str(self.epoch))
    plt.close(fig)
def generate_uniform_gaussian_percentiles(self, n=20, verbose=1, max=1000):
    self.eval()
    print("GENERATING gaussian percentiles IMAGES autoencoder!")
    # Every latent dimension sweeps the same n**2 evenly spaced quantiles of N(0, 1)
    # between the 1st and 99th percentile; each row of xs_grid is one latent code.
    xs_grid = torch.Tensor(
        np.vstack([np.linspace(norm.ppf(0.01), norm.ppf(0.99), n ** 2)
                   for _ in range(self.z_dim_last)]).T)
    this_path = self.hparams_string + "/gaussian_percentiles/"
    if verbose > 0:
        print("GENERATING SS DGM IMAGES AT", this_path)
    print("image path:", this_path)
    create_missing_folders(this_path)
    grid = torch.Tensor(xs_grid).to(device)
    if self.z_dim_last == 2:
        self.plot_z_stats(xs_grid, generate="/ugp_generated/", path=this_path, max=max)
    try:
        new_x = torch.stack([self.sample(g.view(1, -1)) for g in grid])
    except Exception:
        new_x = self.sample(grid)
    if len(self.input_shape) > 1:
        images = new_x.view(-1, self.input_shape[0], self.input_shape[1],
                            self.input_shape[2]).data
        assert images.shape[0] == n ** 2  # one image per latent code in the grid
        images_grid = tv.utils.make_grid(images, int(np.sqrt(images.shape[0])))
        create_missing_folders(this_path)
        tv.utils.save_image(images_grid,
                            this_path + str(self.epoch) + self.dataset_name
                            + "gaussian_uniform_generated.png")
        del images_grid, images, new_x, xs_grid
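# Minimal sketch of the percentile grid built in generate_uniform_gaussian_percentiles,
# for illustration only; `n` and `z_dim` below are hypothetical values, not read from a model.
def _example_percentile_grid(n=4, z_dim=2):
    import numpy as np
    from scipy.stats import norm
    # Rows are latent codes: each dimension takes the same n**2 evenly spaced
    # quantiles of the standard normal between its 1st and 99th percentile.
    grid = np.vstack([np.linspace(norm.ppf(0.01), norm.ppf(0.99), n ** 2)
                      for _ in range(z_dim)]).T
    return grid  # shape: (n**2, z_dim)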
def generate_random(self, max=1000):
    self.eval()
    print("GENERATING RANDOM IMAGES autoencoder!")
    images_path = self.hparams_string + "/generated_random/"
    create_missing_folders(images_path)
    rand_z = torch.randn(self.batch_size, self.z_dim_last).cuda()
    self.plot_z_stats(rand_z.detach().cpu().numpy(),
                      generate="/random_generated/" + self.prior_dist + "/",
                      path=images_path, max=max)
    new_x = self.sample(rand_z)
    if len(self.input_shape) > 1:
        images = new_x.view(-1, self.input_shape[0], self.input_shape[1],
                            self.input_shape[2]).data
        images_grid = tv.utils.make_grid(images)
        print("Images location:", images_path)
        tv.utils.save_image(images_grid,
                            images_path + str(self.epoch) + self.dataset_name + "generated.png")
        del images_grid, images
    del rand_z, new_x, images_path
def plot_performance(values, labels, n_list=None, results_path="~", filename="NoName"):
    fig2, ax21 = plt.subplots()
    ax21.plot(values["train"], 'b-', label='Train:' + str(len(labels["train"])))
    ax21.plot(values["valid"], 'g-', label='Valid:' + str(len(labels["valid"])))
    # Note: no test curve is available here, so the validation values are re-plotted
    # under the "Test" label as a placeholder.
    ax21.plot(values["valid"], 'r-', label='Test:' + str(len(labels["valid"])))
    ax21.set_xlabel('epochs')
    ax21.set_ylabel('Accuracy')
    handles, labels = ax21.get_legend_handles_labels()
    ax21.legend(handles, labels)
    ax22 = ax21.twinx()
    # colors = ["b", "g", "r", "c", "m", "y", "k"]
    if n_list is not None:
        for i, n in enumerate(n_list):
            ax22.plot(n, '--', label="Hidden Layer " + str(i))
        ax22.set_ylabel('#Neurons')
        handles, labels = ax22.get_legend_handles_labels()
        ax22.legend(handles, labels)
    fig2.tight_layout()
    # pylab.show()
    create_missing_folders(results_path + "/plots/hnet/")
    pylab.savefig(results_path + "/plots/hnet/" + filename)
    plt.close()
def plot_performance(loss_total, accuracy, labels, results_path, filename="NoName", verbose=0,
                     std_loss=None, std_accuracy=None):
    """
    Plot the total loss (left axis) and accuracy (right axis) for the train and validation
    sets, with optional error bars.

    :param loss_total: dict with "train" and "valid" lists of per-epoch total losses
    :param accuracy: dict with "train" and "valid" lists of per-epoch accuracies
    :param labels: dict with "train" and "valid" label collections (only their sizes are used)
    :param results_path: directory in which the "plots/" subfolder is created
    :param filename: name of the saved figure
    :param verbose: if > 0, print where the figure is saved
    :param std_loss: optional dict of per-epoch loss standard deviations for error bars
    :param std_accuracy: optional dict of per-epoch accuracy standard deviations for error bars
    """
    fig2, ax21 = plt.subplots()
    n = list(range(len(accuracy["train"])))
    try:
        ax21.plot(loss_total["train"], 'b-', label='Train total loss:' + str(len(labels["train"])))
        ax21.plot(loss_total["valid"], 'g-', label='Valid total loss:' + str(len(labels["valid"])))
    except Exception:
        ax21.plot(loss_total["train"], 'b-', label='Train total loss:')
        ax21.plot(loss_total["valid"], 'g-', label='Valid total loss:')
    if std_loss is not None:
        ax21.errorbar(x=n, y=loss_total["train"],
                      yerr=[np.array(std_loss["train"]), np.array(std_loss["train"])],
                      c="b", label='Train')
        ax21.errorbar(x=n, y=loss_total["valid"],
                      yerr=[np.array(std_loss["valid"]), np.array(std_loss["valid"])],
                      c="g", label='Valid')
    ax21.set_xlabel('epochs')
    ax21.set_ylabel('Loss')
    handles, labels = ax21.get_legend_handles_labels()
    ax21.legend(handles, labels)

    ax22 = ax21.twinx()
    ax22.set_ylabel('Accuracy')
    ax22.plot(accuracy["train"], 'c--', label='Train')
    ax22.plot(accuracy["valid"], 'k--', label='Valid')
    if std_accuracy is not None:
        ax22.errorbar(x=n, y=accuracy["train"],
                      yerr=[np.array(std_accuracy["train"]), np.array(std_accuracy["train"])],
                      c="c", label='Train')
        ax22.errorbar(x=n, y=accuracy["valid"],
                      yerr=[np.array(std_accuracy["valid"]), np.array(std_accuracy["valid"])],
                      c="k", label='Valid')
    handles, labels = ax22.get_legend_handles_labels()
    ax22.legend(handles, labels)
    fig2.tight_layout()
    if verbose > 0:
        print("Performance at ", results_path)
    create_missing_folders(results_path + "/plots/")
    pylab.savefig(results_path + "/plots/" + filename)
    plt.show()
    plt.close()
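# Hedged usage sketch for plot_performance: the loss/accuracy curves and label counts
# below are invented for illustration, and "results" is a hypothetical output directory.
def _example_plot_performance():
    loss_total = {"train": [900., 400., 250.], "valid": [950., 500., 320.]}
    accuracy = {"train": [0.55, 0.72, 0.80], "valid": [0.52, 0.66, 0.71]}
    labels = {"train": list(range(5000)), "valid": list(range(1000))}
    plot_performance(loss_total, accuracy, labels,
                     results_path="results", filename="example_performance.png", verbose=1)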