def plot_heatmap(acc_list, algorithm, param1_space, param2_space):
    """Draw, save, and show a heatmap of F1-scores over a hyperparameter grid.

    Parameters
    -------------
    acc_list: matrix of F1-scores to render; rows correspond to
        param1_space and columns to param2_space (assumed from the tick
        labelling below — confirm against the caller)
    algorithm: either "lle" or "tsne"; any other value still renders the
        heatmap but leaves the axes unlabeled
    param1_space: values of the primary hyperparameter, used as y ticks
        (lle -> number of neighbors, tsne -> perplexity)
    param2_space: values of the secondary hyperparameter, used as x ticks
        (lle -> regularization, tsne -> min_grad_norm; NOTE(review): the
        tsne x-axis caption reads "Tolerance (tol)" — verify they match)

    The figure is written to "images/MNIST_heatmap_<algorithm>" and shown.
    """
    fig, axes = plt.subplots(figsize=(10, 8))
    heat = sns.heatmap(acc_list, cmap="YlGnBu_r", ax=axes,
                       cbar_kws={'label': 'F1-score'})
    # Axis captions depend on which embedding algorithm produced the grid;
    # unknown algorithms simply get no captions (same as the original branch).
    captions = {
        "lle": ("Regularization term (R)", "Number of Neighbors (K)"),
        "tsne": ("Tolerance (tol)", "Perplexity (Perp)"),
    }
    if algorithm in captions:
        x_caption, y_caption = captions[algorithm]
        heat.set_xlabel(x_caption)
        heat.set_ylabel(y_caption)
    heat.set_xticklabels(HL.round_array(param2_space), rotation=90)
    heat.set_yticklabels(param1_space, rotation=0)
    plt.tight_layout()
    plt.savefig("images/MNIST_heatmap_" + algorithm)
    plt.show()
def __str__(self):
    """Build the plain-text report for this run.

    Sections: best-team metrics, global metrics per validation,
    distribution metrics, size metrics, and global metrics per training.
    Diversity sub-sections appear only when
    Config.RESTRICTIONS['used_diversities'] is non-empty.
    """
    # Hoist the diversity count; it gates three sections below.
    n_diversities = len(Config.RESTRICTIONS['used_diversities'])

    parts = [f"RUN {self.run_id}\n", f"seed: {self.seed}"]

    parts.append("\n\n\n##### BEST TEAM METRICS PER VALIDATION")
    parts.append(f"\n\nBest Team Fitness per Validation: {round_array(self.train_score_per_validation)}")
    parts.append(f"\n\nBest Team Validation Score per Validation (champion): {round_array(self.test_score_per_validation)}")
    parts.append(f"\n\nBest Team Recall per Action per Validation: {self.recall_per_validation}")

    parts.append("\n\n\n##### GLOBAL METRICS PER VALIDATION")
    parts.append(f"\n\nGlobal Mean Fitness Score per Validation: {self.global_mean_fitness_score_per_validation}")
    parts.append(f"\n\nGlobal Max. Fitness Score per Validation: {self.global_max_fitness_score_per_validation}")
    parts.append(f"\n\nGlobal Mean Validation Score per Validation: {self.global_mean_validation_score_per_validation}")
    parts.append(f"\n\nGlobal Max. Validation Score per Validation: {self.global_max_validation_score_per_validation}")
    if n_diversities > 0:
        parts.append("\n\nGlobal Diversities per Validation")
        for name in self.global_diversity_per_validation:
            parts.append(f"\n{name}: {self.global_diversity_per_validation[name]}")

    parts.append("\n\n\n##### DISTRIBUTION METRICS PER VALIDATION")
    parts.append(f"\n\nDistribution of Actions per Validation: {self.actions_distribution_per_validation}")
    parts.append(f"\n\nDistribution of Inputs per Validation (per instruction): {self.inputs_distribution_per_instruction_per_validation}")
    parts.append(f"\n\nDistribution of Inputs per Validation (per team): {self.inputs_distribution_per_team_per_validation}")

    parts.append("\n\n\n##### METRICS FOR SIZES PER VALIDATION")
    parts.append(f"\n\nMean Team Sizes: {self.mean_team_size_per_validation}")
    parts.append(f"\n\nMean Program Sizes (with introns): {self.mean_program_size_with_introns_per_validation}")
    parts.append(f"\n\nMean Program Sizes (without introns): {self.mean_program_size_without_introns_per_validation}")

    parts.append("\n\n\n##### GLOBAL METRICS PER TRAINING")
    parts.append(f"\n\nGlobal Mean Fitness Score per Training: {self.global_mean_fitness_per_generation}")
    parts.append(f"\n\nGlobal Max. Fitness Score per Training: {self.global_max_fitness_per_generation}")
    # NOTE(review): this header is emitted unconditionally, even when the
    # per-diversity entries below are suppressed by the "> 1" guard —
    # preserved as-is to keep report output identical.
    parts.append("\n\nGlobal Fitness Score per Training (per diversity):")
    if n_diversities > 1:
        for name in self.global_fitness_per_diversity_per_generation:
            parts.append(f"\n{name}: {self.global_fitness_per_diversity_per_generation[name]}")
    if n_diversities > 0:
        parts.append("\n\nGlobal Diversities per Training")
        for name in self.global_diversity_per_generation:
            parts.append(f"\n{name}: {self.global_diversity_per_generation[name]}")
    if n_diversities > 1:
        parts.append(f"\n\nDiversity Type per Training: {self.novelty_type_per_generation}")

    return "".join(parts)
def __str__(self):
    """Assemble the full metrics report for this run.

    Covers champion/global metrics per validation and per training,
    diversity metrics (only when diversity metrics are configured under
    Config.USER['advanced_training_parameters']['diversity']['metrics']),
    distribution metrics, and size metrics, then appends the
    environment-specific attribute report.
    """

    def section(title, series):
        # One "last validation / per validation" pair for a metric series.
        return (f"\n\n{title}"
                f"\n - last validation: {series[-1]}"
                f"\n - per validation: {series}")

    # Hoist the configured diversity-metric count; it gates four sections.
    n_diversity = len(
        Config.USER['advanced_training_parameters']['diversity']['metrics'])

    parts = [f"RUN {self.run_id}\n", f"seed: {self.seed}"]
    parts.append("\n\n\n\n#################### General Metrics:")

    parts.append("\n\n\n##### GLOBAL METRICS PER VALIDATION")
    parts.append(f"\n\nChampion Fitness per Validation: {round_array(self.train_score_per_validation_)}")
    parts.append(f"\nChampion Score per Validation: {round_array(self.champion_score_per_validation_)}")
    if n_diversity > 0:
        parts.append("\n\nGlobal Diversities per Validation")
        for name in self.global_diversity_per_validation_:
            parts.append(f"\n - {name}: {self.global_diversity_per_validation_[name]}")

    parts.append("\n\n\n##### GLOBAL METRICS PER TRAINING")
    parts.append(f"\n\nGlobal Mean Fitness Score per Training: {self.global_mean_fitness_per_generation_}")
    parts.append(f"\nGlobal Max. Fitness Score per Training: {self.global_max_fitness_per_generation_}")
    if n_diversity > 1:
        parts.append("\n\n\nGlobal Fitness Score per Training (per diversity):")
        for name in self.global_fitness_per_diversity_per_generation_:
            parts.append(f"\n - {name}: {self.global_fitness_per_diversity_per_generation_[name]}")
    if n_diversity > 0:
        parts.append("\n\nGlobal Diversities per Training")
        for name in self.global_diversity_per_generation_:
            parts.append(f"\n - {name}: {self.global_diversity_per_generation_[name]}")
    if n_diversity > 1:
        parts.append(f"\n\nDiversity Type per Training: {self.novelty_type_per_generation_}")

    parts.append("\n\n\n##### DISTRIBUTION METRICS PER VALIDATION")
    parts.append(section("Distribution of Actions",
                         self.actions_distribution_per_validation_))
    parts.append(section("Distribution of Inputs (per program)",
                         self.inputs_distribution_per_instruction_per_validation_))
    parts.append(section("Distribution of Inputs (per team)",
                         self.inputs_distribution_per_team_per_validation_))

    parts.append("\n\n\n##### SIZE METRICS PER VALIDATION")
    parts.append(section("Mean Team Sizes",
                         self.mean_team_size_per_validation_))
    parts.append(section("Mean Program Sizes (with introns)",
                         self.mean_program_size_with_introns_per_validation_))
    parts.append(section("Mean Program Sizes (without introns)",
                         self.mean_program_size_without_introns_per_validation_))

    parts.append(
        self.environment.metrics_.generate_output_for_attributes_for_run_info(self))
    return "".join(parts)