Code example #1
File: sbb.py Project: MatKallada/SBBClassifier
    def _process_scores(self, score_per_generation_per_run):
        # aggregate each generation column across runs
        score_means = []
        score_stds = []
        for index in range(len(score_per_generation_per_run[0])):
            column = [row[index] for row in score_per_generation_per_run]
            score_means.append(round_value(numpy.mean(column)))
            score_stds.append(round_value(numpy.std(column)))
        return score_means, score_stds
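
A minimal standalone sketch of what _process_scores computes: the input is treated as a runs-by-generations matrix, each generation column is gathered across runs, and the per-column mean and standard deviation are returned. The sample data and the round_value stand-in below are assumptions for illustration, not part of the project.

import numpy

def round_value(value, round_decimals_to=2):
    # assumed stand-in for the project's round_value helper
    return round(float(value), round_decimals_to)

# three runs, four generations each (hypothetical scores)
score_per_generation_per_run = [
    [0.50, 0.62, 0.70, 0.75],
    [0.48, 0.60, 0.72, 0.74],
    [0.52, 0.61, 0.69, 0.77],
]

score_means = []
score_stds = []
for index in range(len(score_per_generation_per_run[0])):
    column = [row[index] for row in score_per_generation_per_run]
    score_means.append(round_value(numpy.mean(column)))
    score_stds.append(round_value(numpy.std(column)))

print(score_means)  # [0.5, 0.61, 0.7, 0.75]
print(score_stds)   # per-generation standard deviations across runs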
Code example #2
File: sbb.py Project: MatKallada/SBBClassifier
    def _generate_overall_metrics_output(self, run_infos):       
        msg = "\n\n\n#################### OVERALL RESULTS ####################"
        msg += "\n\n\n##### BEST TEAM METRICS"
        score_per_run = []
        for run in run_infos:
            score_per_run.append(round_value(run.best_team.score_testset_))
        self.best_scores_per_runs_ = score_per_run
        msg += "\n\nBest Team Validation Score per Run: "+str(score_per_run)
        msg += "\nmean: "+str(round_value(numpy.mean(score_per_run)))
        msg += "\nstd. deviation: "+str(round_value(numpy.std(score_per_run)))
        scores = [run.best_team.score_testset_ for run in run_infos]
        best_run = run_infos[scores.index(max(scores))]
        msg += "\nbest run: "+str(best_run.run_id)

        score_means, score_stds = self._process_scores([run.train_score_per_validation for run in run_infos])
        msg += "\n\nBest Team Train Score per Validation across Runs:"
        msg += "\nmean: "+str(score_means)
        msg += "\nstd. deviation: "+str(score_stds)

        score_means, score_stds = self._process_scores([run.test_score_per_validation for run in run_infos])
        msg += "\n\nBest Team Validation Score per Validation across Runs:"
        msg += "\nmean: "+str(score_means)
        msg += "\nstd. deviation: "+str(score_stds)

        msg += "\n\n\n##### GLOBAL METRICS"
        final_scores = [run.global_mean_validation_score_per_validation[-1] for run in run_infos]
        msg += "\n\nGlobal Validation Score per Run: "+str(final_scores)
        msg += "\nmean: "+str(round_value(numpy.mean(final_scores)))
        msg += "\nstd. deviation: "+str(round_value(numpy.std(final_scores)))
        best_run = run_infos[final_scores.index(max(final_scores))]
        msg += "\nbest run: "+str(best_run.run_id)

        score_means, score_stds = self._process_scores([run.global_mean_fitness_score_per_validation for run in run_infos])
        msg += "\n\nGlobal Train Score per Validation across Runs:"
        msg += "\nmean: "+str(score_means)
        msg += "\nstd. deviation: "+str(score_stds)

        score_means, score_stds = self._process_scores([run.global_mean_validation_score_per_validation for run in run_infos])
        msg += "\n\nGlobal Validation Score per Validation across Runs:"
        msg += "\nmean: "+str(score_means)
        msg += "\nstd. deviation: "+str(score_stds)

        for key in Config.RESTRICTIONS['used_diversities']:
            score_means, score_stds = self._process_scores([run.global_diversity_per_validation[key] for run in run_infos])
            msg += "\n\nMean Diversity per Validation across Runs ("+str(key)+"):"
            msg += "\nmean: "+str(score_means)
            msg += "\nstd. deviation: "+str(score_stds)

        elapseds_per_run = [run.elapsed_time for run in run_infos]
        msg += "\n\nFinished execution, total elapsed time: "+str(round_value(sum(elapseds_per_run)))+" mins "
        msg += "(mean: "+str(round_value(numpy.mean(elapseds_per_run)))+", std: "+str(round_value(numpy.std(elapseds_per_run)))+")"
        return msg
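
The best-run lookup above (scores.index(max(scores))) selects the first run whose best team has the highest test-set score. The sketch below shows the same selection with max and a key function; FakeTeam and FakeRun are hypothetical stand-ins for the project's team and RunInfo objects.

class FakeTeam(object):
    # hypothetical stand-in for a team with a test-set score
    def __init__(self, score):
        self.score_testset_ = score

class FakeRun(object):
    # hypothetical stand-in for the project's RunInfo objects
    def __init__(self, run_id, score):
        self.run_id = run_id
        self.best_team = FakeTeam(score)

run_infos = [FakeRun(1, 0.71), FakeRun(2, 0.78), FakeRun(3, 0.74)]

# original style: build a score list, then index into it
scores = [run.best_team.score_testset_ for run in run_infos]
best_run = run_infos[scores.index(max(scores))]

# equivalent one-liner; ties resolve to the first maximum in both versions
best_run_alt = max(run_infos, key=lambda run: run.best_team.score_testset_)

assert best_run.run_id == best_run_alt.run_id == 2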
Code example #3
File: sbb.py Project: MatKallada/SBBClassifier
    def _store_per_generation_metrics(self, run_info, teams_population):
        # skip teams that were just created in the current generation
        older_teams = [team for team in teams_population if team.generation != self.current_generation_]
        generation_info = []
        for team in older_teams:
            team_info = []
            team_info.append(round_value(team.fitness_, round_decimals_to = 3))
            team_info.append(round_value(team.score_testset_, round_decimals_to = 3))
            for diversity in Config.RESTRICTIONS['used_diversities']:
                if diversity in team.diversity_:
                    value = round_value(team.diversity_[diversity], round_decimals_to = 3)
                else:
                    value = 0.0
                team_info.append(value)
            generation_info.append(team_info)
        run_info.info_per_team_per_generation.append(generation_info)
        mean_fitness = round_value(numpy.mean([team.fitness_ for team in older_teams]), 3)
        run_info.global_mean_fitness_per_generation.append(mean_fitness)
        run_info.global_max_fitness_per_generation.append(round_value(max([team.fitness_ for team in older_teams])))
        for diversity in Config.RESTRICTIONS['used_diversities']:
            run_info.global_diversity_per_generation[diversity].append(round_value(numpy.mean([t.diversity_[diversity] for t in older_teams]), 3))
        if len(Config.RESTRICTIONS['used_diversities']) > 1 and self.selection.previous_diversity_:
            run_info.global_fitness_per_diversity_per_generation[self.selection.previous_diversity_].append(mean_fitness)
            run_info.novelty_type_per_generation.append(Config.RESTRICTIONS['used_diversities'].index(self.selection.previous_diversity_))
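
The missing-diversity fallback in the inner loop above can be collapsed with dict.get, which returns a default when the key is absent. A minimal sketch, assuming diversity_ is a plain dict and reusing the assumed round_value stand-in:

def round_value(value, round_decimals_to=2):
    # assumed stand-in for the project's round_value helper
    return round(float(value), round_decimals_to)

team_diversity = {'genotype': 0.41237}              # hypothetical team.diversity_ contents
used_diversities = ['genotype', 'fitness_sharing']  # hypothetical Config.RESTRICTIONS['used_diversities']

team_info = [round_value(team_diversity.get(diversity, 0.0), round_decimals_to=3)
             for diversity in used_diversities]
print(team_info)  # [0.412, 0.0]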
Code example #4
File: sbb.py Project: MatKallada/SBBClassifier
    def run(self):
        print "\n### Starting pSBB"

        # initialize the environment and the selection algorithm
        self.environment = self._initialize_environment()
        self.selection = Selection(self.environment)

        overall_info = ""
        overall_info += "\n### CONFIG: "+str(Config.USER)+"\n"
        overall_info += "\n### RESTRICTIONS: "+str(Config.RESTRICTIONS)+"\n"
        overall_info += self.environment.metrics()
        overall_info += "\nSeeds per run: "+str(self.seeds_per_run_)
        overall_info += "\nDiversities: "+str(Config.RESTRICTIONS['used_diversities'])
        print overall_info

        run_infos = []
        for run_id in range(Config.USER['training_parameters']['runs_total']):
            start_time = time.time()
            run_info = RunInfo(run_id+1, self.seeds_per_run_[run_id])
            print "\nStarting run: "+str(run_info.run_id)

            self._set_seed(run_info.seed)

            # randomly initialize populations
            self.current_generation_ = 0
            teams_population, programs_population = self._initialize_populations()
            
            self.environment.reset()
            while not self._stop_criterion():
                self.current_generation_ += 1
                
                validation = (self.current_generation_ == 1
                    or self.current_generation_ % Config.USER['training_parameters']['validate_after_each_generation'] == 0)

                # selection
                teams_population, programs_population, pareto_front = self.selection.run(self.current_generation_, teams_population, programs_population, validation)

                # final pruning
                if self._stop_criterion():
                    older_teams = [team for team in teams_population if team.generation != self.current_generation_]
                    for team in older_teams:
                        team.prune_total()

                # validation
                if not validation:
                    print ".",
                    sys.stdout.flush()
                    self._store_per_generation_metrics(run_info, teams_population)
                else:
                    best_team = self.environment.validate(self.current_generation_, teams_population)
                    self._store_per_generation_metrics(run_info, teams_population)
                    self._print_and_store_per_validation_metrics(run_info, best_team, teams_population, programs_population)

            self._store_per_run_metrics(run_info, best_team, teams_population, pareto_front)
            run_info.elapsed_time = round_value((time.time() - start_time)/60.0)
            print("\nFinished run "+str(run_info.run_id)+", elapsed time: "+str(run_info.elapsed_time)+" mins")
            run_infos.append(run_info)
            sys.stdout.flush()
        
        # finalize execution (get final metrics, print to output, print to file)
        overall_info += self._generate_overall_metrics_output(run_infos)
        print overall_info
        sys.stdout.flush()

        if Config.RESTRICTIONS['write_output_files']:
            self.filepath_ = self._create_folder()
            self._write_output_files(run_infos, overall_info)
            self._save_teams_data_per_generation(run_infos)
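
The validation cadence in the loop above triggers a validation at generation 1 and then at every multiple of validate_after_each_generation. A small sketch of which generations validate, assuming a cadence of 5 over 12 generations:

validate_after_each_generation = 5   # stand-in for Config.USER['training_parameters']['validate_after_each_generation']
total_generations = 12

validation_generations = [generation for generation in range(1, total_generations + 1)
                          if generation == 1 or generation % validate_after_each_generation == 0]
print(validation_generations)  # [1, 5, 10]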
Code example #5
File: sbb.py Project: MatKallada/SBBClassifier
    def _print_and_store_per_validation_metrics(self, run_info, best_team, teams_population, programs_population):
        print "\n\n>>>>> Generation: "+str(self.current_generation_)+", run: "+str(run_info.run_id)
        run_info.train_score_per_validation.append(best_team.fitness_)
        run_info.test_score_per_validation.append(best_team.score_testset_)
        run_info.recall_per_validation.append(best_team.extra_metrics_['recall_per_action'])
        print("\n### Best Team Metrics: "+best_team.metrics()+"\n")

        older_teams = [team for team in teams_population if team.generation != self.current_generation_]

        fitness_score_mean = round_value(numpy.mean([team.fitness_ for team in older_teams]))

        validation_score_mean = round_value(numpy.mean([team.score_testset_ for team in older_teams]))
        run_info.global_mean_validation_score_per_validation.append(validation_score_mean)

        print
        for key in best_team.diversity_:
            run_info.global_diversity_per_validation[key].append(run_info.global_diversity_per_generation[key][-1])
            print str(key)+": "+str(best_team.diversity_[key])+" (global: "+str(run_info.global_diversity_per_generation[key][-1])+")"

        print "\n### Global Metrics:"

        run_info.global_mean_fitness_score_per_validation.append(fitness_score_mean)
        run_info.global_max_fitness_score_per_validation.append(round_value(max([team.fitness_ for team in older_teams])))
        print "\nfitness (global): "+str(fitness_score_mean)

        actions_distribution = Counter([p.action for p in programs_population])
        print "\nactions distribution: "+str(actions_distribution)
        actions_distribution_array = []
        for action in range(Config.RESTRICTIONS['total_actions']):
            if action in actions_distribution:
                actions_distribution_array.append(actions_distribution[action])
            else:
                actions_distribution_array.append(0)
        run_info.actions_distribution_per_validation.append(actions_distribution_array)

        inputs_distribution_per_instruction = Counter()
        inputs_distribution_per_team = Counter()
        for team in older_teams:
            inputs_distribution_per_instruction.update(team.inputs_distribution())
            inputs_distribution_per_team.update(list(team.inputs_distribution()))
        inputs_distribution_per_instruction_array = []
        inputs_distribution_per_team_array = []
        for value in range(Config.RESTRICTIONS['total_inputs']):
            if value in inputs_distribution_per_instruction:
                inputs_distribution_per_instruction_array.append(inputs_distribution_per_instruction[value])
            else:
                inputs_distribution_per_instruction_array.append(0)
            if value in inputs_distribution_per_team:
                inputs_distribution_per_team_array.append(inputs_distribution_per_team[value])
            else:
                inputs_distribution_per_team_array.append(0)
        print "inputs distribution (global, per instruction): "+str(inputs_distribution_per_instruction_array)
        print "inputs distribution (global, per team): "+str(inputs_distribution_per_team_array)
        run_info.inputs_distribution_per_instruction_per_validation.append(inputs_distribution_per_instruction_array)
        run_info.inputs_distribution_per_team_per_validation.append(inputs_distribution_per_team_array)

        print
        print "Global Fitness (last 10 gen.): "+str(run_info.global_mean_fitness_per_generation[-10:])
        
        if len(Config.RESTRICTIONS['used_diversities']) > 0:
            print "Global Diversity (last 10 gen.):"
            for diversity in Config.RESTRICTIONS['used_diversities']:
                print "- "+str(diversity)+": "+str(run_info.global_diversity_per_generation[diversity][-10:])
        if len(Config.RESTRICTIONS['used_diversities']) > 1:
            print "Diversity Type (last 10 gen.): "+str(run_info.novelty_type_per_generation[-10:])

        avg_team_size = round_value(numpy.mean([len(team.programs) for team in older_teams]))
        avg_program_with_introns_size = round_value(numpy.mean(flatten([[len(program.instructions) for program in team.programs] for team in older_teams])))
        avg_program_without_introns_size = round_value(numpy.mean(flatten([[len(program.instructions_without_introns_) for program in team.programs] for team in older_teams])))
        run_info.mean_team_size_per_validation.append(avg_team_size)
        run_info.mean_program_size_with_introns_per_validation.append(avg_program_with_introns_size)
        run_info.mean_program_size_without_introns_per_validation.append(avg_program_without_introns_size)
        print "\nMean Team Sizes: "+str(run_info.mean_team_size_per_validation[-10:])
        print "Mean Program Sizes (with introns): "+str(run_info.mean_program_size_with_introns_per_validation[-10:])
        print "Mean Program Sizes (without introns): "+str(run_info.mean_program_size_without_introns_per_validation[-10:])

        print "\n<<<<< Generation: "+str(self.current_generation_)+", run: "+str(run_info.run_id)
Code example #6
File: sbb.py Project: gaybro8777/SBBFramework
    def run(self):
        print "\n### Starting pSBB"

        initial_info = self._generate_initial_message_output()
        print initial_info

        for run_id in range(Config.USER['training_parameters']['runs_total']):
            
            run_info = RunInfo(run_id+1, self.environment_, self.seeds_per_run_[run_id])
            print "\nStarting run: "+str(run_info.run_id)

            self._set_seed(run_info.seed)

            if Config.USER['advanced_training_parameters']['second_layer']['enabled']:
                self._initialize_actions(run_info)

            self.current_generation_ = 0

            teams_population, programs_population = self._initialize_populations()
            
            self.environment_.reset()

            while not self._stop_criterion():
                self.current_generation_ += 1
                
                validation = self._is_validation()

                teams_population, programs_population, pareto_front = self.selection_.run(
                    self.current_generation_, teams_population, programs_population)

                if self._stop_criterion():
                    older_teams = [team for team in teams_population if team.generation != self.current_generation_]
                    for team in older_teams:
                        team.prune_total()

                self.environment_.metrics_.store_per_generation_metrics(run_info, teams_population, 
                    self.current_generation_, self.selection_.previous_diversity_)

                if not validation:
                    print ".",
                    sys.stdout.flush()
                else:
                    best_team = self.environment_.validate(self.current_generation_, teams_population)
                    self.environment_.metrics_.store_per_validation_metrics(run_info, best_team, 
                        teams_population, programs_population, self.current_generation_)
                    print "\n\n>>>>> Generation: "+str(self.current_generation_)+", run: "+str(run_info.run_id)
                    self.environment_.metrics_.print_per_validation_metrics(run_info, best_team)
                    print "\n<<<<< Generation: "+str(self.current_generation_)+", run: "+str(run_info.run_id)

            self.environment_.metrics_.store_per_run_metrics(run_info, best_team, teams_population, pareto_front, 
                self.current_generation_)

            run_info.end()
            print("\nFinished run "+str(run_info.run_id)+", elapsed time: "+str(run_info.elapsed_time_)+" mins")
            self.run_infos_.append(run_info)
            sys.stdout.flush()
        
        # finalize execution (get final metrics, print to output, print to file)
        msg = self.environment_.metrics_.generate_overall_metrics_output(self.run_infos_)

        elapseds_per_run = [run.elapsed_time_ for run in self.run_infos_]
        msg += "\n\nFinished execution, total elapsed time: "+str(round_value(sum(elapseds_per_run)))+" mins "
        msg += "(mean: "+str(round_value(numpy.mean(elapseds_per_run)))+", std: "+str(round_value(numpy.std(elapseds_per_run)))+")"

        initial_info += msg
        self.best_scores_per_runs_ = [round_value(run.best_team_.score_champion_) for run in self.run_infos_]
        print initial_info
        sys.stdout.flush()
        if Config.RESTRICTIONS['write_output_files']:
            self._write_output_files(initial_info)
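
A standalone sketch of the elapsed-time summary appended near the end of run, reusing the assumed round_value stand-in; the elapsed_time_ values are hypothetical and, as in the log message, measured in minutes:

import numpy

def round_value(value, round_decimals_to=2):
    # assumed stand-in for the project's round_value helper
    return round(float(value), round_decimals_to)

elapseds_per_run = [12.5, 14.0, 13.25]   # hypothetical run.elapsed_time_ values

msg = "Finished execution, total elapsed time: "+str(round_value(sum(elapseds_per_run)))+" mins "
msg += "(mean: "+str(round_value(numpy.mean(elapseds_per_run)))+", std: "+str(round_value(numpy.std(elapseds_per_run)))+")"
print(msg)  # total 39.75, mean 13.25, std 0.61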