Example #1
    def _get_execution_configuration(self, report):
        # Rebuild the best genome stored in the execution report and return
        # the configuration it was evolved with.
        genome_dict = report.data['best_individual']
        best_individual_fitness = report.data['best_individual_fitness']
        print(f'Fitness of best individual: {best_individual_fitness}')

        genome = Genome.from_dict(genome_dict)
        config = genome.genome_config
        return config
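
A minimal usage sketch, assuming a `report` object shaped like the one above and the `set_configuration` helper that appears in Example #2; the surrounding driver code is hypothetical:

    # Hypothetical usage: restore the configuration of a finished execution
    # and make it the active one before further analysis.
    config = self._get_execution_configuration(report)
    set_configuration(config)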
Example #2
    def _generate_row(self, report, absolute_best=True):
        # Build one result row (a single-row DataFrame) for an execution
        # report; with absolute_best=False the genome saved before
        # fine-tuning is evaluated instead.
        execution_id = report.execution_id
        correlation_id = report.correlation_id
        if absolute_best:
            genome_dict = report.data['best_individual']
            best_individual_fitness = report.data['best_individual_fitness']
        else:
            genome_dict = report.data['fine_tuning'][
                'best_genome_before_fine_tuning']
            best_individual_fitness = report.data['fine_tuning'][
                'best_fitness_before_fine_tuning']

        genome = Genome.from_dict(genome_dict)
        config = genome.genome_config
        self.configurations[execution_id] = config
        self.best_genomes[execution_id] = genome
        self.best_networks[execution_id] = ComplexStochasticNetwork(
            genome=genome)

        set_configuration(config)
        # evaluate the genome on the test split of its dataset
        loss = get_loss(problem_type=config.problem_type)
        print(f'Train percentage: {config.train_percentage}')
        print(f'Random state: {config.dataset_random_state}')
        dataset = get_dataset(config.dataset,
                              train_percentage=config.train_percentage,
                              testing=True,
                              random_state=config.dataset_random_state,
                              noise=config.noise,
                              label_noise=config.label_noise)
        x, y_true, y_pred_prob, loss_value = evaluate_genome(
            genome=genome,
            dataset=dataset,
            loss=loss,
            problem_type=config.problem_type,
            beta_type=config.beta_type,
            batch_size=config.batch_size,
            n_samples=self.n_samples,
            is_gpu=config.is_gpu,
            is_testing=True,
            return_all=True)
        y_pred = torch.argmax(y_pred_prob, dim=1)
        train_percentage = config.train_percentage
        noise = config.noise
        label_noise = config.label_noise
        duration = report.duration
        n_parameters = genome.calculate_number_of_parameters()
        # bias and weight parameters come in pairs (presumably mean and std
        # of each stochastic node/connection), hence the division by 2
        n_nodes = genome.n_bias_parameters // 2
        n_connections = genome.n_weight_parameters // 2
        n_layers = self._get_number_of_layers(genome)
        mean_genome_std = get_mean_std(genome)
        end_condition = report.data['end_condition']
        chunk = pd.DataFrame(
            {
                'correlation_id': correlation_id,
                'execution_id': execution_id,
                'train_percentage': train_percentage,
                'noise': noise,
                'label_noise': label_noise,
                'is_bayesian': not config.fix_std,
                'beta': config.beta,
                'loss_training': -best_individual_fitness,
                'loss_testing': loss_value,
                'duration': duration,
                'end_condition': end_condition,
                'n_parameters': n_parameters,
                'n_nodes': n_nodes,
                'n_connections': n_connections,
                'n_layers': n_layers,
                'mean_genome_std': mean_genome_std,
            },
            index=[0])
        if config.problem_type == 'classification':
            # classification metrics (accuracy reported as a percentage)
            chunk['accuracy'] = accuracy_score(y_true, y_pred) * 100
            chunk['precision'] = precision_score(y_true,
                                                 y_pred,
                                                 average='weighted')
            chunk['recall'] = recall_score(y_true, y_pred, average='weighted')
            chunk['f1'] = f1_score(y_true, y_pred, average='weighted')
            ece, _ = expected_calibration_error(
                y_true.numpy(),
                y_pred_prob.numpy(),
                n_bins=ECE_N_BINS,
                uniform_binning=UNIFORM_BINNING)
            chunk['ece'] = ece
        else:
            # regression metrics
            chunk['mse'] = mean_squared_error(y_true, y_pred)
            chunk['mae'] = mean_absolute_error(y_true, y_pred)
        return chunk
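
A minimal sketch of how these rows might be collected into a results table, assuming a `reports` iterable; `pd.concat` is standard pandas, while the loop and the `results` name are hypothetical:

    # Hypothetical driver loop: build one row per execution report and
    # stack them into a single DataFrame for analysis.
    rows = [self._generate_row(report) for report in reports]
    results = pd.concat(rows, ignore_index=True)
    print(results[['execution_id', 'loss_training', 'loss_testing']].head())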