Beispiel #1
0
def main():
    """Load the best individual of a stored run and evaluate it on the test set.

    Fetches the report identified by (algorithm version, dataset,
    correlation id, execution id), rebuilds the genome from its Julia dict
    representation, and prints loss, confusion matrix and accuracy.
    """
    ALGORITHM_VERSION = 'bayes-neat'
    DATASET = 'mnist_binary'
    CORRELATION_ID = 'test'
    # Earlier execution ids kept for reference:
    #   'f6d2d5e3-26a3-4069-9071-b74009323761'  # 2 hours run
    #   'bf516f54-c29b-4f88-949c-102ab67930b3'  # 10 hours run (learning architecture)
    #   '59cbe09c-4ee7-4e7e-9b17-26c866113cfe'  # test-run
    #   'c5551a6c-177b-4c2c-8ecd-a75e79ae0ec2'
    #   '1f30c172-9056-4012-9651-0765527bd550'  # fitness -0.2
    #   'a91761a0-6201-4a1d-9293-5e713f305fbf'  # fitness -0.86
    execution_id = '991b275d-6282-4f7d-8e97-3908baf94726'

    report_repository = ReportRepository.create(project='neuro-evolution',
                                                logs_path=LOGS_PATH)
    report = report_repository.get_report(algorithm_version=ALGORITHM_VERSION,
                                          dataset=DATASET,
                                          correlation_id=CORRELATION_ID,
                                          execution_id=execution_id)
    genome_dict = report.data['best_individual']
    best_individual_fitness = report.data['best_individual_fitness']
    print(f'Fitness of best individual: {best_individual_fitness}')

    genome = Genome.create_from_julia_dict(genome_dict)
    # BUG FIX: `config` is used below but its initialization had been
    # commented out, which made every access raise NameError at runtime.
    config = get_configuration()
    print(f'Execution id: {execution_id}')

    loss = get_loss(problem_type=config.problem_type)

    ##### EVALUATE ######
    print('Evaluating results')
    evaluate_with_parallel(genome, loss, config)

    dataset = get_dataset(config.dataset, testing=True)
    dataset.generate_data()
    # TODO: remove data-loader. If we want to sample the dataset in each
    #  generation, then we can create a middle layer between evaluation
    #  and dataset.
    x, y_true, y_pred, loss_value = evaluate_genome(
        genome=genome,
        dataset=dataset,
        loss=loss,
        problem_type=config.problem_type,
        beta_type=config.beta_type,
        batch_size=config.batch_size,
        n_samples=config.n_samples,
        is_gpu=config.is_gpu,
        return_all=True)
    # Collapse per-class scores to the predicted class index.
    y_pred = torch.argmax(y_pred, dim=1)

    from sklearn.metrics import confusion_matrix, accuracy_score
    print(f'Loss: {loss_value}')
    print('Confusion Matrix:')
    print(confusion_matrix(y_true, y_pred))

    print(f'Accuracy: {accuracy_score(y_true, y_pred) * 100} %')
Beispiel #2
0
 def download_reports(self,
                      project,
                      algorithm_version,
                      dataset,
                      correlation_id,
                      base_dir='./'):
     """Download every report of the given run family into *base_dir*."""
     repo = ReportRepository.create(project=project, logs_path=None)
     repo.download_reports_to_dir(algorithm_version,
                                  dataset,
                                  correlation_id,
                                  base_dir=base_dir)
Beispiel #3
0
def main():
    """Evaluate a stored jupyneat individual on the test split and print metrics."""
    ALGORITHM_VERSION = 'bayes-neat'
    DATASET = 'classification_example_1'
    CORRELATION_ID = 'test'
    execution_id = '180186eb-46c8-4bbd-9f8a-26a36cbe57e4'

    repo = ReportRepository.create(project='neuro-evolution',
                                   logs_path=LOGS_PATH)
    report = repo.get_report(algorithm_version=ALGORITHM_VERSION,
                             dataset=DATASET,
                             correlation_id=CORRELATION_ID,
                             execution_id=execution_id)

    best_genome = report.data['best_individual']
    best_fitness = report.data['best_individual_fitness']
    print(f'Fitness of best individual: {best_fitness}')

    # Rehydrate the run configuration that was stored with the report.
    config = jsons.load(report.config, BaseConfiguration)

    criterion = get_loss(problem_type=config.problem_type)
    test_set = get_dataset(config.dataset, testing=True)
    test_set.generate_data()

    x, y_true, y_pred, loss_value = evaluate_genome_jupyneat(
        genome=best_genome,
        problem_type=config.problem_type,
        n_input=config.n_input,
        n_output=config.n_output,
        activation_type=config.node_activation,
        dataset=test_set,
        loss=criterion,
        beta_type=config.beta_type,
        batch_size=config.batch_size,
        n_samples=config.n_samples,
        is_gpu=config.is_gpu,
        return_all=True)

    # Turn the per-class scores into hard class predictions.
    y_pred = torch.argmax(y_pred, dim=1)

    from sklearn.metrics import confusion_matrix, accuracy_score
    print(f'Loss: {loss_value}')
    print('Confusion Matrix:')
    print(confusion_matrix(y_true, y_pred))

    print(f'Accuracy: {accuracy_score(y_true, y_pred) * 100} %')
Beispiel #4
0
def main():
    """Launch the jupyneat evaluation scenario three times in a row."""
    report_repository = ReportRepository.create(project='neuro-evolution', logs_path=LOGS_PATH)
    notifier = SlackNotifier.create(channel='batch-jobs')
    banner = '###################################################'
    for _attempt in range(3):
        for _ in range(3):
            print(banner)
        report = EvolutionReportJupyNeat(report_repository=report_repository,
                                         algorithm_version=ALGORITHM_VERSION,
                                         dataset=DATASET,
                                         correlation_id=CORRELATION_ID)
        print(report.report.execution_id)

        # Tag the shared config with this run's identifiers before executing.
        config.experiment = CORRELATION_ID
        config.dataset = DATASET
        config.execution = report.report.execution_id
        engine = JupyNeatFSEvaluationEngine.create(report=report, notifier=notifier)
        engine.run()
Beispiel #5
0
    def _get_reports(self):
        """Fetch every report for the configured correlation ids, keyed by execution id."""
        repo = ReportRepository.create(project=self.project,
                                       logs_path=LOGS_PATH)
        collected = {}
        for correlation_id in self.correlation_ids:
            print('###########')
            print(f'CORRELATION ID: {correlation_id}')
            execution_ids = list(
                repo.get_executions(
                    algorithm_version=self.algorithm_version,
                    dataset=self.dataset_name,
                    correlation_id=correlation_id))
            print(f'{len(execution_ids)} executions')
            for eid in execution_ids:
                collected[eid] = repo.get_report(
                    algorithm_version=self.algorithm_version,
                    dataset=self.dataset_name,
                    correlation_id=correlation_id,
                    execution_id=eid)

        return collected
Beispiel #6
0
    def test_nas_standard_integrate_with_report(self):
        """Run one NE evolution end-to-end and check its report processes to a DataFrame."""
        correlation_id = 'test_probabilistic'
        algorithm_version = 'test_neat'
        project = 'test-ne'
        notifier = Mock()

        repo = ReportRepository.create(project=project,
                                       logs_path=LOGS_PATH)

        report_ne = EvolutionReport(report_repository=repo,
                                    algorithm_version=algorithm_version,
                                    dataset=self.config.dataset,
                                    correlation_id=correlation_id)
        engine = EvolutionEngine(report=report_ne,
                                 notifier=notifier,
                                 is_cuda=False)
        engine.run()

        experiment_data_ne = ExperimentDataNE(
            correlation_ids=[correlation_id],
            dataset_name=self.config.dataset,
            n_samples=10,
            project=project,
            algorithm_version=algorithm_version,
            keep_top=1.0,
            filter_checkpoint_finish=False)

        # Keep only the report produced by the run above.
        reports = {
            eid: rep
            for eid, rep in experiment_data_ne._get_reports().items()
            if eid == report_ne.report.execution_id
        }

        data_df = experiment_data_ne._process_reports(reports)

        self.assertEqual(type(data_df), DataFrame)
Beispiel #7
0
def main():
    """Run the evolution engine and report how many attempts failed.

    Each attempt creates a fresh report and engine; a failing run is
    counted instead of aborting the loop, and the failure tally is
    printed at the end.
    """
    import traceback

    report_repository = ReportRepository.create(project='neuro-evolution',
                                                logs_path=LOGS_PATH)
    notifier = SlackNotifier.create(channel=get_slack_channel(
        dataset_name=dataset_name))

    failed = 0
    total = 0
    for retry in range(1):

        print('Another Try')
        total += 1

        report = EvolutionReport(report_repository=report_repository,
                                 algorithm_version=ALGORITHM_VERSION,
                                 dataset=DATASET,
                                 correlation_id=CORRELATION_ID)
        print(report.report.execution_id)
        evolution_engine = EvolutionEngine(report=report,
                                           notifier=notifier,
                                           is_cuda=is_cuda)
        # BUG FIX: `failed` was printed below but never incremented, and a
        # single crashing run aborted the whole retry loop. Count failures
        # and keep going so the summary line is meaningful.
        try:
            evolution_engine.run()
        except Exception:
            failed += 1
            traceback.print_exc()

    print(f'It failed {failed} times out of {total}')
Beispiel #8
0
def main():
    """Fetch a single stored report and dump its raw attributes."""
    execution_id = '4cfdcb16-bd67-485b-ac48-107199211b46'
    repo = ReportRepository.create(project='neuro-evolution')
    report = repo.get_report(algorithm_version=ALGORITHM_VERSION,
                             dataset=DATASET,
                             correlation_id=CORRELATION_ID,
                             execution_id=execution_id)
    print(report.__dict__)
Beispiel #9
0
    def test_nas_standard_integrate_with_report(self):
        correlation_id_standard = 'test_standard'
        correlation_id_probabilistic = 'test_probabilistic'
        project = 'test'
        notifier = Mock()

        report_repository = ReportRepository.create(project='test',
                                                    logs_path=LOGS_PATH)

        backprop_report_standard = neural_architecture_search(
            EvaluateDL=EvaluateStandardDL,
            n_hidden_layers_values=[1],
            n_neurons_per_layer_values=[1],
            correlation_id=correlation_id_standard,
            config=self.config,
            batch_size=1000000,
            lr=0.01,
            weight_decay=0.0005,
            n_epochs=1,
            notifier=notifier,
            report_repository=report_repository,
            is_cuda=False,
            n_repetitions=1)

        backprop_report_probabilistic = neural_architecture_search(
            EvaluateDL=EvaluateProbabilisticDL,
            n_hidden_layers_values=[1],
            n_neurons_per_layer_values=[1],
            correlation_id=correlation_id_probabilistic,
            config=self.config,
            batch_size=1000000,
            lr=0.01,
            weight_decay=0.0005,
            n_epochs=1,
            notifier=notifier,
            report_repository=report_repository,
            is_cuda=False,
            n_repetitions=1)

        experiment_data_nas = ExperimentDataNAS(correlation_ids=[correlation_id_standard,correlation_id_probabilistic],
                                                dataset_name=self.config.dataset,
                                                n_samples=10,
                                                project=project,
                                                algorithm_version=ALGORITHM_VERSION,
                                                keep_top=1.0,
                                                filter_checkpoint_finish=False)\
                # .process_data()

        reports_all = experiment_data_nas._get_reports()
        reports = {}
        for execution_id, report in reports_all.items():
            if execution_id in [
                    backprop_report_standard.report.execution_id,
                    backprop_report_probabilistic.report.execution_id
            ]:

                reports[execution_id] = report

        data_df = experiment_data_nas._process_reports(reports)

        self.assertEqual(type(data_df), DataFrame)

        expected_accuracy = data_df.loc[
            (data_df['correlation_id'] == correlation_id_standard) &
            (data_df['execution_id'] ==
             backprop_report_standard.report.execution_id),
            'accuracy'].values[0]