Example #1
    def setUp(self) -> None:
        self.config = create_configuration(
            filename='/classification-miso.json')
        self.config.pop_size = 20
        self.config.is_fine_tuning = True
        self.config.epochs_fine_tuning = 1
        self.config.n_generations = 1
Example #2
    def test_mapping(self):
        config = create_configuration(filename='/classification-miso.json')
        n_neurons_per_layer = 3
        network = FeedForward(n_input=config.n_input, n_output=config.n_output,
                              n_neurons_per_layer=n_neurons_per_layer,
                              n_hidden_layers=1)

        std = 0.1
        genome = get_genome_from_standard_network(network, std=std)
        self.assertEqual(type(genome), Genome)

        parameters = network.state_dict()
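        # Key convention inferred from the assertions below: output nodes are
        # keyed 0 and 1, the three hidden nodes are keyed 2-4, and
        # connection_genes are indexed by (source_key, target_key).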
        # hidden to output
        self.assertEqual(parameters['layer_0.weight'][0, 0], genome.connection_genes[(2, 0)].get_mean())
        self.assertEqual(parameters['layer_0.weight'][1, 0], genome.connection_genes[(2, 1)].get_mean())
        self.assertEqual(parameters['layer_0.weight'][0, 1], genome.connection_genes[(3, 0)].get_mean())
        self.assertEqual(parameters['layer_0.weight'][1, 1], genome.connection_genes[(3, 1)].get_mean())
        self.assertEqual(parameters['layer_0.weight'][0, 2], genome.connection_genes[(4, 0)].get_mean())
        self.assertEqual(parameters['layer_0.weight'][1, 2], genome.connection_genes[(4, 1)].get_mean())

        self.assertEqual(parameters['layer_0.bias'][0], genome.node_genes[0].get_mean())
        self.assertEqual(parameters['layer_0.bias'][1], genome.node_genes[1].get_mean())

        # input to hidden
        self.assertEqual(parameters['layer_1.bias'][0], genome.node_genes[2].get_mean())
        self.assertEqual(parameters['layer_1.bias'][1], genome.node_genes[3].get_mean())
        self.assertEqual(parameters['layer_1.bias'][2], genome.node_genes[4].get_mean())
Example #3
    def test_network_structure_5(self):
        self.config = create_configuration(filename='/regression-miso.json')
        self.config.node_activation = 'identity'
        self.config.n_output = 2
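        # Edges below are (source_key, target_key), as exercised by the
        # assertions: negative keys are input nodes, 0 and 1 are the two
        # output nodes, and 2 is a hidden node.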
        graph = ((-1, 1), (-2, 0), (-2, 1), (-1, 2), (2, 0), (-1, 0))
        weights = (1, 1, 1, 1, 1, 1)

        genome = generate_genome_given_graph(graph, weights)
        layers = transform_genome_to_layers(genome=genome)

        layer_0 = layers[0]
        self.assertEqual(layer_0.input_keys, [2, -2, -1])
        self.assertEqual(layer_0.output_keys, [0, 1])
        self.assertEqual(layer_0.indices_of_needed_nodes, [(1, 0), (1, 1)])
        self.assertEqual(layer_0.indices_of_nodes_to_cache, [])
        self.assertTrue(
            torch.allclose(layer_0.weight_mean,
                           torch.tensor([[1.0, 1.0, 1.0], [0.0, 1.0, 1.0]]),
                           atol=1e-02))

        layer_1 = layers[1]
        self.assertEqual(layer_1.input_keys, [-2, -1])
        self.assertEqual(layer_1.output_keys, [2])
        self.assertEqual(layer_1.indices_of_needed_nodes, [])
        self.assertEqual(layer_1.indices_of_nodes_to_cache, [0, 1])
        self.assertTrue(
            torch.allclose(layer_1.weight_mean,
                           torch.tensor([[0.0, 1.0]]),
                           atol=1e-02))
Example #4
    def test_network_structure_1(self):
        '''
        2 layers
        '''
        self.config = create_configuration(filename='/regression-miso.json')
        self.config.node_activation = 'identity'
        genome = generate_genome_given_graph(graph=((-1, 1), (-2, 1), (1, 0)),
                                             connection_weights=(1.0, 2.0,
                                                                 3.0))

        layers = transform_genome_to_layers(genome)

        self.assertEqual(layers[0].input_keys, [1])
        self.assertEqual(layers[0].indices_of_needed_nodes, [])
        self.assertEqual(layers[0].indices_of_nodes_to_cache, [])
        self.assertTrue(
            torch.allclose(layers[0].weight_mean,
                           torch.tensor([[3.0]]),
                           atol=1e-02))

        self.assertEqual(layers[1].input_keys, [-2, -1])
        self.assertEqual(layers[1].indices_of_needed_nodes, [])
        self.assertEqual(layers[1].indices_of_nodes_to_cache, [])
        self.assertTrue(
            torch.allclose(layers[1].weight_mean,
                           torch.tensor([[2.0, 1.0]]),
                           atol=1e-02))
Example #5
    def test_regression_case(self):
        config = create_configuration(filename='/regression-siso.json')
        config.parallel_evaluation = False

        genome = Genome(key=1)
        genome.create_random_genome()

        dataset = get_dataset(config.dataset,
                              train_percentage=config.train_percentage,
                              testing=True)

        n_samples = 3
        network = ComplexStochasticNetwork(genome=genome)

        x, y_true, output_distribution = calculate_prediction_distribution(
            network,
            dataset=dataset,
            problem_type=config.problem_type,
            is_testing=True,
            n_samples=n_samples,
            use_sigmoid=False)
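        # The sampled predictive distribution is expected to have one row per
        # test point, one column per drawn sample, and one slice per output.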
        expected_output_distribution_shape = [
            len(y_true), n_samples, config.n_output
        ]
        self.assertEqual(list(output_distribution.shape),
                         expected_output_distribution_shape)
Example #6
    def setUp(self) -> None:
        self.report = Mock()
        self.notifier = Mock()
        self.config = create_configuration(
            filename='/classification-miso.json')
        self.config.node_activation = 'identity'
        self.config.n_generations = 1
        self.config.pop_size = 20
Example #7
    def test_network_structure_siso(self):
        self.config = create_configuration(filename='/regression-siso.json')
        genome = generate_genome_with_hidden_units(
            n_input=self.config.n_input, n_output=self.config.n_output)

        model = StochasticNetworkOld(genome=genome)

        input = torch.tensor([1.0])
        result = model(input.data)

        self.assertEqual(len(result), self.config.n_output)
Example #8
    def test_network_case(self):
        config = create_configuration(filename='/regression-siso.json')
        n_samples = 3
        network = ProbabilisticFeedForward(1, 1, False, 1, 1)
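        # Each step returns the estimator, so the pipeline below can be
        # chained: estimate, then dispersion quantiles, then metrics per quantile.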
        estimator = PredictionDistributionEstimatorNetwork(network=network, config=config, testing=True,
                                                           n_samples=n_samples)\
            .estimate() \
            .enrich_with_dispersion_quantile() \
            .calculate_metrics_by_dispersion_quantile()

        results = estimator.results
        self.assertTrue(isinstance(results, pd.DataFrame))
Example #9
    def test_same_input_gives_different_result(self):
        self.config = create_configuration(filename='/regression-siso.json')
        genome = generate_genome_with_hidden_units(
            n_input=self.config.n_input, n_output=self.config.n_output)

        model = StochasticNetworkOld(genome=genome)

        input = torch.tensor([[1.0]])
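        # The network is stochastic, so two forward passes over the same input
        # are expected to produce different outputs.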
        result_1 = model(input.data)
        result_2 = model(input.data)

        self.assertNotEqual(result_1.item(), result_2.item())
Example #10
    def test_happy_path_miso(self):
        # Multiple-Input Single-Output
        self.config = create_configuration(
            filename='/classification-miso.json')
        self.config.parallel_evaluation = False
        self.config.n_processes = 1
        genome = generate_genome_with_hidden_units(
            n_input=self.config.n_input, n_output=self.config.n_output)
        population = {1: genome}
        evaluation_engine = EvaluationStochasticEngine()

        population = evaluation_engine.evaluate(population=population)

        self.assertEqual(type(population.get(1).fitness), float)
Example #11
    def test_classification_case(self):
        config = create_configuration(filename='/classification-miso.json')
        genome = Genome(key=1)
        genome.create_random_genome()
        n_samples = 5

        estimator = PredictionDistributionEstimatorGenome(genome=genome, config=config, testing=True,
                                                          n_samples=n_samples)\
            .estimate() \
            .enrich_with_dispersion_quantile() \
            .calculate_metrics_by_dispersion_quantile()

        results = estimator.results
        self.assertTrue(isinstance(results, pd.DataFrame))
Example #12
    def test_standard_network_to_genome_to_stochastic_network(self):
        config = create_configuration(filename='/classification-miso.json')
        n_neurons_per_layer = 3
        network = FeedForward(n_input=config.n_input, n_output=config.n_output,
                              n_neurons_per_layer=n_neurons_per_layer,
                              n_hidden_layers=1)

        std = 0.1
        genome = get_genome_from_standard_network(network, std=std)

        stochastic_network = ComplexStochasticNetwork(genome=genome)

        parameters = network.state_dict()
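        # The stochastic layers' mean weights (qw_mean) should reproduce the
        # standard network's weights within tolerance.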

        self.assertTrue(torch.allclose(parameters['layer_0.weight'], stochastic_network.layer_0.qw_mean, atol=1e-02))
        self.assertTrue(torch.allclose(parameters['layer_1.weight'], stochastic_network.layer_1.qw_mean, atol=1e-02))
Example #13
def main():
    dataset = MNISTDownsampledDataset(train_percentage=0.4, dataset_type='train')
    # dataset = MNISTDataset(dataset_type='test')
    # dataset = MNISTBinaryDataset(dataset_type='test')
    DATASET = 'mnist_downsampled'
    config = create_configuration(filename=f'/{DATASET}.json')
    config.n_output = 10
    dataset.generate_data()

    print(len(dataset))
    print(dataset)
    selection = random.choice(list(range(len(dataset))))
    print(selection)
    print(dataset.y)
    x, y = dataset[selection]
    x = x.squeeze().numpy()
    plt.imshow(x)
    plt.show()
Example #14
import torch

from config_files.configuration_utils import create_configuration
from deep_learning.probabilistic.evaluate_probabilistic_dl import EvaluateProbabilisticDL
from neat.dataset.regression_example import RegressionExample1Dataset

import matplotlib.pyplot as plt

from neat.evaluation.utils import get_dataset

# config_file = '/regression-siso.json'
dataset_name = 'regression-siso'
config = create_configuration(filename=f'/{dataset_name}.json')
config.train_percentage = 0.75
config.n_samples = 100
# network_filename = f'network-probabilistic-classification.pt'
dataset = get_dataset(dataset=config.dataset,
                      train_percentage=config.train_percentage,
                      random_state=config.dataset_random_state,
                      noise=config.noise,
                      label_noise=config.label_noise)

# TODO: fix Memory-leakage in this network when doing backprop
n_samples = 1000
is_cuda = False

lr = 0.01
weight_decay = 0.0005
n_epochs = 1000

batch_size = 50000
Example #15
    def setUp(self) -> None:
        self.config = create_configuration(filename='/mnist_binary.json')
        self.config.n_output = 2
Example #16
    def setUp(self) -> None:
        self.config = create_configuration(filename='/classification-miso.json')
        self.path = tempfile.mkdtemp()
Example #17
    def setUp(self) -> None:
        self.config = create_configuration(filename='/regression-miso.json')
Example #18
    def setUp(self) -> None:
        self.config = create_configuration(filename='/classification-miso.json')
        self.config.fix_std = False
Example #19
    def setUp(self) -> None:
        self.config = create_configuration(filename='/regression-miso.json')
        self.config.n_initial_hidden_neurons = 0
        self.config.is_initial_fully_connected = True
Example #20
    def setUp(self) -> None:
        self.config = create_configuration(filename='/regression-miso.json')
        self.config.node_activation = 'identity'
Example #21
import os

from config_files.configuration_utils import create_configuration
from experiments.reporting.report_repository import ReportRepository
from experiments.slack_client import SlackNotifier
from neat.evaluation_engine import JupyNeatFSEvaluationEngine
from neat.neat_logger import get_neat_logger
from neat.reporting.reports_jupyneat import EvolutionReportJupyNeat
from neat.utils import timeit

config_file = 'classification-miso'
config = create_configuration(filename=f'/{config_file}.json')

LOGS_PATH = f'{os.getcwd()}/'
logger = get_neat_logger(path=LOGS_PATH)

# TODO: better mechanism for override
config.n_generations = 1000
config.pop_size = 150
config.n_samples = 40

config.max_stagnation = 30
config.node_add_prob = 0.5

ALGORITHM_VERSION = 'bayes-neat'
DATASET = 'classification_example_1'
CORRELATION_ID = 'test'


@timeit
def main():
Example #22
    def setUp(self) -> None:
        self.config = create_configuration(filename='/classification-miso.json')
        self.config.node_activation = 'identity'
        self.config.parallel_evaluation = False
        self.config.n_samples = 2
        self.config.beta = 0.0
Example #23
    def setUp(self) -> None:
        self.config = create_configuration(filename='/classification-miso.json')
        self.config.node_activation = 'identity'
        self.n_epochs = 2
Example #24
import random

import numpy as np
import os
from config_files.configuration_utils import create_configuration

from deep_learning.standard.evaluate_standard_dl import EvaluateStandardDL
from neat.evaluation.utils import get_dataset
from neat.neat_logger import get_neat_logger

DATASET = 'iris'
# DATASET = 'titanic'
# DATASET = 'mnist_downsampled'

config = create_configuration(filename=f'/{DATASET}.json')
config.noise = 0.0
config.label_noise = 0.0
config.train_percentage = 0.75

lr = 0.01
weight_decay = 0.0005
n_epochs = 4000

config.dataset_random_state = random.sample(list(range(100)), k=1)[0]

# config.n_output = 3
LOGS_PATH = f'{os.getcwd()}/'
logger = get_neat_logger(path=LOGS_PATH)

network_filename = f'network-{DATASET}.pt'
dataset = get_dataset(dataset=config.dataset,