# Code example #1
def main():
    """Load a trained regression FeedForward network, convert it to a genome,
    evaluate the stochastic version, and plot predictions vs. ground truth."""
    model_filename = 'network-regression_1.pt'
    network = FeedForward(n_input=config.n_input,
                          n_output=config.n_output,
                          n_neurons_per_layer=n_neurons_per_layer,
                          n_hidden_layers=1)
    parameters = torch.load(f'./../../deep_learning/models/{model_filename}')
    network.load_state_dict(parameters)

    # NOTE(review): a negative std suggests this is a log-std parameter —
    # confirm against get_genome_from_standard_network.
    std = -3.1
    genome = get_genome_from_standard_network(network, std=std)

    evaluation_engine = EvaluationStochasticEngine(testing=False)

    x, y_true, y_pred, kl_posterior = \
        evaluation_engine.evaluate_genome(genome, n_samples=1, is_gpu=False, return_all=True)

    print()
    print(f'KL Posterior: {kl_posterior}')

    # undo the dataset scaling so the plot is in original units
    x = evaluation_engine.dataset.input_scaler.inverse_transform(x.numpy())
    y_pred = evaluation_engine.dataset.output_scaler.inverse_transform(
        y_pred.numpy())
    y_true = evaluation_engine.dataset.output_scaler.inverse_transform(
        y_true.numpy())

    # plot results
    plt.figure(figsize=(20, 20))
    plt.plot(x, y_true, 'r*')
    plt.plot(x, y_pred, 'b*')
    plt.show()

    # Bug fix: MSE is not a percentage — report the raw value instead of
    # multiplying by 100 and appending '%'.
    print(f'MSE: {mean_squared_error(y_true, y_pred)}')
# Code example #2
    def test_mapping(self):
        """A standard FeedForward net maps onto a Genome: layer_0 weights and
        biases become hidden-to-output genes, layer_1 biases the hidden nodes."""
        config = create_configuration(filename='/classification-miso.json')
        hidden_units = 3
        network = FeedForward(n_input=config.n_input, n_output=config.n_output,
                              n_neurons_per_layer=hidden_units,
                              n_hidden_layers=1)

        genome = get_genome_from_standard_network(network, std=0.1)
        self.assertEqual(type(genome), Genome)

        parameters = network.state_dict()
        # hidden to output: weight[out, hid] maps to connection (2 + hid, out)
        for hid in range(hidden_units):
            for out in range(2):
                self.assertEqual(parameters['layer_0.weight'][out, hid],
                                 genome.connection_genes[(2 + hid, out)].get_mean())

        # output-node biases are node genes 0 and 1
        for out in range(2):
            self.assertEqual(parameters['layer_0.bias'][out],
                             genome.node_genes[out].get_mean())

        # input to hidden: hidden-node biases are node genes 2..4
        for hid in range(hidden_units):
            self.assertEqual(parameters['layer_1.bias'][hid],
                             genome.node_genes[2 + hid].get_mean())
# Code example #3
    def from_dict(network_dict: dict) -> FeedForward:
        """Rebuild a FeedForward network from its serialized dict form."""
        network = FeedForward(
            n_input=network_dict['n_input'],
            n_output=network_dict['n_output'],
            n_neurons_per_layer=network_dict['n_neurons_per_layer'],
            n_hidden_layers=network_dict['n_hidden_layers'])

        # restore weights and biases from plain nested lists
        state = OrderedDict(
            (name, torch.Tensor(np.array(values)))
            for name, values in network_dict['state'].items())
        network.load_state_dict(state)
        return network
# Code example #4
 def _initialize(self):
     """Build the network, loss criterion, and optimizer; move to GPU if requested."""
     cfg = self.config
     net = FeedForward(
         n_input=cfg.n_input,
         n_output=cfg.n_output,
         n_neurons_per_layer=self.n_neurons_per_layer,
         n_hidden_layers=self.n_hidden_layers)
     net.reset_parameters()
     self.network = net
     self.criterion = _get_loss_by_problem(problem_type=cfg.problem_type)
     self.optimizer = Adam(net.parameters(),
                           lr=self.lr,
                           weight_decay=self.weight_decay)
     if self.is_cuda:
         net.cuda()
         self.criterion.cuda()
# Code example #5
    def test_standard_network_to_genome_to_stochastic_network(self):
        """Round trip: FeedForward -> Genome -> ComplexStochasticNetwork keeps
        the layer weight means (within tolerance)."""
        config = create_configuration(filename='/classification-miso.json')
        network = FeedForward(n_input=config.n_input, n_output=config.n_output,
                              n_neurons_per_layer=3,
                              n_hidden_layers=1)

        genome = get_genome_from_standard_network(network, std=0.1)
        stochastic_network = ComplexStochasticNetwork(genome=genome)

        parameters = network.state_dict()
        for expected, layer in ((parameters['layer_0.weight'], stochastic_network.layer_0),
                                (parameters['layer_1.weight'], stochastic_network.layer_1)):
            self.assertTrue(torch.allclose(expected, layer.qw_mean, atol=1e-02))
# Code example #6
def main():
    """Load a trained classification network, convert it to a genome, evaluate
    the stochastic version, and plot predicted classes with the true boundary."""
    model_filename = 'network-classification.pt'
    network = FeedForward(n_input=config.n_input, n_output=config.n_output,
                          n_neurons_per_layer=n_neurons_per_layer,
                          n_hidden_layers=1)
    network.load_state_dict(
        torch.load(f'./../../deep_learning/models/{model_filename}'))

    genome = get_genome_from_standard_network(network, std=-3.1)

    evaluation_engine = EvaluationStochasticEngine(testing=False)

    x, y_true, y_pred, kl_posterior = \
        evaluation_engine.evaluate_genome(genome, n_samples=100, is_gpu=False, return_all=True)

    print()
    print(f'KL Posterior: {kl_posterior}')

    # back to original input units
    x = evaluation_engine.dataset.input_scaler.inverse_transform(x.numpy())
    y_true = y_true.numpy()

    # predicted class = argmax over the class dimension
    y_pred = np.argmax(y_pred.numpy(), 1)

    # scatter the predictions coloured by class, with the true separation line
    frame = pd.DataFrame(x, columns=['x1', 'x2'])
    frame['y'] = y_pred
    x1_limit, x2_limit = evaluation_engine.dataset.get_separation_line()

    plt.figure()
    ax = sns.scatterplot(x='x1', y='x2', hue='y', data=frame)
    ax.plot(x1_limit, x2_limit, 'g-', linewidth=2.5)
    plt.show()

    print('Confusion Matrix:')
    print(confusion_matrix(y_true, y_pred))

    print(f'Accuracy: {accuracy_score(y_true, y_pred) * 100} %')
# Code example #7
def regression_problem_learn_from_nn():
    """Evaluate a genome prepared from a pretrained standard network on the
    regression problem and plot predictions against the ground truth."""
    # standard network
    network = FeedForward(n_input=config.n_input,
                          n_output=config.n_output,
                          n_neurons_per_layer=n_neurons_per_layer,
                          n_hidden_layers=1)
    parameters = torch.load('./deep_learning/models/network.pt')
    network.load_state_dict(parameters)

    genome = prepare_genome(parameters)
    print(genome)

    engine = EvaluationStochasticEngine(testing=False,
                                        batch_size=None)
    x, y_true, y_pred, kl_posterior = engine.evaluate_genome(
        genome, n_samples=100, is_gpu=False, return_all=True)

    plt.figure(figsize=(20, 20))
    for values, marker in ((y_true, 'b*'), (y_pred, 'r*')):
        plt.plot(x.numpy().reshape(-1), values.numpy().reshape(-1), marker)
    plt.show()
    print(f'KL Div - Posterior: {kl_posterior}')
# Code example #8
def main():
    """Build a randomly initialised genome matching a pretrained network's
    architecture, evaluate it with the old engine, and plot the results."""
    # standard network
    network = FeedForward(n_input=config.n_input,
                          n_output=config.n_output,
                          n_neurons_per_layer=n_neurons_per_layer,
                          n_hidden_layers=1)
    parameters = torch.load('./../deep_learning/models/network.pt')
    network.load_state_dict(parameters)

    genome = generate_genome_with_hidden_units(n_input=config.n_input,
                                               n_output=config.n_output,
                                               n_hidden=n_neurons_per_layer)
    print(genome)

    # Bug fix: the printed label was missing its closing parenthesis.
    print('KL (q(w)||p(w))')
    kl_qw_pw = compute_kl_qw_pw(genome=genome)
    print(kl_qw_pw)

    evaluation_engine = EvaluationEngine(testing=False, batch_size=1)

    # setup network
    # NOTE(review): this stochastic network is built and put in eval mode but
    # never used below (evaluate_genome works from the genome) — confirm
    # whether it can be removed.
    network = StochasticNetworkOld(genome=genome)
    network.eval()

    x, y_true, y_pred, kl_posterior, kl_qw_pw = \
        evaluation_engine.evaluate_genome(genome, n_samples=10, is_gpu=False, return_all=True)
    plt.figure(figsize=(20, 20))
    plt.plot(x.numpy().reshape(-1), y_true.numpy().reshape(-1), 'b*')
    plt.plot(x.numpy().reshape(-1), y_pred.numpy().reshape(-1), 'r*')
    plt.show()
    print(f'KL Div - Posterior: {kl_posterior}')
    print(f'KL Div - Prior: {kl_qw_pw}')
    # NOTE(review): this difference is a KL gap, not a mean squared error —
    # the 'MSE' label looks wrong; confirm intent before relabelling.
    print(f'MSE: {kl_posterior - kl_qw_pw}')
# Code example #9
class EvaluateStandardDL:
    """Backprop trainer/evaluator for a standard (non-stochastic) FeedForward net.

    Runs ``n_repetitions`` independent training runs with early stopping on a
    validation split and keeps the best network (lowest validation loss seen
    across all repetitions).
    """
    def __init__(self,
                 dataset,
                 batch_size,
                 lr,
                 weight_decay,
                 n_epochs,
                 n_neurons_per_layer,
                 n_hidden_layers,
                 is_cuda,
                 n_repetitions=1,
                 backprop_report=Mock(),
                 n_samples=0,
                 beta=0.0):
        # NOTE(review): `n_samples` and `beta` are accepted but never stored or
        # used by this class — confirm whether they can be dropped.
        # NOTE(review): `backprop_report=Mock()` is evaluated once at def time
        # and shared across instances — consider a None default instead.
        self.config = get_configuration()
        self.is_cuda = is_cuda
        self.dataset = dataset
        self.batch_size = batch_size
        self.lr = lr
        self.weight_decay = weight_decay
        self.n_epochs = n_epochs
        self.n_neurons_per_layer = n_neurons_per_layer
        self.n_hidden_layers = n_hidden_layers
        self.n_repetitions = n_repetitions
        self.backprop_report = backprop_report

        # best-result tracking; 100000 acts as an "infinite" loss sentinel
        self.last_update = 0
        self.best_loss_val = 100000
        self.best_loss_val_rep = None
        self.best_network_rep = None
        self.best_network = None

    def run(self):
        """Run all repetitions, keeping the overall best network."""
        for i in range(self.n_repetitions):
            self._run()
            # keep the best network across repetitions
            if self.best_loss_val_rep < self.best_loss_val:
                self.best_loss_val = self.best_loss_val_rep
                self.best_network = self.best_network_rep
            # NOTE(review): 'LOST' is presumably a typo for 'LOSS' in these
            # messages — confirm before changing user-visible output.
            print(f'BEST LOST: {self.best_loss_val_rep}')
        print(f'BEST LOST: {self.best_loss_val}')

    def _run(self):
        """One full training run: split, train with early stopping, track best."""
        self.best_network_rep = None
        self.best_loss_val_rep = 100000

        self._initialize()

        # the full training set is used as a single batch
        x_batch, y_batch = self.dataset.x_train, self.dataset.y_train
        x_train, x_val, y_train, y_val = self.train_val_split(x_batch,
                                                              y_batch,
                                                              val_ratio=0.2)

        x_train, y_train = _prepare_batch_data(
            x_batch=x_train,
            y_batch=y_train,
            problem_type=self.config.problem_type,
            is_gpu=self.config.is_gpu,
            n_input=self.config.n_input,
            n_output=self.config.n_output,
            n_samples=1)

        x_val, y_val = _prepare_batch_data(
            x_batch=x_val,
            y_batch=y_val,
            problem_type=self.config.problem_type,
            is_gpu=self.config.is_gpu,
            n_input=self.config.n_input,
            n_output=self.config.n_output,
            n_samples=1)

        if self.is_cuda:
            x_train = x_train.cuda()
            y_train = y_train.cuda()
            x_val = x_val.cuda()
            y_val = y_val.cuda()

        # train: one optimizer step per epoch on the whole training batch
        for epoch in range(self.n_epochs):
            loss_train = self._train_one(x_train, y_train)
            _, _, _, loss_val = self._evaluate(x_val,
                                               y_val,
                                               network=self.network)
            self.backprop_report.report_epoch(epoch, loss_train, loss_val)
            if loss_val < self.best_loss_val_rep:
                self.best_loss_val_rep = loss_val
                # deep-copy so later training does not mutate the saved best
                self.best_network_rep = copy.deepcopy(self.network)
                self.last_update = epoch
                print(f'New best network: {loss_val}')

            # early stopping on validation loss
            if epoch - self.last_update > N_EPOCHS_WITHOUT_IMPROVING:
                print(
                    f'Breaking training as not improving for {N_EPOCHS_WITHOUT_IMPROVING} epochs'
                )
                break

            if epoch % 100 == 0:
                print(f'Epoch = {epoch}. Error: {loss_train}')

        # NOTE(review): `loss_train` is unbound here if n_epochs == 0 — confirm
        # n_epochs is always >= 1.
        print(f'Final Train Error: {loss_train}')
        print(f'Best Val Error: {self.best_loss_val_rep}')

    def _initialize(self):
        """Build the network, loss criterion, and optimizer (GPU if requested)."""
        self.network = FeedForward(
            n_input=self.config.n_input,
            n_output=self.config.n_output,
            n_neurons_per_layer=self.n_neurons_per_layer,
            n_hidden_layers=self.n_hidden_layers)
        self.network.reset_parameters()
        self.criterion = _get_loss_by_problem(
            problem_type=self.config.problem_type)
        self.optimizer = Adam(self.network.parameters(),
                              lr=self.lr,
                              weight_decay=self.weight_decay)
        if self.is_cuda:
            self.network.cuda()
            self.criterion.cuda()

    def _train_one(self, x_batch, y_batch):
        """One forward/backward/step pass; returns the batch loss as a float."""
        loss_epoch = 0
        output = self.network(x_batch)
        loss = self.criterion(output, y_batch)
        loss_epoch += loss.data.item()
        self.optimizer.zero_grad()
        loss.backward()  # Backward Propagation
        self.optimizer.step()  # Optimizer update
        return loss_epoch

    def save_network(self, name):
        """Persist the current network's weights under ../models/<name>."""
        # save weights
        filename = ''.join([
            os.path.dirname(os.path.realpath(__file__)), f'/../models/{name}'
        ])
        torch.save(self.network.state_dict(), filename)

    def evaluate(self):
        """Evaluate the best network on the test set.

        Returns (x, y_true, y_pred), moved back to CPU when CUDA was used.
        """
        x_batch, y_batch = self.dataset.x_test, self.dataset.y_test
        x_batch, y_batch = _prepare_batch_data(
            x_batch=x_batch,
            y_batch=y_batch,
            problem_type=self.config.problem_type,
            is_gpu=self.config.is_gpu,
            n_input=self.config.n_input,
            n_output=self.config.n_output,
            n_samples=1)

        if self.is_cuda:
            x_batch = x_batch.cuda()
            y_batch = y_batch.cuda()

        x, y_true, y_pred, _ = self._evaluate(x_batch,
                                              y_batch,
                                              network=self.best_network)

        if self.is_cuda:
            x = x.cpu()
            y_pred = y_pred.cpu()
            y_true = y_true.cpu()

        return x, y_true, y_pred

    def _evaluate(self, x_batch, y_batch, network):
        """Forward pass without gradients; returns (x, y_true, y_pred, loss)."""
        network.eval()

        chunks_x = []
        chunks_y_pred = []
        chunks_y_true = []

        with torch.no_grad():
            output = network(x_batch)
            # loss is computed on the raw network output (pre-softmax)
            loss = self.criterion(output, y_batch)
            loss_eval = loss.data.item()

            # NOTE(review): Softmax is applied unconditionally, yet
            # train_val_split also supports regression — confirm y_pred is
            # meant to be softmaxed for regression problems too.
            m = Softmax(dim=1)
            output = m(output)

            chunks_x.append(x_batch)
            chunks_y_pred.append(output)
            chunks_y_true.append(y_batch)

        x = torch.cat(chunks_x, dim=0)
        y_pred = torch.cat(chunks_y_pred, dim=0)
        y_true = torch.cat(chunks_y_true, dim=0)

        return x, y_true, y_pred, loss_eval

    def train_val_split(self, x_batch, y_batch, val_ratio=0.2):
        """Split tensors into train/validation parts, casting targets by problem
        type (long for classification, float for regression)."""
        x_train, x_val, y_train, y_val = train_test_split(x_batch.numpy(),
                                                          y_batch.numpy(),
                                                          test_size=val_ratio)
        x_train = torch.tensor(x_train).float()
        x_val = torch.tensor(x_val).float()
        if self.config.problem_type == 'classification':
            y_train = torch.tensor(y_train).long()
            y_val = torch.tensor(y_val).long()
        elif self.config.problem_type == 'regression':
            y_train = torch.tensor(y_train).float()
            y_val = torch.tensor(y_val).float()

        return x_train, x_val, y_train, y_val