def main():
    """Load a trained regression network, convert it to a genome, evaluate
    the genome stochastically, and plot predictions against ground truth.

    Side effects: reads a model file from disk, shows a matplotlib figure,
    prints the KL posterior and the MSE.
    """
    model_filename = 'network-regression_1.pt'
    network = FeedForward(n_input=config.n_input, n_output=config.n_output,
                          n_neurons_per_layer=n_neurons_per_layer,
                          n_hidden_layers=1)
    parameters = torch.load(f'./../../deep_learning/models/{model_filename}')
    network.load_state_dict(parameters)

    # NOTE(review): std is negative — presumably interpreted in log-space by
    # get_genome_from_standard_network; confirm against its implementation.
    std = -3.1
    genome = get_genome_from_standard_network(network, std=std)

    evaluation_engine = EvaluationStochasticEngine(testing=False)

    x, y_true, y_pred, kl_posterior = \
        evaluation_engine.evaluate_genome(genome, n_samples=1, is_gpu=False,
                                          return_all=True)

    print()
    print(f'KL Posterior: {kl_posterior}')

    # Undo the dataset scaling so the plot and metric are in original units.
    x = x.numpy()
    x = evaluation_engine.dataset.input_scaler.inverse_transform(x)
    y_pred = evaluation_engine.dataset.output_scaler.inverse_transform(
        y_pred.numpy())
    y_true = evaluation_engine.dataset.output_scaler.inverse_transform(
        y_true.numpy())

    # plot results
    plt.figure(figsize=(20, 20))
    plt.plot(x, y_true, 'r*')
    plt.plot(x, y_pred, 'b*')
    plt.show()

    # Bug fix: MSE is not a percentage — the old output multiplied it by 100
    # and appended '%', which misrepresents the metric. Report it raw.
    print(f'MSE: {mean_squared_error(y_true, y_pred)}')
def test_mapping(self):
    """The weights and biases of a standard FeedForward network must map
    one-to-one onto the connection and node genes of the generated genome."""
    config = create_configuration(filename='/classification-miso.json')
    n_neurons_per_layer = 3
    network = FeedForward(n_input=config.n_input, n_output=config.n_output,
                          n_neurons_per_layer=n_neurons_per_layer,
                          n_hidden_layers=1)
    genome = get_genome_from_standard_network(network, std=0.1)
    self.assertEqual(type(genome), Genome)

    params = network.state_dict()

    # hidden to output: layer_0 weight at (output_unit, hidden_unit) maps to
    # the connection gene keyed (hidden_node_id, output_node_id).
    weight_to_gene = [
        ((0, 0), (2, 0)), ((1, 0), (2, 1)),
        ((0, 1), (3, 0)), ((1, 1), (3, 1)),
        ((0, 2), (4, 0)), ((1, 2), (4, 1)),
    ]
    for (row, col), gene_key in weight_to_gene:
        self.assertEqual(params['layer_0.weight'][row, col],
                         genome.connection_genes[gene_key].get_mean())

    # Output biases map to node genes 0 and 1.
    for unit in (0, 1):
        self.assertEqual(params['layer_0.bias'][unit],
                         genome.node_genes[unit].get_mean())

    # input to hidden: layer_1 biases map to hidden node genes 2..4.
    for unit in (0, 1, 2):
        self.assertEqual(params['layer_1.bias'][unit],
                         genome.node_genes[2 + unit].get_mean())
def from_dict(network_dict: dict) -> FeedForward:
    """Rebuild a FeedForward network from its serialized dictionary form.

    Expects the architecture keys ('n_input', 'n_output',
    'n_neurons_per_layer', 'n_hidden_layers') plus a 'state' mapping of
    parameter name -> nested list of values.
    """
    network = FeedForward(
        n_input=network_dict['n_input'],
        n_output=network_dict['n_output'],
        n_neurons_per_layer=network_dict['n_neurons_per_layer'],
        n_hidden_layers=network_dict['n_hidden_layers'])

    # Restore weights and biases: turn each serialized list back into a tensor.
    state = OrderedDict(
        (name, torch.Tensor(np.array(values)))
        for name, values in network_dict['state'].items())
    network.load_state_dict(state)
    return network
def _initialize(self):
    """Create the network, the problem-specific loss, and the Adam optimizer;
    move network and loss to the GPU when CUDA is enabled."""
    self.network = FeedForward(
        n_input=self.config.n_input,
        n_output=self.config.n_output,
        n_neurons_per_layer=self.n_neurons_per_layer,
        n_hidden_layers=self.n_hidden_layers)
    self.network.reset_parameters()

    # Loss is selected from the configured problem type (regression vs
    # classification is decided by the helper).
    self.criterion = _get_loss_by_problem(
        problem_type=self.config.problem_type)

    self.optimizer = Adam(self.network.parameters(),
                          lr=self.lr,
                          weight_decay=self.weight_decay)

    if self.is_cuda:
        self.network.cuda()
        self.criterion.cuda()
def test_standard_network_to_genome_to_stochastic_network(self):
    """Round trip: a standard network converted to a genome and back into a
    ComplexStochasticNetwork must keep (approximately) the same weight means."""
    config = create_configuration(filename='/classification-miso.json')
    n_neurons_per_layer = 3
    source_network = FeedForward(n_input=config.n_input,
                                 n_output=config.n_output,
                                 n_neurons_per_layer=n_neurons_per_layer,
                                 n_hidden_layers=1)
    genome = get_genome_from_standard_network(source_network, std=0.1)
    stochastic_network = ComplexStochasticNetwork(genome=genome)

    state = source_network.state_dict()
    # Means of the stochastic layers must match the original weights within
    # a loose tolerance.
    for layer_name, stochastic_layer in (
            ('layer_0', stochastic_network.layer_0),
            ('layer_1', stochastic_network.layer_1)):
        self.assertTrue(torch.allclose(state[f'{layer_name}.weight'],
                                       stochastic_layer.qw_mean,
                                       atol=1e-02))
def main():
    """Load a trained classification network, convert it to a genome,
    evaluate it stochastically, and plot/print classification results."""
    model_filename = 'network-classification.pt'
    network = FeedForward(n_input=config.n_input, n_output=config.n_output,
                          n_neurons_per_layer=n_neurons_per_layer,
                          n_hidden_layers=1)
    state = torch.load(f'./../../deep_learning/models/{model_filename}')
    network.load_state_dict(state)

    # NOTE(review): std is negative — presumably a log-space value; confirm
    # against get_genome_from_standard_network.
    genome = get_genome_from_standard_network(network, std=-3.1)
    # genome = generate_genome_with_hidden_units(2, 2, n_hidden=1)

    engine = EvaluationStochasticEngine(testing=False)

    x, y_true, y_pred, kl_posterior = \
        engine.evaluate_genome(genome, n_samples=100, is_gpu=False,
                               return_all=True)

    print()
    print(f'KL Posterior: {kl_posterior}')

    # Back to original input units for plotting.
    x = engine.dataset.input_scaler.inverse_transform(x.numpy())
    y_true = y_true.numpy()

    # plot results: predicted class is the argmax over class scores.
    y_pred = np.argmax(y_pred.numpy(), 1)
    df = pd.DataFrame(x, columns=['x1', 'x2'])
    df['y'] = y_pred

    x1_limit, x2_limit = engine.dataset.get_separation_line()

    plt.figure()
    ax = sns.scatterplot(x='x1', y='x2', hue='y', data=df)
    ax.plot(x1_limit, x2_limit, 'g-', linewidth=2.5)
    plt.show()

    print('Confusion Matrix:')
    print(confusion_matrix(y_true, y_pred))
    print(f'Accuracy: {accuracy_score(y_true, y_pred) * 100} %')
def regression_problem_learn_from_nn():
    """Build a genome from a pretrained standard network and evaluate it on
    the regression problem, plotting predictions against ground truth."""
    # standard network
    source_network = FeedForward(n_input=config.n_input,
                                 n_output=config.n_output,
                                 n_neurons_per_layer=n_neurons_per_layer,
                                 n_hidden_layers=1)
    state = torch.load('./deep_learning/models/network.pt')
    source_network.load_state_dict(state)

    genome = prepare_genome(state)
    print(genome)

    engine = EvaluationStochasticEngine(testing=False, batch_size=None)

    x, y_true, y_pred, kl_posterior = \
        engine.evaluate_genome(genome, n_samples=100, is_gpu=False,
                               return_all=True)

    # Ground truth in blue, predictions in red.
    plt.figure(figsize=(20, 20))
    plt.plot(x.numpy().reshape(-1), y_true.numpy().reshape(-1), 'b*')
    plt.plot(x.numpy().reshape(-1), y_pred.numpy().reshape(-1), 'r*')
    plt.show()

    print(f'KL Div - Posterior: {kl_posterior}')
def main():
    """Generate a genome with hidden units, evaluate it with the old
    stochastic evaluation engine, and plot predictions vs. ground truth.

    Side effects: reads a model file from disk, shows a matplotlib figure,
    prints genome details and KL metrics.
    """
    # standard network
    network = FeedForward(n_input=config.n_input, n_output=config.n_output,
                          n_neurons_per_layer=n_neurons_per_layer,
                          n_hidden_layers=1)
    parameters = torch.load('./../deep_learning/models/network.pt')
    network.load_state_dict(parameters)

    # NOTE(review): the loaded parameters are not used below — the genome is
    # generated from scratch and prepare_genome(parameters) is disabled.
    genome = generate_genome_with_hidden_units(n_input=config.n_input,
                                               n_output=config.n_output,
                                               n_hidden=n_neurons_per_layer)
    # genome = prepare_genome(parameters)
    print(genome)

    # Bug fix: the printed label was missing its closing parenthesis
    # ('KL (q(w)||p(w)').
    print('KL (q(w)||p(w))')
    kl_qw_pw = compute_kl_qw_pw(genome=genome)
    print(kl_qw_pw)

    evaluation_engine = EvaluationEngine(testing=False, batch_size=1)

    # setup network
    network = StochasticNetworkOld(genome=genome)
    network.eval()

    x, y_true, y_pred, kl_posterior, kl_qw_pw = \
        evaluation_engine.evaluate_genome(genome, n_samples=10, is_gpu=False,
                                          return_all=True)

    plt.figure(figsize=(20, 20))
    plt.plot(x.numpy().reshape(-1), y_true.numpy().reshape(-1), 'b*')
    plt.plot(x.numpy().reshape(-1), y_pred.numpy().reshape(-1), 'r*')
    plt.show()

    print(f'KL Div - Posterior: {kl_posterior}')
    print(f'KL Div - Prior: {kl_qw_pw}')
    # NOTE(review): labelled 'MSE' but computed as kl_posterior - kl_qw_pw,
    # i.e. the data-fit term of the ELBO, not a mean squared error — confirm
    # the intended label.
    print(f'MSE: {kl_posterior - kl_qw_pw}')