def test_network_sizes_when_add_layer_method_used_without_calling_compile(
            self):
        # given
        network = Network(inputs_n=5)
        network.add_layer(Layer(3, Sigmoid))
        network.add_layer(Layer(1, Sigmoid))

        expected_n_layers = 2
        expected_n_neurons_in_layers = [3, 1]
        expected_first_layer_inputs_n = 5
        expected_second_layer_inputs_n = 3

        # then
        self.assertEqual(network.n_layers, expected_n_layers)
        self.assertEqual(network.n_neurons_in_layers,
                         expected_n_neurons_in_layers)
        self.assertEqual(network.layers[0].inputs_n,
                         expected_first_layer_inputs_n)
        self.assertEqual(network.layers[1].inputs_n,
                         expected_second_layer_inputs_n)

        # weights and biases are not allocated until compile() is called
        self.assertRaises(KeyError, lambda: network.weights[0])
        self.assertRaises(KeyError, lambda: network.weights[1])
        self.assertRaises(KeyError, lambda: network.biases[0])
        self.assertRaises(KeyError, lambda: network.biases[1])
Example #2
    def test_backward_propagate(self):
        # given
        self.network = Network(inputs_n=4)
        self.network.add_layer(Layer(neurons_n=3, activation_f=Sigmoid))
        self.network.add_layer(Layer(neurons_n=1, activation_f=Sigmoid))
        self.network.compile()

        iris = load_iris()
        X = iris.data

        # target labels
        y = iris.target
        X_train, X_test, y_train, y_test = \
            train_test_split(X[:100], y[:100], test_size=0.2, shuffle=True)

        # when
        for epoch in range(112):
            for i in range(y_train.size):
                inputs_x = X_train[i].T  # 1 set of inputs
                desired_output = y_train[i]
                self.network.set_inputs(np.reshape(inputs_x, (4, 1)))
                self.network.set_desired_outputs(
                    np.reshape(desired_output, (1, 1)))
                self.network.forward_propagate()
                self.network.backward_propagate()

        correct_predictions = 0
        for i in range(y_test.size):
            inputs_x = X_test[i].T  # 1 set of inputs
            desired_output = y_test[i]
            self.network.set_inputs(np.reshape(inputs_x, (4, 1)))
            self.network.set_desired_outputs(np.reshape(
                desired_output, (1, 1)))
            self.network.forward_propagate()

            predicted = convert_output_to_prediction(
                self.network.get_actual_outputs())
            if predicted == y_test[i]:
                correct_predictions += 1

            # print("inputs: ", self.network.inputs_x)
            print("output predicted: ", self.network.get_actual_outputs())
            print("predicted: ", predicted)
            print("actual: ", y_test[i], "\n")

        print("correct predictions: ", correct_predictions)
Example #3
    def test_forward_propagate_sigmoid_activation(self):
        # given
        inputs_x = [[1], [2]]  # 1 set of inputs
        self.network = Network(inputs_n=2)
        self.network.add_layer(Layer(neurons_n=3, activation_f=Sigmoid))
        self.network.add_layer(Layer(neurons_n=1, activation_f=Sigmoid))
        self.network.compile()
        self.network.set_inputs(inputs_x)

        # set weights to 1 - so calculations are easy to do by hand
        self.network.set_all_weights_to_one()

        # when
        self.network.forward_propagate()

        # then
        expected_output = Sigmoid.value(3 * Sigmoid.value(4) + 1)
        self.assertEqual(self.network.get_actual_outputs(), expected_output)
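
The expected value can be verified by hand: with every weight and bias set to 1 (the linear test below, with z values 4 and 13, suggests set_all_weights_to_one also sets the biases to 1), each hidden neuron computes z = 1*1 + 1*2 + 1 = 4. A quick standalone check, assuming Sigmoid.value is the standard logistic function:

import math

def sigmoid(z):
    return 1.0 / (1.0 + math.exp(-z))

hidden = sigmoid(1 * 1 + 1 * 2 + 1)  # z = 4 at each of the 3 hidden neurons
output = sigmoid(3 * hidden + 1)     # z = 3 * Sigmoid(4) + 1
print(round(output, 3))              # ~0.981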
Example #4
    def __init__(self, num_inputs, num_units, num_layers, time_step, size,
                 scope):

        self.num_inputs = num_inputs
        self.num_units = num_units
        self.num_layers = num_layers
        self.time_step = time_step
        self.size = size
        self.scope = scope
        self.init = tf.random_normal_initializer()
        self.activation = tf.nn.tanh  # reference the function; tf.nn.tanh() would raise

        # assert input.shape[1] == self.time_step, \
        #     "Input dimension doesn't match with the time step"
        # (disabled: `input` here would be the Python builtin, not a tensor;
        # the constructor never receives the input sequence this check needs)

        self.proposal = Network(num_inputs=self.num_inputs,
                                num_units=self.num_units,
                                num_layers=self.num_layers,
                                num_levels=self.time_step,
                                scope_r="recognition")
Example #5
    def test_forward_propagate_linear_activation(self):
        # given
        inputs_x = [[1], [2]]  # 1 set of inputs
        self.network = Network(inputs_n=2)
        self.network.add_layer(Layer(neurons_n=3, activation_f=Linear))
        self.network.add_layer(Layer(neurons_n=1, activation_f=Linear))
        self.network.compile()
        self.network.set_inputs(inputs_x)

        # set weights to 1 - so calculations are easy to do by hand
        self.network.set_all_weights_to_one()

        # when
        self.network.forward_propagate()

        # then
        #                [layer][example_nr][neuron_nr]
        self.assertEqual(self.network.z[0][0][0], 4.0)
        self.assertEqual(self.network.z[1][0][0], 13.0)
        self.assertEqual(self.network.actual_outputs_a, 13)
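
The asserted z values follow from a short hand calculation, which a few lines of numpy reproduce; the weight orientation below is chosen for illustration and need not match the library's internal layout:

import numpy as np

x = np.array([[1], [2]])                   # column vector of inputs
W0, b0 = np.ones((3, 2)), np.ones((3, 1))  # hidden layer: 3 neurons, 2 inputs
W1, b1 = np.ones((1, 3)), np.ones((1, 1))  # output layer: 1 neuron, 3 inputs

z0 = W0 @ x + b0   # each hidden neuron: 1 + 2 + 1 = 4
z1 = W1 @ z0 + b1  # output neuron: 4 + 4 + 4 + 1 = 13 (linear activation)
print(z0.ravel(), z1.ravel())  # [4. 4. 4.] [13.]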
Example #6
    def test_network_sizes_when_add_layer_method_used_and_compile_method_called(
            self):
        # given
        network = Network(inputs_n=5)
        network.add_layer(Layer(3, Sigmoid))
        network.add_layer(Layer(1, Sigmoid))
        network.compile()

        expected_n_layers = 2
        expected_n_neurons_in_layers = [3, 1]
        expected_first_layer_biases_n = 3
        expected_second_layer_biases_n = 1
        expected_first_layer_weights_n = 5
        expected_second_layer_weights_n = 3
        expected_first_layer_inputs_n = 5
        expected_second_layer_inputs_n = 3

        # then
        self.assertEqual(network.n_layers, expected_n_layers)
        self.assertEqual(network.n_neurons_in_layers,
                         expected_n_neurons_in_layers)
        self.assertEqual(len(network.weights[0]),
                         expected_first_layer_weights_n)
        self.assertEqual(len(network.weights[1]),
                         expected_second_layer_weights_n)
        self.assertEqual(len(network.biases[0]), expected_first_layer_biases_n)
        self.assertEqual(len(network.biases[1]),
                         expected_second_layer_biases_n)
        self.assertEqual(network.layers[0].inputs_n,
                         expected_first_layer_inputs_n)
        self.assertEqual(network.layers[1].inputs_n,
                         expected_second_layer_inputs_n)
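
Together with the KeyError assertions in the first test, these length checks suggest compile() fills per-layer dictionaries, with each weight array holding one row per input. A minimal sketch of that behaviour; the names and initialisation are illustrative, not the library's actual internals:

import numpy as np

class MiniNetwork:
    def __init__(self, inputs_n):
        self.inputs_n = inputs_n
        self.layer_sizes = []
        self.weights = {}  # empty until compile(), so indexing raises KeyError
        self.biases = {}

    def add_layer(self, neurons_n):
        self.layer_sizes.append(neurons_n)

    def compile(self):
        fan_in = self.inputs_n
        for i, neurons_n in enumerate(self.layer_sizes):
            # one row per input, so len(weights[i]) == fan_in as asserted above
            self.weights[i] = np.random.randn(fan_in, neurons_n)
            self.biases[i] = np.zeros((neurons_n, 1))
            fan_in = neurons_n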
Example #7
    def setUp(self):
        self.network = Network(inputs_n=3)
        self.network.add_layer(Layer(neurons_n=2, activation_f=Sigmoid))
        self.network.add_layer(Layer(neurons_n=1, activation_f=Sigmoid))
        self.network.compile()
Example #8
class TestNetworkInputSetting(unittest.TestCase):

    def setUp(self):
        self.network = Network(inputs_n=3)
        self.network.add_layer(Layer(neurons_n=2, activation_f=Sigmoid))
        self.network.add_layer(Layer(neurons_n=1, activation_f=Sigmoid))
        self.network.compile()

    # <Set Inputs> ------------------------------------------------------------------------
    def test_setting_inputs_from_list(self):
        # given
        inputs_x = [[1], [2], [3]]  # 1 set of inputs

        # when
        self.network.set_inputs(inputs_x)

        # then
        self.assertEqual(self.network.inputs_x.shape, (3, 1))

    def test_setting_inputs_from_np_array(self):
        # given
        inputs_x = np.array([[1, 2, 3]]).T

        # when
        self.network.set_inputs(inputs_x)

        # then
        self.assertEqual(self.network.inputs_x.shape, (3, 1))

    def test_setting_inputs_from_np_array_ArrayDimensionError_1_dimensional_array_passed(self):
        # given
        inputs_x = np.array([1, 2, 3]).T

        # when / then
        self.assertRaises(ArrayDimensionError, lambda: self.network.set_inputs(inputs_x))

    def test_setting_inputs_from_np_array_ArrayDimensionError_wrong_number_of_inputs(self):
        # given
        inputs_x = np.array([[1, 2, 3, 4, 5, 6, 7, 8]]).T

        # when / then
        self.assertRaises(ArrayDimensionError, lambda: self.network.set_inputs(inputs_x))

    def test_setting_inputs_from_int_TypeError(self):
        # given
        inputs_x = 10

        # when / then
        self.assertRaises(TypeError, lambda: self.network.set_inputs(inputs_x))
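
The validation behaviour exercised by these tests could look roughly like the following; ArrayDimensionError is the project's own exception, re-declared here only to keep the sketch self-contained:

import numpy as np

class ArrayDimensionError(Exception):
    """Stand-in for the project's exception of the same name."""

def set_inputs_checked(inputs_x, inputs_n):
    # Accept lists and numpy arrays; reject scalars and other types.
    if not isinstance(inputs_x, (list, np.ndarray)):
        raise TypeError("inputs must be a list or a numpy array")
    inputs_x = np.asarray(inputs_x)
    # Require a column vector with exactly inputs_n rows.
    if inputs_x.ndim != 2 or inputs_x.shape != (inputs_n, 1):
        raise ArrayDimensionError(
            "expected shape ({}, 1), got {}".format(inputs_n, inputs_x.shape))
    return inputs_x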
Example #9
class Q_RNN(object):
    def __init__(self, num_inputs, num_units, num_layers, time_step, size,
                 scope):

        self.num_inputs = num_inputs
        self.num_units = num_units
        self.num_layers = num_layers
        self.time_step = time_step
        self.size = size
        self.scope = scope
        self.init = tf.random_normal_initializer()
        self.activation = tf.nn.tanh  # reference the function; tf.nn.tanh() would raise

        # assert input.shape[1] == self.time_step, \
        #     "Input dimension doesn't match with the time step"
        # (disabled: `input` here would be the Python builtin, not a tensor;
        # the constructor never receives the input sequence this check needs)

        self.proposal = Network(num_inputs=self.num_inputs,
                                num_units=self.num_units,
                                num_layers=self.num_layers,
                                num_levels=self.time_step,
                                scope_r="recognition")

    def build_network(self, status):
        # get parameters from the given input(status)
        param_stack = self.proposal.get_latent_samples(status=status)
        param_list = []  # renamed from "list" to avoid shadowing the builtin
        with tf.variable_scope(name_or_scope=self.scope, reuse=tf.AUTO_REUSE):
            # iterate over the time step
            # when t == 0
            time = 0
            [q_mean, q_cov] = param_stack.pop()
            samples = q_mean + tf.matmul(
                tf.random_normal(shape=(1, self.num_units)), q_cov)
            # transition
            p_mean = tf.zeros(shape=(1, self.num_units))
            p_cov = tf.eye(num_rows=self.num_units)
            # emission
            h = tf.layers.dense(inputs=samples,
                                units=self.num_units,
                                activation=self.activation,
                                kernel_initializer=self.init,
                                name="emission")
            mu = tf.layers.dense(inputs=h,
                                 units=self.num_units,
                                 kernel_initializer=self.init,
                                 name="mu")
            logd = tf.layers.dense(inputs=h,
                                   units=self.num_units,
                                   kernel_initializer=self.init,
                                   name="logd")
            cov = tf.diag(tf.exp(logd[0]))
            x_dist = MultivariateNormalFullCovariance(loc=mu,
                                                      covariance_matrix=cov)
            x = tf.reshape(tensor=status[0], shape=(1, self.num_units))
            x_prob = x_dist.prob(value=x)
            param_list.append([q_mean, q_cov, p_mean, p_cov, x_prob, samples])

            while len(param_stack) != 0:
                [q_mean, q_cov] = param_stack.pop()
                # transition
                # NOTE: each pass feeds `samples` into a fresh dense layer, so
                # only the last layer's output survives; chaining through
                # p_mean may have been the intent.
                for i in range(self.num_layers):
                    p_mean = tf.layers.dense(inputs=samples,
                                             units=self.num_units,
                                             use_bias=False,
                                             kernel_initializer=self.init,
                                             name="dense" + str(i))

                samples = q_mean + tf.matmul(
                    tf.random_normal(shape=(1, self.num_units)), q_cov)
                p_cov = tf.diag(
                    tf.exp(tf.random_normal(shape=(self.num_units,))))
                # emission
                h = tf.layers.dense(inputs=samples,
                                    units=self.num_units,
                                    activation=self.activation,
                                    kernel_initializer=self.init,
                                    name="emission")
                mu = tf.layers.dense(inputs=h,
                                     units=self.num_units,
                                     kernel_initializer=self.init,
                                     name="mu")
                logd = tf.layers.dense(inputs=h,
                                       units=self.num_units,
                                       kernel_initializer=self.init,
                                       name="logd")
                cov = tf.diag(tf.exp(logd[0]))
                x_dist = MultivariateNormalFullCovariance(
                    loc=mu, covariance_matrix=cov)
                x = tf.reshape(tensor=status[0], shape=(1, self.num_units))
                x_prob = x_dist.prob(value=x)

                param_list.append([q_mean, q_cov, p_mean, p_cov, x_prob, samples])

        return param_list

    def compute_loss(self, param_list):
        # accumulate the ELBO over time: -KL(q || p) + log p(x | z)
        total = 0.0
        for q_mean, q_cov, p_mean, p_cov, x_prob, _samples in param_list:
            # transition term: KL divergence between posterior q and prior p
            q_dist = MultivariateNormalFullCovariance(loc=q_mean,
                                                      covariance_matrix=q_cov)
            p_dist = MultivariateNormalFullCovariance(loc=p_mean,
                                                      covariance_matrix=p_cov)
            kl_div = q_dist.kl_divergence(other=p_dist)
            total = total - kl_div
            # emission term: log-likelihood of the observation
            log_prob = tf.log(x_prob)
            total = total + log_prob
        return total

    def get_trainable(self):
        return tf.trainable_variables(scope=self.scope)
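
Wiring the class into a TF1-style training loop might look like this; the placeholder shape, hyperparameters, and optimizer choice are assumptions, and the recognition Network it builds on is not shown here:

import tensorflow as tf

# Illustrative hyperparameters (assumptions, not taken from the class)
num_inputs, num_units, num_layers, time_step, size = 8, 8, 2, 5, 1

status_ph = tf.placeholder(tf.float32, shape=(time_step, num_units))
model = Q_RNN(num_inputs=num_inputs, num_units=num_units,
              num_layers=num_layers, time_step=time_step,
              size=size, scope="q_rnn")
params = model.build_network(status=status_ph)
loss = -model.compute_loss(params)  # minimize the negative ELBO
train_op = tf.train.AdamOptimizer(1e-3).minimize(
    loss, var_list=model.get_trainable())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # sess.run(train_op, feed_dict={status_ph: some_sequence})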
Example #10
from activation_function.Sigmoid import Sigmoid
from model.Network import Network

network = Network(2)
network.add_layer(3, Sigmoid)
network.add_layer(1, Sigmoid)

network.set_inputs([-2, 0])
network.set_desired_outputs([2.4])

network.set_all_weights_to(1)
network.forward_propagate()
print("outputs", network.get_outputs())

network.backward_propagate()
network.forward_propagate()

print("outputs", network.get_outputs())
Example #11
i = seeds[4]

# for i in seeds:
GlobalVars.seed = i
folder = 'crossroads'
name = 'advanceright_nocross'

out_file = 'output/{}/{}_{}.txt'.format(folder, name, str(GlobalVars.seed))

random.seed(GlobalVars.seed)

# Read all data_queue_vessels_waiting_lock
IO.read_data()

network = Network()
GlobalVars.network = network
# Simulation
GlobalVars.crossroad_type = CrossRoadType.AdvanceRight
GlobalVars.animate = False
GlobalVars.zoom = True
GlobalVars.x_min = 3.140237
GlobalVars.y_min = 50.794897
GlobalVars.x_min, GlobalVars.y_min = Utilities.normalize(GlobalVars.x_min, GlobalVars.y_min)
GlobalVars.x_max = 3.5
GlobalVars.y_max = 50.8
GlobalVars.x_max, GlobalVars.y_max = Utilities.normalize(GlobalVars.x_max, GlobalVars.y_max)
env = sim.Environment(trace=False, time_unit='minutes', random_seed=GlobalVars.seed)
GlobalVars.environment = env
if GlobalVars.zoom:
Example #12
import os  # required for the path handling below

import tensorflow as tf
from model.Network import Network
from tensorflow.examples.tutorials.mnist import input_data

import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import interactive
interactive(True)

# Reset Graph
tf.reset_default_graph()

# Initialize Constructor
nw = Network()

# Config Parameters
batch_size = 50
max_steps = 300

# Find Project Directory Path
#current_directory = os.path.dirname(__file__)
current_directory = os.path.dirname(os.path.realpath(__file__))
project_directory, _ = os.path.split(current_directory)

# Initialize Data and Log Paths
log_dir = project_directory + '/log'
data_dir = project_directory + '/resource'

# Delete Old Log Files
Example #13
class TestNetworkForwardPropagation(unittest.TestCase):

    def test_forward_propagate_linear_activation(self):
        # given
        inputs_x = [[1], [2]]  # 1 set of inputs
        self.network = Network(inputs_n=2)
        self.network.add_layer(Layer(neurons_n=3, activation_f=Linear))
        self.network.add_layer(Layer(neurons_n=1, activation_f=Linear))
        self.network.compile()
        self.network.set_inputs(inputs_x)

        # set weights to 1 - so calculations are easy to do by hand
        self.network.set_all_weights_to_one()

        # when
        self.network.forward_propagate()

        # then
        #                [layer][example_nr][neuron_nr]
        self.assertEqual(self.network.z[0][0][0], 4.0)
        self.assertEqual(self.network.z[1][0][0], 13.0)
        self.assertEqual(self.network.actual_outputs_a, 13)

    def test_forward_propagate_sigmoid_activation(self):
        # given
        inputs_x = [[1], [2]]  # 1 set of inputs
        self.network = Network(inputs_n=2)
        self.network.add_layer(Layer(neurons_n=3, activation_f=Sigmoid))
        self.network.add_layer(Layer(neurons_n=1, activation_f=Sigmoid))
        self.network.compile()
        self.network.set_inputs(inputs_x)

        # set weights to 1 - so calculations are easy to do by hand
        self.network.set_all_weights_to_one()

        # when
        self.network.forward_propagate()

        # then
        expected_output = Sigmoid.value(3 * Sigmoid.value(4) + 1)
        self.assertEqual(self.network.get_actual_outputs(), expected_output)
Example #14
    def test_forward_propagation(self):
        # given
        network = Network(4)
        network.add_layer(3, Sigmoid)
        network.add_layer(1, Linear)

        iris = load_iris()
        X = iris.data
        y = iris.target
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.2,
                                                            shuffle=True)

        for epoch in range(100):
            for example_n in range(y_train.size):
                network.set_inputs(X_train[example_n])
                network.set_desired_outputs([y_train[example_n]])
                network.forward_propagate()
                network.backward_propagate()

        for i in range(y_test.size):
            network.set_inputs(X_test[i])
            network.set_desired_outputs([y_test[i]])

            network.forward_propagate()

            print("desired output: ", y_test[i])
            print("actual output:", network.get_outputs(), "\n")

        # smoke test: completing the loop without an exception is the pass
        self.assertEqual(1, 1)
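
assertEqual(1, 1) only confirms that training and the prints completed; if an actual success criterion is wanted, the loop over the test set can feed an accuracy assertion instead. A sketch of the method's closing lines, under the assumption that rounding the linear output yields a class label:

        # hypothetical stronger ending for the test method above
        correct = 0
        for i in range(y_test.size):
            network.set_inputs(X_test[i])
            network.forward_propagate()
            predicted = int(round(float(np.ravel(network.get_outputs())[0])))
            if predicted == y_test[i]:
                correct += 1
        self.assertGreater(correct / y_test.size, 0.5)  # better than chance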
Example #15
import os
import sys

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import tensorflow as tf
from model.Network import Network
from tensorflow.examples.tutorials.mnist import input_data

# Reset Graph
tf.reset_default_graph()

# Initialize Constructor
nw = Network()

# Config Parameters
batch_size = 250
max_steps = 1000

# Find Project Directory Path
#current_directory = os.path.dirname(__file__)
current_directory = os.path.dirname(os.path.realpath(__file__))
project_directory, _ = os.path.split(current_directory)

# Initialize Data and Log Paths
log_dir = project_directory + '/log'
data_dir = project_directory + '/resource'

# Delete Old Log Files
#if tf.gfile.Exists(log_dir):
#	tf.gfile.DeleteRecursively(log_dir)
Example #16
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import tensorflow as tf
from model.Network import Network
from tensorflow.examples.tutorials.mnist import input_data

# Reset Graph
tf.reset_default_graph()

# Initialize Constructor
nw = Network()

# Config Parameters
batch_size = 50
max_steps = 1100

# Find Project Directory Path
#current_directory = os.path.dirname(__file__)
current_directory = os.path.dirname(os.path.realpath(__file__))
project_directory, _ = os.path.split(current_directory)

# Initialize Data and Log Paths
log_dir = project_directory + '/log'
data_dir = project_directory + '/resource'

# Delete Old Log Files
#if tf.gfile.Exists(log_dir):
#	tf.gfile.DeleteRecursively(log_dir)
#tf.gfile.MakeDirs(log_dir)