Example #1
import numpy as np

# `common` and `viz_client` are project-local modules: `common` provides the
# Activation helper and `viz_client` the plotting client.

def plot_activations():
    # Plot each available activation function and its derivative over [-10, 10]
    x = np.linspace(-10.0, 10.0, 100)
    for name in common.Activation.available:
        activation = common.Activation(name)  # construct once per name
        y = activation.function(x, deriv=False)
        dy = activation.function(x, deriv=True)
        viz_client.plot_func(x, y, title=name)
        viz_client.plot_func(x, dy, title="d_" + name)
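These examples lean on an `Activation` helper (imported as `common.Activation` here and `nnc.Activation` in the test below) whose implementation is not shown. From the calls alone it exposes an `available` list, a `function(x, deriv=False)` entry point, and per-function methods such as `_tanh`. Here is a minimal sketch of that interface, assuming name-based dispatch and a sigmoid/tanh pair; the real class may differ:

import numpy as np

class Activation:
    """Minimal sketch of the interface these examples assume; hypothetical."""

    available = ["sigmoid", "tanh"]  # assumed; the snippets only show "tanh"

    def __init__(self, name):
        if name not in self.available:
            raise ValueError("Unknown activation: " + name)
        # Dispatch `function` to the matching private method, e.g. self._tanh
        self.function = getattr(self, "_" + name)

    def _sigmoid(self, x, deriv=False):
        s = 1.0 / (1.0 + np.exp(-x))
        return s * (1.0 - s) if deriv else s

    def _tanh(self, x, deriv=False):
        # d/dx tanh(x) = 1 - tanh(x)^2
        return 1.0 - np.tanh(x) ** 2 if deriv else np.tanh(x)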
Example #2
    def test_tanh(self):
        activation = nnc.Activation("tanh")

        # Compare scalars
        self.assertEqual(activation.function(1), math.tanh(1))
        self.assertEqual(activation.function(0), math.tanh(0))

        # Compare vectors
        vect = np.array([1, 2, 3])
        true_values = np.array(
            [0.7615941559557649, 0.9640275800758169, 0.9950547536867305]
        )
        self.assertTrue(np.array_equal(activation.function(vect), true_values))

        # Compare matrices
        matrix = np.array([[0.2, 0.0000001, 3], [-0.5, 1000, -10]])
        true_values = np.array(
            [[0.19737532, 0.0000001, 0.99505475], [-0.46211716, 1.0, -1.0]]
        )
        self.assertTrue(
            np.array_equal(np.round(activation.function(matrix), 8), true_values)
        )

        # Test derivative and scalars
        self.assertEqual(activation._tanh(1, deriv=True), 1 - math.tanh(1) ** 2)
        self.assertEqual(activation._tanh(0, deriv=True), 1 - math.tanh(0) ** 2)

        # Test derivative and vectors
        true_values = np.array(
            [0.41997434161402614, 0.07065082485316443, 0.009866037165440211]
        )
        self.assertTrue(np.array_equal(activation._tanh(vect, deriv=True), true_values))

        # Test derivative and matrices
        true_values = np.array(
            [[0.96104298, 1.0, 0.00986604], [0.78644773, 0.0, 0.00000001]]
        )
        self.assertTrue(
            np.array_equal(
                np.round(activation._tanh(matrix, deriv=True), 8), true_values
            )
        )
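A side note on these assertions: `np.array_equal` compares floats exactly, so the checks only pass because the hard-coded literals parse to the very float64 values that tanh produces. A tolerance-based comparison such as `np.testing.assert_allclose` is usually more robust for this kind of test; a small sketch using NumPy's own tanh as the reference:

import numpy as np

# Tolerance-based alternative to the exact np.array_equal checks above
vect = np.array([1, 2, 3])
expected = np.array([0.7615941559557649, 0.9640275800758169, 0.9950547536867305])
np.testing.assert_allclose(np.tanh(vect), expected, rtol=1e-12)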
Example #3
    def __init__(self, num_features, batch_size, num_hidden=0, hidden_sizes=None,
                 activation="sigmoid", learning_rate=0.01,
                 learning_decay=None, weight_decay=None, dropout_rate=None,
                 init_weight_spread=1.0, random_seed=None):
        """
        Initialize a blank numpy net object
        This object will have input/output layers and weights (neurons and synapses)
        Both will be lists of numpy arrays having varying sizes
        Synapses are initialized with random weights with mean 0
        :param num_features: (int) Shape of the input layer
        :param batch_size: (int) Size of the batches you will be running through the net while training
        :param num_hidden: (int) Number of hidden layers
        :param hidden_sizes: (list[int]) The sizes you want your hidden layers to be
        :param activation: (str) or (list[str]) The activation function to use;
                           a list specifies an activation function for each weight layer explicitly
        :param learning_rate: (float) Step size used when updating the weights
        :param learning_decay: (float) Optional decay applied to the learning rate over training
        :param weight_decay: (float) Optional weight-decay (regularization) coefficient
        :param dropout_rate: (float) Optional fraction of neurons to drop during training
        :param init_weight_spread: (float) Half-width of the uniform distribution used to initialize the weights
        :param random_seed: (int) Specify the random seed if you want to reproduce the same runs
        """
        self.default_layer_size = 16
        # Initialize arrays used for neurons and synapses
        self.batch_size = batch_size
        self.num_layers = 2 + num_hidden  # Input, output, and hidden layers
        self.num_hidden = num_hidden
        self.layer = [np.empty(0)] * self.num_layers
        self.weight = [np.empty(0)] * (self.num_layers - 1)

        # Set the activation functions: one is needed per layer of weights, i.e. one
        # less than the number of layers. Pass a single string to use the same function
        # everywhere, or a list of names to set each weight layer explicitly.
        if isinstance(activation, str):
            self.activation_function = [common.Activation(activation).function] * (self.num_layers - 1)
            self.activation_names = [activation] * (self.num_layers - 1)
        elif isinstance(activation, list):
            if len(activation) == self.num_layers - 1:
                self.activation_function = [common.Activation(name).function
                                            for name in activation]
                self.activation_names = activation
            else:
                msg = ("activation_function must be one less than the number of layers in your network "
                       "(num_layers-1 = " + str(self.num_layers - 1) + ")")
                log.out.error(msg)
                raise ValueError(msg)
        else:
            msg = "activation_function must be a string or a list of strings"
            log.out.error(msg)
            raise ValueError(msg)

        # Set network hyperparameters
        self.learning_rate = learning_rate
        self.learning_decay = learning_decay
        self.weight_decay = weight_decay
        self.dropout_rate = dropout_rate

        # For diagnostics
        self.loss_history = list()  # Appended to frequently, so a list is a good fit
        self.predict_space = None  # If left as None, it is set from the training input bounds
        self.input_shape = [batch_size, num_features]
        if hidden_sizes is None:
            self.hidden_sizes = [self.default_layer_size] * self.num_hidden
        else:
            self.hidden_sizes = hidden_sizes
        self.output_shape = [batch_size, 1]

        # Seed the random number generator if requested (makes runs repeatable)
        if random_seed is not None:
            np.random.seed(random_seed)
        else:
            current_seed = int(np.random.random() * 4.0e9)  # ~4 billion, near the limit of a 32-bit unsigned int
            np.random.seed(current_seed)
            log.out.info("No random seed selected, using: " + str(current_seed))

        # Initialize weights with random noise centered around zero, spread set by init_weight_spread
        self.weight[0] = (init_weight_spread * 2) * np.random.random([self.input_shape[1], self.hidden_sizes[0]]) - init_weight_spread
        for i in range(self.num_hidden-1):
            self.weight[i+1] = (init_weight_spread * 2) * np.random.random([self.weight[i].shape[1], self.hidden_sizes[i+1]]) - init_weight_spread
        self.weight[self.num_hidden] = (init_weight_spread * 2) * np.random.random([self.weight[self.num_hidden-1].shape[1], self.output_shape[1]]) - init_weight_spread

        # Initialize layers with zeros
        self.forward(np.zeros(self.input_shape))

        # Keep a list of all the layer sizes for easy access later
        self.layer_sizes = [layer.shape[1] for layer in self.layer]
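For context, here is how this constructor might be called; the class name `NumpyNet` is an assumption, since only this `__init__` appears in the source:

# Hypothetical usage; only the constructor signature comes from the source
net = NumpyNet(
    num_features=2,          # width of the input layer
    batch_size=32,
    num_hidden=2,
    hidden_sizes=[16, 16],
    activation="tanh",       # or e.g. ["tanh", "tanh", "sigmoid"], one per weight layer
    learning_rate=0.01,
    random_seed=42,          # makes the weight initialization repeatable
)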