Example #1
def activation_forward(A_prev, W, b, activation_way):
    """

    :param A_prev:
    :param W:
    :param b:
    :param activation_way: -- a text string indicate the way we activate this layer, "sigmoid","relu",...
    :return:
    A -- the activation output of this layer
    cache -- a dictionary contains "linear_cache' and "activation_cache"
    """
    cache = dict()
    if activation_way == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)
    elif activation_way == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation_way == "tanh":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = tanh(Z)
    elif activation_way == "softmax":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = softmax(Z)
    else:
        raise ValueError("Unsupported activation: " + str(activation_way))

    cache["linear_cache"] = linear_cache
    cache["activation_cache"] = activation_cache

    return A, cache
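The helpers used above (linear_forward plus the cache-returning activations) are not shown. Below is a minimal NumPy sketch of what they might look like, assuming the column-per-example convention and that each activation caches its input Z for the backward pass; the cache contents are an assumption, not taken from the snippet.

import numpy as np

def linear_forward(A_prev, W, b):
    # Affine step; cache the inputs for the backward pass.
    Z = np.dot(W, A_prev) + b
    return Z, (A_prev, W, b)

def sigmoid(Z):
    return 1.0 / (1.0 + np.exp(-Z)), Z

def relu(Z):
    return np.maximum(0, Z), Z

def tanh(Z):
    return np.tanh(Z), Z

def softmax(Z):
    # Column-wise softmax, shifted by the max for numerical stability.
    e = np.exp(Z - np.max(Z, axis=0, keepdims=True))
    return e / np.sum(e, axis=0, keepdims=True), Z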
Example #2
 def feedforward_test(self, layer, in_put, label):
     """Return the output of the network if ``a`` is input and display the activations in the specified layer and the last output layer"""
     a_list = [in_put]
     index = 0
     for i in range(0, len(label)):
         if label[i] == 1:
             index = i
             break
     for layer_index, (b, w) in enumerate(zip(self.biases, self.weights)):
         activations = [0] * self.sizes[layer_index + 1]
         for m in range(0, self.sizes[layer_index + 1]):
             if layer_index == self.num_layers - 1:
                 activations[m] = activation_functions.relu(np.dot(w[m], a_list[-1]) - b[m])
             else:
                 activations[m] = activation_functions.modified_tanh(np.dot(w[m], a_list[-1]) - b[m], self.tanh_activations_coefficient)
         if layer_index == layer:
             pixels = np.array(a_list[layer])
             size = int(math.sqrt(self.sizes[layer_index]))
             pixels = pixels.reshape((size, size))
             plt.title('Label is {label}'.format(label=index))
             plt.imshow(pixels, cmap='gray')
             plt.show()
         a_list.append(activations)
     print("Predicted Output From Test is : {0}".format(a_list[-1]))
     return a_list[-1]
Example #3
def linear_activation_forward(A_prev, W, b, activation, output_size):
    #     """
    #     Implement the forward propagation for the LINEAR->ACTIVATION layer

    #     Arguments:
    #     A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
    #     W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    #     b -- bias vector, numpy array of shape (size of the current layer, 1)
    #     activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
    #     output_size - bit width of output data

    #     Returns:
    #     A -- the output of the activation function, also called the post-activation value
    #     cache -- a python tuple containing "linear_cache" and "activation_cache";
    #              stored for computing the backward pass efficiently
    #     """

    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z, output_size)

    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z, output_size)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
Example #4
def mlp_fpass(data, w_1, b_1, w_2, b_2):
    """
  Initializes the MLP weights using Xavier initialization (uniform)
  and the biases with zero
  """
    z_1 = np.add(np.dot(data, w_1), b_1)
    a_1 = af.relu(z_1)
    z_2 = np.add(np.dot(a_1, w_2), b_2)
    a_2 = af.softmax(z_2)

    return z_1, a_1, z_2, a_2
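The row-per-example matrix products above imply data of shape (batch, features). A minimal usage sketch that pairs mlp_fpass with Xavier (uniform) weight initialization and zero biases; the mlp_init helper and the 784/128/10 sizes are illustrative assumptions, not part of the original code.

import numpy as np

def mlp_init(n_in, n_hidden, n_out):
    # Xavier/Glorot uniform limits per layer; biases start at zero.
    limit_1 = np.sqrt(6.0 / (n_in + n_hidden))
    limit_2 = np.sqrt(6.0 / (n_hidden + n_out))
    w_1 = np.random.uniform(-limit_1, limit_1, size=(n_in, n_hidden))
    w_2 = np.random.uniform(-limit_2, limit_2, size=(n_hidden, n_out))
    return w_1, np.zeros(n_hidden), w_2, np.zeros(n_out)

w_1, b_1, w_2, b_2 = mlp_init(784, 128, 10)
data = np.random.rand(32, 784)  # a batch of 32 flattened inputs
z_1, a_1, z_2, a_2 = mlp_fpass(data, w_1, b_1, w_2, b_2)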
Example #5
 def feedforward(self, a):
     """Return the output of the network if ``a`` is input."""
     a_list = [a]
     for layer_index, (b, w) in enumerate(zip(self.biases, self.weights)):
         activations = [0] * self.sizes[layer_index + 1]
         for m in range(0, self.sizes[layer_index + 1]):
             if layer_index == self.num_layers-1:
                 activations[m] = activation_functions.relu(np.dot(w[m], a_list[-1]) - b[m])
             else:
                 activations[m] = activation_functions.modified_tanh(np.dot(w[m], a_list[-1]) - b[m], self.tanh_activations_coefficient)
         a_list.append(activations)
     return a_list[-1]
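The per-neuron loop above can also be collapsed into one matrix product per layer. Below is a vectorized sketch, assuming ``a`` and each bias are arrays of matching shape and that the activation functions accept NumPy arrays elementwise; this rewrite is not part of the original class.

 def feedforward_vectorized(self, a):
     """Same forward pass as feedforward, one matrix product per layer."""
     a = np.asarray(a)
     for layer_index, (b, w) in enumerate(zip(self.biases, self.weights)):
         z = np.dot(w, a) - np.asarray(b)
         if layer_index == self.num_layers - 1:
             a = activation_functions.relu(z)
         else:
             a = activation_functions.modified_tanh(z, self.tanh_activations_coefficient)
     return a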
Example #6
def linear_activation_forward(A_prev, W, b, activation):
    """Forward propagation for a LINEAR->ACTIVATION layer ("sigmoid" or "relu")."""

    if activation == 'sigmoid':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)

    elif activation == 'relu':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert A.shape == (W.shape[0], A_prev.shape[1])
    cache = (linear_cache, activation_cache)

    return A, cache
Example #7
def linear_activation_forward(A_prev, W, b, activation):

    if activation == "sigmoid":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)

    elif activation == "relu":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
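The last two snippets are typically composed into an L-layer forward pass, with ReLU for the hidden layers and sigmoid on the output. Below is a sketch of such a driver, assuming the parameters live in a dictionary keyed "W1", "b1", ..., "WL", "bL"; that layout is an assumption, not shown above.

def L_model_forward(X, parameters):
    # One (W, b) pair per layer in the dictionary.
    L = len(parameters) // 2
    caches = []
    A = X

    # [LINEAR -> RELU] for layers 1 .. L-1.
    for l in range(1, L):
        A_prev = A
        A, cache = linear_activation_forward(A_prev,
                                             parameters["W" + str(l)],
                                             parameters["b" + str(l)],
                                             activation="relu")
        caches.append(cache)

    # LINEAR -> SIGMOID for the output layer.
    AL, cache = linear_activation_forward(A,
                                          parameters["W" + str(L)],
                                          parameters["b" + str(L)],
                                          activation="sigmoid")
    caches.append(cache)

    return AL, caches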
Example #8
    def __run_network(self,
                      registers: np.ndarray,
                      debug: dict = None) -> tuple:
        def take_params(values, i):
            """Return the next pair of weights and biases after the
            starting index and the new starting index."""
            return values[i], values[i + 1], i + 2

        # Extract the 0th (i.e. P( x = 0 )) component from all registers.
        last_hidden_layer = np.array(registers[:, 0][None, ...],
                                     dtype=np.float32)

        # Propagate forward through the hidden layers.
        idx = 0
        for i in range(self.context.num_hidden_layers):
            W, b, idx = take_params(self.context.network, idx)
            last_hidden_layer = relu(last_hidden_layer.dot(W) + b)

        # Forward propagate to controller coefficients for each gate input.
        controller_coefficients = []
        for i, gate in enumerate(self.context.gates):
            coeffs = []
            for j in range(gate.arity):
                W, b, idx = take_params(self.context.network, idx)
                coeff = softmax(last_hidden_layer.dot(W) + b)
                coeffs.append(coeff)
            controller_coefficients.append(coeffs)

        # Forward propagate to new register value coefficients.
        for i in range(self.context.num_regs):
            W, b, idx = take_params(self.context.network, idx)
            coeff = softmax(last_hidden_layer.dot(W) + b)
            controller_coefficients.append(coeff)

        # Forward propagate to generate willingness to complete.
        W, b, idx = take_params(self.context.network, idx)
        complete = sigmoid(last_hidden_layer.dot(W) + b)

        if debug is not None:
            debug.fi = np.around(complete.sum(), 3)

        return controller_coefficients, complete
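Unlike the cache-returning helpers in the earlier examples, the relu, softmax and sigmoid used here act directly on arrays and return only the activation. A minimal NumPy sketch of the assumed signatures:

import numpy as np

def relu(x):
    return np.maximum(0.0, x)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def softmax(x):
    # Softmax over the last axis, shifted by the max for numerical stability.
    e = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)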
Example #9
    def feedforward_with_learning(self, a, target_output, asynch=False):
        """Return the output of the network if ``a`` is input while modifying the weights as we go through each layer"""

        a_list = [a]
        for layer_index, (b, w) in enumerate(zip(self.biases, self.weights)):
            if layer_index + 1 == self.num_layers - 1:
                a_list.append(target_output)
            else:
                activations = [0] * self.sizes[layer_index + 1]
                for m in range(0, self.sizes[layer_index + 1]):
                    if layer_index == self.num_layers - 1:
                        activations[m] = activation_functions.relu(np.dot(w[m], a_list[-1]) - b[m])
                    else:
                        activations[m] = activation_functions.modified_tanh(np.dot(w[m], a_list[-1]) - b[m], self.tanh_activations_coefficient)
                a_list.append(activations)
            second_layer_size, first_layer_size = w.shape
            new_weights = w.copy()
            new_weight = 0.0
            for j in range(0, second_layer_size):
                if layer_index < self.start_learning_at_index:
                    continue
                for i in range(0, first_layer_size):
                    x = a_list[-2][i]
                    y = a_list[-1][j]
                    if x * y >= self.create_new_connection_threshold:
                        if w[j][i] != 0:
                            dw = self.eta_ltp * x * y
                            new_weight = activation_functions.relu_tanh(w[j][i] + dw, self.tanh_weights_coefficient)
                        else:
                            new_weight = 0.50
                    else:
                        dw = self.eta_ltd * x * y
                        new_weight = activation_functions.relu_tanh(w[j][i] - dw, self.tanh_weights_coefficient)
                        if new_weight < 0:
                            new_weight = 0
                    new_weights[j][i] = new_weight
            if asynch:
                self.weights[layer_index] = new_weights
        return a_list[-1]
Example #10
 def feed_forward(self, x):
     # z = weighted sum, the same for every activation choice
     z = np.dot(self.weights, x) + self.bias
     # a = activation function applied to the weighted sum
     if self.activation_function == 'sigmoid':
         a = af.sigmoid(z)
     elif self.activation_function == 'heavyside':
         a = af.heavyside(z)
     elif self.activation_function == 'relu':
         a = af.relu(z)
     elif self.activation_function == 'leaky_relu':
         a = af.leaky_relu(z)
     elif self.activation_function == 'tanh':
         a = af.tanh(z)
     else:
         raise ValueError("Unknown activation function: " + self.activation_function)
     # y = output
     y = a
     return y, z
Example #11
 def test_relu(self):
     result = af.relu(np.array([[10, -5.3], [-195, 13.6]]))
     expected_result = np.array([[10, 0], [0, 13.6]])
     difference = result - expected_result
     self.assertTrue(np.linalg.norm(difference) < 1e-4)
Example #12
 def relu_layer(self, i_layer):
     # ReLU is elementwise, so the output layer keeps the input layer's size.
     self.layero_size = self.layeri_size
     self.o_layer = relu(i_layer)
Example #13
if __name__ == "__main__":
    import sys
    sys.path.append('..')
    from autograd import Variable, Matrix, zeros
    from activation_functions import sigmoid, relu

    # Hidden layer parameters: 500 units, 2 inputs.
    W0 = zeros(500, 2)
    W0.init_normal()

    b0 = zeros(500, 1)
    b0.init_normal()

    # Output layer parameters: 1 unit, 500 hidden inputs.
    W1 = zeros(1, 500)
    W1.init_normal()

    b1 = zeros(1, 1)
    b1.init_normal()

    # A single two-element input column vector.
    x = Matrix([[Variable(1)], [Variable(2)]])

    # Forward pass: ReLU hidden layer followed by a ReLU output unit.
    hidden = relu(W0 * x + b0)
    output = relu(W1 * hidden + b1)
    print(output)
    print(output.get_grad(W0))
    print(output.get_grad(b0))
    print(output.get_grad(W1))
    print(output.get_grad(b1))