Example #1
    def activation_forward(self, input, W, b, activation_type):
        '''
        :param input: input to the current layer
        :param W: weights of the current layer
        :param b: biases of the current layer
        :param activation_type: type of activation function used in the forward propagation
        :return: - A --> output of the activation function
                 - packet_of_packets --> tuple of 2 elements used later in backward propagation:
                     1- linear packet : contains (input, weights, biases) of the current layer
                     2- activation packet : contains (Z), the input to the activation function
        '''
        if activation_type == "sigmoid":
            Z, linear_packet = self.identity_forward(input, W, b)  # Z = W . input + b
            temp = activations.Sigmoid()
            A, activation_packet = temp.forward(Z)  # A = sigmoid(Z)

        elif activation_type == "relu":
            Z, linear_packet = self.identity_forward(input, W, b)
            temp = activations.relu()
            A, activation_packet = temp.forward(Z)

        elif activation_type == "leaky_relu":
            Z, linear_packet = self.identity_forward(input, W, b)
            temp = activations.leaky_relu()
            A, activation_packet = temp.forward(Z)
        elif activation_type == "tanh":
            Z, linear_packet = self.identity_forward(input, W, b)
            temp = activations.tanh()
            A, activation_packet = temp.forward(Z)
        elif activation_type == "softmax":
            Z, linear_packet = self.identity_forward(input, W, b)
            #temp =
            A, activation_packet = activations.Softmax().forward(Z)
        elif activation_type == "linear":
            Z, linear_packet = self.identity_forward(input, W, b)
            # temp =
            A, activation_packet = Z,Z

        else:
            raise ValueError("Unknown activation type: " + str(activation_type))

        packet_of_packets = linear_packet, activation_packet
        return A, packet_of_packets
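
The forward pass above delegates the affine step to self.identity_forward, which is not part of this snippet. A minimal sketch of what that method presumably does, intended for the same class; the shapes and the return convention are assumptions taken from how it is called above (W of shape (n_out, n_in), input of shape (n_in, m), b of shape (n_out, 1)):

import numpy as np

def identity_forward(self, input, W, b):
    # Affine (linear) step of the layer: Z = W . input + b
    Z = np.dot(W, input) + b          # shape (n_out, m)
    linear_packet = (input, W, b)     # cached for the backward pass
    return Z, linear_packet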
Example #2
    def __init__(self, f=''):
        # Forward computations
        self.x = None  # layer input signal
        self.z = None  # weighted sum (forward)
        self.a = None  # activation (forward)
        self.f = None  # activation function object (set below)

        # Set activation
        if f == 'sigmoid':
            self.f = activations.Sigmoid()
        elif f == 'tanh':
            self.f = activations.Tanh()
        elif f == 'softmax':
            self.f = activations.Softmax()
        else:
            raise ValueError('An activation must be specified: one of sigmoid, tanh, or softmax.')

        # Back computations
        self.delta = None  # delta for this layer
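
Both examples call into an activations module that is not shown here. Example #1's activation_forward expects forward(Z) to return the activation together with a cached packet, and the activation_backward method below expects backward(dA, packet) to return dZ. A minimal sketch of that interface, assuming NumPy arrays and using a hypothetical Sigmoid as the illustration:

import numpy as np

class Sigmoid:
    def forward(self, Z):
        A = 1.0 / (1.0 + np.exp(-Z))
        activation_packet = Z          # Z is cached for the backward pass
        return A, activation_packet

    def backward(self, dA, activation_packet):
        Z = activation_packet
        s = 1.0 / (1.0 + np.exp(-Z))
        return dA * s * (1.0 - s)      # dZ = dA * sigmoid'(Z)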
    def activation_backward(self, delta_A, packet_of_packets, activation_type,
                            lambd):
        '''
        :param delta_A: derivative of the loss function w.r.t. the activation output
        :param packet_of_packets: tuple of 2 elements produced by the forward pass:
                     1- linear packet : contains (input, weights, biases) of the current layer
                     2- activation packet : contains (Z), the input to the activation function
        :param activation_type: type of the activation function used in this layer
        :param lambd: L2 regularization parameter
        :return: - delta_input_previous : gradient w.r.t. the previous layer's activations
                 - delta_w : gradient of the weights of the current layer
                 - delta_b : gradient of the biases of the current layer
        '''
        linear_packet, act_packet = packet_of_packets

        if activation_type == "relu":
            #print("hi")
            temp = activations.relu()
            dZ = temp.backaward(
                delta_A,
                act_packet)  # we have to implement this relu backward function
            dA_prev, dW, db = self.identity_backward(dZ, linear_packet, lambd)
        elif activation_type == "sigmoid":
            #print("hi")
            temp = activations.Sigmoid()
            dZ = temp.backward(delta_A, act_packet)
            dA_prev, dW, db = self.identity_backward(dZ, linear_packet, lambd)
        # we will start from here tomorrow , we have to deal with Y_hat , y_true while creating instance from cost class

    # temp = Losses.square_difference()
    #dA = temp.backprop_cost(self.linear_packet)
        elif activation_type == "softmax":
            temp = activations.Softmax()
            dZ = temp.diff(delta_A)
            dA_prev, dW, db = self.identity_backward(dZ, linear_packet, lambd)

        return dA_prev, dW, db
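
identity_backward is likewise not included in the snippet. From the way it is called, it takes dZ, the linear packet cached in the forward pass, and the L2 regularization parameter lambd, and returns the gradients w.r.t. the previous layer's activations, the weights and the biases. A sketch under those assumptions, intended for the same class:

import numpy as np

def identity_backward(self, dZ, linear_packet, lambd):
    A_prev, W, b = linear_packet
    m = A_prev.shape[1]                               # batch size

    dW = np.dot(dZ, A_prev.T) / m + (lambd / m) * W   # L2 penalty added to the weight gradient
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = np.dot(W.T, dZ)                         # gradient handed to the previous layer
    return dA_prev, dW, db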
    def __init__(self):
        self.activation = activations.Softmax()
        self.loss = CategoricalCrossentropy()
        self._d_inputs = None
    # plot ground truth and model predictions over the test inputs
    plt.plot(X_test, y_test)
    plt.plot(X_test, predictions)
    plt.show()


    EPOCHS = 10001
    LEARNING_RATE = 0.05

    X_train, y_train = spiral_data(samples=100, classes=3)
    X_val, y_val = spiral_data(samples=100, classes=3)

    model = network.NeuralNetwork()

    model.add_layer(
        layers.Dense(2,
                     64,
                     weight_regularizer_l2=0.000005,
                     bias_regularizer_l2=0.000005))
    model.add_layer(activations.ReLU())
    model.add_layer(layers.Dropout(rate=0.2))
    model.add_layer(layers.Dense(64, 3))
    model.add_layer(activations.Softmax())

    model.set(loss=losses.CategoricalCrossentropy(),
              optimizier=optimizers.Adam(learning_rate=LEARNING_RATE),
              accuracy=metrics.CategoricalAccuracy())

    model.fit(X_train, y_train, epochs=EPOCHS, validation_data=(X_val, y_val))
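
The training script above omits its imports. The module names below are inferred from the identifiers it uses and are not confirmed by the snippet; spiral_data is assumed to come from the nnfs package:

import matplotlib.pyplot as plt
from nnfs.datasets import spiral_data  # assumption: toy dataset generator from the nnfs package

# assumption: the project exposes these modules under exactly these names
import network
import layers
import activations
import losses
import optimizers
import metrics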