Example #1
class MnistNetMiniBatch:
    def __init__(self):
        self.d1_layer = Dense(784, 100)
        self.a1_layer = ReLu()
        self.drop1_layer = Dropout(0.5)

        self.d2_layer = Dense(100, 50)
        self.a2_layer = ReLu()
        self.drop2_layer = Dropout(0.25)

        self.d3_layer = Dense(50, 10)
        self.a3_layer = Softmax()

    def forward(self, x, train=True):
        net = self.d1_layer.forward(x)
        net = self.a1_layer.forward(net)
        net = self.drop1_layer.forward(net, train)

        net = self.d2_layer.forward(net)
        net = self.a2_layer.forward(net)
        net = self.drop2_layer.forward(net, train)

        net = self.d3_layer.forward(net)
        net = self.a3_layer.forward(net)

        return net

    def backward(self,
                 dz,
                 learning_rate=0.01,
                 mini_batch=True,
                 update=False,
                 len_mini_batch=None):

        dz = self.a3_layer.backward(dz)
        dz = self.d3_layer.backward(dz,
                                    learning_rate=learning_rate,
                                    mini_batch=mini_batch,
                                    update=update,
                                    len_mini_batch=len_mini_batch)

        dz = self.drop2_layer.backward(dz)
        dz = self.a2_layer.backward(dz)
        dz = self.d2_layer.backward(dz,
                                    learning_rate=learning_rate,
                                    mini_batch=mini_batch,
                                    update=update,
                                    len_mini_batch=len_mini_batch)

        dz = self.drop1_layer.backward(dz)
        dz = self.a1_layer.backward(dz)
        dz = self.d1_layer.backward(dz,
                                    learning_rate=learning_rate,
                                    mini_batch=mini_batch,
                                    update=update,
                                    len_mini_batch=len_mini_batch)

        return dz
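
A hedged usage sketch for this class follows: it assumes samples are fed one at a time, gradients are accumulated across the mini-batch, and the update is applied on the batch's last sample; the batches iterable and the cross_entropy_grad helper are illustrative assumptions, not part of the original code.

# Hypothetical training loop (sketch only; names below are assumptions).
model = MnistNetMiniBatch()
for x_batch, y_batch in batches:                        # assumed (inputs, one-hot labels) pairs
    for i, (x, y) in enumerate(zip(x_batch, y_batch)):
        out = model.forward(x, train=True)
        dz = cross_entropy_grad(out, y)                 # assumed helper: dLoss/d(softmax output)
        model.backward(dz,
                       learning_rate=0.01,
                       mini_batch=True,
                       update=(i == len(x_batch) - 1),  # apply accumulated update at batch end
                       len_mini_batch=len(x_batch))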
Example #2
    def test_dense_layer_NUMERICAL_GRADIENT_CHECK(self):
        x = np.linspace(-1, 1, 10 * 32).reshape([10, 32])
        l = Dense(32, 64)
        numeric_grads = eval_numerical_gradient(lambda x: l.forward(x).sum(),
                                                x)
        grads = l.backward(x, np.ones([10, 64]), optim='gd', lr=0)
        self.assertTrue(np.allclose(grads, numeric_grads, rtol=1e-5, atol=0),
                        msg="input gradient does not match numeric grad")
Example #3
    def test_dense_layer_FORWARD(self):
        layer = Dense(3, 4)
        x = np.linspace(-1, 1, 2 * 3).reshape([2, 3])
        layer.weights = np.linspace(-1, 1, 3 * 4).reshape([3, 4])
        layer.biases = np.linspace(-1, 1, 4)

        self.assertTrue(
            np.allclose(
                layer.forward(x),
                np.array([[0.07272727, 0.41212121, 0.75151515, 1.09090909],
                          [-0.90909091, 0.08484848, 1.07878788, 2.07272727]])))
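
Both tests above set weights and biases directly and only exercise the forward pass, which they expect to be the affine map x @ weights + biases; a minimal Dense sketch consistent with that expectation (the random initialisation is an assumption, since the tests overwrite it anyway):

import numpy as np

class Dense:
    def __init__(self, n_in, n_out):
        # Placeholder initialisation; the tests overwrite weights and biases explicitly.
        self.weights = np.random.randn(n_in, n_out) * 0.01
        self.biases = np.zeros(n_out)

    def forward(self, x):
        # Affine transform over the batch: (batch, n_in) @ (n_in, n_out) + (n_out,)
        return x @ self.weights + self.biases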
Example #4
layer1 = Dense(trainingData.shape[2], 16)
activation1 = Sigmoid()
layer2 = Dense(16, 10)
activation2 = SoftMax()
cost = CostMeanSquared()

for epoch in range(EPOCHS):
    print('Epoch: ' + str(epoch + 1) + '/' + str(EPOCHS))
    print('')
    correct = 0
    for batch in range(total // BATCH_SIZE):

        ### STOCHASTIC GRADIENT DESCENT ###

        layer1.forward(trainingData[batch])
        activation1.forward(layer1.outputs)
        layer2.forward(activation1.outputs)
        activation2.forward(layer2.outputs)
        cost.forward(activation2.outputs, labels[batch], 10)

        for sample in range(activation2.outputs.shape[1]):
            if np.argmax(activation2.outputs[:, sample]) == np.argmax(
                    labels[batch, sample]):
                correct += 1

        cost.backward(activation2.outputs, labels[batch], 10)
        activation2.backward(layer2.outputs, layer2.weights.shape[0],
                             BATCH_SIZE)
        layer2.backward(activation1.outputs)
        activation1.backward(layer1.outputs)
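
The excerpt stops before the first layer's backward call and any weight update. A hedged continuation under the same backward(inputs) convention; the dweights / dbiases attributes and the LEARNING_RATE constant are assumptions, not part of the original code:

        layer1.backward(trainingData[batch])

        # Hypothetical SGD step; gradient attribute names are assumed, not the original API.
        for layer in (layer1, layer2):
            layer.weights -= LEARNING_RATE * layer.dweights
            layer.biases -= LEARNING_RATE * layer.dbiases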
Example #5
    W2 = np.array([[0.4, 0.45], [0.50, 0.55]])
    b2 = 0.60

    y_true = np.array([[0.01, 0.99]])

    dense = Dense(2, W1, b1)
    sigmoid = Activations('Sigmoid')
    swish1 = Activations('Swish')
    dense2 = Dense(2, W2, b2)
    swish2 = Activations('Swish')
    activation2 = Activations('Sigmoid')

    loss_fn = MSE()

    z1 = dense.forward(x)
    sig1 = sigmoid.forward(z1)
    z2 = dense2.forward(sig1)
    y_pred = activation2.forward(z2)

    sw1 = swish1.forward(z1)
    sw2 = dense2.forward(sw1)
    y_pre = swish2.forward(sw2)

    # loss = loss_fn.loss(y_true, y_pred)
    # print("loss: ", loss)
    # print("loss's mean: ",np.mean(loss))

    # swish_loss = loss_fn.loss(y_true, y_pre)
    # print("Swish loss: ", swish_loss)
    # print("Swish loss's mean: ", np.mean(swish_loss))
Example #6
def compute_out_given_wb(w, b):
    layer = Dense(32, 64)
    layer.weights = np.array(w)
    layer.biases = np.array(b)
    x = np.linspace(-1, 1, 10 * 32).reshape([10, 32])
    return layer.forward(x)

    y_true = np.array([[.01, .99]])

    # Layers Generation
    dense = Dense(2, W1, b1)
    dense2 = Dense(2, W2, b2)

    activation1 = Sigmoid()
    # activation2=Sigmoid()
    activation2 = Activation("sigmoid")

    loss_func = MSE()

    # Forward Pass
    # Dense -> Activation -> Dense -> Activation -> y_pred

    z1 = dense.forward(x)
    a1 = activation1.forward(z1)
    print("Activation Value:", a1)

    z2 = dense2.forward(a1)
    a2 = activation2.forward(z2)
    y_pred = a2

    loss = loss_func.loss(y_true, y_pred)

    print("Individual Loss:", loss)
    total_loss = np.mean(loss)
    print("Total Loss:", total_loss)

    # Backward Propagation
    dLdy_pred = loss_func.gradient(y_true, y_pred)
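
The MSE class called above is not shown; a minimal sketch consistent with how loss and gradient are used here, assuming the conventional 0.5 * (y_true - y_pred)^2 per-element form (so that the gradient is simply y_pred - y_true):

import numpy as np

class MSE:
    def loss(self, y_true, y_pred):
        # Per-element squared error; the snippet averages it afterwards with np.mean.
        return 0.5 * (y_true - y_pred) ** 2

    def gradient(self, y_true, y_pred):
        # Derivative of 0.5 * (y_true - y_pred)^2 with respect to y_pred.
        return y_pred - y_true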