Example 1
    def test_import_model():
        import joblib
        import numpy as np
        import pandas as pd
        import matplotlib.pyplot as plt
        from random_utils import feature_norm, plot_decision_boundary

        file_name = "cubic_model.plk"
        basic_NN = joblib.load(file_name)
        csv_file = r'C:\Users\josia\Desktop\Josiah_Folder\UNI\Semester_1\PEP1\robotics_club\YOLOv3_tiny\labelled_data2D_3.csv'

        plots = pd.read_csv(csv_file)
        plots = plots.to_numpy()

        # features in the first two columns, transposed to shape (2, m)
        X = plots[:, [0, 1]].T

        # z-score normalisation; mu and sigma would be needed to normalise new inputs
        X, mu, sigma = feature_norm(X)

        # labels from the last column as a (1, m) row vector
        Y = plots[:, -1].astype(np.uint8).reshape(1, -1)

        fig_num = 1

        # forward pass over the whole normalised training set
        AL = basic_NN.forward_prop(X)

        print(f'accuracy: {np.mean((AL >= 0.5) == Y) * 100:.2f}%')

        plot_decision_boundary(X, Y, basic_NN, fig_num)
        plt.show()
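
feature_norm has to return the normalised features together with the statistics it used, since the test unpacks three values. A minimal sketch of such a helper, assuming plain per-feature z-score normalisation (the real random_utils.feature_norm may differ):

    import numpy as np

    def feature_norm(X):
        # X has shape (n_features, m); normalise each feature row to
        # zero mean and unit variance
        mu = np.mean(X, axis=1, keepdims=True)
        sigma = np.std(X, axis=1, keepdims=True)
        return (X - mu) / sigma, mu, sigma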
Example 2
    def test_forward_prop():
        import numpy as np
        import pandas as pd
        import matplotlib.pyplot as plt
        from random_utils import feature_norm, plot_decision_boundary
        # NOTE: assumed import path; JTDNN and its submodules belong to the
        # author's framework
        from jtdnn import JTDNN, layers, activations, optimisers, losses

        file_name = 'cubic_model.plk'
        basic_NN = JTDNN()
        input = basic_NN.input(input_dims=(2, None))
        Z1 = layers.Linear(output_dims=(10, None),
                           initialiser="glorot",
                           name="linear")(input)  #10
        A1 = activations.Sigmoid(Z1, name='sigmoid')
        Z2 = layers.Linear(output_dims=(5, None),
                           initialiser="glorot",
                           name="linear")(A1)  # 5
        A2 = activations.Sigmoid(Z2, name='sigmoid')
        Z3 = layers.Linear(output_dims=(1, None),
                           initialiser="glorot",
                           name="linear")(A2)
        output = activations.Sigmoid(Z3, name='sigmoid')
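        # The calls above follow a Keras-style functional API: each layer
        # object is applied to the previous tensor, wiring up a 2-10-5-1
        # fully connected network with sigmoid activations throughout.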

        optimiser = optimisers.GradientDesc(learning_rate=0.01)

        fig_num_cost = 2

        loss = losses.BinaryCrossEntropy(basic_NN,
                                         store_cost=True,
                                         fig_num=fig_num_cost)

        basic_NN.compile(
            input=input,
            output=output,
            lambd=0.01,
            loss=loss,
            metrics="Accuracy",
            optimiser=optimiser)  # plain GradientDesc over the full batch is batch gradient descent (BGD)

        csv_file = r'C:\Users\josia\Desktop\Josiah_Folder\UNI\Semester_1\PEP1\robotics_club\YOLOv3_tiny\labelled_data2D_3.csv'

        plots = pd.read_csv(csv_file)
        plots = plots.to_numpy()

        X = plots[:, [0, 1]].T

        X, mu, sigma = feature_norm(X)

        Y = plots[:, -1].astype(np.uint8).reshape(1, -1)

        fig_num_dec = 1

        # full-batch training loop: forward pass, periodic cost report,
        # backward pass, parameter update
        for itera in range(1_000_000):
            AL = basic_NN.forward_prop(X)
            if itera % 10_000 == 0:
                # fresh name so the BinaryCrossEntropy object bound to `loss` is not shadowed
                cost = basic_NN.compute_cost(Y)
                print(cost)
                # print('accuracy after iteration %d: %4.2f' % (itera, np.mean((AL >= 0.5) == Y) * 100))
            basic_NN.back_prop(Y)

            basic_NN.update_weights()
        basic_NN.plot_cost(title="Cost per Iteration",
                           xlabel="Number of iterations (10,000s)",
                           ylabel="Cost")
        print(basic_NN.get_costs())
        plot_decision_boundary(X, Y, basic_NN, fig_num_dec)

        plt.show()
        # joblib.dump(basic_NN, file_name)  # check that the model object can be pickled
        # print(A1, A2, output, Z1, Z2, Z3)  # prints all the graph objects
        """ sequence generated from print statements
Example 3
    def test_mini_batches():
        import numpy as np
        import pandas as pd
        import matplotlib.pyplot as plt
        # mini_batch_generator is assumed to live in random_utils alongside
        # the other helpers used here
        from random_utils import (feature_norm, plot_decision_boundary,
                                  mini_batch_generator)
        # NOTE: assumed import path for the author's JTDNN framework
        from jtdnn import JTDNN, layers, activations, optimisers, losses

        file_name = 'cubic_model.plk'
        basic_NN = JTDNN()
        input = basic_NN.input(input_dims=(2, None))
        Z1 = layers.Linear(output_dims=(10, None),
                           initialiser="glorot",
                           name="linear")(input)  #10
        A1 = activations.Sigmoid(Z1, name='sigmoid')
        Z2 = layers.Linear(output_dims=(5, None),
                           initialiser="glorot",
                           name="linear")(A1)  # 5
        A2 = activations.Sigmoid(Z2, name='sigmoid')
        Z3 = layers.Linear(output_dims=(1, None),
                           initialiser="glorot",
                           name="linear")(A2)
        output = activations.Sigmoid(Z3, name='sigmoid')

        optimiser = optimisers.GradientDesc(learning_rate=0.001)

        fig_num_cost = 2

        loss = losses.BinaryCrossEntropy(basic_NN,
                                         store_cost=True,
                                         fig_num=fig_num_cost)

        basic_NN.compile(
            input=input,
            output=output,
            lambd=0.01,
            loss=loss,
            metrics="Accuracy",
            optimiser=optimiser
        )  # GradientDesc over the full batch is batch gradient descent (BGD)

        csv_file = r'C:\Users\josia\Desktop\Josiah_Folder\UNI\Semester_1\PEP1\robotics_club\YOLOv3_tiny\labelled_data2D_3.csv'

        plots = pd.read_csv(csv_file)
        plots = plots.to_numpy()

        X = plots[:, [0, 1]].T

        X, mu, sigma = feature_norm(X)

        Y = plots[:, -1].astype(np.uint8).reshape(1, -1)

        fig_num_dec = 1
        mini_batch_size = 64
        num_epochs = 10
        """
        for epoch in range(num_epochs):
            for mini_batch in mini_batch_generator(X, Y, mini_batch_size):
                print("shape of mini_batch_X: " + str(mini_batch[0].shape))
                print("shape of mini_batch_Y: " + str(mini_batch[1].shape))
        """
        """
        shape of mini_batch_X: (2, 64)
        shape of mini_batch_Y: (1, 64)
        shape of mini_batch_X: (2, 64)
        shape of mini_batch_Y: (1, 64)
        shape of mini_batch_X: (2, 64)
        shape of mini_batch_Y: (1, 64)
        shape of mini_batch_X: (2, 7)
        shape of mini_batch_Y: (1, 7)
        """
        for epoch in range(num_epochs):
            mini_batch_num = 1
            for mini_batch_X, mini_batch_Y in mini_batch_generator(
                    X, Y, mini_batch_size):
                """
                #random experiment here
                if mini_batch_X.shape[-1] != mini_batch_size:
                    print(mini_batch_X.shape)
                    continue
                """
                AL = basic_NN.forward_prop(mini_batch_X)

                cost = basic_NN.compute_cost(mini_batch_Y)

                print(
                    'epoch %d accuracy after iteration %d: %4.2f' %
                    (epoch, mini_batch_num, np.mean(
                        (AL >= 0.5) == mini_batch_Y) * 100))
                basic_NN.back_prop(mini_batch_Y)
                basic_NN.update_weights()
                mini_batch_num += 1
        """
        for itera in range(1000000):
            AL = basic_NN.forward_prop(X)
            if itera % 10000 == 0:
                loss = basic_NN.compute_cost(Y)
                print(loss)
                #print('accuracy after iteration %d: %4.2f' % itera, np.mean((AL >= 0.5) == Y) * 100)
            basic_NN.back_prop(Y)
            
            basic_NN.update_weights()
        """
        basic_NN.plot_cost(title="Cost per Iteration",
                           xlabel="Number of iterations",
                           ylabel="Cost")

        plot_decision_boundary(X, Y, basic_NN, fig_num_dec)

        plt.show()
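
The recorded shapes above (three batches of 64 plus a remainder of 7) pin down the contract of mini_batch_generator: shuffle the samples, slice along the sample axis, and let the final batch be smaller. A minimal sketch under those assumptions (the actual helper may differ):

    import numpy as np

    def mini_batch_generator(X, Y, mini_batch_size):
        # X: (n_features, m), Y: (1, m); shuffle columns, then slice
        m = X.shape[1]
        permutation = np.random.permutation(m)
        X_shuffled, Y_shuffled = X[:, permutation], Y[:, permutation]
        for start in range(0, m, mini_batch_size):
            end = start + mini_batch_size
            yield X_shuffled[:, start:end], Y_shuffled[:, start:end]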
Example 4
    def load_and_test_mini_batches():
        import joblib
        import numpy as np
        import pandas as pd
        import matplotlib.pyplot as plt
        from random_utils import feature_norm, plot_decision_boundary
        # NOTE: assumed import path for the author's framework
        from jtdnn import optimisers, losses

        file_name = "cubic_model.plk"

        basic_NN = joblib.load(file_name)

        optimiser = optimisers.GradientDesc(learning_rate=0.05)

        fig_num_cost = 2

        loss = losses.BinaryCrossEntropy(basic_NN,
                                         store_cost=True,
                                         fig_num=fig_num_cost)

        basic_NN.compile(
            lambd=0.01, loss=loss, metrics="Accuracy", optimiser=optimiser
        )  # GradientDesc over the full batch is batch gradient descent (BGD)

        csv_file = r'C:\Users\josia\Desktop\Josiah_Folder\UNI\Semester_1\PEP1\robotics_club\YOLOv3_tiny\labelled_data2D_3.csv'

        plots = pd.read_csv(csv_file)
        plots = plots.to_numpy()

        X = plots[:, [0, 1]].T

        X, mu, sigma = feature_norm(X)

        Y = plots[:, -1].astype(np.uint8).reshape(1, -1)
        fig_num_dec = 1
        """
        mini_batch_size = 64
        num_epochs = 1000
        for epoch in range(num_epochs):
            mini_batch_num = 1
            for mini_batch_X, mini_batch_Y in mini_batch_generator(X, Y, mini_batch_size):
                AL = basic_NN.forward_prop(mini_batch_X)

                cost = basic_NN.compute_cost(mini_batch_Y)

                print('epoch %d accuracy after iteration %d: %4.2f' % (epoch, mini_batch_num, np.mean((AL >= 0.5) == mini_batch_Y) * 100))
                basic_NN.back_prop(mini_batch_Y)
                basic_NN.update_weights()
                mini_batch_num += 1
        """

        for itera in range(1_000_000):
            AL = basic_NN.forward_prop(X)
            if itera % 10_000 == 0:
                # fresh name so the loss object above is not shadowed
                cost = basic_NN.compute_cost(Y)
                print(cost)
                # print('accuracy after iteration %d: %4.2f' % (itera, np.mean((AL >= 0.5) == Y) * 100))
            basic_NN.back_prop(Y)

            basic_NN.update_weights()

        basic_NN.plot_cost(title="Cost per Iteration",
                           xlabel="Number of iterations",
                           ylabel="Cost")

        plot_decision_boundary(X, Y, basic_NN, fig_num_dec)

        plt.show()
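
Every test ends with plot_decision_boundary(X, Y, model, fig_num). A common way to implement such a helper is to evaluate the model on a dense grid and shade the two predicted regions; a sketch under that assumption (not necessarily the author's implementation):

    import numpy as np
    import matplotlib.pyplot as plt

    def plot_decision_boundary(X, Y, model, fig_num):
        # grid spanning the (normalised) feature space
        x_min, x_max = X[0].min() - 0.5, X[0].max() + 0.5
        y_min, y_max = X[1].min() - 0.5, X[1].max() + 0.5
        xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
                             np.linspace(y_min, y_max, 200))
        grid = np.c_[xx.ravel(), yy.ravel()].T  # shape (2, 200 * 200)
        preds = (model.forward_prop(grid) >= 0.5).reshape(xx.shape)

        plt.figure(fig_num)
        plt.contourf(xx, yy, preds, alpha=0.3)
        plt.scatter(X[0], X[1], c=Y.ravel(), edgecolors='k')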