Example #1
def Franke_plot(X,
                X_train,
                X_test,
                z_train,
                eta=0,
                lmbd=0,
                batch_size=0,
                n_hidden_neurons=0,
                epochs=0):

    # Hard-coded optimal parameters from tuning; they override whatever was
    # passed in through the signature above.
    eta = 4.33148322e-03
    lmbd = 3.75469422e-11
    batch_size = 2
    n_hidden_neurons = 422
    epochs = 197
    # For these parameters we got: MSE = 0.004922969949345497, R2-score = 0.9397964833194705

    n_categories = 1

    dnn = NN(X_train,
             z_train,
             eta=eta,
             lmbd=lmbd,
             epochs=int(epochs),
             batch_size=batch_size,
             n_hidden_neurons=n_hidden_neurons,
             n_categories=n_categories,
             cost_grad='MSE',
             activation='sigmoid',
             activation_out='ELU')
    dnn.train_and_validate()
    z_pred = dnn.predict_probabilities(X)
    # z, x and y below come from the enclosing scope, not the argument list
    print(mean_squared_error(z, z_pred))
    print(r2_score(z, z_pred))
    xsize = x.shape[0]
    ysize = y.shape[0]

    rows = np.arange(ysize)
    cols = np.arange(xsize)

    [X1, Y1] = np.meshgrid(cols, rows)

    z_mesh = np.reshape(z, (ysize, xsize))
    z_predict_mesh = np.reshape(z_pred, (ysize, xsize))

    fig, axs = plt.subplots(1, 2, subplot_kw={'projection': '3d'})

    ax = fig.axes[0]
    ax.plot_surface(X1, Y1, z_predict_mesh, cmap=cm.viridis, linewidth=0)
    ax.set_title('Fitted Franke')

    ax = fig.axes[1]
    ax.plot_surface(X1, Y1, z_mesh, cmap=cm.viridis, linewidth=0)
    ax.set_title('Real Franke')

    plt.xlabel('X')
    plt.ylabel('Y')

    plt.show()
Example #2
def Franke_plot(X, X_train, X_test):
    eta = 4.33148322e-03
    lmbd = 3.75469422e-11
    batch_size = 2
    n_hidden_neurons = 422
    epochs = 197

    n_categories = 1

    dnn = NN(X_train,
             z_train,
             eta=eta,
             lmbd=lmbd,
             epochs=epochs + 1,
             batch_size=batch_size,
             n_hidden_neurons=n_hidden_neurons,
             n_categories=n_categories,
             cost_grad='MSE',
             activation='sigmoid',
             activation_out='ELU')
    dnn.train(X)
    z_pred = dnn.y_predict_epoch[epochs]

    print(mean_squared_error(z, z_pred))
    print(r2_score(z, z_pred))
    xsize = x.shape[0]
    ysize = y.shape[0]

    rows = np.arange(ysize)
    cols = np.arange(xsize)

    [X, Y] = np.meshgrid(cols, rows)

    z_mesh = np.reshape(z, (ysize, xsize))
    z_predict_mesh = np.reshape(z_pred, (ysize, xsize))

    fig, axs = plt.subplots(1, 2, subplot_kw={'projection': '3d'})
    # plt.figure()

    ax = fig.axes[0]
    ax.plot_surface(X, Y, z_predict_mesh, cmap=cm.viridis, linewidth=0)
    ax.set_title('Fitted terrain cut')

    ax = fig.axes[1]
    ax.plot_surface(X, Y, z_mesh, cmap=cm.viridis, linewidth=0)
    ax.set_title('Terrain cut')

    plt.xlabel('X')
    plt.ylabel('Y')

    plt.show()
Example #3
    def __init__(self, window=False):

        self.length = 0
        self.history = []
        self.lastMove = None
        self.brain = NN([24, 20, 5, 4])
        self.isDead = False
        self.speed = 20
        self.rows = 50
        self.cols = 50
        self.width = self.cols * self.speed
        self.height = self.rows * self.speed
        self.pos = Vector(self.speed * self.cols / 2,
                          self.speed * self.rows / 2)
        self.food = Vector(500, random.randrange(0, self.height, self.speed))
        self.window = False

        if window:
            self.window = True
            self.display_screen = pygame.display.set_mode(
                (self.width, self.height))
            pygame.display.set_caption("Snake game")
            self.clock = pygame.time.Clock()
Example #4
def Franke_plot_overfitting(X_train,
                            X_test,
                            eta=0,
                            lmbd=0,
                            batch_size=0,
                            n_hidden_neurons=0,
                            epochs=0):

    # Hard-coded optimal parameters from tuning; they override whatever was
    # passed in through the signature above.
    eta = 3.16227766e-01
    lmbd = 2.68269580e-08
    batch_size = 1
    n_hidden_neurons = 57
    epochs = 1000
    n_categories = 1

    np.random.seed(seed)
    dnn = NN(X_train,
             z_train,
             eta=eta,
             lmbd=lmbd,
             epochs=int(epochs),
             batch_size=batch_size,
             n_hidden_neurons=n_hidden_neurons,
             n_categories=n_categories,
             cost_grad='MSE',
             activation='sigmoid',
             activation_out='ELU')
    dnn.train_and_validate(X_val, z_val, MSE_store=True, validate=False)
    MSE_val, MSE_train = dnn.MSE_epoch()
    epo = np.arange(len(MSE_train))
    plt.plot(epo, MSE_val, label='MSE val')
    plt.plot(epo, MSE_train, label='MSE train')
    plt.xlabel("Number of epochs")
    plt.ylabel("MSE")
    plt.title("MSE vs epochs")
    plt.legend()
    plt.show()
Example #5
def run_NN():  # renamed from NN() so the function does not shadow the NN class it instantiates
    epochs = 20
    batch_size = 10
    eta = 15
    lmbd = 0.01
    n_hidden_neurons = 30
    n_categories = 2

    DNN = NN(X_train_scaled,
             y_train,
             eta=eta,
             lmbd=lmbd,
             epochs=epochs,
             batch_size=batch_size,
             n_hidden_neurons=n_hidden_neurons,
             n_categories=n_categories,
             cost_grad='crossentropy',
             activation='sigmoid',
             activation_out='ELU')
    DNN.train()
    test_predict = DNN.predict(X_test_scaled)
    test_predict1 = DNN.predict_probabilities(X_test_scaled)[:, 1:2]
    #
    # accuracy score from scikit library
    #print("Accuracy score on test set: ", accuracy_score(y_test, test_predict))
    #
    # def accuracy_score_numpy(Y_test, Y_pred):
    #     return np.sum(Y_test == Y_pred) / len(Y_test)

    false_pos, true_pos = roc_curve(y_test, test_predict1)[0:2]
    print("Area under curve ST: ", auc(false_pos, true_pos))
    plt.plot([0, 1], [0, 1], "k--")
    plt.plot(false_pos, true_pos)
    plt.xlabel("False Positive rate")
    plt.ylabel("True Positive rate")
    plt.title("ROC curve gradient descent")
    plt.show()
Example #6
from NeuralNetwork import NeuralNetwork as NN
import numpy

nn = NN(3, 3, 3, 0.3)
Example #7
negative_ys = negative_ys[:-nres]

# select train dataset and test dataset with ratio 8:2
train_xs = np.vstack((positive_xs[:-2 * pslide], negative_xs[:-2 * nslide]))
train_ys = np.concatenate(
    (positive_ys[:-2 * pslide], negative_ys[:-2 * nslide]))
test_xs = np.vstack((positive_xs[-2 * pslide:], negative_xs[-2 * nslide:]))
test_ys = np.concatenate(
    (positive_ys[-2 * pslide:], negative_ys[-2 * nslide:]))

epochs = 100

# constant lr
print("### Constant Learning Rate ###")
nn = NN([xs.shape[1], 64, 64, len(set(ys))], ["relu", "relu", "softmax"],
        lr_init=0.01,
        regularization="L2",
        regularization_lambda=0.1)
lr_const_BP_loss = []
for epoch in tqdm(range(epochs)):
    nn.train(train_xs, train_ys)
    lr_const_BP_loss.append(nn.loss)
lr_const_BP_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1) == test_ys)

# exponential decay lr
print("\n### Exponential Decay Learning Rate ###")
nn = NN([xs.shape[1], 64, 64, len(set(ys))], ["relu", "relu", "softmax"],
        lr_init=0.01,
        lr_decay=0.99,
        lr_min=0.0001,
        regularization="L2",
        regularization_lambda=0.1)
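# (The source is cut off here. A likely continuation, mirroring the
#  constant-lr training loop above -- an assumption, not the original code.)
lr_decay_BP_loss = []
for epoch in tqdm(range(epochs)):
    nn.train(train_xs, train_ys)
    lr_decay_BP_loss.append(nn.loss)
lr_decay_BP_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1) == test_ys)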
Example #8
    np.save("chr1_IH1", sortedPop1[0].IHWeights)
    np.save("chr1_HO1", sortedPop1[0].HOWeights)
    np.save("chr1_IH2", sortedPop1[1].IHWeights)
    np.save("chr1_HO2", sortedPop1[1].HOWeights)

    np.save("chr2_IH1", sortedPop2[0].IHWeights)
    np.save("chr2_HO1", sortedPop2[0].HOWeights)
    np.save("chr2_IH2", sortedPop2[1].IHWeights)
    np.save("chr2_HO2", sortedPop2[1].HOWeights)


while True:
    curChromosone1 = pop1.chromosones[pop1.curChromosone]
    curChromosone2 = pop2.chromosones[pop2.curChromosone]
    NN1 = NN()
    NN2 = NN()

    game = CT()
    game_over = False
    score1 = 0
    score2 = 0

    while not game_over:
        if (iterationCounter % 20 == 0):
            print(score1)
            print(score2)
            game.print_board(game.board)

        if (iterationCounter % 1000 == 0):
            saveState(pop1, pop2)
Example #9
# Let's verify that our targets are correct
# for type in range(3):
# 	for i in range(dataset[type][1].shape[0]):
# 		print(dataset[type][0][i].astype(np.uint8))
# 		misc.imsave(
# 			'images/{0}-{1}-{2}.png'.format(
# 				type,i,np.argmax(dataset[type][1][i])
# 				),
# 			dataset[type][0][i].astype(np.uint8)
# 		)

nn = NN(
	# For conv nets we take
	# (None, 1, height, width)
	batch_size=batch_size,
	input_shape=(32, 32),
	# We want to classify 10 classes
	n_out=10
)

# Add our convolution
# Which takes the parameters
# n_kerns, height, and width
# nn.add(
# 	'Convolution',
# 	n_kerns=115,
# 	height=12,
# 	width=12
# )

# # Now we want to add pooling
Example #10
    arrU, arrV = Fun.readFile(args.fileName, args.inNeurones, args.outNeurones)
else:
    arrU, arrV = Fun.readFile(args.fileName, args.inNeurones, 1)
    Fun.translate(args.translate, arrV)

extraLabel = ''

if '' != args.querryFile:
    qrrU, qrrV = Fun.readFile(args.querryFile, args.inNeurones,
                              args.outNeurones)
    if args.extraQuerry is True:
        args.hidNeurones += args.hidNeurones
        extraLabel = ', test data'

for n in range(len(args.hidNeurones)):
    network = (NN(args.inNeurones, args.hidNeurones[n], args.outNeurones,
                  args.alpha, args.beta, args.bias, args.outputLinear))

    if ('' != args.querryFile) and (args.extraQuerry is True):
        if n == (len(args.hidNeurones) / 2):
            extraLabel = ', training data'
            args.querryFile = ''

    it = 0
    errX = []
    errY = []
    condition = True

    while condition is True:
        it += 1

        for k in range(len(arrU)):
Example #11
from NeuralNetwork import NeuralNetwork as NN
import random

nn = NN(2,[2,3,5],2)

data = [{'target':[0.8,0.4], 'input':[0.8,0.1]},
        {'target':[0.8,0.4], 'input':[0.1,0.8]},
        {'target':[0.4,0.8], 'input':[0.8,0.8]},
        {'target':[0.4,0.8], 'input':[0.1,0.1]} ]

for _ in range(10000):
  random.shuffle(data)
  for dat in data:
    nn.train(dat['input'],dat['target'])

answerkey = [True,False]
print('True XOR True = {}'.format(answerkey[nn.predict([0.8,0.8]).argmax()]))
print('True XOR False = {}'.format(answerkey[nn.predict([0.8,0.1]).argmax()]))
print('False XOR True = {}'.format(answerkey[nn.predict([0.1,0.8]).argmax()]))
print('False XOR False = {}'.format(answerkey[nn.predict([0.1,0.1]).argmax()]))
Example #12
def Plots(epochs, AUC_time_plot = 0, ROC_plot = 0, Lift_plot_test_NN = 0, Lift_plot_train_NN = 0, GD_plot = 0, MB_GD_plot = 0, Stoch_GD_plot = 0,
          Newton_plot = 0, Scatter_GD_plot = 0):

    if (ROC_plot == 1 or AUC_time_plot == 1):
        GRAD_start_time = time.time()
        np.random.seed(seed)
        beta_init = np.random.randn(X_train.shape[1],1)
        w = Weight(X_train,y_train,beta_init,6.892612104349695e-05, epochs)
        final_betas_grad,cost = w.train(w.gradient_descent)
        prob_grad, y_pred_grad = classification(X_test, final_betas_grad, y_test)[0:2]
        false_pos_grad, true_pos_grad = roc_curve(y_test, prob_grad)[0:2]
        AUC_GRAD = auc(false_pos_grad, true_pos_grad)
        print("Area under curve gradient: ", AUC_GRAD)
        GRAD_time = time.time() - GRAD_start_time

        SGD_start_time = time.time()
        np.random.seed(seed)
        beta_init = np.random.randn(X_train.shape[1], 1)
        w2 = Weight(X_train, y_train, beta_init, 0.0007924828983539169, epochs)
        final_betas_ST, _ = w2.train(w2.stochastic_gradient_descent)
        prob_ST, y_pred_ST = classification(X_test, final_betas_ST, y_test)[0:2]
        false_pos_ST, true_pos_ST = roc_curve(y_test, prob_ST)[0:2]
        AUC_SGD = auc(false_pos_ST, true_pos_ST)
        print("Area under curve ST: ", AUC_SGD)
        SGD_time = time.time() - SGD_start_time

        """np.random.seed(seed)
        beta_init = np.random.randn(X_train.shape[1],1)
        w3 = Weight(X_train,y_train,beta_init,0.001, 20)
        final_betas_Newton,_ = w3.train(w3.newtons_method)
        prob_Newton, y_pred_Newton = classification(X_train,final_betas_Newton, y_test)[0:2]
        false_pos_Newton, true_pos_Newton = roc_curve(y_test, prob_Newton)[0:2]
        print("Area under curve Newton: ", auc(false_pos_Newton, true_pos_Newton))"""

        AUC_MB5 = 0
        MB5_time = 0
        AUC_MB1000 = 0
        MB1000_time = 0
        AUC_MB6000 = 0
        MB6000_time = 0
        AUC_MB = 0
        false_pos_MB = 0
        true_pos_MB = 0
        if(AUC_time_plot != 0):
            AUC_MB5, MB5_time, _, _ = mini_batch_SGD(0.0038625017292608175, 5, epochs)
            AUC_MB1000, MB1000_time, _, _ = mini_batch_SGD(0.0009501185073181439, 1000, epochs)
            AUC_MB6000, MB6000_time, _ ,_ = mini_batch_SGD(0.0001999908383831537, 6000, epochs)
            return AUC_SGD, AUC_GRAD, AUC_MB5, AUC_MB1000, AUC_MB6000, SGD_time, GRAD_time, MB5_time, MB1000_time, MB6000_time
        else:
            AUC_MB, _,false_pos_MB, true_pos_MB  = mini_batch_SGD(0.0038625017292608175, 32, epochs)

        np.random.seed(seed)
        beta_init = np.random.randn(X_train.shape[1], 1)
        w4 = Weight(X_train, y_train, beta_init, 0.0007924828983539169, epochs)
        final_betas_ST_Skl, _ = w4.train(w4.stochastic_gradient_descent_Skl)
        prob_ST_Skl, y_pred_ST_Skl = classification(X_test,final_betas_ST_Skl[0], y_test)[0:2]
        false_pos_ST_Skl, true_pos_ST_Skl = roc_curve(y_test, prob_ST_Skl)[0:2]
        print("Area under curve ST_skl: ", auc(false_pos_ST_Skl, true_pos_ST_Skl))

        epochs = 20
        batch_size = 25
        eta = 0.1
        lmbd = 0.01
        n_hidden_neurons = 41
        ####################
        # epochs = 20
        # batch_size = 26
        # eta = 3.14230708e+00
        # lmbd = 1.25472709e-02
        # n_hidden_neurons = 66

        np.random.seed(seed)
        n_categories = 1

        dnn = NN(X_train, y_train, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size,
                    n_hidden_neurons=n_hidden_neurons, n_categories=n_categories,
                    cost_grad = 'crossentropy', activation = 'sigmoid', activation_out='sigmoid')
        dnn.train_and_validate()

        y_predict = dnn.predict_probabilities(X_test)

        false_pos_NN, true_pos_NN = roc_curve(y_test, y_predict)[0:2]
        print("AUC score NN: ", auc(false_pos_NN, true_pos_NN))

        plt.plot([0, 1], [0, 1], "k--")
        plt.plot(false_pos_grad, true_pos_grad,label="Gradient")
        plt.plot(false_pos_ST, true_pos_ST, label="Stoch")
        plt.plot(false_pos_ST_Skl, true_pos_ST_Skl, label="Stoch_Skl")
        plt.plot(false_pos_MB, true_pos_MB, label="Mini")
        # plt.plot(false_pos_Newton, true_pos_Newton, label="Newton")
        plt.plot(false_pos_NN, true_pos_NN, label='NeuralNetwork')
        plt.legend()
        plt.xlabel("False Positive rate")
        plt.ylabel("True Positive rate")
        plt.title("ROC curve")
        plt.show()

    """Creates cumulative gain charts/lift plots for Neural network. The two optimal parameters sets from tuning are listed below"""
    if (Lift_plot_test_NN == 1):

        np.random.seed(seed)

        # epochs = 20
        # batch_size = 26
        # eta = 3.14230708e+00
        # lmbd = 1.25472709e-02
        # n_hidden_neurons = 66
        epochs = 20
        batch_size = 25
        eta = 0.1
        lmbd = 0.01
        n_hidden_neurons = 41

        n_categories = 1

        dnn = NN(X_train, y_train, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size,
                 n_hidden_neurons=n_hidden_neurons, n_categories=n_categories,
                 cost_grad='crossentropy', activation='sigmoid', activation_out='sigmoid')
        dnn.train_and_validate()

        y_predict_proba = dnn.predict_probabilities(X_test)
        y_predict_proba_tuple = np.concatenate((1 - y_predict_proba, y_predict_proba), axis=1)

        pos_true = y_test.sum()
        pos_true_perc = pos_true / len(y_test)

        x = np.linspace(0, 1, len(y_test))
        m = 1 / pos_true_perc

        best_line = np.zeros((len(x)))
        for i in range(len(x)):
            best_line[i] = m * x[i]
            if (x[i] > pos_true_perc):
                best_line[i] = 1

        x_, y_ = skplt.helpers.cumulative_gain_curve(y_test, y_predict_proba_tuple[:, 1])

        Score = (np.trapz(y_, x=x_) - 0.5) / (np.trapz(best_line, dx=(1 / len(y_predict_proba))) - 0.5)
        print('Area ratio score (test)', Score)  # Area ratio = 0.49129354889528054 for NN predictions on the test set
        perc = np.linspace(0, 100, len(y_test))
        plt.plot(x_*100, y_*100)
        plt.plot(perc, best_line*100)
        plt.plot(perc, perc, "k--")

        plt.xlabel("Percentage of clients")
        plt.ylabel("Cumulative % of defaults")
        plt.title("Cumulative Gain Chart for Test Data")
        plt.show()

        """Let's you insert a threshold and classify"""
        _, y_predict, y_predict_tot = classification(y_prob_input=y_predict_proba, threshold=0.5)
        pos = y_predict.sum()
        neg = len(y_predict) - pos
        pos_perc = (pos / len(y_predict))
        neg_perc = (neg / len(y_predict))
        print("default: ", pos_perc)
        print("Non-default: ", neg_perc)

    if (Lift_plot_train_NN == 1):

        np.random.seed(seed)

        # epochs = 20
        # batch_size = 26
        # eta = 3.14230708e+00
        # lmbd = 1.25472709e-02
        # n_hidden_neurons = 66
        epochs = 20
        batch_size = 25
        eta = 0.1
        lmbd = 0.01
        n_hidden_neurons = 41
        n_categories = 1

        dnn = NN(X_train, y_train, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size,
                 n_hidden_neurons=n_hidden_neurons, n_categories=n_categories,
                 cost_grad='crossentropy', activation='sigmoid', activation_out='sigmoid')
        dnn.train_and_validate()

        y_predict_proba = dnn.predict_probabilities(X_train)
        y_predict_proba_tuple = np.concatenate((1 - y_predict_proba, y_predict_proba), axis=1)

        pos_true = y_train.sum()
        pos_true_perc = pos_true / len(y_train)

        x = np.linspace(0, 1, len(y_train))
        m = 1 / pos_true_perc

        best_line = np.zeros((len(x)))
        for i in range(len(x)):
            best_line[i] = m * x[i]
            if (x[i] > pos_true_perc):
                best_line[i] = 1

        x_, y_ = skplt.helpers.cumulative_gain_curve(y_train, y_predict_proba_tuple[:, 1])

        Score = (np.trapz(y_, x=x_) - 0.5) / (np.trapz(best_line, dx=(1 / len(y_predict_proba))) - 0.5)
        print('Area ratio score(train)', Score)
        perc = np.linspace(0, 100, len(y_train))
        plt.plot(x_ * 100, y_ * 100)
        plt.plot(perc, best_line * 100)
        plt.plot(perc, perc, "k--")

        plt.xlabel("Percentage of clients")
        plt.ylabel("Cumulative % of defaults")
        plt.title("Cumulative Gain Chart for Train Data")
        plt.show()

        """Let's you insert a threshold and classify"""
        _, y_predict, y_predict_tot = classification(y_prob_input=y_predict_proba, threshold=0.5)
        pos = y_predict.sum()
        neg = len(y_predict) - pos
        pos_perc = (pos / len(y_predict))
        neg_perc = (neg / len(y_predict))
        print("default: ", pos_perc)
        print("Non-default: ", neg_perc)

    beta_init = np.random.randn(X_train.shape[1], 1)
    w = Weight(X_train, y_train, beta_init, 0.0007924828983539169, epochs)

    if (GD_plot == 1):
        _, cost_all = w.train(w.gradient_descent)
        epoch = np.arange(len(cost_all))

        plt.plot(epoch, cost_all)
        plt.show()

    if (MB_GD_plot == 1):
        _, cost_all = w.train(w.mini_batch_gradient_descent)
        batch = np.arange(len(cost_all))

        plt.plot(batch, cost_all)
        plt.show()

    if (Stoch_GD_plot == 1):
        _, cost_all = w.train(w.stochastic_gradient_descent)
        batch = np.arange(len(cost_all))

        plt.plot(batch, cost_all)
        plt.show()

    if (Newton_plot == 1):
        _, cost_all = w.train(w.newtons_method)
        epoch = np.arange(len(cost_all))

        plt.plot(epoch, cost_all)
        plt.show()

    if (Scatter_GD_plot == 1):
        final_betas, _ = w.train(w.gradient_descent)
        prob_train = classification(X_train, final_betas)[0]
        x_sigmoid = np.dot(X_train, final_betas)
        plt.scatter(x_sigmoid, prob_train)
        plt.show()
Example #13
def corr(ans, res):
    num_corr = np.zeros(ans.shape[0])
    for i in range(ans.shape[0]):
        if (np.array_equal(ans[i], res[i])):
            num_corr[i] = 1
    return num_corr

vout = np.vectorize(convert_output)

x = np.zeros((10, 784))

for i in range(10):
    img = cv2.imread(os.path.join('custom_imgs', str(i + 1) + '.resized.JPG'), 0)
    img = cv2.bitwise_not(img)

    if(img.shape[0] < 28):
        img = np.append(img, np.zeros(((28 - img.shape[0]), 28)), axis = 0)
    elif(img.shape[1] < 28):
        img = np.append(img, np.zeros((28, (28 - img.shape[1]))), axis = 1)

    x[i] += np.reshape(img, (784,)) / 255

nn = NN(layers = [784, 800, 10], activations = ['sigmoid', 'softmax'])
nn.load()

res = vout(nn.fprop(x))

print("Predictions: ")
print(res)
Example #14
def Franke_plot_fit_3D(X,
                       z,
                       X_train,
                       X_test,
                       z_train,
                       z_test,
                       indicies,
                       eta=0,
                       lmbd=0,
                       batch_size=0,
                       n_hidden_neurons=0,
                       epochs=0):

    # Hard-coded optimal parameters from tuning; they override whatever was
    # passed in through the signature above.
    eta = 3.16227766e-01
    lmbd = 2.68269580e-08
    batch_size = 1
    n_hidden_neurons = 57
    epochs = 91
    n_categories = 1

    # With the parameters above we got these values for MSE and R2 for 10 000 points and no noise:
    #MSE z_test_predict:  0.0012524282064846637
    #R2 z_test_predict:  0.9851769055209932
    #MSE z_train_predict  0.0012329368999059254
    #R2 z_train_predict 0.9848613850303888

    # With the parameters above we got these values for MSE and R2 for 10 000 points with noise 0.1:
    #MSE z_test_predict:  0.027152301040701644
    #R2 z_test_predict:  0.71125231579683
    #MSE z_train_predict  0.027160342432850662
    #R2 z_train_predict 0.7113015602592969

    np.random.seed(seed)
    dnn = NN(X_train,
             z_train,
             eta=eta,
             lmbd=lmbd,
             epochs=int(epochs),
             batch_size=batch_size,
             n_hidden_neurons=n_hidden_neurons,
             n_categories=n_categories,
             cost_grad='MSE',
             activation='sigmoid',
             activation_out='ELU')
    dnn.train_and_validate()

    z_pred_test_unscaled = dnn.predict_probabilities(X_test)
    z_pred_train_unscaled = dnn.predict_probabilities(X_train)
    print("MSE z_test_predict: ",
          mean_squared_error(z_test, z_pred_test_unscaled))
    print("R2 z_test_predict: ", r2_score(z_test, z_pred_test_unscaled))
    print("MSE z_train_predict ",
          mean_squared_error(z_train, z_pred_train_unscaled))
    print("R2 z_train_predict", r2_score(z_train, z_pred_train_unscaled))

    X_train_, X_test_ = backshuffle(X, z, X_train, X_test, z_train, z_test,
                                    indicies)

    z_pred_test = dnn.predict_probabilities(X_test_)

    ysize_test = int(np.sqrt(z_pred_test.shape[0]))
    xsize_test = int(np.sqrt(z_pred_test.shape[0]))
    rows_test = np.linspace(0, 1, ysize_test)
    cols_test = np.linspace(0, 1, xsize_test)
    z_predict_mesh_test = np.reshape(z_pred_test, (ysize_test, xsize_test))
    [X1, Y1] = np.meshgrid(cols_test, rows_test)

    ####################

    z_pred_train = dnn.predict_probabilities(X_train_)

    ysize_train = int(np.sqrt(z_pred_train.shape[0]))
    xsize_train = int(np.sqrt(z_pred_train.shape[0]))
    rows_train = np.linspace(0, 1, ysize_train)
    cols_train = np.linspace(0, 1, xsize_train)
    z_predict_mesh_train = np.reshape(z_pred_train, (ysize_train, xsize_train))
    [X2, Y2] = np.meshgrid(cols_train, rows_train)

    ####################

    ysize = int(np.sqrt(z.shape[0]))
    xsize = int(np.sqrt(z.shape[0]))
    rows = np.linspace(0, 1, ysize)
    cols = np.linspace(0, 1, xsize)
    z_mesh = np.reshape(z, (ysize, xsize))
    [X3, Y3] = np.meshgrid(cols, rows)

    fig, axs = plt.subplots(1, 3, subplot_kw={'projection': '3d'})

    ax = fig.axes[0]
    ax.scatter3D(X1,
                 Y1,
                 z_predict_mesh_test,
                 cmap=cm.viridis,
                 linewidth=0,
                 s=1.3)
    ax.set_title('Fitted test')

    ax = fig.axes[1]
    ax.scatter3D(X2,
                 Y2,
                 z_predict_mesh_train,
                 cmap=cm.viridis,
                 linewidth=0,
                 s=1.3)
    ax.set_title('Fitted train')

    ax = fig.axes[2]
    ax.plot_surface(X3, Y3, z_mesh, cmap=cm.viridis, linewidth=0)
    ax.set_title('Real Franke')

    plt.xlabel('X')
    plt.ylabel('Y')

    plt.show()
Example #15
from NeuralNetwork import NeuralNetwork as NN
import json
import threading
import random
import MNIST
import numpy as np

normalize = np.vectorize(lambda x: float(x / 255.0))

nn = NN(28*28,[32,16],10)
nn.learingRate = .1
answerkey = [0,1,2,3,4,5,6,7,8,9]
train_images =  MNIST.get_images('mnist/train-images-idx3-ubyte')
test_images  =  MNIST.get_images('mnist/t10k-images-idx3-ubyte')
train_labels =  MNIST.get_labels('mnist/train-labels-idx1-ubyte')
test_labels  =  MNIST.get_labels('mnist/t10k-labels-idx1-ubyte')

if train_images is None or test_images is None or train_labels is None or test_labels is None:
    raise Exception('Retrieved Data is NONE')

test_data = []
train_data = []

data_amount = len(train_images)
for i in range(data_amount):
    label=[0,0,0,0,0,0,0,0,0,0]
    label[train_labels[i]]=1
    train_data.append({'input':train_images[i],'target':label})
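
# (The snippet is truncated here. A presumable continuation that fills
#  test_data the same way -- an assumption based on the loop above.)
for i in range(len(test_images)):
    label = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    label[test_labels[i]] = 1
    test_data.append({'input': test_images[i], 'target': label})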
    
Example #16
import numpy as np
from NeuralNetwork import NN

network = NN(3, 3, 3, 100000, .00001, .0001, 0, 0)

X = np.matrix('1,2,3;4,3,6;1,2,5;0,1,2;1,4,5;1,0,1')
y = np.matrix('0;2;1;1;1;2')
X_cv = np.matrix('1,0,0;1,2,4;10,7,4')
y_cv = np.matrix('0;1;1')

print(network.fit(X, y))
print(network.predict(X_cv))
network.cross_validate(X_cv, y_cv)
Example #17
from NeuralNetwork import NeuralNetworks as NN
from csv_data_reader import CSVObject as csv_o

batch = 10000

data = csv_o(2, 1)
inputs = [0 for i in range(data.arr_length())]
targets = [0]

nn = NN(2, 20, 1)

for i in range(batch):
    train_data = data.result()
    nn.train(train_data[0], train_data[1])
    #print(train_data)
    if (i % 5 == 0):
        print("İşleniyor: ", round((i / batch) * 100), "%")

nn.Predict([0, 1])
Example #18
from NeuralNetwork import NN
import numpy as np


o = NN(0.1, 2000)
Xtr = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).T
Ytr = np.array([[0, 1, 1, 0]])
layerInfo = [[2, 'tanh'], [1, 'sigmoid']]
o.train(Xtr, Ytr, layerInfo, 'Log')
print('Log ->\n', o.test(Xtr, layerInfo))
o.train(Xtr, Ytr, layerInfo, 'Quadratic')
print('Quadratic ->\n', o.test(Xtr, layerInfo))
Example #19
  print('Reading database')
  #data = readDatabase('networkVarInput.txt')
  data = readDatabase('DB_TALOS_186_secStruct_nn')

  print('Making training set')
  trainingSet = makeTrainingDataMissingShift(data)

  nIn  = len(trainingSet[0][0])
  nOut = len(trainingSet[0][1])
  nHid = 3

  print("Inputs", nIn)
  print("Hidden", nHid)
  print("Output", nOut)

  nn = NN(nIn, nHid, nOut, testFuncMissingShift)

  nn.train(trainingSet, iterations=50, N=0.5, M=0.2)

  #testFuncMissingShift(nn, trainingSet)
  """

  for iter in range(1):

    print(iter)

    # Train predict angles
    print('Reading database')
    data = readDatabase('DB_TALOS_186_secStruct_nn')
    print('Read %d entries' % len(data))
Example #20
def main():
    datapath = '../data/'
    data = read_data(datapath)
    [X, y] = create_features(data)

    part1 = False
    part2 = True
    part3 = False
    part4 = False
    part5 = False
    '''---- Clustering ---'''
    paramlist_clustering = list(np.arange(2, 15, 1))
    model_KM = KMCluster(data=X,
                         target=y,
                         num_clusters=paramlist_clustering,
                         plot=True,
                         title='Shopping_Intention')
    model_EM = EMCluster(data=X,
                         target=y,
                         num_clusters=paramlist_clustering,
                         plot=True,
                         title='Shopping_Intention')

    if part1:
        model_KM.tester()
        model_EM.tester()
    '''---- Dimension Reduction ---'''
    paralist_dr = list(np.arange(1, 7, 1))
    model_PCA = PCA_DR(data=X,
                       target=y,
                       dim_param=paralist_dr,
                       plot=True,
                       title='Shopping_Intention')
    model_ICA = ICA_DR(data=X,
                       target=y,
                       dim_param=paralist_dr,
                       plot=True,
                       title='Shopping_Intention')
    model_RP = RP_DR(data=X,
                     target=y,
                     dim_param=paralist_dr,
                     plot=True,
                     title='Shopping_Intention')
    model_RF = RF_DR(data=X,
                     target=y,
                     dim_param=paralist_dr,
                     plot=True,
                     title='Shopping_Intention')

    if part2:
        model_PCA.tester()
        model_ICA.tester()
        model_RP.tester()
        model_RF.tester()
    '''---- Clustering After DR ---'''
    pcaX = model_PCA.run(2)
    icaX = model_ICA.run(3)
    rpX = model_RP.run(2)
    rfX = model_RF.run(0.95)  #threshold = 0.95

    if part3:
        title = 'Shopping_Intention-PCA'
        model_KM_PCA = KMCluster(data=pcaX,
                                 target=y,
                                 num_clusters=paramlist_clustering,
                                 plot=True,
                                 title=title)
        model_KM_PCA.tester()
        model_EM_PCA = EMCluster(data=pcaX,
                                 target=y,
                                 num_clusters=paramlist_clustering,
                                 plot=True,
                                 title=title)
        model_EM_PCA.tester()
        plot_all(ds_title='Shopping_Intention',
                 title=title,
                 paramlist=paramlist_clustering).run()

        title = 'Shopping_Intention-ICA'
        model_KM_ICA = KMCluster(data=icaX,
                                 target=y,
                                 num_clusters=paramlist_clustering,
                                 plot=True,
                                 title=title)
        model_KM_ICA.tester()
        model_EM_ICA = EMCluster(data=icaX,
                                 target=y,
                                 num_clusters=paramlist_clustering,
                                 plot=True,
                                 title=title)
        model_EM_ICA.tester()
        plot_all(ds_title='Shopping_Intention',
                 title=title,
                 paramlist=paramlist_clustering).run()

        title = 'Shopping_Intention-RP'
        model_KM_RP = KMCluster(data=rpX,
                                target=y,
                                num_clusters=paramlist_clustering,
                                plot=True,
                                title=title)
        model_KM_RP.tester()
        model_EM_RP = EMCluster(data=rpX,
                                target=y,
                                num_clusters=paramlist_clustering,
                                plot=True,
                                title=title)
        model_EM_RP.tester()
        plot_all(ds_title='Shopping_Intention',
                 title=title,
                 paramlist=paramlist_clustering).run()

        title = 'Shopping_Intention-RF'
        model_KM_RF = KMCluster(data=rfX,
                                target=y,
                                num_clusters=paramlist_clustering,
                                plot=True,
                                title=title)
        model_KM_RF.tester()
        model_EM_RF = EMCluster(data=rfX,
                                target=y,
                                num_clusters=paramlist_clustering,
                                plot=True,
                                title=title)
        model_EM_RF.tester()
        plot_all(ds_title='Shopping_Intention',
                 title=title,
                 paramlist=paramlist_clustering).run()
    '''---- NN After DR ---'''
    alphalist = list(np.arange(0.5, 4, 0.5))
    X_pca_train, X_pca_test, y_pca_train, y_pca_test = train_test_split(
        pcaX, y, test_size=0.20)
    X_ica_train, X_ica_test, y_ica_train, y_ica_test = train_test_split(
        icaX, y, test_size=0.20)
    X_rp_train, X_rp_test, y_rp_train, y_rp_test = train_test_split(
        rpX, y, test_size=0.20)
    X_rf_train, X_rf_test, y_rf_train, y_rf_test = train_test_split(
        np.array(rfX), y, test_size=0.20)

    if part4:
        title = 'Shopping_Intention-PCA'
        model_NN_PCA = NN(X_pca_train,
                          X_pca_test,
                          y_pca_train,
                          y_pca_test,
                          dim_param=alphalist,
                          plot=True,
                          title=title)
        model_NN_PCA.tester()

        title = 'Shopping_Intention-ICA'
        model_NN_ICA = NN(X_ica_train,
                          X_ica_test,
                          y_ica_train,
                          y_ica_test,
                          dim_param=alphalist,
                          plot=True,
                          title=title)
        model_NN_ICA.tester()

        title = 'Shopping_Intention-RP'
        model_NN_RP = NN(X_rp_train,
                         X_rp_test,
                         y_rp_train,
                         y_rp_test,
                         dim_param=alphalist,
                         plot=True,
                         title=title)
        model_NN_RP.tester()

        title = 'Shopping_Intention-RF'
        model_NN_RF = NN(X_rf_train,
                         X_rf_test,
                         y_rf_train,
                         y_rf_test,
                         dim_param=alphalist,
                         plot=True,
                         title=title)
        model_NN_RF.tester()
    '''---- NN After Clustering ---'''
    kmX = model_KM.run(2)
    emX = model_EM.run(2)

    X_km_train, X_km_test, y_km_train, y_km_test = train_test_split(
        kmX, y, test_size=0.20)
    X_em_train, X_em_test, y_em_train, y_em_test = train_test_split(
        emX, y, test_size=0.20)

    if part5:
        title = 'Shopping_Intention-KM'
        model_NN_KM = NN(X_km_train,
                         X_km_test,
                         y_km_train,
                         y_km_test,
                         dim_param=alphalist,
                         plot=True,
                         title=title)
        model_NN_KM.tester()

        title = 'Shopping_Intention-EM'
        model_NN_EM = NN(X_em_train,
                         X_em_test,
                         y_em_train,
                         y_em_test,
                         dim_param=alphalist,
                         plot=True,
                         title=title)
        model_NN_EM.tester()
Example #21
def hypertuning_CreditCard(X_train, y_train, X_val, y_val, iterations, cols, etamin, etamax, lmbdmin, lmbdmax, batch_sizemin, batch_sizemax, hiddenmin,hiddenmax, epochs = 5):

    start_time = time.time()
    if (cols < iterations):
        cols = iterations
        print("cols must be larger than 'iterations. Cols is set equal to iterations")
    rows = 5
    hyper = np.zeros((rows, cols))
    AUC_array = np.zeros(iterations)
    #Making the matrix of parameters 
    hyper[0] =  np.logspace(etamin, etamax, cols)
    hyper[1] = np.logspace(lmbdmin, lmbdmax, cols)
    hyper[2] = np.round(np.linspace(batch_sizemin, batch_sizemax, cols, dtype='int'))
    hyper[3] = np.round(np.linspace(hiddenmin, hiddenmax, cols))
    hyper[4] = np.zeros((cols))

    for i in range(rows-1):
        np.random.shuffle(hyper[i])

    n_categories = 1

    #iterating over all parameters 
    for it in range(iterations):
        hyper_choice = hyper[:,it]
        eta = hyper_choice[0]
        lmbd = hyper_choice[1]
        batch_size = hyper_choice[2]
        n_hidden_neurons = hyper_choice[3]

        
        dnn = NN(X_train, y_train, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size, n_hidden_neurons=n_hidden_neurons, n_categories=n_categories,
                 cost_grad='crossentropy', activation='sigmoid', activation_out='sigmoid')
        
        dnn.train_and_validate()
        y_pred = dnn.predict_probabilities(X_val)

        AUC_array[it] = roc_auc_score(y_val, y_pred)
        
        hyper[4][it] = dnn.epoch +1

        #Estimating the time the iteration takes
        if (it%m.ceil((iterations/40))==0):
            print('Iteration: ', it)
            t = round((time.time() - start_time))
            if (t >= 60) and (it > 0):
                sec = t % 60
                print("--- %s min," % int(t/60),"%s sec ---" % sec)
                print("Estimated minutes left: ", int((t/it)*(iterations-it)/60))
            else:
                print("--- %s sec ---" %int(t))

    # Finding the best parameters:            
    AUC_best_index = np.argmax(AUC_array)
    AUC_best = np.max(AUC_array)
    print("AUC array: ", AUC_array)
    print("best index: ",AUC_best_index)
    print("best AUC: ", AUC_best)
    final_hyper = hyper[:,AUC_best_index]

    print("parameters: eta, lmbd, batch, hidden, epochs ", final_hyper)
    eta_best = final_hyper[0]
    lmbd_best = final_hyper[1]
    batch_size_best = final_hyper[2]
    n_hidden_neurons_best = final_hyper[3]
    epochs_best = final_hyper[4]
    return hyper[:,AUC_best_index]
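
# The routine above implements a simple random search: each hyperparameter row
# is an independently shuffled log- or linearly spaced grid, so each column of
# `hyper` becomes one random combination. A minimal self-contained sketch of
# that sampling idea (all names and ranges here are illustrative, not from the
# source):
import numpy as np

def sample_hyperparameters(n_combinations, seed=0):
    rng = np.random.default_rng(seed)
    grid = np.zeros((4, n_combinations))
    grid[0] = np.logspace(-5, 0, n_combinations)              # eta
    grid[1] = np.logspace(-11, -1, n_combinations)            # lmbd
    grid[2] = np.round(np.linspace(1, 100, n_combinations))   # batch size
    grid[3] = np.round(np.linspace(10, 500, n_combinations))  # hidden neurons
    for row in grid:
        rng.shuffle(row)  # shuffle each row so a column is a random pairing
    return grid

combos = sample_hyperparameters(8)
print(combos[:, 0])  # one candidate: [eta, lmbd, batch_size, n_hidden_neurons]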
Example #22
def hypertuning(iterations, cols, etamax, etamin, lmbdmax, lmbdmin,
                batch_sizemax, batch_sizemin, hiddenmax, hiddenmin):
    start_time = time.time()
    if (cols < iterations):
        cols = iterations
        print(
            "cols must be at least iterations; cols is set equal to iterations"
        )
    sig = signature(hypertuning)
    rows = int(len(sig.parameters) / 2)
    hyper = np.zeros((rows, cols))
    MSE_array = np.zeros(iterations)
    hyper[0] = np.logspace(etamin, etamax, cols)
    hyper[1] = np.logspace(lmbdmin, lmbdmax, cols)
    hyper[2] = np.round(
        np.linspace(batch_sizemin, batch_sizemax, cols, dtype='int'))
    hyper[3] = np.round(np.linspace(hiddenmin, hiddenmax, cols))
    hyper[4] = np.zeros((cols))
    #print(hyper)
    for i in range(rows - 1):
        np.random.shuffle(hyper[i])
        #print(np.apply_along_axis(np.random.shuffle, 1, hyper[i]))

    for it in range(iterations):
        hyper_choice = hyper[:, it]
        eta = hyper_choice[0]
        lmbd = hyper_choice[1]
        batch_size = hyper_choice[2]
        n_hidden_neurons = hyper_choice[3]

        dnn = NN(X_train,
                 z_train,
                 eta=eta,
                 lmbd=lmbd,
                 epochs=epochs,
                 batch_size=batch_size,
                 n_hidden_neurons=n_hidden_neurons,
                 n_categories=n_categories,
                 cost_grad='MSE',
                 activation='sigmoid',
                 activation_out='ELU')

        dnn.train(X_val)
        MSE_val = dnn.MSE_epoch(z_val)

        best_pred_epoch = np.argmin(MSE_val)

        dnn_test = NN(X_train,
                      z_train,
                      eta=eta,
                      lmbd=lmbd,
                      epochs=best_pred_epoch + 1,
                      batch_size=batch_size,
                      n_hidden_neurons=n_hidden_neurons,
                      n_categories=n_categories,
                      cost_grad='MSE',
                      activation='sigmoid',
                      activation_out='ELU')
        dnn_test.train(X_test)

        # could just use predict_probabilities on the last one here

        z_pred = dnn_test.y_predict_epoch[best_pred_epoch]

        MSE_array[it] = mean_squared_error(z_test, z_pred)
        hyper[4][it] = best_pred_epoch
        print(it)
        if (it % m.ceil((iterations / 40)) == 0):
            t = round((time.time() - start_time))
            if t >= 60:
                sec = t % 60
                print("--- %s min," % int(t / 60), "%s sec ---" % sec)
            else:
                print("--- %s sec ---" % int(t))
    MSE_best_index = np.argmin(MSE_array)
    MSE_best = np.min(MSE_array)
    print("MSE array: ", MSE_array)
    print("best index: ", MSE_best_index)
    print("best MSE: ", MSE_best)

    return hyper[:, MSE_best_index]
Example #23
def hypertuning_franke(z, x, y, iterations, cols, etamin, etamax, lmbdmin, lmbdmax, batch_sizemin, batch_sizemax, hiddenmin,hiddenmax, polymin, polymax, epochs = 1000, plot_MSE = False, validate = True):

    start_time = time.time()
    if (cols < iterations):
        cols = iterations
        print("cols must be larger than 'iterations. Cols is set equal to iterations")
    rows = 6
    hyper = np.zeros((rows, cols))
    MSE_array = np.zeros(iterations)
    #Making the matrix of parameters 
    hyper[0] =  np.logspace(etamin, etamax, cols)
    hyper[1] = np.logspace(lmbdmin, lmbdmax, cols)
    hyper[2] = np.round(np.linspace(batch_sizemin, batch_sizemax, cols, dtype='int'))
    hyper[3] = np.round(np.linspace(hiddenmin, hiddenmax, cols))
    hyper[4] = np.random.randint(polymin, polymax, size=cols, dtype='int')
    hyper[5] = np.zeros((cols))

    for i in range(rows-1):
        np.random.shuffle(hyper[i])

    n_categories = 1
    
    #iterating over all parameters 
    for it in range(iterations):
        hyper_choice = hyper[:,it]
        eta = hyper_choice[0]
        lmbd = hyper_choice[1]
        batch_size = hyper_choice[2]
        n_hidden_neurons = hyper_choice[3]
        X, X_train, X_test, X_val, z_train, z_test, z_val, indicies = CreateDesignMatrix_X(z, x,y, int(hyper[4][it]))

        np.random.seed(seed)
        dnn = NN(X_train, z_train, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size, n_hidden_neurons=n_hidden_neurons, n_categories=n_categories,
                 cost_grad='MSE', activation='sigmoid', activation_out='ELU')
        dnn.train_and_validate(X_val, z_val, MSE_store = plot_MSE, validate=validate)

        z_pred = dnn.predict_probabilities(X_val)
        MSE_array[it] = mean_squared_error(z_val,z_pred)
        hyper[5][it] = dnn.epoch +1

        #Optional: If one wishes to see how the parameter combination is doing, pass plot_MSE = True:
        if(plot_MSE):
            print("parameters: eta, lmbd, batch, hidden, poly, epochs \n", hyper[:,it:it+1])
            MSE_val, MSE_train = dnn.MSE_epoch()
            epo = np.arange(len(MSE_val))
            plt.plot(epo, MSE_val, label='MSE val')
            plt.plot(epo, MSE_train, label='MSE train')
            plt.xlabel("Number of epochs")
            plt.ylabel("MSE")
            plt.title("MSE vs epochs")
            plt.legend()
            plt.show()

        #Estimating the time the iteration takes
        if (it%m.ceil((iterations/60))==0):
            print('Iteration: ', it)
            t = round((time.time() - start_time))
            if (t >= 60) and (it > 0):
                sec = t % 60
                print("--- %s min," % int(t/60),"%s sec ---" % sec)
                print("Estimated minutes left: ", int((t/it)*(iterations-it)/60))
            else:
                print("--- %s sec ---" %int(t))

    # Finding the best parameters:
    MSE_best_index = np.argmin(MSE_array)
    MSE_best = np.min(MSE_array)
    print("MSE array: ", MSE_array)
    print("best index: ",MSE_best_index)
    print("best MSE: ", MSE_best)
    final_hyper = hyper[:,MSE_best_index]

    print("parameters: eta, lmbd, batch, hidden, poly, epochs ", final_hyper)
    eta_best = final_hyper[0]
    lmbd_best = final_hyper[1]
    batch_size_best = final_hyper[2]
    n_hidden_neurons_best = final_hyper[3]
    poly_best = final_hyper[4]
    epochs_best = final_hyper[5]
    return hyper[:,MSE_best_index]
Example #24
def NeuralNetwork(X, z, test=False):
    """Wrapper for a neural network. Trains a neural network using X and z.

    Args:
        X (np.ndarray): Input data the network is to be trained on.
        z (np.ndarray): Response data the network is to be trained against.
        test (bool, optional): If true, will search a hard-coded parameter-
                               space for optimal parameters instead of 
                               training a network. Defaults to False.

    Returns:
        (float, list): (score reached, [testing set prediction, testing set])
    """
    if not test:
        hiddenLayers = 2
        hiddenNeurons = 64
        epochN = 500
        minibatchSize = 32
        eta = (None, 1e-03)
        lmbd = 1e-06
        alpha = 1e-00
        activationFunction = sigmoid
        outputFunction = softMax

        Xtr, Xte, ztr, zte = train_test_split(X, z)

        network = NN(hiddenNN=hiddenNeurons, hiddenLN=hiddenLayers)
        network.giveInput(Xtr, ztr)
        network.giveParameters(epochN=epochN,
                               minibatchSize=minibatchSize,
                               eta=etaDefinerDefiner(eta[0], eta[1]),
                               lmbd=lmbd,
                               alpha=alpha,
                               activationFunction=activationFunction,
                               outputFunction=outputFunction)
        network.train(splitData=False)

        network.predict(Xte, zte)

        return network.score, [network.predictedLabel, zte]

    else:
        # Benchmarking parameters; random search
        parameters = {
            "hiddenLN": [0, 1, 2, 4],
            "hiddenNN": [16, 32, 64, 128, 256],
            "epochN": [500],
            "minibatchSize": [32, 64],
            "eta": [[j, i**k] for i in np.logspace(0, 6, 7)
                    for j, k in [(1, 1), (None, -1)]],
            "lmbd": np.logspace(-1, -6, 3),
            "alpha": np.logspace(-0, -1, 1),
            "activationFunction": [sigmoid, ReLU_leaky, ReLU],
            "outputFunction": [softMax],
            "#repetitions": 5,
            "datafraction": 1
        }

        optimalScore, optimalParams, optimalParamSTR = benchmarkNN(
            X,
            z,
            parameters,
            NN,
            mode="classification",
            randomSearch=False,
            writingPermissions=False,
            N=int(1e3))
        print("Optimal Neural Network parameters:",
              optimalScore,
              optimalParamSTR,
              sep="\n",
              end="\n\n")
Example #25
# load data
data = xlrd.open_workbook('../WTMLDataSet_3.0alpha.xlsx')
table = data.sheet_by_name('WTML')

dataset = []
for i in range(table.nrows):
    line = table.row_values(i)
    dataset.append(line)
dataset = np.array(dataset)

xs = dataset[1:, 1:-1].astype(np.float64)
ys = (dataset[1:, -1] == '是').astype(np.int32)  # '是' ("yes") marks the positive class

# train a neural network to learn from watermelon dataset
nn = NN([xs.shape[1], 16, len(set(ys))], ["sigmoid", "softmax"],
        lr_init=0.1,
        regularization=None)
for batch_idx in range(50000):
    nn.train(xs, ys)
    if batch_idx % 100 == 0:
        print("Loss = %.4f" % nn.loss)

# calculate accuracy
preds = nn.forward(xs)
preds = np.argmax(preds, axis=-1)
print("Accuracy: %.4f" % np.mean(preds == ys))

# plot data
positive_xs = xs[ys == 1]
negative_xs = xs[ys == 0]
plt.scatter(positive_xs[:, 0],
Example #26
        xs[:,
           attr_idx] = np.array([values.index(val) for val in xs[:, attr_idx]])
xs = xs.astype(np.float64)

# partition dataset into train-set and test-set
train_indices = [0, 1, 2, 5, 6, 9, 13, 14, 15, 16]
test_indices = [3, 4, 7, 8, 10, 11, 12]
train_xs, train_ys = xs[train_indices], ys[train_indices]
test_xs, test_ys = xs[test_indices], ys[test_indices]

epochs = 100

# Standard BP
print("### Standard BP ###")
nn = NN([xs.shape[1], 8, len(set(ys))], ["relu", "softmax"],
        lr_init=0.05,
        regularization="L2")
stdBP_loss = []
start = time.time()
for epoch in tqdm(range(epochs)):
    this_epoch_losses = []
    for sample_xs, sample_ys in zip(train_xs, train_ys):
        nn.train(sample_xs.reshape(1, -1), sample_ys.reshape(-1))
        this_epoch_losses.append(nn.loss)
    stdBP_loss.append(np.mean(this_epoch_losses))
end = time.time()
stdBP_time = end - start
stdBP_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1) == test_ys)

# Accumulated BP
print("\n### Accumulated BP ###")
Example #27
test_ys_for_svm = np.where(test_ys==0, -1, 1)
# Linear SVM
print("\nTesting SVM with linear kernel...")
Linear_SVM = SVM(xs.shape[1], func='Linear')
Linear_SVM.fit(train_xs, train_ys_for_svm, C=100, epsilon=0.01, iters=10000)
Linear_svm_acc = np.mean(Linear_SVM.predict(test_xs)==test_ys_for_svm)

# Gaussian SVM
print("\nTesting SVM with Gaussian kernel...")
Gaussian_SVM = SVM(xs.shape[1], func='Gaussian', sigma=0.1)
Gaussian_SVM.fit(train_xs, train_ys_for_svm, C=1, epsilon=0.01, iters=100)
Gaussian_svm_acc = np.mean(Gaussian_SVM.predict(test_xs)==test_ys_for_svm)

# Neural Network
print("\nTesting Neural Network...")
nn = NN([xs.shape[1], 64, len(set(ys))], ["relu", "softmax"],
        lr_init=0.01, regularization="L2", regularization_lambda=0.1)
for epoch in tqdm(range(100)):
    nn.train(train_xs, train_ys)
nn_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1)==test_ys)

# Decision Tree
print("\nTesting Decision Tree...")
decisionTree = DecisionTree(train_xs, train_ys, test_xs, test_ys, attributes, isdiscs, labels)
decisionTree.buildTree(partIndex='InformationGain', prepruning=True)
decisionTree_acc = decisionTree.test(test_xs, test_ys)

# Demo
print("\nTest Accuracy:")
print("- Linear SVM      :    %.2f"%(Linear_svm_acc*100)+"%")
print("- Gaussian SVM    :    %.2f"%(Gaussian_svm_acc*100)+"%")
print("- Neural Network  :    %.2f"%(nn_acc*100)+"%")
print("- Decision Tree   :    %.2f"%(decisionTree_acc*100)+"%")