Example #1
def Bonus():
    l = 10
    m = 20
    n = 80

    for n in range(40, 200, 40):
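        # n sweeps 40, 80, 120, 160 (overwriting the n = 80 set above); new data is generated for each value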
        data_y, data_x = gen.gen(l, m, n, 10000, True)

        Ada = AdaGrad.AdaGrad(len(data_x[0]), 0.25)
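        # the second constructor argument is the learning rate eta (cf. the tuning examples below)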

        mistake_arr = []
        lose_arr = []

        for i in range(50):
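            # each pass returns the misclassification count and the hinge loss for this training pass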
            mistake, lose = Ada.trainLose(data_x, data_y)
            mistake_arr.append(mistake)
            lose_arr.append(lose)

        plt.plot(mistake_arr, 'r', label='Misclassification error')
        plt.xlabel('number of training sessions N')
        plt.ylabel('error value')
        plt.legend(loc='upper left')
        plt.show()

        plt.plot(lose_arr, 'g', label='Hinge loss')
        plt.xlabel('number of training sessions N')
        plt.ylabel('error value')
        plt.legend(loc='upper left')
        plt.show()
Example #2
def trainConvergePlot(datasetx, datasety, d, pteta, winNalpha, winMalpha,
                      winMgamma, Adaeta):
    # perceptron with no margin
    print("Perceptron with no margin full training ...")
    ptN = perceptron.perceptron_nomargin(len(datasetx[0]))
    ptN_mistakearr, ptN_mistake = ptN.trainConverge(datasetx, datasety, d)
    print(ptN_mistake)

    # perceptron with margin
    print("Perceptron with margin full training ...")
    ptM = perceptron.perceptron_margin(len(datasetx[0]), pteta)
    ptM_mistakearr, ptM_mistake = ptM.trainConverge(datasetx, datasety, d)
    print(ptM_mistake)

    # Winnow with no margin
    print("Winnow with no margin full training ...")
    winN = winnow.winnow_nomargin(len(datasetx[0]), winNalpha)
    winN_mistakearr, winN_mistake = winN.trainConverge(datasetx, datasety, d)
    print(winN_mistake)

    # Winnow with margin
    print("Winnow with margin full training ...")
    winM = winnow.winnow_margin(len(datasetx[0]), winMalpha, winMgamma)
    winM_mistakearr, winM_mistake = winM.trainConverge(datasetx, datasety, d)
    print(winM_mistake)

    # AdaGrad model
    print("AdaGrad full training ...")
    Ada = AdaGrad.AdaGrad(len(datasetx[0]), Adaeta)
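    # note: trainConverge is called with a fixed limit of 10 here, rather than the d passed to the other models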
    Ada_mistakearr, Ada_mistake = Ada.trainConverge(datasetx, datasety, 10)
    print(Ada_mistake)

    # return the final mistake counts and per-example mistake arrays so the caller can plot them
    return (ptN_mistake, ptN_mistakearr, ptM_mistake, ptM_mistakearr,
            winN_mistake, winN_mistakearr, winM_mistake, winM_mistakearr,
            Ada_mistake, Ada_mistakearr)
Example #3
    def allmodelTun(self):
        # generate tuning data
        D1x, D1y, D2x, D2y = self.datasplit()
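        # D1*/D2*: the two halves returned by datasplit(), presumably one to fit each candidate and one to score it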

        # tuning perceptron with margin
        pt = perceptron.perceptron_margin(len(self.datax[0]), 0)
        pteta, ptresult = self.tunPerceptron(pt, self.pt_eta, D1x, D1y, D2x, D2y)
        print("Data l, m, n : ", self.l, self.m, self.n, "~~~~~~~~~~~ Best Perceptron eta is : ", pteta, " result is : ", ptresult)

        # tuning winnow with no margin
        winnowNoM = winnow.winnow_nomargin(len(self.datax[0]), 0)
        winNalpha, winNresult = self.tunWinnowNoMargin(winnowNoM, self.win_alpha, D1x, D1y, D2x, D2y)
        print("Data l, m, n : ", self.l, self.m, self.n, "~~~~~~~~~~~ Best winN alpha1 is : ", winNalpha, " result is : ", winNresult)

        # tuning winnow with margin
        winnowM = winnow.winnow_margin(len(self.datax[0]), 0, 0)
        winMalpha, winMgamma, winMresult = self.tunWinnowMargin(winnowM, self.win_alpha, self.win_gamma, D1x, D1y, D2x, D2y)
        print("Data l, m, n : ", self.l, self.m, self.n, "~~~~~~~~~~~ Best winM alpha1 is : ", winMalpha, " Best winM gamma1 is : ", winMgamma, " result is : ", winMresult)

        # tuning AdaGrad
        Ada = AdaGrad.AdaGrad(len(self.datax[0]), 0)
        Adaeta, Adaresult = self.tunAdaGrad(Ada, self.Ada_eta, D1x, D1y, D2x, D2y)
        print("Data l, m, n : ", self.l, self.m, self.n, "~~~~~~~~~~~ Best Ada eta1 is : ", Adaeta, " result is : ", Adaresult)

        return pteta, winNalpha, winMalpha, winMgamma, Adaeta
Example #4
def AdaGradTunTest():
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    tune = tuning.tuning()
    tune.load(dataset1x, dataset1y)

    Ada = AdaGrad.AdaGrad(len(dataset1x[0]), 0)
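    # eta = 0 here is only a placeholder; tunAdaGrad selects from the candidate list below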
    eta = [1.5, 0.25, 0.03, 0.005, 0.001]
    best_eta, best_result = tune.tunAdaGrad(Ada, eta)
    print(" AdaGrad best eta is : " , best_eta)
    print(" AdaGrad best result is : ", best_result)
Example #5
def trainMistakePlot(datasetx, datasety, pteta, winNalpha, winMalpha,
                     winMgamma, Adaeta):
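    # train each model once over the full data set and plot cumulative mistakes against the number of examples seen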
    # perceptron with no margin
    print("Perceptron with no margin full training ...")
    ptN = perceptron.perceptron_nomargin(len(datasetx[0]))
    ptN_mistake = ptN.train(datasetx, datasety)
    print(ptN_mistake[-1])
    # perceptron with margin
    print("Perceptron with margin full training ...")
    ptM = perceptron.perceptron_margin(len(datasetx[0]), pteta)
    ptM_mistake = ptM.train(datasetx, datasety)
    print(ptM_mistake[-1])

    # Winnow with no margin
    print("Winnow with no margin full training ...")
    winN = winnow.winnow_nomargin(len(datasetx[0]), winNalpha)
    winN_mistake = winN.train(datasetx, datasety)
    print(winN_mistake[-1])
    # Winnow with margin
    print("Winnow with margin full training ...")
    winM = winnow.winnow_margin(len(datasetx[0]), winMalpha, winMgamma)
    winM_mistake = winM.train(datasetx, datasety)
    print(winM_mistake[-1])

    # AdaGrad model
    print("AdaGrad full training ...")
    Ada = AdaGrad.AdaGrad(len(datasetx[0]), Adaeta)
    Ada_mistake = Ada.train(datasetx, datasety)
    print(Ada_mistake[-1])

    # plotting mistake vs sample n
    pylab.plot(ptN_mistake, 'r', label='perceptron No margin')
    pylab.plot(ptM_mistake, 'g', label='perceptron with margin')
    pylab.plot(winN_mistake, 'b', label='Winnows No margin')
    pylab.plot(winM_mistake, 'c', label='Winnows with margin')
    pylab.plot(Ada_mistake, 'y', label='AdaGrad model')
    pylab.legend(loc='upper left')
    pylab.xlabel('number of examples N')
    pylab.ylabel('number of mistakes M')
    pylab.show()
Example #6
def Question3():
    l = 10
    n = 1000
    m = [100, 500, 1000]

    for i in range(2, 3):
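        # only i = 2 runs here, i.e. m = 1000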
        # (a) Data Generation
        (trainy, trainx) = gen.gen(l, m[i], n, 50000, True)
        (testy, testx) = gen.gen(l, m[i], n, 10000, False)

        # (b) Parameter Tune
        tune = tuning.tuning()
        tune.load(trainx, trainy, l, m[i], n)
        pteta, winNalpha, winMalpha, winMgamma, Adaeta = tune.allmodelTun()

        # (c) Training
        ptN = perceptron.perceptron_nomargin(len(trainx[0]))
        ptM = perceptron.perceptron_margin(len(trainx[0]), pteta)
        winN = winnow.winnow_nomargin(len(trainx[0]), winNalpha)
        winM = winnow.winnow_margin(len(trainx[0]), winMalpha, winMgamma)
        Ada = AdaGrad.AdaGrad(len(trainx[0]), Adaeta)
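        # 20 full passes over the training data for each model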
        for j in range(20):
            ptN.train(trainx, trainy)
            ptM.train(trainx, trainy)
            winN.train(trainx, trainy)
            winM.train(trainx, trainy)
            Ada.train(trainx, trainy)

        # (d) Testing
        ptNresult = ptN.test(testx, testy)
        ptMresult = ptM.test(testx, testy)
        winNresult = winN.test(testx, testy)
        winMresult = winM.test(testx, testy)
        Adaresult = Ada.test(testx, testy)

        print(pteta, winNalpha, winMalpha, winMgamma, Adaeta)
        print(ptNresult, ptMresult, winNresult, winMresult, Adaresult)
Example #7
def AdaGradTest():
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    Ada = AdaGrad.AdaGrad(len(dataset1x[0]), 1.5)
    Ada.train(dataset1x, dataset1y)
    output = Ada.test(dataset1x, dataset1y)
    print(output)
            loss=network.loss(train_images_batch,train_lables_batch)
            train_loss_list1.append(loss)
    '''
            if i%iter_per_epoch==0:
                batch_mask=np.random.choice(test_size,batch_size)
                test_images_batch=test_images[batch_mask]
                test_lables_batch=test_lables[batch_mask]
                train_acc=network.accuracy(train_images_batch,train_lables_batch)
                test_acc=network.accuracy(test_images_batch,test_lables_batch)
                train_acc_list1.append(train_acc)
                test_acc_list1.append(test_acc)
                print('train_acc:'+str(train_acc))
                print("test_acc:"+str(test_acc))
                '''
    if s == 2:
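        # s == 2: train with the AdaGrad optimizer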
        optimizer=adagrad.AdaGrad(learning_rate)
        for i in range(iters_num):
            batch_mask=np.random.choice(train_size,batch_size)
            train_images_batch=train_images[batch_mask]
            train_lables_batch=train_lables[batch_mask]

            # compute the gradient via error backpropagation
            grad=network.gradient(train_images_batch,train_lables_batch)
            '''
            w1_list.append(np.mean(grad['w1']))
            b1_list.append(np.mean(grad['b1']))
            w2_list.append(np.mean(grad['w2']))
            b2_list.append(np.mean(grad['b2']))
            '''

            # update