Example #1
def trainConvergePlot(datasetx, datasety, d, pteta, winNalpha, winMalpha,
                      winMgamma, Adaeta):
    # perceptron with no margin
    print("Perceptron with no margin full training ...")
    ptN = perceptron.perceptron_nomargin(len(datasetx[0]))
    ptN_mistakearr, ptN_mistake = ptN.trainConverge(datasetx, datasety, d)
    print(ptN_mistake)

    # perceptron with margin
    print("Perceptron with margin full training ...")
    ptM = perceptron.perceptron_margin(len(datasetx[0]), pteta)
    ptM_mistakearr, ptM_mistake = ptM.trainConverge(datasetx, datasety, d)
    print(ptM_mistake)

    # Winnow with no margin
    print("Winnow with no margin full training ...")
    winN = winnow.winnow_nomargin(len(datasetx[0]), winNalpha)
    winN_mistakearr, winN_mistake = winN.trainConverge(datasetx, datasety, d)
    print(winN_mistake)

    # Winnow with margin
    print("Winnow with margin full training ...")
    winM = winnow.winnow_margin(len(datasetx[0]), winMalpha, winMgamma)
    winM_mistakearr, winM_mistake = winM.trainConverge(datasetx, datasety, d)
    print(winM_mistake)

    # AdaGrad model
    print("AdaGrad full training ...")
    Ada = AdaGrad.AdaGrad(len(datasetx[0]), Adaeta)
    Ada_mistakearr, Ada_mistake = Ada.trainConverge(datasetx, datasety, d)
    print(Ada_mistake)

    # return final mistake counts and per-example mistake arrays for plotting
    return (ptN_mistake, ptN_mistakearr, ptM_mistake, ptM_mistakearr,
            winN_mistake, winN_mistakearr, winM_mistake, winM_mistakearr,
            Ada_mistake, Ada_mistakearr)
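A minimal usage sketch (assuming datasetx, datasety, d, and the tuned hyperparameters already exist, as in the other examples). The returned per-example arrays plot the same way trainMistakePlot does in Example #5:

(ptN_mistake, ptN_mistakearr, ptM_mistake, ptM_mistakearr,
 winN_mistake, winN_mistakearr, winM_mistake, winM_mistakearr,
 Ada_mistake, Ada_mistakearr) = trainConvergePlot(
    datasetx, datasety, d, pteta, winNalpha, winMalpha, winMgamma, Adaeta)

pylab.plot(ptN_mistakearr, 'r', label='perceptron, no margin')
pylab.plot(winN_mistakearr, 'b', label='winnow, no margin')
pylab.plot(Ada_mistakearr, 'y', label='AdaGrad')
pylab.legend(loc='upper left')
pylab.show()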
Example #2
    def allmodelTun(self):
        # generate tuning data
        D1x, D1y, D2x, D2y = self.datasplit()

        # tuning perceptron with margin
        pt = perceptron.perceptron_margin(len(self.datax[0]), 0)
        pteta, ptresult = self.tunPerceptron(pt, self.pt_eta, D1x, D1y, D2x, D2y)
        print("Data l, m, n : ", self.l, self.m, self.n, "~~~~~~~~~~~ Best Perceptron eta is : ", pteta, " result is : ", ptresult)

        # tuning winnow with no margin
        winnowNoM = winnow.winnow_nomargin(len(self.datax[0]), 0)
        winNalpha, winNresult = self.tunWinnowNoMargin(
            winnowNoM, self.win_alpha, D1x, D1y, D2x, D2y)
        print("Data l, m, n : ", self.l, self.m, self.n,
              "~~~~~~~~~~~ Best winN alpha is : ", winNalpha,
              " result is : ", winNresult)

        # tuning winnow with margin
        winnowM = winnow.winnow_margin(len(self.datax[0]), 0, 0)
        winMalpha, winMgamma, winMresult = self.tunWinnowMargin(
            winnowM, self.win_alpha, self.win_gamma, D1x, D1y, D2x, D2y)
        print("Data l, m, n : ", self.l, self.m, self.n,
              "~~~~~~~~~~~ Best winM alpha is : ", winMalpha,
              " Best winM gamma is : ", winMgamma,
              " result is : ", winMresult)

        # tuning AdaGrad
        Ada = AdaGrad.AdaGrad(len(self.datax[0]), 0)
        Adaeta, Adaresult = self.tunAdaGrad(Ada, self.Ada_eta, D1x, D1y, D2x, D2y)
        print("Data l, m, n : ", self.l, self.m, self.n, "~~~~~~~~~~~ Best Ada eta1 is : ", Adaeta, " result is : ", Adaresult)

        return pteta, winNalpha, winMalpha, winMgamma, Adaeta
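The tun* helpers themselves are not shown in these examples. A minimal sketch of the pattern they appear to follow, assuming the reset/train/test methods seen in the other examples: train each candidate value on split D1, score it on split D2, and keep the best. This is an illustration, not the actual implementation.

    def tunPerceptron(self, pt, etas, D1x, D1y, D2x, D2y):
        # hypothetical sketch of the grid search over candidate learning rates
        best_eta, best_result = None, -1.0
        for eta in etas:
            pt.reset(eta)                # re-initialize the model with this eta
            pt.train(D1x, D1y)           # fit on the tuning split D1
            result = pt.test(D2x, D2y)   # score on the held-out split D2
            if result > best_result:
                best_eta, best_result = eta, result
        return best_eta, best_result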
Example #3
def perceptronTuneTest():
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    tune = tuning.tuning()
    pt = perceptron.perceptron_margin(len(dataset1x[0]), 0)
    eta = [1.5, 0.25, 0.03, 0.005, 0.001]

    tune.load(dataset1x, dataset1y)
    best_eta, best_result = tune.tunPerceptron(pt, eta)
    print("Best eta is : ", best_eta)
    print("Best result is : ", best_result)
Example #4
def perceptronTest():
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    dataset2y, dataset2x = gen.gen(10, 100, 1000, 50000, False)

    # perceptron with no margin on dataset 1
    pNoMargin = perceptron.perceptron_nomargin(len(dataset1x[0]))
    pNoMargin.train(dataset1x, dataset1y)
    output = pNoMargin.test(dataset1x, dataset1y)
    print(output)

    # perceptron with margin (eta = 1.5) on dataset 2
    pMargin = perceptron.perceptron_margin(len(dataset2x[0]), 1.5)
    pMargin.reset(1.5)
    pMargin.train(dataset2x, dataset2y)
    output = pMargin.test(dataset2x, dataset2y)
    print(output)
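The perceptron module itself is not included in these examples, so the internals below are an assumption: a margin perceptron of this kind typically updates on every example whose margin falls at or below gamma, not just on outright mistakes.

class PerceptronMarginSketch:
    # hypothetical stand-in for perceptron.perceptron_margin
    def __init__(self, n, eta, gamma=1.0):
        self.w = [0.0] * n    # weight vector
        self.theta = 0.0      # bias term
        self.eta = eta        # learning rate
        self.gamma = gamma    # margin threshold

    def train(self, X, Y):
        updates = 0
        for x, y in zip(X, Y):
            score = sum(wi * xi for wi, xi in zip(self.w, x)) + self.theta
            if y * score <= self.gamma:  # margin violated: additive update
                self.w = [wi + self.eta * y * xi for wi, xi in zip(self.w, x)]
                self.theta += self.eta * y
                updates += 1
        return updates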
Example #5
def trainMistakePlot(datasetx, datasety, pteta, winNalpha, winMalpha,
                     winMgamma, Adaeta):
    # perceptron with no margin
    print("Perceptron with no margin full training ...")
    ptN = perceptron.perceptron_nomargin(len(datasetx[0]))
    ptN_mistake = ptN.train(datasetx, datasety)
    print(ptN_mistake[-1])

    # perceptron with margin
    print("Perceptron with margin full training ...")
    ptM = perceptron.perceptron_margin(len(datasetx[0]), pteta)
    ptM_mistake = ptM.train(datasetx, datasety)
    print(ptM_mistake[-1])

    # Winnow with no margin
    print("Winnow with no margin full training ...")
    winN = winnow.winnow_nomargin(len(datasetx[0]), winNalpha)
    winN_mistake = winN.train(datasetx, datasety)
    print(winN_mistake[-1])

    # Winnow with margin
    print("Winnow with margin full training ...")
    winM = winnow.winnow_margin(len(datasetx[0]), winMalpha, winMgamma)
    winM_mistake = winM.train(datasetx, datasety)
    print(winM_mistake[-1])

    # AdaGrad model
    print("AdaGrad full training ...")
    Ada = AdaGrad.AdaGrad(len(datasetx[0]), Adaeta)
    Ada_mistake = Ada.train(datasetx, datasety)
    print(Ada_mistake[-1])

    # plot cumulative mistakes vs number of examples seen
    pylab.plot(ptN_mistake, 'r', label='perceptron, no margin')
    pylab.plot(ptM_mistake, 'g', label='perceptron with margin')
    pylab.plot(winN_mistake, 'b', label='winnow, no margin')
    pylab.plot(winM_mistake, 'c', label='winnow with margin')
    pylab.plot(Ada_mistake, 'y', label='AdaGrad')
    pylab.legend(loc='upper left')
    pylab.xlabel('number of examples N')
    pylab.ylabel('number of mistakes M')
    pylab.show()
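The AdaGrad module is likewise not shown; the sketch below assumes the usual per-coordinate adaptive-gradient update over the hinge loss, which is the standard pairing in this kind of comparison. Treat it as an illustration, not the module's actual code.

import math

class AdaGradSketch:
    # hypothetical stand-in for AdaGrad.AdaGrad (hinge loss assumed)
    def __init__(self, n, eta):
        self.w = [0.0] * n
        self.theta = 0.0
        self.eta = eta
        self.G = [0.0] * (n + 1)  # accumulated squared gradients (w, then theta)

    def train(self, X, Y):
        updates = 0
        for x, y in zip(X, Y):
            score = sum(wi * xi for wi, xi in zip(self.w, x)) + self.theta
            if y * score <= 1:  # hinge loss active: take a scaled gradient step
                for i, xi in enumerate(x):
                    g = -y * xi              # gradient of the hinge loss wrt w[i]
                    self.G[i] += g * g
                    if self.G[i] > 0:
                        self.w[i] -= self.eta * g / math.sqrt(self.G[i])
                self.G[-1] += 1.0            # gradient wrt theta is -y, squared is 1
                self.theta += self.eta * y / math.sqrt(self.G[-1])
                updates += 1
        return updates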
Example #6
def Question3():
    l = 10
    n = 1000
    m = [100, 500, 1000]

    for i in range(2, 3):  # note: only runs the m = 1000 case (i = 2)
        # (a) Data Generation
        (trainy, trainx) = gen.gen(l, m[i], n, 50000, True)
        (testy, testx) = gen.gen(l, m[i], n, 10000, False)

        # (b) Parameter Tune
        tune = tuning.tuning()
        tune.load(trainx, trainy, l, m[i], n)
        pteta, winNalpha, winMalpha, winMgamma, Adaeta = tune.allmodelTun()

        # (c) Training
        ptN = perceptron.perceptron_nomargin(len(trainx[0]))
        ptM = perceptron.perceptron_margin(len(trainx[0]), pteta)
        winN = winnow.winnow_nomargin(len(trainx[0]), winNalpha)
        winM = winnow.winnow_margin(len(trainx[0]), winMalpha, winMgamma)
        Ada = AdaGrad.AdaGrad(len(trainx[0]), Adaeta)
        for j in range(20):  # 20 training passes over the training data
            ptN.train(trainx, trainy)
            ptM.train(trainx, trainy)
            winN.train(trainx, trainy)
            winM.train(trainx, trainy)
            Ada.train(trainx, trainy)

        # (d) Testing
        ptNresult = ptN.test(testx, testy)
        ptMresult = ptM.test(testx, testy)
        winNresult = winN.test(testx, testy)
        winMresult = winM.test(testx, testy)
        Adaresult = Ada.test(testx, testy)

        print(pteta, winNalpha, winMalpha, winMgamma, Adaeta)
        print(ptNresult, ptMresult, winNresult, winMresult, Adaresult)
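For completeness, the multiplicative update that winnow_nomargin presumably applies (again an assumption about internals not shown here): weights start at 1, the threshold is fixed at -n, and every mistake rescales each active coordinate by a factor of alpha.

class WinnowSketch:
    # hypothetical stand-in for winnow.winnow_nomargin (x assumed in {0, 1})
    def __init__(self, n, alpha):
        self.w = [1.0] * n        # weights start at 1
        self.theta = -float(n)    # fixed threshold
        self.alpha = alpha        # promotion/demotion factor

    def train(self, X, Y):
        mistakes = 0
        for x, y in zip(X, Y):
            score = sum(wi * xi for wi, xi in zip(self.w, x)) + self.theta
            if y * score <= 0:  # mistake: multiplicative promotion/demotion
                self.w = [wi * (self.alpha ** (y * xi))
                          for wi, xi in zip(self.w, x)]
                mistakes += 1
        return mistakes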