def part_3(l, m, n):
    """ function for question 3.
        1. initialize variables
        2. generate training & test dataset
        3. build online learning algorithms
    """
    # initialize variables
    size = 50000
    samples, loops, R = size // 10, 20, None
    lrs = [1.5, 0.25, 0.03, 0.005, 0.001]
    alphas = [1.1, 1.01, 1.005, 1.0005, 1.0001]
    gammas = [2.0, 0.3, 0.04, 0.006, 0.001]
    
    # generate datasets: a small tuning set, a noisy training set, and a clean test set
    (y, x) = gen(l, m, n, 10, False)
    (y_train, x_train) = gen(l, m, n, 50000, True)
    (y_test, x_test) = gen(l, m, n, 10000, False)
    
    # build online learning algorithms
    perceptron = Perceptron(R, x_train, x_test, y_train, y_test, n, size, size)
    perceptron_margin = Perceptron_Margin(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, lrs)
    winnon = Winnon(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, alphas)
    winnon_margin = Winnon_Margin(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, alphas, gammas)
    adagrad = AdaGrad(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, lrs)
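# Note: gen(l, m, n, num, noise) is the l-of-m-of-n data generator used throughout
# these examples; its implementation is not shown here. A minimal sketch under the
# usual convention (an example is positive iff at least l of its first m of n
# boolean attributes are on); the function name, seeding, and 5% noise rate below
# are assumptions, not the project's code:
import numpy as np

def gen_sketch(l, m, n, num_instances, noise, seed=0):
    # Hypothetical stand-in for gen(); returns (labels, data) like the real one.
    rng = np.random.default_rng(seed)
    x = rng.integers(0, 2, size=(num_instances, n))
    y = np.where(x[:, :m].sum(axis=1) >= l, 1, -1)
    if noise:
        flip = rng.random(num_instances) < 0.05  # assumed label-noise rate
        y[flip] = -y[flip]
    return y, x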
Example #2
def Question1():
    # (a). Generating two datasets
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    dataset2y, dataset2x = gen.gen(10, 100, 5000, 50000, False)

    # (b). Tuning parameters, record optimal parameters
    tune1 = tuning.tuning()
    tune2 = tuning.tuning()
    tune1.load(dataset1x, dataset1y, 10, 100, 500)
    tune2.load(dataset2x, dataset2y, 10, 100, 5000)

    pteta1, winNalpha1, winMalpha1, winMgamma1, Adaeta1 = tune1.allmodelTun()
    pteta2, winNalpha2, winMalpha2, winMgamma2, Adaeta2 = tune2.allmodelTun()
    print(pteta1, winNalpha1, winMalpha1, winMgamma1, Adaeta1)
    print(pteta2, winNalpha2, winMalpha2, winMgamma2, Adaeta2)

    # (c,d) run the best parameters on the entire training set; plot mistakes vs sample size n
    #  --- training set with n = 500 ---
    # trainMistakePlot(dataset1x, dataset1y, 0.005, 1.1, 1.1, 0.04, 0.25)
    trainMistakePlot(dataset1x, dataset1y, pteta1, winNalpha1, winMalpha1,
                     winMgamma1, Adaeta1)
    #  --- training set with n = 5000 ---
    # trainMistakePlot(dataset2x, dataset2y, 0.001, 1.01, 1.1, 0.04, 0.03)
    trainMistakePlot(dataset2x, dataset2y, pteta2, winNalpha2, winMalpha2,
                     winMgamma2, Adaeta2)
Example #3
def test_gen(o, env):
    file = Indenting(StringIO())
    fixup = Indenting(StringIO())
    # A leading blank line makes the 'expect' string in the unit tests
    # easier to write and read.
    print(file=file)
    gen(o, file, fixup, env)
    fixup.seek(0)
    for line in fixup:
        print(line, file=file, end='')
    return file.getvalue()
Example #4
def make():
    gen()
    with open("gen.txt", "r", encoding="utf-8", errors="ignore") as f:
        wL = f.readlines()
    query = random.choice(wL)
    imgUrl = getImg(query)
    res = requests.get(imgUrl)
    while res.status_code != 200:
        res = requests.get(getImg(query))
    with open("img.jpg", "wb") as f:
        f.write(res.content)
    return query
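# Note: the while-loop above retries forever on non-200 responses. A bounded
# variant (a sketch; getImg is the helper from this example, and max_retries
# is an assumed parameter):
import requests

def fetch_image(query, max_retries=5):
    # Try a few times, then give up instead of spinning indefinitely.
    for _ in range(max_retries):
        res = requests.get(getImg(query))
        if res.status_code == 200:
            return res.content
    raise RuntimeError("could not fetch an image for %r" % query)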
Example #5
def perceptronTest():
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    dataset2y, dataset2x = gen.gen(10, 100, 1000, 50000, False)
    pNoMargin = perceptron.perceptron_nomargin(len(dataset1x[0]))
    pNoMargin.train(dataset1x, dataset1y)
    output = pNoMargin.test(dataset1x, dataset1y)
    print(output)
    pMargin = perceptron.perceptron_margin(len(dataset2x[0]), 1.5)
    pMargin.reset(1.5)
    pMargin.train(dataset2x, dataset2y)
    output = pMargin.test(dataset2x, dataset2y)
    print(output)
Example #6
    def export_string(self):
        """Export HTML code to custom modules."""

        import string_filter
        
        with open("data.html", "r") as f:
            string_filter.string_filter(f)

        import gen

        gen.gen()

        tkMessageBox.showinfo("Done!", "Generate complete!\nTime: %.3f s.\n\nOpen 'Schedule.xls' to see schedule table.\n\nPlease use 'LibreOffice Calc' to open file." % (time.time()-self.start_time))
Example #7
def post():
    web.go_to('https://web.talklife.co/post/new')
    web.click(tag='button', classname='black')
    stamp = datetime.datetime.now()
    wsd = g.gen()

    post = "W I S D O M:  " + wsd + "\n--------------------------------\nStamp: " + str(
        datetime.datetime.now(
        )) + "\n--------------------------------\n\n\n" + gen.sentence()

    time.sleep(2)
    web.type(post,
             into="Write your post",
             multiple=False,
             tag='textarea',
             xpath="/html/body/div/div/div[4]/div/div/textarea",
             loose_match=True)

    web.click(tag='button',
              classname='next',
              xpath="/html/body/div/div/div[4]/div/div/div/button[2]",
              multiple=False)
    web.click(tag='img',
              xpath="/html/body/div/div/div[3]/div/div/ul/li[5]/div[1]/img")
    time.sleep(2)
    web.click(tag="button",
              classname="black",
              xpath="/html/body/div/div/div[3]/div/div/div/button[2]",
              text="Post")
Example #8
def getTrueStatements(bgc, shapeDescList):
    #Build dictionary representation of the grammar
    gramDict = getGramDict(bgc, shapeDescList)
    #Use that grammar to make assertions about the scene (as rep. by the sDL)
    assertions = gen(gramDict,shapeDescList)
    #Get ready for the loop
    trueStatements, whole, nxt = [], len(assertions), 10
    for i in range(whole):
        tree = assertions[i]
        #Report progress, as a percentage
        if int(float(i)*100/whole) == nxt:
            print(nxt, 'percent of assertions processed')
            nxt += 10
        try:
            #Check each assertion for truth
            #Only fetch the result we need, not the 'assertion' bool result
            ans = processWords(tree.leaves(), bgc, shapeDescList)[0]
            if ans == YES: trueStatements.append(' '.join(tree.leaves())+'\n')
        except Exception:
            print(tree.leaves())
            break
    return trueStatements
Example #9
def Bonus():
    l = 10
    m = 20
    n = 80

    for n in range(40, 200, 40):
        data_y, data_x = gen.gen(l, m, n, 10000, True)

        Ada = AdaGrad.AdaGrad(len(data_x[0]), 0.25)

        mistake_arr = []
        lose_arr = []

        for i in range(50):
            mistake, lose = Ada.trainLose(data_x, data_y)
            mistake_arr.append(mistake)
            lose_arr.append(lose)

        plt.plot(mistake_arr, 'r', label='Misclassification error')
        plt.xlabel('number of training sessions N')
        plt.ylabel('error value')
        plt.legend()
        plt.show()

        plt.plot(lose_arr, 'g', label='Hinge loss')
        plt.xlabel('number of training sessions N')
        plt.ylabel('error value')
        plt.legend()
        plt.show()
Example #10
def generateData(l, m, n, instance_num, noise_flag, filename):
    (y, x) = gen(l, m, n, instance_num, noise_flag)

    numpy.save(DATA_PATH_PREFIX + filename + "_all_y", y)
    print "Generated " + DATA_PATH_PREFIX + filename + "_all_y.npy"

    numpy.save(DATA_PATH_PREFIX + filename + "_all_x", x)
    print "Generated " + DATA_PATH_PREFIX + filename + "_all_x.npy"

    sample_num = int(SAMPLE_PERCENTAGE * instance_num)

    instances = zip(x, y)
    sampled_instances = random.sample(instances, sample_num * 2)
    random.shuffle(sampled_instances)

    train_instances = sampled_instances[:sample_num]
    test_instances = sampled_instances[sample_num:]

    (train_x, train_y) = zip(*train_instances)
    (test_x, test_y) = zip(*test_instances)

    numpy.save(DATA_PATH_PREFIX + filename + "_d1_y", list(train_y))
    print "Generated " + DATA_PATH_PREFIX + filename + "_d1_y.npy"

    numpy.save(DATA_PATH_PREFIX + filename + "_d1_x", list(train_x))
    print "Generated " + DATA_PATH_PREFIX + filename + "_d1_x.npy"

    numpy.save(DATA_PATH_PREFIX + filename + "_d2_y", list(test_y))
    print "Generated " + DATA_PATH_PREFIX + filename + "_d2_y.npy"

    numpy.save(DATA_PATH_PREFIX + filename + "_d2_x", list(test_x))
    print "Generated " + DATA_PATH_PREFIX + filename + "_d2_x.npy"
Example #11
def part_1(l, m, n, size, noise):
    """ function for question 1.
        1. initialize variables
        2. divide training & test dataset
        3. build online learning algorithms
        4. draw plots
    """
    # initialize variables
    (y, x) = gen(l, m, n, size, noise)
    samples, loops, R = size // 10, 20, None
    lrs = [1.5, 0.25, 0.03, 0.005, 0.001]
    alphas = [1.1, 1.01, 1.005, 1.0005, 1.0001]
    gammas = [2.0, 0.3, 0.04, 0.006, 0.001]
    
    # divide dataset: 10% for training, 10% for test.
    left, right = random.sample(range(10), 2)
    x_train, x_test = x[left*samples:(left+1)*samples], x[right*samples:(right+1)*samples]
    y_train, y_test = y[left*samples:(left+1)*samples], y[right*samples:(right+1)*samples]
    
    # build online learning algorithms
    perceptron = Perceptron(R, x_train, x_test, y_train, y_test, n, samples, size)
    perceptron_margin = Perceptron_Margin(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, lrs)
    winnon = Winnon(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, alphas)
    winnon_margin = Winnon_Margin(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, alphas, gammas)
    adagrad = AdaGrad(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, lrs)
    
    # draw plots
    draw_1(size, l, m, n, perceptron.error, perceptron_margin.error, winnon.error, winnon_margin.error, adagrad.error)
Example #12
def manSeq( puz, cfg, lg, run ):
    i = 0
    lastline=""
    
    lg.sep( run )
    runs = ""
    runs += str(run+1)
    runs += '\t'
    if int(cfg[MAIN][TOTAL_RUNS]) > 9:
        runs += '\t' 
    util.delprn( ''.join([str(run+1), "\t"]), 0 )
    prnBase( cfg, False )
    thisgen = gen.gen( conf=cfg, genNum=run, puz=puz )    
    thisgen.statistics( )
    lg.gen( thisgen )

    while runCriteria(cfg[TERMINATION], thisgen):
        if thisgen.num % 25:
            lg.flush( )
        prnBase( cfg, thisgen )
        thisgen.reproduce( )
        thisgen.natSelection( )
        thisgen.num += 1
        thisgen.statistics( )
        lg.gen( thisgen )
    prnBase( cfg, thisgen )
    lg.spacer( )
    # Clear best's reference so we can die when our other stuff is done and when best loses its reference.
    return thisgen
Example #13
def winnowTest():
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    wNoMargin = winnow.winnow_nomargin(len(dataset1x[0]), 1.005)
    wNoMargin.train(dataset1x, dataset1y)
    output = wNoMargin.test(dataset1x, dataset1y)
    print(output)
    wMargin = winnow.winnow_margin(len(dataset1x[0]), 1.01, 0.04)
    wMargin.train(dataset1x, dataset1y)
    output = wMargin.test(dataset1x, dataset1y)
    print(output)
Example #14
def perceptronTuneTest():
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    tune = tuning.tuning()
    pt = perceptron.perceptron_margin(len(dataset1x[0]), 0)
    eta = [1.5, 0.25, 0.03, 0.005, 0.001]

    tune.load(dataset1x, dataset1y)
    best_eta, best_result = tune.tunPerceptron(pt, eta)
    print("Best eta is : ", best_eta)
    print("Best result is : ", best_result)
Example #15
def AdaGradTunTest():
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    tune = tuning.tuning()
    tune.load(dataset1x, dataset1y)

    Ada = AdaGrad.AdaGrad(len(dataset1x[0]), 0)
    eta = [1.5, 0.25, 0.03, 0.005, 0.001]
    best_eta, best_result = tune.tunAdaGrad(Ada, eta)
    print(" AdaGrad best eta is : " , best_eta)
    print(" AdaGrad best result is : ", best_result)
Example #16
def get_data():

    batch = gen.batch()
    # Draw these values independently; [gint(1,10)] * 2 would repeat a single draw
    n, (m, k) = 5, [gen.randint(1, 10) for _ in range(2)]
    permutation = gen.shuffle_int(1, n)
    tree1 = gen.tree(1, n, "fa")
    tree2 = gen.tree(1, n, "eage", w=(1, k))

    # If sp includes "simple" (a simple graph), you must also specify whether it is "sparse" or "dense"
    graph1 = gen.graph(1,
                       n,
                       m,
                       w=(1, 9),
                       sp=["undirected", "simple", "sparse"])
    graph2 = gen.graph(1, n, m, sp=["directed", "simple", "dense"])
    graph3 = gen.graph(1, n, m, sp=["undirected", "multi", "noselfloop"])
    graph4 = gen.graph(1, n, m, sp=["directed", "multi", "selfloop"])

    graph5 = gen.graph_DAG(1, n, m)
    graph6 = gen.graph_DAG(1, n, m, sp="multi")

    batch.addline([n, m, k])
    batch.addline(permutation)
    batch.addline(tree1)

    batch.addline("")
    batch.addline(n)
    batch.addline(tree2)

    # batch.addline("")
    # batch.addline([n,m])
    # batch.addline(graph1)
    # batch.addline(graph2)
    # batch.addline(graph3)
    # batch.addline(graph4)
    # batch.addline(graph5)
    # batch.addline(graph6)

    data1 = gen.gen(batch)
    data2 = gen.gen(batch, 10)

    return data1
Example #17
def bonus(l, m, n):
    """ function for the bonus question.
        1. initialize variables
        2. generate training & test dataset
        3. build online learning algorithms
    """
    # initialize variables
    lrs = [1.5, 0.25, 0.03, 0.005, 0.001]
    (y, x) = gen(l, m, n, 10000, True) 
    # placeholder 
    (y_train, x_train) = gen(l, m, n, 1, True)
    (y_test, x_test) = gen(l, m, n, 1, False)
    size, samples, loops, R = 10000, 10000, 50, None
    
    # build online learning algorithms
    adagrad = AdaGrad(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, lrs)
    
    # draw plots
    draw_bonus_error(adagrad.error)
    draw_bonus_loss(adagrad.loss)
Example #18
def Question3():
    l = 10
    n = 1000
    m = [100, 500, 1000]

    for i in range(2, 3):
        # (a) Data Generation
        (trainy, trainx) = gen.gen(l, m[i], n, 50000, True)
        (testy, testx) = gen.gen(l, m[i], n, 10000, False)

        # (b) Parameter Tune
        tune = tuning.tuning()
        tune.load(trainx, trainy, l, m[i], n)
        pteta, winNalpha, winMalpha, winMgamma, Adaeta = tune.allmodelTun()

        # (c) Training
        ptN = perceptron.perceptron_nomargin(len(trainx[0]))
        ptM = perceptron.perceptron_margin(len(trainx[0]), pteta)
        winN = winnow.winnow_nomargin(len(trainx[0]), winNalpha)
        winM = winnow.winnow_margin(len(trainx[0]), winMalpha, winMgamma)
        Ada = AdaGrad.AdaGrad(len(trainx[0]), Adaeta)
        for j in range(20):
            ptN.train(trainx, trainy)
            ptM.train(trainx, trainy)
            winN.train(trainx, trainy)
            winM.train(trainx, trainy)
            Ada.train(trainx, trainy)

        # (d) Testing
        ptNresult = ptN.test(testx, testy)
        ptMresult = ptM.test(testx, testy)
        winNresult = winN.test(testx, testy)
        winMresult = winM.test(testx, testy)
        Adaresult = Ada.test(testx, testy)

        print(pteta, winNalpha, winMalpha, winMgamma, Adaeta)
        print(ptNresult, ptMresult, winNresult, winMresult, Adaresult)
Example #19
def AG_objetivo(pMut, pCru):
    fila = 0  # fila and columna index into the pMut and pCru matrices
    columna = 0
    pMut = pMut.reshape(pMut.shape[0], int(pMut.size / pMut.shape[0]))
    pCru = pCru.reshape(pMut.shape[0], int(pMut.size / pMut.shape[0]))
    promedio = np.zeros([pMut.shape[0], pMut.shape[1]])
    while fila < pMut.shape[0]:
        columna = 0
        while columna < pMut.shape[1]:
            #print("   Par de Probabilidad : "+str(a)+"/"+str(50))
            dominio_funcion = 10
            largo = 8  #longitud del gen (debe ser par)
            Ngen = 40  #cantidad de generaciones
            Nind = 40  #numero de individuos
            mut = pMut[fila][
                columna]  #toma una tasa de mutacion de la matriz pMut
            cross = pCru[fila][
                columna]  #toma una tasa de cruzamiento de la matriz pCru
            Nprueba = 20

            ## domain of the initial population ##
            xmin = -dominio_funcion
            xmax = dominio_funcion
            ymin = -dominio_funcion
            ymax = dominio_funcion

            prueba = 0
            mejorIndividuo = np.zeros(Nprueba)
            while prueba < Nprueba:
                # main loop
                genes = ran([xmin, ymin], [xmax, ymax], Nind)
                individuos = fun_objetivo(genes[:, 0], genes[:, 1])
                rank = 1 / (1 - individuos)
                iter = 0
                while iter < Ngen:
                    genotipos = genotipo(genes, largo, xmax, ymax, xmin, ymin)
                    genotipoHijos = pareja(genotipos, rank, mut, cross, 2)
                    genes = gen(genotipoHijos, largo, xmax, ymax, xmin, ymin)
                    individuos = fun_objetivo(genes[:, 0], genes[:, 1])
                    rank = 1 / (1 - individuos)
                    iter = iter + 1
                mejorIndividuo[prueba] = np.amax(individuos)
                prueba = prueba + 1
            promediofc = mejorIndividuo.mean()
            #print(np.sqrt(np.sum((promedio-mejorIndividuo)**2)/2))
            promedio[fila][columna] = promediofc
            columna = columna + 1
        fila = fila + 1
    return promedio
Example #20
def winnowTunTest():
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    tune = tuning.tuning()
    tune.load(dataset1x, dataset1y)

    winnowNoM = winnow.winnow_nomargin(len(dataset1x[0]), 0)
    alpha = [1.1, 1.01, 1.005, 1.0005, 1.0001]
    best_alpha, best_result = tune.tunWinnowNoMargin(winnowNoM, alpha)
    print("No margin best alpha is : ", best_alpha)
    print("No margin best result is : ", best_result)

    winnowM = winnow.winnow_margin(len(dataset1x[0]), 0, 0)
    gamma = [2.0, 0.3, 0.04, 0.006, 0.001]
    best_alpha, best_gamma, best_result = tune.tunWinnowMargin(winnowM, alpha, gamma)
    print("With margin best alpha is : ", best_alpha)
    print("With margin best gamma is : ", best_gamma)
    print("With margin best result is : ", best_result)
Example #21
def part_2(R):
    """ function for question 2.
        1. initialize variables
        2. loop for n in [40, 80, 120, 160, 200]
        3. for each n value, divide training & test dataset
        4. build online learning algorithms
        5. count the number of mistakes made when get R correct predictions.
        6. draw plots
    """
    # initialize variables
    l, m, size, noise = 10, 20, 50000, False
    samples, loops = size // 10, 20
    lrs = [1.5, 0.25, 0.03, 0.005, 0.001]
    alphas = [1.1, 1.01, 1.005, 1.0005, 1.0001]
    gammas = [2.0, 0.3, 0.04, 0.006, 0.001]
    error_p, error_pm, error_w, error_wm, error_a = [], [], [], [], []

    # loop for n
    for n in range(40, 240, 40):
        print('n = {}'.format(n))
        # divide dataset: 10% for training, 10% for test (fixed random seed)
        (y, x) = gen(l, m, n, size, noise)
        random.seed(1)
        left, right = random.sample(range(10), 2)
        x_train, x_test = x[left*samples:(left+1)*samples], x[right*samples:(right+1)*samples]
        y_train, y_test = y[left*samples:(left+1)*samples], y[right*samples:(right+1)*samples]
        
        # build online learning algorithms, and count mistakes made
        perceptron = Perceptron(R, x_train, x_test, y_train, y_test, n, samples, size)
        error_p.append(perceptron.mistakes)
        # [517, 607, 605, 595, 611]
        perceptron_margin = Perceptron_Margin(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, lrs)
        error_pm.append(perceptron_margin.mistakes)
        # [737, 684, 640, 699, 603]
        winnon = Winnon(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, alphas)
        error_w.append(winnon.mistakes)
        # [118, 235, 300, 352, 378]
        winnon_margin = Winnon_Margin(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, alphas, gammas)
        error_wm.append(winnon_margin.mistakes)
        # [559, 615, 305, 349, 374]
        adagrad = AdaGrad(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, lrs)
        error_a.append(adagrad.mistakes)
        # [583, 543, 527, 548, 601]

    # draw plots
    draw_2(l, m, R, error_p, error_pm, error_w, error_wm, error_a)
Example #22
File: log.py Project: brhoades/irps
    def absBestFinish( self, cfg, best ):
        self.res.write( "\nTree with the Global Best Fitness\n" )

        #Mock container generation
        generation = gen( cfg )

        #Avoiding errors
        best.gen = generation

        self.res.write( "\nRandom GP Performance\n" )
        self.res.write( "Global best's gen #: " + str(best.gennum) + "\n" )

        #Clear old payoffs
        best.payoffs = []

        #Randomly make many individuals to face.
        for i in range(30):
            generation.inds.append( agent( generation ) )

        for opp in generation.inds:
            beforepayoff = best.mem*2
            for j in range(0,generation.seqs):
                tmoves = opp.mymoves
                oppres = opp.run( best.mymoves )
                myres = best.run( opp.mymoves )

                if j > beforepayoff:
                    best.upres( myres, oppres )
                    opp.upres( oppres, myres )

        avg = 0
        for i in best.payoffs:
            avg += i
        avg /= len(best.payoffs)
        self.res.write( "Random fit: " + str(avg) + "\n" )

        self.csv.write( "\n\n" + "Global Best Gen #,avgabsfit,lastwinfit,csv,random fit" + "\n" )
        self.csv.write( str(best.gennum) + "," + str(best.fit) + "," + str(best.fits[0]) + "," + str(best.fits[1]) + "," + str(avg) + "\n" )
Example #23
File: run.py Project: brhoades/irps
def run( cfg, i, lg ):
    #read some cfg stuff in and convert it. Also init various caches.
    
    cfg[TERMINATE][NO_CHANGE_FITNESS] = int(cfg[TERMINATE][NO_CHANGE_FITNESS])
    cfg[TERMINATE][FITEVALS] = int(cfg[TERMINATE][FITEVALS])
        
    generation = gen( cfg, i )
        
    prnBase( cfg, i, generation )
        
    generation.initialize( )
        
    prnBase( cfg, i, generation )
        
    while noTerminate( cfg, generation ):
        lg.entry(generation)
        
        #Recomb + Mutation
        generation.recombination( )

        generation.reevalFitness( )
        
        #Survival
        generation.survivalselection( )
        prnBase( cfg, i, generation )
        
    lg.entry(generation)
    
    #delicately extract the best from the generation
    best = generation.best( )
    lg.bestFinish( best )
    lg.spacer( )

    generation.delete( best )
    best.gen = None
        
    return best
Example #24
            Nind)  # Random generation of the first generation of genes
individuos = AG_objetivo(
    genes[:, 0], genes[:, 1]
)  # Compute each "individual" from the genes; AG_objetivo is the objective function
rank = 1 / (1 - individuos)  # Obtain the "fitness" of each individual
iter = 0
while iter < Ngen:
    print("AG_1 : " + str(iter))
    genotipos = genotipo(
        genes, largo, xmax, ymax, xmin,
        ymin)  # Obtain each "genotype" from the genes
    genotipoHijos = pareja(
        genotipos, rank, mut, cross, 1
    )  # Obtain the "child genotypes" by selecting, mutating, and crossing the genotypes
    genes = gen(
        genotipoHijos, largo, xmax, ymax, xmin, ymin
    )  # Obtain the new generation of "genes" from the child genotypes
    individuos = AG_objetivo(
        genes[:, 0], genes[:, 1]
    )  # Obtain the new "individuals" from the new genes
    rank = 1 / (1 - individuos)  # Obtain the new "fitness" of each individual
    '''Plot data'''
    mejoresElementos[iter] = np.amax(rank)
    sum_offLine = sum_offLine + np.amax(individuos)
    sum_onLine = sum_onLine + np.mean(individuos)
    off_line[iter] = sum_offLine / (iter + 1)
    on_line[iter] = sum_onLine / (iter + 1)
    iter = iter + 1
'''Extracting the best solution'''
mutFinal = genes[np.argmax(individuos), 0]
Example #25
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
import gen
gen.gen()

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['breathe']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'
Example #26
from algorithms import Adagrad
from gen import gen
import numpy as np

(y, x) = gen(l=10, m=20, n=40, number_of_instances=10000, noise=1)

def trainAdagrad(clf, x=x, y=y, rounds=50):
    errors = np.zeros(rounds)
    hingeloss = np.zeros(rounds)
    for round in range(rounds):  # rounds is an int; iterate over range(rounds)
        clf.trainAll(x, y)
        errors[round] = 1 - clf.accuracy(x, y)
        hingeloss[round] = 1 - clf.hingeLoss(x, y)
    return errors, hingeloss
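# For reference, the update these AdaGrad examples train with: on an example
# (x, y) with y in {-1, +1}, if the hinge loss is active (y * (w.x + theta) <= 1),
# accumulate the squared gradient per coordinate and step with a per-coordinate
# learning rate. A self-contained sketch; every name here is illustrative, not
# the project's Adagrad/AdaGrad class:
import numpy as np

def adagrad_hinge_sketch(x, y, eta=1.5, eps=1e-8):
    # x: (num_examples, n) features; y: (num_examples,) labels in {-1, +1}.
    n = x.shape[1]
    w, theta = np.zeros(n), 0.0
    G_w, G_theta = np.zeros(n), 0.0  # running sums of squared gradients
    mistakes = 0
    for xi, yi in zip(x, y):
        score = np.dot(w, xi) + theta
        if np.sign(score) != yi:
            mistakes += 1
        if yi * score <= 1:  # hinge loss active: gradient is (-yi*xi, -yi)
            G_w += (yi * xi) ** 2
            G_theta += yi ** 2
            w += eta * yi * xi / np.sqrt(G_w + eps)
            theta += eta * yi / np.sqrt(G_theta + eps)
    return w, theta, mistakes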
Example #27
    
if __name__ == "__main__":
    l = 10
    all_m = [100, 500, 1000]
    n = 1000
    num_iter = 20
    Perceptron1_Acc = []
    Perceptron2_Acc = []
    Winnow1_Acc = []
    Winnow2_Acc = []
    AdaGrad_Acc = []

    for m in all_m:
        print "m =",m
        (all_train_label, all_train_data) = gen(l, m, n, 50000, True)
        (all_test_label, all_test_data) = gen(l, m, n, 10000, False)
        (train_data, train_label, test_data, test_label) = random_10(all_train_data, all_train_label)

        ######### Tuning Parameters #########################################
        ######### Perceptron1: LR = 1, Margin = 0
        ######### Perceptron2: LR = [1.5, 0.25, 0.03, 0.005, 0.001], Margin = 1
        ######### Winnow1: Alpha = [1.1, 1.01, 1.005, 1.0005, 1.0001], Margin = 0
        ######### Winnow2: Alpha = [1.1, 1.01, 1.005, 1.0005, 1.0001], Margin = [2.0, 0.3, 0.04, 0.006, 0.001]
        ######### AdaGrad: LR = [1.5, 0.25, 0.03, 0.005, 0.001]

        Per2_LR = [1.5, 0.25, 0.03, 0.005, 0.001]
        Win_Alpha = [1.1, 1.01, 1.005, 1.0005, 1.0001]
        Win2_Margin = [2.0, 0.3, 0.04, 0.006, 0.001]
        Ada_LR = [1.5, 0.25, 0.03, 0.005, 0.001]
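# Note: random_10 (used here and in Example #29) is not shown. A minimal sketch
# of what it presumably does, assuming it draws two disjoint random 10% slices
# for training and test:
import random

def random_10_sketch(data, labels):
    idx = list(range(len(data)))
    random.shuffle(idx)
    k = len(data) // 10
    train_idx, test_idx = idx[:k], idx[k:2 * k]
    return ([data[i] for i in train_idx], [labels[i] for i in train_idx],
            [data[i] for i in test_idx], [labels[i] for i in test_idx])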
Example #28
def Question2():
    l = 10
    m = 20
    ptNmistake = []
    ptNlocation = []
    ptMmistake = []
    ptMlocation = []
    winNmistake = []
    winNlocation = []
    winMmistake = []
    winMlocation = []
    Adamistake = []
    Adalocation = []
    for n in range(40, 240, 40):
        datasety, datasetx = gen.gen(l, m, n, 50000, False)
        tune = tuning.tuning()
        tune.load(datasetx, datasety, l, m, n)
        pteta, winNalpha, winMalpha, winMgamma, Adaeta = tune.allmodelTun()
        ptN_mistake, ptN_mistakearr, ptM_mistake, ptM_mistakearr, winN_mistake, winN_mistakearr, winM_mistake, winM_mistakearr, Ada_mistake, Ada_mistakearr = trainConvergePlot(
            datasetx, datasety, 1000, pteta, winNalpha, winMalpha, winMgamma,
            Adaeta)
        ptNmistake.append(ptN_mistake)
        ptNlocation.append(ptN_mistakearr[len(ptN_mistakearr) - 1])
        ptMmistake.append(ptM_mistake)
        ptMlocation.append(ptM_mistakearr[len(ptM_mistakearr) - 1])
        winNmistake.append(winN_mistake)
        winNlocation.append(winN_mistakearr[len(winN_mistakearr) - 1])
        winMmistake.append(winM_mistake)
        winMlocation.append(winM_mistakearr[len(winM_mistakearr) - 1])
        Adamistake.append(Ada_mistake)
        Adalocation.append(Ada_mistakearr[len(Ada_mistakearr) - 1])

        fig = plt.figure()
        plt.plot(ptN_mistakearr, 'r', label='perceptron No margin')
        plt.plot(ptM_mistakearr, 'g', label='perceptron with margin')
        plt.plot(winN_mistakearr, 'b', label='Winnows No margin')
        plt.plot(winM_mistakearr, 'c', label='Winnows with margin')
        plt.plot(Ada_mistakearr, 'y', label='AdaGrad model')
        plt.legend(loc='lower right')
        pylab.xlabel('number of examples N')
        pylab.ylabel('number of mistakes M')

        plt.show()

    # plotting mistake vs sample n at converge time

    fig = plt.figure()
    ax = fig.add_subplot(111)

    plt.plot(ptNlocation, ptNmistake, 'r*', label='perceptron No margin')
    plt.plot(ptMlocation, ptMmistake, 'g*', label='perceptron with margin')
    plt.plot(winNlocation, winNmistake, 'b*', label='Winnows No margin')
    plt.plot(winMlocation, winMmistake, 'c*', label='Winnows with margin')
    plt.plot(Adalocation, Adamistake, 'y*', label='AdaGrad model')
    plt.legend(loc='upper left')
    pylab.xlabel('number of examples N')
    pylab.ylabel('number of mistakes M')

    for i in range(len(Adalocation)):
        ax.annotate('n=%s,#mistake=%s' % (ptNlocation[i], ptNmistake[i]),
                    xy=(ptNlocation[i], ptNmistake[i]),
                    textcoords='data')
        ax.annotate('n=%s,#mistake=%s' % (ptMlocation[i], ptMmistake[i]),
                    xy=(ptMlocation[i], ptMmistake[i]),
                    textcoords='data')
        ax.annotate('n=%s,#mistake=%s' % (winNlocation[i], winNmistake[i]),
                    xy=(winNlocation[i], winNmistake[i]),
                    textcoords='data')
        ax.annotate('n=%s,#mistake=%s' % (winMlocation[i], winMmistake[i]),
                    xy=(winMlocation[i], winMmistake[i]),
                    textcoords='data')
        ax.annotate('n=%s,#mistake=%s' % (Adalocation[i], Adamistake[i]),
                    xy=(Adalocation[i], Adamistake[i]),
                    textcoords='data')

    plt.show()
Example #29

if __name__ == "__main__":
    l = 10
    m = 20
    all_n = np.linspace(40, 200, 5)
    N = 50000
    Perceptron1_Mistake = []
    Perceptron2_Mistake = []
    Winnow1_Mistake = []
    Winnow2_Mistake = []
    AdaGrad_Mistake = []

    for n in all_n:
        print "n =", n
        (all_label, all_data) = gen(l, m, n, N, False)
        (train_data, train_label, test_data,
         test_label) = random_10(all_data, all_label)

        ######### Tuning Parameters #########################################
        ######### Perceptron1: LR = 1, Margin = 0
        ######### Perceptron2: LR = [1.5, 0.25, 0.03, 0.005, 0.001], Margin = 1
        ######### Winnow1: Alpha = [1.1, 1.01, 1.005, 1.0005, 1.0001], Margin = 0
        ######### Winnow2: Alpha = [1.1, 1.01, 1.005, 1.0005, 1.0001], Margin = [2.0, 0.3, 0.04, 0.006, 0.001]
        ######### AdaGrad: LR = [1.5, 0.25, 0.03, 0.005, 0.001]

        Per2_LR = [1.5, 0.25, 0.03, 0.005, 0.001]
        Win_Alpha = [1.1, 1.01, 1.005, 1.0005, 1.0001]
        Win2_Margin = [2.0, 0.3, 0.04, 0.006, 0.001]
        Ada_LR = [1.5, 0.25, 0.03, 0.005, 0.001]
Example #30
from algorithms import sampleData, getBestAlgorithm, classifer_group
from gen import gen

# experiment 3

for m in [100, 500, 1000]:
    ytrain, xtrain = gen(l=10, m=m, n=1000, number_of_instances=50000, noise=True)
    ytest, xtest = gen(l=10, m=m, n=1000, number_of_instances=10000, noise=False)
    xtrain_t, ytrain_t, xtest_t, ytest_t = sampleData(xtrain, ytrain)
    classifierss = classifer_group(xtrain.shape[1])
    trained_clfs = [getBestAlgorithm(
        xtrain_t, ytrain_t, xtest_t, ytest_t, clfs, iters=20) for clfs in classifierss]
    print("for m=%d"%m)
    for clf in trained_clfs:
        clf.reset()
        for i in range(20):
            clf.trainAll(xtrain, ytrain)
        print(clf.name())
        print(clf.accuracy(xtest, ytest))
        print(clf.accuracy(xtrain, ytrain))
Example #31
def generateWeights(S, s_count, d_count, t_count):
	return g.gen(S, utl.gen_method_1, s_count, d_count, t_count)
Example #32
	derivations = []

	stack = []
	stack.append(([], (ur,)))

	while stack:
		derivation = stack.pop()

		# check for convergence
		if len(derivation[1]) > 1 and derivation[1][-2] == derivation[1][-1]:
			derivations.append(derivation)
			continue

		# Generate candidate set
		input = derivation[1][-1]
		candidates = gen(input)

		# Assemble tableau
		tableau = []
		for constraint in con:
			tableau.append([constraint.vios(candidate) for candidate in candidates])

		# Find minimal violation for all constraints
		viominima = [tableau[c][0][:] for c in range(len(con))]

		for c in range(1, len(candidates)):
			for v in range(len(con)):
				if not leq(viominima[v], tableau[v][c]):
					viominima[v] = tableau[v][c][:]

		# Iterate through candidates
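# Note: leq is not shown. In the tableau loop above it decides whether one
# violation profile is at most as bad as another; one plausible reading (the
# real project may compare lexicographically instead) is a total-violation
# comparison:
def leq(a, b):
    # Hypothetical comparator over violation profiles.
    return sum(a) <= sum(b)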
Example #33
from algorithms import sampleData, getBestConvergence, classifer_group
from gen import gen
import numpy as np
from matplotlib import pylab

grid = np.zeros((5, 5))
for k,n in enumerate([40, 80, 120, 160, 200]):
    y, x = gen(l=10, m=20, n=n, number_of_instances=50000, noise=0)
    xtrain, ytrain, xtest, ytest = sampleData(x, y)
    classifierss = classifer_group(x.shape[1])
    trained_clfs = [getBestConvergence(
        xtrain, ytrain, xtest, ytest, clfs
    ) for clfs in classifierss]
    print("n = %d" % n)
    for (i, clf) in enumerate(trained_clfs):
        print("%s => %f" % (clf.name(), clf.accuracy(xtest, ytest)))
        grid[k, i] = clf.t - clf.errors[-1]

pylab.ion()
pylab.figure("Mistake vs n plot")
pylab.legend(handles=[pylab.plot([40, 80, 120, 160, 200], grid[i, :], label=clf.__class__.__name__)[0]
                      for (i, clf) in enumerate(trained_clfs)
])
pylab.savefig("mistake-n.png")
input()
Example #34
__author__ = 'Steffen'

from CMUDict import CmuDict
from gen import gen

text = gen(corpus='lyrics_out.txt')
print(text)
stressDict = CmuDict()
print(stressDict.text(text))
Example #35
            Wrong += 1
    Acc = 1 - float(Wrong) / N
    return Acc

    
if __name__ == "__main__":
    l = 10
    m = 20
    n = 40
    N = 10000
    LR = 1.5
    num_round = 50
    misclf_rate = []
    hinge_loss = []

    (train_label, train_data) = gen(l, m, n, N, True)

    clf_AdaGrad = AdaGrad(LR, train_data, n)

    for i in range(num_round):
        print(i)
        clf_AdaGrad.train(train_data, train_label)
        #est_label = clf_AdaGrad.predict(train_data)
        temp_misclf_error = clf_AdaGrad.misclf_error(train_data,train_label)
        #print temp_misclf_error
        misclf_rate.append(temp_misclf_error)
        temp_hinge_loss = clf_AdaGrad.hinge_loss(train_data, train_label)
        #print temp_hinge_loss
        hinge_loss.append(temp_hinge_loss)

    N = np.linspace(1,50,50)
Example #36
try:
    import standalone
except ImportError:
    standalone = None
if standalone is not None:
    sentinel = object()
    old_standalone = sys.modules.get("standalone", sentinel)
    with warnings.catch_warnings(record=True):
        from docutils.readers import standalone
        sys.modules["standalone"] = standalone

# Autogenerate the rst files on the first run.
if not os.path.exists("index.rst"):
    sys.path.append(os.path.abspath("."))
    from gen import gen
    gen()

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'
Example #37
    return w[0:-1], w[-1], error, Q


def test_bonus():
    w1, theta1, error_t, Q = adagrad(np.tile(dy, 50), np.tile(dx, (50, 1)),
                                     1.5)
    index = np.linspace(0, 50 * 10000, num=50, endpoint=False, dtype=int)
    # print(len(index))
    error_plot = [error_t[i] for i in index]
    for i in range(49, 0, -1):
        error_plot[i] = error_plot[i] - error_plot[i - 1]
    plt.plot(np.linspace(1, 50, dtype=int), error_plot, color="blue")
    plt.xlabel("round")
    plt.ylabel("# of mistakes")
    plt.title("AdaGrad mistakes over rounds")
    plt.show()
    plt.figure()
    Q_plot = [Q[i] for i in index]
    for i in range(49, 0, -1):
        Q_plot[i] = Q_plot[i] - Q_plot[i - 1]
    plt.plot(np.linspace(1, 50, dtype=int), Q_plot, color="blue")
    plt.xlabel("round")
    plt.ylabel("hinge loss")
    plt.title("hinge loss over rounds")
    plt.show()


dy, dx = gen.gen(10, 20, 40, 10000, True)

test_bonus()
Example #38
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
# options.add_argument('headless')  # uncomment if you want no GUI feedback
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)

web = webdriver.Chrome(options=options)
wait = WebDriverWait(web, 10)

web.get('https://www.github.com/login')

username = ''  #username here
password = ''  #password here

reponame = gen.gen() + '-active-boosted'
reporm = "Boosted repo added to fill git activity grid. For details, check out github-activity-booster."

time.sleep(2)

user = web.find_element_by_id('login_field')
user.send_keys(username)
pw = web.find_element_by_id('password')
pw.send_keys(password)

time.sleep(1)
web.find_element_by_xpath(
    '/html/body/div[3]/main/div/form/div[4]/input[9]').click()

while (web.current_url == 'https://github.com/sessions/verified-device'):
    time.sleep(1)  # assumed loop body (the original snippet is truncated here): wait for manual device verification
Example #39
def do(self, which_callback, *args):
    gen()
Example #40
# coding: utf-8
from gen import gen

phis, rs = gen()

with open("data.csv", "w") as data:
    for pair in zip(phis, rs):
        data.write(",".join(map(str, pair)) + "\n")
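# The same file can be written with the csv module, which handles quoting and
# platform newlines; a sketch assuming phis and rs are equal-length sequences
# of numbers:
import csv

with open("data.csv", "w", newline="") as data:
    csv.writer(data).writerows(zip(phis, rs))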
Example #41
def do(self, which_callback, *args):
    output = gen()
    with open('generated/sample ' + str(datetime.utcnow()) + ".xml", 'w') as f:
        f.write(output)
Example #42
def feed():
    response = make_response(gen.gen())
    response.headers["Content-type"] = "text/plain"
    return response
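# make_response here is presumably Flask's; for this view to serve anything it
# must be registered on an app. A minimal sketch of the assumed surrounding app:
from flask import Flask, make_response
import gen

app = Flask(__name__)

@app.route("/feed")
def feed():
    response = make_response(gen.gen())
    response.headers["Content-type"] = "text/plain"
    return response

if __name__ == "__main__":
    app.run()  # serves the generated text at /feed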
Example #43
def AdaGradTest():
    dataset1y, dataset1x = gen.gen(10, 100, 500, 50000, False)
    Ada = AdaGrad.AdaGrad(len(dataset1x[0]), 1.5)
    Ada.train(dataset1x, dataset1y)
    output = Ada.test(dataset1x, dataset1y)
    print(output)