def main():
    random.seed(64)
    
    creator.create("FitnessMax", base.Fitness, weights=(-1.0,))
    creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMax) #@UndefinedVariable
    
    toolbox = base.Toolbox()
    
    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 1)
    
    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, DIM*L) #@UndefinedVariable
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    
    
    toolbox.register("evaluate", rastrigin_arg0)
    toolbox.register("mate", tools.cxTwoPoints)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.001)
    toolbox.register("select", tools.selTournament, tournsize=5)
    
    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)
    
    algorithms.eaSimple(pop, toolbox, cxpb=0.8, mutpb=1, ngen=100, stats=stats,
                        halloffame=hof, verbose=True)
    
    return pop, stats, hof
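
# The snippet above relies on imports and on DIM, L, and rastrigin_arg0 that are
# defined elsewhere in its source project. Below is a minimal sketch of those
# missing pieces, assuming a plain binary encoding of the Rastrigin function
# over [-5.12, 5.12]; the constant values and the decoding scheme are
# illustrative assumptions, not the original project's code.

import array
import math
import random

import numpy as np
from deap import algorithms, base, creator, tools

DIM, L = 5, 16                      # assumed: DIM variables, L bits per variable
BOUND_LOW, BOUND_UP = -5.12, 5.12


def rastrigin_arg0(individual):
    # Decode each group of L bits into a real value, then score it with the
    # Rastrigin function and return a one-element fitness tuple.
    xs = []
    for d in range(DIM):
        bits = individual[d * L:(d + 1) * L]
        value = int("".join(str(b) for b in bits), 2)
        xs.append(BOUND_LOW + value * (BOUND_UP - BOUND_LOW) / (2 ** L - 1))
    return 10 * DIM + sum(x * x - 10 * math.cos(2 * math.pi * x) for x in xs),

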
def ea(evaluator, pop_size=50, ngen=40, mutation_rate=0.05):

    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", array.array, typecode='l', fitness=creator.FitnessMax)

    toolbox = base.Toolbox()

    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 2**PADDING - 1)

    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, 3)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", evaluator)
    toolbox.register("mate", crossover)  # was cxTwoPoint
    toolbox.register("mutate", mutation, indpb=mutation_rate)  # was 0.05
    toolbox.register("select", tools.selRoulette)  # was setTournament

    pop = toolbox.population(n=pop_size)  # was 300
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    algorithms.eaSimple(pop, toolbox, cxpb=1, mutpb=1, ngen=ngen,  # was 0.5, 0.2, 40
                        stats=stats, halloffame=hof, verbose=True)

    return hof[0]
Example #3
def main(argv=None):

    outputName = argv[1]

    seedValue = 29
    if len(argv) > 2:
        seedValue = argv[2]

    print seedValue
    random.seed(seedValue)

    # here starts the algorithm
    pop = toolbox.population(n=npop)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)

    algorithms.eaSimple(pop, toolbox, cxpb, mutpb, ngen, stats, halloffame=hof)

    # print pop, stats, hof
    print stats, hof
    print "Fitness in training = ", map(toolbox.evaluate, hof)
    fitnessInTest = map(toolbox.evaluateTest, hof)
    print "Fitness in test = ", fitnessInTest

    with open(outputName, "a+") as f:
        f.write("%.5f\n" % (fitnessInTest[0][0] * 100.0))

    return fitnessInTest[0][0]
Example #4
def main(ton):
    pop = toolbox.population(n=400)
    hof = tools.HallOfFame(3)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register('avg', numpy.mean)
    stats.register('std', numpy.std)
    stats.register('min', numpy.min)
    stats.register('max', numpy.max)

    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb=0.5,
                                   mutpb=0.3,
                                   ngen=70,
                                   stats=stats,
                                   halloffame=hof,
                                   verbose=True)
    while min(log.select('min')) > 15:
        pop = toolbox.population(n=400)
        pop, log = algorithms.eaSimple(pop,
                                       toolbox,
                                       cxpb=0.5,
                                       mutpb=0.3,
                                       ngen=70,
                                       stats=stats,
                                       halloffame=hof,
                                       verbose=True)

    for best in hof:
        print([x[0] for x in best])

        transform_lilypond(ton, [x[0] for x in best])
Example #5
def main(ngen, npop, mutpb, cxpb, seedValue, tournSize, heightMaxCreation, heightMaxNew, heightLimit):

    toolbox.register("evaluate", evaluate)
    toolbox.register("select", tools.selTournament, tournsize=tournSize)
    toolbox.register("mate", staticLimitCrossover, heightLimit=heightLimit, toolbox=toolbox)
    toolbox.register("expr_mut", gp.genGrow, min_=0, max_=heightMaxNew)
    toolbox.register("mutate", staticLimitMutation, expr=toolbox.expr_mut, heightLimit=heightLimit, toolbox=toolbox)
    toolbox.register("expr", gp.genRamped, pset=pset, min_=0, max_=heightMaxNew)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
 
    #here starts the algorithm
    random.seed(seedValue)
    pop = toolbox.population(n=npop)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)

    algorithms.eaSimple(pop, toolbox, cxpb, mutpb, ngen, stats, halloffame=hof)
    #algorithms.eaMuPlusLambda(pop, toolbox, npop, npop + 50, cxpb, mutpb, ngen, stats, halloffame=hof)

    #print pop, stats, hof
    print stats, hof
    print "Fitness in training = ", map(toolbox.evaluate, hof)
    def solve(self):
        """
        :rtype: MultiModeClasses.Solution
        """
        toolbox = self.generate_toolbox_for_problem()
        population = toolbox.population(n=self.size_of_population)
        algorithms.eaSimple(population, toolbox,
                            cxpb=self.crossover_probability,
                            mutpb=self.mutation_probability,
                            ngen=self.number_of_generations,
                            verbose=False)
        return self.Solution.generate_solution_from_serial_schedule_generation_scheme(
            tools.selBest(population, 1)[0], self.problem)
def main():
    generations = 100
    cxpb = 0.9
    mpb = 0.1
    pop = toolbox.population(n=100)
    
    algorithms.eaSimple(pop, toolbox, cxpb, mpb, generations, verbose=False)
    
    return pop
def main():
  generations = 1000
  cxpb = 1.0
  mpb = 1.0
  pop = toolbox.population(n=100)
  
  algorithms.eaSimple(pop, toolbox, cxpb, mpb, generations)

  return pop
Example #9
File: test.py Project: BambooL/pbe
def main():
    random.seed(10)
    pop = toolbox.population(n=1000)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    algorithms.eaSimple(pop, toolbox, 0.5, 0.2, 30, stats, halloffame=hof)
    print result_program
    return result_program
Example #10
File: gp_spambase.py Project: nwrush/GCA
def main():
    random.seed(10)
    pop = toolbox.population(n=100)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)
    
    algorithms.eaSimple(pop, toolbox, 0.5, 0.2, 40, stats, halloffame=hof)

    return pop, stats, hof
Example #11
def compute():
    random.seed(47)
    pop = toolbox.population(n=180)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("Avg", tools.mean)
    stats.register("Std", tools.std)
    stats.register("Min", min)
    stats.register("Max", max)
    
    algorithms.eaSimple(toolbox, pop, 0.4, 0.3, 410, halloffame=hof)
    #algorithms.eaMuPlusLambda(toolbox, pop, 500, 100, 0.8 , 0.1, 500, hof)
    return sorted(list(hof[-1]))
Example #12
def test_deap():
    import array
    import numpy
    import random

    from deap import algorithms
    from deap import base
    from deap import creator
    from deap import tools

    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMax)

    toolbox = base.Toolbox()

    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 1)

    # Structure initializers
    toolbox.register("individual",
        tools.initRepeat,
        creator.Individual,
        toolbox.attr_bool, 
        100 )
    toolbox.register("population",
        tools.initRepeat,
        list,
        toolbox.individual )

    def evalOneMax(individual):
        return sum(individual),

    toolbox.register("evaluate", evalOneMax)
    toolbox.register("mate", tools.cxTwoPoints)
    toolbox.register("mutate", tools.mutFlipBit, indpb = 0.05)
    toolbox.register("select", tools.selTournament, tournsize = 3)

    pop = toolbox.population(n=100)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)

    algorithms.eaSimple(pop, toolbox,
        cxpb = 0.5,
        mutpb = 0.2,
        ngen = 10,
        stats = stats,
        halloffame = hof,
        verbose = True )
Example #13
def findCaptureBoardsWithDeap():
    random = Random()
    manager = Manager()
    slow_queue = manager.Queue()
    results_queue = manager.Queue()
    creator.create("FitnessMax", base.Fitness, weights=BoardAnalysis.WEIGHTS)
    creator.create("Individual",
                   Board,
                   fitness=creator.FitnessMax)  # @UndefinedVariable

    CXPB, MUTPB, NPOP, NGEN, WIDTH, HEIGHT = 0.0, 0.5, 1000, 300, 6, 5
    toolbox = base.Toolbox()
    pool = Pool()
    halloffame = LoggingHallOfFame(10)
    pool.apply_async(evaluateSlowBoards, [slow_queue, results_queue])
    toolbox.register("map", loggedMap, pool)
    toolbox.register("individual",
                     createRandomBoard,
                     creator.Individual,  # @UndefinedVariable
                     random,
                     WIDTH,
                     HEIGHT)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate",
                     mutateBoard,
                     creator.Individual,  # @UndefinedVariable
                     random)
    toolbox.register("select",
                     selectBoards,
                     partial(tools.selTournament, tournsize=3),
                     results_queue,
                     halloffame,
                     creator.Individual)  # @UndefinedVariable
    toolbox.register("evaluate", evaluateBoard, slow_queue)

    pop = toolbox.population(n=NPOP)
    stats = Statistics()
    stats.register("best", BoardAnalysis.best_score)
    verbose = True
    eaSimple(pop, toolbox, CXPB, MUTPB, NGEN, stats, halloffame, verbose)
    for board in halloffame:
        print()
        print(board.display())
        score = board.fitness.values[0]
        if score < 0:
            print('{} score.'.format(score))
        else:
            analysis = BoardAnalysis(board)
            print(analysis.display())
Example #14
File: pg.py Project: thiagorizuti/ufjf-pg
def main():
    pop = toolbox.population(n=100)
    hof = tools.HallOfFame(1)
    cxpb = 0.8
    mutpb = 0.2
    ngen = 500
    algorithms.eaSimple(pop, toolbox, cxpb, mutpb,ngen, halloffame=hof,verbose=True)
    for ind in hof:
        ind1 = ind
        print accuracy(ind,test)
        print " "


    return 0
Example #15
def main():
  random.seed(69)

  trail_file = open("santafe_trail.txt")
  ant.parse_matrix(trail_file)

  cxpb = 1.0
  mpb = 0.1
  generations = 1000
  pop = toolbox.population(n=100)

  algorithms.eaSimple(pop, toolbox, cxpb, mpb, generations, verbose=False)

  return pop
Example #16
File: dtm_ga_onemax.py Project: nwrush/GCA
def main():
    random.seed(64)

    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)
    
    algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=40, stats=stats,
                        halloffame=hof, verbose=True)
    logging.info("Best individual is %s, %s", hof[0], hof[0].fitness.values)
Example #17
File: nqueens.py Project: AiTeamUSTC/GPE
def main(seed=0):
    random.seed(seed)

    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("Avg", numpy.mean)
    stats.register("Std", numpy.std)
    stats.register("Min", numpy.min)
    stats.register("Max", numpy.max)

    algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=100, stats=stats,
                        halloffame=hof, verbose=True)

    return pop, stats, hof
Example #18
File: GA.py Project: maxberggren/SiteOpt
def main():
    random.seed(63)
    
    pop = toolbox.population(n=600)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)
    
    algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.1, ngen=10000, stats=stats,
                        halloffame=hof, verbose=True)
    
    return pop, stats, hof
Example #19
def main():
    #testcube.scramble(15)
    testcube.move_R()
    testcube.move_U2()
    testcube.move_Ra()
    testcube.move_D()
    testcube.move_b2()
    testcube.move_Ra()
    testcube.move_D()
    testcube.move_R2()
    testcube.move_b()
    testcube.move_u()
    testcube.move_Bb2()
    testcube.move_U2()
    testcube.move_Ll2()
    testcube.move_d2()
    testcube.move_u2()
    #print(fitness1(testcube.getFaces()))
    #testcube.printCube()
    testcube._store()
    testcube._restore()

    pop = toolbox.population(n=80)
    hof=tools.HallOfFame(2)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    gen = int(sys.argv[1])
    cxpb = float(sys.argv[2])
    mutpb = float(sys.argv[3])

    print("CXPB = "+str(cxpb)+"  MUTPB = "+str(mutpb)+"  GEN = "+str(gen))

    algorithms.eaSimple(pop, toolbox, cxpb, mutpb, gen, stats, halloffame=hof, verbose=False)

    print(hof[0])
    print(hof[0].fitness)
    print(hof[1])
    print(hof[1].fitness)
    bestMoves = gp.compile(hof[0],pset)
    testcube.run(bestMoves)
    #testcube.printCube()
    print()

    return pop, stats, hof
Example #20
def main(ngen, npop, mutpb, cxpb, seedValue, tournSize, heightMaxCreation, heightMaxNew, heightLimit):
    toolbox.register("evaluate", evaluate)
    toolbox.register("test_evaluate", test_evaluate)
    toolbox.register("final_test", final_test)
    toolbox.register("select", tools.selTournament, tournsize=tournSize)
    toolbox.register("mate", staticLimitCrossover, heightLimit=heightLimit, toolbox=toolbox)
    toolbox.register("expr_mut", gp.genGrow, min_=0, max_=heightMaxNew)
    toolbox.register("mutate", staticLimitMutation, expr=toolbox.expr_mut, heightLimit=heightLimit, toolbox=toolbox)
    toolbox.register("expr", gp.genRamped, pset=pset, min_=0, max_=heightMaxNew)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
 
    #here starts the algorithm
    random.seed(seedValue)
    pop = toolbox.population(n=npop)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)

    #kf = KFold( X.shape[0], n_folds=10, random_state=0)
    #fitnessTest = []
    #for train_index, test_index in kf:

        #X_train, X_test = X[train_index], X[test_index]
        #y_train, y_test = logy_views[train_index], logy_views[test_index]

    algorithms.eaSimple(pop, toolbox, cxpb, mutpb, ngen, stats, halloffame=hof)

    print stats, hof
    print "Fitness in training = ", map(toolbox.evaluate, hof)

    if training:    
        fitnessInTest = map(toolbox.test_evaluate, hof)
        print "Fitness in test = %.4f" % ( fitnessInTest[0][0])

    #    import ipdb
    #    ipdb.set_trace()
    else:
        values  = np.array(map(toolbox.final_test, hof)[0][0])
    
        outfile = open("gp.csv", "wb")
        open_file_object = csv.writer(outfile)
        open_file_object.writerow(["id","num_views","num_votes","num_comments"])
        open_file_object.writerows(zip(np.array(ids), values, values, values))
        outfile.close()
Example #21
File: DeapRunner.py Project: andreh12/evb
    def run(self):
        # run the evolutionary algorithm

        # keep track of the initial rate before tuning was started
        # to be able to print improvements in each log message
        self.initialRateMean, self.initialRateStd, lastRate = self.goalFunction.getEventRate()
        logging.info("readout rate before tuning: %.1f +/- %.1f kHz" % (self.initialRateMean / 1e3, self.initialRateStd / 1e3))

        self.generation = 0
        self.evalIndex = None

        from deap import tools, algorithms

        # 60 individuals in the population at about 10 seconds evaluation
        # per individual will result in a new generation of solutions
        # every 600 seconds (10 minutes)
        pop = self.toolbox.population(n = 60)

        # best solution(s) found
        hof = tools.HallOfFame(10)

        self.population, self.log = algorithms.eaSimple(pop, self.toolbox, 
                                                        cxpb  = 0.5,         # crossing probability
                                                        mutpb = 0.2,         # mutation probability
                                                        ngen  = 100000000,   # (maximum) number of generations to run
                                                        # stats = stats, 
                                                        halloffame = hof, 
                                                        verbose = True)
Example #22
def mainGA(NAME, target_output, target_image):
    """ Runs the main loop of GA.""" 
    global toolbox

    print("Target image: {0} Target output: {1}".format(target_image, target_output))    
    sys.stdout.flush()

    model = load_model(NAME)    
    fit = Fitness(NAME, model, target_image, target_output)

    #Genetic operators 
    toolbox.register("evaluate", fit.evaluate)
    toolbox.register("mate", cxTwoPointCopy) 
    #toolbox.register("mate", cxUniform)
    toolbox.register("mutate", tools.mutGaussian, mu=0.0, sigma=0.1, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)
    

    pop = toolbox.population(n=50)
    hof = tools.HallOfFame(1, similar=np.array_equal)
    
    #stats = tools.Statistics(lambda ind: ind.fitness.values)
    #stats.register("avg", np.mean)
    #stats.register("std", np.std)
    #stats.register("min", np.min)
    #stats.register("max", np.max)
  
    pop, log = algorithms.eaSimple(pop, toolbox, cxpb=CXPB, mutpb=MUTPB, 
                                   ngen=NGEN, halloffame=hof, 
                                   verbose=False)

    return hof[0] 
def evolve_string(text):
    """Use evolutionary algorithm (EA) to evolve 'text' string"""

    # Set random number generator initial seed so that results are repeatable.
    # See: https://docs.python.org/2/library/random.html#random.seed
    #      and http://xkcd.com/221
    random.seed(4)

    # Get configured toolbox and create a population of random Messages
    toolbox = get_toolbox(text)
    pop = toolbox.population(n=300)

    # Collect statistics as the EA runs
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    # Run simple EA
    # (See: http://deap.gel.ulaval.ca/doc/dev/api/algo.html for details)
    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb=0.5,    # Prob. of crossover (mating)
                                   mutpb=0.2,   # Probability of mutation
                                   ngen=400,    # Num. of generations to run
                                   stats=stats)

    return pop, log
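
# get_toolbox(text) and the Message individual are not shown in this excerpt.
# Here is a minimal sketch under the assumption that a Message is simply a list
# of characters scored by how many positions differ from the target text; apart
# from the DEAP API, every name below is an illustrative stand-in.

import random
import string

from deap import base, creator, tools


def get_toolbox(text):
    if not hasattr(creator, "FitnessMin"):
        creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
        creator.create("Message", list, fitness=creator.FitnessMin)

    alphabet = string.ascii_letters + " "
    toolbox = base.Toolbox()
    toolbox.register("attr_char", random.choice, alphabet)
    toolbox.register("individual", tools.initRepeat, creator.Message,
                     toolbox.attr_char, len(text))
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    def evaluate(individual):
        # Count mismatching positions against the target (lower is better).
        return sum(a != b for a, b in zip(individual, text)),

    def mutate(individual, indpb):
        # Resample each character independently with probability indpb.
        for i in range(len(individual)):
            if random.random() < indpb:
                individual[i] = random.choice(alphabet)
        return individual,

    toolbox.register("evaluate", evaluate)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", mutate, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)
    return toolbox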
Example #24
def deap_test():
    # onemax example evolves to print list of ones: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    numpy.random.seed(1)
    def evalOneMax(individual):
        return sum(individual),

    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, typecode='b', fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    toolbox.register("attr_bool", numpy.random.randint, 0, 1)
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, 10)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", evalOneMax)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)

    pop   = toolbox.population(n=50)
    hof   = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=30, 
                                   stats=stats, halloffame=hof, verbose=False) # change to verbose=True to see evolution table
    print "deap test >>>", hof[0]
def run(num_gen,
        n,
        mutpb,
        cxpb):
    """
    Runs multiple episodes, evolving the RNN parameters using a GA
    """
    history = tools.History()
    # Decorate the variation operators
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    pool = multiprocessing.Pool(processes=12)
    toolbox.register("map", pool.map)

    pop = toolbox.population(n=n)
    history.update(pop)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb=cxpb,
                                   mutpb=mutpb,
                                   ngen=num_gen,
                                   stats=stats,
                                   halloffame=hof,
                                   verbose=True)

    return pop, log, hof, history
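
# A hypothetical driver for run(); the numbers are illustrative only and the
# module is assumed to have registered the usual toolbox operators
# (attr/individual/population/evaluate/mate/mutate/select) before this runs.
if __name__ == "__main__":
    pop, log, hof, history = run(num_gen=50, n=120, mutpb=0.2, cxpb=0.5)
    print("best fitness:", hof[0].fitness.values)
    # The decorated History object can be inspected afterwards, e.g. turned
    # into a genealogy graph with networkx.DiGraph(history.genealogy_tree)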
Example #26
    def optimize_model(self, ngen=30, cxpb=0.5, mutpb=0.1, pop_size=15):
        """
        DEAP Optimization
        """
        print('OPTIMIZATION STARTED')
        creator.create("FitnessMax", base.Fitness, weights=(1.0,))
        creator.create("Individual", list, fitness=creator.FitnessMax)
        toolbox = base.Toolbox()
        toolbox.register("candidate", self.generate_candidate,
                         [self.nlay, self.nrow, self.ncol])
        toolbox.register("individual", tools.initIterate,
                         creator.Individual, toolbox.candidate)
        toolbox.register("population", tools.initRepeat,
                         list, toolbox.individual)
        toolbox.register("mate", tools.cxOnePoint)
        toolbox.register("evaluate", self.evaluate)
        toolbox.register("mutate", self.mutate)
        toolbox.register("select", tools.selTournament, tournsize=3)

        pop = toolbox.population(n=pop_size)

        self.hall_of_fame = tools.HallOfFame(maxsize=100)

        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("mean", np.mean, axis=0)
        stats.register("std", np.std, axis=0)
        stats.register("min", np.min, axis=0)
        stats.register("max", np.max, axis=0)
        self.result, self.log = algorithms.eaSimple(
            pop, toolbox,
            cxpb=cxpb, mutpb=mutpb,
            ngen=ngen, stats=stats,
            halloffame=self.hall_of_fame, verbose=False
            )
        return self.hall_of_fame
Example #27
def main():
    random.seed(318)

    pop = toolbox.population(n=5000)
    hof = tools.HallOfFame(1)
    
    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", numpy.mean)
    mstats.register("std", numpy.std)
    mstats.register("min", numpy.min)
    mstats.register("max", numpy.max)

    pop, log = algorithms.eaSimple(pop, toolbox, 0.9, 0.1, 50, stats=mstats,
                                   halloffame=hof, verbose=True)
    
    trainingError = evalSymbReg(hof[0])[0]
    
    testingError = describe(hof[0])
    
    print 'training error: ', trainingError
    print 'testing error: ', testingError
    # print log
    return pop, log, hof
Example #28
def gp(train, target):
    def sigmoid(x):
        return 1 / (1 + np.exp(-x))
    def evalSymbReg(individual, data):
        # Transform the tree expression into a callable function
        func = toolbox.compile(expr=individual)
        # Evaluate the mean squared error between the expression and the target
        sqerrors = ((sigmoid(func(data[i])) - target[i]) ** 2 for i in range(target.size))
        return math.fsum(sqerrors) / target.size,
    
    toolbox.register("evaluate", evalSymbReg, data = train)

    random.seed(318)

    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)
    
    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", np.mean)
    mstats.register("std", np.std)
    mstats.register("min", np.min)
    mstats.register("max", np.max)

    pop, log = algorithms.eaSimple(pop, toolbox, 0.5, 0.1, 40, stats=mstats,
                                   halloffame=hof, verbose=True)
    # print log
    return pop, log, hof
Example #29
File: auto_feature.py Project: EvoML/EvoML
    def fit(self, X, y):
        #ToDo: Check that the columns or the feature names are not same
        #ToDo: All other general sanity checks are also to be made.
        #ToDo: make all the parameters in the init function

        input_feat = list(X.columns.values)
        creator.create("FitnessMin", base.Fitness, weights=(-1.0,))  # minimization (negative weight)
        creator.create("Individual", list, fitness=creator.FitnessMin)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size)
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        
        toolbox = base.Toolbox()
        toolbox.register("attr_bool", self.get_indiv_sample, data=X_train, output=y_train, base_estimator=self.base_estimator)
        toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=self.N_individual)
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
        toolbox.register("evaluate", evalOneMax,x_te = X_test, y_te = y_test, test_frac = self.test_frac, test_frac_flag = self.test_frac_flag)
        toolbox.register("mate", self.crossover_func)
        toolbox.register("mutate", mutate_feat, indpb=self.indpb,input_fe = input_feat, X_tr = X_train)
        toolbox.register("select", tools.selTournament, tournsize=3)
        
        pop = toolbox.population(n=self.N_population)
        hof = tools.HallOfFame(1, similar=compare_hof);
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("min", np.min)
        stats.register("max", np.max)
        self.pop, self.logbook = algorithms.eaSimple(pop, toolbox, cxpb=self.cxpb, mutpb=self.mutpb, ngen=self.ngen, stats=stats, halloffame=hof,  verbose=True)
        self.hof = hof
        #return pop, logbook, hof
        return self
Example #30
def main():
    random.seed(64)
    
    pool = Pool(processes=6)
    toolbox.register("map", pool.map)
    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=15, 
                        stats=stats, halloffame=hof)
    return pop, stats, hof
def main():
    random.seed(24)

    # create the initial population
    pop = toolbox.population(n=30)

    # CXPB  - crossover probability
    # MUTPB - mutation probability
    # NGEN  - number of generations
    CXPB, MUTPB, NGEN = 0.8, 0.05, 10

    # statistics to be collected
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("avg", numpy.mean)
    stats.register("max", numpy.max)

    # run the algorithm
    pop, logbook = algorithms.eaSimple(pop, toolbox, CXPB, MUTPB, NGEN, stats=stats)

    # select the best individual from the resulting population
    best_ind = tools.selSPEA2(pop, 1)

    # print the best individual's information
    print_ind(best_ind[0])

    # plot the chart
    # plot_log(logbook)

    mse = 0

    image = cv2.imread(target_image)
    rows, cols, _ = image.shape  # Size of background Image

    new_image = numpy.zeros((rows, cols, 3))

    for i in range(rows):
        for j in range(cols):
            input_data = [[i * 1.0 / rows], [j * 1.0 / cols], [np.sin(20 * 3.14 * (i + j) * 1.0 / cols)]]
            red_bin = int_to_bin(image[i, j, 0])
            green_bin = int_to_bin(image[i, j, 1])
            blue_bin = int_to_bin(image[i, j, 2])
            # target_data = [[r],
            #                [g],
            #                [b]]
            target_data = [[red_bin[0] * 1.0],
                           [red_bin[1] * 1.0],
                           [red_bin[2] * 1.0],
                           [red_bin[3] * 1.0],
                           [red_bin[4] * 1.0],
                           [red_bin[5] * 1.0],
                           [red_bin[6] * 1.0],
                           [red_bin[7] * 1.0],
                           [green_bin[0] * 1.0],
                           [green_bin[1] * 1.0],
                           [green_bin[2] * 1.0],
                           [green_bin[3] * 1.0],
                           [green_bin[4] * 1.0],
                           [green_bin[5] * 1.0],
                           [green_bin[6] * 1.0],
                           [green_bin[7] * 1.0],
                           [blue_bin[0] * 1.0],
                           [blue_bin[1] * 1.0],
                           [blue_bin[2] * 1.0],
                           [blue_bin[3] * 1.0],
                           [blue_bin[4] * 1.0],
                           [blue_bin[5] * 1.0],
                           [blue_bin[6] * 1.0],
                           [blue_bin[7] * 1.0]]

            nn_output = nn.get_output(input_data)
            if np.isnan(np.sum(nn_output)):
                continue
            # print('For epoch %s and input %s got output %s given target %s' \
            #     % (e, i, nn_output, targets[i]))
            # print("Current weights: ")
            # print(str(nn.layers[0].weights))
            # print("Current bias: ")
            # print(str(nn.layers[0].bias))
            nn_error = target_data - nn_output
            # print("Current error: ")
            # print(str(nn_error))
            mse = mse + np.inner(np.transpose(nn_error), np.transpose(nn_error))

            # new_image[i, j, 0] = min(255, max(0, int(255 * nn_output[0])))
            # new_image[i, j, 1] = min(255, max(0, int(255 * nn_output[1])))
            # new_image[i, j, 2] = min(255, max(0, int(255 * nn_output[2])))
            def norm_data(x):
                if x > 0.5:
                    return 1
                else:
                    return 0
            red = norm_data(nn_output[0]) + norm_data(nn_output[1]) * 2 + norm_data(nn_output[2]) * 4 + \
                norm_data(nn_output[3]) * 8 + norm_data(nn_output[4]) * 16 + norm_data(nn_output[5]) * 32 + \
                norm_data(nn_output[6]) * 64 + norm_data(nn_output[7]) * 128

            blue = norm_data(nn_output[8]) + norm_data(nn_output[9]) * 2 + norm_data(nn_output[10]) * 4 + \
                norm_data(nn_output[11]) * 8 + norm_data(nn_output[12]) * 16 + norm_data(nn_output[13]) * 32 + \
                norm_data(nn_output[14]) * 64 + norm_data(nn_output[15]) * 128

            green = norm_data(nn_output[16]) + norm_data(nn_output[17]) * 2 + norm_data(nn_output[18]) * 4 + \
                norm_data(nn_output[19]) * 8 + norm_data(nn_output[20]) * 16 + norm_data(nn_output[21]) * 32 + \
                norm_data(nn_output[22]) * 64 + norm_data(nn_output[23]) * 128

            # red/blue/green are already 0-255 after the bit decode above
            new_image[i, j, 0] = int(min(255, max(0, red)))
            new_image[i, j, 1] = int(min(255, max(0, blue)))
            new_image[i, j, 2] = int(min(255, max(0, green)))

    print("Error on testing dataset: " + str(mse))
    cv2.imwrite("generated_image.png", new_image)
Example #32
toolbox.register("attr_float", random.random)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_float, n=IND_SIZE)


pop = []
for x in range(POP_SIZE):
    ind = toolbox.individual()
    pop.append(ind)

toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evaluateInd)

pop, logbook = algorithms.eaSimple(pop, toolbox, CXPB, MUTPB, NGEN)

print("Simple statistics")
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
record = stats.compile(pop)
print(record)

print("Multi objective statistics")
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean, axis=0)
stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
Example #33
File: main.py Project: autommat/evocomp1
                          names=[
                              "index", "sepal length", "sepal width",
                              "petal length", "petal width"
                          ]).drop(columns=['index'])
    dataset = Dataset(df_iris)

    toolbox = prepare_toolbox(dataset)
    stats = prepare_statistics()

    init_pop = toolbox.population(POP_SIZE)

    elite = tools.HallOfFame(1)
    rpop, logbook = algorithms.eaSimple(init_pop,
                                        toolbox,
                                        CXPB,
                                        MUTPB,
                                        NGEN,
                                        stats=stats,
                                        halloffame=elite)
    best_clusters = dataset.individual_to_clusters(elite[0])
    best_indiv = elite[0]
    print("clusters determined by the best individual:")
    print(best_clusters)

    min_ = logbook.select("min")
    plt.plot(min_)
    plt.show()

    colors = ['red', 'green', 'blue', 'orange', 'purple']
    index_to_cluster = [None] * len(dataset.df.index)
    for clust_num, item_num_list in best_clusters.items():
Example #34
def main():
    maxList = []
    avgList = []
    minList = []
    stdList = []

    # max value of all runs
    maxValue = []
    # corresponding weight
    maxWeight = []

    for r in range(0, N_RUNS):

        print('\nAt the run:', r)
        # create initial population (generation 0):
        population = toolbox.populationCreator(n=POPULATION_SIZE)

        # define the hall-of-fame object:
        hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

        # prepare the statistics object:
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", numpy.mean)
        stats.register("std", numpy.std)
        stats.register("min", numpy.min)
        stats.register("max", numpy.max)

        # perform the Genetic Algorithm flow:
        population, logbook = algorithms.eaSimple(population,
                                                  toolbox,
                                                  cxpb=P_CROSSOVER,
                                                  mutpb=P_MUTATION,
                                                  ngen=MAX_GENERATIONS,
                                                  stats=stats,
                                                  halloffame=hof,
                                                  verbose=True)

        # print Hall of Fame info:
        print("Hall of Fame Individuals = ", *hof.items, sep="\n")
        print("\nBest Ever Individual = ", hof.items[0], "\nFitness: ", r, " ",
              knapsack(hof.items[0]))

        # hof.items[0] --> (value,weight)

        # append max value to a list
        maxValue.append(knapsack(hof.items[0])[0])
        # append max weight to a list
        maxWeight.append(knapsack(hof.items[0])[1])

        # Genetic Algorithm is done with this run - extract statistics:
        meanFitnessValues, stdFitnessValues, minFitnessValues, maxFitnessValues = logbook.select(
            "avg", "std", "min", "max")

        # Save statistics for this run:
        avgList.append(meanFitnessValues)
        stdList.append(stdFitnessValues)
        minList.append(minFitnessValues)
        maxList.append(maxFitnessValues)

    print()
    print('Max Value from all the runs:', max(maxValue))

    # len(maxValue) == len(maxWeight)
    # print corresponding weight for max value
    for i in range(len(maxValue)):
        if maxValue[i] == max(maxValue):
            print('Corresponding Weight:', maxWeight[i])

    # Genetic Algorithm is done (all runs) - plot statistics:
    x = numpy.arange(0, MAX_GENERATIONS + 1)
    avgArray = numpy.array(avgList)
    stdArray = numpy.array(stdList)
    minArray = numpy.array(minList)
    maxArray = numpy.array(maxList)
    plt.xlabel('Generation')
    plt.ylabel('Fitness')
    plt.title(
        'Max and Average Fitness for Knapsack with Tournament Selection - 300 Runs'
    )
    plt.errorbar(x,
                 avgArray.mean(0),
                 yerr=stdArray.mean(0),
                 label="Average",
                 color="Red")
    plt.errorbar(x,
                 maxArray.mean(0),
                 yerr=maxArray.std(0),
                 label="Best",
                 color="Green")
    plt.show()
Example #35
    def GeneticMake(self,
                    train,
                    test,
                    features,
                    params,
                    iteration,
                    feature_limit,
                    gen_num=10):
        '''
        iteration: number of iterations, i.e. attempts at generating a feature
        feature_limit: upper limit on the number of features allowed
        gen_num: number of generations to evolve
        '''

        # Division function that guards against a zero denominator
        def protectedDiv(left, right):
            eps = 1.0e-7
            tmp = np.zeros(len(left))
            tmp[np.abs(right) >=
                eps] = left[np.abs(right) >= eps] / right[np.abs(right) >= eps]
            tmp[np.abs(right) < eps] = 1.0
            return tmp

        # Baseline score
        base_score = self.Model(train, features, params)
        print("validation mean score:", base_score['score'].mean())
        # Define individuals as tree structures whose fitness is to be maximized
        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual",
                       gp.PrimitiveTree,
                       fitness=creator.FitnessMax)
        # setting
        prev_score = np.mean(base_score['score'])  # base score
        exprs = []  # generated features
        results = pd.DataFrame(
            columns=['n_features', 'best_score',
                     'val_score'])  # stores the results (best_score == val_score ??)
        n_features = len(features)  # number of features at the start
        X_train = train[features]  # training-data features
        X_test = test[features]  # test-data features
        y_train = train['y']  # training-data target variable
        # main
        for i in tqdm(range(iteration)):
            pset = gp.PrimitiveSet("MAIN", n_features)
            pset.addPrimitive(operator.add, 2)
            pset.addPrimitive(operator.sub, 2)
            pset.addPrimitive(operator.mul, 2)
            pset.addPrimitive(protectedDiv, 2)
            pset.addPrimitive(operator.neg, 1)
            pset.addPrimitive(np.cos, 1)
            pset.addPrimitive(np.sin, 1)
            pset.addPrimitive(np.tan, 1)

            # function
            def eval_genfeat(individual):
                func = toolbox.compile(expr=individual)
                # make new features
                features_train = [
                    np.array(X_train)[:, j] for j in range(n_features)
                ]
                new_feat_train = func(*features_train)
                # combine table and select features name
                train_tmp = pd.concat([
                    X_train,
                    pd.DataFrame(new_feat_train, columns=['tmp']), y_train
                ],
                                      axis=1)
                features_tmp = train_tmp.drop("y", axis=1).columns.values
                tmp_score = self.Model(train_tmp, features_tmp, params)
                # print(np.mean(tmp_score['score']))
                return np.mean(tmp_score['score']),

            # Set the default values of the toolbox functions
            toolbox = base.Toolbox()
            toolbox.register("expr",
                             gp.genHalfAndHalf,
                             pset=pset,
                             min_=1,
                             max_=3)
            toolbox.register("individual", tools.initIterate,
                             creator.Individual, toolbox.expr)
            toolbox.register("population", tools.initRepeat, list,
                             toolbox.individual)
            toolbox.register("compile", gp.compile, pset=pset)
            # Configure evaluation, selection, crossover and mutation:
            # selection is a size-10 tournament, crossover is one-point, and mutation generates a random subtree of depth up to 2
            toolbox.register("evaluate", eval_genfeat)
            toolbox.register("select", tools.selTournament, tournsize=10)
            toolbox.register("mate", gp.cxOnePoint)
            toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
            toolbox.register("mutate",
                             gp.mutUniform,
                             expr=toolbox.expr_mut,
                             pset=pset)
            # Constrain the syntax trees:
            # prevent crossover and mutation from producing trees of depth 5 or more
            toolbox.decorate(
                "mate",
                gp.staticLimit(key=operator.attrgetter("height"), max_value=5))
            toolbox.decorate(
                "mutate",
                gp.staticLimit(key=operator.attrgetter("height"), max_value=5))

            # Create the population and the hall of fame that keeps the best solution
            pop = toolbox.population(n=300)
            hof = tools.HallOfFame(1)

            # Configure the statistics to display
            stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
            stats_size = tools.Statistics(len)
            mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
            mstats.register("avg", np.mean)
            mstats.register("std", np.std)
            mstats.register("min", np.min)
            mstats.register("max", np.max)

            # Run the evolution:
            # crossover probability 50%, mutation probability 10%, evolve for gen_num generations
            start_time = time.time()
            pop, log = algorithms.eaSimple(pop,
                                           toolbox,
                                           0.5,
                                           0.1,
                                           gen_num,
                                           stats=mstats,
                                           halloffame=hof,
                                           verbose=True)
            end_time = time.time()

            # Keep the best solution and its score
            best_expr = hof[0]
            best_score = mstats.compile(pop)["fitness"]["max"]

            # Add the generated feature to the train/test data and update the
            # best score when the new score improves on the previous one
            if prev_score < best_score:
                # Add the generated feature
                func = toolbox.compile(expr=best_expr)
                features_train = [
                    np.array(X_train)[:, j] for j in range(n_features)
                ]
                features_test = [
                    np.array(X_test)[:, j] for j in range(n_features)
                ]
                new_feat_train = func(*features_train)
                new_feat_test = func(*features_test)
                # Update the data
                X_train = pd.concat([
                    X_train,
                    pd.DataFrame(new_feat_train, columns=['NEW' + str(i)])
                ],
                                    axis=1)
                X_test = pd.concat([
                    X_test,
                    pd.DataFrame(new_feat_test, columns=['NEW' + str(i)])
                ],
                                   axis=1)
                new_features = X_train.columns.values
                # Compute the validation score (for plotting)
                val_score = self.Model(pd.concat([X_train, y_train], axis=1),
                                       new_features, params)
                # test_pred = DecisionTree.prediction(train,test,features,params)

                # Update the best score and increment the feature count
                prev_score = best_score
                n_features += 1
                # Show progress and keep the data for output
                print("n_features: %i, best_score: %f, time: %f second" %
                      (n_features, best_score, end_time - start_time))
                # Store the results (score history and the generated feature)
                tmp = pd.Series(
                    [n_features, best_score,
                     np.mean(val_score['score'])],
                    index=results.columns)
                results = results.append(tmp, ignore_index=True)
                exprs.append(best_expr)
                # save with file name
                Process.write_feather(pd.concat([X_train, y_train], axis=1),
                                      file_name='train_gen')
                Process.write_feather(X_test, file_name='test_gen')
                # Break once the feature count reaches feature_limit
                if n_features >= feature_limit:
                    break
        return pd.concat([X_train, y_train], axis=1), X_test, results, exprs
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=0.3, indpb=0.1)
toolbox.register("select", selectOverride) 

print("Here goes nothing")

import operator
fit_stats = tools.Statistics(key=operator.attrgetter("fitness.values"))
fit_stats.register('mean', np.mean)
fit_stats.register('min', np.min)
fit_stats.register('max', np.max)

ngen = 200
pop = toolbox.population(n=20)
result, log = algorithms.eaSimple(pop, toolbox,
                             cxpb=0.5, mutpb=0.5,
                             ngen=ngen, verbose=True,
                             stats=fit_stats)
best = tools.selBest(result,k=1)[0]

i = 0
while (os.path.exists("best_%d.npy" % i)):
    i += 1
np.save('best_%d' % i, best)

mf = np.vectorize(lambda x: 0.5 if x is None else float(x))
mlp = MLPClassifierOverride()
mlp.init_weights(best)
mlp.fit([[0.0]*3*3*3,[0.0]*3*3*3], [[0.1]*3*3*3,[0.1]*3*3*3])

message = "Do you want to be Crosses or Noughts [X/O]: "
human_is_cross = get_input(message,lambda x: x in ['x','o']) == 'x'
Example #37
toolbox.register('mutate', tools.mutUniformInt, low=n_i, up=n_f, indpb=0.2)
toolbox.register('select', tools.selTournament, tournsize=3)

#generating population
pop = toolbox.population(n=100)

#------------------------------------ Simple formate using DEAP ------------------------------------------

hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register('avg', np.mean)
stats.register('std', np.std)
stats.register('min', np.min)
stats.register('max', np.max)
pop, log = algorithms.eaSimple(pop,
                               toolbox,
                               cxpb=0.5,
                               mutpb=0.2,
                               ngen=10,
                               stats=stats,
                               halloffame=hof)

#---------------------------------------------------------------------------------------------------------

Best = tools.selBest(pop, k=1)
print(Best)
t = PrettyTable(['Crop', 'Planting Month', 'Harvest Month'])
for i in range(len(Best[0])):
    val = Best[0][i]
    t.add_row([Crop_name[val * 12 - 1], Current_month_str, Harvest_month(val)])
print(t)
Example #38
    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", np.mean)
    mstats.register("std", np.std)
    mstats.register("min", np.min)
    mstats.register("max", np.max)

    # Run the evolution
    # algorithms.eaSimple(pop, toolbox, 0.5, 0.1, 10,
    # crossover probability 50%, mutation probability 10%, evolve for 10 generations
    start_time = time.time()
    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   0.5,
                                   0.1,
                                   10,
                                   stats=mstats,
                                   halloffame=hof,
                                   verbose=True)
    end_time = time.time()

    # Keep the best solution and the best score
    best_expr = hof[0]
    best_mae = mstats.compile(pop)["fitness"]["max"]

    # If the 5-fold CV score beats the previous step's score,
    # add the generated feature to the train/test data and update the best score
    if prev_mae < best_mae:
        # Add the generated feature
        func = toolbox.compile(expr=best_expr)
        features_train = [X_train[:, i] for i in range(n_features)]
Example #39
def main():

    input_file = 'me_at_the_zoo.csv'
    output_file = 'salida.csv'

    entrada_salida.entrada(input_file)

    configuracionIndividuos()

    configuracionAlgoritmo_Experimento1()
    stats1 = tools.Statistics(lambda ind: ind.fitness.values)
    stats1.register("avg", np.mean)
    stats1.register("std", np.std)
    stats1.register("min", np.min)
    stats1.register("max", np.max)
    population1, logbook1 = algorithms.eaSimple(pop,
                                                toolbox,
                                                cxpb=0.5,
                                                mutpb=0.2,
                                                ngen=100,
                                                stats=stats1)

    print(stats1)
    print("La mejor solucion encontrada es: ")
    print(tools.selBest(population1, 1))
    print("Su fitness es: ")
    print(tools.selBest(population1, 1)[0].fitness.values)

    gen = logbook1.select("gen")
    avgs = logbook1.select("avg")

    fig, ax1 = plt.subplots()

    line1 = ax1.plot(gen, avgs, "r-", label="Average Fitness")
    ax1.set_xlabel("Generation")
    ax1.set_ylabel("Fitness 1", color="b")

    plt.plot()

    indi = tools.selBest(population1, 1)
    indi2 = np.array(indi)
    mejor_individuo = indi2.reshape(entrada_salida.n_caches,
                                    entrada_salida.n_videos)
    entrada_salida.salida(output_file, mejor_individuo)

    # Second experiment
    configuracionAlgoritmo_Experimento2()
    stats2 = tools.Statistics(lambda ind: ind.fitness.values)
    stats2.register("avg", np.mean)
    stats2.register("std", np.std)
    stats2.register("min", np.min)
    stats2.register("max", np.max)
    population2, logbook2 = algorithms.eaSimple(pop,
                                                toolbox,
                                                cxpb=0.5,
                                                mutpb=0.2,
                                                ngen=100,
                                                stats=stats2)

    print(stats2)
    print("La mejor solucion encontrada es: ")
    print(tools.selBest(population2, 1))
    print("Su fitness es: ")
    print(tools.selBest(population2, 1)[0].fitness.values)

    gen = logbook2.select("gen")
    avgs = logbook2.select("avg")

    fig, ax1 = plt.subplots()

    line1 = ax1.plot(gen, avgs, "r-", label="Average Fitness")
    ax1.set_xlabel("Generation")
    ax1.set_ylabel("Fitness 2", color="b")

    plt.plot()

    # Third experiment
    configuracionAlgoritmo_Experimento3()
    stats3 = tools.Statistics(lambda ind: ind.fitness.values)
    stats3.register("avg", np.mean)
    stats3.register("std", np.std)
    stats3.register("min", np.min)
    stats3.register("max", np.max)
    population3, logbook3 = algorithms.eaSimple(pop,
                                                toolbox,
                                                cxpb=0.5,
                                                mutpb=0.2,
                                                ngen=200,
                                                stats=stats3)

    print(stats3)
    print("La mejor solucion encontrada es: ")
    print(tools.selBest(population3, 1))
    print("Su fitness es: ")
    print(tools.selBest(population3, 1)[0].fitness.values)

    gen = logbook3.select("gen")
    avgs = logbook3.select("avg")

    fig, ax1 = plt.subplots()

    line1 = ax1.plot(gen, avgs, "r-", label="Average Fitness")
    ax1.set_xlabel("Generation")
    ax1.set_ylabel("Fitness 3", color="b")

    plt.plot()
Example #40
def optimize_diameters(path, inpfile, **kwargs):
    """ Optimize diameters

    Optimize pipe diameters of a hydraulic network using Genetic Algorithms.

    :param str path: path to the input file.
    :param str inpfile: EPANET's input file (INP) with network data.
    :param int pop: population size or number of individuals.
    :param int gen: number of generations.
    :param float cxpb: crossover (mating) probability.
    :param float mutpb: mutation probability.
    :param float indpb: per-attribute (gene) mutation probability.
    """

    _unit_price = kwargs.get('prices', {})
    _popsize = kwargs.get('pop', 200)
    _cxpb = kwargs.get('cxpb', 0.9)
    _mutpb = kwargs.get('mutpb', 0.02)
    _indpb = kwargs.get('indpb', 0.10)
    _generations = kwargs.get('gen', 500)

    # Create the appropriate types for diameter optimization
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))  # Minimize
    creator.create("Individual", list, fitness=creator.FitnessMin)

    # Create the Network object needed for EPANET simulation and analysis
    network = Network(path, inpfile)
    network.open_network()
    network.initialize()

    # Create the individuals and population
    # dimension = network.links
    # individual size = network.links
    toolbox = base.Toolbox()
    toolbox.register("attr_diameter", random.randint, 0, len(_unit_price) - 1)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_diameter, network.links)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Genetic operators and evaluation function
    toolbox.register("evaluate",
                     lambda x: network_diameters(x, network, _unit_price))
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutShuffleIndexes, indpb=_indpb)
    toolbox.register("select", tools.selRoulette)

    # Create the population
    pop = toolbox.population(n=_popsize)
    hof = tools.HallOfFame(1)  # To remember best solution
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    # Use the simplest evolutionary algorithm, as in chapter 7 of Back (2000)
    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb=_cxpb,
                                   mutpb=_mutpb,
                                   ngen=_generations,
                                   stats=stats,
                                   halloffame=hof,
                                   verbose=True)
    return hof, pop, log
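A hypothetical usage sketch of optimize_diameters; the directory, the INP file name and the price table below are placeholders, and an EPANET-readable network is assumed to exist.

prices = {25: 2.0, 50: 4.5, 80: 11.0, 100: 16.0}   # placeholder diameter -> unit price table
hof, pop, log = optimize_diameters("data", "network.inp",
                                   prices=prices, pop=100, gen=200,
                                   cxpb=0.9, mutpb=0.02, indpb=0.10)
print("Best diameter indices per pipe:", hof[0])
print("Best cost:", hof[0].fitness.values[0])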
Example #41
File: mo_deap.py  Project: abcp4/SFPNovelty
NGEN = 30
i = 0

hof = tools.ParetoFront()  # a ParetoFront may be used to retrieve the best non-dominated individuals of the evolution
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean, axis=0)
stats.register("std", np.std, axis=0)
stats.register("min", np.min, axis=0)
stats.register("max", np.max, axis=0)

algorithms.eaSimple(population,
                    toolbox,
                    0.7,
                    0.2,
                    ngen=NGEN,
                    stats=stats,
                    halloffame=hof,
                    verbose=True)
print_debug_states()
"""
for gen in range(NGEN):
    i+=1
    offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.3)
    fits = toolbox.map(toolbox.evaluate, offspring)
    for fit, ind in zip(fits, offspring):
        ind.fitness.values = fit
    population = toolbox.select(offspring, k=len(population))
    print("################ Gen ",i,' ################')
    print_debug_states()
    clean_debug_states()
# create initial population (generation 0):
population = toolbox.population(n=POPULATION_SIZE)

# determine the statistics to be calculated:
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("min", np.min)
stats.register("avg", np.mean)
stats.register("max", np.max)
stats.register("std", np.std)

# define the hall-of-fame:
hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

# run eaSimple algorithm and save stats in logbook
population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
                                            ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)

# print best individual info:
best = hof.items[0]
print("Best Ever Individual = ", best)
print("Best Ever Fitness = ", best.fitness.values[0])

# print optimal solution info:
print("Known Optimal Solution = ", optimal_solution)
print("Optimal Distance = ", get_total_distance(optimal_solution)[0])

# plot best solution:
plt.figure(1)
plt.title('Best Found Solution')
plot_individual(best)
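A small follow-up sketch building only on names already used above (best, optimal_solution, get_total_distance), assuming the fitness value is the tour distance being minimized, as the prints suggest:

best_distance = best.fitness.values[0]
optimal_distance = get_total_distance(optimal_solution)[0]
gap = 100.0 * (best_distance - optimal_distance) / optimal_distance
print("Gap to known optimum: {:.2f}%".format(gap))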
Example #43
    def _fit(self, X, y, parameter_dict):
        self._cv_results = None  # To indicate to the property the need to update
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        n_samples = _num_samples(X)
        X, y = indexable(X, y)

        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)'
                                 % (len(y), n_samples))
        cv = check_cv(self.cv, y=y, classifier=is_classifier(self.estimator))

        toolbox = base.Toolbox()

        name_values, gene_type, maxints = _get_param_types_maxint(parameter_dict)
        if self.gene_type is None:
            self.gene_type = gene_type

        if self.verbose:
            print("Types %s and maxint %s detected" % (self.gene_type, maxints))

        toolbox.register("individual", _initIndividual, creator.Individual, maxints=maxints)
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)

        # If n_jobs is an int, greater than 1 or less than 0 (indicating to use as
        # many jobs as possible) then we are going to create a default pool.
        # Windows users need to be warned of this feature as it only works properly
        # on linux. They need to encapsulate their pool in an if __name__ == "__main__"
        # wrapper so that pools are not recursively created when the module is reloaded in each map
        if isinstance(self.n_jobs, int):
            if self.n_jobs > 1 or self.n_jobs < 0:
                from multiprocessing import Pool  # Only imports if needed
                if os.name == 'nt':               # Checks if we are on Windows
                    warnings.warn(("Windows requires Pools to be declared from within "
                                   "an \'if __name__==\"__main__\":\' structure. In this "
                                   "case, n_jobs will accept map functions as well to "
                                   "facilitate custom parallelism. Please check to see "
                                   "that all code is working as expected."))
                pool = Pool(self.n_jobs)
                toolbox.register("map", pool.map)

        # If it's not an int, we are going to pass it as the map directly
        else:
            try:
                toolbox.register("map", self.n_jobs)
            except Exception:
                raise TypeError("n_jobs must be either an integer or map function. Received: {}".format(type(self.n_jobs)))

        toolbox.register("evaluate", _evalFunction,
                         name_values=name_values, X=X, y=y,
                         scorer=self.scorer_, cv=cv, iid=self.iid, verbose=self.verbose,
                         error_score=self.error_score, fit_params=self.fit_params,
                         score_cache=self.score_cache)

        toolbox.register("mate", _cxIndividual, indpb=self.gene_crossover_prob, gene_type=self.gene_type)

        toolbox.register("mutate", _mutIndividual, indpb=self.gene_mutation_prob, up=maxints)
        toolbox.register("select", tools.selTournament, tournsize=self.tournament_size)

        pop = toolbox.population(n=self.population_size)
        hof = tools.HallOfFame(1)

        # Stats
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.nanmean)
        stats.register("min", np.nanmin)
        stats.register("max", np.nanmax)
        stats.register("std", np.nanstd)

        # History
        hist = tools.History()
        toolbox.decorate("mate", hist.decorator)
        toolbox.decorate("mutate", hist.decorator)
        hist.update(pop)

        if self.verbose:
            print('--- Evolve in {0} possible combinations ---'.format(np.prod(np.array(maxints) + 1)))

        pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2,
                                           ngen=self.generations_number, stats=stats,
                                           halloffame=hof, verbose=self.verbose)

        # Save History
        self.all_history_.append(hist)
        self.all_logbooks_.append(logbook)
        current_best_score_ = hof[0].fitness.values[0]
        current_best_params_ = _individual_to_params(hof[0], name_values)
        if self.verbose:
            print("Best individual is: %s\nwith fitness: %s" % (
                current_best_params_, current_best_score_))

        if current_best_score_ > self.best_mem_score_:
            self.best_mem_score_ = current_best_score_
            self.best_mem_params_ = current_best_params_

        # Check memoization, potentially unknown bug
        # assert str(hof[0]) in self.score_cache, "Best individual not stored in score_cache for cv_results_."

        # Close your pools if you made them
        if isinstance(self.n_jobs, int) and (self.n_jobs > 1 or self.n_jobs < 0):
            pool.close()
            pool.join()

        self.best_score_ = current_best_score_
        self.best_params_ = current_best_params_
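The Windows warning inside _fit above concerns process spawning; here is a minimal standalone sketch of the recommended guard, independent of the class above, showing a Pool's map registered on a DEAP toolbox:

import multiprocessing

from deap import base

def square(x):  # any picklable function can be dispatched through pool.map
    return x * x

if __name__ == "__main__":
    toolbox = base.Toolbox()
    with multiprocessing.Pool(processes=2) as pool:
        toolbox.register("map", pool.map)
        print(list(toolbox.map(square, range(5))))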
def run():

    for i in range(number_of_runs):
        ###################################################################
        #EVOLUTIONARY ALGORITHM
        ###################################################################
        #TYPE
        #Create minimizing fitness class w/ single objective:
        creator.create('FitnessMin', base.Fitness, weights=(-1.0, ))
        #Create individual class:
        creator.create('Individual', list, fitness=creator.FitnessMin)

        #TOOLBOX
        toolbox = base.Toolbox()
        #Register function to create a number in the interval [1-100?]:
        #toolbox.register('init_params', )
        #Register function to use initRepeat to fill individual w/ n calls to rand_num:
        toolbox.register('individual',
                         tools.initRepeat,
                         creator.Individual,
                         np.random.random,
                         n=number_of_params)
        #Register function to use initRepeat to fill population with individuals:
        toolbox.register('population', tools.initRepeat, list,
                         toolbox.individual)

        #GENETIC OPERATORS:
        # Register evaluate fxn = evaluation function, individual to evaluate given later
        toolbox.register('evaluate', scorefxn_helper)
        # Register mate fxn = two points crossover function
        toolbox.register('mate', tools.cxTwoPoint)
        # Register mutate by swapping two points of the individual:
        toolbox.register('mutate',
                         tools.mutPolynomialBounded,
                         eta=0.1,
                         low=0.0,
                         up=1.0,
                         indpb=0.2)
        # Register select = size of tournament set to 3
        toolbox.register('select', tools.selTournament, tournsize=3)

        #EVOLUTION!
        pop = toolbox.population(n=number_of_individuals)
        hof = tools.HallOfFame(1)

        stats = tools.Statistics(key=lambda ind: [ind.fitness.values, ind])
        stats.register('all', np.copy)

        # using built in eaSimple algo
        pop, logbook = algorithms.eaSimple(pop,
                                           toolbox,
                                           cxpb=crossover_rate,
                                           mutpb=mutation_rate,
                                           ngen=number_of_generations,
                                           stats=stats,
                                           halloffame=hof,
                                           verbose=False)
        # print(f'Run number completed: {i}')

        ###################################################################
        #MAKE LISTS
        ###################################################################
        # Find best scores and individuals in population
        arr_best_score = []
        arr_best_ind = []
        for a in range(len(logbook)):
            scores = []
            for b in range(len(logbook[a]['all'])):
                scores.append(logbook[a]['all'][b][0][0])
            #print(a, np.nanmin(scores), np.nanargmin(scores))
            arr_best_score.append(np.nanmin(scores))
            #the logbook entries are of type 'deap.creator.Individual' and would require the creator to be loaded later
            #we don't want to have to load it just to view the data every time, hence the conversion to numpy
            ind_np = np.asarray(logbook[a]['all'][np.nanargmin(scores)][1])
            ind_np_conv = convert_individual(ind_np, arr_conversion_matrix,
                                             number_of_params)
            arr_best_ind.append(ind_np_conv)
            #arr_best_ind.append(np.asarray(logbook[a]['all'][np.nanargmin(scores)][1]))

        # print('Best individual is:\n %s\nwith fitness: %s' %(arr_best_ind[-1],arr_best_score[-1]))

        ###################################################################
        #PICKLE
        ###################################################################
        arr_to_pickle = [arr_best_score, arr_best_ind]

        def get_filename(val):
            filename_base = dir_to_use + '/' + stripped_name + '_'
            if val < 10:
                toret = '000' + str(val)
            elif 10 <= val < 100:
                toret = '00' + str(val)
            elif 100 <= val < 1000:
                toret = '0' + str(val)
            else:
                toret = str(val)
            return filename_base + toret + '.pickled'

        counter = 0
        filename = get_filename(counter)
        while os.path.isfile(filename) == True:
            counter += 1
            filename = get_filename(counter)

        pickle.dump(arr_to_pickle, open(filename, 'wb'))
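A small sketch of reading one pickled run back in; the path below is a placeholder for whatever get_filename() produced.

import pickle

with open('some_dir/some_run_0000.pickled', 'rb') as f:   # placeholder path
    arr_best_score, arr_best_ind = pickle.load(f)
print("final best score:", arr_best_score[-1])
print("final best individual:", arr_best_ind[-1])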
Example #45
def main():
    global snake
    global pset

    ## THIS IS WHERE YOUR CORE EVOLUTIONARY ALGORITHM WILL GO #
    pset = gp.PrimitiveSet("MAIN", 0)

    pset.addPrimitive(prog2, 2)
    pset.addPrimitive(prog3, 3)

    pset.addPrimitive(snake.if_obstacle_ahead, 2)
    pset.addPrimitive(snake.if_next_obstacle_ahead, 2)
    pset.addPrimitive(snake.if_obstacle_right, 2)
    pset.addPrimitive(snake.if_obstacle_left, 2)

    #pset.addPrimitive(snake.if_obstacle_up, 2)
    #pset.addPrimitive(snake.if_obstacle_down, 2)

    #pset.addPrimitive(snake.if_next_obstacle_up, 2)
    #pset.addPrimitive(snake.if_next_obstacle_down, 2)
    #pset.addPrimitive(snake.if_next_obstacle_left, 2)
    #pset.addPrimitive(snake.if_next_obstacle_right, 2)

    pset.addPrimitive(snake.if_move_up, 2)
    pset.addPrimitive(snake.if_move_down, 2)
    pset.addPrimitive(snake.if_move_left, 2)
    pset.addPrimitive(snake.if_move_right, 2)

    pset.addTerminal(snake.changeDirectionUp)
    pset.addTerminal(snake.changeDirectionDown)
    pset.addTerminal(snake.changeDirectionLeft)
    pset.addTerminal(snake.changeDirectionRight)
    pset.addTerminal(snake.moveForward, name="forward")

    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual",
                   gp.PrimitiveTree,
                   fitness=creator.FitnessMax,
                   pset=pset)

    toolbox = base.Toolbox()

    # Attribute generator
    toolbox.register("expr_init", gp.genGrow, pset=pset, min_=1, max_=5)

    # Structure initializers
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.expr_init)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    def evalSteps(individual):
        score, steps = runGame(individual)
        return steps,

    def evalScore(individual):
        score, steps = runGame(individual)
        return score,

    toolbox.register("evaluate", evalScore)
    toolbox.register("select", tools.selTournament, tournsize=5)
    toolbox.register("mate", gp.cxOnePoint)
    toolbox.register("expr_mut", gp.genHalfAndHalf, min_=1, max_=4)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)

    toolbox.decorate(
        "mate", gp.staticLimit(key=operator.attrgetter("height"),
                               max_value=17))
    toolbox.decorate(
        "mutate",
        gp.staticLimit(key=operator.attrgetter("height"), max_value=17))

    random.seed(69)

    pop = toolbox.population(n=400)
    hof = tools.HallOfFame(5)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   CXPB,   # eaSimple expects cxpb before mutpb
                                   MUTPB,
                                   GEN,
                                   stats,
                                   halloffame=hof)

    expr = tools.selBest(pop, 1)[0]
    nodes, edges, labels = gp.graph(expr)

    # g = pgv.AGraph(nodeSep=1.0)
    # g.add_nodes_from(nodes)
    # g.add_edges_from(edges)
    # g.layout(prog="dot")

    # for i in nodes:
    # n = g.get_node(i)
    # n.attr["label"] = labels[i]

    # g.draw("tree.pdf")

    return pop, hof, stats
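A hedged usage sketch, assuming prog2/prog3 and the snake.if_* primitives follow DEAP's artificial-ant pattern of returning callables, so that the compiled best tree is a zero-argument routine:

if __name__ == "__main__":
    pop, hof, stats = main()
    routine = gp.compile(expr=hof[0], pset=pset)  # pset is the global primitive set built in main()
    routine()  # runs one pass of the evolved control program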
Example #46
toolbox.register("select", tools.selNSGA2)
# toolbox.register("select", tools.selTournament, tournsize=3)

# ind1 = toolbox.individual()
# ind2 = toolbox.individual()
#
# print(cxInds(ind1, ind2))


pop = toolbox.population(n=10)
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("min", numpy.min)
stats.register("max", numpy.max)

pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=10, stats=stats, halloffame=hof, verbose=True)
gen, avg, min_, max_ = logbook.select("gen", "avg", "min", "max")
plt.plot(gen, avg, label="average")
plt.plot(gen, min_, label="minimum")
plt.plot(gen, max_, label="maximum")
plt.xlabel("Generation")
plt.ylabel("Fitness")
plt.legend(loc="lower right")
# plt.show()
plt.savefig('results.png')

# ind = toolbox.individual()
# print(ind)
# ind = toolbox.mutate(ind)
# print(ind)
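Since selNSGA2 selects on Pareto dominance, a ParetoFront archive is a more natural companion than a single-slot HallOfFame when the fitness carries several objectives; a minimal sketch of that swap, reusing the toolbox and stats registered above (hypothetical, not from the original snippet):

pareto = tools.ParetoFront()
pop2 = toolbox.population(n=10)
pop2, logbook2 = algorithms.eaSimple(pop2, toolbox, cxpb=0.5, mutpb=0.2, ngen=10,
                                     stats=stats, halloffame=pareto, verbose=True)
for ind in pareto:
    print(ind, ind.fitness.values)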
Example #47
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("min", np.min)
stats.register("avg", np.mean)
stats.register("max", np.max)
stats.register("best",
               lambda fitnessValues: fitnessValues.index(min(fitnessValues)))
stats.register(
    "traps",
    lambda fitnessValues: pop[fitnessValues.index(min(fitnessValues))])
###############################################################################
# Optimization Cycle
###############################################################################
(pop, logbook) = algorithms.eaSimple(pop,
                                     toolbox,
                                     cxpb=MAT['cxpb'],
                                     mutpb=MUT['mutpb'],
                                     ngen=GENS,
                                     stats=stats,
                                     halloffame=hof,
                                     verbose=VERBOSE)
###############################################################################
# Get and Export Results
###############################################################################
bestChromosome = hof[0]
bestTraps = np.reshape(bestChromosome, (-1, 2))
dta = pd.DataFrame(logbook)
srv.exportLog(logbook, OUT_PTH, '{}_{:02d}_LOG'.format(ID, TRPS_NUM))
lnd.updateTrapsCoords(bestTraps)
###############################################################################
# Plot Landscape
###############################################################################
(fig, ax) = (plt.figure(figsize=(15, 15)),
Example #48
# initializing
toolbox = base.Toolbox()
creator.create("FitnessMax", base.Fitness, weights=(1.0,)) 
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox.register("bit", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.bit, n=8)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", fitness_function)
toolbox.register("mate", tools.cxOnePoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.4)
toolbox.register("select", tools.selTournament,tournsize=10)
pop = toolbox.population(n=50)

result, log = algorithms.eaSimple(pop, toolbox, cxpb=0.8, mutpb=0.4, ngen=20, verbose=False)

best_individual = tools.selBest(result, k=1)[0]

print('Fitness of Best individual: ', fitness_function(best_individual))
print('Best individual: ', best_individual)

 
listindividual = []
 
for j in best_individual:
    listindividual.append(j)

count=0

# creating new dataset based on the output from the feature selection
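# A hedged sketch of what the comment above describes; `df` is a placeholder
# pandas DataFrame whose feature columns line up with the 8 bits of the best
# individual found above.
selected_columns = [col for col, bit in zip(df.columns, listindividual) if bit == 1]
new_dataset = df[selected_columns]
print("Selected features:", selected_columns)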
    def _fit(self, X, y):
        X, y = check_X_y(X, y, "csr")
        # Initialization
        cv = check_cv(self.cv, y, is_classifier(self.estimator))
        scorer = check_scoring(self.estimator, scoring=self.scoring)
        n_features = X.shape[1]

        estimator = clone(self.estimator)

        # Genetic Algorithm
        toolbox = base.Toolbox()

        toolbox.register("attr_bool", random.randint, 0, 1)
        toolbox.register("individual",
                         tools.initRepeat,
                         creator.Individual,
                         toolbox.attr_bool,
                         n=n_features)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)
        toolbox.register("evaluate",
                         _evalFunction,
                         gaobject=self,
                         estimator=estimator,
                         X=X,
                         y=y,
                         cv=cv,
                         scorer=scorer,
                         verbose=self.verbose,
                         fit_params=self.fit_params,
                         caching=self.caching)
        toolbox.register("mate",
                         tools.cxUniform,
                         indpb=self.crossover_independent_proba)
        toolbox.register("mutate",
                         tools.mutFlipBit,
                         indpb=self.mutation_independent_proba)
        toolbox.register("select",
                         tools.selTournament,
                         tournsize=self.tournament_size)

        if self.n_jobs > 1:
            pool = multiprocessing.Pool(processes=self.n_jobs)
            toolbox.register("map", pool.map)
        elif self.n_jobs < 0:
            pool = multiprocessing.Pool(
                processes=max(cpu_count() + 1 + self.n_jobs, 1))
            toolbox.register("map", pool.map)

        pop = toolbox.population(n=self.n_population)
        hof = tools.HallOfFame(1, similar=np.array_equal)
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean, axis=0)
        stats.register("std", np.std, axis=0)
        stats.register("min", np.min, axis=0)
        stats.register("max", np.max, axis=0)

        if self.verbose > 0:
            print("Selecting features with genetic algorithm.")

        pop, logbook = algorithms.eaSimple(pop,
                                           toolbox,
                                           cxpb=self.crossover_proba,
                                           mutpb=self.mutation_proba,
                                           ngen=self.n_generations,
                                           stats=stats,
                                           halloffame=hof,
                                           verbose=self.verbose)
        if self.n_jobs != 1:
            pool.close()
            pool.join()

        # Set final attributes
        support_ = np.array(hof, dtype=bool)[0]  # np.bool was removed from NumPy; plain bool is equivalent
        self.estimator_ = clone(self.estimator)
        self.estimator_.fit(X[:, support_], y)

        self.n_features_ = support_.sum()
        self.support_ = support_
        self.logbook_ = logbook

        return self
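After _fit finishes, support_ is a boolean mask over the columns of X; a hypothetical usage sketch, where selector stands for an already-fitted instance of the feature-selection class that owns this method:

X_reduced = X[:, selector.support_]   # keep only the GA-selected features
print("kept", selector.n_features_, "of", X.shape[1], "features")
print(selector.logbook_)              # per-generation GA statistics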
Example #50
                     toolbox.create_individual, POPULATION_SIZE)
    f = lambda ind: (tsp.total_distance(ind),)
    toolbox.register("evaluate", f)
    toolbox.register("select", tools.selTournament, tournsize=3)
    toolbox.register("mate", tools.cxOrdered)
    toolbox.register("mutate", tools.mutShuffleIndexes, indpb=0.1)

    stat = tools.Statistics(key=lambda ind: ind.fitness.values)
    stat.register("min", np.min)
    stat.register("avg", np.mean)

    population = toolbox.create_population()
    hof = tools.HallOfFame(1)

    population, logbook = algorithms.eaSimple(
        population, toolbox, cxpb=CROSS_PROB, mutpb=MUT_PROB, ngen=GENERATIONS,
        stats=stat, halloffame=hof, verbose=False
    )

    best = hof[0]
    print("Best individual:", best)
    print("Best fitness:", best.fitness.values[0])

    tsp.plot(best)

    minpath, avgpath = logbook.select("min", "avg")
    plt.plot(minpath, label="Min")
    plt.plot(avgpath, label="Avg")
    plt.xlabel("generations")
    plt.ylabel("length of path")
    plt.title("path over generations")
    plt.grid(True)
Example #51
def ga_fit(_rrl, min_ind, max_ind, random_state, nind, ngen):
    import random

    from deap import algorithms
    from deap import base
    from deap import creator
    from deap import tools

    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", np.ndarray, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()

    def create_ind_uniform(min_ind, max_ind):
        ind = []
        for min, max in zip(min_ind, max_ind):
            ind.append(random.uniform(min, max))
        return ind

    def create_ind_gauss(mu_ind, sigma_ind):
        ind = []
        for mu, sigma in zip(mu_ind, sigma_ind):
            ind.append(random.gauss(mu, sigma))
        return ind

    toolbox.register("create_ind", create_ind_uniform, min_ind, max_ind)
    # toolbox.register("create_ind", create_ind_gauss, mu_ind, sigma_ind)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.create_ind)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    def evalOneMax(individual):
        return _rrl.calc_S(individual),

    def cxTwoPointCopy(ind1, ind2):
        size = len(ind1)
        cxpoint1 = random.randint(1, size)
        cxpoint2 = random.randint(1, size - 1)
        if cxpoint2 >= cxpoint1:
            cxpoint2 += 1
        else: # Swap the two cx points
            cxpoint1, cxpoint2 = cxpoint2, cxpoint1
        ind1[cxpoint1:cxpoint2], ind2[cxpoint1:cxpoint2] = ind2[cxpoint1:cxpoint2].copy(), ind1[cxpoint1:cxpoint2].copy()
        return ind1, ind2

    def mutUniformDbl(individual, min_ind, max_ind, indpb):
        size = len(individual)
        for i, min, max in zip(range(size), min_ind, max_ind):
            if random.random() < indpb:
                individual[i] = random.uniform(min, max)
        return individual,

    toolbox.register("evaluate", evalOneMax)
    toolbox.register("mate", cxTwoPointCopy)
    toolbox.register("mutate", mutUniformDbl, min_ind=min_ind, max_ind=max_ind, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)

    random.seed(random_state)

    pop = toolbox.population(n=nind)
    hof = tools.HallOfFame(1, similar=np.array_equal)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    # Get elapsed time
    import time
    tic = time.perf_counter()  # time.clock was removed in Python 3.8
    def get_elapsedtime(data):
        return time.perf_counter() - tic
    stats.register("elapsed time", get_elapsedtime)

    pop_last, logbook = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=ngen, stats=stats, halloffame=hof)
    best_ind = tools.selBest(pop_last, 1)[0]

    return best_ind, logbook
    random.seed(1)
    populacao = toolbox.population(n=20)
    probabilidade_crossover = 1.0
    probabilidade_mutacao = 0.01
    numero_geracoes = 100

    estatisticas = tools.Statistics(
        key=lambda individuo: individuo.fitness.values)
    estatisticas.register('max', np.max)
    estatisticas.register('min', np.min)
    estatisticas.register('med', np.mean)
    estatisticas.register('std', np.std)

    populacao, info = algorithms.eaSimple(populacao, toolbox,
                                          probabilidade_crossover,
                                          probabilidade_mutacao,
                                          numero_geracoes, estatisticas)

    melhores = tools.selBest(populacao, 2)
    for individuo in melhores:
        print(individuo)
        print(individuo.fitness)
        soma = 0
        for i in range(len(lista_produtos)):
            if individuo[i] == 1:
                soma += valores[i]
                print(individuo, soma)

    valores_grafico = info.select('max')
    plt.plot(valores_grafico)
plt.title('Tracking of the best values')
Example #53
def genetic():

    population_size = 5  # num of solutions in the population
    num_generations = 10  # num of time we generate new population

    # create a minimizing fitness, because we want to minimize RMSE
    creator.create('FitnessMax', base.Fitness, weights=(-1.0, ))
    # create a list-based individual to encode a solution (one integer per hyperparameter)
    creator.create('Individual', list, fitness=creator.FitnessMax)

    # create an object of Toolbox class
    toolbox = base.Toolbox()

    toolbox.register("attr_int2", random.randint, 100, 2000)
    toolbox.register("attr_int3", random.randint, 1, 5)
    toolbox.register("attr_int4", random.randint, 1, 5)
    toolbox.register("attr_int5", random.randint, 1, 5)
    toolbox.register("attr_int6", random.randint, 1, 6)
    toolbox.register("individual",
                     tools.initCycle,
                     creator.Individual,
                     (toolbox.attr_int2, toolbox.attr_int3, toolbox.attr_int4,
                      toolbox.attr_int5, toolbox.attr_int6),
                     n=1)

    toolbox.register('population', tools.initRepeat, list, toolbox.individual)

    toolbox.register('mate', tools.cxTwoPoint)
    toolbox.register('mutate',
                     tools.mutUniformInt,
                     low=[100, 1, 1, 1, 1],
                     up=[2000, 5, 5, 5, 6],
                     indpb=0.6)
    toolbox.register('select', tools.selRoulette)
    toolbox.register('evaluate', train_evaluate)

    # create population by calling population function
    population = toolbox.population(n=population_size)

    hof = tools.HallOfFame(3)

    # start GA
    r = algorithms.eaSimple(population,
                            toolbox,
                            cxpb=0.4,
                            mutpb=0.1,
                            ngen=num_generations,
                            halloffame=hof,
                            verbose=False)

    # Print top N solutions
    best_individuals = tools.selBest(hof, k=3)
    best_epochs = None
    best_neuron1 = None
    best_neuron2 = None
    best_neuron3 = None
    best_window = None

    print("\nBest solution is:")
    for bi in best_individuals:
        best_epochs = bi[0]
        best_neuron1 = bi[1]
        best_neuron2 = bi[2]
        best_neuron3 = bi[3]
        best_window = bi[4]

        print('\n epochs = ', best_epochs, ", neurons = [", best_neuron1, ",",
              best_neuron2, ",", best_neuron3, "]", "window size = ",
              best_window)
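    # Hedged helper sketch (not in the original): map an individual's genes back
    # to named hyperparameters, following the gene order of the initCycle
    # registration above.
    def decode(individual):
        epochs, neuron1, neuron2, neuron3, window = individual
        return {'epochs': epochs,
                'neurons': [neuron1, neuron2, neuron3],
                'window': window}

    print(decode(hof[0]))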
Example #54
File: GP_deap.py  Project: oislen/Kaggle
def GP_deap(evolved_train):
    global HOWMANYITERS
    import operator
    import math
    import random

    from deap import algorithms
    from deap import base, creator
    from deap import tools
    from deap import gp

    # dropping Survived and Passenger ID because we can not use them for training
    outputs = evolved_train['Survived'].values.tolist()
    evolved_train = evolved_train.drop(["Survived", "PassengerId"], axis=1)
    inputs = evolved_train.values.tolist()  # to np array

    def protectedDiv(left, right):
        try:
            return left / right
        except ZeroDivisionError:
            return 1

    def randomString(stringLength=10):
        """Generate a random string of fixed length """
        letters = string.ascii_lowercase
        return ''.join(random.choice(letters) for i in range(stringLength))

    #choosing Primitives
    pset = gp.PrimitiveSet("MAIN", len(evolved_train.columns))  # add here
    pset.addPrimitive(operator.add, 2)
    pset.addPrimitive(operator.sub, 2)
    pset.addPrimitive(operator.mul, 2)
    pset.addPrimitive(protectedDiv, 2)
    pset.addPrimitive(math.cos, 1)
    pset.addPrimitive(math.sin, 1)
    pset.addPrimitive(math.tanh, 1)
    pset.addPrimitive(max, 2)
    pset.addPrimitive(min, 2)
    pset.addEphemeralConstant(randomString(), lambda: random.uniform(-10, 10))
    # Rename ARG0..ARG49 to x1..x50 (50 as a precaution; 34 would be enough,
    # and renameArguments ignores names that do not exist in the set).
    pset.renameArguments(**{'ARG{}'.format(i): 'x{}'.format(i + 1) for i in range(50)})

    # two object types are needed: an individual containing the genotype,
    # and a fitness (a measure of the quality of a solution)
    creator.create("FitnessMin", base.Fitness, weights=(1.0, ))  # a weight of +1.0 maximizes the accuracy returned by evalSymbReg
    creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)

    #register some parameters specific to the evolution process.
    toolbox = base.Toolbox()
    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=3)  #
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("compile", gp.compile, pset=pset)

    #evaluation function, which will receive an individual as input, and return the corresponding fitness.
    def evalSymbReg(individual):
        # Transform the tree expression in a callable function
        func = toolbox.compile(expr=individual)
        # Evaluate the accuracy of individuals // 1|0 == survived
        return math.fsum(
            np.round(1. - (1. / (1. + np.exp(-func(*in_))))) == out
            for in_, out in zip(inputs, outputs)) / len(evolved_train),

    toolbox.register("evaluate", evalSymbReg)
    toolbox.register("select", tools.selTournament, tournsize=3)
    toolbox.register("mate", gp.cxOnePoint)
    toolbox.register("expr_mut", gp.genFull, min_=0, max_=3)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)

    toolbox.decorate(
        "mate", gp.staticLimit(key=operator.attrgetter("height"),
                               max_value=17))
    toolbox.decorate(
        "mutate",
        gp.staticLimit(key=operator.attrgetter("height"), max_value=17))

    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)

    #Statistics over the individuals fitness and size
    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    stats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb=0.7,
                                   mutpb=0.3,
                                   ngen=HOWMANYITERS,
                                   stats=stats,
                                   halloffame=hof,
                                   verbose=True)

    #Parameters:
    #population – A list of individuals.
    #toolbox – A Toolbox that contains the evolution operators.
    #cxpb – The probability of mating two individuals.
    #mutpb – The probability of mutating an individual.
    #ngen – The number of generation.
    #stats – A Statistics object that is updated inplace, optional.
    #halloffame – A HallOfFame object that will contain the best individuals, optional.
    #verbose – Whether or not to log the statistics.

    # Transform the tree expression of hof[0] in a callable function and return it
    func2 = toolbox.compile(expr=hof[0])

    return func2
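A hedged sketch of applying the returned func2 the same way evalSymbReg scores individuals; evolved_test is a placeholder DataFrame prepared like evolved_train (same engineered columns, without 'Survived' and 'PassengerId'):

import numpy as np

func2 = GP_deap(evolved_train)
test_inputs = evolved_test.values.tolist()     # placeholder test features
predictions = [int(np.round(1. - (1. / (1. + np.exp(-func2(*row))))))
               for row in test_inputs]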
Example #55
if __name__ == "__main__":
    random.seed(1)
    população = toolbox.population(n=50)
    probabilidadeCrossover = 1.0
    probabilidadeMutação = 0.01
    numeroGerações = 200
    
    estatisticas = tools.Statistics(key=lambda individuo: individuo.fitness.values)
    estatisticas.register("max", numpy.max)
    estatisticas.register("min", numpy.min)
    estatisticas.register("med", numpy.mean)
    estatisticas.register("std", numpy.std)
    
    população, info = algorithms.eaSimple(população, toolbox,
                                          probabilidadeCrossover,
                                          probabilidadeMutação,
                                          numeroGerações, estatisticas)
    melhores = tools.selBest(população, 1)
    for individuo in melhores:
        print(individuo)
        print(individuo.fitness)
        soma=0
        for i in range(len(listaProdutos)):
            if individuo[i] == 1:
                soma += listaProdutos[i].valor
                print("Nome: %s R$ %s " % (listaProdutos[i].nome,
                                           listaProdutos[i].valor))
        print("Melhor solução: %s" % soma)
    
    valoresGrafico = info.select("max")
    plt.plot(valoresGrafico)
def ga_opt(load_dir_store, hparams):
    # Load all the saved data_store.pkl into data_store list
    data_store = prepare_grand_data_store(load_dir_store)

    yt = data_store[0]['train']['df'].iloc[:, -6:-3].values
    p_yt_store = np.array(
        [data['train']['df'].iloc[:, -3:].values for data in data_store])
    yv = data_store[0]['val']['df'].iloc[:, -6:-3].values
    p_yv_store = np.array(
        [data['val']['df'].iloc[:, -3:].values for data in data_store])

    def eval(individual):
        # Individual is a list of 0/1 flags: if entry j is 1, model j is included in the ensemble, otherwise it is left out
        selected_mask = [
            idx for idx, value in enumerate(individual) if value == 1
        ]
        # Calculate mean relative error for the selected models
        re_t = mean_relative_error(
            yt, np.mean(p_yt_store[selected_mask, :, :], axis=0))
        re_v = mean_relative_error(
            yv, np.mean(p_yv_store[selected_mask, :, :], axis=0))
        re = (re_t + 2 * re_v) / 3
        return (re, )

    creator.create("FitnessMax", base.Fitness, weights=(-1, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    toolbox.register("attr_bool",
                     np.random.choice,
                     np.arange(0, 2),
                     p=hparams['init'])
    toolbox.register("individual",
                     tools.initRepeat,
                     creator.Individual,
                     toolbox.attr_bool,
                     n=len(data_store))
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", eval)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.2)
    toolbox.register("select", tools.selTournament, tournsize=3)
    # Logging
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)
    pop = toolbox.population(n=hparams['n_pop'])
    hof = tools.HallOfFame(1)
    # Run the GA algorithm
    pop, logbook = algorithms.eaSimple(toolbox=toolbox,
                                       population=pop,
                                       cxpb=0.5,
                                       mutpb=0.2,
                                       ngen=hparams['n_gen'],
                                       halloffame=hof,
                                       stats=stats,
                                       verbose=True)

    # Create the ga results dir based on the load dir name
    results_dir = create_results_directory(f'./results/ga/ga_opt',
                                           folders=['plots'],
                                           excels=['ga_results'])
    # Plotting
    gen = logbook.select("gen")
    fit_min = [x.item() for x in logbook.select("min")]
    fit_avg = [x.item() for x in logbook.select("avg")]
    fit_max = [x.item() for x in logbook.select("max")]
    fig, ax1 = plt.subplots()
    line1 = ax1.plot(gen, fit_min, label="Min MRE")
    line2 = ax1.plot(gen, fit_avg, label="Avg MRE")
    line3 = ax1.plot(gen, fit_max, label="Max MRE")
    plt.legend()
    ax1.set_xlabel("Generation")
    ax1.set_ylabel("Relative Error")
    plt.savefig('{}/plots/GA_opt_MRE_all.png'.format(results_dir),
                bbox_inches="tight")
    fig, ax1 = plt.subplots()
    line1 = ax1.plot(gen, fit_min, label="Min MRE")
    plt.legend()
    ax1.set_xlabel("Generation")
    ax1.set_ylabel("Total Generation Cost")
    plt.savefig('{}/plots/GA_opt_min_only.png'.format(results_dir),
                bbox_inches="tight")

    # Store final results
    av = hof[-1]  # av stands for allocation vector
    results_dict = defaultdict(list)
    data_names = [k for k in data_store[0].keys() if k not in ['info']]
    for data, indicator in zip(data_store, av):
        if indicator == 1:  # Means include the model
            for k in data_names:
                results_dict[k].append(data[k]['df'].iloc[:, -3:].values)

    # Create excel workbook to print GA results to
    wb = openpyxl.Workbook()
    # Print allocation vector to excel
    wb.create_sheet('av')
    ws = wb['av']
    model_names = [data['info']['model_name'] for data in data_store]
    print_df_to_excel(df=pd.DataFrame([av, model_names],
                                      index=['av', 'model_names']).T,
                      ws=ws)
    summary_df = {}
    for k, v in results_dict.items(
    ):  # Print the prediction for each dataset to excel
        y = data_store[0][k]['df'].iloc[:, -6:-3].values
        v = np.array(v)
        p_y = np.mean(v, axis=0)
        mse = mean_squared_error(y, p_y)
        mre = mean_relative_error(y, p_y)
        var = np.mean(np.var(v, axis=0))
        summary_df[k] = {'mse': mse, 'mre': mre, 'var': var}
        df = pd.DataFrame(np.hstack((y, p_y)),
                          columns=[f'y{i + 1}' for i in range(3)] +
                          [f'P_y{i + 1}' for i in range(3)])
        wb.create_sheet(k)
        ws = wb[k]
        print_df_to_excel(df=df, ws=ws)
        print_df_to_excel(df=pd.DataFrame.from_dict({
            'mse': [mse],
            'mre': [mre]
        }),
                          ws=ws,
                          start_col=10)
    # Print summary of losses for different dataset in the summary worksheet
    summary_df = pd.DataFrame.from_dict(summary_df)

    def move_column_inplace(df, col, pos):
        col = df.pop(col)
        df.insert(pos, col.name, col)

    move_column_inplace(summary_df, 'train', 0)
    move_column_inplace(summary_df, 'val', 1)
    ws = wb['Sheet']
    print_df_to_excel(df=summary_df, ws=ws, start_row=5)
    print_df_to_excel(df=pd.DataFrame(hparams), ws=ws)
    # Save and close excel workbook
    wb.save(f'{results_dir}/ga_results.xlsx')
    wb.close()
Example #57
File: tpot.py  Project: hsaputra/tpot
    def fit(self, features, classes):
        """Fits a machine learning pipeline that maximizes classification
        accuracy on the provided data

        Uses genetic programming to optimize a machine learning pipeline that
        maximizes classification accuracy on the provided features and classes.
        Performs an internal stratified training/testing cross-validation split
        to avoid overfitting on the provided data.

        Parameters
        ----------
        features: array-like {n_samples, n_features}
            Feature matrix
        classes: array-like {n_samples}
            List of class labels for prediction

        Returns
        -------
        None

        """
        try:
            if self.random_state:
                random.seed(self.random_state)
                np.random.seed(self.random_state)

            features = features.astype(np.float64)
            classes = classes.astype(np.float64)

            self._toolbox.register('evaluate',
                                   self._evaluate_individual,
                                   features=features,
                                   classes=classes)
            pop = self._toolbox.population(n=self.population_size)

            def pareto_eq(ind1, ind2):
                """Function used to determine whether two individuals are equal
                on the Pareto front

                Parameters
                ----------
                ind1: DEAP individual from the GP population
                    First individual to compare
                ind2: DEAP individual from the GP population
                    Second individual to compare

                Returns
                ----------
                individuals_equal: bool
                    Boolean indicating whether the two individuals are equal on
                    the Pareto front

                """
                return np.all(ind1.fitness.values == ind2.fitness.values)

            self.hof = tools.ParetoFront(similar=pareto_eq)

            verbose = (self.verbosity == 2)

            # Start the progress bar
            num_evaluations = self.population_size * (self.generations + 1)
            self.pbar = tqdm(total=num_evaluations,
                             unit='pipeline',
                             leave=False,
                             disable=(not verbose),
                             desc='GP Progress')

            pop, _ = algorithms.eaSimple(population=pop,
                                         toolbox=self._toolbox,
                                         cxpb=self.crossover_rate,
                                         mutpb=self.mutation_rate,
                                         ngen=self.generations,
                                         halloffame=self.hof,
                                         verbose=False)

        # Allow for certain exceptions to signal a premature fit() cancellation
        except (KeyboardInterrupt, SystemExit):
            if self.verbosity > 0:
                print('GP closed prematurely - will use current best pipeline')
        finally:
            # Close the progress bar
            # Standard truthiness checks won't work for tqdm
            if not isinstance(self.pbar, type(None)):
                self.pbar.close()

            # Reset gp_generation counter to restore initial state
            self.gp_generation = 0

            # Store the pipeline with the highest internal testing accuracy
            if self.hof:
                top_score = 0.
                for pipeline, pipeline_scores in zip(self.hof.items,
                                                     reversed(self.hof.keys)):
                    if pipeline_scores.wvalues[1] > top_score:
                        self._optimized_pipeline = pipeline
                if self._optimized_pipeline is None:
                    raise ValueError((
                        'There was an error in the TPOT optimization process. '
                        'This could be because the data was not formatted properly, '
                        'or because data for a regression problem was provided to the TPOTClassifier object. '
                        'Please make sure you passed the data to TPOT correctly.'
                    ))
                else:
                    self._fitted_pipeline = self._toolbox.compile(
                        expr=self._optimized_pipeline)
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    self._fitted_pipeline.fit(features, classes)

            if self.verbosity >= 1 and self._optimized_pipeline:
                # Add an extra line of spacing if the progress bar was used
                if verbose:
                    print()

                print('Best pipeline: {}'.format(self._optimized_pipeline))
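A heavily hedged usage sketch of this fit method; the constructor name and arguments below are assumptions inferred from the attributes referenced above (population_size, generations, verbosity), not a confirmed API:

tpot = TPOT(population_size=100, generations=100, verbosity=2)  # hypothetical constructor
tpot.fit(features, classes)   # features: {n_samples, n_features}, classes: {n_samples}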
Example #58
toolbox.register("mutate", tools.mutFlipBit, indpb=mutation_rate)
toolbox.register("select", tools.selBest)

pop = toolbox.population(n=population_size)

best = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)

pop, log = algorithms.eaSimple(pop,
                               toolbox,
                               cxpb=mate_rate,
                               mutpb=mutation_rate,
                               ngen=n_of_generations,
                               stats=stats,
                               halloffame=best,
                               verbose=True)

print("Clause count: " + str(formula.clause_count))

generations = [i['gen'] for i in log]
bests = [i['max'] for i in log]
averages = [i['avg'] for i in log]

fig = go.Figure()
fig.add_trace(
    go.Scatter(x=tuple(generations),
               y=tuple(bests),
               mode='lines+markers',
# Evaluate the entire population
fitnesses = list(map(toolbox.evaluate, pop))
for ind, fit in zip(pop, fitnesses):
	ind.fitness.values = fit

# The Appeal of Evolution
# Begin the evolution
CXPB=0.5
MUTPB=0.1
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=40, stats=stats, verbose=True)
'''NGEN=40
for gen in range(NGEN):
	print("-- Generation %i --" % gen)

	for i in pop:
		print(i)
	# Gather all the fitnesses in one list and print the stats
	fits = [ind.fitness.values[0] for ind in pop]

	length = len(pop)
	mean = sum(fits) / length
	sum2 = sum(x*x for x in fits)
	std = abs(sum2 / length - mean**2)**0.5

	print("  Min %s" % min(fits))
## set a 5 percent per-gene mutation probability; mutShuffleIndexes randomly
## swaps genes within the chromosome, so the result is still a permutation
toolbox.register("mutate", tools.mutShuffleIndexes, indpb=0.05)
## set the selection function to tournament selection, which picks three
## chromosomes at random and selects the one with the best fitness
toolbox.register("select", tools.selTournament, tournsize=3)

## set the population size (try n=100, n=20 or n=10 to see the impact of n)
pop = toolbox.population(n=10)
## have variable best_genome store the chromosome with the best fitness
best_genome = tools.HallOfFame(1)

## run GA to get the solution
#algorithms.eaSimple(pop, toolbox, xo prob, mut prob, gens, store best)
algorithms.eaSimple(pop, toolbox, 0.7, 0.2, 40, halloffame=best_genome)
## store the solution into best_path
best_path = best_genome[0]
## print best_path
print('\nBEST PATH:\n')
for i in range(len(best_path)):
    print(df['Neighborhood'][best_path[i]])
print(df['Neighborhood'][best_path[0]])
cost = 0
print('\nBEST PATH COST:\n')
for i in range(len(best_path)-1):
    tempo = distance_matrix[best_path[i]][best_path[i+1]]
    cost+= tempo
    print(df['Neighborhood'][best_path[i]], tempo)
print(df['Neighborhood'][best_path[0]], distance_matrix[best_path[-1]][best_path[0]])
cost += distance_matrix[best_path[-1]][best_path[0]]  # close the tour from the last visited neighborhood back to the start