def main():
    random.seed(64)
    
    creator.create("FitnessMax", base.Fitness, weights=(-1.0,))
    creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMax) #@UndefinedVariable
    
    toolbox = base.Toolbox()
    
    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 1)
    
    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, DIM*L) #@UndefinedVariable
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    
    
    toolbox.register("evaluate", rastrigin_arg0)
    toolbox.register("mate", tools.cxTwoPoints)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.001)
    toolbox.register("select", tools.selTournament, tournsize=5)
    
    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)
    
    algorithms.eaSimple(pop, toolbox, cxpb=0.8, mutpb=1, ngen=100, stats=stats,
                        halloffame=hof, verbose=True)
    
    return pop, stats, hof
Example #2
def init_toolbox(ind_size, data_dict, depot):
    """
    Initializes the toolbox used for the genetic algorithm.
    """
    # Assign the custom individual class to the toolbox
    # And set the number of wanted fitnesses
    toolbox = base.Toolbox()
    creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -1.0, -1.0))
    creator.create(
        "Individual",
        genome.MvrpIndividual,
        fitness=creator.FitnessMulti)

    # Assign the initialisation operator to the toolbox's individual
    # And describe the population initialisation
    toolbox.register(
        "individual", operators.init, creator.Individual,
        size=ind_size, data=data_dict)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Set the different genetic operators inside the toolbox
    toolbox.register("clone", copy.deepcopy)
    toolbox.register("mate", operators.crossover, data=data_dict)
    toolbox.register("mutate", operators.constrained_swap, data=data_dict)
    toolbox.register("select", tools.selNSGA2)
    toolbox.register(
        "evaluate", operators.evaluate, data=data_dict,
        depot=depot, size=ind_size)

    return toolbox
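init_toolbox only assembles the toolbox; nothing in the example actually runs an algorithm. A minimal driver sketch, assuming the operators registered above and purely illustrative mu/lambda/probability values (run_nsga2 is a hypothetical helper, not part of the original code):

from deap import algorithms

def run_nsga2(toolbox, mu=100, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=50):
    # Evolve with a (mu + lambda) strategy; the selNSGA2 operator registered
    # above performs the multi-objective survivor selection.
    pop = toolbox.population(n=mu)
    pop, logbook = algorithms.eaMuPlusLambda(
        pop, toolbox, mu=mu, lambda_=lambda_,
        cxpb=cxpb, mutpb=mutpb, ngen=ngen, verbose=False)
    return pop, logbook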
Example #3
def print_out_optimals(model_name):
    files = []
    for fd in folders:
        files.extend(glob.glob(fd + '/*.txt'))
    files = filter(lambda f: model_name in f, files)

    valid_objs = set()
    for records in files:
        with open(records, 'r') as f:
            lines = f.readlines()
            lines = map(lambda x: x.rstrip(), lines)
            start = lines.index("~~~")
            fits = lines[start + 1:-2]
            for fit in fits:
                if fit.startswith('0.0'):
                    valid_objs.add(fit)

    valid_objs = map(lambda x: map(float, x.split(' ')), valid_objs)

    creator.create("FitnessMin", base.Fitness, weights=[-1.0] * 5)
    creator.create("Individual", list, fitness=creator.FitnessMin)

    pop = list()
    for i in valid_objs:
        ind = creator.Individual([])
        ind.fitness = creator.FitnessMin(i)
        pop.append(ind)
    frontier = _get_frontier(pop)

    with open(PROJECT_PATH+'/optimal_in_his/'+model_name+'.txt', 'w') as f:
        f.write('~~~\n')
        for fer in frontier:
            f.write(' '.join(map(str, fer)))
            f.write('\n')
    def optimize_model(self, ngen=30, cxpb=0.5, mutpb=0.1, pop_size=15):
        """
        DEAP Optimization
        """
        print('OPTIMIZATION STARTED')
        creator.create("FitnessMax", base.Fitness, weights=(1.0,))
        creator.create("Individual", list, fitness=creator.FitnessMax)
        toolbox = base.Toolbox()
        toolbox.register("candidate", self.generate_candidate,
                         [self.nlay, self.nrow, self.ncol])
        toolbox.register("individual", tools.initIterate,
                         creator.Individual, toolbox.candidate)
        toolbox.register("population", tools.initRepeat,
                         list, toolbox.individual)
        toolbox.register("mate", tools.cxOnePoint)
        toolbox.register("evaluate", self.evaluate)
        toolbox.register("mutate", self.mutate)
        toolbox.register("select", tools.selTournament, tournsize=3)

        pop = toolbox.population(n=pop_size)

        self.hall_of_fame = tools.HallOfFame(maxsize=100)

        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("mean", np.mean, axis=0)
        stats.register("std", np.std, axis=0)
        stats.register("min", np.min, axis=0)
        stats.register("max", np.max, axis=0)
        self.result, self.log = algorithms.eaSimple(
            pop, toolbox,
            cxpb=cxpb, mutpb=mutpb,
            ngen=ngen, stats=stats,
            halloffame=self.hall_of_fame, verbose=False
            )
        return self.hall_of_fame
Example #5
    def run(self, measure, n_gens=100, seed=None, resume=False, **kwargs):
        ''' Main abbreviated measure generation function.

        Args:
            measure (Measure): A Measure instance to abbreviate
            n_gens (int): Number of generations to run GA for
            seed (int): Optional integer to use as random seed
            resume (bool): If True, AND the measure passed is the same as the
                one already stored, AND the Generator has been run before, then
                pick up where we left off--i.e., start with the last population
                produced instead of initializing a new one.
            kwargs: Additional keywords to pass on to the evaluation method of
                the current LossFunction class.

        Returns: A list of items included in the abbreviated measure.
        '''

        # Set random seed for both native Python and Numpy, to be safe
        random.seed(seed)
        np.random.seed(seed)

        # Set up the GA
        creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
        creator.create("Individual", list, fitness=creator.FitnessMin)

        toolbox = base.Toolbox()
        toolbox.register(
            "attr_bool", self._random_boolean, self.zero_to_one_ratio)
        toolbox.register("individual", tools.initRepeat, creator.Individual,
                         toolbox.attr_bool, measure.n_X)
        toolbox.register(
            "population", tools.initRepeat, list, toolbox.individual)
        toolbox.register("evaluate", self.evaluate)
        toolbox.register("mate", tools.cxTwoPoint)
        toolbox.register("mutate", tools.mutFlipBit, indpb=self.indpb)
        toolbox.register(
            "select", tools.selTournament, tournsize=self.tourn_size)

        self.measure = measure
        self.evaluation_keywords = kwargs

        # Cross-validation
        if self.cross_val:
            inds = range(self.measure.n_subjects)
            random.shuffle(inds)
            self.train_subs = [x for i, x in enumerate(inds) if i % 2 != 0]
            self.test_subs = [x for x in inds if x not in self.train_subs]
            self.test_measure = copy.deepcopy(self.measure)
            self.measure.select_subjects(self.train_subs)
            self.test_measure.select_subjects(self.test_subs)

        # Initialize population or pick up where we left off.
        if resume and self.measure == measure and hasattr(self, 'pop'):
            pop = self.pop
        else:
            self.reset()
            pop = toolbox.population(n=self.pop_size)

        self._evolve(
            measure, pop, toolbox, n_gens, cxpb=self.cxpb, mutpb=self.mutpb)
Example #6
 def __init__(self, paramSetting):
     self.paramSetting = paramSetting
     creator.create("FitnessMax", base.Fitness, weights=(paramSetting.maxFitIndiv,))
     creator.create("Individual", list, fitness=creator.FitnessMax)
     self.toolbox = self.setup_toolbox()
     self.stats = self.setup_stats()
     self.graph = None
Example #7
    def main(self):
        '''
        Main optimization function

        Registers the previously defined functions and types in the toolbox and launches the computations

        Returns:
            the hall of fame of the best individuals
        '''
        creator.create("Fitness", base.Fitness, weights=(-1,))
        creator.create("Individual", dict, fitness=creator.Fitness)

        toolbox = base.Toolbox()
        toolbox.register("individual", self.createInd, creator.Individual)
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
        toolbox.register("evaluate", self.evaluate)
        toolbox.register("mutate", self.mutate)
        toolbox.register("select", self.selCustom)
        toolbox.register("clone", copy.deepcopy)
        toolbox.register("mate", self.crossMultiPoint)

        stats = tools.Statistics()
        stats.register("max", self.max)
        stats.register("min", self.min)
        stats.register("nbCurrentGen", self.nbGen)
        hof = tools.HallOfFame(10, similar=self.equals)
        pop = toolbox.population(n=self.mu)
        algorithms.eaMuPlusLambda(pop, toolbox, mu=self.mu, lambda_=self.lambda_, cxpb=self.cxpb, mutpb=self.mutpb, ngen=self.ngen, halloffame=hof, stats=stats)
        #print "fini"
        return hof
Example #8
    def __init__(self, problem, job):
        '''
        @param problem: problem
        @param nodes: list of nodes
        '''
        self.job = job
        random.seed(1000+job)

        self.toolbox = base.Toolbox()
        creator.create("FitnessMin1", base.Fitness, weights=(-1.0,))
        creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin1, tours=list)
        
        current_gen = 0
        self.problem = problem
        
        self.POPSIZE=100
        self.NUMGEN=100
        self.INDSIZE = self.problem.num_of_nodes + len(self.problem.obligatory_nodes)
        self.cxP=0.5
        self.mutP=0.5
        self.init_popsize=0
        
        self.initialize(problem.name)
        
        # for testing only
        self.best_cost = 10**10
        # job, gen
        self.stats = []
Example #9
def main():
    "All function calls in the main() functions are for testing purposes only"  
    dirL='/home/stefano/Documents/Projects/Homeostat/Simulator/Python-port/Homeo/SimulationsData/'
    fileL = 'History-2015-01-11-12-55-19.hist'
    filename = os.path.join(dirL,fileL)
    #logbook = pickle.load(open(filename, 'r'))
    #---------------------------------------------
    #
    # Experiment to see whether I can unpickle the history object in a different environment
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    
    'Homeostat genome is a list plus an ID'     
    creator.create("Individual", list, fitness=creator.FitnessMin, ID=None)   

                
    'Register function to create a random individual'
    #toolbox.register("individual", self.initIndividual, creator.Individual, genomeSize=self.genomeSize, ID = 'DummyID')  
    
    'Register function to create an individual with given genome'
    #toolbox.register('individualClone', self.initIndividualClone, creator.Individual, self.clonableGenome)
    
    #
    #---------------------------------------------
    history = pickle.load(open(filename,'r'))
    showGenealogyTree(history)
Example #10
def example_nevzpominam():
    # Creating appropriate type
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    # Initialization
    IND_SIZE = 100
    toolbox = base.Toolbox()
    toolbox.register("attr_bool", random.randint, 0, 1)
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=IND_SIZE)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", evalOneMax)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)

    population = toolbox.population(n=300)

    NGEN=40

    for gen in range(NGEN):
        offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.1)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
        population = toolbox.select(offspring, k=len(population))
    top10 = tools.selBest(population, k=10)
Example #11
def load_optimization_results(file_name, weights, zipped=True):
    '''
    load the specified bz2 file. The file is assumed to have been saved
    using save_results.
    
    :param file_name: the path of the file
    :param weights: the fitness weights the results were saved with
    :param zipped: load the pickled data from a zip file if True
    :return: the unpickled results
    :raises: IOError if file not found
    :raises: EMAError if weights are not correct
    
    '''
    creator.create("Fitness", base.Fitness, weights=weights)
    creator.create("Individual", dict, 
                   fitness=creator.Fitness) #@UndefinedVariable
    
    file_name = os.path.abspath(file_name)
    debug("loading "+file_name)
    try:
        if zipped:
            file_name = bz2.BZ2File(file_name, 'rb')
        else:
            file_name = open(file_name, 'rb')
        
        results = cPickle.load(file_name)
        
        if results[0].weights != weights:
            raise EMAError("weights are %s, should be %s" % (weights, results[0].weights))
    except IOError:
        warning(file_name + " not found")
        raise
    
    return results
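A hypothetical call, assuming the file was produced by save_results with a two-objective weighting (path and weights below are illustrative only):

results = load_optimization_results('./results.bz2', weights=(-1.0, 1.0))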
Example #12
def get_po_toolbox(predictors, response):
    creator.create("FitnessAge", base.Fitness, weights=(WEIGHT_FITNESS, WEIGHT_AGE_DENSITY))
    creator.create("Individual", SemanticPrimitiveTree, fitness=creator.FitnessAge, age=int)

    toolbox = base.Toolbox()
    pset = symbreg.get_numpy_pset(len(predictors[0]))
    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=MIN_DEPTH_INIT, max_=MAX_DEPTH_INIT)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("population", initialization.syntactically_distinct, individual=toolbox.individual, retries=100)
    toolbox.register("select", tools.selRandom)

    expression_dict = cachetools.LRUCache(maxsize=10000)
    toolbox.register("error_func", ERROR_FUNCTION, response=response)
    toolbox.register("evaluate_error", semantics.calc_eval_semantics, context=pset.context, predictors=predictors,
                     eval_semantics=toolbox.error_func, expression_dict=expression_dict)

    toolbox.register("koza_node_selector", operators.internally_biased_node_selector, bias=INTERNAL_NODE_SELECTION_BIAS)
    toolbox.register("mate", operators.one_point_xover_biased, node_selector=toolbox.koza_node_selector)
    toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=MAX_HEIGHT))
    toolbox.decorate("mate", gp.staticLimit(key=len, max_value=MAX_SIZE))

    mstats = reports.configure_inf_protected_stats()
    multi_archive = get_archive(response)

    pop = toolbox.population(n=POP_SIZE)
    toolbox.register("run", afpo.pareto_optimization, population=pop, toolbox=toolbox, xover_prob=XOVER_PROB,
                     mut_prob=MUT_PROB, ngen=NGEN, tournament_size=TOURN_SIZE, num_randoms=1, stats=mstats,
                     archive=multi_archive, calc_pareto_front=False)

    toolbox.register("save", reports.save_log_to_csv)
    toolbox.decorate("save", reports.save_archive(multi_archive))
    return toolbox
Example #13
    def cont(self,str_ind):
        pset = gp.PrimitiveSet("MAIN", 13)
        pset.addPrimitive(operator.add, 2)
        pset.addPrimitive(operator.sub, 2)
        pset.addPrimitive(operator.mul, 2)
        pset.addPrimitive(safe_div, 2)
        pset.addPrimitive(np.cos, 1)
        pset.addPrimitive(np.sin, 1)
        #pset.addPrimitive(myexp, 1)
        pset.addPrimitive(mylog, 1)
        pset.addPrimitive(mypower2, 1)
        pset.addPrimitive(mypower3, 1)
        pset.addPrimitive(mysqrt, 1)
        pset.addPrimitive(np.tan, 1)
        pset.addPrimitive(np.tanh, 1)
        #pset.addEphemeralConstant("rand101", lambda: random.uniform(-1, 1))
        pset.renameArguments(ARG0='x0',ARG1='x1', ARG2='x2', ARG3='x3', ARG4='x4', ARG5='x5', ARG6='x6', ARG7='x7',  ARG8='x8', ARG9='x9',  ARG10='x10',  ARG11='x11',  ARG12='x12')

        creator.create("Individual", gp.PrimitiveTree)

        toolbox = base.Toolbox()
        toolbox.register("expr", gp.genFull, pset=pset, min_=1, max_=3)
        toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
        toolbox.register("compile", gp.compile, pset=pset)



        #Here you put the string you have for the individual.
        cade=str_ind

        #This line converts the string into an individual. You have to pass it the primitive set (pset).
        expr1=neat_gp.PrimitiveTree.from_string(cade, pset)
        return len(expr1)
def ea(evaluator, pop_size=50, ngen=40, mutation_rate=0.05):

    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", array.array, typecode='l', fitness=creator.FitnessMax)

    toolbox = base.Toolbox()

    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 2**PADDING - 1)

    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, 3)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", evaluator)
    toolbox.register("mate", crossover)  # was cxTwoPoint
    toolbox.register("mutate", mutation, indpb=mutation_rate)  # was 0.05
    toolbox.register("select", tools.selRoulette)  # was setTournament

    pop = toolbox.population(n=pop_size)  # was 300
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    algorithms.eaSimple(pop, toolbox, cxpb=1, mutpb=1, ngen=ngen,  # was 0.5, 0.2, 40
                        stats=stats, halloffame=hof, verbose=True)

    return hof[0]
def deap_test():
    # onemax example evolves to print list of ones: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    numpy.random.seed(1)
    def evalOneMax(individual):
        return sum(individual),

    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, typecode='b', fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    toolbox.register("attr_bool", numpy.random.randint, 0, 1)
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, 10)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", evalOneMax)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)

    pop   = toolbox.population(n=50)
    hof   = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=30, 
                                   stats=stats, halloffame=hof, verbose=False) # change to verbose=True to see evolution table
    print "deap test >>>", hof[0]
    def setup_deap(self):
        """ Setups configuration for the DEAP library. """
        # Single-objective maximization (1.0)
        creator.create("FitnessMax", base.Fitness, weights=(Decimal(1.0),))

        # Individual
        creator.create("Individual", list, fitness=creator.FitnessMax)
        self.deap_toolbox.register("attr_bool", random.randint, 0, 1)
        self.deap_toolbox.register(
            "individual",
            tools.initRepeat,
            creator.Individual,
            self.deap_toolbox.attr_bool,
            n=FeatureWeightLearner.FEATURES * self.bits,
        )
        self.deap_toolbox.register("population", tools.initRepeat, list, self.deap_toolbox.individual)

        # Operators
        self.deap_toolbox.register("evaluate", self.fitness_wrapper)
        self.deap_toolbox.register("mate", tools.cxTwoPoint)
        self.deap_toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
        self.deap_toolbox.register("select", tools.selTournament, tournsize=3)

        # Constraints
        sum_is_one = lambda r, f, a: round(r + f + a, 5) == 1
        non_zero = lambda r, f, a: r * f * a > 0
        self.constraints.extend([sum_is_one, non_zero])
Example #17
    def fit(self, X, y):
        #ToDo: Check that the columns or the feature names are not same
        #ToDo: All other general sanity checks are also to be made.
        #ToDo: make all the parameters in the init function

        input_feat = list(X.columns.values);
        creator.create("FitnessMax", base.Fitness, weights=(-1.0,))
        creator.create("Individual", list, fitness=creator.FitnessMax)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size)
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        
        toolbox = base.Toolbox()
        toolbox.register("attr_bool", self.get_indiv_sample, data=X_train, output=y_train, base_estimator=self.base_estimator)
        toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=self.N_individual)
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
        toolbox.register("evaluate", evalOneMax,x_te = X_test, y_te = y_test, test_frac = self.test_frac, test_frac_flag = self.test_frac_flag)
        toolbox.register("mate", self.crossover_func)
        toolbox.register("mutate", mutate_feat, indpb=self.indpb,input_fe = input_feat, X_tr = X_train)
        toolbox.register("select", tools.selTournament, tournsize=3)
        
        pop = toolbox.population(n=self.N_population)
        hof = tools.HallOfFame(1, similar=compare_hof);
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("min", np.min)
        stats.register("max", np.max)
        self.pop, self.logbook = algorithms.eaSimple(pop, toolbox, cxpb=self.cxpb, mutpb=self.mutpb, ngen=self.ngen, stats=stats, halloffame=hof,  verbose=True)
        self.hof = hof
        #return pop, logbook, hof
        return self
Example #18
    def __init__(self):
        super(GA, self).__init__()
        self.pop_size = 1000
        self.mut_rate = 0.2
        self.cross_rate = 0.9
        self.generations = 20
        self.tournament_size = 3
        self.N = 100
        creator.create("FitnessMax", base.Fitness, weights=(1.0,))
        creator.create("Individual", list, fitness=creator.FitnessMax)

        self.toolbox = base.Toolbox()
        # Attribute generator
        self.toolbox.register("attr_bool", random.randint, 0, 1)
        # Structure initializers
        self.toolbox.register("individual", tools.initRepeat, creator.Individual, 
            self.toolbox.attr_bool, 100)
        self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)

        # Operator registering
        self.toolbox.register("evaluate", self.fitness_function)
        self.toolbox.register("mate", tools.cxTwoPoints)
        self.toolbox.register("mutate", self.mutate, indpb=0.05)
        self.toolbox.register("select", tools.selTournament, tournsize=self.tournament_size)

        self.population_snapshots = []
        self.genotypes_history = Genotypes(min=False)
def create_toolbox(hyperparameters, pset):
    # Retrieve the information about the hyperparameters
    n_tournament = hyperparameters['n_tournament']
    init_depth = hyperparameters['init_depth']

    # Characteristics of the individual and of the fitness
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)

    # Creation of the toolbox
    toolbox = base.Toolbox()
    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=init_depth)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("select", tools.selTournament, tournsize=n_tournament)
    toolbox.register("mate", gp.cxOnePoint)
    toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)

    # To be updated later depending on the fold (see the sketch after this function)
    toolbox.register("evaluate", eval_symbreg, pset=pset, trX=None, trY=None, teX=None, teY=None)

    # Bloat control
    toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
    toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))

    return toolbox
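The evaluate operator above is registered with placeholder data (trX=None, ...) and is meant to be updated per fold. Registering the same alias again simply overwrites the earlier entry, so a per-fold update might look like the following sketch (toolbox is the object returned by create_toolbox; trX_fold and friends stand in for whatever the caller's cross-validation split provides):

# hypothetical per-fold update; trX_fold/trY_fold/teX_fold/teY_fold come from the caller's CV split
toolbox.register("evaluate", eval_symbreg, pset=pset,
                 trX=trX_fold, trY=trY_fold, teX=teX_fold, teY=teY_fold)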
Example #20
    def ZDT1_init_SPEA(self):

        creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
        creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)

        # Functions zdt1, zdt2, zdt3 have 30 dimensions, zdt4 and zdt6 have 10

        #NDIM = 6
        NDIM = 30

        toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
        toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)

        toolbox.register("evaluate", benchmarks.zdt1)
        # toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)
        # toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)
        #toolbox.register("select", tools.selNSGA2)

        toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=0.5)
        toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=0.5, indpb=1.0)

        #toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, n=6) #@UndefinedVariable


        toolbox.register("select", tools.selSPEA2)
        toolbox.register("selectTournament", tools.selTournament, tournsize=2)
Example #21
    def __init__(self,knapsack_file="weing8.pkl"):
        super(MDimKnapsack, self).__init__()
        creator.create("FitnessMax", base.Fitness, weights=(1.0,))
        creator.create("Individual", list, fitness=creator.FitnessMax)
        self.pop_size = 100
        self.mut_rate = 1.0
        self.cross_rate = 0.2
        self.generations = 2000

        self.knapsack = pickle.load(open(knapsack_file))
        self.knapsack.capacities = [[2000]]
        self.N = int(self.knapsack.items)
        self.toolbox = base.Toolbox()
        # Attribute generator
        self.toolbox.register("attr_bool", random.randint, 0, 1)
        # Structure initializers
        self.toolbox.register("individual", tools.initRepeat, creator.Individual, 
            self.toolbox.attr_bool, int(self.knapsack.items))
        self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)

        # Operator registering
        self.toolbox.register("evaluate", self.fitness_function, knapsack = self.knapsack)
        self.toolbox.register("mate", tools.cxTwoPoints)
        self.toolbox.register("mutate", self.mutate, indpb=float(1.0/self.N))
        self.toolbox.register("select", tools.selTournament, tournsize=self.tournament_size)

        self.genotypes_history = Genotypes(min=False)
Example #22
def defineBasic():
    """
    the function creates the classes and methods to be used by the GA
    the function returns the toolbox
    """

    # define in how many dimensions the fitness will be evaluated, and what is the goal for each dimension: minimization or maximization
    creator.create("Fitness", base.Fitness, weights = (-1.0, 1.0))

    # define the class individual, that inherits from python 'set' class and has an attribute fitness
    creator.create("Individual", set, fitness = creator.Fitness)

    # get toolbox and methods
    toolbox = base.Toolbox()

    # method to instantiate an item randomly
    toolbox.register("attribute_item", random.randrange, NUM_ITENS)

    # method to instantiate an individual
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attribute_item, IND_INIT_SIZE)

    # a population is modeled as a list of individuals; initRepeat's count argument n is supplied later, when toolbox.population(n=...) is called (see the sketch after this function)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # methods to do evaluation, crossover, mutation and selection
    toolbox.register("evaluate", evaluationFunction)
    toolbox.register("mate", crossoverSet)
    toolbox.register("mutate", mutationSet)
    toolbox.register("select", tools.selNSGA2)    

    return toolbox
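On the question in defineBasic about initRepeat receiving only two arguments: toolbox.register builds a partial function, so initRepeat's count argument n can be omitted at registration time and supplied when the population is actually built. A minimal usage sketch (the value 50 is arbitrary):

toolbox = defineBasic()
pop = toolbox.population(n=50)  # n is passed here, completing initRepeat's arguments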
Example #23
 def _run_algorithm(self, stats, hall_of_fame):
     creator.create("FitnessMulti", base.Fitness, weights=self.config.targets)
     creator.create("Individual", list, fitness=creator.FitnessMulti)
     toolbox = base.Toolbox()
     toolbox.register("individual", self._init_individual, clazz=creator.Individual)
     toolbox.register("population", tools.initRepeat, list, toolbox.individual)
     toolbox.register("evaluate", self._evaluate_individual)
     # Initialize the logger
     logbook = tools.Logbook()
     evals = 0
     pop = toolbox.population(n=1)
     # Initialize population
     while evals < self.config.max_evals:
         print("-- Evaluations %i --" % evals)
         # Evaluate the entire population
         pop[:] = toolbox.population(n=1)
         fitnesses = list(map(toolbox.evaluate, pop))
         for ind, fit in zip(pop, fitnesses):
             ind.fitness.values = fit
             print(ind.fitness)
         if hall_of_fame is not None:
             hall_of_fame.update(pop)
         # Gather the stats
         record = stats.compile(pop)
         print(record)
         evals += 1
         logbook.record(evaluations=evals, gen=evals, **record)
     return pop, logbook, hall_of_fame
    def __init__(self, data_set, fitness_weights = (1.0,),
        playlist_size = 15,  hof_size = 5):

        self.ids = data_set
        creator.create("FitnessMax", base.Fitness, weights=fitness_weights)
        creator.create("Individual", list, fitness=creator.FitnessMax)

        self.toolbox = base.Toolbox()
        # Attribute generator
        self.toolbox.register("attr_float", self.__create_new_playlist)
        # Structure initializers

        self.toolbox.register("individual", tools.initRepeat, creator.Individual,
            self.toolbox.attr_float, playlist_size)

        self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)

        self.toolbox.register("evaluate", self.__eval_one_max)
        self.toolbox.register("mate", self.__cross)
        self.toolbox.register("mutate", self.__mutate_playlist, indpb=1)
        self.toolbox.register("select", tools.selTournament, tournsize=3)

        self.hofs = []
        for i in range(THREADS_NUMBER):
            self.hofs.append(tools.HallOfFame(hof_size))

        print(self.hofs)
        self.ga_algorithms = algorithms.eaSimple

        self.ngramMetric = Ngrams()
Example #25
 def _run_algorithm(self, stats, hall_of_fame):
     # First, we initialize the framework
     creator.create("FitnessMulti", base.Fitness, weights=self.config.targets)
     creator.create("Individual", list, fitness=creator.FitnessMulti)
     toolbox = base.Toolbox()
     toolbox.register("individual", self._init_individual, clazz=creator.Individual)
     toolbox.register("population", tools.initRepeat, list, toolbox.individual)
     toolbox.register("evaluate", self._evaluate_individual)
     toolbox.register("train", self._train_bp)
     toolbox.register("sample", self._sample_architecture)
     toolbox.register("mate", self._mate)
     toolbox.register("mutate", self._mutate)
     toolbox.register("select", self._select)
     toolbox.register("replace", self._replace)
     # Initialize the logger
     logbook = tools.Logbook()
     for i in range(self.config.restarts):
         # Get a group of candidates from an evolutionary process
         pop, logbook = self._micro_eval(toolbox, logbook)
         # Train the candidates using BP
         map(toolbox.train, pop)
         fitnesses = map(toolbox.evaluate, pop)
         for ind, fit in zip(pop, fitnesses):
             ind.fitness.values = fit
         if hall_of_fame is not None:
             hall_of_fame.update(pop)
     return pop, logbook, hall_of_fame
Example #26
 def __init__(
     self,
     task,
     generations=40,
     population=50,
     print_to_stdout=False,
     crossover_probability=0.5,
     mutation_probability=0.2,
 ):
     self.population = population
     self.crossover_probability = crossover_probability
     self.mutation_probability = mutation_probability
     self.task = task
     self.ngen = generations
     self.initial_individuals = None
     self.print_to_stdout = print_to_stdout
     IND_SIZE = self.task.net_maker().params.shape[0]
     creator.create("FitnessMax", base.Fitness, weights=(1.0, 1.0))
     creator.create("Individual", list, fitness=creator.FitnessMax)
     toolbox = base.Toolbox()
     toolbox.register("attribute", random.random)
     toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attribute, n=IND_SIZE)
     toolbox.register("population", tools.initRepeat, list, toolbox.individual)
     toolbox.register("mate", tools.cxTwoPoints)
     toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=2, indpb=0.1)
     toolbox.register("select", tools.selRoulette)
     toolbox.register("evaluate", GeneticLearner.evaluate, self)
     self.toolbox = toolbox
Example #27
 def get_toolbox(self):
     #    
     creator.create('PolyTerms_Fitness', base.Fitness, weights = (1.0,))
     creator.create('PolyTerms_Individual', PolyTerms, fitness = creator.PolyTerms_Fitness)
     #
     toolbox = base.Toolbox()
     #
     #   Individual and Population creation
     #
     toolbox.register('individual', creator.PolyTerms_Individual,
         random_matrix(
             num_terms = self.params['max_num_terms'],
             num_vars = self.scorer.num_features,
             valid_degrees = self.params['valid_degrees']
         )
     )
     #
     toolbox.register('population', tools.initRepeat, list, toolbox.individual)
     #
     #   Genetic operators
     #
     operators = self.get_operators()
     for (k,op) in operators.items():
         toolbox.register(k, op )
     #
     #   Fitness
     #
     toolbox.register('evaluate', self.scorer.score)
     toolbox.register('select', tools.selTournament, tournsize = self.params['tournment_size'])
     
     return toolbox
Example #28
def configure_toolbox(pset, tournsize):
    """ Creates and configures a DEAP toolbox object """

    # minimization problem, so weights are -1
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()

    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=0, max_=10)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("compile", gp.compile, pset=pset)

    # population function
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # evaluation function, pass toolbox and points args
    toolbox.register("evaluate", eval_symb_reg, toolbox=toolbox, points=get_training_dataset())

    # tournament size
    toolbox.register("select", tools.selTournament, tournsize=tournsize)
    # mating strategy
    toolbox.register("mate", gp.cxOnePoint)
    toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)

    # limit mating and mutation to a tree w/ max height of 50
    toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=50))
    toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=50))

    return toolbox
Example #29
    def run_initialize(self):
        design_space = self.fitness.designSpace

        smin = [-1.0 * self.configuration.max_speed *
                (dimSetting['max'] - dimSetting['min'])
                for dimSetting in design_space]
        smax = [self.configuration.max_speed *
                (dimSetting['max'] - dimSetting['min'])
                for dimSetting in design_space]

        creator.create('FitnessMax', base.Fitness, weights=(1.0,))
        creator.create('Particle', list, fitness=creator.FitnessMax,
                       smin=smin, smax=smax,
                       speed=[uniform(smi, sma) for sma, smi in zip(smax,
                                                                    smin)],
                       pmin=[dimSetting['max'] for dimSetting in design_space],
                       pmax=[dimSetting['min'] for dimSetting in design_space],
                       model=False, best=None, code=None)

        self.toolbox = base.Toolbox()
        self.toolbox.register('particle', generate, designSpace=design_space)
        self.toolbox.register('filter_particles', filterParticles,
                              designSpace=design_space)
        self.toolbox.register('population', tools.initRepeat,
                              list, self.toolbox.particle)
        self.toolbox.register('update', updateParticle, trial=self,
                              conf=self.configuration,
                              designSpace=design_space)
        self.toolbox.register('evaluate', self.fitness_function)
Example #30
def get_online_validation_toolbox(train_predictors, test_predictors, train_response, test_response,
                                  primitive_tree_class, *terminal_classes):
    creator.create("ErrorSize", base.Fitness, weights=(-1.0, -1.0))
    creator.create("Individual", primitive_tree_class, fitness=creator.ErrorSize)

    toolbox = base.Toolbox()
    pset = get_parametrized_pset()
    for terminal in terminal_classes:
        pset.addParametrizedTerminal(terminal)
    toolbox.pset = pset

    metadata_dict = dict()
    latitude_longitude = np.load('./metadata/latlon.npy')
    elevation = np.load('./metadata/elevation.npy')
    aspect = np.load('./metadata/aspect.npy')
    metadata_dict["LatLon"] = latitude_longitude
    metadata_dict["Elevation"] = np.repeat(elevation, 3)
    metadata_dict["Aspect"] = np.repeat(aspect, 3)

    toolbox.register("validate_error", linear_model_from_semantics, context=toolbox.pset.context,
                     evaluation_func=fast_numpy_evaluate_metadata,
                     train_predictors=train_predictors, test_predictors=test_predictors,
                     train_response=train_response, test_response=test_response,
                     metadata=metadata_dict)
    toolbox.register("validate", afpo.evaluate_fitness_size, error_func=toolbox.validate_error)
    return toolbox
# Creation of the environment #

print(" ---- OPTIMIZING MAP NUMBER {} ----".format(r))

env = Lake(filepath='map_{}.csv'.format(r),
           number_of_agents=1,
           action_type="complete",
           init_pos=init_points[r - 1][np.newaxis],
           importance_map_path='importance_map_{}.csv'.format(r),
           num_of_moves=30 * r)

IND_SIZE = 8  # Number of actions #

# Creation of the algorithm. Maximization problem. #
creator.create('FitnessMax', base.Fitness, weights=(1.0, ))
creator.create('Individual', list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()

# Generate a random action set

toolbox.register("indices", np.random.randint, 0, 8, r * 30)

# Generation of individuals and population
toolbox.register("individual", tools.initIterate, creator.Individual,
                 toolbox.indices)
toolbox.register("population", tools.initRepeat, list, toolbox.individual,
                 10 * 30 * r)

# registration of genetic operators
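The snippet stops right after this comment. A sketch of a typical registration for integer action sequences like these (actions 0-7, as generated above), assuming standard DEAP operators rather than whatever the original code used:

from deap import tools

toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutUniformInt, low=0, up=7, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
# the evaluation function depends on the Lake environment and is not shown here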
Example #32
ENV_NAME = 'CartPole-v2'
EPISODES = 1  # Number of times to run environment when evaluating
STEPS = 5000  # Max number of steps to run the simulation
count = 0
env = gym.make(ENV_NAME)
POPULATION_SIZE = 40
CROSS_PROB = 0.5
NGEN = 300   # Number of generations
MUTATION_RATE = 0.4 # PERCENTAGE OF GENES TO MUTATE
NUM_PARAMS = 572
N = 26
nn = NNN()

toolbox = base.Toolbox()
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
def decide_weight(network):
    w_list = []
    if not network :
        w_list.extend(np.reshape(np.random.normal(0,1,(4,3)),(12)))
        w_list.extend(np.reshape(np.random.normal(0,1,(1,3)),(3)))
        w_list.extend(np.reshape(np.random.normal(0,1,(3,2)),(6)))
        w_list.extend(np.reshape(np.random.normal(0,1,(1,2)),(2)))
        w_list.extend(np.reshape(np.random.normal(0,1,(2,1)),(2)))
        w_list.extend(np.reshape(np.random.normal(0,1,(1,1)),(1)))
    return w_list

toolbox.register("attr_floats", decide_weight, nn.network)

toolbox.register("individual", tools.initIterate, creator.Individual,
#                               SET UP GA PARAMETERS
# ------------------------------------------------------------------------------
POPULATION_SIZE = 40
CROSS_PROB = 0.5
NUM_GEN = 10000  # Number of generations
DEME_SIZE = 3  # from either side
MUTATION_RATE = 0.4  # PERCENTAGE OF GENES TO MUTATE

# ------------------------------------------------------------------------------
#                               CREATE GA
# ------------------------------------------------------------------------------

# Creates a Fitness class, with a weights attribute.
#    1 means a metric that needs to be maximized (the reward value),
#    -1 means the metric needs to be minimized. All OpenAI Gym
#    environments provide a reward to be maximized.
creator.create("FitnessMax", base.Fitness, weights=(1.0, ))

# Creates a class Individual based on a numpy array, as that is the
# class used for the weights passed to TensorFlow.
#   It also has an attribute that is a Fitness; when that attribute is set,
#   the Fitness __init__() is called automatically, initializing the
#   weight (1)
creator.create("Individual", np.ndarray, fitness=creator.FitnessMax)
# ==============================================================================

toolbox = base.Toolbox()

# Create a function 'attr_item' to return the 'ID' of one item
# num_nodes = num_inputs+1 = 20
NUM_PARAMS = agent.num_params
print(NUM_PARAMS)
Example #34
    def _fit(self, X, y, parameter_dict):
        self._cv_results = None  # To indicate to the property the need to update
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        n_samples = _num_samples(X)
        X, y = indexable(X, y)

        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)' %
                                 (len(y), n_samples))
        cv = check_cv(self.cv, y=y, classifier=is_classifier(self.estimator))

        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual",
                       list,
                       est=clone(self.estimator),
                       fitness=creator.FitnessMax)

        toolbox = base.Toolbox()

        name_values, gene_type, maxints = _get_param_types_maxint(
            parameter_dict)
        if self.gene_type is None:
            self.gene_type = gene_type

        if self.verbose:
            print("Types %s and maxint %s detected" %
                  (self.gene_type, maxints))

        toolbox.register("individual",
                         _initIndividual,
                         creator.Individual,
                         maxints=maxints)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)

        if self.n_jobs > 1:
            pool = Pool(processes=self.n_jobs)
            toolbox.register("map", pool.map)

        toolbox.register("evaluate",
                         _evalFunction,
                         name_values=name_values,
                         X=X,
                         y=y,
                         scorer=self.scorer_,
                         cv=cv,
                         iid=self.iid,
                         verbose=self.verbose,
                         error_score=self.error_score,
                         fit_params=self.fit_params,
                         score_cache=self.score_cache)

        toolbox.register("mate",
                         _cxIndividual,
                         indpb=self.gene_crossover_prob,
                         gene_type=self.gene_type)

        toolbox.register("mutate",
                         _mutIndividual,
                         indpb=self.gene_mutation_prob,
                         up=maxints)
        toolbox.register("select",
                         tools.selTournament,
                         tournsize=self.tournament_size)

        # Stats
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.nanmean)
        stats.register("min", np.nanmin)
        stats.register("max", np.nanmax)
        stats.register("std", np.nanstd)

        # History
        hist = tools.History()
        pop = toolbox.population(n=self.population_size)
        hof = tools.HallOfFame(1)
        toolbox.decorate("mate", hist.decorator)
        toolbox.decorate("mutate", hist.decorator)
        toolbox.decorate("population", hist.decorator)
        hist.update(pop)

        if self.verbose:
            print('--- Evolve in {0} possible combinations ---'.format(
                np.prod(np.array(maxints) + 1)))

        pop, logbook = algorithms.eaSimple(pop,
                                           toolbox,
                                           cxpb=0.5,
                                           mutpb=0.2,
                                           ngen=self.generations_number,
                                           stats=stats,
                                           halloffame=hof,
                                           verbose=self.verbose)

        # Save History
        self.all_history_.append(hist)
        self.all_logbooks_.append(logbook)
        current_best_score_ = hof[0].fitness.values[0]
        current_best_params_ = _individual_to_params(hof[0], name_values)
        if self.verbose:
            print("Best individual is: %s\nwith fitness: %s" %
                  (current_best_params_, current_best_score_))

        if current_best_score_ > self.best_mem_score_:
            self.best_mem_score_ = current_best_score_
            self.best_mem_params_ = current_best_params_

        # Check memoization, potentially unknown bug
        assert str(
            hof[0]
        ) in self.score_cache, "Best individual not stored in score_cache for cv_results_."

        if self.n_jobs > 1:
            pool.close()
            pool.join()

        self.best_score_ = current_best_score_
        self.best_params_ = current_best_params_
Example #35
import json
import pickle
from deap import creator, base
from individual import ListWithAttributes
import math
from collections import defaultdict
import pandas as pd
from deap import tools
import itertools
import sqlalchemy as sql
import pickle as pck

creator.create("FitnessMin", base.Fitness, weights=(-1.0,-1.0,-1.0,))
creator.create("Individual", ListWithAttributes, fitness=creator.FitnessMin)

def sortEpsilonNondominated(individuals, k, first_front_only=False):
    if k == 0:
        return []
    angle = 30
    a = math.tan(math.radians(angle*2))/2
    map_fit_ind = defaultdict(list)
    max_fit = [0,0,0]
    for ind in individuals:
        for i,fit in enumerate(ind.fitness.values):
          if abs(fit) > max_fit[i]:
            max_fit[i] = abs(fit)
    for ind in individuals:
        new_fit = creator.FitnessMin((
            ind.fitness.values[0]/max_fit[0]*1 + ind.fitness.values[1]/max_fit[1]*a + ind.fitness.values[2]/max_fit[2]*a,
            ind.fitness.values[1]/max_fit[1]*1 + ind.fitness.values[0]/max_fit[0]*a + ind.fitness.values[2]/max_fit[2]*a,
            ind.fitness.values[2]/max_fit[2]*1 + ind.fitness.values[0]/max_fit[0]*a + ind.fitness.values[1]/max_fit[1]*a,
        ))
        map_fit_ind[new_fit].append(ind)
    fits = map_fit_ind.keys()
Example #36
#    GNU Lesser General Public License for more details.
#
#    You should have received a copy of the GNU Lesser General Public
#    License along with DEAP. If not, see <http://www.gnu.org/licenses/>.


#    example which maximizes the sum of a list of integers
#    each of which can be 0 or 1

import random

from deap import base
from deap import creator
from deap import tools

creator.create("FitnessMax", base.Fitness, weights=(1.0,))  # 这里这个base.Fitness是干嘛的???
creator.create("Individual", list, fitness=creator.FitnessMax)  # 这里的list,fitness是参数,干嘛的???

toolbox = base.Toolbox()  # base是个很基本的类啊!!!看来很重要

# Attribute generator: define 'attr_bool' to be an attribute ('gene')
#                      which corresponds to integers sampled uniformly
#                      from the range [0,1] (i.e. 0 or 1 with equal
#                      probability)
toolbox.register("attr_bool", random.randint, 0, 1)  # 包含了0,1的随机整数。不明白这里是干嘛的???

# Structure initializers: define 'individual' to be an individual
#                         consisting of 100 'attr_bool' elements ('genes')
toolbox.register("individual", tools.initRepeat, creator.Individual,  # tools.initRepeat是干嘛的???
                 toolbox.attr_bool, 100)
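On the question about tools.initRepeat above: it simply calls the given function n times and collects the results in the given container. A tiny stand-alone illustration, independent of the snippet:

from deap import tools

tools.initRepeat(list, lambda: 1, 3)  # -> [1, 1, 1]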
Example #37
def algortimo_genetico(calorias):

    # Indicates how the function's weights will be evaluated (in this case, the higher the value, the better the function)
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    # Creates the tool that will register the individuals and the selection, crossover and mutation strategies
    toolbox = base.Toolbox()

    # Indicates how each individual of the population will be created. In this case, each individual is made up of
    # n binary digits, where N is the value of comidas.LONGITUD_CROMOSOMA
    toolbox.register("attr_bool", random.randint, 0, 1)
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=comidas.LONGITUD_CROMOSOMA)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Indicates the fitness function to evaluate for each individual. In this case the fitness function always
    # receives a parameter indicating the number of calories the diet must have.
    toolbox.register("evaluate", funcion_aptitud, calorias_a_consumir=calorias)

    # Indicates the selection (select), crossover (mate) and mutation (mutate) strategies
    probabilidad_cruzamiento = 1
    probabilidad_mutacion = 0.2
    toolbox.register("select", tools.selTournament, tournsize=50)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.1)

    # Indicates the number of generations and the number of individuals of the initial population
    cantidad_de_generaciones = 50
    numero_de_poblacion_inicial = 5000

    # Indicates the number of individuals of the initial population, which stays constant over the whole run.
    population = toolbox.population(n=numero_de_poblacion_inicial)

    # The HallOfFame is used to store the N best individuals from across all generations
    mejores_n_individuos_de_entre_todas_las_generaciones = tools.HallOfFame(cantidad_de_generaciones)

    # Statistics are created to analyze each cycle. In this case, the statistics recorded are
    #   -max = returns the fitness value of the best individual of the cycle
    #   -avg = returns the average fitness of all the individuals of the cycle
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("max", numpy.max)
    stats.register("avg", numpy.average)

    # This is the method that runs the simulation itself. The most important parameters are:
    #   -population --> Initial population
    #   -toolbox    --> Contains the information about the selection, mutation and crossover methods to be used
    #   -cxpb       --> Crossover probability
    #   -mutpb      --> Mutation probability
    #   -ngen       --> Number of generations
    #   -stats      --> Statistics that each cycle must record
    #   -halloffame --> Object that stores the best individual of each cycle
    poblacion_final, logbook = algorithms.eaSimple(
        population,
        toolbox,
        cxpb=probabilidad_cruzamiento,
        mutpb=probabilidad_mutacion,
        ngen=cantidad_de_generaciones,
        stats=stats,
        halloffame=mejores_n_individuos_de_entre_todas_las_generaciones
    )

    logs.crear_logs(mejores_n_individuos_de_entre_todas_las_generaciones, poblacion_final,logbook)
Example #38
                    self.dir = 1
        self.matrix_row = len(self.matrix)
        self.matrix_col = len(self.matrix[0])
        self.matrix_exc = copy.deepcopy(self.matrix)

ant = AntSimulator(600)

pset = gp.PrimitiveSet("MAIN", 0)
pset.addPrimitive(ant.if_food_ahead, 2)
pset.addPrimitive(prog2, 2)
pset.addPrimitive(prog3, 3)
pset.addTerminal(ant.move_forward)
pset.addTerminal(ant.turn_left)
pset.addTerminal(ant.turn_right)

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax)

toolbox = base.Toolbox()

# Attribute generator
toolbox.register("expr_init", gp.genFull, pset=pset, min_=1, max_=2)

# Structure initializers
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr_init)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

def evalArtificialAnt(individual):
    # Transform the tree expression to functional Python code
    routine = gp.compile(individual, pset)
    # Run the generated routine
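    # The snippet is cut off here; in DEAP's canonical artificial ant example the
    # evaluation continues by running the routine in the simulator and returning
    # the amount of food eaten (assumed continuation):
    ant.run(routine)
    return ant.eaten,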
def main1(seed, game, algorithm, group):
    experiment_name = 'dummy_demo'
    if not os.path.exists(experiment_name):
        os.makedirs(experiment_name)

    # Initialize the number of hidden neurons
    n_hidden_neurons = 10

    # initializes simulation in individual evolution mode, for single static enemy.
    env = Environment(experiment_name=experiment_name,
                      enemies=[7, 8],
                      playermode="ai",
                      player_controller=player_controller(n_hidden_neurons),
                      enemymode="static",
                      level=2,
                      multiplemode="yes",
                      speed="fastest")

    # default environment fitness is assumed for experiment

    env.state_to_log()  # checks environment state

    ####   Optimization for controller solution (best genotype-weights for phenotype-network): Genetic Algorithm    ###

    ini = time.time()  # sets time marker

    # genetic algorithm params

    run_mode = 'train'  # train or test

    # number of weights for multilayer with 10 hidden neurons
    n_vars = (env.get_num_sensors() +
              1) * n_hidden_neurons + (n_hidden_neurons + 1) * 5

    #---------------------

    creator.create("FitnessMin", base.Fitness, weights=(-30.0, -30.0))
    creator.create("Individual", list, fitness=creator.FitnessMin)
    toolbox = base.Toolbox()

    # Attribute generator
    #                      define 'attr_bool' to be an attribute ('gene')
    #                      which corresponds to floats sampled uniformly
    #                      from the range [-1, 1]
    toolbox.register("attr_bool", random.uniform, -1, 1)

    # Structure initializers
    #                         define 'individual' to be an individual
    #                         consisting of n_vars 'attr_bool' elements ('genes')
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_bool, n_vars)

    # define the population to be a list of individuals
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # the goal ('fitness') function to be maximized

    # evaluation function that is used in the DEAP algorithm
    def evaluate(x):
        return np.array(list(map(lambda y: simulation(env, y), x))),

    #----------
    # Operator registration
    #----------
    # register the goal / fitness function
    toolbox.register("evaluate", evaluate)

    # register the crossover operator
    toolbox.register("mate", tools.cxTwoPoint)

    # register a mutation operator that perturbs each
    # attribute/gene with Gaussian noise with probability 0.05
    toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.05)

    # operator for selecting individuals for breeding the next
    # generation: tournament selection (the tournament size is
    # passed positionally when toolbox.select is called below)
    toolbox.register("select", tools.selTournament)

    # NOTE: this overrides the FitnessMin created above (DEAP emits a RuntimeWarning)
    creator.create("FitnessMin", base.Fitness, weights=(-100, ))

    #----------

    def simulation(env, x):
        f, p, e, t = env.play(pcont=x)
        return f, p

    # runs simulation
    def main(seed, game, group):
        file_aux = open(str(algorithm) + '_group_' + str(group) + '.txt', 'a')
        file_aux.write(f'\ngame {game} \n')
        file_aux.write('gen, best, mean, std, median, q1, q3, life')
        file_aux.close()

        # fitnesses = np.array([])
        random.seed(seed)

        # create an initial population of 50 individuals (where
        # each individual is a list of floats)
        pop = toolbox.population(n=50)
        pop_array = np.array(pop)

        # CXPB is the probability with which two individuals are crossed
        # (the mutation probability is hard-coded as 0.5 further below)
        CXPB = 0.8

        print("Start of evolution")

        # Evaluates the entire population

        values = evaluate(pop_array)
        values = values[0].tolist()
        fitnesses = []
        lifes = []
        for value in values:
            fitnesses.append(value[0])
            lifes.append(value[1])
        for count, individual in enumerate(fitnesses):

            # Rewrites the fitness value in a way the DEAP algorithm can understand
            fitnesses[count] = (-individual, )

        # Gives individual a fitness value
        for ind, fit in zip(pop, fitnesses):
            ind.fitness.values = fit

        print("  Evaluated %i individuals" % len(pop))

        # Extracting all the fitnesses of the population
        fits = [ind.fitness.values[0] for ind in pop]

        # Variable keeping track of the number of generations
        g = 0
        g_end = 15

        # Saves first generation
        length = len(pop)
        mean = sum(fits) / length * -1
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - abs(mean)**2)**0.5
        q1 = np.percentile(fits, 25) * -1
        median = np.percentile(fits, 50) * -1
        q3 = np.percentile(fits, 75) * -1
        max_life = max(lifes)
        file_aux = open(str(algorithm) + '_group_' + str(group) + '.txt', 'a')
        file_aux.write(
            f'\n{str(g)}, {str(round(min(fits)*-1,6))}, {str(round(mean,6))}, {str(round(std,6))}, {str(round(median,6))}, {str(round(q1,6))}, {str(round(q3,6))}, {str(round(max_life,6))}'
        )
        file_aux.close()

        # Begin the evolution
        while max(fits) < 100 and g < g_end:
            # A new generation
            g = g + 1
            print("-- Generation %i --" % g)

            # Select the next generation individuals
            offspring = toolbox.select(pop, len(pop), 6)

            for i in offspring:
                print(i.fitness.values[0])
            # Clone the selected individuals
            offspring = list(map(toolbox.clone, offspring))

            # Prints fitnesses of the offspring (commented out; has no effect on the algorithm)
            # for i in offspring:
            #     print(i.fitness.values[0])

            # Apply crossover and mutation on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):

                # cross two individuals with probability CXPB
                if random.random() < CXPB:

                    toolbox.mate(child1, child2)

                    # fitness values of the children
                    # must be recalculated later
                    del child1.fitness.values
                    del child2.fitness.values

            for mutant in offspring:
                # mutate an individual with probability 0.5
                if random.random() < 0.5:
                    toolbox.mutate(mutant)
                    del mutant.fitness.values

            # Evaluate the individuals with an invalid fitness
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            pop_array = np.array(invalid_ind)

            values = evaluate(pop_array)
            values = values[0].tolist()
            fitnesses = []
            for value in values:
                fitnesses.append(value[0])
                lifes.append(value[1])

            for count, individual in enumerate(fitnesses):
                fitnesses[count] = (-individual, )

            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            print("  Evaluated %i individuals" % len(invalid_ind))
            # Replace the worst individuals of the offspring with the
            # best individuals of the previous population (elitism)
            amount_switched_individuals = int(len(pop) / 10)
            worst_offspring = deap.tools.selWorst(offspring,
                                                  amount_switched_individuals,
                                                  fit_attr='fitness')
            best_gen = deap.tools.selBest(pop,
                                          amount_switched_individuals,
                                          fit_attr='fitness')

            for count, individual in enumerate(worst_offspring):
                index = offspring.index(individual)
                offspring[index] = best_gen[count]

            # The population is entirely replaced by the offspring (plus best of previous generations)
            pop[:] = offspring
            print(f"There are {len(pop)} individuals in the population ")

            # Gather all the fitnesses in one list and print the stats
            fits = [ind.fitness.values[0] for ind in pop]

            length = len(pop)
            mean = sum(fits) / length * -1
            sum2 = sum(x * x for x in fits)
            std = abs(sum2 / length - mean**2)**0.5
            q1 = np.percentile(fits, 25) * -1
            median = np.percentile(fits, 50) * -1
            q3 = np.percentile(fits, 75) * -1
            max_life = max(lifes)

            # note: fits holds negated fitnesses, so max(fits) corresponds to the
            # minimum original fitness and min(fits) to the maximum
            print("  Min %s" % max(fits))
            print("  Max %s" % min(fits))
            print("  Avg %s" % mean)
            print("  Std %s" % std)

            # saves results for this generation
            file_aux = open(
                str(algorithm) + '_group_' + str(group) + '.txt', 'a')
            file_aux.write(
                f'\n{str(g)}, {str(round(min(fits) *-1,6))}, {str(round(mean,6))}, {str(round(std,6))}, {str(round(median,6))}, {str(round(q1,6))}, {str(round(q3,6))}, {str(round(max_life,6))}'
            )
            file_aux.close()
            best_ind = tools.selBest(pop, 1)[0]
            print("Best individual is %s, %s" %
                  (best_ind, best_ind.fitness.values))
            np.savetxt(
                experiment_name + '/Algorithm_' + algorithm_number +
                '_group_' + group + '/individuals_game_' + str(game) +
                '/game_' + str(game) + '_gen_' + str(g) + '_group_' +
                str(group) + '_Tournement.txt', best_ind)
        print("-- End of (successful) evolution --")

    main(seed, game, group)
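The hand-written generational loop above (tournament selection, pairwise crossover, Gaussian mutation, re-evaluating only the invalidated individuals, then replacing the population) follows the same structure that DEAP's algorithms.eaSimple automates. As a rough comparison only, here is a minimal, self-contained sketch of that pattern on a toy sum-minimization problem; the class names, problem and parameter values below are illustrative and not part of the example above.

import random
import numpy as np
from deap import base, creator, tools, algorithms

# toy problem: minimize the sum of 10 floats drawn from [-1, 1]
creator.create("ToyFitnessMin", base.Fitness, weights=(-1.0,))
creator.create("ToyIndividual", list, fitness=creator.ToyFitnessMin)

toy_toolbox = base.Toolbox()
toy_toolbox.register("attr_float", random.uniform, -1, 1)
toy_toolbox.register("individual", tools.initRepeat, creator.ToyIndividual,
                     toy_toolbox.attr_float, 10)
toy_toolbox.register("population", tools.initRepeat, list, toy_toolbox.individual)
toy_toolbox.register("evaluate", lambda ind: (sum(ind),))
toy_toolbox.register("mate", tools.cxTwoPoint)
toy_toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.05)
toy_toolbox.register("select", tools.selTournament, tournsize=6)

stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("min", np.min)
stats.register("avg", np.mean)

pop = toy_toolbox.population(n=50)
pop, logbook = algorithms.eaSimple(pop, toy_toolbox, cxpb=0.8, mutpb=0.5,
                                   ngen=15, stats=stats, verbose=False)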
Example #40
0
import random

from deap import base
from deap import creator
from deap import tools

creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()


def evaluateInd(individual):
    a = sum(individual)
    b = len(individual)
    return a, 1. / b


toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evaluateInd)

IND_SIZE = 5

toolbox.register("attr_float", random.random)
toolbox.register("individual",
                 tools.initRepeat,
                 creator.Individual,
                 toolbox.attr_float,
                 n=IND_SIZE)
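Example #40 stops after registering the operators and the individual initializer; it never builds a population or evaluates anything. A hedged continuation (none of the lines below are in the original snippet) could look like this:

toolbox.register("population", tools.initRepeat, list, toolbox.individual)

pop = toolbox.population(n=20)
# evaluate the two-objective fitness (sum of genes, 1/length) for every individual
fitnesses = list(map(toolbox.evaluate, pop))
for ind, fit in zip(pop, fitnesses):
    ind.fitness.values = fit

best = tools.selBest(pop, k=1)[0]
print(best, best.fitness.values)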
Example #41
0
def cluster_GA(
    nPool,
    eleNames,
    eleNums,
    eleRadii,
    generations,
    calc,
    filename,
    log_file,
    CXPB=0.5,
    singleTypeCluster=False,
    use_dask=False,
    use_vasp=False,
    al_method=None,
    al_learner_params=None,
    train_config=None,
    optimizer=BFGS,
    use_vasp_inter=False,
    restart=False,
    gen_num=None,
):
    """
    DEAP Implementation of the GIGA Genetic Algorithm for nanoclusters

    nPool : Total number of clusters present in the initial pool
    eleNames : List of element symbols present in the cluster
    eleNums : List of the number of atoms of each element present in the cluster
    eleRadii : List of radii of each element present in the cluster
    generations : Total number of generations to run the genetic algorithm
    calc : The calculator used to perform relaxations (must be an ase calculator object)
    filename : Name of the file to be used to generate ase traj and db files
    log_file : Name of the log file
    CXPB : probability of a crossover operation in a given generation
    singleTypeCluster : Default = False, set to True if only 1 element is present in cluster
    use_dask : Default = False, set to True if using dask (Refer examples on using dask)
    use_vasp : Default = False, set to True if using inbuilt vasp optimizer to run GA code (not supported with active learning)
    al_method : Default = None, accepts values 'online' or 'offline'
    al_learner_params : Default = None, refer examples or https://github.com/ulissigroup/al_mlp for sample set up
    train_config : Default = None, refer examples or https://github.com/ulissigroup/al_mlp for sample set up
    optimizer : Default = BFGS, ase optimizer to be used
    use_vasp_inter : Default = False, whether to use vasp interactive mode or not
    restart : Default = False, set to True to restart the evolution from a saved generation trajectory
    gen_num : Default = None, generation number to restart from when restart is True
    """
    def calculate(atoms):
        """
        Support function to assign the type of minimization to be performed (pure vasp, using ase optimizer or using active learning)
        """
        if al_method is not None:

            atoms_min, parent_calls = minimize_al(
                atoms,
                calc,
                eleNames,
                al_learner_params,
                train_config,
                dataset_parent,
                optimizer,
                al_method,
            )
        else:
            # no active learning: there are no parent-calculator calls to track
            parent_calls = 0
            if use_vasp == True:
                atoms_min = minimize_vasp(atoms, calc)
            else:
                atoms_min = minimize(atoms, calc, optimizer, use_vasp_inter)
        return atoms_min, parent_calls

    if al_method is not None:
        al_method = al_method.lower()

    # Creating DEAP types
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    # Registration of the evolutionary tools in the toolbox
    toolbox = base.Toolbox()
    toolbox.register("poolfill", fillPool, eleNames, eleNums, eleRadii, calc)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.poolfill, 1)
    toolbox.register("evaluate", fitness_func)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Registering mutations and crossover operators
    toolbox.register("mate", mate)
    toolbox.register("mutate_homotop", homotop)
    toolbox.register("mutate_rattle", rattle_mut)
    toolbox.register("mutate_rotate", rotate_mut)
    toolbox.register("mutate_twist", twist)
    toolbox.register("mutate_tunnel", tunnel)
    toolbox.register("mutate_partialinv", partialInversion)
    toolbox.register("mutate_skin", skin)
    toolbox.register("mutate_changecore", changeCore)

    # Registering selection operator
    toolbox.register("select", tools.selTournament)

    #Initialize the parent dataset
    dataset_parent = []
    # Creating a list of cluster atom objects from population
    if not restart:
        population = toolbox.population(n=nPool)
        pop_list = []
        for individual in population:
            pop_list.append(individual[0])
        write('init_pop_before_relax.traj', pop_list)

        if use_dask == True:
            # distribute and run the calculations (requires dask and needs to be set up correctly)
            clus_bag = db.from_sequence(pop_list, partition_size=1)
            clus_bag_computed = clus_bag.map(calculate)
            lst_clus_min = clus_bag_computed.compute()

        else:
            lst_clus_min = list(map(calculate, pop_list))

        for i, p in enumerate(population):
            p[0] = lst_clus_min[i][0]

        init_pop_list_after_relax = []
        for individual in population:
            init_pop_list_after_relax.append(individual[0])
        write('init_pop_after_relax.traj', init_pop_list_after_relax)
        with open(log_file, "a+") as fh:
            fh.write(
                f'Total clusters in the initial pool after relaxation: {len(population)}'
                "\n")

        #parent_calls list if online learner
        total_parent_calls = []
        parent_calls_initial_pool = []
        for i in range(len(lst_clus_min)):
            parent_calls_initial_pool.append(lst_clus_min[i][1])
        total_parent_calls.extend(parent_calls_initial_pool)

        with open(log_file, "a+") as fh:
            fh.write(
                f'parent calls after initial pool relaxation: {parent_calls_initial_pool}'
                '\n')
            fh.write(
                f'Total parent calls after initial pool relaxation: {sum(parent_calls_initial_pool)}'
                '\n')

        # Fitnesses (or Energy) values of the initial random population
        fitnesses = list(map(toolbox.evaluate, population))

        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        #Removing bad geometries
        population_filter = []
        for i, p in enumerate(population):
            if checkBonded(p[0]) == True:
                if checkOverlap(p[0]) == False:
                    population_filter.append(p)
        population = copy.deepcopy(population_filter)

        init_pop_list_after_filter = []
        for individual in population:
            init_pop_list_after_filter.append(individual[0])
        write('init_pop_after_filter.traj', init_pop_list_after_filter)
        with open(log_file, "a+") as fh:
            fh.write(
                f'Total clusters in the initial pool after filtering: {len(population)}'
                "\n")

        fitnesses_init_pool = list(map(toolbox.evaluate, population))
        with open(log_file, "a+") as fh:
            fh.write("Energies (fitnesses) of the initial pool" "\n")
            for value in fitnesses_init_pool:
                fh.write("{} \n".format(value[0]))

        # Evolution of the Genetic Algorithm
        with open(log_file, "a+") as fh:
            fh.write("\n")
            fh.write("Starting Evolution" "\n")

        g = 0  # Generation counter

        init_pop_db = ase.db.connect("init_pop_{}.db".format(filename))
        for cl in population:
            write_to_db(init_pop_db, cl[0])

        bi = []

    else:
        population = toolbox.population(n=nPool)
        restart_gen = 'best_n_clus_after_gen' + str(gen_num) + '.traj'
        restart_traj = Trajectory(restart_gen)
        for i, p in enumerate(population):
            p[0] = restart_traj[i]
        # Fitnesses (or Energy) values of the restart population from the gen_num
        fitnesses = list(map(toolbox.evaluate, population))

        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
        fitnesses_restart_pool = list(map(toolbox.evaluate, population))

        # Restarting the Evolution of the Genetic Algorithm from Restart Trajectory
        with open(log_file, "a+") as fh:
            fh.write("\n")
            fh.write("Restarting  Evolution" "\n")
            fh.write("Energies (fitnesses) of the Restarted pool" "\n")
            for value in fitnesses_restart_pool:
                fh.write("{} \n".format(value[0]))
            fh.write("\n")

        g = gen_num  # Generation counter -Restart gen number
        #parent_calls list if online learner
        total_parent_calls = []
        bi = []

        old_final_pop_db = "./final_pop_{}.db".format(filename)
        copy_final_pop_db = "final_pop_{}_{}.db".format(filename, gen_num)
        if os.path.exists(old_final_pop_db):
            subprocess.call(
                ['mv', old_final_pop_db, 'old_' + copy_final_pop_db])

    ##### Evolution of Generations ######

    while g < generations:
        mutType = None
        muttype_list = []
        g = g + 1
        with open(log_file, "a+") as fh:
            fh.write("{} {} \n".format("Generation", g))

        cm_pop = []
        if random.random() < CXPB:  # Crossover Operation
            mutType = "crossover"
            with open(log_file, "a+") as fh:
                fh.write("{} {} \n".format("mutType", mutType))

            # Crossover operation step.
            # The child clusters will be checked for bonding and similarity
            # between other child clusters.
            loop_count = 0
            while (
                    loop_count != 200
            ):  # Perform 200 possible crossovers or until unique crossovers match pool size
                clusters = toolbox.select(population, 2, 1)
                muttype_list.append(mutType)
                parent1 = copy.deepcopy(clusters[0])
                parent2 = copy.deepcopy(clusters[1])
                fit1 = clusters[0].fitness.values
                (f1, ) = fit1
                fit2 = clusters[1].fitness.values
                (f2, ) = fit2
                child_clus = toolbox.mate(parent1[0], parent2[0], f1, f2)
                parent1[0] = child_clus

                diff_list = []
                if checkBonded(parent1[0]) == True:
                    if loop_count == 0:
                        cm_pop.append(parent1)
                    else:
                        for c, cluster in enumerate(cm_pop):
                            diff = checkSimilar(cluster[0], parent1[0])
                            diff_list.append(diff)

                        if all(diff_list) == True:
                            cm_pop.append(parent1)
                loop_count = loop_count + 1

                if len(cm_pop) == nPool:
                    break

        else:  # Mutation Operation
            mutType = "mutations"
            with open(log_file, "a+") as fh:
                fh.write("{} {} \n".format("mutType", mutType))

                # Mutation operation step
                # Each cluster in the population will undergo a randomly chosen mutation step
                # Mutated new clusters will be checked for bonding and similarity with other new clusters
            for m, mut in enumerate(population):
                mutant = copy.deepcopy(mut)
                if singleTypeCluster:
                    mutType = random.choice([
                        "rattle",
                        "rotate",
                        "twist",
                        "partialinv",
                        "tunnel",
                        "skin",
                        "changecore",
                    ])
                else:
                    mutType = random.choice([
                        "rattle",
                        "rotate",
                        "homotop",
                        "twist",
                        "partialinv",
                        "tunnel",
                        "skin",
                        "changecore",
                    ])

                muttype_list.append(mutType)

                if mutType == "homotop":
                    mutant[0] = toolbox.mutate_homotop(mutant[0])
                if mutType == "rattle":
                    mutant[0] = toolbox.mutate_rattle(mutant[0])
                if mutType == "rotate":
                    mutant[0] = toolbox.mutate_rotate(mutant[0])
                if mutType == "twist":
                    mutant[0] = toolbox.mutate_twist(mutant[0])
                if mutType == "tunnel":
                    mutant[0] = toolbox.mutate_tunnel(mutant[0])
                if mutType == "partialinv":
                    mutant[0] = toolbox.mutate_partialinv(mutant[0])
                if mutType == "skin":
                    mutant[0] = toolbox.mutate_skin(mutant[0])
                if mutType == "changecore":
                    mutant[0] = toolbox.mutate_changecore(mutant[0])

                diff_list = []
                if checkBonded(mutant[0]) == True:
                    for c, cluster in enumerate(cm_pop):
                        diff = checkSimilar(cluster[0], mutant[0])
                        diff_list.append(diff)

                    if all(diff_list) == True:
                        cm_pop.append(mutant)

            with open(log_file, "a+") as fh:
                fh.write("{} {} \n".format("mutType_list", muttype_list))

        mut_new_lst = []
        for mut in cm_pop:
            mut_new_lst.append(mut[0])
        write('mut_before_relax_gen' + str(g) + '.traj', mut_new_lst)

        # DASK Parallel relaxation of the crossover child/mutated clusters
        if use_dask == True:
            mut_bag = db.from_sequence(mut_new_lst, partition_size=1)
            mut_bag_computed = mut_bag.map(calculate)
            mut_new_lst_min = mut_bag_computed.compute()

        else:
            mut_new_lst_min = list(map(calculate, mut_new_lst))

        for i, mm in enumerate(cm_pop):
            mm[0] = mut_new_lst_min[i][0]

        mut_list_after_relax = []
        for individual in cm_pop:
            mut_list_after_relax.append(individual[0])
        write('mut_after_relax_gen' + str(g) + '.traj', mut_list_after_relax)
        with open(log_file, "a+") as fh:
            fh.write(
                f'Total clusters relaxed in  Generation {g}: {len(cm_pop)}'
                "\n")

        #parent calls list if online learner
        parent_calls_mut_list = []
        for i in range(len(mut_new_lst_min)):
            parent_calls_mut_list.append(mut_new_lst_min[i][1])

        total_parent_calls.extend(parent_calls_mut_list)

        with open(log_file, "a+") as fh:
            fh.write(
                f'Parent calls list after relaxations in this generation: {parent_calls_mut_list} '
                '\n')
            fh.write(
                f'Total Parent calls  specific to this  generation: {sum(parent_calls_mut_list)} '
                '\n')
            fh.write(
                f'Total Parent calls  up  to this  generation: {sum(total_parent_calls)} '
                '\n')

        fitnesses_mut = list(map(toolbox.evaluate, cm_pop))

        for ind, fit in zip(cm_pop, fitnesses_mut):
            ind.fitness.values = fit

        new_population = copy.deepcopy(population)
        # Relaxed clusters will be checked for bonded and similarity with the other
        # clusters in the population. If dissimilar, they will be added to the new population.
        for cm1, cmut1 in enumerate(cm_pop):
            new_diff_list = []
            if checkBonded(cmut1[0]) == True:
                if checkOverlap(cmut1[0]) == False:
                    for c2, cluster1 in enumerate(population):
                        diff = checkSimilar(cluster1[0], cmut1[0])
                        new_diff_list.append(diff)
                    if all(new_diff_list) == True:
                        new_population.append(cmut1)
                    else:
                        pass

        mut_list_after_filter = []
        for individual in new_population:
            mut_list_after_filter.append(individual[0])
        write('mut_after_filter_gen' + str(g) + '.traj', mut_list_after_filter)
        with open(log_file, "a+") as fh:
            fh.write(
                f'Total clusters filtered out in Generation {g}: {len(cm_pop) + len(population) - len(new_population)}'
                "\n")
            fh.write(
                f'Total clusters in the pool after filtering in Generation {g}: {len(new_population)}'
                "\n")

        fitnesses_pool = list(map(toolbox.evaluate, new_population))

        with open(log_file, "a+") as fh:
            fh.write(
                "Energies (fitnesses) of the present pool before the best nPool are selected"
                "\n")
            for value in fitnesses_pool:
                fh.write("{} \n".format(value[0]))

        # Selecting the lowest energy nPool clusters from the new_population
        len_new_pop = len(new_population)
        if len_new_pop > nPool:
            best_n_clus = tools.selWorst(new_population, nPool)
        else:
            best_n_clus = new_population

        best_n_clus_list = []
        for individual in best_n_clus:
            best_n_clus_list.append(individual[0])
        write('best_n_clus_after_gen' + str(g) + '.traj', best_n_clus_list)

        population = best_n_clus

        best_clus = tools.selWorst(population, 1)[0]
        with open(log_file, "a+") as fh:
            fh.write("{} {} \n".format("Lowest energy for this generation is",
                                       best_clus.fitness.values[0]))
            fh.write("\n Best cluster in this generation: \n")
            for atom in best_clus[0]:
                fh.write("{} {:12.8f} {:12.8f} {:12.8f} \n".format(
                    atom.symbol, atom.x, atom.y, atom.z))
            fh.write("\n")

        bi.append(best_clus[0])
        if g == 1:
            writer = TrajectoryWriter(filename + "_best.traj",
                                      mode="w",
                                      atoms=best_clus[0])
            writer.write()
        else:
            writer = TrajectoryWriter(filename + "_best.traj",
                                      mode="a",
                                      atoms=best_clus[0])
            writer.write()

    final_pop_db = ase.db.connect("final_pop_{}.db".format(filename))
    for clus in population:
        write_to_db(final_pop_db, clus[0])

    with open(log_file, "a+") as fh:
        fh.write("Global Minimum after {} Generations \n".format(g))
        for atom in best_clus[0]:
            fh.write("{} {:12.8f} {:12.8f} {:12.8f} \n".format(
                atom.symbol, atom.x, atom.y, atom.z))
    # Return the list of best clusters in every generations and the overall best cluster
    return bi, best_clus[0]
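A hypothetical invocation of cluster_GA, assuming the surrounding module's helpers (fillPool, fitness_func, the mutation and crossover operators) are importable alongside it, and using ASE's EMT calculator for a pure Cu cluster; the element list, atom count, pool size and generation count below are illustrative only.

from ase.calculators.emt import EMT
from ase.data import atomic_numbers, covalent_radii
from ase.optimize import BFGS

eleNames = ["Cu"]
eleNums = [13]
eleRadii = [covalent_radii[atomic_numbers["Cu"]]]

bi, best_cluster = cluster_GA(
    nPool=10,
    eleNames=eleNames,
    eleNums=eleNums,
    eleRadii=eleRadii,
    generations=20,
    calc=EMT(),
    filename="cu13",
    log_file="cu13_ga.log",
    CXPB=0.5,
    singleTypeCluster=True,
    optimizer=BFGS,
)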
Example #42
0
    def fit(self, targetData, inputData, maxLag, windowLimits, delta,
            numberCrossVal, ngen, popSize):
        # Get some parameters
        test_length = numberCrossVal
        minWindow = windowLimits[0]
        maxWindow = windowLimits[1]
        # =============================================================================
        # SET OPTIMIZATION PARAMETERS
        # =============================================================================
        # Store the parameters on self so the objective function can access them
        self.gene_length = inputData.shape[1] + 1 + 1  #All variables and window
        self.windowRange = np.round(
            np.linspace(minWindow, maxWindow, num=1 + maxLag)).astype(int)
        self.targetData = targetData
        self.inputData = inputData
        self.test_length = numberCrossVal
        self.maxLag = maxLag
        self.popSize = popSize
        self.delta = delta

        creator.create("FitnessMax", base.Fitness, weights=(-1.0, ))  # negative weight: minimization despite the 'Max' name
        creator.create("Individual",
                       array.array,
                       typecode='b',
                       fitness=creator.FitnessMax)

        toolbox = base.Toolbox()

        # Attribute generator
        toolbox.register("attr_bool", random.randint, 0, maxLag)

        toolbox.register("evaluate", self.objectiveFunction)

        toolbox.register("mate", tools.cxTwoPoint)
        toolbox.register("mutate",
                         tools.mutFlipBit,
                         indpb=1 / self.gene_length)
        #toolbox.register("select", tools.selBest, k=5)
        toolbox.register("select", tools.selTournament, tournsize=5)
        toolbox.register("migrate",
                         tools.migRing,
                         k=5,
                         selection=tools.selBest,
                         replacement=tools.selRandom)

        toolbox.register("variation",
                         algorithms.varAnd,
                         toolbox=toolbox,
                         cxpb=0.7,
                         mutpb=0.3)

        # =============================================================================
        # ============================ RUN IT =========================================

        # Structure initializers GA
        toolbox.register("individual_guess", self.initIndividual,
                         creator.Individual)
        toolbox.register("population_guess", self.initPopulation, list,
                         toolbox.individual_guess)

        pop = toolbox.population_guess(self.maxLag, self.popSize,
                                       self.gene_length)
        toolbox.register("population_guess", self.initPopulation, list,
                         creator.Individual)
        hof = tools.HallOfFame(1)
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("std", np.std)
        stats.register("min", np.min)
        stats.register("max", np.max)

        pop, log = algorithms.eaSimple(pop,
                                       toolbox,
                                       cxpb=0.5,
                                       mutpb=0.2,
                                       ngen=ngen,
                                       stats=stats,
                                       halloffame=hof,
                                       verbose=True)

        best_individuals = tools.selBest(pop, k=1)

        for bi in best_individuals:
            # Decode GA solution to integer for window_size and num_units
            best_windowSize = self.windowRange[bi[0]]
            best_lagConf = np.array(bi[1:], dtype=np.int32)
            print('\nDELTA: ', delta)
            print('\nBest Window Size: ', best_windowSize,
                  ', Best Lag Config: ', best_lagConf)
            target, forecast = self.predict(targetData, inputData,
                                            best_lagConf, best_windowSize,
                                            delta, numberCrossVal)

        cost = self.ssigmadelta(target, forecast, delta)
        print('Validation s/sd: ', cost, '\n')

        self.lagConf = best_lagConf
        self.window = best_windowSize

        return best_windowSize, best_lagConf
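The initIndividual and initPopulation helpers used above are methods of the same class and are not shown. A minimal sketch consistent with how they are registered and called (the real implementations are unknown; these are assumptions, written as plain functions, so the class versions would take self as the first argument) could be:

import random

def initIndividual(icls, content):
    # wrap a pre-built gene list in the DEAP Individual class
    return icls(content)

def initPopulation(pcls, ind_init, maxLag, popSize, gene_length):
    # one window index plus one lag per input variable, each drawn from [0, maxLag]
    return pcls(
        ind_init([random.randint(0, maxLag) for _ in range(gene_length)])
        for _ in range(popSize)
    )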
    def get_toolbox(self, predictors, response, pset, variable_type_indices,
                    variable_names):
        subset_size = int(
            math.floor(predictors.shape[0] * self.subset_proportion))
        creator.create("ErrorAgeSizeComplexity",
                       base.Fitness,
                       weights=(-1.0, -1.0, -1.0, -1.0))
        creator.create("Individual",
                       sp.SimpleParametrizedPrimitiveTree,
                       fitness=creator.ErrorAgeSizeComplexity,
                       age=int)
        toolbox = base.Toolbox()
        toolbox.register(
            "expr", sp.generate_parametrized_expression,
            partial(gp.genHalfAndHalf,
                    pset=pset,
                    min_=self.min_depth_init,
                    max_=self.max_depth_init), variable_type_indices,
            variable_names)
        toolbox.register("individual", tools.initIterate, creator.Individual,
                         toolbox.expr)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)
        toolbox.register("compile", gp.compile, pset=pset)
        toolbox.register("select", tools.selRandom)
        toolbox.register("koza_node_selector",
                         operators.internally_biased_node_selector,
                         bias=self.internal_node_selection_bias)
        self.history = tools.History()
        toolbox.register("mate",
                         operators.one_point_xover_biased,
                         node_selector=toolbox.koza_node_selector)
        toolbox.decorate(
            "mate",
            operators.static_limit(key=operator.attrgetter("height"),
                                   max_value=self.max_height))
        toolbox.decorate(
            "mate", operators.static_limit(key=len, max_value=self.max_size))
        toolbox.decorate("mate", self.history.decorator)
        toolbox.register(
            "grow", sp.generate_parametrized_expression,
            partial(gp.genGrow,
                    pset=pset,
                    min_=self.min_gen_grow,
                    max_=self.max_gen_grow), variable_type_indices,
            variable_names)
        toolbox.register("mutate",
                         operators.mutation_biased,
                         expr=toolbox.grow,
                         node_selector=toolbox.koza_node_selector)
        toolbox.decorate(
            "mutate",
            operators.static_limit(key=operator.attrgetter("height"),
                                   max_value=self.max_height))
        toolbox.decorate(
            "mutate", operators.static_limit(key=len, max_value=self.max_size))
        toolbox.decorate("mutate", self.history.decorator)

        def generate_randoms(individuals):
            return individuals

        toolbox.register("generate_randoms",
                         generate_randoms,
                         individuals=[
                             toolbox.individual()
                             for i in range(self.num_randoms)
                         ])
        toolbox.decorate("generate_randoms", self.history.decorator)
        toolbox.register("error_func", self.error_function)
        expression_dict = cachetools.LRUCache(maxsize=1000)
        subset_selection_archive = subset_selection.RandomSubsetSelectionArchive(
            frequency=self.subset_change_frequency,
            predictors=predictors,
            response=response,
            subset_size=subset_size,
            expression_dict=expression_dict)
        evaluate_function = partial(
            subset_selection.fast_numpy_evaluate_subset,
            get_node_semantics=sp.get_node_semantics,
            context=pset.context,
            subset_selection_archive=subset_selection_archive,
            error_function=toolbox.error_func,
            expression_dict=expression_dict)
        toolbox.register("evaluate_error", evaluate_function)
        toolbox.register("assign_fitness",
                         afpo.assign_age_fitness_size_complexity)
        self.multi_archive = utils.get_archive(100)
        if self.log_mutate:
            mutation_stats_archive = archive.MutationStatsArchive(
                evaluate_function)
            toolbox.decorate(
                "mutate",
                operators.stats_collector(archive=mutation_stats_archive))
            self.multi_archive.archives.append(mutation_stats_archive)
        self.multi_archive.archives.append(subset_selection_archive)
        self.mstats = reports.configure_parametrized_inf_protected_stats()
        self.pop = toolbox.population(n=self.pop_size)
        toolbox.register("run",
                         afpo.pareto_optimization,
                         population=self.pop,
                         toolbox=toolbox,
                         xover_prob=self.xover_prob,
                         mut_prob=self.mut_prob,
                         ngen=self.ngen,
                         tournament_size=self.tournament_size,
                         num_randoms=self.num_randoms,
                         stats=self.mstats,
                         archive=self.multi_archive,
                         calc_pareto_front=False,
                         verbose=False,
                         reevaluate_population=True,
                         history=self.history,
                         stop_time=self.stop_time)
        toolbox.register("save", reports.save_log_to_csv)
        toolbox.decorate("save", reports.save_archive(self.multi_archive))
        return toolbox
Example #44
0
def GAPoolingHeuristic(failure_rates, service_rates, holding_costs,
                       penalty_cost, skill_cost, machine_cost, numSKUs,
                       minCluster, maxCluster):

    # 1 is for maximization -1 for minimization
    # Minimize total cost
    creator.create("FitnessMax", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    def generateIndividual(numSKUs, minCluster, maxCluster):
        # Generate an initial individual whose genes lie in the given min-max cluster range

        return creator.Individual(
            np.random.choice(np.arange(minCluster, maxCluster + 1), numSKUs))

    toolbox = base.Toolbox()

    # Attribute generator
    #                      each attribute ('gene') is an integer cluster index
    #                      sampled uniformly from the range [minCluster, maxCluster]

    #toolbox.register("attr_bool", random.randint, 1, len(failure_rates))

    # Structure initializers
    #                         define 'individual' to be an individual
    #                         consisting of numSKUs genes (one cluster index per SKU)
    toolbox.register("individual", generateIndividual, numSKUs, minCluster,
                     maxCluster)

    # define the population to be a list of individuals
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # the goal ('fitness') function to be minimized (total cost);
    # the objective function calls the pooling optimizer with the
    # values it needs

    #def evalOneMax(individual):
    #    return sum(individual),

    #----------
    # Operator registration
    #----------
    # register the goal / fitness function
    toolbox.register("evaluate", evalOneMax, failure_rates, service_rates,
                     holding_costs, penalty_cost, skill_cost, machine_cost)

    # register the crossover operator
    toolbox.register("mate", tools.cxOnePoint)

    # register the custom mutation operator, which switches a
    # gene to another cluster
    #
    toolbox.register("mutate", swicthtoOtherMutation)
    #toolbox.register("mutate", swicthtoOtherMutation)

    # operator for selecting individuals for breeding the next
    # generation: each individual of the current generation
    # is replaced by the 'fittest' (best) of ten individuals
    # drawn randomly from the current generation.
    toolbox.register("select", tools.selTournament, tournsize=10)

    #----------

    #def main():
    random.seed(64)

    # create an initial population of 100 individuals (where
    # each individual is a list of integers)
    pop = toolbox.population(n=100)

    # CXPB  is the probability with which two individuals
    #       are crossed
    #
    # MUTPB is the probability for mutating an individual
    CXPB, MUTPB = 0.7, 0.10

    #ADD Hall of Fame for the next version !!!
    #hof = tools.HallOfFame(1)
    #print("Start of evolution")

    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    #print("  Evaluated %i individuals" % len(pop))

    # Extracting all the fitnesses of the population
    fits = [ind.fitness.values[0] for ind in pop]

    # Variable keeping track of the number of generations
    g = 0

    # Begin the evolution
    #while max(fits) < 100 and g < 100:
    while g < 50:
        # A new generation
        g = g + 1
        #print("-- Generation %i --" % g)

        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):

            # cross two individuals with probability CXPB
            if random.random() < CXPB:
                toolbox.mate(child1, child2)

                # fitness values of the children
                # must be recalculated later
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:

            # mutate an individual with probability MUTPB
            if random.random() < MUTPB:
                mutant = toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        #print("  Evaluated %i individuals" % len(invalid_ind))

        # The population is entirely replaced by the offspring
        pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        #length = len(pop)
        #mean = sum(fits) / length
        #sum2 = sum(x*x for x in fits)
        #std = abs(sum2 / length - mean**2)**0.5

        print("  Min %s" % min(fits))

    #    print("  Max %s" % max(fits))
    #    print("  Avg %s" % mean)
    #    print("  Std %s" % std)

    #print("-- End of (successful) evolution --")

    best_ind = tools.selBest(pop, 1)[0]
    #print("Best individual is %s, %s" % (individual2cluster(best_ind), best_ind.fitness.values))

    return best_ind.fitness.values, best_ind
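Because toolbox.register binds extra positional arguments ahead of the individual, the registered evaluate will be called as evalOneMax(failure_rates, service_rates, holding_costs, penalty_cost, skill_cost, machine_cost, individual). The real evalOneMax calls the pooling optimizer and is not shown; a placeholder with the matching call signature (illustrative only) would be:

def evalOneMax(failure_rates, service_rates, holding_costs, penalty_cost,
               skill_cost, machine_cost, individual):
    # placeholder objective: the real version computes the total pooling cost
    # for the clustering encoded by `individual`
    total_cost = float(sum(individual))
    return total_cost,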
Example #45
0
def main():
    # create types
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    # initialize population containing random individuals
    toolbox = base.Toolbox()
    toolbox.register("attribute", random.random)
    toolbox.register("individual",
                     tools.initRepeat,
                     creator.Individual,
                     toolbox.attribute,
                     n=IND_SIZE)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # initialize operators and evaluation function
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.1)
    toolbox.register("select", tools.selTournament, tournsize=3)
    toolbox.register("evaluate", evaluate)

    # create history object, which is used to track the genealogy of the population during the evolution
    history = tools.History()
    # decorate the variation operator so that they can be used to retrieve a history
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    # create an initial population
    pop = toolbox.population(n=POB_SIZE)
    history.update(pop)
    # create a list of statistics to be retrieved during the evolution process (they are shown in the logbook)
    stats = tools.Statistics()
    stats.register('min', min)
    # create a hall of fame, which contains the best individual/s that ever lived in the population during the evolution
    hof = tools.HallOfFame(maxsize=HOF_SIZE)

    # simplest evolutionary algorithm as presented in chapter 7 of Back, Fogel and Michalewicz, “Evolutionary Computation 1 : Basic Algorithms and Operators”, 2000.
    final_population, logbook = algorithms.eaSimple(population=pop,
                                                    toolbox=toolbox,
                                                    cxpb=CXPB,
                                                    mutpb=MUTPB,
                                                    ngen=NGEN,
                                                    stats=stats,
                                                    halloffame=hof,
                                                    verbose=True)
    # output results of the evolutionary algorithm
    print('*' * 100)
    print('FINAL POPULATION\n')
    print(final_population)
    print('*' * 100)
    print('HALL OF FAME\n')
    print(hof)
    print('*' * 100)
    print('BEST INDIVIDUAL')
    print(hof[0])
    print('\nEVALUATION')
    print(evaluate(hof[0]))

    # draw a digraph representing the population evolution during that has taken place
    graph = networkx.DiGraph(history.genealogy_tree)
    graph = graph.reverse()  # Make the graph top-down
    colors = [toolbox.evaluate(history.genealogy_history[i])[0] for i in graph]
    networkx.draw(graph, node_color=colors)
    plt.show()
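Example #45 depends on module-level names that are not shown: IND_SIZE, POB_SIZE, HOF_SIZE, CXPB, MUTPB, NGEN, an evaluate function, plus networkx and matplotlib. One set of definitions that would make it runnable (the values are arbitrary, and the sphere function is just a stand-in for the real objective) is:

import random
import networkx
import matplotlib.pyplot as plt
from deap import base, creator, tools, algorithms

IND_SIZE = 10           # genes per individual
POB_SIZE = 50           # population size
HOF_SIZE = 1            # hall-of-fame size
CXPB, MUTPB = 0.5, 0.2  # crossover / mutation probabilities
NGEN = 30               # number of generations

def evaluate(individual):
    # sphere function: minimize the sum of squared genes
    return sum(x * x for x in individual),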
Example #46
0
import argparse
import math
import gym
from my_gym_envs.mujoco import *
import operator
import random

import numpy

from deap import base
from deap import benchmarks
from deap import creator
from deap import tools
from CPG_core.quadruped_osc.oscillator_5 import oscillator_nw
from scoop import futures

creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
creator.create("Particle",
               list,
               fitness=creator.FitnessMax,
               speed=list,
               smin=None,
               smax=None,
               best=None)

parser = argparse.ArgumentParser(description='DeepPILCO')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument(
    '--env_name', type=str, default='CellrobotEnv-v0'
)  #  Ant2-v2  HalfCheetah-v2  ArmReacherEnv-v0  Hopper2-v2: Swimmer2-v2
parser.add_argument('--pop_size', type=int, default=8)
parser.add_argument('--max_gen', type=int, default=1000)
Example #47
0
# value pset:
val_pset = gp.PrimitiveSet("MAIN", 8)
val_pset.addPrimitive(operator.add, 2)
val_pset.addPrimitive(operator.mul, 2)
val_pset.renameArguments(ARG0='ones_diff_rows')
val_pset.renameArguments(ARG1='ones_diff_cols')
val_pset.renameArguments(ARG2='zeros_diff_rows')
val_pset.renameArguments(ARG3='zeros_diff_cols')
val_pset.renameArguments(ARG4='compare_blocks_rows')
val_pset.renameArguments(ARG5='compare_blocks_cols')
val_pset.renameArguments(ARG6='max_row_clue')
val_pset.renameArguments(ARG7='max_col_clue')
val_pset.addEphemeralConstant("rand101_1", lambda: np.random.randint(0, 100))

# creator stuff:
creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
creator.create("ValueTree", gp.PrimitiveTree, pset=val_pset)
creator.create("ConditionTree", gp.PrimitiveTree, pset=cond_pset)
creator.create("Individual",
               DoubleTreeBasedIndividual,
               fitness=creator.FitnessMax)

# nonograms = load_unsolved_nonograms_from_file(path=pickle_unsolved_file_path)

# def _make_condition_tree_pset():
#     cond_pset = gp.PrimitiveSetTyped("MAIN", [float, float, float, float, float, float], bool)
#     cond_pset.addPrimitive(operator.__and__, [bool, bool], bool)
#     cond_pset.addPrimitive(operator.__or__, [bool, bool], bool)
#     cond_pset.addPrimitive(operator.le, [float, float], bool)
#     cond_pset.addPrimitive(operator.ge, [float, float], bool)
#     cond_pset.addTerminal(True, bool)
        'pls_standard_regression_coefficients.csv'
    )  # save to a csv file; note that an existing file with the same name will be overwritten

# structure generation
main_molecules = [
    molecule for molecule in Chem.SmilesMolSupplier(
        file_name_of_main_fragments, delimiter='\t', titleLine=False)
    if molecule is not None
]
fragment_molecules = [
    molecule for molecule in Chem.SmilesMolSupplier(
        file_name_of_sub_fragments, delimiter='\t', titleLine=False)
    if molecule is not None
]

creator.create('FitnessMax', base.Fitness,
               weights=(1.0, ))  # for minimization, set weights as (-1.0,)
creator.create('Individual', list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
min_boundary = np.zeros(len(fragment_molecules) + 1)
max_boundary = np.ones(len(fragment_molecules) + 1) * 1.0


def create_ind_uniform(min_boundary, max_boundary):
    index = []
    for min_val, max_val in zip(min_boundary, max_boundary):
        index.append(random.uniform(min_val, max_val))
    return index


toolbox.register('create_ind', create_ind_uniform, min_boundary, max_boundary)
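The fragment stops after registering create_ind. The usual next step in this pattern, wrapping the generator with initIterate and initRepeat, is shown below as an assumption (these two lines are not in the source):

toolbox.register('individual', tools.initIterate, creator.Individual,
                 toolbox.create_ind)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)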
pset.addPrimitive(sin, 1)
pset.addPrimitive(protected_sqrt, 1)
pset.addPrimitive(protected_pow, 2)
pset.addPrimitive(exp, 1)
pset.addPrimitive(protected_ln, 1)
pset.addPrimitive(protected_log, 1)

pset.renameArguments(ARG0="y_true")  # y
pset.renameArguments(ARG1="y_pred")  # yhat

pset.addEphemeralConstant("rand_int_-6_6",
                          lambda: np.float32(random.randint(-6, 6)))
pset.addEphemeralConstant("rand_float_-6_6",
                          lambda: np.float32(random.uniform(-6, 6)))

creator.create("FitnessMin", base.Fitness,
               weights=(1.0, ))  # DEAP requires that weights be a tuple.
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=3)
toolbox.register("individual", tools.initIterate, creator.Individual,
                 toolbox.expr)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("compile", gp.compile, pset=pset)


def is_invalid(individual):
    if ("y_true" in str(individual)) and ("y_pred" in str(individual)):
        return False
    return True
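is_invalid flags trees that do not reference both y_true and y_pred. One illustrative way to use it (not from the source) is to keep resampling until the initial population contains only valid trees:

def valid_population(n, max_tries=10000):
    # keep sampling trees until n of them use both y_true and y_pred
    pop = []
    tries = 0
    while len(pop) < n and tries < max_tries:
        ind = toolbox.individual()
        if not is_invalid(ind):
            pop.append(ind)
        tries += 1
    return pop

pop = valid_population(100)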
Example #50
0
    def run_optimization_GA(self):

        # Shape of optimization parameters
        OPT_SHAPE = (len(self.opt_vec))

        # flattening of optimization parameters (size of an individual genome)
        IND_SIZE = np.prod(OPT_SHAPE)

        # population size for parameter optimization
        # 4 * # attributes per individual
        POP_SIZE = IND_SIZE * 4

        # number of islands (subpopulations that evolve independently until a migration)
        NISLANDS = 3

        # set max number of generations to run for
        NGEN = 60

        # Migrations frequency
        MIG_FREQ = 20

        # Evolution strategy variables
        MIN_VALUE = 0.0  # individual attribute min
        MAX_VALUE = 7.0  # individual attribute max
        MIN_STRATEGY = 0.0  # min value of strength of mutation
        MAX_STRATEGY = 1.5  # max value of strength of mutation

        # If we want to run optimization in parallel, all information must be accessed
        # through picklable data types in python
        #ffobj.optimization_shape=(ffobj.guest.ncomp, ffobj.grid.ncomp, ffobj.model.num_params)
        #pickled = convert_ffobj_to_dict(ffobj)

        opt_weights = (-1.0, )

        creator.create("FitnessMin", base.Fitness, weights=opt_weights)
        creator.create("Individual",
                       list,
                       fitness=creator.FitnessMin,
                       strategy=None)
        creator.create("Strategy", list)

        toolbox = base.Toolbox()

        # function calls to chromosome initialization (random vs intelligent assignment)
        #toolbox.register("rand_float", np.random.uniform)
        #toolbox.register("assign_guess", self.assign_UFF_starting)

        # create individual initialization method (random vs intelligent assignment)
        toolbox.register("individual", self.generateES, self.opt_vec,
                         creator.Individual, creator.Strategy, IND_SIZE,
                         MIN_VALUE, MAX_VALUE, MIN_STRATEGY, MAX_STRATEGY)
        #toolbox.register("individual", toolbox.assign_guess, creator.Individual)

        # objective function for this minimization
        # toolbox.register("evaluate", self.deap_multi_evalFitness)
        toolbox.register("evaluate", self.construct_curr_UC_GA)

        # define evolution strategies
        toolbox.register("mate", tools.cxESBlend, alpha=0.5)
        toolbox.decorate(
            "mate",
            self.checkStrategy(MIN_VALUE, MAX_VALUE, MAX_STRATEGY,
                               MAX_STRATEGY))

        ###toolbox.register("mutate", tools.mutPolynomialBounded, eta = 0.0001, low = 0.0, up = 10000.0, indpb = 0.1)
        toolbox.register("mutate", tools.mutESLogNormal, c=1.0, indpb=0.9)
        toolbox.decorate(
            "mutate",
            self.checkStrategy(MIN_VALUE, MAX_VALUE, MAX_STRATEGY,
                               MAX_STRATEGY))
        ###toolbox.register("mutate", tools.mutESLogNormal, c = 1, indpb = 0.1)

        toolbox.register("select",
                         tools.selTournament,
                         tournsize=int(POP_SIZE / 2))
        ###toolbox.register("select", tools.selTournament, k = 10, tournsize = 64)

        # parallelize or no
        #pool = multiprocessing.Pool(processes = 7)
        #toolbox.register("map", pool.map)

        # create a population of individuals
        toolbox.register("population",
                         tools.initRepeat,
                         list,
                         toolbox.individual,
                         n=POP_SIZE)
        population = toolbox.population()

        # create islands to contain distinct populations
        islands = [toolbox.population() for i in range(NISLANDS)]

        # create a hall of fame for each island
        hofsize = max(1, int(POP_SIZE / 10))
        famous = [tools.HallOfFame(maxsize=hofsize) for i in range(NISLANDS)]

        # create a stats log for each island
        stats = [
            tools.Statistics(lambda ind: ind.fitness.values)
            for i in range(NISLANDS)
        ]

        for i in range(NISLANDS):
            stats[i].register("avg", np.mean)
            stats[i].register("std", np.std)
            stats[i].register("min", np.min)
            stats[i].register("max", np.max)

        # MU, LAMDA parameters
        MU, LAMBDA = POP_SIZE, POP_SIZE * 2

        # run optimization with periodic migration between islands
        for i in range(int(NGEN / MIG_FREQ)):
            print("----------------")
            print("Evolution period: " + str(i))
            print("----------------")
            for k in range(len(islands)):
                print("------------------------")
                print("Island " + str(k) + " evolution:")
                print("------------------------")
                #islands[k], log = algorithms.eaGenerateUpdate(toolbox, ngen = MIG_FREQ, halloffame = famous[k], stats = stats[k])
                islands[k], log = algorithms.eaMuCommaLambda(
                    islands[k],
                    toolbox,
                    mu=MU,
                    lambda_=LAMBDA,
                    cxpb=0.4,
                    mutpb=0.6,
                    ngen=MIG_FREQ,
                    halloffame=famous[k],
                    stats=stats[k])
            print("---------------")
            print("MIGRATION!")
            print("---------------")
            self.custom_migRing(islands,
                                10,
                                tools.selBest,
                                replacement=tools.selWorst)

        # Create final population for the last run
        final_famous = tools.HallOfFame(maxsize=1)
        final_stats = tools.Statistics(lambda ind: ind.fitness.values)
        final_stats.register("avg", np.mean)
        final_stats.register("std", np.std)
        final_stats.register("min", np.min)
        final_stats.register("max", np.max)
        toolbox.register("final_population",
                         tools.initRepeat,
                         list,
                         toolbox.individual,
                         n=hofsize * NISLANDS)
        final_population = toolbox.final_population()

        # copy over each island's famous individuals into the final population
        for i in range(NISLANDS):
            for j in range(hofsize):
                final_population[i * hofsize + j] = famous[i][j]

        # make sure our ultimate hall of fame starts out as the best we've ever seen
        final_famous.update(final_population)

        # reset MU, LAMBDA and rerun final evolution
        MU, LAMBDA = hofsize * NISLANDS, hofsize * NISLANDS * 2
        final_pop, log = algorithms.eaMuCommaLambda(final_population,
                                                    toolbox,
                                                    mu=MU,
                                                    lambda_=LAMBDA,
                                                    cxpb=0.4,
                                                    mutpb=0.6,
                                                    ngen=MIG_FREQ,
                                                    halloffame=final_famous,
                                                    stats=final_stats)

        self.opt_vec = np.array(final_famous[0])
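
custom_migRing is not shown in this excerpt. A minimal sketch of a ring migration in the spirit of deap.tools.migRing, assuming plain list-based island populations (not the author's actual implementation):

from deap import tools

def ring_migration(islands, k, selection, replacement=None):
    # pick k emigrants from every island first, so the exchange is simultaneous
    emigrants = [selection(isl, k) for isl in islands]
    for i, isl in enumerate(islands):
        incoming = emigrants[i - 1]                     # previous island on the ring
        victims = (replacement or selection)(isl, k)    # e.g. tools.selWorst
        for victim, migrant in zip(victims, incoming):
            isl[isl.index(victim)] = migrant

# usage mirroring the call above:
# ring_migration(islands, 10, tools.selBest, replacement=tools.selWorst)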
Example #51
from deap import tools, base, creator
from helpers import Gene as Gene
import helpers
import random
import cPickle
import csv
from pathos.multiprocessing import freeze_support
import sys

# Set up the toolbox from base
toolbox = base.Toolbox()

# set up the fitness component and the chromosome using that fitness
creator.create("fitness", base.Fitness, weights=(1.0, ))
creator.create("Tactic", list, fitness=creator.fitness)

# The minimum and maximum values that the three measures can have
MIN_DISTANCE, MAX_DISTANCE = -48152, 48152
MIN_VELOCITY, MAX_VELOCITY = -1000, 1000
MIN_ACCEL, MAX_ACCEL = -100, 100


def main():
    # set evolutionary operators
    toolbox.register("mutate", helpers.gaussian, mu=0, sigma=0.2, indpb=0.002)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("evaluate", helpers.evaluate)

    # Initialise all the genes to be used in the chromosome.
    toolbox.register("px_min", Gene, MIN_DISTANCE, MAX_DISTANCE, True)
    toolbox.register("px_max", Gene, MIN_DISTANCE, MAX_DISTANCE, False)
Example #52
            prior_irs = irs
        else:
            prior_irs = rob2.read_irs()

        new_location_x, new_location_y, new_location_z = rob2.position()
        fitnessScore += np.sqrt((new_location_x - current_location_x) ** 2
                                + (new_location_y - current_location_y) ** 2)

    rob2.stop_world()

    print('working')
    return [10 * fitnessScore + 50 / (number_of_collisions + 1)]
# initialize fitness and set fitness weight to positive value (we want to maximize)
creator.create("FitnessMax", base.Fitness, weights=[1.0])
# the goal ('fitness') function to be maximized

creator.create("Individual", array.array, typecode="d",
               fitness=creator.FitnessMax, strategy=None)
creator.create("Strategy", array.array, typecode="d")
record = 0

def generateWeights(icls, scls, size, imin, imax, smin, smax):
    # imin/imax/smin/smax follow DEAP's usual ES-initialiser signature but are unused here:
    # both the weights and the mutation strategy are drawn from a standard normal
    ind = icls(np.random.normal() for _ in range(size))
    ind.strategy = scls(random.gauss(0, 1) for _ in range(size))
    return ind
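
The fragment ends before this generator is attached to a toolbox. Following DEAP's usual evolution-strategy pattern it would typically be registered as below; IND_SIZE and the bound values are placeholders, and the `base`/`tools`/`creator` imports from deap are assumed:

toolbox = base.Toolbox()

IND_SIZE = 20          # placeholder: number of weights to evolve
toolbox.register("individual", generateWeights,
                 creator.Individual, creator.Strategy,
                 IND_SIZE, -1.0, 1.0, -1.0, 1.0)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)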
Example #53
import numpy

from deap import algorithms
from deap import base
from deap import cma
from deap import creator
from deap import tools
import pred_link_eval
#from scoop import futures

# Problem size
#N=8
N = len(pred_link_eval.SFrame.myparams.ScoresChoiced)

# a weight of -1.0 means this is a minimization problem
creator.create(
    "FitnessMin", base.Fitness,
    weights=(-1.0, ))  # the trailing comma is needed: DEAP expects a tuple of weights (multi-objective support)
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("evaluate", pred_link_eval.SFrame.evaluate)
#toolbox.register("map", futures.map)


def main():

    # seed the random number generator so runs are reproducible
    numpy.random.seed(128)
    file1 = open('config.txt', 'r')
    line = file1.readline()
    line = line.strip('\n').strip('\r').split(',')
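    # (The example is truncated here. Given the `cma` import above, a DEAP run of this
    #  kind is usually completed roughly as follows; the centroid, sigma, lambda_ and
    #  ngen values are illustrative, not taken from the original.)
    strategy = cma.Strategy(centroid=[0.5] * N, sigma=0.3, lambda_=10 * N)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min)
    stats.register("avg", numpy.mean)

    algorithms.eaGenerateUpdate(toolbox, ngen=250, stats=stats, halloffame=hof)
    return hof[0]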
Example #54
def run():
    for i in range(number_of_runs):
        ###################################################################
        #EVOLUTIONARY ALGORITHM
        ###################################################################
        #TYPE
        #Create minimizing fitness class w/ single objective:
        creator.create('FitnessMin', base.Fitness, weights=(-1.0,))
        #Create individual class:
        creator.create('Individual', list, fitness=creator.FitnessMin)

        #TOOLBOX
        toolbox = base.Toolbox()
        #Register function to create a number in the interval [1-100?]:
        #toolbox.register('init_params', )
        #Register function to use initRepeat to fill individual w/ n draws from np.random.random:
        toolbox.register('individual', tools.initRepeat, creator.Individual,
                         np.random.random, n=number_of_params)
        #Register function to use initRepeat to fill population with individuals:
        toolbox.register('population', tools.initRepeat, list, toolbox.individual)

        #GENETIC OPERATORS:
        # Register evaluate fxn = evaluation function, individual to evaluate given later
        toolbox.register('evaluate', scorefxn_helper)
        # Register mate fxn = two points crossover function
        toolbox.register('mate', tools.cxTwoPoint)
        # Register mutate = polynomial bounded mutation within [0, 1]:
        toolbox.register('mutate', tools.mutPolynomialBounded,
                         eta=0.1, low=0.0, up=1.0, indpb=0.2)
        # Register select = size of tournament set to 3
        toolbox.register('select', tools.selTournament, tournsize=3)

        #EVOLUTION!
        pop = toolbox.population(n=number_of_individuals)
        hof = tools.HallOfFame(1)

        stats = tools.Statistics(key=lambda ind: [ind.fitness.values, ind])
        stats.register('all', np.copy)

        # using built in eaSimple algo
        pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=crossover_rate,
                                           mutpb=mutation_rate,
                                           ngen=number_of_generations,
                                           stats=stats, halloffame=hof,
                                           verbose=False)
        # print(f'Run number completed: {i}')

        ###################################################################
        #MAKE LISTS
        ###################################################################
        # Find best scores and individuals in population
        arr_best_score = []
        arr_best_ind = []
        for a in range(len(logbook)):
            scores = []
            for b in range(len(logbook[a]['all'])):
                scores.append(logbook[a]['all'][b][0][0])
            #print(a, np.nanmin(scores), np.nanargmin(scores))
            arr_best_score.append(np.nanmin(scores))
            #the stored entries are 'deap.creator.Individual' objects, which would need the
            #creator classes defined to unpickle; convert to plain numpy arrays instead
            ind_np = np.asarray(logbook[a]['all'][np.nanargmin(scores)][1])
            ind_np_conv = convert_individual(ind_np, arr_conversion_matrix, number_of_params)
            arr_best_ind.append(ind_np_conv)
            #arr_best_ind.append(np.asarray(logbook[a]['all'][np.nanargmin(scores)][1]))


        # print('Best individual is:\n %s\nwith fitness: %s' %(arr_best_ind[-1],arr_best_score[-1]))

        ###################################################################
        #PICKLE
        ###################################################################
        arr_to_pickle = [arr_best_score, arr_best_ind]

        def get_filename(val):
            filename_base = dir_to_use + '/' + stripped_name + '_'
            if val < 10:
                toret = '000' + str(val)
            elif 10 <= val < 100:
                toret = '00' + str(val)
            elif 100 <= val < 1000:
                toret = '0' + str(val)
            else:
                toret = str(val)
            return filename_base + toret + '.pickled'

        counter = 0
        filename = get_filename(counter)
        while os.path.isfile(filename) == True:
            counter += 1
            filename = get_filename(counter)

        pickle.dump(arr_to_pickle, open(filename,'wb'))
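
The pickled runs can be reloaded later without importing any DEAP types; a minimal sketch assuming the [arr_best_score, arr_best_ind] layout written above (the filename is illustrative):

import pickle

with open('results/run_0000.pickled', 'rb') as f:      # illustrative path
    best_scores, best_individuals = pickle.load(f)

# best_scores[g] is the lowest score seen in generation g,
# best_individuals[g] the corresponding converted parameter vector
print(best_scores[-1], best_individuals[-1])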
Example #55
def main():
    """ This function is an entry  point of the application.
    """
    # reading command-line arguments and options
    parser = optparse.OptionParser()
    parser.set_defaults(debug=False, xls=False)
    parser.add_option('--debug', action='store_true', dest='debug')
    parser.add_option('--verbose', action='store_true', dest='verbose')
    parser.add_option('--randomized', action='store_true', dest='randomized')
    (options, args) = parser.parse_args()

    # obtaining execution parameters
    parameters = get_execution_parameters(options, args)
    if (options.debug or options.verbose):
        print("Execution parameters: ", parameters)

    # seeding random process
    if (not options.randomized):
        random.seed(parameters['RandomSeed'])

    # reading read elements from input file
    (labels, reads) = read_labels_reads(options, parameters)
    if (options.debug or options.verbose):
        print("Mutation labels:", labels)
        print("Reads (from input):")
        for x in reads:
            print(x)

    # create fitness function
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))

    # create structure of the individual
    creator.create("Individual", GaNode, fitness=creator.FitnessMin)

    # create toolbox for execution of the genetic algorithm
    toolbox = base.Toolbox()

    # register boolean attribute to toolbox
    toolbox.register("attr_bool", random.randint, 0, 1)

    # register individual creation to toolbox
    toolbox.register("individual",
                     init_ga_node_individual,
                     creator.Individual,
                     labels=labels,
                     size=2 * len(labels))

    # register population to toolbox
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # register evaluation function
    toolbox.register("evaluate", evaluate_ga_node_individual, reads)

    # register the crossover operator
    toolbox.register("mate", crossover_ga_node_individuals)

    # register a mutation operator
    toolbox.register("mutate", mutate_ga_node_individual)

    # operator for selecting individuals for breeding the next
    # generation: each individual of the current generation
    # is replaced by the 'fittest' (best) of three individuals
    # drawn randomly from the current generation.
    toolbox.register("select", tools.selTournament, tournsize=3)

    # create an initial population, where each individual is a GaTree
    population_size = 5
    pop = toolbox.population(n=population_size)
    if (options.verbose):
        print("Population (size %d) - initial\n" % len(pop))
        print(pop)

    # Probability with which two individuals are crossed
    crossover_probability = 0.5

    # Probability for mutating an individual
    mutation_probability = 0.2

    if (options.debug or options.verbose):
        print("Start of evolution")

    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))
    if (options.debug):
        print("Fitnesses of individuals in population - initial")
        print(fitnesses)

    # Assign fitness to individuals in population
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    # Variable keeping track of the number of generations
    generation = 0

    # Begin the evolution
    while True:
        if (options.debug or options.verbose):
            print("-- Generation %i --" % generation)

        if (options.debug or options.verbose):
            fits = [ind.fitness.values[0] for ind in pop]
            length = len(pop)
            mean = sum(fits) / length
            sum2 = sum(x * x for x in fits)
            std = abs(sum2 / length - mean**2)**0.5
            print("  Fitness: ", fits)
            print("  Min %s" % min(fits))
            print("  Max %s" % max(fits))
            print("  Avg %s" % mean)
            print("  Std %s" % std)
            best_in_generation = tools.selBest(pop, 1)[0]
            print("  Best individual: \n %s", best_in_generation)

        # A new generation
        generation += 1

        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            # cross two individuals with previously determined probability
            if random.random() < crossover_probability:
                toolbox.mate(child1, child2)
                # fitness values of the children
                # must be recalculated later
                del child1.fitness.values
                del child2.fitness.values

        # Apply mutation on the offspring
        for mutant in offspring:
            # mutate an individual with previously determined probability
            if random.random() < mutation_probability:
                toolbox.mutate(mutant)
                # fitness values of the mutant
                # must be recalculated later
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # The population is entirely replaced by the offspring
        pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats

        # Check if any of finishing criteria is meet
        # Criteria based on number of generations
        if (generation > 10):
            break
        # Criteria based on standard deviation of fitness in population
        fits = [ind.fitness.values[0] for ind in pop]
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5
        if (std <= 0):
            break

    if (options.debug or options.verbose):
        print("-- End of evolution --")
    if (options.verbose):
        print("Population (size %d) - at end\n" % len(pop))
        print(pop)
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is\n%s\n, with fitness %s" %
          (best_ind, best_ind.fitness.values))
    return
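
For reference, the hand-written loop above follows the same select/vary/evaluate/replace pattern that algorithms.eaSimple packages up. If a fixed generation count is acceptable instead of the standard-deviation stopping rule, the evolution could be sketched with the operators registered above (assuming `from deap import algorithms`):

    stats = tools.Statistics(lambda ind: ind.fitness.values[0])
    stats.register("min", min)
    stats.register("max", max)
    pop, logbook = algorithms.eaSimple(pop, toolbox,
                                       cxpb=crossover_probability,
                                       mutpb=mutation_probability,
                                       ngen=10,
                                       stats=stats,
                                       halloffame=tools.HallOfFame(1),
                                       verbose=options.verbose)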
Example #56
	return COR, # If using just one objective function, put a comma at the end!!!


	NSE = hydroStats.NS(s=Qsim, o=Qobs, warmup=WarmupDays)
	print "   run_rand_id: " + str(run_rand_id) + ", NSE: " + "{0:.3f}".format(NSE)
	with open(os.path.join(path_subcatch, "runs_log.csv"), "a") as myfile:
		myfile.write(str(run_rand_id) + "," + str(NSE) + "\n")
	return NSE,  # If using just one objective function, put a comma at the end!!!
	"""


########################################################################
#   Perform calibration using the DEAP module
########################################################################

creator.create("FitnessMin", base.Fitness, weights=(maxDeap, ))
creator.create("Individual",
               array.array,
               typecode='d',
               fitness=creator.FitnessMin)

toolbox = base.Toolbox()

# Attribute generator
toolbox.register("attr_float", random.uniform, 0, 1)

# Structure initializers
toolbox.register("Individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_float, len(ParamRanges))
toolbox.register("population", tools.initRepeat, list, toolbox.Individual)
Example #57
import array
import json
import random

import numpy

from deap import algorithms
from deap import base
from deap import creator
from deap import tools

# gr*.json contains the distance map in list of list style in JSON format
# Optimal solutions are : gr17 = 2085, gr24 = 1272, gr120 = 6942
with open("tsp/gr17.json", "r") as tsp_data:
    tsp = json.load(tsp_data)

distance_map = tsp["DistanceMatrix"]
IND_SIZE = tsp["TourSize"]

creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
creator.create("Individual",
               array.array,
               typecode='i',
               fitness=creator.FitnessMin)

toolbox = base.Toolbox()

# Attribute generator
toolbox.register("indices", random.sample, range(IND_SIZE), IND_SIZE)

# Structure initializers
toolbox.register("individual", tools.initIterate, creator.Individual,
                 toolbox.indices)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
Example #58
    def create_ga(self, *args, **kwargs):
        is_silent = kwargs["silent"]
        wf = kwargs["wf"]
        rm = kwargs["resource_manager"]
        estimator = kwargs["estimator"]
        ga_params = kwargs["ga_params"]

        POPSIZE = ga_params.get("population", self.DEFAULT_POPULATION)
        CXPB = ga_params.get('crossover_probability',
                             self.DEFAULT_CROSSOVER_PROBABILITY)
        MUTPB = ga_params.get('replacing_mutation_probability',
                              self.DEFAULT_REPLACING_MUTATION_PROBABILITY)
        NGEN = ga_params.get('generations', self.DEFAULT_GENERATIONS)
        SWEEPMUTPB = ga_params.get('sweep_mutation_probability',
                                   self.DEFAULT_SWEEP_MUTATION_PROBABILITY)

        Kbest = ga_params.get('Kbest', POPSIZE)

        ga_functions = kwargs.get("ga_functions",
                                  GAFunctions2(wf, rm, estimator))

        check_evolution_for_stopping = kwargs.get(
            "check_evolution_for_stopping", True)

        def default_fixed_schedule_part(resource_manager):
            fix_schedule_part = Schedule({
                node: []
                for node in HeftHelper.to_nodes(
                    resource_manager.get_resources())
            })
            return fix_schedule_part

        ##================================
        ##Create genetic algorithm here
        ##================================
        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual", dict, fitness=creator.FitnessMax)

        toolbox = base.Toolbox()
        # Attribute generator
        toolbox.register("attr_bool", ga_functions.build_initial(None, 0))
        # Structure initializers
        toolbox.register("individual", tools.initIterate, creator.Individual,
                         toolbox.attr_bool)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)

        ## default case
        fix_schedule_part = default_fixed_schedule_part(rm)
        toolbox.register("evaluate",
                         ga_functions.build_fitness(fix_schedule_part, 0))

        toolbox.register("mate", ga_functions.crossover)
        toolbox.register("mutate", ga_functions.mutation)
        # toolbox.register("select", tools.selTournament, tournsize=4)
        toolbox.register("select", tools.selRoulette)
        # toolbox.register("select", tools.selBest)
        # toolbox.register("select", tools.selTournamentDCD)
        # toolbox.register("select", tools.selNSGA2)

        repeated_best_count = 10

        class GAComputation(SynchronizedCheckpointedGA):

            EVOLUTION_STOPPED_ITERATION_NUMBER = "EvoStpdIterNum"

            def __init__(self):
                super().__init__()
                pass

            @timing
            def __call__(self,
                         fixed_schedule_part,
                         initial_schedule,
                         current_time=0,
                         initial_population=None):
                print("Evaluating...")
                toolbox.register(
                    "evaluate",
                    ga_functions.build_fitness(fixed_schedule_part,
                                               current_time))
                toolbox.register(
                    "attr_bool",
                    ga_functions.build_initial(fixed_schedule_part,
                                               current_time))
                # Structure initializers
                toolbox.register("individual", tools.initIterate,
                                 creator.Individual, toolbox.attr_bool)
                toolbox.register("population", tools.initRepeat, list,
                                 toolbox.individual)

                ga_functions.initial_chromosome = GAFunctions2.schedule_to_chromosome(
                    initial_schedule, fixed_schedule_part)

                if initial_population is None:
                    initial_population = []

                if ga_functions.initial_chromosome is None:
                    print("empty_init_solutions")
                    init_solutions = []
                else:
                    init_solutions = [
                        creator.Individual(
                            copy.deepcopy(ga_functions.initial_chromosome))
                        for _ in range(int(POPSIZE * 0.9))
                    ]

                pop = initial_population + toolbox.population(
                    n=POPSIZE - len(initial_population) -
                    len(init_solutions)) + init_solutions

                ## TODO: experimental change
                history = History()

                # Decorate the variation operators
                #toolbox.decorate("mate", history.decorator)
                # toolbox.decorate("mutate", history.decorator)

                # Create the population and populate the history
                #history.update(pop)
                #===================================================

                hallOfFame = deap.tools.HallOfFame(5)

                stats = tools.Statistics(key=lambda x: 1 / x.fitness.values[0])
                stats.register("min", numpy.min)
                stats.register("max", numpy.max)
                stats.register("avr", numpy.mean)
                stats.register("std", numpy.std)

                logbook = tools.Logbook()
                logbook.header = ["gen"] + stats.fields

                # Evaluate the entire population
                fitnesses = list(map(toolbox.evaluate, pop))
                for ind, fit in zip(pop, fitnesses):
                    ind.fitness.values = fit

                previous_raised_avr_individuals = []

                # Begin the evolution
                for g in range(NGEN):
                    # print("Iteration")
                    if self.is_stopped():
                        break

                    hallOfFame.update(pop)

                    # logbook.record(pop=copy.deepcopy(pop))

                    # check if evolution process has stopped

                    # if (check_evolution_for_stopping is True) and len(previous_raised_avr_individuals) == repeated_best_count:
                    #     length = len(previous_raised_avr_individuals)
                    #     whole_sum = sum(previous_raised_avr_individuals)
                    #     mean = whole_sum / length
                    #     sum2 = sum(abs(x - mean) for x in previous_raised_avr_individuals)
                    #     std = sum2/length
                    #     ## TODO: uncomment it later. output
                    #     # print("std: " + str(std))
                    #     if std < 0.0001:
                    #         print(" Evolution process has stopped at " + str(g) + " iteration")
                    #         res = self._get_result()
                    #         extended_result = (res[0], res[1], res[2], res[3], g)
                    #         self._save_result(extended_result)
                    #         break

                    # print("-- Generation %i --" % g)
                    # Select the next generation individuals
                    offspring = pop  #toolbox.select(pop, len(pop))
                    # Clone the selected individuals
                    offspring = list(map(toolbox.clone, offspring))
                    # Apply crossover and mutation on the offspring
                    for child1, child2 in zip(offspring[::2], offspring[1::2]):
                        if random.random() < CXPB:
                            toolbox.mate(child1, child2)
                            del child1.fitness.values
                            del child2.fitness.values

                    for mutant in offspring:
                        if random.random() < SWEEPMUTPB:
                            ga_functions.sweep_mutation(mutant)
                            del mutant.fitness.values
                            continue
                        if random.random() < MUTPB:
                            toolbox.mutate(mutant)
                            del mutant.fitness.values

                    # Evaluate the individuals with an invalid fitness
                    invalid_ind = [
                        ind for ind in offspring if not ind.fitness.valid
                    ]

                    fitnesses = list(map(toolbox.evaluate, invalid_ind))

                    for ind, fit in zip(invalid_ind, fitnesses):
                        ind.fitness.values = fit
                    #pop[:] = offspring

                    # mix with the best individuals of the time
                    sorted_pop = sorted(pop + list(hallOfFame) + offspring,
                                        key=lambda x: x.fitness.values,
                                        reverse=True)
                    pop = sorted_pop[:Kbest:] + toolbox.select(
                        sorted_pop[Kbest:], POPSIZE - Kbest)

                    # Gather all the fitnesses in one list and print the stats
                    fits = [ind.fitness.values[0] for ind in pop]

                    length = len(pop)
                    mean = sum(fits) / length
                    sum2 = sum(x * x for x in fits)
                    std = abs(sum2 / length - mean**2)**0.5
                    worst = 1 / min(fits)
                    best = 1 / max(fits)
                    avr = 1 / mean

                    data = stats.compile(pop)
                    logbook.record(gen=g, **data)
                    if not is_silent:
                        print(logbook.stream)
                        # print("-- Generation %i --" % g)
                        # print("  Worst %s" % str(worst))
                        # print("   Best %s" % str(best))
                        # print("    Avg %s" % str(avr))
                        # print("    Std %s" % str(1/std))

                    best = self._find_best(pop)
                    # the last component is iteration number when evolution stopped
                    result = (best, pop, fixed_schedule_part, current_time, g)
                    self._save_result(result)
                    self._save_pop(pop)

                    if len(previous_raised_avr_individuals
                           ) == repeated_best_count:
                        previous_raised_avr_individuals = previous_raised_avr_individuals[
                            1::]
                    previous_raised_avr_individuals.append(1 / mean)

                    pass
                #
                # import matplotlib.pyplot as plt
                # import networkx
                #
                # graph = networkx.DiGraph(history.genealogy_tree)
                # graph = graph.reverse()     # Make the grah top-down
                # colors = [toolbox.evaluate(history.genealogy_history[i])[0] for i in graph]
                # networkx.draw(graph, node_color=colors)
                # plt.show()

                # best = self._find_best(pop)
                # self._save_result((best, pop, fixed_schedule_part, current_time))

                ## return the best-fit individual and the resulting population

                print("Ready")
                return self.get_result(), logbook

            def _find_best(self, pop):
                # resulted_pop = [(ind, ind.fitness.values[0]) for ind in pop]
                # result = max(resulted_pop, key=lambda x: x[1])
                # return result[0]
                result = max(pop, key=lambda x: x.fitness.values[0])
                return result

            def _construct_result(self, result):
                (best, pop, fixed_schedule_part, current_time,
                 stopped_iteration) = result
                ## TODO: make additional structure to return elements
                return best, pop, ga_functions.build_schedule(
                    best, fixed_schedule_part, current_time), stopped_iteration

            pass

        return GAComputation()
Example #59
def initialize():
    global data, labels, gMatrix, wMatrix, guessedY, classificationOutput, colors, numOfAllData
    data = np.random.rand(numOfData, dimension)
    labels = np.zeros((numOfData, numOfClasses))
    gMatrix = np.random.rand(numOfData, numOfClusters)
    wMatrix = np.random.rand(numOfClusters, numOfClasses)  # m * c
    guessedY = np.random.rand(numOfData, numOfClasses)
    classificationOutput = np.zeros((numOfData, 1))
    colors = ['red', 'pink', 'yellow', 'magenta', 'blue', 'black',
              'green']  # to the num of classes
    if numOfClasses == 1 and test is False:
        numOfAllData = input("Enter the numOfAllData : ")


creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
creator.create("Individual",
               array.array,
               typecode="d",
               fitness=creator.FitnessMin,
               strategy=None)
creator.create("Strategy", array.array, typecode="d")


# Individual generator
def generateES(icls, scls, size, imin, imax, smin, smax):
    ind = icls(random.uniform(imin, imax) for _ in range(size))
    # gammas and their clusters in each chromosome
    ind.strategy = scls(random.uniform(smin, smax) for _ in range(size))
    return ind
Example #60
def main():
    # Import data
    x_train, y_train = load_split_all()[0]
    data = x_train
    labels = y_train

    num_positives = np.count_nonzero(labels)
    num_negatives = len(labels) - num_positives
    # num_positives is max false negatives
    # num_negatives is max false positives - append these to fitness so we have reliable AuC
    fn_trivial_fitness = (0, num_positives)
    fp_trivial_fitness = (num_negatives, 0)

    creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
    creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)

    # Arguments
    random.seed(25)
    crossover_rate = 0.5
    mutation_rate = 0.2
    samples = 10  # set to 10 when generating submission data
    calc_area = False  # set to true when generating submission data

    input_types = []
    for i in range(x_train.shape[1]):  # multiplication op doesn't work
        input_types.append(float)
    pset = gp.PrimitiveSetTyped("MAIN", input_types, bool)

    # Essential Primitives
    pset.addPrimitive(is_greater, [float, float], bool)
    pset.addPrimitive(is_equal_to, [float, float], bool)
    pset.addPrimitive(if_then_else, [bool, float, float], float)

    pset.addPrimitive(np.logical_not, [bool], bool)
    pset.addPrimitive(
        np.logical_and, [bool, bool],
        bool)  # De Morgan's rule says all logic ops can be made with not & and

    pset.addPrimitive(np.negative, [float], float)
    pset.addPrimitive(operator.mul, [float, float], float)
    pset.addPrimitive(operator.add, [float, float], float)
    pset.addPrimitive(operator.sub, [float, float], float)

    # constants
    # pset.addTerminal(1.0, float)
    pset.addTerminal(2.0, float)
    pset.addTerminal(10.0, float)
    pset.addTerminal(25.0, float)
    pset.addTerminal(1, bool)  # Necessary for valid compilation
    pset.addTerminal(0,
                     bool)  # though I'd like to discourage it, this boosts performance

    # More primitives (for fun/tinkering/reducing verbosity of tree)
    # Complex ops
    # pset.addPrimitive(equal_conditional, [bool, bool, float, float], float)
    # Logic to float

    # Float to logic
    # pset.addPrimitive(in_range, [float, float, float], bool)

    # Logic to logic
    pset.addPrimitive(operator.xor, [bool, bool], bool)

    # Float to float
    pset.addPrimitive(relu, [float], float)
    # pset.addPrimitive(absolute, [float], float)
    # pset.addPrimitive(safe_division, [float, float], float)
    pset.addPrimitive(math.floor, [float], int)

    # Visualizing aids
    pset.renameArguments(ARG0='pclass')
    pset.renameArguments(ARG1='sex')
    pset.renameArguments(ARG2='age')
    pset.renameArguments(ARG3='sibsp')
    pset.renameArguments(ARG4='parch')
    pset.renameArguments(ARG5='fare')
    # pset.renameArguments(ARG6='embarked')
    min_init = 1
    max_init = 4
    toolbox = base.Toolbox()
    toolbox.register("expr",
                     gp.genGrow,
                     pset=pset,
                     min_=min_init,
                     max_=max_init)
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("compile", gp.compile, pset=pset)

    toolbox.register("evaluate",
                     bloatControlEval,
                     pset=pset,
                     data=data,
                     labels=labels)
    # select
    # toolbox.register("select", tools.selTournament, tournsize=3)
    toolbox.register("select", tools.selWorst)  # added

    # crossover
    toolbox.register("mate", gp.cxOnePoint)
    # mutate
    toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)

    toolbox.decorate(
        "mate", gp.staticLimit(key=operator.attrgetter("height"),
                               max_value=17))
    toolbox.decorate(
        "mutate",
        gp.staticLimit(key=operator.attrgetter("height"), max_value=17))

    gen = range(40)
    avg_list = []
    max_list = []
    min_list = []
    population_size = 300
    pop = toolbox.population(n=population_size)
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    avg_areas = [
        0 for g in gen
    ]  # contains sum of performances per generation (averaged later)
    # for i in range(samples):  # sample 10 times
    # reset population at the start of each trial
    pop = toolbox.population(n=300)
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    # Begin the evolution
    for g in gen:
        if g > 30:
            toolbox.register("evaluate",
                             evalSymbReg,
                             pset=pset,
                             data=data,
                             labels=labels)
        else:
            toolbox.register("evaluate",
                             bloatControlEval,
                             pset=pset,
                             data=data,
                             labels=labels)
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < crossover_rate:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < mutation_rate:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness (those whose fitness was deleted after crossover/mutation)
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Replace population
        pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5
        g_max = max(fits)
        g_min = min(fits)

        avg_list.append(mean)
        max_list.append(g_max)
        min_list.append(g_min)

        # print("  Min %s" % g_min)
        # print("  Max %s" % g_max)
        # print("  Avg %s" % mean)
        # print("  Std %s" % std)

        # find area under curve for population
        if calc_area:
            # Evaluate our true fitnesses (sans bloat control)
            toolbox.register("evaluate",
                             evalSymbReg,
                             pset=pset,
                             data=data,
                             labels=labels)
            fitnesses = list(map(toolbox.evaluate, pop))
            for ind, fit in zip(pop, fitnesses):
                ind.fitness.values = fit

            hof_pop = generate_min_front(pop)
            # Extract fitnesses and sort so HoF draws correctly
            hof = np.asarray([ind.fitness.values for ind in hof_pop])
            hof = np.insert(hof, 0, [fp_trivial_fitness, fn_trivial_fitness],
                            0)
            hof = hof[np.argsort(hof[:, 0])]
            area = area_under_curve(hof)
            avg_areas[g] += area
            info = "\t\tAUC: %f" % area
        else:
            info = ""
        print("-- Generation %i --%s" % (g, info))

    print("-- End of (successful) evolution --")

    if calc_area:
        # average the areas
        avg_areas = [area / samples for area in avg_areas]
        # write to csv
        file = open("results/driver_results.csv", 'w')
        header = ','
        driver_line = "Driver,"
        for g in gen:
            header += "%d," % i
            driver_line += "%f," % avg_areas[g]
        header += "\n"
        file.write(header)
        file.write(driver_line)
        file.close()

    # Evaluate our true fitnesses (sans bloat control)
    toolbox.register("evaluate",
                     evalSymbReg,
                     pset=pset,
                     data=data,
                     labels=labels)
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    hof_pop = generate_min_front(pop)
    # Extract fitnesses and sort so HoF draws correctly
    hof = np.asarray([ind.fitness.values for ind in hof_pop])
    hof = np.insert(hof, 0, [fp_trivial_fitness, fn_trivial_fitness], 0)
    hof = hof[np.argsort(hof[:, 0])]
    # print(hof)

    # Charts
    pop_1 = [ind.fitness.values[0] for ind in pop]
    pop_2 = [ind.fitness.values[1] for ind in pop]

    plt.scatter(pop_1, pop_2, color='b')
    plt.scatter(hof[:, 0], hof[:, 1], color='r')
    plt.plot(hof[:, 0], hof[:, 1], color='r', drawstyle='steps-post')
    plt.xlabel("False Positives")
    plt.ylabel("False Negatives")
    plt.title("Pareto Front")
    print(area_under_curve(hof))

    if calc_area:
        print(avg_areas[-1])
    else:
        print(area_under_curve(hof))
    plt.show()
    if calc_area:
        plt.plot(gen, avg_areas, color='g')
        plt.xlabel("Generation")
        plt.ylabel("Area Under Curve")
        plt.title("AUC evolution")
        plt.show()

    print("Generating individual graphs")
    for k in range(len(hof_pop)):
        best_ind = hof_pop[k]
        nodes, edges, labels = gp.graph(best_ind)
        g = pgv.AGraph()
        g.add_nodes_from(nodes)
        g.add_edges_from(edges)
        g.layout(prog="dot")

        for i in nodes:
            n = g.get_node(i)
            n.attr["label"] = labels[i]

        g.draw("graphs/tree%s.pdf" % k)