Example #1
import numpy as np
from deap import benchmarks


def svm_from_cfg(cfg):
    """Evaluate a configuration on the Himmelblau benchmark.

    Originally written to build an SVM from a configuration and
    cross-validate it on the iris dataset; that code is left commented
    out below, and the configuration's values are instead passed to
    deap.benchmarks.himmelblau.

    Parameters
    ----------
    cfg : ConfigSpace.Configuration
        Configuration containing the parameters. Configurations are
        indexable like dictionaries.

    Returns
    -------
    The Himmelblau function value at the point given by the
    configuration's values.
    """
    # For deactivated parameters, the configuration stores None-values.
    # This is not accepted by the SVM, so we remove them.
    #    cfg = {k : cfg[k] for k in cfg if cfg[k]}
    # We translate boolean values:
    #    cfg["shrinking"] = True if cfg["shrinking"] == "true" else False
    # And for gamma, we set it to a fixed value or to "auto" (if used)
    #    if "gamma" in cfg:
    #        cfg["gamma"] = cfg["gamma_value"] if cfg["gamma"] == "value" else "auto"
    #        cfg.pop("gamma_value", None)  # Remove "gamma_value"

    np.random.seed(1)

    # A Configuration behaves like a mapping; collect its values as an array.
    x = np.array(list(cfg.values()))

    # regr = svm.SVR(degree=2, max_iter=1e5, **cfg)

    # scores = cross_val_score(regr, X, y, cv=5, scoring='r2')
    return benchmarks.himmelblau(x)[0]
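# A minimal sketch of driving svm_from_cfg by hand. The two-hyperparameter
# space below is illustrative, not the original author's; add_hyperparameter
# and sample_configuration are standard ConfigSpace calls, though exact
# names can shift between ConfigSpace versions.
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter

cs = ConfigurationSpace(seed=1)
cs.add_hyperparameter(UniformFloatHyperparameter("x0", -6.0, 6.0))
cs.add_hyperparameter(UniformFloatHyperparameter("x1", -6.0, 6.0))

cfg = cs.sample_configuration()
print(svm_from_cfg(cfg))  # Himmelblau value at the sampled point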
Example #2
    def check_stop(self):
        _, f, __ = self.best.get()
        if f <= self.ftarget:
            self.stop_dict["ftarget"] = f

        if self.countevals >= self.max_FEs:
            self.stop_dict["FEs"] = self.countevals

        return bool(self.stop_dict)


dim = 2
n_point = 8
max_FEs = 30 * n_point
obj_fun = lambda x: benchmarks.himmelblau(x)[0]
lb, ub = -6, 6

search_space = RealSpace([lb, ub]) * dim
mean = trend.constant_trend(dim, beta=0)  # Ordinary Kriging

# autocorrelation parameters of GPR
thetaL = 1e-10 * (ub - lb) * np.ones(dim)
thetaU = 10 * (ub - lb) * np.ones(dim)
theta0 = np.random.rand(dim) * (thetaU - thetaL) + thetaL

model = GaussianProcess(
    mean=mean,
    corr="squared_exponential",
    theta0=theta0,
    thetaL=thetaL,
    thetaU=thetaU,
)
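# A minimal sketch of wiring the surrogate above into the optimizer loop.
# The BO class and its keyword names follow the bayes-optim package's
# documentation and are assumptions here; exact signatures vary across
# versions of the package.
from bayes_optim import BO

opt = BO(
    search_space=search_space,
    obj_fun=obj_fun,
    model=model,
    DoE_size=n_point,   # size of the initial design (assumed keyword)
    max_FEs=max_FEs,
    n_point=n_point,    # candidates proposed per iteration (assumed keyword)
    verbose=True,
)
xopt, fopt, stop_dict = opt.run()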
Example #3
def main():
    np.random.seed(64)
    pop = toolbox.population(n=150)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    #pop,hof = algorithms.eaSimple(pop,toolbox,cxpb=0.5,mutpb=0.01,ngen=200,stats=stats,halloffame=hof,verbose=True)
    fitness = list(map(toolbox.evaluate, pop))

    for ind, fit in zip(pop, fitness):
        ind.fitness.values = fit
    min_fit = 0
    nvmap = noveltymap(5, POPNUM)
    step = 0
    #print("gen ","min ""max ","mean")
    novflag = 0
    for i in range(NGEN):
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))

        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            # cross two individuals with probability CXPB
            if random.random() < CXPB:
                toolbox.mate(child1, child2)

                # fitness values of the children
                # must be recalculated later
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:

            # mutate an individual with probability MUTPB
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        if novflag == 2:
            #print("nov")
            novflag = 0
            _pop = nvmap.popInd(pop)
            # Overwrite each offspring in place with the novelty-map
            # individual (assuming _ind.xy holds its coordinates) and use
            # its novelty score as the fitness.
            for off, _ind in zip(offspring, _pop):
                off[:] = _ind.xy
                off.fitness.values = (_ind.novelty,)
            pop[:] = offspring
            fits = [benchmarks.himmelblau(ind)[0] for ind in pop]
        else:
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            """
            for ind in invalid_ind:
                nvmap.ax.scatter(ind[0],ind[1],c='blue',marker='.')
            """
            fitness = list(map(toolbox.evaluate, invalid_ind))
            for ind, fit in zip(invalid_ind, fitness):
                ind.fitness.values = fit

            # The population is entirely replaced by the offspring
            pop[:] = offspring

            # Gather all the fitnesses in one list and print the stats
            fits = [ind.fitness.values[0] for ind in pop]
        hof.update(pop)
        length = len(pop)
        mean = sum(fits) / length
        #sum2 = sum(x*x for x in fits)
        #std = abs(sum2 / length - mean**2)**0.5
        # Count generations with (nearly) no improvement; after two such
        # generations the novelty-search branch is triggered. The original
        # exact comparison (== 0.001) would essentially never fire.
        if abs(min(fits) - min_fit) < 0.001:
            novflag += 1
        else:
            novflag = 0
        #print(i ,min(fits) ,max(fits) ,mean)
        min_fit = min(fits)
        if min(fits) <= np.exp(-10):  # target threshold (np.exp(-10) is about 4.5e-5)
            step += 1
            break
        #print("gen:",i,"  Min %s" % min(fits),"  Max %s" % max(fits),"  Avg %s" % mean)
        #print("gen:",i,"  Min %s" % min(fits),"  Max %s" % max(fits),"  Avg %s" % mean,"  Std %s" % std)
        #print(i,max(fits),mean)

    #nvmap.fig.show()
    #time.sleep(100000000)
    return pop, hof, step
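# main() assumes a DEAP creator/toolbox set up elsewhere in the module,
# plus the author's custom noveltymap class (not shown here). The
# registration below is an illustrative sketch, not the original author's
# settings.
import random
import numpy as np
from deap import base, creator, tools, benchmarks

CXPB, MUTPB, NGEN, POPNUM = 0.5, 0.2, 200, 150  # assumed constants

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("attr_float", random.uniform, -6, 6)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_float, 2)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", benchmarks.himmelblau)  # returns a fitness tuple
toolbox.register("mate", tools.cxBlend, alpha=0.5)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
toolbox.register("select", tools.selTournament, tournsize=3)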
Example #4
def untuple(sol):
    # DEAP benchmarks return a fitness tuple; extract the scalar value.
    return benchmarks.himmelblau(sol)[0]
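# The [0] indexing above (and in most examples on this page) exists because
# DEAP benchmark functions return one-element fitness tuples. A quick check
# at the known minimum (3, 2), where the Himmelblau function is exactly zero:
from deap import benchmarks

print(benchmarks.himmelblau([3.0, 2.0]))  # (0.0,)  the raw fitness tuple
print(untuple([3.0, 2.0]))                # 0.0     the unwrapped scalar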
Example #5
def calFit(self, ind):
    return benchmarks.himmelblau(ind)[0]
Example #6
def himmelblau_arg0(sol):
    # w_obstacles is a module-level flag from the source context; when
    # obstacles are active and the third coordinate marks an obstacle,
    # the point is invalid (NaN). Otherwise evaluate the first two
    # coordinates on the Himmelblau benchmark.
    return np.nan if w_obstacles and sol[2] == 1 else benchmarks.himmelblau(sol[:2])[0]
Example #7
def evalBenchmark(individual):
    # Returns the full fitness tuple, as DEAP's toolbox.evaluate expects.
    return benchmarks.himmelblau(individual)
Example #8
def himmelblau_arg0(sol):
    return benchmarks.himmelblau(sol)[0]
Example #9
def evaluate(self, x):
    # Assumes himmelblau was imported directly, e.g.
    # from deap.benchmarks import himmelblau.
    return himmelblau(x)[0]
Example #10
def himmelblau_arg0(sol):
    return benchmarks.himmelblau(sol)[0]
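# For reference, a standalone definition equivalent to what
# deap.benchmarks.himmelblau computes (DEAP wraps the value in a 1-tuple):
def himmelblau(x):
    return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2

# The function has four global minima with value 0, e.g. at (3.0, 2.0).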