Example No. 1
    def fit(self, guess=None):
        """
        Fits the given data

        Parameters
        ----------
        guess : np.ndarray
            Starting parameters for the minimization. Note: the starting guess is
            only used by the simplex ("nm") and basin-hopping ("bh") methods;
            differential evolution ("de") does not use it directly (it only falls
            back to it when deriving bounds).
        """
        exp = self.exp
        if guess is None:
            guess = self.default_guess()
        if self.method == "nm":
            res = minimize(self.error,
                           guess,
                           method='nelder-mead',
                           options={'maxiter': 10000})
        elif self.method == "bh":
            res = basinhopping(self.error, guess, niter=1000)
            return exp.surface, res.x
        elif self.method == "de":
            if self.bounds is None:
                self.bounds_from_guess(guess)
            res = de(self.error, self.bounds, popsize=20, mutation=(1., 1.5))
        if not res.success:
            print("Failed to converge to a correct structure.")
            return exp.surface, res.x
        else:
            #exp.surface.d = res.x
            return exp.surface, res.x
Example No. 2
def get_param(g0, beta, wn, size, mu, V=None, ei=None, bound_v=(-2.0, 2.0), bound_e=(-2.0, 1.5)):
    '''
    Returns the parameters of the fitted Green's function
    -------
    g0 : Green's function :: np.array(complex)
    wn : Matsubara frequencies :: np.array(float)
    size : number of sites in the fermionic bath :: int
    V, ei : parameters to fit (cluster-to-bath hoppings and bare on-site bath energies) :: 0 or np.array(float)
    bound_v, bound_e : bounds on the fitted parameters :: tuple
    Parameters for varying the chemical potential (used by the commented-out find_mu call):
    shift : step :: float
    erabs : tolerance :: float
    -----
    Returns: abs(V), e, model.fun :: np.array(float), np.array(float), float
    '''
    # Parameters for the minimization
    if not isinstance(V, int):
        V = V.copy()
        ei = ei.copy()
        V += 0.005 * (2 * np.random.rand(size) - 1)  # add noise to the previously found parameters
        ei += 0.005 * (2 * np.random.rand(size) - 1)
    else:
        V = np.random.rand(size)  # first iteration: start from random parameters
        ei = np.random.rand(size)
#     init = np.concatenate((V, ei))
    bounds = [bound_v] * size + [bound_e] * size
    #num_threads = int(os.popen(u'grep -c cores /proc/cpuinfo').read()) - 1
    num_threads = int(multiprocessing.cpu_count() - 1)
    model = de(diff, bounds=bounds, args=(g0, wn, mu), tol=1e-08, maxiter=5000, workers=num_threads)
    V = (model.x[:size])
    e = (model.x[size:])
    #mu = find_mu(g0, beta, wn, V, e, mu, shift, erabs)
    return abs(V), e, model.fun
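The cost function `diff` that this snippet minimizes is not shown. Below is a minimal sketch of one plausible form, assuming the usual Anderson-bath parametrization Delta(iwn) = sum_k V_k**2 / (i*wn + mu - e_k) and a 1/|wn| weighting; both the formula and the weighting are assumptions, not the author's code.

import numpy as np

def diff(params, g0, wn, mu):
    """Hypothetical least-squares distance between the target g0 and a bath fit."""
    size = len(params) // 2
    V, e = params[:size], params[size:]
    # Bath parametrization: sum_k V_k**2 / (i*wn + mu - e_k), one term per bath site.
    fit = np.sum(V[None, :] ** 2 / (1j * wn[:, None] + mu - e[None, :]), axis=1)
    # Weight low frequencies more strongly (a common, but assumed, convention).
    return float(np.sum(np.abs(fit - g0) ** 2 / np.abs(wn)))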
Example No. 3
    def runOptimization(self, maxiter=10):
        #        self.result = minimize(self.merit, x0 = self.UB, method='L-BFGS-B', bounds = self.bounds, options = {'eps' : 1e-2})

        self.result = de(self.merit,
                         bounds=[(0, 1)] * self.N_pars,
                         maxiter=maxiter)

        if self.parametrization == 'CST':
            self.finalAirfoil = self.airfoilfromX(self.result.x)
        else:
            self.finalAirfoil = self.airfoilBSfromX(self.result.x)
        self.finalAirfoil.plotAirfoil()
Example No. 4
    def run(self, seed=0, func_params=None):
        return de(
            func=self.__func,
            # strategy="randtobest1bin",
            args=func_params,
            maxiter=self.__max_iter,
            bounds=self.__bounds,
            popsize=self.__pop_size,
            mutation=self.__f,
            recombination=self.__cr,
            callback=self.__callback,
            polish=False,
            seed=seed,
            disp=True)
Example No. 5
def main():
    '''Samples from two uniform distributions 100 times. Separates the data points in X according
    to the target weights and the activation function given in separate_classes. Uses logistic
    regression to estimate the target weights. Plots the class 0 points in red, the class 1
    points in green, and the division line given by the weight estimates.'''

    X = sample_points(100, 2, -5, 5)
    target_weights = [1, -1, 5]
    X0, X1 = separate_classes(target_weights, X)

    def f(weights):
        return -log_likelihood(weights, X0, X1)

    weights = de(f, [(-5, 5), (-5, 5), (-5, 5)]).x
    scatter(X0, 'red')
    scatter(X1, 'green')
    plot_division_line(weights, -5, 5)
    print('Weights:', weights)
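The helpers main() relies on (sample_points, separate_classes, log_likelihood, scatter, plot_division_line) are not shown. Below is a self-contained sketch of what log_likelihood might look like, assuming a [w0, w1, bias] weight layout and that X0/X1 are arrays of 2-D points; the layout and shapes are assumptions.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def log_likelihood(weights, X0, X1):
    """Bernoulli log-likelihood of the two labelled point sets under a linear logistic model."""
    w = np.asarray(weights, dtype=float)
    X0 = np.asarray(X0, dtype=float)  # class-0 points, shape (n0, 2)
    X1 = np.asarray(X1, dtype=float)  # class-1 points, shape (n1, 2)
    z0 = X0 @ w[:2] + w[2]
    z1 = X1 @ w[:2] + w[2]
    eps = 1e-12  # guard against log(0)
    return np.sum(np.log(1.0 - sigmoid(z0) + eps)) + np.sum(np.log(sigmoid(z1) + eps))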
Example No. 6
def optimize(w, freq):
    cons = nlc(cons_fun, -np.inf, 1000)
    bnds = [np.array([-1,1]),]*w.shape[0]
    res = de(kf, args=(w, freq), maxiter=1000, bounds=bnds, popsize=2, polish=False, constraints=cons, disp=True, workers=-1, updating='deferred')
    output = res.x>0
    np.save('output.npy', output)
Example No. 7
def de_optimizer(obj_func, initial_theta, bounds):
    result = de(lambda x: obj_func(x)[0], [(b[0], b[1]) for b in bounds])
    return result.x, result.fun
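This signature matches what scikit-learn expects from a custom kernel optimizer (a callable taking obj_func, initial_theta, bounds and returning theta_opt, func_min), so the function can be handed straight to GaussianProcessRegressor. A hedged, self-contained usage sketch on toy data, reusing de_optimizer from above; the data and kernel choice are illustrative only (Example No. 8 below does the same inside optimize_gp):

import numpy as np
from scipy.optimize import differential_evolution as de
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

X = np.linspace(0.0, 1.0, 20).reshape(-1, 1)
y = np.sin(6.0 * X).ravel()

# sklearn calls optimizer(obj_func, initial_theta, bounds) and expects (theta_opt, func_min) back.
gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e2)),
                               optimizer=de_optimizer)
gpr.fit(X, y)
print(gpr.kernel_)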
Example No. 8
def optimize_gp(obj_func, disp=True):
    """Surrogate-assisted global optimization: fit a Gaussian-process model to the
    points sampled so far, minimise several acquisition functions with differential
    evolution, and use their minima as attraction centres when picking the next
    sample from the sg space-filling sequence."""
    dim = obj_func.dimensions 
    bounds = obj_func.bounds
    
    def apply_bounds(x, bounds):
        return np.array([xi * (lub[1]-lub[0]) + lub[0] for xi, lub in zip(x,bounds)])
    
    def de_optimizer(obj_func, initial_theta, bounds):
        result = de(lambda x: obj_func(x)[0], [(b[0], b[1]) for b in bounds])
        return result.x, result.fun
    
    
    kernel =  C(1.0, (1e-3, 1e3)) * RBF([1]*dim, [(1e-2, 1e2)]*dim)
    #kernel = C(1.0, (1e-3, 1e3)) * M([1]*dim, [(1e-2, 1e2)]*dim, nu=1.5)
    gp = GPR(kernel=kernel, optimizer=de_optimizer)
    
    
    n = dim * 10 + 1
    N0 = n
    Nmax = 20 * dim
    
    pop = [apply_bounds(sgi, bounds) for sgi in sg(dim, n, 1).transpose()]
    
    y= [obj_func(pi) for pi in pop]
    
    bestx = [pop[i] for i in range(len(y)) if y[i]==min(y)]
    besty = [y[i] for i in range(len(y)) if y[i]==min(y)]
   
    
    def eval_gp_model(x):
        y,s = gp.predict([x], return_std=True)
        return y[0],s[0]
    
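    # Acquisition functions built from the GP surrogate: lower/upper confidence
    # bounds at 1, 2 and 3 sigma around the predicted mean.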
    def func1(x):
        y,s = eval_gp_model(x)
        return y - 3*s
    
    def func2(x):
        y,s = eval_gp_model(x)
        return y - 2*s  
    
    def func3(x):
        y,s = eval_gp_model(x)
        return y - 1*s  
    
    def func4(x):
        y,s = eval_gp_model(x)
        return y
    
    def func5(x):
        y,s = eval_gp_model(x)
        return y + 1*s 
    def func6(x):
        y,s = eval_gp_model(x)
        return y + 2*s  
    
    def func7(x):
        y,s = eval_gp_model(x)
        return y + 3*s 
    
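    # Expected improvement (ei) and probability of improvement (pi), negated so
    # that minimising them with differential evolution maximises the criterion.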
    def ei(x):
        y,s = eval_gp_model(x)
        if s<=0: return np.inf
        t = (min(besty)-y)/s
        return -1* ((min(besty)-y)*norm.cdf(t)+s*norm.pdf(t))
    
    def pi(x):
        y,s = eval_gp_model(x)
        if s<=0: return np.inf  
        return -norm.cdf((min(besty)-y)/s)   
    
    
    def distance(x1, x2):
        dx = x1 - x2
        return np.sqrt((dx*dx).sum())
    
    def distance2(x1, x2):
        dx = x1 - x2
        return np.abs(dx).max()
    
    def mdist(x1, x2, di):
        return np.exp(-np.log(2) * distance2(x1,x2)/di)
        
    skip = len(pop) + 1    
    funcs = [ ei, pi,func1, func2, func3, func4, func5, func6, func7]
    
    while len(pop) < Nmax:
        d = [(b[1]-b[0])*10**(-1.1-2.1*(len(pop)-N0)/(Nmax-N0)) for b in bounds]
        
        gp.fit(pop, y)
      
#        print(bounds)
#        print([f(obj_func.generator()) for f in funcs])
        attrac_center = [de(f, bounds).x for f in funcs]
        
        
        while True:
            new_sample = apply_bounds(sg(dim,1, skip).transpose()[0], bounds)
            skip = skip + 1
          
            filter_on = [0.5 <= mdist(new_sample, aci, di)  for aci,di in zip(attrac_center,d)]
            if any(filter_on):
                choice = filter_on.index(True)
                break
       
        new_y = obj_func(new_sample)  
        if np.isnan(new_y) or np.isinf(new_y):
            continue
        pop.append(new_sample)
        y.append(new_y)
        if (new_y <= min(besty)):
            bestx.append(new_sample)
            besty.append(new_y)
            bestx = [bestx[i] for i in range(len(bestx)) if besty[i]==min(besty)]
            besty = [besty[i] for i in range(len(besty)) if besty[i]==min(besty)]
        if disp:
            print(len(pop), max(d), funcs[choice].__name__.upper(), skip, new_sample, new_y, bestx[0], besty[0],sep='\t',end='\n')
    
    return {'pop':pop, 'y':y, 'bestx':bestx, 'besty':besty, 'model':gp,'func':obj_func}
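optimize_gp treats obj_func as a callable that also exposes .dimensions and .bounds attributes (and, per the commented-out print, possibly a .generator() method). Below is a hypothetical wrapper that satisfies this implicit interface, assuming the snippet's unshown imports (GPR, C, RBF, norm, de, sg) are available:

import numpy as np

class Sphere:
    """Toy objective exposing the attributes optimize_gp reads."""
    dimensions = 2
    bounds = [(-5.0, 5.0), (-5.0, 5.0)]

    def __call__(self, x):
        return float(np.sum(np.asarray(x) ** 2))

out = optimize_gp(Sphere(), disp=False)
print(out['bestx'][0], out['besty'][0])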
Example No. 9
def differential_evolution(case, runopts, executor, initial_design, fvals):
    """ optimize case using scipy.optimize.differential_evolution
    """
    files = Manager().Queue()
    fun = partial(case.fitness, queue=files)
    N_iter = 0
    print("Begin differential evolution")
    print(
        "NOTE: parallel evaluation not currently supported. Maybe in future scipy"
    )
    from scipy.optimize import differential_evolution as de
    with Logger(params=case.params,
                queue=files,
                logfile=runopts.logfilename,
                best_dir=runopts.output_dir) as log:
        y = de(fun,
               bounds=case.get_bounds(),
               maxiter=runopts.max_iter,
               callback=log.callback)

    return log.x_best, log.f_best, log.Xi, log.Fi


optimizers = {
    "skopt": skopt,
    "multistart": multistart,
    "dummy": dummy,
    "de": differential_evolution
}
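The printed note above predates SciPy 1.2; since then, differential_evolution accepts a workers argument that evaluates the population in parallel (the population is then updated in 'deferred' mode). A self-contained sketch on SciPy's built-in rosen rather than this project's case.fitness:

from scipy.optimize import differential_evolution as de
from scipy.optimize import rosen

# workers=-1 uses every available core; updating='deferred' is the mode used
# whenever the population is evaluated in parallel.
result = de(rosen, bounds=[(-2.0, 2.0)] * 4, workers=-1, updating="deferred")
print(result.x, result.fun)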


def get_optimizer(name="skopt"):
Example No. 10
res_fun_values = np.zeros(n_runs)
successes = np.zeros(n_runs)
optid = np.zeros(n_runs)
dist = np.zeros(3)
# The fraction of runs in each optimum region
b_1 = 0.
b_2 = 0.
b_3 = 0.
# This will be the average number of function evaluations over the n_runs runs
avg_nfev = 0
for i in range(n_runs):
    res = de(
        my_fun,
        bounds,
        maxiter=max_iter,
        popsize=pop_size,
        tol=0.0,  # 0.0 disables the convergence-based stopping criterion
        disp=False,  # True would print the best objective value each iteration
        polish=use_bfgs,  # whether to polish the best member with L-BFGS-B afterwards
        init='latinhypercube'  # Latin hypercube sampling for the initial population
    )

    res_x = res.x  # the optimum design point
    res_f = res.fun  # the optimum function value

    # find which minimum it went to, success, and store results
    dist[0] = np.linalg.norm(res_x - x1) * epsilon
    dist[1] = np.linalg.norm(res_x - x2) * epsilon
    dist[2] = np.linalg.norm(res_x - x3) * epsilon
    if dist[0] < 1:
        b_1 = b_1 + 1
    if dist[1] < 1:
Example No. 11
'''
To improve your chances of finding a global minimum:
 * use higher popsize values,
 * use higher mutation values (with dithering),
 * use lower recombination values.
This widens the search radius, but slows convergence.
'''

# from scipy.optimize import rosen # import Rosenbrock func as example
from scipy.optimize import differential_evolution as de

def rose(x):
    '''Rosenbrock function'''
    return sum(100.0 * (x[1:] - x[:-1] ** 2) ** 2 + (1 - x[:-1]) ** 2)


bnds = [(0, 2), (0, 2), (0, 2)]
ans = de(func=rose, bounds=bnds, maxiter=1000, popsize=25,
         tol=0.01, mutation=(0.5, 1), recombination=0.7)
print(ans.x, ans.fun)
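To make the advice at the top concrete, here is a variant of the same call that widens the search as described; the specific numbers are illustrative only.

# Larger population, stronger/dithered mutation, lower recombination:
# a wider but slower search of the same bounds.
wide = de(func=rose, bounds=bnds, maxiter=2000, popsize=60,
          tol=0.01, mutation=(0.7, 1.9), recombination=0.3)
print(wide.x, wide.fun)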
Example No. 12
population_size = 30
optimization_steps = 5000
scaling_factor = 0.01
cross_over_rate = 0.5

problem_size = 4
# x = [-0.1, 0.1, -0.1, 1.0]
# y = [1000.0, 1000.0, 1000.0, 1000.0]
# x_c = x.ctypes.data_as(c.POINTER(c.c_double))
# y_c = x.ctypes.data_as(c.POINTER(c.c_double))

bounds = [(-0.5, 0.5), (-0.5, 0.5), (-0.5, 0.5), (-0.5, 0.5), (-0.5, 0.5),
          (0.7, 0.99)]
result = de(
    opt.objective_func,
    bounds,
    updating="deferred",
    workers=2,
    maxiter=optimization_steps,
)
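The constants population_size, scaling_factor and cross_over_rate defined at the top of this snippet are never passed to the call above. If they were meant to be used, the mapping onto differential_evolution's parameters would presumably look like this (a hedged guess, not the author's call):

result = de(
    opt.objective_func,
    bounds,
    popsize=population_size,        # population size multiplier (30)
    mutation=scaling_factor,        # differential weight F = 0.01
    recombination=cross_over_rate,  # crossover probability CR = 0.5
    maxiter=optimization_steps,
    updating="deferred",
    workers=2,
)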

x = result.x
opts = opt.vec_to_opts(x)
a = Automata(opts)
a.run()
print("BestAutomata", a)
print(a.stats())
p = Plotter(a)
p.plot(True)  # animate
Example No. 13
        price07 = market07['price'].tolist()
        price08 = market08['price'].tolist()
        score = [
            1 for m in [[real07, cf07, price07], [real08, cf08, price08]]
            for i in range(len(m[0])) for j in range(len(m[0])) if j > i
            if (m[0][i] - m[1][j, i] >= m[2][i] - m[2][j])
            & (m[0][j] - m[1][i, (j - 1)] >= m[2][j] - m[2][i])
        ]
    return sum(score) * -1


score(S, transfer)

# False Transfers.
transfer = "F"
# Guess
S = np.array((1000, -1))
nm_false = opt.minimize(score, S, args=(transfer,), method='Nelder-Mead')
# Differential Evolution, false transfers.
bound_f = [(0, 500), (-5, 5)]
de_false = de(score, bound_f, args=(transfer,))
print('\n> NELDER-MEAD, FALSE TRANSFERS:\n', nm_false)
print('\n> DIFFERENTIAL EVOLUTION, FALSE TRANSFERS:\n', de_false)

# True Transfers
transfer = "T"
# Differential Evolution, true transfers.
bound_t = [(-1000, 1000), (0, 1000), (-1000, 0), (-1000, 0)]
de_true = de(score, bound_t, args=(transfer,))
print('\n> DIFFERENTIAL EVOLUTION, TRUE TRANSFERS:\n', de_true)