def test_seed_gives_repeatability(self):
     result = differential_evolution(self.quadratic,
                                     [(-100, 100)],
                                     polish=False,
                                     seed=1,
                                     tol=0.5)
     result2 = differential_evolution(self.quadratic,
                                     [(-100, 100)],
                                     polish=False,
                                     seed=1,
                                     tol=0.5)
     assert_equal(result.x, result2.x)
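A fixed `seed` makes the stochastic DE search deterministic, which is what the test above relies on. A standalone sketch (assuming only numpy and scipy):

import numpy as np
from scipy.optimize import differential_evolution

def quadratic(x):
    return np.sum(x ** 2)

r1 = differential_evolution(quadratic, [(-100, 100)], seed=1, polish=False, tol=0.5)
r2 = differential_evolution(quadratic, [(-100, 100)], seed=1, polish=False, tol=0.5)
assert np.array_equal(r1.x, r2.x)  # identical results for identical seeds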
 def params(self,name,bounds):
     if name in self.models:
         if name == u'Modified_Zener_Hollomon':
             ans = opt.differential_evolution(self.models[name][1],bounds,args=(self.temp,self.rate,self.xdata,self.ydata,),
                                              strategy='best1bin',disp=True)
             error = np.sqrt(sum(np.square(self.models[name][0](ans.x,self.temp,self.rate,self.xdata)-self.ydata))/len(self.xdata))
         else:
             ans = opt.differential_evolution(self.models[name][1],bounds,args=(self.xdata,self.ydata,),disp=True)
             error = np.sqrt(sum(np.square(self.models[name][0](ans.x,self.xdata)-self.ydata))/len(self.xdata))
         print('Model:', name)
         print('Params:', ans.x)
         print('Error:', error)
         return [ans.x,error]
     else:
         return None
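Both branches above compute the same root-mean-square error; a compact equivalent using np.mean (a sketch, not part of the original class):

import numpy as np

def rmse(pred, ydata):
    """Root-mean-square error between model predictions and observations."""
    return np.sqrt(np.mean(np.square(np.asarray(pred) - np.asarray(ydata))))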
    def test_gh_4511_regression(self):
        # This modification of the differential evolution docstring example
        # uses a custom popsize that had triggered an off-by-one error.
        # Because we do not care about solving the optimization problem in
        # this test, we use maxiter=1 to reduce the testing time.
        bounds = [(-5, 5), (-5, 5)]
        # result = differential_evolution(rosen, bounds, popsize=1815,
        #                                 maxiter=1)

        # the original issue arose because of rounding error in arange, with
        # linspace being a much better solution. 1815 is quite a large popsize
        # to use and results in a long test time (~13s). I used the original
        # issue to figure out the lowest number of samples that would cause
        # this rounding error to occur, 49.
        differential_evolution(rosen, bounds, popsize=49, maxiter=1)
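The rounding issue described in the comments comes from generating a fixed number of samples with a floating-point step; np.linspace pins the sample count, while np.arange can be off by one. A small sketch of the difference:

import numpy as np

# arange with a float step may yield one sample more or fewer than expected...
print(np.arange(0, 1, 1 / 49).size)   # 49 or 50, depending on rounding
# ...while linspace always returns exactly the requested count
print(np.linspace(0, 1, 49).size)     # 49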
Example #4
 def improve_emulator_for_lnlik(self,swmm,improvement_steps,design):
     import scipy.optimize as opt
     # from multiprocessing import Pool
     j=0
     # threads=8
     while j<improvement_steps:
         ret = opt.differential_evolution(self.lnprob,
                                          bounds=list(zip(self.lower_bounds,
                                                          self.upper_bounds)),
                                          args=[False],
                                          disp=True, popsize=20, maxiter=20,
                                          polish=False)
         pars=ret.x[0:8]
         if cf.closest_distance(self.result_producing_thing.dp,pars)<0.5:
             pars=design.pars_all[self.ini_dd+self.added_counter]
             swmm.result=design.data_all[self.ini_dd+self.added_counter]
             self.added_counter+=1
         else:
             swmm.run(pars)
             with open("candidates.dat", 'ab') as file:
                 np.savetxt(file,pars,fmt='%10.5f', newline=' ')
             with open("candidates.dat", 'a') as file:
                 file.writelines("\n")
         self.result_producing_thing.dd=np.vstack((self.result_producing_thing.dd,swmm.result))
         self.result_producing_thing.dp=np.vstack((self.result_producing_thing.dp,
                                             pars))
         self.result_producing_thing.condition()
         j+=1
def minimize_waveform_only(r, phi, z, scale, t0, smooth,  wf,):
#  result = op.minimize(neg_lnlike_wf, [r, phi, z, scale,t0,  smooth, esmooth], args=(wf) ,method="Powell")

  bounds = [ (0, detector.detector_radius), (0, np.pi/4), (0, detector.detector_length), (scale/1.2, scale*1.2), (wf.t0Guess - 15, wf.t0Guess +10), (0, 20)   ]
  result = op.differential_evolution(neg_lnlike_wf, bounds, args=(wf,), polish=False, maxiter=100)
  
  return result
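scipy forwards `args` to the objective as extra positional arguments, so the idiomatic spelling is a one-element tuple rather than a bracketed list; a self-contained sketch of the convention:

from scipy.optimize import differential_evolution

def objective(x, data):
    # `data` arrives through the `args` tuple
    return (x[0] - data) ** 2

res = differential_evolution(objective, [(-10, 10)], args=(3.0,))
print(res.x)  # ~ [3.]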
Example #6
    def run(self, func=None):
        """Run the parameter estimation.

        The data set earlier from a file is compared with the simulated data
        in the process of parameter estimation.

        Args:
            func: An optional argument with default value (None), in which case
                scipy's differential_evolution is used. Users can instead provide
                a reference to their own optimizer function.


        Returns:
            The value of the parameter(s) estimated by the function provided.

        .. sectionauthor:: Shaik Asifullah <*****@*****.**>


        """

        self._parameter_names = list(self.bounds.keys())
        self._parameter_bounds = list(self.bounds.values())
        self._model_roadrunner = te.loada(self.model.model)
        x_data = self.data[:, 0]
        y_data = self.data[:, 1:]
        arguments = (x_data, y_data)

        if func is None:
            result = differential_evolution(self._SSE, self._parameter_bounds, args=arguments)
        else:
            result = func(self._SSE, self._parameter_bounds, args=arguments)
        return result.x
def optimize():
    import numpy as np
    from scipy.optimize import differential_evolution

    # `eq` is the objective function defined elsewhere in the module
    bounds = [(0, 1), (0, 5), (20, 21)]
    result = differential_evolution(eq, bounds)
    print(result)
 def __tune__(self):
     if self.minimizer == Minimizer.DifferentialEvolution:
         bounds = [
             self.spectralRadiusBound,
             self.inputScalingBound,
             self.reservoirScalingBound,
             self.leakingRateBound,
         ]
         result = optimize.differential_evolution(self.__reservoirTrain__, bounds=bounds)
         print("The Optimization results are :" + str(result))
         return result.x[0], result.x[1], result.x[2], result.x[3]
     else:
         bounds = [
             self.spectralRadiusBound,
             self.inputScalingBound,
             self.reservoirScalingBound,
             self.leakingRateBound,
         ]
         minimizer_kwargs = {"method": "TNC", "bounds": bounds, "options": {"eps": 0.005}}
         mytakestep = ParameterStep()
         result = optimize.basinhopping(
             self.__reservoirTrain__,
             x0=self.initialGuess,
             minimizer_kwargs=minimizer_kwargs,
             take_step=mytakestep,
             stepsize=0.005,
         )
         print("The Optimization results are :" + str(result))
         return result.x[0], result.x[1], result.x[2], result.x[3]
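`ParameterStep` is not defined in this snippet; basinhopping's `take_step` accepts any callable mapping the current `x` to a proposed one, optionally exposing a `stepsize` attribute so the built-in step-size adaptation can tune it. A minimal sketch:

import numpy as np

class ParameterStep(object):
    """Uniform random displacement with a tunable stepsize (sketch)."""

    def __init__(self, stepsize=0.005):
        self.stepsize = stepsize  # basinhopping adjusts this attribute

    def __call__(self, x):
        return x + np.random.uniform(-self.stepsize, self.stepsize, np.shape(x))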
Example #9
def estimate_de( peaks, sizes ):

    f = ZFunc( peaks, sizes, [], estimate=True )
    bounds = [ (0.01, 0.5), (-275, 75) ]


    niter = 0
    results = []

    while niter < 3:

        #prev_rss = rss

        res = differential_evolution(f, bounds, tol=1e-5, mutation=(0.3, 1.7),
                popsize=45, recombination=0.5, strategy='rand1bin')

        pairs, final_rss = f.get_pairs(res.x)
        pairs.sort()
        rtimes, bpsizes = zip( *pairs)
        zres = estimate_z(rtimes, bpsizes, 1)

        niter += 1
        cerr('I: DE iter: %2d  - pairs: %2d  - Cur RSS: %6.2f' % (niter, len(pairs), zres.rss))
        results.append( (zres, pairs ) )

        if zres.rss < len(bpsizes) * 1.0:
            break

    results.sort( key = lambda x: x[0].rss )
    zres, pairs = results[0]

    plot(f.rtimes, f.sizes, zres.z, pairs)

    return pairs, zres.z
def hausdorff_distance_2D(a, b, rotation=False, rotation_pivot=False,
                          rotation_limits=[-math.pi/4., math.pi/4.]):
    """Compare a vector data set 'a' to a vector data set 'b'."""

    def find_d_rotated(beta):
        """Find the Hausdorff distance after rigidly rotating 'b' by beta."""
        beta = beta[0]  # differential_evolution passes a length-1 array
        c_beta = math.cos(beta)
        s_beta = math.sin(beta)
        x_rotated = []
        y_rotated = []
        for i in range(len(b['x'])):
            x = b['x'][i] - x_pivot
            y = b['y'][i] - y_pivot
            x_rotated.append(c_beta*x - s_beta*y + x_pivot)
            y_rotated.append(s_beta*x + c_beta*y + y_pivot)
        b_rotated = {'x': x_rotated, 'y': y_rotated}
        return find_d(a, b_rotated)

    if rotation is False and rotation_pivot is False:
        return find_d(a, b)
    else:
        # rotate 'b' about the pivot and minimize the distance over the angle beta
        x_pivot = rotation_pivot[0]
        y_pivot = rotation_pivot[1]

        beta_bounds = [(rotation_limits[0], rotation_limits[1])]

        result = differential_evolution(find_d_rotated, beta_bounds)
        return result.fun
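`find_d` itself is not included in this excerpt; a minimal symmetric Hausdorff distance over the {'x': ..., 'y': ...} point sets, sketched with scipy:

import numpy as np
from scipy.spatial.distance import directed_hausdorff

def find_d(a, b):
    """Symmetric Hausdorff distance between two {'x', 'y'} point sets (sketch)."""
    u = np.column_stack((a['x'], a['y']))
    v = np.column_stack((b['x'], b['y']))
    return max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])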
    def test_L6(self):
        # Lampinen ([5]) test problem 6
        def f(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            fun = (x[1]-10)**3 + (x[2] - 20)**3
            return fun

        def c1(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return [(x[1]-5)**2 + (x[2] - 5)**2 - 100,
                    -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81]

        N = NonlinearConstraint(c1, 0, np.inf)
        bounds = [(13, 100), (0, 100)]
        constraints = (N)
        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                     constraints=constraints, tol=1e-7)
        x_opt = (14.095, 0.84296)
        f_opt = -6961.814744

        assert_allclose(f(x_opt), f_opt, atol=1e-6)
        assert_allclose(res.fun, f_opt, atol=0.001)
        assert_allclose(res.x, x_opt, atol=1e-4)
        assert res.success
        assert_(np.all(np.array(c1(res.x)) >= 0))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
    def test_L5(self):
        # Lampinen ([5]) test problem 5

        def f(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) /
                   (x[1]**3*(x[1]+x[2])))
            return -fun  # maximize

        def c1(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return [x[1]**2 - x[2] + 1,
                    1 - x[1] + (x[2]-4)**2]

        N = NonlinearConstraint(c1, -np.inf, 0)
        bounds = [(0, 10)]*2
        constraints = (N)

        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                     constraints=constraints)

        x_opt = (1.22797135, 4.24537337)
        f_opt = -0.095825
        print(res)
        assert_allclose(f(x_opt), f_opt, atol=2e-5)
        assert_allclose(res.fun, f_opt, atol=1e-4)
        assert res.success
        assert_(np.all(np.array(c1(res.x)) <= 0))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
    def test_L9(self):
        # Lampinen ([5]) test problem 9

        def f(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return x[1]**2 + (x[2]-1)**2

        def c1(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            return [x[2] - x[1]**2]

        N = NonlinearConstraint(c1, [-.001], [0.001])

        bounds = [(-1, 1)]*2
        constraints = (N)
        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
                                     constraints=constraints)

        x_opt = [np.sqrt(2)/2, 0.5]
        f_opt = 0.75

        assert_allclose(f(x_opt), f_opt)
        assert_allclose(np.abs(res.x), x_opt, atol=1e-3)
        assert_allclose(res.fun, f_opt, atol=1e-3)
        assert res.success
        assert_(np.all(np.array(c1(res.x)) >= -0.001))
        assert_(np.all(np.array(c1(res.x)) <= 0.001))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
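The three Lampinen tests above encode g(x) >= 0, g(x) <= 0, and an equality-with-tolerance purely through the NonlinearConstraint bounds; condensed into one sketch:

import numpy as np
from scipy.optimize import NonlinearConstraint

def c1(x):  # stands in for any constraint function returning one or more values
    return [x[0] + x[1]]

ge = NonlinearConstraint(c1, 0, np.inf)      # feasible iff c1(x) >= 0  (test_L6)
le = NonlinearConstraint(c1, -np.inf, 0)     # feasible iff c1(x) <= 0  (test_L5)
eq = NonlinearConstraint(c1, -0.001, 0.001)  # |c1(x)| <= 0.001         (test_L9)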
Example #14
    def fit(self):
        # Remove baseline

        i_x = linspace(self.l, self.u, 100)
        peak_counts = self.peak_counts - self._eval_baseline(self.peak_ind)

        i_y = interp1d(
            self.peak_ind,
            peak_counts,
            kind="cubic",
            bounds_error=False,
            fill_value=0.0
        )(i_x)

        def fobj(C):
            return ((self._gaussian(i_x, *C) - i_y) ** 2).sum()

        cal = sop.differential_evolution(
            fobj,
            (
                (0.5 * peak_counts.max(), 1.5 * peak_counts.max()),
                (self.l, self.u),
                (1, self.u - self.l)
            ),
            tol=.0001,
            popsize=30,
        )

        if not cal.success:
            print("Fit failed")
        else:
            self.fit_params = cal.x
Example #15
def get_optimal_splines(events, optimise_bin_edges, k=1):

    cut_events = {}
    cut_energies, ga_cuts, xi_cuts = [], [], []
    for elow, ehigh, emid in zip(optimise_bin_edges[:-1],
                                 optimise_bin_edges[1:],
                                 np.sqrt(optimise_bin_edges[:-1] *
                                         optimise_bin_edges[1:])):

        for key in events:
            cut_events[key] = events[key][
                (events[key]["MC_Energy"] > elow) &
                (events[key]["MC_Energy"] < ehigh)]

        res = optimize.differential_evolution(
            cut_and_sensitivity,
            bounds=[(.5, 1), (0, 0.5)],
            maxiter=1000, popsize=10,
            args=(cut_events,
                  np.array([elow / energy_unit,
                            ehigh / energy_unit]) * energy_unit,
                  alpha)
        )

        if res.success:
            cut_energies.append(emid.value)
            ga_cuts.append(res.x[0])
            xi_cuts.append(res.x[1])

    spline_ga = interpolate.splrep(cut_energies, ga_cuts, k=k)
    spline_xi = interpolate.splrep(cut_energies, xi_cuts, k=k)

    return (spline_ga, ga_cuts), (spline_xi, xi_cuts)
 def test_gh_4511_regression(self):
     # This modification of the differential evolution docstring example
     # uses a custom popsize that had triggered an off-by-one error.
     # Because we do not care about solving the optimization problem in
     # this test, we use maxiter=1 to reduce the testing time.
     bounds = [(-5, 5), (-5, 5)]
     result = differential_evolution(rosen, bounds, popsize=1815, maxiter=1)
Example #17
    def _optimization_diffevo(self, x0, minimizer_kwargs, niter):  # pragma: no cover
        """

        Parameters
        ----------
        x0 : np.ndarray
            An optimization vector.
        minimizer_kwargs : dict
            A dictionary of keyword arguments to pass to the optimizer.
        niter : int
            If applicable, the number of iterations to make.

        Returns
        -------
        result : OptimizeResult, None
            The result of the optimization. Returns None if the optimization failed.
        """
        if 'constraints' in minimizer_kwargs:
            msg = "Differential Evolution can only be used in unconstrained optimization."
            raise OptimizationException(msg)

        if niter is None:
            niter = self._default_hops

        result = differential_evolution(func=self.objective,
                                        bounds=minimizer_kwargs['bounds'],
                                        maxiter=minimizer_kwargs['options']['maxiter'],
                                        popsize=niter,
                                        tol=minimizer_kwargs['options']['ftol'],
                                        )

        if result.success:
            return result
Example #18
def run_test(test, args=(), g_args=()):
    if test is not test10_1:
        res = tgo(test.f, test.bounds, args=args, g_cons=test.g,
                  g_args=g_args)

    # Exceptional cases
    if test == test5_1:
        # Remove the extra minimizer found in this test
        # (note all minima is at the global 0.0 value)
        res.xl = [res.xl[0], res.xl[1],
                  res.xl[3], res.xl[2]]
        res.funl = res.funl[:4]

    if test == test10_1:
        res = tgo(test.f, test.bounds, args=args, g_cons=test.g,
                  g_args=g_args, n=1000)

    print("=" * 100)
    print("=" * 100)
    print("Topographical Global Optimization: ")
    print("-" * 34)
    print(res)

    from scipy.optimize import differential_evolution, basinhopping
    res2 = differential_evolution(test.f, test.bounds, args=args)
    print("=" * 100)
    print("Differential Evolution: ")
    print("-" * 23)
    print(res2)

    print("=" * 100)
    print("Basinhopping : (x_0 = numpy.mean(bounds,axis=1)) ")
    x_0 = numpy.mean(test.bounds, axis=1)
    minimizer_kwargs = {'args': args}
    res3 = basinhopping(test.f, x_0, minimizer_kwargs=minimizer_kwargs)
    print("-" * 49)
    print(res3)
    # Global minima
    if test.expected_x is not None:
        numpy.testing.assert_allclose(res.x, test.expected_x,
                                      rtol=test_atol,
                                      atol=test_atol)

    # (Optional tests)
    if test.expected_fun is not None:
        numpy.testing.assert_allclose(res.fun,
                                      test.expected_fun,
                                      atol=test_atol)

    if test.expected_xl is not None:

        numpy.testing.assert_allclose(res.xl,
                                      test.expected_xl,
                                      atol=test_atol)

    if test.expected_funl is not None:
        numpy.testing.assert_allclose(res.funl,
                                      test.expected_funl,
                                      atol=test_atol)
Example #19
    def fit_model(self, dataX, datarp, datacp, differential_evolution=True, TNC=True, SLSQP=True, verbose=True):
        '''Fit the function using one or multiple optimization methods in serial''' 
    
        def diffsq(params):
            self.setparams(params)
            
            (rp,cp) = self(dataX)
            
            diffrp = (datarp - rp)/datarp
            diffcp = (datacp - cp)/datacp 
            
            #Ldatacp = datacp/(datarp**2 + datacp**2)
            #Lfitcp =  cp/(rp**2 + cp**2)
            #diffLcp = (Ldatacp - Lfitcp)/Ldatacp
            
            return dot(diffcp, diffcp) + dot(diffrp, diffrp) #+ dot(diffLcp,diffLcp)

        def costfun(params):
            """Wrapper function neede for the optimization method

            Args: 
                params: a list of parameters for the model
            Returns: 
                The cost (real scalar)
            """
            Error = diffsq(params) 
            
            fsumpenalty = datarp[0] - self.fsum()
            
            return Error + fsumpenalty**2 

        start_t = time.time()

        params = self.getparams()
        bounds = self.getbounds()

        if (differential_evolution == True):
            resultobject = optimize.differential_evolution(costfun,bounds,maxiter=2000)  
            if (verbose == True): print("diff. evolv. number of iterations = ", resultobject.nit)

        if (TNC == True):
            resultobject = optimize.minimize(costfun, x0=params, bounds=bounds, method='TNC')
            if (verbose == True): print("TNC number of iterations = ", resultobject.nit)
        
        if (SLSQP == True):
            resultobject = optimize.minimize(costfun, x0=params, bounds=bounds, method='SLSQP')
            if (verbose == True): print("SLSQP number of iterations = ", resultobject.nit)

        #mybounds = MyBounds(bounds=array(bounds))
        #ret = basinhopping(diffsq, params, niter=10,accept_test=mybounds)

        params = self.getparams() #get updated params
        
        end_t = time.time()
        m, s = divmod(end_t - start_t, 60)
        h, m = divmod(m, 60)
        print("Fit completed in %02d hr %02d min %02d sec" % (h, m, s))

        self.RMS_error = sqrt(diffsq(params)/(2*len(dataX))) #Store RMS error
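A common variant of this serial pattern (a sketch, not what the snippet above does, since each stage here restarts from the stored params) is to seed each local method with the previous stage's result:

from scipy import optimize

res = optimize.differential_evolution(costfun, bounds, maxiter=2000)
res = optimize.minimize(costfun, x0=res.x, bounds=bounds, method='TNC')
res = optimize.minimize(costfun, x0=res.x, bounds=bounds, method='SLSQP')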
Example #20
def optimize():
	import scipy.optimize as opt
	bounds = [slice(0.1, 20, 1), slice(0, 20, 1)]
	# rs = opt.brute(lambda v: solve(v)[0], bounds, full_output=True)
	bounds = [(0.1, 20), (0, 20)]
	rs = opt.differential_evolution(lambda v: solve(v)[0], bounds)

	print(rs)
Example #21
def _min_max_band(args):
    """
    Min and max values at `idx`.

    Global optimization to find the extrema per component.

    Parameters
    ----------
    args: list
        It is a list of an idx and other arguments as a tuple:
            idx : int
                Index value of the components to compute
        The tuple contains:
            band : list of float
                PDF values `[min_pdf, max_pdf]` to be within.
            pca : statsmodels Principal Component Analysis instance
                The PCA object to use.
            bounds : sequence
                ``(min, max)`` pair for each components
            ks_gaussian : KDEMultivariate instance

    Returns
    -------
    band : tuple of float
        ``(max, min)`` curve values at `idx`

    """
    idx, (band, pca, bounds, ks_gaussian, use_brute, seed) = args
    if have_de_optim and not use_brute:
        max_ = differential_evolution(_curve_constrained, bounds=bounds,
                                      args=(idx, -1, band, pca, ks_gaussian),
                                      maxiter=7, seed=seed).x
        min_ = differential_evolution(_curve_constrained, bounds=bounds,
                                      args=(idx, 1, band, pca, ks_gaussian),
                                      maxiter=7, seed=seed).x
    else:
        max_ = brute(_curve_constrained, ranges=bounds, finish=fmin,
                     args=(idx, -1, band, pca, ks_gaussian))

        min_ = brute(_curve_constrained, ranges=bounds, finish=fmin,
                     args=(idx, 1, band, pca, ks_gaussian))

    band = (_inverse_transform(pca, max_)[0][idx],
            _inverse_transform(pca, min_)[0][idx])
    return band
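`have_de_optim` is presumably set by an import guard at module level, since `differential_evolution` was added to scipy later than `brute`; a sketch of such a guard:

from scipy.optimize import brute, fmin

try:
    from scipy.optimize import differential_evolution
    have_de_optim = True
except ImportError:
    have_de_optim = False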
Example #22
File: gap.py Project: joselado/pygra
  def opte(f):
    """Optimize the eigenvalues"""
    from scipy.optimize import differential_evolution
    from scipy.optimize import minimize
    bounds = [(0.,1.) for i in range(h.dimensionality)]
    x0 = np.random.random(h.dimensionality)  # initial vector
    res = differential_evolution(f,bounds=bounds)
#    res = minimize(f,res.x,method="Powell")
    return f(res.x)
    def bench_run(self, **minimizer_kwargs):
        """
        do an optimization test starting at x0 for all the optimizers
        """
        kwargs = self.minimizer_kwargs

        if hasattr(self.fun, "temperature"):
            kwargs["T"] = self.function.temperature
        if hasattr(self.fun, "stepsize"):
            kwargs["stepsize"] = self.function.stepsize
        minimizer_kwargs = {"method": "L-BFGS-B"}
        x0 = self.get_random_configuration()

        # basinhopping - with gradient
        if hasattr(self.function, 'der'):
            minimizer_kwargs['jac'] = True
            t0 = time.time()
            res = basinhopping(
                self.energy_gradient, x0, accept_test=self.accept_test,
                callback=self.stop_criterion, niter=1000,
                minimizer_kwargs=minimizer_kwargs,
                **kwargs)
            t1 = time.time()
            res.success = True
            if not self.found_target(res):
                res.success = False
            self.add_result(res, t1 - t0, 'basinhopping')

        # basinhopping - no gradient
        x0 = self.get_random_configuration()
        minimizer_kwargs['jac'] = False
        t0 = time.time()

        res = basinhopping(
            self.fun, x0, accept_test=self.accept_test,
            callback=self.stop_criterion, niter=1000,
            minimizer_kwargs=minimizer_kwargs,
            **kwargs)

        t1 = time.time()
        res.success = True
        if not self.found_target(res):
            res.success = False
        self.add_result(res, t1 - t0, 'basinhopping - no gradient')

        # differential_evolution
        t0 = time.time()

        res = differential_evolution(self.fun,
                                     self.bounds,
                                     popsize=20,
                                     polish=True)

        t1 = time.time()
        if not self.found_target(res):
            res.success = False
        self.add_result(res, t1 - t0, 'differential_evolution')
def minimize_waveform_only_nosmooth(r, phi, z, scale, t0,  wf,):
  #result = op.minimize(neg_lnlike_wf_nosmooth, [r, phi, z, scale,t0], args=(wf) ,method="Nelder-Mead")
  #result = op.basinhopping(neg_lnlike_wf_nosmooth, [r, phi, z, scale,t0], niter=10, minimizer_kwargs={"args":wf, "method": "Nelder-Mead"})

  bounds = [ (0, detector.detector_radius), (0, np.pi/4), (0, detector.detector_length), (scale/1.2, scale*1.2), (0, 15)   ]
  result = op.differential_evolution(neg_lnlike_wf_nosmooth, bounds, args=(wf,), polish=False, maxiter=100)
  
  
  return result
Example #25
def conditional_firing_probability(train1, train2, min_points=0,
                                   method=None):
    """ Calculate the conditional firing probability between two lists of
        spike times

        train1: numpy array of spike times in msec
        train2: numpy array of spike times in msec
        threshold: minimum amplitude of fit
        delay: minimum peak time of fit in msec

        For LeFeber the fit was performed with the following limits
            0 <= max < 1
            0 <= delay <= 500
            1 <= width <= 100
            0 <= offset <= 0.5

        The following was required to declare a connection
            max/offset >= 2
            5<= delay <= 250
            width > 5

    """
    class Empty_Fit():
        def __init__(self):
            self.x = np.zeros(4)
            self.x[-1] = 1

    psth = np.zeros(500)
    max2 = train2.shape[0]
    indices = np.searchsorted(train2, train1, side='right')
    for i, index in enumerate(indices):
        j = index
        while (j < max2) and (train2[j] - train1[i] < 500):
            psth[int(train2[j] - train1[i])] += 1
            j += 1

    if np.sum(psth) < min_points:
        return Empty_Fit(), psth

    psth /= train1.shape[0]
    xdata = np.arange(500)
    offset = np.average(psth)
    maxi = np.max(psth) - offset
    delay = np.argmax(psth)
    width = delay/2 if delay/2 > 5 else 5
    p0 = (maxi, delay, width, offset)
    bounds = ((0, 1), (0, 500), (1, 100), (0, 0.5))
    if method == 'DE':
        fit = opt.differential_evolution(_cfp_cost, bounds=bounds, seed=1,
                                         args=(psth, xdata), maxiter=100,
                                         polish=False)
        p0 = fit.x

    fit = opt.minimize(_cfp_cost, p0, bounds=bounds, args=(psth, xdata),
                       method='L-BFGS-B')
    return fit, psth
Example #26
def maxEffGlobal(Ts, Tc, Ps, fs, X, n):
    myRange = [(0, 12)] * n
    myEg = optimize.differential_evolution(
        maxEff, myRange, args=(Ts, Tc, Ps, fs, X), maxiter=100000, popsize=15, tol=0.0001
    )
    if myEg["success"]:
        return myEg
    else:
        # this should return NaN probably
        return myEg
Example #27
def RandomSearch(P_Search_Alg, H_a, H_b, P_con, P_relay, per_s, per_c):
    CCF_beta_func=lambda x: CCF_sumrate_compute(vector(RR, [1,]+list(x[0:L-1])), H_a, H_b, P_con, P_relay, per_s, per_c)
    Pranges=((0.1,betaScale_max),)*(L-1)
    if P_Search_Alg=='differential_evolution':
        ResSearch=optimize.differential_evolution(CCF_beta_func,Pranges)
        beta_opt=ResSearch.x
        sum_rate_opt=-ResSearch.fun
    else:
        raise Exception("error: Not Such Search Algorithm!")
    return beta_opt, sum_rate_opt
Example #28
def opt_differential_evolution(parametersList):
    # Maximum number of iterations of the algorithm
    max_iter=10
    # Set the bounds of each parameter
    bounds=list()
    for i in range(0,len(parametersList)):
        bounds.append((parametersList[i][1],parametersList[i][2]))
    # TODO : change the criterion of convergence
    scipy_res=differential_evolution(deepov_func,bounds,args=parametersList,maxiter=max_iter,disp=True,polish=False)
    return scipy_res
Example #29
 def estimate_hyperpars(self):
     import scipy.optimize as opt
     lower=np.array([0,0,0])
     upper=np.array([0.001,10,10])
     ret = opt.differential_evolution(self.objective_hyperpars,
                                      bounds=list(zip(lower, upper)),
                                      disp=True, popsize=100, maxiter=200,
                                      polish=False)
     return ret
 def test_infinite_objective_function(self):
     # Test that there are no problems if the objective function
     # returns inf on some runs
     def sometimes_inf(x):
         if x[0] < .5:
             return np.inf
         return x[1]
     bounds = [(0, 1), (0, 1)]
     x_fit = differential_evolution(sometimes_inf,
                                    bounds=bounds,
                                    disp=False)
Example #31
def optimizer(func, x0, args, disp):
    res = optimize.differential_evolution(func, bounds, args, tol=1e-1)
    return res.x
x0 = [4.14826750e-04, 4.99870249e+01, 2.00000000e-01, 6.52069017e-02,8.55311554e-01]
temp = 333.0

with open(str(temp)+"_dat_fn1.txt","w+") as p:
    with open(str(temp)+"_results_fn1.txt","w+") as f:
        
        f.write("x0 = tau_c,dj,lamb,ks,kt\n")

        temp_dat = np.loadtxt('t_333.txt',delimiter=',')

        lifetime_exp_zero = 5.752641121675911
        lifetime_exp_res = 1.4174868758031038
        lifetime_exp_high = 6.347652948471806

        J = 28.95
        
        res =  differential_evolution(lambda x1,x2,x3,x4,x5,x6,x7: calc_yield_parallel(*x1,x2,x3,x4,x5,x6,x7),bounds=bnds,args=(temp,temp_dat,lifetime_exp_zero,lifetime_exp_res,lifetime_exp_high,J), maxiter= 10)
        #res = minimize(lambda x1,x2,x3,x4,x5,x6,x7: calc_yield_parallel(*x1,x2,x3,x4,x5,x6,x7),x0,args=(temp,temp_dat,lifetime_exp_zero,lifetime_exp_res,lifetime_exp_high,J),method='SLSQP',bounds=bnds)
        
        print(res)
        
        f.write("\n")
        f.write("x0 for T=333k\n")
        f.write(str(res)+"\n")
        for i in range(0,len(res.x)):
            p.write(str(res.x[i])+",")
        p.write(str(temp)+"\n")
        
        


def scale(x, scale=5):
    return cv2.resize(x,
                      None,
                      fx=scale,
                      fy=scale,
                      interpolation=cv2.INTER_AREA)


while True:
    bounds = [(0, shape[0] - 1), (0, shape[1]), (0, 255), (0, 255),
              (0, 255)] * d
    result = differential_evolution(optimize,
                                    bounds,
                                    maxiter=iters,
                                    popsize=popsize,
                                    tol=1e-5,
                                    callback=callback)

    adv_img = perturb(result.x)
    inp = Variable(torch.from_numpy(preprocess(adv_img)).float().unsqueeze(0))
    out = model(inp)
    prob = softmax(out.data.numpy()[0])
    print('Prob [%s]: %f --> Prob[%s]: %f' %
          (cifar10_class_names[pred_orig], prob_orig[pred_orig],
           cifar10_class_names[pred_adv], prob_adv))

    cv2.imshow('adversarial image', scale(adv_img[..., ::-1]))

    key = 0
    while True:
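`optimize`, `perturb`, and `callback` are defined elsewhere in this script. Given the bounds layout above, each group of five numbers encodes one pixel edit as (row, col, R, G, B), the usual one-pixel-attack parameterization; a sketch of `perturb` under that assumption (`img_orig` is a hypothetical stand-in for the image being attacked):

import numpy as np

img_orig = np.zeros((32, 32, 3))  # stand-in for the original CIFAR-10 image

def perturb(xs):
    # apply d pixel edits, each encoded as (row, col, r, g, b)
    img = img_orig.copy()
    for row, col, r, g, b in np.asarray(xs).reshape(-1, 5):
        img[int(row), int(col)] = (r, g, b)
    return img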
Example #34
def stsc(X, w, C, eigen_gap=0, verbose=0):
    ''' Self-tuning spectral clustering
    affinity_matrix: locally scaled affinity matrix
    C: largest possible group number
    eigen_gap: whether to use gap of eigenvalues to select candinates for STSC
    labels: cluster labels assigned to points
    Reference:
    "Zelnik-Manor, L. and Perona, P. Self-tuning spectral clustering. In NIPS, pp. 1601-1608. 2005."
    '''

    if eigen_gap:
        # Get candidates for c_best, based on eigenvalues of L
        c_range = set([
            idx + 1
            for idx in find_gap(w[:C + 1], multiple=True, verbose=verbose)[0]
        ])


#        if 1 in c_range:
#            c_range = c_range - set({1})
#            c_range.add(2)
    else:
        c_range = range(1, C + 1)

    min_align = np.inf
    for c in c_range:
        # For each possible group number c recover the rotation which best aligns X's columns
        # with the canonical coordinate system

        if verbose == 2:
            print('c =', c)

        if c == 1:
            # In the ideal case, the end points of eigenvectors should form a vertical line in the 1st dim,
            # i.e., var(X[:, 0]) = 0
            J_opt = np.var(X[:, 0])
            theta_opt = 0
        else:
            # Optimize alignment cost over theta
            K = c * (c - 1) // 2

            # Visualize cost function and its gradient
            #            if K == 1:
            #                thetas = np.arange(-np.pi/2, np.pi/2, np.pi/100)
            #                costs = np.zeros_like(thetas)
            #                grads = np.zeros_like(thetas)
            #                for i in range(len(thetas)):
            #                    costs[i] = align_cost(np.array([thetas[i]]), X[:,:2])
            #                    grads[i] = align_grad(np.array([thetas[i]]), X[:,:2])
            #                plt.figure()
            #                plt.subplot(211)
            #                plt.plot(thetas, costs)
            #                plt.title('cost')
            #                plt.subplot(212)
            #                plt.plot(thetas, grads)
            #                plt.title('gradient')
            #                plt.show()

            res = differential_evolution(align_cost,
                                         bounds=((-np.pi / 4, np.pi / 4), ) *
                                         K,
                                         args=(X[:, :c], ),
                                         popsize=100,
                                         tol=1e-8)
            J_opt = np.clip(res.fun, np.finfo(float).eps, np.inf)
            theta_opt = res.x

        if verbose == 2:
            print('J_opt =', J_opt)
            print('theta_opt =', theta_opt)

        if J_opt <= min_align:
            min_align = J_opt
            c_best = c
            theta_best = theta_opt

    return c_best, theta_best, min_align
Example #35
]
ref_trajectory = Trajectory(variables_ref)
ref_trajectory.solver()

tra_ref = ref_trajectory.getdisp()

np.savetxt('tra_ref.csv', tra_ref, fmt='%f', delimiter=',')

#Optimization = CostFunction([45.,45.,180.])
#print('TotalError: ',Optimization)
#-------------------------------------------------------------------------------------------------------------------
# Optimization
#-------------------------------------------------------------------------------------------------------------------
Opt_SimplifiedModel = differential_evolution(CostFunction,
                                             bounds,
                                             strategy=Strategy,
                                             maxiter=MaxGenerations,
                                             popsize=Np,
                                             tol=0.01,
                                             mutation=F,
                                             recombination=CR,
                                             polish=SwitchtoGradients,
                                             init='latinhypercube',
                                             callback=StopOptimization)
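# `StopOptimization` and `MinErrorList` are defined elsewhere in this script;
# SciPy's DE callback receives (xk, convergence) and halts the run by returning
# True. A compatible sketch (the stopping threshold is hypothetical):
#
#     MinErrorList = []
#     def StopOptimization(xk, convergence):
#         MinErrorList.append(CostFunction(xk))  # record cost each generation
#         return MinErrorList[-1] < 1e-3         # True stops the optimization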

np.savetxt('ErrorTotal.csv', MinErrorList, fmt='%f', delimiter=',')
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
#Create a Summary file for the Optimization Process
end = time.time()
Duration = (end - start) / 60. / 60.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(45, -45)
ax.plot_surface(xgrid, ygrid, eggholder(xy), cmap='terrain')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('eggholder(x, y)')
plt.show()

results = dict()
results['shgo'] = optimize.shgo(eggholder, bounds)
results['shgo']
results['DA'] = optimize.dual_annealing(eggholder, bounds)
results['DA']
results['DE'] = optimize.differential_evolution(eggholder, bounds)
results['BH'] = optimize.basinhopping(eggholder, bounds)
results['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=200, iters=5,
                                      sampling_method='sobol')

fig = plt.figure()
ax = fig.add_subplot(111)
im = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',
               cmap='gray')
ax.set_xlabel('x')
ax.set_ylabel('y')

def plot_point(res, marker='o', color=None):
    ax.plot(512+res.x[0], 512+res.x[1], marker=marker, color=color, ms=10)

plot_point(results['BH'], color='y')  # basinhopping           - yellow
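The `eggholder` function, `bounds`, and the `xy` grids used above follow the SciPy global-optimization tutorial; for reference, the function and bounds there are:

import numpy as np

def eggholder(x):
    return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0] / 2 + (x[1] + 47))))
            - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))))

bounds = [(-512, 512), (-512, 512)]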
def deconvolve(a, deltaF):
    #a: signal to deconvolve the natural linewidth from
    #deltaF: frequency range over which a resides
    peakSig = a.max()  #to properly scale the final result
    x = np.arange(a.shape[0])
    xDense = np.linspace(x[0], x[-1], num=251)
    a = a / a.max()
    # plt.scatter(x,a)

    dataAnalyzer = DataAnalyzer()
    b = dataAnalyzer.multi_Voigt(
        (xDense - (xDense.min() + xDense.max()) / 2) * deltaF / xDense.max(),
        0, 1, 0, 1e-10)
    b = b / b.max()
    # b=b[75:-75]
    aFunc = spi.Rbf(x, a, smooth=0.0)  #interp1d(x,a)
    a = aFunc(xDense)
    # plt.plot(xDense,a)

    window = 2 * int((a.shape[0] / 20) // 2) + 1
    for i in range(25):
        a = sps.savgol_filter(a, window, 2)
    a = a / a.max()

    # plt.plot(xDense,a)
    # plt.plot(b)
    # plt.show()

    M = generate_MConv(a, b)
    M = M / M.max()
    # test=np.asarray([1,1,1,1,2,0,1,1,0,1])
    # plt.plot(generate_Curve(test,a.shape[0]))
    # plt.show()

    @numba.njit(numba.float64(numba.float64[:]))
    def cost_Inner(c):
        cost = 0

        cost += 10 * np.abs(np.sum(c[c < 0]))  #sum up the negative values
        # c=c-c.min()
        c = c / c.max()
        aNew = M @ c
        aNew = aNew / aNew.max()
        cost += np.sum((aNew - a)**2)
        cost0 = 1.0
        #find any peaks in the data, besides the one single peak
        j = 0
        for i in range(0, c.shape[0] - 3):  # exclude the end
            probe = c[i:i + 3]
            # the center should not be a peak, except at the actual peak
            if np.argmax(probe) == 1:
                if j != 0:  # there should be at least one peak, so ignore the first time
                    cost += cost0  # add a small penalty
                j = 1
        return cost

    def cost(args):
        #args: the deviation from the original array
        c = generate_Curve(args, a.shape[0])
        # cFunc=spi.interp1d(np.linspace(0,a.shape[0],args.shape[0]),args)
        # c=cFunc(np.arange(0,a.shape[0]))
        return cost_Inner(c)

    bounds = []

    # for i in range(a.shape[0]):
    #     deltaLower=a[i] #the signal can be totally compensated, but not go below zero
    #     deltayMaxFact=.25 #Factor for setting upper bound. Much more efficient than adding same amount everywhere
    #     deltaUpper=deltayMaxFact*a[i]+.1
    #     bounds.append((-deltaLower,deltaUpper))
    for i in range(25):
        bounds.append((-1.0, 1.0))
    bestSol = None
    for i in range(5):
        sol = spo.differential_evolution(cost,
                                         bounds,
                                         maxiter=5000,
                                         disp=False,
                                         mutation=(.5, 1.5),
                                         recombination=.25,
                                         popsize=1,
                                         tol=0,
                                         workers=1,
                                         polish=True)
        if sol.fun < 1.0:
            bestSol = sol
            break
        else:
            if bestSol is None:
                bestSol = sol
            elif sol.fun < bestSol.fun:
                bestSol = sol
    # cFunc = spi.interp1d(np.linspace(0, a.shape[0], bestSol.x.shape[0]), bestSol.x)
    # c = cFunc(np.arange(0, a.shape[0]))
    c = generate_Curve(bestSol.x, a.shape[0])
    c = c * peakSig / c.max()
    # plt.plot(c/c.max())
    # window = 2 * int((a.shape[0] / 10) // 2) + 1
    # c=sps.savgol_filter(c,window,2)
    # j = 0
    # for i in range(0, c.shape[0] - 3):  # exclude the end
    #     probe = c[i:i + 3]
    #     if np.argmax(probe) == 1:  # center should not be peak, except at the actual peak
    #         if j != 0:  # There should be at least one peak, so ignore the first time
    #             print('fail')
    #         j = 1
    # plt.plot(c)
    # c=sps.savgol_filter(c,window,2)
    # c=sps.savgol_filter(c,window,2)

    # plt.plot(c/c.max())

    # aNew=M@c
    # aNew=aNew/aNew.max()
    # plt.plot(a)
    # plt.plot(aNew)
    # aNew=aNew/aNew.max()
    # print(np.sum((a-aNew)**2))

    # plt.show()

    cFunc = spi.Rbf(xDense, c)
    cDownSample = cFunc(x)
    return cDownSample
Example #38
 def execute(self, **de_options):
     ans = differential_evolution(self.list2kwargs(self.objective),
                                  self.bounds, **de_options)
     return self._pack_output(ans)
Example #39
    f1_range = [-20, -14]
    f2_range = [-14, 0.5]
    x1 = np.linspace(min(f1_range), max(f1_range), 1000)
    x2 = np.linspace(min(f2_range), max(f2_range), 1000)
    y = np.linspace(0, 0, 1000)

    problem = MOProblem(variables=varsl,
                        objectives=[f1, f2],
                        ideal=np.array([-20, -12]),
                        nadir=np.array([-14, 0.5]))

    from desdeo_mcdm.interactive.NIMBUS import NIMBUS
    from scipy.optimize import minimize, differential_evolution

    scalar_method = ScalarMethod(
        lambda x, _, **y: differential_evolution(x, **y),
        use_scipy=True,
        method_args={
            "polish": True,
            "disp": True
        })

    method = NIMBUS(problem, scalar_method)

    classification_request, plot_request = method.start()

    # print(classification_request.content.keys())
    # print(classification_request.content["message"])

    print(classification_request.content["objective_values"])
    with open('dataset/podaci_INTERVENTION.pkl', 'rb') as f2:
        list_inter = pickle.load(f2)

    country_atribute = pd.read_csv("dataset/country_atribute.csv", index_col=0)
    country_atribute = []

    bnd = ((-1, 0), ) * list_inter[0].iloc[:, 2:].shape[1] + ((0, 2), ) * 3

    if fit == 1:
        if opt == 'meta':
            res = optimize.differential_evolution(Obj_function,
                                                  bounds=bnd,
                                                  maxiter=iter_max,
                                                  popsize=pop_size,
                                                  args=(
                                                      country_atribute,
                                                      list_SIRM,
                                                      list_inter,
                                                  ),
                                                  disp=True,
                                                  tol=0.00001)
            np.save(
                "modeli/vektor_{}_{}_{}_{}".format(iter_max, pop_size, opt,
                                                   name), res.x)
        elif opt == 'neld':
            x0 = np.zeros(len(bnd))
            res = optimize.minimize(Obj_function,
                                    x0,
                                    args=(
                                        country_atribute,
                                        list_SIRM,
Example #41
    return np.round(res, decimals=0)
    #return np.array(x, dtype = 'int32')


# Since the gradient of a non-smooth function does not exist,
# the BFGS method is unsuitable here for finding the extremum
x_0 = 22.0
optimize_result = minimize(f, x0=x_0, method='BFGS')
if optimize_result['success']:
    draw_optimization_result(x_0, x, f(x), "BFGS optimization method",
                             optimize_result)
    print("optimize_result = {0}".format(optimize_result))

# write the function value for the first approximation to a file
with open('C:\\Lessons\\Course1Week3\\FuncMinimizing\\NonSmooth_Results.txt',
          'w') as file_obj:
    file_obj.writelines(str(np.round(optimize_result['fun'][0], 2)) + ' ')

bounds = [(1, 30)]

optimize_result = differential_evolution(f, bounds)
if optimize_result['success']:
    draw_optimization_result(bounds, x, f(x), "Differential evolution",
                             optimize_result)
    print("optimize_result = {0}".format(optimize_result))

# write the function value for the second approximation to a file
with open('C:\\Lessons\\Course1Week3\\FuncMinimizing\\NonSmooth_Results.txt',
          'a') as file_obj:
    file_obj.writelines(str(np.round(optimize_result['fun'], 2)) + ' ')
Example #42
                                      lc_event_fun=hm.ll_lc_event)
        elif use_model == 'ExpIDM':
            pguess = [40, 1, 1, 3, 10, 15]
            mybounds = [(20, 120), (.1, 5), (.1, 35), (.1, 20), (.1, 20),
                        (.1, 75)]
            cal = hc.make_calibration(curplatoon, meas, platooninfo, .1,
                                      hm.RelaxExpIDM)

        start = time.time()
        cal.simulate(pguess)
        print('time to compute loss is ' + str(time.time() - start))

        start = time.time()
        if use_method == 'BFGS':
            bfgs = sc.fmin_l_bfgs_b(cal.simulate,
                                    pguess,
                                    bounds=mybounds,
                                    approx_grad=1)  # BFGS
            print('time to calibrate is ' + str(time.time() - start) +
                  ' to find mse ' + str(bfgs[1]))
        elif use_method == 'GA':
            bfgs = sc.differential_evolution(cal.simulate,
                                             bounds=mybounds,
                                             workers=2)  # GA
            print('time to calibrate is ' + str(time.time() - start) +
                  ' to find mse ' + str(bfgs['fun']))

    plt.plot(cal.all_vehicles[0].speedmem)
    plt.ylabel('speed')
    plt.xlabel('time index')
Example #43
 def __tune__(self):
     bounds = [self.probabilityBound]
     result = optimize.differential_evolution(self.__ESNTrain__,
                                              bounds=bounds)
     print("\nThe optimal parameters for Erdos ESN:" + str(result.x))
     return result.x[0]
]
y0_seirpdq_known = S0, P0, R0, D0
# bounds_seirdaq = [(0, 1e-2), (0, 1), (0, 1), (0, 0.2), (0, 0.2), (0, 0.2)]

result_seirpdq = optimize.differential_evolution(
    # seirpdq_least_squares_error_ode,
    seirpdq_least_squares_error_ode_y0,
    bounds=bounds_seirpdq,
    # args=(data_time, [infected_individuals, dead_individuals], seirpdq_ode_solver, y0_seirpdq),
    args=(
        data_time,
        [infected_individuals, dead_individuals],
        seirpdq_ode_solver,
        y0_seirpdq_known,
        target_population,
    ),
    popsize=30,
    strategy="best1bin",
    tol=1e-5,
    recombination=0.95,
    mutation=0.6,
    maxiter=10000,
    polish=True,
    disp=True,
    seed=seed,
    callback=callback_de,
    workers=-1,
)

print(result_seirpdq)
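With `workers=-1` the population is evaluated in a process pool, which requires a picklable (module-level) objective and, in a script, an `if __name__ == "__main__"` guard; scipy also switches to `updating='deferred'` in parallel mode. A minimal sketch:

from scipy.optimize import differential_evolution, rosen

if __name__ == "__main__":
    res = differential_evolution(rosen, [(-5, 5)] * 2,
                                 workers=-1, updating="deferred")
    print(res.x, res.fun)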

# %%
Example #45
def Op_ABC(C_all_points):
    #len(C_all_points) = 10 (2 * Num_of_Points)
    #    for i in range(len(C_all_points)/2):
    #        i = i * 2
    #        print C_all_points[i],',',C_all_points[i+1]
    #    print ' '

    #Point in Polygon?
    for i in range(len(C_all_points) // 2):
        i = i * 2
        if g.point_in_polygon(vg.Point(C_all_points[i],
                                       C_all_points[i + 1])) == -1:
            pass
        else:
            return float('inf')
    #change[a,b,c,d] to [[a,b],[c,d]]
    CABC_all_points = []
    for i in range(len(C_all_points) // 2):
        i = i * 2
        Temp_CABC = [C_all_points[i], C_all_points[i + 1]]
        CABC_all_points.append(Temp_CABC)

    ap = [Point(p) for p in CABC_all_points]
    co = [LRF_(p.x, p.y, Radius) for p in ap]

    #A: The intersect part of path coverage (Enclude polygons)
    Sa = getIntersection(co).area
    #B:  The rest of the map's area except path coverage
    Sb = getUnscanned(co).area

    #C: The shortest distance
    def Dis_PosC(x):
        Dist_ = 0
        Store_Comb = []
        list_Num = 5
        Permutation_list = []
        list_ = [0, 1, 2, 3, 4]
        x_list = []

        for i in range(len(C_all_points) // 2):
            x[i] = math.floor(x[i])
            x_list.append(int(round(x[i])))

        for i in range(list_Num):
            Permutation_list.append(list_[x_list[i]])
            del list_[x_list[i]]


#        for i in range(len(C_all_points)/2):
#            print Permutation_list[i]
#        print ' '
        for i in range(len(C_all_points) // 2):
            Store_Comb.append(CABC_all_points[Permutation_list[i]])

        for i in range(len(C_all_points) // 2 - 1):
            Dis_0 = vg.Point(Store_Comb[i][0], Store_Comb[i][1])
            Dis_1 = vg.Point(Store_Comb[i + 1][0], Store_Comb[i + 1][1])
            Dist = g.shortest_path(Dis_0, Dis_1)
            _Dist_ = LineString(sp2list(Dist)).length
            Dist_ = Dist_ + _Dist_
        return Dist_

    New_Nearest_Dis = []
    bounds_C = [(0.1, 4.9), (0.1, 3.9), (0.1, 2.9), (0.1, 1.9), (0.1, 0.9)]
    Nearest_Dis = differential_evolution(Dis_PosC,
                                         bounds_C,
                                         maxiter=5,
                                         polish=False)

    for i in range(5):
        New_Nearest_Dis.append(Nearest_Dis.x[i])

    #Find the best sequence
    x_list = []
    BS_x = []
    Store_Comb = []
    list_Num = 5
    Permutation_list = []
    list_ = [0, 1, 2, 3, 4]

    for i in range(len(C_all_points) // 2):
        BS_x.append(math.floor(New_Nearest_Dis[i]))
        x_list.append(int(round(BS_x[i])))

    for i in range(list_Num):
        Permutation_list.append(list_[x_list[i]])
        del list_[x_list[i]]

    for i in range(len(C_all_points) // 2):
        Store_Comb.append(CABC_all_points[Permutation_list[i]])
    for i in range(len(C_all_points) // 2):
        Adjust_x.append(Store_Comb[i])

    #A + B + C
    Cal_Op_ABC = Sa + Sb + Nearest_Dis.fun
    return Cal_Op_ABC
Example #46
File: module.py Project: belac626/AeroPy
def fitting_shape_coefficients(filename,
                               bounds='Default',
                               n=5,
                               return_data=False,
                               return_error=False,
                               optimize_deltaz=False):
    """Fit shape parameters to given data points
        Inputs:
        - filename: name of the file where the original data is
        - bounds: bounds for the shape parameters. If not defined,
                    Default values are used.
        - n: order of the Bernstein polynomial. If bounds is default
                this input will define the order of the polynomial.
                Otherwise the length of bounds (minus one) is taken into 
                consideration"""

    from hausdorff_distance import hausdorff_distance_2D

    def shape_difference(inputs, optimize_deltaz=False):

        if optimize_deltaz == True or optimize_deltaz == [True]:
            y_u = CST(upper['x'],
                      1,
                      deltasz=inputs[-1] / 2.,
                      Au=list(inputs[:n + 1]))
            y_l = CST(lower['x'],
                      1,
                      deltasz=inputs[-1] / 2.,
                      Al=list(inputs[n + 1:-1]))
        else:
            y_u = CST(upper['x'],
                      1,
                      deltasz=deltaz / 2.,
                      Au=list(inputs[:n + 1]))
            y_l = CST(lower['x'],
                      1,
                      deltasz=deltaz / 2.,
                      Al=list(inputs[n + 1:]))
        # Vector to be compared with
        a_u = {'x': upper['x'], 'y': y_u}
        a_l = {'x': lower['x'], 'y': y_l}

        b_u = upper
        b_l = lower
        return hausdorff_distance_2D(a_u, b_u) + hausdorff_distance_2D(
            a_l, b_l)

    # def shape_difference_upper(inputs, optimize_deltaz = False):
    # if optimize_deltaz == True:
    # y = CST(x, 1, deltasz = inputs[-1]/2., Au = list(inputs[:-1]))
    # else:
    # y = CST(x, 1, deltasz = inputs[-1]/2., Au = list(inputs))
    # # Vector to be compared with
    # b = {'x': x, 'y': y}
    # return hausdorff_distance_2D(a, b)

    # def shape_difference_lower(inputs, optimize_deltaz = False):
    # if optimize_deltaz == True:
    # y = CST(x, 1, deltasz = inputs[-1]/2.,  Al = list(inputs[:-1]))
    # else:
    # y = CST(x, 1, deltasz = deltaz/2.,  Al = list(inputs))
    # # Vector to be compared with
    # b = {'x': x, 'y': y}
    # return hausdorff_distance_2D(a, b)

    def separate_upper_lower(data):
        for i in range(len(data['x'])):
            if data['y'][i] < 0:
                break
        upper = {'x': data['x'][0:i], 'y': data['y'][0:i]}
        lower = {'x': data['x'][i:], 'y': data['y'][i:]}
        return upper, lower

    # Order of Bernstein polynomial
    if bounds != 'Default':
        n = len(bounds) - 1

    # Obtaining data
    data = output_reader(filename, separator=', ', header=['x', 'y'])

    # Rotating airfoil
    x_TE = (data['x'][0] + data['x'][-1]) / 2.
    y_TE = (data['y'][0] + data['y'][-1]) / 2.

    theta_TE = math.atan(-y_TE / x_TE)

    # position trailing edge at the x-axis
    processed_data = {'x': [], 'y': []}
    for i in range(len(data['x'])):
        x = data['x'][i]
        y = data['y'][i]
        c_theta = math.cos(theta_TE)
        s_theta = math.sin(theta_TE)
        x_rotated = c_theta * x - s_theta * y
        y_rotated = s_theta * x + c_theta * y
        processed_data['x'].append(x_rotated)
        processed_data['y'].append(y_rotated)
    data = processed_data

    # determine what is the leading edge and the rotation angle beta
    processed_data = {'x': [], 'y': []}
    min_x_list = []
    min_y_list = []

    min_x = min(data['x'])
    min_index = data['x'].index(min_x)
    min_y = data['y'][min_index]

    chord = max(data['x']) - min(data['x'])
    beta = math.atan((y_TE - min_y) / (x_TE - min_x))

    for i in range(len(data['x'])):
        processed_data['x'].append((data['x'][i] - min_x) / chord)
        processed_data['y'].append(data['y'][i] / chord)
    data = processed_data

    #==============================================================================
    # Optimizing shape
    #==============================================================================
    # Determining default bounds
    if bounds == 'Default':
        upper_bounds = [[0, 1.]] * (n + 1)
        lower_bounds = [[0, 1]] + [[-1., 1.]] * n
        if optimize_deltaz:
            bounds = upper_bounds + lower_bounds + [[0, 0.1]]
        else:
            bounds = upper_bounds + lower_bounds
    if not optimize_deltaz:
        deltaz = (data['y'][0] - data['y'][-1])
    print(bounds)
    upper, lower = separate_upper_lower(data)
    # a = data
    # x = data['x']
    result = differential_evolution(shape_difference,
                                    bounds,
                                    disp=True,
                                    popsize=10,
                                    args=[optimize_deltaz])
    print('order %i upper done' % n)
    # x = lower['x']
    # a = lower
    # result_lower = differential_evolution(shape_difference_lower, lower_bounds,
    # disp=True, popsize = 10,
    # args = (optimize_deltaz))
    # print 'order %i lower done' % n
    if optimize_deltaz:
        Au = list(result.x[:n + 1])
        Al = list(result.x[n + 1:-1])
        deltaz = result.x[-1]
    else:
        Au = list(result.x[:n + 1])
        Al = list(result.x[n + 1:])

    # Return Al, Au, and others
    if return_data:
        return data, deltaz, Al, Au
    elif return_error:
        return result.fun, deltaz, Al, Au
    else:
        return deltaz, Al, Au
Example #47
def main(*args):
    # EXCEL
    if len(args) == 0:
        wb = xw.Book.caller()
    # ECLIPSE
    elif len(args) == 1:
        filename = args[0][0]
        wb = xw.Book(Root.resources() + filename)

    df = erw.data_reader(wb)
    #############
    n = 1096
    days = [i for i in range(len(df))]
    price = numpy.array(df.Price)
    holiday = numpy.array(df.Holiday)
    lnPrice = numpy.log(price[:n])

    dim = len(df)

    #################
    def func(t, alpha, beta, gamma, tau):
        return alpha + beta * holiday[int(t)] + gamma * numpy.cos(
            (tau + t) * (2 * math.pi) / 365)

    def funcSqSum(params):
        alpha, beta, gamma, tau = params
        return sum([(lnPrice[t] - func(t, alpha, beta, gamma, tau))**2
                    for t in range(0, n)])

    x0 = [1.0, 1.0, 2.0, 3.0]

    eh = Eh()
    eh.lnPricetransp = lnPrice.reshape((len(lnPrice), 1))
    sim = randopt.randopt(funcSqSum)
    eh.simulated = sim[0]
    eh.value = sim[1]
    ###############GLOBAL OPTIM

    with open('basopt.txt', 'r+') as f1:
        if f1.read() == '':
            basopt = basinhopping(funcSqSum, x0)
            basopteha = list(basopt.x)
            json.dump(basopteha, f1)
        else:
            f1.seek(0)
            basopteha = json.load(f1)
    alpha, beta, gamma, tau = basopteha

    with open('deopt.txt', 'r+') as f2:
        if f2.read() == '':
            bounds = [(-1000, 1000), (-1000, 1000), (-1000, 1000), (0, 365)]
            deopt = differential_evolution(funcSqSum, bounds)
            deopteha = list(deopt.x)
            json.dump(deopteha, f2)
        else:
            f2.seek(0)
            deopteha = json.load(f2)
    alpha2, beta2, gamma2, tau2 = deopteha


#######################Season, S_t
    season = numpy.zeros(dim)
    for t in days:
        season[t] = func(t, alpha, beta, gamma, tau)
    S = lnPrice[:n] - season[:n]

    #######################
    eh.deopteha = deopteha
    eh.basopteha = basopteha
    eh.deoptfv = funcSqSum(deopteha)
    eh.basoptfv = funcSqSum(basopteha)
    eh.seasonexcel = season.reshape(len(season), 1)
    eh.Swrite = S.reshape(len(S), 1)

    kalk = calc.parameters(S, n)
    Delta = kalk[6]
    Kappa = kalk[8]
    Sigma = kalk[9]
    tol = 50
    #####################################CALIBRATION
    kal1 = kal.kalibr(S, Kappa, Sigma, Delta, tol, n, season)
    S_kal1 = kal1[0]
    Pt_kal1 = kal1[1]

    pt1 = pth.plothelper(n, tol, price, Pt_kal1)
    pthelpkal1 = pt1

    eh.S_kalresult = S_kal1.reshape(len(S_kal1), 1)
    eh.Pt_kalresult = Pt_kal1.reshape(len(Pt_kal1), 1)

    erw.result_writer(wb, eh)

    kal2 = kal.kalibr(S, Kappa, Sigma, Delta, tol, n, season)
    S_kal2 = kal2[0]
    Pt_kal2 = kal2[1]
    pt2 = pth.plothelper(n, tol, price, Pt_kal2)
    pthelpkal2 = pt2

    kal3 = kal.kalibr(S, Kappa, Sigma, Delta, tol, n, season)
    S_kal3 = kal3[0]
    Pt_kal3 = kal3[1]
    pt3 = pth.plothelper(n, tol, price, Pt_kal3)
    pthelpkal3 = pt3

    kal4 = kal.kalibr(S, Kappa, Sigma, Delta, tol, n, season)
    S_kal4 = kal4[0]
    Pt_kal4 = kal4[1]
    pt4 = pth.plothelper(n, tol, price, Pt_kal4)
    pthelpkal4 = pt4

    kal5 = kal.kalibr(S, Kappa, Sigma, Delta, tol, n, season)
    S_kal5 = kal5[0]
    Pt_kal5 = kal5[1]
    pt5 = pth.plothelper(n, tol, price, Pt_kal5)
    pthelpkal5 = pt5

    ############## MEAN REVERTING TEST

    print(ts.adfuller(S))  # ADF (augmented Dickey-Fuller) test

    # Hurst exponent: mean reverting <=> H < 0.5
    def hurst(series):
        """Return the Hurst exponent of the time series vector series."""
        lags = range(2, 100)
        tau = [numpy.sqrt(numpy.std(numpy.subtract(series[lag:], series[:-lag])))
               for lag in lags]
        poly = numpy.polyfit(numpy.log(lags), numpy.log(tau), 1)
        return poly[0] * 2.0

    print("Hurst(S):  %s" % hurst(S))
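    # Illustrative sanity check (not part of the original script): a pure
    # random walk should give H close to 0.5, while a strongly mean-reverting
    # series gives H well below 0.5.
    rw = numpy.cumsum(numpy.random.randn(10000))
    print("Hurst(random walk): %s" % hurst(rw))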

    #############################Prediction, 7 day
    tol = n
    pu = scipy.stats.norm.ppf(0.975)  # upper quantile
    p = 0.0  # standard normal mean
    pd = scipy.stats.norm.ppf(0.025)  # lower quantile

    Delta = 1.0
    S_pred, Pt_pred = pred.pred(p, S, Kappa, Delta, Sigma, tol, season)[:2]

    S_pu, Pt_pu = pred.pred(pu, S, Kappa, Delta, Sigma, tol, season)[:2]

    S_pd, Pt_pd = pred.pred(pd, S, Kappa, Delta, Sigma, tol, season)[:2]
    plothelper2 = predplot.prdplt(n, tol, price, Pt_pu)
    plothelper3 = predplot.prdplt(n, tol, price, Pt_pd)
    plotgood = predplot.prdplt(n, tol, price, Pt_pred)

    ############ PLOTS
    plt.figure(1)
    plt.plot(price, label='1')
    plt.plot(pthelpkal1, label='2')
    plt.title("Price")
    plt.xlabel('Time, in days')
    plt.ylabel('Eur/MWh')
    # stochastic process
    plt.figure(2)
    plt.plot(S, label='1')
    plt.plot(S_kal1, label='2')
    plt.title("S process")
    plt.xlabel('Time, in days')
    # season function plot
    plt.figure(3)
    plt.plot(season)
    plt.title("Season function")
    plt.xlabel('Time, in days')
    # prediction
    plt.figure(4)
    plt.plot(plothelper2[n - 15:], label='pt_upper')
    plt.plot(plothelper3[n - 15:], label='pt_lower')
    plt.plot(plotgood[n - 15:], label='pt_exact')
    plt.plot(price[n - 15:n], label='original')
    plt.title("Price prediction")
    plt.xlabel('Time, in days')
    plt.ylabel('Eur/MWh')
    # several calibrations
    plt.figure(5)
    plt.plot(pthelpkal1[0:100], label='Calibration 1')
    plt.plot(pthelpkal2[0:100], label='Calibration 2')
    plt.plot(pthelpkal3[0:100], label='Calibration 3')
    plt.plot(price[0:50], label='Original price', linewidth=3.0)
    plt.title("Scenarios")
    plt.xlabel('Time, in days')
    plt.ylabel('Eur/MWh')
    # even more calibrations
    plt.figure(6)
    plt.plot(pthelpkal1[0:100], label='Calibration 1')
    plt.plot(pthelpkal2[0:100], label='Calibration 2')
    plt.plot(pthelpkal3[0:100], label='Calibration 3')
    plt.plot(pthelpkal4[0:100], label='Calibration 4')
    plt.plot(pthelpkal5[0:100], label='Calibration 5')
    plt.plot(price[0:50], label='Original price', linewidth=3.0)
    plt.title("Scenarios")
    plt.xlabel('Time, in days')
    plt.ylabel('Eur/MWh')
    plt.show()
    print('END')
Example #48
    def run(self):
        """
        Optimize the problem using selected Scipy optimizer.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        opt = self.options['optimizer']
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        # Initial Run
        with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
            model.run_solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # maxiter and disp get passed into scipy with all the other options.
        self.opt_settings['maxiter'] = self.options['maxiter']
        self.opt_settings['disp'] = self.options['disp']

        # Size Problem
        nparam = 0
        for param in itervalues(self._designvars):
            nparam += param['size']
        x_init = np.empty(nparam)

        # Initial Design Vars
        i = 0
        use_bounds = (opt in _bounds_optimizers)
        if use_bounds:
            bounds = []
        else:
            bounds = None

        for name, meta in iteritems(self._designvars):
            size = meta['size']
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta['lower']
                meta_high = meta['upper']
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        if use_bounds and (opt in _supports_new_style) and _use_new_style:
            # For 'trust-constr' it is better to use the new type bounds, because it seems to work
            # better (for the current examples in the tests) with the "keep_feasible" option
            try:
                from scipy.optimize import Bounds
                from scipy.optimize._constraints import old_bound_to_new
            except ImportError:
                msg = ('The "trust-constr" optimizer is supported for SciPy 1.1.0 and above. '
                       'The installed version is {}')
                raise ImportError(msg.format(scipy_version))

            # Convert "old-style" bounds to "new_style" bounds
            lower, upper = old_bound_to_new(bounds)  # tuple, tuple
            keep_feasible = self.opt_settings.get('keep_feasible_bounds', True)
            bounds = Bounds(lb=lower, ub=upper, keep_feasible=keep_feasible)

        # Constraints
        constraints = []
        i = 1  # start at 1 since row 0 is the objective.  Constraints start at row 1.
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        if opt in _constraint_optimizers:
            for name, meta in iteritems(self._cons):
                size = meta['size']
                upper = meta['upper']
                lower = meta['lower']
                equals = meta['equals']
                if 'linear' in meta and meta['linear']:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # In SciPy, constraint optimizers take constraints in two separate formats.

                # Type of constraints is list of NonlinearConstraint
                if opt in _supports_new_style and _use_new_style:
                    try:
                        from scipy.optimize import NonlinearConstraint
                    except ImportError:
                        msg = ('The "trust-constr" optimizer is supported for SciPy 1.1.0 and '
                               'above. The installed version is {}')
                        raise ImportError(msg.format(scipy_version))

                    if equals is not None:
                        lb = ub = equals
                    else:
                        lb = lower
                        ub = upper
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        # TODO linear constraint if meta['linear']
                        # TODO add option for Hessian
                        con = NonlinearConstraint(fun=signature_extender(self._con_val_func, args),
                                                  lb=lb, ub=ub,
                                                  jac=signature_extender(self._congradfunc, args))
                        constraints.append(con)
                else:  # Type of constraints is list of dict
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        con_dict = {}
                        if meta['equals'] is not None:
                            con_dict['type'] = 'eq'
                        else:
                            con_dict['type'] = 'ineq'
                        con_dict['fun'] = self._confunc
                        if opt in _constraint_grad_optimizers:
                            con_dict['jac'] = self._congradfunc
                        con_dict['args'] = [name, False, j]
                        constraints.append(con_dict)

                        if isinstance(upper, np.ndarray):
                            upper = upper[j]

                        if isinstance(lower, np.ndarray):
                            lower = lower[j]

                        dblcon = (upper < openmdao.INF_BOUND) and (lower > -openmdao.INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            dcon_dict = {}
                            dcon_dict['type'] = 'ineq'
                            dcon_dict['fun'] = self._confunc
                            if opt in _constraint_grad_optimizers:
                                dcon_dict['jac'] = self._congradfunc
                            dcon_dict['args'] = [name, True, j]
                            constraints.append(dcon_dict)

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(of=lincons, wrt=self._dvlist,
                                                              return_format='array')
            else:
                self._lincongrad_cache = None

        # Provide gradients for optimizers that support it
        if opt in _gradient_optimizers:
            jac = self._gradfunc
        else:
            jac = None

        # Hessian calculation method for optimizers, which require it
        if opt in _hessian_optimizers:
            if 'hess' in self.opt_settings:
                hess = self.opt_settings.pop('hess')
            else:
                # Defaults to BFGS, if not in opt_settings
                from scipy.optimize import BFGS
                hess = BFGS()
        else:
            hess = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_sparsity and self.options['dynamic_simul_derivs']:
            coloring_mod.dynamic_simul_coloring(self, run_model=False, do_sparsity=False)

        # optimize
        try:
            if opt in _optimizers:
                result = minimize(self._objfunc, x_init,
                                  # args=(),
                                  method=opt,
                                  jac=jac,
                                  hess=hess,
                                  # hessp=None,
                                  bounds=bounds,
                                  constraints=constraints,
                                  tol=self.options['tol'],
                                  # callback=None,
                                  options=self.opt_settings)
            elif opt == 'basinhopping':
                from scipy.optimize import basinhopping

                def fun(x):
                    return self._objfunc(x), jac(x)

                if 'minimizer_kwargs' not in self.opt_settings:
                    self.opt_settings['minimizer_kwargs'] = {"method": "L-BFGS-B", "jac": True}
                self.opt_settings.pop('maxiter')  # It does not have this argument

                # Pop the user's accept_test once, up front: popping it inside
                # the callback would leave it in opt_settings when they are
                # expanded into basinhopping's kwargs below (duplicate keyword),
                # and would drop the user test after the first iteration.
                user_test = self.opt_settings.pop('accept_test', None)  # callable

                def accept_test(f_new, x_new, f_old, x_old):
                    # Used to implement bounds besides the original functionality
                    if bounds is not None:
                        bound_check = all([b[0] <= xi <= b[1] for xi, b in zip(x_new, bounds)])
                        # has to satisfy both the bounds and the acceptance test defined by the
                        # user
                        if user_test is not None:
                            test_res = user_test(f_new, x_new, f_old, x_old)
                            if test_res == 'force accept':
                                return test_res
                            else:  # result is boolean
                                return bound_check and test_res
                        else:  # no user acceptance test, check only the bounds
                            return bound_check
                    else:
                        return True

                result = basinhopping(fun, x_init,
                                      accept_test=accept_test,
                                      **self.opt_settings)
            elif opt == 'dual_annealing':
                from scipy.optimize import dual_annealing
                self.opt_settings.pop('disp')  # It does not have this argument
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = dual_annealing(self._objfunc,
                                        bounds=bounds,
                                        **self.opt_settings)
            elif opt == 'differential_evolution':
                from scipy.optimize import differential_evolution
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = differential_evolution(self._objfunc,
                                                bounds=bounds,
                                                **self.opt_settings)
            elif opt == 'shgo':
                from scipy.optimize import shgo
                kwargs = dict()
                for param in ('minimizer_kwargs', 'sampling_method', 'n', 'iters'):
                    if param in self.opt_settings:
                        kwargs[param] = self.opt_settings[param]
                # Set the Jacobian and the Hessian to the value calculated in OpenMDAO
                if 'minimizer_kwargs' not in kwargs or kwargs['minimizer_kwargs'] is None:
                    kwargs['minimizer_kwargs'] = {}
                kwargs['minimizer_kwargs'].setdefault('jac', jac)
                kwargs['minimizer_kwargs'].setdefault('hess', hess)
                # Objective function tolerance
                self.opt_settings['f_tol'] = self.options['tol']
                result = shgo(self._objfunc,
                              bounds=bounds,
                              constraints=constraints,
                              options=self.opt_settings,
                              **kwargs)
            else:
                msg = 'Optimizer "{}" is not implemented yet. Choose from: {}'
                raise NotImplementedError(msg.format(opt, _all_optimizers))

        # If an exception was swallowed in one of our callbacks, we want to raise it
        # rather than the cryptic message from scipy.
        except Exception as msg:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()

        self.result = result

        if hasattr(result, 'success'):
            self.fail = not result.success
            if self.fail:
                print('Optimization FAILED.')
                print(result.message)
                print('-' * 35)

            elif self.options['disp']:
                print('Optimization Complete')
                print('-' * 35)
        else:
            self.fail = True  # It is not known, so the worst option is assumed
            print('Optimization Complete (success not known)')
            print(result.message)
            print('-' * 35)

        return self.fail
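A minimal, hedged usage sketch for the driver above (names follow the OpenMDAO 2.x API that the six-style imports imply; it assumes 'differential_evolution' is accepted by this driver's 'optimizer' option, as the branch above suggests):

from openmdao.api import Problem, ScipyOptimizeDriver, IndepVarComp, ExecComp

prob = Problem()
indeps = prob.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('x', 3.0)
indeps.add_output('y', -4.0)
prob.model.add_subsystem(
    'parab', ExecComp('f = (x - 3.)**2 + x * y + (y + 4.)**2 - 3.'),
    promotes=['*'])
prob.model.add_design_var('x', lower=-50., upper=50.)
prob.model.add_design_var('y', lower=-50., upper=50.)
prob.model.add_objective('f')

prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'differential_evolution'
prob.driver.options['maxiter'] = 100

prob.setup()
prob.run_driver()
print(prob['x'], prob['y'], prob['f'])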
Example #49
        {"name": "id_start_x", "type": ["int"]},
        {"name": "model", "type": ["bytes"]},
        {"name": "model_size", "type": ["int"]},
        {"name": "rmse", "type": ["null", "float"]},
        {"name": "mae", "type": ["null", "float"]},
        {"name": "rsquared", "type": ["null", "float"]},
        {"name": "CPU_ms", "type": ["int"]},
        {"name": "RAM", "type": ["int"]}
        ]
    """

    new_model = new_pc.decode_avro_msg(msg)

    model = pickle.loads(new_model['model'])
    result = differential_evolution(evaluate_diff_evo,
                                    bounds,
                                    maxiter=N_MAX_ITER,
                                    popsize=N_POP_SIZE)

    surrogate_x = result.x[0]
    surrogate_y = result.fun

    print(f"The {new_model['model_name']} optimization suggests "
          f"x={round(surrogate_x, 3)}, y={round(surrogate_y, 3)}")
    """
    "name": "Model_Application",
    "fields": [
        {"name": "phase", "type": ["enum"], "symbols": ["init", "observation"]},
        {"name": "model_name", "type": ["string"]},
        {"name": "id_x", "type": ["int"]},
        {"name": "n_data_points", "type": ["int"]},
        {"name": "id_start_x", "type": ["int"]},
Example #50
def main():

    main_dir = Path(
        r'P:\Synchronize\IWS\Testings\fourtrans_practice\multisite_phs_spec_corr\5min\v7_long_range')

    os.chdir(main_dir)

#     in_data_file = Path(r'temperature_avg.csv')
#     in_crds_file = Path(r'temperature_avg_coords.csv')  # has X, Y, Z cols
#     out_fig_pref = f'temperature_{mw_ftn}'

    suff = '__RR1D_RTsum'

    in_data_file = Path(r'../neckar_1min_ppt_data_20km_buff_Y2009__RRD_RTsum.pkl')
    in_crds_file = Path(r'../metadata_ppt_gkz3_crds.csv')  # has X, Y cols
    out_fig_pref = f'ppt_{mw_ftn}'

    sep = ';'
    time_fmt = '%Y-%m-%d %H:%M:%S'

    beg_time = '2009-01-01 00:00:00'
    end_time = '2009-03-31 23:59:00'

    fig_size = (15, 7)

    cut_off_dist = 70e3
    rng_bds = [1, 1e6]
    sill_bds = [0.0, 1.0]

    out_dir = main_dir

    phss_out_dir = out_dir / 'phss'
    phss_out_dir.mkdir(exist_ok=True, parents=True)

    cos_out_dir = out_dir / 'cos'
    cos_out_dir.mkdir(exist_ok=True, parents=True)

    sin_out_dir = out_dir / 'sin'
    sin_out_dir.mkdir(exist_ok=True, parents=True)

    mags_out_dir = out_dir / 'mags'
    mags_out_dir.mkdir(exist_ok=True, parents=True)

    if in_data_file.suffix == '.csv':
        data_df = pd.read_csv(in_data_file, sep=sep, index_col=0)
        data_df.index = pd.to_datetime(data_df.index, format=time_fmt)

    elif in_data_file.suffix == '.pkl':
        data_df = pd.read_pickle(in_data_file)

    else:
        raise NotImplementedError(
            f'Unknown extension of in_data_file: {in_data_file.suffix}!')

    crds_df = pd.read_csv(in_crds_file, sep=sep, index_col=0)

    data_df = data_df.loc[beg_time:end_time]

    data_df.dropna(axis=1, how='any', inplace=True)

    crds_df = crds_df.loc[data_df.columns]

    assert np.all(np.isfinite(data_df.values))
    assert np.all(np.isfinite(crds_df[['X', 'Y']].values))

    print(data_df.shape)
    print(crds_df.shape)

    assert all(data_df.shape)

    all_stns = data_df.columns

    if data_df.shape[0] % 2:
        data_df = data_df.iloc[:-1, :]
        print('Dropped last record in data_df!')

    n_stns = data_df.shape[1]

    probs_df = data_df.rank(axis=0) / (data_df.shape[0] + 1)

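    # Normal-score transform: map the uniform rank probabilities through the
    # standard normal quantile function, so the FFT below operates on
    # Gaussianized series.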
    norms_df = pd.DataFrame(
        data=norm.ppf(probs_df.values), columns=data_df.columns)

    ft_df = pd.DataFrame(
        data=np.fft.rfft(norms_df.values, axis=0), columns=data_df.columns)

    phs_spec_df = pd.DataFrame(data=np.angle(ft_df), columns=data_df.columns)

    cos_spec_df = pd.DataFrame(
        data=np.cos(phs_spec_df.values), columns=data_df.columns)

    sin_spec_df = pd.DataFrame(
        data=np.sin(phs_spec_df.values), columns=data_df.columns)

#     phs_le_idxs = phs_spec_df < 0
#
#     phs_spec_df[phs_le_idxs] = (2 * np.pi) + phs_spec_df[phs_le_idxs]

    mag_spec_df = pd.DataFrame(
        data=np.abs(ft_df), columns=data_df.columns)

    n_freqs = phs_spec_df.shape[0]

    # Test to verify that forward and backward transforms are
    # working as expected.
#     fft_vals = np.empty_like(mag_spec_df.values, dtype=complex)
#
#     fft_vals[:].real = mag_spec_df.values * np.cos(phs_spec_df.values)
#     fft_vals[:].imag = mag_spec_df.values * np.sin(phs_spec_df.values)
#
#     ift_vals = np.fft.irfft(fft_vals, axis=0)

    phs_spec_df.to_csv(str(phss_out_dir / f'phss{suff}.csv'), sep=sep)

    cos_spec_df.to_csv(str(cos_out_dir / f'cos{suff}.csv'), sep=sep)
    sin_spec_df.to_csv(str(sin_out_dir / f'sin{suff}.csv'), sep=sep)

    mag_spec_df.to_csv(str(mags_out_dir / f'mags{suff}.csv'), sep=sep)

    dist_and_corrs_mat = np.full(
        (int(n_stns * (n_stns - 1) * 0.5), 3), np.nan)

    print(dist_and_corrs_mat.shape)

    print('Filling matrix...')

    idx = 0
    for i in range(n_stns):
        x_crd_i, y_crd_i = crds_df.loc[all_stns[i], ['X', 'Y']]
        for j in range(n_stns):
            if j <= i:
                continue

            x_crd_j, y_crd_j = crds_df.loc[
                all_stns[j], ['X', 'Y']]

            dist = (
                ((x_crd_i - x_crd_j) ** 2) +
                ((y_crd_i - y_crd_j) ** 2))

            dist **= 0.5

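            # Phase-spectrum correlation: mean cosine of the pairwise phase
            # differences over all frequencies (1.0 means identical phases).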
            phs_corr = np.cos(
                phs_spec_df.loc[:, all_stns[i]].values -
                phs_spec_df.loc[:, all_stns[j]].values).sum() / n_freqs

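            # Magnitude-spectrum correlation: cosine similarity (uncentered
            # correlation) of the two magnitude spectra,
            # sum(|F_i||F_j|) / sqrt(sum(|F_i|^2) * sum(|F_j|^2)).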
            mag_num = mag_spec_df.loc[:, [all_stns[i], all_stns[j]]].product(axis=1).values.sum()

            mag_denom = mag_spec_df.loc[:, [all_stns[i], all_stns[j]]].values ** 2

            mag_denom = mag_denom.sum(axis=0)

            mag_denom = np.prod(mag_denom)

            mag_denom **= 0.5

            mag_corr = mag_num / mag_denom

            dist_and_corrs_mat[idx, 0] = dist
            dist_and_corrs_mat[idx, 1] = phs_corr
            dist_and_corrs_mat[idx, 2] = mag_corr

            idx += 1

    assert np.all(np.isfinite(dist_and_corrs_mat))
    print('Done filling!')

#     max_corr = dist_and_corrs_mat[:, [1, 2]].max()
#     min_corr = dist_and_corrs_mat[:, [1, 2]].min()

    print('Optimizing...')
    dist_sort_idxs = np.argsort(dist_and_corrs_mat[:, 0])

    (exp_vg_vals_x_mw,
     exp_vg_vals_mag_mw,
     exp_vg_vals_phs_mw) = get_mv_vals_crds(
        dist_and_corrs_mat[dist_sort_idxs, 0],
        dist_and_corrs_mat[dist_sort_idxs, 2],
        dist_and_corrs_mat[dist_sort_idxs, 1],
        50)

    opt_dist_idxs = exp_vg_vals_x_mw < cut_off_dist

    bds = [
        rng_bds,
        sill_bds]

    opt_res = differential_evolution(
        obj_ftn,
        bds,
        popsize=50,
        args=(exp_vg_vals_x_mw[opt_dist_idxs],
              exp_vg_vals_mag_mw[opt_dist_idxs]),
        polish=False)

    opt_prms = opt_res.x
    sq_diff = opt_res.fun

    print(np.round(opt_prms, 3))
    print(sq_diff)

    print('Done optimizing.')

    mag_cftn_str = (
        f'{opt_prms[1]:0.5f} Exp({opt_prms[0]:0.1f})')

    with open(str(mags_out_dir / f'{out_fig_pref}_cftns{suff}.csv'), 'w') as txt_hdl:
        txt_hdl.write(f'ft_type;cftn\n')
        txt_hdl.write(f'mag;{mag_cftn_str}\n')

    with open(str(mags_out_dir / f'vg_strs{suff}.csv'), 'w') as txt_hdl:
        txt_hdl.write(f'freq;vg\n')

        for i in range(n_freqs):
            txt_hdl.write(f'{i};{mag_cftn_str}\n')

    exp_vg_vals = get_nug_sph_cftn(opt_prms, dist_and_corrs_mat[:, 0])

    # Magnitude scatter
    plt.figure(figsize=fig_size)

    plt.scatter(
        dist_and_corrs_mat[:, 0],
        dist_and_corrs_mat[:, 2],
        alpha=0.6,
        color='red',
        label='obs')

    plt.scatter(
        dist_and_corrs_mat[:, 0],
        exp_vg_vals,
        alpha=0.6,
        color='blue',
        label='fit')

    plt.plot(
        exp_vg_vals_x_mw,
        exp_vg_vals_mag_mw,
        alpha=0.6,
        color='green',
        label='mw')

    plt.title(mag_cftn_str)
    plt.grid()

    plt.legend()

    plt.xlabel('Distance (m)')
    plt.ylabel('Mag. Spec. Corr. (-)')

    plt.gca().set_axisbelow(True)

    plt.xlim(0, plt.xlim()[1])
#     plt.ylim(min_corr, max_corr)

    plt.savefig(
        str(mags_out_dir / f'{out_fig_pref}_mag_corr_cftn{suff}.png'),
        bbox_inches='tight')

#     plt.show()
    plt.close()

    # Phase spectrum.
    opt_res = differential_evolution(
        obj_ftn,
        bds,
        popsize=50,
        args=(exp_vg_vals_x_mw[opt_dist_idxs],
              exp_vg_vals_phs_mw[opt_dist_idxs]),
        polish=False)

    opt_prms = opt_res.x
    sq_diff = opt_res.fun

    print(np.round(opt_prms, 3))
    print(sq_diff)

    print('Done optimizing.')

    phs_cftn_str = (
        f'{opt_prms[1]:0.5f} Exp({opt_prms[0]:0.1f})')

    with open(str(phss_out_dir / f'{out_fig_pref}_cftns{suff}.csv'), 'w') as txt_hdl:
        txt_hdl.write(f'ft_type;cftn\n')
        txt_hdl.write(f'phs;{phs_cftn_str}\n')

    with open(str(phss_out_dir / f'vg_strs{suff}.csv'), 'w') as txt_hdl:
        txt_hdl.write(f'freq;vg\n')

        for i in range(n_freqs):
            txt_hdl.write(f'{i};{phs_cftn_str}\n')

    with open(str(cos_out_dir / f'vg_strs{suff}.csv'), 'w') as txt_hdl:
        txt_hdl.write(f'freq;vg\n')

        for i in range(n_freqs):
            txt_hdl.write(f'{i};{phs_cftn_str}\n')

    with open(str(sin_out_dir / f'vg_strs{suff}.csv'), 'w') as txt_hdl:
        txt_hdl.write(f'freq;vg\n')

        for i in range(n_freqs):
            txt_hdl.write(f'{i};{phs_cftn_str}\n')

    exp_vg_vals = get_nug_sph_cftn(opt_prms, dist_and_corrs_mat[:, 0])

    # Phase scatter
    plt.figure(figsize=fig_size)

    plt.scatter(
        dist_and_corrs_mat[:, 0],
        dist_and_corrs_mat[:, 1],
        alpha=0.6,
        color='red',
        label='obs')

    plt.scatter(
        dist_and_corrs_mat[:, 0],
        exp_vg_vals,
        alpha=0.6,
        color='blue',
        label='fit')

    plt.plot(
        exp_vg_vals_x_mw,
        exp_vg_vals_phs_mw,
        alpha=0.6,
        color='green',
        label='mw')

    plt.title(phs_cftn_str)
    plt.grid()

    plt.legend()

    plt.xlabel('Distance (m)')
    plt.ylabel('Phs. Spec. Corr. (-)')

    plt.gca().set_axisbelow(True)

    plt.xlim(0, plt.xlim()[1])
#     plt.ylim(min_corr, max_corr)

    plt.savefig(
        str(phss_out_dir / f'{out_fig_pref}_phs_corr_cftn{suff}.png'),
        bbox_inches='tight')

#     plt.show()
    plt.close()

    return
Example #51
xyz_data = (xy_data_coord, histo, matern_v)

if opt_method == 'nelder-mead':
    # sigma, length and noise - find a good initial guess to reduce iterations
    initial_param = np.array([10, 10, 10])
    # No bounds needed for Nelder-Mead
    solution = scopt.minimize(fun=log_model_evidence,
                              args=xyz_data,
                              x0=initial_param,
                              method='Nelder-Mead')

elif opt_method == 'latin-hypercube':
    boundary = [(0, 30), (0, 3), (0, 3)]
    solution = scopt.differential_evolution(func=log_model_evidence,
                                            bounds=boundary,
                                            args=xyz_data,
                                            init='latinhypercube')
else:
    raise ValueError('Unknown opt_method: %s' % opt_method)

sigma_optimal = solution.x[0]
length_optimal = solution.x[1]
noise_optimal = solution.x[2]
print(solution)
"""Defining the entire range of potential sampling points"""
intervals = 50
cut_off_x = (np.max(xv_transform_row) - np.min(xv_transform_row)) / 2
cut_off_y = (np.max(yv_transform_row) - np.min(yv_transform_row)) / 2
sampling_points_x = np.linspace(
    np.min(xv_transform_row) - cut_off_x,
    np.max(xv_transform_row) + cut_off_x, intervals)
sampling_points_y = np.linspace(
    np.min(yv_transform_row) - cut_off_y,
Example #52
 def __tune__(self):
     bounds = [self.attachmentBound]
     result = optimize.differential_evolution(self.__ESNTrain__,
                                              bounds=bounds)
     #print("\nThe optimal parameters for Scale Free Networks ESN:"+str(result.x))
     return int(np.floor(result.x[0]))
Example #53
                 (k**13) * p13 + (k**14) * p14)
            ]

            api = api_i[s - 3]
            aps[i, s - 1] = api
        corr = np.corrcoef(aps[off_idx:, Q_obs_col],
                           aps[off_idx:, s - 1])[0, 1]
        crs[s] = corr
        return (1 - corr)

    # optimize
    x0_bounds = (0, 1)

    bounds = [x0_bounds]

    result = differential_evolution(objective, bounds)
    x = result.x
    # show final objective
    print('Optimised_corr_' + str(s) + ': ' + str(1 - objective(x)))

    # print (solution)
    print('k = ' + str(x[0]))
    print('\n')

crs = list(crs)
columns = ('prec', 'Q_obs', 'api_3', 'api_4', 'api_5', 'api_6', 'api_7',
           'api_8', 'api_9', 'api_10', 'api_11', 'api_12', 'api_13', 'api_14')
idx = pd.date_range(dates_start, dates_end)
aps_df = pd.DataFrame(aps, columns=columns, index=idx)
pd.to_pickle(aps_df, 'aps' + station_p)
aps_df.to_excel('aps.xlsx', sheet_name='Sheet1')
Example #54
def differential_evolution(
    parameters_guess,
    bounds,
    fit_bead,
    fit_parameter_names,
    exp_dict,
    global_opts={},
    constraints=None,
):
    r"""
    Fit defined parameters for equation of state object using scipy.optimize.differential_evolution with given experimental data. 

    Parameters
    ----------
    parameters_guess : numpy.ndarray 
        An array of initial guesses for parameters.
    bounds : list[tuple]
        List of length equal to fit_parameter_names with lists of pairs for minimum and maximum bounds of parameter being fit. Defaults from Eos object are broad, so we recommend specification.
    fit_bead : str
        Name of bead whose parameters are being fit.
    fit_parameter_names : list[str]
        This list contains the name of the parameter being fit (e.g. epsilon). See EOS documentation for supported parameter names. Cross interaction parameter names should be composed of parameter name and the other bead type, separated by an underscore (e.g. epsilon_CO2).
    exp_dict : dict
        Dictionary of experimental data objects.
    global_opts : dict, Optional

        - init (str) - Optional, default="random", type of initiation for population
        - write_intermediate_file (str) - Optional, default=False, If True, an intermediate file will be written from the method callback
        - filename (str) - Optional, default=None, filename for callback output, if provided, ``write_intermediate_file`` will be set to True
        - obj_cut (float) - Optional, default=None, Cut-off objective value to write the parameters, if provided, ``write_intermediate_file`` will be set to True
        - etc. Other keywords for scipy.optimize.differential_evolution use the function defaults

    constraints : dict, Optional, default=None
        This dictionary of constraint types and their arguments will be converted into a tuple of constraint classes that is compatible

    Returns
    -------
    Objective : obj
        scipy OptimizedResult object

    """

    obj_kwargs = ["obj_cut", "filename", "write_intermediate_file"]
    if "obj_cut" in global_opts:
        obj_cut = global_opts["obj_cut"]
        del global_opts["obj_cut"]
        global_opts["write_intermediate_file"] = True
    else:
        obj_cut = None

    if "filename" in global_opts:
        filename = global_opts["filename"]
        del global_opts["filename"]
        global_opts["write_intermediate_file"] = True
    else:
        filename = None

    if ("write_intermediate_file" in global_opts
            and global_opts["write_intermediate_file"]):
        del global_opts["write_intermediate_file"]
        global_opts["callback"] = _WriteParameterResults(fit_parameter_names,
                                                         obj_cut=obj_cut,
                                                         filename=filename)

    # Options for differential evolution, set defaults in new_global_opts
    new_global_opts = {"init": "random"}
    if global_opts:
        for key, value in global_opts.items():
            if key == "MultiprocessingObject":
                flag_workers = "workers" in global_opts and global_opts[
                    "workers"] > 1
                if value.ncores > 1 and flag_workers:
                    logger.info(
                        "Differential Evolution algorithm is using {} workers."
                        .format(value.ncores))
                    new_global_opts["workers"] = value._pool.map
                    exp_dict = _del_Data_MultiprocessingObject(exp_dict)
            elif key not in obj_kwargs:
                new_global_opts[key] = value
    global_opts = new_global_opts

    if constraints is not None:
        global_opts["constraints"] = ff.initialize_constraints(
            constraints, "class")
    logger.info("Differential Evolution Options: {}".format(global_opts))

    result = spo.differential_evolution(ff.compute_obj,
                                        bounds,
                                        args=(fit_bead, fit_parameter_names,
                                              exp_dict, bounds),
                                        **global_opts)

    return result
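A hedged usage sketch for the wrapper above; the bead name, parameter names and exp_dict below are hypothetical placeholders for objects built elsewhere in this package, and only the global_opts keys come from the docstring:

result = differential_evolution(
    parameters_guess=[300.0, 3.5],
    bounds=[(100.0, 600.0), (2.0, 5.0)],
    fit_bead="CO2",
    fit_parameter_names=["epsilon", "sigma"],
    exp_dict=exp_dict,  # hypothetical: dict of experimental data objects
    global_opts={"init": "random", "maxiter": 50, "obj_cut": 1e3},
)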
Example #55

    '''
    df = pd.DataFrame(index=['Weights', 'fitness'],
                      data=np.concatenate([optimizer.W[None, ...],
                                           optimizer.fitness[None, ...]],
                                          axis=0))
    #display(df)
    #print(df)
    '''
    clear_output()
    print(i, optimizer.best)
    fitness_values.append(optimizer.fitness)
    best_fitness.append(optimizer.best[0])
for fv in np.array(fitness_values).T:
    plt.plot(fv, color='b')
plt.plot(best_fitness, color='g')

bestGrid = auxGrid(*denormalize(optimizer.best[1]))
plt.imshow(pimg + (1 - bestGrid))

result = optimize.differential_evolution(lambda x: -fitness(x),
                                         bounds=aquarium)
aaa = auxGrid(*denormalize(result.x))
plt.imshow(pimg + (1 - aaa))

plt.imshow(
    utils.grid(img_fit.shape, [a1, a2], [freq1, freq2], [phase1, phase2],
               width))

plt.imshow(auxGrid(*denormalize(optimizer.best[1])))

swarm_size = 20
population = np.random.randn(swarm_size, 4) * .5
pso = PSO(population, 1, 1, 0.8)

for i in range(50):
    pso.minimize(lambda x: -fitness(x))
Example #56
import numpy as np
from scipy import optimize
from matplotlib import pylab as plt

x = np.arange(1, 30, 0.1)
y1 = np.sin(x / 5.) * np.exp(x / 10.) + 5 * np.exp(-x / 2.)
y_gr = [int(y1_val) for y1_val in y1]
def y_func(x):
    return int(np.sin(x / 5.) * np.exp(x / 10.) + 5 * np.exp(-x / 2.))
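# Note: int() makes the objective piecewise-constant, so the gradient-based
# BFGS run below stalls on a flat plateau, while differential evolution only
# compares function values and can still locate the global minimum region.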

plt.plot(x, y_gr)

mmm = optimize.minimize(y_func, [30], method='BFGS')
print(mmm)
print("# Task 1 MIN: " + str(mmm.fun) + " in point " + str(mmm.x))

print("----------------------------------------------")

mmm = optimize.differential_evolution(y_func, [(1, 30)])
print(mmm)
print("# Task 2 MIN: " + str(mmm.fun) + " in point " + str(mmm.x))
Example #57
def fitting_shape_coefficients(filename,
                               bounds='Default',
                               n=5,
                               return_data=False,
                               return_error=False,
                               optimize_deltaz=False,
                               solver='gradient',
                               deltaz=None,
                               objective='hausdorf',
                               surface='both',
                               x0=None):
    """Fit shape parameters to given data points
        Inputs:
        - filename: name of the file where the original data is
        - bounds: bounds for the shape parameters. If not defined,
                    Default values are used.
        - n: order of the Bernstein polynomial. If bounds is default
                this input will define the order of the polynomial.
                Otherwise the length of bounds (minus one) is taken into
                consideration"""

    from optimization_tools.hausdorff_distance import hausdorff_distance_2D

    def shape_difference(inputs, optimize_deltaz=False, surface=surface):
        # Define deltaz
        if optimize_deltaz is True or optimize_deltaz == [True]:
            deltasz = inputs[-1] / 2.
        else:
            deltasz = deltaz / 2.

        # Calculate upper and lower surface
        if surface == 'both':
            y_u = CST(upper['x'], 1, deltasz=deltasz, Au=list(inputs[:n + 1]))
            y_l = CST(lower['x'],
                      1,
                      deltasz=deltasz,
                      Al=list(inputs[n + 1:-1]))
        elif surface == 'upper':
            y_u = CST(upper['x'], 1, deltasz=deltasz, Au=list(inputs[:n + 1]))
        elif surface == 'lower':
            y_l = CST(lower['x'], 1, deltasz=deltasz, Al=list(inputs[:n + 1]))

        # Vector to be compared with
        error = 0
        if surface == 'upper' or surface == 'both':
            a_u = {'x': upper['x'], 'y': y_u}
            if objective == 'hausdorf':
                error += hausdorff_distance_2D(a_u, upper)
            elif objective == 'squared_mean':
                error += np.mean(
                    (np.array(a_u['x']) - np.array(upper['x']))**2 +
                    (np.array(a_u['y']) - np.array(upper['y']))**2)

        if surface == 'lower' or surface == 'both':
            a_l = {'x': lower['x'], 'y': y_l}
            if objective == 'hausdorf':
                error += hausdorff_distance_2D(a_l, lower)
            elif objective == 'squared_mean':
                error += np.mean(
                    (np.array(a_l['x']) - np.array(lower['x']))**2 +
                    (np.array(a_l['y']) - np.array(lower['y']))**2)

        # plt.figure()
        # plt.scatter(a_u['x'], a_u['y'], c='k')
        # plt.scatter(a_l['x'], a_l['y'], c='b')
        # plt.scatter(upper['x'], upper['y'], c='r')
        # plt.scatter(lower['x'], lower['y'], c='g')
        # plt.show()
        return error

    def separate_upper_lower(data):
        for key in data:
            data[key] = np.array(data[key])

        index = np.where(data['y'] > 0)
        upper = {'x': data['x'][index], 'y': data['y'][index]}
        index = np.where(data['y'] <= 0)
        lower = {'x': data['x'][index], 'y': data['y'][index]}
        # x = data['x']
        # y = data['y']
        # for i in range(len(x)):
        #     if data['y'][i] < 0:
        #         break
        # upper = {'x': x[0:i],
        #          'y': y[0:i]}
        # lower = {'x': x[i:],
        #          'y': y[i:]}
        return upper, lower

    def _determine_bounds_x0(n, optimize_deltaz, bounds):
        if bounds == 'Default':
            upper_bounds = [[0, 1]] + [[-1., 1.]] * n
            lower_bounds = [[0, 1]] + [[-1., 1.]] * n

            if optimize_deltaz:
                if surface == 'both':
                    bounds = upper_bounds + lower_bounds + [[0, 0.1]]
                elif surface == 'upper':
                    bounds = upper_bounds + [[0, 0.1]]
                elif surface == 'lower':
                    bounds = lower_bounds + [[0, 0.1]]
            else:
                bounds = upper_bounds + lower_bounds

        # One initial value per bounded variable; user-provided bounds are
        # kept as-is instead of being overwritten.
        x0 = len(bounds) * [0.]
        return x0, bounds

    # Order of Bernstein polynomial
    if bounds != 'Default':
        n = len(bounds) - 1

    # Obtaining data
    if filename[-2:] == '.p':
        import pickle
        data = pickle.load(open(filename, "rb"), encoding='latin1')
        data = data['wing'][list(data['wing'].keys())[3]]
        x, y, z = data.T
    else:
        data = output_reader(filename, separator='\t', header=['x', 'z'])
        x = data['x']
        z = data['z']

    # Rotating airfoil
    x_TE = (x[0] + x[-1]) / 2.
    y_TE = (z[0] + z[-1]) / 2.

    theta_TE = math.atan(-y_TE / x_TE)

    # position trailing edge at the x-axis
    processed_data = {'x': [], 'y': []}
    for i in range(len(x)):
        x_i = x[i]
        z_i = z[i]
        c_theta = math.cos(theta_TE)
        s_theta = math.sin(theta_TE)
        x_rotated = c_theta * x_i - s_theta * z_i
        z_rotated = s_theta * x_i + c_theta * z_i
        processed_data['x'].append(x_rotated)
        processed_data['y'].append(z_rotated)
    data = processed_data

    # determine what is the leading edge and the rotation angle beta
    processed_data = {'x': [], 'y': []}

    min_x = min(x)
    # min_index = data['x'].index(min_x)
    # min_y = data['y'][min_index]

    chord = max(x) - min(x)
    # beta = math.atan((y_TE - min_y)/(x_TE - min_x))

    for i in range(len(x)):
        processed_data['x'].append((x[i] - min_x) / chord)
        processed_data['y'].append(z[i] / chord)
    data = processed_data

    # Determining default bounds
    x0_default, bounds = _determine_bounds_x0(n, optimize_deltaz, bounds)
    if x0 is None:
        x0 = x0_default

    if not optimize_deltaz and deltaz is None:
        deltaz = (data['y'][0] - data['y'][-1])

    if surface == 'both':
        upper, lower = separate_upper_lower(data)
    elif surface == 'upper':
        upper = data
    elif surface == 'lower':
        lower = data

    # Calculate original error
    error0 = shape_difference(x0,
                              optimize_deltaz=optimize_deltaz,
                              surface=surface)

    def f(x):
        return shape_difference(
            x, optimize_deltaz=optimize_deltaz, surface=surface) / error0

    # Optimize
    if solver == 'differential_evolution':
        result = differential_evolution(f, bounds, disp=True, popsize=10)
        x = result.x
        fval = result.fun
    elif solver == 'gradient':
        solution = minimize(f,
                            x0,
                            bounds=bounds,
                            options={
                                'maxfun': 30000,
                                'eps': 1e-02
                            })
        x = solution['x']
        fval = solution['fun']
    print('order %i done' % n)

    # Unpackage data
    if surface == 'both' or surface == 'upper':
        Au = list(x[:n + 1])
    if surface == 'both':
        if optimize_deltaz:
            Al = list(x[n + 1:-1])
            deltaz = x[-1]
        else:
            Al = list(x[n + 1:])
    elif surface == 'lower':
        Al = list(x[:n + 1])
    if surface != 'both' and optimize_deltaz:
        # single-surface fits also carry deltaz as the last design variable
        deltaz = x[-1]

    # Return Al, Au, and others
    to_return = []
    if return_data:
        to_return.append(data)
    if return_error:
        to_return.append(fval)
    to_return.append(deltaz)
    if surface == 'lower' or surface == 'both':
        to_return.append(Al)
    if surface == 'upper' or surface == 'both':
        to_return.append(Au)
    print(to_return)
    return to_return
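A hedged usage sketch for the function above; 'naca0012.txt' is a hypothetical tab-separated file with 'x' and 'z' columns, as output_reader expects here:

deltaz, Al, Au = fitting_shape_coefficients('naca0012.txt',
                                            n=5,
                                            optimize_deltaz=True,
                                            solver='differential_evolution')
print('Lower coefficients:', Al)
print('Upper coefficients:', Au)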
Example #58
        x_list.append(int(round(BS_x[i])))

    for i in range(list_Num):
        Permutation_list.append(list_[x_list[i]])
        del list_[x_list[i]]

    for i in range(len(C_all_points) // 2):
        Store_Comb.append(CABC_all_points[Permutation_list[i]])
    for i in range(len(C_all_points) // 2):
        Adjust_x.append(Store_Comb[i])

    #A + B + C
    Cal_Op_ABC = Sa + Sb + Nearest_Dis.fun
    return Cal_Op_ABC

COp_ABC = differential_evolution(Op_ABC, bounds, maxiter=5, polish=False)
#COp_ABC.x = C_all_points (The best one)
print(COp_ABC.x)
print(COp_ABC.fun)

Adjust_x_ = []
for i in range(len(Adjust_x) - 5, len(Adjust_x), 1):
    Adjust_x_.append(Adjust_x[i])

#Plot
Plot_Store_Dist = []
for i in range(len(Adjust_x_) - 1):
    Dis_0 = vg.Point(Adjust_x_[i][0], Adjust_x_[i][1])
    Dis_1 = vg.Point(Adjust_x_[i + 1][0], Adjust_x_[i + 1][1])
    Dist = g.shortest_path(Dis_0, Dis_1)
    del Dist[-1]
Example #59

from cmath import sin, exp
import numpy as np
from matplotlib import pylab as plt
from scipy import linalg
from scipy import optimize


def f(x):
    return (sin(x / 5.0) * exp(x / 10.0) + 5 * exp(-x / 2.0)).real

def c(arr):
    return sum([f(i) for i in arr])

#x = [i for i in np.arange(-10, 10, 1)]
#y = [f(i) for i in np.arange(-10, 10, 1)]

# plt.plot(x, y)
# plt.show()

print(optimize.differential_evolution(f, [(1,30)]))
# result = optimize.minimize(f, 2)
# print(result)
Example #60
 def __tune__(self):
     bounds = [self.reservoirConnectivityBound]
     result = optimize.differential_evolution(self.__ESNTrain__,
                                              bounds=bounds)
     print("\nThe optimal parameters for classic ESN:" + str(result.x))
     return result.x[0]