Example #1
    def test_against_scipy(self):
        problem = get_problem("rosenbrock")
        problem.xl = None
        problem.xu = None
        x0 = np.array([0.5, 0.5])

        hist_scipy = []

        def fun(x):
            hist_scipy.append(x)
            return problem.evaluate(x)

        scipy_minimize(fun, x0, method='Nelder-Mead')
        hist_scipy = np.array(hist_scipy)

        hist = []

        def callback(x):
            if x.ndim == 2:
                hist.extend(x)
            else:
                hist.append(x)

        problem.callback = callback
        minimize(
            problem,
            nelder_mead(x0=x0,
                        max_restarts=0,
                        termination=get_termination("n_eval",
                                                    len(hist_scipy))))
        hist = np.vstack(hist)[:len(hist_scipy)]  # np.row_stack was removed in NumPy 2.0

        self.assertTrue(np.all(np.abs(hist - hist_scipy) < 1e-7))
Example #2
def test_against_scipy():
    problem = get_problem("rosenbrock")
    problem.xl = None
    problem.xu = None
    x0 = np.array([0.5, 0.5])

    hist_scipy = []

    def fun(x):
        hist_scipy.append(x)
        return problem.evaluate(x)

    scipy_minimize(fun, x0, method='Nelder-Mead')
    hist_scipy = np.array(hist_scipy)

    hist = []

    def callback(x, _):
        if x.ndim == 2:
            hist.extend(x)
        else:
            hist.append(x)

    problem.callback = callback
    minimize(
        problem,
        NelderMead(x0=x0,
                   max_restarts=0,
                   termination=get_termination("n_eval", len(hist_scipy))))
    hist = np.vstack(hist)[:len(hist_scipy)]  # np.row_stack was removed in NumPy 2.0

    np.testing.assert_allclose(hist, hist_scipy, rtol=1e-04)
Example #3
def pyapprox_minimize(fun,
                      x0,
                      args=(),
                      method='rol-trust-constr',
                      jac=None,
                      hess=None,
                      hessp=None,
                      bounds=None,
                      constraints=(),
                      tol=None,
                      callback=None,
                      options={},
                      x_grad=None):

    options = options.copy()
    if x_grad is not None and 'rol' not in method:
        # Fix this limitation
        msg = f"Method {method} does not currently support gradient checking"
        #raise Exception(msg)
        print(msg)

    if 'rol' in method and has_ROL:
        if callback is not None:
            raise Exception(f'Method {method} cannot use callbacks')
        if args != ():
            raise Exception(f'Method {method} cannot use args')
        rol_methods = {'rol-trust-constr': None}
        if method in rol_methods:
            rol_method = rol_methods[method]
        else:
            raise Exception(f"Method {method} not found")
        return rol_minimize(fun, x0, rol_method, jac, hess, hessp, bounds,
                            constraints, tol, options, x_grad)

    if method == 'trust-constr':
        if 'ctol' in options:
            del options['ctol']
        return scipy_minimize(fun, x0, args, method, jac, hess, hessp, bounds,
                              constraints, tol, callback, options)

    if method == 'slsqp':
        hess, hessp = None, None
        if 'ctol' in options:
            del options['ctol']
        if 'gtol' in options:
            # SLSQP has no 'gtol' option; translate it to 'ftol'.
            # (Assigning outside this block raised NameError when 'gtol' was absent.)
            options['ftol'] = options.pop('gtol')
        if 'verbose' in options:
            verbose = options['verbose']
            options['disp'] = verbose
            del options['verbose']
        return scipy_minimize(fun, x0, args, method, jac, hess, hessp, bounds,
                              constraints, tol, callback, options)

    raise Exception(f"Method {method} was not found")
Example #4
def pyapprox_minimize(fun, x0, args=(), method='rol-trust-constr', jac=None,
                      hess=None, hessp=None, bounds=None, constraints=(),
                      tol=None, callback=None, options={}, x_grad=None):
    options = options.copy()
    if x_grad is not None and 'rol' not in method:
        # Fix this limitation
        msg = f"Method {method} does not currently support gradient checking"
        #raise Exception(msg)
        print(msg)

    if 'rol' in method and has_ROL:
        if callback is not None:
            raise Exception(f'Method {method} cannot use callbacks')
        if args != ():
            raise Exception(f'Method {method} cannot use args')
        rol_methods = {'rol-trust-constr': None}
        if method in rol_methods:
            rol_method = rol_methods[method]
        else:
            raise Exception(f"Method {method} not found")
        return rol_minimize(
            fun, x0, rol_method, jac, hess, hessp, bounds, constraints, tol,
            options, x_grad)

    x0 = x0.squeeze() # scipy only takes 1D np.ndarrays
    x0 = np.atleast_1d(x0) # change scalars to np.ndarrays
    assert x0.ndim <= 1
    if method == 'rol-trust-constr' and not has_ROL:
        print('ROL requested but not available; switching to scipy.minimize')
        method = 'trust-constr'
    
    if method == 'trust-constr':
        if 'ctol' in options:
            del options['ctol']
        return scipy_minimize(
            fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol,
            callback, options)
    elif method == 'slsqp':
        hess, hessp = None, None
        if 'ctol' in options:
            del options['ctol']
        if 'gtol' in options:
            # SLSQP has no 'gtol' option; translate it to 'ftol'.
            # (Assigning outside this block raised NameError when 'gtol' was absent.)
            options['ftol'] = options.pop('gtol')
        if 'verbose' in options:
            verbose = options['verbose']
            options['disp'] = verbose
            del options['verbose']
        return scipy_minimize(
            fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol,
            callback, options)

    raise Exception(f"Method {method} was not found")
Example #5
def minimize(args):
    """
    Wrapper around scipy.optimize.minimize that can be called via multiprocessing.
    This method does multiple restarts, one for each x0 in the X0 provided.

    """
    fun = args['fun']
    X0 = args['X0']
    use_gradients = args['use_gradients']
    bounds = args['bounds']
    sync_restarts = args['sync_restarts']
    options = args['options']

    warnings = {}  # dict to collect warnings
    if sync_restarts:
        res = scipy_minimize(fun,
                             X0,
                             method=_minimize_lbfgsb_multi,
                             jac=use_gradients,
                             tol=0.001,
                             bounds=bounds,
                             options=options)
        for status, mes in zip(res['status'], res['message']):

            if status:
                if mes not in warnings:
                    warnings[mes] = 0
                warnings[mes] += 1
    else:
        for x in X0:
            res = scipy_minimize(fun,
                                 x,
                                 method="L-BFGS-B",
                                 jac=use_gradients,
                                 tol=1e-7,
                                 bounds=bounds,
                                 options=options)
            if res['status']:
                mes = res['message']

                if mes not in warnings:
                    warnings[mes] = 0
                warnings[mes] += 1

    if warnings:
        for mes, num in warnings.items():
            logger.warning(f'Optimizer Warning ({num}x): {mes}')
    return fun.best_x, fun.best_y
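The snippet above depends on project-specific pieces (`fun.best_x`, `_minimize_lbfgsb_multi`, `logger`), so here is a self-contained sketch of the same multi-start idea using only NumPy and SciPy; the toy objective and all names are assumptions for illustration, not part of the original project.

import numpy as np
from scipy.optimize import minimize as scipy_minimize

def objective(x):
    # A multimodal 1-D objective; the global minimum is near x = -0.52.
    return np.sin(3.0 * x[0]) + 0.1 * x[0] ** 2

# Run L-BFGS-B from several starting points and keep the best result,
# mirroring the restart loop above.
starts = np.linspace(-2.0, 2.0, 5).reshape(-1, 1)
results = [scipy_minimize(objective, x0, method="L-BFGS-B",
                          bounds=[(-2.0, 2.0)]) for x0 in starts]
best = min(results, key=lambda r: r.fun)
print(best.x, best.fun)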
Example #6
    def vmax(self, total_mass, *prof_params):
        r""" Maximum circular velocity of the halo profile.

        Parameters
        ----------
        total_mass: array_like
            Total halo mass in :math:`M_{\odot}/h`; can be a number or a numpy array.

        *prof_params : array_like
            Any additional array(s) necessary to specify the shape of the radial profile,
            e.g., halo concentration.

        Returns
        --------
        vmax : array_like
            :math:`V_{\rm max}` in km/s.

        Notes
        ------
        See :ref:`halo_profile_definitions` for derivations and implementation details.

        """
        guess = 0.25
        result = scipy_minimize(self._vmax_helper, guess, args=prof_params)
        halo_radius = self.halo_mass_to_halo_radius(total_mass)

        return self.circular_velocity(result.x[0] * halo_radius, total_mass,
                                      *prof_params)
Example #7
def lbfgs(x, rho, f_df, maxiter=20):
    """
    Minimize the proximal operator of a given objective using L-BFGS

    Parameters
    ----------
    f_df : function
        Returns the objective and gradient of the function to minimize

    maxiter : int
        Maximum number of L-BFGS iterations
    """
    def f_df_augmented(theta):
        f, df = f_df(theta)
        obj = f + (rho / 2.) * np.linalg.norm(theta - x)**2
        grad = df + rho * (theta - x)
        return obj, grad

    res = scipy_minimize(f_df_augmented,
                         x,
                         jac=True,
                         method='L-BFGS-B',
                         options={
                             'maxiter': maxiter,
                             'disp': False
                         })

    return res.x
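A minimal usage sketch of the proximal operator above (the quadratic objective is an assumption for illustration, and the snippet's own imports, numpy as np and scipy.optimize.minimize as scipy_minimize, are assumed in scope):

import numpy as np

def quadratic_f_df(theta):
    # Objective ||theta||^2 and its gradient 2 * theta.
    return np.sum(theta ** 2), 2.0 * theta

x_anchor = np.array([1.0, -2.0, 3.0])
theta_prox = lbfgs(x_anchor, rho=1.0, f_df=quadratic_f_df)
# For this objective the proximal point has the closed form
# rho * x_anchor / (rho + 2); with rho = 1 that is x_anchor / 3.
print(theta_prox)  # approximately [0.333, -0.667, 1.0]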
Example #8
def MatchPolygons(points, points_ref, axes, bounds, maxeval=1000):
    points_ref = ConvexHull(points_ref)
    points = np.array(points)
    axes = np.array(axes)

    def f_obj(r):
        move = np.dot(r, axes)
        points_mv = points + move
        intersection = ClipPolygon(points_mv.tolist(), points_ref)
        return -PolygonArea(intersection)

    r = [0.0] * len(axes)
    while f_obj(r) == 0.0 and maxeval > 0:
        r = RandB(bounds)
        maxeval -= 1
        print(r, f_obj(r))
    if f_obj(r) == 0.0: return None, points
    bounds2 = [[xmin, xmax] for xmin, xmax in zip(bounds[0], bounds[1])]
    res = scipy_minimize(f_obj,
                         r,
                         bounds=bounds2,
                         options={'maxiter': maxeval})
    print(res)
    r = res['x']
    points_mv = points + np.dot(r, axes)
    return r, points_mv.tolist()
Example #9
    def fit_parameters_bfgs(self, error_func):
        # L-BFGS-B accepts bounds

        np.seterr(all='raise')

        params = self.get_initial_parameters()
        bounds = np.array([(p.min, p.max) for p_name, p in params.items()])

        #self._error_func = error_func

        for p_name, p in params.items():
            print("{:10s} [{:.2f} - {:.2f}]".format(p_name, p.min, p.max))

        x0 = [p.value for p_name, p in params.items()]
        print("initial guess for params: {}".format(x0))

        #exit()
        res = scipy_minimize(self._plumb_scipy,
                             x0=x0,
                             method='L-BFGS-B',
                             bounds=bounds,
                             args=(error_func, ))

        print(res)

        self._fit_params = self._params_array_to_dict(res.x)

        for p_name, p in params.items():
            print("{:10s} [{:.2f} - {:.2f}] : {:.2f}".format(
                p_name, p.min, p.max, self._fit_params[p_name]))
Example #10
    def stocha_fit_parameters(self):
        # L-BFGS-B accepts bounds

        np.seterr(all='raise')

        # Find first set of parameters
        params = self.get_initial_parameters()
        bounds = np.array([(p.min, p.max) for p_name, p in params.items()])

        # Group parameters
        for p_name, p in params.items():
            print("{:10s} [{:.2f} - {:.2f}]".format(p_name, p.min, p.max))

        x0 = [p.value for p_name, p in params.items()]
        print("initial guess for params: {}".format(x0))

        res = scipy_minimize(self._plumb_scipy_stocha,
                             x0=x0,
                             method='L-BFGS-B',
                             bounds=bounds)

        print(res)

        self._fit_params = self._params_array_to_dict(res.x)

        for p_name, p in params.items():
            print("{:10s} [{:.2f} - {:.2f}] : {:.2f}".format(
                p_name, p.min, p.max, self._fit_params[p_name]))
Example #11
    def fit_parameters(self, data = None, optimizer = 'LBFGSB',
                       randomPick = False,
                       picks = 1000,
                       start = 0,
                       end = None,
                       params = None):

        assert self._ICInitialized, 'ERROR: Initial conditions not initialized.'

        if isinstance(data, np.ndarray):
            self._data = data
            self._dataLength = data.shape[0]
        else:
            print("ERROR: Data required")
            return

        if end is None:
            self._fittingPeriod = [start, len(self._data)]
        else:
            self._fittingPeriod = [start, end]

        #print("fitting period: {}".format(self._fittingPeriod))


        # L-BFGS-B accepts bounds
        # np.seterr(all = 'raise')

        # Find first set of parameters
        parameters = self.get_initial_parameters(randomPick = randomPick, picks = picks)
        bounds = np.array([(p.min, p.max) for pName, p in parameters.items()])

        # Group parameters
        #print('Parameter bounds:')
        #for pName, p in parameters.items():
            #print("{:10s} [{:.2f} - {:.2f}]".format(pName, p.min, p.max))

        x0 = [p.value for p_name, p in parameters.items()]

        if params is not None:
            x0 = params

        print("Initial guess for the constant parameters: {}".format(x0))

        if optimizer == 'LBFGSB':
            res = scipy_minimize(self.plumb,
                                x0 = x0,
                                method = 'L-BFGS-B',
                                bounds = bounds)

            self._optimalParams = dict(zip(self._paramNames, res.x))
            self._fitted = True

            print('Optimal constant parameters after the fitting:')
            for pName, p in parameters.items():
                print("{:10s} [{:.4f} - {:.4f}] : {}".format(pName, p.min, p.max,
                                                                 self._optimalParams[pName]))
        else:
            print("Other method to implement")

        return
Example #12
def offset3(sino, raymax, start):

    epsilon = 1e-8
    t0 = -1.0
    tf = 1.0

    params = (sino, epsilon, t0, tf)

    z0 = [start]

    cons = ({
        'type': 'ineq',
        'fun': lambda x: -x[0] + raymax
    }, {
        'type': 'ineq',
        'fun': lambda x: x[0] + raymax
    })

    ###

    sol_con = scipy_minimize(consistency,
                             z0,
                             args=(params, ),
                             constraints=cons)
    beta = sol_con.x[0]

    ###

    offset = get_offset(beta, -1.0, 1.0, sino.shape[0])

    return beta, offset, offset
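The `cons` pair above encodes the box |beta| <= raymax as two 'ineq' dictionaries (SciPy treats 'ineq' constraints as fun(x) >= 0). A self-contained illustration of the same pattern with a toy objective (the quadratic is an assumption, not the original `consistency` function):

from scipy.optimize import minimize as scipy_minimize

raymax = 0.3
cons = ({'type': 'ineq', 'fun': lambda x: -x[0] + raymax},   # x[0] <= raymax
        {'type': 'ineq', 'fun': lambda x: x[0] + raymax})    # x[0] >= -raymax

# The unconstrained minimum (x = 1.0) lies outside the feasible band,
# so the solution is clipped to the boundary x = raymax.
res = scipy_minimize(lambda x: (x[0] - 1.0) ** 2, [0.0],
                     method='SLSQP', constraints=cons)
print(res.x)  # approximately [0.3]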
Example #13
    def vmax(self, total_mass, *prof_params):
        """ Maximum circular velocity of the halo profile.

        Parameters
        ----------
        total_mass: array_like
            Total halo mass in :math:`M_{\odot}/h`; can be a number or a numpy array.

        *prof_params : array_like
            Any additional array(s) necessary to specify the shape of the radial profile,
            e.g., halo concentration.

        Returns
        --------
        vmax : array_like
            :math:`V_{\rm max}` in km/s.

        Notes
        ------
        See :ref:`halo_profile_definitions` for derivations and implementation details.

        """
        guess = 0.25
        result = scipy_minimize(self._vmax_helper, guess, args=prof_params)
        halo_radius = self.halo_mass_to_halo_radius(total_mass)

        return self.circular_velocity(result.x[0]*halo_radius, total_mass, *prof_params)
Example #14
def LOCAL_SOLVER(objfun, initial, bounds, args=()):
    result = scipy_minimize(objfun,
                            initial,
                            args=args,
                            method="L-BFGS-B",
                            bounds=bounds,
                            tol=LOCAL_TOLERANCE)
    return result["x"], result["fun"]
Example #15
    def __call__(self, x0, rho):

        self.v = np.array(x0).copy()
        self.rho = rho

        res = scipy_minimize(self.f_df_augmented, x0, jac=True, method='L-BFGS-B',
                             options={'maxiter': self.numiter, 'disp': False})

        return res.x
Example #16
File: proxops.py  Project: ahwillia/descent
def lbfgs(x, rho, f_df, maxiter=20):

    def f_df_augmented(theta):
        f, df = f_df(theta)
        obj = f + (rho / 2.) * np.linalg.norm(theta - x) ** 2
        grad = df + rho * (theta - x)
        return obj, grad

    res = scipy_minimize(f_df_augmented, x, jac=True, method='L-BFGS-B',
                            options={'maxiter': maxiter, 'disp': False})

    return res.x
Example #17
def minimize(objective, p0, method, bounds, constraints, options, obs_rrs):  #
    """
    This is a wrapper function that iterates over all the substrate combinations
    calling the SciPy minimize function for each combination.  It returns the
    result with the best fit.  It supports passing a pool of worker to
    parallelize over the substrate combinations.

    Args:
	objective (callable): the objective function
        p0 (ndarray): the initial guess
        method (str): the type of solver
	bounds (sequence): bounds for the variable solution
	options (dict): solver options
	obs_rrs (ndarray): initial observations
	pool (Pool, optional): a pool of processes for multiprocessing
    """

    objective.observed_rrs = obs_rrs

    results = scipy_minimize(objective,
                             p0,
                             jac=True,
                             method=method,
                             bounds=bounds,
                             constraints=constraints,
                             options=options)

    #    Nc = len(objective._fixed_parameters.substrate_combinations)
    #
    #    if pool == None:
    #        results = []
    #        for i in range(Nc):
    #            results.append(pwork(i,objective,p0,method,bounds,options,obs_rrs))
    #    else:
    #        results = [None]*Nc
    #        presults = [None]*Nc
    #
    #        for i in range(Nc):
    #            presults[i] = pool.apply_async(pwork,args=(i,objective,p0,method,bounds,options,obs_rrs))
    #
    #        for i, presult in enumerate(presults):
    #            results[i] = presult.get()
    #
    #    min_fun = sys.maxsize
    #    id = -1
    #    tot_nit = 0
    #    for i, result in enumerate(results):
    #        if (result.fun < min_fun) & ( result.success ):
    #            min_fun = result.fun
    #            id = i
    #        tot_nit += result.nit

    return minimize_result(results.x, results.nit, results.success)
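The `jac=True` flag above tells SciPy that the callable returns an (objective value, gradient) pair rather than a value alone. A minimal self-contained sketch of that convention (the quadratic objective is an assumption for illustration):

import numpy as np
from scipy.optimize import minimize as scipy_minimize

def value_and_grad(p):
    # Returns f(p) and its gradient in a single call.
    return np.sum((p - 3.0) ** 2), 2.0 * (p - 3.0)

res = scipy_minimize(value_and_grad, np.zeros(2), jac=True,
                     method='SLSQP', bounds=[(0.0, 10.0)] * 2)
print(res.x)  # approximately [3., 3.]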
Example #18
def offset4(sino, k):

    V = sino.shape[1]
    R = sino.shape[0]

    MAX_NPIXELS = float(R / 4)

    epsilon = 1e-8
    t0 = -1.0
    tf = 1.0

    dt = (tf - t0) / R

    RAYMAX = MAX_NPIXELS * dt

    dimension = int((int(k) + 1) * (int(k) + 2) / 2)

    b = func4_data(sino, k, t0, tf)
    A, S = func4_matrix_blocks(k, t0, tf, V)

    z0 = numpy.zeros((dimension, 1))
    z0[0, 0], z0[1, 0], z0[2, 0] = azevedo_analytical_shift(t0, tf, sino)

    # -- optimization using scipy

    params = (sino, epsilon, t0, tf, k, A, b, S)

    cons = ({
        'type': 'ineq',
        'fun': lambda x, p: consistency_cstr(x, p),
        'args': (params, )
    }, {
        'type': 'ineq',
        'fun': lambda x: -x[0] + RAYMAX
    }, {
        'type': 'ineq',
        'fun': lambda x: x[0] + RAYMAX
    })

    solution = scipy_minimize(func4,
                              z0,
                              args=(params, ),
                              constraints=cons,
                              method='SLSQP')

    ## ---

    beta = solution.x[0]

    offset = get_offset(beta, -1.0, 1.0, sino.shape[0])

    return beta, offset, solution.x[1:dimension]
Example #19
def NelderMead(f, x, initial_step, lower_bounds, upper_bounds, xtol_rel,
               ftol_rel):
    """Note that NM doesn't impose bounds on optimization.
    The arguments initial_step, lower_bounds, and upper_bounds are not used.
    """
    res = scipy_minimize(f,
                         x,
                         method='nelder-mead',
                         options={
                             # SciPy's Nelder-Mead takes absolute tolerances
                             # ('fatol'/'xatol'), not the relative ones this
                             # wrapper receives.
                             'fatol': ftol_rel,
                             'xatol': xtol_rel,
                             'disp': False
                         })
    return (res.x, res.fun)
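A usage sketch of the wrapper above (the Rosenbrock objective is an assumption; the step and bound arguments are ignored, as the docstring notes):

import numpy as np
from scipy.optimize import rosen

x_opt, f_opt = NelderMead(rosen, np.array([1.3, 0.7]),
                          initial_step=None, lower_bounds=None,
                          upper_bounds=None, xtol_rel=1e-8, ftol_rel=1e-8)
print(x_opt, f_opt)  # converges toward [1, 1] with f near 0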
Example #20
def minimize(stimulus, response, stim_lags, dt):
    """Returns the ML estimate of the parameters of a linear-nonlinear Poisson model.
    Parameters:
    stimulus : vector / matrix of floats containing the input stimulus.
    response : vector / matrix of floats containing the responses of the neuron to the stimulus. Should be of same shape as the stimulus.
    dt : float value containing size of timebins in s.
    
    Returns:
    _filter : vector of floats. ML estimate of the convolutional filter.
    baseline : float value. ML estimate of the baseline offset. 
    """
    init = np.zeros(stim_lags + 1)
    params = scipy_minimize(nll, init, args=(stimulus, response, dt), jac=False).x
    return params[:-1], params[-1]
Example #21
    def scalar_minimize(self, method="Nelder-Mead", hess=None, tol=None, **kws):
        """use one of the scaler minimization methods from scipy.
        Available methods include:
          Nelder-Mead
          Powell
          CG  (conjugate gradient)
          BFGS
          Newton-CG
          Anneal
          L-BFGS-B
          TNC
          COBYLA
          SLSQP

        If the objective function returns a numpy array instead
        of the expected scalar, the sum of squares of the array
        will be used.

        Note that bounds and constraints can be set on Parameters
        for any of these methods, so are not supported separately
        for those designed to use bounds.

        """
        # print 'RUN SCALAR MIN with method ', method
        if not HAS_SCALAR_MIN:
            raise NotImplementedError

        self.prepare_fit()

        maxfev = 1000 * (self.nvarys + 1)
        opts = {"maxiter": maxfev}
        if method not in ("L-BFGS-B", "TNC", "SLSQP"):
            opts["maxfev"] = maxfev

        fmin_kws = dict(method=method, tol=tol, hess=hess, options=opts)
        fmin_kws.update(self.kws)
        fmin_kws.update(kws)

        def penalty(params):
            "local penalty function -- eval sum-squares residual"
            r = self.__residual(params)
            if isinstance(r, ndarray):
                r = (r * r).sum()
            return r

        ret = scipy_minimize(penalty, self.vars, **fmin_kws)
        xout = ret.x
        self.message = ret.message
        self.nfev = ret.nfev
Example #22
File: minimizer.py  Project: omdv/lmfit-py
    def scalar_minimize(self, method='Nelder-Mead', hess=None, tol=None, **kws):
        """use one of the scaler minimization methods from scipy.
        Available methods include:
          Nelder-Mead
          Powell
          CG  (conjugate gradient)
          BFGS
          Newton-CG
          Anneal
          L-BFGS-B
          TNC
          COBYLA
          SLSQP

        If the objective function returns a numpy array instead
        of the expected scalar, the sum of squares of the array
        will be used.

        Note that bounds and constraints can be set on Parameters
        for any of these methods, so are not supported separately
        for those designed to use bounds.

        """
        if not HAS_SCALAR_MIN:
            raise NotImplementedError

        self.prepare_fit()

        maxfev = 1000*(self.nvarys + 1)
        opts = {'maxiter': maxfev}
        if method not in ('L-BFGS-B', 'TNC', 'SLSQP'):
            opts['maxfev'] = maxfev

        fmin_kws = dict(method=method, tol=tol, hess=hess, options=opts)
        fmin_kws.update(self.kws)
        fmin_kws.update(kws)

        ret = scipy_minimize(self.penalty, self.vars, **fmin_kws)
        xout = ret.x
        self.message = ret.message        
        self.nfev = ret.nfev
        self.residual = self.__residual(xout)
        self.chisqr = (self.residual**2).sum()
        self.ndata = len(self.residual)
        self.nfree = (self.ndata - self.nvarys)
        self.redchi = self.chisqr/self.nfree
        self.unprepare_fit()
        return
Example #23
    def scalar_minimize(self,
                        method='Nelder-Mead',
                        hess=None,
                        tol=None,
                        **kws):
        """use one of the scaler minimization methods from scipy.
        Available methods include:
          Nelder-Mead
          Powell
          CG  (conjugate gradient)
          BFGS
          Newton-CG
          Anneal
          L-BFGS-B
          TNC
          COBYLA
          SLSQP

        If the objective function returns a numpy array instead
        of the expected scalar, the sum of squares of the array
        will be used.

        Note that bounds and constraints can be set on Parameters
        for any of these methods, so are not supported separately
        for those designed to use bounds.

        """
        if not HAS_SCALAR_MIN:
            raise NotImplementedError

        self.prepare_fit()

        maxfev = 1000 * (self.nvarys + 1)
        opts = {'maxiter': maxfev}
        if method not in ('L-BFGS-B', 'TNC', 'SLSQP'):
            opts['maxfev'] = maxfev

        fmin_kws = dict(method=method, tol=tol, hess=hess, options=opts)
        fmin_kws.update(self.kws)
        fmin_kws.update(kws)

        ret = scipy_minimize(self.penalty, self.vars, **fmin_kws)
        xout = ret.x
        self.message = ret.message
        self.nfev = ret.nfev
        self.chisqr = (self.penalty(xout)**2).sum()
Example #24
File: proxops.py  Project: nirum/descent
def lbfgs(x, rho, f_df, maxiter=20):
    """
    Minimize the proximal operator of a given objective using L-BFGS

    Parameters
    ----------
    f_df : function
        Returns the objective and gradient of the function to minimize

    maxiter : int
        Maximum number of L-BFGS iterations
    """

    def f_df_augmented(theta):
        f, df = f_df(theta)
        obj = f + (rho / 2.) * np.linalg.norm(theta - x) ** 2
        grad = df + rho * (theta - x)
        return obj, grad

    res = scipy_minimize(f_df_augmented, x, jac=True, method='L-BFGS-B',
                         options={'maxiter': maxiter, 'disp': False})

    return res.x
Example #25
def pwork(id, objective, p0, method, bounds, options, obs_rrs):
    """
    This is a wrapper function for the SciPy minimize function as only
    top level functions can be pickled with the multiprocessing module

    Args:
        id (int): the substrate combination index to use
        objective (callable): the objective function
        p0 (ndarray): the initial guess
        method (str): the type of solver
        bounds (sequence): bounds for the variable solution
        options (dict): solver options
        obs_rrs (ndarray): initial observations
    """

    objective.id = id
    objective.observed_rrs = obs_rrs

    return scipy_minimize(objective,
                          p0,
                          method=method,
                          bounds=bounds,
                          options=options)
Example #26
def offset5(sino, k):

    V = sino.shape[1]
    R = sino.shape[0]

    MAX_NPIXELS = 400

    epsilon = 1e-5
    t0 = -1.0
    tf = 1.0

    dt = (tf - t0) / R

    RAYMAX = MAX_NPIXELS * dt

    dimension = int((int(k) + 1) * (int(k) + 2) / 2)

    b = func4_data(sino, k, t0, tf)
    A, S = func4_matrix_blocks(k, t0, tf, V)

    z0 = numpy.zeros((dimension, 1))
    z0[0, 0], _, _ = azevedo_analytical_shift(t0, tf, sino)

    # -- optimization using scipy

    params = (sino, epsilon, t0, tf, k, A, b, S)

    sol_con = scipy_minimize(func4, z0, args=(params, ))

    ## ---

    beta = sol_con.x[0]

    offset = get_offset(beta, -1.0, 1.0, sino.shape[0])

    return beta, offset, sol_con.x[1:dimension]
Example #27
    def rmax(self, total_mass, *args):
        """ Radius at which the halo attains its maximum circular velocity.

        Parameters 
        ----------
        total_mass: array_like
            Total halo mass in :math:`M_{\odot}/h`; can be a number or a numpy array.

        args : array_like 
            Any additional array(s) necessary to specify the shape of the radial profile, 
            e.g., halo concentration.         

        Returns 
        --------
        rmax : array_like 
            :math:`R_{\rm max}` in Mpc/h.
        """
        halo_radius = self.halo_mass_to_halo_radius(total_mass)

        guess = 0.25

        result = scipy_minimize(self._vmax_helper, guess, args=args)

        return result.x[0]*halo_radius
Example #28
    def scalar_minimize(self, method='Nelder-Mead', **kws):
        """use one of the scaler minimization methods from scipy.
        Available methods include:
          Nelder-Mead
          Powell
          CG  (conjugate gradient)
          BFGS
          Newton-CG
          Anneal
          L-BFGS-B
          TNC
          COBYLA
          SLSQP
          dogleg
          trust-ncg

        If the objective function returns a numpy array instead
        of the expected scalar, the sum of squares of the array
        will be used.

        Note that bounds and constraints can be set on Parameters
        for any of these methods, so are not supported separately
        for those designed to use bounds.

        """
        if not HAS_SCALAR_MIN:
            raise NotImplementedError

        self.prepare_fit()

        fmin_kws = dict(method=method,
                        options={'maxiter': 1000*(self.nvarys + 1)})
        fmin_kws.update(self.kws)
        fmin_kws.update(kws)

        # hess supported only in some methods
        if 'hess' in fmin_kws and method not in ('Newton-CG',
                                                 'dogleg', 'trust-ncg'):
            fmin_kws.pop('hess')

        # jac supported only in some methods (and Dfun could be used...)
        if 'jac' not in fmin_kws and fmin_kws.get('Dfun', None) is not None:
            self.jacfcn = fmin_kws.pop('Dfun')
            fmin_kws['jac'] = self.__jacobian

        if 'jac' in fmin_kws and method not in ('CG', 'BFGS', 'Newton-CG',
                                                'dogleg', 'trust-ncg'):
            self.jacfcn = None
            fmin_kws.pop('jac')

        ret = scipy_minimize(self.penalty, self.vars, **fmin_kws)
        xout = ret.x
        self.message = ret.message
        self.nfev = ret.nfev
        self.chisqr = self.residual = self.__residual(xout)
        self.ndata = 1
        self.nfree = 1
        if isinstance(self.residual, ndarray):
            self.chisqr = (self.chisqr**2).sum()
            self.ndata = len(self.residual)
            self.nfree = self.ndata - self.nvarys
        self.redchi = self.chisqr/self.nfree
        self.unprepare_fit()
        return
Example #29
    def scalar_minimize(self, method='Nelder-Mead', **kws):
        """
        use one of the scalar minimization methods from scipy.
        Available methods include:
          Nelder-Mead
          Powell
          CG  (conjugate gradient)
          BFGS
          Newton-CG
          Anneal
          L-BFGS-B
          TNC
          COBYLA
          SLSQP

        If the objective function returns a numpy array instead
        of the expected scalar, the sum of squares of the array
        will be used.

        Note that bounds and constraints can be set on Parameters
        for any of these methods, so are not supported separately
        for those designed to use bounds.

        """
        if not HAS_SCALAR_MIN:
            raise NotImplementedError

        self.prepare_fit()

        maxfev = 1000*(self.nvarys + 1)
        opts = {'maxiter': maxfev}
        if method not in ('L-BFGS-B','TNC', 'SLSQP'):
            opts['maxfev'] = maxfev

        fmin_kws = dict(method=method, tol=self.toler, options=opts)

        fmin_kws.update(self.kws)
        fmin_kws.update(kws)
        def penalty(parvals):
            "local penalty function -- eval sum-squares residual"
            r = self.__residual(parvals)
            if isinstance(r, ndarray):
                r = (r*r).sum()
            return r

        ret = scipy_minimize(penalty, self.vars, **fmin_kws)
        del self.vars
        resid  = self.__residual(ret.x)
        ndata  = len(resid)
        chisqr = (resid**2).sum()
        nfree  = (ndata - self.nvarys)
        redchi = chisqr / nfree

        ofit = group = self.paramgroup
        if Group is not None:
            ofit = group.fit_details = Group()

        ofit.method    = method
        ofit.nfev      = ret.nfev
        ofit.success   = ret.success
        ofit.status    = ret.status
        group.nvarys   = self.nvarys
        group.nfree    = nfree
        group.residual = resid
        group.message  = ret.message
        group.chi_square  = chisqr
        group.chi_reduced = redchi
        group.errorbars   = False
Example #30
    res = minimize_manfred(
        func=test_func,
        x=start_x,
        xtol=0.001,
        step_sizes=[0.1, 0.05, 0.0125],
        max_fun=10_000,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        max_step_sizes=[1, 0.2, 0.1],
        linesearch_n_points=12,
        gradient_weight=gradient_weight,
    )

    scipy_res = scipy_minimize(scipy_test_func,
                               start_x,
                               method="Nelder-Mead",
                               options={"maxfev": 100_000})

    fig = plot_history(res)

    fig.savefig(Path(__file__).resolve().parent / "convergence_plot.pdf")

    print("Noise Free Test:           ")  # noqa: T001
    print("Manfred Solution:     ", res["solution_x"].round(2))  # noqa: T001
    print("True Solution:        ", true_x.round(2))  # noqa: T001
    print("Nelder Mead Solution: ", scipy_res.x.round(2))  # noqa: T001
    print("Manfred n_evals:      ",
          res["n_criterion_evaluations"])  # noqa: T001
    print("Nelder Mead n_evals:  ", scipy_res.nfev, "\n")  # noqa: T001

    # ==================================================================================
Example #31
def _minimize(
    criterion,
    criterion_args,
    criterion_kwargs,
    params,
    internal_params,
    constraints,
    algorithm,
    algo_options,
    general_options,
    queue,
):
    """
    Create the internal criterion function and minimize it.

    Args:
        criterion (function):
            Python function that takes a pandas Series with parameters as the first
            argument and returns a scalar floating point value.

        criterion_args (list or tuple):
            additional positional arguments for criterion

        criterion_kwargs (dict):
            additional keyword arguments for criterion

        params (pd.DataFrame):
            See :ref:`params`.

        internal_params (DataFrame):
            See :ref:`params`.

        constraints (list):
            list with constraint dictionaries. See for details.

        algorithm (str):
            specifies the optimization algorithm. See :ref:`list_of_algorithms`.

        algo_options (dict):
            algorithm specific configurations for the optimization

        general_options (dict):
            additional configurations for the optimization

        queue (Queue):
            queue to which originally the parameters DataFrame is supplied and to which
            the updated parameter Series will be supplied later.

    """
    internal_criterion = _create_internal_criterion(
        criterion=criterion,
        params=params,
        internal_params=internal_params,
        constraints=constraints,
        criterion_args=criterion_args,
        criterion_kwargs=criterion_kwargs,
        queue=queue,
    )

    with open(os.path.join(os.path.dirname(__file__), "algo_dict.json")) as j:
        algos = json.load(j)
    origin, algo_name = algorithm.split("_", 1)

    try:
        assert algo_name in algos[
            origin], "Invalid algorithm requested: {}".format(algorithm)
    except (AssertionError, KeyError):
        proposals = propose_algorithms(algorithm, algos)
        raise NotImplementedError(
            f"{algorithm} is not a valid choice. Did you mean one of {proposals}?"
        )

    if origin in ["nlopt", "pygmo"]:
        prob = _create_problem(internal_criterion, internal_params)
        algo = _create_algorithm(algo_name, algo_options, origin)
        pop = _create_population(prob, algo_options, internal_params)
        evolved = algo.evolve(pop)
        result = _process_results(evolved, params, internal_params,
                                  constraints, origin)
    elif origin == "scipy":
        bounds = _get_scipy_bounds(internal_params)
        x0 = _x_from_params(params, constraints)
        minimized = scipy_minimize(
            internal_criterion,
            x0,
            method=algo_name,
            bounds=bounds,
            options=algo_options,
        )
        result = _process_results(minimized, params, internal_params,
                                  constraints, origin)
    else:
        raise ValueError("Invalid algorithm requested.")

    return result
Example #32
def offset6(sino, k, *args):

    #
    # parameters to speed up method
    #
    if len(args) > 0:
        argum = args[0]
    ############

    V = sino.shape[1]
    R = sino.shape[0]

    th = numpy.linspace(0, 180, V, endpoint=False) * (numpy.pi / 180)
    th.shape = [len(th), 1]

    if len(args) and len(argum) == 1:
        #MAX_NPIXELS = int(argum[0]*R)
        RAYMAX = float(argum[0])
    else:
        #MAX_NPIXELS = int(0.25*R)
        RAYMAX = 0.5

    epsilon = 1e-8
    t0 = -1.0
    tf = 1.0

    dt = (tf - t0) / R

    #RAYMAX = MAX_NPIXELS * dt

    moments = numpy.zeros([k + 1, k + 1])
    moments[0][0] = 1.0

    # -- first optimization step

    j = 1

    b = func4_data(sino, j, t0, tf)
    A, S = func4_matrix_blocks(j, t0, tf, V)

    bnds = ((-RAYMAX, RAYMAX), (None, None), (None, None))

    params = (sino, epsilon, t0, tf, j, A, b, S)

    cons = ({
        'type': 'ineq',
        'fun': lambda x, p: consistency_cstr(x, p),
        'args': (params, )
    }, {
        'type': 'ineq',
        'fun': lambda x: -x[0] + RAYMAX
    }, {
        'type': 'ineq',
        'fun': lambda x: x[0] + RAYMAX
    })

    dimension = int((int(j) + 1) * (int(j) + 2) / 2)

    z0 = numpy.zeros((dimension, 1))
    beta0, cxmass, cymass = azevedo_analytical_shift(t0, tf, sino)

    if len(args) > 0 and len(argum) == 2:
        initial = argum[1]
        z0[0, 0] = initial
        z0[1, 0] = cxmass
        z0[2, 0] = cymass
    else:
        z0[0, 0] = -beta0
        z0[1, 0] = cxmass
        z0[2, 0] = cymass

    solution = scipy_minimize(
        func4, z0, args=(params, ),
        constraints=cons)  #, bounds=bnds) # method='SLSQP')

    beta = solution.x[0]

    y = solution.x[1:dimension]
    y.shape = [len(y), 1]

    moments[1, 0] = y[0]  # z0[1,0] #y[0]
    moments[0, 1] = y[1]  # z0[2,0] #y[1]

    if k > 1:

        for m in range(2, k + 1):
            dimension = m + 1
            b = func4_data(sino, m, t0, tf)
            D, _, _, _ = func4_matrix_diag(beta, m, V)
            A_ = matrix_A(m, th)
            A = numpy.c_[A_, A]
            M = numpy.dot(A, D)
            z0 = numpy.zeros((dimension, 1))

            # -- optimization using scipy

            params = (sino, epsilon, t0, tf, m, M, b, y, beta)

            solution = scipy_minimize(func5, z0, args=(params, ))

            y_ = solution.x

            y_.shape = [dimension, 1]

            y = numpy.r_[y_, y]

            ## --- moment matrix

            for l in range(0, m + 1):
                moments[m - l][l] = y[l]

    offset = get_offset(beta, -1.0, 1.0, sino.shape[0])

    return -beta, offset, moments
Example #33
def minimize_scipy_generic(rf_np, method, bounds = None, **kwargs):
    ''' Interface to the generic minimize method in scipy '''

    try:
        from scipy.optimize import minimize as scipy_minimize
    except ImportError:
        print "**************** Deprecated warning *****************"
        print "You have an unusable installation of scipy. This version is not supported by dolfin-adjoint."

        try:
            import scipy
            print "Version: %s\tFile: %s" % (scipy.__version__, scipy.__file__)
        except:
            pass

        raise

    if method in ["Newton-CG"]:
        forget = None
    else:
        forget = False

    project = kwargs.pop("project", False)

    m = [p.data() for p in rf_np.controls]
    m_global = rf_np.obj_to_array(m)
    J = rf_np.__call__
    dJ = lambda m: rf_np.derivative(m, forget=forget, project=project)
    H = rf_np.hessian

    if not "options" in kwargs:
        kwargs["options"] = {}
    if rank(rf_np.rf.mpi_comm()) != 0:
        # Shut up all processors except the first one.
        kwargs["options"]["disp"] = False
    else:
        # Print out progress information by default
        if not "disp" in kwargs["options"]:
            kwargs["options"]["disp"] = True

    # Make the default SLSQP options more verbose
    if method == "SLSQP" and "iprint" not in kwargs["options"]:
        kwargs["options"]["iprint"] = 2

    # For gradient-based methods add the derivative function to the argument list
    if method not in ["COBYLA", "Nelder-Mead", "Anneal", "Powell"]:
        kwargs["jac"] = dJ

    # For Hessian-based methods add the Hessian action function to the argument list
    if method in ["Newton-CG"]:
        kwargs["hessp"] = H

    if "constraints" in kwargs:
        from constraints import canonicalise, InequalityConstraint, EqualityConstraint
        constraints = canonicalise(kwargs["constraints"])
        scipy_c = []
        for c in constraints:
            if isinstance(c, InequalityConstraint):
                typestr = "ineq"
            elif isinstance(c, EqualityConstraint):
                typestr = "eq"
            else:
                raise Exception, "Unknown constraint class"

            def jac(x):
                out = c.jacobian(x)
                return [gather(y) for y in out]

            scipy_c.append(dict(type=typestr, fun=c.function, jac=jac))
        kwargs["constraints"] = scipy_c

    if method=="basinhopping":
        try:
            from scipy.optimize import basinhopping
        except ImportError:
            print "**************** Outdated scipy version warning *****************"
            print "The basin hopping optimisation algorithm requires scipy >= 0.12."
            raise ImportError

        del kwargs["options"]
        del kwargs["jac"]
        kwargs["minimizer_kwargs"]["jac"]=dJ

        if "bounds" in kwargs["minimizer_kwargs"]:
            kwargs["minimizer_kwargs"]["bounds"] = \
                serialise_bounds(rf_np, kwargs["minimizer_kwargs"]["bounds"])

        res = basinhopping(J, m_global, **kwargs)

    elif bounds is not None:
        bounds = serialise_bounds(rf_np, bounds)
        res = scipy_minimize(J, m_global, method=method, bounds=bounds, **kwargs)
    else:
        res = scipy_minimize(J, m_global, method=method, **kwargs)

    rf_np.set_controls(np.array(res["x"]))
    m = [p.data() for p in rf_np.controls]
    return m
Example #34
def minimize_scipy_generic(rf_np, method, bounds=None, **kwargs):
    ''' Interface to the generic minimize method in scipy '''

    try:
        from scipy.optimize import minimize as scipy_minimize
    except ImportError:
        print("**************** Deprecated warning *****************")
        print(
            "You have an unusable installation of scipy. This version is not supported by dolfin-adjoint."
        )

        try:
            import scipy
            print("Version: %s\tFile: %s" %
                  (scipy.__version__, scipy.__file__))
        except:
            pass

        raise

    if method in ["Newton-CG"]:
        forget = None
    else:
        forget = False

    project = kwargs.pop("project", False)

    m = [p.data() for p in rf_np.controls]
    m_global = rf_np.obj_to_array(m)
    J = rf_np.__call__
    dJ = lambda m: rf_np.derivative(m, forget=forget, project=project)
    H = rf_np.hessian

    if not "options" in kwargs:
        kwargs["options"] = {}
    if rank(rf_np.rf.mpi_comm()) != 0:
        # Shut up all processors except the first one.
        kwargs["options"]["disp"] = False
    else:
        # Print out progress information by default
        if not "disp" in kwargs["options"]:
            kwargs["options"]["disp"] = True

    # Make the default SLSQP options more verbose
    if method == "SLSQP" and "iprint" not in kwargs["options"]:
        kwargs["options"]["iprint"] = 2

    # For gradient-based methods add the derivative function to the argument list
    if method not in ["COBYLA", "Nelder-Mead", "Anneal", "Powell"]:
        kwargs["jac"] = dJ

    # For Hessian-based methods add the Hessian action function to the argument list
    if method in ["Newton-CG"]:
        kwargs["hessp"] = H

    if "constraints" in kwargs:
        from .constraints import canonicalise, InequalityConstraint, EqualityConstraint
        constraints = canonicalise(kwargs["constraints"])
        scipy_c = []
        for c in constraints:
            if isinstance(c, InequalityConstraint):
                typestr = "ineq"
            elif isinstance(c, EqualityConstraint):
                typestr = "eq"
            else:
                raise Exception("Unknown constraint class")

            def jac(x):
                out = c.jacobian(x)
                return [gather(y) for y in out]

            scipy_c.append(dict(type=typestr, fun=c.function, jac=jac))
        kwargs["constraints"] = scipy_c

    if method == "basinhopping":
        try:
            from scipy.optimize import basinhopping
        except ImportError:
            print(
                "**************** Outdated scipy version warning *****************"
            )
            print(
                "The basin hopping optimisation algorithm requires scipy >= 0.12."
            )
            raise ImportError

        del kwargs["options"]
        del kwargs["jac"]
        kwargs["minimizer_kwargs"]["jac"] = dJ

        if "bounds" in kwargs["minimizer_kwargs"]:
            kwargs["minimizer_kwargs"]["bounds"] = \
                serialise_bounds(rf_np, kwargs["minimizer_kwargs"]["bounds"])

        res = basinhopping(J, m_global, **kwargs)

    elif bounds is not None:
        bounds = serialise_bounds(rf_np, bounds)
        res = scipy_minimize(J,
                             m_global,
                             method=method,
                             bounds=bounds,
                             **kwargs)
    else:
        res = scipy_minimize(J, m_global, method=method, **kwargs)

    rf_np.set_controls(np.array(res["x"]))
    m = [p.data() for p in rf_np.controls]
    return m
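Because the basinhopping branch above depends on the dolfin-adjoint reduced functional, here is a self-contained sketch of the same `minimizer_kwargs` pattern (the toy objective is the 1-D example from the SciPy documentation, an assumption here):

import numpy as np
from scipy.optimize import basinhopping

def f(x):
    # A 1-D function with several local minima; global minimum near x = -0.195.
    return np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]

res = basinhopping(f, x0=[1.0], niter=100,
                   minimizer_kwargs={"method": "L-BFGS-B"})
print(res.x, res.fun)  # approximately [-0.195], -1.0009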
Example #35
    def scalar_minimize(self, method='Nelder-Mead', **kws):
        """
        use one of the scalar minimization methods from scipy.
        Available methods include:
          Nelder-Mead
          Powell
          CG  (conjugate gradient)
          BFGS
          Newton-CG
          Anneal
          L-BFGS-B
          TNC
          COBYLA
          SLSQP

        If the objective function returns a numpy array instead
        of the expected scalar, the sum of squares of the array
        will be used.

        Note that bounds and constraints can be set on Parameters
        for any of these methods, so are not supported separately
        for those designed to use bounds.

        """
        if not HAS_SCALAR_MIN:
            raise NotImplementedError

        self.prepare_fit()

        maxfev = 1000 * (self.nvarys + 1)
        opts = {'maxiter': maxfev}
        if method not in ('L-BFGS-B', 'TNC', 'SLSQP'):
            opts['maxfev'] = maxfev

        fmin_kws = dict(method=method, tol=self.toler, options=opts)

        fmin_kws.update(self.kws)
        fmin_kws.update(kws)

        def penalty(parvals):
            "local penalty function -- eval sum-squares residual"
            r = self.__residual(parvals)
            if isinstance(r, ndarray):
                r = (r * r).sum()
            return r

        ret = scipy_minimize(penalty, self.vars, **fmin_kws)
        del self.vars
        resid = self.__residual(ret.x)
        ndata = len(resid)
        chisqr = (resid**2).sum()
        nfree = (ndata - self.nvarys)
        redchi = chisqr / nfree

        ofit = group = self.paramgroup
        if Group is not None:
            ofit = group.fit_details = Group()

        ofit.method = method
        ofit.nfev = ret.nfev
        ofit.success = ret.success
        ofit.status = ret.status
        group.nvarys = self.nvarys
        group.nfree = nfree
        group.residual = resid
        group.message = ret.message
        group.chi_square = chisqr
        group.chi_reduced = redchi
        group.errorbars = False
Example #36
    def fit_parameters(self, data = None, optimizer = 'LBFGSB',
                       randomPick = False,
                       picks = 1000,
                       start = 0,
                       end = None,
                       params = None):

        assert self._ICInitialized, 'ERROR: Initial conditions not initialized.'

        if isinstance(data, np.ndarray):
            self._data = data
            self._dataLength = data.shape[0]
        else:
            print("ERROR: Data required")
            return

        if end is None:
            self._fittingPeriod = [start, len(self._data)]
        else:
            self._fittingPeriod = [start, end]

        #print("fitting period: {}".format(self._fittingPeriod))


        # L-BFGS-B accepts bounds
        # np.seterr(all = 'raise')
        nonConstantParamNames = [pName for pName in self._paramNames if pName not in self._constantParamNames]
        # Find first set of parameters
        initialParams, bounds = self.get_initial_parameters(paramNames = nonConstantParamNames, randomPick = randomPick, picks = picks)
        constantParams, _ = self.get_initial_parameters(paramNames = self._constantParamNames, randomPick = randomPick, picks = picks)
        bounds = [bound for bound in bounds.values()]

        if params is not None:
            x0 = params
        else:
            x0 = [p for p in initialParams.values()]

        #print(f"Initial guess for the parameters:\n{x0}")
        #for pName, (pMin, pMax) in zip(nonConstantParamNames, bounds):
            #print("{:10s} [{:.4f} - {:.4f}] : {:.4f}".format(pName, pMin, pMax, initialParams[pName]))

        if optimizer == 'LBFGSB':
            print(constantParams)
            res = differential_evolution(self.plumb,
                                         bounds = bounds,
                                         args = (constantParams, False),
                                         popsize = 30,
                                         mutation = (1, 1.9),
                                         recombination = 0.3)
            print('Status : %s' % res['message'])
            print('Total Evaluations: %d' % res['nfev'])
            solution = res['x']
            print(f'Solution:\n{solution}')

            res = scipy_minimize(self.plumb,
                                 x0 = res.x, # x0,
                                 args = (constantParams, True),
                                 method = 'L-BFGS-B',
                                 bounds = bounds)
            print(res.x)
            parameters = res.x
            for paramName, i in zip(self._paramNames, range(len(parameters) + len(constantParams))):
                if paramName in constantParams:
                    parameters = np.insert(parameters, i, constantParams[paramName])
            self._optimalParams = dict(zip(self._paramNames, parameters))
            self._fitted = True

            print('Optimal parameters after the fitting:')
            for pName, (pMin, pMax) in zip(nonConstantParamNames, bounds):
                print("{:10s} [{:.4f} - {:.4f}] : {:.4f}".format(pName, pMin, pMax,
                                                                 self._optimalParams[pName]))
            print([self._optimalParams[pName] for pName in self._paramNames])
        else:
            print("Other method to implement")

        return
Example #37
    def scalar_minimize(self, method='Nelder-Mead', **kws):
        """
        Use one of the scalar minimization methods from
        scipy.optimize.minimize.

        Parameters
        ----------
        method : str, optional
            Name of the fitting method to use.
            One of:
                'Nelder-Mead' (default)
                'L-BFGS-B'
                'Powell'
                'CG'
                'Newton-CG'
                'COBYLA'
                'TNC'
                'trust-ncg'
                'dogleg'
                'SLSQP'
                'differential_evolution'

        kws : dict, optional
            Minimizer options pass to scipy.optimize.minimize.

        If the objective function returns a numpy array instead
        of the expected scalar, the sum of squares of the array
        will be used.

        Note that bounds and constraints can be set on Parameters
        for any of these methods, so are not supported separately
        for those designed to use bounds. However, if you use the
        differential_evolution option you must specify finite
        (min, max) for each Parameter.

        Returns
        -------
        success : bool
            Whether the fit was successful.

        """
        if not HAS_SCALAR_MIN:
            raise NotImplementedError
        self.prepare_fit()

        fmin_kws = dict(method=method,
                        options={'maxiter': 1000 * (self.nvarys + 1)})
        fmin_kws.update(self.kws)
        fmin_kws.update(kws)

        # hess supported only in some methods
        if 'hess' in fmin_kws and method not in ('Newton-CG', 'dogleg',
                                                 'trust-ncg'):
            fmin_kws.pop('hess')

        # jac supported only in some methods (and Dfun could be used...)
        if 'jac' not in fmin_kws and fmin_kws.get('Dfun', None) is not None:
            self.jacfcn = fmin_kws.pop('Dfun')
            fmin_kws['jac'] = self.__jacobian

        if 'jac' in fmin_kws and method not in ('CG', 'BFGS', 'Newton-CG',
                                                'dogleg', 'trust-ncg'):
            self.jacfcn = None
            fmin_kws.pop('jac')

        if method == 'differential_evolution':
            fmin_kws['method'] = _differential_evolution
            pars = self.params
            bounds = [(pars[par].min, pars[par].max) for par in pars]
            if not np.all(np.isfinite(bounds)):
                raise ValueError('With differential evolution finite bounds '
                                 'are required for each parameter')
            bounds = [(-np.pi / 2., np.pi / 2.)] * len(self.vars)
            fmin_kws['bounds'] = bounds

            # in scipy 0.14 this can be called directly from scipy_minimize
            # When minimum scipy is 0.14 the following line and the else
            # can be removed.
            ret = _differential_evolution(self.penalty, self.vars, **fmin_kws)
        else:
            ret = scipy_minimize(self.penalty, self.vars, **fmin_kws)

        xout = ret.x
        self.message = ret.message

        self.nfev = ret.nfev
        self.chisqr = self.residual = self.__residual(xout)
        self.ndata = 1
        self.nfree = 1
        if isinstance(self.residual, ndarray):
            self.chisqr = (self.chisqr**2).sum()
            self.ndata = len(self.residual)
            self.nfree = self.ndata - self.nvarys
        self.redchi = self.chisqr / self.nfree
        self.unprepare_fit()
        return ret.success
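In later SciPy versions, `differential_evolution` can be called directly instead of being routed through the `method` argument as in the fallback above; a minimal sketch (the Rosenbrock objective is an assumption for illustration):

from scipy.optimize import differential_evolution, rosen

# Finite (min, max) bounds are required for every parameter.
result = differential_evolution(rosen, bounds=[(-5.0, 5.0), (-5.0, 5.0)],
                                seed=0)
print(result.x, result.fun)  # near [1, 1] with f near 0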
Example #38
    def solve(self):
        """
        Solve optimal control problem

        """

        msg = "You need to build the problem before solving it"
        assert hasattr(self, "opt_type"), msg

        module, method = self.opt_type.split("_")

        logger.info("\n" + "Starting optimization".center(100, "-"))
        logger.info(
            "Scale: {}, \nDerivative Scale: {}".format(
                self.rd.scale, self.rd.derivative_scale
            )
        )
        logger.info(
            "Tolerace: {}, \nMaximum iterations: {}\n".format(self.tol, self.max_iter)
        )

        t = Timer()
        t.start()

        if self.oneD:
            res = minimize_1d(self.rd, self.x[0], **self.options)
            x = res["x"]
        else:
            if module == "scipy":
                res = scipy_minimize(self.rd, self.x, **self.options)
                x = res["x"]
            elif module == "pyOpt":
                obj, x, d = self.problem(**self.options)
            elif module == "moola":
                sol = self.solver.solve()
                x = sol["control"].data
            elif module == "ipopt":
                x = self.solver.solve(self.x)
            else:
                msg = (
                    "Unknown optimization type '{}'. "
                    "Define the optimization type as 'module_method', "
                    "where module is e.g. scipy or pyOpt and method is "
                    "e.g. slsqp."
                ).format(self.opt_type)
                raise ValueError(msg)

        run_time = t.stop()

        opt_result = {}

        opt_result["x"] = x
        opt_result["nfev"] = self.rd.iter
        opt_result["nit"] = self.rd.iter
        opt_result["njev"] = self.rd.nr_der_calls
        opt_result["ncrash"] = self.rd.nr_crashes
        opt_result["run_time"] = run_time
        opt_result["controls"] = self.rd.controls_lst
        opt_result["func_vals"] = self.rd.func_values_lst
        opt_result["forward_times"] = self.rd.forward_times
        opt_result["backward_times"] = self.rd.backward_times
        opt_result["grad_norm"] = self.rd.grad_norm

        return self.rd, opt_result
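A short sketch of the 'module_method' naming convention that solve()
assumes for opt_type (the value below is hypothetical):

opt_type = "scipy_slsqp"              # hypothetical opt_type value
module, method = opt_type.split("_")  # -> ("scipy", "slsqp")
# Note: a method name containing an underscore would need
# opt_type.split("_", 1) instead.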
Example #39
    def calculate_staging(self, return_OptimizeResult=False):
        """
        Calculate the staging by minimising the relative distance

        sqrt((self.xp[3]-self.user_xp)**2 / self.user_xp**2
             + (self.xt[3]-self.user_xt)**2 / self.user_xt**2)

        towards zero using scipy's optimize.minimize function. Round the
        result to the nearest integer value, then recalculate the cascade
        to get the final result on concentrations and streams for that
        integer number of stages. Raise RuntimeError if the optimiser does
        not exit successfully.
        """
        if self.uptodate:
            return self.n_e, self.n_s

        if self.process == 'diffusion':
            n_init_enriching = [500, 1000, 5000]
            n_init_stripping = [100, 500, 1000, 5000]
            upper_bound = 7000
        elif self.process == 'centrifuge':
            n_init_enriching = [5, 10, 50]
            n_init_stripping = [1, 5, 10, 50]
            upper_bound = 200
        else:
            msg = "'process' must either be 'centrifuge' or 'diffusion'!"
            raise ValueError(msg)

        def concentration(n=(None, None)):
            return self.calculate_concentrations(n_e=n[0], n_s=n[1])
        lower_bound = 1
        bound = (lower_bound, upper_bound)

        for s in n_init_stripping:
            for e in n_init_enriching:
                result = scipy_minimize(concentration,
                                        x0=(e, s),
                                        bounds=(bound, bound),
                                        method='L-BFGS-B',
                                        options={'gtol': 1e-15})
                n = result['x']

                msg = (
                    "\n'calculate_staging' results:\n" +
                    "  exited successfully   {}\n".format(result['success']) +
                    "  message               {}\n".format(result['message']) +
                    "  n_e                   {:.4f}\n".format(n[0]) +
                    "  n_s                   {:.4f}\n".format(n[1]))
                logging.debug(msg)
                delta = self.difference_concentration(log=True)

                self.n_e = n[0]
                self.n_s = n[1]
                concentration()
                self.calculate_flows()

                if result['success'] and delta < 1e-7:
                    self.uptodate = True
                    if return_OptimizeResult:
                        return result

                    # older scipy returned the L-BFGS-B message as bytes;
                    # compare as str to be robust across versions
                    if ('NORM_OF_PROJECTED_GRADIENT' in str(result['message'])
                            or n[0] > 0.9 * upper_bound):
                        result = scipy_minimize(concentration,
                                                x0=(10 * upper_bound, s),
                                                method='L-BFGS-B',
                                                options={'gtol': 1e-15})
                        n = result['x']

                        concentration(n)
                        self.calculate_flows()
                        self.maximal_enrichment = self.xp[3]
                        self.n_e = float('nan')
                        self.n_s = n[1]

                        error_msg = ('Unphysical result:\n' +
                                     'n_enriching is larger than 0.9*' +
                                     'upper_bound with upper_bound = ' +
                                     '{}.\n'.format(upper_bound) +
                                     'The most probable reason is that ' +
                                     'the concentration of minor isotopes ' +
                                     'is too high, making a U235 ' +
                                     'product enrichment up to the ' +
                                     'defined level impossible.\n' +
                                     'The maximal (asymptotic) U235 product ' +
                                     'enrichment is {:.4e} %.\n'.format(
                                         self.maximal_enrichment * 100) +
                                     'Try lowering the desired U235 ' +
                                     'enrichment below this value (e.g., ' +
                                     'by 0.5%).')
                        raise RuntimeError(error_msg)
                    return n[0], n[1]

        error_msg = ('Optimiser did not exit successfully. Output:\n' +
                     str(result))
        raise RuntimeError(error_msg)
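A toy sketch of the multi-start pattern used above: minimise a relative
distance over (n_e, n_s) with bounded L-BFGS-B from several starting
points and keep the first successful result. The model below is a made-up
stand-in for the real cascade calculation, and all values are
illustrative:

import numpy as np
from scipy.optimize import minimize as scipy_minimize

target = np.array([0.05, 0.003])  # hypothetical target concentrations

def model(n):
    # made-up stand-in for calculate_concentrations(n_e=n[0], n_s=n[1])
    return np.array([0.05 * n[0] / (n[0] + 30.0),
                     0.003 * n[1] / (n[1] + 5.0)])

def objective(n):
    # relative Euclidean distance between achieved and requested values
    return np.sqrt((((model(n) - target) / target) ** 2).sum())

bound = (1, 200)
for x0 in [(5, 1), (10, 5), (50, 10)]:  # multi-start over initial guesses
    result = scipy_minimize(objective, x0=x0, bounds=(bound, bound),
                            method='L-BFGS-B', options={'gtol': 1e-15})
    if result.success:
        break
print(result.x)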
Example #40
    def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
        """
        Use one of the scalar minimization methods from
        scipy.optimize.minimize.

        Parameters
        ----------
        method : str, optional
            Name of the fitting method to use.
            One of:
                'Nelder-Mead' (default)
                'L-BFGS-B'
                'Powell'
                'CG'
                'Newton-CG'
                'COBYLA'
                'TNC'
                'trust-ncg'
                'dogleg'
                'SLSQP'
                'differential_evolution'

        params : Parameters, optional
            Parameters to use as starting points.
        kws : dict, optional
            Minimizer options passed to scipy.optimize.minimize.

        If the objective function returns a numpy array instead
        of the expected scalar, the sum of squares of the array
        will be used.

        Note that bounds and constraints can be set on Parameters
        for any of these methods, so they are not supported separately
        for the methods designed to use bounds. However, if you use the
        differential_evolution method you must specify finite
        (min, max) bounds for each Parameter.

        Returns
        -------
        result : object
            The fit result returned by prepare_fit, populated with the
            optimizer output and fit statistics.

        """
        if not HAS_SCALAR_MIN:
            raise NotImplementedError

        result = self.prepare_fit(params=params)
        vars = result.init_vals
        params = result.params

        fmin_kws = dict(method=method,
                        options={'maxiter': 1000 * (len(vars) + 1)})
        fmin_kws.update(self.kws)
        fmin_kws.update(kws)

        # hess supported only in some methods
        if 'hess' in fmin_kws and method not in ('Newton-CG',
                                                 'dogleg', 'trust-ncg'):
            fmin_kws.pop('hess')

        # jac supported only in some methods (and Dfun could be used...)
        if 'jac' not in fmin_kws and fmin_kws.get('Dfun', None) is not None:
            # pop 'Dfun' here: the guard guarantees 'jac' is absent, so
            # popping 'jac' would raise a KeyError
            self.jacfcn = fmin_kws.pop('Dfun')
            fmin_kws['jac'] = self.__jacobian

        if 'jac' in fmin_kws and method not in ('CG', 'BFGS', 'Newton-CG',
                                                'dogleg', 'trust-ncg'):
            self.jacfcn = None
            fmin_kws.pop('jac')

        if method == 'differential_evolution':
            fmin_kws['method'] = _differential_evolution
            bounds = [(par.min, par.max) for par in params.values()]
            if not np.all(np.isfinite(bounds)):
                raise ValueError('With differential evolution finite bounds '
                                 'are required for each parameter')
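            # Note: the per-Parameter bounds above are only validated here;
            # the search below presumably runs on the internally transformed
            # variables, whose full range is covered by (-pi/2, pi/2).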
            bounds = [(-np.pi / 2., np.pi / 2.)] * len(vars)
            fmin_kws['bounds'] = bounds

            # In scipy 0.14 this can be called directly from scipy_minimize.
            # When the minimum supported scipy is 0.14, the following line
            # and the else branch below can be removed.
            ret = _differential_evolution(self.penalty, vars, **fmin_kws)
        else:
            ret = scipy_minimize(self.penalty, vars, **fmin_kws)

        result.aborted = self._abort
        self._abort = False
        if isinstance(ret, dict):
            for attr, value in ret.items():
                setattr(result, attr, value)
        else:
            for attr in dir(ret):
                if not attr.startswith('_'):
                    setattr(result, attr, getattr(ret, attr))

        result.x = np.atleast_1d(result.x)
        result.chisqr = result.residual = self.__residual(result.x)
        result.nvarys = len(vars)
        result.ndata = 1
        result.nfree = 1
        if isinstance(result.residual, ndarray):
            result.chisqr = (result.chisqr**2).sum()
            result.ndata = len(result.residual)
            result.nfree = result.ndata - result.nvarys
        result.redchi = result.chisqr / result.nfree
        _log_likelihood = result.ndata * np.log(result.redchi)
        result.aic = _log_likelihood + 2 * result.nvarys
        result.bic = _log_likelihood + np.log(result.ndata) * result.nvarys

        return result
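This method closely matches lmfit's Minimizer.scalar_minimize; assuming
that is where it comes from, a typical call looks like the sketch below
(the model and data are made up):

import numpy as np
from lmfit import Minimizer, Parameters

def residual(params, x, data):
    a = params['a'].value
    b = params['b'].value
    return a * np.exp(-b * x) - data  # array residual -> sum of squares

x = np.linspace(0, 10, 50)
data = 3.0 * np.exp(-0.5 * x)

params = Parameters()
params.add('a', value=1.0, min=0.0)
params.add('b', value=1.0, min=0.0)

result = Minimizer(residual, params, fcn_args=(x, data)).scalar_minimize(
    method='Nelder-Mead')
print(result.chisqr, result.aic, result.bic)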