Example #1
    def update(self, r, x):
        u0_long = 0.5 * np.ones(self.H)
        u0_lat = 0.5 * np.ones(self.H)
        constraints = {'type': 'ineq', 'fun': self.con_long}
        options = {'disp': False}

        # t0 = time.time()
        res_f = minimize(self.obj_long,
                         u0_long,
                         args=(r, x),
                         bounds=Bounds(-5.0, 5.0),
                         options=options,
                         method='slsqp')
        res_tau = minimize(self.obj_lat,
                           u0_lat,
                           args=(r, x),
                           bounds=Bounds(-0.5, 0.5),
                           options=options,
                           method='slsqp')
        # t1 = time.time() - t0
        # print('Time elapsed:', t1)
        # print("x = ", res.x)
        # print('f = ', res.fun)
        # print(res.success)
        # print(res.message)
        x_long_star = res_f.x
        x_lat_star = res_tau.x
        return np.array([[x_long_star.item(0) + self.Fe], [x_lat_star.item(0)]])
Example #2
def max_likelihood(size_dist, distribution_mean=None):
    """ Returns the estimated mean, sd from size dist

        Arguments
        ------------
        size_dist: dict of the form {str: float or int} where the string is 'a_i-a_i+1' or 'a_n+' and the float or int is the proportion or number of companies in that bin.
        (optional) distribution_mean: if the mean of the distribution is known then this is a constraint that can be used to improve the estimation.
    """
    if distribution_mean is None:
        result = minimize(lambda x: -likelihood(x, size_dist), (0.5, 1.5),
                          jac=lambda x: -likelihood_jacobian(x, size_dist),
                          bounds=Bounds([-np.inf, 0], [np.inf, np.inf]))
    else:
        result = minimize(
            lambda x: -likelihood(x, size_dist), (0.5, 1.5),
            jac=lambda x: -likelihood_jacobian(x, size_dist),
            bounds=Bounds([-np.inf, 0], [np.inf, np.inf]),
            constraints={
                'type': 'eq',
                'fun': lambda x: np.exp(x[0] + x[1]**2 / 2) - distribution_mean
            })
    #print(result)

    if result.success:
        return result.x
    else:
        return None
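For concreteness, a minimal usage sketch of max_likelihood; the bin labels, counts, and mean below are hypothetical, and likelihood / likelihood_jacobian are assumed to be defined elsewhere in the same module:

# Hypothetical binned company-size data: '1-10' covers sizes in [1, 10),
# '100+' is the open-ended top bin; values are company counts per bin.
size_dist = {'1-10': 520, '10-100': 310, '100+': 45}

# Unconstrained fit. Given the constraint exp(x[0] + x[1]**2 / 2) above,
# x appears to be the (mu, sigma) of a fitted lognormal.
params = max_likelihood(size_dist)

# Constrained fit, pinning the distribution mean to a known value (25 here).
params_con = max_likelihood(size_dist, distribution_mean=25)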
Example #3
class TestBoundedNelderMead:
    @pytest.mark.parametrize('bounds, x_opt', [
        (Bounds(-np.inf, np.inf), Rosenbrock().x_opt),
        (Bounds(-np.inf, -0.8), [-0.8, -0.8]),
        (Bounds(3.0, np.inf), [3.0, 9.0]),
        (Bounds([3.0, 1.0], [4.0, 5.0]), [3., 5.]),
    ])
    def test_rosen_brock_with_bounds(self, bounds, x_opt):
        prob = Rosenbrock()
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "Initial guess is not within "
                       "the specified bounds")
            result = minimize(prob.fun, [-10, -10],
                              method='Nelder-Mead',
                              bounds=bounds)
            assert np.less_equal(bounds.lb, result.x).all()
            assert np.less_equal(result.x, bounds.ub).all()
            assert np.allclose(prob.fun(result.x), result.fun)
            assert np.allclose(result.x, x_opt, atol=1.e-3)

    def test_equal_all_bounds(self):
        prob = Rosenbrock()
        bounds = Bounds([4.0, 5.0], [4.0, 5.0])
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "Initial guess is not within "
                       "the specified bounds")
            result = minimize(prob.fun, [-10, 8],
                              method='Nelder-Mead',
                              bounds=bounds)
            assert np.allclose(result.x, [4.0, 5.0])

    def test_equal_one_bounds(self):
        prob = Rosenbrock()
        bounds = Bounds([4.0, 5.0], [4.0, 20.0])
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "Initial guess is not within "
                       "the specified bounds")
            result = minimize(prob.fun, [-10, 8],
                              method='Nelder-Mead',
                              bounds=bounds)
            assert np.allclose(result.x, [4.0, 16.0])

    def test_invalid_bounds(self):
        prob = Rosenbrock()
        with raises(ValueError,
                    match=r"one of the lower bounds is greater "
                    r"than an upper bound."):
            bounds = Bounds([-np.inf, 1.0], [4.0, -5.0])
            minimize(prob.fun, [-10, 3], method='Nelder-Mead', bounds=bounds)

    @pytest.mark.xfail(reason="Failing on Azure Linux and macOS builds, "
                       "see gh-13846")
    def test_outside_bounds_warning(self):
        prob = Rosenbrock()
        with raises(UserWarning,
                    match=r"Initial guess is not within "
                    r"the specified bounds"):
            bounds = Bounds([-np.inf, 1.0], [4.0, 5.0])
            minimize(prob.fun, [-10, 8], method='Nelder-Mead', bounds=bounds)
Example #4
 def __init__(self, name, weight_bounds=Bounds([0, 0], [1, 1])):
     self.name = name
     re_result = bounds_re(name)  # parse once instead of calling bounds_re twice
     if re_result is None:
         raise Exception("re function is not implemented")
     self.numVars, self.numObjs, self.numConstr, lower, upper = re_result
     self.bounds = Bounds(lower, upper)
     self.weight_bounds = weight_bounds  # weighting of objectives
Example #5
 def minimizer_L1(self, x):
     # D: (M, N)
     D = x[1]
     y = x[0].T
     x0 = np.ones(D.shape[1], )
     if D.shape[0] < D.shape[1]:
         # fewer observations than nodes
         # Adjust the options' parameters to speed up when N >= 300
         # see https://docs.scipy.org/doc/scipy/reference/optimize.minimize-slsqp.html#optimize-minimize-slsqp
         options = {
             'maxiter': 10,
             'ftol': 1e-01,
             'iprint': 1,
             'disp': False,
             'eps': 1.4901161193847656e-02
         }
         upcons = {
             'type': 'ineq',
             'fun': self.lessObsUpConstrain,
             'args': (D, y)
         }
         cur_time = datetime.now()
         result = minimize(self.square_sum,
                           x0,
                           args=(),
                           method='SLSQP',
                           jac=None,
                           bounds=Bounds(0, 1),
                           constraints=[upcons],
                           tol=None,
                           callback=None,
                           options=options)
         logging.info("minimizer_L1 time:" +
                      str(datetime.now() - cur_time) + "," + str(options) +
                      " result.fun:" + str(result.fun) + ", " +
                      str(result.success) + ", " + str(result.message))
     else:
         logging.info("more observations than nodes")
         result = minimize(self.moreObsfunc,
                           x0,
                           args=(D, y),
                           method='L-BFGS-B',
                           jac=None,
                           bounds=Bounds(0, 1),
                           tol=None,
                           callback=None,
                           options={
                               'disp': None,
                               'maxcor': 10,
                               'ftol': 2.220446049250313e-09,
                               'gtol': 1e-05,
                               'eps': 1e-08,
                               'maxfun': 15000,
                               'maxiter': 15000,
                               'iprint': -1,
                               'maxls': 20
                           })
     return result.x
Example #6
def solverprog(util, par):
    """ Runs SLSQP optimizer for a parameterization for each piecewise linear part of the budget constraint (3),
    and evaluates the kink points (2) aswell, then compares the utility of these 5 points, and returns the leisure consumption
    associated with the highest utility bundle.
    INPUT:
    Util: Utility function of agents.
    par: Parameters of the utility function (tuple if multipile); For cobddouglas an args=alpha (bt. 0 and 1),
    For CES a 2-tuple, where par[0]=a  and par[1]=r, 0<=a<=1 and r <=1.
    OUTPUT
    c^*: optimal leisure consumption (float)
    """
    # Optimize behaviour in no tax bracket (l_bot < l < T):
    guess_no = (goods(1 / 2 * (T - l_bot)), 1 / 2 * (T - l_bot))
    best_notax = optimize.minimize(
        util,
        guess_no,
        args=par,
        method='SLSQP',
        constraints=[budget_func(wage_prog, maxlabinc_prog, leiexp_prog)],
        options={'disp': False},
        bounds=Bounds((0, l_bot), (np.inf, T)))
    # Optimize behaviour in low tax bracket ( l_top < l <l_bot):
    guess_low = (goods(1 / 2 * (l_bot - l_top)), 1 / 2 * (l_bot - l_top))
    best_lowtax = optimize.minimize(
        util,
        guess_low,
        args=par,
        method='SLSQP',
        constraints=[budget_func(wage_prog, maxlabinc_prog, leiexp_prog)],
        options={'disp': False},
        bounds=Bounds((0, l_top), (np.inf, l_bot)))
    #Optimize behaviour in top tax bracket ( 0 < l < l_top):
    guess_high = (goods(1 / 2 * (l_top)), 1 / 2 * l_top)
    best_hightax = optimize.minimize(
        util,
        guess_high,
        args=par,
        method='SLSQP',
        constraints=[budget_func(wage_prog, maxlabinc_prog, leiexp_prog)],
        options={'disp': False},
        bounds=Bounds((0, 0), (np.inf, l_top)))
    # Evaluate utility at the kink point between no tax and low tax (util(l=l_bot, c=R_0-leiexp(l_bot, wage))):
    Kink_bot = util(x_bot, par)
    kink_top = util(x_top, par)

    # Evaluate candidates and choose optimal bundle
    candidates = np.array([
        [best_notax.fun, best_notax.x[0], best_notax.x[1]],
        [best_lowtax.fun, best_lowtax.x[0], best_lowtax.x[1]],
        [best_hightax.fun, best_hightax.x[0], best_hightax.x[1]],
        [Kink_bot, x_bot[0], x_bot[1]], [kink_top, x_top[0], x_top[1]]
    ])  # Create array with all candidates where first element is utility
    # 2nd is the consumption bundle as a tuple.
    best_cand = np.argmin(candidates,
                          axis=0)  # extract row number for best bundle.
    return candidates[best_cand[0], 2]  # return only the optimal leisure choice.
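The segment-plus-kinks recipe above generalizes: optimize within each interval of the piecewise budget set, evaluate the kink points directly, and keep the best candidate. A self-contained toy sketch with a single kink (the utility, kink location, and interval are hypothetical, not the module's globals):

import numpy as np
from scipy.optimize import minimize, Bounds

def neg_util(l):
    # toy concave utility of leisure; minimize the negative to maximize it
    return -(np.log(l[0]) - 0.05 * l[0]**2)

kink = 4.0  # hypothetical kink splitting the feasible interval (0, 10)
segments = [Bounds(1e-6, kink), Bounds(kink, 10.0)]

candidates = []
for seg in segments:
    x0 = [(seg.lb + seg.ub) / 2]  # start in the middle of the segment
    res = minimize(neg_util, x0, method='SLSQP', bounds=seg)
    candidates.append((res.fun, res.x[0]))
candidates.append((neg_util([kink]), kink))  # evaluate the kink itself

best_fun, best_leisure = min(candidates)  # smallest negative utility wins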
Example #7
def stark_intervals(y,
                    K,
                    alpha,
                    h,
                    options_dict={'maxiter': 500},
                    method='slsqp'):
    """
    Stark's chi-squared intervals.

    NOTE:
    - data and K matrix are assumed to be Cholesky transformed

    Parameters:
        y            (np arr) : m element array Cholesky trans observations
        K            (np arr) : mxn smearing matrix
        alpha        (float)  : interval level
        h            (np arr) : n element functional on the parameters
        options_dict (dict)   : optimizer options
        method       (str)    : optimizer method for scipy.optimize

    Returns:
        tuple -- lower/upper bound
    """
    # dimensions of problem
    m, n = K.shape

    # find the chi-sq critical value
    chisq_q = stats.chi(df=m).ppf(1 - alpha)

    # define constraint
    constr_stark = [{
        'type': 'ineq',
        'fun': lambda x: chisq_q - np.linalg.norm(y - K @ x)
    }]

    # find the bounds for full rank
    stark_lb = minimize(fun=lambda x: np.dot(h, x),
                        x0=np.zeros(n),
                        constraints=constr_stark,
                        bounds=Bounds(lb=np.zeros(n), ub=np.ones(n) * np.inf),
                        method=method,
                        options=options_dict)

    stark_ub = minimize(fun=lambda x: -np.dot(h, x),
                        x0=np.zeros(n),
                        constraints=constr_stark,
                        bounds=Bounds(lb=np.zeros(n), ub=np.ones(n) * np.inf),
                        method=method,
                        options=options_dict)

    assert stark_lb['success']
    assert stark_ub['success']

    return stark_lb['fun'], -stark_ub['fun']
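A usage sketch for stark_intervals; the smearing matrix, observations, and functional below are toy values rather than a real unfolding problem:

import numpy as np

rng = np.random.default_rng(0)
m, n = 20, 5
K = rng.normal(size=(m, n))           # toy (already Cholesky-transformed) smearing matrix
x_true = np.abs(rng.normal(size=n))   # nonnegative truth, consistent with the bounds
y = K @ x_true + rng.normal(scale=0.1, size=m)

h = np.zeros(n)
h[0] = 1.0  # functional h @ x selects the first parameter

lb, ub = stark_intervals(y, K, alpha=0.05, h=h)
print(f"interval for h @ x: [{lb:.3f}, {ub:.3f}]")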
Example #8
def solver_prog(util, par):
    """ Runs SLSQP optimizer for a parameterization for each piecewise linear part of the budget constraint,
        and evaluates the kink points aswell. 
   INPUT:
   Util: Utility function of agents.
   par: Parameters of the utility function (tuple if multipile); For cobddouglas an args=alpha (bt. 0 and 1),
                                 For CES a 2-tuple, where par[0]=a  and par[1]=r, 0<=a<=1 and r <=1.
   """
    # Optimize behaviour in no tax bracket (l_bot < l < T):
    best_notax = optimize.minimize(
        util,
        guess,
        args=par,
        method='SLSQP',
        constraints=[budget_func(wage_prog, maxlabinc_prog, leiexp_prog)],
        options={'disp': False},
        bounds=Bounds((0, l_bot), (np.inf, T)))
    # Optimize behaviour in low tax bracket ( l_top < l <l_bot):
    best_lowtax = optimize.minimize(
        util,
        guess,
        args=par,
        method='SLSQP',
        constraints=[budget_func(wage_prog, maxlabinc_prog, leiexp_prog)],
        options={'disp': False},
        bounds=Bounds((0, l_top), (np.inf, l_bot)))
    #Optimize behaviour in top tax bracket ( 0 < l < l_top):
    best_hightax = optimize.minimize(
        util,
        guess,
        args=par,
        method='SLSQP',
        constraints=[budget_func(wage_prog, maxlabinc_prog, leiexp_prog)],
        options={'disp': False},
        bounds=Bounds((0, 0), (np.inf, l_top)))
    # Evaluate utility at the kink point between no tax and low tax (util(l=l_bot, c=R_0-leiexp(l_bot, wage))):
    Kink_bot = util(goods_bot, l_bot)
    kink_top = util(goods_top, l_top)

    # Evaluate candidates and choose optimal bundle
    candidates = np.array([
        [best_notax.fun, best_notax.x[0], best_notax.x[1]],
        [best_lowtax.fun, best_lowtax.x[0], best_lowtax.x[1]],
        [best_hightax.fun, best_hightax.x[0], best_hightax.x[1]],
        [Kink_bot, x_bot[0], x_bot[1]], [kink_top, x_top[0], x_top[1]]
    ])  # Create array with all candidates where first element is utility
    # 2nd is the consumption bundle as a tuple.
    best_cand = np.argmax(candidates,
                          axis=0)  # extract row number for best bundle.
    return (candidates[best_cand[0], 1], candidates[best_cand[0], 2])
Example #9
    def __find_adv(self, model, x0, y0, lw, up):
        def obj_func(x, model, y0):
            output = model.apply(x).reshape(-1)
            y0_score = output[y0]

            # mask out the true class so max_score is the best *other* class
            output_no_y0 = output - np.eye(len(output))[y0] * 1e9
            max_score = np.max(output_no_y0)

            # <= 0 means another class outscores y0, i.e. x is adversarial
            return y0_score - max_score

        # print('Finding adversarial sample! Try {} times'.format(self.max_sus))

        for i in range(self.max_sus):
            if self.max_sus == 1:
                x = x0.copy()
            else:
                x = generate_x(len(x0), lw, up)

            args = (model, y0)
            jac = grad(obj_func)
            bounds = Bounds(lw, up)

            res = minimize(obj_func, x, args=args, jac=jac, bounds=bounds)

            if res.fun <= 0:  # an adversarial sample is generated
                valid = self.__validate_adv(model, res.x, y0)
                assert valid
                return res.x

        return None
Example #10
def _get_thresholds(stratifications: List[Tuple], means: pd.DataFrame,
                    sds: pd.DataFrame, weights_df: pd.DataFrame,
                    draw: int) -> pd.Series:
    col = f'draw_{draw}'
    thresholds = pd.Series(0, index=means.index, name=col)

    ts = time.time()
    print(f'Start: {ts}')

    for i, stratification in enumerate(stratifications):
        mu = means.loc[stratification, col]
        sigma = sds.loc[stratification, col]
        threshold = 0
        if mu and sigma:
            weights = weights_df.loc[stratification].reset_index()
            weights = (weights[weights['parameter'] != 'glnorm'].
                       loc[:, ['parameter', 'value']].set_index(
                           'parameter').to_dict()['value'])
            weights = {k: [v] for k, v in weights.items()}
            ens_dist = EnsembleDistribution(weights=weights, mean=mu, sd=sigma)
            threshold = minimize(lambda x: (ens_dist.ppf(x) - 7)**2, [0.5],
                                 bounds=Bounds(0, 1.0),
                                 method='Nelder-Mead').x[0]

        print(f'mu: {mu}, sigma: {sigma}, threshold: {threshold}')
        thresholds.loc[stratification] = threshold

    tf = time.time()
    print(f'End: {tf}')
    print(f'Duration: {tf - ts}')

    return thresholds
Example #11
def test_solver():
    X, y = load_breast_cancer(return_X_y=True)

    clfs = [
        LogisticRegression(solver="lbfgs", penalty="l1", warm_start=True),
        LogisticRegression(solver="lbfgs",
                           penalty="elasticnet",
                           warm_start=True)
    ]
    for clf in clfs:
        with raises(ValueError):
            clf.fit(X, y)

    lb = np.r_[np.full(X.shape[1], -1), -np.inf]
    ub = np.r_[np.zeros(X.shape[1]), np.inf]
    bounds = Bounds(lb, ub)

    clfs = [
        LogisticRegression(solver="lbfgs", penalty="l1"),
        LogisticRegression(solver="lbfgs", penalty="elasticnet")
    ]

    for clf in clfs:
        with raises(ValueError):
            clf.fit(X, y, bounds=bounds)

    lb = np.array([0.0])
    ub = np.array([0.5])
    A = np.zeros((1, X.shape[1] + 1))
    A[0, :2] = np.array([-1, 1])
    constraints = LinearConstraint(A, lb, ub)

    for clf in clfs:
        with raises(ValueError):
            clf.fit(X, y, constraints=constraints)
Example #12
    def _fit_train(self, b: np.ndarray, A: np.ndarray, warm_start: bool=True) -> Any:
        """ Fit method
        Defines the model and fit it to the data.

        Args
        ----
        b: np.array, dtype float

        A: np.ndarray, dtype float

        mask: np.ndarray, dtype int | bool
            mask_gray > 0.1


        Return
        ------
        reconstructor: Recontructor
            The object after fit. To get the data access the attribute 'x' otherwise call fit_predict

        Note
        ----
        b, A and mask are not required if the proble has been previously formulated (e.g. if self.warmstart = True)
        """


        x_guess = np.ones(A.shape[1]) * np.mean(b) / 10.

        bounds = Bounds(np.zeros_like(x_guess), np.full_like(x_guess, np.sum(b) / 5.))

        x = optimize.minimize(obj_logNbl1tv_grad, x0=x_guess,
                              args=(A, b, self.alpha, self.beta, self.sa, self.sb, self.r, self.ixs),
                              method="L-BFGS-B", jac=True, bounds=bounds).x
        return x
Example #13
def get_min(coefs, state_dim):
    b = Bounds([0] * state_dim, [1] * state_dim)
    res = scipy.optimize.minimize(poly_obj_fn,
                                  0.5 * np.ones((state_dim, 1)),
                                  args=coefs,
                                  bounds=b)
    return res.x, res.fun
Example #14
 def suggest(self, timeout=10):
     x1 = np.expand_dims(self.top_points_real[0, ...], 0)
     x1 = self.tr.continuous_transform(x1)
     c0 = self.cost(x1)
     x = self.tr.to_hyper_space(x1)
     iter_time = 0
     _iter_c = 0
     _iter = cycle(self.top_points_real.tolist())
     end_time = time.time() + timeout
     _x0 = self.top_points_real[0, ...]
     while (time.time() + 1.25 * iter_time) < end_time:
         start_time = time.time()
         dx = self.tr.random_continuous(1, self.random) * 0.01
         x0 = _x0 + dx
         ret = minimize(self.cost,
                        x0,
                        method="l-bfgs-b",
                        bounds=Bounds(self.tr._lb, self.tr._ub))
         iter_time = time.time() - start_time
         if ret.success:
             _x = np.expand_dims(ret.x, 0)
             x1 = self.tr.continuous_transform(_x)
             c = self.cost(x1)
             if c < c0:
                 c0 = c
                 x = self.tr.to_hyper_space(x1)
                 # print(_iter_c, c, x, dx)
         _iter_c += 1
     return {k: v[0][0] for k, v in x.items()}
Example #15
 def suggest(self, timeout=10):
     x1 = self.tr.random_continuous(1, self.random)
     x1 = self.tr.continuous_transform(x1)
     c0 = self.cost(x1)
     x = self.tr.to_hyper_space(x1)
     end_time = time.time() + timeout
     iter_time = 0
     _iter = 0
     while (time.time() + 1.25 * iter_time) < end_time:
         start_time = time.time()
         x0 = self.tr.random_continuous(1, self.random)
         ret = minimize(self.cost,
                        x0,
                        method="l-bfgs-b",
                        bounds=Bounds(self.tr._lb, self.tr._ub))
         iter_time = time.time() - start_time
         if ret.success:
             x1 = self.tr.continuous_transform(ret.x.reshape(1, -1))
             c = self.cost(x1)
             if c < c0:
                 c0 = c
                 x = self.tr.to_hyper_space(x1)
                 # print(_iter, c, x,)
         _iter += 1
     return {k: v[0][0] for k, v in x.items()}
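Both suggest variants share the same stopping rule: break when the projected duration of one more iteration (1.25x the previous one, as a safety margin) would overshoot the deadline. The pattern in isolation, with a stand-in step function:

import time

def run_until(timeout, step):
    # repeat step() while a 1.25x-padded estimate of the next call still fits
    end_time = time.time() + timeout
    iter_time = 0.0
    while (time.time() + 1.25 * iter_time) < end_time:
        start_time = time.time()
        step()
        iter_time = time.time() - start_time

run_until(0.1, lambda: time.sleep(0.01))  # completes roughly 9-10 steps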
Example #16
    def test_bounds_class(self):
        # test that result does not depend on the bounds type
        def func(x):
            f = np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
            return f

        lw = [-5.12] * 5
        up = [5.12] * 5

        # Unbounded global minimum is all zeros. Most bounds below will force
        # a DV away from unbounded minimum and be active at solution.
        up[0] = -2.0
        up[1] = -1.0
        lw[3] = 1.0
        lw[4] = 2.0

        # run optimizations
        bounds = Bounds(lw, up)
        ret_bounds_class = dual_annealing(func, bounds=bounds, seed=1234)

        bounds_old = list(zip(lw, up))
        ret_bounds_list = dual_annealing(func, bounds=bounds_old, seed=1234)

        # test that found minima, function evaluations and iterations match
        assert_allclose(ret_bounds_class.x, ret_bounds_list.x, atol=1e-8)
        assert_allclose(ret_bounds_class.x, np.arange(-2, 3), atol=1e-7)
        assert_allclose(ret_bounds_list.fun, ret_bounds_class.fun, atol=1e-9)
        assert ret_bounds_list.nfev == ret_bounds_class.nfev
Example #17
    def internal_optimization(self) -> OptimizeResult:
        """
        method to do internal optimization process, with a hyperpath setted you can get a optimization of the network
        :return:     res : OptimizeResult
        The optimization result represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes.
        """

        constr_func = lambda fopt: np.array(self.get_constrains(fopt))

        lb = [-np.inf] * self.len_constrains
        ub = [0] * self.len_constrains
        nonlin_con = NonlinearConstraint(constr_func, lb=lb, ub=ub)

        lb = [0] * self.len_var
        ub = [np.inf] * self.len_var

        bounds = Bounds(lb=lb, ub=ub)
        res = minimize(self.VRC, self.f_opt, method='trust-constr', constraints=nonlin_con, tol=0.01, bounds=bounds)
        logger.info(self.string_information_internal_optimization(res))

        return res
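The setup above (nonlinear constraint values confined to (-inf, 0], decision variables to [0, inf)) is the standard trust-constr pattern. A self-contained sketch with a toy objective and constraint, all names hypothetical:

import numpy as np
from scipy.optimize import minimize, Bounds, NonlinearConstraint

def obj(f):
    return np.sum((f - 2.0)**2)            # toy objective, unconstrained min at (2, 2)

def cons(f):
    return np.array([f[0] + f[1] - 3.0])   # feasible iff f0 + f1 - 3 <= 0

nonlin_con = NonlinearConstraint(cons, lb=[-np.inf], ub=[0])
bounds = Bounds(lb=[0, 0], ub=[np.inf, np.inf])
res = minimize(obj, x0=[0.5, 0.5], method='trust-constr',
               constraints=nonlin_con, bounds=bounds, tol=0.01)
print(res.x)  # ~[1.5, 1.5]: pushed onto the constraint boundary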
Example #18
 def get_pow(self, S, controller='P', dof=2):
     Fexc = self.get_waveExcitation(S)['Fexc'].isel(influenced_dof=dof).squeeze()
     Zi = self.hydro['Zi'].isel(dict(influenced_dof=dof,
                                     radiating_dof=dof)).squeeze()

     if controller == 'P':
         def dampingPower(x):
             P = -0.5 * x[0] * np.sum(np.abs(Fexc / (Zi + x[0]))**2)
             fval = P.values[()]
             return fval

         x0 = [1e-10]
         bounds = Bounds(lb=0, ub=np.inf)

         res = minimize(dampingPower,
                        x0,
                        method='L-BFGS-B',
                        bounds=bounds,
                        options={
                            'disp': False,
                            # 'maxiter': 10,
                        })
     else:
         # guard: res would otherwise be undefined for unsupported controllers
         raise NotImplementedError(f"controller '{controller}' is not implemented")

     return res.fun
Example #19
 def bounds_constr(self):
     """ Create bound constraints."""
     number_wells = self.res_param["nb_prod"] + self.res_param["nb_inj"]
     number_cycles = self.res_param["nb_cycles"]
     lower = np.zeros((number_cycles * number_wells, 1))
     upper = np.ones((number_cycles * number_wells, 1))
     return Bounds(lower, upper)
Example #20
def growth(x, y, f, noise='gamma', init=None):
    def loglik(x, y, f, p):
        a, b, c, w0, w1, scale = p
        _mu = logistic(x, f, [a, b, c, w0, w1])
        if noise == 'gamma':
            return -np.sum(gamma.logpdf(y, np.maximum(_mu / scale, 1e-12), scale=scale))
        if noise == 'normal':
            return -np.sum(norm.logpdf(y, loc=_mu, scale=scale))

    fx = lambda p: loglik(x, y, f, p)

    lb = np.ones((6,)) * 0.00001
    lb[2] = 1
    lb[3] = -np.inf
    lb[4] = -np.inf
    ub = np.ones((6,)) * np.inf
    bounds = Bounds(lb, ub)
    if init is None:
        init = [np.nanmean(y), 0.1, 10, 0, 0, np.nanstd(y)]
    options_trust = {'maxiter': 10000}

    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=UserWarning)
        result = minimize(fx, init, bounds=bounds, method='trust-constr', options=options_trust)

    if not result.success:
        print('optimization failed')

    return GrowthModel(result, noise, x, y, f)
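The gamma branch re-parameterizes the shape as _mu / scale so that the distribution's mean equals _mu (a gamma's mean is shape * scale). A quick standalone check of that identity:

import numpy as np
from scipy.stats import gamma

mu, scale = 5.0, 2.0
samples = gamma.rvs(mu / scale, scale=scale, size=200_000, random_state=0)
print(np.isclose(samples.mean(), mu, rtol=1e-2))  # True: shape * scale == mu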
Example #21
    def compute_opt_ratios(self, workloads, init_ratios, new_idd):
        el_time = time.time() * 1000 - self.last_time
        ideal_mems = np.array([w.ideal_mem for w in workloads])
        percents = np.array([(1 - (w.idd == new_idd)) * min(
            (w.percent + el_time / w.profile(w.ratio)) /
            self.slow_downs[w.wname], 0.95) for w in workloads])
        profiles = [w.profile for w in workloads]
        mem_gradients = [w.mem_gradient for w in workloads]
        gradients = [w.gradient for w in workloads]

        x0 = np.array(init_ratios)

        eq_cons = {
            'type': 'eq',
            'fun': eq,
            'jac': eq_grad,
            'args': (ideal_mems, self.total_mem)
        }
        bounds = Bounds(0.5, 1.0)
        beta = 0
        res = minimize(obj_new,
                       x0,
                       method='SLSQP',
                       jac=obj_grad_new,
                       args=(ideal_mems, percents, profiles, gradients,
                             mem_gradients, beta),
                       constraints=eq_cons,
                       options={'disp': False},
                       bounds=bounds)
        final_ratios = res.x
        return np.round(final_ratios, 3), res.fun
Example #22
    def fit(self, y, v, X):
        """Fit the estimator to data."""
        # use D-L estimate for initial values
        est_DL = DerSimonianLaird().fit(y, v, X).params_
        beta = est_DL["fe_params"]
        tau2 = est_DL["tau2"]

        theta_init = np.r_[beta.ravel(), tau2]

        lb = np.ones(len(theta_init)) * -np.inf
        ub = -lb
        lb[-1] = 0.0  # bound only the variance
        bds = Bounds(lb, ub, keep_feasible=True)

        res = minimize(self._nll_func,
                       theta_init, (y, v, X),
                       bounds=bds,
                       **self.kwargs)
        beta, tau = res.x[:-1], float(res.x[-1])
        tau = np.max([tau, 0])
        _, inv_cov = weighted_least_squares(y, v, X, tau, True)
        self.params_ = {
            "fe_params": beta[:, None],
            "tau2": tau,
            "inv_cov": inv_cov
        }
        return self
Example #23
def match_expectation(size_dist):
    result = minimize(lambda x: expectation_difference(x, size_dist), (0, 1),
                      bounds=Bounds([-np.inf, 0], [np.inf, np.inf]))
    if result.success:
        return result.x
    else:
        return None
Example #24
    def fit(self, y, n, X):
        """Fit the estimator to data."""
        if n.std() < np.sqrt(np.finfo(float).eps):
            raise ValueError("Sample size-based likelihood estimator cannot "
                             "work with all-equal sample sizes.")
        if n.std() < n.mean() / 10:
            # a warning rather than a raised exception: estimation may still succeed
            warnings.warn(
                "Sample sizes are too close, sample size-based likelihood estimator may fail."
            )
        # set tau^2 to 0 and compute starting values
        tau2 = 0.0
        k, p = X.shape
        beta = weighted_least_squares(y, n, X, tau2)
        sigma = ((y - X.dot(beta))**2 * n).sum() / (k - p)
        theta_init = np.r_[beta.ravel(), sigma, tau2]

        lb = np.ones(len(theta_init)) * -np.inf
        ub = -lb
        lb[-2:] = 0.0  # bound only the variances
        bds = Bounds(lb, ub, keep_feasible=True)

        res = minimize(self._nll_func,
                       theta_init, (y, n, X),
                       bounds=bds,
                       **self.kwargs)
        beta, sigma, tau = res.x[:-2], float(res.x[-2]), float(res.x[-1])
        tau = np.max([tau, 0])
        _, inv_cov = weighted_least_squares(y, sigma / n, X, tau, True)
        self.params_ = {
            "fe_params": beta[:, None],
            "sigma2": np.array(sigma),
            "tau2": tau,
            "inv_cov": inv_cov,
        }
        return self
Example #25
def _retry_loop(pid,
                rgs,
                store,
                optimize,
                value_limit,
                stop_fitness=-math.inf):
    fun = store.wrapper if store.statistic_num > 0 else store.fun
    # reinitialize logging config for Windows - multi-threading fix
    # (sys.platform.startswith avoids matching 'darwin', which contains 'win')
    if sys.platform.startswith('win') and store.logger is not None:
        store.logger = logger()

    while store.get_runs_compare_incr(
            store.num_retries) and store.best_y.value > stop_fitness:
        if _crossover(fun, store, optimize, rgs[pid]):
            continue
        try:
            rg = rgs[pid]
            dim = len(store.lower)
            sol, y, evals = optimize(fun, Bounds(store.lower,
                                                 store.upper), None,
                                     [rg.uniform(0.05, 0.1)] * dim, rg, store)
            store.add_result(y, sol, store.lower, store.upper, evals,
                             value_limit)
        except Exception as ex:
            continue
Example #26
def optimize(num_iter, init_p, init_err, M, m, c_1, c_2):
    """ This method is used to iteratively optimize obj_func_p and obj_func_err. The estimated parameters are used for alternative hypothesis test"""
    estimated_err = [init_err]
    estimated_p = [init_p]

    p_bound = Bounds(0.5, 0.9)
    e_bound = Bounds(1e-100, 0.3)
    opts = {'disp': False, 'gtol': 1e-100}
    for i in range(num_iter):
        # estimate the proportion of the major strain, given the estimated error
        res_p = minimize(obj_func_p, [init_p], args=(estimated_err[i], M, m, c_1, c_2),
                         method='TNC', bounds=p_bound, options=opts)
        estimated_p.append(res_p.x[0])
        # estimate the error, given the proportion
        res_err = minimize(obj_func_err, [init_err], args=(estimated_p[i], M, m, c_1, c_2),
                           method='TNC', bounds=e_bound, options=opts)
        estimated_err.append(res_err.x[0])
    return [estimated_p, estimated_err]
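The alternating scheme is generic: each pass holds one estimate fixed while refitting the other, appending both trajectories. A toy sketch with stand-in objectives; the quadratics below are hypothetical, matching only the call signatures of obj_func_p and obj_func_err, and M, m, c_1, c_2 are unused placeholders:

def obj_func_p(p, err, M, m, c_1, c_2):
    return (p[0] - 0.7)**2 + err     # hypothetical: minimum at p = 0.7

def obj_func_err(err, p, M, m, c_1, c_2):
    return (err[0] - 0.05)**2 + p    # hypothetical: minimum at err = 0.05

p_hist, err_hist = optimize(5, init_p=0.6, init_err=0.1,
                            M=None, m=None, c_1=None, c_2=None)
print(p_hist[-1], err_hist[-1])      # approaches (0.7, 0.05)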
Example #27
File: run.py  Project: keshava/daks
def initial_setup(filename, tn, dtype, space_order, nbl, datakey="m0", exclude_boundaries=True, water_depth=20):
    model = overthrust_model_iso(filename, datakey=datakey, dtype=dtype, space_order=space_order, nbl=nbl)

    geometry = create_geometry(model, tn)
    nbl = model.nbl

    if exclude_boundaries:
        v = trim_boundary(model.vp, model.nbl)
    else:
        v = model.vp.data

    # Define physical constraints on velocity - we know the maximum and minimum velocities we are expecting
    vmax = np.ones(v.shape) * 6.5
    vmin = np.ones(v.shape) * 1.3

    # Constrain the velocity for the water region. We know the velocity of water beforehand.
    if exclude_boundaries:
        vmax[:, 0:water_depth] = v[:, 0:water_depth]
        vmin[:, 0:water_depth] = v[:, 0:water_depth]
    else:
        vmax[:, 0:water_depth+nbl] = v[:, 0:water_depth+nbl]
        vmin[:, 0:water_depth+nbl] = v[:, 0:water_depth+nbl]

    b = Bounds(mat2vec(vmin), mat2vec(vmax))

    return model, geometry, b
Example #28
def vertical_tail(plane, req, s, u, tol=10e-4):
    zeta_dr_req = req['stability_and_control']['zeta_dr']
    c_n_b_req = req['stability_and_control']['c_n_b']
    alpha = arctan(s[2] / s[0])
    a = Atmosphere(s[-1]).speed_of_sound()

    def obj(x):
        return x[0]

    def constraint(x):
        plane['vertical']['planform'] = x[0]
        c_n_b = directional_stability(plane, s[0] / a, alpha)
        zeta_dr, omega_dr = dutch_roll_mode(plane, s, u)
        c = array([zeta_dr - zeta_dr_req, c_n_b - c_n_b_req])
        return c

    lim = Bounds(0.1, float(plane['wing']['planform']))
    x0 = array([float(plane['vertical']['planform'])])
    ineq_con = {'type': 'ineq', 'fun': constraint}
    u_out = minimize(obj,
                     x0,
                     bounds=lim,
                     tol=tol,
                     constraints=ineq_con,
                     options={'maxiter': 200})
    return u_out['x'][0]
Example #29
    def run_least_squares(self):
        """
        Run least squares minimization algorithm to calibrate model parameters.
        """
        lower_bounds = []
        upper_bounds = []
        x0 = []
        for prior in self.priors:
            lower_bound, upper_bound = get_parameter_bounds_from_priors(prior)
            lower_bounds.append(lower_bound)
            upper_bounds.append(upper_bound)
            if not any([math.isinf(lower_bound), math.isinf(upper_bound)]):
                x0.append(0.5 * (lower_bound + upper_bound))
            elif all([math.isinf(lower_bound), math.isinf(upper_bound)]):
                x0.append(0.0)
            elif math.isinf(lower_bound):
                x0.append(upper_bound)
            else:
                x0.append(lower_bound)
        bounds = Bounds(lower_bounds, upper_bounds)

        sol = minimize(self.loglikelihood, x0, bounds=bounds)
        self.mle_estimates = sol.x

        # FIXME: need to fix dump_mle_params_to_yaml_file
        logger.info("Best solution: %s", self.mle_estimates)
Example #30
 def _update_Z(self, A, Z):
     n, n_archetypes = A.shape
     # build the vbar matrix used in the CLS problem
     mask = np.eye(n_archetypes, dtype=bool)
     before_dot_products = A[:, :, None] * Z
     V = self.X[:, :, None] - np.tensordot(
         before_dot_products, ~mask, axes=[1, 0])
     #V = self.X - A[:,None][~mask].dot(Z)
     diagonal = A.dot(mask)
     diagonal[diagonal == 0] = commons.EPSILON
     V /= diagonal[:, None, :]
     A2 = A**2
     vbar = (np.einsum("ir,ipr->pr", A2, V) / A2.sum(axis=0)).T
     # solve a CLS problem for archetypes coefficients
     beta = np.zeros_like(A.T)
     new_Z = np.zeros_like(Z)
     # betas for each archetype live on the simplex: each in [0, 1], summing to 1
     bounds = Bounds(np.zeros(n), np.ones(n))
     ones = np.array(1)
     constraint = LinearConstraint(np.ones(n).T, ones, ones)
     for l in range(n_archetypes):
         res = minimize(lambda beta: commons._least_squares_cost(
             vbar[l], self.X, beta),
                        self.beta[l],
                        method="SLSQP",
                        bounds=bounds,
                        constraints=[constraint])
         beta[l] = res.x
         new_Z[l] = beta[l].dot(self.X)
     return beta, new_Z