Example #1
    def optimize(self, obj_handle, ctr_handle_ineq, ctr_handle_eq, p0):
        nonl_cons_ineq = NonlinearConstraint(ctr_handle_ineq,
                                             -np.inf,
                                             0,
                                             jac='3-point',
                                             hess=BFGS())
        nonl_cons_eq = NonlinearConstraint(ctr_handle_eq,
                                           0,
                                           0,
                                           jac='3-point',
                                           hess=BFGS())

        logger.info('Optimizing the Lyapunov function')
        solution = minimize(obj_handle,
                            np.reshape(p0, [len(p0)]),
                            hess=BFGS(),
                            constraints=[nonl_cons_eq, nonl_cons_ineq],
                            method='trust-constr',
                            options={
                                'disp': True,
                                'initial_constr_penalty': 1.5
                            },
                            callback=self.callback_opt)

        return solution.x, solution.fun
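
For reference, a minimal self-contained sketch of the same pattern (trust-constr with one equality and one inequality NonlinearConstraint, BFGS Hessian approximations); the toy objective and constraints below are illustrative assumptions, not from the original:

import numpy as np
from scipy.optimize import BFGS, NonlinearConstraint, minimize

# Toy problem (assumed): minimize x0^2 + x1^2 subject to
# x0 + x1 = 1 (equality) and x0 - x1 <= 0 (inequality).
obj = lambda p: p[0]**2 + p[1]**2
ineq = NonlinearConstraint(lambda p: p[0] - p[1], -np.inf, 0,
                           jac='3-point', hess=BFGS())
eq = NonlinearConstraint(lambda p: p[0] + p[1] - 1, 0, 0,
                         jac='3-point', hess=BFGS())
solution = minimize(obj, np.zeros(2), jac='2-point', hess=BFGS(),
                    constraints=[eq, ineq], method='trust-constr',
                    options={'initial_constr_penalty': 1.5})
print(solution.x, solution.fun)  # expect roughly [0.5, 0.5] and 0.5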
Example #2
    def test_multiple_constraint_objects(self):
        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
        x0 = [2, 0, 1]
        coni = []  # only inequality constraints (can use cobyla)
        methods = ["slsqp", "cobyla", "trust-constr"]

        # mixed old and new
        coni.append([
            {'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
            NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)
        ])

        coni.append([
            LinearConstraint([1, -2, 0], -2, np.inf),
            NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)
        ])

        coni.append([
            NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf),
            NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)
        ])

        for con in coni:
            funs = {}
            for method in methods:
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4)
            assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4)
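
The test above relies on `minimize` accepting old-style dicts, `LinearConstraint`, and `NonlinearConstraint` interchangeably; a standalone sketch of the three equivalent spellings of the same inequality (toy setup assumed):

import numpy as np
from scipy.optimize import LinearConstraint, NonlinearConstraint, minimize

fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
x0 = [2.0, 0.0]
# Three ways to state x0 - 2*x1 + 2 >= 0:
as_dict = {'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}
as_linear = LinearConstraint([1, -2], -2, np.inf)
as_nonlinear = NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf)
for con in (as_dict, as_linear, as_nonlinear):
    res = minimize(fun, x0, method='SLSQP', constraints=[con])
    print(res.fun)  # all three should agree to several digits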
Example #3
def optimalPump(x, nInc):

    fObjRest, Sensibil, y = Prediction(x, 1)

    def fun_obj(x):
        res, sens, y = Prediction(x, 0)
        cost = res['fObj']
        return cost

    def fun_constr_1(x):
        res, sens, y = Prediction(x, 0)
        g1 = res['g1']
        return g1

    def fun_constr_2(x):
        res, sens, y = Prediction(x, 0)
        g2 = res['g2']
        return g2

    c1 = NonlinearConstraint(fun_constr_1,
                             -np.inf,
                             0,
                             jac='2-point',
                             hess=BFGS(),
                             keep_feasible=False)
    c2 = NonlinearConstraint(fun_constr_2,
                             -np.inf,
                             0,
                             jac='2-point',
                             hess=BFGS(),
                             keep_feasible=False)
    bounds = Bounds([0 for i in range(nInc)], [1 for i in range(nInc)],
                    keep_feasible=False)
    res = minimize(fun_obj,
                   x,
                   args=(),
                   method='trust-constr',
                   jac='2-point',
                   hess=BFGS(),
                   constraints=[c1, c2],
                   options={'verbose': 3},
                   bounds=bounds)
    #print("res=",res)
    print("Solução final: x=", [round(res.x[i], 3) for i in range(len(res.x))])
    #a=input('')

    fObjRest, Sensibil, yopt = Prediction(res.x, 1)
    print("CustoF=", fObjRest['fObj'], '\n')
    cost = fObjRest['fObj']
    xopt = res.x

    return xopt, yopt, xopt, yopt, cost
Example #4
def a_vector_MLE(a, y, term, m_dict, bounds, boundedness):
    """TODO: write docstring

    """
    ym = [newtons_method_metalog(a, xi, term, bounds, boundedness) for xi in m_dict['dataValues']['x']]

    def MLE_quantile_constraints(x):
        M = [quantileMetalog(x[:term], yi, term, bounds=bounds, boundedness=boundedness) for yi in x[term:]]
        return m_dict['dataValues']['x'] - M

    def MLE_objective_function(x, y, term, m_dict):
        return -np.sum([np.log10(pdfMetalog(x[:term], yi, term, bounds, boundedness)) for yi in np.absolute(x[term:])])

    m_dict[str('MLE' + str(term))] = {}

    x0 = np.hstack((a[:term],ym))
    m_dict[str('MLE' + str(term))]['oldobj'] = -MLE_objective_function(x0, y, term, m_dict)
    bnd = ((None, None),)*len(a)+((0, 1),)*(len(x0)-len(a))
    con = NonlinearConstraint(MLE_quantile_constraints, 0, 0)

    mle = minimize(MLE_objective_function, x0, args=(y, term, m_dict), bounds=bnd, constraints=con)

    m_dict[str('MLE' + str(term))]['newobj'] = -MLE_objective_function(mle.x, y, term, m_dict)
    m_dict[str('MLE'+str(term))]['A'] = mle.x[:term]
    m_dict[str('MLE'+str(term))]['Y'] = mle.x[term:]

    m_dict[str('MLE' + str(term))]['oldA'] = a
    m_dict[str('MLE' + str(term))]['oldY'] = y

    out_temp = np.zeros_like(a)
    for i in range(term):
        out_temp[i] = mle.x[i]

    return out_temp
Example #5
    def new_contr(self, fonction, mini, maxi, type):
        if (type == "non_lin"):
            nlc = NonlinearConstraint(fonction, mini, maxi)
        else:
            nlc = LinearConstraint(fonction, mini, maxi)

        self.contr = self.contr + (nlc, )
Example #6
	def path_planner(self):
		# drop_start = [(self.master_start_pos[0] - self.follower_start_pos[0])/2, (self.master_start_pos[1] - self.follower_start_pos[1])/2]
		drop_start = [(self.nav_targets.leader.initial[0] - self.nav_targets.follower.initial[0])/2, 
			(self.nav_targets.leader.initial[1] - self.nav_targets.follower.initial[1])/2]
		drop_end = [drop_start[0] + self.min_drop_dist, drop_start[1]]
		X = points_to_list(drop_start, drop_end)
		P = points_to_list(self.nav_targets.leader.initial[0:2], self.nav_targets.leader.goal[0:2], 
			self.nav_targets.follower.initial[0:2], self.nav_targets.follower.goal[0:2])

		nl_cons = NonlinearConstraint(non_linear_dist_constraint, self.min_drop_dist, 100)
		options = {'disp':False}
		ans = minimize(fun=total_dist, args=P, x0=X, method='trust-constr', constraints=nl_cons, options=options)
		# print(total_dist(ans.x,P))
		angle = get_path_angle(ans.x[0:2], ans.x[2:4])

		# Store the path start and end
		path_start = (ans.x[0], ans.x[1], angle)
		path_end = (ans.x[2], ans.x[3], angle)

		# Generate start and end of line for both turtlebots and store in nav message
		self.nav_targets.follower.line_start = offset_coords(path_start, angle, self.tb_separation)
		self.nav_targets.follower.line_end = offset_coords(path_end, angle, self.tb_separation)
		self.nav_targets.leader.line_start = path_start
		self.nav_targets.leader.line_end = path_end

		self.publish_path_plan()
Example #7
    def projection_on_target(self, x):
        def dist(xt):
            return sqrt((x[0] - xt[0])**2 + (x[1] - xt[1])**2)

        in_target = NonlinearConstraint(self.target.level, -np.inf, 0)
        sol = minimize(dist, np.array([0, 0]), constraints=(in_target, ))
        return sol.x
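
A self-contained variant of the projection idea above, assuming the target set is the unit disk, i.e. level(xt) = xt[0]**2 + xt[1]**2 - 1:

import numpy as np
from scipy.optimize import NonlinearConstraint, minimize

def projection_on_unit_disk(x):
    # Euclidean distance from the fixed point x to a candidate point xt
    dist = lambda xt: np.sqrt((x[0] - xt[0])**2 + (x[1] - xt[1])**2)
    # Feasible when xt lies inside the disk: level(xt) <= 0
    in_target = NonlinearConstraint(lambda xt: xt[0]**2 + xt[1]**2 - 1,
                                    -np.inf, 0)
    sol = minimize(dist, np.array([0.0, 0.0]), constraints=(in_target,))
    return sol.x

print(projection_on_unit_disk([3.0, 0.0]))  # expect roughly [1, 0]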
Example #8
    def fit(self, cases, deaths, population, generate_guesses=None):
        if generate_guesses is None:
            initial_guesses = [[x[0] for x in self.states.values()]]
        else:
            initial_guesses = [
                np.linspace(x[1][0], x[1][1], generate_guesses)
                for x in DEFAULT_STATES.values()
            ]
            initial_guesses = np.array(initial_guesses).transpose()

        bounds = [x[1] for x in self.states.values()]

        def constraint(x):
            return x[3] - x[4]

        cons = NonlinearConstraint(constraint, 1.0, 10.0)
        best = (10000, None)
        for initial_guess in initial_guesses:
            result = minimize(
                self.model_fn,
                initial_guess,
                bounds=bounds,
                constraints=cons,
                args=(cases, deaths, population, False),
                method="SLSQP",
                tol=1e-10,
                options={"maxiter": 5000},
            )
            if result.fun < best[0]:
                best = (result.fun, result)

        return best[1]
Example #9
    def constr(self):
        def fun(x):
            x_coord, y_coord, z_coord = self._get_cordinates(x)
            return x_coord**2 + y_coord**2 + z_coord**2 - 1

        if self.constr_jac is None:

            def jac(x):
                x_coord, y_coord, z_coord = self._get_cordinates(x)
                Jx = 2 * np.diag(x_coord)
                Jy = 2 * np.diag(y_coord)
                Jz = 2 * np.diag(z_coord)
                return csc_matrix(np.hstack((Jx, Jy, Jz)))
        else:
            jac = self.constr_jac

        if self.constr_hess is None:

            def hess(x, v):
                D = 2 * np.diag(v)
                return block_diag(D, D, D)
        else:
            hess = self.constr_hess

        return NonlinearConstraint(fun, -np.inf, 0, jac, hess)
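
The two-argument constraint Hessian hess(x, v) used above (one Lagrange multiplier per constraint row) in a minimal standalone instance; the toy objective and single sphere constraint are assumptions:

import numpy as np
from scipy.optimize import NonlinearConstraint, minimize

# Single constraint c(x) = ||x||^2 - 1 <= 0 with analytic derivatives.
c_fun = lambda x: x[0]**2 + x[1]**2 + x[2]**2 - 1
c_jac = lambda x: 2 * x.reshape(1, -1)        # one row per constraint
c_hess = lambda x, v: 2 * v[0] * np.eye(3)    # v holds one multiplier per constraint
con = NonlinearConstraint(c_fun, -np.inf, 0, jac=c_jac, hess=c_hess)

# Minimize the linear objective x0 over the unit ball; optimum at [-1, 0, 0].
res = minimize(lambda x: x[0], np.array([0.1, 0.1, 0.1]),
               method='trust-constr',
               jac=lambda x: np.array([1.0, 0.0, 0.0]),
               hess=lambda x: np.zeros((3, 3)),
               constraints=[con])
print(res.x)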
Example #10
def depth_in_target(xi, xds, target, a, r):
    def dr(x, xi=xi, xds=xds, a=a, r=r):
        return dominant_region(x, xi, xds, a, r)

    on_dr = NonlinearConstraint(dr, -np.inf, 0)
    sol = minimize(target, xi, constraints=(on_dr, ))
    return sol.x
Example #11
    def solve(self, x0, optim_options=None, method=None):
        r"""
        Returns
        -------
        res : OptimizeResult
            The optimization result represented as an OptimizeResult object.
            Important attributes are: ``x`` the solution array, ``success`` a
            Boolean flag indicating if the optimizer exited successfully, and
            ``message`` which describes the cause of the termination.
        """
        if optim_options is None:
            optim_options = {}
        x_grad = x0  # used by ROL to check gradients
        if method is None:
            if has_ROL:
                method = 'rol-trust-constr'
            else:
                method = 'trust-constr'
        if method == 'trust-constr':  # no ROL gradient check for plain trust-constr
            x_grad = None

        bounds = self.get_unknowns_bounds()

        keep_feasible = True
        constr_lb, constr_ub = self.get_constraint_bounds()
        constraint = NonlinearConstraint(
            self.constraint_fun, constr_lb, constr_ub,
            jac=self.constraint_jac, hess=self.constraint_hess,
            keep_feasible=keep_feasible)
        res = pyapprox_minimize(
            self.objective_fun, x0, method=method,
            jac=self.objective_jac, hessp=self.objective_hessp,
            constraints=[constraint], bounds=bounds, options=optim_options,
            x_grad=x_grad)

        return res
Example #12
    def minimize_qfm_exact_constraints_SLSQP(self, tol=1e-3, maxiter=1000):
        r"""
        This function tries to find the surface that approximately solves

        .. math::
            \min_{S} f(S)

        subject to

        .. math::
            \texttt{label} = \texttt{labeltarget}

        where :math:`f(S)` is the QFM residual. This is done using SLSQP.
        """
        s = self.surface
        x = s.get_dofs()

        fun = lambda x: self.qfm_objective(x, derivatives=1)
        con = lambda x: self.qfm_label_constraint(x, derivatives=1)[0]
        dcon = lambda x: self.qfm_label_constraint(x, derivatives=1)[1]

        nlc = NonlinearConstraint(con, 0, 0)  # new-style equivalent (unused; SLSQP takes the dict form below)
        eq_constraints = [{'type': 'eq', 'fun': con, 'jac': dcon}]
        res = minimize(
            fun, x, jac=True, method='SLSQP', constraints=eq_constraints,
            options={'maxiter': maxiter, 'ftol': tol})

        resdict = {
            "fun": res.fun, "gradient": res.jac, "iter": res.nit, "info": res,
            "success": res.success,
        }
        s.set_dofs(res.x)
        resdict['s'] = s

        return resdict
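
The jac=True pattern above (the objective returns (value, gradient) in one call) in a standalone form, with an assumed toy objective and equality constraint:

import numpy as np
from scipy.optimize import minimize

def fun(x):
    # jac=True: return (value, gradient) from a single evaluation
    value = np.sum((x - 1.0)**2)
    grad = 2.0 * (x - 1.0)
    return value, grad

res = minimize(fun, np.zeros(3), jac=True, method='SLSQP',
               constraints=[{'type': 'eq', 'fun': lambda x: x.sum() - 2.0}],
               options={'maxiter': 100, 'ftol': 1e-9})
print(res.x)  # expect roughly [2/3, 2/3, 2/3]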
Example #13
def svm(X, y):
    '''
    SVM Support vector machine.

    INPUT:  X: training sample features, P-by-N matrix.
            y: training sample labels, 1-by-N row vector.

    OUTPUT: w: learned SVM parameters, (P+1)-by-1 column vector.
            num: number of support vectors

    '''
    P, N = X.shape
    w = np.zeros(P + 1)
    num = 0
    X = np.concatenate([np.ones((1, N)), X], axis=0)

    # YOUR CODE HERE
    # Please implement SVM with scipy.optimize. You should be able to implement
    # it within 20 lines of code. The optimization should converge with any
    # method that supports constraints.
    # begin answer
    def loss(w):
        return np.sum(w * w) / 2

    def cal(w):
        w = w.reshape((-1, 1))
        res = y * (np.dot(w.T, X))
        return res.reshape(-1)

    cons = NonlinearConstraint(cal, lb=1, ub=np.inf)
    res = minimize(loss, w, constraints=cons)
    # end answer
    return res.x.reshape((-1, 1)), res.nit
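
A toy usage sketch of the routine above; the dataset is an assumption (note the second return value is res.nit, the iteration count, rather than a true support-vector count):

import numpy as np

# Assumed toy data: two linearly separable classes in 2-D.
X = np.array([[1.0, 2.0, -1.0, -2.0],
              [1.0, 1.5, -1.0, -1.5]])   # P-by-N features
y = np.array([[1, 1, -1, -1]])           # 1-by-N labels
w, num = svm(X, y)
print(w.ravel())  # [bias, w1, w2] of the separating hyperplane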
Example #14
    def minmax_nonlinear_constraints(self, parameter_samples, design_samples):
        constraints = []
        for ii in range(parameter_samples.shape[1]):
            design_factors = self.design_factors(parameter_samples[:, ii],
                                                 design_samples)
            homog_outer_prods = compute_homoscedastic_outer_products(
                design_factors)
            if self.noise_multiplier is not None:
                hetero_outer_prods = compute_heteroscedastic_outer_products(
                    design_factors, self.noise_multiplier)
            else:
                hetero_outer_prods = None
            opts = copy.deepcopy(self.opts)
            if opts is not None and 'pred_factors' in opts:
                opts['pred_factors'] = opts['pred_factors'](
                    parameter_samples[:, ii], opts['pred_samples'])
            obj, jac = self.get_objective_and_jacobian(
                design_factors.copy(), homog_outer_prods.copy(),
                hetero_outer_prods, self.noise_multiplier, copy.deepcopy(opts))
            constraint_obj = partial(minmax_oed_constraint_objective, obj)
            constraint_jac = partial(minmax_oed_constraint_jacobian, jac)
            num_design_pts = design_factors.shape[0]
            constraint = NonlinearConstraint(constraint_obj,
                                             0,
                                             np.inf,
                                             jac=constraint_jac)
            constraints.append(constraint)

        num_design_pts = homog_outer_prods.shape[2]
        return constraints, num_design_pts
Example #15
def constr_transcosts(n, x0, nonlin_constr=True, supply_H_and_jac=True):
    bounds = Bounds(np.array([0] * (2 * n)),
                    np.append(1 - x0, x0) if nonlin_constr else np.inf)

    A = np.append(np.ones((1, n)), np.ones((1, n)) * -1)
    lb, ub = 0, 0
    if not nonlin_constr:
        lincomb_bound = np.zeros((n, 2 * n))
        for i in range(n):
            lincomb_bound[i, i] = 1
            lincomb_bound[i, i + n] = -1

        A = np.vstack([A[None, :], lincomb_bound])
        lb = np.append(lb, -x0)
        ub = np.append(ub, 1 - x0)

    constr = [LinearConstraint(A, lb, ub)]

    if nonlin_constr:
        extra = {} if not supply_H_and_jac else {
            'jac': lambda x: jac_c(n, x),
            'hess': lambda x, v: hessian_c(n, x, v)
        }
        constr.append(NonlinearConstraint(lambda x: constr_d(n, x), 0, 0, **extra))

    return {'bounds': bounds, 'constraints': constr}
Example #16
def run_design(objective, jac, constraints, constraints_jac, bounds, x0,
               options):
    options = options.copy()
    if constraints_jac is None:
        constraints_jac = [None] * len(constraints)
    scipy_constraints = []
    for constraint, constraint_jac in zip(constraints, constraints_jac):
        scipy_constraints.append(
            NonlinearConstraint(constraint, 0, np.inf, jac=constraint_jac))
    method = options.pop('method', 'slsqp')
    callback = options.pop('callback', None)
    print(x0[:, 0])
    res = minimize(objective,
                   x0[:, 0],
                   method=method,
                   jac=jac,
                   hess=None,
                   constraints=scipy_constraints,
                   options=options,
                   callback=callback,
                   bounds=bounds)
    return res.x, res
Example #17
    def project_to_constraint(self, q0, constraint):
        def f(q):
            return constraint(q)[0]

        def df(q):
            c_grad = constraint(q)[1]
            q_grad = self._fr.jacobian(q).T @ c_grad
            return q_grad

        def c_f(q):
            diff_q = q - q0
            return diff_q @ diff_q

        def c_df(q):
            diff_q = q - q0
            return 2 * diff_q  # gradient of diff_q @ diff_q

        c_joint_limits = LinearConstraint(np.eye(len(q0)),
                                          self._fr.joint_limits_low,
                                          self._fr.joint_limits_high)
        c_close_to_q0 = NonlinearConstraint(c_f,
                                            0,
                                            self._q_step_size**2,
                                            jac=c_df)

        res = minimize(f,
                       q0,
                       jac=df,
                       method='SLSQP',
                       tol=0.1,
                       constraints=(c_joint_limits, c_close_to_q0))

        return res.x
Example #18
    def construct_constraints(self, x: np.ndarray,
                              y: np.ndarray,
                              beta: Optional[np.ndarray] = None) -> NonlinearConstraint:
        """
        Build the infinity-norm constraint ||x^T (y - x @ beta)||_inf <= lambd * sigma
        from the data.
        Args:
            x (np.ndarray): MxN input data array
            y (np.ndarray): M output targets
            beta (np.ndarray): placeholder
        Returns: NonlinearConstraint
        """

        def _constraint(beta):
            return np.linalg.norm(x.T @ (y - x @ beta), np.inf)

        def _jac(beta):
            vec = x.T @ (y - x @ beta)
            max_ind = np.argmax(np.abs(vec))
            der = np.zeros_like(vec.ravel())
            der[max_ind] = np.sign(vec[max_ind])
            return -x.T.dot(x).dot(der)

        return NonlinearConstraint(_constraint, -np.inf, self.lambd * self.sigma,
                                   jac=_jac)
Example #19
    def solve(self, optim_options=None):
        if optim_options is None:
            tol = 1e-12
            optim_options = {'verbose': 0, 'maxiter': 1000,
                             'gtol': tol, 'xtol': tol, 'barrier_tol': tol}

        keep_feasible = False
        nonlinear_constraint = NonlinearConstraint(
            self.nonlinear_constraints, 0, np.inf,
            jac=self.nonlinear_constraints_jacobian,  # jac='2-point',
            hess=self.nonlinear_constraints_hessian,
            # hess=BFGS(),
            keep_feasible=keep_feasible)

        constraints = [nonlinear_constraint]
        res = minimize(
            self.objective, self.init_guess,
            method='trust-constr',
            jac=self.objective_jacobian,
            hess=self.objective_hessian,
            constraints=constraints, options=optim_options,
            bounds=self.bounds)

        coef = res.x[:self.ncoef]

        if not res.success:
            raise Exception(res.message)

        return coef
Example #20
    def minimize(self, x0: np.ndarray) -> Dict:
        """Minimizes the scalarizer given an initial guess x0.
        
        Args:
            x0 (np.ndarray): A numpy array containing an initial guess of variable values.

        Returns:
            Dict: A dictionary with at least the following entries: 'x' indicating the optimal
            variables found, 'fun' the optimal value of the optimized function, and 'success' a boolean
            indicating whether the optimization was conducted successfully.
        """
        if self._use_scipy:
            # create wrapper for the constraints to be used with scipy's minimize routine.
            # assuming that all constraints hold when they return a positive value.
            if self._constraint_evaluator is not None:
                scipy_cons = NonlinearConstraint(self._constraint_evaluator, 0,
                                                 np.inf)
            else:
                scipy_cons = ()

            res = self._method(self._scalarizer,
                               x0,
                               bounds=self._bounds,
                               constraint_evaluator=scipy_cons)

        else:
            res = self._method(self._scalarizer,
                               x0,
                               bounds=self._bounds,
                               constraint_evaluator=self._constraint_evaluator)

        return res
Example #21
def do_inverse(test, k_file, exp_file, punch=False):
    """
    TODO
    """
    res = 0

    #csv_files = glob.glob("*.csv")
    #csv_files.sort()
    #csv_f = [c.split('.')[0] for c in csv_files]

    #k_files = [glob.glob(c+"*.k") for c in csv_f]
    #k_files = np.sort(np.array(k_files).flatten())
    #func_simulation([-1, 1200])
    #try:
    constraint_1 = NonlinearConstraint(lambda x: x[0] - x[1], 0.01, np.inf)
    #constraint_2 = NonlinearConstraint(lambda x: x[0], 0.1, 1)
    #constraint_3 = NonlinearConstraint(lambda x: x[1], 0.05, np.inf)

    res = differential_evolution(err_func,
                                 bounds=([0.1, 1], [0.05, 0.5]),
                                 args=[test, k_file, exp_file, punch],
                                 constraints=(constraint_1, ))
    # reps_s1   0.299
    # reps_s2   0.285
    # reps_t    0.410
    # reps_n    0.084
    # reps_p    0.594
    #print(res)
    #except ValueError as e:
    #print(res)
    #print('line 100')
    #print(str(e))

    return res
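
differential_evolution accepts the same NonlinearConstraint objects as minimize; a minimal standalone sketch with an assumed toy error function:

import numpy as np
from scipy.optimize import NonlinearConstraint, differential_evolution

# Assumed toy objective; the constraint x[0] - x[1] >= 0.01 mirrors the one above.
err_func = lambda x: (x[0] - 0.5)**2 + (x[1] - 0.2)**2
constraint_1 = NonlinearConstraint(lambda x: x[0] - x[1], 0.01, np.inf)
res = differential_evolution(err_func,
                             bounds=[(0.1, 1), (0.05, 0.5)],
                             constraints=(constraint_1,),
                             seed=0)
print(res.x, res.fun)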
Example #22
    def internal_optimization(self) -> OptimizeResult:
        """
        Runs the internal optimization process: once a hyperpath has been set,
        it optimizes the network.
        :return:     res : OptimizeResult
        The optimization result represented as an ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes.
        """

        constr_func = lambda fopt: np.array(self.get_constrains(fopt))

        lb = [-1 * np.inf] * self.len_constrains
        ub = [0] * self.len_constrains
        nonlin_con = NonlinearConstraint(constr_func, lb=lb, ub=ub)

        lb = [0] * self.len_var
        ub = [np.inf] * self.len_var

        bounds = Bounds(lb=lb, ub=ub)
        res = minimize(self.VRC, self.f_opt, method='trust-constr', constraints=nonlin_con, tol=0.01, bounds=bounds)
        logger.info(self.string_information_internal_optimization(res))

        return res
Example #23
def stoye_CI(lower_estim, upper_estim, varcov, signif_level):
    # Stoye (2009) confidence interval for partially identified parameter
    # Inputs to routine
    Delta = upper_estim - lower_estim  # Point estimate of length of identif. set
    sigma_lower = np.sqrt(varcov[0, 0])  # Std. dev. of lower bound estimate
    sigma_upper = np.sqrt(varcov[1, 1])  # Std. dev. of upper bound estimate
    rho = varcov[0, 1] / (sigma_lower * sigma_upper)  # Correlation of lower and upper bound estimates

    # Numerically minimize CI length subject to the coverage constraint
    # stoye_bound(...) >= 1 - signif_level for both tails, written below as
    # 1 - signif_level - stoye_bound(...) <= 0
    con = lambda c: 1 - signif_level - np.r_[
        stoye_bound(c, rho, Delta / sigma_upper),
        stoye_bound(np.flip(c), rho, Delta / sigma_lower)]
    nlc = NonlinearConstraint(con, -np.inf, 0)
    c_opt = opt.minimize(
        lambda c: np.array([sigma_lower, sigma_upper]) @ c,
        # starting point uses the pointwise normal critical value
        # (norm.ppf from scipy.stats, the standard normal quantile)
        x0=np.array([
            lower_estim - norm.ppf(1 - signif_level / 2) * sigma_lower,
            upper_estim + norm.ppf(1 - signif_level / 2) * sigma_upper
        ]),
        constraints=nlc)

    # Confidence interval
    CI = np.c_[lower_estim - sigma_lower * c_opt.x[0],
               upper_estim + sigma_upper * c_opt.x[1]]
    return CI
Example #24
def fun_conv(x, obj_fixo, obj_movel):

    # Define the objective function, the gradient, and the Hessian.

    def fun_objetivo(a):
        return fun_interna_conv(a, x, obj_fixo, obj_movel)

    def grad_objetivo(a):
        return nd.Gradient(fun_objetivo)(a)

    #def hess_objetivo(a): return nd.Hessian(fun_objetivo)(a)

    def hess_objetivo(a):
        return np.zeros((2, 2))

    # Set the optimizer parameters.

    a_inicial = np.array([1.0, 0.0])

    restricao = NonlinearConstraint(fun_restricoes,
                                    1.0,
                                    1.0,
                                    jac=jac_restricoes)

    return fun_otimizador(a_inicial, fun_objetivo, grad_objetivo,
                          hess_objetivo, restricao)
Example #25
def _get_steadystate_dualcontrol_init(dis: float, r11: float, r22: float,
                                      K11: float, K22: float, BB12: float,
                                      BB21: float, P: float, C11: float,
                                      C12: float, c2: float, CB: float,
                                      umax: float) -> np.ndarray:
    ## Used as initial guess for fully specified problem
    x01 = np.array([1, 1, .1, .1]).T
    cons = NonlinearConstraint(
        fun=lambda x: const01_ineq_init(x, r11, r22, K11, K22, BB12, BB21),
        lb=0,
        ub=np.inf,
    )
    OptimizeResult01_init = minimize(
        fun=obj1,
        x0=x01,
        args=(r11, r22, K11, K22, BB12, BB21, P, c2, C11, C12, CB),
        method='trust-constr',
        jac='cs',
        hess=BFGS(),
        constraints=cons,
        bounds=[
            (0, K11),
            (0, K22),
            (0, umax),
            (0, umax),
        ],
        tol=10e-14,
    )
    print(OptimizeResult01_init.status)
    print(OptimizeResult01_init.message)
    print(OptimizeResult01_init.fun)
    print(OptimizeResult01_init.method)
    print(*OptimizeResult01_init.x, sep='\n ')
    return OptimizeResult01_init.x
Example #26
def main():
    x, y = np.meshgrid(np.arange(-2.0, 2.0, 0.01), np.arange(-4.0, 4.0, 0.01))
    z = func([x, y])
    graphics(x, y, z)
    non_linear_r_s = [20]

    x_i = 1.0
    y_i = 1.0

    matr = [[-1.0, 1.0], [1.0, 1.0]]
    r_s = [-1.0, -1.0]
    linear_constraint = LinearConstraint(matr, [-np.inf, -np.inf], r_s)
    nonlinear_constraint = NonlinearConstraint(cons_f, -np.inf, non_linear_r_s[0],
                                               jac=cons_J, hess=cons_H)
    x0 = np.array([x_i, y_i])
    res = minimize(func, x0, method='trust-constr', jac=rosen_der, hess=rosen_hess,
                   constraints=[linear_constraint, nonlinear_constraint],
                   bounds=((-2.0, 2.0), (-4.0, 0.0)))
    print("MINIMUM: ")
    print(res.x)
    opt = res.x.reshape(-1, 1)
    get_lambda(res.x)
    graphics(x, y, z, point=opt, with_point=True)
Example #27
def exercise_3(vector: list):
    linear_constraint = LinearConstraint([[1, 2], [1, -1]], [-np.inf, -np.inf],
                                         [1, 4])

    def cons_f(x):
        return [x[0]**2 + x[1]]

    def cons_J(x):
        return [[2 * x[0], 1]]

    # def cons_H(x, v):
    # return v[0]*np.array([[2, 0], [0, 0]])

    # nonlinear_constraint = NonlinearConstraint(cons_f, -np.inf, 1, jac=cons_J, hess=cons_H)
    nonlinear_constraint = NonlinearConstraint(cons_f,
                                               -np.inf,
                                               1,
                                               jac=cons_J,
                                               hess=BFGS())

    # x0 = np.array([0.5, 0])
    x0 = np.array(vector)

    res = minimize(rosen,
                   x0,
                   method='trust-constr',
                   jac=rosen_der,
                   hess=rosen_hess,
                   constraints=[linear_constraint, nonlinear_constraint],
                   options={'verbose': 1})

    print(res.x)
Example #28
def fun_misto(x, obj_fixo, obj_movel, verbose):

    # Set the optimizer parameters.

    a_inicial = np.array([1.0, 0.0])
    restricao = NonlinearConstraint(fun_restricoes,
                                    1.0,
                                    1.0,
                                    jac=jac_restricoes)

    n = len(obj_fixo) // 3
    m = len(obj_movel) // 3
    p_min = min(n, m)

    l = 1

    resultado = 0.0

    for i in range(p_min):

        # Define the objective function, gradient, and Hessian for this iteration.

        if p_min == n:

            def fun_objetivo(a):
                return fun_interna_misto_fixo(a, i, x, obj_fixo, obj_movel)

        else:

            def fun_objetivo(a):
                return fun_interna_misto_movel(a, i, x, obj_fixo, obj_movel)

        def grad_objetivo(a):
            return nd.Gradient(fun_objetivo)(a)

        def hess_objetivo(a):
            return nd.Hessian(fun_objetivo)(a)

        # Call the optimizer and solve one instance of the problem

        valor, confiavel = fun_otimizador(a_inicial, fun_objetivo,
                                          grad_objetivo, hess_objetivo,
                                          restricao)

        if verbose:
            print("---> Solving: ", l, " of ", p_min, " ...")

        l = l + 1

        if confiavel:
            resultado = resultado + valor
        else:
            return 0.0, False

    return resultado, True
Example #29
def old_pu_learning(x, y, P, k=7, alpha=0.2, gamma=0.3, maxiter=1000):
    Fd = x.shape[1]
    Ft = y.shape[1]
    Nd = x.shape[0]
    Nt = y.shape[0]
    
    #Number of variables
    N_variables = Fd * k + Ft * k
    
    print("Number of variables:", N_variables)    
    print("Finding positive and negative examples...")
    
    Ipos = np.where(P==1.)
    Ineg = np.where(P==0.)

    print("Number of positive examples:", Ipos[0].shape[0])
    print("Number of negative/unlabelled examples:", Ineg[0].shape[0])
    
    alpha_rac = sqrt(alpha)
    
    @timeit
    def objective(z):
        H = z[:Fd*k].reshape((Fd,k))
        W = z[-Ft*k:].reshape((Ft,k))
        
        M = P - (x @ H @ np.transpose(W) @ np.transpose(y))
        
        M[Ineg] *= alpha_rac
        
        L = np.sum(M**2) + gamma/2 * (np.sum(H**2, axis=(0,1)) + np.sum(W**2, axis=(0,1)))  # np.sum, not torch.sum: M, H, W are NumPy arrays
        print(L)

        return(L)
    
    def constraint(z):
        H = z[:Fd*k].reshape((Fd,k))
        W = z[-Ft*k:].reshape((Ft,k))
        S = x @ H @ np.transpose(W) @ np.transpose(y)
        S = S.reshape((-1,))
        
        return(S)
    
    nlc = NonlinearConstraint(constraint, np.zeros(Nt*Nd), np.ones(Nt*Nd))

    print("Going to minimize... Maximum number of iterations:", maxiter)
    res = minimize(objective, x0=np.random.randn(N_variables), options={'maxiter': maxiter, 'disp': True}, constraints=[nlc], method='trust-constr')
    
    print("\n\nSolved.")
    
    z=res['x']
    H = z[:Fd*k].reshape((Fd,k))
    W = z[-Ft*k:].reshape((Ft,k))

    print("Now computing Z=HW^T, then will compute S...")
    
    S = x @ H @ np.transpose(W) @ np.transpose(y)
    
    return(S)
Example #30
def funTriang(xA, lista):
    # use the points
    # Define the required functions (objective, Jacobian (gradient), Hessian)
    def fun_obj(a, xA, lista):
        z1 = max(a[0] * xA + a[1] * lista[0],
                 a[0] * lista[1][0] + a[1] * lista[1][1],
                 a[0] * lista[2][0] + a[1] * lista[2][1])
        z2 = min(a[0] * lista[3][0] + a[1] * lista[3][1],
                 a[0] * lista[4][0] + a[1] * lista[4][1],
                 a[0] * lista[5][0] + a[1] * lista[5][1])
        return (max(0, z1 - z2))**2.0

    def fun_triang(a):
        return fun_obj(a, xA, lista)

    def grad_triang(a):
        return nd.Gradient(fun_triang)(a)

    def hess_triang(a):
        return nd.Hessian(fun_triang)(a)

    # Define the constraint function and its Jacobian for ||a||_2^2 = 1.0 (i.e., 1 <= ||a||_2^2 <= 1)
    def cons_f(a):
        return a[0]**2.0 + a[1]**2.0

    def cons_J(a):
        return [[2.0 * a[0], 2.0 * a[1]]]

    # Optimizer parameters
    a0 = np.array([1.0, 0.0])
    nonlinear_constraint = NonlinearConstraint(cons_f, 1.0, 1.0, jac=cons_J)

    k = 0

    while True:
        # Solve the minimization problem
        result = minimize(fun_triang,
                          a0,
                          method='trust-constr',
                          jac=grad_triang,
                          hess=hess_triang,
                          constraints=nonlinear_constraint)

        # Check the solution
        if (result.success == False) or ((result.fun < 10**(-8)) and
                                         (result.constr_violation != 0.0)):
            # Did not solve the problem, or solved it but violated the constraints
            if k > 10:
                # Allow at most 10 attempts
                return result.fun, False
            a0 = np.array([random.random(), random.random()])
            k = k + 1
        else:
            # Solved the problem without violating the constraints
            return result.fun, True
Example #31
    def test_warn_ignored_options(self):
        # warns about constraint options being ignored
        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
        x0 = (2, 0, 1)

        if self.method == "slsqp":
            bnds = ((0, None), (0, None), (0, None))
        else:
            bnds = None

        cons = NonlinearConstraint(lambda x: x[0], 2, np.inf)
        res = minimize(fun, x0, method=self.method,
                       bounds=bnds, constraints=cons)
        # no warnings without constraint options
        assert_allclose(res.fun, 1)

        cons = LinearConstraint([1, 0, 0], 2, np.inf)
        res = minimize(fun, x0, method=self.method,
                       bounds=bnds, constraints=cons)
        # no warnings without constraint options
        assert_allclose(res.fun, 1)

        cons = []
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        keep_feasible=True))
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        hess=BFGS()))
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        finite_diff_jac_sparsity=42))
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        finite_diff_rel_step=42))
        cons.append(LinearConstraint([1, 0, 0], 2, np.inf,
                                     keep_feasible=True))
        for con in cons:
            _assert_warns(OptimizeWarning, minimize, fun, x0,
                          method=self.method, bounds=bnds, constraints=con)