def optimize_n_periods(num_periods,
                       r,
                       eta,
                       pure_discount,
                       epsilon,
                       beta,
                       years_per_period=100):
    def utility_fn(xs):
        c = [xs[2 * i] for i in range(num_periods)]
        s = [xs[2 * i + 1] for i in range(num_periods)]

        # scale by investment return
        for i in range(1, num_periods):
            c[i] *= (1 + r)**(i * years_per_period)
            s[i] *= (1 + r)**(i * years_per_period)

        # utility_floor = -utility_of_consumption(eta, 0.01)
        utility_floor = 0

        # Note: This doesn't really make sense because higher consumption at
        # time t decreases the probability of there being consumption at time
        # t. First one should really be t-1 instead, but then the two-period
        # model is too sparse
        initial_x_risk = 0.001
        x_risk = [
            min(
                1,
                initial_x_risk * sum(c[0:(i + 1)])**epsilon *
                sum(s[0:(i + 1)])**beta) for i in range(num_periods)
        ]

        return sum([
            np.prod([((1 - pure_discount) *
                      (1 - x_risk[j]))**years_per_period
                     for j in range(i + 1)]) *
            (utility_of_consumption(eta, c[i]) + utility_floor)
            for i in range(num_periods)
        ])

    bounds_constraint = optimize.LinearConstraint(
        np.identity(2 * num_periods),  # identity matrix
        lb=[0] * (2 * num_periods),
        ub=[1] * (2 * num_periods),
    )

    sum_constraint = optimize.LinearConstraint(
        np.array([1] * (2 * num_periods)),
        lb=0,
        ub=1,
    )

    opt = optimize.minimize(
        lambda xs: -utility_fn(xs),
        x0=np.array([1.0 / (2 * num_periods)] * (2 * num_periods)),
        constraints=[bounds_constraint, sum_constraint],
    )
    print(opt)
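
A minimal self-contained sketch of the constraint pattern used above (an identity-matrix LinearConstraint for box bounds plus a one-row budget constraint), with a toy log utility standing in for the model:

import numpy as np
from scipy import optimize

n = 4  # e.g. two periods of interleaved (c_i, s_i) pairs
box = optimize.LinearConstraint(np.identity(n), lb=0, ub=1)  # 0 <= x_i <= 1
budget = optimize.LinearConstraint(np.ones(n), lb=0, ub=1)   # sum(x) <= 1

res = optimize.minimize(
    lambda xs: -np.sum(np.log(1e-9 + xs)),  # toy concave utility, not the model above
    x0=np.full(n, 1.0 / n),
    constraints=[box, budget],
)
print(res.x)  # roughly an equal split that exhausts the budget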
Example 2
def optimize_three_pronged_choice_multiperiod(alpha,
                                              delta,
                                              r,
                                              eta,
                                              num_periods=300):
    '''
    Optimize where each period has a three-pronged choice (invest, consume, reduce discount rate).

    alpha: Rate at which it becomes harder to reduce the discount rate (negative)
    delta: Minimum discount rate
    r: Interest rate
    eta: Elasticity of marginal utility of consumption
    '''

    # num_periods=300 takes 10 minutes
    def total_utility(xs):
        capital = 1
        spending_on_discount = 1
        discount_factor = 1
        utility = 0
        for i in range(num_periods):
            utility += discount_factor * utility_function(
                eta, capital * xs[num_periods + i])
            discount_factor *= 1 - delta * spending_on_discount**(-alpha)
            spending_on_discount += capital * xs[i]
            capital *= 1 - xs[i] - xs[num_periods + i]
            capital *= 1 + r

        return -utility  # negative so we can minimize it

    # first num_periods entries are spending on discount rate reduction
    # second num_periods are spending on general consumption
    initial_guess = np.array(
        [0.5 / num_periods for _ in range(2 * num_periods)])

    # spend no more than 100% in each period
    spending_sum_constraint = optimize.LinearConstraint(np.array([[
        1 if i == j or i == j + num_periods else 0
        for i in range(2 * num_periods)
    ] for j in range(num_periods)]),
                                                        lb=[0] * num_periods,
                                                        ub=[1] * num_periods)

    # cannot spend less than 0% or more than 100% on each thing
    bounds_constraint = optimize.LinearConstraint(
        np.identity(2 * num_periods),  # identity matrix
        lb=[0] * 2 * num_periods,
        ub=[1] * 2 * num_periods)

    opt = optimize.minimize(
        total_utility,
        initial_guess,
        constraints=[spending_sum_constraint, bounds_constraint])
    for i in range(num_periods):
        print("x({}) = {}, c({}) = {}".format(i, opt.x[i], i,
                                              opt.x[num_periods + i]))
Example 3
def optimize_three_pronged_choice_fixed(alpha, delta, r, eta, num_periods=500):
    '''
    Optimize where each period has a three-pronged choice (invest, consume,
    reduce discount rate). As a simplifying assumption require that the
    proportion of capital consumed in each period remains fixed, and likewise
    for spending on reducing the discount rate.

    '''
    def total_utility(xs):
        capital = 100
        spending_on_discount = 1
        discount_factor = 1
        utility = 0
        for i in range(num_periods):
            utility += discount_factor * utility_function(eta, capital * xs[1])

            # what if at a certain point, the discount rate becomes fixed at a
            # low value and you can't reduce it anymore?
            # if i < 100:
            #     discount_factor *= 1 - delta * spending_on_discount**(-alpha)
            # else:
            #     discount_factor *= 1 - 0.01/100
            discount_factor *= 1 - delta * spending_on_discount**(-alpha)
            spending_on_discount += capital * xs[0]
            capital *= 1 - xs[0] - xs[1]
            capital *= 1 + r

        return -utility  # negative so we can minimize it

    initial_guess = np.array([0.5, 0.5])

    # no more than 100% each period
    spending_sum_constraint = optimize.LinearConstraint(np.array([1,
                                                                  1]).reshape(
                                                                      1, -1),
                                                        lb=[0],
                                                        ub=[1])

    # cannot spend less than 0% or more than 100% on each thing
    bounds_constraint = optimize.LinearConstraint(np.array([[1, 0], [0, 1]]),
                                                  lb=[0, 0],
                                                  ub=[1, 1])

    opt = optimize.minimize(
        total_utility,
        initial_guess,
        constraints=[spending_sum_constraint, bounds_constraint])
    print("x(t) = {:.4f}%, c(t) = {:.4f}%".format(100 * opt.x[0],
                                                  100 * opt.x[1]))
    print("{}".format(-total_utility(opt.x)))
Example 4
def XYvT(J, hpj=1):
    Temp = np.linspace(0, 10, 201)
    rranges = (slice(0, 1, 0.01), slice(0, 0.5, 0.01))

    con = opt.LinearConstraint([[1, 0], [-1, 1], [1, 1]],
                               [0, -np.inf, -np.inf], [1, 0, 1])
    bound = opt.Bounds([0, 0], [1, 0.5])

    mX, mY = [], []

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    for Tpj in Temp:
        fun = lambda x: F(J, hpj * abs(J), x[0], x[1], Tpj * abs(J))
        res = opt.minimize(fun, [0.5, 0.25],
                           method='trust-constr',
                           constraints=con,
                           bounds=bound)
        mX.append(res.x[0])
        mY.append(res.x[1])
        working = "Calculating T/J:" + str(Tpj)
        print(working)

    ax.plot(mX, mY, Temp, label='min')
    ax.set_xlabel('Composition: X')
    ax.set_ylabel('Bond Frequency: Y')
    ax.set_zlabel('Temperature (T/J)')

    plt.show()
Example 5
def find_eigvec(samples, gamma, orthogonal_to=None, alpha=1e6):
    constraints = [nonlinear_constraint]
    if orthogonal_to is not None and len(orthogonal_to) > 0:
        ortho = optimize.LinearConstraint(orthogonal_to, 0, 0)
        constraints.append(ortho)
    
    U, _, _ = linalg.svd(samples.T, full_matrices=False)
    x = U[:, 0]
    #x_neg = np.maximum(0, -x)
    #x_pos = np.maximum(0, x)
    #x = np.stack([x_neg, x_pos])
    #x = np.random.randn(samples.shape[1])
    opt = optimize.minimize(
        cost,
        x,
        method='trust-constr',
        constraints=constraints,
        args=(samples, gamma, alpha),
        jac=True,
        #hessp=hessian_p,
        options={'maxiter': 100000}
    )
    #print(opt)
    assert opt.success, opt.message
    vec = opt.x / linalg.norm(opt.x)
    k = samples.shape[0]
    return vec, vec @ (samples.T @ (samples @ vec)) / k
Example 6
def estim_quantities(A, b, tol=None):
    # Number of ingredients
    p = A.shape[1]

    # Objective function
    def obj(x):
        return np.sum((A @ x - b)**2)

    # Ordering constraint matrix
    # each row: c_i^T * x >= 0
    C = np.zeros([p, p])
    # Require that each value is larger than the previous
    for i in range(p):
        for j in range(p):
            if i == j:
                C[i, j] = 1
            elif j == i - 1:
                C[i, j] = -1

    # non-negative constraint (0 <= c_i^T *x <= inf)
    lin_const = optim.LinearConstraint(C, 0, np.inf, keep_feasible=False)

    # Solve!
    result = optim.minimize(obj,
                            np.zeros(p),
                            method="COBYLA",
                            constraints=lin_const,
                            tol=tol)

    return result
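
For intuition, the ordering matrix built by the double loop above is bidiagonal; a quick illustration for p = 3:

import numpy as np

p = 3
C = np.eye(p) - np.eye(p, k=-1)  # same matrix the loops produce
print(C)
# [[ 1.  0.  0.]
#  [-1.  1.  0.]
#  [ 0. -1.  1.]]
# C @ x >= 0  <=>  x[0] >= 0, x[1] >= x[0], x[2] >= x[1]:
# a nonnegative, nondecreasing solution.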
Example 7
def optimize_consumption_cumulative(delta, r, eta, num_periods=100):
    def total_utility(xs):
        capital = 100
        cumulative_consumption = 0
        utility = 1
        for t in range(num_periods):
            consumption = capital * xs[t]
            cumulative_consumption += consumption
            utility += (1 - delta)**t * utility_function(
                eta, cumulative_consumption)
            capital -= consumption
            capital *= 1 + r

        return -utility  # negative so we can minimize it

    initial_guess = np.array([1 / num_periods] * num_periods)

    # cannot spend less than 0% or more than 100% on any period
    bounds_constraint = optimize.LinearConstraint(
        np.identity(num_periods),  # identity matrix
        lb=[0] * num_periods,
        ub=[1] * num_periods)

    opt = optimize.minimize(total_utility,
                            initial_guess,
                            constraints=[bounds_constraint])
    for i in range(num_periods):
        print("x({}) = {}".format(i, opt.x[i]))
    print("total utility = {}".format(-total_utility(opt.x)))
    copy = [x if i != 0 else 1 for i, x in enumerate(opt.x)]
    print("fun utility = {}".format(-total_utility(copy)))
Example 8
def run_prop_strategies(starting_longtermists, starting_susceptible,
                        starting_money):
    print("Spend 1%: {}".format(
        spend_prop(starting_longtermists, starting_susceptible, starting_money,
                   100, 0.01)[0]))
    print("Spend 10%: {}".format(
        spend_prop(starting_longtermists, starting_susceptible, starting_money,
                   100, 0.1)[0]))
    print("Spend 50%: {}".format(
        spend_prop(starting_longtermists, starting_susceptible, starting_money,
                   100, 0.5)[0]))
    print("Spend 75%: {}".format(
        spend_prop(starting_longtermists, starting_susceptible, starting_money,
                   100, 0.75)[0]))
    print("Spend 100%: {}".format(
        spend_prop(starting_longtermists, starting_susceptible, starting_money,
                   100, 1)[0]))

    bounds_constraint = optimize.LinearConstraint([1], lb=[0], ub=[1])
    opt = optimize.minimize(
        lambda prop: -spend_prop(starting_longtermists, starting_susceptible,
                                 starting_money, 100, prop)[0], [0.1],
        constraints=bounds_constraint)
    print("Optimal spending: {:.1f}% -> {}".format(
        opt.x[0] * 100,
        spend_prop(starting_longtermists, starting_susceptible, starting_money,
                   100, opt.x[0])[0]))
Example 9
def risk_parity(df_cov):

    assets = df_cov.index
    cov = df_cov.values
    n = len(assets)
    w0 = np.repeat(1 / n, n)
    A = np.repeat(1, n)[np.newaxis, :]
    lb = np.array([1])
    ub = np.array([1])
    constraints = opt.LinearConstraint(A=A, lb=lb, ub=ub)
    bounds = opt.Bounds(ub=np.repeat(np.inf, n), lb=np.repeat(0, n))

    def target(w):
        target = 0
        p_variance = w.T.dot(cov).dot(w)
        for i in range(n):
            denominator = cov[i, :].dot(w) * n
            target += (w[i] - p_variance / denominator)**2
        return target

    res = opt.minimize(target,
                       w0,
                       method='SLSQP',
                       constraints=constraints,
                       bounds=bounds)

    return pd.Series(res.x, index=assets)
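
A toy sanity check (assuming the surrounding imports: numpy as np, pandas as pd, scipy.optimize as opt): uncorrelated assets with equal variance should come out equally weighted.

import numpy as np
import pandas as pd

df_cov = pd.DataFrame(0.04 * np.eye(3), index=list('ABC'), columns=list('ABC'))
print(risk_parity(df_cov))  # ~1/3 in each of A, B, C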
Example 10
    def add_linear_ineq_con(self, A, b_l, b_u):
        """
        Adds linear inequality constraints :math:`b_l <= A x <= b_u` to the optimisation problem.
        Only ``trust-constr``, ``COBYLA``, and ``SLSQP`` methods can handle general constraints.

        :param numpy.ndarray A: An (M,n) matrix that contains coefficients of the linear inequality constraints.
        :param numpy.ndarray b_l: An (M,1) matrix that specifies lower bounds of the linear inequality constraints. If there is no lower bound, set ``b_l = -np.inf * np.ones(M)``.
        :param numpy.ndarray b_u: A (M,1) matrix that specifies upper bounds of the linear inequality constraints. If there is no upper bound, set ``b_u = np.inf * np.ones(M)``.
        """
        # trust-constr method has its own linear constraint handler
        assert self.method in ['SLSQP', 'trust-constr', 'COBYLA']
        if self.method == 'trust-constr':
            self.constraints.append(optimize.LinearConstraint(A, b_l, b_u))
        # other methods add inequality constraints using dictionary files
        else:
            if not np.any(np.isinf(b_l)):
                self.constraints.append({
                    'type': 'ineq',
                    'fun': lambda x: np.dot(A, x) - b_l,
                    'jac': lambda x: A
                })
            if not np.any(np.isinf(b_u)):
                self.constraints.append({
                    'type': 'ineq',
                    'fun': lambda x: -np.dot(A, x) + b_u,
                    'jac': lambda x: -A
                })
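
A standalone sketch of the two encodings this method switches between, on an assumed toy constraint 0 <= x0 + x1 <= 1 (not from the source):

import numpy as np
from scipy import optimize

A = np.array([[1.0, 1.0]])
b_l, b_u = np.array([0.0]), np.array([1.0])
obj = lambda x: (x[0] - 2)**2 + (x[1] - 2)**2

# trust-constr consumes LinearConstraint objects directly
lc = optimize.LinearConstraint(A, b_l, b_u)
r1 = optimize.minimize(obj, np.zeros(2), method='trust-constr', constraints=[lc])

# SLSQP takes the equivalent pair of 'ineq' dictionaries
dicts = [
    {'type': 'ineq', 'fun': lambda x: A @ x - b_l, 'jac': lambda x: A},
    {'type': 'ineq', 'fun': lambda x: b_u - A @ x, 'jac': lambda x: -A},
]
r2 = optimize.minimize(obj, np.zeros(2), method='SLSQP', constraints=dicts)
print(r1.x, r2.x)  # both land near the x0 + x1 = 1 edge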
Example 11
def u_for_stay_v1(tri, num, c, B):
    cost_fun = lambda x: cost_fun_v1(x, c)
    x0 = np.zeros((6, ))
    nhat_array = nhat_tri_facet(tri, num)
    # Extract vertex coordinates:
    points = tri.points
    vertices = tri.simplices
    vertices_here = vertices[num, :]
    C_cons = cons_array_for_stay(nhat_array, B)
    m = 6  # Number of constraints for control to facet with no dynamics
    cons = optimize.LinearConstraint(C_cons, np.zeros(m), np.inf * np.ones(m))
    res = optimize.minimize(cost_fun,
                            x0,
                            method='trust-constr',
                            constraints=cons)
    local_debug = False
    if local_debug:
        print('')
        print('Results of optimization (success, cost, number of iterations):')
        print(res.success)
        print(res.fun)
        print(res.nit)
    Vrow1 = np.concatenate((points[vertices_here[0], :], np.ones((1, ))),
                           axis=0)
    Vrow2 = np.concatenate((points[vertices_here[1], :], np.ones((1, ))),
                           axis=0)
    Vrow3 = np.concatenate((points[vertices_here[2], :], np.ones((1, ))),
                           axis=0)
    Vmat = np.array([Vrow1, Vrow2, Vrow3])
    u_optimal = res.x
    RHS = np.array([u_optimal[0:2], u_optimal[2:4], u_optimal[4:6]])
    f_and_g_trans = np.matmul(np.linalg.inv(Vmat), RHS)
    f_and_g = np.transpose(f_and_g_trans)
    return u_optimal, f_and_g
Example 12
    def get_lineqcon_obj(self, optimization_problem):
        """Returns the optimized linear equality constraints as an object.

        Checks the length of the beq first, before setting the bounds of the
        constraint. Sets the lower and upper bounds of the
        optimization_problem and returns optimized linear equality constraints.
        Keep_feasible is set to True.

        Returns
        -------
        None: bool
            If the length of the beq of the optimization_problem is equal zero.
        lineqcon_obj : LinearConstraint
            Linear equality Constraint object with optimized upper and lower
            bounds of beq of the optimization_problem.

        See also
        --------
        constraint_objects
        Aeq
        beq
        """
        if len(optimization_problem.beq) == 0:
            return None

        lb = optimization_problem.beq
        ub = optimization_problem.beq

        return optimize.LinearConstraint(
            optimization_problem.Aeq, lb, ub, keep_feasible=True
        )
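
Setting lb == ub, as above, is the standard way to express an equality with LinearConstraint; a minimal illustration on assumed data:

import numpy as np
from scipy import optimize

Aeq = np.array([[1.0, 1.0]])
beq = np.array([1.0])
eq = optimize.LinearConstraint(Aeq, beq, beq, keep_feasible=True)

res = optimize.minimize(lambda x: x @ x, x0=np.array([1.0, 0.0]),
                        method='trust-constr', constraints=[eq])
print(res.x)  # ~[0.5, 0.5], the min-norm point on x0 + x1 = 1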
Example 13
def optimize_discount_fixed(alpha, delta, r, eta, num_periods=10000):
    g = (r - delta) / eta
    utility_lower_limit = abs(utility_function(eta, 1))

    def total_utility(xs):
        capital = 100
        spending_on_discount = 1
        discount_factor = 1
        utility = 0
        for i in range(num_periods):
            # add a constant to utility function so it's not negative, b/c that messes up the optimizer
            utility += discount_factor * (utility_lower_limit +
                                          utility_function(eta, (1 + g)**i))
            discount_factor *= 1 - delta * spending_on_discount**(-alpha)
            spending_on_discount += capital * xs[0]
            capital *= 1 - xs[0]
            capital *= 1 + r

        return -utility  # negative so we can minimize it

    initial_guess = np.array([0.01])

    # cannot spend less than 0% or more than 100%
    bounds_constraint = optimize.LinearConstraint([1], lb=[0], ub=[1])

    opt = optimize.minimize(total_utility,
                            initial_guess,
                            constraints=[bounds_constraint])
    print("x(t) = {:.4f}%".format(100 * opt.x[0]))
    print("{}".format(-total_utility(opt.x)))
    for x in [0.1, 1, 10, 100]:
        print("{}: {}".format(x, -total_utility([x / 100])))
Example 14
    def addLinearIneqCon(self, A, b_l, b_u):
        """
        Adds linear inequality constraints b_l <= A x <= b_u to the optimization problem.
        Only 'trust-constr', 'COBYLA', and 'SLSQP' methods can handle general constraints.

        :param ndarray A:
            M-by-n matrix that contains coefficients of the linear inequality constraints
        :param ndarray b_l:
            M-by-1 matrix that specifies lower bounds of the linear inequality constraints.
            If there is no lower bound, set b_l = -np.inf * np.ones(M).
        :param ndarray b_u:
            M-by-1 matrix that specifies upper bounds of the linear inequality constraints.
            If there is no upper bound, set b_u = np.inf * np.ones(M).
        """
        # trust-constr method has its own linear constraint handler
        assert self.method in ('trust-constr', 'SLSQP', 'COBYLA')
        if self.method == 'trust-constr':
            self.constraints.append(optimize.LinearConstraint(A, b_l, b_u))
        # other methods add inequality constraints using dictionaries
        else:
            if not np.any(np.isinf(b_l)):
                self.constraints.append({
                    'type': 'ineq',
                    'fun': lambda x: np.dot(A, x) - b_l,
                    'jac': lambda x: A
                })
            if not np.any(np.isinf(b_u)):
                self.constraints.append({
                    'type': 'ineq',
                    'fun': lambda x: -np.dot(A, x) + b_u,
                    'jac': lambda x: -A
                })
Example 15
    def fit(self, X, y):
        """
        Parameters
        ----------
        X : array-like of shape (n_samples, n_classes, n_estimators)
            X_ice is the probability estimator e puts on sample i being in 
            class c.

        y : array-like of shape (n_samples,)
            Targets.

        Returns
        -------
        self
        """

        # X is (n_samples, n_classes, n_estimators)
        def loss(coef_):
            return self.loss(y, (X * coef_).sum(axis=-1))

        n_estimators = X.shape[-1]
        self.classes_ = np.arange(X.shape[1])
        if n_estimators == 1:
            self.coef_ = np.array([1])
            return self
        constraint = optimize.LinearConstraint(np.array([1] * n_estimators), 1,
                                               1)
        coef0 = np.array([1. / n_estimators] * n_estimators)
        self.coef_ = optimize.minimize(loss,
                                       x0=coef0,
                                       constraints=[constraint]).x
        return self
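
Note that the constraint only pins the weight sum to 1; nothing forces the weights nonnegative. A standalone sketch of the same sum-to-one blend on assumed toy data:

import numpy as np
from scipy import optimize

preds = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])  # 3 samples x 2 models
target = np.array([0.7, 0.5, 0.5])
con = optimize.LinearConstraint(np.ones(2), 1, 1)

w = optimize.minimize(lambda c: ((preds @ c - target)**2).sum(),
                      x0=np.full(2, 0.5), constraints=[con]).x
print(w, w.sum())  # blended weights, summing to 1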
Example 16
def _calculate_limited_purchases(
        current_portfolio: Dict[Security, Money],
        desired_percentages: Dict[Security, float], amount_to_invest: Money,
        purchases_to_keep: int) -> Dict[Security, Money]:
    all_stocks = set(current_portfolio.keys()).union(
        desired_percentages.keys())
    stock_combinations = itertools.combinations(all_stocks, purchases_to_keep)
    min_deviation = float('inf')
    next_purchases = None
    for stock_candidates in stock_combinations:
        purchase_guess = amount_to_invest * np.ones(
            purchases_to_keep) / purchases_to_keep
        constraint_sum_to_investment = opt.LinearConstraint(
            np.ones_like(purchase_guess), amount_to_invest, amount_to_invest)
        purchase_optim = opt.minimize(_get_squared_deviation,
                                      purchase_guess,
                                      method='trust-constr',
                                      args=(stock_candidates,
                                            current_portfolio,
                                            desired_percentages),
                                      constraints=constraint_sum_to_investment)
        deviation = purchase_optim.fun
        if deviation < min_deviation:
            min_deviation = deviation
            next_purchases = dict(zip(stock_candidates, purchase_optim.x))

    return next_purchases
Example 17
def dynamic_cournot_nash_ot(mu, nu_len, c, potential, d_potential, SS, epsilon,
                            **kwargs):
    def opt_func(x):
        val, res_ot = ot_extended(mu,
                                  x,
                                  c,
                                  None,
                                  None,
                                  epsilon,
                                  log=True,
                                  optimizing=False)
        psi = res_ot[1]['v']
        val += potential(x)
        grad = np.log(np.maximum(psi, 1e-300)) * epsilon
        grad += d_potential(x)
        grad -= np.mean(grad)
        if np.any(np.isnan(grad)):
            raise RuntimeError(f"nan @ dynamic_cournot_nash")
        return val, grad

    res = opt.minimize(opt_func,
                       np.full(nu_len, 1. / nu_len),
                       jac=True,
                       method='SLSQP',
                       constraints=opt.LinearConstraint(
                           np.r_[np.eye(nu_len), [np.full(nu_len, 1)]],
                           np.r_[np.full(nu_len, 0), [-np.inf]],
                           np.r_[np.full(nu_len, np.inf), [1]]),
                       **kwargs)
    return res
Example 18
def LinearConstraint_AD(f):
    """
    Takes a linear constraint f>=0, 
    encoded as an ad.Sparse variable, 
    and turns it into a scipy compatible constraint.
    """
    return sciopt.LinearConstraint(f.tangent_operator(),
                                   lb=-f.value,
                                   ub=np.inf)
Example 19
def updateReviewParams(review,params,modelParams,termvec):
    # maximization to find phi
    K = len(modelParams['gamma'])
    wn = np.where(termvec>0)[0] # nonzero elements
    gamma = modelParams['gamma']
    phi = params['phi']
    newphi = phi.copy()  # copy so the updates below do not mutate phi in place
    bounds = opt.Bounds([1e-10 for i in range(K)],[1 for i in range(K)])
    constraints = opt.LinearConstraint([1 for i in range(K)], 1, 1)
    for idx, j in enumerate(wn):
        x0 = phi[:,idx]
        x0 = [max(val,1e-10) for val in x0]
        x0 = [min(val,1) for val in x0]
        res = opt.minimize(
                        lambda x: -phifunction(x,idx,wn,review,modelParams,params, termvec), x0,
                           bounds = bounds, constraints = constraints)
        newphi[:,idx] = res.x

    params['phi']=newphi
    
    # update eta
    neweta = gamma
    for idx, n in enumerate(wn):
        neweta = neweta + phi[:,idx]
    params['eta']=neweta
    
    # update lambda
    lamb = params['lamb']
    beta = modelParams['beta']
    sbar = np.zeros(K)
    sVar = np.zeros(K)
    for i in range(K):
        for idx, j in enumerate(wn):
            sbar[i] = sbar[i] + termvec[j]*beta[i,j]*phi[i,idx]
            sVar[i] = sVar[i] + termvec[j]*(beta[i,j]**2)*phi[i,idx]*(1-phi[i,idx])
    bounds = opt.Bounds([0 for i in range(K)],[1 for i in range(K)])
    x0 = lamb
    x0 = [max(val,0) for val in x0]
    x0 = [min(val,1) for val in x0]
    res = opt.minimize(
                lambda x: lambdafunction(x,review,modelParams,params, termvec),
                            x0, bounds = bounds, constraints = constraints
                        )
    newlamb= res.x
    params['lamb']=newlamb
    
    # update sigma
    delta2 = modelParams['delta2']
    SIG = modelParams['SIG']
    SIGinv = np.linalg.inv(SIG)
    newsigma = np.zeros(params['sigma'].shape)
    for i in range(K):
        newsigma[i] = delta2/(sVar[i]+sbar[i]**2+delta2/SIGinv[i,i])
    
    reviewParams = {'eta':neweta,'phi':newphi,'lamb':newlamb,'sigma':newsigma}
    return reviewParams
Example 20
    def helper(nu, eps):
        def xlogx(x):
            return xlx(x, x)

        def entropy(x):
            return -xlogx(x) - xlogx(1 - x)

        def entropy_grad(x):
            grad = np.log(1 / x - 1)
            grad[x >= 1 - 1e-8] = +10
            grad[x <= 0 + 1e-8] = -10
            return grad

        def entropy_hess(x):
            hess = -1 / ((1 - x) * x)
            hess[x >= 1 - 1e-8] = -10
            hess[x <= 0 + 1e-8] = -10
            return hess

        def inner_func(mu, nu):
            return (entropy(mu - nu) - entropy(mu)).sum()

        def inner_jac(mu, nu):
            return entropy_grad(mu - nu) - entropy_grad(mu)

        def inner_hess(mu, nu):
            diag_elems = entropy_hess(mu - nu) - entropy_hess(mu)
            return np.diag(diag_elems)

        inner_function = lambda x: inner_func(x, nu)
        inner_jacobian = lambda x: inner_jac(x, nu)
        inner_hessian = lambda x: inner_hess(x, nu)

        A = np.concatenate([np.ones((1, T)), np.eye(T)], axis=0)
        lb = np.concatenate([np.array([(1 + eps) / 2. * T]), nu * np.ones(T)],
                            axis=0)
        ub = np.concatenate([np.array([T]), np.ones(T)], axis=0)
        cons = optimize.LinearConstraint(A, lb, ub)

        result = optimize.minimize(fun=inner_function,
                                   jac=inner_jacobian,
                                   constraints=cons,
                                   hess=inner_hessian,
                                   method="trust-constr",
                                   x0=(1 + eps) / 2 * np.ones(T),
                                   options={
                                       'maxiter': 500,
                                       'disp': False,
                                       'initial_constr_penalty': 10.,
                                       'initial_tr_radius': 1.
                                   })

        return result
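
The stacked constraint packs one sum row plus a per-coordinate box into a single LinearConstraint; a shape check with assumed T = 3:

import numpy as np

T, eps, nu = 3, 0.2, 0.1  # assumed toy values
A = np.concatenate([np.ones((1, T)), np.eye(T)], axis=0)
lb = np.concatenate([np.array([(1 + eps) / 2. * T]), nu * np.ones(T)])
ub = np.concatenate([np.array([T]), np.ones(T)])
# Row 0 bounds the sum: (1 + eps)/2 * T <= sum(mu) <= T;
# the identity rows box each coordinate: nu <= mu_i <= 1.
print(A.shape)  # (4, 3)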
Example 21
def MacGyver_method(tickerlist, dfeps, method='integrated'):
    '''MacGyver method of pairwise calculation'''
    periodicity = 52
    minimal = 10**(-20)
    xlams, alphas, betas = [], [], []
    for it in range(len(tickerlist) - 1):
        tick1 = tickerlist[it]
        for jt in range(it + 1, len(tickerlist)):
            tick2 = tickerlist[jt]
            InData = np.array(dfeps[[tick1, tick2]])  # global cast
            # integrated corr.
            if method == 'integrated':
                result = minimize_scalar(IntegratedCorrObj)
                xlamopt = np.exp(result.x) / (1 + np.exp(result.x))
                print(tick1, tick2)
                print('    Optimal lambda:', xlamopt)
                print('    Optimal objective function:', \
                      result.fun)
                if np.absolute(xlamopt) < minimal or xlamopt >= 1:
                    halflife = 0
                else:
                    halflife = -np.log(2) / np.log(1 - xlamopt)
                print('    Half-life (years):', halflife / periodicity)
                xlams.append(xlamopt)
            # mean revert corr.
            elif method == 'meanRev':
                #alpha and beta positive
                corr_bounds = scpo.Bounds([0, 0], [np.inf, np.inf])
                #Sum of alpha and beta is less than 1
                corr_linear_constraint = \
                    scpo.LinearConstraint([[1, 1]],[0],[.999])

                initparams = [.02, .93]
                results = scpo.minimize(MeanRevCorrObj, \
                        initparams, \
                        method='trust-constr', \
                        jac='2-point', \
                        hess=scpo.SR1(), \
                        bounds=corr_bounds, \
                        constraints=corr_linear_constraint)
                alpha, beta = results.x
                print('Optimal alpha, beta:', alpha, beta)
                print('Optimal objective function:', results.fun)
                halflife = -np.log(2) / np.log(1 - alpha)
                print('Half-life (years):', halflife / periodicity)
                alphas.append(alpha)
                betas.append(beta)
    # print median values
    if method == 'integrated':
        print('\nMedian MacGyver lambda:', np.median(xlams))
    elif method == 'meanRev':
        print('\nMedian MacGyver alpha:', np.median(alphas))
        print('\nMedian MacGyver beta:', np.median(betas))
Example 22
    def add_linear_eq_con(self, A, b):
        """
        Adds linear equality constraints  :math:`Ax = b` to the optimisation routine. Only ``trust-constr`` and ``SLSQP`` methods can handle equality constraints.

        :param numpy.ndarray A: A (M, n) matrix that contains coefficients of the linear equality constraints.
        :param numpy.ndarray b: A (M, 1) matrix that specifies right hand side of the linear equality constraints.
        """
        assert self.method in ('trust-constr', 'SLSQP')
        if self.method == 'trust-constr':
            self.constraints.append(optimize.LinearConstraint(A, b, b))
        else:
            self.constraints.append({'type': 'eq', 'fun': lambda x: A.dot(x) - b, 'jac': lambda x: A})
Example 23
    def gen_lin_constraint(self, distortions):
        """
        Generate the LinearConstraint object

        Parameters
        ----------
        distortions: A list of distortion objectives
        """
        linear_constraint = opt.LinearConstraint(
            self.constraint, [0, 0] + [1] * (self.states + self.messages),
            list(distortions) + [1] * (self.states + self.messages))
        return linear_constraint
Example 24
    def _numerical_optimize(cls, stats_sum, *, det_pool, lost_hp, inst, lb, ub, stats0):
        def objective(s):
            return -cls.dmg_calculate(s, det_pool=det_pool, lost_hp=lost_hp, inst=inst)

        def grad(s):
            return -cls.dmg_grad(s, det_pool=det_pool, lost_hp=lost_hp, inst=inst)

        n = cls.stats_number()
        bounds = opt.Bounds(lb, ub)
        constraint = opt.LinearConstraint(np.ones(n), stats_sum, stats_sum)
        return opt.minimize(objective, stats0, bounds=bounds, constraints=constraint,
                            jac=grad, method='trust-constr', options={'maxiter': n * 5})
Example 25
    def fit(self, X, params0=None, method='trust-constr', options=None):

        fun = lambda params: -self.ll_zinb(X, params)
        jac = lambda params: -self.grad_ll_zinb(X, params)
        hess = lambda params: -self.hess_ll_zinb(X, params)

        if options == {}:
            options = None

        mean_X_nonzero = X[X > 0].mean()
        var_X_nonzero = X[X > 0].var()
        if params0 is None:
            if mean_X_nonzero < var_X_nonzero:
                # Pseudo-MME initialization
                # p0 and r0 as in https://rdrr.io/bioc/polyester/src/R/get_params.R)
                p0 = 1 - mean_X_nonzero / var_X_nonzero
                r0 = (1 - p0) * mean_X_nonzero / p0
                # pi0 using p(x=0) = pi0 + (1 - pi0) * (1 - p0)^r0
                p_zero_nb = np.exp(r0 * log_eps(1 - p0))
                pi0 = max(1e-2, div_eps((X == 0).mean() - p_zero_nb,
                                        1. - p_zero_nb))
            else:
                p0 = 0.5
                pi0 = 0.2
                r0 = 1.
            params0 = np.array([r0, p0, pi0])

        constraints = scpopt.LinearConstraint(A=np.eye(3), lb=np.array([1e-6,1e-6,1e-6]),\
                                              ub=np.array([np.inf,1.-1e-6, 1.-1e-6]), keep_feasible=True)

        if method != 'L-BFGS-B':
            res = scpopt.minimize(fun,
                                  params0,
                                  method=method,
                                  jac=jac,
                                  hess=hess,
                                  constraints=constraints,
                                  options=options)
        else:
            res = scpopt.minimize(fun,
                                  params0,
                                  method=method,
                                  jac=jac,
                                  constraints=constraints,
                                  options=options)

        if not res.success:
            print("Not a success")
            print(res.message)

        self.r = res.x[0]
        self.p = res.x[1]
        self.pi = res.x[2]
Example 26
def minimize_transmitted_angle(optics):

    m = len(optics.mirror)

    def clock_and_transmit(*rotation_angle):
        clocked = optics.clock(*rotation_angle, True)
        t = clocked.transmission_angle()
        return np.linalg.norm(t)

    # box bounds 0 <= angle <= 2*pi, expressed as a LinearConstraint
    bounds = optimize.LinearConstraint(np.identity(m), 0, 2 * np.pi)
    result = optimize.minimize(clock_and_transmit, np.zeros(m),
                               constraints=bounds)

    return result
Example 27
def compute_meanRevCorr(df_logs, InData):
    '''Compute mean reverting correlations'''
    periodicity = 52
    #alpha and beta positive
    corr_bounds = scpo.Bounds([0, 0], [np.inf, np.inf])
    #Sum of alpha and beta is less than 1
    corr_linear_constraint = \
        scpo.LinearConstraint([[1, 1]],[0],[.999])

    initparams = [.02, .93]

    results = scpo.minimize(MeanRevCorrObj, \
            initparams, \
            method='trust-constr', \
            jac='2-point', \
            hess=scpo.SR1(), \
            bounds=corr_bounds, \
            constraints=corr_linear_constraint)

    alpha, beta = results.x
    print('Optimal alpha, beta:', alpha, beta)
    print('Optimal objective function:', results.fun)
    halflife = -np.log(2) / np.log(1 - alpha)
    print('Half-life (years):', halflife / periodicity)

    #Compute mean reverting correlations
    nobs = len(InData)
    nsecs = len(InData[0])
    previousq = np.identity(nsecs)
    Rlong = np.corrcoef(InData.T)
    rmatrices = []
    for i in range(nobs):
        stdmtrx = np.diag([1 / np.sqrt(previousq[s, s]) for s in range(nsecs)])
        rmatrices.append(np.matmul(stdmtrx, np.matmul(previousq, stdmtrx)))
        shockvec = np.array(InData[i]).reshape(1, -1)  # row vector; np.mat is deprecated
        #Update q matrix
        shockmat = np.matmul(shockvec.T, shockvec)
        previousq = (1 - alpha -
                     beta) * Rlong + alpha * shockmat + beta * previousq

    #Plot mean-reverting correlations
    iccol = ['r', 'g', 'b']
    xtitle = 'Mean Reverting Correlations α=%1.5f' % alpha
    xtitle += ', β=%1.5f' % beta
    xtitle+=', '+min(df_logs.index.strftime("%Y-%m-%d"))+':'+ \
                 max(df_logs.index.strftime("%Y-%m-%d"))
    dates = df_logs.index
    stride = 5 * periodicity
    corr_matrix = df_logs[df_logs.columns].corr()
    plot_corrs(dates, rmatrices, corr_matrix, iccol, stride, xtitle)
Example 28
def _optimizer(obj_func, initial_theta, bounds, method):
    constraints = [
        optimize.LinearConstraint(np.eye(initial_theta.shape[0]),
                                  bounds[:, 0], bounds[:, 1])
    ]
    res = optimize.minimize(
        lambda theta: obj_func(theta=theta, eval_gradient=False),
        initial_theta,
        constraints=constraints,
        method=method,
        jac=lambda theta: obj_func(theta=theta, eval_gradient=True)[1],
        hess=optimize.BFGS(),
        options={'gtol': 1e-6})
    return res.x, res.fun
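
This matches the signature scikit-learn expects from a custom kernel-hyperparameter optimizer (a callable returning (theta_opt, func_min)); presumably it is wired in along these lines:

# Hypothetical usage, assuming scikit-learn:
from sklearn.gaussian_process import GaussianProcessRegressor

gpr = GaussianProcessRegressor(
    optimizer=lambda obj_func, initial_theta, bounds: _optimizer(
        obj_func, initial_theta, bounds, method='trust-constr'))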
Example 29
def _estimate_ar2_params(field_src,
                         field_dst,
                         estim_weights,
                         interp_weights,
                         num_workers=1):
    """Constrained optimization of AR(2) parameters."""
    def objf(p, *args):
        i = args[0]
        field_ar = p[0] * field_src[1] + p[1] * field_src[0]
        return np.nansum(estim_weights[i] * (field_dst - field_ar)**2.0)

    bounds = [(-1.98, 1.98), (-0.98, 0.98)]
    constraints = [
        opt.LinearConstraint(
            np.array([(1, 1), (-1, 1)]),
            (-np.inf, -np.inf),
            (0.98, 0.98),
            keep_feasible=True,
        )
    ]

    def worker(i):
        return opt.minimize(
            objf,
            (0.8, 0.0),
            method="trust-constr",
            bounds=bounds,
            constraints=constraints,
            args=(i, ),
        ).x

    if DASK_IMPORTED and num_workers > 1:
        res = []
        for i in range(len(estim_weights)):
            res.append(dask.delayed(worker)(i))

        psi = dask.compute(*res, num_workers=num_workers, scheduler="threads")
    else:
        psi = []
        for i in range(len(estim_weights)):
            psi.append(worker(i))

    psi_out = []
    for i in range(2):
        psi_out.append(
            np.sum([psi[j][i] * interp_weights[j] for j in range(len(psi))],
                   axis=0))

    return psi_out
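
The constraint matrix encodes a 0.98-margin version of the AR(2) stationarity triangle (p1 + p2 < 1 and p2 - p1 < 1, with the bounds capping |p1| and |p2|); a quick feasibility check of the starting point:

import numpy as np

A = np.array([(1, 1), (-1, 1)])
p0 = np.array([0.8, 0.0])  # the initial guess used by worker()
print(np.all(A @ p0 <= 0.98))  # True: the start is strictly feasible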