Example #1
def create(m, n):
    mu = 1
    rho = 1
    sigma = 0.1

    A = problem_util.normalized_data_matrix(m, n, mu)
    x0 = sp.rand(n, 1, rho)
    x0.data = np.random.randn(x0.nnz)
    x0 = x0.toarray().ravel()

    b = np.sign(A.dot(x0) + sigma*np.random.randn(m))
    A[b>0,:] += 0.7*np.tile([x0], (np.sum(b>0),1))
    A[b<0,:] -= 0.7*np.tile([x0], (np.sum(b<0),1))

    P = la.block_diag(np.random.randn(n-1,n-1), 0)

    lam = 1
    x = cp.Variable(A.shape[1])

    # Straightforward formulation w/ no constraints
    # TODO(mwytock): Fix compiler so this works
    z0 = 1 - sp.diags([b],[0])*A*x + cp.norm1(P.T*x)
    f_eval = lambda: (lam*cp.sum_squares(x) + cp.sum_entries(cp.max_elemwise(z0, 0))).value

    # Explicit epigraph constraint
    t = cp.Variable(1)
    z = 1 - sp.diags([b],[0])*A*x + t
    f = lam*cp.sum_squares(x) + cp.sum_entries(cp.max_elemwise(z, 0))
    C = [cp.norm1(P.T*x) <= t]
    return cp.Problem(cp.Minimize(f), C), f_eval
Example #2
    def solve_sparse(self, y, w, x_mask=None):
        assert y.ndim == 1 and w.ndim == 1 and y.shape == w.shape
        assert w.shape[0] == self.n

        if x_mask is None:
            # default: solve over all n coordinates
            x_mask = np.ones(self.n, dtype=bool)
        x = cp.Variable(np.count_nonzero(x_mask))

        term1 = cp.square(cp.norm(x-y[x_mask]))
        term2 = self.c * cp.norm1(cp.diag(w[x_mask]) * x)

        objective = cp.Minimize(term1 + term2)
        constraints = [cp.quad_form(x, self.B[np.ix_(x_mask, x_mask)]) <= 1]
        problem = cp.Problem(objective, constraints)

        result = problem.solve(solver=cp.SCS)
        if problem.status != cp.OPTIMAL:
            warnings.warn(problem.status)
        if problem.status not in (cp.OPTIMAL, cp.OPTIMAL_INACCURATE,
                                  cp.UNBOUNDED_INACCURATE):
            raise ValueError(problem.status)

        out = np.zeros(self.n)
        x = np.asarray(x.value).flatten()
        out[x_mask] = x
        return out
Example #3
def create(m, n):
    # Generate data
    X = np.hstack([np.random.randn(m,n), np.ones((m,1))])
    theta0 = np.random.randn(n+1)
    y = np.sign(X.dot(theta0) + 0.1*np.random.randn(m))
    X[y>0,:] += np.tile([theta0], (np.sum(y>0),1))
    X[y<0,:] -= np.tile([theta0], (np.sum(y<0),1))

    # Generate uncertainty envelope
    P = la.block_diag(np.random.randn(n,n), 0)
    lam = 1e-8
    theta = cp.Variable(n+1)

    # TODO(mwytock): write this as:
    # f = (lam/2*cp.sum_squares(theta) +
    #      problem_util.hinge(1 - y[:,np.newaxis]*X*theta+cp.norm1(P.T*theta)))

    # already in prox form
    t1 = cp.Variable(m)
    t2 = cp.Variable(1)
    z = cp.Variable(n+1)
    f = lam/2*cp.sum_squares(theta) + problem_util.hinge(1-t1)
    C = [t1 == y[:,np.newaxis]*X*theta - t2,
         cp.norm1(z) <= t2,
         P.T*theta == z]
    return cp.Problem(cp.Minimize(f), C)
Example #4
def create(**kwargs):
    A, B = problem_util.create_regression(**kwargs)
    lambda_max = np.abs(A.T.dot(B)).max()
    lam = 0.5*lambda_max

    X = cp.Variable(A.shape[1], B.shape[1] if len(B.shape) > 1 else 1)
    f = cp.sum_squares(A*X - B) + lam*cp.norm1(X)
    return cp.Problem(cp.Minimize(f))
Example #5
def create(m, n):
    np.random.seed(0)

    A = np.random.randn(m,n)
    x0 = sp.rand(n, 1, 0.1)
    b = A*x0

    x = cp.Variable(n)
    return cp.Problem(cp.Minimize(cp.norm1(x)), [A*x == b])
Example #6
def lasso_dense(n):
    m = 2*n
    A = np.random.randn(m, n)
    A /= np.sqrt(np.sum(A**2, 0))
    b = A*sp.rand(n, 1, 0.1) + 1e-2*np.random.randn(m,1)
    lam = 0.2*np.max(np.abs(A.T.dot(b)))

    x = cvx.Variable(n)
    f = cvx.sum_squares(A*x - b) + lam*cvx.norm1(x)
    return cvx.Problem(cvx.Minimize(f))
Example #7
def create(**kwargs):
    A, b = problem_util.create_classification(**kwargs)
    m = kwargs["m"]
    n = kwargs["n"]
    sigma = 0.05
    mu = kwargs.get("mu", 1)
    lam = 0.5*sigma*np.sqrt(m*np.log(mu*n))

    x = cp.Variable(A.shape[1])
    f =  ep.hinge_loss(x, A, b) + lam*cp.norm1(x)
    return cp.Problem(cp.Minimize(f))
Example #8
def create(**kwargs):
    A, b = problem_util.create_classification(**kwargs)

    ratio = float(np.sum(b==1)) / len(b)
    lambda_max = np.abs((1-ratio)*A[b==1,:].sum(axis=0) +
                        ratio*A[b==-1,:].sum(axis=0)).max()
    lam = 0.5*lambda_max

    x = cp.Variable(A.shape[1])
    f = ep.logistic_loss(x, A, b) + lam*cp.norm1(x)
    return cp.Problem(cp.Minimize(f))
Example #9
def create(m, n):
    np.random.seed(0)
    A = np.random.randn(m, n)
    A = A*sp.diags([1 / np.sqrt(np.sum(A**2, 0))], [0])
    b = A.dot(10*np.random.randn(n))

    k = max(m // 50, 1)
    idx = np.random.randint(0, m, k)
    b[idx] += 100*np.random.randn(k)

    x = cp.Variable(n)
    return cp.Problem(cp.Minimize(cp.norm1(A*x - b)))
Example #10
def lasso_conv(n):
    sigma = n/10
    c = np.exp(-np.arange(-n/2., n/2.)**2./(2*sigma**2))/np.sqrt(2*sigma**2*np.pi)
    c[c < 1e-4] = 0

    x0 = np.array(sp.rand(n, 1, 0.1).todense()).ravel()
    b = np.convolve(c, x0) + 1e-2*np.random.randn(2*n-1)
    lam = 0.2*np.max(np.abs(np.convolve(b, c, "valid")))
    print(lam)

    x = cvx.Variable(n)
    f = cvx.sum_squares(cvx.conv(c, x) - b) + lam*cvx.norm1(x)
    return cvx.Problem(cvx.Minimize(f))
Example #11
def lasso_sparse(n):
    m = 2*n
    A = sp.rand(m, n, 0.1)
    A.data = np.random.randn(A.nnz)
    N = A.copy()
    N.data = N.data**2
    A = A*sp.diags([1 / np.sqrt(np.ravel(N.sum(axis=0)))], [0])

    b = A*sp.rand(n, 1, 0.1) + 1e-2*np.random.randn(m,1)
    lam = 0.2*np.max(np.abs(A.T*b))

    x = cvx.Variable(n)
    f = cvx.sum_squares(A*x - b) + lam*cvx.norm1(x)
    return cvx.Problem(cvx.Minimize(f))
Example #12
def create(n, data):
    np.random.seed(0)

    X, y = load_data(data)
    X = random_features(X, n)
    Y = one_hot_encoding(y)
    n = X.shape[1]
    k = Y.shape[1]
    lam = 0.1

    # TODO(mwytock): Use softmax here
    Theta = cp.Variable(n, k)
    f = cp.sum_squares(X*Theta - Y) + lam*cp.norm1(Theta)
    return cp.Problem(cp.Minimize(f))
Example #13
def create(n):
    np.random.seed(0)
    k = max(int(np.sqrt(n)/2), 1)

    x0 = np.ones((n,1))
    idxs = np.random.randint(0, n, (k,2))
    idxs.sort()
    for a, b in idxs:
        x0[a:b] += 10*(np.random.rand()-0.5)
    b = x0 + np.random.randn(n, 1)

    lam = np.sqrt(n)
    x = cp.Variable(n)
    f = 0.5*cp.sum_squares(x-b) + lam*cp.norm1(x[1:]-x[:-1])

    return cp.Problem(cp.Minimize(f))
Example #14
    def solve(self, y, w):
        assert y.ndim == 1 and w.ndim == 1 and y.shape == w.shape
        assert w.shape[0] == self.n

        x = cp.Variable(self.n)

        term1 = x.T*y - self.c*cp.norm1(cp.diag(w) * x)
        constraints = [cp.quad_form(x, self.B) <= 1]
        problem = cp.Problem(cp.Maximize(term1), constraints)

        result = problem.solve(solver=cp.SCS)
        if problem.status != cp.OPTIMAL:
            warnings.warn(problem.status)
        if problem.status not in (cp.OPTIMAL, cp.OPTIMAL_INACCURATE):
            raise ValueError(problem.status)
        return np.asarray(x.value).flatten()
Example #15
def create(n, r=10, density=0.1):
    np.random.seed(0)

    L1 = np.random.randn(n, r)
    L2 = np.random.randn(r, n)
    L0 = L1.dot(L2)

    S0 = sp.rand(n, n, density)
    S0.data = 10 * np.random.randn(len(S0.data))
    M = L0 + S0
    lam = 0.1

    L = cp.Variable(n, n)
    S = cp.Variable(n, n)
    f = cp.norm(L, "nuc") + lam * cp.norm1(S)
    C = [L + S == M]

    return cp.Problem(cp.Minimize(f), C)
Example #16
def create(m, n, lam):
    np.random.seed(0)

    m = int(m)
    n = int(n)
    lam = float(lam)

    A = sp.rand(n,n, 0.01)
    A = np.asarray(A.T.dot(A).todense() + 0.1*np.eye(n))
    L = np.linalg.cholesky(np.linalg.inv(A))
    X = np.random.randn(m,n).dot(L.T)
    S = X.T.dot(X)/m
    W = np.ones((n,n)) - np.eye(n)

    Theta = cp.Variable(n,n)
    return cp.Problem(cp.Minimize(
        lam*cp.norm1(cp.mul_elemwise(W,Theta)) +
        cp.sum_entries(cp.mul_elemwise(S,Theta)) -
        cp.log_det(Theta)))
Example #17
    def test_norm1(self):
        """Test L1 norm prox fn.
        """
        # No modifiers.
        tmp = Variable(10)
        fn = norm1(tmp)
        rho = 1
        v = np.arange(10) * 1.0 - 5.0
        x = fn.prox(rho, v.copy())
        self.assertItemsAlmostEqual(x, np.sign(v) * np.maximum(np.abs(v) - 1.0 / rho, 0))

        rho = 2
        x = fn.prox(rho, v.copy())
        self.assertItemsAlmostEqual(x, np.sign(v) * np.maximum(np.abs(v) - 1.0 / rho, 0))

        # With modifiers.
        mod_fn = norm1(tmp, alpha=0.1, beta=5,
                       c=np.ones(10) * 1.0, b=np.ones(10) * -1.0, gamma=4)

        rho = 2
        v = np.arange(10) * 1.0
        x = mod_fn.prox(rho, v.copy())

        # vhat = mod_fn.beta*(v - mod_fn.c/rho)*rho/(rho+2*mod_fn.gamma) - mod_fn.b
        # rho_hat = rho/(mod_fn.alpha*mod_fn.beta**2)
        # xhat = fn.prox(rho_hat, vhat)
        x_var = cvx.Variable(10)
        cost = 0.1 * cvx.norm1(5 * x_var + np.ones(10)) + np.ones(10).T * x_var + \
            4 * cvx.sum_squares(x_var) + (rho / 2) * cvx.sum_squares(x_var - v)
        prob = cvx.Problem(cvx.Minimize(cost))
        prob.solve()

        self.assertItemsAlmostEqual(x, x_var.value, places=3)

        # With weights.
        tmp = Variable(10)
        v = np.arange(10) * 1.0 - 5.0
        fn = weighted_norm1(tmp, -v + 1)
        rho = 2
        x = fn.prox(rho, v)
        self.assertItemsAlmostEqual(x, np.sign(v) *
                                    np.maximum(np.abs(v) - np.abs(-v + 1) / rho, 0))
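The closed form asserted above is plain soft-thresholding. As a minimal standalone sketch (assuming only numpy and cvxpy; the variable names here are invented), the same prox can be checked directly against the optimization problem it abbreviates:

import numpy as np
import cvxpy as cvx

rho = 2.0
v = np.arange(10) * 1.0 - 5.0
# prox of ||.||_1 at v with penalty rho: soft-threshold at 1/rho
x_closed = np.sign(v) * np.maximum(np.abs(v) - 1.0 / rho, 0)

# the same prox written out as a CVXPY problem
x_var = cvx.Variable(10)
prob = cvx.Problem(cvx.Minimize(cvx.norm1(x_var) + (rho / 2) * cvx.sum_squares(x_var - v)))
prob.solve()
assert np.allclose(x_closed, x_var.value, atol=1e-4)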
Example #18
    def solve_sparse(self, y, w, x_mask):
        assert y.ndim == 1 and w.ndim == 1 and y.shape == w.shape
        assert w.shape[0] == self.n

        x = cp.Variable(np.count_nonzero(x_mask))

        term1 = x.T*y[x_mask] - self.c*cp.norm1(cp.diag(w[x_mask]) * x)
        constraints = [cp.quad_form(x, self.B[np.ix_(x_mask, x_mask)]) <= 1]
        problem = cp.Problem(cp.Maximize(term1), constraints)

        result = problem.solve(solver=cp.SCS)
        if problem.status != cp.OPTIMAL:
            warnings.warn(problem.status)
        if problem.status not in (cp.OPTIMAL, cp.OPTIMAL_INACCURATE):
            raise ValueError(problem.status)

        out = np.zeros(self.n)
        x = np.asarray(x.value).flatten()
        out[x_mask] = x
        return out
Example #19
def glrd_sparse(V, G, F, r, err_V, err_F):
    # sparsity threshold is num_nodes / num_roles
    for k in range(r):
        G_copy = np.copy(G)  # create local copies for excluding the k^th col and row of G and F resp.
        F_copy = np.copy(F)
        G_copy[:, k] = 0.0
        F_copy[k, :] = 0.0

        R = V - np.dot(G_copy, F_copy)  # compute residual

        # Solve for optimal G(.)(k) with sparsity constraints
        F_k = F[k, :]
        x_star_G = linalg.lstsq(R.T, F_k.T)[0].T
        x_G = cvx.Variable(x_star_G.shape[0])
        objective_G = cvx.Minimize(cvx.norm2(x_star_G - x_G))
        constraints_G = [x_G >= 0]
        constraints_G += [cvx.norm1(x_G) <= err_V]
        prob_G = cvx.Problem(objective_G, constraints_G)
        result = prob_G.solve(solver='SCS')
        if not np.isinf(result):
            G_k_min = np.asarray(x_G.value).ravel()
            G[:, k] = G_k_min
        else:
            print(result)

        # Solve for optimal F(k)(.) with sparsity constraints
        G_k = G[:, k]
        x_star_F = linalg.lstsq(R, G_k)[0]
        x_F = cvx.Variable(x_star_F.shape[0])
        objective_F = cvx.Minimize(cvx.norm2(x_star_F - x_F))
        constraints_F = [x_F >= 0]
        constraints_F += [cvx.sum_entries(x_F) <= err_F]
        prob_F = cvx.Problem(objective_F, constraints_F)
        result = prob_F.solve(solver='SCS')
        if not np.isinf(result):
            F_k_min = np.asarray(x_F.value).ravel()
            F[k, :] = F_k_min
        else:
            print(result)

    return G, F
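A hypothetical smoke test for glrd_sparse, assuming a small random nonnegative factorization V = G0 F0 (all data invented for illustration, and the function's own imports in scope):

import numpy as np

np.random.seed(0)
n, r = 20, 3
G0 = np.abs(np.random.randn(n, r))
F0 = np.abs(np.random.randn(r, n))
V = G0.dot(F0)
# sparsity budgets follow the num_nodes / num_roles comment above
G_s, F_s = glrd_sparse(V, G0.copy(), F0.copy(), r, err_V=n / r, err_F=n / r)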
Example #20
A = sps.rand(n,n, 0.01)
A = np.asarray(A.T.dot(A).todense() + 0.1*np.eye(n))
L = np.linalg.cholesky(np.linalg.inv(A))
X = np.random.randn(m,n).dot(L.T)
S = X.T.dot(X)/m
W = np.ones((n,n)) - np.eye(n)




# Problem construction

Theta = cp.Variable(n,n)
prob = cp.Problem(cp.Minimize(
        lam*cp.norm1(cp.mul_elemwise(W,Theta)) +
        cp.sum_entries(cp.mul_elemwise(S,Theta)) -
        cp.log_det(Theta)))


# Problem collection

# Single problem collection
problemDict = {
    "problemID" : problemID,
    "problem"   : prob,
    "opt_val"   : opt_val
}
problems = [problemDict]

Example #21
    def fit(self, x, y, loss = 'l2',  interactions = False, m = 1, constraints = None, \
            verbose = False):

        n_samples, n_features = x.shape
        n_coeffs = n_features * self.deg + 1
        designmatrix = self.build_designmatrix(x, interactions=interactions)
        n_coeffs = designmatrix.shape[1]
        column_norms_designmatrix = self.column_norms(designmatrix)
        designmatrix = designmatrix / column_norms_designmatrix

        coeffs = cv.Variable(n_coeffs)

        #calculate residuals
        residuals = designmatrix @ coeffs - y

        #define loss function

        loss_options = {
            'l2': cv.sum_squares(residuals),
            'l1': cv.norm1(residuals),
            'huber': cv.sum(cv.huber(residuals, m))
        }

        data_term = loss_options[loss]

        if self.regularization is not None:

            regularization_options = {
                'l2': cv.pnorm(coeffs, 2, axis=0)**2,
                'l1': cv.norm1(coeffs)
            }

            regularization_term = regularization_options[self.regularization]

            objective = cv.Minimize(data_term + self.lam * regularization_term)

        else:

            objective = cv.Minimize(data_term)

        #build constraints

        constraint_list = []

        if constraints is not None:

            #loop over all features

            for feature_index in constraints:

                Feature_constraints = constraints[feature_index]

                xvals_feature = x[:, feature_index]
                coefficient_index = feature_index * self.deg + 1
                feature_coefficients = coeffs[
                    coefficient_index:coefficient_index + self.deg]

                if Feature_constraints.sign == 'positive':

                    constraint_list.append(feature_coefficients >= 0)

                elif Feature_constraints.sign == 'negative':

                    constraint_list.append(feature_coefficients <= 0)

                monotonic = Feature_constraints.monotonicity is not None
                strict_curvature = Feature_constraints.curvature is not None

                if monotonic or strict_curvature:

                    if Feature_constraints.constraint_range is None:

                        constraint_min = np.amin(xvals_feature)
                        constraint_max = np.amax(xvals_feature)
                        Feature_constraints.constraint_range = [
                            constraint_min, constraint_max
                        ]

                    constraints_grid = np.linspace(Feature_constraints.constraint_range[0], \
                        Feature_constraints.constraint_range[1], num=Feature_constraints.gridpoints)

                if monotonic:

                    vander_grad = self.vander_grad(constraints_grid)[:, 1:]
                    norms = column_norms_designmatrix[
                        coefficient_index:coefficient_index + self.deg]
                    vander_grad = vander_grad / norms

                    if Feature_constraints.monotonicity == 'inc':

                        constraint_list.append(
                            vander_grad @ feature_coefficients >= 0)

                    elif Feature_constraints.monotonicity == 'dec':

                        constraint_list.append(
                            vander_grad @ feature_coefficients <= 0)

                if strict_curvature:

                    vander_hesse = self.vander_hesse(constraints_grid)[:, 1:]
                    norms = column_norms_designmatrix[
                        coefficient_index:coefficient_index + self.deg]
                    vander_hesse = vander_hesse / norms

                    if Feature_constraints.curvature == 'convex':

                        constraint_list.append(
                            vander_hesse @ feature_coefficients >= 0)

                    elif Feature_constraints.curvature == 'concave':

                        constraint_list.append(
                            vander_hesse @ feature_coefficients <= 0)

        #set up cvxpy problem

        problem = cv.Problem(objective, constraints=constraint_list)

        try:

            if loss == 'l1' or self.regularization == 'l2':
                #l1 loss solved by ECOS. Lower its tolerances for convergence
                problem.solve(abstol=1e-8, reltol=1e-8, max_iters=1000000, \
                              feastol=1e-8, abstol_inacc = 1e-7, \
                                  reltol_inacc=1e-7, verbose = verbose)

            else:

                #l2 and huber losses solved by OSQP. Lower its tolerances for convergence
                problem.solve(eps_abs=1e-8, eps_rel=1e-8, max_iter=10000000, \
                              eps_prim_inf = 1e-9, eps_dual_inf = 1e-9, verbose = verbose, \
                                  adaptive_rho = True)

        #in case OSQP or ECOS fail, use SCS
        except cv.SolverError:

            try:

                problem.solve(solver=cv.SCS,
                              max_iters=100000,
                              eps=1e-4,
                              verbose=verbose)

            except cv.SolverError:

                print("cvxpy optimization failed!")

        #if optimal solution found, set parameters

        if problem.status == 'optimal':

            coefficients = coeffs.value / column_norms_designmatrix

            self.coeffs_ = coefficients

        #if not try SCS optimization
        else:

            try:

                problem.solve(solver=cv.SCS,
                              max_iters=100000,
                              eps=1e-6,
                              verbose=verbose)

            except cv.SolverError:

                pass

        if problem.status == 'optimal':

            coefficients = coeffs.value / column_norms_designmatrix

            self.coeffs_ = coefficients

        else:

            print("CVXPY optimization failed")

        return self
Example #22
def lasso_sure_cvxpy(X, y, alpha, sigma, random_state=42):
    # lambda_alpha = [alpha, alpha]
    n_samples, n_features = X.shape
    epsilon = 2 * sigma / n_samples**0.3
    rng = check_random_state(random_state)
    delta = rng.randn(n_samples)

    y2 = y + epsilon * delta
    Xth, yth, y2th, deltath = map(torch.from_numpy, [X, y, y2, delta])

    # set up variables and parameters
    beta_cp = cp.Variable(n_features)
    lambda_cp = cp.Parameter(nonneg=True)

    # set up objective
    loss = ((1 / (2 * n_samples)) * cp.sum(cp.square(Xth @ beta_cp - yth)))
    reg = lambda_cp * cp.norm1(beta_cp)
    objective = loss + reg

    # define problem
    problem1 = cp.Problem(cp.Minimize(objective))
    assert problem1.is_dpp()

    # solve problem1
    layer = CvxpyLayer(problem1, [lambda_cp], [beta_cp])
    alpha_th1 = torch.tensor(alpha, requires_grad=True)
    beta1, = layer(alpha_th1)

    # get the test loss and its gradient
    test_loss1 = (Xth @ beta1 - yth).pow(2).sum()
    test_loss1 -= 2 * sigma**2 / epsilon * (Xth @ beta1) @ deltath
    test_loss1.backward()
    val1 = test_loss1.detach().numpy()
    grad1 = np.array(alpha_th1.grad)

    # set up variables and parameters
    beta_cp = cp.Variable(n_features)
    lambda_cp = cp.Parameter(nonneg=True)

    # set up objective
    loss = ((1 / (2 * n_samples)) * cp.sum(cp.square(Xth @ beta_cp - y2th)))
    reg = lambda_cp * cp.norm1(beta_cp)
    objective = loss + reg

    # define problem
    problem2 = cp.Problem(cp.Minimize(objective))
    assert problem2.is_dpp()

    # solve problem2
    layer = CvxpyLayer(problem2, [lambda_cp], [beta_cp])
    alpha_th2 = torch.tensor(alpha, requires_grad=True)
    beta2, = layer(alpha_th2)

    # get the test loss and its gradient
    test_loss2 = 2 * sigma**2 / epsilon * (Xth @ beta2) @ deltath
    test_loss2.backward()
    val2 = test_loss2.detach().numpy()
    grad2 = np.array(alpha_th2.grad)

    val = val1 + val2 - len(y) * sigma**2
    grad = grad1 + grad2
    return val, grad
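A hypothetical smoke test, assuming NumPy, PyTorch, scikit-learn (for check_random_state), and cvxpylayers are all importable; the data is invented for illustration:

import numpy as np

np.random.seed(0)
X = np.random.randn(20, 5)
y = X.dot(np.random.randn(5)) + 0.1 * np.random.randn(20)
val, grad = lasso_sure_cvxpy(X, y, alpha=0.1, sigma=0.1)
print(val, grad)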
Example #23
File: covsel_4.py (Project: paob/scs)
lam = float(0.1)

import scipy.sparse as sps

A = sps.rand(n, n, 0.01)
A = np.asarray(A.T.dot(A).todense() + 0.1 * np.eye(n))
L = np.linalg.cholesky(np.linalg.inv(A))
X = np.random.randn(m, n).dot(L.T)
S = X.T.dot(X) / m
W = np.ones((n, n)) - np.eye(n)

# Problem construction

Theta = cp.Variable(n, n)
prob = cp.Problem(
    cp.Minimize(lam * cp.norm1(cp.mul_elemwise(W, Theta)) +
                cp.sum_entries(cp.mul_elemwise(S, Theta)) - cp.log_det(Theta)))

# Problem collection

# Single problem collection
problemDict = {"problemID": problemID, "problem": prob, "opt_val": opt_val}
problems = [problemDict]

# For debugging individual problems:
if __name__ == "__main__":

    def printResults(problemID="", problem=None, opt_val=None):
        print(problemID)
        problem.solve()
        print("\tstatus: {}".format(problem.status))
Example #24
import cvxpy as cp
import pandas as pd

#%%
u = ut.get_cvx_utility()

#%%
t = get_data()
r = get_target_return(t, months=3)
r = r.fillna(0)  # To check: is this actually necessary?

#%%
n, p = r.shape
w_cp = cp.Variable(p)
λ = 1
reg = λ * cp.norm1(w_cp)
objective = cp.Maximize(1 / n * cp.sum(u(r.values @ w_cp)) - reg)
constraints = [w_cp >= 0, cp.sum(w_cp) == 1]
prob = cp.Problem(objective, constraints)
result = prob.solve(solver="ECOS", verbose=True)

# %%
w = pd.Series(w_cp.value, index=t.columns)
w = w[w >= 0.01]  # keep only holdings with a weight >= 1%
w.sort_values(ascending=False).plot.bar()


# %%
w.index = w.index.map(fund_name)
w.sort_values(ascending=False)
Example #25
def total_variation_plus_seasonal_quantile_filter(signal,
                                                  use_ixs=None,
                                                  tau=0.995,
                                                  c1=1e3,
                                                  c2=1e2,
                                                  c3=1e2,
                                                  solver='ECOS',
                                                  residual_weights=None,
                                                  tv_weights=None):
    '''
    This performs total variation filtering with the addition of a seasonal baseline fit. This introduces a new
    signal to the model that is smooth and periodic on a yearly time frame. This does a better job of describing real,
    multi-year solar PV power data sets, and therefore does an improved job of estimating the discretely changing
    signal.

    :param signal: A 1d numpy array (must support boolean indexing) containing the signal of interest
    :param tau: Quantile level for the residual (pinball) penalty
    :param c1: The regularization parameter to control the total variation in the final output signal
    :param c2: The regularization parameter to control the smoothness of the seasonal signal
    :param c3: The regularization parameter on the year-over-year seasonal drift term
    :return: A tuple of 1d numpy arrays: the filtered signal and the fitted seasonal component
    '''
    n = len(signal)
    if residual_weights is None:
        residual_weights = np.ones_like(signal)
    if tv_weights is None:
        tv_weights = np.ones(len(signal) - 1)
    if use_ixs is None:
        use_ixs = np.ones(n, dtype=bool)
    # selected_days = np.arange(n)[index_set]
    # np.random.shuffle(selected_days)
    # ix = 2 * n // 3
    # train = selected_days[:ix]
    # validate = selected_days[ix:]
    # train.sort()
    # validate.sort()

    s_hat = cvx.Variable(n)
    s_seas = cvx.Variable(max(n, 366))
    s_error = cvx.Variable(n)
    s_linear = cvx.Variable(n)
    c1 = cvx.Parameter(value=c1, nonneg=True)
    c2 = cvx.Parameter(value=c2, nonneg=True)
    c3 = cvx.Parameter(value=c3, nonneg=True)
    tau = cvx.Parameter(value=tau)
    # w = len(signal) / np.sum(index_set)
    beta = cvx.Variable()
    objective = cvx.Minimize(
        # (365 * 3 / len(signal)) * w * cvx.sum(0.5 * cvx.abs(s_error) + (tau - 0.5) * s_error)
        2 * cvx.sum(0.5 * cvx.abs(cvx.multiply(residual_weights, s_error)) +
                    (tau - 0.5) * cvx.multiply(residual_weights, s_error)) +
        c1 * cvx.norm1(cvx.multiply(tv_weights, cvx.diff(s_hat, k=1))) +
        c2 * cvx.norm(cvx.diff(s_seas, k=2)) + c3 * beta**2)
    constraints = [
        signal[use_ixs] == s_hat[use_ixs] + s_seas[:n][use_ixs] +
        s_error[use_ixs],
        cvx.sum(s_seas[:365]) == 0
    ]
    constraints.append(s_seas[365:] - s_seas[:-365] == beta)
    constraints.extend([beta <= 0.01, beta >= -0.1])
    problem = cvx.Problem(objective=objective, constraints=constraints)
    problem.solve(solver=solver)
    return s_hat.value, s_seas.value[:n]
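A minimal synthetic run of the filter above; the signal is invented for illustration, and an open-source conic solver such as ECOS is assumed to be installed:

import numpy as np

np.random.seed(0)
t = np.arange(730)
sig = np.sin(2 * np.pi * t / 365) + np.where(t > 400, 1.0, 0.0) + 0.1 * np.random.randn(730)
s_hat, s_seas = total_variation_plus_seasonal_quantile_filter(sig, solver='ECOS')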
Example #26
    def solve(self, data):
        is_labeled_train = data.is_train & data.is_labeled
        x = data.x[is_labeled_train, :]
        #self.transform.with_mean = False
        #self.transform.with_std = False
        x = self.transform.fit_transform(x)
        y = data.y[is_labeled_train]
        n = x.shape[0]
        p = x.shape[1]

        C = self.C
        C2 = self.C2
        C3 = self.C3
        #C = .001
        #C2 = 0
        use_nonneg_ridge = self.method == MixedFeatureGuidanceMethod.METHOD_RIDGE and self.use_nonneg
        if self.method == MixedFeatureGuidanceMethod.METHOD_ORACLE:
            assert False, 'Update this'
            #Refit with standardized data to clear transform
            #Is there a better way of doing this?
            self.transform.fit_transform(x)
            self.w = data.metadata['true_w']
            self.b = 0
            return
        elif self.method in {MixedFeatureGuidanceMethod.METHOD_RELATIVE, MixedFeatureGuidanceMethod.METHOD_HARD_CONSTRAINT}\
                or use_nonneg_ridge:
            opt_data = optimize_data(x, y, C, C2, C3)
            '''
            opt_data.pairs = [
                (0, 9),
                (1, 8),
                (2, 7),
                (3, 6)
            ]
            '''
            opt_data.pairs = list()
            constraints = list()
            pairs = self.pairs
            feats_to_constraint = self.feats_to_constrain
            true_w = data.metadata['true_w']
            if self.method == MixedFeatureGuidanceMethod.METHOD_HARD_CONSTRAINT:
                assert not self.use_corr
                constraints = list()

                for j, k in pairs:
                    constraints.append({
                        'fun': lambda w, j=j, k=k: w[j] - w[k],
                        'type': 'ineq'
                    })
                #for i in range(num_signs):
                #    j = np.random.choice(p)

                for j in feats_to_constraint:
                    fun = lambda w, j=j: w[j]*np.sign(true_w[j])
                    constraints.append({
                        'fun': fun,
                        'type': 'ineq',
                        'idx': j
                    })
            else:
                opt_data.pairs = pairs


            if self.method == MixedFeatureGuidanceMethod.METHOD_RELATIVE or use_nonneg_ridge:
                assert len(feats_to_constraint) == 0 or len(pairs) == 0
                w = cvx.Variable(p)
                b = cvx.Variable(1)
                z = cvx.Variable(len(feats_to_constraint) + len(pairs))
                loss = cvx.sum_entries(
                    cvx.power(
                        x*w + b - y,
                        2
                    )
                )
                loss /= n
                constraints = list()
                idx = 0
                corr = data.metadata['corr']
                if self.method != MixedFeatureGuidanceMethod.METHOD_RIDGE:
                    for j, k in pairs:
                        if self.use_corr:
                            if corr[j] > corr[k]:
                                constraints.append(w[j] - w[k] + z[idx] >= 0)
                            else:
                                constraints.append(w[k] - w[j] + z[idx] >= 0)
                        else:
                            constraints.append(w[j] - w[k] + z[idx] >= 0)
                        idx += 1
                    for j in feats_to_constraint:
                        assert not self.use_nonneg
                        if self.use_corr:
                            constraints.append(w[j] * np.sign(corr[j]) + z[idx] >= 0)
                        else:
                            constraints.append(w[j]*np.sign(true_w[j]) + z[idx] >= 0)
                        idx += 1
                reg = cvx.norm2(w) ** 2
                if self.use_l1:
                    reg_guidance = cvx.norm1(z)
                else:
                    reg_guidance = cvx.norm2(z) ** 2
                if np.isinf(C2):
                    constraints = []
                    C2 = 0
                constraints.append(z >= 0)
                if self.use_nonneg:
                    constraints.append(w >= 0)
                obj = cvx.Minimize(loss + C*reg + C2*reg_guidance)
                prob = cvx.Problem(obj, constraints)
                try:
                    #prob.solve(solver='SCS')
                    prob.solve(solver=self.cvx_method)
                    assert w.value is not None
                    self.w = np.squeeze(np.asarray(w.value))
                except:
                    self.w = np.zeros(p)
                    '''
                    if not self.running_cv:
                        assert False, 'Failed to converge when done with CV!'
                    '''
                '''
                if b.value is not None:
                    assert abs(b.value - y.mean())/abs(b.value) <= 1e-3
                '''
            else:
                assert not self.use_corr
                eval_func = lambda a: MixedFeatureGuidanceMethod.eval(opt_data, a)

                w0 = np.zeros(p)
                options = dict()
                options['maxiter'] = 1000
                options['disp'] = False
                bounds = [(None, None)] * p

                '''
                w1 = optimize.minimize(
                    eval_func,
                    a0,
                    method=self.configs.scipy_opt_method,
                    jac=None,
                    options=options,
                    bounds = bounds,
                    constraints=constraints
                ).x
                '''
                if self.method == MixedFeatureGuidanceMethod.METHOD_ORACLE_SPARSITY:
                    assert False



                #with Capturing() as output:
                results = optimize.minimize(
                    eval_func,
                    w0,
                    method=self.configs.scipy_opt_method,
                    jac=None,
                    options=options,
                    bounds = bounds,
                    constraints=constraints
                )
                w2 = results.x
                self.w = np.asarray(results.x)
        else:
            #assert not self.use_corr
            self.w = self.solve_w(x, y, C)
        self.b = y.mean()
        if not self.running_cv:
            try:
                print(prob.status)
            except:
                pass
        if not self.running_cv and self.method != MixedFeatureGuidanceMethod.METHOD_RIDGE:
            w2 = self.solve_w(x,y,C)
            true_w = data.metadata['true_w']
            err1 = array_functions.normalized_error(self.w, true_w)
            err2 = array_functions.normalized_error(w2, true_w)
            print(err1 - err2)
            c2 = deepcopy(self.configs)
            c2.method = MixedFeatureGuidanceMethod.METHOD_RIDGE
            t2 = MixedFeatureGuidanceMethod(c2)
            t2.quiet = True
            t2.train_and_test(data)
            w = self.w
            w2 = t2.w
            pass
        #print self.w
        self.true_w = data.metadata['true_w']
        pass
Example #27
def compile_lrtop(data):
    """
    Create the local rendezvous trajectory optimization problem (LRTOP).

    Parameters
    ----------
    data : dict
        Problem data.
    """
    csm = data['csm']
    N = data['N']
    p_traj = data['state_traj_init'][:, :3]
    v_traj = data['state_traj_init'][:, 3:6]
    w_traj = data['state_traj_init'][:, 10:13]

    state_init = np.concatenate(
        [data['x0']['p'], data['x0']['v'], data['x0']['q'], data['x0']['w']])
    state_final = np.concatenate(
        [data['xf']['p'], data['xf']['v'], data['xf']['q'], data['xf']['w']])

    # Compute scaling terms
    # state (=P_x*state_scaled+p_x)
    p_center = np.mean(p_traj, axis=0)
    v_center = np.mean(v_traj, axis=0)
    q_center = np.zeros(4)
    w_center = np.mean(w_traj, axis=0)
    p_box = tools.bounding_box(p_traj - p_center)
    v_box = tools.bounding_box(v_traj - v_center)
    q_box = np.ones(4)
    w_box = tools.bounding_box(w_traj - w_center)
    p_x = np.concatenate([p_center, v_center, q_center, w_center])
    P_x = np.diag(np.concatenate([p_box, v_box, q_box, w_box]))
    P_x_inv = la.inv(P_x)
    data['scale_x'] = lambda x: P_x_inv.dot(x - p_x)
    # virtual control (=P_v*vc_scaled)
    p_box = tools.bounding_box(p_traj)
    v_box = tools.bounding_box(v_traj)
    q_box = np.ones(4)
    w_box = tools.bounding_box(w_traj)
    P_v = np.diag(np.concatenate([p_box, v_box, q_box, w_box]))
    # input (=P_u*input_scaled+p_u)
    p_u = np.array([0.5 * data['t_pulse_max'] for i in range(csm.M)])
    P_u = np.diag(p_u)
    # fuel (impulse) cost (=P_xi*xi_scaled+p_xi)
    # Heuristic: compute as having a quarter of the thrusters active at minimum
    # pulse width, the whole time
    mean_active_thruster_count = 0.25 * csm.M
    mean_pulse_width = csm.t_pulse_min
    p_xi = 0.
    P_xi = mean_active_thruster_count * mean_pulse_width * N

    # General quantities
    data['t_grid'] = np.linspace(0., data['t_f'],
                                 N + 1)  # discretization time grid
    n_x = data['state_traj_init'].shape[1]
    n_u = csm.M
    e = -tools.rotate(np.array([1., 0., 0.]),
                      csm.q_dock)  # dock axis in LM frame
    I_e = tools.rotate(e, data['lm']['q'])  # dock axis in inertial frame
    data['dock_axis'] = I_e

    # Optimization variables
    # non-dimensionalized
    x_hat = [cvx.Variable(n_x) for k in range(N + 1)]
    v_hat = [cvx.Variable(n_x) for k in range(N + 1)]
    xi_hat = cvx.Variable(N + 1)
    u_hat = [cvx.Variable(n_u) for k in range(N)]
    eta_hat = cvx.Variable(N)  # quadratic trust region size
    # dimensionalized (physical units)
    x = [P_x * x_hat[k] + p_x for k in range(N + 1)]  # unscaled state
    xi = P_xi * xi_hat + p_xi
    u = [P_u * u_hat[k] + p_u for k in range(N)]  # unscaled control
    v = [P_v * v_hat[k] for k in range(N)]  # virtual control
    data['lrtop_var'] = dict(x=x, u=u, xi=xi, v=v)

    # Optimization parameters
    A = [cvx.Parameter((n_x, n_x)) for k in range(N)]
    B = [cvx.Parameter((n_x, n_u)) for k in range(N)]
    r = [cvx.Parameter(n_x) for k in range(N)]
    u_lb = [cvx.Parameter(n_u, value=np.zeros(n_u)) for k in range(N)]
    stc_lb = [cvx.Parameter(n_u) for k in range(N)]
    stc_q = cvx.Parameter(N + 1)
    x_prev_hat = [cvx.Parameter(n_x) for k in range(N + 1)]
    data['lrtop_par'] = dict(A=A,
                             B=B,
                             r=r,
                             u_lb=u_lb,
                             stc_lb=stc_lb,
                             stc_q=stc_q,
                             x_prev_hat=x_prev_hat)

    # Cost
    # minimum-impulse
    J = xi_hat[-1]
    # trust region penalty
    J_tr = data['w_tr'] * sum(eta_hat)
    # virtual control penalty
    J_vc = data['w_vc'] * sum([cvx.norm1(v_hat[k]) for k in range(N)])
    data['lrtop_cost'] = dict(J=J, J_tr=J_tr, J_vc=J_vc)
    cost = cvx.Minimize(J + J_tr + J_vc)

    # constraints
    constraints = []
    constraints += [
        x[k + 1] == A[k] * x[k] + B[k] * u[k] + r[k] + v[k] for k in range(N)
    ]
    constraints += [xi[k + 1] == xi[k] + sum(u[k]) for k in range(N)]
    constraints += [x[0] == state_init, x[-1] == state_final, xi[0] == 0]
    constraints += [(x[k][:3] - data['lm']['p']).T * I_e >=
                    cvx.norm(x[k][:3] - data['lm']['p']) *
                    np.cos(np.deg2rad(data['gamma'])) for k in range(N + 1)]
    constraints += [u[k] <= data['t_pulse_max'] for k in range(N)]
    constraints += [u[k] >= u_lb[k] for k in range(N)]
    constraints += [
        u[k][i] == 0. for k in range(N) for i in range(n_u)
        if i in data['thrusters_off']
    ]
    constraints += [
        cvx.quad_form(x_hat[k + 1] - x_prev_hat[k + 1], np.eye(n_x)) <=
        eta_hat[k] for k in range(N)
    ]
    constraints += [
        stc_lb[k][i] * u[k][i] == 0 for k in range(N) for i in range(n_u)
    ]
    constraints += [
        stc_q[k] * u[k][i] == 0 for k in range(N) for i in range(n_u)
        if 'p_f' in csm.i2thruster[i]
    ]
    constraints += [
        stc_q[k + 1] *
        (tools.rqpmat(data['xf']['q'])[0].dot(np.diag([1, -1, -1, -1])) *
         x[k][6:10] - np.cos(0.5 * np.deg2rad(data['ang_app']))) <= 0
        for k in range(N)
    ]

    data['lrtop'] = cvx.Problem(cost, constraints)
Example #28
def modalityRegularizer(beta):
    return cp.norm1(beta)
Example #29
import numpy as np
import cvxpy as cp
import matplotlib.pyplot as plt

# generate a problem instance
n = 50
d = 1000
np.random.seed(42)
A = np.random.randn(n, d)
b = np.random.randn(n)
R = 5
L = np.sqrt(d)  # Lipschitz constant of the l1 norm
DELTA = 1e-12
x1 = np.random.rand(d) * 2 - 1  # starting point

# compute optimal value by solving a problem using cvxpy
x = cp.Variable(d)
prob = cp.Problem(objective=cp.Minimize(cp.norm1(x)),
                  constraints=[cp.norm(x - x1) <= R])
prob.solve()
f_opt = prob.value
x_opt = x.value

# run projected subgradient method with decreasing step size
x = x1.copy()
MAX_ITERS = 3000
xs = [x]
for k in range(1, MAX_ITERS):
    g = np.zeros(x.shape[0])
    g[np.where(x > +DELTA)] = +1
    g[np.where(x < -DELTA)] = -1
    x = x - (R / (L * np.sqrt(k))) * g
    if np.linalg.norm(x - x1) > R:
        # project back onto the feasible ball ||x - x1|| <= R
        x = x1 + R * (x - x1) / np.linalg.norm(x - x1)
    xs.append(x)
Example #30
def loss_fn_l1(X, Y, beta):
    return cp.norm1(cp.matmul(X, beta) - Y)
Example #31
def regularizer(beta):
    return cp.norm1(beta)
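loss_fn_l1 (Example #30) and regularizer above compose directly into an L1-loss, L1-regularized regression. A minimal sketch with invented data, assuming both helpers are in scope:

import numpy as np
import cvxpy as cp

np.random.seed(0)
X = np.random.randn(40, 8)
Y = X.dot(np.random.randn(8)) + 0.05 * np.random.randn(40)
beta = cp.Variable(8)
lambd = 0.1  # hypothetical regularization weight
problem = cp.Problem(cp.Minimize(loss_fn_l1(X, Y, beta) + lambd * regularizer(beta)))
problem.solve()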
Example #32
def conv_sensing(measurement_ratios,
                 filter_size,
                 solver="AMP",
                 sparse_in_dct=False,
                 width=50,
                 depth=20,
                 delta=1e-2,
                 sparsity=0.5,
                 n_rep=10):

    N = width * depth
    signal = GaussBernouilliPrior(size=(N, ), rho=sparsity)

    def sample_trnsf():
        if (sparse_in_dct):
            D = scipy.linalg.block_diag(
                *[dctmtx(width).T for _ in range(depth)])
        else:
            D = np.eye(N)
        return D

    recovery_per_alpha = []

    for alpha in measurement_ratios:
        recoveries = []
        for rep in range(n_rep):
            out_channels = int(np.rint(alpha * depth))
            ensemble = ChannelConvEnsemble(width=width,
                                           in_channels=depth,
                                           out_channels=out_channels,
                                           k=filter_size)
            A = ensemble.generate()
            C = sample_trnsf()

            teacher = conv_model(
                A, C, signal) @ GaussianChannel(var=delta) @ O(id="y")
            teacher = teacher.to_model()
            sample = teacher.sample()

            if (solver == "AMP"):

                max_iter = 20
                damping = 0.1

                student = conv_model(A, C, signal) @ GaussianLikelihood(
                    y=sample['y'], var=delta)
                student = student.to_model_dag()
                student = student.to_model()
                ep = ExpectationPropagation(student)
                ep.iterate(max_iter=max_iter,
                           damping=damping,
                           callback=EarlyStopping(tol=1e-8))
                data_ep = ep.get_variables_data((['x']))
                mse = np.mean((data_ep['x']['r'] - sample['x'])**2)
                recoveries.append(mse)

            elif (solver == "CVX"):

                reg_param = 0.001

                x = cp.Variable(shape=(N, ), name="x")
                lmbda = cp.Parameter(nonneg=True)
                objective = cp.norm2(A @ C @ x -
                                     sample['y'])**2 + lmbda * cp.norm1(x)
                problem = cp.Problem(cp.Minimize(objective))
                lmbda.value = reg_param
                problem.solve(abstol=1e-6)
                mse = np.mean((x.value - sample['x'])**2)
                recoveries.append(mse)
            else:
                raise ValueError("Solver must be 'AMP' or 'CVX'")
        recovery_per_alpha.append(recoveries)
    return recovery_per_alpha
Example #33
def cvx_inequality_time_graphical_lasso(S, K_init, max_iter, loss, C, theta,
                                        psi, gamma, tol):
    """Inequality constrained time-varying graphical LASSO solver.

    Solves the following problem via CVXPY (MOSEK):
        min theta * sum_{i=1}^T ||K_i||_{od,1} + (1-theta) * sum_{i=2}^T Psi(K_i - K_{i-1})
        s.t. loss(S_i, K_i) <= C_i for i = 1, ..., T

    where S_i = (1/n_i) X_i^T X_i is the empirical covariance of data
    matrix X (training observations by features).

    Parameters
    ----------
    S : ndarray, shape (T, n_features, n_features)
        Empirical covariance of the data, one matrix per time point.
    K_init : ndarray or None
        Initial precision matrices (unused by this CVXPY-based solver).
    max_iter : int
        Maximum number of iterations (only relevant to the commented-out
        SCS call; MOSEK manages its own convergence).
    loss : {'LL', 'dtrace'}
        Loss function: negative log-likelihood or D-trace.
    C : ndarray, shape (T,)
        Per-time-point upper bounds on the loss.
    theta : float
        Trade-off between the off-diagonal l1 term and the temporal penalty.
    psi : {'laplacian', 'l1', 'l2', 'linf'}
        Temporal consistency penalty on K_t - K_{t-1}.
    gamma : float
        Kernel parameter when psi is chosen to be 'kernel' (unused here).
    tol : float
        Absolute tolerance for convergence (only relevant to the
        commented-out SCS call).

    Returns
    -------
    K : numpy.array, 3-dimensional (T x d x d)
        Solution to the problem for each time t=1...T .
    covariance_ : numpy.array, 3-dimensional (T x d x d)
        Covariance estimates, the pseudo-inverse of each K_t.

    """

    if loss == 'LL':
        loss_function = neg_logl
    else:
        loss_function = dtrace

    T, p, _ = S.shape
    K = [cp.Variable(shape=(p, p), PSD=True) for t in range(T)]
    # Z_1 = [cp.Variable(shape=(p, p), PSD=True) for t in range(T-1)]
    # Z_2 = [cp.Variable(shape=(p, p), PSD=True) for t in range(T-1)]

    if psi == 'laplacian':
        objective = cp.Minimize(
            theta * cp.sum(
                [cp.norm(K[t] - cp.diag(cp.diag(K[t])), 1)
                 for t in range(T)]) + (1 - theta) *
            cp.sum([cp.norm(K[t] - K[t - 1], 'fro') for t in range(1, T)]))
    elif psi == 'l1':
        objective = cp.Minimize(theta * cp.sum([
            cp.norm(K[t] - cp.diag(cp.diag(K[t])), 1) for t in range(T)
        ]) + (1 - theta) * cp.sum(
            [cp.sum(cp.norm1(K[t] - K[t - 1], axis=1)) for t in range(1, T)]))
    elif psi == 'l2':
        objective = cp.Minimize(theta * cp.sum(
            [cp.norm(K[t] - cp.diag(cp.diag(K[t])), 1)
             for t in range(T)]) + (1 - theta) * cp.sum([
                 cp.sum(cp.norm(K[t] - K[t - 1], p=2, axis=1))
                 for t in range(1, T)
             ]))
    elif psi == 'linf':
        objective = cp.Minimize(theta * cp.sum(
            [cp.norm(K[t] - cp.diag(cp.diag(K[t])), 1)
             for t in range(T)]) + (1 - theta) * cp.sum([
                 cp.sum(cp.norm_inf(K[t] - K[t - 1], axis=1))
                 for t in range(1, T)
             ]))

    # if loss_function == neg_logl:
    constraints = [(cp.sum(cp.multiply(K[t], S[t])) - cp.log_det(K[t]) <= C[t])
                   for t in range(T)]
    # [(cp.trace(K[t] @ S[t]) - cp.log_det(K[t]) <= C[t]) for t in range(T)] # + \
    # [(Z_1[t] == K[t]) for t in range(T-1)] + \
    # [(Z_2[t] == K[t+1]) for t in range(T-1)]
    # else:
    #     constraints = [(cp.trace(K[t] @ K[t] @ S[t]) - cp.trace(K[t]) <= C[t]) for t in range(T)] # + \
    #                     # [(Z_1[t] == K[t]) for t in range(T-1)] + \
    #                     # [(Z_2[t] == K[t+1]) for t in range(T-1)]

    prob = cp.Problem(objective, constraints)
    # prob.solve(solver=cp.SCS, max_iters=np.int(max_iter), eps=tol, verbose=True)
    prob.solve(solver=cp.MOSEK, verbose=True)

    print(prob.status)
    print(prob.value)

    K = np.array([k.value for k in K])
    covariance_ = np.array([linalg.pinvh(k) for k in K])
    return_list = [K, covariance_]
    return return_list
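A hypothetical call with synthetic covariances; MOSEK must be installed since the solver is hard-coded above, and the arguments the function ignores (K_init, max_iter, gamma, tol) are passed as placeholders:

import numpy as np

np.random.seed(0)
T, p = 3, 5
X = np.random.randn(T, 50, p)
S = np.stack([x.T.dot(x) / 50 for x in X])
K, cov = cvx_inequality_time_graphical_lasso(
    S, K_init=None, max_iter=100, loss='LL', C=np.full(T, 10.0),
    theta=0.5, psi='laplacian', gamma=None, tol=1e-4)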
Example #34
def total_variation(C, p):
    return _solve_convex(C, p, lambda p, q: 0.5 * cvxpy.norm1(p - q))
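Here _solve_convex is repo-specific, but the lambda it receives is the total-variation distance TV(p, q) = 0.5 * ||p - q||_1. For two fixed distributions it reduces to plain numpy:

import numpy as np

p = np.array([0.5, 0.3, 0.2])
q = np.array([0.4, 0.4, 0.2])
tv = 0.5 * np.abs(p - q).sum()  # 0.5 * (0.1 + 0.1 + 0.0) = 0.1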
Example #35
#!/usr/bin/env python

import numpy as np
import cvxpy as cp

from epopt import cvxpy_expr
from epopt import expression_vis
from epopt.compiler import canonicalize

if __name__ == "__main__":
    n = 5
    x = cp.Variable(n)

    # Lasso expression tree
    m = 10
    A = np.random.randn(m,n)
    b = np.random.randn(m)
    lam = 1
    f = cp.sum_squares(A*x - b) + lam*cp.norm1(x)
    prob0 = cvxpy_expr.convert_problem(cp.Problem(cp.Minimize(f)))[0]
    expression_vis.graph(prob0.objective).write("expr_lasso.dot")

    # Canonicalization of a more complicated example
    c = np.random.randn(n)
    f = cp.exp(cp.norm(x) + c.T*x) + cp.norm1(x)
    prob0 = cvxpy_expr.convert_problem(cp.Problem(cp.Minimize(f)))[0]
    expression_vis.graph(prob0.objective).write("expr_epigraph.dot")
    prob1 = canonicalize.transform(prob0)
    expression_vis.graph(prob1.objective).write("expr_epigraph_canon.dot")
Example #36
 def reg(self, X): return self.nu*cp.norm1(X)
 def __str__(self): return "linear reg"
Example #37
def recovery(measurement_ratios,
             filter_size,
             solver="AMP",
             prior="conv",
             sparse_in_dct=False,
             N=1000,
             delta=1e-2,
             sparsity=0.5,
             n_rep=10):

    signal = GaussBernouilliPrior(size=(N, ), rho=sparsity)
    prior_conv_ens = ConvEnsemble(N, filter_size)

    def sample_trnsf():
        if (sparse_in_dct):
            D = dctmtx(N).T
        else:
            D = np.eye(N)

        if (prior == "conv"):
            C = prior_conv_ens.generate()
        elif (prior == "sparse"):
            C = np.eye(N)
        else:
            raise ValueError("Prior must be 'conv' or 'sparse'")
        return D @ C

    recovery_per_alpha = []

    for alpha in measurement_ratios:
        recoveries = []
        for rep in range(n_rep):
            M = int(alpha * N)
            ensemble = GaussianEnsemble(M, N)
            A = ensemble.generate()
            C = sample_trnsf()

            teacher = conv_model(
                A, C, signal) @ GaussianChannel(var=delta) @ O(id="y")
            teacher = teacher.to_model()
            sample = teacher.sample()

            if (solver == "AMP"):

                max_iter = 20
                damping = 0.1

                student = conv_model(A, C, signal) @ GaussianLikelihood(
                    y=sample['y'], var=delta)
                student = student.to_model_dag()
                student = student.to_model()
                ep = ExpectationPropagation(student)
                ep.iterate(max_iter=max_iter,
                           damping=damping,
                           callback=EarlyStopping(tol=1e-8))
                data_ep = ep.get_variables_data((['x']))
                mse = np.mean((data_ep['x']['r'] - sample['x'])**2)
                recoveries.append(mse)

            elif (solver == "CVX"):

                reg_param = 0.001

                x = cp.Variable(shape=(N, ), name="x")
                lmbda = cp.Parameter(nonneg=True)
                objective = cp.norm2(A @ C @ x -
                                     sample['y'])**2 + lmbda * cp.norm1(x)
                problem = cp.Problem(cp.Minimize(objective))
                lmbda.value = reg_param
                problem.solve(abstol=1e-6)
                mse = np.mean((x.value - sample['x'])**2)
                recoveries.append(mse)
            else:
                raise ValueError("Solver must be 'AMP' or 'CVX'")
        recovery_per_alpha.append(recoveries)
    return recovery_per_alpha
Example #38
def total_variation_plus_seasonal_filter(signal,
                                         c1=10,
                                         c2=500,
                                         residual_weights=None,
                                         tv_weights=None,
                                         use_ixs=None,
                                         periodic_detector=False,
                                         transition_locs=None,
                                         seas_max=None):
    '''
    This performs total variation filtering with the addition of a seasonal baseline fit. This introduces a new
    signal to the model that is smooth and periodic on a yearly time frame. This does a better job of describing real,
    multi-year solar PV power data sets, and therefore does an improved job of estimating the discretely changing
    signal.

    :param signal: A 1d numpy array (must support boolean indexing) containing the signal of interest
    :param c1: The regularization parameter to control the total variation in the final output signal
    :param c2: The regularization parameter to control the smoothness of the seasonal signal
    :return: A tuple of 1d numpy arrays: the filtered signal and the fitted seasonal component
    '''
    if residual_weights is None:
        residual_weights = np.ones_like(signal)
    if tv_weights is None:
        tv_weights = np.ones(len(signal) - 1)
    if use_ixs is None:
        index_set = ~np.isnan(signal)
    else:
        index_set = np.logical_and(use_ixs, ~np.isnan(signal))
    s_hat = cvx.Variable(len(signal))
    s_seas = cvx.Variable(len(signal))
    s_error = cvx.Variable(len(signal))
    c1 = cvx.Constant(value=c1)
    c2 = cvx.Constant(value=c2)
    #w = len(signal) / np.sum(index_set)
    if transition_locs is None:
        objective = cvx.Minimize(
            # (365 * 3 / len(signal)) * w *
            # cvx.sum(cvx.huber(cvx.multiply(residual_weights, s_error)))
            10 * cvx.norm(cvx.multiply(residual_weights, s_error)) +
            c1 * cvx.norm1(cvx.multiply(tv_weights, cvx.diff(s_hat, k=1))) +
            c2 * cvx.norm(cvx.diff(s_seas, k=2))
            # + c2 * .1 * cvx.norm(cvx.diff(s_seas, k=1))
        )
    else:
        objective = cvx.Minimize(
            10 * cvx.norm(cvx.multiply(residual_weights, s_error)) +
            c2 * cvx.norm(cvx.diff(s_seas, k=2)))
    constraints = [
        signal[index_set] == s_hat[index_set] + s_seas[index_set] +
        s_error[index_set],
        cvx.sum(s_seas[:365]) == 0
    ]
    if len(signal) > 365:
        constraints.append(s_seas[365:] - s_seas[:-365] == 0)
        if periodic_detector:
            constraints.append(s_hat[365:] - s_hat[:-365] == 0)
    if transition_locs is not None:
        loc_mask = np.ones(len(signal) - 1, dtype=bool)
        loc_mask[transition_locs] = False
        # loc_mask[transition_locs + 1] = False
        constraints.append(cvx.diff(s_hat, k=1)[loc_mask] == 0)
    if seas_max is not None:
        constraints.append(s_seas <= seas_max)
    problem = cvx.Problem(objective=objective, constraints=constraints)
    problem.solve()
    return s_hat.value, s_seas.value
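A minimal synthetic call of this variant, with an invented signal and CVXPY's default solver:

import numpy as np

np.random.seed(1)
t = np.arange(500)
sig = np.sin(2 * np.pi * t / 365) + np.where(t > 250, 0.5, 0.0) + 0.05 * np.random.randn(500)
s_hat, s_seas = total_variation_plus_seasonal_filter(sig)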
Example #39
def fit_ellipse_stack2(dx, dy, dz, di, norm_type="l2"):
    """
    fit an ellipse stack using a squared (or l1) loss

    idea: learn all z-layers jointly, with a smoothness penalty tying the
    parameters of neighboring layers

    """

    #TODO create flag for norm1 vs norm2
    
    assert norm_type in ["l1", "l2", "huber"]

    # sanity check
    assert len(dx) == len(dy)
    assert len(dx) == len(dz)
    assert len(dx) == len(di)

    # unique zs
    dat = defaultdict(list)

    # resort data
    for idx in range(len(dx)):
        dat[dz[idx]].append( [dx[idx], dy[idx], di[idx]] )

    # init ret
    ellipse_stack = []
    for idx in range(max(dz)):
        ellipse_stack.append(Ellipse(0, 0, idx, 1, 1, 0))
    

    total_N = len(dx)
    M = len(dat.keys())
    #D = 5
    D = 4

    X_matrix = []
    thetas = []
    slacks = []
    eps_slacks = []

    mean_di = float(numpy.mean(di))

    for z in dat.keys():

        x = numpy.array(dat[z])[:,0]
        y = numpy.array(dat[z])[:,1]

        # intensities
        i = numpy.array(dat[z])[:,2]
        ity = numpy.diag(i) / mean_di

        # dimensionality
        N = len(x)
        d = numpy.zeros((N, D))

        d[:,0] = x*x
        d[:,1] = y*y
        #d[:,2] = x*y
        d[:,2] = x
        d[:,3] = y
        #d[:,4] = numpy.ones(N)

        #d[:,0] = x*x
        #d[:,1] = y*y
        #d[:,2] = x*y
        #d[:,3] = x
        #d[:,4] = y
        #d[:,5] = numpy.ones(N)
    
        # consider intensities
        old_shape = d.shape
        #d = numpy.dot(ity, d)
        assert d.shape == old_shape
    
        print(d.shape)
        d = cvxpy.matrix(d)
        #### parameters

        # da
        X = cvxpy.parameter(N, D, name="X" + str(z))
        X.value = d
        X_matrix.append(X)


        #### variables
    
        # parameter vector
        theta = cvxpy.variable(D, name="theta" + str(z))
        thetas.append(theta)


    # construct obj
    objective = 0

    print "norm type", norm_type 

    for i in range(M):


        if norm_type == "l1":
            objective += cvxpy.norm1(X_matrix[i] * thetas[i] + 1.0)
        if norm_type == "l2":
            objective += cvxpy.norm2(X_matrix[i] * thetas[i] + 1.0)

        #TODO these need to be summed
        #objective += cvxpy.huber(X_matrix[i] * thetas[i], 1)
        #objective += cvxpy.deadzone(X_matrix[i] * thetas[i], 1)


    # add smoothness regularization
    reg_const = float(total_N) / float(M-1)

    for i in range(M-1):
        objective += reg_const * cvxpy.norm2(thetas[i] - thetas[i+1])


    # create problem                                    
    p = cvxpy.program(cvxpy.minimize(objective))


    # add constraints
    #for i in xrange(M):
    #    #p.constraints.append(cvxpy.eq(thetas[i][0,:] + thetas[i][1,:], 1))
    #    p.constraints.append(cvxpy.eq(thetas[i][4,:], 1))

    # set solver settings
    p.options['reltol'] = 1e-1
    p.options['abstol'] = 1e-1
    #p.options['feastol'] = 1e-1

    # invoke solver
    p.solve()
    

    # wrap up result
    ellipse_stack = {}

    active_layers = list(dat.keys())
    assert len(active_layers) == M

    for i in range(M):

        w = numpy.array(thetas[i].value)

        ## For clarity, fill in the quadratic form variables
        #A        = numpy.zeros((2,2))
        #A[0,0]   = w[0]
        #A.ravel()[1:3] = w[2]
        #A[1,1]   = w[1]
        #bv       = w[3:5]
        #c        = w[5]

        A              = numpy.zeros((2,2))
        A[0,0]         = w[0]
        A.ravel()[1:3] = 0 #w[2]
        A[1,1]         = w[1]
        #bv             = w[2:4]
        bv             = w[2:]
        #c              = w[4]
        c              = 1.0
                
        ## find parameters
        z, a, b, alpha = util.conic2parametric(A, bv, c)
        print "layer (i,z,a,b,alpha):", i, z, a, b, alpha

        layer = active_layers[i]
        ellipse_stack[layer] = Ellipse(z[0], z[1], layer, a, b, alpha)


    return ellipse_stack
Example #40
def sparse_kmeans(AllDataMatrix,s,niter,group):    
        
    w = [1/np.sqrt(AllDataMatrix.shape[1])]*AllDataMatrix.shape[1]
    
    wx = np.zeros((len(AllDataMatrix),AllDataMatrix.shape[1]))
    for j in range(AllDataMatrix.shape[1]):
      wx[:,j] = AllDataMatrix[:,j]*(np.sqrt(w)[j])
    alpha_group = s
    s_orig = s
    nclust = 6
  
    #kmt = KMeans(n_clusters=nclust, init='random', n_init=100,verbose=False, tol=0.0000000001)                    
    #kmt.fit(wx)
    #print kmt.labels_
    #print adjusted_rand_score(labels, kmt.labels_)
    
    
    kmt = rkmeans(x=numpy2ri(wx),centers=nclust,nstart=100)
    kmlabels = np.array(kmt[0])
    print adjusted_rand_score(labels, np.array(kmt[0]))
    #overall iterations
    for i in range(niter):
        #print i
        #1.get bcssj
    
        aj_list = []
        for j in range(AllDataMatrix.shape[1]):
            dat_j = AllDataMatrix[:,j].reshape((len(AllDataMatrix),1))
            djall = euclidean_distances(dat_j, dat_j)
            sumd_all = np.sum(djall**2)/len(AllDataMatrix)
            nk_list = [];sumd_k_list = []
        
            for k in range(nclust):
                dat_j = AllDataMatrix[kmlabels==k,j]
                dat_j = dat_j.reshape((len(dat_j),1))
                if(len(dat_j)<1):
                    d = 0
                else:    
                    d = euclidean_distances(dat_j, dat_j)
                nk = len(dat_j)
                sumd_k = np.sum(d**2)
                nk_list.append(nk)
                sumd_k_list.append(sumd_k)
            
            nk_list = np.array(nk_list)
            sumd_k_list = np.array(sumd_k_list)
            #compute within-sum of squares over feature j
            nk_list[nk_list==0] = -1
            one_nk_list = 1./nk_list
            one_nk_list[np.sign(one_nk_list)== -1 ] = 0
            withinssj = np.sum(one_nk_list*sumd_k_list)
            #aj = totalss/n - wss/nk
            aj = sumd_all - withinssj
            aj_list.append(aj)
        #2. get w
        a = np.array(aj_list)
        #lenseq = np.array([256,128,64,32,16,8,4,2,1,1])  # alternative grouping, immediately overridden below
        lenseq = np.array([256,128,64,32,16,8,8])
        nlevels = len(lenseq)
    
        sqrtlenseq = np.sqrt(lenseq)
        indseq = np.cumsum(lenseq)
        wvar = cvx.Variable(len(a))
        
        t = cvx.Variable(nlevels)
        ## Form objective.
    
        if group:
        ####GROUP SPARSE
            #obj = cvx.Minimize(sum(-1*a*wvar) + alpha_group*sum(t))
            obj = cvx.Minimize(sum(-1*a*wvar))
    
           
            group0 = [cvx.norm(wvar[0:(indseq[0])],2)<=t[0]]  # full first group; the earlier 0:(indseq[0]-1) dropped one element
            group1 = [cvx.norm(wvar[indseq[0]:(indseq[1])],2)<=t[1]]
            group2 = [cvx.norm(wvar[indseq[1]:(indseq[2])],2)<=t[2]]
            group3 = [cvx.norm(wvar[indseq[2]:(indseq[3])],2)<=t[3]]
            group4 = [cvx.norm(wvar[indseq[3]:(indseq[4])],2)<=t[4]]
            group5 = [cvx.norm(wvar[indseq[4]:(indseq[5])],2)<=t[5]]
            group6 = [cvx.norm(wvar[indseq[5]:(indseq[6])],2)<=t[6]]
            
    
    #        group0 = [cvx.norm(wvar[0:indseq[0]],2)<=t[0]]
    #        group1 = [cvx.norm(wvar[indseq[0]:indseq[1]],2)<=t[1]]
#            group0 = [sqrtlenseq[0]*cvx.norm(wvar[0:(indseq[0]-1)],2)<=t[0]]
#            group1 = [sqrtlenseq[1]*cvx.norm(wvar[indseq[0]:(indseq[1])],2)<=t[1]]
#            group2 = [sqrtlenseq[2]*cvx.norm(wvar[indseq[1]:(indseq[2])],2)<=t[2]]
#            group3 = [sqrtlenseq[3]*cvx.norm(wvar[indseq[2]:(indseq[3])],2)<=t[3]]
#            group4 = [sqrtlenseq[4]*cvx.norm(wvar[indseq[3]:(indseq[4])],2)<=t[4]]
#            group5 = [sqrtlenseq[5]*cvx.norm(wvar[indseq[4]:(indseq[5])],2)<=t[5]]
#            group6 = [sqrtlenseq[6]*cvx.norm(wvar[indseq[5]:(indseq[6])],2)<=t[6]]
    #        
            #group7 = [cvx.norm(wvar[indseq[6]:(indseq[7])],2)<=t[7]]
    #        group8 = [cvx.norm(wvar[indseq[7]:(indseq[8])],2)<=t[8]]
    #        group9 = [cvx.norm(wvar[indseq[8]:(indseq[9])],2)<=t[9]]
            
        ###"correct" constraints
            #constr = [wvar>=0,sum(wvar)==1] + group0 + group1 + group2 + group3 + group4 + group5 + group6
        ##l2 constraints
            #constr = [cvx.square(cvx.norm2(wvar))<=1,wvar>=0] + group0 + group1 + group2 + group3 + group4 + group5 + group6 + group7 + group8 + group9
            #constr = [cvx.square(cvx.norm2(wvar))<=1,wvar>=0] + group0 + group1 + group2 + group3 + group4 + group5 + group6 + group7
            constr = [cvx.square(cvx.norm2(wvar))<=1,wvar>=0] + group0 + group1 + group2 + group3 + group4 + group5 + group6
            constr = constr + [sum(t)<=alpha_group]#cvx.norm1(wvar)<=s_orig
    
    ####GROUP NORM AS IN LASSO
    #        groupnormvec = [cvx.norm(wvar[0:(indseq[0]-1)],2),cvx.norm(wvar[indseq[0]:(indseq[1])],2),
    #                    cvx.norm(wvar[indseq[1]:(indseq[2])],2),cvx.norm(wvar[indseq[2]:(indseq[3])],2),
    #                    cvx.norm(wvar[indseq[3]:(indseq[4])],2),cvx.norm(wvar[indseq[4]:(indseq[5])],2),
    #                    cvx.norm(wvar[indseq[5]:(indseq[6])],2)]
    #        obj = cvx.Minimize(sum(-1*a*wvar) + alpha_group*sum(groupnormvec))
    #        constr = [cvx.square(cvx.norm2(wvar))<=1,wvar>=0]  
        else:
        ####ORIGINAL SPARSE KMEANS PROBLEM
            #obj = cvx.Minimize(cvx.sum(cvx.mul_elemwise(-1*a,wvar))) 
            obj = cvx.Minimize(sum(-1*a*wvar)) 
            
            constr = [cvx.square(cvx.norm2(wvar))<=1,cvx.norm1(wvar)<=s_orig, wvar>=0]  
            #constr = [cvx.square(cvx.norm2(wvar))<=1, wvar>=0]  
    
        prob = cvx.Problem(obj, constr)
        #prob.solve()
    
        try: 
            prob.solve()
            #print "default solver"
        except:
            
            #print "SCS SOLVER"
            #prob.solve(solver =cvx.CVXOPT)
            prob.solve(solver = cvx.SCS,verbose=False)#use_indirect=True
            #print prob.value
        w = wvar.value
    
        #3. update kmeans 
        wx = np.zeros((len(AllDataMatrix),AllDataMatrix.shape[1]))
        for j in range(AllDataMatrix.shape[1]):
            wj = np.sqrt(w[j][0,0])
            #wj = w[j][0,0]
            if np.isnan(wj):
                #print "bad"
                wj = 10**-20
    #        else:
    #            #print "yes"
    #            #print wj
            wx[:,j] = AllDataMatrix[:,j]*wj
    
    #    kmt = KMeans(n_clusters=nclust, init='random', n_init=100,verbose=False,tol=0.0000000001)                    
    #    kmt.fit(wx)
        kmt = rkmeans(x=numpy2ri(wx),centers=nclust,nstart=100)
        kmlabels =  np.array(kmt[0])
    return prob.value,kmlabels
Exemplo n.º 42
0
# Proximal operators
PROX_TESTS = [
    #prox("MATRIX_FRAC", lambda: cp.matrix_frac(p, X)),
    #prox("SIGMA_MAX", lambda: cp.sigma_max(X)),
    prox("AFFINE", lambda: randn(n).T*x),
    prox("CONSTANT", lambda: 0),
    prox("LAMBDA_MAX", lambda: cp.lambda_max(X)),
    prox("LOG_SUM_EXP", lambda: cp.log_sum_exp(x)),
    prox("MAX", lambda: cp.max_entries(x)),
    prox("NEG_LOG_DET", lambda: -cp.log_det(X)),
    prox("NON_NEGATIVE", None, C_non_negative_scaled),
    prox("NON_NEGATIVE", None, C_non_negative_scaled_elemwise),
    prox("NON_NEGATIVE", None, lambda: [x >= 0]),
    prox("NORM_1", f_norm1_weighted),
    prox("NORM_1", lambda: cp.norm1(x)),
    prox("NORM_2", lambda: cp.norm(X, "fro")),
    prox("NORM_2", lambda: cp.norm2(x)),
    prox("NORM_NUCLEAR", lambda: cp.norm(X, "nuc")),
    prox("SECOND_ORDER_CONE", None, C_soc_scaled),
    prox("SECOND_ORDER_CONE", None, C_soc_scaled_translated),
    prox("SECOND_ORDER_CONE", None, C_soc_translated),
    prox("SECOND_ORDER_CONE", None, lambda: [cp.norm(X, "fro") <= t]),
    prox("SECOND_ORDER_CONE", None, lambda: [cp.norm2(x) <= t]),
    prox("SEMIDEFINITE", None, lambda: [X >> 0]),
    prox("SUM_DEADZONE", f_dead_zone),
    prox("SUM_EXP", lambda: cp.sum_entries(cp.exp(x))),
    prox("SUM_HINGE", f_hinge),
    prox("SUM_HINGE", lambda: cp.sum_entries(cp.max_elemwise(1-x, 0))),
    prox("SUM_HINGE", lambda: cp.sum_entries(cp.max_elemwise(1-x, 0))),
    prox("SUM_INV_POS", lambda: cp.sum_entries(cp.inv_pos(x))),
Exemplo n.º 43
0
def SLS_synthesis_p1(Pssd, T, regularization=-1, test_signal='step',
                     Mp=1.01, m=0.5, b=10, k=80,
                     freqs_bnd_yn=[1e-2, 255], mag_bnd_yn=[-10, -10],
                     freqs_bnd_T=[1e-2, 357], mag_bnd_T=[6, 6], T_delay=11):
    """Synthesize a controller using SLS.

    Procedure p1

        Constraints
        - 20c, 20a, 20b (achievability constraints);
        - lower bound on the impulse response, to prevent a negative response;
        - steady-state displacement given a constant acting force;
        - noise attenuation: upper bound on the mapping from noise to displacement;
        - robust stability: upper bound on the complementary sensitivity transfer function.

        Objectives
        - regularization using a scaled l1 norm on the impulse responses L and MB2;
        - distance to a desired mass/spring/damper model (m, b, k).
    """
    print("-- Starting SLS_synthesis_p1")
    # parameters
    nu = 1  # 1 dof controller
    ny = 1

    # form response matrices
    R, N, M, L, H, constraints = Ss.SLS.form_SLS_response_matrices(
        Pssd, nu, ny, T)

    # constants
    nx = Pssd.states
    A, B1, B2, C1, C2, D11, D12, D21, D22 = Ss.get_partitioned_mats(
        Pssd, nu, ny)

    # select component of H that correspond to the mapping from acting
    # force (and noise) to actual robot displacement (H_yn) and the
    # mapping that corresponds to the complementary transfer function T.
    H_yn = H[0::2, :]
    H_T = H[1::2, :]

    # objective: match the impulse response of a given system
    sys_model = co.c2d(co.tf([1], [m, b, k]), dT)
    _, imp_model = co.impulse_response(sys_model, np.arange(T) * dT)

    # NOTE: have to use norm here; sum_of_squares does not work. The
    # reason is that the magnitude of the objective function must not be
    # too small, otherwise the optimizer seems to get confused and simply
    # stops working.
    imp_diff = H_yn[T_delay:, 0] - imp_model[0, :T - T_delay]
    weight = np.diag(1 + 2 * (1.0 / T) * np.arange(T - T_delay))
    objective = 1e6 * cvx.norm(weight * imp_diff)

    # try some regularization
    if regularization > 0:
        reg = regularization * (cvx.norm1(H_yn))
    else:
        reg = cvx.abs(cvx.Variable())  # effectively zero: the solver drives this free scalar to 0

    # constraint in frequency-domain, if specified
    W_dft = Ss.dft_matrix(T)
    Hz_yn = W_dft * H_yn
    Hz_T = W_dft * H_T
    omegas = np.arange(int(T / 2)) * 2 * np.pi / T / dT
    omegas[0] = 1e-2

    # upper bound for noise attenuation
    wN_inv = np.ones((T, 1)) * 100  # infinity
    wN_inv[:int(T / 2), 0] = np.power(10, np.interp(np.log10(omegas),
                                                    np.log10(freqs_bnd_yn), mag_bnd_yn) / 20)

    # upper bound for complementary sensitivity transfer function
    wT_inv = np.ones((T, 1)) * 100  # infinity
    wT_inv[:int(T / 2), 0] = np.power(
        10, np.interp(np.log10(omegas), np.log10(freqs_bnd_T), mag_bnd_T) / 20)

    # add both frequency-domain constraints
    constraints.append(cvx.abs(Hz_yn) <= wN_inv)
    constraints.append(cvx.abs(Hz_T) <= wT_inv)

    # optimize
    obj = cvx.Minimize(objective + reg)
    prob = cvx.Problem(obj, constraints)
    print("-- [SLS_synthesis_p1] Preparing problem with cvxpy!")
    prob.solve(verbose=True, solver=cvx.MOSEK)
    print("-- [SLS_synthesis_p1] optimization status: {:}".format(prob.status))
    print(
        "-- [SLS_synthesis_p1] obj = {:}, reg = {:}".format(objective.value, reg.value))

    if prob.status != "optimal":
        return None, None

    print("-- [SLS_synthesis_p1] Forming controllers!")
    # form controllers (Structure 1, Figure 4b, Wang 2018)
    L_value = np.array(L.value).reshape(ny, nu, -1)
    MB2_value = np.array((M * B2).value).reshape(nu, nu, -1)

    # since ny=nu=1, we have
    fir_den = [1] + [0 for n in range(T - 1)]
    MB2_tf = co.tf(MB2_value[0, 0], fir_den, dT)
    L_tf = co.tf(L_value[0, 0], fir_den, dT)
    K = co.feedback(1, MB2_tf, sign=-1) * L_tf
    K = Ss.tf2ss(K, minreal=True)

    # response mapping
    Rval = np.array(
        [R[n * nx: (n + 1) * nx, :].value for n in range(T)]).reshape(-1, nx, nx)
    Nval = np.array(
        [N[n * nx: (n + 1) * nx, :].value for n in range(T)]).reshape(-1, nx, ny)
    Mval = np.array(
        [M[n * nu: (n + 1) * nu, :].value for n in range(T)]).reshape(-1, nu, nx)
    Lval = np.array(
        [L[n * nu: (n + 1) * nu, :].value for n in range(T)]).reshape(-1, nu, ny)
    Hval_yn = np.array(H_yn.value)
    Hval_T = np.array(H_T.value)

    return K, {'internal responses': (Rval, Nval, Mval, Lval),
               'output impulse': (Hval_yn, Hval_T),
               'L': L_value, 'MB2': MB2_value}
Exemplo n.º 44
0
import numpy as np
import cvxpy as cp
import matplotlib.pyplot as plt

# generate a problem instance
n = 1000  # number of variables
m = 50  # number of equality constraints
# np.random.seed(1)  # fix the seed so the problem is reproducible
A = np.random.randn(m, n)
b = np.random.randn(m)

# threshold value below which we consider an element to be zero
DELTA = 1e-8

# compute an optimal point by solving an LP (via cvxpy)
x = cp.Variable(n)
objective = cp.Minimize(cp.norm1(x))
constraint = [A @ x == b]
prob = cp.Problem(objective, constraint)
result = prob.solve()
f_min = np.sum(np.abs(x.value))
print(f'Optimal value is {f_min:0.4f}')

# np.where returns a tuple of index arrays, so len() of it counts dimensions,
# not entries; count the elements above the threshold instead
nnz = np.sum(np.abs(x.value) > DELTA)
print(f'Found a feasible x in R^{n} that has {nnz} nonzeros')

# initial point needs to satisfy A*x1 = b (can use least-norm solution)
x1 = np.linalg.pinv(A).dot(b)

# ********************************************************************
# subgradient method computation
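
# The listing is truncated here. A plausible continuation, sketched under
# the assumption that the standard projected subgradient method is intended
# for minimize ||x||_1 subject to A x = b (this is not the original code):
MAX_ITERS = 3000
P = np.eye(n) - A.T @ np.linalg.solve(A @ A.T, A)  # projector onto the nullspace of A
x_k = x1.copy()
f_best, hist = np.inf, []
for k in range(1, MAX_ITERS + 1):
    f_best = min(f_best, np.sum(np.abs(x_k)))
    hist.append(f_best - f_min)
    g = np.sign(x_k)                  # a subgradient of the l1 norm at x_k
    x_k = x_k - (0.1 / k) * (P @ g)   # projected step keeps A x_k = b

plt.semilogy(hist)
plt.xlabel('iteration')
plt.ylabel('f_best - f_min')
plt.show()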
Exemplo n.º 45
0
# Variable declarations

import numpy as np
import cvxpy as cp
import scipy.sparse as sps
n = 500
m = 300
np.random.seed(0)

A = np.random.rand(m, n)
x0 = sps.rand(n, 1, 0.1)
b = A*x0


# Problem construction

x = cp.Variable(n)
prob = cp.Problem(cp.Minimize(cp.norm1(x)), [A*x == b])


# Problem collection

# Single problem collection
problemDict = {
    "problemID" : problemID,
    "problem"   : prob,
    "opt_val"   : opt_val
}
problems = [problemDict]



# For debugging individual problems:
Exemplo n.º 46
0
# A: the complete connection matrix; A_hat: the selected connection matrix;
# b: the complete threshold; b_hat: the selected threshold
# x: the encoding bits; y: the merged test cube to be encoded; y_hat: the decoded/re-constructed test cube
y = np.random.choice(2, (num_sc), p=[0.8, 0.2])
x = cp.Variable(num_ctrl, boolean=True)
# A = state_dict_np['decoder.0.linear.weight'] * np.expand_dims(thred_sign, axis=1)
# b = thred_decoder * thred_sign
A = np.load('checkpoint/decoder.linear.weight.npy')
b = np.load('checkpoint/encoder.bn.thred.npy')
A_hat = A[y.astype(bool)]
b_hat = b[y.astype(bool)]
# print(A_hat)
# print(b_hat)
# exit()
# constraints
# cost = cp.norm1(A @ x - b)   # this cost is too computationally complex to solve
cost = cp.norm1(x)  # for boolean x, the l1 norm equals the number of ones
objective = cp.Minimize(cost)
constraint = [A_hat @ x >= b_hat]
prob = cp.Problem(objective, constraint)
prob.solve()  # boolean variables require a mixed-integer-capable solver (e.g. ECOS_BB or GLPK_MI)

print("Status: ", prob.status)
print("The optimal value is", prob.value)
print("A solution x is")
print(x.value)

y_recover = np.matmul(A, x.value) >= b
# print(y_recover)
print('OnePercent: ', np.sum(y_recover) / num_sc)
Exemplo n.º 47
0
def linear_builder(er: np.ndarray,
                   lbound: Union[np.ndarray, float],
                   ubound: Union[np.ndarray, float],
                   risk_constraints: np.ndarray,
                   risk_target: Tuple[np.ndarray, np.ndarray],
                   turn_over_target: float = None,
                   current_position: np.ndarray = None,
                   method: str = 'ecos') -> Tuple[str, np.ndarray, np.ndarray]:
    er = er.flatten()
    n, m = risk_constraints.shape

    if not risk_target:
        risk_lbound = -np.inf * np.ones((m, 1))
        risk_ubound = np.inf * np.ones((m, 1))
    else:
        risk_lbound = risk_target[0].reshape((-1, 1))
        risk_ubound = risk_target[1].reshape((-1, 1))

    if isinstance(lbound, float):
        lbound = np.ones(n) * lbound

    if isinstance(ubound, float):
        ubound = np.ones(n) * ubound

    if not turn_over_target or current_position is None:
        cons_matrix = np.concatenate(
            (risk_constraints.T, risk_lbound, risk_ubound), axis=1)
        prob = LPOptimizer(cons_matrix, lbound, ubound, -er, method)

        if prob.status() == 0:
            return 'optimal', prob.feval(), prob.x_value()
        else:
            raise PortfolioBuilderException(prob.status())
    else:
        if method in ("simplex", "interior"):
            # expand the bounds and the constraint matrix to linearize the L1
            # turnover bound (see the sketch after this function)
            w_u_bound = np.minimum(
                np.maximum(np.abs(current_position - lbound),
                           np.abs(current_position - ubound)),
                turn_over_target).reshape((-1, 1))

            current_position = current_position.reshape((-1, 1))

            lbound = np.concatenate((lbound, np.zeros(n)), axis=0)
            ubound = np.concatenate((ubound, w_u_bound.flatten()), axis=0)

            risk_lbound = np.concatenate((risk_lbound, [[0.]]), axis=0)
            risk_ubound = np.concatenate((risk_ubound, [[turn_over_target]]),
                                         axis=0)

            risk_constraints = np.concatenate(
                (risk_constraints.T, np.zeros((m, n))), axis=1)
            er = np.concatenate((er, np.zeros(n)), axis=0)

            turn_over_row = np.zeros(2 * n)
            turn_over_row[n:] = 1.
            risk_constraints = np.concatenate(
                (risk_constraints, [turn_over_row]), axis=0)

            turn_over_matrix = np.zeros((2 * n, 2 * n))
            for i in range(n):
                turn_over_matrix[i, i] = 1.
                turn_over_matrix[i, i + n] = -1.
                turn_over_matrix[i + n, i] = 1.
                turn_over_matrix[i + n, i + n] = 1.

            risk_constraints = np.concatenate(
                (risk_constraints, turn_over_matrix), axis=0)

            risk_lbound = np.concatenate((risk_lbound, -np.inf * np.ones(
                (n, 1))),
                                         axis=0)
            risk_lbound = np.concatenate((risk_lbound, current_position),
                                         axis=0)

            risk_ubound = np.concatenate((risk_ubound, current_position),
                                         axis=0)
            risk_ubound = np.concatenate((risk_ubound, np.inf * np.ones(
                (n, 1))),
                                         axis=0)

            cons_matrix = np.concatenate(
                (risk_constraints, risk_lbound, risk_ubound), axis=1)
            prob = LPOptimizer(cons_matrix, lbound, ubound, -er, method)

            if prob.status() == 0:
                return 'optimal', prob.feval(), prob.x_value()[:n]
            else:
                raise PortfolioBuilderException(prob.status())
        elif method.lower() == 'ecos':
            from cvxpy import Problem
            from cvxpy import Variable
            from cvxpy import norm1
            from cvxpy import Minimize

            w = Variable(n)
            current_risk_exposure = risk_constraints.T @ w

            constraints = [
                w >= lbound, w <= ubound,
                current_risk_exposure >= risk_lbound.flatten(),
                current_risk_exposure <= risk_ubound.flatten(),
                norm1(w - current_position) <= turn_over_target
            ]

            objective = Minimize(-w.T * er)
            prob = Problem(objective, constraints)
            prob.solve(solver='ECOS',
                       feastol=1e-10,
                       abstol=1e-10,
                       reltol=1e-10)

            if prob.status == 'optimal' or prob.status == 'optimal_inaccurate':
                return prob.status, prob.value, w.value.flatten()
            else:
                raise PortfolioBuilderException(prob.status)
        else:
            raise ValueError("{0} is not recognized".format(method))
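
# For reference, the hand-built matrices in the simplex/interior branch above
# implement the standard linearization of the L1 turnover bound: introduce
# u >= |w - w0| elementwise and bound sum(u). A hypothetical standalone
# illustration with toy data (not part of the library):
import numpy as np
import cvxpy as cp

n_toy = 4
w0 = np.array([0.25, 0.25, 0.25, 0.25])       # current position (assumed)
target = 0.10                                  # turnover budget (assumed)
er_toy = np.array([0.02, 0.01, -0.01, 0.03])   # expected returns (assumed)

w = cp.Variable(n_toy)
u = cp.Variable(n_toy)
cons = [w - w0 <= u, w0 - w <= u,   # together: u >= |w - w0|
        cp.sum(u) <= target,        # linearized L1 turnover constraint
        cp.sum(w) == 1, w >= 0]
cp.Problem(cp.Maximize(er_toy @ w), cons).solve()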
Exemplo n.º 48
0
def sparse_kmeans(AllDataMatrix, nclust, s, niter, group=False, tree=False, multi=False, groups_vector=None):
    w = [1 / np.sqrt(AllDataMatrix.shape[1])] * AllDataMatrix.shape[1]
    wx = np.zeros((len(AllDataMatrix), AllDataMatrix.shape[1]))
    for j in range(AllDataMatrix.shape[1]):
        wx[:, j] = AllDataMatrix[:, j] * (np.sqrt(w)[j])
    alpha_group = s
    s_orig = s
    print nclust

    kmt = KMeans(n_clusters=nclust, init="k-means++", max_iter=200, n_init=100)
    kmt.fit(wx)
    kmlabels = np.array(kmt.labels_)
    for i in range(niter):

        aj_list = []
        for j in range(AllDataMatrix.shape[1]):
            dat_j = AllDataMatrix[:, j].reshape((len(AllDataMatrix), 1))
            djall = euclidean_distances(dat_j, dat_j)
            sumd_all = np.sum(djall ** 2) / len(AllDataMatrix)
            nk_list = []
            sumd_k_list = []

            for k in range(nclust):
                dat_j = AllDataMatrix[kmlabels == k, j]
                dat_j = dat_j.reshape((len(dat_j), 1))
                if len(dat_j) < 1:
                    d = 0
                else:
                    d = euclidean_distances(dat_j, dat_j)
                nk = len(dat_j)
                sumd_k = np.sum(d ** 2)
                nk_list.append(nk)
                sumd_k_list.append(sumd_k)

            nk_list = np.array(nk_list)
            sumd_k_list = np.array(sumd_k_list)
            # compute within-sum of squares over feature j
            nk_list[nk_list == 0] = -1
            one_nk_list = 1.0 / nk_list
            one_nk_list[np.sign(one_nk_list) == -1] = 0
            withinssj = np.sum(one_nk_list * sumd_k_list)

            aj = sumd_all - withinssj
            aj_list.append(aj)
        # 2. get w
        a = np.array(aj_list)
        wvar = cvx.Variable(len(a))
        if tree:
            print "tree structure not supported"
            return -1
        else:
            if group:
                obj = cvx.Minimize(sum(-1 * a * wvar))
                if groups_vector is None:
                    lenseq = np.hstack((np.power(2, np.arange(np.log2(AllDataMatrix.shape[1])))[::-1], [1]))
                else:
                    lenseq = np.array(groups_vector)

                lenseq = lenseq.astype(int)

                nlevels = len(lenseq)
                sqrtlenseq = np.sqrt(lenseq)
                indseq = np.cumsum(lenseq)
                t = cvx.Variable(nlevels)
                group0 = [sqrtlenseq[0] * cvx.norm(wvar[0 : (indseq[0])], 2) <= t[0]]
                group_constraints = group0
                for level in range(1, nlevels):
                    group_const = [
                        sqrtlenseq[level] * cvx.norm(wvar[indseq[(level - 1)] : (indseq[level])], 2) <= t[level]
                    ]
                    group_constraints = group_constraints + group_const

                # assemble the constraint set once all group constraints are collected
                constr = [cvx.square(cvx.norm2(wvar)) <= 1, wvar >= 0] + group_constraints
                constr = constr + [sum(t) <= alpha_group]
                if multi:
                    T = AllDataMatrix.shape[1] / 3

                    t = cvx.Variable(T - 1)
                    constr_list = []
                    for coeff in range((T - 1)):
                        penalty = cvx.norm(wvar[coeff : (T * 3) : T], 2) <= t[coeff]
                        constr_list.append(penalty)

                    constr = [cvx.square(cvx.norm2(wvar)) <= 1, wvar >= 0] + constr_list
                    constr = constr + [sum(t) <= alpha_group]
            else:
                ####ORIGINAL SPARSE KMEANS PROBLEM
                print "ORIGINAL"
                obj = cvx.Minimize(sum(-1 * a * wvar))
                constr = [cvx.square(cvx.norm2(wvar)) <= 1, cvx.norm1(wvar) <= s_orig, wvar >= 0]

        prob = cvx.Problem(obj, constr)
        try:
            prob.solve()

        except:
            prob.solve(solver=cvx.SCS, verbose=False)  # use_indirect=True

        w = wvar.value

        # 3. update kmeans
        wx = np.zeros((len(AllDataMatrix), AllDataMatrix.shape[1]))
        for j in range(AllDataMatrix.shape[1]):
            wj = np.sqrt(w[j][0, 0])
            if np.isnan(wj):
                wj = 10 ** -30
            wx[:, j] = AllDataMatrix[:, j] * wj

        kmt = KMeans(n_clusters=nclust, init="k-means++", max_iter=200, n_init=100)
        kmt.fit(wx)
        kmlabels = np.array(kmt.labels_)

    return prob.value, kmlabels, w
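
# Side note: the weight subproblem above (maximize a'w subject to
# ||w||_2 <= 1, ||w||_1 <= s, w >= 0) also admits a closed-form solution by
# soft-thresholding with a binary search on the threshold, following Witten
# and Tibshirani (2010). A sketch, useful if the conic solver becomes a
# bottleneck (this is not the code used above):
import numpy as np

def soft_threshold(a, delta):
    return np.sign(a) * np.maximum(np.abs(a) - delta, 0)

def weight_update_closed_form(a, s, tol=1e-8):
    a_plus = np.maximum(a, 0)              # negative scores get zero weight
    nrm = np.linalg.norm(a_plus)
    if nrm == 0:
        return np.zeros_like(a_plus)
    w = a_plus / nrm
    if np.sum(w) <= s:                     # l1 constraint inactive
        return w
    lo, hi = 0.0, np.max(a_plus)
    while hi - lo > tol:                   # binary search for the threshold
        delta = 0.5 * (lo + hi)
        w = soft_threshold(a_plus, delta)
        w = w / np.linalg.norm(w)
        if np.sum(w) > s:
            lo = delta
        else:
            hi = delta
    return w
# usage (hypothetical): w_new = weight_update_closed_form(a, s_orig)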
Exemplo n.º 49
0
def nonlinearmodel(x,
                   library,
                   dt,
                   params,
                   options={
                       'smooth': True,
                       'solver': 'MOSEK'
                   }):
    '''
    Use the integral form of SINDy to find a sparse dynamical system model for the output, x, given a library of features.
    Then take the derivative of that model to estimate the derivative.  
    
    Inputs
    ------
    x       : (np.array of floats, 1xN) time series to differentiate
    library : (list of 1D arrays) list of features to use for building the model
    dt      : (float) time step

    Parameters
    ----------
    params  : (list)  [gamma,        : (int)    sparsity knob (higher = more sparse model)
                       window_size], : (int)    if option smooth, this determines the smoothing window
    options : (dict)  {'smooth',     : (bool)   if True, apply gaussian smoothing to the result with the same window size
                       'solver'}     : (str)    solver to use with cvxpy, MOSEK is default

    Outputs
    -------
    x_hat    : estimated (smoothed) x
    dxdt_hat : estimated derivative of x

    '''

    # Features
    int_library = integrate_library(library, dt)
    w_int_library, w_int_library_func, dw_int_library_func = whiten_library(
        int_library)

    # Whitened states
    w_state, w_state_func, dw_state_func = whiten_library([x])
    w_x_hat, = w_state

    # dewhiten integral library coefficients
    integrated_library_std = []
    integrated_library_mean = []
    for d in dw_int_library_func:
        integrated_library_std.append(d.std)
        integrated_library_mean.append(d.mean)
    integrated_library_std = np.array(integrated_library_std)
    integrated_library_mean = np.array(integrated_library_mean)

    # dewhiten state coefficients
    state_std = []
    state_mean = []
    for d in dw_state_func:
        state_std.append(d.std)
        state_mean.append(d.mean)
    state_std = np.array(state_std)
    state_mean = np.array(state_mean)

    # Define loss function
    var = cvxpy.Variable((1, len(library)))
    sum_squared_error_x = cvxpy.sum_squares(w_x_hat[1:-1] -
                                            (w_int_library * var[0, :])[1:-1])
    sum_squared_error = cvxpy.sum([sum_squared_error_x])

    # Solve convex optimization problem
    gamma = params[0]
    solver = options['solver']
    L = cvxpy.sum(sum_squared_error + gamma * cvxpy.norm1(var))
    obj = cvxpy.Minimize(L)
    prob = cvxpy.Problem(obj)
    r = prob.solve(solver=solver)
    sindy_coefficients = var.value

    integrated_library_offset = np.matrix(
        sindy_coefficients[0, :] /
        integrated_library_std) * np.matrix(integrated_library_mean).T
    estimated_coefficients = sindy_coefficients[
        0, :] / integrated_library_std * np.tile(state_std[0],
                                                 [len(int_library), 1]).T
    offset = -1 * (state_std[0] *
                   np.ravel(integrated_library_offset)) + state_mean

    # estimate derivative
    dxdt_hat = np.ravel(np.matrix(estimated_coefficients) * np.matrix(library))

    if options['smooth']:
        window_size = params[1]
        kernel = __gaussian_kernel__(window_size)
        dxdt_hat = pynumdiff.smooth_finite_difference.__convolutional_smoother__(
            dxdt_hat, kernel, 1)

    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = utility.estimate_initial_condition(x, x_hat)
    x_hat = x_hat + x0

    return x_hat, dxdt_hat
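
# Hypothetical usage sketch for nonlinearmodel; the feature library below is
# an assumption made for illustration and is not part of the snippet:
import numpy as np

dt = 0.01
t = np.arange(0, 4, dt)
x_noisy = np.sin(2 * np.pi * t) + 0.05 * np.random.randn(len(t))
library = [np.ones_like(t), t, t ** 2,
           np.sin(2 * np.pi * t), np.cos(2 * np.pi * t)]
x_hat, dxdt_hat = nonlinearmodel(x_noisy, library, dt, params=[1e-1, 11],
                                 options={'smooth': True, 'solver': 'MOSEK'})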
Exemplo n.º 50
0
A[b<0,:] -= 0.7*np.tile([x0], (np.sum(b<0),1))

P = la.block_diag(np.random.randn(n-1,n-1), 0)

lam = 1


# Problem construction

# Explicit epigraph constraint
x = cp.Variable(n)
t = cp.Variable(1)

z = 1 - sps.diags([b],[0])*A*x + t
f = lam*cp.sum_squares(x) + cp.sum_entries(cp.max_elemwise(z, 0))
C = [cp.norm1(P.T*x) <= t]
prob = cp.Problem(cp.Minimize(f), C)


# Problem collection

# Single problem collection
problemDict = {
    "problemID" : problemID,
    "problem"   : prob,
    "opt_val"   : opt_val
}
problems = [problemDict]


Exemplo n.º 51
0
def f_norm1_weighted():
    w = np.random.randn(n)
    w[0] = 0
    return cp.norm1(cp.mul_elemwise(w, x))
Exemplo n.º 52
0

    # Choose regularization parameter
    # lambda > lambda_max -> zero solution
    lambda_max = 2*norm(dot(nA.T, rSig.T), np.inf) 

    lamb = 1.0e-8*lambda_max
    
    print('Solving L1 penalized system with cvxpy...')

    coefs = cvx.variable(n_qpnts,1)
    A     = cvx.matrix(nA)
    rhs   = cvx.matrix(rSig).T

    objective = cvx.minimize(cvx.norm2(A*coefs - rhs) +
                             lamb*cvx.norm1(coefs) )
    constraints = [cvx.geq(coefs,0.0)]
    prob = cvx.program(objective, constraints)

    # Call the solver
    prob.solve(quiet=True)  #Use quiet=True to suppress output


    # Convert the cvxmod objects to plain numpy arrays for further processing
    nd_coefs_l1 = np.array(coefs.value).squeeze()

    # Cutoff those coefficients that are less than cutoff
    cutoff =  nd_coefs_l1.mean() + 2.0*nd_coefs_l1.std(ddof=1)
    nd_coefs_l1_trim = np.where(nd_coefs_l1 > cutoff, nd_coefs_l1, 0)

    # Get indices needed for sorting coefs, in reverse order.
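
# For comparison, a sketch of the same L1-penalized fit in the modern
# cvxpy (>= 1.0) interface; nA, rSig, n_qpnts and lamb are taken from the
# snippet above, and flattening rSig to a vector is an assumption:
import numpy as np
import cvxpy as cp

coefs = cp.Variable(n_qpnts)
objective = cp.Minimize(cp.norm(nA @ coefs - np.ravel(rSig), 2) +
                        lamb * cp.norm1(coefs))
prob = cp.Problem(objective, [coefs >= 0])
prob.solve()
nd_coefs_l1 = coefs.value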
Exemplo n.º 53
0
def backtest(returns,
             Z_returns,
             benchmark,
             means,
             covs,
             lev_lim,
             bottom_sec_limit,
             upper_sec_limit,
             shorting_cost,
             tcost,
             MAXRISK,
             kappa=None,
             bid_ask=None):

    _, num_assets = returns.shape
    T, K = Z_returns.shape
    Zs_time = np.zeros((T - 1, K))

    value_strat, value_benchmark = 1, 1
    vals_strat, vals_benchmark = [value_strat], [value_benchmark]

    benchmark_returns = benchmark.loc[Z_returns.index].copy().values.flatten()
    """
    On the fly computing for stratified model policy    
    """

    W = [np.zeros(18)]
    W[0][8] = 1  # start fully invested in VTI (asset index 8)
    for date in range(1, T):

        # if date % 50 == 0: print(date)

        dt = Z_returns.iloc[date].name.strftime("%Y-%m-%d")

        node = tuple(Z_returns.iloc[date])

        Zs_time[date - 1, :] = [*node]

        if date == 1:
            w_prev = W[0].copy()
        else:
            w_prev = W[-1].flatten().copy()

        w = cp.Variable(num_assets)

        #adding cash asset into returns and covariances
        SIGMA = covs[node]
        MU = means[node]

        roll = 15

        # get the last `roll` days of transaction costs, lagged by one day so
        # this does not include today's date
        tau = np.maximum(bid_ask.loc[:dt].iloc[-(roll + 1):-1].mean().values,
                         0) / 2

        obj = -w @ (MU + 1) + shorting_cost * (kappa @ cp.neg(w)) + tcost * (
            cp.abs(w - w_prev)) @ tau
        cons = [
            cp.quad_form(w, SIGMA) <= MAXRISK * 100 * 100,
            sum(w) == 1,
            cp.norm1(w) <= lev_lim,
            bottom_sec_limit * np.ones(num_assets) <= w,
            w <= upper_sec_limit * np.ones(num_assets),
        ]
        prob_sm = cp.Problem(cp.Minimize(obj), cons)

        prob_sm.solve(verbose=False)

        returns_date = 1 + returns[date, :]

        #get TODAY's tc
        tau_sim = bid_ask.loc[dt].values.flatten() / 2

        value_strat *= returns_date @ w.value - (kappa @ cp.neg(w)).value - (
            cp.abs(w - w_prev) @ tau_sim).value
        vals_strat += [value_strat]

        value_benchmark *= 1 + benchmark_returns[date]
        vals_benchmark += [value_benchmark]

        w_prev = w.value.copy() * returns_date
        W += [w_prev.reshape(-1, 1)]

    vals = pd.DataFrame(data=np.vstack([vals_strat, vals_benchmark]).T,
                        columns=["policy", "benchmark"],
                        index=Z_returns.index.rename("Date"))

    Zs_time = pd.DataFrame(data=Zs_time,
                           index=Z_returns.index[1:],
                           columns=Z_returns.columns)

    #calculate sharpe
    rr = vals.pct_change()
    sharpes = np.sqrt(250) * rr.mean() / rr.std()
    returns = 250 * rr.mean()

    return vals, Zs_time, W, sharpes, returns
Exemplo n.º 55
0
    def test_web(self):
        """
        Note:

        Alternatively, here is a simpler (Gaussian) model for the z variables:

        def get_z_data(self, p, p_pos, q):
            z = []
            for m in range(M):

                mu = None
                if m < p_pos:
                    mu = 0.5*numpy.ones(q)
                else:
                    mu = -0.5*numpy.ones(q)

                Sigma = numpy.eye(q)

                rv = RandomVariableFactory().create_normal_rv(mu, Sigma)
                z += [rv]

            return z
        """
        if self.run_test_web is False:
            self.assertAlmostEqual(1, 1)
            return

        # Create problem data.
        n = 3  # Num dimensions of x
        m = 20  # Num (x,y) train points
        m_pos = math.floor(m / 2)
        m_neg = m - m_pos

        q = 4  # Num dimensions of z
        p = 30  # Num (z,w) train points
        p_pos = math.floor(p / 2)
        p_neg = p - p_pos

        l = math.floor((n + q) / 3)
        u = math.floor(2 * (n + q) / 3)

        C = 1  # L2 regularization trade-off parameter
        ns = 10  # Num samples

        # Create (x,y) data.
        mu_pos = 0.5 * numpy.ones(n)
        mu_neg = -0.5 * numpy.ones(n)
        Sigma = numpy.eye(n)
        x = numpy.matrix(
            numpy.vstack((
                numpy.random.randn(m_pos, n) + mu_pos,
                numpy.random.randn(m_neg, n) + mu_neg,
            )))

        y = numpy.hstack((numpy.ones(m_pos), -1 * numpy.ones(m_neg)))

        # Set up probabilistic model for (z,w) data.
        z = self.get_z_data(p, p_pos, q)
        w = numpy.hstack((numpy.ones(p_pos), -1 * numpy.ones(p_neg)))

        # Create optimization variables.
        a, b = cp.Variable(n), cp.Variable()
        c, d = cp.Variable(q), cp.Variable()

        # Create second stage problem.
        obj2 = [
            cp.log_sum_exp(cp.vstack([0, -w[i] * (c.T * z[i] + d)]))
            for i in range(p)
        ]
        budget = cp.norm1(a) + cp.norm1(c)
        p2 = cp.Problem(cp.Minimize(sum(obj2) + C * cp.norm(c, 2)),
                        [budget <= u])
        Q = partial_optimize(p2, [c, d], [a, b])

        # Create and solve first stage problem.
        obj1 = [
            cp.log_sum_exp(cp.vstack([0, -y[i] * (x[i] * a + b)]))
            for i in range(m)
        ]
        p1 = cp.Problem(
            cp.Minimize(
                cp.sum(obj1) + C * cp.norm(a, 2) + expectation(Q(a, b), ns)),
            [])
        p1.solve()

        self.assert_feas(p1)
Exemplo n.º 57
0
def analysis_z(extracted_data, cmd, keys, gam=0):
    """Analyze data and identify model in discrete time.


    Note: This script analyzes the interaction between two scalar
    signals only. MIMO is possible, but that is left for a future project.

    cmd = "z tmin,tmax order idx1,idx2"

    Params:
        z: Indicator that analysis_z is to be run.
        tmin, tmax: Two ends of the analysis interval.
        order: Desired model order.
        idx1: Index of the input signal.
        idx2: Index of the output signal.

    """
    tmin, tmax = map(float, cmd.split(" ")[1].split(","))
    Norder = int(cmd.split(" ")[2])
    idx1, idx2 = cmd.split(" ")[3].split(",")
    print(
        "-- Analysis interval: [{:.3f}, {:.3f}]\n"
        "-- Desired Norder: {:d}\n"
        "-- Input/Output indices: {:}, {:}".format(tmin, tmax, Norder, idx1,
                                                   idx2))
    data1 = get_data(extracted_data, idx1, keys)
    data2 = get_data(extracted_data, idx2, keys)

    Nstart1 = np.argmin(np.abs(data1['t'] - tmin))
    Nend1 = np.argmin(np.abs(data1['t'] - tmax))

    Nstart2 = np.argmin(np.abs(data2['t'] - data1['t'][Nstart1]))
    Nend2 = Nstart2 + (Nend1 - Nstart1)

    # notation: let a=[a[0]..a[T-1]] and b=[b[0]..b[T-1]] be the
    # coefficients. By definition we have:
    # a[0] x[T] + a[1] x[T-1] + ... + a[T-1] x[0] = b[0] y[T] + b[1] y[T-1] + ... + b[T-1] y[0]
    # Rearranging the equations yields:
    # X a = Y b,
    # where X and Y are not the vectors of xs and ys, but matrices
    # formed by rearranging those vectors appropriately. Note that T is
    # the degree (Norder + 1) of the relevant polynomial coefficients.

    # A constant offset (xoffset) is subtracted from the x data before
    # building X.

    xoffset = 1.0
    X = []
    Y = []
    for i in range(Norder - 1, Nend1 - Nstart1):
        X.append(data1['data'][Nstart1 + i:Nstart1 + i + Norder][::-1] -
                 xoffset)
        Y.append(data2['data'][Nstart2 + i:Nstart2 + i + Norder][::-1])
    X = np.array(X)
    Y = np.array(Y)

    N = Nend1 - Nstart1

    # optimization
    a = cvx.Variable(Norder)
    b = cvx.Variable(Norder)
    obj = cvx.sum_squares(X * a - Y * b) / N
    reg = gam * cvx.norm1(a) + gam * cvx.norm1(b)
    constraints = [b[0] == 1]
    prob = cvx.Problem(cvx.Minimize(obj + reg), constraints)
    prob.solve(solver='MOSEK')

    print("obj: {:f}\nreg: {:f}\na={:}\nb={:}".format(obj.value, reg.value,
                                                      a.value, b.value))

    # validation by running the filter obtained on data
    xin = data1['data'][Nstart1:Nend1] - xoffset
    yact = data2['data'][Nstart2:Nend2]
    bval = np.array(b.value).reshape(-1)
    aval = np.array(a.value).reshape(-1)
    ypred = signal.lfilter(bval, aval, xin)
    plt.plot(-160 * xin, label='xin')
    plt.plot(yact, label='yactual')
    # plt.plot(ypred, '--', label='ypredict')
    plt.legend()
    plt.show()

    scipy.io.savemat("11_1_J3_human.mat", {'x': xin, 'y': yact})
Exemplo n.º 58
0
    def reg(self, X): return self.nu*cp.norm1(X)
    def __str__(self): return "linear reg"
Exemplo n.º 59
0
def fit_ellipse(x, y):
    """
    Fit an ellipse using squared loss or absolute loss.
    """

    #TODO introduce flag for switching between losses

    assert len(x) == len(y)

    N = len(x)
    D = 5

    dat = numpy.zeros((N, D))
    dat[:,0] = x*x
    dat[:,1] = y*y
    #dat[:,2] = x*y
    dat[:,2] = x
    dat[:,3] = y
    dat[:,4] = numpy.ones(N)


    print dat.shape
    dat = cvxpy.matrix(dat)
    #### parameters

    # data
    X = cvxpy.parameter(N, D, name="X")


    #### variables

    # parameter vector
    theta = cvxpy.variable(D, name="theta")

    # simple objective 
    objective = cvxpy.norm1(X*theta)

    # create problem                                    
    p = cvxpy.program(cvxpy.minimize(objective))

    
    p.constraints.append(cvxpy.eq(theta[0,:] + theta[1,:], 1))
   
    ###### set values
    X.value = dat

    p.solve()

    w = numpy.array(theta.value)
    
    #print weights


    ## For clarity, fill in the quadratic form variables
    A              = numpy.zeros((2,2))
    A[0,0]         = w[0]
    A.ravel()[1:3] = 0 #w[2]
    A[1,1]         = w[1]
    bv             = w[2:4]
    c              = w[4]

    ## find parameters
    z, a, b, alpha = util.conic2parametric(A, bv, c)
    print "XXX", z, a, b, alpha

    return z, a, b, alpha
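
# Hypothetical usage sketch: sample noisy points from a known axis-aligned
# ellipse and recover its parameters with fit_ellipse (toy data, assumed
# purely for illustration):
import numpy

phi = numpy.linspace(0, 2 * numpy.pi, 200)
x_pts = 3.0 * numpy.cos(phi) + 0.05 * numpy.random.randn(200)
y_pts = 1.5 * numpy.sin(phi) + 0.05 * numpy.random.randn(200)
z, a, b, alpha = fit_ellipse(x_pts, y_pts)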