Example #1
def miniball(X, L=None):
    r""" Compute the smallest enclosing sphere

	TODO: Use implementation of https://github.com/hbf/miniball

	Parameters
	----------
	X: array-like (M,m)
		Points to enclose in a ball
	L: optional, (m,m)
		Lipschitz-like weighting metric
	
	Returns
	-------
	x: np.array(m)
		Center of circle
	r: float
		radius of circle
	"""
    X = np.array(X)
    M, m = X.shape
    if L is None:
        L = np.eye(m)

    x = cp.Variable(m)
    ones = np.ones((1, M))
    obj = cp.mixed_norm((L @ (cp.reshape(x, (m, 1)) @ ones - X.T)).T, 2,
                        np.inf)
    prob = cp.Problem(cp.Minimize(obj))
    prob.solve()

    return x.value, obj.value
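A minimal usage sketch for the function above, assuming numpy and cvxpy are imported as np and cp and using hypothetical sample points (not from the original source):

# Sketch: enclose random 2-D points and check they all lie within the radius.
X_pts = np.random.randn(20, 2)
center, radius = miniball(X_pts)
# Every point should lie within the computed radius, up to solver tolerance.
assert np.all(np.linalg.norm(X_pts - center, axis=1) <= radius + 1e-4)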
Example #2
def test_mixed_norm(self):
    """Test mixed norm."""
    y = Variable((5, 5))
    obj = Minimize(cp.mixed_norm(y, "inf", 1))
    prob = Problem(obj, [y == np.ones((5, 5))])
    result = prob.solve()
    self.assertAlmostEqual(result, 5)
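The same check can be written standalone; a minimal sketch assuming numpy and cvxpy are imported as np and cp: for an all-ones 5x5 matrix, each row's infinity norm is 1 and their 1-norms sum to 5.

# Sketch: mixed_norm(Y, "inf", 1) sums the per-row infinity norms.
Y = cp.Variable((5, 5))
prob = cp.Problem(cp.Minimize(cp.mixed_norm(Y, "inf", 1)),
                  [Y == np.ones((5, 5))])
assert abs(prob.solve() - 5) < 1e-4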
Example #3
def norm_l1linf(mat):
    r"""The :math:`\ell_1 \otimes \ell_\infty` nuclear norm for matrices.

    Sum of the :math:`\ell_\infty` norms of the rows of the matrix.

    Synonymous with the matrix norm :math:`\Vert \cdot \Vert_{\infty,1}`.

    """
    return cvxpy.mixed_norm(mat, np.inf, 1)
Example #4
def norm_linfl1(mat):
    r"""The :math:`\ell_\infty \otimes \ell_1` nuclear norm for matrices.

    Sum of the :math:`\ell_\infty` norms of the columns of the matrix.

    Synonymous with the matrix norm :math:`\Vert \cdot \Vert_{\infty,1}`
    performed on the transpose of the matrix.

    """
    return cvxpy.mixed_norm(mat.T, np.inf, 1)
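A quick numeric sanity check of the two helpers above (a sketch assuming numpy and cvxpy are imported, with a hypothetical test matrix):

# Sketch: compare both helpers against direct numpy computations.
A = np.array([[1., -2., 3.],
              [4., 5., -6.]])
# Sum of per-row infinity norms: 3 + 6 = 9.
assert np.isclose(norm_l1linf(A).value, np.abs(A).max(axis=1).sum())
# Sum of per-column infinity norms: 4 + 5 + 6 = 15.
assert np.isclose(norm_linfl1(A).value, np.abs(A).max(axis=0).sum())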
Example #5
def costL12(K, X, S, lam):
    # compute log loss
    loss = 0.
    for q in S:
        i, j, k = q
        # should this be Mt^T * K??
        Mt = (2. * np.outer(X[i], X[j]) - 2. * np.outer(X[i], X[k]) -
              np.outer(X[j], X[j]) + np.outer(X[k], X[k]))
        score = cvx.trace(Mt.T @ K)
        loss = loss + cvx.logistic(-score)
    # regularize with the 1,2 norm
    loss = loss / len(S) + lam * cvx.mixed_norm(K, 2, 1)
    return loss
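To actually learn K, the loss above would be minimized over a PSD variable; a minimal sketch, assuming numpy and cvxpy are imported as np and cvx and using hypothetical toy data (not from the original source):

# Sketch: minimize costL12 over a PSD kernel K.
n, d = 10, 3
X = np.random.randn(n, d)
S = [(0, 1, 2), (3, 4, 5)]          # triplets (i, j, k)
K = cvx.Variable((d, d), PSD=True)  # learned PSD matrix
prob = cvx.Problem(cvx.Minimize(costL12(K, X, S, lam=0.1)))
prob.solve()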
Example #6
def _cq_center_cvxpy(Y,
                     L,
                     q=10,
                     xhat=None,
                     solver_opts={'warm_start': True},
                     domain=None):
    xhat_value = xhat

    xhat = cp.Variable(L.shape[1])
    if xhat_value is not None:
        xhat.value = np.copy(xhat_value)

    # This is the objective we want to solve, but
    # all the reductions make this formulation too
    # expensive to use.
    # obj = cp.sum([cp.norm(L*xhat - y)**q for y in Y])

    # Instead we formulate the objective using only
    # matrix operations
    # L @ xhat
    #Lxhat = cp.reshape(xhat.__rmatmul__(L), (L.shape[0],1))
    # outer product so copied over all points
    #LXhat = Lxhat.T.__rmatmul__(np.ones( (len(Y),1)))
    # 2-norm error for all points
    ones_vec = np.ones((Y.shape[0], 1))
    obj = cp.mixed_norm(ones_vec @ cp.reshape(L @ xhat, (1, L.shape[0])) - Y,
                        2, q)
    #norms = cp.sum((LXhat - Y)**2, axis = 1)
    #obj = cp.sum(norms**(q/2.))
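    # Note: mixed_norm(A, 2, q) evaluates (sum_i ||A_i||_2 ** q) ** (1/q), a
    # monotone transform of the commented-out objective above, so both
    # formulations share the same minimizer xhat.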
    constraints = []
    if domain is not None:
        constraints += domain._build_constraints(xhat)

    prob = cp.Problem(cp.Minimize(obj), constraints)
    prob.solve(**solver_opts)
    return np.array(xhat.value)
Example #7
def dilat_lvggm_ccg_cvx_sub(S, alpha, beta, covariance_h, precision_h, max_iter_in=1000, threshold_in=1e-3, verbose=False):
    '''
    A cvx implementation of the Decayed-influence Latent-variable Gaussian
    Graphical Model.

    The subproblem in the Convex-Concave Procedure:

        min_{R} -log det(R) + trace(R*S_t) + alpha*||[1,0]*R*[1;0]||_1
                + gamma*||[0, Theta]*R*[1;0]||_{1,2}

        s.t.    [0,1]*R*[0;1] = Theta^{-1}
                R >= 0 (positive semidefinite)

        where S_t = [Cov(X), -Cov*B*Theta; -Theta*B'*Cov, beta*I]
    '''
    if np.linalg.norm(S-S.T) > 1e-3:
        raise ValueError("Covariance matrix should be symmetric.")

    n = S.shape[0]
    #n1 = covariance.shape[0]
    n2 = covariance_h.shape[0]
    n1 = n - n2
    #if n != (n1+n2):
    if n1 < 0:
        raise ValueError("dimension mismatch n=%d, n1=%d, n2=%d" % (n,n1,n2))
    
    mask = np.zeros((n, n))
    mask[np.ix_(np.arange(n1), np.arange(n1))]  # NOTE: no-op indexing; mask is unused below
    J1 = np.zeros((n, n1))
    J1[np.arange(n1),:] = np.eye(n1)
    J2 = np.zeros((n, n2))
    J2[np.arange(n1,n), :] = np.eye(n2)
    Q = np.zeros((n,n2))
    Q[np.arange(n1,n),:] = precision_h
    #Q  = np.zeros((n, n1))
    #Q[np.arange(n1),:] = -covariance

    J1 = np.asarray(J1)
    J2 = np.asarray(J2)
    Q = np.asarray(Q)
    S = np.asarray(S)

    R = cvx.Variable((n, n), PSD=True)
    # define the SDP problem
    objective = cvx.Minimize(-cvx.log_det(R) + cvx.trace(S @ R)
                             + alpha*(cvx.norm(J1.T @ R @ J1, 1)
                                      + beta*cvx.mixed_norm((J1.T @ R @ Q).T, 2, 1)))
    # alternative penalty: beta*cvx.norm(J1.T @ R @ Q, 1)
    constraints = [J2.T @ R @ J2 == covariance_h]
    # solve the problem
    problem = cvx.Problem(objective, constraints)
    problem.solve(verbose=verbose)

    return np.asarray(R.value)
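The mixed-norm penalty above is a group-sparsity term: for a matrix A, mixed_norm(A.T, 2, 1) is the sum of the 2-norms of A's columns. A minimal sketch (assuming numpy and cvxpy are imported as np and cvx):

# Sketch: mixed_norm(A.T, 2, 1) sums the column 2-norms of A.
A = np.array([[3., 0.],
              [4., 0.]])
assert np.isclose(cvx.mixed_norm(A.T, 2, 1).value, 5.0)  # columns: norm 5 and 0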
Example #8
 (lambda x: cp.norm(x), tuple(), [[[-1, 2], [3, -4]]], Constant([5.47722557])),
 (lambda x: cp.norm(x, 1), tuple(), [v_np], Constant([5])),
 (lambda x: cp.norm(x, 1), tuple(), [[[-1, 2], [3, -4]]], Constant([10])),
 (lambda x: cp.norm(x, "inf"), tuple(), [v_np], Constant([2])),
 (lambda x: cp.norm(x, "inf"), tuple(), [[[-1, 2], [3,
                                                    -4]]], Constant([4])),
 (lambda x: cp.norm(x, "nuc"), tuple(), [[[2, 0], [0, 1]]], Constant([3])),
 (lambda x: cp.norm(x, "nuc"), tuple(), [[[3, 4, 5], [6, 7, 8], [9, 10,
                                                                 11]]],
  Constant([23.173260452512931])),
 (lambda x: cp.norm(x, "nuc"), tuple(), [[[3, 4, 5], [6, 7, 8]]],
  Constant([14.618376738088918])),
 (lambda x: cp.sum_largest(cp.abs(x), 3), tuple(), [[1, 2, 3, -4, -5]],
  Constant([5 + 4 + 3])),
 (lambda x: cp.mixed_norm(x, 1, 1), tuple(), [[[1, 2], [3, 4],
                                               [5, 6]]], Constant([21])),
 (lambda x: cp.mixed_norm(x, 1, 1), tuple(), [[[1, 2, 3],
                                               [4, 5, 6]]], Constant([21])),
 # (lambda x: mixed_norm(x, 2, 1), tuple(), [[[3, 1], [4, math.sqrt(3)]]],
 #     Constant([7])),
 (lambda x: cp.mixed_norm(x, 1, 'inf'), tuple(), [[[1, 4],
                                                   [5,
                                                    6]]], Constant([10])),
 (cp.pnorm, tuple(), [[1, 2, 3]], Constant([3.7416573867739413])),
 (lambda x: cp.pnorm(x, 1), tuple(), [[1.1, 2, -3]], Constant([6.1])),
 (lambda x: cp.pnorm(x, 2), tuple(), [[1.1, 2, -3]],
  Constant([3.7696153649941531])),
 (lambda x: cp.pnorm(x, 2, axis=0), (2, ), [[[1, 2], [3, 4]]],
  Constant([math.sqrt(5), 5.]).T),
 (lambda x: cp.pnorm(x, 2, axis=1), (2, ), [[[1, 2], [4, 5]]],
  Constant([math.sqrt(17), math.sqrt(29)])),
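Each table row can be verified directly; a minimal sketch for the mixed_norm entries, assuming numpy and cvxpy are imported as np and cp (note the nested lists in the table are column-major, as the pnorm axis=0 entry shows):

# Sketch: check two mixed_norm rows from the table above.
A = np.array([[1, 2], [3, 4], [5, 6]]).T     # columns [1,2], [3,4], [5,6]
assert np.isclose(cp.mixed_norm(A, 1, 1).value, 21)       # sum of all |entries|
B = np.array([[1, 4], [5, 6]]).T             # columns [1,4] and [5,6]
assert np.isclose(cp.mixed_norm(B, 1, 'inf').value, 10)   # max row 1-norm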
Example #9
def train_network(X_f,
                  X_i,
                  X_s,
                  Y_i,
                  dY_i=None,
                  beta=1e-6,
                  M=int(5000),
                  fuse_xxstar=False,
                  abs_act=False,
                  sdf=True,
                  norm=2):
    n_f, d = X_f.shape
    n_i, d = X_i.shape
    X_f = np.append(X_f, np.ones((n_f, 1)), axis=1)
    X_i = np.append(X_i, np.ones((n_i, 1)), axis=1)
    X_s = np.append(X_s, np.ones((n_i, 1)), axis=1)
    d += 1
    # Y_i = np.atleast_1d(Y_i.squeeze())

    dual_norm = np.inf if norm == 1 else int(1 / (1 - 1 / float(norm)))

    ## Finite approximation of all possible sign patterns
    t0 = time.time()
    if n_f + 2 * n_i < 10:
        D_f, D_i, D_s = enumerate_signed_patterns(X_f, X_i, X_s, fuse_xxstar)
    else:
        D_f, D_i, D_s = random_signed_patterns(X_f,
                                               X_i,
                                               X_s,
                                               M,
                                               fuse_xxstar=fuse_xxstar)
    m1 = D_f.shape[1]
    print(f'Dmat creation: {time.time()-t0}s, {m1} arrangements identified.')

    # Optimal CVX
    Uopt0 = cp.Variable((d, 1), value=np.random.randn(d, 1))
    Uopt1 = cp.Variable((d, m1), value=np.random.randn(d, m1))
    Uopt2 = cp.Variable((d, m1), value=np.random.randn(d, m1))
    constraints = []
    loss = 0

    # Feasible points
    if n_f > 0:
        ux_f_1 = cp.multiply(D_f, (X_f @ Uopt1))
        ux_f_2 = cp.multiply(D_f, (X_f @ Uopt2))
        if abs_act:
            y_f = X_f @ Uopt0 + cp.sum(ux_f_1 - ux_f_2, axis=1, keepdims=True)
            deriv_f = D_f @ (Uopt1 - Uopt2).T + Uopt0.T
        else:
            y_f = X_f @ Uopt0 + cp.sum(cp.multiply((D_f + 1) / 2,
                                                   (X_f @ (Uopt1 - Uopt2))),
                                       axis=1,
                                       keepdims=True)
            deriv_f = ((D_f + 1) / 2) @ (Uopt1 - Uopt2).T + Uopt0.T

        constraints += [ux_f_1 >= 0, ux_f_2 >= 0]
        Y_f_lb = np.max(Y_i - np.linalg.norm(
            X_f[None, :, :] - X_i[:, None, :], axis=-1, ord=norm),
                        axis=0,
                        keepdims=True).T

        if sdf:
            loss_f = cp.sum(cp.abs(y_f - Y_f_lb))
        else:
            loss_f = cp.sum(cp.pos(y_f))
        loss = loss + loss_f
        lipsh_f = cp.sum(cp.neg(y_f - Y_f_lb))
    else:
        loss_f = cp.Variable(value=0.)
        lipsh_f = cp.Variable(value=0.)

    # Infeasible points
    if n_i > 0:
        ux_i_1 = cp.multiply(D_i, (X_i @ Uopt1))
        ux_s_1 = cp.multiply(D_s, (X_s @ Uopt1))
        ux_i_2 = cp.multiply(D_i, (X_i @ Uopt2))
        ux_s_2 = cp.multiply(D_s, (X_s @ Uopt2))

        if abs_act:
            y_i = X_i @ Uopt0 + cp.sum(ux_i_1 - ux_i_2, axis=1, keepdims=True)
            y_s = X_s @ Uopt0 + cp.sum(ux_s_1 - ux_s_2, axis=1, keepdims=True)
            deriv_i = D_i @ (Uopt1 - Uopt2).T + Uopt0.T
            deriv_s = D_s @ (Uopt1 - Uopt2).T + Uopt0.T
        else:
            y_i = X_i @ Uopt0 + cp.sum(cp.multiply((D_i + 1) / 2,
                                                   (X_i @ (Uopt1 - Uopt2))),
                                       axis=1,
                                       keepdims=True)
            y_s = X_s @ Uopt0 + cp.sum(cp.multiply((D_s + 1) / 2,
                                                   (X_s @ (Uopt1 - Uopt2))),
                                       axis=1,
                                       keepdims=True)
            deriv_i = ((D_i + 1) / 2) @ (Uopt1 - Uopt2).T + Uopt0.T
            deriv_s = ((D_s + 1) / 2) @ (Uopt1 - Uopt2).T + Uopt0.T

        constraints += [
            ux_i_1 >= 0,
            ux_s_1 >= 0,
            ux_i_2 >= 0,
            ux_s_2 >= 0,
        ]

        loss_i = cp.sum(cp.abs(Y_i - y_i))
        loss_s = cp.sum(cp.abs(y_s))
        if dY_i is not None:
            loss_di = cp.sum(cp.abs(deriv_i[:, :-1] - dY_i))
            loss_ds = cp.sum(cp.abs(deriv_s[:, :-1] - dY_i))
        else:
            loss_di = cp.Variable(value=0., nonneg=True)
            loss_ds = cp.Variable(value=0., nonneg=True)

        loss = loss + loss_i + loss_s + loss_ds + loss_di
        lipsh_i = cp.sum(cp.neg(Y_i - y_i))
    else:
        loss_i = cp.Variable(value=0.)
        loss_s = cp.Variable(value=0.)
        lipsh_i = cp.Variable(value=0.)
        lipsh_s = cp.Variable(value=0.)
        loss_di = cp.Variable(value=0.)
        loss_ds = cp.Variable(value=0.)

    # Regularization
    groupnorm_reg = cp.norm(Uopt0) + cp.mixed_norm(
        Uopt1.T, 2, 1) + cp.mixed_norm(Uopt2.T, 2, 1)
    if norm == 2:
        reg_nrm = cp.norm(Uopt0[:-1]) + cp.mixed_norm(
            Uopt1[:-1].T, 2, 1) + cp.mixed_norm(Uopt2[:-1].T, 2, 1)
    elif norm == 1:
        reg_nrm = cp.Variable(nonneg=True)
        for i in range(d - 1):
            for s in [-1, 1]:
                constraints += [
                    cp.sum(cp.pos(Uopt1[i] * s) + cp.pos(-Uopt2[i] * s)) <=
                    reg_nrm
                ]
    reg = reg_nrm + groupnorm_reg / 100.

    # Solution
    prob = cp.Problem(cp.Minimize(100 * (loss + beta * reg)), constraints)
    t0 = time.time()
    options = {}  #dict(mosek_params = {'MSK_DPAR_BASIS_TOL_X':1e-8})
    prob.solve(verbose=True, **options)
    # prob.solve(solver=cp.SCS, verbose=True)

    print(
        f'Status: {prob.status}, \n '
        f'Value: {prob.value :.2E}, \n '
        f'loss_f: {loss_f.value :.2E}, \n '
        f'loss_i: {loss_i.value :.2E}, \n '
        f'loss_s: {loss_s.value :.2E}, \n '
        f'loss_di: {loss_di.value :.2E}, \n '
        f'loss_ds: {loss_ds.value :.2E}, \n '
        f'Reg: {reg.value : .2E}, \n '
        # # f'lipsh_f: {lipsh_f.value :.2E}, \n '
        # # f'lipsh_i: {lipsh_i.value :.2E}, \n '
        f'Time: {time.time()-t0 :.2f}s')
    if prob.status.lower() == 'infeasible':
        st()
        return None
    u0, u1, u2 = torch.tensor(Uopt0.value), torch.tensor(
        Uopt1.value), torch.tensor(Uopt2.value)
    torch.save(
        {
            'u1': u1,
            'u2': u2,
            'u0': u0,
            'D_f': torch.tensor(D_f),
            'D_i': torch.tensor(D_i),
            'D_s': torch.tensor(D_s)
        }, 'tmp.csv')
    return u0, u1, u2
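The cp.mixed_norm(U.T, 2, 1) terms above act as a group-lasso penalty: each column of U (one neuron's weight vector) forms a group, and summing the column 2-norms encourages entire neurons to be zeroed out. A minimal illustration (assuming numpy and cvxpy are imported as np and cp):

# Sketch: mixed_norm(U.T, 2, 1) sums the 2-norms of U's columns.
U = np.array([[3., 0., 1.],
              [4., 0., 1.]])
assert np.isclose(cp.mixed_norm(U.T, 2, 1).value, 5. + 0. + np.sqrt(2.))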
Example #10
def minimax_lloyd(domain,
                  M,
                  L=None,
                  maxiter=100,
                  Xhat=None,
                  verbose=True,
                  xtol=1e-5,
                  full=None):
    r""" A fixed point iteration for a minimax design

	This algorithm can be interpreted as a block coordinate descent type algorithm
	for the optimal minimax experimental design on the given domain.

	
	SD96.	
	"""
    # Terminate early if we have a simple case
    try:
        return minimax_design_1d(domain, M, L=L)
    except AssertionError:
        pass

    if full is None:
        if len(domain) < 3:
            _update_voronoi = _update_voronoi_full
        else:
            _update_voronoi = _update_voronoi_sample
    elif full is True:
        _update_voronoi = _update_voronoi_full
    elif full is False:
        _update_voronoi = _update_voronoi_sample

    if L is None:
        L = np.eye(len(domain))

    M0 = 10 * len(domain) * M
    X0 = domain.sample(M0)

    if Xhat is None:
        if verbose:
            print(10 * '=' + " Building Coffeehouse Design " + 10 * '=')
        Xhat = maximin_coffeehouse(domain, M, L, verbose=verbose)
        if verbose:
            print('\n' + 10 * '=' + " Building Maximin Design " + 10 * '=')
        Xhat = maximin_block(domain,
                             M,
                             L=L,
                             maxiter=50,
                             verbose=verbose,
                             X0=Xhat)
        # The Shrinkage suggested by Pro17 hasn't been demonstrated to be useful with this initialization, so we avoid it
        if verbose:
            print('\n' + 10 * '=' + " Building Minimax Design " + 10 * '=')

    x = cp.Variable(len(domain))
    c = cp.Variable(len(domain))
    constraints = domain._build_constraints(x)

    if verbose:
        printer = IterationPrinter(it='4d', minimax='18.10e', dx='9.3e')
        printer.print_header(it='iter', minimax='minimax est', dx='Δx')

    V = domain.sample(M0)
    Xhat_new = np.zeros_like(Xhat)

    for it in range(maxiter):

        # Compute new Voronoi vertices
        V = _update_voronoi(domain, Xhat, V, L, M0)
        D = cdist(Xhat, V, L=L)
        d = np.min(D, axis=0)

        for k in range(M):
            # Identify closest points to Xhat[k]
            I = np.isclose(D[k], d)

            # Move the Xhat[k] to the circumcenter
            ones = np.ones((1, np.sum(I)))
            obj = cp.mixed_norm(
                (L @ (cp.reshape(x, (len(domain), 1)) @ ones - V[I].T)).T, 2,
                np.inf)
            prob = cp.Problem(cp.Minimize(obj), constraints)
            prob.solve()
            Xhat_new[k] = x.value

        dx = np.max(np.sqrt(np.sum((Xhat_new - Xhat)**2, axis=1)))

        if verbose:
            printer.print_iter(it=it, minimax=np.max(d), dx=dx)

        Xhat[:, :] = Xhat_new[:, :]

        if dx < xtol:
            if verbose:
                print('small change in design')
            break

    return Xhat
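The circumcenter subproblem above relies on the identity that cp.mixed_norm(A, 2, np.inf) is the largest row 2-norm of A, i.e., the maximum weighted distance from x to the active Voronoi vertices; minimizing it places x at their Chebyshev center. A minimal equivalence sketch (assuming numpy and cvxpy are imported as np and cp):

# Sketch: mixed_norm(A, 2, np.inf) equals the largest row 2-norm.
A = np.array([[3., 4.], [1., 0.]])
assert np.isclose(cp.mixed_norm(A, 2, np.inf).value,
                  np.linalg.norm(A, axis=1).max())  # = 5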
def train_network(X_f, X_i, X_s, Y_i, beta, M=int(5000), fuse_xxstar=False, abs_act=False, sdf=True, norm=2):
    n_f, d = X_f.shape
    n_i, d = X_i.shape
    X_f = np.append(X_f,np.ones((n_f,1)),axis=1)
    X_i = np.append(X_i,np.ones((n_i,1)),axis=1)
    X_s = np.append(X_s,np.ones((n_i,1)),axis=1)
    d += 1
    Y_i = np.atleast_1d(Y_i.squeeze())

    dual_norm = np.inf if norm==1 else int(1/(1-1/float(norm)))

    ## Finite approximation of all possible sign patterns
    t0 = time.time()
    if n_f + 2*n_i < 15:
        D_f, D_i, D_s = enumerate_signed_patterns(X_f, X_i, X_s, fuse_xxstar)
    else:
        D_f, D_i, D_s = random_signed_patterns(X_f, X_i, X_s, M, fuse_xxstar=fuse_xxstar)
    m1 = D_f.shape[1]
    print(f'Dmat creation: {time.time()-t0}s, {m1} arrangements identified.')

    # Optimal CVX
    Uopt1=cp.Variable((d,m1), value=np.random.randn(d,m1))
    Uopt2=cp.Variable((d,m1), value=np.random.randn(d,m1))
    constraints = []
    loss = 0

    # Feasible points
    if n_f > 0:
        ux_f_1 = cp.multiply(D_f,(X_f @ Uopt1))
        ux_f_2 = cp.multiply(D_f,(X_f @ Uopt2))
        y_f = cp.sum(ux_f_1 - ux_f_2,axis=1) if abs_act \
                 else cp.sum(cp.multiply((D_f+1)/2,(X_f @ (Uopt1 - Uopt2))),axis=1)
        constraints += [
        ux_f_1>=0,
        ux_f_2>=0
        ]

        Y_f_lb = np.max(Y_i[:, None] - np.linalg.norm(X_f[None, :, :] - X_i[:, None, :], axis=-1, 
                                    ord=dual_norm), axis=0)
        if sdf:
            loss_f = cp.sum(cp.abs(y_f - Y_f_lb))
        else:
            loss_f = cp.sum(cp.pos(y_f))
        loss = loss + loss_f
        lipsh_f = cp.sum(cp.neg(y_f - Y_f_lb))
    else:
        loss_f = cp.Variable(value=0.)
        lipsh_f = cp.Variable(value=0.)

    # Infeasible points
    if n_i > 0:
        ux_i_1 = cp.multiply(D_i,(X_i @ Uopt1))
        ux_s_1 = cp.multiply(D_s,(X_s @ Uopt1))
        ux_i_2 = cp.multiply(D_i,(X_i @ Uopt2))
        ux_s_2 = cp.multiply(D_s,(X_s @ Uopt2))
    
        if abs_act:
            y_i = cp.sum(ux_i_1 - ux_i_2,axis=1)
            y_s = cp.sum(ux_s_1 - ux_s_2,axis=1)
            if sdf:
                if norm == 2:
                    constraints += [cp.sum(cp.square((Uopt1[:2] - Uopt2[:2]) @ D_i.T), axis=0) <= 1]
                elif norm == 1:
                    constraints += [cp.max(cp.abs((Uopt1[:2] - Uopt2[:2]) @ D_i.T), axis=0) <= 1]
        else:
            y_i = cp.sum(cp.multiply((D_i+1)/2,(X_i @ (Uopt1 - Uopt2))),axis=1)
            y_s = cp.sum(cp.multiply((D_s+1)/2,(X_s @ (Uopt1 - Uopt2))),axis=1)
            if sdf:
                if norm == 2:
                    constraints += [cp.sum(cp.square((Uopt1[:2] - Uopt2[:2]) @ ((D_i+1)/2).T), axis=0) <= 1]
                elif norm == 1:
                    constraints += [cp.max(cp.abs((Uopt1[:2] - Uopt2[:2]) @ ((D_i+1)/2).T), axis=0) <= 1]
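        # The constraints just above appear to bound the gradient of the fitted
        # network on each sign pattern (2-norm for norm==2, max-abs for
        # norm==1), i.e., an approximately 1-Lipschitz signed-distance fit.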

        constraints += [
            ux_i_1>=0,
            ux_s_1>=0,
            ux_i_2>=0,
            ux_s_2>=0,
        ]
        loss_i = cp.sum(cp.abs(Y_i - y_i))
        loss_s = cp.sum(cp.abs(y_s))
        loss = loss + loss_i + loss_s
        lipsh_i = cp.sum(cp.neg(Y_i - y_i))
    else:
        loss_i = cp.Variable(value=0.)
        loss_s = cp.Variable(value=0.)
        lipsh_i = cp.Variable(value=0.)
        lipsh_s = cp.Variable(value=0.)

    # Regularization
    if norm == 2:
        reg = cp.mixed_norm(Uopt1[:-1].T,2,1) + cp.mixed_norm(Uopt2[:-1].T,2,1)
    elif norm == 1:
        reg = cp.Variable(nonneg=True)
        for i in range(d-1):
            for s in [-1,1]:
                constraints += [cp.sum(cp.pos(Uopt1[i] * s) + cp.pos(-Uopt2[i] * s)) <= reg]

    # Solution
    prob=cp.Problem(cp.Minimize(100*(loss + beta * reg)),constraints)
    t0 = time.time()
    options = dict(mosek_params = {'MSK_DPAR_BASIS_TOL_X':1e-8})
    prob.solve(solver=cp.MOSEK, verbose=True, **options)
    # prob.solve(solver=cp.SCS)

    print(f'Status: {prob.status}, \n '
        f'Value: {prob.value :.2E}, \n '
        f'loss_f: {loss_f.value :.2E}, \n '
        f'loss_i: {loss_i.value :.2E}, \n '
        f'loss_s: {loss_s.value :.2E}, \n '
        f'Reg: {reg.value : .2E}, \n ' 
        f'lipsh_f: {lipsh_f.value :.2E}, \n '
        f'lipsh_i: {lipsh_i.value :.2E}, \n '
        f'Time: {time.time()-t0 :.2f}s')
    if prob.status.lower() == 'infeasible':
        st()
        return None
    u1, u2 = torch.tensor(Uopt1.value), torch.tensor(Uopt2.value)
    st()

    return u1, u2