def generate_problem(nD_x, nD_y, nD_z, tv, dist):
    # Problem data.
    m, n = nD_x.shape

    X = np.abs(nD_x)**2 / N_FFT
    Y = np.abs(nD_y)**2 / N_FFT
    Z = np.abs(nD_z)**2 / N_FFT

    a = cvx.Variable(n)
    b = cvx.Variable(n)

    tv_coeff = cvx.Parameter(nonneg=True, value=tv, name='tv')
    dist_coeff = cvx.Parameter(nonneg=True, value=dist, name='dist')

    combination = X * cvx.diag(a) + Y * cvx.diag(b)
    objective = cvx.Minimize(
        cvx.norm(Z - combination, 'fro') + dist_coeff *
        (cvx.sum_squares(a) + cvx.sum_squares(b)) + tv_coeff *
        (cvx.tv(a) + cvx.tv(b)))

    # constraints = []
    constraints = [0 <= a, a <= 1, 0 <= b, b <= 1]

    for i in range(n - 1):
        constraints.append(a[i] >= a[i + 1])
        constraints.append(b[i] <= b[i + 1])

    prob = cvx.Problem(objective, constraints)

    return prob
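
# A small numpy aside (not from the original source): right-multiplying by diag(a)
# scales column j of X by a[j], so "combination" above mixes the columns of X and Y
# with per-column weights a and b. X_demo and a_demo are illustrative stand-ins.
import numpy as np
X_demo = np.arange(6.0).reshape(2, 3)
a_demo = np.array([1.0, 0.5, 0.0])
assert np.allclose(X_demo @ np.diag(a_demo), X_demo * a_demo)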
Example #2
def regularizer(beta):
    sum_across_rows, sum_across_cols = 0, 0
    rows, cols = beta.shape
    for i in range(cols):
        sum_across_rows = sum_across_rows + cp.tv(beta[:, i])
    for i in range(rows):
        sum_across_cols = sum_across_cols + cp.tv(beta[i, :])
    return sum_across_rows + sum_across_cols
Example #3
def regularizer(beta):
    rows, cols = beta.shape
    sum_across_rows = cp.tv(beta[:, 0])
    for i in range(1, cols):
        sum_across_rows = sum_across_rows + cp.tv(beta[:, i])
    sum_across_cols = cp.tv(beta[0, :])
    for i in range(1, rows):
        sum_across_cols = sum_across_cols + cp.tv(beta[i, :])
    return sum_across_rows + sum_across_cols
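
# A minimal check (not from the original source): evaluate the regularizer above on a
# small matrix with a fixed value; it sums the 1-D total variation of every column and
# every row of beta. beta_demo is an illustrative stand-in.
import numpy as np
import cvxpy as cp
beta_demo = cp.Variable((3, 4))
beta_demo.value = np.arange(12.0).reshape(3, 4)
print(regularizer(beta_demo).value)  # sum of column-wise and row-wise 1-D TVs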
Example #4
def plot_tv_vary_lambda_diff():
    y = X[:, 58]
    plt.plot([2 * i for i in range(np.diff(y).size)],
             np.diff(y),
             label='original')
    lambdas = [1, 10, 25]
    for vlambda in lambdas:
        # vlambda = 50

        x = cvx.Variable(y.size)
        obj = cvx.Minimize(0.5 * cvx.sum_squares(y - x) + vlambda * cvx.tv(x))
        prob = cvx.Problem(obj)
        # ECOS and SCS solvers fail to converge before
        # the iteration limit. Use CVXOPT instead.
        prob.solve(solver=cvx.CVXOPT, verbose=True)
        if prob.status != cvx.OPTIMAL:
            raise Exception("Solver did not converge!")
        xv = np.array(x.value).flatten()
        plt.plot([2 * i for i in range(np.diff(xv).size)],
                 np.diff(xv),
                 label=r'TV $\lambda=$' + str(vlambda))

    plt.legend()
    plt.xlim([0, 150])
    plt.xlabel('Time (s)')
    plt.ylabel('Neural response 1st derivative')
    plt.savefig('figs/tv_vary_diff.pdf')
    plt.show()
Example #5
def generate_problem_fade_constrained(nD_x, nD_y, nD_z, tv, dist):
    # Problem data.
    m, n = nD_x.shape

    X = (np.abs(nD_x)**2) / N_FFT
    Y = (np.abs(nD_y)**2) / N_FFT
    Z = (np.abs(nD_z)**2) / N_FFT

    a = cvx.Variable(n)

    tv_coeff = cvx.Parameter(nonneg=True, value=tv, name='tv')
    dist_coeff = cvx.Parameter(nonneg=True, value=dist, name='dist')

    combination = X * cvx.diag(a) + Y * cvx.diag(1 - a)
    objective = cvx.Minimize(
        cvx.norm(combination - Z, 'fro') + dist_coeff * cvx.sum_squares(a) +
        tv_coeff * cvx.tv(a))

    constraints = [0 <= a, a <= 1]

    for i in range(n - 1):
        constraints.append(a[i] >= a[i + 1])

    prob = cvx.Problem(objective, constraints)

    return prob
Example #6
    def getMse(lam):
        prob = cp.Problem(cp.Minimize(0.5*cp.sum_squares(X_-Y_)+lam*cp.tv(Y_)))
        mses_lam = []

        # testOut = os.path.join(workDir, 'test-imgs', 'lam-{:07.2f}'.format(lam))
        # os.makedirs(testOut, exist_ok=True)

        for i in range(nTest):
            X_.value = testX[i]
            prob.solve(cp.SCS)
            assert('optimal' in prob.status)
            Yhat = np.array(Y_.value).ravel()
            mse = np.mean(np.square(testY[i] - Yhat))

            mses_lam.append(mse)

            # if i <= 4:
            #     fig, ax = plt.subplots(1, 1)
            #     plt.plot(testX[i], label='Corrupted')
            #     plt.plot(testY[i], label='Original')
            #     plt.plot(Yhat, label='Predicted')
            #     plt.legend()
            #     f = os.path.join(testOut, '{}.png'.format(i))
            #     fig.savefig(f)
            #     plt.close(fig)

        return np.mean(mses_lam)
Example #7
    def test_inpainting(self):
        """Test image in-painting.
        """
        import numpy as np
        np.random.seed(1)
        rows, cols = 100, 100
        # Generate a random grayscale image in place of loading one from disk.
        Uorig = np.random.randint(0, 255, size=(rows, cols))

        rows, cols = Uorig.shape
        # Known is 1 if the pixel is known,
        # 0 if the pixel was corrupted.
        Known = np.zeros((rows, cols))
        for i in range(rows):
            for j in range(cols):
                if np.random.random() > 0.7:
                    Known[i, j] = 1
        Ucorr = Known * Uorig
        # Recover the original image using total variation in-painting.
        U = cvx.Variable((rows, cols))
        obj = cvx.Minimize(cvx.tv(U))
        constraints = [cvx.multiply(Known, U) == cvx.multiply(Known, Ucorr)]
        prob = cvx.Problem(obj, constraints)
        prob.solve(solver=cvx.SCS)
Example #8
def cvx_solve(x,y,z):

    num_centers = np.size(x,0)
    num_grids   = np.size(y,0)
    num_samples = np.size(z,0)

    yyt = y.dot(y.T)
    xxt = x.dot(x.T)
    xyt = x.dot(y.T)

    m   = np.kron(np.ones(num_centers), np.sum(np.mat(np.power(z, 2)), 1)) + \
        np.kron(np.ones([num_samples, 1]), np.sum(np.mat(np.power(x, 2)), 1).T) - 2 * z.dot(x.T)

    t = cvx.Variable((num_grids,num_centers))
    r = cvx.Variable((num_samples,num_centers))

    constraints = [t*np.ones([num_centers,1])==np.ones([num_grids,1]),
                   r*np.ones([num_centers,1])==np.ones([num_samples,1]),
                   t.T*np.ones([num_grids,1])/num_grids == r.T*np.ones([num_samples,1])/num_samples,
                   t>=0,r>=0]

    err = np.trace(yyt)-2*cvx.trace(t*xyt)+cvx.tv(t)
    for i in range(num_grids):
        err += cvx.quad_form(t[i],xxt)
    err += cvx.trace(r*m.T)

    prob = cvx.Problem(cvx.Minimize(err),constraints)
    prob.solve(solver=cvx.MOSEK)

    return t.value,r.value
Example #9
def DFFweights(A, B):
    """
    This function minimizes (total variance(B/mean(A*x)) where A.shape (m x n), X.shape (n x k), and B.shape (m x k)
    where m = image flattened in row-major order, n = number of EigenFlats, and k = observations.
    A is the array of EigenFlats, X is the coefficients for each EigenFlat, B is the observation to correct.
    """
    k = B.shape[1]
    m = B.shape[0]
    n = A.shape[1]

    # Preallocate the coefficient array (one column of coefficients per observation).
    coeff = np.zeros((n, k))

    b = cp.Parameter(m, nonneg=True)
    x = cp.Variable(n, nonneg=True)

    totalVariance = cp.tv((b * cp.inv_pos(cp.sum(A * x))))
    objective = cp.Minimize(totalVariance)
    prob = cp.Problem(objective)

    # Loop through all spectra and find optimal solutions with the warm start
    for ind in range(0, k):
        b.value = B[:, ind]
        loss = prob.solve()
        coeff[:, ind] = x.value

    return coeff
Example #10
def image_inpainting_sparse(vu, depths, nH, nW):

    depth_map_sparse = np.zeros((nH, nW))

    vu_int = (vu + 0.5).astype(np.int32)
    vu_int[0] = np.maximum(np.minimum(vu_int[0], nH - 1), 0)
    vu_int[1] = np.maximum(np.minimum(vu_int[1], nW - 1), 0)

    depth_map_sparse[vu_int[0], vu_int[1]] = depths

    Known = np.zeros((nH, nW))
    Known[vu_int[0], vu_int[1]] = 1

    U = cp.Variable(nH, nW)
    obj = cp.Minimize(cp.tv(U))
    constraints = [
        cp.mul_elemwise(Known, U) == cp.mul_elemwise(Known, depth_map_sparse)
    ]
    prob = cp.Problem(obj, constraints)
    # Use SCS to solve the problem.
    prob.solve(verbose=True, solver=cp.SCS)

    depth_map = U.value

    return depth_map
Example #11
def create(n, lam):
    # get data
    A = np.rot90(scipy.misc.imread(IMAGE), -1)[400:1400,600:1600]
    Y = scipy.misc.imresize(A, (n,n))

    # set up problem
    X = [cp.Variable(n,n) for i in range(3)]
    f = sum([cp.sum_squares(X[i] - Y[:,:,i]) for i in range(3)]) + lam * cp.tv(*X)
    return cp.Problem(cp.Minimize(f))
Example #12
def create(n, lam):
    # get data
    A = np.rot90(scipy.misc.imread(IMAGE), -1)[400:1400, 600:1600]
    Y = scipy.misc.imresize(A, (n, n))

    # set up problem
    X = [cp.Variable(n, n) for i in range(3)]
    f = sum([cp.sum_squares(X[i] - Y[:, :, i])
             for i in range(3)]) + lam * cp.tv(*X)
    return cp.Problem(cp.Minimize(f))
Example #13
def tv(Ax, Ay, B, verbose=False):
    '''
    Minimizes total variation:
        min ||X||_TV  subject to  Ay @ X @ Ax.T == B,
    i.e. (Ax kron Ay) @ vec(X) == vec(B).
    '''
    X = cp.Variable([Ay.shape[1], Ax.shape[1]], complex=True)
    objective = cp.Minimize(cp.tv(X))
    constraints = [Ay * X * Ax.T == B]
    problem = cp.Problem(objective, constraints)
    problem.solve(verbose=verbose, **args_tv)
    return X.value
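
# A small numpy check (not from the original source) of the identity behind the
# constraint above: with column-major vec, vec(Ay @ X @ Ax.T) == kron(Ax, Ay) @ vec(X).
import numpy as np
rng = np.random.default_rng(0)
Ax_d, Ay_d, X_d = rng.random((4, 3)), rng.random((5, 2)), rng.random((2, 3))
lhs = (Ay_d @ X_d @ Ax_d.T).flatten(order='F')
rhs = np.kron(Ax_d, Ay_d) @ X_d.flatten(order='F')
assert np.allclose(lhs, rhs)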
Example #14
 def tv_inpainting():
     Ucorr = known * Uorig  # This is elementwise mult on numpy arrays.
     variables = []
     constraints = []
     for i in range(colors):
         U = cp.Variable(shape=(rows, cols))
         variables.append(U)
         constraints.append(
             cp.multiply(known[:, :, i], U) == cp.multiply(
                 known[:, :, i], Ucorr[:, :, i]))
     problem = cp.Problem(cp.Minimize(cp.tv(*variables)), constraints)
     problem.get_problem_data(cp.SCS)
Example #15
def fill_cvx(img, mask, max_iters=1500):
    """Fill in masked pixels in `img` using TV convex optimization"""
    import cvxpy as cp

    U = cp.Variable(shape=img.shape)
    obj = cp.Minimize(cp.tv(U))
    constraints = [cp.multiply(~mask, U) == cp.multiply(~mask, img)]
    prob = cp.Problem(obj, constraints)
    # Use SCS to solve the problem.
    prob.solve(verbose=True, solver=cp.SCS, max_iters=max_iters)
    print("optimal objective value: {}".format(obj.value))
    return prob, U
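
# A hypothetical usage sketch (the image and mask below are synthetic stand-ins, not
# from the original source): mask is True where a pixel is missing and must be filled.
import numpy as np
rng = np.random.default_rng(0)
img_demo = rng.random((20, 20))
mask_demo = rng.random((20, 20)) < 0.3       # ~30% of the pixels are missing
prob_demo, U_demo = fill_cvx(img_demo, mask_demo, max_iters=500)
filled_demo = U_demo.value                   # TV-inpainted image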
Example #16
def main():
    img_src = cv2.imread("birds_gray.png", 0)
    img_src = cv2.resize(img_src, None, fx=0.2, fy=0.2)
    height, width = img_src.shape
    N = height * width

    # 5x5 Gaussian filter (symmetric)
    kernel = np.array(
        [[0.00854167, 0.02230825, 0.03072131, 0.02230825, 0.00854167],
         [0.02230825, 0.05826239, 0.08023475, 0.05826239, 0.02230825],
         [0.03072131, 0.08023475, 0.11049350, 0.08023475, 0.03072131],
         [0.02230825, 0.05826239, 0.08023475, 0.05826239, 0.02230825],
         [0.00854167, 0.02230825, 0.03072131, 0.02230825, 0.00854167]])

    A = kernel2matrixA(kernel, height, width)

    x_src = np.reshape(img_src, N)
    sigma = 5
    n = np.random.normal(0, sigma, N)
    #b = Ax + n
    b = np.clip(np.dot(A, x_src) + n, 0, 255)
    img_blur_f = np.reshape(b, (height, width))
    img_blur = img_blur_f.astype(np.uint8)

    print("image size", height, width)
    print("sigma", sigma)

    print("sparse optimisation")
    Lambda = 0.5
    print("lambda", Lambda)

    x = cp.Variable(N)
    obj = cp.Minimize(
        cp.sum_squares(b - A * x) / 2 +
        Lambda * cp.tv(cp.reshape(x, (height, width))))
    constraints = [0 <= x, x <= 255]
    prob = cp.Problem(obj, constraints)
    prob.solve()

    img_dst = np.reshape(np.clip(x.value, 0, 255),
                         (height, width)).astype(np.uint8)

    print("blur PSNR", psnr(img_src, img_blur_f), "[dB]")
    print("dst PSNR", psnr(img_src, img_dst), "[dB]")

    cv2.imshow("src", img_src)
    cv2.imshow("blur", img_blur)
    cv2.imshow("dst", img_dst)
    cv2.imwrite("result/src.png", img_src)
    cv2.imwrite("result/blur.png", img_blur)
    cv2.imwrite("result/deblur.png", img_dst)
    cv2.waitKey(0)
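
# kernel2matrixA is not shown in this snippet; the sketch below is a hypothetical
# stand-in (an assumption, not the original implementation) illustrating the idea:
# build a dense N x N matrix A so that A @ x equals the zero-padded 'same'-size 2-D
# convolution of the flattened (height x width) image x with the kernel.
import numpy as np
from scipy.signal import convolve2d

def kernel2matrixA_sketch(kernel, height, width):
    N = height * width
    A = np.zeros((N, N))
    for j in range(N):
        e = np.zeros((height, width))
        e[j // width, j % width] = 1.0        # j-th basis image (row-major index)
        A[:, j] = convolve2d(e, kernel, mode='same').ravel()
    return A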
Example #17
def image_inpainting_dense(depth_map, lambda_coef=1):

    nH, nW = depth_map.shape
    U = cp.Variable(nH, nW)
    obj = cp.Minimize(cp.tv(U) + lambda_coef * cp.sum_squares(depth_map - U))
    constraints = []
    prob = cp.Problem(obj, constraints)

    prob.solve(verbose=True, solver=cp.SCS)

    depth_map_tv = U.value

    return depth_map_tv
Example #18
def tv_full(X):
    Y = X[:, :10]
    vlambda = 10

    x = cvx.Variable(Y.shape[0], 10)
    obj = cvx.Minimize(0.5 * cvx.sum_squares(Y - x) + vlambda * cvx.tv(x))
    prob = cvx.Problem(obj)
    # ECOS and SCS solvers fail to converge before
    # the iteration limit. Use CVXOPT instead.
    prob.solve(solver=cvx.CVXOPT, verbose=False)
    if prob.status != cvx.OPTIMAL:
        raise Exception("Solver did not converge!")

    return x.value
Example #19
def l1filter(t,
             y,
             lam=1200,
             rho=80,
             periods=(365.25, 182.625),
             solver=cvx.MOSEK,
             verbose=False):
    """
    Do l1 regularize for given time series.
    :param t: np.array, time
    :param y: np.array, time series value
    :param lam: lambda value
    :param rho: rho value
    :param periods: list, periods, same unit as t
    :param solver: cvx.solver
    :param verbose: bool, show verbose or not
    :return: x, w, s, if periods is not None, else return x, w
    """
    t = np.asarray(t, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)

    assert y.shape == t.shape

    n = len(t)
    D = gen_d2(n)

    x = cvx.Variable(n)
    w = cvx.Variable(n)
    errs = y - x - w
    seasonl = None
    if periods is not None:
        tpi_t = 2 * np.pi * t
        for period in periods:
            a = cvx.Variable()
            b = cvx.Variable()
            temp = a * np.sin(tpi_t / period) + b * np.cos(tpi_t / period)
            if seasonl is None:
                seasonl = temp
            else:
                seasonl += temp
        errs = errs - seasonl
    obj = cvx.Minimize(0.5 * cvx.sum_squares(errs) + lam * cvx.norm(D * x, 1) +
                       rho * cvx.tv(w))
    prob = cvx.Problem(obj)
    prob.solve(solver=solver, verbose=verbose)
    if seasonl is not None:
        return np.array(x.value)[:, 0], np.array(w.value)[:, 0], np.array(
            seasonl.value)[:, 0]
    else:
        return np.array(x.value)[:, 0], np.array(w.value)[:, 0]
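
# A self-contained sketch of the same kind of decomposition (seasonality omitted),
# assuming gen_d2 builds a second-difference matrix; D2, the weights and the data
# below are illustrative stand-ins, not from the original source.
import numpy as np
import cvxpy as cvx

n_demo = 200
t_demo = np.arange(n_demo, dtype=float)
y_demo = 0.02 * t_demo + 2.0 * (t_demo > 120) + 0.1 * np.random.randn(n_demo)

D2 = np.diff(np.eye(n_demo), n=2, axis=0)     # second-difference operator
x_demo = cvx.Variable(n_demo)
w_demo = cvx.Variable(n_demo)
obj_demo = cvx.Minimize(0.5 * cvx.sum_squares(y_demo - x_demo - w_demo)
                        + 1200 * cvx.norm(D2 @ x_demo, 1)
                        + 80 * cvx.tv(w_demo))
cvx.Problem(obj_demo).solve()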
Example #20
def create(m, ni, k, rho=0.05, sigma=0.05):
    A = np.random.randn(m, ni*k)
    A /= np.sqrt(np.sum(A**2, 0))

    x0 = np.zeros(ni*k)
    for i in range(k):
        if np.random.rand() < rho:
            x0[i*ni:(i+1)*ni] = np.random.rand()
    b = A.dot(x0) + sigma*np.random.randn(m)

    lam = 0.1*sigma*np.sqrt(m*np.log(ni*k))
    x = cp.Variable(A.shape[1])
    f = cp.sum_squares(A*x - b) + lam*cp.norm1(x) + lam*cp.tv(x)
    return cp.Problem(cp.Minimize(f))
Example #21
def tv_X(X):
    X_out = np.zeros(shape=(146, 160))
    for i in range(160):
        print(i)
        Y = X[:, i]
        vlambda = 10

        x = cvx.Variable(Y.size)
        obj = cvx.Minimize(0.5 * cvx.sum_squares(Y - x) + vlambda * cvx.tv(x))
        prob = cvx.Problem(obj)
        # ECOS and SCS solvers fail to converge before
        # the iteration limit. Use CVXOPT instead.
        prob.solve(solver=cvx.CVXOPT, verbose=False)
        if prob.status != cvx.OPTIMAL:
            raise Exception("Solver did not converge!")
        X_out[:, i] = np.array(x.value).ravel()
    return X_out
Example #22
 def inpaint_func(image, mask):
     """Total variation inpainting"""
     inpainted = np.zeros_like(image)
     for c in range(image.shape[2]):
         image_c = image[:, :, c]
         mask_c = mask[:, :, c]
         if np.min(mask_c) > 0:
             # if mask is all ones, no need to inpaint
             inpainted[:, :, c] = image_c
         else:
             h, w = image_c.shape
             inpainted_c_var = cvxpy.Variable(h, w)
             obj = cvxpy.Minimize(cvxpy.tv(inpainted_c_var))
             constraints = [cvxpy.mul_elemwise(mask_c, inpainted_c_var) == cvxpy.mul_elemwise(mask_c, image_c)]
             prob = cvxpy.Problem(obj, constraints)
             # prob.solve(solver=cvxpy.SCS, max_iters=100, eps=1e-2)  # scs solver
             prob.solve()  # default solver
             inpainted[:, :, c] = inpainted_c_var.value
     return inpainted
Example #23
def fusedLassoProblem(problemOptions, solverOptions):
    m = problemOptions['m']
    ni = problemOptions['ni']
    k = problemOptions['k']
    rho=problemOptions['rho']
    sigma=problemOptions['sigma']

    A = np.random.randn(m, ni*k)
    A /= np.sqrt(np.sum(A**2, 0))

    x0 = np.zeros(ni*k)
    for i in range(k):
        if np.random.rand() < rho:
            x0[i*ni:(i+1)*ni] = np.random.rand()
    b = A.dot(x0) + sigma*np.random.randn(m)
    lam = problemOptions['lam_factor']*sigma*np.sqrt(m*np.log(ni*k))

    x = cp.Variable(A.shape[1])
    f = cp.sum_squares(A*x - b) + lam*cp.norm1(x) + lam*cp.tv(x)
    prob = cp.Problem(cp.Minimize(f))
    
    prob.solve(**solverOptions)
    return {'Problem':prob, 'name':'fusedLassoProblem'}
Example #24
    def _formulate(self, b: np.ndarray=None) -> None:
        """ Formulate Problem

        Internal method that formulates and prepares the cvxpy model to be fit.

        Args
        ----
        b: np.array, dtype float

        Return
        ------
        Nothing
        """
        if (self.mask is None):
            raise ValueError("mask paramenter not provided. A mask is required to fit the model")

        # Define and construct variables and costants
        self.x = cvxpy.Variable(self.cfg.A.shape[1], 1)

        if b is None:
            self.b = cvxpy.Parameter(rows=self.cfg.A.shape[0], cols=1, sign="positive", value=np.zeros(self.cfg.A.shape[0]))
        else:
            self.b = cvxpy.Parameter(rows=self.cfg.A.shape[0], cols=1, sign="positive", value=b)
        self.A = cvxpy.Parameter(rows=self.cfg.A.shape[0], cols=self.cfg.A.shape[1], sign="positive", value=self.cfg.A)
        self.x_img = cvxpy.reshape(self.x, self.mask.shape[0], self.mask.shape[1])  # x must be reshaped to allow calling cvx.tv on it. Possible reimplementation of the tv filter might speed-up
        self.background = cvxpy.mul_elemwise(1. - self.mask.flat[:], self.x_img)

        # The definition of the problem
        self.objective = cvxpy.Minimize(cvxpy.sum_squares(self.A * self.x - self.b) +
                                        self.beta * cvxpy.tv(self.x_img))  # + self.alpha * cvxpy.norm(self.x, 1)
        self.constraints = [self.x >= 0.,
                            self.background == 0,
                            cvxpy.sum_entries(self.x) - 0.85*cvxpy.sum_entries(self.b)/2*self.b_n >= 0]

        self.problem = cvxpy.Problem(self.objective, self.constraints)
        self.formulated = True
Example #25
def separateTVAndL1(M, sp, max_iters=30):
    """
    separate matrix m into smooth and sparse components
    M: 2-d ndarray
    sp:strength of sparseness
    max_iters: maximum number of iterations
    """

    m = M.shape[0]
    n = M.shape[1]

    L = cvx.Variable(m, n)
    S = cvx.Variable(m, n)

    #ob = cvx.Minimize(cvx.norm(L,'nuc') + 0.05*cvx.norm(S, 1))
    ob = cvx.Minimize(cvx.tv(L) + sp * cvx.norm(S, 1))

    co = [M == L + S]

    prob = cvx.Problem(ob, co)

    result = prob.solve(solver='SCS', max_iters=max_iters)

    return L.value, S.value
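
# A modern-API sketch of the same TV + L1 split (cvxpy >= 1.0 shapes), written as an
# independent example rather than a call to separateTVAndL1 above; M_demo and the
# weight 0.5 are illustrative stand-ins.
import numpy as np
import cvxpy as cvx

M_demo = np.outer(np.linspace(0.0, 1.0, 20), np.ones(20))   # smooth ramp
M_demo[5, 7] += 3.0                                         # plus one sparse spike
L_demo = cvx.Variable(M_demo.shape)
S_demo = cvx.Variable(M_demo.shape)
prob_demo = cvx.Problem(cvx.Minimize(cvx.tv(L_demo) + 0.5 * cvx.norm(S_demo, 1)),
                        [M_demo == L_demo + S_demo])
prob_demo.solve(solver='SCS')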
Example #26
    prox("SUM_HINGE", f_hinge),
    prox("SUM_HINGE", lambda: cp.sum_entries(cp.max_elemwise(1-x, 0))),
    prox("SUM_HINGE", lambda: cp.sum_entries(cp.max_elemwise(1-x, 0))),
    prox("SUM_INV_POS", lambda: cp.sum_entries(cp.inv_pos(x))),
    prox("SUM_KL_DIV", lambda: cp.sum_entries(cp.kl_div(p1,q1))),
    prox("SUM_LARGEST", lambda: cp.sum_largest(x, 4)),
    prox("SUM_LOGISTIC", lambda: cp.sum_entries(cp.logistic(x))),
    prox("SUM_NEG_ENTR", lambda: cp.sum_entries(-cp.entr(x))),
    prox("SUM_NEG_LOG", lambda: cp.sum_entries(-cp.log(x))),
    prox("SUM_QUANTILE", f_quantile),
    prox("SUM_QUANTILE", f_quantile_elemwise),
    prox("SUM_SQUARE", f_least_squares_matrix),
    prox("SUM_SQUARE", lambda: f_least_squares(20)),
    prox("SUM_SQUARE", lambda: f_least_squares(5)),
    prox("SUM_SQUARE", f_quad_form),
    prox("TOTAL_VARIATION_1D", lambda: cp.tv(x)),
    prox("ZERO", None, C_linear_equality),
    prox("ZERO", None, C_linear_equality_matrix_lhs),
    prox("ZERO", None, C_linear_equality_matrix_rhs),
    prox("ZERO", None, C_linear_equality_multivariate),
    prox("ZERO", None, C_linear_equality_multivariate2),
    prox("ZERO", None, lambda: C_linear_equality_graph(20)),
    prox("ZERO", None, lambda: C_linear_equality_graph(5)),
    prox("ZERO", None, lambda: C_linear_equality_graph_lhs(10, 5)),
    prox("ZERO", None, lambda: C_linear_equality_graph_lhs(5, 10)),
    prox("ZERO", None, lambda: C_linear_equality_graph_rhs(10, 5)),
    prox("ZERO", None, lambda: C_linear_equality_graph_rhs(5, 10)),
]

# Epigraph operators
PROX_TESTS += [
Example #27
            Y_noisy[i][j] = Y[i][j]
            if np.random.uniform(0, 1) <= 0.1:
                noise_points += 1
                Y_noisy[i][j] += np.random.uniform(0, 1)

    print('added {} noise points (should be {})'.format(
        noise_points, r * c * 0.1))
    Y_noisy_snorm = 0.5 * np.sum(np.linalg.norm(np.stack(Y_noisy))**2)
    # Save noisy image
    plt.clf()
    plt.draw()
    plt.imshow(Y_noisy, vmin=0, vmax=1)
    plt.draw()
    plt.savefig('hw8pb3-noisy.png')
    tau = 0.25
    tauV = cvx.Variable(1)

    x = cvx.Variable(r, c)

    func = (0.5 * (cvx.norm2(x - Y_noisy)**2))
    prob = cvx.Problem(
        cvx.Minimize(func),
        constraints=[cvx.tv(x) <= tau * cvx.tv(Y_noisy), 0 <= x, x <= 1])
    prob.solve(verbose=True)
    # Save the cleaned result.
    plt.clf()
    plt.draw()
    plt.imshow(x.value, vmin=0, vmax=1)
    plt.draw()
    plt.savefig('hw8pb3-clean.png')
Example #28
    con = [
        X[:, 0] == np.array([sum(ord_list), 0] * 50)
    ]  # constraint 1: starting condition, specified to be changed based on the current inventory of the bot itself
    con.extend([X[:, 1:w + 1] == A * X[:, 0:w] + B * U
                ])  # constraint 2: update condition
    con.extend([cvx.norm(U[0, j], 'inf') <= max_pos for j in range(0, N)
                ])  # constraint 3: maximum trading position
    con.extend([cvx.norm(X[0, :], 'inf') <= 1
                ])  # constraint 4: maximum inventory size

    con.extend([X[j, -1] == 0 for j in range(0, 2 * 50, 2)])  #neutral exposure

    # create a matrix to be fed into objective function -- this is the second term in the objective function
    for i in range(2 * 50):
        if i == 0:
            tv_arr = cvx.tv(
                cvx.multiply(np.array(projsog[:, i].T)[0], X[i, 1:w + 1]))
            continue
        if i % 2 == 0:
            # Stack the per-row TV terms; without assigning back, tv_arr was never grown.
            tv_arr = cvx.vstack([
                tv_arr,
                cvx.tv(
                    cvx.multiply(np.array(projsog[:, i].T)[0], X[i, 1:w + 1]))
            ])
        else:
            tv_arr = cvx.vstack([tv_arr, 0])
    obj = cvx.Maximize(cvx.sum(cvx.diag(X[:, 1:w + 1] @ projs) -
                               tv_arr))  # objective function

    prob = cvx.Problem(
        obj, con)  # insert objective function and constraints into cvxpy
Example #29
orig_img = Image.open('looseleaf_ss.jpg')
gray_img = ImageOps.grayscale(orig_img)

m = gray_img.size[1]
n = gray_img.size[0]

L = cvx.Variable(m, n)
S = cvx.Variable(m, n)
#np.random.seed()
#M = np.random.rand(n,n) + np.ones((n,n))
M = np.array(gray_img)
M = M / np.max(M)

#ob = cvx.Minimize(cvx.norm(L,'nuc') + 0.05*cvx.norm(S, 1))
ob = cvx.Minimize(cvx.tv(L) + 0.2 * cvx.norm(S, 1))

co = [M == L + S]

prob = cvx.Problem(ob, co)

result = prob.solve(solver='SCS', max_iters=30)

maxval = 1.0
imcolor = 'gray'

plt.figure(0)
plt.subplot(2, 2, 1)
plt.imshow(M, interpolation='nearest', vmin=0.0, vmax=maxval, cmap=imcolor)
plt.title('M')
plt.colorbar()
Example #30
np.random.seed(0)

A = np.random.randn(m, ni * k)
A /= np.sqrt(np.sum(A**2, 0))

x0 = np.zeros(ni * k)
for i in range(k):
    if np.random.rand() < rho:
        x0[i * ni:(i + 1) * ni] = np.random.rand()
b = A.dot(x0) + sigma * np.random.randn(m)
lam = 0.1 * sigma * np.sqrt(m * np.log(ni * k))

# Problem construction

x = cp.Variable(A.shape[1])
f = cp.sum_squares(A * x - b) + lam * cp.norm1(x) + lam * cp.tv(x)
prob = cp.Problem(cp.Minimize(f))

# Problem collection

# Single problem collection
problemDict = {"problemID": problemID, "problem": prob, "opt_val": opt_val}
problems = [problemDict]

# For debugging individual problems:
if __name__ == "__main__":

    def printResults(problemID="", problem=None, opt_val=None):
        print(problemID)
        problem.solve()
        print("\tstatus: {}".format(problem.status))
Example #31
                        num_neigh += 1

                if (j == 142 and i < 202 and i > 0):
                    summ = (naive_img[i][j - 1] + naive_img[i - 1][j] +
                            naive_img[i + 1][j])
                    if naive_img[i][j - 1] > 0:
                        num_neigh += 1
                    if naive_img[i - 1][j] > 0:
                        num_neigh += 1
                    if naive_img[i + 1][j] > 0:
                        num_neigh += 1

            if num_neigh > 0:
                naive_img[i][j] = ((summ * 1.0) / num_neigh)

plt.imshow(naive_img)
plt.gray()
plt.savefig('recovered_stanford_tree_2b.png')
plt.show()

from cvxpy import Variable, Minimize, Problem, mul_elemwise, tv
U = Variable(*img.shape)
obj = Minimize(tv(U))
constraints = [mul_elemwise(Known, U) == mul_elemwise(Known, img)]
prob = Problem(obj, constraints)
prob.solve()
# recovered image is now in U.value
plt.imshow(U.value)
plt.gray()
plt.savefig('recovered_stanford_tree_2c.png')
plt.show()