import cvxpy as cp
import matplotlib.pyplot as plt
import numpy as np


def Maximum_likelihood_estimation_of_an_increasing_nonnegative_signal():
    '''
    Setup
    '''
    N = 100
    xtrue = np.zeros((N, 1))
    xtrue[0:40] = 0.1
    xtrue[49] = 2  # index 50 in the 1-indexed MATLAB original
    xtrue[70:80] = 0.15
    xtrue[79] = 1  # index 80 in the 1-indexed MATLAB original
    xtrue = np.cumsum(xtrue, axis=0)

    h = np.array([1, -0.85, 0.7, -0.3])
    k = len(h)  # filter length; len() of the former (1, 4) row array was 1, not 4
    yhat = np.array([np.convolve(h, xtrue.flatten())]).T
    y = yhat[:-3] + np.array(
        [[-0.43], [-1.7], [0.13], [0.29],
         [-1.1], [1.2], [1.2], [-0.038], [0.33], [0.17], [-0.19], [0.73],
         [-0.59], [2.2], [-0.14], [0.11], [1.1], [0.059], [-0.096], [-0.83],
         [0.29], [-1.3], [0.71], [1.6], [-0.69], [0.86], [1.3], [-1.6], [-1.4],
         [0.57], [-0.4], [0.69], [0.82], [0.71], [1.3], [0.67], [1.2], [-1.2],
         [-0.02], [-0.16], [-1.6], [0.26], [-1.1], [1.4], [-0.81], [0.53],
         [0.22], [-0.92], [-2.2], [-0.059], [-1], [0.61], [0.51],
         [1.7], [0.59], [-0.64], [0.38], [-1], [-0.02], [-0.048], [4.3e-05],
         [-0.32], [1.1], [-1.9], [0.43], [0.9], [0.73], [0.58], [0.04], [0.68],
         [0.57], [-0.26], [-0.38], [-0.3], [-1.5], [-0.23], [0.12], [0.31],
         [1.4], [-0.35], [0.62], [0.8], [0.94], [-0.99], [0.21], [0.24], [-1],
         [-0.74], [1.1], [-0.13], [0.39], [0.088], [-0.64], [-0.56], [0.44],
         [-0.95], [0.78], [0.57], [-0.82], [-0.27]])
    plt.plot(range(len(xtrue)), xtrue, label="xtrue")
    plt.plot(range(len(y)), y, label="y")

    x_ml = cp.Variable((N, 1))
    constraints = [x_ml >= 0] + [x_ml[i + 1] >= x_ml[i] for i in range(N - 1)]
    # obj = cp.sum(cp.log(y - cp.conv(h, x_ml)))
    # prob = cp.Problem(cp.Maximize(obj), constraints)
    obj = cp.norm2(y - cp.conv(h, x_ml)[:N])  # trim the convolution tail to match y
    prob = cp.Problem(cp.Minimize(obj), constraints)
    prob.solve()
    print("Status = " + str(prob.status))
    plt.plot(range(len(x_ml.value)), x_ml.value, label="x_ml")

    x_ml_free = cp.Variable((N, 1))
    obj = cp.norm2(y - cp.conv(h, x_ml_free)[:N])
    prob = cp.Problem(cp.Minimize(obj))
    prob.solve()
    print("Status = " + str(prob.status))
    plt.plot(range(len(x_ml_free.value)), x_ml_free.value, label="x_ml_free")

    plt.legend()
    plt.show()
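The 99 elementwise constraints above can be written as one vectorized constraint; a minimal sketch, assuming CVXPY 1.x where cp.diff is available:

# Equivalent, vectorized form of the monotonicity constraints
# (successive differences of x_ml must be nonnegative).
x_ml = cp.Variable((N, 1))
constraints = [x_ml >= 0, cp.diff(x_ml, axis=0) >= 0]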
Example #2
import cvxpy as cp
import numpy as np


def main():
    TARGET = 0
    y = []
    x = []
    for line in open("features.train"):
        vec = line.strip().split()
        x.append([float(vec[1]),float(vec[2])])
        if int(float(vec[0])) == TARGET:
            y.append(1)
        else:
            y.append(-1)

    w = cp.Variable(2)
    b = cp.Variable()
    Q = np.identity(2)
    xi = cp.Variable(len(x))
    C = 0.01
    objective = cp.Minimize(0.5 * cp.quad_form(w, Q) + C * cp.sum(xi))
    constraints = []
    for i in range(len(x)):
        # inner product (decision value), not convolution
        constraints.append(y[i] * (np.array(x[i]) @ w + b) >= 1 - xi[i])
        constraints.append(xi[i] >= 0)
    
    prob = cp.Problem(objective, constraints)
    prob.solve()
    print "w=",sum([ item**2 for item in w.value])
    #choose any support vector (z,y) to calculate the b
    print "b=",y[1] - np.dot(w.value, x[1])
    print "status:", prob.status
    print "optimal value", prob.value
Example #3
def test_0D_conv(self) -> None:
    """Convolution with 0D input.
    """
    x = cvx.Variable((1,))  # or cvx.Variable((1, 1))
    problem = cvx.Problem(
        cvx.Minimize(cvx.max(cvx.conv([1.], cvx.multiply(1., x)))),
        [x >= 0])
    problem.solve(cvx.ECOS)
    assert problem.status == cvx.OPTIMAL
Example #4
def op_convolution(n):
    x = cvx.Variable(n)
    sigma = n/10
    c = np.exp(-np.arange(-n/2., n/2.)**2./(2*sigma**2))/np.sqrt(2*sigma**2*np.pi)
    c[c < 1e-4] = 0

    k = 5
    x0 = np.zeros(n)  # x is 1-D, so build the sparse value array first, then assign it once
    x0[np.random.choice(n, k)] = np.random.rand(k) * sigma
    x.value = x0
    return cvx.conv(c, x), x
Example #5
def op_convolution(n):
    x = cvx.Variable(n)
    sigma = n / 10
    c = np.exp(-np.arange(-n / 2., n / 2.)**2. /
               (2 * sigma**2)) / np.sqrt(2 * sigma**2 * np.pi)
    c[c < 1e-4] = 0

    k = 5
    x0 = np.zeros(n)  # x is 1-D, so build the sparse value array first, then assign it once
    x0[np.random.choice(n, k)] = np.random.rand(k) * sigma
    x.value = x0
    return cvx.conv(c, x), x
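For constant arguments, cvx.conv reduces to ordinary discrete convolution, so it should agree with np.convolve; a small sanity-check sketch:

# Sanity check (sketch): cvx.conv of two constants matches np.convolve.
c = np.array([1., 2., 3.])
v = np.array([0., 1., 0.5])
expr = cvx.conv(c, v)  # constant expression of shape (5, 1)
assert np.allclose(np.asarray(expr.value).ravel(), np.convolve(c, v))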
Example #6
def test_conv_prob(self):
    """Test a problem with convolution.
    """
    import numpy as np
    N = 5
    y = np.asmatrix(np.random.randn(N, 1))
    h = np.asmatrix(np.random.randn(2, 1))
    x = cvx.Variable(N)
    v = cvx.conv(h, x)
    obj = cvx.Minimize(cvx.sum(cvx.multiply(y, v[0:N])))
    print(cvx.Problem(obj, []).solve())
Example #7
def test_1D_conv(self):
    """Test 1D convolution.
    """
    n = 3
    x = cvx.Variable(n)
    f = [1, 2, 3]
    g = [0, 1, 0.5]
    f_conv_g = [0., 1., 2.5, 4., 1.5]
    expr = cvx.conv(f, g)
    assert expr.is_constant()
    self.assertEqual(expr.shape, (5, 1))
    self.assertItemsAlmostEqual(expr.value, f_conv_g)

    expr = cvx.conv(f, x)
    assert expr.is_affine()
    self.assertEqual(expr.shape, (5, 1))
    # Matrix stuffing.
    prob = cvx.Problem(cvx.Minimize(cvx.norm(expr, 1)), [x == g])
    result = prob.solve()
    self.assertAlmostEqual(result, sum(f_conv_g), places=3)
    self.assertItemsAlmostEqual(expr.value, f_conv_g)
Example #8
def test_conv_prob(self):
    """Test a problem with convolution.
    """
    import cvxpy as cvx
    import numpy as np
    N = 5
    y = np.asmatrix(np.random.randn(N, 1))
    h = np.asmatrix(np.random.randn(2, 1))
    x = cvx.Variable(N)
    v = cvx.conv(h, x)
    obj = cvx.Minimize(cvx.sum_entries(cvx.mul_elemwise(y, v[0:N])))
    print(cvx.Problem(obj, []).solve())
Example #9
def _direct_deconvolution(w, y, Nx, gamma_L2=0, gamma_L1=0):
    w = np.asarray(w)
    y = np.asarray(y).reshape(-1, 1)  # column vector, matching conv's (m, 1) output
    cplx = (np.issubdtype(w.dtype, np.complexfloating) or
            np.issubdtype(y.dtype, np.complexfloating))

    if cplx:
        wr, wi = w.real, w.imag
        yr, yi = y.real, y.imag

        xr = cvx.Variable(Nx)
        xi = cvx.Variable(Nx)

        error = (cvx.sum_squares(cvx.conv(wr, xr) - cvx.conv(wi, xi) - yr) +
                 cvx.sum_squares(cvx.conv(wi, xr) + cvx.conv(wr, xi) - yi))
        # per-entry magnitudes |x_j| = sqrt(xr_j**2 + xi_j**2)
        u = cvx.norm(cvx.vstack([xr, xi]), 2, axis=0)
    else:
        x = cvx.Variable(Nx)
        error = cvx.sum_squares(cvx.conv(w, x) - y)
        u = cvx.abs(x)  # penalize magnitudes in both branches

    cost = error

    if gamma_L1 != 0:
        gamma_L1 = cvx.Parameter(value=gamma_L1, nonneg=True)
        cost = cost + gamma_L1 * cvx.sum(u)  # u >= 0, so this is an l1 penalty
    if gamma_L2 != 0:
        gamma_L2 = cvx.Parameter(value=gamma_L2, nonneg=True)
        cost = cost + gamma_L2 * cvx.sum_squares(u)

    objective = cvx.Minimize(cost)
    prob = cvx.Problem(objective)
    prob.solve()
    print("Problem Status: {0}".format(prob.status))

    if cplx:
        result = np.array(xr.value).ravel() + 1j * np.array(xi.value).ravel()
    else:
        result = np.asarray(x.value).ravel()

    return result
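A quick usage sketch for the function above (hypothetical data; assumes numpy and cvxpy are imported as in the snippet):

# Hypothetical usage: recover a sparse spike train from a blurred observation.
w = np.array([0.25, 0.5, 0.25])   # known blur kernel
x_true = np.zeros(20)
x_true[[4, 12]] = [1.0, -0.5]
y = np.convolve(w, x_true)        # length len(w) + Nx - 1 = 22
x_rec = _direct_deconvolution(w, y, Nx=20, gamma_L1=1e-3)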
Example #10
def lasso_conv(n):
    sigma = n/10
    c = np.exp(-np.arange(-n/2., n/2.)**2./(2*sigma**2))/np.sqrt(2*sigma**2*np.pi)
    c[c < 1e-4] = 0

    x0 = np.array(sp.rand(n, 1, 0.1).todense()).ravel()
    b = np.convolve(c, x0) + 1e-2*np.random.randn(2*n-1)
    lam = 0.2*np.max(np.abs(np.convolve(b, c, "valid")))
    print(lam)

    x = cvx.Variable(n)
    f = cvx.sum_squares(cvx.conv(c, x) - b) + lam*cvx.norm1(x)
    return cvx.Problem(cvx.Minimize(f))
Example #11
def nn_deconv(n):
    sigma = 0.1*n
    k = 5

    x0 = np.zeros(n)
    x0[np.random.choice(n,k)] = np.random.randn(k)*n/10

    c = np.exp(-np.arange(-n/2., n/2.)**2./(2*sigma**2))/np.sqrt(2*sigma**2*np.pi)
    c[c < 1e-6] = 0
    b = np.convolve(c, x0) + 0.1*np.random.randn(2*n-1)

    x = cvx.Variable(n)
    f = cvx.norm(cvx.conv(c, x) - b)
    return cvx.Problem(cvx.Minimize(f), [x >= 0])
Example #13
def lasso_conv(n):
    sigma = n/10
    c = np.exp(-np.arange(-n/2., n/2.)**2./(2*sigma**2))/np.sqrt(2*sigma**2*np.pi)
    c[c < 1e-4] = 0

    x0 = np.array(sp.rand(n, 1, 0.1).todense()).ravel()
    b = np.convolve(c, x0) + 1e-2*np.random.randn(2*n-1)
    lam = 0.2*np.max(np.abs(np.convolve(b, c, "valid")))
    print(lam)

    x = cvx.Variable(n)
    # f = cvx.sum_squares(cvx.conv(c, x) - b) + lam*cvx.norm1(x)
    f = cvx.norm1(cvx.conv(c, x) - b) + lam*cvx.norm1(x)
    return cvx.Problem(cvx.Minimize(f))
Example #14
def nonnegative_deconvolution():
    np.random.seed(0)
    n = 10
    k = 3
    sigma = n/10.

    c = (np.exp(-np.arange(-n/2., n/2.)**2./(2*sigma**2))/
         np.sqrt(2*sigma**2*np.pi))
    x0 = np.zeros(n)
    x0[np.random.choice(n,k)] = np.random.randn(k)*sigma
    b = np.convolve(c, x0)
    b += np.random.randn(2*n-1)*np.linalg.norm(b)/np.sqrt(2*n-1)/20

    x = cvx.Variable(n)
    f = cvx.sum_squares(cvx.conv(c, x) - b)
    return cvx.Problem(cvx.Minimize(f), [x >= 0])
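A short usage sketch for the problem generators above (assumes numpy as np, scipy.sparse as sp, and cvxpy as cvx are in scope):

# Solve the nonnegative deconvolution instance and inspect the estimate.
prob = nonnegative_deconvolution()
prob.solve()
x_hat = prob.variables()[0].value
print(prob.status, prob.value)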
Example #15
def test_conv(self):
    """Test the conv atom.
    """
    a = np.ones((3, 1))
    b = Parameter(2, nonneg=True)
    expr = cp.conv(a, b)
    assert expr.is_nonneg()
    self.assertEqual(expr.shape, (4, 1))
    b = Parameter(2, nonpos=True)
    expr = cp.conv(a, b)
    assert expr.is_nonpos()
    with self.assertRaises(Exception) as cm:
        cp.conv(self.x, -1)
    self.assertEqual(str(cm.exception),
                     "The first argument to conv must be constant.")
    with self.assertRaises(Exception) as cm:
        cp.conv([[0, 1], [0, 1]], self.x)
    self.assertEqual(str(cm.exception),
                     "The arguments to conv must resolve to vectors.")
Example #16
noise = np.array([
    -0.43, -1.7, 0.13, 0.29, -1.1, 1.2, 1.2, -0.038, 0.33,
    0.17, -0.19, 0.73, -0.59, 2.2, -0.14, 0.11, 1.1, 0.059,
    -0.096, -0.83, 0.29, -1.3, 0.71, 1.6, -0.69, 0.86, 1.3,
    -1.6, -1.4, 0.57, -0.4, 0.69, 0.82, 0.71, 1.3, 0.67, 1.2,
    -1.2, -0.02, -0.16, -1.6, 0.26, -1.1, 1.4, -0.81, 0.53,
    0.22, -0.92, -2.2, -0.059, -1, 0.61, 0.51, 1.7, 0.59,
    -0.64, 0.38, -1, -0.02, -0.048, 4.3e-05, -0.32, 1.1, 
    -1.9, 0.43, 0.9, 0.73, 0.58, 0.04, 0.68, 0.57, -0.26,
    -0.38, -0.3, -1.5, -0.23, 0.12, 0.31, 1.4, -0.35, 0.62,
    0.8, 0.94, -0.99, 0.21, 0.24, -1, -0.74, 1.1, -0.13, 0.39,
    0.088, -0.64, -0.56, 0.44, -0.95, 0.78, 0.57, -0.82, -0.27
])
s = y + noise

# Solving the ML estimation
x_ = cp.Variable(N)
y_ = cp.conv(h, x_)[:-k+1].flatten()
constraints = [0 <= x_[0]] + [
    x_[i] <= x_[i+1] for i in range(0, N-1)
]
# With constraints
cp.Problem(cp.Minimize(cp.norm(y_ - s)), constraints).solve()
x_hat = x_.value
y_hat = y_.value
# Without constraints
cp.Problem(cp.Minimize(cp.norm(y_ - s))).solve()
x_hat_free = x_.value
y_hat_free = y_.value
fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(14, 8))

# Plotting the results
axs[0].plot(x, label="Origial")
import numpy as np
import cvxpy as cp

N = 100
xtrue = np.zeros((N, 1))
xtrue[0:40] = 0.1    # MATLAB's xtrue(1:40)
xtrue[49] = 2        # MATLAB's xtrue(50)
xtrue[70:80] = 0.15  # MATLAB's xtrue(71:80)
xtrue[79] = 1        # MATLAB's xtrue(80)
xtrue = xtrue.cumsum()

h = np.array([1, -0.85, 0.7, -0.3])
k = len(h)
yhat = cp.conv(h, xtrue).value.ravel()  # flatten the (N + k - 1, 1) output before adding 1-D noise

noise = np.array([
    -0.43, -1.7, 0.13, 0.29, -1.1, 1.2, 1.2, -0.038, 0.33, 0.17, -0.19, 0.73,
    -0.59, 2.2, -0.14, 0.11, 1.1, 0.059, -0.096, -0.83, 0.29, -1.3, 0.71, 1.6,
    -0.69, 0.86, 1.3, -1.6, -1.4, 0.57, -0.4, 0.69, 0.82, 0.71, 1.3, 0.67, 1.2,
    -1.2, -0.02, -0.16, -1.6, 0.26, -1.1, 1.4, -0.81, 0.53, 0.22, -0.92, -2.2,
    -0.059, -1, 0.61, 0.51, 1.7, 0.59, -0.64, 0.38, -1, -0.02, -0.048, 4.3e-05,
    -0.32, 1.1, -1.9, 0.43, 0.9, 0.73, 0.58, 0.04, 0.68, 0.57, -0.26, -0.38,
    -0.3, -1.5, -0.23, 0.12, 0.31, 1.4, -0.35, 0.62, 0.8, 0.94, -0.99, 0.21,
    0.24, -1, -0.74, 1.1, -0.13, 0.39, 0.088, -0.64, -0.56, 0.44, -0.95, 0.78,
    0.57, -0.82, -0.27
])
y = yhat[0:N] + noise

A = np.zeros(shape=(N, N))
for i in range(N):  # `n` was undefined; the signal length is N
    for j in range(len(h)):
        if i + j < N:
            A[i + j, i] = h[j]  # banded (truncated) convolution matrix: A @ x == np.convolve(h, x)[:N]
Example #18
import cvxpy as cp
import matplotlib.pyplot as plt
import numpy as np

# create problem data
N = 100

# create an increasing input signal
xtrue = np.zeros((N,1))
xtrue[0:40] = 0.1    # MATLAB's xtrue(1:40)
xtrue[49] = 2        # MATLAB's xtrue(50)
xtrue[70:80] = 0.15  # MATLAB's xtrue(71:80)
xtrue[79] = 1        # MATLAB's xtrue(80)
xtrue = np.cumsum(xtrue)

# pass the increasing input through a moving-average filter 
# and add Gaussian noise
h = np.array([1, -0.85, 0.7, -0.3])
k = h.shape[0]
yhat = np.convolve(h,xtrue)
y = yhat[:-3] + np.random.randn(N)
x = cp.Variable((100,), nonneg=True)
z = y[:, None] - cp.conv(h, x)[:-3]
objective = cp.Minimize(cp.sum_squares(z))
constraints = [cp.diff(x) >= 0]
prob = cp.Problem(objective, constraints=constraints)
prob.solve()

# plot
t = list(range(xtrue.size))
plt.plot(t, list(xtrue), color='red', label='x_true')
plt.plot(t, list(x.value), color='blue', label='x_hat')
plt.legend(loc="upper left")
plt.savefig('prob_66.png')
plt.show()
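An optional check (a sketch) to quantify how close the monotone estimate comes to the true signal:

# Compare the estimate with the ground truth after solving.
print("status:", prob.status)
print("||x_hat - x_true||_2 =", np.linalg.norm(x.value - xtrue))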
Example #19
from numpy.testing import assert_allclose
import cvxpy as cvx
import numpy as np
import scipy.sparse as sp
import tensorflow as tf

from cvxflow import cvxpy_expr

np.random.seed(0)
A_sparse = sp.rand(5, 3, 0.5)
x_var = cvx.Variable(3)

EXPRESSIONS = [
    ("conv", cvx.conv([1, 2, 3], x_var), [1, 2, 4], [1, 2, 4, 6, 8]),
    ("sparse", A_sparse * x_var, [1, 2, 4], [1, 2, 4, 6, 8]),
]


class TensorTest(tf.test.TestCase):
    pass


def get_tensor_test(f_expr, x, y):
    f = f_expr.canonicalize()[0]
    x = np.array(x).reshape(-1, 1)
    y = np.array(y).reshape(-1, 1)

    def test(self):
        prob = cvx.Problem(cvx.Minimize(0), [f_expr == 0])
        A = prob.get_problem_data(cvx.SCS)["A"]
        xt = tf.constant(x, dtype=tf.float32)
Example #20
n = 100
m = 40
y = cvx.Variable(m)
x = cvx.Variable(n)

x0 = np.zeros((n, 1))
x0[n // 2 + 3:n // 2 + 4] = 1  # integer division; n / 2 is a float in Python 3
x0[n // 2 + 23:n // 2 + 24] = 0.8
x0[n // 2 + 43:n // 2 + 44] = 0.6

t = np.linspace(-2, 2, m)
y0 = np.exp(-np.square(t) * 2)
d = np.convolve(np.array(x0).flatten(), np.array(y0).flatten())

cost = cvx.norm(cvx.conv(y, x) - d) + 0.15 * cvx.norm(
    x, 1)  # cvx.conv does not yet support variable as first argument.
prob = cvx.Problem(cvx.Minimize(cost), [cvx.norm(y, "inf") <= 1])

x.value = np.ones(n)  # 1-D values to match Variable(n) and Variable(m)
y.value = np.ones(m)
prob.solve(method='bcd')

plt.plot(np.array(abs(x).value).flatten(), 'b-o')
plt.plot(np.array(abs(y).value).flatten(), 'c-s')
plt.plot(d, 'g-')
plt.plot(x0, 'r--', linewidth=2)
plt.plot(y0, 'm-.', linewidth=2)
print(cvx.norm(cvx.conv(y, x) - d).value)
plt.legend(["$x$", "$y$", "$d$", "ground truth $x_0$", "ground truth $y_0$"])
plt.show()
Example #21
from numpy.testing import assert_allclose
import cvxpy as cvx
import numpy as np
import scipy.sparse as sp
import tensorflow as tf

from cvxflow import cvxpy_expr

np.random.seed(0)
A_sparse = sp.rand(5, 3, 0.5)
x_var = cvx.Variable(3)

EXPRESSIONS = [
    ("conv", cvx.conv([1,2,3], x_var), [1,2,4], [1,2,4,6,8]),
    ("sparse", A_sparse*x_var, [1,2,4], [1,2,4,6,8]),
]

class TensorTest(tf.test.TestCase):
    pass

def get_tensor_test(f_expr, x, y):
    f = f_expr.canonicalize()[0]
    x = np.array(x).reshape(-1,1)
    y = np.array(y).reshape(-1,1)

    def test(self):
        prob = cvx.Problem(cvx.Minimize(0), [f_expr == 0])
        A = prob.get_problem_data(cvx.SCS)["A"]
        xt = tf.constant(x, dtype=tf.float32)
        yt = tf.constant(y, dtype=tf.float32)
Example #22
yhat = np.convolve(h, ytrue)
y = yhat[:-3] + np.array([
    -0.43, -1.7, 0.13, 0.29, -1.1, 1.2, 1.2, -0.038, 0.33, 0.17, -0.19, 0.73,
    -0.59, 2.2, -0.14, 0.11, 1.1, 0.059, -0.096, -0.83, 0.29, -1.3, 0.71, 1.6,
    -0.69, 0.86, 1.3, -1.6, -1.4, 0.57, -0.4, 0.69, 0.82, 0.71, 1.3, 0.67, 1.2,
    -1.2, -0.02, -0.16, -1.6, 0.26, -1.1, 1.4, -0.81, 0.53, 0.22, -0.92, -2.2,
    -0.059, -1, 0.61, 0.51, 1.7, 0.59, -0.64, 0.38, -1, -0.02, -0.048, 4.3e-05,
    -0.32, 1.1, -1.9, 0.43, 0.9, 0.73, 0.58, 0.04, 0.68, 0.57, -0.26, -0.38,
    -0.3, -1.5, -0.23, 0.12, 0.31, 1.4, -0.35, 0.62, 0.8, 0.94, -0.99, 0.21,
    0.24, -1, -0.74, 1.1, -0.13, 0.39, 0.088, -0.64, -0.56, 0.44, -0.95, 0.78,
    0.57, -0.82, -0.27
])

# maximum likelihood estimation with no monotonicity taken into account; can be solved analytically
y_ls = cp.Variable(N)
yhat = cp.conv(h, y_ls)[:-3]
y = y.reshape(-1, 1)
objective = cp.Minimize(cp.sum_squares(yhat - y))
prob = cp.Problem(objective)
_ = prob.solve()
error = np.sum((yhat.value.ravel() - np.ravel(ytrue))**2)
print('error for y_ml,free is ', error)

#monotonic and non-negative signal estimation
y_mono = cp.Variable(N)
yhat = cp.conv(h, y_mono)[:-3]
objective = cp.Minimize(cp.sum_squares(yhat - y))
constraints = [y_mono[0] >= 0, y_mono[:-1] <= y_mono[1:]]
prob = cp.Problem(objective, constraints)
_ = prob.solve()
error = np.sum((yhat.value.ravel() - np.ravel(ytrue))**2)
print('error for y_ml,mono is ', error)
Example #23
from math import exp, pi, sqrt
import random

import numpy as np
from cvxpy import *


def gauss(n=11, sigma=1):
    # Gaussian kernel sampled at integer offsets around zero
    r = range(-int(n / 2), int(n / 2) + 1)
    return [
        1 / (sigma * sqrt(2 * pi)) * exp(-float(x)**2 / (2 * sigma**2))
        for x in r
    ]


np.random.seed(5)
random.seed(5)
DENSITY = 0.1
n = 1000
x = Variable(n)
# Create sparse signal.
signal = np.zeros(n)
for i in range(n):
    if random.random() < DENSITY:
        signal[i] = random.uniform(1, 100)

# Gaussian kernel.
m = 100
kernel = gauss(m)

# Noisy signal.
noisy_signal = conv(kernel, signal).value + np.random.normal(size=(n + len(kernel) - 1, 1))

obj = norm(conv(kernel, x) - noisy_signal)
constraints = [x >= 0]
prob = Problem(Minimize(obj), constraints)
result = prob.solve(solver=SCS, verbose=True)

print(norm(signal - x.value, 1).value)
Example #24
# Create sparse signal.
signal = np.zeros(n)
nnz = 0
for i in range(n):
    if random.random() < DENSITY:
        signal[i] = random.uniform(0, 100)
        nnz += 1

# Gaussian kernel.
m = 1001
kernel = gauss(m, m/10)

# Noisy signal.
std = 1
noise = np.random.normal(scale=std, size=n+m-1)
noisy_signal = conv(kernel, signal) #+ noise

gamma = Parameter(nonneg=True)
fit = norm(conv(kernel, x) - noisy_signal, 2)
regularization = norm(x, 1)
constraints = [x >= 0]
gamma.value = 0.06
prob = Problem(Minimize(fit), constraints)
solver_options = {"NORMALIZE": True, "MAX_ITERS": 2500,
                  "EPS":1e-3}
result = prob.solve(solver=SCS,
                    verbose=True,
                    NORMALIZE=True,
                    MAX_ITERS=2500)
# Get the canonicalized problem data (modern CVXPY returns a 3-tuple).
data, chain, inverse_data = prob.get_problem_data(solver=SCS)
Example #25
# pl.figure(figsize=(6, 6))
# legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), prop={'size': 18})
# print(dir(ct))

H = np.zeros((ct.N, ct.N))
for idx in np.arange(ct.h.size):
    Htemp = np.diag(np.ones(ct.N) * ct.h[idx])
    Htemp = np.roll(Htemp, idx, axis=0)
    Htemp[:idx, :] = 0
    H = H + Htemp
# print(H)
S = np.roll(np.eye(ct.N), 1, axis=0)
S[0, :] = 0
# print(S)
x = cv.Variable(ct.N)
# obj = cv.Minimize(cv.norm(H * x - ct.y))
obj = cv.Minimize(cv.norm(cv.conv(ct.h, x)[:-3] - ct.y))
# cst = [S * x <= x]
cst = [0 <= x[0], x[:-1] <= x[1:]]
prb = cv.Problem(obj, cst)
prb.solve()
# print(prb.status)
pl.figure(figsize=(6, 6))
pl.plot(x.value, label=r'$x_{ml}$')
pl.plot(ct.xtrue, label=r'$x_{true}$')
prb = cv.Problem(obj)
prb.solve()
# print(prb.status)
pl.plot(x.value, label=r'$x_{ml,free}$')
pl.legend(loc='upper left', bbox_to_anchor=(1.03, 1.03), prop={'size': 18})
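The dense loop that builds H above is a banded Toeplitz construction; a sketch of an equivalent, more compact form, assuming scipy is available:

# Equivalent construction of H with scipy.linalg.toeplitz:
# the first column carries the filter taps, the first row is zero past h[0].
from scipy.linalg import toeplitz
col = np.zeros(ct.N)
col[:ct.h.size] = ct.h
H_alt = toeplitz(col, np.zeros(ct.N))
assert np.allclose(H, H_alt)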