Example #1
 def sublevel_set():
     if t.value > 1:
         return s.INFEASIBLE
     tsq = t.value**2
     return ((1 - tsq) * atoms.sum_squares(x) -
             atoms.matmul(2 * (a - tsq * b), x) + atoms.sum_squares(a) -
             tsq * atoms.sum_squares(b)) <= 0
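The quadratic above is the expanded form of the ratio constraint norm(x - a) <= t * norm(x - b); when t <= 1 the leading coefficient 1 - t**2 is nonnegative, so the set is convex and usable as a bisection sublevel set. A minimal NumPy check of that expansion on hypothetical random data (not the test's own a, b, x, t):

import numpy as np

rng = np.random.default_rng(0)
a, b, x = rng.standard_normal((3, 5))
t = 0.7
tsq = t ** 2
# ||x - a|| / ||x - b|| <= t  <=>  ||x - a||^2 <= t^2 ||x - b||^2, expanded below
lhs = np.linalg.norm(x - a) / np.linalg.norm(x - b) <= t
rhs = ((1 - tsq) * np.sum(x ** 2) - 2 * (a - tsq * b) @ x
       + np.sum(a ** 2) - tsq * np.sum(b ** 2)) <= 0
assert lhs == rhs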
Example #2
def quad_form_canon(expr, args):
    scale, M1, M2 = decomp_quad(args[1].value)
    if M1.size > 0:
        expr = sum_squares(Constant(M1.T) * args[0])
    if M2.size > 0:
        scale = -scale
        expr = sum_squares(Constant(M2.T) * args[0])
    obj, constr = quad_over_lin_canon(expr, expr.args)
    return scale * obj, constr
Example #3
def quad_form_canon(expr, args):
    # TODO this doesn't work with parameters!
    scale, M1, M2 = decomp_quad(args[1].value)
    if M1.size > 0:
        expr = sum_squares(Constant(M1.T) @ args[0])
    if M2.size > 0:
        scale = -scale
        expr = sum_squares(Constant(M2.T) @ args[0])
    obj, constr = quad_over_lin_canon(expr, expr.args)
    return scale * obj, constr
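Both versions of quad_form_canon rely on the same identity: writing a PSD matrix P as M @ M.T turns the quadratic form x' P x into a sum of squares, sum_squares(M.T @ x), with the NSD case handled by flipping scale. A small self-contained NumPy check of that identity (an illustration only, not cvxpy's decomp_quad):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
P = A @ A.T                                       # symmetric PSD
w, V = np.linalg.eigh(P)
M = V @ np.diag(np.sqrt(np.clip(w, 0.0, None)))   # P == M @ M.T
x = rng.standard_normal(4)
assert np.isclose(x @ P @ x, np.sum((M.T @ x) ** 2))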
Example #4
    def smooth_ridge(self, solver):
        np.random.seed(1)
        n = 200
        k = 50
        eta = 1

        A = np.ones((k, n))
        b = np.ones((k))
        obj = sum_squares(A*self.xsr - b) + \
            eta*sum_squares(self.xsr[:-1]-self.xsr[1:])
        p = Problem(Minimize(obj), [])
        self.solve_QP(p, solver)
        self.assertAlmostEqual(0, p.value, places=4)
Example #5
    def smooth_ridge(self, solver):
        numpy.random.seed(1)
        n = 500
        k = 50
        eta = 1

        A = numpy.ones((k, n))
        b = numpy.ones((k, 1))
        obj = sum_squares(A * self.xsr -
                          b) + eta * sum_squares(self.xsr[:-1] - self.xsr[1:])
        p = Problem(Minimize(obj), [])
        s = self.solve_QP(p, solver)
        self.assertAlmostEqual(0, s.opt_val)
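With A and b all ones, the constant vector x = (1/n) * ones makes both terms vanish: every row of A @ x sums to 1 and the first differences x[:-1] - x[1:] are zero, which is why both smooth_ridge tests expect an optimal value of 0. A quick check with the shapes from the second version (n = 500, k = 50):

import numpy as np

n, k, eta = 500, 50, 1
A, b = np.ones((k, n)), np.ones(k)
x = np.full(n, 1.0 / n)
obj = np.sum((A @ x - b) ** 2) + eta * np.sum(np.diff(x) ** 2)
print(obj)   # 0.0 up to floating point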
Example #6
    def optimize(self):

        num_points = len(self.graph.free_points)
        point_dim = self.graph.point_dim
        num_landmarks = len(self.graph.landmarks)
        landmark_dim = self.graph.landmark_dim

        Am, Ap, d, sigma_d = self.graph.observation_system()
        Bp, t, sigma_t = self.graph.odometry_system()

        S_d, S_t = sp.sparse.diags(
            1 / _sanitized_noise_array(sigma_d)), sp.sparse.diags(
                1 / _sanitized_noise_array(sigma_t))

        if (num_points != 0) and (num_landmarks != 0):

            M = cp.Variable((landmark_dim, num_landmarks))
            P = cp.Variable((point_dim, num_points))
            objective = cp.Minimize(
                sum_squares(S_d * ((Am * vec(M)) + (Ap * vec(P)) - d)) +
                sum_squares(S_t * ((Bp * vec(P)) - t)))
            problem = cp.Problem(objective)
            problem.solve(verbose=self._verbosity, solver=self._solver)

            self.M = M.value
            self.P = P.value

            m = self.M.ravel(order='F')
            p = self.P.ravel(order='F')

            self.res_d = Am.dot(m) + Ap.dot(p) - d
            self.res_t = Bp.dot(p) - t

        elif (num_points != 0) and (num_landmarks == 0):

            P = cp.Variable((point_dim, num_points))
            objective = cp.Minimize(
                sum_squares(S_t * ((Bp * vec(P)) - t)))
            problem = cp.Problem(objective)
            problem.solve(verbose=self._verbosity, solver=self._solver)

            self.P = P.value

            p = self.P.ravel(order='F')

            self.res_t = Bp.dot(p) - t

        else:
            return
Example #7
 def control(self, solver):
     # Some constraints on our motion
     # The object should start from the origin, and end at rest
     initial_velocity = numpy.matrix('-20; 100')
     final_position = numpy.matrix('100; 100')
     T = 100  # The number of timesteps
     h = 0.1  # The time between time intervals
     mass = 1  # Mass of object
     drag = 0.1  # Drag on object
     g = numpy.matrix('0; -9.8')  # Gravity on object
     # Create a problem instance
     constraints = []
     # Add constraints on our variables
     for i in range(T - 1):
         constraints += [
             self.position[:, i + 1] == self.position[:, i] +
             h * self.velocity[:, i]
         ]
         acceleration = self.force[:,
                                   i] / mass + g - drag * self.velocity[:,
                                                                        i]
         constraints += [
             self.velocity[:,
                           i + 1] == self.velocity[:, i] + h * acceleration
         ]
     # Add position constraints
     constraints += [self.position[:, 0] == 0]
     constraints += [self.position[:, -1] == final_position]
     # Add velocity constraints
     constraints += [self.velocity[:, 0] == initial_velocity]
     constraints += [self.velocity[:, -1] == 0]
     # Solve the problem
     p = Problem(Minimize(sum_squares(self.force)), constraints)
     s = self.solve_QP(p, solver)
     self.assertAlmostEqual(17850.0, s.opt_val)
Example #8
    def control(self, solver):
        # Some constraints on our motion
        # The object should start from the origin, and end at rest
        initial_velocity = np.array([-20, 100])
        final_position = np.array([100, 100])
        T = 100  # The number of timesteps
        h = 0.1  # The time between time intervals
        mass = 1  # Mass of object
        drag = 0.1  # Drag on object
        g = np.array([0, -9.8])  # Gravity on object
        # Create a problem instance
        constraints = []
        # Add constraints on our variables
        for i in range(T - 1):
            constraints += [
                self.position[:, i + 1] == self.position[:, i] +
                h * self.velocity[:, i]
            ]
            acceleration = self.force[:, i]/mass + g - \
                drag * self.velocity[:, i]
            constraints += [
                self.velocity[:,
                              i + 1] == self.velocity[:, i] + h * acceleration
            ]

        # Add position constraints
        constraints += [self.position[:, 0] == 0]
        constraints += [self.position[:, -1] == final_position]
        # Add velocity constraints
        constraints += [self.velocity[:, 0] == initial_velocity]
        constraints += [self.velocity[:, -1] == 0]
        # Solve the problem
        p = Problem(Minimize(.01 * sum_squares(self.force)), constraints)
        self.solve_QP(p, solver)
        self.assertAlmostEqual(178.500, p.value, places=1)
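Both control variants encode forward-Euler dynamics: position[t+1] = position[t] + h * velocity[t] and velocity[t+1] = velocity[t] + h * (force[t] / mass + g - drag * velocity[t]). A stand-alone simulation of that update with zero force (same constants as above, purely illustrative) shows the ballistic motion the optimizer must counteract:

import numpy as np

h, mass, drag = 0.1, 1.0, 0.1
g = np.array([0.0, -9.8])
position = np.zeros(2)
velocity = np.array([-20.0, 100.0])
for _ in range(99):                       # T - 1 Euler steps, zero force
    acceleration = g - drag * velocity    # force term omitted
    position = position + h * velocity
    velocity = velocity + h * acceleration
print(position, velocity)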
Example #9
    def test_sum_squares(self):
        X = Variable(5, 4)
        P = np.asmatrix(np.random.randn(3, 5))
        Q = np.asmatrix(np.random.randn(4, 7))
        M = np.asmatrix(np.random.randn(3, 7))

        y = P*X*Q + M
        self.assertFalse(y.is_constant())
        self.assertTrue(y.is_affine())
        self.assertTrue(y.is_quadratic())
        self.assertTrue(y.is_dcp())

        s = sum_squares(y)
        self.assertFalse(s.is_constant())
        self.assertFalse(s.is_affine())
        self.assertTrue(s.is_quadratic())
        self.assertTrue(s.is_dcp())

        # Frobenius norm squared is indeed quadratic
        # but can't show quadraticity using recursive rules
        t = norm(y, 'fro')**2
        self.assertFalse(t.is_constant())
        self.assertFalse(t.is_affine())
        self.assertFalse(t.is_quadratic())
        self.assertTrue(t.is_dcp())
Example #11
    def optimize(self):

        num_points = len(self.graph.free_points)
        point_dim = self.graph.point_dim
        num_landmarks = len(self.graph.landmarks)
        landmark_dim = self.graph.landmark_dim

        Am, Ap, d, _ = self.graph.observation_system()
        Bp, t, _ = self.graph.odometry_system()

        if (num_points != 0) and (num_landmarks != 0):

            M = cp.Variable((landmark_dim, num_landmarks))
            P = cp.Variable((point_dim, num_points))
            objective = cp.Minimize(
                sum_squares(Am * vec(M) + Ap * vec(P) - d) +
                sum_squares(Bp * vec(P) - t))

            problem = cp.Problem(objective)
            problem.solve(verbose=self._verbosity, solver=self._solver)

            self.M = M.value
            self.P = P.value

            m = self.M.ravel(order='F')
            p = self.P.ravel(order='F')

            self.res_d = Am.dot(m) + Ap.dot(p) - d
            self.res_t = Bp.dot(p) - t

        elif (num_points != 0) and (num_landmarks == 0):

            P = cp.Variable((point_dim, num_points))
            objective = cp.Minimize(sum_squares(Bp * vec(P) - t))

            problem = cp.Problem(objective)
            problem.solve(verbose=self._verbosity, solver=self._solver)

            self.P = P.value

            p = self.P.ravel(order='F')

            self.res_t = Bp.dot(p) - t

        else:
            return
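The objective in this variant is plain stacked linear least squares: the observation residual Am * vec(M) + Ap * vec(P) - d and the odometry residual Bp * vec(P) - t are minimized jointly. A self-contained sketch of the same pattern with hypothetical shapes (the graph API above is not assumed):

import cvxpy as cp
import numpy as np

rng = np.random.default_rng(0)
Am, Ap = rng.standard_normal((6, 4)), rng.standard_normal((6, 8))
Bp = rng.standard_normal((5, 8))
d, t = rng.standard_normal(6), rng.standard_normal(5)

M = cp.Variable(4)   # stands in for vec(M)
P = cp.Variable(8)   # stands in for vec(P)
objective = cp.Minimize(cp.sum_squares(Am @ M + Ap @ P - d) +
                        cp.sum_squares(Bp @ P - t))
cp.Problem(objective).solve()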
Example #12
 def square_affine(self, solver):
     A = np.random.randn(10, 2)
     b = np.random.randn(10)
     p = Problem(Minimize(sum_squares(A * self.x - b)))
     self.solve_QP(p, solver)
     for var in p.variables():
         self.assertItemsAlmostEqual(lstsq(A, b)[0].flatten(),
                                     var.value,
                                     places=1)
Example #13
 def abs(self, solver):
     u = Variable(2)
     constr = []
     constr += [abs(u[1] - u[0]) <= 100]
     prob = Problem(Minimize(sum_squares(u)), constr)
     print(("The problem is QP: ", prob.is_qp()))
     self.assertEqual(prob.is_qp(), True)
     result = prob.solve(solver=solver)
     self.assertAlmostEqual(result, 0)
Example #14
 def square_affine(self, solver):
     A = numpy.random.randn(10, 2)
     b = numpy.random.randn(10, 1)
     p = Problem(Minimize(sum_squares(A * self.x - b)))
     s = self.solve_QP(p, solver)
     for var in p.variables():
         self.assertItemsAlmostEqual(lstsq(A, b)[0].flatten(),
                                     s.primal_vars[var.id],
                                     places=1)
Example #15
 def test_qp_maximization_reduction_path_ecos(self):
     qp_maximization = Problem(Maximize(-sum_squares(self.x)),
                               [self.x <= -1])
     self.assertTrue(qp_maximization.is_dcp())
     path = PathFinder().reduction_path(ProblemType(qp_maximization),
                                        [ECOS])
     self.assertEquals(4, len(path))
     self.assertEquals(path[1], ConeMatrixStuffing)
     self.assertEquals(path[2], Dcp2Cone)
     self.assertEquals(path[3], FlipObjective)
Example #16
    def sparse_system(self, solver):
        m = 100
        n = 80
        np.random.seed(1)
        density = 0.4
        A = sp.rand(m, n, density)
        b = np.random.randn(m)

        p = Problem(Minimize(sum_squares(A * self.xs - b)), [self.xs == 0])
        self.solve_QP(p, solver)
        self.assertAlmostEqual(b.T.dot(b), p.value, places=4)
Example #17
    def sparse_system(self, solver):
        m = 1000
        n = 800
        numpy.random.seed(1)
        density = 0.2
        A = sp.rand(m, n, density)
        b = numpy.random.randn(m, 1)

        p = Problem(Minimize(sum_squares(A * self.xs - b)), [self.xs == 0])
        s = self.solve_QP(p, solver)
        self.assertAlmostEqual(b.T.dot(b), s.opt_val)
Example #18
    def _m_step(self, W, Am, Ap, d, sigma_d):

        num_points = len(self.graph.free_points)
        point_dim = self.graph.point_dim
        num_landmarks = len(self.graph.landmarks)
        landmark_dim = self.graph.landmark_dim

        Bp, t, sigma_t = self.graph.odometry_system()
        S_t = sp.sparse.diags(1 / _sanitized_noise_array(sigma_t))

        sigma_d = np.tile(_sanitized_noise_array(sigma_d), num_landmarks)
        S_d = sp.sparse.diags(1 / sigma_d)
        W = sp.sparse.diags(W.flatten('F'))

        Am = [
            np.zeros((Am.shape[0], Am.shape[1])) for _ in range(num_landmarks)
        ]
        for j in range(num_landmarks):
            Am[j][:, j] = 1
        Am = sp.sparse.csr_matrix(np.concatenate(Am, axis=0))
        Ap = sp.sparse.vstack([Ap for _ in range(num_landmarks)])

        d = np.tile(d, num_landmarks)

        M = cp.Variable((landmark_dim, num_landmarks))
        P = cp.Variable((point_dim, num_points))
        objective = cp.Minimize(
            sum_squares(W * S_d * ((Am * vec(M)) + (Ap * vec(P)) - d)) +
            sum_squares(S_t * ((Bp * vec(P)) - t)))
        problem = cp.Problem(objective)
        problem.solve(verbose=self._verbosity, solver=self._solver)

        self.M = M.value
        self.P = P.value

        m = self.M.ravel(order='F')
        p = self.P.ravel(order='F')

        self.res_d = Am.dot(m) + Ap.dot(p) - d
        self.res_t = Bp.dot(p) - t
Example #19
def prox_step(prob, rho_init, scaled=False, spectral=False):
    """Formulates the proximal operator for a given objective, constraints, and step size.
	Parikh, Boyd. "Proximal Algorithms."
	
	Parameters
    ----------
    prob : Problem
        The objective and constraints associated with the proximal operator.
        The sign of the objective function is flipped if `prob` is a maximization problem.
    rho_init : float
        The initial step size.
    scaled : logical, optional
    	Should the dual variable be scaled?
	spectral : logical, optional
	    Will spectral step sizes be used?
    
    Returns
    ----------
    prox : Problem
        The proximal step problem.
    vmap : dict
        A map of each proximal variable id to a dictionary containing that variable `x`,
        the mean variable parameter `xbar`, the associated dual parameter `y`, and the
        step size parameter `rho`. If `spectral = True`, the estimated dual parameter 
        `yhat` is also included.
	"""
    vmap = {}  # Store consensus variables
    f = flip_obj(prob).args[0]

    # Add penalty for each variable.
    for xvar in prob.variables():
        xid = xvar.id
        shape = xvar.shape
        vmap[xid] = {
            "x": xvar,
            "xbar": Parameter(shape, value=np.zeros(shape)),
            "y": Parameter(shape, value=np.zeros(shape)),
            "rho": Parameter(value=rho_init[xid], nonneg=True)
        }
        if spectral:
            vmap[xid]["yhat"] = Parameter(shape, value=np.zeros(shape))
        dual = vmap[xid]["y"] if scaled else vmap[xid]["y"] / vmap[xid]["rho"]
        f += (vmap[xid]["rho"] / 2.0) * sum_squares(xvar - vmap[xid]["xbar"] +
                                                    dual)

    prox = Problem(Minimize(f), prob.constraints)
    return prox, vmap
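A hypothetical usage sketch for prox_step (it assumes prox_step and its flip_obj helper are importable from the surrounding module): rho_init is indexed by variable id, and the xbar and y parameters default to zero, so the first proximal step of a tiny least-squares problem can be solved directly.

import numpy as np
from cvxpy import Minimize, Problem, Variable, sum_squares

x = Variable(3)
prob = Problem(Minimize(sum_squares(x - np.ones(3))))
rho_init = {x.id: 1.0}        # one step size per variable id
prox, vmap = prox_step(prob, rho_init)
prox.solve()                  # xbar and y start at zero
print(x.value)                # roughly 2/3 in each coordinate for this toy problem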
Example #20
    def regression_2(self, solver):
        np.random.seed(1)
        # Number of examples to use
        n = 100
        # Specify the true value of the variable
        true_coeffs = np.array([2, -2, 0.5])
        # Generate data
        x_data = np.random.rand(n) * 5
        x_data_expanded = np.vstack([np.power(x_data, i) for i in range(1, 4)])
        print((x_data_expanded.shape, true_coeffs.shape))
        y_data = x_data_expanded.T.dot(true_coeffs) + 0.5 * np.random.rand(n)

        quadratic = self.offset + x_data*self.slope + \
            self.quadratic_coeff*np.power(x_data, 2)
        residuals = quadratic.T - y_data
        fit_error = sum_squares(residuals)
        p = Problem(Minimize(fit_error), [])
        self.solve_QP(p, solver)

        self.assertAlmostEqual(139.225660756, p.value, places=4)
Example #21
    def regression_1(self, solver):
        np.random.seed(1)
        # Number of examples to use
        n = 100
        # Specify the true value of the variable
        true_coeffs = np.array([[2, -2, 0.5]]).T
        # Generate data
        x_data = np.random.rand(n) * 5
        x_data = np.atleast_2d(x_data)
        x_data_expanded = np.vstack([np.power(x_data, i) for i in range(1, 4)])
        x_data_expanded = np.atleast_2d(x_data_expanded)
        y_data = x_data_expanded.T.dot(true_coeffs) + 0.5 * np.random.rand(
            n, 1)
        y_data = np.atleast_2d(y_data)

        line = self.offset + x_data * self.slope
        residuals = line.T - y_data
        fit_error = sum_squares(residuals)
        p = Problem(Minimize(fit_error), [])
        self.solve_QP(p, solver)
        self.assertAlmostEqual(1171.60037715, p.value, places=4)
Example #22
    def regression_1(self, solver):
        numpy.random.seed(1)
        # Number of examples to use
        n = 100
        # Specify the true value of the variable
        true_coeffs = numpy.matrix('2; -2; 0.5')
        # Generate data
        x_data = numpy.random.rand(n) * 5
        x_data = numpy.asmatrix(x_data)
        x_data_expanded = numpy.vstack([numpy.power(x_data, i)
                                        for i in range(1, 4)])
        x_data_expanded = numpy.asmatrix(x_data_expanded)
        y_data = x_data_expanded.T * true_coeffs + 0.5 * numpy.random.rand(n,1)
        y_data = numpy.asmatrix(y_data)

        line = self.offset + x_data * self.slope
        residuals = line.T - y_data
        fit_error = sum_squares(residuals)
        p = Problem(Minimize(fit_error), [])
        s = self.solve_QP(p, solver)
        self.assertAlmostEqual(1171.60037715, p.value, places=4)
Example #23
    def regression_2(self, solver):
        numpy.random.seed(1)
        # Number of examples to use
        n = 100
        # Specify the true value of the variable
        true_coeffs = numpy.matrix('2; -2; 0.5')
        # Generate data
        x_data = numpy.random.rand(n, 1) * 5
        x_data = numpy.asmatrix(x_data)
        x_data_expanded = numpy.hstack(
            [numpy.power(x_data, i) for i in range(1, 4)])
        x_data_expanded = numpy.asmatrix(x_data_expanded)
        y_data = x_data_expanded * true_coeffs + 0.5 * numpy.random.rand(n, 1)
        y_data = numpy.asmatrix(y_data)

        quadratic = self.offset + x_data * self.slope + self.quadratic_coeff * numpy.power(
            x_data, 2)
        residuals = quadratic - y_data
        fit_error = sum_squares(residuals)
        p = Problem(Minimize(fit_error), [])
        s = self.solve_QP(p, solver)
        self.assertAlmostEqual(139.225660756, s.opt_val)
Example #24
    def test_warm_start(self):
        """Test warm start.
        """
        m = 200
        n = 100
        np.random.seed(1)
        A = np.random.randn(m, n)
        b = Parameter(m)

        # Construct the problem.
        x = Variable(n)
        prob = Problem(Minimize(sum_squares(A * x - b)))

        b.value = np.random.randn(m)
        result = prob.solve(warm_start=False)
        result2 = prob.solve(warm_start=True)
        self.assertAlmostEqual(result, result2)
        b.value = np.random.randn(m)
        result = prob.solve(warm_start=True)
        result2 = prob.solve(warm_start=False)
        self.assertAlmostEqual(result, result2)
        pass
Example #25
    def optimize(self):

        self._pre_optimizer.optimize()
        self._pre_optimizer.update()

        points = self.graph.free_points
        landmarks = self.graph.landmarks

        num_points = len(points)
        point_dim = self.graph.point_dim
        num_landmarks = len(landmarks)
        landmark_dim = self.graph.landmark_dim

        transforms = [
            equivalence.SumMass(self.graph.correspondence_map.set_map()),
            equivalence.ExpDistance(self._sigma),
            equivalence.Facing()
        ]
        E, W = equivalence.equivalence_matrix(landmarks, transforms=transforms)
        if E.shape[0] == 0:
            self.M = self._pre_optimizer.M
            self.P = self._pre_optimizer.P
            self.res_d = self._pre_optimizer.res_d
            self.res_t = self._pre_optimizer.res_t
            self.equivalence_pairs = []
            return

        Am, Ap, d, sigma_d = self.graph.observation_system()
        Bp, t, sigma_t = self.graph.odometry_system()

        S_d, S_t = sp.sparse.diags(
            1 / _sanitized_noise_array(sigma_d)), sp.sparse.diags(
                1 / _sanitized_noise_array(sigma_t))

        M = cp.Variable((landmark_dim, num_landmarks))
        P = cp.Variable((point_dim, num_points))

        M.value = self._pre_optimizer.M
        P.value = self._pre_optimizer.P

        objective = cp.Minimize(mixed_norm(W * E * M.T))
        constraints = [
            norm((Am * vec(M)) +
                 (Ap * vec(P)) - d) <= 2 * np.linalg.norm(sigma_d + 1e-6),
            norm((Bp * vec(P)) - t) <= 2 * np.linalg.norm(sigma_t + 1e-6)
        ]
        problem = cp.Problem(objective, constraints)
        problem.solve(verbose=self._verbosity,
                      solver=self._solver,
                      warm_start=True)

        if problem.solution.status == 'infeasible':
            self.M = self._pre_optimizer.M
            self.P = self._pre_optimizer.P
            self.res_d = self._pre_optimizer.res_d
            self.res_t = self._pre_optimizer.res_t
            self.equivalence_pairs = []
            return

        E_ = E[np.abs(np.linalg.norm(E * M.value.T, axis=1)) < 0.001, :]
        objective = cp.Minimize(
            sum_squares(S_d * ((Am * vec(M)) + (Ap * vec(P)) - d)) +
            sum_squares(S_t * ((Bp * vec(P)) - t)))
        constraints = [E_ * M.T == 0] if E_.shape[0] > 0 else []
        problem = cp.Problem(objective, constraints)
        problem.solve(verbose=self._verbosity,
                      solver=self._solver,
                      warm_start=True)

        self.M = M.value
        self.P = P.value

        m = self.M.ravel(order='F')
        p = self.P.ravel(order='F')

        self.res_d = Am.dot(m) + Ap.dot(p) - d
        self.res_t = Bp.dot(p) - t

        self.equivalence_pairs = [(landmarks[i], landmarks[j])
                                  for (i, j) in E_.tolil().rows]
Example #26
    def reconstruct(self, measurement_results):
        from cvxpy import Variable, atoms, abs, reshape, Minimize, Problem, CVXOPT
        from traceback import print_exc
        reconstruction_operator_names = []
        reconstruction_operators = []
        basis_axes_names = self.reconstruction_basis.keys()
        basis_vector_norms = np.asarray([
            np.linalg.norm(self.reconstruction_basis[r]['operator'])
            for r in basis_axes_names
        ])

        for reconstruction_operator_name, reconstruction_operator in self.reconstruction_basis.items(
        ):
            reconstruction_operator_names.append(reconstruction_operator_name)
            reconstruction_operators.append(
                reconstruction_operator['operator'])

        reconstruction_matrix = []
        for rot, projection in self.proj_seq.items():
            for measurement_name, projection_operator in projection[
                    'operators'].items():
                reconstruction_matrix.append([
                    np.sum(projection_operator *
                           np.conj(reconstruction_operator)) /
                    np.sum(np.abs(reconstruction_operator)**2)
                    for reconstruction_operator in reconstruction_operators
                ])

        reconstruction_matrix_pinv = np.linalg.pinv(reconstruction_matrix)
        reconstruction_matrix = np.asarray(reconstruction_matrix)
        self.reconstruction_matrix = reconstruction_matrix
        self.reconstruction_matrix_pinv = reconstruction_matrix_pinv

        projections = np.dot(reconstruction_matrix_pinv, measurement_results)
        reconstruction = {
            str(k): v
            for k, v in zip(basis_axes_names, projections)
        }

        if self.reconstruction_type == 'cvxopt':
            #x = cvxpy.Variable(len(projections), complex=True)
            x = Variable(len(projections), complex=True)
            rmat_normalized = np.asarray(reconstruction_matrix /
                                         np.mean(np.abs(measurement_results)),
                                         dtype=complex)
            meas_normalized = np.asarray(measurement_results).ravel(
            ) / np.mean(np.abs(measurement_results))
            #lstsq_objective = cvxpy.atoms.sum_squares(cvxpy.abs(rmat_normalized @ x - meas_normalized))
            lstsq_objective = atoms.sum_squares(
                abs(rmat_normalized @ x - meas_normalized))
            matrix_size = int(np.round(np.sqrt(len(projections))))
            #x_reshaped = cvxpy.reshape(x, (matrix_size, matrix_size))
            x_reshaped = reshape(x, (matrix_size, matrix_size))
            psd_constraint = x_reshaped >> 0
            hermitian_constraint = x_reshaped.H == x_reshaped
            # Create two constraints.
            constraints = [psd_constraint, hermitian_constraint]
            # Form objective.
            #obj = cvxpy.Minimize(lstsq_objective)
            obj = Minimize(lstsq_objective)
            # Form and solve problem.
            prob = Problem(obj, constraints)
            try:
                prob.solve(solver=CVXOPT, verbose=True)
                reconstruction = {
                    str(k): v
                    for k, v in zip(basis_axes_names, np.asarray(x.value))
                }
            except ValueError as e:
                print_exc()

        if self.reconstruction_output_mode == 'array':
            it = np.nditer([self.reconstruction_output_array, None],
                           flags=['refs_ok'],
                           op_dtypes=(object, complex))
            with it:
                for x, z in it:
                    z[...] = reconstruction[str(x)]
                reconstruction = it.operands[1]

        return reconstruction
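The cvxopt branch above is a constrained least-squares fit of a Hermitian, positive semidefinite matrix to linear measurements. A minimal self-contained sketch of that pattern with hypothetical data, using SCS instead of CVXOPT and hermitian=True in place of the explicit x_reshaped.H == x_reshaped constraint:

import cvxpy as cp
import numpy as np

# Hypothetical 2x2 "measured" matrix to fit; not the reconstruction data above.
target = np.array([[0.7, 0.1 + 0.2j],
                   [0.1 - 0.2j, 0.3]])

X = cp.Variable((2, 2), hermitian=True)            # Hermitian by construction
objective = cp.Minimize(cp.sum_squares(cp.abs(X - target)))
prob = cp.Problem(objective, [X >> 0])             # positive semidefinite
prob.solve(solver=cp.SCS)
print(X.value)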