Example #1
 def check_hess(self, x=None):
     if x is None:
         x = np.random.rand(self.n)
     hess = self.f_cost_hess(x)
     hess_fd = approx_jacobian(x, self.f_cost_grad, self.epsilon)
     err = np.sqrt(np.sum((hess - hess_fd)**2))
     print "[%s] Hessian error: %f" % (self.name, err)
     return (hess, hess_fd)
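Example #1 validates an analytic Hessian by differentiating the analytic gradient numerically: the finite-difference Jacobian of the gradient is the Hessian. A minimal self-contained sketch of the same check; the test function f(x) = x0**2 * x1 + x1**3 and its derivatives below are illustrative assumptions, and approx_jacobian is SciPy's private helper (relocated to scipy.optimize._slsqp_py in newer releases):

import numpy as np
from scipy.optimize.slsqp import approx_jacobian  # scipy.optimize._slsqp_py in newer SciPy

def grad(x):
    # analytic gradient of f(x) = x0**2 * x1 + x1**3
    return np.array([2.0 * x[0] * x[1], x[0]**2 + 3.0 * x[1]**2])

def hess(x):
    # analytic Hessian of the same f
    return np.array([[2.0 * x[1], 2.0 * x[0]],
                     [2.0 * x[0], 6.0 * x[1]]])

x = np.random.rand(2)
hess_fd = approx_jacobian(x, grad, 1e-7)  # Jacobian of the gradient = Hessian
err = np.sqrt(np.sum((hess(x) - hess_fd)**2))
print("Hessian error: %f" % err)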
Example #2
 def compare_jacobian(self, fit_function, ndim, isotropic, n, groups=None,
                      custom_param_mode=None):
     ff = FitFunctions(fit_function, ndim, isotropic)
     param_mode = {param: 'var' for param in ff.params}
     param_mode['background'] = 'cluster'
     if custom_param_mode is not None:
         param_mode.update(custom_param_mode)
     ff = FitFunctions(fit_function, ndim, isotropic, param_mode=param_mode)
     params = np.random.random((n, len(ff.params))) * 10
     residual, jacobian = self.get_residual(ff, n, params, groups)
     vect = vect_from_params(params, ff.modes, groups, operation=np.mean)
     actual = jacobian(vect)
     expected = approx_jacobian(vect, residual, EPSILON)[0]
     assert_allclose(actual, expected, rtol=RTOL, atol=ATOL)
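A common choice for the finite-difference step (EPSILON above) is the square root of machine epsilon, which balances truncation error against floating-point round-off; a one-line sketch of that conventional choice:

import numpy as np
EPSILON = np.sqrt(np.finfo(float).eps)  # ~1.49e-08 for float64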
Example #3
    def intrinsic_variances_heliocentric(self, x_spread=0.01, v_spread=0.001):
        """
        Spread in position = 10 pc, spread in velocity = 1 km/s.
        """
        VX = np.array(
            [x_spread, x_spread, x_spread, v_spread, v_spread, v_spread])**2.

        func = lambda x: cartesian_to_spherical(
            galactocentric_to_heliocentric(x))

        VY = np.zeros_like(self.Y)
        for k in range(self.K):
            J = approx_jacobian(self.X[k], func, 1E-4)
            cov = np.diag(VX)
            VY[k] = np.diag(J.dot(cov).dot(J.T))

        return VY
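Example #3 is first-order (delta-method) error propagation: for y = f(x) with input covariance matrix C, the output covariance is approximated by J C J^T, where J is the Jacobian of f at x. A minimal sketch with a hypothetical Cartesian-to-polar transform and made-up input variances:

import numpy as np
from scipy.optimize.slsqp import approx_jacobian  # scipy.optimize._slsqp_py in newer SciPy

def to_polar(x):
    # hypothetical nonlinear transform: (x, y) -> (r, theta)
    return np.array([np.hypot(x[0], x[1]), np.arctan2(x[1], x[0])])

X = np.array([3.0, 4.0])
cov_X = np.diag([0.01, 0.04])           # assumed input covariance
J = approx_jacobian(X, to_polar, 1e-6)  # local linearization of the transform
cov_Y = J.dot(cov_X).dot(J.T)           # first-order propagated covariance
print(np.diag(cov_Y))                   # variances of r and theta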
Example #4
def _plot_jacobian(graph,
                   X,
                   plot_lines_on_variables=False,
                   plot_lines_on_elements=False):
    import matplotlib.pyplot as plt

    # Finite-difference Jacobian of the residuals, thresholded to a boolean
    # sparsity pattern (any nonzero entry marks a variable/element dependency)
    J = approx_jacobian(X, residual_function, 1e-4, graph)
    J = J.astype(dtype='bool')

    plt.imshow(J, aspect='equal', interpolation='none')
    ax = plt.gca()

    if plot_lines_on_variables:
        ax.set_xticks(np.arange(-.5, len(J), 1), minor=True)
        ax.set_yticks(np.arange(-.5, len(J), 1), minor=True)
        ax.grid(which='minor', color='w', linestyle='-', linewidth=1)

    if plot_lines_on_elements:
        ax.set_xticks(np.arange(-.5, len(J), 3), minor=False)
        ax.set_yticks(np.arange(-.5, len(J), 3), minor=False)
        ax.grid(color='r', linestyle='-', linewidth=2)

    plt.show()
Example #5
 def f_cost_hess(self, x):
     return approx_jacobian(x, self.f_cost_grad, self.epsilon)
Example #6
 def _jacobian_function_wrapper(X):
     return approx_jacobian(X, _constraint_function_wrapper, 1e-6)
Example #7
    def test_full_jacobian(self):
        configA = yaml.load('''
                chain_id: chainA
                before_chain: [transformA]
                dh_link_num:  1
                after_chain:  []
            ''', Loader=yaml.SafeLoader)

        configB = yaml.load('''
                chain_id: chainB
                before_chain: []
                dh_link_num:  1
                after_chain:  [transformB]
            ''', Loader=yaml.SafeLoader)

        robot_params = RobotParams()
        robot_params.configure(
            yaml.load('''
            dh_chains:
              chainA:
              - [ 0, 0, 1, 0 ]
              chainB:
              - [ 0, 0, 2, 0 ]
            tilting_lasers:
              laserB:
                before_joint: [0, 0, 0, 0, 0, 0]
                after_joint:  [0, 0, 0, 0, 0, 0]
            rectified_cams: {}
            transforms:
                transformA: [0, 0, 0, 0, 0, 0]
                transformB: [0, 0, 0, 0, 0, 0]
            checkerboards:
              boardA:
                corners_x: 2
                corners_y: 2
                spacing_x: 1
                spacing_y: 1
              boardB:
                corners_x: 1
                corners_y: 1
                spacing_x: 1
                spacing_y: 1
            ''', Loader=yaml.SafeLoader))

        free_dict = yaml.load('''
            dh_chains:
              chainA:
              - [ 1, 0, 0, 0 ]
              chainB:
              - [ 0, 0, 0, 0 ]
            tilting_lasers:
              laserB:
                before_joint: [1, 0, 0, 0, 0, 0]
                after_joint:  [0, 0, 0, 0, 0, 0]
            rectified_cams: {}
            transforms:
                transformA: [0, 0, 0, 0, 0, 0]
                transformB: [1, 0, 0, 0, 0, 0]
            checkerboards:
              boardA:
                spacing_x: 1
                spacing_y: 1
              boardB:
                spacing_x: 0
                spacing_y: 0
            ''', Loader=yaml.SafeLoader)

        sensorA = ChainSensor(
            configA,
            ChainMeasurement(chain_id="chainA",
                             chain_state=JointState(position=[0])), "boardA")
        sensorB = ChainSensor(
            configB,
            ChainMeasurement(chain_id="chainB",
                             chain_state=JointState(position=[0])), "boardB")
        laserB = TiltingLaserSensor(
            {'laser_id': 'laserB'},
            LaserMeasurement(laser_id="laserB",
                             joint_points=[JointState(position=[0, 0, 2])]))

        multisensorA = MultiSensor(None)
        multisensorA.sensors = [sensorA]
        multisensorA.checkerboard = "boardA"
        multisensorA.update_config(robot_params)

        multisensorB = MultiSensor(None)
        multisensorB.sensors = [sensorB, laserB]
        multisensorB.checkerboard = "boardB"
        multisensorB.update_config(robot_params)

        poseA = numpy.array([1, 0, 0, 0, 0, 0])
        poseB = numpy.array([2, 0, 0, 0, 0, 0])

        calc = ErrorCalc(robot_params, free_dict, [multisensorA, multisensorB])

        expanded_param_vec = robot_params.deflate()
        free_list = robot_params.calc_free(free_dict)
        opt_param_mat = expanded_param_vec[numpy.where(free_list), 0].copy()
        opt_param = numpy.array(opt_param_mat)[0]
        opt_all_vec = concatenate([opt_param, poseA, poseB])

        print "Calculating Sparse"
        J = calc.calculate_jacobian(opt_all_vec)
        numpy.savetxt("J_sparse.txt", J, fmt="% 4.3f")


        print "Calculating Dense"
        # private SciPy helper; relocated to scipy.optimize._slsqp_py in newer SciPy
        from scipy.optimize.slsqp import approx_jacobian
        J_naive = approx_jacobian(opt_all_vec, calc.calculate_error, 1e-6)
        numpy.savetxt("J_naive.txt", J_naive, fmt="% 4.3f")

        self.assertAlmostEqual(numpy.linalg.norm(J - J_naive), 0.0, 6)
Example #8
# Assumed imports for this snippet; kkt_multipliers and npnlp_result are
# helper types from the snippet's own package and are not defined here.
import numpy
import cvxopt
import cvxopt.solvers
from scipy.linalg import lu
from scipy.optimize import fminbound
from scipy.optimize.slsqp import approx_jacobian  # scipy.optimize._slsqp_py in newer SciPy


def sqp(costfun, x0, kkt0, A, b, Aeq, beq, lb, ub, nonlconeq, nonlconineq, **kwargs):
    """
    A sequential quadratic programming solver. The higher-level solver manipulates
    the problem using NumPy and SciPy, while the QP subproblems are solved with cvxopt.

    :param costfun: objective function, costfun(x) -> float.
    :param x0: initial guess for the solution vector.
    :param kkt0: initial KKT multipliers (a kkt_multipliers instance), or None.
    :param A: linear inequality constraint matrix (A x <= b), or None.
    :param b: linear inequality right-hand side, or None.
    :param Aeq: linear equality constraint matrix (Aeq x = beq), or None.
    :param beq: linear equality right-hand side, or None.
    :param lb: lower bounds on x (not implemented; must be None).
    :param ub: upper bounds on x (not implemented; must be None).
    :param nonlconeq: nonlinear equality constraints, nonlconeq(x, kkt) = 0, or None.
    :param nonlconineq: nonlinear inequality constraints, nonlconineq(x, kkt) <= 0, or None.
    :return: an npnlp_result with solution, multipliers, and iteration counts.
    """
    delta_bar = kwargs.get('delta_bar', 0.90)
    max_super_iterations = kwargs.get('max_super_iterations', 100)
    tolerance = kwargs.get('tolerance', 1e-8)

    epsilon = tolerance

    opt = npnlp_result()

    if A is None:
        A = numpy.empty((0, len(x0)))
    else:
        A = A.astype(numpy.float64)

    if b is None:
        b = numpy.empty(0)
    else:
        # flatten to 1-D so the hstack/inner calls below get consistent shapes
        b = numpy.asarray(b, dtype=numpy.float64).flatten()

    if Aeq is None:
        Aeq = numpy.empty((0, len(x0)))
    else:
        Aeq = Aeq.astype(numpy.float64)

    if beq is None:
        beq = numpy.empty(0)
    else:
        # flatten to 1-D so the hstack/inner calls below get consistent shapes
        beq = numpy.asarray(beq, dtype=numpy.float64).flatten()

    if lb is not None:
        raise NotImplementedError

    if ub is not None:
        raise NotImplementedError

    n_states = x0.shape[0]

    if kkt0 is None:
        kkt0 = kkt_multipliers()

    if nonlconeq is None:
        n_lconeq = 0
        def nonlconeq(*args):
            return numpy.array([])
    else:
        n_lconeq = nonlconeq(x0, kkt0).shape[0]

    if nonlconineq is None:
        n_lconineq = 0
        def nonlconineq(*args):
            return numpy.array([])
    else:
        n_lconineq = nonlconineq(x0, kkt0).shape[0]

    if n_lconeq == 0 and n_lconineq == 0:
        def lagrangian(x, kkt):
            return costfun(x)
    elif n_lconineq == 0:
        def lagrangian(x, kkt):
            return costfun(x) + numpy.inner(kkt.equality_nonlinear, nonlconeq(x, kkt))
    elif n_lconeq == 0:
        def lagrangian(x, kkt):
            return costfun(x) + numpy.inner(kkt.inequality_nonlinear, nonlconineq(x, kkt))
    else:
        def lagrangian(x, kkt):
            return costfun(x) + numpy.inner(kkt.equality_nonlinear, nonlconeq(x, kkt)) + numpy.inner(kkt.inequality_nonlinear, nonlconineq(x, kkt))

    nsuperit = 0
    nit = 0
    running = True
    converged = False

    xk = numpy.copy(x0)
    kktk = kkt_multipliers()
    kktk.equality_linear = numpy.copy(kkt0.equality_linear)
    kktk.equality_nonlinear = numpy.copy(kkt0.equality_nonlinear)
    kktk.inequality_linear = numpy.copy(kkt0.inequality_linear)
    kktk.inequality_nonlinear = numpy.copy(kkt0.inequality_nonlinear)
    kktk.lower = numpy.copy(kkt0.lower)
    kktk.upper = numpy.copy(kkt0.upper)
    B = numpy.eye(n_states) # Start with B = I to ensure positive definite
    cvxopt.solvers.options['show_progress'] = False
    while running:
        f = costfun(xk)
        df = approx_jacobian(xk, costfun, epsilon)

        if n_lconineq > 0:
            dg = approx_jacobian(xk, lambda _: nonlconineq(_, kktk), epsilon)
            nonlconineq_eval = nonlconineq(xk, kktk)
            nonlconineq_satisfied = nonlconineq_eval < 0
            delta = numpy.array([1 if satisfied else delta_bar for satisfied in nonlconineq_satisfied])
        else:
            nonlconineq_eval = numpy.array([])

        if n_lconeq > 0:
            dh = approx_jacobian(xk, lambda _: nonlconeq(_, kktk), epsilon)
            nonlconeq_eval = nonlconeq(xk, kktk)
            nonlconeq_satisfied = numpy.abs(nonlconeq_eval) < tolerance
        else:
            nonlconeq_eval = numpy.array([])

        q = df[0]
        if n_lconineq > 0 and len(A) > 0:
            G = numpy.hstack((dg, A))
            h = numpy.hstack((-delta*nonlconineq_eval, b - A.dot(xk)))
        elif n_lconineq > 0:
            G = dg
            h = -delta*nonlconineq_eval
        elif len(A) > 0:
            G = A
            h = b - A.dot(xk)
        else:
            G = None
            h = None

        if n_lconeq > 0 and len(Aeq) > 0:
            R = numpy.hstack((dh, Aeq))
            t = numpy.hstack((-delta_bar*nonlconeq_eval, beq - Aeq.dot(xk)))
        elif n_lconeq > 0:
            R = dh
            t = -delta_bar*nonlconeq_eval
        elif len(Aeq) > 0:
            R = Aeq
            t = beq - Aeq.dot(xk)
        else:
            R = None
            t = None

        reshaped = False
        if R is not None:
            # matrix_rank(None) would raise, so only rank-check when R exists
            rshape = numpy.linalg.matrix_rank(R)
            if rshape < R.shape[0]:
                reshaped = True
                # Drop linearly dependent equality rows via LU factorization of
                # the augmented system [R | t]; Q is the permutation matrix
                Q, L, U = lu(numpy.column_stack((R, t)))
                M = L.dot(U)
                R = M[:rshape, :-1]
                t = M[:rshape, -1]
                Momit = M[rshape:, :]  # omitted (dependent) rows, re-matched below

        try:
            _1 = cvxopt.matrix(B)
            _2 = cvxopt.matrix(q)
            if G is None:
                _3 = None
                _4 = None
            else:
                _3 = cvxopt.matrix(G)
                _4 = cvxopt.matrix(h)

            if R is None:
                _5 = None
                _6 = None
            else:
                _5 = cvxopt.matrix(R)
                _6 = cvxopt.matrix(t)
            qpsol = cvxopt.solvers.qp(_1, _2, _3, _4, _5, _6)
        except Exception:
            # cvxopt failed on the QP subproblem: stop iterating and report failure
            qpsol = {'status': 'failed'}
            break


        s = numpy.array(qpsol['x']).T[0]
        kktk.equality_linear = numpy.array(qpsol['y']).T[0][:len(beq)]
        kktk.equality_nonlinear = numpy.array(qpsol['y']).T[0][len(beq):]
        kktk.inequality_linear = numpy.array(qpsol['z']).T[0][:len(b)]
        kktk.inequality_nonlinear = numpy.array(qpsol['z']).T[0][len(b):]
        la = kktk.equality_nonlinear

        if reshaped:
            # TODO: this row re-matching could be done with pure linear algebra
            # instead of the conditionals below
            la_append = None
            for rrow in range(Momit.shape[0]):
                ind = numpy.argmax([numpy.inner(M[row,:], Momit[rrow,:])/(numpy.linalg.norm(M[row,:])*numpy.linalg.norm(Momit[rrow,:])) for row in range(rshape)])
                if la_append is None:
                    la_append = la[ind]
                else:
                    la_append = numpy.hstack((la_append, la[ind]))

            la = numpy.hstack((la, la_append))
            kktk.equality_nonlinear = Q.dot(la)

        nit += qpsol['iterations']

        if nsuperit == 0:
            uineq = abs(kktk.inequality_nonlinear)
            ueq = abs(kktk.equality_nonlinear)
        else:
            uineq = numpy.maximum(abs(kktk.inequality_nonlinear), 0.5 * (uineq + abs(kktk.inequality_nonlinear)))
            ueq = numpy.maximum(abs(kktk.equality_nonlinear), 0.5 * (ueq + abs(kktk.equality_nonlinear)))

        # Get step size
        def phi(alpha):
            return costfun(xk + alpha*s) + numpy.inner(uineq, numpy.maximum(0, nonlconineq(xk + alpha*s, kktk))) +\
                   numpy.inner(ueq, abs(nonlconeq(xk + alpha*s, kktk))) +\
                   numpy.inner(kktk.equality_linear, Aeq.dot(xk + s*alpha) - beq) +\
                   numpy.inner(kktk.inequality_linear, A.dot(xk + s*alpha) - b)

        alpha = fminbound(phi, 0, 2) # TODO: Replace this with a more efficient inexact line search
        p = alpha * s
        xk1 = xk + p

        y = (approx_jacobian(xk1, lambda _: lagrangian(_, kktk), epsilon) - approx_jacobian(xk, lambda _: lagrangian(_, kktk), epsilon))[0]

        # Damped BFGS update of the hessian
        if numpy.inner(p, y) >= 0.2*B.dot(p).dot(p):
            theta = 1
        else:
            theta = 0.8 * B.dot(p).dot(p)/(B.dot(p).dot(p) - numpy.inner(p, y))

        eta = theta * y + (1 - theta)*B.dot(p)

        B = B - B.dot(numpy.outer(p,p)).dot(B) / (B.dot(p).dot(p)) + numpy.outer(eta, eta) / eta.dot(p)

        change = xk1 - xk
        xk = numpy.copy(xk1)
        nsuperit += 1

        if all(abs(change) < tolerance):
            running = False

        if nsuperit > max_super_iterations:
            running = False

    if all(abs(nonlconeq_eval) < tolerance) and all(nonlconineq_eval < tolerance) and qpsol['status'] == 'optimal':
        converged = True

    opt['x'] = xk
    opt['fval'] = costfun(xk)
    opt['success'] = converged
    opt['kkt'] = kktk
    opt['grad'] = approx_jacobian(xk, costfun, epsilon)[0]
    opt['hessian'] = B
    opt['nsuperit'] = nsuperit
    opt['nit'] = nit

    return opt
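A hypothetical call sketch for the solver above, assuming its companion helpers (kkt_multipliers, npnlp_result) are importable from the snippet's own package; the cost and constraint functions are made up and follow the solver's g(x) <= 0 inequality convention:

import numpy

def cost(x):
    return (x[0] - 1.0)**2 + (x[1] - 2.5)**2

def ineq(x, kkt):
    return numpy.array([x[0] + x[1] - 4.0])  # feasible when x0 + x1 <= 4

result = sqp(cost, numpy.array([0.0, 0.0]), None,  # costfun, x0, kkt0
             None, None, None, None,               # A, b, Aeq, beq
             None, None,                           # lb, ub (must be None)
             None, ineq)                           # nonlconeq, nonlconineq
print(result['x'], result['success'])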
Example #9
 def cjac(x, *args):
     return approx_jacobian(x, fun, epsilon, *args)
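Example #9 mirrors how SciPy's SLSQP driver wraps approx_jacobian to supply a numerical constraint Jacobian when the caller provides none; fun and epsilon are captured from the enclosing scope. A self-contained sketch of the same closure pattern (make_numeric_jac and con are illustrative names):

import numpy as np
from scipy.optimize.slsqp import approx_jacobian  # scipy.optimize._slsqp_py in newer SciPy

def make_numeric_jac(fun, epsilon=np.sqrt(np.finfo(float).eps)):
    def cjac(x, *args):
        return approx_jacobian(x, fun, epsilon, *args)
    return cjac

con = lambda x: np.array([x[0] + x[1] - 1.0])  # a toy constraint function
jac = make_numeric_jac(con)
print(jac(np.array([0.25, 0.25])))  # approximately [[1. 1.]]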