Example no. 1
    def __init__(self, x, problem, **kwargs):
        try:
            self.old = kwargs['old']
        except KeyError:
            self.old = None
        self.problem = problem
        self.x = x
        self.hessian_modified = False
        self.hessian_original = None

        self.mu = kwargs['mu']
        self.delta = kwargs['delta']
        self.tau = kwargs['tau']
        self.eps_mu = kwargs['eps_mu']

        self.f = problem.f(x)
        self.f_x = problem.f_x(x)
        self.c_e = problem.c_e(x)
        self.c_i = problem.c_i(x)
        self.A_e = problem.A_e(x)
        self.A_i = problem.A_i(x)

        self.n = len(x)
        self.m = len(self.c_i)
        self.t = len(self.c_e)
        self.e = ones((self.m, 1))

        if self.A_e.shape == (self.t, self.n):
            self.A_e = self.A_e.transpose()
        elif self.A_e.shape == (self.n, self.t):
            pass
        else:
            raise ValueError('Wrong shape for equality jacobian matrix')

        if self.A_i.shape == (self.m, self.n):
            self.A_i = self.A_i.transpose()
        elif self.A_i.shape == (self.n, self.m):
            pass
        else:
            raise ValueError('Wrong shape for inequality jacobian matrix')

        try:
            self.s = kwargs['s']
        except KeyError:
            s = abs(self.c_i.reshape(-1, 1))
            self.s = s + (1. - self.tau) * (s == 0)  # prevents div by 0 later

        # Combination objects, calculated once for convenience/speed
        self.fx_mu = vstack((self.f_x, -self.mu * self.e))
        self.ce_cis = vstack((self.c_e, self.c_i + self.s))
        self.A = bmat([[self.A_e, self.A_i],
                       [None, spdiags(self.s.T, [0], self.m, self.m)]])
        self.A_aug = bmat([[seye(self.n + self.m, self.n + self.m), self.A],
                           [self.A.transpose(), None]]).tocsc()
        self.A_aug_fact = factorized(self.A_aug)
        self.A_c = self.A * self.ce_cis

        # Lagrange multipliers and Hessian
        try:
            oldie = kwargs['lm_mu']
        except KeyError:
            oldie = None
        self.l_e, self.l_i, s_sigma_s = kwargs['update_lambdas'](self, oldie)
        self.Aele_Aili = self.A_e * self.l_e + self.A_i * self.l_i
        try:  # Either use the problem's actual Hessian, or ...
            self.hessian = problem.hessian(x, self.l_e, self.l_i)
        except (TypeError, AttributeError):  # ... fall back to an approximation
            try:
                self.hessian = kwargs['hessian_approx'](self)
            except KeyError:  # no approximation supplied: start from I
                self.hessian = seye(self.n, self.n)

        self.hessian_original = self.hessian

        #i = 0.001
        #while (eigsh(self.hessian, k=1, which='SA', maxiter=100000,
        #       return_eigenvectors=False)[0] < 0):
        #    self.hessian_modified = True
        #    self.hessian = (self.hessian + i * seye(*self.hessian.shape)).tocsc()
        #    i *= 10

        self.G = bmat([[self.hessian, None], [None, s_sigma_s]]).tocsc()

        # Error values
        self.E, self.E_type = kwargs['error_func'](extra=True, it=self)
        self.E_mu = kwargs['error_func'](self.mu, it=self)
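
For reference, the block matrices assembled in this constructor (read off the
bmat calls above, with S = diag(s) and I the identity of order n + m; A_e and
A_i enter as n x t and n x m after the shape checks) are, in LaTeX notation:

    A = \begin{pmatrix} A_e & A_i \\ 0 & S \end{pmatrix}, \qquad
    A_{\mathrm{aug}} = \begin{pmatrix} I & A \\ A^{T} & 0 \end{pmatrix}, \qquad
    G = \begin{pmatrix} H & 0 \\ 0 & \Sigma_s \end{pmatrix},

where H is the (possibly approximated) Hessian of the Lagrangian and
\Sigma_s stands for the s_sigma_s block returned by update_lambdas.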
Example no. 2
def GDC(
        pred_depth,
        gt_depth,
        calib,
        k=10,
        W_tol=1e-5,
        recon_tol=1e-4,
        verbose=False,
        method='gmres',
        consider_range=(-0.1, 3.0),
        subsample=False,
):
    """
    Returns the depth map after Graph-based Depth Correction (GDC).

    Parameters:
        pred_depth - predicted depthmap
        gt_depth - lidar depthmap (-1 means no groundtruth)
        calib - calibration object
        k - number of nearest neighbours used in the KNN graph
        W_tol - tolerance in solving reconstruction weights
        recon_tol - tolerance used in gmres / cg
        verbose - if True, more info will show
        method - use cg or gmres to solve the second step
        consider_range - perform LLDC only on points whose pitch angles are
            within this range
        subsample - whether to subsample points on a grid

    Returns:
        new_depth_map - a refined depth map with the same size as pred_depth
    """

    if verbose:
        print("warpping up depth infos...")

    ptc = depth2ptc(pred_depth, calib)
    consider_PL = (filter_mask(ptc) * filter_theta_mask(
        ptc,
        low=np.radians(consider_range[0]),
        high=np.radians(consider_range[1]))).reshape(pred_depth.shape)

    if subsample:
        subsample_mask = subsample_mask_by_grid(ptc).reshape(pred_depth.shape)
        consider_PL = consider_PL * subsample_mask

    consider_L = filter_mask(depth2ptc(gt_depth,
                                       calib)).reshape(gt_depth.shape)
    gt_mask = consider_L * consider_PL

    # We don't drastically move points.
    # This avoids numerical issues in solving linear equations.
    gt_mask[gt_mask] *= (np.abs(pred_depth[gt_mask] - gt_depth[gt_mask]) < 2)

    # we only consider points within certain ranges
    pred_mask = np.logical_not(gt_mask) * consider_PL

    x_info = np.concatenate((pred_depth[pred_mask], pred_depth[gt_mask]))
    gt_info = gt_depth[gt_mask]
    N_PL = pred_mask.sum()  # number of pseudo_lidar points
    N_L = gt_mask.sum()  # number of lidar points (groundtruth)
    ptc = np.concatenate(
        (ptc[pred_mask.reshape(-1)], ptc[gt_mask.reshape(-1)]))
    if verbose:
        print("N_PL={} N_L={}".format(N_PL, N_L))
        print("building up KDtree...")

    tree = KDTree(ptc)
    neighbors = tree.query(ptc, k=k + 1)[1][:, 1:]

    if verbose:
        print("sovling W...")

    As = np.zeros((N_PL + N_L, k + 2, k + 2))
    bs = np.zeros((N_PL + N_L, k + 2))
    As[:, :k, :k] = np.eye(k) * (1 + W_tol)
    As[:, k + 1, :k] = 1
    As[:, :k, k + 1] = 1
    bs[:, k + 1] = 1
    bs[:, k] = x_info
    As[:, k, :k] = x_info[neighbors]
    As[:, :k, k] = x_info[neighbors]

    W = np.linalg.solve(As, bs)[:, :k]

    if verbose:
        avg = 0
        for i in range(N_PL):
            avg += np.abs(W[i, :k].dot(x_info[neighbors[i]]) - x_info[i])
        print("average reconstruction diff: {:.3e}".format(avg / N_PL))
        print("building up sparse W...")

    # We divide the sparse W matrix into 4 parts:
    # [W_PLPL, W_LPL]
    # [W_PLL , W_LL ]
    idx_PLPL = neighbors[:N_PL] < N_PL
    indptr_PLPL = np.concatenate(([0], np.cumsum(idx_PLPL.sum(axis=1))))
    W_PLPL = csr_matrix(
        (W[:N_PL][idx_PLPL], neighbors[:N_PL][idx_PLPL], indptr_PLPL),
        shape=(N_PL, N_PL))

    idx_LPL = neighbors[:N_PL] >= N_PL
    indptr_LPL = np.concatenate(([0], np.cumsum(idx_LPL.sum(axis=1))))
    W_LPL = csr_matrix(
        (W[:N_PL][idx_LPL], neighbors[:N_PL][idx_LPL] - N_PL, indptr_LPL),
        shape=(N_PL, N_L))

    idx_PLL = neighbors[N_PL:] < N_PL
    indptr_PLL = np.concatenate(([0], np.cumsum(idx_PLL.sum(axis=1))))
    W_PLL = csr_matrix(
        (W[N_PL:][idx_PLL], neighbors[N_PL:][idx_PLL], indptr_PLL),
        shape=(N_L, N_PL))

    idx_LL = neighbors[N_PL:] >= N_PL
    indptr_LL = np.concatenate(([0], np.cumsum(idx_LL.sum(axis=1))))
    W_LL = csr_matrix(
        (W[N_PL:][idx_LL], neighbors[N_PL:][idx_LL] - N_PL, indptr_LL),
        shape=(N_L, N_L))

    if verbose:
        print("reconstructing depth...")

    A = sparse.vstack((seye(N_PL) - W_PLPL, W_PLL))
    b = np.concatenate((W_LPL.dot(gt_info), gt_info - W_LL.dot(gt_info)))

    ATA = LinearOperator((A.shape[1], A.shape[1]),
                         matvec=lambda x: A.T.dot(A.dot(x)))
    method = cg if method == 'cg' else gmres
    x_new, info = method(ATA, A.T.dot(b), x0=x_info[:N_PL], tol=recon_tol)
    if verbose:
        print(info)
        print('solve residual norm: {}'.format(np.linalg.norm(A.dot(x_new) - b)))

    if subsample:
        new_depth_map = np.full_like(pred_depth, -1)
        new_depth_map[subsample_mask] = pred_depth[subsample_mask]
    else:
        new_depth_map = pred_depth.copy()
    new_depth_map[pred_mask] = x_new
    new_depth_map[gt_depth > 0] = gt_depth[gt_depth > 0]

    return new_depth_map
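
A minimal usage sketch (the array sizes are arbitrary, and `calib` must be the
project-specific calibration object expected by depth2ptc, whose construction
is not shown here):

    import numpy as np

    # Dense predicted depth map and sparse lidar map (-1 = no groundtruth).
    pred_depth = np.random.uniform(1.0, 80.0, size=(375, 1242)).astype(np.float32)
    gt_depth = np.full_like(pred_depth, -1.0)
    gt_depth[::7, ::7] = pred_depth[::7, ::7] + 0.1  # fake sparse groundtruth

    calib = ...  # calibration object used by depth2ptc (project-specific)
    refined = GDC(pred_depth, gt_depth, calib, k=10, method='gmres',
                  consider_range=(-0.1, 3.0), verbose=True)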
Example no. 3
    def __init__(self, n, f, **kwargs):
        """
        Initializer for the problem class.

        The user supplies functions and vectors as keyword arguments. The
        input vector x is assumed to be of length n.

        Each function is assumed to take in only the vector x (with the
        exception of the hessian function). Any additional user arguments
        must be wrapped beforehand (e.g. in a closure or functools.partial).

        Parameters
        ==========
            n : integer, length of input vector for NLP problem
            f :   function
                  takes in x
                  returns scalar

        Keyword args
        ============
            f_x : function
                  takes in x
                  returns numpy column array of n x 1
            xl  : a numpy column array of n x 1 (lower bounds on x)
            xu  : a numpy column array of n x 1 (upper bounds on x)


          and either (separated form):


            g   : function
                  takes in x
                  returns numpy column array
            h   : function
                  takes in x
                  returns numpy column array
            g_x : function
                  takes in x
                  returns scipy sparse matrix of n x len(g)
            h_x : function
                  takes in x
                  returns scipy sparse matrix of n x len(h)
            hessian : function
                      takes in x, v_g, v_h
                      where v_g and v_h are the Lagrange multipliers of the
                      Lagrangian,
                      assumed to be of the form:
                      f(x) + v_g^T g + v_h^T h
                      returns scipy sparse matrix of n x n
          or (combined form):
            c   : function
                  takes in x
                  returns numpy column array
            c_x : function
                  takes in x
                  returns scipy sparse matrix of n x len(c)
            cl  : numpy column array of len(c) x 1
            cu  : numpy column array of len(c) x 1
            hessian : function
                      takes in x, v
                      where v are the Lagrange multipliers (as a numpy column
                      vector) of the Lagrangian, assumed to be of the form:
                      f(x) + v^T c
                      returns scipy sparse matrix of n x n

        Methods generated for the solver are:
        f
        f_x
        c_e
        c_i
        A_e
        A_i
        hessian

        """
        self.n = n
        # First check to make sure problem hasn't been incorrectly supplied
        combined  = ['c', 'cl', 'cu', 'c_x']
        separated = ['g', 'h', 'g_x', 'h_x']
        # now check if components from either style are in the kwargs
        check1 = any(i in kwargs for i in combined)
        check2 = any(i in kwargs for i in separated)
        # Raise error if 2 styles combined, or no constraints supplied
        if check1 and check2:
            raise ValueError('Problem supplied incorrectly (constraint style)')
        elif check1:
            style = 'c'
        elif check2:
            style = 's'
        else:
            raise ValueError('Only constrained problems supported')
        # Also need to create settings for finite differencing
        try:
            # This is equivalent to max(f^y (x0)) where y = order + 1
            # Used to calculate optimal finite difference step size
            self._fdscale = kwargs['fin_diff_scale']
        except KeyError:
            self._fdscale = 1. / 3.
        try: # Finite difference order - forward h, central h^2, central h^4
            self._order = kwargs['fin_diff_order']
        except KeyError:
            self._order = 2

        ##########################################
        # Functions are now going to be defined. #
        ##########################################

        ########################
        # Common to both forms #
        ########################
        # objective function definition
        resh = lambda x: x.reshape(-1, 1)
        self.f = lambda x: resh(f(x))
        # objective function gradient
        try:
            self.f_x = kwargs['f_x']
        except KeyError:
            self.f_x = lambda x: self.approx_jacobian(x, f)

        ##################
        # Separated form #
        ##################
        if style == 's':
            # equality constraint function
            try:
                self.c_e = kwargs['g']
            except KeyError:
                self.c_e = lambda x: self.empty_f(x)
            # inequality constraint function
            try:
                self.c_i = kwargs['h']
            except KeyError:
                self.c_i = lambda x: self.empty_f(x)
            # equality constraint gradient
            try:
                self.A_e = kwargs['g_x']
            except KeyError:
                self.A_e = lambda x: csc_matrix(self.approx_jacobian(x, self.c_e))
            # inequality constraint gradient
            try:
                self.A_i = kwargs['h_x']
            except KeyError:
                self.A_i = lambda x: csc_matrix(self.approx_jacobian(x, self.c_i))
            # hessian function
            try:
                self.hessian = kwargs['hessian']
            except KeyError:
                self.hessian = None

        #################
        # Combined form #
        #################
        else:
            ######## long and awkward... ###########
            try:
                xl = self.xl = kwargs['xl']
            except KeyError:
                xl = self.xl = ones((n, 1)) * -1e20
            try:
                xu = self.xu = kwargs['xu']
            except KeyError:
                xu = self.xu = ones((n, 1)) * 1e20
            c = self.c  = kwargs['c']
            cl = self.cl = kwargs['cl']
            cu = self.cu = kwargs['cu']
            try:
                c_x = self.c_x = kwargs['c_x']
            except KeyError:
                c_x = self.c_x = lambda x: csc_matrix(self.approx_jacobian(x, self.c))
            o = n
            mn = len(self.cl)
            I = seye(o, o).tocsc()

            (coo_xer, coo_xec, coo_xed, coo_xir, coo_xic, coo_xid, coo_xlr,
             coo_xlc, coo_xld, coo_xur, coo_xuc, coo_xud, coo_cer, coo_cec,
             coo_ced, coo_cir, coo_cic, coo_cid, coo_clr, coo_clc, coo_cld,
             coo_cur, coo_cuc, coo_cud) = ([], [], [], [], [], [], [], [], [],
                                           [], [], [], [], [], [], [], [], [],
                                           [], [], [], [], [], [])
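
            # What follows converts the combined description (constraint
            # function c with limits cl/cu, plus bounds xl/xu on x) into the
            # separated equality/inequality form used by the solver.  The K*
            # matrices built below are 0/1 selector matrices: Kxe/Kce pick
            # components whose lower and upper limits coincide (equalities),
            # while Kxl/Kxu/Kcl/Kcu pick the finite lower/upper limits, which
            # become one-sided inequalities.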

            ############## BOUNDS ################
            xm = 0
            xn = 0
            for i in range(o):
                if xl[i] == xu[i]:
                    coo_xer += [xm]
                    coo_xec += [i]
                    coo_xed += [1]
                    xm += 1
                else:
                    coo_xir += [xn]
                    coo_xic += [i]
                    coo_xid += [1]
                    xn += 1
            l = 0
            u = 0
            for i in range(xn):
                if xl[coo_xic[i]] >= -1e19:
                    coo_xlr += [l]
                    coo_xlc += [coo_xic[i]]
                    coo_xld += [coo_xid[i]]
                    l += 1
                if xu[coo_xic[i]] <= 1e19:
                    coo_xur += [u]
                    coo_xuc += [coo_xic[i]]
                    coo_xud += [coo_xid[i]]
                    u += 1
            # A selector matrix is left as None when it would have no rows.
            Kxe = (coo_matrix((coo_xed, (coo_xer, coo_xec)),
                              shape=(xm, o)).tocsc() if xm else None)
            Kxl = (coo_matrix((coo_xld, (coo_xlr, coo_xlc)),
                              shape=(l, o)).tocsc() if l else None)
            Kxu = (coo_matrix((coo_xud, (coo_xur, coo_xuc)),
                              shape=(u, o)).tocsc() if u else None)

            ############## CONSTRAINTS ################
            cm = 0
            cn = 0
            for i in range(mn):
                if cl[i] == cu[i]:
                    coo_cer += [cm]
                    coo_cec += [i]
                    coo_ced += [1]
                    cm += 1
                else:
                    coo_cir += [cn]
                    coo_cic += [i]
                    coo_cid += [1]
                    cn += 1

            l = 0
            u = 0
            for i in range(cn):
                if cl[coo_cic[i]] >= -1e19:
                    coo_clr += [l]
                    coo_clc += [coo_cic[i]]
                    coo_cld += [coo_cid[i]]
                    l += 1
                if cu[coo_cic[i]] <= 1e19:
                    coo_cur += [u]
                    coo_cuc += [coo_cic[i]]
                    coo_cud += [coo_cid[i]]
                    u += 1
            # Again, a selector matrix is left as None when it has no rows.
            Kce = (coo_matrix((coo_ced, (coo_cer, coo_cec)),
                              shape=(cm, mn)).tocsc() if cm else None)
            Kcl = (coo_matrix((coo_cld, (coo_clr, coo_clc)),
                              shape=(l, mn)).tocsc() if l else None)
            Kcu = (coo_matrix((coo_cud, (coo_cur, coo_cuc)),
                              shape=(u, mn)).tocsc() if u else None)

            ############## COMBINING ################
            # Equality
            if (Kxe is not None) and (Kce is not None):
                Ke  = bmat([[Kxe, None],
                            [None, Kce]])
                ce  = vstack([Kxe * xl, Kce * cl])
                eq  = lambda x: Ke * vstack([resh(x), c(x)]) - ce
                jeq = lambda x: (Ke * svstack([I, c_x(x)]))
                num_x_eq = len(Kxe * xl)
            elif Kxe is not None:
                Ke  = Kxe
                ce  = Kxe * xl
                eq  = lambda x: Ke * resh(x) - ce
                jeq = lambda x: Ke
                num_x_eq = len(Kxe * xl)
            elif Kce is not None:
                Ke  = Kce
                ce  = Kce * cl
                eq  = lambda x: Ke * c(x) - ce
                jeq = lambda x: (Ke * c_x(x))
                num_x_eq = 0
            else:
                Ke  = None
                ce  = None
                eq  = None
                jeq = None
                num_x_eq = 0
            # Bounds
            if (Kxl is not None) and (Kxu is not None):
                Kiu = bmat([[-Kxl],
                            [ Kxu]])
                ciu = vstack([Kxl * xl, -Kxu * xu])
            elif Kxl is not None:
                Kiu = -Kxl
                ciu = Kxl * xl
            elif Kxu is not None:
                Kiu = Kxu
                ciu = -Kxu * xu
            else:
                Kiu = None
                ciu = None
            # Constraints
            if (Kcl is not None) and (Kcu is not None):
                Kil = bmat([[-Kcl],
                            [ Kcu]])
                cil = vstack([Kcl * cl, -Kcu * cu])
            elif Kcl is not None:
                Kil = -Kcl
                cil = Kcl * cl
            elif Kcu is not None:
                Kil = Kcu
                cil = -Kcu * cu
            else:
                Kil = None
                cil = None
            # Bounds + Constraints
            if (Kiu is not None) and (Kil is not None):
                Ki    = bmat([[ Kiu, None],
                            [None,  Kil]])
                ci    = vstack([ciu, cil])
                ineq  = lambda x: ci + Ki * vstack([resh(x), c(x)])
                jineq = lambda x: (Ki * svstack([I, c_x(x)]))
                num_x_bound = len(ciu)
            elif Kil is not None:
                Ki    = Kil
                ci    = cil
                ineq  = lambda x: ci + Ki * c(x)
                jineq = lambda x: (Ki * c_x(x))
                num_x_bound = 0
            elif Kiu is not None:
                Ki    = Kiu
                ci    = ciu
                ineq  = lambda x: ci + Ki * resh(x)
                jineq = lambda x: Ki
                num_x_bound = len(ciu)
            else:
                Ki    = None
                ci    = None
                ineq  = None
                jineq = None
                num_x_bound = 0

            ############# HESSIAN ###################
            try:
                hess_func = kwargs['hessian']
                self.c_hessian = hess_func
                def hess(x, lam_e, lam_i):
                    lam_e = resh(lam_e)
                    lam_i = resh(lam_i)
                    lam = 0
                    try:
                        lam += Kce.transpose() * lam_e[num_x_eq:]
                    except Exception:  # e.g. Kce is None
                        pass
                    try:
                        lam += Kcl.transpose() * lam_i[num_x_bound:]
                    except Exception:  # e.g. Kcl is None
                        pass
                    try:
                        lam += Kcu.transpose() * lam_i[num_x_bound:]
                    except Exception:  # e.g. Kcu is None
                        pass
                    return hess_func(x, lam)
            except KeyError:
                hess = None

            self.c_e = eq
            self.c_i = ineq
            self.A_e = jeq
            self.A_i = jineq
            self.hessian = hess
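
A minimal instantiation sketch for the separated form (the enclosing class is
not named in this excerpt, so `Problem` below is a stand-in, and the
inequality convention h(x) <= 0 is inferred from how c_i and the slacks are
used in the iterate constructor of Example no. 1):

    import numpy as np
    from scipy.sparse import csc_matrix

    n = 2
    f = lambda x: x[0] ** 2 + x[1] ** 2                   # objective, x is n x 1
    g = lambda x: (x[0] + x[1] - 1.0).reshape(-1, 1)       # equality g(x) = 0
    h = lambda x: (-x[0]).reshape(-1, 1)                   # inequality h(x) <= 0
    g_x = lambda x: csc_matrix(np.array([[1.0], [1.0]]))   # n x len(g)
    h_x = lambda x: csc_matrix(np.array([[-1.0], [0.0]]))  # n x len(h)

    prob = Problem(n, f, g=g, h=h, g_x=g_x, h_x=h_x)
    x0 = np.array([[0.5], [0.5]])
    print(prob.f(x0), prob.c_e(x0), prob.c_i(x0))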