Example n. 1
def model4(n, r, B, V):
    mat = V.T + V

    def F(x=None, z=None):
        if x is None:
            return 0, matrix(1.0, (n, 1))
        if x.T * V * x == 0:
            return None
        risk = x.T * V * x
        alpha = x.T * r - B.T * r
        f = -alpha * risk**-0.5
        Df = (-risk**-0.5 * r + 0.5 * risk**-1.5 * mat * x * alpha).T
        if z is None:
            return f, Df

        part1 = 0.5 * r * x.T * mat * risk**-1.5
        part2 = -0.75 * mat * x * alpha * x.T * mat * risk**-2.5 + 0.5 * (
            mat * x * r.T + mat * alpha) * risk**-1.5
        H = z[0] * (part1 + part2)
        return f, Df, H

    G1 = matrix(np.diag(np.ones(n) * -1))
    h1 = matrix(np.zeros(n))
    G, h = G1, h1
    A = matrix(np.ones(n)).T
    b = matrix(1.0, (1, 1))

    return solvers.cp(F, G, h, A=A, b=b)['x']
Example n. 2
def solveDualProblem(Kernel, Y, lambda_, risk ='SVM', optimal_value = 0):
    # Given a risk R, this solver gives the solution of the optimization problem :
    # max_gamma( - R^{*}(-2*lambda_*gamma) - lambda*gamma^{t}*K*gamma )
    # i.e min_gamma ( R^{*}(-2*lambda*gamma) + lambda*gamma^{t}*K*gamma )
    # For instance, in the case of SVM, this is the solution of :
    # max 2*lambda*sum_i(gamma) - lambda_ gamma^T K_eta gamma
    # under the constraints : y_i*gamma_i*lambda_*n >= -1
    # Warning : the cp solver is used for minimization. Be careful with the signs
    # and we suppose that R^{*}(lambda x) = lambda * R^{*}(x)
    # Inputs : - Kernel : array
    #          - Y : list
    #          - lambda_ : float
    n = len(Y)
    Kernel = matrix(Kernel)
    def F(gamma = None, z = None):
        if gamma is None: return 0, matrix(0., (n,1))
        # val = - ( 2*lambda_*sum(gamma) - lambda_*gamma.T*Kernel*gamma )
        # Df = - ( 2*lambda_*matrix(1., (1,n)) - (2*Kernel*gamma).T )
        val1, Df1, H1 = conjugateRisk(gamma, Y, lambda_, risk)
        val = val1 + lambda_*gamma.T*Kernel*gamma
        Df = Df1 + lambda_*(2*Kernel*gamma).T 
        if z is None: return val, Df
        H =  z[0]*( lambda_*H1 + lambda_*2*Kernel )
        return val, Df, H

    G, h = domainConjugateRisk(Y, lambda_, risk)
    sol = solvers.cp(F, G=G, h=h)
    if optimal_value == 0:
        return sol['x']
    if optimal_value == 1:
        return -sol['primal objective'], sol['x']
Example n. 3
def logistic_regression(c):
    # X is an n*dim matrix, whose rows are sample points
    X, s, y = load_data()
    n, dim = X.shape
    y = y.reshape(n, 1)
    s = s.reshape(n, 1) - np.mean(s)*np.ones((n, 1))
    G_1 = matrix((1/n)*s.T.dot(X))
    G_2 = matrix((-1/n)*s.T.dot(X))
    G = matrix(np.concatenate((G_1, G_2), axis=0))
    c = matrix(c, (2, 1))

    def log_likelihood(theta=None, z=None):
        if theta is None: return 0, matrix(1.0, (dim, 1))
        theta = theta / 100
        p = sigmoid(X.dot(theta))
        ones = np.ones((n, 1))
        f = matrix(-np.sum(y*np.log(p)+(ones-y)*np.log(ones-p), axis=0))

        # using numpy broadcasting, each row of X is multiplied by (y-p),
        # the sum is taken over the rows to produce the transpose of the gradient
        Df = matrix(-np.sum(X*(y-p), axis=0), (1, dim))

        if z is None: return f, Df
        hessian = X.T.dot((ones-p)*X)
        H = z[0]*hessian
        return f, Df, H
    return solvers.cp(log_likelihood, G=G, h=c)['x']
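The snippet above references two helpers, load_data and sigmoid, that are not shown. A minimal hypothetical stand-in (an assumption for illustration, not the original data loader) could look like this:

import numpy as np

def sigmoid(t):
    # elementwise logistic function 1 / (1 + exp(-t))
    return 1.0 / (1.0 + np.exp(-t))

def load_data(n=200, dim=5, seed=0):
    # synthetic stand-in: features X, a covariate s, and 0/1 labels y
    rng = np.random.default_rng(seed)
    X = rng.normal(size=(n, dim))
    s = rng.normal(size=n)
    y = (sigmoid(X @ rng.normal(size=dim)) > 0.5).astype(float)
    return X, s, y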
Example n. 4
def solver(graph=None, update=False, full=False, data=None, SO=False):
    """Find the UE link flow
    
    Parameters
    ----------
    graph: graph object
    update: if update==True: update link flows and link,path delays in graph
    full: if full=True, also return x (link flows per OD pair)
    data: (Aeq, beq, ffdelays, parameters, type) from get_data(graph)
    """
    if data is None: data = get_data(graph)
    Aeq, beq, ffdelays, pm, type = data
    n = len(ffdelays)
    p = Aeq.size[1]/n
    A, b = spmatrix(-1.0, range(p*n), range(p*n)), matrix(0.0, (p*n,1))
    if type == 'Polynomial':
        if not SO: pm = pm * spdiag([1.0/(j+2) for j in range(pm.size[1])])
        def F(x=None, z=None): return objective_poly(x, z, matrix([[ffdelays], [pm]]), p)
    if type == 'Hyperbolic':
        if SO:
            def F(x=None, z=None): return objective_hyper_SO(x, z, matrix([[ffdelays-div(pm[:,0],pm[:,1])], [pm]]), p)
        else:
            def F(x=None, z=None): return objective_hyper(x, z, matrix([[ffdelays-div(pm[:,0],pm[:,1])], [pm]]), p)
    dims = {'l': p*n, 'q': [], 's': []}
    x = solvers.cp(F, G=A, h=b, A=Aeq, b=beq, kktsolver=get_kktsolver(A, dims, Aeq, F))['x']
    linkflows = matrix(0.0, (n,1))
    for k in range(p): linkflows += x[k*n:(k+1)*n]
    
    if update:
        logging.debug('Update link flows, delays in Graph.'); graph.update_linkflows_linkdelays(linkflows)
        logging.debug('Update path delays in Graph.'); graph.update_pathdelays()
    
    if full: return linkflows, x    
    return linkflows
Example n. 5
def solver(graph=None, update=False, full=False, data=None, SO=False):
    """Find the UE link flow
    
    Parameters
    ----------
    graph: graph object
    update: if update==True: update link flows and link,path delays in graph
    full: if full=True, also return x (link flows per OD pair)
    data: (Aeq, beq, ffdelays, parameters, type) from get_data(graph)
    """
    if data is None: data = get_data(graph)
    Aeq, beq, ffdelays, pm, type = data
    n = len(ffdelays)
    p = Aeq.size[1]/n
    A, b = spmatrix(-1.0, range(p*n), range(p*n)), matrix(0.0, (p*n,1))
    if type == 'Polynomial':
        if not SO: pm = pm * spdiag([1.0/(j+2) for j in range(pm.size[1])])
        def F(x=None, z=None): return objective_poly(x, z, matrix([[ffdelays], [pm]]), p)
    if type == 'Hyperbolic':
        if SO:
            def F(x=None, z=None): return objective_hyper_SO(x, z, matrix([[ffdelays-div(pm[:,0],pm[:,1])], [pm]]), p)
        else:
            def F(x=None, z=None): return objective_hyper(x, z, matrix([[ffdelays-div(pm[:,0],pm[:,1])], [pm]]), p)
    dims = {'l': p*n, 'q': [], 's': []}
    x = solvers.cp(F, G=A, h=b, A=Aeq, b=beq, kktsolver=get_kktsolver(A, dims, Aeq, F))['x']
    linkflows = matrix(0.0, (n,1))
    for k in range(p): linkflows += x[k*n:(k+1)*n]
    
    if update:
        logging.info('Update link flows, delays in Graph.'); graph.update_linkflows_linkdelays(linkflows)
        logging.info('Update path delays in Graph.'); graph.update_pathdelays()
    
    if full: return linkflows, x    
    return linkflows
Example n. 6
    def checkEntropy(self, delta):
        const = 0
        for index in range(len(self.cost)):
            const += self.linkCost[index] * self.original[index]
        inequalityConstraints = matrix(-1., (2 + self.k, self.k))
        inequalitySolutions = matrix(0., (2 + self.k, 1))
        for i, mat in enumerate(self.validTours):
            c = 0
            for index in range(len(self.cost)):
                c += mat[index] * self.linkCost[index]
            inequalityConstraints[0, i] = -c
            inequalityConstraints[1, i] = c
        inequalitySolutions[0, 0] = delta - const
        inequalitySolutions[1, 0] = delta + const

        equalityConstraints = matrix(1., (1, self.k))
        equalitySolutions = matrix(1., (1, 1))
        #uses a nonlinear convex problem solver. see cvxopt for documentation
        sol = solvers.cp(self.F,
                         G=inequalityConstraints,
                         h=inequalitySolutions,
                         A=equalityConstraints,
                         b=equalitySolutions)
        if (recordParam):
            self.record(sol['x'], 'Diffusion', '', sol['x'].size[0],
                        sol['x'].size[1])
        total = 0.
        for i in range(self.k):
            total += sol['x'][i] * inequalityConstraints[1, i]
        #max iterations of 200 does not always find the optimal solution.
        #Unsure if I should increase the cap or just use the suboptimal solution.
        print(sol['x'])
        print(self.linkCost)
        return sol['x']
Example n. 7
def solver(graph, update=False, data=None, SO=False, random=False):
    """Solve for the UE equilibrium using link-path formulation
    
    Parameters
    ----------
    graph: graph object
    update: if True, update link and path flows in graph
    data: (P,U,r) 
            P: link-path incidence matrix
            U,r: simplex constraints 
    SO: if True compute SO
    random: if True, initialize with a random feasible point
    """
    type = graph.links.values()[0].delayfunc.type
    if data is None:
        P = linkpath_incidence(graph)
        U, r = path_to_OD_simplex(graph)
    else:
        P, U, r = data
    m = graph.numpaths
    A, b = spmatrix(-1.0, range(m), range(m)), matrix(0.0, (m, 1))
    ffdelays = graph.get_ffdelays()
    if type == 'Polynomial':
        coefs = graph.get_coefs()
        if not SO:
            coefs = coefs * spdiag(
                [1.0 / (j + 2) for j in range(coefs.size[1])])
        parameters = matrix([[ffdelays], [coefs]])
        G = ue.objective_poly
    if type == 'Hyperbolic':
        ks = graph.get_ks()
        parameters = matrix([[ffdelays - div(ks[:, 0], ks[:, 1])], [ks]])
        G = ue.objective_hyper

    def F(x=None, z=None):
        if x is None: return 0, solver_init(U, r, random)
        if z is None:
            f, Df = G(P * x, z, parameters, 1)
            return f, Df * P
        f, Df, H = G(P * x, z, parameters, 1)
        return f, Df * P, P.T * H * P

    failed = True
    while failed:
        x = solvers.cp(F, G=A, h=b, A=U, b=r)['x']
        l = ue.solver(graph, SO=SO)
        error = np.linalg.norm(P * x - l, 1) / np.linalg.norm(l, 1)
        if error > TOL:
            print('error={} > {}, re-compute path_flow'.format(error, TOL))
        else:
            failed = False
    if update:
        logging.info('Update link flows, delays in Graph.')
        graph.update_linkflows_linkdelays(P * x)
        logging.info('Update path delays in Graph.')
        graph.update_pathdelays()
        logging.info('Update path flows in Graph object.')
        graph.update_pathflows(x)
    # assert if x is a valid UE/SO
    return x, l
Example n. 8
    def solve_it(self, _eps=1.e-10):
        self.var_eps = _eps
        self.q_xy = self.tidy_up_distrib(self.orig_marg_xy, self.var_eps)
        self.q_xz = self.tidy_up_distrib(self.orig_marg_xz, self.var_eps)

        if self.verbose_output:
            print("q_xy=", self.q_xy)
            print("q_xz=", self.q_xz)

        self.create_equations()
        self.create_ieqs()
        self.make_initial_solution()
        if self.verbose_output: print(self.p_0)

        self.solver_ret = solvers.cp(self.callback,
                                     G=self.G,
                                     h=self.h,
                                     A=self.A,
                                     b=self.b)
        print("Solver terminated with status ", self.solver_ret['status'])

        self.p_final = dict()
        for xyz, i in self.var_idx.items():
            self.p_final[xyz] = self.solver_ret['x'][i]
        return self.p_final
Example n. 9
def MI_diagonal(cov_matrix, multiKN_par):
    dim = cov_matrix.shape[0]
    min_eigval = np.maximum(np.real(np.amin(np.linalg.eig(cov_matrix)[0]))/dim,1e-20)
    multiKN = 1/(multiKN_par - 1)
    def opt_function(x=None, z=None):
        if x is None: 
            return 0, matrix(np.reshape(min_eigval*np.ones(dim),(dim,1)))
        if np.amin(x) <= 0: 
            return None
        u = np.array(x).reshape(dim)
        aux_matrix = np.linalg.inv(multiKN_par*cov_matrix-np.diag(u))
        if z is None:
            return objective(u), matrix(grad_objective(u, aux_matrix))
        else:
            return objective(u), matrix(grad_objective(u, aux_matrix)), z[0]*matrix(hess_objective(u, aux_matrix))
    def objective(u):           
        return -np.log(np.linalg.det(multiKN_par*cov_matrix - np.diag(u))) - multiKN*np.sum(np.log(u)) 
    def grad_objective(u, aux_matrix):
        return np.reshape( np.diag(aux_matrix) - (1/u)*multiKN, (1,dim) )
    def hess_objective(u, aux_matrix):          
        return np.square(aux_matrix) + np.diag(np.square(1/u))*multiKN     
    
    dims={'l': 0, 'q': [], 's': [dim]}
    G = matrix(np.vstack([flatten_Ei(l,dim) for l in np.arange(dim)]).tolist())
    h = matrix(list(multiKN_par*cov_matrix.flatten()))       
    solved = False
    while not solved:
        try:
            result = solvers.cp(F=opt_function, G=G, h=h,dims = dims)
            solved = True
        except ValueError:
            print('Relaxing feasible set')
            solvers.options['feastol'] *= 10
    return np.diag((np.array(result['x'])[:,0]))
Example n. 10
def optimOde(objOde,ode,theta):
    p = len(theta)
    ff0 = numpy.zeros(ode._numState*p*p)
    s0 = numpy.zeros(p*ode._numState)
    def F(x=None, z=None):
        if x is None: return 0, matrix(theta)
        if min(x) <= 0.0: return None
        objJac,output = objOde.jac(theta=x,full_output=True)
        f = matrix((output['resid']**2).sum())
        Df = matrix(numpy.reshape(objJac.transpose().dot(-2*output['resid']),(1,p)))
        if z is None: return f, Df
        ode.setParameters(theta)
        ffParam = numpy.append(numpy.append(objSIR._x0,s0),ff0)
        solutionHessian,outputHessian = scipy.integrate.odeint(ode.odeAndForwardforward,
                                                               ffParam,t,
                                                               full_output=True)

        H = numpy.zeros((2,2))
        for i in range(0,len(output['resid'])):
            H += -2*output['resid'][i] * numpy.reshape(solutionHessian[i,-4:],(2,2))

        H += 2*objJac.transpose().dot(objJac)
        print(H)
        print(scipy.linalg.cholesky(H))
        print(numpy.linalg.matrix_rank(H))
        H = matrix(z[0] * H)
        return f, Df, H
    return solvers.cp(F)
Example n. 11
def x_solver(ffdelays,
             coefs,
             Aeq,
             beq,
             w_obs=0.0,
             obs=None,
             l_obs=None,
             w_gap=1.0):
    """
    optimization w.r.t. x_block:
    min F(x)'x + r(x) s.t. x in K
    
    Parameters
    ----------
    ffdelays: matrix of freeflow delays from graph.get_ffdelays()
    coefs: matrix of coefficients from graph.get_coefs()
    Aeq, beq: equality constraints of the ue program
    w_obs: weight on the observation residual
    obs: indices of the observed links
    l_obs: observations
    w_gap: weight on the gap function
    """
    n = len(ffdelays)
    p = Aeq.size[1] / n
    A, b = spmatrix(-1.0, range(p * n), range(p * n)), matrix(0.0, (p * n, 1))

    def F(x=None, z=None):
        return ue.objective_poly(x, z, matrix([[ffdelays], [coefs]]), p, w_obs,
                                 obs, l_obs, w_gap)

    x = solvers.cp(F, G=A, h=b, A=Aeq, b=beq)['x']
    linkflows = matrix(0.0, (n, 1))
    for k in range(p):
        linkflows += x[k * n:(k + 1) * n]
    return linkflows
Example n. 12
	def train(self, X, Y, normfac):

		solvers.options['show_progress'] = False
		
		# Reduce maxiters and tolerance to reasonable levels
		solvers.options['maxiters'] = 200
		solvers.options['abstol'] = 1e-2
		solvers.options['feastol'] = 1e-2

		row, col = X.shape
		
		P = matrix(0.0, (row,row))
		
		# Calculating the Kernel Matrix
		for i in range(row):
			for j in range(row):
				P[i,j] = Y[i] * self.kernel(X[i],X[j]) * Y[j]           # It's a PSD matrix, so it's okay!

		# A point in the solution space for objective
		x_0 = matrix(0.5, (row, 1))
		
		normarr = matrix(normfac, (1,row))
		
		def F(x = None, z = None):
			
			if x is None:
			    return (0, x_0)                                         # Alpha's start from 0.5, first value is zero as there are zero non-linear objectives
			
			term = matrix(sqrt(x.T * P * x))
			
			f = matrix(term - normfac * sum(x))                         # return the objective function
			
			# first derivative
			Df = (x.T * P)/term - normarr 						        # since for each alpha, normfac will be subtracted, norm arr is an array
			
			if z is None:
			    return f, Df
			
			term2 = matrix((P*x) * (P*x).T)
			H = z[0] * (P/term - term2/pow(term,3))                     # Second derivative of the objective function, is a symmetric matrix, so no need for spDiag ?
			
			return f, Df, H

		# for linear inequalities
		G = matrix(0.0, (row*2, row))									# there are two linear constraints for Alpha
		h = matrix(0.0, (row*2, 1))
		for i in range(row):
			G[i,i] = -1.0												# -Alpha <= 0
			G[row+i, i] = 1.0                                           #  Alpha <= 1
			h[row+i] = 1.0
				

		# solve and return w
		sol = solvers.cp(F, G, h)
		alpha = sol['x']
		
		for i in range(row):
			self.support.append([alpha[i] * Y[i], X[i]])
Example n. 13
def feasible_pathflows(graph, l_obs, obs=None, update=False,
                       with_cell_paths=False, with_ODs=False, x_true=None, wp_trajs=None):
    """Attempts to find feasible pathflows given partial or full linkflows
    
    Parameters:
    ----------
    graph: Graph object
    l_obs: observations of link flows
    obs: indices of the observed links
    update: if True, update path flows in graph
    with_cell_paths: if True, include cell paths as constraints
    with_ODs: if True, include ODs in the constraints if no with_cell_paths or in the objective if with_cell_paths
    """
    assert with_cell_paths or with_ODs # we must have some measurements!
    n = graph.numpaths
    # route to links flow constraints
    A, b = linkpath_incidence(graph), l_obs
    if obs: A = A[obs,:] # trim matrix if we have partial observations
    Aineq, bineq = spmatrix(-1.0, range(n), range(n)), matrix(0.0, (n,1)) # positive constraints
    if not with_cell_paths: # if just with ODs flow measurements:
        Aeq, beq = path_to_OD_simplex(graph) # route to OD flow constraints
    else: # if we have cellpath flow measurements:
        assert wp_trajs is not None
        Aeq, beq = WP.simplex(graph, wp_trajs) # route to cellpath flow constraints
        if with_ODs: # if we have ODs + cellpaths measurements
          T, d = path_to_OD_simplex(graph) # route to OD flow constraints included in objective
          A, b = matrix([A, T]), matrix([b, d]) # add the constraints to the objective
        
    if x_true is not None:
        err1 =  np.linalg.norm(A * x_true - b, 1) / np.linalg.norm(b, 1)
        err2 = np.linalg.norm(Aeq * x_true - beq) / np.linalg.norm(beq, 1)
        assert err1 < TOL, 'Ax!=b'
        assert err2 < TOL, 'Aeq x!=beq'
    # construct objective for cvxopt.solvers.qp
    Q, c = A.trans()*A, -A.trans()*b
    #x = solvers.qp(Q + REG_EPS*spmatrix(1.0, range(n), range(n)), c, Aineq, bineq, Aeq, beq)['x']
    # try with cvxopt.solvers.cp
    def qp_objective(x=None, z=None):
      if x is None: return 0, matrix(1.0, (n, 1))
      f = 0.5 * x.trans()*Q*x + c.trans() * x
      Df = (Q*x + c).trans()
      if z is None: return f, Df
      return f, Df, z[0]*Q
  
    dims = {'l': n, 'q': [], 's': []}
    x = solvers.cp(qp_objective, G=Aineq, h=bineq, A=Aeq, b=beq, 
        kktsolver=get_kktsolver(Aineq, dims, Aeq, qp_objective))['x']
    
    if update:
        logging.info('Update link flows, delays in Graph.'); graph.update_linkflows_linkdelays(P*x)
        logging.info('Update path delays in Graph.'); graph.update_pathdelays()
        logging.info('Update path flows in Graph object.'); graph.update_pathflows(x)
    #import ipdb
    #rank = 5
    #if with_ODs == False:
    #    ipdb.set_trace()
    return x, rn.rank(matrix([A, Aeq])), n
    
Example n. 14
def solve_num(A, c, alpha, rho, niter=100, debug=False):
    """
    This function solves the NUM problem.

        max sum(U(x))
           Ax <= c

    Srikant's utility function:
        U(x) = rho * x^(1-alpha) / (1 - alpha)
    """
    B = matrix(A, tc='d')
    c = matrix(c, tc='d')

    alpha = np.array(alpha)
    rho = np.array(rho)

    m, n = B.size

    assert n == len(alpha)
    assert n == len(rho)
    assert m == len(c)

    alphas = 1 - alpha

    def f(x):
        y = np.array(x.T).flatten()
        return sum(
            np.where(alphas == 0, rho * np.log(y),
                     rho * np.power(y, alphas) / (alphas + 1e-9)))

    def fprime(x):
        y = np.array(x.T).flatten()
        return rho * np.power(y, -alpha)

    def fpprime(x, z):
        y = np.array(x.T).flatten()
        return z[0] * rho * -alpha * y**(-alpha - 1)

    def F(x=None, z=None):
        if x is None:
            return 0, matrix(1.0, (n, 1))
        if min(x) <= 0.0:
            return None
        fx = matrix(-f(x), (1, 1))
        fpx = matrix(-fprime(x), (1, n))
        if z is None:
            return fx, fpx
        fppx = spdiag(matrix(-fpprime(x, z), (n, 1)))
        return fx, fpx, fppx

    ret = solvers.cp(F,
                     G=B,
                     h=c,
                     maxiters=niter,
                     options={'show_progress': debug})
    x, u = ret['x'], ret['zl']
    return np.array(x).flatten(), np.array(u).flatten()
Example n. 15
def lovasz(G, h, e):
    '''
    You will have to fix two points with partition 0 and 1 respectively.
    '''
    m, n = G.size  # n = number of variables, taken from the constraint matrix

    def F(wi):
        z = int(n / 2)
        s1 = wi[:z].sum()
        s2 = wi[z:].sum()
        return s1 * (z - s1) + s2 * (z - s2)

    def FL(x=None, z=None):
        if x is None: return 0, 0.5 * matrix(np.ones(n), (n, 1))
        if min(x) < 0.0: return None
        if max(x) > 1.0: return None
        x = np.array(x).flatten()
        order = np.argsort(x)
        sort = np.insert(np.insert(np.sort(x), 0, 0), n + 1, 1)
        weight = sort[1:] - sort[:-1]
        fwi = np.zeros(n + 1)
        wi = np.zeros(n)
        Df = np.zeros(n)
        f = 0
        for idx, arg in enumerate(order):
            fwi[idx] = F(wi)
            f += fwi[idx] * weight[idx]
            wi[arg] = 1
        fwi[n] = F(wi)
        f += fwi[n] * weight[n]
        clean = np.sort(x)
        for idx, ele in enumerate(x):
            left = np.searchsorted(clean, x[idx])
            right = np.searchsorted(clean, x[idx], side='right')
            base = 0
            grad = 0
            if left >= 1:
                leftgrad = fwi[left] - fwi[left - 1]
                grad += leftgrad
                base += 1
            if right <= n:
                rightgrad = fwi[right] - fwi[right - 1]
                grad += rightgrad
                base += 1
            Df[idx] = grad / base
        f += e * np.random.normal()
        Df += e * np.random.normal(n)
        Df = matrix(Df, (1, n))
        if z is None: return f, Df
        H = matrix(np.eye(n).astype(float))
        return f, Df, H

    sol = solvers.cp(FL, G=G, h=h)
    print(sol['x'])
    return sol
Example n. 16
def acent(A, b):
    m, n = A.size
    def F(x=None, z=None):
        if x is None: return 0, matrix(1.0, (n,1))
        if min(x) <= 0.0: return None
        f = -sum(log(x))
        Df = -(x**-1).T
        if z is None: return f, Df
        H = spdiag(z[0] * x**-2)
        return f, Df, H
    return solvers.cp(F, G=A, h=b)['x']
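A minimal usage sketch (not part of the original source): analytic centering of the set {x > 0 : A*x <= b} for a small random problem. It assumes the cvxopt names the function relies on (matrix, log, spdiag, solvers) are imported at module level.

import numpy as np
from cvxopt import matrix, solvers, log, spdiag

np.random.seed(0)
m, n = 4, 3
A = matrix(np.random.rand(m, n) + 0.1)   # positive entries keep the feasible set bounded
b = matrix(np.ones((m, 1)))
x_star = acent(A, b)                     # analytic center of {x > 0 : A*x <= b}
print(x_star)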
Example n. 17
def cvx_min_energy(n, A, S0):

    # CVXOPT format:
    #
    # f[0](x) = energy(x) (to be minized)
    # f[1](x) = first constraint <= 0
    #         = CONST. - sum(p in A) sum(q in S) | < p , q > |
    #
    # Df[0] = gradient of energy wrt x
    # Df[1] = gradient of constraint wrt x
    #
    # H = z[0] * Hessian of energy wrt x
    #     (gradient of constraint is zero)

    def F(x=None, z=None):

        # x is the new set to be generated
        # x  =  min Energy
        #       st. sum(p in A) sum(q in S) | < p , q > |  <=  CONST.
        # S  =  normalize(x)

        if x is None:
            return 1, matrix(
                S0)  # cvxopt specification - 1 constraint, x0 = previous set

        _A = deserialize(A)
        _x = deserialize(
            array(x)[:, 0])  # deserialize(array(x)[:,0])  # to numpy matrix
        C = len(_A) * len(
            _A)  # some constant - there will be a normalization in the end

        f0 = total_energy(_x)
        prod = np.dot(_A, _x.T)
        prod_sign = np.sign(prod)
        f1 = np.abs(prod).sum().sum() - C
        if f1 > 0: return None  # cvxopt specification

        f = np.array([f0, f1]).T

        Df0 = total_gradient(_x)
        Df1 = prod_sign.dot(_A)
        Df = matrix(np.array([np.hstack(Df0), np.hstack(Df1)]))

        if z is None: return f, Df
        H = matrix(z[0] * serial_adjusted_hessian(S, 10.))

        return f, Df, H

    S = solvers.cp(F)
    S = deserialize(S)
    S = S / la.norm(S)

    return S
Example n. 18
def F(x=None, z=None):
    if x is None:
        return 1, matrix((2.,1.),(2,1))
    if x[1] == 0: return None
    f = matrix([1/x[0] + x[1]])
    Df = matrix([x[2], 0.0, 0.0, -x[2], x[0], -x[1]], (2,3))
    if z is None:
        return f, Df
    H = matrix(0.0, (3,3))
    return f, Df, H
sol = solvers.cp(F, G, h)
print(sol['x'])
Example n. 19
 def optFunc(self, endhost_weight):  # {{{
     self.endhost_weight = endhost_weight
     for l in range(self.L):
         for i in range(self.I):
             for j in range(self.J):
                 self.GH1[l][i * self.J + j] = self.H[i][l][j]
     GH = matrix(self.GH1)
     DI = -numpy.identity(self.IJ)
     G = matrix(numpy.vstack((GH, DI)))
     h = matrix(numpy.vstack((self.capa, self.rhs2st)))
     sol = solvers.cp(self.F, G, h)
     return sol['x'], sol['zl']  # }}}
Example n. 20
 def optFunc(self,endhost_weight): # {{{
     self.endhost_weight = endhost_weight
     for l in range(self.L):
         for i in range(self.I):
             for j in range(self.J):
                 self.GH1[l][i * self.J + j] = self.H[i][l][j]
     GH = matrix(self.GH1)
     DI = -numpy.identity(self.IJ)
     G = matrix(numpy.vstack((GH, DI)))
     h = matrix(numpy.vstack((self.capa, self.rhs2st)))
     sol = solvers.cp(self.F, G, h)
     return sol['x'],sol['zl'] # }}}
Example n. 21
File: grad.py Project: tritm/ChuTo
 def optFunc(self,endhost_weight): 
     self.endhost_weight = endhost_weight
     for l in range(self.L):
         for i in range(self.I):
             for j in range(self.J):
                 self.GH1[l][i * self.J + j] = self.H[i][l][j]
     GH = matrix(self.GH1)
     DI = -numpy.identity(self.IJ)
     G = matrix(numpy.vstack((GH, DI)))
     h = matrix(numpy.vstack((self.capa, self.rhs2st)))
     sol = solvers.cp(self.F, G, h)
     # sol['zl']: congestion price for linear constraints [0:3]:constraints for l1,l2,l3; [4:8]: constraints for z >= 0
     return sol['x'], sol['zl'] 
Example n. 22
def acent(A, b, niter):
    m, n = A.size
    def F(x=None, z=None):
        if x is None: return 0, matrix(1.0, (n,1))
        if min(x) <= 0.0: return None
        print(x)
        f = -sum(log(x))
        Df = -(x**-1).T
        if z is None: return f, Df
        print("H.size=", (z[0] * x**-2).size)
        H = spdiag(z[0] * x**-2)
        return f, Df, H
    return solvers.cp(F, G=A, h=b, maxiters=niter, show_progress=False)['x']
Example n. 23
File: grad.py Project: tritm/ChuTo
 def optFunc(self, endhost_weight):
     self.endhost_weight = endhost_weight
     for l in range(self.L):
         for i in range(self.I):
             for j in range(self.J):
                 self.GH1[l][i * self.J + j] = self.H[i][l][j]
     GH = matrix(self.GH1)
     DI = -numpy.identity(self.IJ)
     G = matrix(numpy.vstack((GH, DI)))
     h = matrix(numpy.vstack((self.capa, self.rhs2st)))
     sol = solvers.cp(self.F, G, h)
     # sol['zl']: congestion price for linear constraints [0:3]:constraints for l1,l2,l3; [4:8]: constraints for z >= 0
     return sol['x'], sol['zl']
Example n. 24
    def setUp(self):
        """
        Use cvxopt to get ground truth values
        """

        from cvxopt import lapack, solvers, matrix, spdiag, log, div, normal, setseed
        from cvxopt.modeling import variable, op, max, sum
        solvers.options['show_progress'] = 0

        setseed()
        m, n = 100, 30
        A = normal(m, n)
        b = normal(m, 1)
        b /= (1.1 * max(abs(b)))
        self.m, self.n, self.A, self.b = m, n, A, b

        # l1 approximation
        # minimize || A*x + b ||_1
        x = variable(n)
        op(sum(abs(A * x + b))).solve()
        self.x1 = x.value

        # l2 approximation
        # minimize || A*x + b ||_2
        bprime = -matrix(b)
        Aprime = matrix(A)
        lapack.gels(Aprime, bprime)
        self.x2 = bprime[:n]

        # Deadzone approximation
        # minimize sum(max(abs(A*x+b)-0.5, 0.0))
        x = variable(n)
        dzop = op(sum(max(abs(A * x + b) - 0.5, 0.0)))
        dzop.solve()
        self.obj_dz = sum(
            np.max([np.abs(A * x.value + b) - 0.5,
                    np.zeros((m, 1))], axis=0))

        # Log barrier
        # minimize -sum (log ( 1.0 - (A*x+b)**2))
        def F(x=None, z=None):
            if x is None: return 0, matrix(0.0, (n, 1))
            y = A * x + b
            if max(abs(y)) >= 1.0: return None
            f = -sum(log(1.0 - y**2))
            gradf = 2.0 * A.T * div(y, 1 - y**2)
            if z is None: return f, gradf.T
            H = A.T * spdiag(2.0 * z[0] * div(1.0 + y**2, (1.0 - y**2)**2)) * A
            return f, gradf.T, H

        self.cxlb = solvers.cp(F)['x']
Example n. 25
def solver(graph, update=False, data=None, SO=False, random=False):
    """Solve for the UE equilibrium using link-path formulation
    
    Parameters
    ----------
    graph: graph object
    update: if True, update link and path flows in graph
    data: (P,U,r) 
            P: link-path incidence matrix
            U,r: simplex constraints 
    SO: if True compute SO
    random: if True, initialize with a random feasible point
    """
    type = graph.links.values()[0].delayfunc.type
    if data is None:
        P = linkpath_incidence(graph)
        U,r = path_to_OD_simplex(graph)
    else: P,U,r = data
    m = graph.numpaths
    A, b = spmatrix(-1.0, range(m), range(m)), matrix(0.0, (m,1))
    ffdelays = graph.get_ffdelays()
    if type == 'Polynomial':
        coefs = graph.get_coefs()
        if not SO: coefs = coefs * spdiag([1.0/(j+2) for j in range(coefs.size[1])])
        parameters = matrix([[ffdelays], [coefs]])
        G = ue.objective_poly
    if type == 'Hyperbolic':
        ks = graph.get_ks()
        parameters = matrix([[ffdelays-div(ks[:,0],ks[:,1])], [ks]])
        G = ue.objective_hyper
    def F(x=None, z=None):
        if x is None: return 0, solver_init(U,r,random)
        if z is None:
            f, Df = G(P*x, z, parameters, 1)
            return f, Df*P
        f, Df, H = G(P*x, z, parameters, 1)
        return f, Df*P, P.T*H*P    
    failed = True
    while failed:
        x = solvers.cp(F, G=A, h=b, A=U, b=r)['x']
        l = ue.solver(graph, SO=SO)
        error = np.linalg.norm(P * x - l,1) / np.linalg.norm(l,1)
        if error > TOL:
            print('error={} > {}, re-compute path_flow'.format(error, TOL))
        else: failed = False
    if update:
        logging.info('Update link flows, delays in Graph.'); graph.update_linkflows_linkdelays(P*x)
        logging.info('Update path delays in Graph.'); graph.update_pathdelays()
        logging.info('Update path flows in Graph object.'); graph.update_pathflows(x)
    # assert if x is a valid UE/SO
    return x, l
Example n. 26
    def robls(self, A, b, rho):
        m, n = A.size

        def F(x=None, z=None):
            if x is None: return 0, matrix(0.0, (n, 1))
            y = A * x - b
            w = sqrt(rho + y**2)
            f = sum(w)
            Df = div(y, w).T * A
            if z is None: return f, Df
            H = A.T * spdiag(z[0] * rho * (w**-3)) * A
            return f, Df, H

        return solvers.cp(F)['x']
Example n. 27
def x_update(Y,D,G,h,Kinv,alpha,rho):
    '''
    This function performs the x-update step of the ADMM algorithm.
    See Boyd et al, 2010, page 55.
    '''    

    m,n=D.size
    ki=matrix(Kinv)
    
    def F(v=None,z=None):           
        if v is None: return 0,matrix(0.0,(m,1))
        
        u = -D.T*v #compute u
        
        #===define some auxilary variables===
        Y2=mul(ki,Y**2)
        z1=u-ki+rho*alpha    
        
        W=matrix(np.real( lambertw(mul((Y2/rho),exp(-z1/rho))) ))
        h_opt= W +(z1/rho)
        dh_to_du=(1/rho)*(div(1,1+W))
        z2=mul(Y2,exp(-h_opt))
        z3=z2+z1-rho*h_opt
        #===define some auxilary variables===
        
        #====compute f===
        f=sum( mul(u,h_opt)-mul(ki,h_opt)-z2-(rho/2)*(h_opt-alpha)**2 )
        #====compute f===
        
        #===compute Jacobian===     
        df_to_du=h_opt+mul(dh_to_du,z3)
        Df = -df_to_du.T*D.T
        if z is None: return f, Df                 
        #===compute Jacobian===    
        
        #===compute Hessian===
        d2h_to_du2=(1/rho**2)*div(W,(1+W)**3)
        d2f_to_du2=mul(d2h_to_du2,z3)+mul(dh_to_du,2-mul(z2+rho,dh_to_du))
        H=D*spdiag(mul(z[0],d2f_to_du2))*D.T
        #===compute Hessian===
        
        return f, Df, H 

    solvers.options['maxiters']=500;solvers.options['show_progress']=False
    sol=solvers.cp(F=F,G=G,h=h)
    v=sol['x'];#dual solution
    x=compute_primal(Y,v,D,matrix(Kinv),alpha,rho)#primal solution
    
    return x
Example n. 28
def acent(A, b):
    m, n = A.size

    def F(x=None, z=None):
        if x is None: return 0, matrix(np.random.rand(n, 1))

        f = -1 * (sum((x**2))) + 1
        #print f,"DD",x,z
        Df = 2 * (x).T

        if z is None: return f, Df
        H = spdiag(z[0] * x**-2)
        return f, Df, H

    return solvers.cp(F, A=A, b=b)['x']
Example n. 29
    def setUp(self):
        """
        Use cvxopt to get ground truth values
        """

        from cvxopt import lapack,solvers,matrix,spdiag,log,div,normal,setseed
        from cvxopt.modeling import variable,op,max,sum
        solvers.options['show_progress'] = 0

        setseed()
        m,n = 100,30
        A = normal(m,n)
        b = normal(m,1)
        b /= (1.1*max(abs(b)))
        self.m,self.n,self.A,self.b = m,n,A,b

        # l1 approximation
        # minimize || A*x + b ||_1
        x = variable(n)
        op(sum(abs(A*x+b))).solve()
        self.x1 = x.value

        # l2 approximation
        # minimize || A*x + b ||_2
        bprime = -matrix(b)
        Aprime = matrix(A)
        lapack.gels(Aprime,bprime)
        self.x2 = bprime[:n]

        # Deadzone approximation
        # minimize sum(max(abs(A*x+b)-0.5, 0.0))
        x = variable(n)
        dzop = op(sum(max(abs(A*x+b)-0.5, 0.0)))
        dzop.solve()
        self.obj_dz = sum(np.max([np.abs(A*x.value+b)-0.5,np.zeros((m,1))],axis=0))

        # Log barrier
        # minimize -sum (log ( 1.0 - (A*x+b)**2))
        def F(x=None, z=None):
            if x is None: return 0, matrix(0.0,(n,1))
            y = A*x+b
            if max(abs(y)) >= 1.0: return None
            f = -sum(log(1.0 - y**2))
            gradf = 2.0 * A.T * div(y, 1-y**2)
            if z is None: return f, gradf.T
            H = A.T * spdiag(2.0*z[0]*div(1.0+y**2,(1.0-y**2)**2))*A
            return f,gradf.T,H
        self.cxlb = solvers.cp(F)['x']
Example n. 30
def robls(A, b, rho): 

    # Minimize  sum_k sqrt(rho + (A*x-b)_k^2).

    m, n = A.size
    def F(x=None, z=None):
        if x is None: return 0, matrix(0.0, (n,1))
        y = A*x-b
        w = sqrt(rho + y**2)
        f = sum(w)
        Df = div(y, w).T * A 
        if z is None: return f, Df 
        H = A.T * spdiag(z[0]*rho*(w**-3)) * A
        return f, Df, H

    return solvers.cp(F)['x']
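A minimal usage sketch (not part of the original source), assuming the cvxopt names the function needs (matrix, sqrt, div, spdiag, solvers) are in scope:

from cvxopt import normal

A = normal(20, 5)              # random overdetermined system (hypothetical data)
b = normal(20, 1)
x_rob = robls(A, b, rho=0.1)   # robust least-squares estimate
print(x_rob)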
Example n. 31
def acent(A, b, e):
    m, n = A.size

    def F(x=None, z=None):
        if x is None: return 0, x0
        if min(x) <= 0.0: return None
        f = -sum(log(x)) + e * np.random.normal()

        Df = -(x**-1).T + e * matrix(np.random.normal(n), (1, n))
        if z is None: return f, Df
        H = matrix(np.eye(n).astype(float))
        return f, Df, H

    sol = solvers.cp(F, A=A, b=b)
    print(sol['x'])
    return sol
Example n. 32
def _uceq_objective_fn(A, b):
    m, n = A.size

    def F(x=None, z=None):
        if x is None:
            return 0, matrix(0.5, (n, 1))
        if min(x) <= 0.0:
            return None
        f = sum(x)
        Df = -(x**-1).T
        if z is None:
            return f, Df
        H = spdiag(z[0] * x**-2)
        return f, Df, H

    return solvers.cp(F, A=A, b=b)['x']
Example n. 33
def odeObj(obj, theta, boxBounds):
    numParam = obj._numParam
    G = matrix(numpy.append(numpy.eye(numParam), -numpy.eye(numParam), axis=0))
    h = matrix(numpy.append(boxBounds[:, 1], -boxBounds[:, 0], axis=0))
    dims = {'l': G.size[0], 'q': [], 's':  []}
    def F(x=None, z=None):
        if x is None: return 0, matrix(theta)
        if min(x) <= 0.0: return None
        H, o = obj.jtj(numpy.array(x).ravel(),full_output=True)
        r = o['resid']
        f = matrix((r ** 2).sum())
        Df = matrix(o['grad']).T
        if z is None: return f, Df
        H = z[0] * matrix(H)
        return f, Df, H
    return solvers.cp(F, G, h)
Example n. 34
def MLE(obs_queue, L_max, p, max_iter=50, show_progress=True):
    """Given p, estimate P(Q=l) by maximizing the likelihood function
    Args:
        obs_queue: list[np.array], observed partial queues
        L_max: int, maximum queue length
        p: float, the (fixed) value of the penetration rate
        max_iter: int, max number of EM iterations
        show_progress: bool, show details of the MLE
    Returns:
        list(sol['x']): list[float], estimated pi
    """
    solvers.options['show_progress'] = show_progress
    solvers.options['maxiters'] = max_iter

    def F(x=None, z=None):
        x0 = matrix(0.0, (L_max + 1, 1))
        x0[:] = 1.0 / (L_max + 1)
        if x is None: return 0, x0
        # obj, gradient, Hessian
        f, Df, H = 0, matrix(0.0,
                             (1, L_max + 1)), matrix(0.0,
                                                     (L_max + 1, L_max + 1))
        for q in obs_queue:
            n_i = sum(q)
            den = sum(
                [x[l] * (1 - p)**(l - n_i) for l in range(len(q), L_max + 1)])
            # Check domain (different from feasible region)
            if den * p**n_i <= 0: return None
            f -= log(den * p**n_i)
            for j in range(len(q), L_max + 1):
                Df[j] -= (1 - p)**(j - n_i) / den
                for k in range(len(q), L_max + 1):
                    H[j, k] += (1 - p)**(j + k - 2 * n_i) / den**2
        if z is None: return f, Df
        return f, Df, H

    A, b = matrix(1.0, (1, L_max + 1)), matrix(1.0, (1, 1))
    G, h = matrix(0.0, (2 * (L_max + 1), L_max + 1)), matrix(
        0.0, (2 * (L_max + 1), 1))
    G[:L_max + 1, :] = spdiag(matrix(1.0, (1, L_max + 1)))
    G[L_max + 1:, :] = spdiag(matrix(-1.0, (1, L_max + 1)))
    h[:L_max + 1] = 1
    sol = solvers.cp(F, A=A, b=b, G=G, h=h)
    for i in range(len(sol['x']) - 1):
        sol['x'][i] = max(0, sol['x'][i])  # to avoid small negative numbers
    return list(sol['x'])
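A minimal usage sketch (not part of the original source): three toy observation vectors (1 = probe vehicle observed, 0 = not observed), maximum queue length 5, penetration rate 0.3. It assumes the imports the function relies on (matrix, spdiag, solvers from cvxopt and a scalar log) are in scope.

import numpy as np

obs_queue = [np.array([1, 0, 1]), np.array([0, 1]), np.array([1])]
pi_hat = MLE(obs_queue, L_max=5, p=0.3, show_progress=False)
print(pi_hat)   # estimated queue-length distribution, sums to 1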
Example n. 35
def exercice_particulier1():
    """
    .. exref::
        :title: solver.cp from cvxopt
        :tag: Computer Science

        We solve the following problem with `cvxopt <http://cvxopt.org/userguide/index.html>`_:

        .. math::

            \\left\\{ \\begin{array}{l} \\min_{x,y} \\left \\{ x^2 + y^2 - xy + y \\right \\}
            \\\\ \\text{subject to} \\; x + 2y = 1 \\end{array}\\right.

        It is implemented with the following function:

        ::

            def f_df_H(x=None,z=None) :
                if x is None :
                    # case 1
                    x0 = matrix ( [[ random.random(), random.random() ]])
                    return 0,x0
                f = x[0]**2 + x[1]**2 - x[0]*x[1] + x[1]
                d = matrix ( [ x[0]*2 - x[1], x[1]*2 - x[0] + 1 ] ).T
                h = matrix ( [ [ 2.0, -1.0], [-1.0, 2.0] ])
                if z is None:
                    # case 2
                    return  f, d
                else :
                    # case 3
                    return f, d, h

            solvers.options['show_progress'] = False
            A = matrix([ [ 1.0, 2.0 ] ]).trans()
            b = matrix ( [[ 1.0] ] )
            sol = solvers.cp ( f_df_H, A = A, b = b)

    """
    from cvxopt import solvers, matrix
    t = solvers.options.get('show_progress', True)
    solvers.options['show_progress'] = False
    A = matrix([[1.0, 2.0]]).trans()
    b = matrix([[1.0]])
    sol = solvers.cp(f_df_H, A=A, b=b)
    solvers.options['show_progress'] = t
    return sol
Example n. 36
def exercice_particulier1():
    """
    .. exref::
        :title: solver.cp from cvxopt
        :tag: Computer Science

        We solve the following problem with `cvxopt <http://cvxopt.org/userguide/index.html>`_:

        .. math::

            \\left\\{ \\begin{array}{l} \\min_{x,y} \\left \\{ x^2 + y^2 - xy + y \\right \\}
            \\\\ \\text{subject to} \\; x + 2y = 1 \\end{array}\\right.

        It is implemented with the following function:

        ::

            def f_df_H(x=None,z=None) :
                if x is None :
                    # case 1
                    x0 = matrix ( [[ random.random(), random.random() ]])
                    return 0,x0
                f = x[0]**2 + x[1]**2 - x[0]*x[1] + x[1]
                d = matrix ( [ x[0]*2 - x[1], x[1]*2 - x[0] + 1 ] ).T
                h = matrix ( [ [ 2.0, -1.0], [-1.0, 2.0] ])
                if z is None:
                    # case 2
                    return  f, d
                else :
                    # case 3
                    return f, d, h

            solvers.options['show_progress'] = False
            A = matrix([ [ 1.0, 2.0 ] ]).trans()
            b = matrix ( [[ 1.0] ] )
            sol = solvers.cp ( f_df_H, A = A, b = b)

    """
    from cvxopt import solvers, matrix
    t = solvers.options.get('show_progress', True)
    solvers.options['show_progress'] = False
    A = matrix([[1.0, 2.0]]).trans()
    b = matrix([[1.0]])
    sol = solvers.cp(f_df_H, A=A, b=b)
    solvers.options['show_progress'] = t
    return sol
Example n. 37
def acent(A, b):
    m, n = A.size

    def F(x=None, z=None):
        if x is None:
            return 0, matrix(np.random.rand(n, 1))

        f = -1 * (sum((x ** 2))) + 1
        # print f,"DD",x,z
        Df = 2 * (x).T

        if z is None:
            return f, Df
        H = spdiag(z[0] * x ** -2)
        return f, Df, H

    return solvers.cp(F, A=A, b=b)["x"]
Example n. 38
def minll(G, h, p):
    m, v_in = G.size

    def F(x=None, z=None):
        if x is None:
            return 0, matrix(1.0, (v_in, 1))
        if min(x) <= 0.0:
            return None
        f = -sum(mul(p, log(x)))
        Df = mul(p.T, -(x**-1).T)
        if z is None:
            return f, Df
        # Fix the Hessian
        H = spdiag(z[0] * mul(p, x**-2))
        return f, Df, H

    return solvers.cp(F, G=G, h=h)
Example n. 39
def Logistic(absDataOrigin,absLabels, lamda,alpha=1):
    # This function uses both absolute label data and comparison data to train the logistic regression model.
    # Equation: min_{beta, const} sum(logisticLoss(absData))+lamda*norm(beta,1)

    # Parameter:
    # ------------
    # absDataOrigin : N by d numpy matrix where N the number of absolute label data and d is the dimension of data
    # abslabels : (N,) numpy array, +1 means positive label and -1 represents negative labels
    # lamda : weight on L1 penalty. Large lamda would have more zeros in beta.

    # Return:
    # ------------
    # beta : the logistic regression model parameter
    # const : the logistic regression global constant.

    absN,d = np.shape(absDataOrigin)
    absData = np.concatenate((np.array(absDataOrigin),np.ones([absN,1])),axis = 1)
#   A : y_i * x_i since y_i is a scalar
    A = np.multiply(absLabels.T, absData.T).T #absData must be in N, d matrix, and absLabels must be in (N,1) or (N,) matrix
    A = matrix(A)
    def F(x=None, z=None):
        # beta without constant x[:d], constant x[d], t = x[d+1:]
        if x is None: return 2 * d, matrix(0.0,(2*d+1,1)) # m = 2 *d is the number of constraints
        e = A*x[:d+1] # 0 - d contains the constant
        w = exp(e)
        f = matrix(0.0,(2*d+1,1))
        f[0] = alpha*(-sum(e) + sum(log(1+w))) + lamda * sum(x[d+1::])# from d+1, without the constant
        f[1:d+1] = x[:d] - x[d+1:] # beta - t < 0
        f[d+1:] = -x[:d] - x[d+1:] # -beta - t <0
        Df = matrix(0.0,(2*d+1,2*d+1))
        # Df[0,:d+1] = (matrix(A.T * (div(w,1+w)-1.0))).T
        Df[0, :d + 1] = alpha*(matrix(A.T * (div(w, 1 + w) - 1.0))).T
        Df[0,d+1:] = lamda
        Df[1:d+1,0:d] = spdiag(matrix(1.0,(d,1)))
        Df[d+1:, 0:d] = spdiag(matrix(-1.0,(d,1)))
        Df[1:d+1,d+1:] = spdiag(matrix(-1.0,(d,1)))
        Df[d+1:,d+1:] = spdiag(matrix(-1.0,(d,1)))
        if z is None: return f ,Df
        H = matrix(0.0,(2*d+1,2*d+1))
        H[0:d+1,0:d+1] =  alpha*(A.T *spdiag(div(w, (1 + w) ** 2)) * A)
        return f, Df, z[0]*H
    solvers.options['show_progress'] = False
    sol = solvers.cp(F)
    beta, const = sol['x'][0:d], sol['x'][d]
    return beta, const
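A minimal usage sketch (not part of the original source): random data with ±1 labels and a small L1 penalty. It assumes the cvxopt names the function relies on (matrix, exp, log, div, spdiag, solvers) are imported at module level.

import numpy as np

np.random.seed(0)
N, d = 200, 4
X = np.random.randn(N, d)
y = np.where(X @ np.array([1.0, -1.0, 0.5, 0.0]) > 0, 1.0, -1.0)
beta, const = Logistic(X, y, lamda=0.1)
print(beta, const)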
Example n. 40
	def batch_train(self, X, Y):
		'''
		Given unlabeled training examples (one per row) in matrix X and their
		associated (-1, +1) labels (one per row) in vector Y, returns a weight
		vector w that determines a separating hyperplane, if one exists, using
		a q-norm support vector machine with standard linear kernel.
		'''
		m = len(Y)
	    
	    # First find a feasible solution and create the objective function
		lp = soft_SVM(self.d, self.C)
		lp.batch_train(X, Y)
		s = 1.0 - dot(Y * X, lp.w)
		s[s < 0.0] = 0.0
		x_0 = hstack((lp.w, s))
		F = make_soft_q_svm_primal_objective(self.d, m, self.q, self.C, x_0)
	    
	    # Set up the appropriate matrices and call CVXOPT's convex programming
		G_top = -hstack((Y * X, identity(m)))
		G_bottom = -hstack((zeros((m, self.d)), identity(m)))
		G_fix1 = hstack((identity(self.d), zeros((self.d, m))))
		G_fix2 = -hstack((identity(self.d), zeros((self.d, m))))
		G = matrix(vstack((G_top, G_bottom, G_fix1, G_fix2)))
		h = matrix(hstack((-ones(m), zeros(m), 1e3 * ones(self.d), 1e3 * ones(self.d) )))
	    
	    # Change solver options
		solvers.options['maxiters'] = 100
		solvers.options['abstol'] = 1e-3
		solvers.options['reltol'] = 1e-2
	    
		result = solvers.cp(F, G, h)
	    
	    # Reset solver options to defaults
		solvers.options['maxiters'] = 2000
		solvers.options['abstol'] = 1e-7
		solvers.options['reltol'] = 1e-6
	    
		z = result['x']
		self.w = array(z[:self.d]).reshape((self.d,))
	    
		def classify(self, x):
			return sign(dot(self.w, x))
	    
		def margin(self, x):
			return dot(self.w, x)
Example n. 41
    def compute_demand(self, p):
        """Computes demand x given price p and utility function U
        such that x is solution of min p'x-U(x) s.t. x >=0 """
        
        G, h = spdiag([-1.0]*self.n), matrix(0.0, (self.n, 1))
        
        if self.type == 'quad':
            Q, r = self.data
            return solvers.qp(-Q, p-r, G, h)['x']

        if self.type == 'sqrt':
            def F(x=None, z=None):
                if x is None: return 0, matrix(1.0, (self.n, 1))
                u, Du, H = self.utility(x)
                f, Df  = p.T*x - u, p.T - Du
                if z is None: return f, Df
                return f, Df, -z[0]*H
            return solvers.cp(F, G, h)['x']
Example n. 42
def acent(A, b):
     
    # Returns the solution of
    #
    #     minimize    -sum log(x)
    #     subject to  A*x = b
    
    m, n = A.size
    def F(x=None, z=None):
        if x is None:  return 0, matrix(1.0, (n,1))
        if min(x) <= 0.0:  return None
        f = -sum(log(x))
        Df = -(x**-1).T 
        if z is None: return matrix(f), Df
        H = spdiag(z[0] * x**-2)
        return f, Df, H
 
    return solvers.cp(F, A=A, b=b)['x']
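A minimal usage sketch (not part of the original source): with the single equality constraint sum(x) = 1 the analytic center is the uniform vector, so every entry of the solution is close to 1/n. It assumes the cvxopt names acent uses (matrix, log, spdiag, solvers) are imported.

from cvxopt import matrix

n = 4
A = matrix(1.0, (1, n))   # one equality constraint: sum(x) = 1
b = matrix(1.0)
x_star = acent(A, b)
print(x_star)             # roughly 0.25 in every entry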
Example n. 43
def testcp(opts):
     G = matrix([ 
               [0., -1.,  0.,  0., -21., -11.,   0., -11.,  10.,   8.,   0.,   8., 5.],
               [0.,  0., -1.,  0.,   0.,  10.,  16.,  10., -10., -10.,  16., -10., 3.],
               [0.,  0.,  0., -1.,  -5.,   2., -17.,   2.,  -6.,   8., -17.,  -7., 6.]
               ])  
     h = matrix(
          [1.0, 0.0, 0.0, 0.0, 20., 10., 40., 10., 80., 10., 40., 10., 15.])  
     dims = {'l': 0, 'q': [4], 's':  [3]}  
     if opts:
          solvers.options.update(opts)
     sol = solvers.cp(F, G, h, dims)  
     #sol = localcvx.cp(F, G, h, dims)  
     if sol['status'] == 'optimal':
          print("\nx = \n") 
          print(helpers.strSpe(sol['x'], "%.17f"))
          print(helpers.strSpe(sol['znl'], "%.17f"))
          print("\n *** running GO test ***")
          helpers.run_go_test("../testcp", {'x': sol['x']})
Example n. 44
        def __min_convex_cp(Q, q, lower, upper, d):
            i_cov = matrix(Q)
            b = matrix(q)

            def cOptFx(x=None, z=None):
                if x is None: return 0, matrix(0.0, (d, 1))
                f = (0.5 * (x.T * i_cov * x) - b.T * x)
                Df = (i_cov * x - b)
                if z is None: return f, Df.T
                H = z[0] * i_cov
                return f, Df.T, H

            ll_lower = matrix(lower, (d, 1))
            ll_upper = matrix(upper, (d, 1))
            I = matrix(0.0, (d, d))
            I[::d + 1] = 1
            G = matrix([I, -I])
            h = matrix([ll_upper, -ll_lower])
            return solvers.cp(cOptFx, G=G, h=h)
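A hypothetical call sketch (the helper is defined as a nested function in its source, so this only illustrates the expected arguments): minimize 0.5*x'Qx - q'x over the box lower <= x <= upper in dimension d = 2.

import numpy as np

Q = np.array([[2.0, 0.5], [0.5, 1.0]])   # positive-definite quadratic term
q = np.array([1.0, -1.0])
sol = __min_convex_cp(Q, q, lower=-1.0, upper=1.0, d=2)
print(sol['x'])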
Example n. 45
def projector(y):
    # Projector on the closed convex : x in R^n, x >= 0, sum(x) = 1
    # Defined as an optimization problem
    # input : - y vector (array)
    # output : - projected vector as a CVX matrix
    solvers.options['maxiters'] = 200
    n = y.shape[0]
    y = matrix(y)
    def F(x = None, z = None):
        if x is None: return 0, matrix(1/float(n), (n,1))
        val = ((x-y).T*(x-y))[0]
        Df = 2*(x-y).T
        if z is None: return val, Df
        H = 2*z[0]*matrix(np.eye(n))
        return val, Df, H
    G = matrix(np.concatenate((np.array([[1 for i in range(n)],[-1 for i in range(n)]]), -np.eye(n)) , axis = 0))
    h = matrix(sum([[1., -1.],[0. for i in range(n)]], []))
    sol = solvers.cp(F,G,h)
    return sol['x']
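A minimal usage sketch (not part of the original source): project an arbitrary vector onto the probability simplex.

import numpy as np

y = np.array([0.3, -0.2, 1.5, 0.1])
x_proj = projector(y)          # x_proj >= 0 and its entries sum to 1
print(x_proj)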
Example n. 46
    def solve_it(self, _eps=1.e-10):
        self.var_eps = _eps
        self.q_xy = self.tidy_up_distrib(self.orig_marg_xy, self.var_eps)
        self.q_xz = self.tidy_up_distrib(self.orig_marg_xz, self.var_eps)

        if self.verbose_output:
            print("q_xy=",self.q_xy)
            print("q_xz=",self.q_xz)

        self.create_equations()
        self.create_ieqs()
        self.make_initial_solution()
        if self.verbose_output: print(self.p_0)

        self.solver_ret   = solvers.cp(self.callback, G=self.G, h=self.h, A=self.A, b=self.b)
        print("Solver terminated with status ",self.solver_ret['status'])

        self.p_final = dict()
        for xyz,i in self.var_idx.items():
            self.p_final[xyz] = self.solver_ret['x'][i]
        return self.p_final
Example n. 47
def x_solver(ffdelays, coefs, Aeq, beq, soft, obs, l_obs, lower):
    """
    optimization w.r.t. x_block
    
    Parameters
    ---------
    ffdelays: matrix of freeflow delays from graph.get_ffdelays()
    coefs: matrix of coefficients from graph.get_coefs()
    Aeq, beq: equality constraints of the ue program
    soft: parameter
    obs: indices of the observed links
    l_obs: observations
    lower: lower bound on linkflows, l[i]>=lower[i]
    """
    n = len(ffdelays)
    p = Aeq.size[1]/n    
    A1, A2 = spmatrix(-1.0, range(p*n), range(p*n)), matrix([[spmatrix(-1.0, range(n), range(n))]]*p)
    A, b = matrix([A1, A2]), matrix([matrix(0.0, (p*n,1)), -lower])
    def F(x=None, z=None): return ue.objective(x, z, matrix([[ffdelays], [coefs]]), p, 1000.0, obs, l_obs)
    x = solvers.cp(F, G=A, h=b, A=Aeq, b=beq)['x']
    linkflows = matrix(0.0, (n,1))
    for k in range(p): linkflows += x[k*n:(k+1)*n]
    return linkflows
Example n. 48
    def optimize(self, **kwargs):
        """
        Options:

            show_progress=False,
            maxiters=100,
            abstol=1e-7,
            reltol=1e-6,
            feastol=1e-7,
            refinement=0 if m=0 else 1

        """
        from cvxopt.solvers import cp, options

        old_options = options.copy()
        out = None

        try:
            options.clear()
            options.update(kwargs)
            with np.errstate(divide='ignore', invalid='ignore'):
                result = cp(F=self.F,
                            G=self.G,
                            h=self.h,
                            dims={'l':self.G.size[0], 'q':[], 's':[]},
                            A=self.A,
                            b=self.b)
        except:
            raise
        else:
            self.result = result
            out = np.asarray(result['x'])
        finally:
            options.clear()
            options.update(old_options)

        return out
Example n. 49
    H1 = matrix(0.0, (5,5))
    H1[0,0] = 2.0 * c[0]**2 * B[0,0] 
    H1[1,0] = 2.0 * ( c[0] * c[1] * B[0,0] + c[0]**2 * B[1,0] )
    H1[2,0] = 2.0 * c[0] * c[1] * B[1,0] 
    H1[3:,0] = -2.0 * c[0] * B[:,0] 
    H1[1,1] = 2.0 * c[0]**2 * B[1,1] + 4.0 * c[0]*c[1]*B[1,0]  + \
              2.0 * c[1]**2 * B[0,0]
    H1[2,1] = 2.0 * (c[1]**2 * B[1,0] + c[0]*c[1]*B[1,1])
    H1[3:,1] = -2.0 * B * c[[1,0]]
    H1[2,2] = 2.0 * c[1]**2 * B[1,1]
    H1[3:,2] = -2.0 * c[1] * B[:,1] 
    H1[3:,3:] = 2*B

    return f, Df, z[0]*H0 + sum(z[1:])*H1
    
sol = solvers.cp(F)
A = matrix( sol['x'][[0, 1, 1, 2]], (2,2)) 
b = sol['x'][3:]

if pylab_installed:
    pylab.figure(1, facecolor='w')
    pylab.plot(X[:,0], X[:,1], 'ko', X[:,0], X[:,1], '-k')
    
    # Ellipsoid in the form { x | || L' * (x-c) ||_2 <= 1 }
    L = +A
    lapack.potrf(L)
    c = +b
    lapack.potrs(L, c)    
    
    # 1000 points on the unit circle
    nopts = 1000
Example n. 50
def l2ac(A, b):
    """
    Solves

        minimize  (1/2) * ||A*x-b||_2^2 - sum log (1-xi^2)

    assuming A is m x n with m << n.
    """

    m, n = A.size
    def F(x = None, z = None):
        if x is None: 
            return 0, matrix(0.0, (n,1))
        if max(abs(x)) >= 1.0: 
            return None 
        r = - b
        blas.gemv(A, x, r, beta = -1.0)
        w = x**2
        f = 0.5 * blas.nrm2(r)**2  - sum(log(1-w))
        gradf = div(x, 1.0 - w)
        blas.gemv(A, r, gradf, trans = 'T', beta = 2.0)
        if z is None:
            return f, gradf.T
        else:
            def Hf(u, v, alpha = 1.0, beta = 0.0):
               """
                   v := alpha * (A'*A*u + 2*((1+w)./(1-w)).*u) + beta * v
               """
               v *= beta
               v += 2.0 * alpha * mul(div(1.0+w, (1.0-w)**2), u)
               blas.gemv(A, u, r)
               blas.gemv(A, r, v, alpha = alpha, beta = 1.0, trans = 'T')
            return f, gradf.T, Hf


    # Custom solver for the Newton system
    #
    #     z[0]*(A'*A + D)*x = bx
    #
    # where D = 2 * (1+x.^2) ./ (1-x.^2).^2.  We apply the matrix inversion
    # lemma and solve this as
    #    
    #     (A * D^-1 *A' + I) * v = A * D^-1 * bx / z[0]
    #     D * x = bx / z[0] - A'*v.

    S = matrix(0.0, (m,m))
    v = matrix(0.0, (m,1))
    def Fkkt(x, z, W):
        ds = (2.0 * div(1 + x**2, (1 - x**2)**2))**-0.5
        Asc = A * spdiag(ds)
        blas.syrk(Asc, S)
        S[::m+1] += 1.0 
        lapack.potrf(S)
        a = z[0]
        def g(x, y, z):
            x[:] = mul(x, ds) / a
            blas.gemv(Asc, x, v)
            lapack.potrs(S, v)
            blas.gemv(Asc, v, x, alpha = -1.0, beta = 1.0, trans = 'T')
            x[:] = mul(x, ds)  
        return g

    return solvers.cp(F, kktsolver = Fkkt)['x']
Example n. 51
# The analytic centering with cone constraints example of section 9.1 
# (Problems with nonlinear objectives).

from cvxopt import matrix, log, div, spdiag 
from cvxopt import solvers  
 
def F(x = None, z = None):  
     if x is None:  return 0, matrix(0.0, (3,1))  
     if max(abs(x)) >= 1.0:  return None  
     u = 1 - x**2  
     val = -sum(log(u))  
     Df = div(2*x, u).T  
     if z is None:  return val, Df  
     H = spdiag(2 * z[0] * div(1 + u**2, u**2))  
     return val, Df, H  
 
G = matrix([ 
    [0., -1.,  0.,  0., -21., -11.,   0., -11.,  10.,   8.,   0.,   8., 5.],
    [0.,  0., -1.,  0.,   0.,  10.,  16.,  10., -10., -10.,  16., -10., 3.],
    [0.,  0.,  0., -1.,  -5.,   2., -17.,   2.,  -6.,   8., -17.,  -7., 6.]
    ])  
h = matrix(
    [1.0, 0.0, 0.0, 0.0, 20., 10., 40., 10., 80., 10., 40., 10., 15.])  
dims = {'l': 0, 'q': [4], 's':  [3]}  
sol = solvers.cp(F, G, h, dims)  
print("\nx = \n") 
print(sol['x'])
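
# A small check of the cone constraints (an added illustration).  With
# dims = {'l': 0, 'q': [4], 's': [3]}, the 13 rows of G and h split into a
# second-order cone of size 4 followed by a vectorized 3x3 semidefinite cone.
from cvxopt import blas, lapack
s = h - G * sol['x']
print(s[0] - blas.nrm2(s[1:4]))   # second-order cone slack: s0 - ||(s1,s2,s3)||_2 >= 0
Z = matrix(s[4:], (3, 3))         # 's' block, stored column-major
w = matrix(0.0, (3, 1))
lapack.syev(+Z, w)                # eigenvalues of the semidefinite slack
print(w)                          # all should be nonnegative (up to solver tolerance)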
Esempio n. 52
0
def F(x=None, z=None):
    if x is None: return 0, matrix(1.0, (n,1))
    X = V * spdiag(x) * V.T
    L = +X
    try: lapack.potrf(L)
    except ArithmeticError: return None
    f = - 2.0 * (log(L[0,0])  + log(L[1,1]))
    W = +V
    blas.trsm(L, W)    
    gradf = matrix(-1.0, (1,2)) * W**2
    if z is None: return f, gradf
    H = matrix(0.0, (n,n))
    blas.syrk(W, H, trans='T')
    return f, gradf, z[0] * H**2
xd = solvers.cp(F, G, h, A = A, b = b)['x']

if pylab_installed:
    pylab.figure(1, facecolor='w', figsize=(6,6)) 
    pylab.plot(V[0,:], V[1,:],'ow', mec='k')
    pylab.plot([0], [0], 'k+')
    I = [ k for k in range(n) if xd[k] > 1e-5 ]
    pylab.plot(V[0,I], V[1,I],'or')

# Enclosing ellipse is  {x | x' * (V*diag(xd)*V')^-1 * x = sqrt(2)}
nopts = 1000
angles = matrix( [ a*2.0*pi/nopts for a in range(nopts) ], (1,nopts) )
circle = matrix(0.0, (2,nopts))
circle[0,:], circle[1,:] = cos(angles), sin(angles)

W = V * spdiag(xd) * V.T
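
# A hedged sketch of the remaining plotting steps (an added illustration):
# trace the enclosing ellipse by mapping the unit circle through the lower
# Cholesky factor of W, scaled by sqrt(2).
from math import sqrt
lapack.potrf(W)                       # W now holds its lower Cholesky factor L
ellipse = sqrt(2.0) * circle
blas.trmm(W, ellipse)                 # ellipse := L * ellipse
if pylab_installed:
    pylab.plot(ellipse[0,:].T, ellipse[1,:].T, 'k--')
    pylab.show()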
Esempio n. 53
0
# minimize    x'*log x
# subject to  G*x <= h
#             A*x = b
#
# variable x (n).

def F(x=None, z=None):
   if x is None: return 0, matrix(1.0, (n,1))
   if min(x) <= 0: return None
   f = x.T*log(x)
   grad = 1.0 + log(x)
   if z is None: return f, grad.T
   H = spdiag(z[0] * x**-1)
   return f, grad.T, H 
sol = solvers.cp(F, G, h, A=A, b=b)
p = sol['x']


# Upper/lower bounds on cumulative distribution.
# 
# minimize/maximize  ck'*p = sum_{i<=alphak} pi 
# subject to         G*p <= h
#                    A*p = b
#                    p >= 0
#
# Solve via dual:
#
# maximize    -h'*z - b'*w 
# subject to   +/- c + G'*z + A'*w >= 0
#              z >= 0
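
# A hedged sketch of that dual LP with solvers.lp (an added illustration; the
# names G, h, A, b refer to the data of the problem above and ck is the 0/1
# indicator of the indices i <= alphak -- they are assumed here, since their
# construction is not shown).  The variables are (z, w); we minimize
# h'*z + b'*w subject to -G'*z - A'*w <= +/- ck and -z <= 0.
def cd_bound(ck):
    m = G.size[0]
    p_eq = A.size[0]
    negI = matrix(0.0, (m, m))
    negI[::m + 1] = -1.0                    # -I block for the constraint -z <= 0
    Zmp = matrix(0.0, (m, p_eq))
    c_lp = matrix([[h, b]])                 # stack h and b into one column
    G_lp = matrix([[-G.T, negI], [-A.T, Zmp]])
    h_lp = matrix([[matrix(ck), matrix(0.0, (m, 1))]])
    sol_lp = solvers.lp(c_lp, G_lp, h_lp)
    # by LP duality, min ck'*p = -sol_lp['primal objective'] for the '+ck'
    # case; flip the sign of ck to bound the maximum instead
    return sol_lp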
Esempio n. 54
0
def analytic_center(G, h, progress=False):
    """
    Finds a point within the constraints specified by Gx <= h that is furthest
    from the constraint boundaries, by maximizing prod(h - Gx).

    Parameters
    ----------
    G: pandas.DataFrame
        Array that specifies Gx <= h.
    h: pandas.Series
        The limits specifying Gx <= h.
    progress: bool
        True if detailed progress text from optimization should be shown.

    Returns
    -------
    x: pd.Series
        Centroid with index names equal to the column names of G.
    """

    # Aligning input
    h = h.reindex(G.index)

    if h.isnull().values.any() or G.isnull().values.any():
        msg = 'Row indices of G and h must match and contain no NaN entries.'
        omfa.logger.error(msg)
        raise ValueError(msg)

    if progress:
        solvers.options['show_progress'] = True
    else:
        solvers.options['show_progress'] = False

    # Initial point necessary for optimization
    if progress:
        omfa.logger.debug('Solving for feasible starting point')

    start = feasible_point(G, h, progress)

    # Setting up the analytic centering problem
    # (assumed: cvxopt matrix versions of the aligned G and h, used by F and
    #  by solvers.cp below)
    G_opt = matrix(G.values.astype(float))
    h_opt = matrix(h.values.astype(float))
    def F(x=None, z=None):
        if x is None:  
            return 0, matrix(start)
       
        y = h_opt - G_opt*x
        if min(y) <= 0.0: 
            return None

        f = -xsum(xlog(y))
        Df = (y**-1).T * G_opt

        if z is None: 
            return matrix(f), Df

        H = z[0] * G_opt.T * spdiag(y**-2) * G_opt
        return matrix(f), Df, H

    if progress:
        omfa.logger.debug('Calculating analytic center')

    sol = solvers.cp(F, G_opt, h_opt)

    x = pd.Series(sol['x'], index=G.columns)

    # Checking output
    if sol['status'] != 'optimal':
        if all(G.dot(x) <= h):
            msg = ('Centroid was not found, '
                   'but the last calculated point is feasible')
            omfa.logger.warn(msg)
        else:
            msg = 'Optimization failed on a non-feasible point'
            omfa.logger.error(msg)
            raise omfa.ModelError(msg)

    return x
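
# Hypothetical usage sketch (an added illustration): the unit box
# -1 <= x1, x2 <= 1, whose analytic center is the origin.  It assumes the
# surrounding module (omfa, feasible_point and the cvxopt imports) is
# importable.
import pandas as pd

G_box = pd.DataFrame([[ 1.,  0.],
                      [-1.,  0.],
                      [ 0.,  1.],
                      [ 0., -1.]], columns=['x1', 'x2'])
h_box = pd.Series([1., 1., 1., 1.], index=G_box.index)
center = analytic_center(G_box, h_box)   # expected to be close to (0, 0)
print(center)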
Esempio n. 55
0
def fit_l1_cvxopt_cp(
        f, score, start_params, args, kwargs, disp=False, maxiter=100,
        callback=None, retall=False, full_output=False, hess=None):
    """
    Solve the l1 regularized problem using cvxopt.solvers.cp

    Specifically:  We convert the convex but non-smooth problem

    .. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|

    via the transformation to the smooth, convex, constrained problem in twice
    as many variables (adding the "added variables" :math:`u_k`)

    .. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,

    subject to

    .. math:: -u_k \\leq \\beta_k \\leq u_k.

    Parameters
    ----------
    All the usual parameters from LikelihoodModel.fit
    alpha : non-negative scalar or numpy array (same size as parameters)
        The weight multiplying the l1 penalty term
    trim_mode : 'auto', 'size', or 'off'
        If not 'off', trim (set to zero) parameters that would have been zero
            if the solver reached the theoretical minimum.
        If 'auto', trim params using the Theory above.
        If 'size', trim params if they have very small absolute value
    size_trim_tol : float or 'auto' (default = 'auto')
        For use when trim_mode == 'size'
    auto_trim_tol : float
        For use when trim_mode == 'auto'.
    qc_tol : float
        Print warning and don't allow auto trim when (ii) in "Theory" (above)
        is violated by this much.
    qc_verbose : Boolean
        If true, print out a full QC report upon failure
    abstol : float
        absolute accuracy (default: 1e-7).
    reltol : float
        relative accuracy (default: 1e-6).
    feastol : float
        tolerance for feasibility conditions (default: 1e-7).
    refinement : int
        number of iterative refinement steps when solving KKT equations
        (default: 1).
    """
    start_params = np.array(start_params).ravel('F')

    ## Extract arguments
    # k_params is total number of covariates, possibly including a leading constant.
    k_params = len(start_params)
    # The start point
    x0 = np.append(start_params, np.fabs(start_params))
    x0 = matrix(x0, (2 * k_params, 1))
    # The regularization parameter
    alpha = np.array(kwargs['alpha_rescaled']).ravel('F')
    # Make sure it's a vector
    alpha = alpha * np.ones(k_params)
    assert alpha.min() >= 0

    ## Wrap up functions for cvxopt
    f_0 = lambda x: _objective_func(f, x, k_params, alpha, *args)
    Df = lambda x: _fprime(score, x, k_params, alpha)
    G = _get_G(k_params)  # Inequality constraint matrix, Gx \leq h
    h = matrix(0.0, (2 * k_params, 1))  # RHS in inequality constraint
    H = lambda x, z: _hessian_wrapper(hess, x, z, k_params)

    ## Define the optimization function
    def F(x=None, z=None):
        if x is None:
            return 0, x0
        elif z is None:
            return f_0(x), Df(x)
        else:
            return f_0(x), Df(x), H(x, z)

    ## Convert optimization settings to cvxopt form
    solvers.options['show_progress'] = disp
    solvers.options['maxiters'] = maxiter
    if 'abstol' in kwargs:
        solvers.options['abstol'] = kwargs['abstol']
    if 'reltol' in kwargs:
        solvers.options['reltol'] = kwargs['reltol']
    if 'feastol' in kwargs:
        solvers.options['feastol'] = kwargs['feastol']
    if 'refinement' in kwargs:
        solvers.options['refinement'] = kwargs['refinement']

    ### Call the optimizer
    results = solvers.cp(F, G, h)
    x = np.asarray(results['x']).ravel()
    params = x[:k_params]

    ### Post-process
    # QC
    qc_tol = kwargs['qc_tol']
    qc_verbose = kwargs['qc_verbose']
    passed = l1_solvers_common.qc_results(
        params, alpha, score, qc_tol, qc_verbose)
    # Possibly trim
    trim_mode = kwargs['trim_mode']
    size_trim_tol = kwargs['size_trim_tol']
    auto_trim_tol = kwargs['auto_trim_tol']
    params, trimmed = l1_solvers_common.do_trim_params(
        params, k_params, alpha, score, passed, trim_mode, size_trim_tol,
        auto_trim_tol)

    ### Pack up return values for statsmodels
    # TODO These retvals are returned as mle_retvals...but the fit wasn't ML
    if full_output:
        fopt = f_0(x)
        gopt = float('nan')  # Objective is non-differentiable
        hopt = float('nan')
        iterations = float('nan')
        converged = 'True' if results['status'] == 'optimal'\
            else results['status']
        retvals = {
            'fopt': fopt, 'converged': converged, 'iterations': iterations,
            'gopt': gopt, 'hopt': hopt, 'trimmed': trimmed}
    else:
        x = np.array(results['x']).ravel()
        params = x[:k_params]

    ### Return results
    if full_output:
        return params, retvals
    else:
        return params
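
# A sketch (an added illustration, not statsmodels code) of the inequality
# matrix that the constraints -u_k <= beta_k <= u_k translate to; the actual
# _get_G helper is not shown in this excerpt.
import numpy as np
from cvxopt import matrix

def _get_G_sketch(k_params):
    # stack beta_k - u_k <= 0 and -beta_k - u_k <= 0 as G * [beta; u] <= 0
    I = np.eye(k_params)
    return matrix(np.vstack([np.hstack([ I, -I]),
                             np.hstack([-I, -I])]))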
Esempio n. 56
0
def makesimulation():
    #MAKESIMULATION
    #public void mainnlp(int cont, int numvar, int numanz, int NbPipes,
    #			int[] bkc, double[] blc, double[] buc, int[] bkx, int[] ptrb,
    #			int[] ptre, double[] blx, double[] bux, double[] x, double[] y,
    #			double[] c, int[] sub, double[] val, double[] PipesConst,
    #			double[] TapsConst1, double[] TapsConst2, double[] oprfo,
    #			double[] oprgo, double[] oprho, int[] opro, int[] oprjo) {
    # case MSK_OPR_POW:
    #      if ( fxj )
    #        fxj[0] = f*pow(xj,g);
    #
    #      if ( grdfxj )
    #        grdfxj[0] = f*g*pow(xj,g-1.0);
    #
    #       if ( hesfxj )
    #        hesfxj[0] = f*g*(g-1)*pow(xj,g-2.0);
    #      break;
    #    default:
    cont=14
    numvar=21
    numanz=34
    NbPipes=13
    bkc=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] #modified later
    blc=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] #modified later
    buc=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] #modified later
    ptrb = [0, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 28, 29, 30, 31, 32, 33]
    ptre = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 28, 29, 30, 31, 32, 33, 34]
    blx = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    bux = [140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0, 140.0]
    x = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    y = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    c = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] #modified later
    sub = [0, 0, 1, 1, 2, 2, 3, 2, 4, 3, 5, 4, 6, 3, 7, 5, 8, 5, 9, 5, 10, 6, 11, 6, 12, 4, 13, 7, 8, 9, 10, 11, 12, 13]
    val = [1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0]
    PipesConst = [0.1560557453662784, 1.2468083477323857, 4.452886956187092, 21.156501311195587, 0.7124619129899348, 2.7201215971537183, 3.6268287962049577, 0.30223573301707984, 0.30223573301707984, 0.30223573301707984, 0.30223573301707984, 0.30223573301707984, 3.324593063187878]
    TapsConst1 = [-12.0, -9.0, -9.0, -9.0, -18.0, -18.0, -50.0]
    TapsConst2 = [47.83271862139916, 16.666666666666664, 16.666666666666664, 16.666666666666664, 16.666666666666664, 16.666666666666664, 174.44480468749992]
    oprfo = [0.1560557453662784, 1.2468083477323857, 4.452886956187092, 21.156501311195587, 0.7124619129899348, 2.7201215971537183, 3.6268287962049577, 0.30223573301707984, 0.30223573301707984, 0.30223573301707984, 0.30223573301707984, 0.30223573301707984, 3.324593063187878, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    oprgo = [2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 2.7809999999999997, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    oprho = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] #modified later
    opro  = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0] #modified later
    oprjo = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 0, 0, 0, 0, 0, 0] #modified later

    #conversion
    numcon=cont
    numopro = numvar - 1
    for j in range(NbPipes,numvar-1):
        # f(x_j+h)^g
        opro[j]  = 3 # MSK_OPR_POW
        oprjo[j] = j+1 # variable index (col)
        oprfo[j] = TapsConst2[j - NbPipes] # coeff
        oprgo[j] = 3.0 # exponent
        oprho[j] = 0.0

    numoprc = 0
    for j in range(0,NbPipes + 1):
        c[j] = 0

    for j in range(NbPipes + 1, numvar):
        c[j] = TapsConst1[j - NbPipes - 1]

    for j in range(0,numcon):
        bkc[j] = 2 # MSK_BK_FX
        blc[j] = 0
        buc[j] = 0


    bkx = [0]*numvar # (assumed) bound keys for the variables, from the MOSEK signature above
    for j in range(0,numvar):
        bkx[j] = 4 #MSK_BK_RA

    b = [buc[i] for i in range(0,len(bkc)) if bkc[i]==2] # MSK_BK_FX
    b_i = [i for i in range(0,len(bkc)) if bkc[i]==2] # MSK_BK_FX
    A_val = []
    A_col = []
    A_row = []
    for col in range(0,numvar):
        for ptr in range(ptrb[col],ptre[col]):
            row = sub[ptr]
            nz  = val[ptr]
            if bkc[row] == 2: # MSK_BK_FX
                A_row.append(row)
                A_col.append(col)
                A_val.append(nz)
    cvx_A = spmatrix(A_val, A_row, A_col,(cont,numvar),tc='d')[b_i,:]
    cvx_b = matrix(b,tc='d')

    #Define a function for cvx_opt
    # Returns
    # F() : starting point
    # F(x) : value of F and dF
    # F(x,z) : value of F and dF and hessien

    def F(x=None, z=None):
        if x is None: return 0, matrix(1.,(numvar,1))
        #print(x.T)
        if min(x) < 0.0: return None # F domain
        f = sum([c[j]*x[j] for j in range(0,numvar)] + [oprfo[j]*x[j+1]**oprgo[j] for j in range(0,numvar-1)])
        Df1 = matrix(c).T
        Df2 = matrix([0] + [oprgo[j]*oprfo[j]*x[j+1]**(oprgo[j]-1) for j in range(0,numvar-1)]).T
        Df = Df1 + Df2
        if z is None: return f, Df
        #print([0] + [(oprgo[j]-1)*oprgo[j]*oprfo[j]*x[j+1]**(oprgo[j]-2) for j in range(0,numvar-1)])
        H = spdiag([0] + [(oprgo[j]-1)*oprgo[j]*oprfo[j]*x[j+1]**(oprgo[j]-2) for j in range(0,numvar-1)])
        #print(H)
        return f, Df, H

    sol = solvers.cp(F, A=cvx_A, b=cvx_b)

    flows = [0.9748586431385673, 0.9748586431385673, 0.9748586431385673, 0.5386406143048615, 0.43621802883370586, 0.37669788557581124, 0.1886468466046585, 0.16194272872905033, 0.12556596185860375, 0.12556596185860375, 0.12556596185860375, 0.09432342330232925, 0.09432342330232925, 0.24757118222904734, 0.16194272872905033, 0.12556596185860375, 0.12556596185860375, 0.12556596185860375, 0.09432342330232925, 0.09432342330232925, 0.24757118222904734, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    flows = matrix(flows)[range(0,numvar)]
    #diff = matrix(flows) - matrix(sol['x'])

    y = [0.0, 0.4147494536942568, 3.7283927966062573, 7.842601791791739, 17.154712030642827, 8.190783936278457, 17.542613256354226, 8.236696667202539, 8.21165947036742, 8.21165947036742, 8.21165947036742, 17.555154598066633, 17.555154598066633, 17.924053855319045]
    print(matrix(y) + sol['y'])

    #returns
    #X = sol['x']
    #Y = -sol['y']
    #print(X)

    #return solvers.cp(F, A=A, b=b)['x']

    #http://abel.ee.ucla.edu/cvxopt/userguide/coneprog.html?highlight=lp#cvxopt.solvers.lp
    #c = matrix([-4., -5.])
    #G = matrix([[2., 1., -1., 0.], [1., 2., 0., -1.]])
    #h = matrix([3., 3., 0., 0.])
    #sol = solvers.lp(c, G, h,solver='glpk')
    #print sol['x']