Example no. 1
	def get_2state_gaussian_seq(lens,dims=2,means1=[2,2,2,2],means2=[5,5,5,5],vars1=[1,1,1,1],vars2=[1,1,1,1],anom_prob=1.0):
		
		seqs = co.matrix(0.0, (dims, lens))
		lbls = co.matrix(0, (1,lens))
		marker = 0

		# generate first state sequence
		for d in range(dims):
			seqs[d,:] = co.normal(1,lens)*vars1[d] + means1[d]

		prob = np.random.uniform()
		if prob<anom_prob:		
			# add second state blocks
			while (True):
				max_block_len = 0.6*lens
				min_block_len = 0.1*lens
				block_len = int(max_block_len*np.single(co.uniform(1))+3)
				block_start = int(lens*np.single(co.uniform(1)))

				if (block_len - (block_start+block_len-lens)-3>min_block_len):
					break

			block_len = min(block_len,block_len - (block_start+block_len-lens)-3)
			lbls[block_start:block_start+block_len-1] = 1
			marker = 1
			for d in range(dims):
				#print block_len
				seqs[d,block_start:block_start+block_len-1] = co.normal(1,block_len-1)*vars2[d] + means2[d]

		return (seqs, lbls, marker)
Example no. 2
 def init0(self, dae):
     """Set bus Va and Vm initial values"""
     if not self.system.pflow.config.flatstart:
         dae.y[self.a] = self.angle + 1e-10 * uniform(self.n)
         dae.y[self.v] = self.voltage
     else:
         dae.y[self.a] = matrix(0.0,
                                (self.n, 1), 'd') + 1e-10 * uniform(self.n)
         dae.y[self.v] = matrix(1.0, (self.n, 1), 'd')
Example no. 3
 def get_hotstart_sol(self):
     sol = uniform(self.get_num_dims(), 1, a=-1, b=+1)
     sol[0:self.states * self.states] = self.hotstart_tradeoff
     print(
         'Hotstart position uniformly random with transition tradeoff {0}.'.
         format(self.hotstart_tradeoff))
     return sol
Example no. 4
def get_example_list(num, dims, signal, label, start, min_lens=600, max_lens=800):
	(foo, LEN) = label.shape
	min_genes = int(float(num)*0.15)

	X = []
	Y = []
	phi = []
	marker = []

	cnt_genes = 0
	cnt = 0
	while (cnt<num):
		lens = int(np.single(co.uniform(1, a=min_lens, b=max_lens)))
		if (start+lens)>LEN:
			print('Warning! End of genome. Could not add example.')
			break
		(exm, lbl, phi_i, isGene, end_pos) = get_example(signal, label, dims, start, start+lens)
		
		# accept example, if it has the correct length
		if (end_pos-start<=max_lens or (isGene==True and end_pos-start<800)):		
			X.append(exm)
			Y.append(lbl)
			phi.append(phi_i)
			if isGene:
				marker.append(0)
				cnt_genes += 1
				min_genes -= 1
			else:
				marker.append(1)
			cnt += 1
		start = end_pos

	print('Number of examples {0}. {1} of them are genic.'.format(len(Y), cnt_genes))
	return (X, Y, phi, marker, start) 
Example no. 5
	def get_hotstart_sol(self):
		sol = uniform(self.get_num_dims(), 1, a=-1,b=+1)
		sol[0:8] = self.hotstart_tradeoff
		sol[0:8] = 0.0
		print('Zero transition hotstart.')
		print('Hotstart position uniformly random with transition tradeoff {0}.'.format(self.hotstart_tradeoff))
		return sol
Example no. 6
 def _create_matrices(self, training_data, size, k=30):
     """
     Converts the training set into matrix form
     
     :param training_data: a dataset
     :type training_data: a tuple of lists
     :param size: dimensions of the matrix
     :type size: tuple
     :param k: the size of the common feature space for users and items
     :type k: int
     """
     print("Creating matrices.......\n")
     self._M = spmatrix(training_data[2], training_data[0], training_data[1], size)
     rated = [1]*len(training_data[2])
     self._R = spmatrix(rated, training_data[0], training_data[1], size)
     self._U = uniform(size[0], k, 0, 0.2)
     self._I = uniform(k, size[1], 0, 0.2)
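
A standalone sketch of the conversion the docstring describes, with made-up rating triplets (the toy data and variable names below are assumptions for illustration, not part of the project):

from cvxopt import spmatrix, uniform

# hypothetical (user, item, rating) triplets in the same layout as training_data
users = [0, 0, 1, 2]
items = [1, 2, 0, 2]
scores = [4.0, 3.0, 5.0, 2.0]
size, k = (3, 3), 30

M = spmatrix(scores, users, items, size)               # sparse rating matrix
R = spmatrix([1.0] * len(scores), users, items, size)  # indicator of rated entries
U = uniform(size[0], k, 0, 0.2)                        # user factors, k latent dimensions
I = uniform(k, size[1], 0, 0.2)                        # item factors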
Example no. 7
    def test_cvxopt_sparse(self):
        m = 100
        n = 20

        mu = cvxopt.exp(cvxopt.normal(m))
        F = cvxopt.normal(m, n)
        D = cvxopt.spdiag(cvxopt.uniform(m))
        x = Variable(m)
        exp = square(norm2(D * x))
Example no. 8
 def get_hotstart_sol(self):
     sol = uniform(self.get_num_dims(), 1, a=-1, b=+1)
     #sol[0:8] *= self.hotstart_tradeoff
     #sol[0:8] = 0.0
     #sol[1] = -100.0
     #sol[2:6] = -1.0*np.abs(sol[2:6])
     #sol[6] = -100.0
     print('Zero transition hotstart.')
     print(
         'Hotstart position uniformly random with transition tradeoff {0}.'.
         format(self.hotstart_tradeoff))
     return sol
Example no. 9
def build_fisher_kernel(data,
                        labels,
                        num_train,
                        ord=2,
                        param=2,
                        set_rand=False):
    # estimate the transition and emission matrix given the training
    # data only. Number of states is specified in 'param'.
    N = len(data)
    (F, LEN) = data[0].size

    A = np.zeros((param, param))
    E = np.zeros((param, F))

    phi = co.matrix(0.0, (param * param + F * param, N))
    cnt = 0
    cnt_states = np.zeros(param)
    for n in range(num_train):
        lbl = np.array(labels[n])[0, :]
        exm = np.array(data[n])
        for i in range(param):
            for j in range(param):
                A[i, j] += np.where((lbl[:-1] == i) & (lbl[1:] == j))[0].size
        for i in range(param):
            for f in range(F):
                inds = np.where(lbl == i)[0]
                E[i, f] += np.sum(exm[f, inds])
                cnt_states[i] += inds.size
        cnt += LEN

    for i in range(param):
        E[i, :] /= cnt_states[i]
    sol = co.matrix(
        np.vstack(
            (A.reshape(param * param, 1) / float(cnt), E.reshape(param * F,
                                                                 1))))
    print(sol)

    if set_rand:
        print('Set random parameter vector for Fisher kernel.')
        # sol = co.uniform(param*param+param*F, a=-1.0, b=+1.0)
        sol = co.uniform(param * param + param * F)

    model = SOHMM(data, labels)
    for n in range(N):
        (val, latent, phi[:, n]) = model.argmax(sol, n)
        phi[:, n] /= np.linalg.norm(phi[:, n], ord=ord)

    kern = Kernel.get_kernel(phi, phi)
    return kern, phi
Example no. 10
def load_intergenics(num_iges,
                     signal,
                     label,
                     ige_intervals,
                     min_lens=600,
                     max_lens=800):
    # add intergenic examples
    marker = []
    trainX = []
    trainY = []
    phi1_list = []
    phi2_list = []
    phi3_list = []
    ige_cnt = 0
    IGE_EXMS = num_iges
    N = len(ige_intervals)
    for i in range(N):
        lens = ige_intervals[i][1] - ige_intervals[i][0]
        if lens > 10000:
            IGE_LEN = int(np.single(co.uniform(1, a=min_lens, b=max_lens)))
            num_ige_exms = int(np.floor(float(lens) / float(IGE_LEN)))
            if (num_ige_exms > IGE_EXMS - ige_cnt):
                num_ige_exms = IGE_EXMS - ige_cnt
            ige_cnt += num_ige_exms
            (X, Y, phis1, phis2,
             phis3) = add_intergenic(num_ige_exms, signal, label,
                                     ige_intervals[i][0], ige_intervals[i][1],
                                     IGE_LEN)
            trainX.extend(X)
            trainY.extend(Y)
            phi1_list.extend(phis1)
            phi2_list.extend(phis2)
            phi3_list.extend(phis3)
            for j in range(num_ige_exms):
                marker.append(1)
        if ige_cnt > IGE_EXMS:
            break
    print('IGE examples {0}'.format(ige_cnt))
    return (trainX, trainY, phi1_list, phi2_list, phi3_list, marker)
Example no. 11
    def test_cvxopt_sparse(self):
        m = 100
        n = 20

        mu = cvxopt.exp( cvxopt.normal(m) )
        F = cvxopt.normal(m, n)
        D = cvxopt.spdiag( cvxopt.uniform(m) )
        x = Variable(m)
        exp = square(norm2(D*x))

    # # TODO
    # # Test scipy sparse matrices.
    # def test_scipy_sparse(self):
    #     m = 100
    #     n = 20

    #     mu = cvxopt.exp( cvxopt.normal(m) )
    #     F = cvxopt.normal(m, n)
    #     x = Variable(m)

    #     D = scipy.sparse.spdiags(1.5, 0, m, m )
    #     exp = square(norm2(D*x))
Example no. 12
c2 = []  # link bandwidth
c3 = []  # execution time
c4 = []  # communication time
c5 = []  # resource capacity

m = j + k
n = j * k

TMU = np.zeros(shape=(m, n), dtype=int)  # a totally unimodular matrix of zeros

# an m x n matrix representing the constraints 5 (computational resource exceed) and constraints 6
# (each job assigned to at least 1 datacenter)
A = matrix(np.array(TMU), tc='i')

# b is our value that x can reach given constraints of A.
b = uniform(m, 1)

# Make x = 0 feasible for the barrier. A flow can be allocated 0 time in favor of another getting more time. b is always positive.
b /= (1.1 * max(abs(b)))

# %%
"""
Centering uses Newton's centering method. This part is adapted from an example provided by the cvxopt library.
Our barrier function simply updates the value of x towards an optimal solution.

The tolerance above gives us mu. If any centering step gets close to mu we stop, since that is close to the
edge of the infeasible region and exterior to any optimal solution.
"""


def barrier():
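
The body of barrier() is cut off above. As a rough, numpy-based sketch of the Newton centering step the docstring describes (minimizing -sum(log(b - A*x)) from the feasible point x = 0; the function name, tolerances and constants are assumptions, not the project's code):

import numpy as np

def newton_centering_sketch(A, b, tol=1e-8, alpha=0.01, beta=0.5, max_iter=50):
    """Analytic centering of {x : A x <= b} by Newton's method,
    i.e. minimize -sum(log(b - A x)); x = 0 is feasible because b > 0."""
    m, n = A.shape
    x = np.zeros(n)
    for _ in range(max_iter):
        d = 1.0 / (b - A @ x)                 # d_i = 1 / (b_i - a_i' x) > 0
        grad = A.T @ d                        # gradient of the barrier
        hess = A.T @ np.diag(d ** 2) @ A      # Hessian of the barrier
        v = -np.linalg.solve(hess, grad)      # Newton step
        lam2 = -grad @ v                      # squared Newton decrement
        if lam2 / 2.0 < tol:                  # stop once close to the tolerance (mu)
            break
        t = 1.0
        # backtracking: first stay strictly feasible, then require sufficient decrease
        while np.min(b - A @ (x + t * v)) <= 0:
            t *= beta
        while (-np.sum(np.log(b - A @ (x + t * v)))
               > -np.sum(np.log(b - A @ x)) - alpha * t * lam2):
            t *= beta
        x += t * v
    return x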
Example no. 13
# least squares solution:  minimize || A*x - b ||_2^2
xls = +b 
lapack.gels(+A, xls)
xls = xls[:n]

# Tikhonov solution:  minimize || A*x - b ||_2^2 + 0.1*||x||^2_2
xtik = A.T*b
S = A.T*A
S[::n+1] += 0.1
lapack.posv(S, xtik)

# Worst case solution
xwc = wcls(A, Ap, b)

notrials = 100000
r = sqrt(uniform(1,notrials))
theta = 2.0 * pi * uniform(1,notrials)
u = matrix(0.0, (2,notrials))
u[0,:] = mul(r, cos(theta))
u[1,:] = mul(r, sin(theta))

# LS solution 
q = A*xls - b
P = matrix(0.0, (m,2))
P[:,0], P[:,1] = Ap[0]*xls, Ap[1]*xls
r = P*u + q[:,notrials*[0]]
resls = sqrt( matrix(1.0, (1,m)) * mul(r,r) )

q = A*xtik - b
P[:,0], P[:,1] = Ap[0]*xtik, Ap[1]*xtik
r = P*u + q[:,notrials*[0]]
Example no. 14
	def train(self):
		self.rand = co.uniform(self.n_users, self.n_items)
		return self
Example no. 15
from cvxpy import Variable, Problem, Minimize, log
import cvxopt

cvxopt.solvers.options['show_progress'] = False

# create problem data
m, n = 5, 10
A = cvxopt.normal(m,n)
tmp = cvxopt.uniform(n,1)
b = A*tmp

x = Variable(n)

p = Problem(
    Minimize(-sum(log(x))),
    [A*x == b]
)
status = p.solve()
cvxpy_x = x.value

def acent(A, b):
    m, n = A.size
    def F(x=None, z=None):
        if x is None: return 0, cvxopt.matrix(1.0, (n,1))
        if min(x) <= 0.0: return None
        f = -sum(cvxopt.log(x))
        Df = -(x**-1).T
        if z is None: return f, Df
        H = cvxopt.spdiag(z[0] * x**-2)
        return f, Df, H
    sol = cvxopt.solvers.cp(F, A=A, b=b)
Example no. 16
import cvxopt
import numpy
from cvxpy import *
from multiprocessing import Pool
from pylab import figure, show
import math

num_assets = 100
num_factors = 20

mu = cvxopt.exp( cvxopt.normal(num_assets) )
F = cvxopt.normal(num_assets, num_factors)
D = cvxopt.spdiag( cvxopt.uniform(num_assets) )
x = Variable(num_assets)
gamma = Parameter(sign="positive")

expected_return = mu.T * x
variance = square(norm2(F.T*x)) + square(norm2(D*x))

# construct portfolio optimization problem *once*
p = Problem(
    Maximize(expected_return - gamma * variance),
    [sum(x) == 1, x >= 0]
)

# encapsulate the allocation function
def allocate(gamma_value):
    gamma.value = gamma_value
    p.solve()
    w = x.value
    expected_return, risk = mu.T*w, w.T*(F*F.T + D*D)*w
Example no. 17
File: svm.py Project: davidbat/ml
import cvxopt, svmcmpl
m = 2000
X = 2.0*cvxopt.uniform(m,2)-1.0
d = cvxopt.matrix([2*int(v>=0)-1 for v in cvxopt.mul(X[:,0],X[:,1])],(m,1))
gamma = 2.0; kernel = 'rbf'; sigma = 1.0; width = 20
sol1 = svmcmpl.softmargin(X, d, gamma, kernel, sigma)
sol2 = svmcmpl.softmargin_appr(X, d, gamma, width, kernel, sigma)
Example no. 18
g = matrix([1,0, 0,1], (n,n), 'd')
x = matrix(1., (n*(r+1),1))
z = matrix(0., (2*n*r,1))
W = {'d': matrix(1., (2*n*r,1))}
doit("simple", n, r, g, W, x, z)

# --- Random test (diagonal g)

setseed(0)
n = 5
r = 10

g = matrix(0., (n,n), 'd')
g[::n+1] = 1

W = {'d': uniform(2*n*r, 1) }

x = uniform(n*(r+1),1)
z = uniform(2*n*r,1)

doit("diagonal_g", n, r, g, W, x, z)

# --- Constant diagonal

setseed(-10)
n = 5
r = 10
g = uniform(n,n)
g = g * g.T

W = {'d': matrix(1., (2*n*r,1))}
Example no. 19
 def test_basic_no_gsl(self):
     import sys
     sys.modules['gsl'] = None
     import cvxopt
     cvxopt.normal(4, 8)
     cvxopt.uniform(4, 8)
Example no. 20
from cvxpy import Variable, Problem, Minimize, log
import cvxopt

cvxopt.solvers.options['show_progress'] = False

# create problem data
m, n = 5, 10
A = cvxopt.normal(m, n)
tmp = cvxopt.uniform(n, 1)
b = A * tmp

x = Variable(n)

p = Problem(Minimize(-sum(log(x))), [A * x == b])
status = p.solve()
cvxpy_x = x.value


def acent(A, b):
    m, n = A.size

    def F(x=None, z=None):
        if x is None: return 0, cvxopt.matrix(1.0, (n, 1))
        if min(x) <= 0.0: return None
Example no. 21
import cvxopt
import numpy
from cvxpy import *
from multiprocessing import Pool
from pylab import figure, show
import math

num_assets = 100
num_factors = 20

mu = cvxopt.exp(cvxopt.normal(num_assets))
F = cvxopt.normal(num_assets, num_factors)
D = cvxopt.spdiag(cvxopt.uniform(num_assets))
x = Variable(num_assets)
gamma = Parameter(sign="positive")

expected_return = mu.T * x
variance = square(norm2(F.T * x)) + square(norm2(D * x))

# construct portfolio optimization problem *once*
p = Problem(Maximize(expected_return - gamma * variance), [sum(x) == 1, x >= 0])

# encapsulate the allocation function
def allocate(gamma_value):
    gamma.value = gamma_value
    p.solve()
    w = x.value
    expected_return, risk = mu.T * w, w.T * (F * F.T + D * D) * w
    return (expected_return[0], math.sqrt(risk[0]))

Example no. 22
# --- Simple test

n = 2
r = 2
g = matrix([1, 0, 0, 1], (n, n), 'd')
a = matrix([1, 0, 0, 0.4], (n * r, 1), 'd')
nu = matrix(1., (n, 1), 'd')
doit("simple", n, r, g, a, nu, 1.)

# --- Random test

setseed(1)
n = 8
r = 5
g = uniform(n, n)
g = g * g.T
a = uniform(n * r, 1)
nu = matrix(1., (n, 1), 'd')
doit("random", n, r, g, a, nu, 1.)

# --- Simple test

n = 2
r = 2
g = matrix([1, 0, 0, 1], (n, n), 'd')
a = matrix([1, 0, 0, 0.4], (n * r, 1), 'd')

nu = uniform(n, 1, 2, 10)
doit("simple_nu", n, r, g, a, nu, 1.)
Example no. 23
        step = 1.0
        while 1-step*max(y) < 0: step *= BETA 
        while True:
            if -sum(log(1-step*y)) < ALPHA*step*lam: break
            step *= BETA
        x += step*v


# Generate an analytic centering problem  
#
#    -b1 <=  Ar*x <= b2 
#
# with random mxn Ar and random b1, b2.

m, n  = 500, 500
Ar = normal(m,n);
A = matrix([Ar, -Ar])
b = uniform(2*m,1)

x, ntdecrs = acent(A, b)  
try: 
    import pylab
except ImportError: 
    pass
else:
    pylab.semilogy(range(len(ntdecrs)), ntdecrs, 'o', 
             range(len(ntdecrs)), ntdecrs, '-')
    pylab.xlabel('Iteration number')
    pylab.ylabel('Newton decrement')
    pylab.show()
Example no. 24
# The robust LP example of section 10.5 (Examples).

from cvxopt import normal, uniform  
from cvxopt.modeling import variable, dot, op, sum  
from cvxopt.blas import nrm2  
     
m, n = 500, 100  
A = normal(m,n)  
b = uniform(m)  
c = normal(n)  
     
x = variable(n)  
op(dot(c,x), A*x+sum(abs(x)) <= b).solve()  
     
x2 = variable(n)  
y = variable(n)  
op(dot(c,x2), [A*x2+sum(y) <= b, -y <= x2, x2 <= y]).solve()

print("\nDifference between two solutions %e" %nrm2(x.value - x2.value))
Example no. 25
# --- Simple test

n = 2
r = 2
g = matrix([1,0, 0,1], (n,n), 'd')
a = matrix([1, 0, 0, 0.4], (n*r,1), 'd')
nu = matrix(1.,(n,1),'d')
doit("simple", n, r, g, a, nu, 1.)


# --- Random test

setseed(1)
n = 8
r = 5
g = uniform(n,n)
g = g * g.T
a = uniform(n*r,1)
nu = matrix(1.,(n,1),'d')
doit("random", n, r, g, a, nu, 1.)

# --- Simple test

n = 2
r = 2
g = matrix([1,0, 0,1], (n,n), 'd')
a = matrix([1, 0, 0, 0.4], (n*r,1), 'd')

nu = uniform(n,1,2,10)
doit("simple_nu", n, r, g, a, nu, 1.)
Example no. 26
"""

import math

import cvxopt
import pylab

from cvxpy import Minimize, Problem, Variable, square

# create simple image
n = 32
img = cvxopt.matrix(0.0, (n, n))
img[1:2, 1:2] = 0.5

# add noise
img = img + 0.1 * cvxopt.uniform(n, n)

# show the image
plt = pylab.imshow(img)
plt.set_cmap('gray')
pylab.show()


# define the gradient functions
def grad(img, direction):
    m, n = img.size
    for i in range(m):
        for j in range(n):
            if direction == 'y' and j > 0 and j < m - 1:
                yield img[i, j + 1] - img[i, j - 1]
            elif direction == 'x' and i > 0 and i < n - 1:
Example no. 27
# least squares solution:  minimize || A*x - b ||_2^2
xls = +b
lapack.gels(+A, xls)
xls = xls[:n]

# Tikhonov solution:  minimize || A*x - b ||_2^2 + 0.1*||x||^2_2
xtik = A.T * b
S = A.T * A
S[::n + 1] += 0.1
lapack.posv(S, xtik)

# Worst case solution
xwc = wcls(A, Ap, b)

notrials = 100000
r = sqrt(uniform(1, notrials))
theta = 2.0 * pi * uniform(1, notrials)
u = matrix(0.0, (2, notrials))
u[0, :] = mul(r, cos(theta))
u[1, :] = mul(r, sin(theta))

# LS solution
q = A * xls - b
P = matrix(0.0, (m, 2))
P[:, 0], P[:, 1] = Ap[0] * xls, Ap[1] * xls
r = P * u + q[:, notrials * [0]]
resls = sqrt(matrix(1.0, (1, m)) * mul(r, r))

q = A * xtik - b
P[:, 0], P[:, 1] = Ap[0] * xtik, Ap[1] * xtik
r = P * u + q[:, notrials * [0]]
Example no. 28
File: test.py Project: bpiwowar/kqp
# [Note that n >= r]

DEBUG = 0
choice = "simple-1"

if choice == "random":
    # Builds up a random example
    n = 50
    r = 25
    np = 30
    Lambda = 1000

    # Construct an n * n positive definite matrix by computing a lower
    # triangular matrix with positive diagonal entries
    
    A = uniform(n, n)
    for i in range(n):
        A[i,i] = abs(A[i,i])+0.2
        for j in range(i+1,n): A[i,j] = 0

    print(A[::n+1].T)

    g = A * A.T
    mA = +g

    # Construct the vectors
    a = uniform(n * r)
    
elif choice == "simple-1":
    n = 2
    r = 2
Example no. 29
from robsvm import robsvm
from cvxopt import matrix, normal, uniform

# parameters
m, n = 60, 30
gamma = 10.0

# generate random problem data
X = 2.0 * uniform(m, n) - 1.0
print(X)

d = matrix(1, (m, 1))

# generate noisy labels
w0 = matrix(range(n))
b0 = 0.4
# w0 = matrix([2.0,1.0])+normal(2,1); b0 = 0.4
print(w0)
z = 0.2 * normal(m, 1)

for i in range(m):
    if (X[i, :] * w0)[0] + b0 < z[i]: d[i] = -1

# generate uncertainty ellipsoids
k = 2
P = [0.1 * normal(4 * n, n) for i in range(k)]
P = [p.T * p for p in P]
e = matrix(0, (m, 1))
for i in range(m):
    if d[i] == -1: e[i] = 1
Example no. 30
from cvxpy import *
import cvxopt
import pylab
import math

# create simple image
n = 32
img = cvxopt.matrix(0.0,(n,n))
img[1:2,1:2] = 0.5

# add noise
img = img + 0.1*cvxopt.uniform(n,n)

# show the image
plt = pylab.imshow(img)
plt.set_cmap('gray')
pylab.show()

# define the gradient functions
def grad(img, direction):
    m, n = img.size
    for i in range(m):
        for j in range(n):
            if direction == 'y' and j > 0 and j < m-1:
                yield img[i,j+1] - img[i,j-1]
            elif direction == 'x' and i > 0 and i < n-1:
                yield img[i+1,j] - img[i-1,j]
            else:
                yield 0.0
Example no. 31
        while 1 - step * max(y) < 0:
            step *= BETA
        while True:
            if -sum(log(1 - step * y)) < ALPHA * step * lam: break
            step *= BETA
        x += step * v


# Generate an analytic centering problem
#
#    -b1 <=  Ar*x <= b2
#
# with random mxn Ar and random b1, b2.

m, n = 500, 500
Ar = normal(m, n)
A = matrix([Ar, -Ar])
b = uniform(2 * m, 1)

x, ntdecrs = acent(A, b)
try:
    import pylab
except ImportError:
    pass
else:
    pylab.semilogy(range(len(ntdecrs)), ntdecrs, 'o', range(len(ntdecrs)),
                   ntdecrs, '-')
    pylab.xlabel('Iteration number')
    pylab.ylabel('Newton decrement')
    pylab.show()
Example no. 32
 def test_basic_no_gsl(self):
     import sys
     sys.modules['gsl'] = None
     import cvxopt
     cvxopt.normal(4,8)
     cvxopt.uniform(4,8)
Example no. 33
	def get_hotstart_sol(self):
		sol = uniform(self.get_num_dims(), 1, a=-1,b=+1)
		sol[0:self.states*self.states] = self.hotstart_tradeoff
		print('Hotstart position uniformly random with transition tradeoff {0}.'.format(self.hotstart_tradeoff))
		return sol
Example no. 34
# The robust LP example of section 10.5 (Examples).

from cvxopt import normal, uniform
from cvxopt.modeling import variable, dot, op, sum
from cvxopt.blas import nrm2

m, n = 500, 100
A = normal(m, n)
b = uniform(m)
c = normal(n)

x = variable(n)
op(dot(c, x), A * x + sum(abs(x)) <= b).solve()

x2 = variable(n)
y = variable(n)
op(dot(c, x2), [A * x2 + sum(y) <= b, -y <= x2, x2 <= y]).solve()

print("\nDifference between two solutions %e" % nrm2(x.value - x2.value))
Example no. 35
    # Use the matrix inversion lemma and solve this as
    #    
    #     (A * D^-1 *A' + I) * v = A * D^-1 * bx / z[0]
    #     D * x = bx / z[0] - A'*v.

    S = matrix(0.0, (m,m))
    v = matrix(0.0, (m,1))
    def Fkkt(x, z, W):
        ds = (2.0 * div(1 + x**2, (1 - x**2)**2))**-0.5
        Asc = A * spdiag(ds)
        blas.syrk(Asc, S)
        S[::m+1] += 1.0 
        lapack.potrf(S)
        a = z[0]
        def g(x, y, z):
            x[:] = mul(x, ds) / a
            blas.gemv(Asc, x, v)
            lapack.potrs(S, v)
            blas.gemv(Asc, v, x, alpha = -1.0, beta = 1.0, trans = 'T')
            x[:] = mul(x, ds)  
        return g

    return solvers.cp(F, kktsolver = Fkkt)['x']

m, n = 200, 2000
setseed()
A = normal(m,n)
x = uniform(n,1)
b = A*x
x = l2ac(A, b)
Example no. 36
    def F(x=None, z=None):
        if x is None: return 0, matrix(1.0, (n, 1))
        if min(x) <= 0.0: return None
        f = -sum(log(x))
        Df = -(x**-1).T
        if z is None: return matrix(f), Df
        H = spdiag(z[0] * x**-2)
        return f, Df, H

    return solvers.cp(F, A=A, b=b)['x']


# Randomly generate a feasible problem

m, n = 50, 500
y = normal(m, 1)

# Random A with A'*y > 0.
s = uniform(n, 1)
A = normal(m, n)
r = s - A.T * y
# A = A - (1/y'*y) * y*r'
blas.ger(y, r, A, alpha=1.0 / blas.dot(y, y))

# Random feasible x > 0.
x = uniform(n, 1)
b = A * x

x = acent(A, b)
Example no. 37
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb  6 13:58:32 2018

@author: noch
"""

import cvxopt, svmcmpl

m = 2000
X = 2.0 * cvxopt.uniform(m, 2) - 1.0
d = cvxopt.matrix([2 * int(v >= 0) - 1 for v in cvxopt.mul(X[:, 0], X[:, 1])],
                  (m, 1))
gamma = 2.0
kernel = 'rbf'
sigma = 1.0
width = 20
sol1 = svmcmpl.softmargin(X, d, gamma, kernel, sigma)
sol2 = svmcmpl.softmargin_appr(X, d, gamma, width, kernel, sigma)