Example #1
    def __init__(self, f, x, test = 'f'):
        self.f = f
        self.x = x.copy()

        # sA = FuncDesigner.oovar('A',shape=(len(x),len(x)))
        sx = FuncDesigner.oovar('x', size = len(x))
        sy = 0.5*FuncDesigner.dot(sx*sx,FuncDesigner.dot(f.A,sx))

        print('sy=', sy)
        
        # self.sA = sA
        self.sx = sx
        self.sy = sy
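
The fragment above is only the __init__ of a wrapper class; the class itself, the import, and the objective object f are not shown. Below is a minimal self-contained sketch of how it might be exercised. The names QuadraticWrapper and Objective are assumptions for illustration, not from the original project.

import numpy as np
import FuncDesigner

class Objective(object):
    # hypothetical stand-in for f: the excerpt only needs it to expose a matrix A
    def __init__(self, A):
        self.A = np.asarray(A)

class QuadraticWrapper(object):
    # hypothetical name for the class the __init__ above belongs to
    def __init__(self, f, x, test='f'):
        self.f = f
        self.x = x.copy()
        sx = FuncDesigner.oovar('x', size=len(x))
        sy = 0.5*FuncDesigner.dot(sx*sx, FuncDesigner.dot(f.A, sx))
        self.sx = sx
        self.sy = sy

x0 = np.array([1.0, 2.0])
w = QuadraticWrapper(Objective(np.eye(2)), x0)
print('sy at x0:', w.sy({w.sx: x0}))  # evaluate the symbolic expression at a concrete point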
Example #3
import numpy as np
import FuncDesigner as fd
from openopt import NLP
# KernelModel is defined elsewhere in the module this function comes from

def fit_kernel_model(kernel, loss, X, y, gamma, weights=None):
    n_samples = X.shape[0]
    gamma = float(gamma)
    if weights is not None:
        weights = weights / np.sum(weights) * weights.size

    # --- optimize bias term ---

    bias = fd.oovar('bias', size=1)

    if weights is None:
        obj_fun = fd.sum(loss(y, bias))
    else:
        obj_fun = fd.sum(fd.dot(weights, loss(y, bias)))
    optimizer = NLP(obj_fun, {bias: 0.}, ftol=1e-6, iprint=-1)

    result = optimizer.solve('ralg')
    bias = result(bias)

    # --- optimize betas ---

    beta = fd.oovar('beta', size=n_samples)

    # gram matrix
    K = kernel(X, X)
    assert K.shape == (n_samples, n_samples)

    K_dot_beta = fd.dot(K, beta)

    penalization_term = gamma * fd.dot(beta, K_dot_beta)
    if weights is None:
        loss_term = fd.sum(loss(y - bias, K_dot_beta))
    else:
        loss_term = fd.sum(fd.dot(weights, loss(y - bias, K_dot_beta)))
    obj_fun = penalization_term + loss_term

    beta0 = np.zeros((n_samples, ))

    optimizer = NLP(obj_fun, {beta: beta0}, ftol=1e-4, iprint=-1)
    result = optimizer.solve('ralg')
    beta = result(beta)

    return KernelModel(X, kernel, beta, bias)
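
A hedged usage sketch for fit_kernel_model. The Gaussian kernel, squared loss, and toy data below are illustrative choices, not part of the original code, and the call assumes KernelModel is available from the same module.

import numpy as np

def gaussian_kernel(A, B, width=1.0):
    # pairwise squared distances via ||a-b||^2 = a.a + b.b - 2 a.b
    sq = (A**2).sum(axis=1)[:, None] + (B**2).sum(axis=1)[None, :] - 2.0*np.dot(A, B.T)
    return np.exp(-sq / (2.0*width**2))

def squared_loss(target, prediction):
    # works on plain arrays as well as on FuncDesigner expressions
    return (target - prediction)**2

X = np.random.randn(20, 2)
y = np.sin(X[:, 0]) + 0.1*np.random.randn(20)
model = fit_kernel_model(gaussian_kernel, squared_loss, X, y, gamma=0.1)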
Example #4
import numpy as np
import FuncDesigner as fd
from openopt import LP

def compressed_sensing2(x1, trans):
    """L1 compressed sensing
    
    :Parameters:
        x1 : array-like, shape=(n_outputs,)
            input sparse vector
        trans : array-like, shape=(n_outputs, n_inputs)
            transformation matrix
    :Returns:
        decoded vector, shape=(n_inputs,)
    :RType:
        array-like
    """

    # obtain the sizes of inputs and outputs
    (n_outputs, n_inputs) = trans.shape

    # define variable
    t = fd.oovar('t', size=n_inputs)
    x = fd.oovar('x', size=n_inputs)

    # objective to minimize: sum(t), an upper bound on the L1 norm of x
    objective = fd.sum(t)

    # init constraints
    constraints = []

    # equality constraint: a_eq x^T = b_eq
    constraints.append(fd.dot(trans, x) == x1)

    # inequality constraints: -t <= x <= t
    constraints.append(-t <= x)
    constraints.append(x <= t)

    # start_point
    start_point = {x:np.zeros(n_inputs), t:np.zeros(n_inputs)}

    # solve linear programming
    prob = LP(objective, start_point, constraints=constraints)
    result = prob.minimize('pclp') # glpk, lpSolve... if available

    # print result
#    print "x =", result.xf # arguments at mimimum
#    print "objective =", result.ff # value of objective

    return result.xf[x]
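
A hedged round-trip sketch for the decoder above: encode a sparse vector with a random measurement matrix and recover it. The sizes and sparsity level are arbitrary illustrations.

import numpy as np

rng = np.random.RandomState(0)
n_inputs, n_outputs = 50, 25
x_true = np.zeros(n_inputs)
x_true[rng.choice(n_inputs, 3, replace=False)] = rng.randn(3)  # 3-sparse signal
trans = rng.randn(n_outputs, n_inputs)                         # measurement matrix, shape (n_outputs, n_inputs)
x1 = np.dot(trans, x_true)                                     # observations, shape (n_outputs,)
x_rec = compressed_sensing2(x1, trans)
print(np.max(np.abs(x_rec - x_true)))                          # should be small when recovery succeeds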
Example #5
def automaticdiffertest():
    
    # from FuncDesigner import *
    import FuncDesigner as fd  # needed for the fd.* calls below; presumably a module-level import in the original

    a, b, c = fd.oovars('a', 'b', 'c')

    f1 = fd.sin(a) + fd.cos(b) - fd.log2(c) + fd.sqrt(b)
    f2 = fd.sum(c) + c * fd.cosh(b) / fd.arctan(a) + c[0] * c[1] + c[-1] / (a * c.size)

    f3 = f1*f2 + 2*a + fd.sin(b) * (1+2*c.size + 3*f2.size)

    f = 2*a*b*c + f1*f2 + f3 + fd.dot(a+c, b+c)

    point = {a: 1, b: 2, c: [3, 4, 5]}  # numpy arrays are preferable to Python lists here

    print(f(point))                        # value of f at the point
    print(f.D(point))                      # derivatives w.r.t. all free variables
    print(f.D(point, a))                   # derivative w.r.t. a only
    print(f.D(point, [b]))                 # derivatives w.r.t. the listed variables
    print(f.D(point, fixedVars = [a, c]))  # a and c are held fixed, so only the derivative w.r.t. b is returned
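
A quick sanity check on the automatic derivatives demonstrated above: compare f.D against a forward difference for a small scalar expression. This is a standalone sketch, not part of the original test.

import numpy as np
import FuncDesigner as fd

a, b, c = fd.oovars('a', 'b', 'c')
g = fd.sin(a) + fd.cos(b) + fd.dot(c, c)            # scalar-valued expression
p0 = {a: 1.0, b: 2.0, c: np.array([3.0, 4.0, 5.0])}
p1 = {a: 1.0, b: 2.0 + 1e-6, c: np.array([3.0, 4.0, 5.0])}
print(g.D(p0, b))                                   # automatic derivative w.r.t. b
print((g(p1) - g(p0)) / 1e-6)                       # forward-difference estimate; should agree to ~1e-6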
Example #6
import math
import numpy as np
import FuncDesigner as fd
from openopt import QP

# Rebuild H row by row; n and the outer loop over j are not shown in the excerpt.
# n = 3 matches the commented-out 3x3 matrix below, but the original value of n is an assumption.
n = 3
H = []
for j in range(n):
    h = []
    for i in range(n):
        if i == j:
            h.append(12 + n/(i + 1.0))
        elif i == j + 1:
            h.append(0)
        elif j == i + 2:
            h.append(0)
        else:
            h.append(15/((i + 1) + 0.1*(j + 1)))
    H.append(h)
			
#print H
#H = [[15,0,4.83871],
#     [12.5,13.5,0],
#     [0,6.52174,13]]
#print H
x = fd.oovars(n)                    # n decision variables
f = fd.dot(fd.dot(x, H), x)         # quadratic objective x.H.x
startPoint = {x: np.zeros(n)}
constraints = []
constraints.append(x > np.zeros(n)) # nonnegativity
constraints.append(fd.sum(x) == 1)  # components sum to 1
p = QP(f, startPoint, constraints=constraints)
r = p.solve("qlcp")
x_opt = r(x)
print(math.sqrt(r.ff))              # square root of the optimal objective value

# problem
# http://abel.ee.ucla.edu/cvxopt/examples/tutorial/qp.html
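
For reference, the QP on the linked cvxopt tutorial page is, from memory (so treat the exact coefficients as an assumption): minimize 2*x1**2 + x2**2 + x1*x2 + x1 + x2 subject to x1 >= 0, x2 >= 0, x1 + x2 = 1. In the same FuncDesigner/openopt style it would look roughly like this:

import numpy as np
import FuncDesigner as fd
from openopt import QP

x = fd.oovar('x', size=2)
objective = 2*x[0]**2 + x[1]**2 + x[0]*x[1] + x[0] + x[1]
constraints = [x > np.zeros(2), fd.sum(x) == 1]
p = QP(objective, {x: np.zeros(2)}, constraints=constraints)
r = p.solve('qlcp')
print(r(x), r.ff)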