Example no. 1
    def test_W_jump(self):

        " tests that W is where I think it should be "

        stats = self.stats

        cov_hs = stats.d['second_hs']
        assert cov_hs.dtype == config.floatX
        #mean_hsv[i,j] = E_D,Q h_i s_i v_j
        mean_hsv = stats.d['mean_hsv']

        regularized = cov_hs + alloc_diag(
            T.ones_like(self.model.mu) * self.model.W_eps)
        assert regularized.dtype == config.floatX

        inv = matrix_inverse(regularized)
        assert inv.dtype == config.floatX

        new_W = T.dot(mean_hsv.T, inv)

        f = function([], new_W)

        Wv = f()
        aWv = self.model.W.get_value()

        diffs = Wv - aWv
        max_diff = np.abs(diffs).max()

        if max_diff > self.tol:
            raise Exception("W deviates from its correct value by as much as " +
                            str(max_diff))
Example no. 2
    def test_W_jump(self):

        " tests that W is where I think it should be "

        stats = self.stats

        cov_hs = stats.d['second_hs']
        assert cov_hs.dtype == config.floatX
        #mean_hsv[i,j] = E_D,Q h_i s_i v_j
        mean_hsv = stats.d['mean_hsv']

        regularized = cov_hs + alloc_diag(T.ones_like(self.model.mu) * self.model.W_eps)
        assert regularized.dtype == config.floatX


        inv = matrix_inverse(regularized)
        assert inv.dtype == config.floatX

        new_W = T.dot(mean_hsv.T, inv)

        f = function([], new_W)

        Wv = f()
        aWv = self.model.W.get_value()

        diffs = Wv - aWv
        max_diff = np.abs(diffs).max()

        if max_diff > self.tol:
            raise Exception("W deviates from its correct value by as much as " + str(max_diff))
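Example no. 3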
def linearRegression_1(inputs, outputs):
    """
    Computes the least squares estimator (LSE) B_hat that minimises the sum of the
    squared errors.

    Computes B_hat as B_hat = (X.T . X)^-1 . X.T . y
    -> Ordinary Least Squares (OLS)
    http://en.wikipedia.org/wiki/Ordinary_least_squares

    In:
        inputs: Matrix of inputs (X) (nxp matrix)
                format: [[observation_1], ..., [observation_n]]
        outputs: Column vector (Matrix) of outputs y
                 format: [[y_1], ... , [y_n]]
    Out:
        B_hat: Column vector (Matrix) of fitted slopes
               format: [[b_0], ... , [b_{p-1}]]
    """
    X = T.dmatrix('X')
    y = T.dcol('y')
    # B_hat = (X.T . X)^-1 . X.T . y
    # http://deeplearning.net/software/theano/library/sandbox/linalg.html
    # MatrixInverse is the class.
    # matrix_inverse is the callable instance built from the MatrixInverse class.
    B_hat = T.dot(T.dot(linOps.matrix_inverse(T.dot(X.T, X)), X.T), y)
    lse = function([X, y], B_hat)
    b = lse(inputs, outputs)
    return b
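For reference, a minimal usage sketch (not part of the original source; it assumes NumPy plus the same Theano-era imports the function above already relies on, i.e. T, function and linOps):

import numpy as np

# Design matrix with an explicit intercept column; targets generated
# roughly as y = 1 + 2*x plus noise.
X_obs = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
y_obs = np.array([[0.9], [3.1], [4.9], [7.2]])
b_hat = linearRegression_1(X_obs, y_obs)   # roughly [[0.9], [2.1]]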
Example no. 4
    def __init__(self, kernel, X=None, Y=None):
        self.kernel = kernel
        self.X = X
        self.Y = Y

        self.th_hyp = self.kernel.th_hyp
        self.th_X = self.kernel.th_X
        self.th_N = self.kernel.th_N
        self.th_D = self.kernel.th_D
        self.th_K = self.kernel.th_K

        self.th_Y = T.matrix('Y')

        prec = sT.matrix_inverse(self.th_K)

        # Calculate the lml in a slow but stable way
        self.th_lml_stable = (
            -0.5 * sT.trace(T.dot(self.th_Y.T, T.dot(prec, self.th_Y))) +
            -T.sum(T.log(sT.diag(sT.cholesky(self.th_K)))) +
            -0.5 * self.th_N * T.log(2.0 * const.pi))
        # or in a fast but unstable way
        self.th_lml = (
            -0.5 * sT.trace(T.dot(self.th_Y.T, T.dot(prec, self.th_Y))) +
            -0.5 * T.log(sT.det(self.th_K)) +
            -0.5 * self.th_N * T.log(2.0 * const.pi))
        self.th_dlml_dhyp = theano.grad(self.th_lml, self.th_hyp)

        # Compile them to functions
        self.lml = theano.function([self.th_hyp, self.th_X, self.th_Y],
                                   self.th_lml)
        self.lml_stable = theano.function([self.th_hyp, self.th_X, self.th_Y],
                                          self.th_lml_stable)
        self.dlml_dhyp = theano.function([self.th_hyp, self.th_X, self.th_Y],
                                         self.th_dlml_dhyp)
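A side note on the "slow but stable" form above: for a positive-definite K = L.dot(L.T), log(det(K)) = 2 * sum(log(diag(L))), which is why the stable expression sums the log of the Cholesky diagonal instead of taking det directly. A standalone NumPy check (illustrative, not part of the class):

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(5, 5)
K = A.dot(A.T) + 5.0 * np.eye(5)   # a positive-definite covariance
L = np.linalg.cholesky(K)
# -0.5 * log det(K) equals -sum(log(diag(chol(K))))
assert np.allclose(np.log(np.linalg.det(K)), 2.0 * np.sum(np.log(np.diag(L))))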
Example no. 5
def test_matrix_inverse_solve():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Solve op.")
    A = theano.tensor.dmatrix('A')
    b = theano.tensor.dmatrix('b')
    node = matrix_inverse(A).dot(b).owner
    [out] = inv_as_solve.transform(node)
    assert isinstance(out.owner.op, Solve)
Example no. 6
def test_matrix_inverse_solve():
    if not imported_scipy:
        pytest.skip("Scipy needed for the Solve op.")
    A = theano.tensor.dmatrix("A")
    b = theano.tensor.dmatrix("b")
    node = matrix_inverse(A).dot(b).owner
    [out] = inv_as_solve.transform(node)
    assert isinstance(out.owner.op, Solve)
Example no. 7
def test_rop_lop():
    mx = tensor.matrix('mx')
    mv = tensor.matrix('mv')
    v = tensor.vector('v')
    y = matrix_inverse(mx).sum(axis=0)

    yv = tensor.Rop(y, mx, mv)
    yv2 = tensor.Rop_via_Lop(y, mx, mv)
    rop_f = function([mx, mv], [yv, yv2])

    sy, _ = theano.scan(lambda i, y, x, v: (tensor.grad(y[i], x) * v).sum(),
                        sequences=tensor.arange(y.shape[0]),
                        non_sequences=[y, mx, mv])
    scan_f = function([mx, mv], sy)

    rng = np.random.RandomState(utt.fetch_seed())
    vx = np.asarray(rng.randn(4, 4), theano.config.floatX)
    vv = np.asarray(rng.randn(4, 4), theano.config.floatX)

    v1 = scan_f(vx, vv)
    v2, v3 = rop_f(vx, vv)

    assert _allclose(v2, v1), ('Rop mismatch: %s %s' % (v2, v1))
    assert _allclose(v3, v1), ('Rop_via_Lop mismatch: %s %s' % (v3, v1))

    raised = False
    try:
        tensor.Rop(theano.clone(y, replace={mx: break_op(mx)}), mx, mv)
    except ValueError:
        raised = True
    if not raised:
        raise Exception(('Op did not raise an error even though the function'
                         ' is not differentiable'))

    raised = False
    try:
        tensor.Rop_via_Lop(theano.clone(y, replace={mx: break_op(mx)}), mx, mv)
    except theano.gradient.NullTypeGradError:
        raised = True
    except theano.gradient.DisconnectedInputError:
        raised = True

    if not raised:
        raise Exception((
            'Rop_via_Lop for Op did not raise an error even though the function'
            ' is not differentiable'))

    vv = np.asarray(rng.uniform(size=(4, )), theano.config.floatX)
    yv = tensor.Lop(y, mx, v)
    lop_f = function([mx, v], yv)

    sy = tensor.grad((v * y).sum(), mx)
    scan_f = function([mx, v], sy)

    v1 = lop_f(vx, vv)
    v2 = scan_f(vx, vv)
    assert _allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
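What the R-op test above exercises, stated directly: the directional derivative of the matrix inverse is d/dt inv(X + t*V) at t=0, which equals -inv(X).dot(V).dot(inv(X)); the test's y = matrix_inverse(mx).sum(axis=0) just sums that over axis 0. A finite-difference sanity check in plain NumPy (illustrative only):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(4, 4) + 4.0 * np.eye(4)   # keep X well conditioned
V = rng.randn(4, 4)
eps = 1e-6
fd = (np.linalg.inv(X + eps * V) - np.linalg.inv(X - eps * V)) / (2 * eps)
analytic = -np.linalg.inv(X).dot(V).dot(np.linalg.inv(X))
assert np.allclose(fd, analytic, atol=1e-4)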
Example no. 8
def test_inverse_singular():
    singular = numpy.array([[1, 0, 0]] + [[0, 1, 0]] * 2,
                           dtype=theano.config.floatX)
    a = tensor.matrix()
    f = function([a], matrix_inverse(a))
    try:
        f(singular)
    except numpy.linalg.LinAlgError:
        return
    assert False
Example no. 10
def test_matrix_inverse_as_solve_right():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Solve op.")
    A = theano.tensor.dmatrix('A')
    B = theano.tensor.dmatrix('B')
    node = B.dot(matrix_inverse(A)).owner
    [out] = inv_as_solve.transform(node)
    # take the transpose applied after the solve into account, so go up one
    # level in the expression tree
    assert isinstance(out.owner.inputs[0].owner.op, Solve)
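The inv_as_solve rewrite covered by these tests rests on two numerical identities: inv(A).dot(b) == solve(A, b) and B.dot(inv(A)) == solve(A.T, B.T).T (the transpose the comment above refers to). A quick NumPy check, purely for illustration:

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(4, 4) + 4.0 * np.eye(4)   # well-conditioned square system
b = rng.randn(4, 2)
B = rng.randn(2, 4)
assert np.allclose(np.linalg.inv(A).dot(b), np.linalg.solve(A, b))
assert np.allclose(B.dot(np.linalg.inv(A)), np.linalg.solve(A.T, B.T).T)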
Example no. 11
def test_inverse_correctness():
    rng = numpy.random.RandomState(utt.fetch_seed())

    r = rng.randn(4, 4).astype(theano.config.floatX)

    x = tensor.matrix()
    xi = matrix_inverse(x)

    ri = function([x], xi)(r)
    assert ri.shape == r.shape
    assert ri.dtype == r.dtype

    rir = numpy.dot(ri, r)
    rri = numpy.dot(r, ri)

    assert _allclose(numpy.identity(4), rir), rir
    assert _allclose(numpy.identity(4), rri), rri
Example no. 12
def test_rop_lop():
    mx = tensor.matrix('mx')
    mv = tensor.matrix('mv')
    v = tensor.vector('v')
    y = matrix_inverse(mx).sum(axis=0)

    yv = tensor.Rop(y, mx, mv)
    rop_f = function([mx, mv], yv)

    sy, _ = theano.scan(lambda i, y, x, v: (tensor.grad(y[i], x) * v).sum(),
                        sequences=tensor.arange(y.shape[0]),
                        non_sequences=[y, mx, mv])
    scan_f = function([mx, mv], sy)

    rng = numpy.random.RandomState(utt.fetch_seed())
    vx = numpy.asarray(rng.randn(4, 4), theano.config.floatX)
    vv = numpy.asarray(rng.randn(4, 4), theano.config.floatX)

    v1 = rop_f(vx, vv)
    v2 = scan_f(vx, vv)

    assert _allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))

    raised = False
    try:
        tensor.Rop(
            theano.clone(y, replace={mx: break_op(mx)}),
            mx,
            mv)
    except ValueError:
        raised = True
    if not raised:
        raise Exception((
            'Op did not raise an error even though the function'
            ' is not differentiable'))

    vv = numpy.asarray(rng.uniform(size=(4,)), theano.config.floatX)
    yv = tensor.Lop(y, mx, v)
    lop_f = function([mx, v], yv)

    sy = tensor.grad((v * y).sum(), mx)
    scan_f = function([mx, v], sy)

    v1 = lop_f(vx, vv)
    v2 = scan_f(vx, vv)
    assert _allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
Example no. 14
def test_rop_lop():
    mx = tensor.matrix("mx")
    mv = tensor.matrix("mv")
    v = tensor.vector("v")
    y = matrix_inverse(mx).sum(axis=0)

    yv = tensor.Rop(y, mx, mv)
    rop_f = function([mx, mv], yv)

    sy, _ = theano.scan(
        lambda i, y, x, v: (tensor.grad(y[i], x) * v).sum(),
        sequences=tensor.arange(y.shape[0]),
        non_sequences=[y, mx, mv],
    )
    scan_f = function([mx, mv], sy)

    rng = np.random.RandomState(utt.fetch_seed())
    vx = np.asarray(rng.randn(4, 4), theano.config.floatX)
    vv = np.asarray(rng.randn(4, 4), theano.config.floatX)

    v1 = rop_f(vx, vv)
    v2 = scan_f(vx, vv)

    assert _allclose(v1, v2), "ROP mismatch: %s %s" % (v1, v2)

    raised = False
    try:
        tensor.Rop(theano.clone(y, replace={mx: break_op(mx)}), mx, mv)
    except ValueError:
        raised = True
    if not raised:
        raise Exception(("Op did not raise an error even though the function"
                         " is not differentiable"))

    vv = np.asarray(rng.uniform(size=(4, )), theano.config.floatX)
    yv = tensor.Lop(y, mx, v)
    lop_f = function([mx, v], yv)

    sy = tensor.grad((v * y).sum(), mx)
    scan_f = function([mx, v], sy)

    v1 = lop_f(vx, vv)
    v2 = scan_f(vx, vv)
    assert _allclose(v1, v2), "LOP mismatch: %s %s" % (v1, v2)
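Example no. 15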
 def feature_sign_search( self ):
     '''
     This function runs the feature_sign_search on the coefficients while
     holding the bases clamped.
     '''
     #Declare an effective zero threshold
     effective_zero = 1e-19
     opt_cond = np.inf
     '''
     theta[ i ] is:
         -1 if self.coefficients[ i ] < 0
         1 if self.coefficients[ i ] > 0
         0 if self.coefficients[ i ] == 0
     '''
     theta = T.sgn( self.coefficients )
     active_set = T.ivector( name = 'active_set' )
     #The Gram matrix: the basis matrix dotted with its transpose
     gram_matrix = T.dot( self.bases.T, self.bases )
     target_correlation = T.dot( self.bases.T, self.x )
     
     cost = -T.sum( ( target_correlation - T.dot( gram_matrix, self.coefficients ) ) ** 2 )
     cost_grad = T.grad( cost, self.coefficients )
     
     candidate = T.argmax( cost_grad )
     if T.gt( cost_grad[ candidate ], self.gamma ):
         print( 'Found candidate greater than gamma' )
         updated_theta = T.set_subtensor( theta[ candidate ], -1 )
         active_set = active_set + candidate
     if T.lt( cost_grad[ candidate ], ( -1 * self.gamma ) ):
         print( 'Found candidate less than negative gamma' )
         updated_theta = T.set_subtensor( theta[ candidate ], 1 )
         active_set = active_set + candidate
     
     active_bases = self.bases[ active_set ]

     active_coefficients = self.coefficients[ active_set ]

     active_theta = updated_theta[ active_set ]
     
     new_coefficients = T.dot( matrix_inverse( T.dot( active_bases.T, active_bases ) ),
                               T.dot( active_bases.T, target_correlation )
                               - 0.5 * self.gamma * active_theta )
     sign_changes = 0
Example no. 16
 def grad(self, inputs, g_outputs):
     gz, = g_outputs
     x, = inputs
     return [gz * T.dot(x, op.matrix_inverse(T.dot(x.T, x)))]
Example no. 17
import numpy.random as rnd

import theano as th
import theano.tensor as T
#import theano.sandbox.linalg.ops as sT
from theano.sandbox.linalg import ops as sT

import GPy

import mltools.prob as mlprob

import time

mu = T.vector('mu')
sigma = T.matrix('sigma')
prec = sT.matrix_inverse(sigma)

# x = np.array([[15, -1.5], [-1.5, 1.5], [-1.4, 1.5], [1.4, -1.5], [-45.0, 83.5]])
# x = np.array([[15, -1.5],
#               [-1.5, 1.5],
#               [-1.4, 1.5],
#               [1.4, -1.5],
#               [-45.0, 83.5],
#               [-100.3, 68.3],
#               [1000.4, 432.4],
#               [32441.8, 12341.3]])

N = 100

x = rnd.randn(N, 1)
Example no. 19
def test_matrix_inverse_solve():
    A = theano.tensor.dmatrix('A')
    b = theano.tensor.dmatrix('b')
    node = matrix_inverse(A).dot(b).owner
    [out] = inv_as_solve.transform(node)
    assert isinstance(out.owner.op, Solve)