def get_updates(h, c, U, V, d, bias=1e-5, decomposition="svd", zca=True):
    updates = []
    checks = []

    # Theano applies updates in parallel, so every update is expressed
    # in terms of the old values. To chain updates, assign the return
    # value, i.e. x = update(x, foo()); x is then a plain (non-shared)
    # variable that refers to the updated value.
    def update(variable, new_value):
        updates.append((variable, new_value))
        return new_value

    # compute canonical parameters
    W = T.dot(U, V)
    b = d - T.dot(c, W)

    # update estimates of c, U
    c = update(c, h.mean(axis=0))
    U = update(U, whiten_by[decomposition](h - c, bias, zca))

    # check that the new covariance is indeed identity
    n = h.shape[0].astype(theano.config.floatX)
    covar = T.dot((h - c).T, (h - c)) / (n - 1)
    whiteh = T.dot(h - c, U)
    whitecovar = T.dot(whiteh.T, whiteh) / (n - 1)
    checks.append(PdbBreakpoint
                  ("correlated after whitening")
                  (1 - T.allclose(whitecovar,
                                  T.identity_like(whitecovar),
                                  rtol=1e-3, atol=1e-3),
                   c, U, covar, whitecovar, h)[0])

    # adjust V, d so that the total transformation is unchanged
    # (lstsq is much more stable than T.inv)
    V = update(V, util.lstsq()(U, W, -1)[0])
    d = update(d, b + T.nlinalg.matrix_dot(c, U, V))

    # check that the total transformation is unchanged
    before = b + T.dot(h, W)
    after = d + T.nlinalg.matrix_dot(h - c, U, V)
    checks.append(
        PdbBreakpoint
        ("transformation changed")
        (1 - T.allclose(before, after,
                        rtol=1e-3, atol=1e-3),
         T.constant(0.0), W, b, c, U, V, d, h, before, after)[0])

    return updates, checks
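
For context, a minimal sketch of how the returned updates and checks might be compiled into a single reparameterization step. The setup below is hypothetical (the shared-variable shapes and the 128-row minibatch are illustrative); get_updates itself additionally relies on whiten_by and util.lstsq from its own module.

import numpy as np
import theano
import theano.tensor as T

nhid = 8
floatX = theano.config.floatX
c = theano.shared(np.zeros(nhid, dtype=floatX))
U = theano.shared(np.eye(nhid, dtype=floatX))
V = theano.shared(np.eye(nhid, dtype=floatX))
d = theano.shared(np.zeros(nhid, dtype=floatX))

h = T.matrix("h")
updates, checks = get_updates(h, c, U, V, d)
# returning the checks as outputs forces theano to evaluate them (and
# hit the PdbBreakpoints on failure) every time the step is called
step = theano.function([h], checks, updates=updates)
step(np.random.randn(128, nhid).astype(floatX))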
Example 2
    def logp(self, value):
        """
        Calculate log-probability of defined Mixture distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log
            probabilities for multiple values are desired, the values must be
            provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        w = self.w

        return bound(
            logsumexp(tt.log(w) + self._comp_logp(value),
                      axis=-1,
                      keepdims=False),
            w >= 0,
            w <= 1,
            tt.allclose(w.sum(axis=-1), 1),
            broadcast_conditions=False,
        )
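
The bound expression encodes the standard mixture identity log p(x) = logsumexp_k(log w_k + log p_k(x)), with the constraints rejecting invalid weights. A quick numpy sanity check of that identity on a hypothetical two-component Gaussian mixture:

import numpy as np
from scipy.special import logsumexp
from scipy.stats import norm

w = np.array([0.3, 0.7])                  # non-negative, sums to 1
mus = np.array([-1.0, 2.0])
sigmas = np.array([1.0, 0.5])
x = 0.5

direct = np.log(w[0] * norm.pdf(x, mus[0], sigmas[0])
                + w[1] * norm.pdf(x, mus[1], sigmas[1]))
via_logsumexp = logsumexp(np.log(w) + norm.logpdf(x, mus, sigmas))
assert np.allclose(direct, via_logsumexp)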
Example 3
 def __init__(self, loss=None, inputs=None, C=None):
     symmetrize = False
     A, b = inputs
     # these checks run numerically at construction time, which assumes
     # A can be evaluated here (it is eval()'d again below anyway);
     # note T.allclose returns a symbolic scalar, hence the .eval()
     if A.shape[0].eval() != A.shape[1].eval():
         symmetrize = True
     elif not T.allclose(A.T, A).eval():
         print('matrix is not symmetric')
         symmetrize = True
     if symmetrize:
         print('symmetrizing via the normal equations')
         # replace A x = b with the symmetric system A^T A x = A^T b
         self._A = T.dot(A.T, A)
         self._b = T.dot(A.T, b)
     else:
         self._A = A
         self._b = b
     #         self._A = theano.shared(A)
     #         self._b = theano.shared(b)
     if C is None:
         self._C = T.eye(self._A.shape[1])
     else:
         self._C = C
     b = self._b.eval()
     A = self._A.eval()
     self._x0 = np.zeros(b.shape[0])
     self._r0 = b - np.dot(A, self._x0)
     # self._z = T.dot(self._C,theano.shared(self._x0))
     self._t_x = theano.shared(self._x0)  # T.vector('x')
     self._output_tf = loss
     if loss is None:
         self._output_tf = self._tf_CG_loss()
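
The symmetrization step replaces A x = b with the normal equations A^T A x = A^T b, whose system matrix is symmetric (and positive semi-definite) by construction, as conjugate-gradient-style solvers require. A small numpy check that both systems agree when A is square and invertible:

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(5, 5)                            # generally non-symmetric
b = rng.randn(5)

x_direct = np.linalg.solve(A, b)
x_normal = np.linalg.solve(A.T @ A, A.T @ b)   # normal equations
assert np.allclose(x_direct, x_normal)
assert np.allclose(A.T @ A, (A.T @ A).T)       # symmetric by construction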
Example 4
def test_determinant():
    det = np.linalg.det(K)
    args = [
        -5, -2, 1, [1, 2, 3],
        np.linspace(0, 10, 10), 1e-5 * np.ones((3, 10))
    ]
    # tt.allclose returns a symbolic scalar; eval() reduces it to a bool
    assert tt.allclose(np.log(det), log_det(*args)).eval()
Example 5
 def test_neibs_half_step_by_valid(self):
     neib_shapes = ((3, 3), (3, 5), (5, 3))
     for shp_idx, (shape, neib_step) in enumerate([
         [(7, 8, 5, 5), (1, 1)],
         [(7, 8, 5, 5), (2, 2)],
         [(7, 8, 5, 5), (4, 4)],
         [(7, 8, 5, 5), (1, 4)],
         [(7, 8, 5, 5), (4, 1)],
         [(80, 90, 5, 5), (1, 2)],
         [(1025, 9, 5, 5), (2, 1)],
         [(1, 1, 5, 1037), (2, 4)],
         [(1, 1, 1045, 5), (4, 2)]]
     ):
         for neib_shape in neib_shapes:
             for dtype in self.dtypes:
                 x = theano.shared(np.random.randn(*shape).astype(dtype))
                 extra = (neib_shape[0] // 2, neib_shape[1] // 2)
                 padded_shape = (x.shape[0], x.shape[1], x.shape[2] + 2 * extra[0], x.shape[3] + 2 * extra[1])
                 padded_x = T.zeros(padded_shape)
                 padded_x = T.set_subtensor(padded_x[:, :, extra[0]:-extra[0], extra[1]:-extra[1]], x)
                 x_using_valid = images2neibs(padded_x, neib_shape, neib_step, mode="valid")
                 x_using_half = images2neibs(x, neib_shape, neib_step, mode="half")
                 close = T.allclose(x_using_valid, x_using_half)
                 f = theano.function([], close, mode=self.mode)
                 assert f()
Example 6
def test_dot_l():
    z = np.random.randn(30, 1)
    args = [
        -5, -2, 1, [1, 2, 3],
        np.linspace(0, 10, 10), 1e-5 * np.ones((3, 10)), z
    ]
    y = np.dot(np.linalg.cholesky(K), z)
    # tt.allclose returns a symbolic scalar; eval() reduces it to a bool
    assert tt.allclose(y, mult_l(*args)).eval()
Example 7
def test_inverse():
    z = np.random.randn(30, 1)
    y = np.dot(np.linalg.inv(K), z)
    args = [
        -5, -2, 1, [1, 2, 3],
        np.linspace(0, 10, 10), 1e-5 * np.ones((3, 10)), z
    ]
    # tt.allclose returns a symbolic scalar; eval() reduces it to a bool
    assert tt.allclose(y, apply_inv(*args)).eval()
Example 8
    def logp(self, value):
        w = self.w

        return bound(logsumexp(tt.log(w) + self._comp_logp(value), axis=-1),
                     w >= 0,
                     w <= 1,
                     tt.allclose(w.sum(axis=-1), 1),
                     broadcast_conditions=False)
Example 9
    def logp(self, value):
        """
        Calculate log-probability of defined ``MixtureSameFamily`` distribution at specified value.

        Parameters
        ----------
        value : numeric
            Value(s) for which log-probability is calculated. If the log
            probabilities for multiple values are desired, the values must be
            provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """

        comp_dists = self.comp_dists
        w = self.w
        mixture_axis = self.mixture_axis

        event_shape = comp_dists.shape[mixture_axis + 1:]

        # To be able to broadcast the comp_dists.logp with w and value
        # We first have to pad the shape of w to the right with ones
        # so that it can broadcast with the event_shape.

        w = tt.shape_padright(w, len(event_shape))

        # Second, we have to add the mixture_axis to the value tensor.
        # To insert the mixture axis at the correct location we use a
        # negative axis index, which also handles situations in which
        # value is an observed value with more batch dimensions than
        # the ones present in comp_dists.
        comp_dists_ndim = len(comp_dists.shape)

        value = tt.shape_padaxis(value, axis=mixture_axis - comp_dists_ndim)

        comp_logp = comp_dists.logp(value)
        return bound(
            logsumexp(tt.log(w) + comp_logp, axis=mixture_axis,
                      keepdims=False),
            w >= 0,
            w <= 1,
            tt.allclose(w.sum(axis=mixture_axis - comp_dists_ndim), 1),
            broadcast_conditions=False,
        )
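
The two padding steps are purely about broadcasting. A shape-only numpy illustration with hypothetical shapes: comp_dists of shape (4, 3, 2), i.e. one batch dimension of 4, K = 3 components at mixture_axis = 1, and event shape (2,):

import numpy as np

comp_shape = (4, 3, 2)
mixture_axis = 1
event_shape = comp_shape[mixture_axis + 1:]               # (2,)

w = np.random.dirichlet(np.ones(3), size=4)               # (4, 3)
w = w.reshape(w.shape + (1,) * len(event_shape))          # (4, 3, 1)

value = np.zeros((4, 2))                                  # no mixture axis yet
value = np.expand_dims(value, mixture_axis - len(comp_shape))  # (4, 1, 2)

# log(w) now broadcasts against a (4, 3, 2)-shaped comp_logp, and the
# logsumexp over the mixture axis reduces it back to (4, 2)
assert (np.log(w) + np.zeros(comp_shape)).shape == (4, 3, 2)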
Example 10
def CGD_optimizer(As, bs, n_iter=10, C=None):
    # n_iter avoids shadowing the builtin iter(); the symmetry checks
    # run numerically at graph-construction time, which assumes As can
    # be evaluated (e.g. a constant or shared variable)
    symmetrize = False
    if As.shape[0].eval() != As.shape[1].eval():
        symmetrize = True
    elif not T.allclose(As.T, As).eval():
        symmetrize = True
    if symmetrize:
        # replace As x = bs with the symmetric system As^T As x = As^T bs
        A = T.dot(As.T, As)
        b = T.dot(As.T, bs)
    else:
        # without this branch A and b would be undefined below
        A, b = As, bs
    C = T.eye(b.shape[0]) if C is None else C
    x0 = T.zeros_like(b)
    r0 = b - T.dot(A, x0)
    z0 = T.dot(C, r0)
    result, updates = theano.scan(fn=CG_single_step,
                                  outputs_info=[x0, r0, z0],
                                  non_sequences=[A, C],
                                  n_steps=n_iter,
                                  strict=True)
    return result[0][-1]
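
CG_single_step is not shown here. Given the (x, r, z) state carried by outputs_info and the (A, C) non-sequences (which strict=True makes explicit arguments), the natural fit is a preconditioned steepest-descent step; the sketch below is one plausible reading, not necessarily the original author's, and textbook CG would additionally thread the search direction p through outputs_info.

def CG_single_step(x, r, z, A, C):
    # one solver iteration over the state (x, r, z) used above
    Az = T.dot(A, z)
    alpha = T.dot(r, z) / T.dot(z, Az)   # optimal step length along z
    x_new = x + alpha * z
    r_new = r - alpha * Az               # residual b - A x_new, updated incrementally
    z_new = T.dot(C, r_new)              # re-apply the preconditioner
    return x_new, r_new, z_new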
Example 11
    def logp(self, value):
        """
        Calculate log-probability of defined Mixture distribution at specified value.

        Parameters
        ----------
        value : numeric
            Value(s) for which log-probability is calculated. If the log
            probabilities for multiple values are desired, the values must be
            provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        w = self.w

        return bound(logsumexp(tt.log(w) + self._comp_logp(value), axis=-1),
                     w >= 0, w <= 1, tt.allclose(w.sum(axis=-1), 1),
                     broadcast_conditions=False)
Example 12
 def test_neibs_full_step_by_valid(self):
     for shp_idx, (shape, neib_step, neib_shapes) in enumerate([
         [(7, 8, 5, 5), (1, 1), ((3, 3), (3, 5), (5, 3))],
         [(7, 8, 5, 5), (2, 2), ((3, 3), (3, 5), (5, 3))],
         [(7, 8, 6, 6), (3, 3), ((2, 2), (2, 5), (5, 2))],
         [(7, 8, 6, 6), (1, 3), ((2, 2), (2, 5), (5, 2))],
         [(7, 8, 6, 6), (3, 1), ((2, 2), (2, 5), (5, 2))],
         [(80, 90, 5, 5), (1, 2), ((3, 3), (3, 5), (5, 3))],
         [(1025, 9, 5, 5), (2, 1), ((3, 3), (3, 5), (5, 3))],
         [(1, 1, 11, 1037), (2, 3), ((3, 3), (5, 3))],
         [(1, 1, 1043, 11), (3, 2), ((3, 3), (3, 5))]]
     ):
         for neib_shape in neib_shapes:
             for dtype in self.dtypes:
                 x = theano.shared(np.random.randn(*shape).astype(dtype))
                 extra = (neib_shape[0] - 1, neib_shape[1] - 1)
                 padded_shape = (x.shape[0], x.shape[1], x.shape[2] + 2 * extra[0], x.shape[3] + 2 * extra[1])
                 padded_x = T.zeros(padded_shape)
                 padded_x = T.set_subtensor(padded_x[:, :, extra[0]:-extra[0], extra[1]:-extra[1]], x)
                 x_using_valid = images2neibs(padded_x, neib_shape, neib_step, mode="valid")
                 x_using_full = images2neibs(x, neib_shape, neib_step, mode="full")
                 close = T.allclose(x_using_valid, x_using_full)
                 f = theano.function([], close, mode=self.mode)
                 assert f()
Example 13
    def logp(self, value):
        w = self.w

        return bound(
            logsumexp(tt.log(w) + self._comp_logp(value), axis=-1).sum(),
            w >= 0, w <= 1, tt.allclose(w.sum(axis=-1), 1))
Example 14
    def logp(self, value):
        w = self.w

        return bound(logsumexp(tt.log(w) + self._comp_logp(value), axis=-1).sum(),
                     w >= 0, w <= 1, tt.allclose(w.sum(axis=-1), 1),
                     broadcast_conditions=False)
Example 15
weight_after_roulette = T.switch(T.and_(partakes_roulette, loses_roulette),
                                 0.,
                                 new_weight)
# if partakes in roulette but does not get terminated
weight_after_roulette = T.switch(T.and_(partakes_roulette, T.invert(loses_roulette)),
                                 weight_after_roulette / CHANCE,
                                 weight_after_roulette)
#theano.printing.Print('new weight')(new_weight)
#theano.printing.Print('partakes_roulette')(partakes_roulette)
#theano.printing.Print('loses_roulette')(loses_roulette)
#theano.printing.Print('weight_after_roulette')(weight_after_roulette)


one_cycle = theano.function(inputs=[mu_a, mu_s, microns_per_shell],
                            outputs=[shells, new_heats],
                            updates=OrderedDict({xyz: xyz_moved, uvw: uvw_new_direction,
                                                 weight: weight_after_roulette,
                                                 finished: T.allclose(weight, 0.)}))


start = time.time()
print("start simulation")

while not finished.get_value():
    new_shells, new_heats = one_cycle(2, 20, 50)

end = time.time()
print("end simulation after: " + str(end - start) + " seconds")