Example #1
  def _get_mu(self, ranks, x, y):
    """Initializes latent inputs expectations mu.

    Either loads pretrained values of tt-cores of mu, or initializes it
    according to optimal formulas from the given data.

    Args:
      ranks: tt-ranks of mu
      x: features of a batch of objects
      y: targets of a batch of objects
    """
    # TODO: test if this is needed.
    w = self.inputs.interpolate_on_batch(self.cov.project(x))
    Sigma = ops.tt_tt_matmul(self.sigma_l, ops.transpose(self.sigma_l))
    temp = ops.tt_tt_matmul(w, y)
    anc = ops.tt_tt_matmul(Sigma, temp)
    # Fold the batch anc into a single TT-tensor by summing its elements.
    res = TensorTrain([core[0, :, :, :, :] for core in anc.tt_cores],
                      tt_ranks=[1] * (anc.ndims() + 1))
    for i in range(1, anc.get_shape()[0]):
      elem = TensorTrain([core[i, :, :, :, :] for core in anc.tt_cores],
                         tt_ranks=[1] * (anc.ndims() + 1))
      res = ops.add(res, elem)
    mu_ranks = [1] + [ranks] * (res.ndims() - 1) + [1]
    return t3f.get_variable('tt_mu', initializer=TensorTrain(res.tt_cores, 
                                res.get_raw_shape(), mu_ranks))
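The summation loop above is a general pattern for folding a TensorTrainBatch into a single TensorTrain via repeated ops.add calls. A minimal sketch of the same idea (hypothetical `batch` variable with a static batch size; in t3f, integer indexing of a TensorTrainBatch yields a TensorTrain):

  res = batch[0]
  for i in range(1, batch.batch_size):
      res = ops.add(res, batch[i])  # the TT-ranks of res grow with every add

Since each addition sums the operands' TT-ranks, long loops like this are usually followed by a rounding step (t3f.round) to keep the representation compact.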
Example #2
    def __add__(self, other):
        """Returns a TensorTrain corresponding to element-wise sum tt_a + tt_b.

    Supports broadcasting (e.g. you can add TensorTrainBatch and TensorTrain).
    Just calls t3f.add, see its documentation for details.
    """
        # TODO: ugly.
        # We can't import ops in the beginning since it creates cyclic dependencies.
        from t3f import ops
        return ops.add(self, other)
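A quick usage sketch of the overloaded operator (arbitrary shapes and TT-ranks; only the public t3f API is assumed):

  import t3f

  a = t3f.random_tensor((3, 4, 5), tt_rank=2)
  b = t3f.random_tensor((3, 4, 5), tt_rank=3)
  c = a + b            # dispatches to t3f.add
  dense = t3f.full(c)  # dense tf.Tensor equal to t3f.full(a) + t3f.full(b)

The sum is exact but not rank-preserving: the internal TT-ranks of c are the element-wise sums of the operands' ranks ([1, 5, 5, 1] here).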
Example #3
    def __sub__(self, other):
        """Returns a TensorTrain corresponding to element-wise difference tt_a - tt_b.

    Supports broadcasting (e.g. you can subtract TensorTrainBatch and
    TensorTrain).
    Just calls t3f.add(self, (-1) * other), see its documentation for details.
    """
        # TODO: ugly.
        # We can't import ops in the beginning since it creates cyclic dependencies.
        from t3f import ops
        return ops.add(self, ops.multiply(other, -1.))
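Continuing the sketch from Example #2, subtraction reduces to addition with a scalar multiple, exactly as this method does:

  d = a - b  # same as t3f.add(a, t3f.multiply(b, -1.))
  # t3f.full(d) matches t3f.full(a) - t3f.full(b) up to floating-point error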
Example #4
 def testAdd(self):
     # Sum two TT-tensors.
     tt_a = initializers.random_tensor((2, 1, 3, 4), tt_rank=2)
     tt_b = initializers.random_tensor((2, 1, 3, 4),
                                       tt_rank=[1, 2, 4, 3, 1])
     with self.test_session() as sess:
         res_actual = ops.full(ops.add(tt_a, tt_b))
         res_actual2 = ops.full(tt_a + tt_b)
         res_desired = ops.full(tt_a) + ops.full(tt_b)
         to_run = [res_actual, res_actual2, res_desired]
         res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run)
         self.assertAllClose(res_actual_val, res_desired_val)
         self.assertAllClose(res_actual2_val, res_desired_val)
Example #5
 def testAddBroadcasting(self):
   # Sum two TT-tensors with broadcasting.
   tt_a = initializers.random_tensor_batch((2, 1, 4), tt_rank=2, batch_size=1,
                                           dtype=self.dtype)
   tt_b = initializers.random_tensor_batch((2, 1, 4), tt_rank=[1, 2, 4, 1],
                                           batch_size=3, dtype=self.dtype)
   with self.test_session() as sess:
     res_actual = ops.full(ops.add(tt_a, tt_b))
     res_actual2 = ops.full(tt_b + tt_a)
     res_desired = ops.full(tt_a) + ops.full(tt_b)
     to_run = [res_actual, res_actual2, res_desired]
     res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run)
     self.assertAllClose(res_actual_val, res_desired_val)
     self.assertAllClose(res_actual2_val, res_desired_val)
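The broadcasting rule exercised by this test: an operand with batch_size 1 is virtually repeated along the batch axis to match the other operand. A sketch using the same initializers through their top-level t3f aliases:

  a = t3f.random_tensor_batch((2, 3), tt_rank=2, batch_size=1)
  b = t3f.random_tensor_batch((2, 3), tt_rank=2, batch_size=4)
  c = a + b  # TensorTrainBatch with batch_size == 4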
Example #6
 def testAddSameBatchSize(self):
   # Sum two TT-matrices with the same batch size.
   tt_a = initializers.random_matrix_batch(((2, 1, 4), None), tt_rank=2,
                                           batch_size=3, dtype=self.dtype)
   tt_b = initializers.random_matrix_batch(((2, 1, 4), None),
                                           tt_rank=[1, 2, 4, 1], batch_size=3,
                                           dtype=self.dtype)
   with self.test_session() as sess:
     res_actual = ops.full(ops.add(tt_a, tt_b))
     res_actual2 = ops.full(tt_a + tt_b)
     res_desired = ops.full(tt_a) + ops.full(tt_b)
     to_run = [res_actual, res_actual2, res_desired]
     res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run)
     self.assertAllClose(res_actual_val, res_desired_val)
     self.assertAllClose(res_actual2_val, res_desired_val)
Example #7
 def testAdd(self):
     # Sum two TT-tensors.
     tt_a = initializers.random_tensor((2, 1, 3, 4),
                                       tt_rank=2,
                                       dtype=self.dtype)
     tt_b = initializers.random_tensor((2, 1, 3, 4),
                                       tt_rank=[1, 2, 4, 3, 1],
                                       dtype=self.dtype)
     res_actual = ops.full(ops.add(tt_a, tt_b))
     res_actual2 = ops.full(tt_a + tt_b)
     res_desired = ops.full(tt_a) + ops.full(tt_b)
     to_run = [res_actual, res_actual2, res_desired]
     res_actual_val, res_actual2_val, res_desired_val = self.evaluate(
         to_run)
     self.assertAllClose(res_actual_val, res_desired_val)
     self.assertAllClose(res_actual2_val, res_desired_val)
Example #8
 def _get_mus(self, ranks, x_init, y_init):
      """Initializes the expectations mu of the latent inputs, one per class.

      Same optimal-formula initialization as _get_mu, but the resulting
      TT-cores are tiled n_class times into a TensorTrainBatch.
      """
      w = self.inputs.interpolate_on_batch(self.cov.project(x_init))
      Sigma = ops.tt_tt_matmul(self.sigma_ls[0], ops.transpose(self.sigma_ls[0]))
      temp = ops.tt_tt_matmul(w, y_init)
      anc = ops.tt_tt_matmul(Sigma, temp)
      # Fold the batch anc into a single TT-tensor by summing its elements.
      res = TensorTrain([core[0, :, :, :, :] for core in anc.tt_cores],
                        tt_ranks=[1] * (anc.ndims() + 1))
      for i in range(1, anc.get_shape()[0]):
          elem = TensorTrain([core[i, :, :, :, :] for core in anc.tt_cores],
                             tt_ranks=[1] * (anc.ndims() + 1))
          res = ops.add(res, elem)
     mu_ranks = [1] + [ranks] * (res.ndims() - 1) + [1]
      mu_cores = []
      for core in res.tt_cores:
          # Replicate each core along a new leading axis, one copy per class.
          mu_cores.append(tf.tile(core[None, ...], [self.n_class, 1, 1, 1, 1]))
      return t3f.get_variable('tt_mus',
          initializer=TensorTrainBatch(mu_cores, res.get_raw_shape(), mu_ranks))
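The tf.tile step above just replicates each TT-core along a new leading batch axis, one copy per class. A standalone sketch with a dummy 4D matrix core (shapes are illustrative only):

  import tensorflow as tf

  core = tf.ones([2, 4, 1, 3])                            # (r_left, m, n, r_right)
  batch_core = tf.tile(core[None, ...], [3, 1, 1, 1, 1])  # (3, 2, 4, 1, 3): 3 copies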
Example #9
 def testAddBroadcasting(self):
     # Sum two TT-matrices with broadcasting.
     tt_a = initializers.random_matrix_batch(((2, 1, 4), (2, 2, 2)),
                                             tt_rank=2,
                                             batch_size=3,
                                             dtype=self.dtype)
     tt_b = initializers.random_matrix_batch(((2, 1, 4), (2, 2, 2)),
                                             tt_rank=[1, 2, 4, 1],
                                             batch_size=1,
                                             dtype=self.dtype)
     res_actual = ops.full(ops.add(tt_a, tt_b))
     res_actual2 = ops.full(tt_b + tt_a)
     res_desired = ops.full(tt_a) + ops.full(tt_b)
     to_run = [res_actual, res_actual2, res_desired]
     res_actual_val, res_actual2_val, res_desired_val = self.evaluate(
         to_run)
     self.assertAllClose(res_actual_val, res_desired_val)
     self.assertAllClose(res_actual2_val, res_desired_val)
Example #10
 def testAddSameBatchSize(self):
     # Sum two TT-tensors with the same batch size.
     tt_a = initializers.random_tensor_batch((2, 1, 4),
                                             tt_rank=2,
                                             batch_size=3,
                                             dtype=self.dtype)
     tt_b = initializers.random_tensor_batch((2, 1, 4),
                                             tt_rank=[1, 2, 4, 1],
                                             batch_size=3,
                                             dtype=self.dtype)
     res_actual = ops.full(ops.add(tt_a, tt_b))
     res_actual2 = ops.full(tt_a + tt_b)
     res_desired = ops.full(tt_a) + ops.full(tt_b)
     to_run = [res_actual, res_actual2, res_desired]
     res_actual_val, res_actual2_val, res_desired_val = self.evaluate(
         to_run)
     self.assertAllClose(res_actual_val, res_desired_val)
     self.assertAllClose(res_actual2_val, res_desired_val)