def testMultiplyUnknownBatchSizeBroadcasting(self):
  """Broadcasting multiply when one batch size is unknown at graph time.

  A batch with a dynamic (None) batch dimension is multiplied by a batch of
  size 1 and by a plain tensor; both should broadcast to the runtime batch
  size, in either argument order.
  """
  # Use self.dtype (as the sibling tests do) instead of hard-coded
  # tf.float32: otherwise the test crashes with a dtype mismatch whenever
  # the test class is parameterized with float64.
  c1 = tf.placeholder(self.dtype, [None, 1, 3, 2])
  c2 = tf.placeholder(self.dtype, [None, 2, 3, 1])
  tt_a = TensorTrainBatch([c1, c2])
  tt_b = initializers.random_tensor_batch((3, 3), tt_rank=3, batch_size=1,
                                          dtype=self.dtype)
  tt_c = initializers.random_tensor((3, 3), tt_rank=3, dtype=self.dtype)
  res_ab = ops.full(ops.multiply(tt_a, tt_b))
  res_ba = ops.full(ops.multiply(tt_b, tt_a))
  res_ac = ops.full(ops.multiply(tt_a, tt_c))
  res_ca = ops.full(ops.multiply(tt_c, tt_a))
  res_desired_ab = ops.full(tt_a) * ops.full(tt_b)
  res_desired_ac = ops.full(tt_a) * ops.full(tt_c)
  to_run = [res_ab, res_ba, res_ac, res_ca, res_desired_ab, res_desired_ac]
  feed_dict = {
      c1: np.random.rand(7, 1, 3, 2),
      c2: np.random.rand(7, 2, 3, 1)
  }
  with self.test_session() as sess:
    ab, ba, ac, ca, des_ab, des_ac = sess.run(to_run, feed_dict=feed_dict)
    self.assertAllClose(ab, des_ab)
    self.assertAllClose(ba, des_ab)
    self.assertAllClose(ac, des_ac)
    self.assertAllClose(ca, des_ac)
def testMultiplyTwoBatchesUnknownSize(self):
  """Multiply two batches whose batch sizes are only known at run time.

  Equal runtime batch sizes must succeed (in either argument order);
  mismatched runtime batch sizes must raise InvalidArgumentError.
  """
  core_shapes = [(1, 3, 2), (2, 3, 1), (1, 3, 2), (2, 3, 1)]
  ph = [tf.placeholder(self.dtype, [None] + list(s)) for s in core_shapes]
  tt_a = TensorTrainBatch(ph[:2])
  tt_b = TensorTrainBatch(ph[2:])
  res_ab = ops.full(ops.multiply(tt_a, tt_b))
  res_ba = ops.full(ops.multiply(tt_b, tt_a))
  res_desired = ops.full(tt_a) * ops.full(tt_b)
  to_run = [res_ab, res_ba, res_desired]

  def make_feed(size_a, size_b):
    # Random cores for tt_a (first two placeholders) and tt_b (last two).
    sizes = [size_a, size_a, size_b, size_b]
    return {p: np.random.rand(b, *s)
            for p, b, s in zip(ph, sizes, core_shapes)}

  with self.test_session() as sess:
    ab_full, ba_full, des_full = sess.run(to_run,
                                          feed_dict=make_feed(7, 7))
    self.assertAllClose(ab_full, des_full)
    self.assertAllClose(ba_full, des_full)
    # Batch sizes 7 vs 1 disagree at run time: must fail.
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sess.run(to_run, feed_dict=make_feed(7, 1))
def testMultiplyUnknownSizeBatchAndBatch(self):
  """Multiply a dynamic-batch-size batch by a statically-sized batch.

  Succeeds when the runtime batch size matches the static one (8) and
  raises InvalidArgumentError when it does not.
  """
  # Use self.dtype (as the sibling tests do) instead of hard-coded
  # tf.float32, and pass dtype to the initializer: otherwise the test
  # crashes with a dtype mismatch when the class runs with float64.
  c1 = tf.placeholder(self.dtype, [None, 1, 3, 2])
  c2 = tf.placeholder(self.dtype, [None, 2, 3, 1])
  tt_b = initializers.random_tensor_batch((3, 3), tt_rank=2, batch_size=8,
                                          dtype=self.dtype)
  tt_a = TensorTrainBatch([c1, c2])
  res_ab = ops.full(ops.multiply(tt_a, tt_b))
  res_ba = ops.full(ops.multiply(tt_b, tt_a))
  res_desired = ops.full(tt_a) * ops.full(tt_b)
  to_run = [res_ab, res_ba, res_desired]
  feed_dict = {
      c1: np.random.rand(8, 1, 3, 2),
      c2: np.random.rand(8, 2, 3, 1)
  }
  # Runtime batch size 1 != static batch size 8: must fail.
  feed_dict_err = {
      c1: np.random.rand(1, 1, 3, 2),
      c2: np.random.rand(1, 2, 3, 1)
  }
  with self.test_session() as sess:
    ab_full, ba_full, des_full = sess.run(to_run, feed_dict=feed_dict)
    self.assertAllClose(ab_full, des_full)
    self.assertAllClose(ba_full, des_full)
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sess.run(to_run, feed_dict=feed_dict_err)
def testMultiplyBatchByTensor(self):
  """A single TT-tensor times a TT-batch broadcasts over the batch axis."""
  single = initializers.random_tensor((3, 3, 3), tt_rank=2,
                                      dtype=self.dtype)
  batch = initializers.random_tensor_batch((3, 3, 3), tt_rank=2,
                                           batch_size=5, dtype=self.dtype)
  with self.test_session() as sess:
    # Both argument orders should give the same broadcasted result.
    prod_sb = ops.full(ops.multiply(single, batch))
    prod_bs = ops.full(ops.multiply(batch, single))
    expected = ops.full(single) * ops.full(batch)
    sb_val, bs_val, expected_val = sess.run([prod_sb, prod_bs, expected])
    self.assertAllClose(sb_val, expected_val)
    self.assertAllClose(bs_val, expected_val)
def testMultiplyBroadcasting(self):
  """A batch of size 1 broadcasts against a batch of size 5."""
  small_batch = initializers.random_tensor_batch((3, 3, 3), tt_rank=2,
                                                 batch_size=1,
                                                 dtype=self.dtype)
  big_batch = initializers.random_tensor_batch((3, 3, 3), tt_rank=2,
                                               batch_size=5,
                                               dtype=self.dtype)
  # Both argument orders should broadcast to batch size 5.
  prod_sb = ops.full(ops.multiply(small_batch, big_batch))
  prod_bs = ops.full(ops.multiply(big_batch, small_batch))
  expected = ops.full(small_batch) * ops.full(big_batch)
  sb_val, bs_val, expected_val = self.evaluate(
      [prod_sb, prod_bs, expected])
  self.assertAllClose(sb_val, expected_val)
  self.assertAllClose(bs_val, expected_val)
def __neg__(self):
  """Returns a TensorTrain corresponding to the element-wise negation -tt_a.

  Just calls t3f.multiply(self, -1.), see its documentation for details.
  """
  # TODO: ugly.
  # Imported here rather than at module level to avoid a cyclic dependency.
  from t3f import ops
  negated = ops.multiply(self, -1.)
  return negated
def __mul__(self, other):
  """Returns a TensorTrain corresponding to the element-wise product tt_a * tt_b.

  Supports broadcasting (e.g. you can multiply TensorTrainBatch and
  TensorTrain).
  Just calls t3f.multiply, see its documentation for details.
  """
  # TODO: ugly.
  # Imported here rather than at module level to avoid a cyclic dependency.
  from t3f import ops
  product = ops.multiply(self, other)
  return product
def testMultiplyByNumber(self):
  """Scaling a TT-tensor by a scalar matches scaling its dense form."""
  tt = initializers.random_tensor((1, 2, 3), tt_rank=(1, 2, 3, 1))
  with self.test_session() as sess:
    # Scale via the explicit op and via the overloaded operator.
    scaled = ops.full(ops.multiply(tt, 4))
    scaled_rmul = ops.full(4.0 * tt)
    expected = 4.0 * ops.full(tt)
    scaled_val, rmul_val, expected_val = sess.run(
        [scaled, scaled_rmul, expected])
    self.assertAllClose(scaled_val, expected_val)
    self.assertAllClose(rmul_val, expected_val)
def __sub__(self, other):
  """Returns a TensorTrain corresponding to the element-wise difference tt_a - tt_b.

  Supports broadcasting (e.g. you can subtract TensorTrainBatch and
  TensorTrain).
  Just calls t3f.add(self, (-1) * other), see its documentation for details.
  """
  # TODO: ugly.
  # Imported here rather than at module level to avoid a cyclic dependency.
  from t3f import ops
  negated_other = ops.multiply(other, -1.)
  return ops.add(self, negated_other)
def testMultiply(self):
  """Element-wise product of two TT-tensors matches the dense product."""
  lhs = initializers.random_tensor((1, 2, 3, 4), tt_rank=2)
  rhs = initializers.random_tensor((1, 2, 3, 4), tt_rank=[1, 1, 4, 3, 1])
  with self.test_session() as sess:
    # Multiply via the explicit op and via the overloaded operator.
    prod_op = ops.full(ops.multiply(lhs, rhs))
    prod_operator = ops.full(lhs * rhs)
    expected = ops.full(lhs) * ops.full(rhs)
    op_val, operator_val, expected_val = sess.run(
        [prod_op, prod_operator, expected])
    self.assertAllClose(op_val, expected_val)
    self.assertAllClose(operator_val, expected_val)
def testMultiplyByNumber(self):
  """Scaling a batch of TT-tensors by a scalar matches the dense scaling."""
  tt = initializers.random_tensor_batch((1, 2, 3), tt_rank=(1, 2, 3, 1),
                                        batch_size=3, dtype=self.dtype)
  # Scale via the explicit op and via the overloaded operator.
  scaled = ops.full(ops.multiply(tt, 4))
  scaled_rmul = ops.full(4.0 * tt)
  expected = 4.0 * ops.full(tt)
  scaled_val, rmul_val, expected_val = self.evaluate(
      [scaled, scaled_rmul, expected])
  self.assertAllClose(scaled_val, expected_val)
  self.assertAllClose(rmul_val, expected_val)
def testMultiply(self):
  """Element-wise product of two TT-tensors matches the dense product."""
  lhs = initializers.random_tensor((1, 2, 3, 4), tt_rank=2,
                                   dtype=self.dtype)
  rhs = initializers.random_tensor((1, 2, 3, 4), tt_rank=[1, 1, 4, 3, 1],
                                   dtype=self.dtype)
  # Multiply via the explicit op and via the overloaded operator.
  prod_op = ops.full(ops.multiply(lhs, rhs))
  prod_operator = ops.full(lhs * rhs)
  expected = ops.full(lhs) * ops.full(rhs)
  op_val, operator_val, expected_val = self.evaluate(
      [prod_op, prod_operator, expected])
  self.assertAllClose(op_val, expected_val)
  self.assertAllClose(operator_val, expected_val)