def test_matmul(self):
    """Check a single 2-D matrix product against the TF reference."""
    lhs = tf.placeholder(tf.float32, shape=(2, 3))
    rhs = tf.placeholder(tf.float32, shape=(3, 4))
    product = tf.matmul(lhs, rhs)
    # random values matching each placeholder's declared shape
    feed = {p: np.random.rand(*tf_to_shape_tuple(p)) for p in (lhs, rhs)}
    self.run(product, tf_feed_dict=feed)
def test_binary_ops(self):
    """Exercise add / mul / div / sub / maximum chained into one graph."""
    x = tf.placeholder(tf.float32, shape=(2, 3))
    y = tf.placeholder(tf.float32, shape=(2, 3))
    # build the same elementwise chain the original test used
    total = tf.add(x, y)
    scaled = tf.mul(total, x)
    ratio = tf.div(scaled, y)
    diff = tf.sub(x, ratio)
    clipped = tf.maximum(x, diff)
    feed = {p: np.random.rand(*tf_to_shape_tuple(p)) for p in (x, y)}
    self.run(clipped, tf_feed_dict=feed)
def test_neg(self):
    """Unary negation of a 2-D placeholder."""
    x = tf.placeholder(tf.float32, shape=(20, 30))
    negated = tf.neg(x)
    self.run(negated,
             tf_feed_dict={x: np.random.rand(*tf_to_shape_tuple(x))})
def test_relu(self):
    """ReLU over normally-distributed input.

    randn (not rand) is used deliberately so the input contains both
    positive and negative values, exercising both relu branches.
    """
    x = tf.placeholder(tf.float32, shape=(100, 200))
    activated = tf.nn.relu(x)
    feed = {x: np.random.randn(*tf_to_shape_tuple(x))}
    self.run(activated, tf_feed_dict=feed)
def test_identity(self):
    """identity() should be transparent inside a larger expression."""
    x = tf.placeholder(tf.float32, shape=(2, 3))
    y = tf.placeholder(tf.float32, shape=(2, 3))
    # identity wrapped both around an input and around the whole sum
    passthrough = tf.identity(tf.identity(x) + y)
    feed = {p: np.random.rand(*tf_to_shape_tuple(p)) for p in (x, y)}
    self.run(passthrough, tf_feed_dict=feed)
def test_sum_and_broadcast(self):
    """Reduce over some axes, then broadcast the results back together."""
    x = tf.placeholder(tf.float32, shape=[3, 4, 5, 6])
    y = tf.placeholder(tf.float32, shape=[3, 4, 5])
    x_reduced = tf.reduce_sum(x, reduction_indices=[0, 3])  # -> (4, 5)
    y_reduced = tf.reduce_sum(y, reduction_indices=[0, 1])  # -> (5,)
    # (4, 5) + (5,) + (3, 4, 5) broadcasts to (3, 4, 5)
    combined = x_reduced + y_reduced + y
    feed = {p: np.random.rand(*tf_to_shape_tuple(p)) for p in (x, y)}
    self.run(combined, tf_feed_dict=feed)
def test_tanh_sigmoid(self):
    """Compose tanh / sigmoid with matmul and elementwise addition."""
    x = tf.placeholder(tf.float32, shape=(2, 3))
    y = tf.placeholder(tf.float32, shape=(3, 4))
    z = tf.placeholder(tf.float32, shape=(2, 4))
    gated = tf.sigmoid(tf.matmul(x, tf.tanh(y)))  # (2, 4)
    out = gated + tf.sigmoid(z)
    feed = {p: np.random.rand(*tf_to_shape_tuple(p)) for p in (x, y, z)}
    self.run(out, tf_feed_dict=feed)
def test_matmul_transpose(self):
    """All four transpose_a / transpose_b combinations of matmul.

    Shapes are chosen per case so the (optionally transposed) operands
    always multiply as (2, 3) @ (3, 4).
    """
    # (shape_a, shape_b, transpose_a, transpose_b)
    cases = [
        ((2, 3), (3, 4), False, False),
        ((3, 2), (3, 4), True, False),
        ((2, 3), (4, 3), False, True),
        ((3, 2), (4, 3), True, True),
    ]
    for shape_a, shape_b, t_a, t_b in cases:
        lhs = tf.placeholder(tf.float32, shape=shape_a)
        rhs = tf.placeholder(tf.float32, shape=shape_b)
        feed = {p: np.random.rand(*tf_to_shape_tuple(p)) for p in (lhs, rhs)}
        self.run(tf.matmul(lhs, rhs, transpose_a=t_a, transpose_b=t_b),
                 tf_feed_dict=feed)
def test_shape(self):
    """tf.shape on placeholders of rank 1 through 4."""
    shapes = [(1, ), (1, 2), (1, 2, 3), (1, 2, 3, 4)]
    placeholders = [tf.placeholder(tf.float32, shape=s) for s in shapes]
    shape_ops = [tf.shape(p) for p in placeholders]
    # feed every placeholder; each op only consumes its own input
    feed = {p: np.random.rand(*tf_to_shape_tuple(p)) for p in placeholders}
    for op in shape_ops:
        self.run(op, tf_feed_dict=feed)
def test_sum_mean(self):
    """reduce_sum / reduce_mean over several axis subsets.

    Includes the edge cases None (reduce everything) and [] (reduce
    nothing) alongside partial and full axis lists.
    """
    axis_choices = [None, [], [0], [0, 1], [1, 2], [0, 1, 2]]
    x = tf.placeholder(tf.float32, shape=[3, 4, 5])
    feed = {x: np.random.rand(*tf_to_shape_tuple(x))}
    for axes in axis_choices:
        self.run(tf.reduce_sum(x, reduction_indices=axes), tf_feed_dict=feed)
        self.run(tf.reduce_mean(x, reduction_indices=axes), tf_feed_dict=feed)
def test_broadcast_rules(self):
    """Broadcasting combinations across mixed ranks.

    NOTE: stock tf leaves some broadcasts un-implemented, e.g.
    (2, 1, 2, 1) + (1, 2, 1, 2) and (10, 1, 2, 1, 5) + (11, 1, 1, 5),
    so the shapes below stay within what tf itself can evaluate.
    """
    w = tf.placeholder(tf.float32, shape=(5, 1, 1, 4))
    x = tf.placeholder(tf.float32, shape=(1, 1, 3, 1, 1))
    y = tf.placeholder(tf.float32, shape=(1, 1, 4))
    z = tf.placeholder(tf.float32, shape=(4, ))
    combined = w + x * y + z
    feed = {p: np.random.rand(*tf_to_shape_tuple(p)) for p in (w, x, y, z)}
    self.run(combined, tf_feed_dict=feed)