Example #1
 def testArithmeticRenames(self):
     with self.test_session() as s:
         stuff = tf.split(1, 2, [[1, 2, 3, 4], [4, 5, 6, 7]])
         vals = s.run(stuff)
         self.assertAllEqual(vals, [[[1, 2], [4, 5]], [[3, 4], [6, 7]]])
         self.assertAllEqual(tf.neg(tf.mul(tf.add(1, 2), tf.sub(5, 3))).eval(), -6)
         self.assertAllEqual(s.run(tf.listdiff([1, 2, 3], [3, 3, 4]))[0], [1, 2])
         self.assertAllEqual(tf.list_diff([1, 2, 3], [3, 3, 4])[0].eval(), [1, 2])
         a = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
         foo = np.where(np.less(a, 2), np.negative(a), a)
         self.assertAllEqual(tf.select(tf.less(a, 2), tf.neg(a), a).eval(), foo)
         self.assertAllEqual(tf.complex_abs(tf.constant(3 + 4.0j)).eval(), 5)
Example #2
    def _testListDiff(self, x, y, out, idx, dtype=np.int32):
        x = np.array(x, dtype=dtype)
        y = np.array(y, dtype=dtype)
        out = np.array(out, dtype=dtype)
        idx = np.array(idx, dtype=dtype)

        with self.test_session() as sess:
            x_tensor = tf.convert_to_tensor(x)
            y_tensor = tf.convert_to_tensor(y)
            out_tensor, idx_tensor = tf.listdiff(x_tensor, y_tensor)
            tf_out, tf_idx = sess.run([out_tensor, idx_tensor])

        self.assertAllEqual(tf_out, out)
        self.assertAllEqual(tf_idx, idx)
        self.assertEqual(1, out_tensor.get_shape().ndims)
        self.assertEqual(1, idx_tensor.get_shape().ndims)
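For orientation, a hypothetical call to this helper (an illustrative invocation, not taken from the original test file) passes the expected difference values and their index positions explicitly:

    # listdiff([1, 2, 3, 4], [2, 4]) keeps [1, 3], found at positions 0 and 2 of x.
    self._testListDiff(x=[1, 2, 3, 4], y=[2, 4], out=[1, 3], idx=[0, 2])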
Example #3
 def testArithmeticRenames(self):
     with self.cached_session() as s:
         stuff = tf.split(1, 2, [[1, 2, 3, 4], [4, 5, 6, 7]])
         vals = s.run(stuff)
         self.assertAllEqual(vals, [[[1, 2], [4, 5]], [[3, 4], [6, 7]]])
         self.assertAllEqual(
             tf.neg(tf.mul(tf.add(1, 2), tf.sub(5, 3))).eval(), -6)
         self.assertAllEqual(
             s.run(tf.listdiff([1, 2, 3], [3, 3, 4]))[0], [1, 2])
         self.assertAllEqual(
             tf.list_diff([1, 2, 3], [3, 3, 4])[0].eval(), [1, 2])
         a = [[1., 2., 3.], [4., 5., 6.]]
         foo = np.where(np.less(a, 2), np.negative(a), a)
         self.assertAllEqual(
             tf.select(tf.less(a, 2), tf.neg(a), a).eval(), foo)
         self.assertAllEqual(tf.complex_abs(tf.constant(3 + 4.j)).eval(), 5)
Example #4
  def _testListDiff(self, x, y, out, idx, dtype=np.int32):
    x = np.array(x, dtype=dtype)
    y = np.array(y, dtype=dtype)
    out = np.array(out, dtype=dtype)
    idx = np.array(idx, dtype=dtype)

    with self.test_session() as sess:
      x_tensor = tf.convert_to_tensor(x)
      y_tensor = tf.convert_to_tensor(y)
      out_tensor, idx_tensor = tf.listdiff(x_tensor, y_tensor)
      tf_out, tf_idx = sess.run([out_tensor, idx_tensor])

    self.assertAllEqual(tf_out, out)
    self.assertAllEqual(tf_idx, idx)
    self.assertEqual(1, out_tensor.get_shape().ndims)
    self.assertEqual(1, idx_tensor.get_shape().ndims)
Example #5
    def _testListDiff(self, x, y, out, idx):
        for dtype in _TYPES:
            if dtype == tf.string:
                x = [tf.compat.as_bytes(str(a)) for a in x]
                y = [tf.compat.as_bytes(str(a)) for a in y]
                out = [tf.compat.as_bytes(str(a)) for a in out]

            with self.test_session() as sess:
                x_tensor = tf.convert_to_tensor(x, dtype=dtype)
                y_tensor = tf.convert_to_tensor(y, dtype=dtype)
                out_tensor, idx_tensor = tf.listdiff(x_tensor, y_tensor)
                tf_out, tf_idx = sess.run([out_tensor, idx_tensor])

            self.assertAllEqual(tf_out, out)
            self.assertAllEqual(tf_idx, idx)
            self.assertEqual(1, out_tensor.get_shape().ndims)
            self.assertEqual(1, idx_tensor.get_shape().ndims)
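Here _TYPES is a module-level constant defined elsewhere in the test file; a plausible definition (an assumption, not shown in the example) would enumerate the dtypes listdiff supports:

    # Assumed constant; the actual list in the source file may differ.
    _TYPES = [tf.int32, tf.int64, tf.float32, tf.float64, tf.string]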
Example #6
  def _testListDiff(self, x, y, out, idx):
    for dtype in _TYPES:
      if dtype == tf.string:
        x = [tf.compat.as_bytes(str(a)) for a in x]
        y = [tf.compat.as_bytes(str(a)) for a in y]
        out = [tf.compat.as_bytes(str(a)) for a in out]

      with self.test_session() as sess:
        x_tensor = tf.convert_to_tensor(x, dtype=dtype)
        y_tensor = tf.convert_to_tensor(y, dtype=dtype)
        out_tensor, idx_tensor = tf.listdiff(x_tensor, y_tensor)
        tf_out, tf_idx = sess.run([out_tensor, idx_tensor])

      self.assertAllEqual(tf_out, out)
      self.assertAllEqual(tf_idx, idx)
      self.assertEqual(1, out_tensor.get_shape().ndims)
      self.assertEqual(1, idx_tensor.get_shape().ndims)
Example #7
 def forward(self, phrase_max_size=1, composition_function='RNN', dim=100, batch_size=1, neg=1,
             freq_table=[], init_word_matrix=[], init_context_matrix=[], embedding_train=False):
     if len(init_word_matrix) > 0:
         embed = tf.Variable(initial_value=init_word_matrix, trainable=embedding_train, name='embed')
     else:
         embed = tf.Variable(initial_value=tf.random_uniform([len(freq_table), dim], -0.5 / dim, 0.5 / dim),
                             trainable=embedding_train, name='embed')
     self._embed = embed
     if len(init_context_matrix) > 0:
         context_emb = tf.Variable(initial_value=init_context_matrix, trainable=embedding_train, name='context_emb')
     else:
         context_emb = tf.Variable(initial_value=tf.zeros([len(freq_table), dim]),
                             trainable=embedding_train, name='context_emb')
     self._context_emb = context_emb
     holder, composed = self.construct_composition(phrase_max_size, composition_function, dim, batch_size, embedding_train)
     context_id = tf.placeholder(tf.int32, [batch_size])
     #negative sampling for each example
     sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
         true_classes=tf.reshape(tf.cast(context_id, dtype=tf.int64), [batch_size, 1]),
         num_true=1,
         num_sampled=neg+1,
         unique=True,
         range_max=len(freq_table),
         distortion=0.75,
         unigrams=freq_table))
     exclude_list = tf.cast(tf.constant([self._word_id['</s>']]), dtype=tf.int64) #exclude terminal node from negative sampling result
     sampled_ids, _ = tf.listdiff(sampled_ids, exclude_list)
     true_logit = {}
     negative_logit = {}
     true_context = tf.nn.embedding_lookup(self._context_emb, context_id)
     negative_context = tf.nn.embedding_lookup(self._context_emb, sampled_ids[:neg])
     for length, compose_embed in composed.iteritems():
         #calculate prob for positive example
         true_logit[length] = tf.reduce_sum(tf.mul(compose_embed, true_context), 1)
         negative_logit[length] = tf.matmul(compose_embed, negative_context, transpose_b=True)
     return holder, composed, context_id, true_logit, negative_logit
Example #8
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.constant([[2, 5, 3, -5], [0, 3, -2, 5], [4, 3, 5, 3], [6, 1, 4, 0]])
listx = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
listy = tf.constant([4, 5, 8, 9])

boolx = tf.constant([[True, False], [False, True]])

tf.argmin(x, 1).eval()  # Position of the minimum value in each row
tf.argmax(x, 1).eval()  # Position of the maximum value in each row
tf.listdiff(listx, listy)[0].eval()  # List differences
tf.where(boolx).eval()  # Show true values
tf.unique(listx)[0].eval()  # Unique values in list
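Run under the TF 1.x InteractiveSession created above, these expressions should evaluate roughly as follows (results worked out by hand from the constants; shown for orientation only):

tf.argmin(x, 1).eval()               # ==> [3, 2, 1, 3]
tf.argmax(x, 1).eval()               # ==> [1, 3, 2, 0]
tf.listdiff(listx, listy)[0].eval()  # ==> [1, 2, 3, 6, 7]
tf.where(boolx).eval()               # ==> [[0, 0], [1, 1]]
tf.unique(listx)[0].eval()           # ==> [1, 2, 3, 4, 5, 6, 7, 8]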
Example #9
 def test_ListDiff(self):
     if td._tf_version[:2] <= (0, 11):
         l = np.random.randint(0, 5, 100)
         t1, t2 = tf.listdiff(l, l[::-2])
         self.check(t1)
         self.check(t2)
Example #10
import tensorflow as tf
import numpy as np

# tf.argmin
x = np.random.rand(10, 5, 4)
z_argmin = tf.argmin(x, dimension=0)

# tf.argmax
x = np.random.rand(10, 5, 4)
z_argmax = tf.argmax(x, dimension=1)

# tf.listdiff
x = np.random.randint(0, 10, 100)
y = np.random.randint(0, 10, 10)
z_listdiff = tf.listdiff(x, y)

# tf.where
x = np.random.randint(0, 2, 10 * 5 * 4)
z = np.empty(10 * 5 * 4, dtype=np.bool)
for i, x_ in enumerate(x):
    if x_ > 0:
        z[i] = True
    else:
        z[i] = False
z = z.reshape((10, 5, 4))
z_where = tf.where(z)

# tf.unique
x = np.random.randint(0, 10, 100)
z_unique = tf.unique(x)
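These calls only build graph nodes; under TF 1.x they would still have to be evaluated in a session. A minimal sketch of doing so (not part of the original snippet):

with tf.Session() as sess:
    out, idx = sess.run(z_listdiff)        # values of x not in y, and their positions in x
    coords = sess.run(z_where)             # coordinates of the True entries of z
    uniques, mapping = sess.run(z_unique)  # unique values of x and, for each element, its index in uniques

(The boolean mask z above could equivalently be computed as x.reshape((10, 5, 4)) > 0.)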
Example #11
 def test_ListDiff(self):
     l = np.random.randint(0, 5, 100)
     t1, t2 = tf.listdiff(l, l[::-2])
     self.check(t1)
     self.check(t2)
Example #12
import tensorflow as tf 
sess = tf.InteractiveSession()
x = tf.constant([[2, 5, 3, -5], 
                 [0, 3,-2,  5], 
                 [4, 3, 5,  3], 
                 [6, 1, 4,  0]]) 
listx = tf.constant([1,2,3,4,5,6,7,8])
listy = tf.constant([4,5,8,9])

boolx = tf.constant([[True,False], [False,True]])

tf.argmin(x, 1).eval() # Position of the minimum value in each row
tf.argmax(x, 1).eval() # Position of the maximum value in each row
tf.listdiff(listx, listy)[0].eval() # List differences
tf.where(boolx).eval() # Show true values
tf.unique(listx)[0].eval() # Unique values in list
Example #13
    tf.unsorted_segment_sum(data, segment_ids,
    num_segments, name=None) 	Similar to tf.segment_sum, except that
    the ids in segment_ids may appear in any order.
    tf.sparse_segment_sum(data, indices,
    segment_ids, name=None) 	Computes a sparse segmented sum over the input.
    c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
    # Select two rows, one segment.
    tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
    ==> [[0 0 0 0]]
    Takes the rows of the original data at indices [0, 1] and sums them
    according to the grouping given by segment_ids.
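    For comparison, here is a minimal sketch of tf.unsorted_segment_sum on the same constant (an illustrative example, not part of the original notes; assumes a TF 1.x session):
    c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
    # Segment ids need not be sorted: rows 0 and 2 fall into segment 0, row 1 into segment 1.
    tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
    ==> [[ 6  8 10 12]
         [-1 -2 -3 -4]]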

7. Sequence Comparison and Indexing
    tf.argmin(input, dimension, name=None) 	Returns the index of the smallest value of input along dimension.
    tf.argmax(input, dimension, name=None) 	Returns the index of the largest value of input along dimension.
    tf.listdiff(x, y, name=None) 	Returns the values of x that are not in y, together with their indices in x.
    tf.where(input, name=None) 	Returns the coordinates of the True entries in a bool tensor.
    # 'input' tensor is
    # [[True, False]
    #  [True, False]]
    # 'input' has two 'True' entries, so two coordinates are returned.
    # 'input' has rank 2, so each coordinate has two components.
    where(input) ==>
    [[0, 0],
     [1, 0]]
    tf.unique(x, name=None) 	Returns a tuple (y, idx): y holds the unique values of x,
    and idx maps each element of x to the index of its value in y.
    # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
    y, idx = unique(x)
    y ==> [1, 2, 4, 7, 8]
    idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
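    Since listdiff has no worked example above, here is a small sketch of its output in the same style (an illustrative example, not from the original notes):
    # tensor 'x' is [1, 2, 3, 4, 5, 6], tensor 'y' is [1, 3, 5]
    out, idx = listdiff(x, y)
    out ==> [2, 4, 6]    # values of x that are not in y
    idx ==> [1, 3, 5]    # their positions in x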