def compute_message_sent_by_one_layer(hidden, graph, n_dims, reuse=None):
    ''' hidden: bs x n_nodes x n_dims
    '''
    with tf.variable_scope('message', reuse=reuse):
        weight_rel_set = tf.get_variable(
            'weight_rel_set',
            shape=[graph.n_relations, n_dims, n_dims],
            initializer=tf.constant_initializer(
                np.tile(np.identity(n_dims),
                        (graph.n_relations, 1,
                         1))))  # n_relations x n_dims x n_dims
        bias_rel_set = tf.get_variable(
            'bias_rel_set',
            shape=[graph.n_relations, n_dims],
            initializer=tf.zeros_initializer())  # n_relations x n_dims

        weight_comb = tf.sparse_segment_sum(
            weight_rel_set, graph.unfolded_edges('rel_id_list'),
            graph.unfolded_edges('edge_id_list'))  # n_edges x n_dims x n_dims
        bias_comb = tf.sparse_segment_sum(
            bias_rel_set, graph.unfolded_edges('rel_id_list'),
            graph.unfolded_edges('edge_id_list'))  # n_edges x n_dims

        hidden_emb = tf.transpose(hidden, perm=[1, 0,
                                                2])  # n_nodes x bs x n_dims
        hidden_v1 = tf.gather(
            hidden_emb, graph.folded_edges('v1_list'))  # n_edges x bs x n_dims

        message = tf.tanh(
            tf.matmul(hidden_v1, weight_comb) +
            tf.expand_dims(bias_comb, axis=1))  # n_edges x bs x n_dims
        message = tf.transpose(message, perm=[1, 0,
                                              2])  # bs x n_edges x n_dims
        return message
Example #3
def main_model(u0, u1, ad0, ad1, label, ids0, ids1, gear_inputs):
    graph0 = tf.sparse_segment_sum(gear_inputs[0],
                                   tf.range(tf.shape(gear_inputs[0])[0]),
                                   ids0,
                                   num_segments=tf.shape(label)[0])
    graph1 = tf.sparse_segment_sum(gear_inputs[1],
                                   tf.range(tf.shape(gear_inputs[1])[0]),
                                   ids1,
                                   num_segments=tf.shape(label)[0])
    input_layer = tf.concat([u0, u1, ad0, ad1, graph0, graph1], -1)
    l1 = fc(input_layer, main_input_length, 100, "mainfc1")
    l2 = fc(l1, 100, 10, "mainfc2")
    l3 = fc(l2, 10, 1, "mainfc3")
    return tf.reduce_sum(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=l3, labels=label))
def compute_edge_attention_by_one_layer(hidden, graph, n_dims, reuse=None):
    ''' hidden: bs x n_nodes x n_dims
    '''
    with tf.variable_scope('edge_attention', reuse=reuse):
        weight_rel_set = tf.get_variable(
            'weight_rel_set',
            shape=[graph.n_relations, n_dims, n_dims],
            initializer=tf.constant_initializer(
                np.tile(np.identity(n_dims),
                        (graph.n_relations, 1,
                         1))))  # n_relations x n_dims x n_dims
        weight_comb = tf.sparse_segment_sum(
            weight_rel_set, graph.unfolded_edges('rel_id_list'),
            graph.unfolded_edges('edge_id_list'))  # n_edges x n_dims x n_dims

        hidden_emb = tf.transpose(hidden, perm=[1, 0,
                                                2])  # n_nodes x bs x n_dims
        hidden_v1 = tf.gather(
            hidden_emb, graph.folded_edges('v1_list'))  # n_edges x bs x n_dims
        hidden_v2 = tf.gather(
            hidden_emb, graph.folded_edges('v2_list'))  # n_edges x bs x n_dims

        edge_attention = tf.nn.softplus(
            tf.reduce_sum(tf.matmul(hidden_v1, weight_comb) * hidden_v2,
                          axis=2))  # n_edges x bs
        edge_attention_sum = tf.segment_sum(
            edge_attention, graph.folded_edges('v1_list'))  # n_nodes x bs
        edge_attention_sum = tf.gather(
            edge_attention_sum, graph.folded_edges('v1_list'))  # n_edges x bs
        edge_attention_nor = tf.transpose(edge_attention /
                                          edge_attention_sum)  # bs x n_edges

        return edge_attention_nor
Example #5
def embedding_lookup_sparse_sumexp(params, sp_ids, name=None):
    segment_ids = sp_ids.indices[:, 0]
    if segment_ids.dtype != tf.int32:
        segment_ids = tf.cast(segment_ids, tf.int32)

    ids = sp_ids.values
    ids, idx = tf.unique(ids)

    embeddings = tf.nn.embedding_lookup(params, ids)
    embeddings = tf.exp(embeddings)
    embeddings = tf.sparse_segment_sum(embeddings, idx, segment_ids, name=name)

    return embeddings
import tensorflow as tf
import numpy as np

input_a = np.array([[1, 1, 2], [2, 3, 4], [3, 1, 1], [2, 4, 6]])
a_seg_sum = tf.segment_sum(data=input_a, segment_ids=[0, 1, 1, 1])
a_seg_prod = tf.segment_prod(data=input_a, segment_ids=[0, 0, 1, 1])
a_seg_max = tf.segment_max(data=input_a, segment_ids=[0, 0, 0, 1])
a_seg_min = tf.segment_min(data=input_a, segment_ids=[1, 1, 1, 1])
a_seg_mean = tf.segment_mean(data=input_a, segment_ids=[0, 0, 0, 1])
a_seg_sum_num = tf.unsorted_segment_sum(data=input_a,
                                        segment_ids=[0, 1, 1, 0],
                                        num_segments=2)
a_sparse_seg_sum = tf.sparse_segment_sum(data=input_a,
                                         indices=[0, 1, 2],
                                         segment_ids=[0, 0, 1])

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    print(sess.run(a_seg_sum), '\n', sess.run(a_seg_prod), '\n',
          sess.run(a_seg_max), '\n', sess.run(a_seg_min))
    print(sess.run(a_seg_mean), '\n', sess.run(a_seg_sum_num), '\n',
          sess.run(a_sparse_seg_sum))
import tensorflow as tf
"""tf.sparse_segment_sum(data, indices, segment_ids, name=None)
功能:tensor进行拆分后求和。和segment_sum类似,只是segment_ids的rank数可以小于‘data’第0维度数。
输入:indices:选择第0维度参与运算的编号。"""

a = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
z = tf.sparse_segment_sum(a, tf.constant([0, 1]), tf.constant([0, 0]))
# Select the first two rows and sum them into a single segment
z2 = tf.sparse_segment_sum(a, tf.constant([0, 1]), tf.constant([0, 1]))
# Select the first two rows; each forms its own segment, so both are returned unchanged
z3 = tf.sparse_segment_sum(a, tf.constant([0, 2]), tf.constant([0, 1]))
# Select rows 0 and 2; each forms its own segment, so both are returned unchanged
z4 = tf.sparse_segment_sum(a, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# Select all three rows; rows 0 and 1 are summed into segment 0, row 2 is kept as-is in segment 1
sess = tf.Session()
print(sess.run(z))
print(sess.run(z2))
print(sess.run(z3))
print(sess.run(z4))
sess.close()

# z==>[[6 8 10 12]]
# z2==>[[1 2 3 4]
#       [5 6 7 8]]
# z3==>[[1 2 3 4]
#       [9 10 11 12]]
# z4==>[[6 8 10 12]
#       [9 10 11 12]]
Example #10
    def AddTraining(self,
                    task_context,
                    batch_size,
                    learning_rate=0.1,
                    decay_steps=4000,
                    momentum=None,
                    corpus_name='documents'):
        with tf.name_scope('training'):
            n = self.training
            n['accumulated_alive_steps'] = self._AddVariable(
                [batch_size], tf.int32, 'accumulated_alive_steps',
                tf.zeros_initializer())
            n.update(self._AddBeamReader(task_context, batch_size,
                                         corpus_name))
            # This adds a required 'step' node too:
            learning_rate = tf.constant(learning_rate, dtype=tf.float32)
            n['learning_rate'] = self._AddLearningRate(learning_rate,
                                                       decay_steps)
            # Call BuildNetwork *only* to set up the params outside of the main loop.
            self._BuildNetwork(list(n['features']))

            n.update(
                self._BuildSequence(batch_size, self._max_steps, n['features'],
                                    n['state']))

            flat_concat_scores = tf.reshape(n['concat_scores'], [-1])
            (indices_and_paths, beams_and_slots, n['gold_slot'],
             n['beam_path_scores']) = gen_parser_ops.beam_parser_output(
                 n['state'])
            n['indices'] = tf.reshape(tf.gather(indices_and_paths, [0]), [-1])
            n['path_ids'] = tf.reshape(tf.gather(indices_and_paths, [1]), [-1])
            n['all_path_scores'] = tf.sparse_segment_sum(
                flat_concat_scores, n['indices'], n['path_ids'])
            n['beam_ids'] = tf.reshape(tf.gather(beams_and_slots, [0]), [-1])
            n.update(AddCrossEntropy(batch_size, n))

            if self._only_train:
                trainable_params = {
                    k: v
                    for k, v in self.params.iteritems()
                    if k in self._only_train
                }
            else:
                trainable_params = self.params
            for p in trainable_params:
                tf.logging.info('trainable_param: %s', p)

            regularized_params = [
                tf.nn.l2_loss(p) for k, p in trainable_params.iteritems()
                if k.startswith('weights') or k.startswith('bias')
            ]
            l2_loss = 1e-4 * tf.add_n(
                regularized_params) if regularized_params else 0

            n['cost'] = tf.add(n['cross_entropy'], l2_loss, name='cost')

            n['gradients'] = tf.gradients(n['cost'], trainable_params.values())

            with tf.control_dependencies([n['alive_steps']]):
                update_accumulators = tf.group(
                    tf.assign_add(n['accumulated_alive_steps'],
                                  n['alive_steps']))

            def ResetAccumulators():
                return tf.assign(n['accumulated_alive_steps'],
                                 tf.zeros([batch_size], tf.int32))

            n['reset_accumulators_func'] = ResetAccumulators

            optimizer = tf.train.MomentumOptimizer(
                n['learning_rate'], momentum, use_locking=self._use_locking)
            train_op = optimizer.minimize(n['cost'],
                                          var_list=trainable_params.values())
            for param in trainable_params.values():
                slot = optimizer.get_slot(param, 'momentum')
                self.inits[slot.name] = state_ops.init_variable(
                    slot, tf.zeros_initializer())
                self.variables[slot.name] = slot

            def NumericalChecks():
                return tf.group(*[
                    tf.check_numerics(param,
                                      message='Parameter is not finite.')
                    for param in trainable_params.values()
                    if param.dtype.base_dtype in [tf.float32, tf.float64]
                ])

            check_op = cf.cond(
                tf.equal(tf.mod(self.GetStep(), self._check_every), 0),
                NumericalChecks, tf.no_op)
            avg_update_op = tf.group(*self._averaging.values())
            train_ops = [train_op]
            if self._check_parameters:
                train_ops.append(check_op)
            if self._use_averaging:
                train_ops.append(avg_update_op)
            with tf.control_dependencies([update_accumulators]):
                n['train_op'] = tf.group(*train_ops, name='train_op')
            n['alive_steps'] = tf.identity(n['alive_steps'],
                                           name='alive_steps')
        return n
Example #11
File: ops.py  Project: kestrelm/tfdeploy
def test_SparseSegmentSum(self):
    t = tf.sparse_segment_sum(self.random(4, 3, 2), [0, 2, 3], [0, 1, 1])
    self.check(t)
Example #12
# tf.segment_mean
x = np.random.rand(10, 4, 3)
segment_ids = [0, 0, 0, 1, 2, 2, 3, 4, 5, 5]
z_segment_mean = tf.segment_mean(x, segment_ids)

# tf.unsorted_segment_sum
x = np.random.rand(10, 4, 3)
segment_ids = [5, 0, 0, 2, 1, 2, 3, 4, 0, 5]
num_segments = np.max(segment_ids) + 1
z_unsorted_segment_sum = tf.unsorted_segment_sum(x, segment_ids, num_segments)

# tf.sparse_segment_sum
x = np.random.rand(10, 4, 3)
indices = [0, 2, 3, 4]
segment_ids = [0, 0, 1, 1]
z_sparse_segment_sum = tf.sparse_segment_sum(x, indices, segment_ids)

# tf.sparse_segment_mean
x = np.random.rand(10, 4, 3)
indices = [0, 2, 3, 4]
segment_ids = [0, 1, 1, 1]
z_sparse_segment_mean = tf.sparse_segment_mean(x, indices, segment_ids)

with tf.Session() as sess:
    print("tf.segment_mean")
    print(sess.run(z_segment_mean))

    print("tf.unsorted_segment_sum")
    print(sess.run(z_unsorted_segment_sum))

    print("tf.sparse_segment_sum")
    print(sess.run(z_sparse_segment_sum))

    print("tf.sparse_segment_mean")
    print(sess.run(z_sparse_segment_mean))
Example #13
    Here the ids are integers, and the largest id must not exceed the number of segments.
    c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
    tf.segment_sum(c, tf.constant([0, 0, 1]))
    ==> [[0 0 0 0]
         [5 6 7 8]]
    The example above has two ids, [0, 1]: rows sharing an id are summed and placed
    at that id in the result, and segment_ids must be non-decreasing.
    tf.segment_prod(data, segment_ids, name=None)    product of each segment defined by segment_ids
    tf.segment_min(data, segment_ids, name=None)     minimum of each segment defined by segment_ids
    tf.segment_max(data, segment_ids, name=None)     maximum of each segment defined by segment_ids
    tf.segment_mean(data, segment_ids, name=None)    mean of each segment defined by segment_ids
    tf.unsorted_segment_sum(data, segment_ids,
    num_segments, name=None)    like tf.segment_sum, except the ids in segment_ids
    may appear in any order
    tf.sparse_segment_sum(data, indices,
    segment_ids, name=None)     segmented sum over a sparse subset of the input
    (a runnable sketch of these ops follows below)
    c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
    # Select two rows, one segment.
    tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
    ==> [[0 0 0 0]]
    The rows of data at indices [0, 1] are selected and then summed according to the
    grouping given by segment_ids.
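A minimal runnable sketch of the segment ops listed above, using the same TF 1.x session style as the other snippets on this page; the constant `c` and the segment ids are chosen here only for illustration, and the expected values are shown in the comments:

import tensorflow as tf

c = tf.constant([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]])
ids = tf.constant([0, 0, 1])  # rows 0 and 1 form segment 0, row 2 forms segment 1

seg_prod = tf.segment_prod(c, ids)
seg_min = tf.segment_min(c, ids)
seg_max = tf.segment_max(c, ids)
seg_mean = tf.segment_mean(tf.cast(c, tf.float32), ids)
# for unsorted_segment_sum the ids may appear in any order,
# but num_segments must be given explicitly
unsorted = tf.unsorted_segment_sum(c, tf.constant([1, 0, 1]), num_segments=2)

with tf.Session() as sess:
    print(sess.run(seg_prod))   # [[ -1  -4  -9 -16] [  5   6   7   8]]
    print(sess.run(seg_min))    # [[-1 -2 -3 -4] [ 5  6  7  8]]
    print(sess.run(seg_max))    # [[1 2 3 4] [5 6 7 8]]
    print(sess.run(seg_mean))   # [[0. 0. 0. 0.] [5. 6. 7. 8.]]
    print(sess.run(unsorted))   # [[-1 -2 -3 -4] [ 6  8 10 12]]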

7. Sequence Comparison and Indexing
    tf.argmin(input, dimension, name=None)    returns the indices of the minimum values of input
    tf.argmax(input, dimension, name=None)    returns the indices of the maximum values of input
    tf.listdiff(x, y, name=None)              returns the elements of x that do not appear in y, with their indices
    tf.where(input, name=None)                returns the positions of the True entries of a bool tensor
    # 'input' tensor is
    # [[True, False]
    #  [True, False]]
    (a sketch that also prints the resulting indices follows below)
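The comment above only shows the input. A minimal sketch, in the same TF 1.x session style as the rest of this page, that prints the corresponding outputs for tf.where, tf.argmax and tf.argmin (the tensor `x` is an illustrative example, and axis is the newer name for the `dimension` argument listed above):

import tensorflow as tf

bool_input = tf.constant([[True, False],
                          [True, False]])
x = tf.constant([[10, 30, 20],
                 [5, 15, 25]])

with tf.Session() as sess:
    print(sess.run(tf.where(bool_input)))   # [[0 0] [1 0]] -- coordinates of the True entries
    print(sess.run(tf.argmax(x, axis=1)))   # [1 2] -- column of the max in each row
    print(sess.run(tf.argmin(x, axis=1)))   # [0 0] -- column of the min in each row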
Example #14
import tensorflow as tf


with tf.Session():
    c = tf.constant([[1., 2., 3., 4.], [-1., -2., -3., -5.], [5., 6., 7., 8.]])
    print(tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])).eval())