Пример #1
0
 def testSegmentMinGradient(self):
   """Numerically check the gradient of tf.segment_min on two segments."""
   values = tf.constant([1.0, 2.0, 3.0], dtype=tf.float32)
   ids = tf.constant([0, 0, 1], dtype=tf.int64)
   mins = tf.segment_min(values, ids)
   with self.test_session():
     # Max gap between the theoretical and numeric Jacobian.
     jacobian_err = tf.test.compute_gradient_error(values, [3], mins, [2])
     self.assertLess(jacobian_err, 1e-4)
Пример #2
0
 def testSegmentMinGradient(self):
     """Numerically check the gradient of tf.segment_min on two segments."""
     data = tf.constant([1.0, 2.0, 3.0], dtype=tf.float32)
     segment_ids = tf.constant([0, 0, 1], dtype=tf.int64)
     segment_min = tf.segment_min(data, segment_ids)
     with self.test_session():
         # Max elementwise gap between theoretical and numeric Jacobian.
         error = tf.test.compute_gradient_error(data, [3], segment_min, [2])
         self.assertLess(error, 1e-4)
Пример #3
0
def matrix_segment():
    """Log every tf.segment_* reduction of a sample vector.

    Groups the 8 values of X by the sorted segment ids in s_id and logs
    the value of each segment reduction.

    :return: None
    """
    isses = tf.InteractiveSession()
    X = tf.constant([5., 1., 7., 2., 3., 4., 1., 3.], dtype=tf.float32)
    s_id = [0, 0, 0, 1, 2, 2, 3, 3]

    logger.info("X\n%s" % X)
    logger.info("s_id\n%s" % s_id)
    # Fix: segment_sum and unsorted_segment_sum were previously logged
    # without .eval(), so the Tensor object (not its value) hit the log.
    logger.info("tf.segment_sum(X,s_id)\n {0}".format(
        tf.segment_sum(X, s_id).eval()))
    logger.info("tf.segment_mean(X,s_id)\n {0}".format(
        tf.segment_mean(X, s_id).eval()))
    logger.info("tf.segment_max(X, s_id)\n {0}".format(
        tf.segment_max(X, s_id).eval()))
    logger.info("tf.segment_min(X, s_id)\n {0}".format(
        tf.segment_min(X, s_id).eval()))
    logger.info("tf.segment_prod(X, s_id)\n {0}".format(
        tf.segment_prod(X, s_id).eval()))
    # Fix: num_segments was 2, but s_id contains ids up to 3, which makes
    # unsorted_segment_sum fail at runtime; it must cover every id.
    logger.info("tf.unsorted_segment_sum(X, s_id)\n {0}".format(
        tf.unsorted_segment_sum(X, s_id, 4).eval()))

    # c = tf.constant([0., 1.], dtype=tf.float32)
    # logger.info("tf.sparse_segment_sum(X, s_id)\n {0}".format(tf.sparse_segment_sum(X, c, s_id)))
    # logger.info("tf.sparse_segment_mean(X, s_id)\n {0}".format(tf.sparse_segment_mean(X, c, s_id)))
    # logger.info("tf.sparse_segment_sqrt_n(X, s_id)\n {0}".format(tf.sparse_segment_sqrt_n(X, c, s_id)))
    isses.close()
Пример #4
0
        def do_segments(values):
            """Actually does segmentation.

            Finds runs of equal consecutive values and reports, per run,
            a representative index plus the run length.

            Args:
              values: 1D tensor of any type. Non-empty.

            Returns:
              run_centers: int32 tensor; the start index of each run
                (STARTS mode) or the floored mean index (CENTERS mode).
              run_lengths: int32 tensor with the length of each run.

            Raises:
              ValueError: if mode is not recognized.
            """
            # NOTE(review): `mode` and `SegmentsMode` come from the
            # enclosing scope (not visible here).
            length = tf.shape(values)[0]
            values = tf.convert_to_tensor(values)
            # The first run has id 0, so we don't increment the id.
            # Otherwise, the id is incremented when the value changes.
            run_start_bool = tf.concat(
                [[False], tf.not_equal(values[1:], values[:-1])], axis=0)
            # Cumulative sum the run starts to get the run ids.
            segment_ids = tf.cumsum(tf.cast(run_start_bool, tf.int32))
            if mode is SegmentsMode.STARTS:
                # Smallest index inside each run == run start.
                run_centers = tf.segment_min(tf.range(length), segment_ids)
            elif mode is SegmentsMode.CENTERS:
                # Mean index inside each run, floored to an integer position.
                run_centers = tf.segment_mean(
                    tf.cast(tf.range(length), tf.float32), segment_ids)
                run_centers = tf.cast(tf.floor(run_centers), tf.int32)
            else:
                raise ValueError("Unexpected mode: %s" % mode)
            # Number of elements in each run.
            run_lengths = tf.segment_sum(tf.ones([length], tf.int32),
                                         segment_ids)
            return run_centers, run_lengths
Пример #5
0
    def reduce_per_answer_loss(self, loss):
        """Reduce per-answer-alternative losses to one scalar.

        Args:
            loss: 1-D tensor with one loss value per answer alternative.

        Returns:
            Scalar tensor: mean over questions of the per-question loss.
        """
        # Get any of the alternatives right
        loss = tf.segment_min(loss, self.answer_partition)
        # Get all of the answers right
        loss = tf.segment_mean(loss, self.answer_question_partition)
        return tf.reduce_mean(loss)
Пример #6
0
def get_feature_with_length(features, name):
    """Reads out embeddings and sequence lengths for a symbol modality
    from the features.

    Args:
      features (dict): Dictionary with features.
      name (string): Feature to extract (will read features[name] and maybe
        features[name_raw]).

    Returns:
      Pair of (embed, length) tensors, where `embed` is a (batch_size,
      max_len, embed_size) float32 tensor with embeddings, and `length`
      is a (batch_size,) int32 tensor with sequence lengths.
    """
    # features[name] shape: (batch_size, max_len, 1, embed_size)
    embed = common_layers.flatten4d3d(features[name])
    # embed shape: (batch_size, max_len, embed_size)
    if "%s_raw" % name in features:
        raw = tf.squeeze(features["%s_raw" % name], axis=[2, 3])
        not_padding = tf.not_equal(raw, text_encoder.PAD_ID)
    else:
        tf.logging.warn(
            "Feature %s is not exposed by T2T in raw form which makes it difficult "
            "to extract sequence lengths. Consider using the T2T fork at "
            "https://github.com/fstahlberg/tensor2tensor. For now we back off to a "
            "more ineffective way to get sequence lengths to maintain compatibility "
            "with the T2T master fork.", name)
        # Fallback: treat near-zero embedding vectors as padding.
        not_padding = tf.greater(tf.reduce_sum(tf.abs(embed), axis=2),
                                 0.000001)
    # Append a guaranteed-padding column so every row has at least one
    # padding position; the first padding column per row is the length.
    not_padding_with_guardian = tf.pad(not_padding, [[0, 0], [0, 1]])
    indices = tf.where(tf.logical_not(not_padding_with_guardian))
    length = tf.segment_min(indices[:, 1], indices[:, 0])
    return embed, tf.cast(length, tf.int32)
Пример #7
0
def get_len(sen, eos):
    """Return, per row of `sen`, the column of the first `eos` token.

    Assumes every row contains at least one `eos` entry (rows without
    one would be missing from the result).
    """
    eos_positions = tf.where(tf.equal(sen, eos))
    # Lowest matching column index within each row.
    return tf.segment_min(eos_positions[:, 1], eos_positions[:, 0])
Пример #8
0
 def testSegmentMinGradientWithTies(self):
   """Gradient check when two equal inputs tie for the segment minimum."""
   inputs = tf.constant([1.0], dtype=tf.float32)
   # Duplicate the single input so both elements of the segment tie.
   # (tf.concat_v2 is a transitional pre-1.0 TensorFlow API.)
   data = tf.concat_v2([inputs, inputs], 0)
   segment_ids = tf.constant([0, 0], dtype=tf.int64)
   segment_min = tf.segment_min(data, segment_ids)
   with self.test_session():
     error = tf.test.compute_gradient_error(inputs, [1], segment_min, [1])
     self.assertLess(error, 1e-4)
Пример #9
0
 def testSegmentMinGradientWithTies(self):
   """Gradient check when two equal inputs tie for the segment minimum."""
   inputs = tf.constant([1.0], dtype=tf.float32)
   # Duplicate the single input so both elements of the segment tie.
   # (Legacy pre-1.0 tf.concat signature: axis argument comes first.)
   data = tf.concat(0, [inputs, inputs])
   segment_ids = tf.constant([0, 0], dtype=tf.int64)
   segment_min = tf.segment_min(data, segment_ids)
   with self.test_session():
     error = tf.test.compute_gradient_error(inputs, [1], segment_min, [1])
     self.assertLess(error, 1e-4)
Пример #10
0
def get_first_occurrence_indices(reference, symbol, optimize_for_tpu=False):
    """For each row in reference, get index after the first occurrence of symbol.

  If symbol is not present on a row, return reference.shape[1] instead.

  Args:
    reference: [B, T] tensor of elements of the same type as symbol.
    symbol: int or [] scalar tensor of the same dtype as symbol.
    optimize_for_tpu: bool, whether to use a TPU-capable variant.

  Returns:
    A [B] reference of tf.int32 where x[i] is such that
    reference[i, x[i]-1] == symbol, and reference[i, j] != symbol
    for j<i-1. If symbol is not present on row i then x[i] = T.
  """
    if optimize_for_tpu:
        # Run code which can be compiled on TPU.
        # Transpose reference to [T, B].
        reference = tf.transpose(reference, [1, 0])
        range_tensor = tf.range(reference.shape.as_list()[0])
        # [T, B] grid of time indices, one column per batch element.
        indexes = tf.stack([range_tensor] * reference.shape.as_list()[1], 1)
        # Broadcast symbol to one scalar per batch element.
        symbol = tf.stack([symbol] * reference.shape.as_list()[1], 0)

        # Default answer when the symbol never occurs: T (the row length).
        initial_indices = tf.constant(reference.shape.as_list()[0],
                                      shape=[reference.shape.as_list()[1]],
                                      dtype=tf.int32)

        # We want a function which moves backwards.
        def fn(current_index, elems):
            ref, ind = elems
            return tf.where(tf.equal(ref, symbol), ind + 1, current_index)

        # Scan from the last timestep backwards so the earliest match wins.
        min_indexes = tf.scan(fn, (reference, indexes),
                              initializer=initial_indices,
                              parallel_iterations=1,
                              reverse=True)
        return min_indexes[0]

    batch_size, max_length = reference.get_shape().as_list()
    symbol = tf.convert_to_tensor(symbol)
    symbol.shape.assert_is_compatible_with([])
    # Add symbol at the end of each row, to make sure tf.where works.
    tensor = tf.concat(
        [reference, tf.tile(symbol[None, None], [batch_size, 1])], axis=1)
    index_all_occurrences = tf.where(tf.equal(tensor, symbol))
    index_all_occurrences = tf.cast(index_all_occurrences, tf.int32)
    # `index_all_occurrences` is a [N, 2] tensor with coordinates of all positions
    # of `symbol` in `tensor`. So N will be >= batch size since there can be
    # several `symbol` in one row of tensor. We need to take only the position
    # of the first occurrence for each row. `segment_min` does that, taking the
    # lowest column index for each row index.
    index_first_occurrences = tf.segment_min(index_all_occurrences[:, 1],
                                             index_all_occurrences[:, 0])
    index_first_occurrences.set_shape([batch_size])
    # "+1" gives the index *after* the occurrence, clipped to row length.
    index_first_occurrences = tf.minimum(index_first_occurrences + 1,
                                         max_length)
    return index_first_occurrences
Пример #11
0
    def _build_graph(self):
        """Build loss, optimizer, and scoring ops for the embedding model.

        NOTE(review): the rank-weighted hinge term below looks WARP-like
        (log((n_items - 1) / (rank + 1)) weighting) — confirm intent.
        """
        # Let
        # N = batch size,
        # K = embedding size,
        # W = number of negative samples per a user-positive-item pair

        # user embedding (N, K)
        users = tf.nn.embedding_lookup(self.user_embeddings, self.user_positive_items_pairs[:, 0], name="users")
        user_reg = tf.reduce_sum(tf.square(users), 1, name="user_reg")

        # positive item embedding (N, K)
        pos_items = tf.nn.embedding_lookup(self.item_embeddings, self.user_positive_items_pairs[:, 1], name="pos_items")
        pos_reg = tf.reduce_sum(tf.square(pos_items), 1, name="pos_reg")
        pos_bias = tf.squeeze(tf.nn.embedding_lookup(self.item_bias, self.user_positive_items_pairs[:, 1], name="pos_bias"))
        # positive item to user distance (N)
        pos_distances = tf.reduce_sum(tf.multiply(users, pos_items), 1 ) + pos_bias

        # negative item embedding (N, K, W)
        neg_items = tf.transpose(tf.nn.embedding_lookup(self.item_embeddings, self.negative_samples), (0, 2, 1), name="neg_items")
        neg_reg = tf.reduce_sum(tf.square(neg_items), 1, name="neg_reg")
        neg_bias = tf.squeeze(tf.nn.embedding_lookup(self.item_bias, self.negative_samples), name="neg_bias")
        # distance to negative items (N x W)
        distance_to_neg_items = tf.reduce_sum(tf.multiply(tf.expand_dims(users, -1), neg_items), 1) + neg_bias

        # Margin violations: negatives scoring within `margin` of the positive.
        impostors = tf.multiply(self.negative_flags, (tf.expand_dims(-pos_distances, -1) + distance_to_neg_items + self.margin))
        indexes = tf.where(tf.greater(impostors, 0))

        self.impostor_num = tf.shape(indexes)[0]
        self.impostor_log = tf.nn.moments(impostors, axes=[0, 1])

        # First (lowest-column) impostor per row of `impostors`.
        x_min_y = tf.segment_min(indexes[:, 1], indexes[:, 0])
        uni_x, _ = tf.unique(indexes[:,0])
        uni_y = tf.nn.embedding_lookup(x_min_y, uni_x)
        xy = tf.concat([tf.expand_dims(uni_x, -1), tf.expand_dims(uni_y, -1)], 1)

        impostor_xy = tf.gather_nd(impostors, xy)
        # Rank-based weight for each first impostor.
        rank = tf.log((self.n_items - 1) / tf.cast(uni_y + 1, tf.float32))

        # Clipped, rank-weighted hinge loss plus L2-style regularization.
        self.eloss = tf.reduce_sum(tf.clip_by_value(rank * impostor_xy, 0, 10))
        self.rloss = tf.reduce_sum(self.alpha * (tf.gather_nd(neg_reg, xy) + tf.nn.embedding_lookup(pos_reg, uni_x) + tf.nn.embedding_lookup(user_reg, uni_x)))

        self.loss = (self.eloss + self.rloss) / tf.cast(tf.shape(self.user_positive_items_pairs)[0], tf.float32)
        self.optimize = tf.train.AdamOptimizer(self.master_learning_rate).minimize(self.loss, var_list=[self.item_bias, self.item_embeddings, self.user_embeddings])

        # Scoring ops for evaluation:
        # (N_USER_IDS, 1, K)
        user = tf.expand_dims(tf.nn.embedding_lookup(self.user_embeddings, self.score_user_ids), 1)
        # (1, N_ITEM, K)
        item = tf.expand_dims(self.item_embeddings, 0)
        self.item_scores = tf.reduce_sum(tf.multiply(user, item), 2) + tf.squeeze(self.item_bias)

        self.topk = tf.nn.top_k(self.item_scores, self.n_items)
Пример #12
0
    def _comp_f(self):
        """
        Encodes all queries (including supporting queries)
        :return: encoded queries
        """
        # NOTE(review): uses pre-1.0 TF APIs (tf.pack == tf.stack,
        # axis-first tf.concat/tf.reverse_sequence signatures).
        with tf.device("/cpu:0"):
            max_length = tf.cast(tf.reduce_max(self._length), tf.int32)
            context_t = tf.transpose(self._context)
            # Trim to the longest sequence in the batch.
            context_t = tf.slice(context_t, [0, 0], tf.pack([max_length, -1]))
            embedded = tf.nn.embedding_lookup(self.input_embedding, context_t)
            embedded = tf.nn.dropout(embedded, self.keep_prob)
            batch_size = tf.shape(self._context)[0]
            batch_size_32 = tf.reshape(batch_size, [1])
            batch_size_64 = tf.cast(batch_size, tf.int64)

        with tf.device(self._device1):
            #use other device for backward rnn
            with tf.variable_scope("backward"):
                # Earliest span end per context.
                min_end = tf.segment_min(self._ends, self._span_context)
                init_state = tf.get_variable("init_state", [self._size], initializer=self._init)
                init_state = tf.reshape(tf.tile(init_state, batch_size_32), [-1, self._size])
                rev_embedded = tf.reverse_sequence(embedded, self._length, 0, 1)
                # TIME-MAJOR: [T, B, S]
                outs_bw = self._composition_function(rev_embedded, self._length - min_end, init_state)
                # reshape to all possible queries for all sequences. Dim[0]=batch_size*(max_length+1).
                # "+1" because we include the initial state
                outs_bw = tf.reshape(tf.concat(0, [tf.expand_dims(init_state, 0), outs_bw]), [-1, self._size])
                # gather respective queries via their lengths-start (because reversed sequence)
                lengths_aligned = tf.gather(self._length, self._span_context)
                out_bw = tf.gather(outs_bw, (lengths_aligned - self._ends) * batch_size_64 + self._span_context)

        with tf.device(self._device2):
            with tf.variable_scope("forward"):
                #e_inputs = [tf.reshape(e, [-1, self._size]) for e in tf.split(1, self._max_length, embedded)]
                # Latest span start per context.
                max_start = tf.segment_max(self._starts, self._span_context)
                init_state = tf.get_variable("init_state", [self._size], initializer=self._init)
                init_state = tf.reshape(tf.tile(init_state, batch_size_32), [-1, self._size])
                # TIME-MAJOR: [T, B, S]
                outs_fw = self._composition_function(embedded, max_start, init_state)
                # reshape to all possible queries for all sequences. Dim[0]=batch_size*(max_length+1).
                # "+1" because we include the initial state
                outs_fw = tf.reshape(tf.concat(0, [tf.expand_dims(init_state, 0), outs_fw]), [-1, self._size])
                # gather respective queries via their positions (with offset of batch_size*ends)
                out_fw = tf.gather(outs_fw, self._starts * batch_size_64 + self._span_context)
            # form query from forward and backward compositions
            query = tf.contrib.layers.fully_connected(tf.concat(1, [out_fw, out_bw]), self._size,
                                                      activation_fn=None, weights_initializer=None, biases_initializer=None)
            query = tf.add_n([query, out_bw, out_fw])

        return query
Пример #13
0
def get_eval_user_model(user_model, indices):
    """Gather, for each distinct batch row in `indices`, the user-model
    state at that row's earliest step.

    Args:
        user_model: tensor indexable as [batch, step, ...].
        indices: [N, 2] integer tensor of (batch, step) pairs, sorted by
            batch id (tf.segment_min requires sorted segment ids).

    Returns:
        Tuple (used_model, uniq_batch_idx, ori_batch_idx, step_idx).
    """
    batch_ids = tf.reshape(
        tf.slice(indices, begin=[0, 0], size=[tf.shape(indices)[0], 1]), [-1])
    step_ids = tf.reshape(
        tf.slice(indices, begin=[0, 1], size=[tf.shape(indices)[0], 1]), [-1])
    unique_batches, original_positions = tf.unique(batch_ids)
    # Earliest step observed for each batch id, restricted to the uniques.
    earliest_steps = tf.gather(tf.segment_min(step_ids, batch_ids),
                               unique_batches)
    lookup = tf.concat([
        tf.expand_dims(unique_batches, 1),
        tf.expand_dims(earliest_steps, 1),
    ], 1)
    selected = tf.gather_nd(user_model, lookup)
    return selected, unique_batches, original_positions, step_ids
Пример #14
0
def test_segment():
    """Print each tf.segment_* reduction of x grouped by seg_ids."""
    seg_ids = tf.constant([0, 1, 1, 2, 2])
    x = tf.constant([[2, 5, 3, -5], [0, 3, -2, 5], [4, 3, 5, 3], [6, 1, 4, 0],
                     [6, 1, 4, 0]])
    with tf.Session() as sess:
        # Sum, product, min, max, mean within each segment — in that order.
        for reduce_op in (tf.segment_sum, tf.segment_prod, tf.segment_min,
                          tf.segment_max, tf.segment_mean):
            print(reduce_op(x, seg_ids).eval())
Пример #15
0
def _form_triplet(tensors):
    """Returns triplet indices [ij, jk], where r_ij, r_jk < r_c"""
    # First-atom index of each pair.
    p_iind = tensors['ind_2'][:, 0]
    n_atoms = tf.shape(tensors['ind_1'])[0]
    n_pairs = tf.shape(tensors['ind_2'])[0]
    # 1-based absolute pair ids (1..n_pairs).
    p_aind = tf.cumsum(tf.ones(n_pairs, tf.int32))
    # Rank of each pair within its atom's group (0-based); assumes pairs
    # are sorted by p_iind (tf.segment_min requirement).
    p_rind = p_aind - tf.gather(tf.segment_min(p_aind, p_iind), p_iind)
    # Dense [n_atoms, max_pairs_per_atom] table of 1-based pair ids
    # (0 marks an empty slot, which tf.where skips below).
    t_dense = tf.scatter_nd(tf.stack([p_iind, p_rind], axis=1), p_aind,
                            [n_atoms, tf.reduce_max(p_rind) + 1])
    t_dense = tf.gather(t_dense, p_iind)
    t_index = tf.cast(tf.where(t_dense), tf.int32)
    t_ijind = t_index[:, 0]
    # Back to 0-based pair ids.
    t_ikind = tf.gather_nd(t_dense, t_index) - 1
    # Drop the degenerate (ij, ij) combinations.
    t_ind = tf.gather_nd(tf.stack([t_ijind, t_ikind], axis=1),
                         tf.where(tf.not_equal(t_ijind, t_ikind)))
    return t_ind
def xqa_min_crossentropy_span_loss(candidate_scores, span_candidates, answer_span, answer_to_question):
    """
    very common XQA loss function when predicting for entire spans
    """
    # Align candidate spans/scores with each answer alternative.
    aligned_spans = tf.gather(span_candidates, answer_to_question)
    aligned_scores = tf.gather(candidate_scores, answer_to_question)

    # Broadcast each answer span against all candidates of its question.
    expanded_answers = tf.expand_dims(answer_span, 1)

    # A candidate is a label iff it matches the answer span exactly.
    matches = tf.reduce_all(tf.equal(expanded_answers, aligned_spans), 2)
    labels = tf.cast(matches, tf.float32)

    per_answer = tf.nn.softmax_cross_entropy_with_logits(logits=aligned_scores, labels=labels)
    # Keep only the easiest (minimum-loss) alternative per question.
    best_per_question = tf.segment_min(per_answer, answer_to_question)
    return [tf.reduce_mean(best_per_question)]
def xqa_min_crossentropy_loss(start_scores, end_scores, answer_span, answer_to_question):
    """
    very common XQA loss function
    """
    # Split [N, 2] spans into separate start/end index vectors.
    start, end = [tf.squeeze(t, 1) for t in tf.split(answer_span, 2, 1)]

    batch_size1 = tf.shape(start)[0]
    batch_size2 = tf.unstack(tf.shape(start_scores))[0]
    # If scores are already one-per-answer, skip the per-question gather.
    is_aligned = tf.equal(batch_size1, batch_size2)

    start_scores = tf.cond(is_aligned, lambda: start_scores, lambda: tf.gather(start_scores, answer_to_question))
    end_scores = tf.cond(is_aligned, lambda: end_scores, lambda: tf.gather(end_scores, answer_to_question))
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=start_scores,
            labels=start) + \
           tf.nn.sparse_softmax_cross_entropy_with_logits(logits=end_scores, labels=end)
    # Keep only the easiest (minimum-loss) answer alternative per question.
    loss = tf.segment_min(loss, answer_to_question)
    return [tf.reduce_mean(loss)]
Пример #18
0
 def one_bp_iteration_network(self, xe_c2v_pre_iter, xe_0, alpha, beta):
     """One damped, staircase-quantized message-passing iteration.

     NOTE(review): looks like a min-sum BP (LDPC decoder) step — confirm.

     Args:
         xe_c2v_pre_iter: previous check-to-variable messages.
         xe_0: channel input term.
         alpha: damping weight on the previous messages.
         beta: staircase quantizer parameter.

     Returns:
         Updated messages xe_v_sumc.
     """
     # Quantized check-node inputs: channel term + damped prior messages.
     xe_c_sumv = self.staircase_quantizer(
         tf.add(tf.matmul(self.H_x_to_xe0, xe_0),
                alpha * tf.matmul(self.H_sumV_to_C, xe_c2v_pre_iter)), beta)
     xe_sign = tf.to_float(tf.sign(xe_c_sumv))
     # Sign product via complex exponentials: each negative sign adds a
     # pi phase, so real(exp(i*sum)) equals the product of the +/-1 signs.
     xe_sum_log_img = tf.matmul(
         self.H_sumC_to_V,
         tf.multiply(tf.truediv((1 - xe_sign), [2.0]), [3.1415926]))
     xe_sum_log_complex = tf.complex(
         tf.zeros([self.num_all_edges, self.batch_size]), xe_sum_log_img)
     xe_product = tf.real(tf.exp(xe_sum_log_complex))
     # Tiny nudge toward zero to cancel numeric error in the +/-1 product.
     xe_product_temp = tf.multiply(tf.sign(xe_product), -2e-7)
     xe_pd_modified = tf.add(xe_product, xe_product_temp)
     # Minimum message magnitude within each segment of gathered edges.
     xe_v_minc = tf.segment_min(
         tf.abs(tf.gather(xe_c_sumv, self.h_minC_to_V)),
         self.h_min_segmentIDs)
     xe_v_sumc = tf.multiply(xe_pd_modified, xe_v_minc)
     return xe_v_sumc
Пример #19
0
def get_sentence_length(raw_x):
    """Extracts sequence lengths from the raw feature.
  Example:
    raw_x = [
      [123, 3, 2, 0, 0],
      [321, 1, 0, 0, 0]]
    return:
      [3, 2]
  Args:
    raw_x: A [batch_size, max_length] int32 tensor

  Returns:
    A [batch_size] int32 tensor with sequence lengths
  """
    non_pad_mask = tf.not_equal(raw_x, PAD_ID)
    # Append a False column so every row has at least one padding position.
    guarded = tf.pad(non_pad_mask, [[0, 0], [0, 1]])
    pad_positions = tf.where(tf.logical_not(guarded))
    # The first padding column of each row is that row's length.
    return tf.segment_min(pad_positions[:, 1], pad_positions[:, 0])
Пример #20
0
def dense_to_sparse(tensor, eos_id, merge_repeated=True):
    """Convert a dense [batch, time] id tensor to a tf.SparseTensor.

    Trims each row after its first `eos_id` and, when `merge_repeated`,
    collapses consecutive duplicate ids (CTC-style).
    """
    if merge_repeated:
        added_values = tf.cast(tf.fill((tf.shape(tensor)[0], 1), eos_id), tensor.dtype)

        # merge consecutive values
        concat_tensor = tf.concat((tensor, added_values), axis=-1)
        # True where a value differs from its predecessor.
        diff = tf.cast(concat_tensor[:, 1:] - concat_tensor[:, :-1], tf.bool)

        # trim after first eos token (the appended column guarantees a hit)
        eos_indices = tf.where(tf.equal(concat_tensor, eos_id))
        first_eos = tf.segment_min(eos_indices[:, 1], eos_indices[:, 0])
        mask = tf.sequence_mask(first_eos, maxlen=tf.shape(tensor)[1])

        # Also drops -1 entries (presumably a padding sentinel — verify).
        indices = tf.where(diff & mask & tf.not_equal(tensor, -1))
        values = tf.gather_nd(tensor, indices)
        shape = tf.shape(tensor, out_type=tf.int64)

        return tf.SparseTensor(indices, values, shape)
    else:
        return tf.contrib.layers.dense_to_sparse(tensor, eos_id)
 def one_bp_iteration(self, xe_c2v_pre_iter, xe_0, llr_into_bp_net,
                      H_sumC_to_V, h_minC_to_V, h_min_segmentIDs,
                      H_sumV_to_C, H_xe_v_sumc_to_y):
     """One adaptively-quantized message-passing iteration.

     NOTE(review): looks like a min-sum BP (LDPC decoder) step — confirm.

     Returns:
         Tuple (xe_v_sumc, quantizer_index, syndrome_weight,
         xe_c_sumv_nonq, xe_c_sumv).
     """
     # Choose a quantizer based on the current decoding state.
     quantizer_index, syndrome_weight = self.get_quantizer_index(
         xe_c2v_pre_iter, H_xe_v_sumc_to_y, llr_into_bp_net)
     # Non-quantized check-node inputs, kept for the caller.
     xe_c_sumv_nonq = tf.add(xe_0, tf.matmul(H_sumV_to_C, xe_c2v_pre_iter))
     xe_c_sumv = self.quantizer(
         tf.add(xe_0, tf.matmul(H_sumV_to_C, xe_c2v_pre_iter)),
         quantizer_index)
     xe_sign = tf.to_float(tf.sign(xe_c_sumv))
     # Sign product via complex exponentials: each negative sign adds a
     # pi phase, so real(exp(i*sum)) equals the product of the +/-1 signs.
     xe_sum_log_img = tf.matmul(
         H_sumC_to_V,
         tf.multiply(tf.truediv((1 - xe_sign), [2.0]), [3.1415926]))
     xe_sum_log_complex = tf.complex(
         tf.zeros([self.num_all_edges, self.batch_size]), xe_sum_log_img)
     xe_product = tf.real(tf.exp(xe_sum_log_complex))
     # Tiny nudge toward zero to cancel numeric error in the +/-1 product.
     xe_product_temp = tf.multiply(tf.sign(xe_product), -2e-7)
     xe_pd_modified = tf.add(xe_product, xe_product_temp)
     # Minimum message magnitude within each segment of gathered edges.
     xe_v_minc = tf.segment_min(tf.abs(tf.gather(xe_c_sumv, h_minC_to_V)),
                                h_min_segmentIDs)
     xe_v_sumc = tf.multiply(xe_pd_modified, xe_v_minc)
     return xe_v_sumc, quantizer_index, syndrome_weight, xe_c_sumv_nonq, xe_c_sumv
Пример #22
0
def dense_to_sparse(tensor, eos_id):
    '''
    Convert tensor to a specific sparse format
    Because when calculating tf.edit_distance, only sparse tensor is received
    '''
    added_values = tf.cast(tf.fill((tf.shape(tensor)[0], 1), eos_id),
                           tensor.dtype)

    # Add eos to the entire tensor (guarantees one eos per row below)
    concat_tensor = tf.concat((tensor, added_values), axis=-1)
    # True where a value differs from its predecessor (duplicates -> False)
    diff = tf.cast(concat_tensor[:, 1:] - concat_tensor[:, :-1], tf.bool)
    eos_indices = tf.where(tf.equal(concat_tensor, eos_id))
    # Find the position of the first eos in each decoded sequence
    first_eos = tf.segment_min(eos_indices[:, 1], eos_indices[:, 0])
    # Mask keeping only positions before each row's first eos
    mask = tf.sequence_mask(first_eos, maxlen=tf.shape(tensor)[1])
    # Keep changed, in-range, non -1 entries only (-1 presumably padding)
    indices = tf.where(diff & mask & tf.not_equal(tensor, -1))
    values = tf.gather_nd(tensor, indices)
    shape = tf.shape(tensor, out_type=tf.int64)

    return tf.SparseTensor(indices, values, shape)
Пример #23
0
 def one_bp_iteration(self, xe_v2c_pre_iter, H_sumC_to_V, H_sumV_to_C,
                      h_minC_to_V, h_min_segmentIDs, xe_0):
     """One message-passing iteration with alpha-scale/beta-offset min.

     NOTE(review): looks like a normalized/offset min-sum BP (LDPC
     decoder) step — confirm.

     Returns:
         Tuple (xe_v_sumc, xe_c_sumv) of updated messages.

     Fix: removed a dead `xe_v_minc = tf.zeros(...)` assignment that was
     immediately overwritten by the segment_min result.
     """
     xe_sign = tf.to_float(tf.sign(xe_v2c_pre_iter))
     # Sign product via complex exponentials: each negative sign adds a
     # pi phase, so real(exp(i*sum)) equals the product of the +/-1 signs.
     xe_sum_log_img = tf.matmul(
         H_sumC_to_V,
         tf.multiply(tf.truediv((1 - xe_sign), [2.0]), [3.1415926]))
     xe_sum_log_complex = tf.complex(
         tf.zeros([self.num_all_edges, self.batch_size]), xe_sum_log_img)
     xe_product = tf.real(tf.exp(xe_sum_log_complex))
     # Tiny nudge toward zero to cancel numeric error in the +/-1 product.
     xe_product_temp = tf.multiply(tf.sign(xe_product), -2e-7)
     xe_pd_modified = tf.add(xe_product, xe_product_temp)
     # Minimum message magnitude within each segment of gathered edges.
     xe_v_minc = tf.segment_min(
         tf.abs(tf.gather(xe_v2c_pre_iter, h_minC_to_V)), h_min_segmentIDs)
     # Scale-and-offset the min magnitude, clamped at zero.
     xe_v_sumc = tf.multiply(
         xe_pd_modified,
         tf.maximum((self.alpha * xe_v_minc + self.beta),
                    tf.zeros([self.num_all_edges, self.batch_size],
                             dtype=tf.float32)))
     xe_c_sumv = tf.add(xe_0, tf.matmul(H_sumV_to_C, xe_v_sumc))
     return xe_v_sumc, xe_c_sumv
Пример #24
0
    def _calculate_marginal_x_by_y_axis(self, indices):
        """Find the left/right extreme x coordinate for each y row.

        Args:
            indices: int64 [N, 2] tensor of (y, x) coordinates
                (e.g. from tf.where), sorted by y (segment_min requirement).

        Returns:
            Pair (left_marginal, right_marginal) of float32 (y, x) point
            tensors, with single-pixel rows removed and the first/last
            15% of rows dropped.
        """
        xs = indices[..., 1]
        ys = indices[..., 0]

        # Leftmost / rightmost x per y row.
        x_mins = tf.segment_min(xs, ys)
        x_maxs = tf.segment_max(xs, ys)
        y_pos = tf.range(0, tf.shape(x_mins)[0])
        y_pos = tf.cast(y_pos, tf.int64)
        # Keep only rows wider than a single point (min != max).
        left_marginal = tf.gather_nd(tf.stack([y_pos, x_mins], axis=-1),
                                     tf.where(tf.not_equal(x_mins, x_maxs)))
        right_marginal = tf.gather_nd(tf.stack([y_pos, x_maxs], axis=-1),
                                      tf.where(tf.not_equal(x_mins, x_maxs)))

        # Drop the leading/trailing 15% of rows to reduce noise.
        valid_counts = tf.cast(tf.shape(left_marginal)[0], tf.float32)
        drop_counts = tf.clip_by_value(tf.cast(valid_counts * 0.15, tf.int32),
                                       1, 2**31)

        left_marginal = tf.cast(left_marginal[drop_counts:-drop_counts],
                                tf.float32)
        right_marginal = tf.cast(right_marginal[drop_counts:-drop_counts],
                                 tf.float32)
        return left_marginal, right_marginal
Пример #25
0
 def _get_constrained_steps(self, indices, mode):
     """Restrict (batch, step) index pairs to a per-batch step window.

     In 'train' mode the window ends at each batch row's max step and
     spans self._train_steps; in 'eval' mode it starts at each batch
     row's min step and spans self._eval_steps. For any other mode the
     limits stay None (the boolean_mask comparison would then fail).
     """
     batch_idx = tf.reshape(
         tf.slice(indices, begin=[0, 0], size=[tf.shape(indices)[0], 1]),
         [-1])
     uniq_batch_idx, ori_batch_idx = tf.unique(batch_idx)
     step_idx = tf.reshape(
         tf.slice(indices, begin=[0, 1], size=[tf.shape(indices)[0], 1]),
         [-1])
     start_limit = None
     end_limit = None
     if mode == 'train':
         # Keep the last self._train_steps steps of each batch row.
         split_limit = tf.gather(tf.segment_max(step_idx, batch_idx),
                                 uniq_batch_idx)
         end_limit = tf.gather(split_limit, ori_batch_idx)
         start_limit = end_limit - self._train_steps + 1
     elif mode == 'eval':
         # Keep the first self._eval_steps steps of each batch row.
         split_limit = tf.gather(tf.segment_min(step_idx, batch_idx),
                                 uniq_batch_idx)
         start_limit = tf.gather(split_limit, ori_batch_idx)
         end_limit = start_limit + self._eval_steps - 1
     return tf.boolean_mask(
         indices,
         tf.logical_and(step_idx >= start_limit, step_idx <= end_limit))
Пример #26
0
 def test_SegmentMin(self):
     """Smoke-test tf.segment_min through the generic checker."""
     # Four (2, 3) slices reduced into three segments: {0}, {1, 2}, {3}.
     t = tf.segment_min(self.random(4, 2, 3), np.array([0, 1, 1, 2]))
     self.check(t)
# Demo: partition matrix rows into groups and apply each tf.segment_*
# reduction group-wise, printing the results.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

import tensorflow as tf


sess = tf.InteractiveSession() 
seg_ids = tf.constant([0,1,1,2,2])  # Row groups: {0} | {1, 2} | {3, 4}


tens1 = tf.constant([[2, 5, 3, -5], 
                 [0, 3,-2,  5], 
                 [4, 3, 5,  3], 
                 [6, 1, 4,  0], 
                 [6, 1, 4,  0]])  # A sample constant matrix

print('\nseg_ids->', seg_ids.eval())
print('tens1->', tens1.eval())

print("\ntf.segment_sum(tens1, seg_ids).eval() ")   # Sum within each segment
print(tf.segment_sum(tens1, seg_ids).eval() )   # Sum within each segment

print("\ntf.segment_prod(tens1, seg_ids).eval() ")  # Product within each segment
print(tf.segment_prod(tens1, seg_ids).eval() )  # Product within each segment

print(tf.segment_min(tens1, seg_ids).eval() )  # Minimum within each segment
print(tf.segment_max(tens1, seg_ids).eval() )  # Maximum within each segment
print(tf.segment_mean(tens1, seg_ids).eval() )  # Mean within each segment
# Demo: segment, unsorted-segment, and sparse-segment reductions on a
# small integer matrix, printed from a single session.
import tensorflow as tf
import numpy as np

input_a = np.array([[1, 1, 2], [2, 3, 4], [3, 1, 1], [2, 4, 6]])
# Sorted-segment reductions: segment_ids group consecutive rows.
a_seg_sum = tf.segment_sum(data=input_a, segment_ids=[0, 1, 1, 1])
a_seg_prod = tf.segment_prod(data=input_a, segment_ids=[0, 0, 1, 1])
a_seg_max = tf.segment_max(data=input_a, segment_ids=[0, 0, 0, 1])
a_seg_min = tf.segment_min(data=input_a, segment_ids=[1, 1, 1, 1])
a_seg_mean = tf.segment_mean(data=input_a, segment_ids=[0, 0, 0, 1])
# Unsorted variant: ids may repeat out of order, num_segments is explicit.
a_seg_sum_num = tf.unsorted_segment_sum(data=input_a,
                                        segment_ids=[0, 1, 1, 0],
                                        num_segments=2)
# Sparse variant: reduces only the rows selected by `indices`.
a_sparse_seg_sum = tf.sparse_segment_sum(data=input_a,
                                         indices=[0, 1, 2],
                                         segment_ids=[0, 0, 1])

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    print(sess.run(a_seg_sum), '\n', sess.run(a_seg_prod), '\n',
          sess.run(a_seg_max), '\n', sess.run(a_seg_min))
    print(sess.run(a_seg_mean), '\n', sess.run(a_seg_sum_num), '\n',
          sess.run(a_sparse_seg_sum))
Пример #29
0
# Segmentation Examples: group matrix rows via seg_ids and evaluate
# every tf.segment_* reduction in an interactive session.
import tensorflow as tf
sess = tf.InteractiveSession()
seg_ids = tf.constant([0,1,1,2,2]); # Group indexes : 0 | 1,2 | 3,4

tens1 = tf.constant([[2, 5, 3, -5],  
                    [0, 3,-2,  5], 
                    [4, 3, 5,  3], 
                    [6, 1, 4,  0],
                    [6, 1, 4,  0]])  # A sample constant matrix

tf.segment_sum(tens1, seg_ids).eval()   # Sum segmentation
tf.segment_prod(tens1, seg_ids).eval()  # Product segmentation
tf.segment_min(tens1, seg_ids).eval()   # Minimum value per group
tf.segment_max(tens1, seg_ids).eval()   # Maximum value per group
tf.segment_mean(tens1, seg_ids).eval()  # Mean value per group
Пример #30
0
# Cheat-sheet snippet: cumulative sums and segment reductions.
# (Names like a, b, c, data, s are assumed to be defined elsewhere.)
tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]

# Computes the sum/mean/max/min/prod along segments of a tensor
tf.segment_sum(data, segment_ids, name=None)
# Eg:
m = tf.constant([5,1,7,2,3,4,1,3])
s_id = [0,0,0,1,2,2,3,3]
s.run(tf.segment_sum(m, segment_ids=s_id))
"""
>array([13,  2,  7,  4], dtype=int32)
"""

tf.segment_mean(data, segment_ids, name=None)
tf.segment_max(data, segment_ids, name=None)
tf.segment_min(data, segment_ids, name=None)
tf.segment_prod(data, segment_ids, name=None)
 
# Other segment reductions
tf.unsorted_segment_sum
tf.sparse_segment_sum
tf.sparse_segment_mean
tf.sparse_segment_sqrt_n

# Compare two lists (or strings) and return the differing values and
# their indices
tf.setdiff1d(x, y, index_dtype=tf.int32, name=None)
 
# Return the unique values of x plus, for each original element, its
# index into that unique-value tensor
tf.unique(x, out_idx=None, name=None)
 
# x if condition else y; `condition` is a bool tensor, e.g. from tf.equal()
Пример #31
0
def worker(cluster, n_items, embed_dim, worker_n, epoch, alpha,
           master_learning_rate, sampler, evaluator, num_batch, num_user):
    """Run one worker task of a distributed ranking-model training job.

    Item embeddings and item biases are sharded across the ``ps`` tasks
    (embeddings column-wise, biases row-wise), while user embeddings are
    a worker-local variable.  Each step samples user/positive pairs plus
    negatives, finds "impostor" negatives (those scoring within margin 1
    of the positive), and minimizes a clipped log-rank-weighted loss plus
    an L2 regularizer.  NOTE(review): the WARP-style interpretation of
    the rank weight is inferred from the math below — confirm against
    the training design doc.

    Args:
        cluster: tf.train.ClusterSpec describing the ps/worker jobs.
        n_items: total number of items.
        embed_dim: embedding dimensionality (K).
        worker_n: index of this worker task.
        epoch: number of training epochs to run.
        alpha: L2 regularization weight.
        master_learning_rate: Adam learning rate.
        sampler: batch source; ``next_batch()`` yields
            (user_positive_pairs, negative_samples, negative_flags).
        evaluator: provides ``users()`` and ``eval()`` ranking metrics.
        num_batch: number of batches per epoch.
        num_user: number of users (rows of the user-embedding table).
    """
    # Split embedding columns / bias rows as evenly as possible across
    # the parameter servers.
    per_item_embedding = int(
        np.ceil(embed_dim * 1.0 / len(cluster.job_tasks('ps'))))
    per_item_bias = int(np.ceil(n_items * 1.0 / len(cluster.job_tasks('ps'))))
    item_embedding_chunk = list(chunks(range(embed_dim), per_item_embedding))
    item_bias_chunk = list(chunks(range(n_items), per_item_bias))
    item_embedding_list = []
    item_bias_list = []
    # One variable shard per ps task.
    for ps_n in range(len(cluster.job_tasks('ps'))):
        with tf.device("/job:ps/task:%s/cpu:0" % ps_n):
            item_embedding_list.append(
                tf.Variable(tf.random_normal([
                    n_items, item_embedding_chunk[ps_n][-1] -
                    item_embedding_chunk[ps_n][0] + 1
                ],
                                             stddev=1 / (embed_dim**0.5),
                                             dtype=tf.float32),
                            name="ps_item_embeddings_%s" % ps_n))
            item_bias_list.append(
                tf.Variable(tf.random_normal([
                    item_bias_chunk[ps_n][-1] - item_bias_chunk[ps_n][0] + 1, 1
                ],
                                             stddev=1 / (embed_dim**0.5),
                                             dtype=tf.float32),
                            name="ps_item_bias_%s" % ps_n))

    # Shared queue, used below as a startup barrier across all tasks.
    with tf.device("/job:ps/task:0/cpu:0"):
        task_queue = tf.FIFOQueue(
            len(cluster.job_tasks('ps') + cluster.job_tasks('worker')),
            [tf.bool],
            shared_name='ps_task_queue')

    with tf.device("/job:worker/task:%s" % worker_n):
        # Pull a local read-only snapshot of the sharded item tables and
        # stitch the shards back into full matrices.
        item_embedding_copy_list = [
            tf.identity(item) for item in item_embedding_list
        ]
        item_bias_copy_list = [tf.identity(item) for item in item_bias_list]
        item_embeddings_copy = tf.concat(item_embedding_copy_list, 1)
        item_bias_copy = tf.concat(item_bias_copy_list, 0)
        user_positive_items_pairs = tf.placeholder(tf.int32, [None, 2])
        negative_samples = tf.placeholder(tf.int32, [None, None])
        # presumably 1 = valid negative, 0 = padding — confirm sampler contract
        negative_flags = tf.placeholder(tf.float32, [None, None])
        score_user_ids = tf.placeholder(tf.int32, [None])
        # User embeddings live on this worker only (not shared via ps).
        user_embeddings = tf.Variable(
            tf.random_normal([num_user, embed_dim],
                             stddev=1 / (embed_dim**0.5),
                             dtype=tf.float32),
            name="worker_%s_user_embeddings" % worker_n)
        # N = batch size,
        # K = embedding size,
        # W = number of negative samples per a user-positive-item pair

        # user embedding (N, K)
        users = tf.nn.embedding_lookup(user_embeddings,
                                       user_positive_items_pairs[:, 0],
                                       name="worker_%s_users" % worker_n)
        user_reg = tf.reduce_sum(tf.square(users),
                                 1,
                                 name="worker_%s_user_reg" % worker_n)
        # positive item embedding (N, K)
        pos_items = tf.nn.embedding_lookup(item_embeddings_copy,
                                           user_positive_items_pairs[:, 1])
        pos_reg = tf.reduce_sum(tf.square(pos_items), 1)
        pos_bias = tf.squeeze(
            tf.nn.embedding_lookup(item_bias_copy,
                                   user_positive_items_pairs[:, 1]))
        # positive item to user distance (N)
        pos_distances = tf.reduce_sum(tf.multiply(users, pos_items),
                                      1) + pos_bias

        # negative item embedding (N, K, W)
        neg_items = tf.transpose(
            tf.nn.embedding_lookup(item_embeddings_copy, negative_samples),
            (0, 2, 1))
        neg_reg = tf.reduce_sum(tf.square(neg_items), 1)
        neg_bias = tf.squeeze(
            tf.nn.embedding_lookup(item_bias_copy, negative_samples))
        # distance to negative items (N x W)
        distance_to_neg_items = tf.reduce_sum(
            tf.multiply(tf.expand_dims(users, -1), neg_items), 1) + neg_bias

        # "Impostors": negatives whose score comes within margin 1 of the
        # positive's score (entries masked by negative_flags).
        impostors = tf.multiply(
            negative_flags,
            (tf.expand_dims(-pos_distances, -1) + distance_to_neg_items + 1))
        indexes = tf.where(tf.greater(impostors, 0))

        impostor_num = tf.shape(indexes)[0]
        impostor_log = tf.nn.moments(impostors, axes=[0, 1])

        # For each pair (row), take the first impostor column; its position
        # feeds the log-rank weight below.
        x_min_y = tf.segment_min(indexes[:, 1], indexes[:, 0])
        uni_x, _ = tf.unique(indexes[:, 0])
        uni_y = tf.nn.embedding_lookup(x_min_y, uni_x)
        xy = tf.concat([tf.expand_dims(uni_x, -1),
                        tf.expand_dims(uni_y, -1)], 1)

        impostor_xy = tf.gather_nd(impostors, xy)
        rank = tf.log((n_items - 1) / tf.cast(uni_y + 1, tf.float32))

        # Clipped ranking loss + L2 regularizer, averaged over the batch.
        eloss = tf.reduce_sum(tf.clip_by_value(rank * impostor_xy, 0, 10))
        rloss = tf.reduce_sum(
            alpha * (tf.gather_nd(neg_reg, xy) + tf.nn.embedding_lookup(
                pos_reg, uni_x) + tf.nn.embedding_lookup(user_reg, uni_x)))

        loss = (eloss + rloss) / tf.cast(
            tf.shape(user_positive_items_pairs)[0], tf.float32)
        optimizer = tf.train.AdamOptimizer(master_learning_rate)
        grads_and_vars = optimizer.compute_gradients(loss)
        # Densify sparse (IndexedSlices) gradients before applying them.
        dense_grads_and_vars = [(tf.convert_to_tensor(grad), var)
                                for grad, var in grads_and_vars]
        opt = optimizer.apply_gradients(dense_grads_and_vars)
        # Full user-by-item score matrix for evaluation, then top-k items.
        item_scores = tf.reduce_sum(
            tf.multiply(
                tf.expand_dims(
                    tf.nn.embedding_lookup(user_embeddings, score_user_ids),
                    1), tf.expand_dims(item_embeddings_copy, 0)),
            2) + tf.squeeze(item_bias_copy)
        topk = tf.nn.top_k(item_scores, n_items)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    print("Worker %d: start server" % worker_n)
    server = tf.train.Server(cluster,
                             job_name="worker",
                             task_index=worker_n,
                             config=config,
                             protocol="grpc+verbs")
    print("Worker %d: start session" % worker_n)
    sess = tf.Session(target=server.target)

    print("Worker %d: initializing variables; free memory: %s" %
          (worker_n, free_memory()))
    sess.run(tf.global_variables_initializer())
    # Startup barrier: enqueue one token, then wait until every ps and
    # worker task has checked in before training starts.
    sess.run(task_queue.enqueue(True))
    while sess.run(task_queue.size()) != len(
            cluster.job_tasks('ps') + cluster.job_tasks('worker')):
        print("Worker %s: waiting..." % worker_n)
        sleep(1)
    print("Worker %d: variables initialized; free memory: %s" %
          (worker_n, free_memory()))

    _epoch = 0
    while _epoch < epoch:
        _losses, _users, _tops = [], evaluator.users(), []
        for _ in tqdm(range(num_batch), desc="Optimizing...", file=sys.stdout):
            user_pos, neg, flags = sampler.next_batch()
            #_, loss= sess.run((opt, loss), {user_positive_items_pairs: user_pos, negative_samples: neg, negative_flags: flags})
            #options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            #run_metadata = tf.RunMetadata()
            _, _impostor_num, _impostor_log, _loss, _eloss, _rloss = sess.run(
                [opt, impostor_num, impostor_log, loss, eloss, rloss],
                {
                    user_positive_items_pairs: user_pos,
                    negative_samples: neg,
                    negative_flags: flags
                },
                #    options=options, run_metadata=run_metadata
            )
            print("Worker %d: training" % worker_n, _impostor_num,
                  _impostor_log, _loss, _eloss, _rloss)
            #fetched_timeline = timeline.Timeline(run_metadata.step_stats)
            #chrome_trace = fetched_timeline.generate_chrome_trace_format()
            #with open(os.path.join(output_path, 'timeline_%s.json' % worker_n), 'w') as f:
            #    f.write(chrome_trace)
            #for device in run_metadata.step_stats.dev_stats:
            #    print(device.device)
            #    for node in device.node_stats:
            #        print("  ", node.node_name)
            _losses.append(_loss)
        print("Worker %d: training" % worker_n, free_memory())
        # Evaluation: collect top-k item indices for all evaluation users.
        for chunk in chunks(_users, 100):
            _, _top = sess.run(topk, {score_user_ids: chunk})
            _tops.extend(_top)
        print("Worker %s: epoch: %s" % (worker_n, _epoch), free_memory(),
              np.mean(_losses), 50, evaluator.eval(zip(_users, _tops), 50),
              evaluator.eval(zip(_users, _tops), 50, 'recall'))
        _epoch += 1
    # Leave the barrier so peers can shut down, then clean up.
    sess.run(task_queue.dequeue())
    sampler.close()
    print("Worker %d: done" % worker_n)
Пример #32
0
        def cal_out():
            """Compute the layer's output in closed form.

            NOTE(review): inferred mechanics — the inputs are sorted
            ascending (top_k then reverse), prefix sums of x*W and of W
            are accumulated with while_loops, candidate outputs are
            cumsum(x*W) / (cumsum(W) - 1), and for each output unit the
            first candidate bracketed by consecutive sorted inputs is
            selected via segment_min.  Confirm against the model's
            derivation/paper.
            """
            # top_k sorts descending; reverse the index order to get the
            # inputs (and matching weight rows) in ascending order.
            r, ind = tf.nn.top_k(self.input, self.n_in)
            r_ind = tf.reverse(ind, [0])
            nx = tf.gather(self.input, r_ind)
            nW = tf.transpose(tf.gather(self.W, r_ind))
            nxW = tf.multiply(nx, nW)

            def body_z(i, z):
                # Prepend the prefix sum over the first i+1 columns of
                # nxW, keeping the slice width fixed at n_in.
                z = tf.slice(
                    tf.concat([
                        tf.cast(
                            tf.reduce_sum(
                                tf.slice(nxW, [0, 0], [self.n_out, i + 1]), 1,
                                True), tf.float32), z
                    ], 1), [0, 0], [self.n_out, self.n_in])
                return [i + 1, z]

            def body_W(i, z):
                # Same accumulation as body_z, but over the weights alone.
                z = tf.slice(
                    tf.concat([
                        tf.cast(
                            tf.reduce_sum(
                                tf.slice(nW, [0, 0], [self.n_out, i + 1]), 1,
                                True), tf.float32), z
                    ], 1), [0, 0], [self.n_out, self.n_in])
                return [i + 1, z]

            def condition(i, z):
                # Iterate over all n_in input positions.
                return i < self.n_in

            r1, n_sum_z = tf.while_loop(condition, body_z,
                                        [self.i, self.sum_z])
            r2, n_sum_W = tf.while_loop(condition, body_W,
                                        [self.i, self.sum_W])
            # The loops prepend columns, so reverse to restore order.
            f_sum_z = tf.reverse(n_sum_z, [1])
            f_sum_W = tf.reverse(n_sum_W, [1])

            # Candidate output per (unit, threshold position).
            out_all = tf.divide(f_sum_z, tf.subtract(f_sum_W, 1))
            # Append an infinite sentinel column (c_one / c_zero) so a
            # candidate always exists for the open last interval.
            out_all_2 = tf.concat([
                out_all,
                tf.transpose([
                    tf.tile([tf.divide(self.c_one, self.c_zero)], [self.n_out])
                ])
            ], 1)

            # Keep candidates bracketed by consecutive sorted inputs:
            # nx[j] < candidate <= nx[j+1] (with padding at the ends).
            out_ok = tf.where(
                tf.logical_and(
                    tf.less(
                        tf.cast(
                            tf.tile([tf.concat([nx, [1]], 0)],
                                    [self.n_out, 1]), tf.float32), out_all_2),
                    tf.greater_equal(
                        tf.cast(
                            tf.tile([
                                tf.slice(
                                    tf.concat([
                                        nx,
                                        [
                                            tf.divide(self.c_one, self.c_zero),
                                            tf.divide(self.c_one, self.c_zero)
                                        ]
                                    ], 0), [1], [self.n_in + 1])
                            ], [self.n_out, 1]), tf.float32), out_all_2)))

            # For each output row, pick the first (smallest-column) valid
            # candidate index.
            out_idx = tf.transpose(
                tf.concat(
                    [[tf.range(0, self.n_out)],
                     [
                         tf.cast(tf.segment_min(out_ok[:, 1], out_ok[:, 0]),
                                 tf.int32)
                     ]], 0))
            out = tf.gather_nd(out_all_2, out_idx)
            # Clamp at 1e5 so the infinite sentinel never propagates.
            output = tf.where(out > 1e5, tf.multiply(tf.ones_like(out), 1e5),
                              out)
            return output
Пример #33
0
def get_finised_pos(token_seq, finished_index, max_length):
    """Return an int32 mask over each row of `token_seq`, covering every
    position up to and including the first occurrence of `finished_index`.

    NOTE(review): assumes every row contains `finished_index` at least
    once — confirm against callers.
    """
    hits = tf.where(tf.equal(token_seq, int(finished_index)))
    # hits[:, 0] is the row, hits[:, 1] the column; keep the earliest
    # matching column per row.
    first_hit = tf.segment_min(hits[:, 1], hits[:, 0])
    mask = tf.sequence_mask(first_hit + 1, maxlen=max_length)
    return tf.cast(mask, tf.int32)
Пример #34
0
import tensorflow as tf

sess = tf.InteractiveSession()

# BUG FIX: seg_ids has 5 entries, so the data matrix must have 5 rows.
# The original tens1 had only 4 rows, which makes every tf.segment_* op
# below raise InvalidArgumentError (segment_ids size must match
# data.shape[0]).  The missing middle row is restored here, matching the
# parallel example elsewhere in this file.
seg_ids = tf.constant([0, 1, 1, 2, 2])  # segment index per row: 0 | 1,2 | 3,4
tens1 = tf.constant([[2, 5, 3, -5],
                     [0, 3, -2, 5],
                     [4, 3, 5, 3],
                     [6, 1, 4, 0],
                     [6, 1, 4, 0]])

print(tf.segment_sum(tens1, seg_ids).eval())   # per-segment sum
print(tf.segment_prod(tens1, seg_ids).eval())  # per-segment product
print(tf.segment_min(tens1, seg_ids).eval())   # per-segment minimum
print(tf.segment_max(tens1, seg_ids).eval())   # per-segment maximum
print(tf.segment_mean(tens1, seg_ids).eval())  # per-segment mean
Пример #35
0
    def _comp_f(self):
        """
        Encodes all queries (including supporting queries).

        Each span query is encoded as the combination of a backward-RNN
        state (read from the sequence end down to the span end) and a
        forward-RNN state (read from the sequence start up to the span
        start), merged by a linear layer plus a residual sum.

        NOTE: uses pre-1.0 TensorFlow APIs (tf.pack, tf.concat(dim, ...)).
        :return: encoded queries
        """
        with tf.device("/cpu:0"):
            # Trim the batch to the longest actual sequence.
            max_length = tf.cast(tf.reduce_max(self._length), tf.int32)
            context_t = tf.transpose(self._context)
            context_t = tf.slice(context_t, [0, 0], tf.pack([max_length, -1]))
            embedded = tf.nn.embedding_lookup(self.input_embedding, context_t)
            embedded = tf.nn.dropout(embedded, self.keep_prob)
            batch_size = tf.shape(self._context)[0]
            batch_size_32 = tf.reshape(batch_size, [1])
            batch_size_64 = tf.cast(batch_size, tf.int64)

        with tf.device(self._device1):
            #use other device for backward rnn
            with tf.variable_scope("backward"):
                # Earliest span end per sequence: the backward RNN only
                # needs to run back to that position.
                min_end = tf.segment_min(self._ends, self._span_context)
                init_state = tf.get_variable("init_state", [self._size],
                                             initializer=self._init)
                # Broadcast the learned initial state over the batch.
                init_state = tf.reshape(tf.tile(init_state, batch_size_32),
                                        [-1, self._size])
                rev_embedded = tf.reverse_sequence(embedded, self._length, 0,
                                                   1)
                # TIME-MAJOR: [T, B, S]
                outs_bw = self._composition_function(rev_embedded,
                                                     self._length - min_end,
                                                     init_state)
                # reshape to all possible queries for all sequences. Dim[0]=batch_size*(max_length+1).
                # "+1" because we include the initial state
                outs_bw = tf.reshape(
                    tf.concat(0, [tf.expand_dims(init_state, 0), outs_bw]),
                    [-1, self._size])
                # gather respective queries via their lengths-start (because reversed sequence)
                lengths_aligned = tf.gather(self._length, self._span_context)
                out_bw = tf.gather(
                    outs_bw, (lengths_aligned - self._ends) * batch_size_64 +
                    self._span_context)

        with tf.device(self._device2):
            with tf.variable_scope("forward"):
                #e_inputs = [tf.reshape(e, [-1, self._size]) for e in tf.split(1, self._max_length, embedded)]
                # Latest span start per sequence bounds the forward RNN.
                max_start = tf.segment_max(self._starts, self._span_context)
                init_state = tf.get_variable("init_state", [self._size],
                                             initializer=self._init)
                init_state = tf.reshape(tf.tile(init_state, batch_size_32),
                                        [-1, self._size])
                # TIME-MAJOR: [T, B, S]
                outs_fw = self._composition_function(embedded, max_start,
                                                     init_state)
                # reshape to all possible queries for all sequences. Dim[0]=batch_size*(max_length+1).
                # "+1" because we include the initial state
                outs_fw = tf.reshape(
                    tf.concat(0, [tf.expand_dims(init_state, 0), outs_fw]),
                    [-1, self._size])
                # gather respective queries via their positions (with offset of batch_size*ends)
                out_fw = tf.gather(
                    outs_fw, self._starts * batch_size_64 + self._span_context)
            # form query from forward and backward compositions
            query = tf.contrib.layers.fully_connected(tf.concat(
                1, [out_fw, out_bw]),
                                                      self._size,
                                                      activation_fn=None,
                                                      weights_initializer=None,
                                                      biases_initializer=None)
            # Residual combination of the linear merge with both states.
            query = tf.add_n([query, out_bw, out_fw])

        return query
Пример #36
0
 def test_SegmentMin(self):
     """Check tf.segment_min on a random (4, 2, 3) tensor."""
     data = self.random(4, 2, 3)
     segment_ids = np.array([0, 1, 1, 2])
     self.check(tf.segment_min(data, segment_ids))