Example #1
  def testGradSerialTwoLoops(self):
    with self.test_session():
      num_steps = 100
      acc = tf.TensorArray(dtype=tf.float32, size=num_steps,
                           clear_after_read=False,
                           element_shape=tensor_shape.scalar())
      i = tf.constant(0, name="i")
      x = tf.constant(2.0, name="x")

      c = lambda i, acc: i < 5
      def b(i, acc):
        x1 = tf.cond(tf.equal(i, 0),
                     lambda: x,
                     lambda: tf.mul(acc.read(i - 1), 2.0))
        return i + 1, acc.write(i, x1)
      i1, acc1 = tf.while_loop(c, b, [i, acc])

      z = tf.constant(0.0)
      def fn(i, acc):
        return i + 1, acc.write(i, z)
      _, acc2 = tf.while_loop(lambda i, acc: i < num_steps, fn, [i1, acc1])

      r = acc2.stack()
      grad = tf.gradients(r, [x])[0]
      self.assertAllClose(31.0, grad.eval())
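tf.gradients differentiates straight through tf.while_loop (the backward pass replays the loop in reverse), which is what lets the test above recover 31.0. A minimal standalone sketch of the same mechanism, assuming TensorFlow 1.x graph mode:

import tensorflow as tf

x = tf.constant(2.0)
# y = x * 2**5, built as a loop; gradients flow back through every iteration.
_, y = tf.while_loop(lambda i, v: i < 5,
                     lambda i, v: [i + 1, v * 2.0],
                     [tf.constant(0), x])
grad = tf.gradients(y, [x])[0]
with tf.Session() as sess:
    print(sess.run([y, grad]))  # [64.0, 32.0]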
Example #2
  def _testStackWhileSwap(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      n = tf.constant(0)
      h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")

      def c(x):
        return tf.less(x, 10)
      def b(x):
        with tf.control_dependencies([x]):
          a = tf.constant(np.ones(2000), dtype=tf.float32)
          v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
        with tf.control_dependencies([v]):
          return tf.add(x, 1)
      r = tf.while_loop(c, b, [n])

      v = tf.constant(np.zeros(2000), dtype=tf.float32)
      def c1(x, y):
        return tf.greater(x, 0)
      def b1(x, y):
        nx = tf.sub(x, 1)
        ny = y + gen_data_flow_ops._stack_pop(h, tf.float32)
        return [nx, ny]
      rx, ry = tf.while_loop(c1, b1, [r, v],
                             [r.get_shape(), tensor_shape.unknown_shape()])
      self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
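The test above exercises stack swapping through the low-level gen_data_flow_ops; in user code the same flag usually goes on tf.while_loop itself, letting forward tensors kept for the backward pass spill to host memory. A minimal sketch, assuming TF 1.x:

import tensorflow as tf

_, total = tf.while_loop(lambda i, a: i < 10,
                         lambda i, a: [i + 1, a + tf.ones([2000])],
                         [tf.constant(0), tf.zeros([2000])],
                         swap_memory=True)  # allow GPU->CPU swapping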
Example #3
 def body(x):
     def nest_body(c):
         return tf.multiply(c, 2)
     def cd(c): return tf.less(c, 10)
     c = tf.constant(2)
     res = tf.while_loop(cd, nest_body, loop_vars=[c])
     return tf.nn.relu(x + res)
Example #4
def batch_embed_lookup(embedding, ids):
    '''
        embedding: shape(b_sz, tstp, emb_sz)
        ids : shape(b_sz, k)
    '''
    input_shape = tf.shape(embedding)
    time_steps = input_shape[0]  # axis 0 is b_sz here: the loop iterates over the batch
    def _create_ta(name, dtype):
        return tf.TensorArray(dtype=dtype,
                              size=time_steps,
                              tensor_array_name=name)
    input_ta = _create_ta('input_ta', embedding.dtype)
    fetch_ta = _create_ta('fetch_ta', ids.dtype)
    output_ta = _create_ta('output_ta', embedding.dtype)
    input_ta = input_ta.unstack(embedding)
    fetch_ta = fetch_ta.unstack(ids)

    def loop_body(time, output_ta):
        embed = input_ta.read(time) #shape(tstp, emb_sz) type of float32
        fetch_id = fetch_ta.read(time) #shape(k) type of int32
        out_emb = tf.nn.embedding_lookup(embed, fetch_id)
        output_ta = output_ta.write(time, out_emb)

        next_time = time+1
        return next_time, output_ta
    time = tf.constant(0)
    _, output_ta = tf.while_loop(cond=lambda time, *_: time < time_steps,
                  body=loop_body, loop_vars=(time, output_ta),
                  swap_memory=True)
    ret_t = output_ta.stack() #shape(b_sz, k, emb_sz)
    return ret_t
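A hedged usage sketch for the function above (shapes invented for illustration); note the second dimension of the result is k, not tstp:

import tensorflow as tf

emb = tf.random_normal([4, 10, 8])                          # (b_sz, tstp, emb_sz)
ids = tf.random_uniform([4, 3], maxval=10, dtype=tf.int32)  # (b_sz, k)
out = batch_embed_lookup(emb, ids)                          # (b_sz, k, emb_sz)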
Example #5
  def loss(self, predicts, labels, objects_num):
    """Add Loss to all the trainable variables

    Args:
      predicts: 4-D tensor [batch_size, cell_size, cell_size, 5 * boxes_per_cell]
      ===> (num_classes, boxes_per_cell, 4 * boxes_per_cell)
      labels  : 3-D tensor of [batch_size, max_objects, 5]
      objects_num: 1-D tensor [batch_size]
    """
    class_loss = tf.constant(0, tf.float32)
    object_loss = tf.constant(0, tf.float32)
    noobject_loss = tf.constant(0, tf.float32)
    coord_loss = tf.constant(0, tf.float32)
    loss = [0, 0, 0, 0]
    for i in range(self.batch_size):
      predict = predicts[i, :, :, :]
      label = labels[i, :, :]
      object_num = objects_num[i]
      nilboy = tf.ones([7,7,2])
      tuple_results = tf.while_loop(self.cond1, self.body1, [tf.constant(0), object_num, [class_loss, object_loss, noobject_loss, coord_loss], predict, label, nilboy])
      for j in range(4):
        loss[j] = loss[j] + tuple_results[2][j]
      nilboy = tuple_results[5]

    tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size)

    tf.summary.scalar('class_loss', loss[0]/self.batch_size)
    tf.summary.scalar('object_loss', loss[1]/self.batch_size)
    tf.summary.scalar('noobject_loss', loss[2]/self.batch_size)
    tf.summary.scalar('coord_loss', loss[3]/self.batch_size)
    tf.summary.scalar('weight_loss', tf.add_n(tf.get_collection('losses')) - (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size )

    return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy
Example #6
    def prior_model(prior_queue_init, length=SIG_LEN):
        def cond(loop_counter, *_):
            return tf.less(loop_counter, length)

        def body(loop_counter, accumulated_output_array, accumulated_logits_array, next_input, *queue_contents):
            next_logit, queue_updates = sub_predictor(next_input, queue_contents)
            gumbeled = next_logit[:, 0, :] - tf.log(-tf.log(tf.random_uniform((tf.shape(next_logit)[0], QUANT_LEVELS))))
            sample_disc = tf.arg_max(gumbeled, 1)
            sample_cont = dequantizer(sample_disc, QUANT_LOWER, QUANT_UPPER, QUANT_LEVELS)
            accumulated_output_array = accumulated_output_array.write(loop_counter, sample_cont)
            accumulated_logits_array = accumulated_logits_array.write(loop_counter, next_logit[:, 0, :])
            sample_cont = tf.expand_dims(sample_cont, 1)
            sample_cont = tf.expand_dims(sample_cont, 1) # sic
            next_input = tf.concat(2, (sample_cont, tf.ones_like(sample_cont)))
            return [loop_counter+1, accumulated_output_array, accumulated_logits_array, next_input] + queue_updates

        accumulated_output_array = tf.TensorArray(tf.float32, size=SIG_LEN, clear_after_read=False)
        accumulated_logits_array = tf.TensorArray(tf.float32, size=SIG_LEN, clear_after_read=False)

        loop_var_init = [tf.constant(0, dtype=tf.int32), accumulated_output_array, accumulated_logits_array, tf.zeros((PRIOR_BATCH_SIZE, 1, 2))] + prior_queue_init
        accumulated_output_array, accumulated_logits_array = tf.while_loop(cond, body, loop_var_init, back_prop=False)[1:3]
        output = tf.transpose(accumulated_output_array.pack(), [1, 0])
        logits = tf.transpose(accumulated_logits_array.pack(), [1, 0, 2])

        output.set_shape((PRIOR_BATCH_SIZE, length))
        logits.set_shape((PRIOR_BATCH_SIZE, length, QUANT_LEVELS))
        return output, logits
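The `gumbeled` line above is the Gumbel-max trick: adding -log(-log(U)) noise to logits and taking the argmax yields an exact sample from softmax(logits). A minimal standalone sketch, assuming TF 1.x:

import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 0.5]])
gumbel = -tf.log(-tf.log(tf.random_uniform(tf.shape(logits))))
sample = tf.argmax(logits + gumbel, axis=1)  # ~ Categorical(softmax(logits))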
Example #7
def forward_prop_nodes(i_start, size, acts, offset):
  # Note: In the corpus that we've seen, parse trees are always ordered such that
  # iteration forward through the list will be in bottom-up order.
  # Conversely, iteration in reverse is always top-down.
  # This enables a simple iterative algorithm. If this were not the case,
  # putting the nodes in order by a postorder traversal would fix it.
  def fwd_continue(*parms):
    (_, sz, cur, _) = parms
    return tf.less(cur, sz, name='cur_le_size')

  def forward_prop(*parms):
    (i0, sz, cur, act) = parms
    with tf.device('/gpu:0'):
      gact = act
      gcur = cur
      next_idx = i0 + gcur
    node_out = tf.reshape(forward_node(next_idx, act, offset), [1, FLAGS.wvs, 1], name='node_out')
    tf.scatter_add(gact, tf.pack([gcur]), node_out, name='act_update')
    act = gact
    return [i0, sz, cur + iONE, act]

  with tf.device('/cpu:0'):
    i_start = tf.convert_to_tensor(i_start, dtype=tf.int32, name='i_start')
    size = tf.convert_to_tensor(size, dtype=tf.int32, name='size')
    iZ = tf.convert_to_tensor(0, dtype=tf.int32, name='ZERO')

  while_parms = [i_start, size, iZ, acts]
  wresult = tf.while_loop(fwd_continue, forward_prop, while_parms, parallel_iterations=1,
                          name='forward_prop_while')
  (_, _, _, result) = wresult
  return tf.slice(result, [0, 0, 0], tf.pack([size, -1, -1]), name='fwd_prop_nodes')
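parallel_iterations=1 forces iterations to run strictly in sequence, which matters when the body has side effects such as the scatter update above. A minimal sketch of a stateful body, assuming TF 1.x (the control dependency ties the update to the loop counter so it runs every step):

import tensorflow as tf

v = tf.Variable(tf.zeros([5]))

def body(i):
    upd = tf.scatter_add(v, [i], [1.0])   # in-place update of v
    with tf.control_dependencies([upd]):  # force the update each iteration
        return i + 1

final_i = tf.while_loop(lambda i: i < 5, body, [tf.constant(0)],
                        parallel_iterations=1)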
Example #8
 def hinge_loss(self, y_true, y_pred):
     # Custom loss function
     margin = 0.1
     # loop counter
     i = tf.constant(0)
     # loop condition function
     c = lambda i, _: tf.less(i, tf.shape(y_true)[0])
     outer_sum_loss = tf.constant(0.0)
     def process_ele(i, outer_sum_loss):
         # Get a subtensor from batch
         y_true_one = y_true[i]
         y_pred_one = y_pred[i]
         # Stack margin to a num_class*1 matrix
         margin_stack = tf.reshape(tf.stack([tf.constant(0.1)] * self.num_classes), [self.num_classes, 1])
         # Stack true label to a word_dim*num_class matrix and transpose it
         y_true_one_stack = tf.stack([tf.transpose(y_true_one)] * self.num_classes)
         # Reshape predict from (word_dim,) to (word_dim,1)
         y_pred_one_t = tf.reshape(y_pred_one, [self.word_dim, 1])
         # Calculate loss
         r = margin_stack - tf.matmul(y_true_one_stack, y_pred_one_t) + tf.matmul(self.label_vec_tensor, y_pred_one_t)
         # Summation
         # We did not exclude true label inside summation, so we subtract extra margin
         sum_inner_loss = tf.reduce_sum(K.relu(r)) - margin
         # Return counter++ and accumulated loss
         return tf.add(i, 1), tf.add(outer_sum_loss, sum_inner_loss)
     
     _, outer_sum_loss = tf.while_loop(c, process_ele, [i, outer_sum_loss])
     # Return average loss over batch
     return outer_sum_loss / tf.cast(tf.shape(y_true)[0], dtype=tf.float32)
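Since each example is processed independently, the same batch reduction could also be written with tf.map_fn; a hedged sketch, where per_example_loss is a hypothetical helper standing in for the body of process_ele:

losses = tf.map_fn(
    lambda idx: per_example_loss(y_true[idx], y_pred[idx]),
    tf.range(tf.shape(y_true)[0]),
    dtype=tf.float32)
loss = tf.reduce_mean(losses)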
Example #9
    def decoding_loop(self) -> BeamSearchOutput:
        """Create the decoding loop.

        This function mimics the behavior of the ``decoding_loop`` method of
        the ``AutoregressiveDecoder``, except the initial loop state is created
        outside this method because it is accessed and fed during ensembling.

        TODO: The ``finalize_loop`` method and the handling of attention loop
        states might be implemented in the future.

        Returns:
            This method returns a populated ``BeamSearchOutput`` object.
        """

        final_loop_state = tf.while_loop(
            self.loop_continue_criterion,
            self.get_body(),
            self.initial_loop_state,
            shape_invariants=tf.contrib.framework.nest.map_structure(
                get_state_shape_invariants, self.initial_loop_state))

        # TODO: return att_loop_states properly
        return BeamSearchOutput(
            last_search_step_output=final_loop_state.search_results,
            last_dec_loop_state=final_loop_state.decoder_loop_state,
            last_search_state=final_loop_state.search_state,
            attention_loop_states=[])
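get_state_shape_invariants is referenced here (and again in Example #12) but not shown. A plausible stand-in, offered as an assumption rather than the project's actual helper, relaxes every dimension, which is the loosest invariant tf.while_loop accepts:

import tensorflow as tf

def get_state_shape_invariants(state):
    # Assumed helper: let every dimension of the state vary across iterations.
    return tf.TensorShape([None] * state.shape.ndims)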
Example #10
  def _build_train_specific_graph(self, iterator, model_fn, params,
                                  record_files_placeholder, num_train_steps):
    """Builds the part of the model that is specific to training."""

    def build():
      features, labels = iterator.get_next()
      estimator_spec = model_fn(
          features, labels, tf.estimator.ModeKeys.TRAIN, params)
      with tf.control_dependencies([estimator_spec.train_op]):
        run_model_op = self._global_step.assign_add(1)
      return run_model_op, estimator_spec.loss

    if self._use_while_loop:
      def body(i):
        run_model_op_single_step, _ = build()
        with tf.control_dependencies([run_model_op_single_step]):
          return i + 1

      run_model_op = tf.while_loop(lambda i: i < num_train_steps, body, [0],
                                   parallel_iterations=1)
      loss = None
    else:
      run_model_op, loss = build()

    return self._TrainModelProperties(
        record_files_placeholder, iterator, loss, params["batch_size"],
        run_model_op)
Example #11
def batch_logits(indices, acts):
  init_outs = tf.zeros([1, FLAGS.wvs, 1])

  def logits_continue(*parms):
    cur, idxs, _, _, _ = parms
    return tf.less(cur, tf.size(idxs), name='batch_done')

  def logits_batch_body(*parms):
    i, idxs, ptr, css, act = parms
    i_s = tf.reshape(tf.slice(idxs, tf.pack([i]), [1]), [])
    start, size = get_bounds(i_s)
    outs = forward_prop_nodes(start, size, acts, ptr)
    new_css = tf.cond(tf.equal(i, iZERO),
                      lambda: outs,
                      lambda: tf.concat(0, [css, outs]))
    return i + iONE, indices, ptr + size, new_css, acts
  with tf.device('/cpu:0'):
    iZ =  tf.convert_to_tensor(0, dtype=tf.int32)
  zero_activations(acts)
  while_parms = [iZ, indices, iZ, init_outs, acts]
  _, _, _, outs, _ = tf.while_loop(logits_continue, logits_batch_body, while_parms,
                                   parallel_iterations=1, name='batch_logits')
  lumpy_logits = tf.map_fn(activation_to_logits, outs, name='raw_logits')
  logits = tf.squeeze(lumpy_logits, [2], name='logits')
  return logits
Example #12
    def decoding_loop(self, train_mode: bool, sample: bool = False,
                      temperature: float = 1) -> LoopState:
        """Run the decoding while loop.

        Calls get_initial_loop_state and constructs tf.while_loop
        with the continuation criterion returned from loop_continue_criterion,
        and the body function returned from get_body.

        After finishing the tf.while_loop, it calls finalize_loop
        to further postprocess the final decoder loop state (usually
        by stacking Tensors containing decoding histories).

        Arguments:
            train_mode: Boolean flag, telling whether this is
                a training run.
            sample: Boolean flag, telling whether we should sample
                the output symbols from the output distribution instead
                of using argmax or gold data.
            temperature: float value specifying the softmax temperature
        """
        initial_loop_state = self.get_initial_loop_state()
        with tf.control_dependencies([self.decoding_w, self.decoding_b]):
            final_loop_state = tf.while_loop(
                self.loop_continue_criterion,
                self.get_body(train_mode, sample, temperature),
                initial_loop_state,
                shape_invariants=tf.contrib.framework.nest.map_structure(
                    get_state_shape_invariants, initial_loop_state))
        self.finalize_loop(final_loop_state, train_mode)

        return final_loop_state
Example #13
  def _build_eval_specific_graph(self, iterator, model_fn, params,
                                 record_files_placeholder, num_eval_steps):
    """Builds the part of the model that is specific to evaluation."""

    def build():
      features = iterator.get_next()
      estimator_spec = model_fn(
          features, None, tf.estimator.ModeKeys.EVAL, params)
      run_model_op = tf.group(*(update_op for _, update_op in
                                estimator_spec.eval_metric_ops.values()))
      eval_metric_tensors = {k: tensor for (k, (tensor, _))
                             in estimator_spec.eval_metric_ops.items()}
      return run_model_op, estimator_spec.loss, eval_metric_tensors

    if self._use_while_loop:
      def body(i):
        run_model_op_single_step, _, _ = build()
        with tf.control_dependencies([run_model_op_single_step]):
          return i + 1

      run_model_op = tf.while_loop(lambda i: i < num_eval_steps, body, [0],
                                   parallel_iterations=1)
      loss = None
      eval_metric_tensors = {
          "HR": self._compute_metric_mean(rconst.HR_METRIC_NAME),
          "NDCG": self._compute_metric_mean(rconst.NDCG_METRIC_NAME),
      }
    else:
      run_model_op, loss, eval_metric_tensors = build()

    metric_initializer = tf.variables_initializer(
        tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))
    return self._EvalModelProperties(
        record_files_placeholder, iterator, loss, params["eval_batch_size"],
        run_model_op, eval_metric_tensors, metric_initializer)
Example #14
  def _sample_n(self, n, seed=None):
    """Sample `n` draws from the DP. Draws from the base
    distribution are memoized across `n` and across calls to
    `sample()`.

    Draws from the base distribution are not memoized across the batch
    shape, i.e., each independent DP in the batch shape has its own
    memoized samples.

    Returns:
      tf.Tensor.
      A `tf.Tensor` of shape `[n] + batch_shape + event_shape`,
      where `n` is the number of samples for each DP,
      `batch_shape` is the number of independent DPs, and
      `event_shape` is the shape of the base distribution.

    #### Notes

    The implementation has one inefficiency, which is that it draws
    (batch_shape,) samples from the base distribution when adding a
    new persistent state. Ideally, we would only draw new samples for
    those in the loop which require it.
    """
    if seed is not None:
      raise NotImplementedError("seed is not implemented.")

    batch_shape = self.batch_shape.as_list()
    event_shape = self.event_shape.as_list()
    rank = 1 + len(batch_shape) + len(event_shape)
    # Note this is for scoping within the while loop's body function.
    self._temp_scope = [n, batch_shape, event_shape, rank]

    # Start at the beginning of the stick, i.e. the k'th index
    k = tf.constant(0)

    # Define boolean tensor. It is True for samples that require continuing
    # the while loop and False for samples that can receive their base
    # distribution (coin lands heads). Also note that we need one bool for
    # each sample
    bools = tf.ones([n] + batch_shape, dtype=tf.bool)

    # Initialize all samples as zero, they will be overwritten in any case
    draws = tf.zeros([n] + batch_shape + event_shape, dtype=self.base.dtype)

    # Calculate shape invariance conditions for locs and probs as these
    # can change shape between loop iterations.
    locs_shape = tf.TensorShape([None])
    probs_shape = tf.TensorShape([None])
    if len(self.locs.shape) > 1:
      locs_shape = locs_shape.concatenate(self.locs.shape[1:])
      probs_shape = probs_shape.concatenate(self.probs.shape[1:])

    # While we have not broken enough sticks, keep sampling.
    _, _, self._locs, self._probs, samples = tf.while_loop(
        self._sample_n_cond, self._sample_n_body,
        loop_vars=[k, bools, self.locs, self.probs, draws],
        shape_invariants=[
            k.shape, bools.shape, locs_shape, probs_shape, draws.shape])

    return samples
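shape_invariants is required whenever a loop variable changes shape between iterations, as locs and probs do here. A minimal standalone sketch (TF 1.x) of a vector growing inside a loop:

import tensorflow as tf

i0 = tf.constant(0)
v0 = tf.zeros([1])
_, v = tf.while_loop(
    lambda i, v: i < 5,
    lambda i, v: [i + 1, tf.concat([v, tf.zeros([1])], axis=0)],  # v grows
    [i0, v0],
    shape_invariants=[i0.get_shape(), tf.TensorShape([None])])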
Example #15
def geometric(p):
  i = tf.constant(0)
  sample = tf.while_loop(
      cond=lambda i: tf.cast(1 - Bernoulli(probs=p), tf.bool),
      body=lambda i: i + 1,
      loop_vars=[i])
  return sample
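Bernoulli here comes from Edward; a plain-TF variant of the same sampler (a sketch, assuming TF 1.x) draws a fresh uniform inside the condition on every iteration:

import tensorflow as tf

def geometric_tf(p):
    # Count failures before the first success of a Bernoulli(p) coin.
    return tf.while_loop(
        cond=lambda i: tf.random_uniform([]) >= p,  # keep looping on failure
        body=lambda i: i + 1,
        loop_vars=[tf.constant(0)])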
Example #16
    def compute_states(self,emb,idx_batch=0):


        num_leaves = tf.squeeze(tf.gather(self.num_leaves,idx_batch))
        #num_leaves=tf.Print(num_leaves,[num_leaves])
        n_inodes = tf.gather(self.n_inodes,idx_batch)
        #embx=tf.gather(emb,tf.range(num_leaves))
        embx=tf.gather(tf.gather(emb,idx_batch),tf.range(num_leaves))
        #treestr=self.treestr#tf.gather(self.treestr,tf.range(self.n_inodes))
        treestr=tf.gather(tf.gather(self.treestr,idx_batch),tf.range(n_inodes))
        leaf_hc = self.process_leafs(embx)
        leaf_h,leaf_c=tf.split(1,2,leaf_hc)


        node_h=tf.identity(leaf_h)
        node_c=tf.identity(leaf_c)

        idx_var=tf.constant(0) #tf.Variable(0,trainable=False)

        with tf.variable_scope("Composition",reuse=True):

            cW = tf.get_variable("cW",[self.degree*self.hidden_dim,(self.degree+3)*self.hidden_dim])
            cb = tf.get_variable("cb",[4*self.hidden_dim])
            bu,bo,bi,bf=tf.split(0,4,cb)

            def _recurrence(node_h,node_c,idx_var):
                node_info=tf.gather(treestr,idx_var)

                child_h=tf.gather(node_h,node_info)
                child_c=tf.gather(node_c,node_info)

                flat_ = tf.reshape(child_h,[-1])
                tmp=tf.matmul(tf.expand_dims(flat_,0),cW)
                u,o,i,fl,fr=tf.split(1,5,tmp)

                i=tf.nn.sigmoid(i+bi)
                o=tf.nn.sigmoid(o+bo)
                u=tf.nn.tanh(u+bu)
                fl=tf.nn.sigmoid(fl+bf)
                fr=tf.nn.sigmoid(fr+bf)

                f=tf.concat(0,[fl,fr])
                c = i * u + tf.reduce_sum(f*child_c,[0])
                h = o * tf.nn.tanh(c)

                node_h = tf.concat(0,[node_h,h])

                node_c = tf.concat(0,[node_c,c])

                idx_var=tf.add(idx_var,1)

                return node_h,node_c,idx_var
            loop_cond = lambda a1,b1,idx_var: tf.less(idx_var,n_inodes)

            loop_vars=[node_h,node_c,idx_var]
            node_h,node_c,idx_var=tf.while_loop(loop_cond, _recurrence,
                                                loop_vars,parallel_iterations=10)

            return node_h
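This snippet predates TF 1.0, where tf.split and tf.concat moved the axis argument. For reference, the TF 1.x equivalents of the calls above (same behavior, new argument order):

leaf_h, leaf_c = tf.split(leaf_hc, 2, axis=1)  # was tf.split(1, 2, leaf_hc)
u, o, i, fl, fr = tf.split(tmp, 5, axis=1)     # was tf.split(1, 5, tmp)
f = tf.concat([fl, fr], axis=0)                # was tf.concat(0, [fl, fr])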
Example #17
def while_module_fn():
  """Compute x^n with while_loop."""
  x = tf.placeholder(dtype=tf.float32, name="x", shape=[])
  n = tf.placeholder(dtype=tf.int32, name="n")
  _, pow_x = tf.while_loop(
      lambda i, ix: i < n, lambda i, ix: [tf.add(i, 1), ix * x],
      [tf.constant(0), tf.constant(1.0)])
  hub.add_signature(inputs={"x": x, "n": n}, outputs=pow_x)
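A hedged usage sketch for the module function above, assuming it is exported through tensorflow_hub's create_module_spec:

import tensorflow as tf
import tensorflow_hub as hub

spec = hub.create_module_spec(while_module_fn)
pow_module = hub.Module(spec)
y = pow_module({"x": 2.0, "n": 5})
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y))  # 32.0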
Example #18
        def fn1(a, b):
            i = tf.constant(0)

            def cd(i): return tf.less(i, 10)

            def bd(i): return tf.add(i, 1)
            res = tf.while_loop(cd, bd, [i])
            return tf.multiply(tf.add(20, res), 10)
Example #19
def custom_dynamic_rnn(cell, inputs, inputs_len, initial_state=None):
    """
    Implements a dynamic rnn that can store scores in the pointer network.
    We implement this because the raw_rnn and dynamic_rnn functions in TensorFlow
    seem to require that the hidden unit and memory unit have the same dimension,
    so we cannot store the scores directly in the hidden unit.
    Args:
        cell: RNN cell
        inputs: the input sequence to rnn
        inputs_len: valid length
        initial_state: initial_state of the cell
    Returns:
        outputs and state
    """
    batch_size = tf.shape(inputs)[0]
    max_time = tf.shape(inputs)[1]

    inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
    inputs_ta = inputs_ta.unstack(tf.transpose(inputs, [1, 0, 2]))
    emit_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)
    t0 = tf.constant(0, dtype=tf.int32)
    if initial_state is not None:
        s0 = initial_state
    else:
        s0 = cell.zero_state(batch_size, dtype=tf.float32)
    f0 = tf.zeros([batch_size], dtype=tf.bool)

    def loop_fn(t, prev_s, emit_ta, finished):
        """
        the loop function of rnn
        """
        cur_x = inputs_ta.read(t)
        scores, cur_state = cell(cur_x, prev_s)

        # copy through
        scores = tf.where(finished, tf.zeros_like(scores), scores)

        if isinstance(cell, tc.rnn.LSTMCell):
            cur_c, cur_h = cur_state
            prev_c, prev_h = prev_s
            cur_state = tc.rnn.LSTMStateTuple(tf.where(finished, prev_c, cur_c),
                                              tf.where(finished, prev_h, cur_h))
        else:
            cur_state = tf.where(finished, prev_s, cur_state)

        emit_ta = emit_ta.write(t, scores)
        finished = tf.greater_equal(t + 1, inputs_len)
        return [t + 1, cur_state, emit_ta, finished]

    _, state, emit_ta, _ = tf.while_loop(
        cond=lambda _1, _2, _3, finished: tf.logical_not(tf.reduce_all(finished)),
        body=loop_fn,
        loop_vars=(t0, s0, emit_ta, f0),
        parallel_iterations=32,
        swap_memory=False)

    outputs = tf.transpose(emit_ta.stack(), [1, 0, 2])
    return outputs, state
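A hedged usage sketch (shapes and cell chosen for illustration; tc is tensorflow.contrib, as in the snippet):

import tensorflow as tf
import tensorflow.contrib as tc

inputs = tf.placeholder(tf.float32, [None, None, 16])  # [batch, time, dim]
inputs_len = tf.placeholder(tf.int32, [None])
cell = tc.rnn.GRUCell(32)
outputs, state = custom_dynamic_rnn(cell, inputs, inputs_len)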
Example #20
 def construct_tensor_array(self):
     loop_condition = lambda tensor_array, i: \
                      tf.less(i, tf.squeeze(tf.shape(self.is_a_leaf)))
     #iterate over all leaves + composition
     tensor_array_op = tf.while_loop(cond=loop_condition,
                                     body=self._loop_over_tree,
                                     loop_vars=[self.tensor_array, 0],
                                     parallel_iterations=1)[0]
     return tensor_array_op
Example #21
def tf_orthogonalize(W, eps=1e-6, back_prop=False):
    p = W.get_shape()[0].value
    eye = tf.constant(np.eye(p, dtype=np.float32))
    def ortho_step(Q0):
        Q1 = Q0 / tf.sqrt(tf_max_abs_row_sum_norm(tf.matmul(Q0, Q0, transpose_b=True)))
        return 1.5*Q1 - 0.5*tf.matmul(Q1, tf.matmul(Q1, Q1, transpose_a=True))  # 1.5, not 3/2 (== 1 under Python 2)
    c = lambda Q: tf.greater(tf_max_abs_row_sum_norm(tf.matmul(Q, Q, transpose_a=True) - eye)/p, eps)
    b = lambda Q: ortho_step(Q)
    return tf.while_loop(c, b, [W], back_prop=back_prop)
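The loop is a Newton-Schulz-style orthogonalization, iterating Q <- 1.5*Q - 0.5*Q*(Q^T Q) until ||Q^T Q - I|| is small. tf_max_abs_row_sum_norm is referenced but not defined here; a plausible stand-in (an assumption) is the induced infinity-norm, followed by a quick convergence check:

import numpy as np
import tensorflow as tf

def tf_max_abs_row_sum_norm(A):
    # Assumed helper: induced infinity-norm, max_i sum_j |A_ij|.
    return tf.reduce_max(tf.reduce_sum(tf.abs(A), axis=1))

W = tf.constant(np.random.randn(4, 4).astype(np.float32))
Q = tf_orthogonalize(W)
err = tf.reduce_max(tf.abs(tf.matmul(Q, Q, transpose_a=True) - tf.eye(4)))
# After convergence, err should be on the order of eps.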
Example #22
def decode_for_training(cell, final_enc_state, labels):
    # [actual batch size, max seq len, decoder cell size]
    tf.assert_rank(labels, 3)

    cell_size = cell.output_size
    context = bridge(final_enc_state, cell_size)

    # [actual batch size, decoder cell size]
    assert context.get_shape().as_list() == [None, cell_size]

    # tf.shape(labels): tuple of 1 element
    batch_size = tf.shape(labels)[0]  # type: tf.Tensor of rank 0
    max_time_step = labels.get_shape()[1].value

    with tf.variable_scope('decoder'):
        def cond(loop_cnt, _, __, ___):
            return tf.less(loop_cnt, max_time_step)

        def body(loop_cnt, prev_label, prev_state, losses):
            cell_output, state = cell(prev_label, prev_state)
            output = decoder_projection(cell_output, cell_size)

            # cut out the `loop_cnt`-th label
            label = tf.reshape(
                tf.slice(labels, begin=[0, loop_cnt, 0], size=[batch_size, 1, cell_size]),
                shape=[batch_size, cell_size]
            )

            # loss for output past the last time step is calculated to be 0
            loss = tf.nn.softmax_cross_entropy_with_logits(
                logits=output,
                labels=label
            )

            return (
                tf.add(loop_cnt, 1),
                # pass the label as the output of the current step
                label,
                state,
                losses.write(loop_cnt, loss)
            )

        _, _, _, result_loss = tf.while_loop(
            cond,
            body,
            loop_vars=(
                tf.constant(0),
                context,
                cell.zero_state(batch_size=batch_size, dtype=tf.float32),
                tf.TensorArray(tf.float32, size=0, dynamic_size=True)
            ),
        )

        losses = tf.reduce_sum(result_loss.stack(), axis=0)
        time_steps = tf.reduce_sum(tf.reduce_sum(labels, axis=2), axis=1)
        return tf.div(losses, time_steps)
Example #23
  def remap_keys(sparse_tensor):
    # Current indices of our SparseTensor that we need to fix
    bad_indices = sparse_tensor.indices
    # Current values of our SparseTensor that we need to fix
    bad_values = sparse_tensor.values 
  
    # Group by the batch_indices and get the count for each  
    size = tf.segment_sum(data = tf.ones_like(bad_indices[:,0], dtype = tf.int64), segment_ids = bad_indices[:,0]) - 1
    # The number of batch_indices (this should be batch_size unless it is a partially full batch)
    length = tf.shape(size, out_type = tf.int64)[0]
    # Finds the cumulative sum which we can use for indexing later
    cum = tf.cumsum(size)
    # The offsets between each example in the batch due to our concatenation of the keys in the decode_example method
    length_range = tf.range(start = 0, limit = length, delta = 1, dtype = tf.int64)
    # Indices of the SparseTensor's indices member of the rows we added by the concatenation of our keys in the decode_example method
    cum_range = cum + length_range

    # The keys that we have extracted back out of our concatenated SparseTensor
    gathered_indices = tf.squeeze(tf.gather(bad_indices, cum_range)[:,1])

    # The enumerated row indices of the SparseTensor's indices member
    sparse_indices_range = tf.range(tf.shape(bad_indices, out_type = tf.int64)[0], dtype = tf.int64)

    # Here we want the row indices of the SparseTensor's indices member that belong to our actual data and not the concatenated rows
    # So we want to find the intersection of the two sets and then take the opposite of that
    x = sparse_indices_range
    s = cum_range

    # Number of multiples we are going to tile x, which is our sparse_indices_range
    tile_multiples = tf.concat([tf.ones(tf.shape(tf.shape(x)), dtype=tf.int64), tf.shape(s, out_type = tf.int64)], axis = 0)
    # Expands x, our sparse_indices_range, into a rank 2 tensor and then multiplies the rows by 1 (no copying) and the columns by the number of examples in the batch
    x_tile = tf.tile(tf.expand_dims(x, -1), tile_multiples)
    # Essentially a vectorized logical or, that we then negate
    x_not_in_s = ~tf.reduce_any(tf.equal(x_tile, s), -1)

    # The SparseTensor's indices that are our actual data by using the boolean_mask we just made above applied to the entire indices member of our SparseTensor
    selected_indices = tf.boolean_mask(tensor = bad_indices, mask = x_not_in_s, axis = 0)
    # Apply the same boolean_mask to the entire values member of our SparseTensor to get the actual values data
    selected_values = tf.boolean_mask(tensor = bad_values, mask = x_not_in_s, axis = 0)

    # Need to replace the first column of our selected_indices with keys, so we first need to tile our gathered_indices
    tiling = tf.tile(input = tf.expand_dims(gathered_indices[0], -1), multiples = tf.expand_dims(size[0] , -1))
    
    # We have to repeatedly apply the tiling to each example in the batch
    # Since it is jagged we cannot use tf.map_fn due to the stacking of the TensorArray, so we have to create our own custom version
    def loop_body(i, tensor_grow):
      return i + 1, tf.concat(values = [tensor_grow, tf.tile(input = tf.expand_dims(gathered_indices[i], -1), multiples = tf.expand_dims(size[i] , -1))], axis = 0)

    _, result = tf.while_loop(lambda i, tensor_grow: i < length, loop_body, [tf.constant(1, dtype = tf.int64), tiling])
    
    # Concatenate tiled keys with the 2nd column of selected_indices
    selected_indices_fixed = tf.concat([tf.expand_dims(result, -1), tf.expand_dims(selected_indices[:, 1], -1)], axis = 1)
    
    # Combine everything together back into a SparseTensor
    remapped_sparse_tensor = tf.SparseTensor(indices = selected_indices_fixed, values = selected_values, dense_shape = sparse_tensor.dense_shape)
    return remapped_sparse_tensor
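The custom loop implements a jagged repeat (key i repeated size[i] times). The same result can be had without a loop; a hedged sketch using a mask, for 1-D values and counts of equal length:

import tensorflow as tf

def tf_repeat(values, counts):
    # Repeat values[i] exactly counts[i] times, like np.repeat.
    max_c = tf.reduce_max(counts)
    cols = tf.range(max_c, dtype=counts.dtype)
    mask = cols[tf.newaxis, :] < counts[:, tf.newaxis]  # [n, max_c]
    tiled = tf.tile(values[:, tf.newaxis], [1, max_c])  # [n, max_c]
    return tf.boolean_mask(tiled, mask)                 # row-major flatten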
Example #24
def infer(encoder_cell, decoder_cell, sentences):
    tf.assert_rank(sentences, 3)
    assert sentences.get_shape()[0].value == 1  # batch size
    assert sentences.get_shape()[2].value == FEATURE_SIZE

    # stop generating output once the length reaches double the source length
    output_len_threshold = sentences.get_shape()[1].value * 2

    final_state_tuple = encode(sentences, encoder_cell, reuse=True)
    context = bridge(final_state_tuple.c, decoder_cell.output_size, reuse=True)

    with tf.variable_scope('decoder', reuse=True):
        def cond(loop_cnt, prev_out, _, __):
            less = tf.less(loop_cnt, output_len_threshold)
            is_regular_word = tf.reduce_any(
                tf.not_equal(
                    prev_out,
                    tf.one_hot([0], FEATURE_SIZE)  # <eos>
                )
            )

            return tf.logical_and(less, is_regular_word)

        def body(loop_cnt, prev_out, prev_state, result):
            cell_output, state = decoder_cell(prev_out, prev_state)
            num_outputs = decoder_cell.output_size
            output = decoder_projection(
                cell_output,
                num_outputs=num_outputs,
                reuse=True
            )
            arg_max = tf.arg_max(output, dimension=1)
            one_hot_output = tf.one_hot(
                indices=arg_max,
                depth=num_outputs
            )

            return (
                tf.add(loop_cnt, 1),
                one_hot_output,
                state,
                result.write(result.size(), tf.cast(one_hot_output, dtype=tf.int8))
            )

        _, __, ___, inferred = tf.while_loop(
            cond,
            body,
            loop_vars=(
                tf.constant(0),
                context,
                decoder_cell.zero_state(batch_size=1, dtype=tf.float32),
                tf.TensorArray(tf.int8, size=0, dynamic_size=True)
            )
        )

        return inferred.stack()
Example #25
def mat_pow(matrix, power):
    _, r = tf.while_loop(
        lambda i, _: i > 0,
        lambda i, c: [i-1, tf.matmul(c, matrix, transpose_a=True)],
        [power, matrix],
        parallel_iterations=1,
        back_prop=False,
        swap_memory=False
    )
    return r
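Note the transpose_a=True: each step multiplies by the transpose of the running product, so this matches an ordinary matrix power only in special cases. A conventional power, as a hedged sketch (square matrix and integer power >= 1 assumed):

import tensorflow as tf

def mat_pow_plain(matrix, power):
    _, r = tf.while_loop(
        lambda i, _: i > 1,                        # power - 1 multiplications
        lambda i, c: [i - 1, tf.matmul(c, matrix)],
        [power, matrix],
        back_prop=False)
    return r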
Example #26
def _minimize_in_graph(build_loss_fn, num_steps=200, optimizer=None):
  """Run an optimizer within the graph to minimize a loss function."""
  optimizer = tf.train.AdamOptimizer(0.1) if optimizer is None else optimizer
  def train_loop_body(step):
    train_op = optimizer.minimize(
        build_loss_fn if tf.executing_eagerly() else build_loss_fn())
    return tf.tuple([tf.add(step, 1)], control_inputs=[train_op])
  return tf.while_loop(cond=lambda step: step < num_steps,
                       body=train_loop_body,
                       loop_vars=[tf.constant(0)])
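A hedged usage sketch, assuming the TF 1.x graph-mode setup the function targets:

import tensorflow as tf

w = tf.Variable(5.0)
train = _minimize_in_graph(lambda: tf.square(w - 3.0), num_steps=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train)
    print(sess.run(w))  # close to 3.0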
Example #27
def csoftmax_for_slice(input):
    """ It is a implementation of the constrained softmax (csoftmax) for slice.
        Based on the paper:
        https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers" (page 4)
    Args:
        input: A list of [input tensor, cumulative attention].
    Returns:
        output: A list of [csoftmax results, masks]
    """

    [ten, u] = input

    shape_t = ten.shape
    shape_u = u.shape

    ten -= tf.reduce_mean(ten)
    q = tf.exp(ten)
    active = tf.ones_like(u, dtype=tf.int32)
    mass = tf.constant(0, dtype=tf.float32)
    found = tf.constant(True, dtype=tf.bool)

    def loop(q_, mask, mass_, found_):
        q_list = tf.dynamic_partition(q_, mask, 2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(q_)[0]), mask, 2)  # partition 0 where the
        #  mask is False, partition 1 where it is True

        p = q_list[1] * (1.0 - mass_) / tf.reduce_sum(q_list[1])
        p_new = tf.dynamic_stitch(condition_indices, [q_list[0], p])

        # condition verification and mask modification
        less_mask = tf.cast(tf.less(u, p_new), tf.int32)  # 0 when u is bigger than p, 1 when u is less than p
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(p_new)[0]), less_mask,
                                                 2)  # 0 when u is bigger than p, 1 when u is less than p

        split_p_new = tf.dynamic_partition(p_new, less_mask, 2)
        split_u = tf.dynamic_partition(u, less_mask, 2)

        alpha = tf.dynamic_stitch(condition_indices, [split_p_new[0], split_u[1]])
        mass_ += tf.reduce_sum(split_u[1])

        mask = mask * (tf.ones_like(less_mask) - less_mask)

        found_ = tf.cond(tf.equal(tf.reduce_sum(less_mask), 0),
                         lambda: False,
                         lambda: True)

        alpha = tf.reshape(alpha, q_.shape)

        return alpha, mask, mass_, found_

    (csoft, mask_, _, _) = tf.while_loop(cond=lambda _0, _1, _2, f: f,
                                         body=loop,
                                         loop_vars=(q, active, mass, found))

    return [csoft, mask_]
Example #28
def sample_sequence(*, hparams, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0):
    if start_token is None:
        assert context is not None, 'Specify exactly one of start_token and context!'
    else:
        assert context is None, 'Specify exactly one of start_token and context!'
        context = tf.fill([batch_size, 1], start_token)

    def step(hparams, tokens, past=None):
        lm_output = model.model(hparams=hparams, X=tokens, past=past, reuse=tf.AUTO_REUSE)

        logits = lm_output['logits'][:, :, :hparams.n_vocab]
        presents = lm_output['present']
        presents.set_shape(model.past_shape(hparams=hparams, batch_size=batch_size))
        return {
            'logits': logits,
            'presents': presents,
        }

    with tf.name_scope('sample_sequence'):
        # Don't feed the last context token -- leave that to the loop below
        # TODO: Would be slightly faster if we called step on the entire context,
        # rather than leaving the last token transformer calculation to the while loop.
        context_output = step(hparams, context[:, :-1])

        def body(past, prev, output):
            next_outputs = step(hparams, prev[:, tf.newaxis], past=past)
            logits = next_outputs['logits'][:, -1, :]  / tf.to_float(temperature)
            logits = top_k_logits(logits, k=top_k)
            samples = tf.multinomial(logits, num_samples=1, output_dtype=tf.int32)
            return [
                tf.concat([past, next_outputs['presents']], axis=-2),
                tf.squeeze(samples, axis=[1]),
                tf.concat([output, samples], axis=1),
            ]

        def cond(*args):
            return True

        _, _, tokens = tf.while_loop(
            cond=cond, body=body,
            maximum_iterations=length,
            loop_vars=[
                context_output['presents'],
                context[:, -1],
                context,
            ],
            shape_invariants=[
                tf.TensorShape(model.past_shape(hparams=hparams, batch_size=batch_size)),
                tf.TensorShape([batch_size]),
                tf.TensorShape([batch_size, None]),
            ],
            back_prop=False,
        )

        return tokens
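Because cond always returns True, termination comes entirely from maximum_iterations (available on tf.while_loop from TF 1.5). A minimal sketch of that pattern:

import tensorflow as tf

out = tf.while_loop(lambda x: True,          # never stops on its own
                    lambda x: x + 1,
                    [tf.constant(0)],
                    maximum_iterations=10)   # out == 10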
Example #29
  def _compute_backwards_messages(self, xs):
    """Computes the backwards messages used in smoothing."""
    batch_size = tf.shape(xs)[1]
    num_xs = tf.shape(xs)[0]
    until_t = self.num_timesteps - num_xs
    xs = tf.TensorArray(dtype=xs.dtype,
                        size=num_xs,
                        dynamic_size=False,
                        clear_after_read=True).unstack(xs)
    messages_ta = tf.TensorArray(dtype=xs.dtype,
                                 size=num_xs,
                                 dynamic_size=False,
                                 clear_after_read=False)

    def compute_message(t, prev_mean, prev_prec, messages_ta):
      """Computes one step of the backwards messages."""
      z_var = self.transition_variances.read(t)
      w_z = self.transition_weights.read(t-1)
      x_var = self.emission_variances.read(t)
      w_x = self.emission_weights.read(t)
      cur_x = xs.read(t - until_t)

      # If it isn't the first message, add the terms from the transition.
      def transition_term():
        return (tf.square(self.transition_weights.read(t))/
                self.transition_variances.read(t+1))

      unary_prec = 1/z_var + tf.square(w_x)/x_var
      unary_prec += tf.cond(tf.less(t, self.num_timesteps-1),
                            transition_term, lambda: 0.)

      unary_mean = (w_x / x_var) * cur_x
      pairwise_prec = w_z / z_var

      next_prec = -tf.square(pairwise_prec)/(unary_prec + prev_prec)
      next_mean = (pairwise_prec * (unary_mean + prev_mean) /
                   (unary_prec + prev_prec))
      next_prec = tf.reshape(next_prec, [batch_size])
      next_mean = tf.reshape(next_mean, [batch_size])
      messages_ta = messages_ta.write(t - until_t,
                                      tf.stack([next_mean, next_prec]))
      return t-1, next_mean, next_prec, messages_ta

    def pred(t, *unused_args):
      return tf.greater_equal(t, until_t)

    init_prec = tf.zeros([batch_size], dtype=xs.dtype)
    init_mean = tf.zeros([batch_size], dtype=xs.dtype)
    t0 = tf.constant(self.num_timesteps - 1, dtype=tf.int32)

    outs = tf.while_loop(pred, compute_message,
                         (t0, init_mean, init_prec, messages_ta))
    messages = outs[-1]
    return messages
Example #30
 def _scan_body(args_list, num_steps):
   """Closure which implements `tf.scan` body."""
   current_state, previous_kernel_results = args_list
   return tf.while_loop(
       cond=lambda it_, *args: it_ < num_steps,
       body=lambda it_, cs, pkr: [it_ + 1] + list(kernel.one_step(cs, pkr)),
       loop_vars=[
           np.int32(0),  # it_
           current_state,
           previous_kernel_results,
       ],
       parallel_iterations=parallel_iterations)[1:]  # Lop off `it_`.
Example #31
    def graph(self, x_tensor):
        nb_classes = self.model.output_dim
        nb_features = self.model.input_dim
        # Compute our initial search domain. We optimize the initial search domain
        # by removing all features that are already at their maximum values (if
        # increasing input features).
        search_domain = tf.reshape(
            tf.cast(x_tensor < self.clip_max_input, tf.float32),
            [-1, nb_features])

        # y_in_init = tf.reshape(tf.one_hot(self.targed_y_input, depth=nb_classes), [-1, nb_classes])

        # Loop variables
        # x_in: the tensor that holds the latest adversarial outputs that are in
        #       progress.
        # y_in: the tensor for target labels
        # domain_in: the tensor that holds the latest search domain
        # cond_in: the boolean tensor to show if more iteration is needed for
        #          generating adversarial samples
        def _cond(x_in, domain_in, i, cond_in):
            # Repeat the loop until we have achieved misclassification or
            # reached the maximum number of iterations
            return tf.logical_and(tf.less(i, self.iterations), cond_in)

        def _body(x_in, domain_in, i, cond_in):
            logits = self.model.get_logits(x_in)
            preds = tf.nn.softmax(logits)
            preds_onehot = tf.one_hot(tf.argmax(preds, axis=1),
                                      depth=nb_classes)

            # get corresponding derivatives
            derivatives, = tf.gradients(tf.reduce_mean(
                preds[:, 0]), x_in)  # malicious samples are labeled as '1'

            # Remove the already-used input features from the search space
            # Subtract 2 times the maximum value from those values so that
            # they won't be picked later
            increase_coef = 2 * tf.cast(tf.equal(domain_in, 0), tf.float32)

            derivatives -= increase_coef \
                          * tf.reduce_max(tf.abs(derivatives), axis=1, keepdims=True)
            derivatives = tf.reshape(derivatives, shape=[-1, nb_features])

            # Create a mask to only keep features that match conditions
            scores_mask = derivatives > 0

            # Extract the best malware feature
            scores = tf.cast(scores_mask, tf.float32) * derivatives
            best = tf.argmax(scores, axis=1)
            p1_one_hot = tf.one_hot(best, depth=nb_features)

            # Check if more modification is needed for each sample
            # mod_not_done = tf.equal(tf.reduce_sum(y_in * preds_onehot, axis=1), 0)
            mod_not_done = tf.equal(preds_onehot[:, 0], 0)

            if self.force_iteration:
                cond = (tf.reduce_sum(
                    domain_in * tf.cast(scores_mask, tf.float32), axis=1) >= 1)
            else:
                cond = mod_not_done & (tf.reduce_sum(
                    domain_in * tf.cast(scores_mask, tf.float32), axis=1) >= 1)

            cond_float = tf.reshape(tf.cast(cond, tf.float32), shape=[-1, 1])
            to_mod = p1_one_hot * cond_float

            domain_out = domain_in - to_mod

            to_mod_reshape = tf.reshape(to_mod,
                                        shape=([-1] +
                                               x_in.shape[1:].as_list()))
            x_out = tf.minimum(x_in + to_mod_reshape, self.scaled_clip_max)

            # Increment the iterator, and check if all misclassifications are done
            i_out = tf.add(i, 1)
            cond_out = tf.reduce_any(cond)

            return x_out, domain_out, i_out, cond_out

        x_adv_batch, _2, _3, _4 = tf.while_loop(
            _cond, _body, [x_tensor, search_domain, 0, True])

        return x_adv_batch
Example #32
    def build_sampling_graph(self, config, tokenizer, max_length=12):

        if self.samples is not None:
            return

        # define stopping conditions
        def stop_cond(states_c, states_h, tokens, seq_length, stop_indicator):

            has_unfinished_dialogue = tf.less(
                tf.shape(tf.where(stop_indicator))[0],
                tf.shape(stop_indicator)
                [0])  # TODO use "any" instead of checking shape
            has_not_reach_size_limit = tf.less(tf.reduce_max(seq_length),
                                               max_length)

            return tf.logical_and(has_unfinished_dialogue,
                                  has_not_reach_size_limit)

        # define one_step sampling
        with tf.variable_scope(self.scope_name):
            stop_token = tf.constant(tokenizer.stop_token)
            stop_dialogue_token = tf.constant(tokenizer.stop_dialogue)

        def step(prev_state_c, prev_state_h, tokens, seq_length,
                 stop_indicator):
            input = tf.gather(tokens, tf.shape(tokens)[0] - 1)

            # Look for newly finished dialogues
            is_stop_token = tf.equal(input, stop_token)
            is_stop_dialogue_token = tf.equal(input, stop_dialogue_token)
            is_stop = tf.logical_or(is_stop_token, is_stop_dialogue_token)
            stop_indicator = tf.logical_or(
                stop_indicator, is_stop)  # flag newly finished dialogues

            # increment seq_length when the dialogue is not over
            seq_length = tf.where(stop_indicator, seq_length,
                                  tf.add(seq_length, 1))

            # compute the next words. TODO: factorize with qgen.. but how?!
            with tf.variable_scope(self.scope_name, reuse=True):
                word_emb = utils.get_embedding(
                    input,
                    n_words=tokenizer.no_words,
                    n_dim=config['word_embedding_size'],
                    scope="word_embedding",
                    reuse=True)

                inp_emb = tf.concat([word_emb, self.image_emb], axis=1)
                with tf.variable_scope("word_decoder"):
                    lstm_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
                        config['num_lstm_units'],
                        layer_norm=False,
                        dropout_keep_prob=1.0,
                        reuse=True)

                    state = tf.contrib.rnn.LSTMStateTuple(c=prev_state_c,
                                                          h=prev_state_h)
                    out, state = lstm_cell(inp_emb, state)

                    # store/update the state when the dialogue is not finished (after sampling the <?> token)
                    cond = tf.greater_equal(
                        seq_length, tf.subtract(tf.reduce_max(seq_length), 1))
                    state_c = tf.where(cond, state.c, prev_state_c)
                    state_h = tf.where(cond, state.h, prev_state_h)

                with tf.variable_scope('decoder_output'):
                    output = utils.fully_connected(state_h,
                                                   tokenizer.no_words,
                                                   reuse=True)

                    sampled_tokens = tf.cond(
                        self.greedy, lambda: tf.argmax(output, 1),
                        lambda: tf.reshape(tf.multinomial(output, 1), [-1]))
                    sampled_tokens = tf.cast(sampled_tokens, tf.int32)

            tokens = tf.concat(
                [tokens, tf.expand_dims(sampled_tokens, 0)],
                axis=0)  # check axis!

            return state_c, state_h, tokens, seq_length, stop_indicator

        # initialize sequences
        batch_size = tf.shape(self.seq_length)[0]
        seq_length = tf.fill([batch_size], 0)
        stop_indicator = tf.fill([batch_size], False)

        transpose_dialogue = tf.transpose(self.dialogues, perm=[1, 0])

        self.samples = tf.while_loop(
            stop_cond,
            step, [
                self.decoder_zero_state_c, self.decoder_zero_state_h,
                transpose_dialogue, seq_length, stop_indicator
            ],
            shape_invariants=[
                self.decoder_zero_state_c.get_shape(),
                self.decoder_zero_state_h.get_shape(),
                tf.TensorShape([None, None]),
                seq_length.get_shape(),
                stop_indicator.get_shape()
            ])
Example #33
    def __call__(self,
                 c_x,
                 n,
                 d,
                 k,
                 combination,
                 eps_passed,
                 sample,
                 scope=None):
        '''
        Args:
            c_x - tensor to be filled with the random-walk property
            n - number of nodes
            d - number of features in the feature matrix
            k - length of the random walk (defaults to None)
        '''
        with tf.variable_scope(scope or type(self).__name__):
            '''
            c_x = input_layer(c_x, self.adj, self.weight, self.features, k, n, d, activation=None, batch_norm=False, istrain=False, scope=None)
            c_x = tf.Print(c_x,[c_x], message="my c_x-values:")
	    
            with tf.variable_scope("Prior"):
                prior_mu = tf.zeros(shape=[n,self.z_dim,1],name="prior_mu") 
                prior_sigma = tf.matrix_diag(tf.ones(shape=[n,self.z_dim]),name="prior_sigma")

            with tf.variable_scope("Encoder"):
                list_cx = tf.unstack(c_x)
                # output will be of shape n X kd
                enc_hidden = fc_layer(tf.concat(list_cx,1), k*d, activation=tf.nn.relu, scope="hidden")
                #output will be of shape n X 5 (this is a hyper paramater)
                enc_mu = fc_layer(enc_hidden, self.z_dim,activation=tf.nn.softplus, scope='mu')
                enc_mu = tf.reshape(enc_mu, [n,self.z_dim,1])
                enc_mu = tf.Print(enc_mu,[enc_mu], message="my enc_mu-values:")

                # output will be n X 1 then convert that to a diagonal matrix
                debug_sigma = fc_layer(enc_hidden, self.z_dim, activation=tf.nn.softplus, scope='sigma')
                debug_sigma = tf.Print(debug_sigma,[debug_sigma], message="my debug_sigma-values:")
                enc_sigma = tf.matrix_diag(debug_sigma, name="enc_sigma")
                enc_sigma = tf.Print(enc_sigma,[enc_sigma], message="my enc_sigma-values:")

            # Random sampling ~ N(0, 1)
            eps = eps_passed
            temp_stack = []
        
            for i in range(n):
                temp_stack.append(tf.matmul(enc_sigma[i], eps[i]))
	    
            z = tf.add(enc_mu, tf.stack(temp_stack))
                
            # While we are trying to sample some edges, we sample Z from prior
            '''
            eps = eps_passed
            #if sample:
            z = eps

            z = tf.Print(z, [z], message="my z-values:")
            with tf.variable_scope("Poisson"):
                #nodes = tf.reshape(self.node_count, [-1, self.z_dim])
                #lambda_node = fc_layer(nodes, 1, activation=tf.nn.softplus, scope = "node")
                lambda_node = fc_layer(tf.transpose(self.node_count),
                                       1,
                                       activation=tf.nn.softplus,
                                       scope="node")
                z_reshape = tf.reshape(z, [-1, self.z_dim])
                z_reshape = tf.Print(z_reshape, [z_reshape],
                                     message="my z-reshape-values:")
                n_cast = tf.fill([1, self.z_dim], tf.cast(n, dtype=tf.float32))
                z_concat = tf.concat(values=(z_reshape, n_cast), axis=0)
                print("Debug concat z", z_concat.get_shape())
                lambda_edge = fc_layer(z_concat,
                                       1,
                                       activation=tf.nn.softplus,
                                       scope="edge")

            def loop_cond(t, k, z, z_stack, z_stack_weight):
                N = tf.stack([tf.shape(t)[0]])[0]
                #N= tf.Print(N, [N], message="my N-values")
                return tf.less(k, N)

            def body(t, k, z, z_stack, z_stack_weight):
                # TODO: sanity-check this once
                print("Debug shape", t[k].get_shape(), tf.shape(z))
                print("Debug shape gather",
                      tf.gather(z, t[k][0]).get_shape(), z.get_shape())
                dots = tf.concat(values=([tf.gather(z, t[k][0])],
                                         [tf.gather(z, t[k][1])]),
                                 axis=1)
                print("Debug shape dots", dots.get_shape())
                for j in range(self.bin_dim):
                    m = np.zeros((1, self.bin_dim))
                    m[0][j] = 1
                    temp = tf.concat(values=(dots, tf.cast(m, tf.float32)),
                                     axis=1)
                    z_stack_weight = tf.concat(values=(z_stack_weight, temp),
                                               axis=0)
                return (t, k + 1, z, tf.concat(values=(z_stack, dots),
                                               axis=0), z_stack_weight)

            k = tf.constant(0)
            z_new = tf.reshape(z, [n, self.z_dim])
            print("Debug z shape", z_new.get_shape())
            dec_hidden = []
            weight = []
            #label = []

            with tf.variable_scope("Decoder", reuse=tf.AUTO_REUSE):
                z_stack_label = []
                for u in range(n):
                    for j in range(4):
                        # we considered 4 types of atom C, H, O, N
                        m = np.zeros((1, 4))
                        m[0][j] = 1
                        z_stack_label.append(
                            tf.concat(values=(tf.transpose(z[u]), m),
                                      axis=1)[0])
                label = fc_layer(tf.stack(z_stack_label),
                                 1,
                                 activation=tf.nn.softplus,
                                 scope="label")

            for i in range(combination):
                z_stack = tf.constant(0,
                                      shape=[1, 2 * self.z_dim],
                                      dtype=tf.float32)
                z_stack_weight = tf.constant(
                    0,
                    shape=[1, 2 * self.z_dim + self.bin_dim],
                    dtype=tf.float32)
                t = self.edges[i]
                _, _, _, z_stack, z_stack_weight = tf.while_loop(
                    loop_cond,
                    body, [t, k, z_new, z_stack, z_stack_weight],
                    shape_invariants=[
                        t.get_shape(),
                        k.get_shape(),
                        z_new.get_shape(),
                        tf.TensorShape([None, 2 * self.z_dim]),
                        tf.TensorShape([None, 2 * self.z_dim + self.bin_dim])
                    ])
                with tf.variable_scope("Decoder", reuse=tf.AUTO_REUSE):

                    dec_hidden.append(
                        fc_layer(z_stack[1:],
                                 1,
                                 activation=tf.nn.softplus,
                                 scope="hidden"))
                    weight.append(
                        fc_layer(z_stack_weight[1:],
                                 1,
                                 activation=tf.nn.softplus,
                                 scope="marker"))
                    #label.append(fc_layer(tf.stack(z_stack_label), 1, activation=tf.nn.relu, scope = "label"))

        return (c_x, dec_hidden, z, weight, label, tf.reduce_mean(lambda_node),
                tf.reduce_mean(lambda_edge))
Example #34
def construct_beam_search_functions(models, beam_size):
    """
    Strategy:
        compute the log_probs - same as with sampling
        for sentences that are ended set log_prob(<eos>)=0, log_prob(not eos)=-inf
        add previous cost to log_probs
        run top k -> (idxs, values)
        use values as new costs
        divide idxs by num_classes to get state_idxs
        use gather to get new states
        take the remainder of idxs after num_classes to get new_predicted words
    """

    # Get some parameter settings.  For ensembling, some parameters are required
    # to be consistent across all models but others are not.  In the former
    # case, we assume that consistency has already been checked.  For the
    # parameters that are allowed to vary across models, the first model's
    # settings take precedence.
    decoder = models[0].decoder
    batch_size = tf.shape(decoder.init_state)[0]
    embedding_size = decoder.embedding_size
    translation_maxlen = decoder.translation_maxlen
    target_vocab_size = decoder.target_vocab_size
    high_depth = 0 if decoder.high_gru_stack is None \
                   else len(decoder.high_gru_stack.grus)

    # Initialize loop variables
    i = tf.constant(0)
    init_ys = -tf.ones(dtype=tf.int32, shape=[batch_size])
    init_embs = [tf.zeros(dtype=tf.float32, shape=[batch_size,embedding_size])] * len(models)

    f_min = numpy.finfo(numpy.float32).min
    init_cost = [0.] + [f_min]*(beam_size-1) # to force first top k are from first hypo only
    init_cost = tf.constant(init_cost, dtype=tf.float32)
    init_cost = tf.tile(init_cost, multiples=[batch_size // beam_size])  # integer division: batch_size is a multiple of beam_size
    ys_array = tf.TensorArray(
                dtype=tf.int32,
                size=translation_maxlen,
                clear_after_read=True,
                name='y_sampled_array')
    p_array = tf.TensorArray(
                dtype=tf.int32,
                size=translation_maxlen,
                clear_after_read=True,
                name='parent_idx_array')
    init_base_states = [m.decoder.init_state for m in models]
    init_high_states = [[m.decoder.init_state] * high_depth for m in models]
    init_loop_vars = [i, init_base_states, init_high_states, init_ys, init_embs,
                      init_cost, ys_array, p_array]

    # Prepare cost matrix for completed sentences -> Prob(EOS) = 1 and Prob(x) = 0
    eos_log_probs = tf.constant(
                        [[0.] + ([f_min]*(target_vocab_size - 1))],
                        dtype=tf.float32)
    eos_log_probs = tf.tile(eos_log_probs, multiples=[batch_size,1])

    def cond(i, prev_base_states, prev_high_states, prev_ys, prev_embs, cost, ys_array, p_array):
        return tf.logical_and(
                tf.less(i, translation_maxlen),
                tf.reduce_any(tf.not_equal(prev_ys, 0)))

    def body(i, prev_base_states, prev_high_states, prev_ys, prev_embs, cost, ys_array, p_array):
        # get predictions from all models and sum the log probs
        sum_log_probs = None
        base_states = [None] * len(models)
        high_states = [None] * len(models)
        for j in range(len(models)):
            d = models[j].decoder
            states1 = d.grustep1.forward(prev_base_states[j], prev_embs[j])
            att_ctx = d.attstep.forward(states1)
            base_states[j] = d.grustep2.forward(states1, att_ctx)
            if d.high_gru_stack is None:
                stack_output = base_states[j]
                high_states[j] = []
            else:
                if d.high_gru_stack.context_state_size == 0:
                    stack_output, high_states[j] = d.high_gru_stack.forward_single(
                        prev_high_states[j], base_states[j])
                else:
                    stack_output, high_states[j] = d.high_gru_stack.forward_single(
                        prev_high_states[j], base_states[j], context=att_ctx)
            logits = d.predictor.get_logits(prev_embs[j], stack_output,
                                            att_ctx, multi_step=False)
            log_probs = tf.nn.log_softmax(logits) # shape (batch, vocab_size)
            if sum_log_probs is None:
                sum_log_probs = log_probs
            else:
                sum_log_probs += log_probs

        # set cost of EOS to zero for completed sentences so that they are in top k
        # Need to make sure only EOS is selected because a completed sentence might
        # kill ongoing sentences
        sum_log_probs = tf.where(tf.equal(prev_ys, 0), eos_log_probs, sum_log_probs)

        all_costs = sum_log_probs + tf.expand_dims(cost, axis=1) # TODO: you might be getting NaNs here since -inf is in log_probs

        all_costs = tf.reshape(all_costs,
                               shape=[-1, target_vocab_size * beam_size])
        values, indices = tf.nn.top_k(all_costs, k=beam_size) #the sorted option is by default True, is this needed? 
        new_cost = tf.reshape(values, shape=[batch_size])
        offsets = tf.range(
                    start = 0,
                    delta = beam_size,
                    limit = batch_size,
                    dtype=tf.int32)
        offsets = tf.expand_dims(offsets, axis=1)
        survivor_idxs = (indices // target_vocab_size) + offsets  # integer division recovers the beam slot
        new_ys = indices % target_vocab_size
        survivor_idxs = tf.reshape(survivor_idxs, shape=[batch_size])
        new_ys = tf.reshape(new_ys, shape=[batch_size])
        new_embs = [m.decoder.y_emb_layer.forward(new_ys, factor=0) for m in models]
        new_base_states = [tf.gather(s, indices=survivor_idxs) for s in base_states]
        new_high_states = [[tf.gather(s, indices=survivor_idxs) for s in states] for states in high_states]
        new_cost = tf.where(tf.equal(new_ys, 0), tf.abs(new_cost), new_cost)

        ys_array = ys_array.write(i, value=new_ys)
        p_array = p_array.write(i, value=survivor_idxs)

        return i+1, new_base_states, new_high_states, new_ys, new_embs, new_cost, ys_array, p_array


    final_loop_vars = tf.while_loop(
                        cond=cond,
                        body=body,
                        loop_vars=init_loop_vars,
                        back_prop=False)
    i, _, _, _, _, cost, ys_array, p_array = final_loop_vars

    indices = tf.range(0, i)
    sampled_ys = ys_array.gather(indices)
    parents = p_array.gather(indices)
    cost = tf.abs(cost)  # to recover the negative log-likelihood
    return sampled_ys, parents, cost
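sampled_ys and parents are time-major: row t holds the tokens emitted at step t and the beam slots they extended. Recovering the actual hypotheses means walking the parent pointers backwards from the final step. A minimal numpy sketch of that backtrace (names illustrative; assumes, as in the function above, that 0 is the <eos>/pad id):

import numpy as np

def reconstruct_hypotheses(sampled_ys, parents):
    """sampled_ys, parents: [T, batch_size * beam_size] int arrays."""
    num_steps, num_slots = sampled_ys.shape
    hypos = [[] for _ in range(num_slots)]
    cur = np.arange(num_slots)  # each slot traces its own ancestry
    for t in range(num_steps - 1, -1, -1):
        for k in range(num_slots):
            hypos[k].append(sampled_ys[t, cur[k]])
            cur[k] = parents[t, cur[k]]
    return [list(reversed(h)) for h in hypos]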
Example No. 35
def run_episode():
    time_step = tf_env.reset()
    return tf.while_loop(cond=c, body=body, loop_vars=[time_step])
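The three names this fragment leans on (tf_env, c, body) are defined elsewhere. A hedged sketch of what they might look like for a batched TF-Agents-style environment, where TimeStep.is_last() flags episode termination (the fixed action below is purely illustrative, not a real policy):

def c(time_step):
    # keep looping until every episode in the batch has terminated
    return tf.logical_not(tf.reduce_all(time_step.is_last()))

def body(time_step):
    action = tf.zeros(shape=(), dtype=tf.int32)  # placeholder policy
    return [tf_env.step(action)]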
Example No. 36
def homography_adaptation(image, net, config):
    """Perfoms homography adaptation.
    Inference using multiple random warped patches of the same input image for robust
    predictions.
    Arguments:
        image: A `Tensor` with shape `[N, H, W, 1]`.
        net: A function that takes an image as input, performs inference, and outputs the
            prediction dictionary.
        config: A configuration dictionary containing optional entries such as the number
            of sampled homographies `'num'`, the aggregation method `'aggregation'`.
    Returns:
        A dictionary which contains the aggregated detection probabilities.
    """

    probs = net(image)['prob']
    counts = tf.ones_like(probs)
    images = image

    probs = tf.expand_dims(probs, axis=-1)
    counts = tf.expand_dims(counts, axis=-1)
    images = tf.expand_dims(images, axis=-1)

    shape = tf.shape(image)[1:3]
    config = dict_update(homography_adaptation_default_config, config)

    def step(i, probs, counts, images):
        # Sample image patch
        H = sample_homography(shape, **config['homographies'])
        H_inv = invert_homography(H)
        warped = H_transform(image, H, interpolation='BILINEAR')
        count = H_transform(tf.expand_dims(tf.ones(tf.shape(image)[:3]), -1),
                            H_inv, interpolation='NEAREST')[..., 0]

        # Predict detection probabilities
        warped_shape = tf.to_int32(
                tf.to_float(shape)*config['homographies']['patch_ratio'])
        input_warped = tf.image.resize_images(warped, warped_shape)
        prob = net(input_warped)['prob']
        prob = tf.image.resize_images(tf.expand_dims(prob, axis=-1), shape)[..., 0]
        prob_proj = H_transform(tf.expand_dims(prob, -1), H_inv,
                                interpolation='BILINEAR')[..., 0]

        probs = tf.concat([probs, tf.expand_dims(prob_proj, -1)], axis=-1)
        counts = tf.concat([counts, tf.expand_dims(count, -1)], axis=-1)
        images = tf.concat([images, tf.expand_dims(warped, -1)], axis=-1)
        return i + 1, probs, counts, images

    _, probs, counts, images = tf.while_loop(
            lambda i, p, c, im: tf.less(i, config['num'] - 1),
            step,
            [0, probs, counts, images],
            parallel_iterations=1,
            back_prop=False,
            shape_invariants=[
                    tf.TensorShape([]),
                    tf.TensorShape([None, None, None, None]),
                    tf.TensorShape([None, None, None, None]),
                    tf.TensorShape([None, None, None, 1, None])])

    counts = tf.reduce_sum(counts, axis=-1)
    max_prob = tf.reduce_max(probs, axis=-1)
    mean_prob = tf.reduce_sum(probs, axis=-1) / counts

    if config['aggregation'] == 'max':
        prob = max_prob
    elif config['aggregation'] == 'sum':
        prob = mean_prob  # count-normalised sum, i.e. the mean over valid warps
    else:
        raise ValueError('Unknown aggregation method: {}'.format(config['aggregation']))

    if config['filter_counts']:
        prob = tf.where(tf.greater_equal(counts, config['filter_counts']),
                        prob, tf.zeros_like(prob))

    return {'prob': prob, 'counts': counts,
            'mean_prob': mean_prob, 'input_images': images, 'H_probs': probs}  # debug
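A hedged usage sketch: the keys mirror the ones the function reads ('num', 'aggregation', 'filter_counts', and the 'homographies' sub-dictionary forwarded to sample_homography), but the values shown are illustrative, not the defaults from homography_adaptation_default_config:

config = {
    'num': 10,                # number of homographies, including the identity
    'aggregation': 'sum',     # 'max' or 'sum' (count-normalised mean)
    'filter_counts': 3,       # zero out pixels observed fewer than 3 times
    'homographies': {'patch_ratio': 0.85},
}
outputs = homography_adaptation(image, net, config)  # image: [N, H, W, 1]
prob = outputs['prob']  # aggregated detection probabilities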
Example No. 37
    def _sample_n(self, n, seed=None):
        seed = seed_stream.SeedStream(seed, salt='von_mises_fisher')
        # The sampling strategy relies on the fact that vMF variates are symmetric
        # about the mean direction. Accordingly, if we have a sampling strategy for
        # the away-from-mean angle, then we can uniformly sample the remaining
        # dimensions on the S^{dim-2} sphere, and rotate these samples from a
        # (1, 0, 0, ..., 0)-mode distribution into the target orientation.
        #
        # This is easy to imagine on the 1-sphere (S^1; in 2-D space): sample a
        # von-Mises distributed `x` value in [-1, 1], then uniformly select what
        # amounts to a "up" or "down" additional degree of freedom after unit
        # normalizing, followed by a final rotation to the desired mean direction
        # from a basis of (1, 0).
        #
        # On S^2 (in 3-D), selecting a vMF `x` identifies a circle in `yz` on the
        # unit sphere over which the distribution is uniform, in particular the
        # circle where x = \hat{x} intersects the unit sphere. We pick a point on
        # that circle, then rotate to the desired mean direction from a basis of
        # (1, 0, 0).
        event_dim = (tf.compat.dimension_value(self.event_shape[0])
                     or self._event_shape_tensor()[0])

        sample_batch_shape = tf.concat([[n], self._batch_shape_tensor()],
                                       axis=0)
        dim = tf.cast(event_dim - 1, self.dtype)
        if event_dim == 3:
            samples_dim0 = self._sample_3d(n, seed=seed)
        else:
            # Wood'94 provides a rejection algorithm to sample the x coordinate.
            # Wood'94 definition of b:
            # b = (-2 * kappa + tf.sqrt(4 * kappa**2 + dim**2)) / dim
            # https://stats.stackexchange.com/questions/156729 suggests:
            b = dim / (2 * self.concentration +
                       tf.sqrt(4 * self.concentration**2 + dim**2))
            # TODO(bjp): Integrate any useful numerical tricks from hyperspherical VAE
            #     https://github.com/nicola-decao/s-vae-tf/
            x = (1 - b) / (1 + b)
            c = self.concentration * x + dim * tf.math.log1p(-x**2)
            beta = beta_lib.Beta(dim / 2, dim / 2)

            def cond_fn(w, should_continue):
                del w
                return tf.reduce_any(input_tensor=should_continue)

            def body_fn(w, should_continue):
                z = beta.sample(sample_shape=sample_batch_shape, seed=seed())
                w = tf.where(should_continue,
                             (1 - (1 + b) * z) / (1 - (1 - b) * z), w)
                w = tf.debugging.check_numerics(w, 'w')
                should_continue = tf.logical_and(
                    should_continue,
                    self.concentration * w + dim * tf.math.log1p(-x * w) - c <
                    tf.math.log(
                        tf.random.uniform(sample_batch_shape,
                                          seed=seed(),
                                          dtype=self.dtype)))
                return w, should_continue

            w = tf.zeros(sample_batch_shape, dtype=self.dtype)
            should_continue = tf.ones(sample_batch_shape, dtype=tf.bool)
            samples_dim0 = tf.while_loop(cond=cond_fn,
                                         body=body_fn,
                                         loop_vars=(w, should_continue))[0]
            samples_dim0 = samples_dim0[..., tf.newaxis]
        if not self._allow_nan_stats:
            # Verify samples are w/in -1, 1, with useful error output tensors (top
            # value rather than all values).
            with tf.control_dependencies([
                    assert_util.assert_less_equal(
                        samples_dim0,
                        self.dtype.as_numpy_dtype(1.01),
                        data=[tf.nn.top_k(tf.reshape(samples_dim0, [-1]))[0]]),
                    assert_util.assert_greater_equal(
                        samples_dim0,
                        self.dtype.as_numpy_dtype(-1.01),
                        data=[
                            -tf.nn.top_k(tf.reshape(-samples_dim0, [-1]))[0]
                        ])
            ]):
                samples_dim0 = tf.identity(samples_dim0)
        samples_otherdims_shape = tf.concat(
            [sample_batch_shape, [event_dim - 1]], axis=0)
        unit_otherdims = tf.nn.l2_normalize(tf.random.normal(
            samples_otherdims_shape, seed=seed(), dtype=self.dtype),
                                            axis=-1)
        samples = tf.concat(
            [
                samples_dim0,  # we must avoid sqrt(1 - (>1)**2)
                tf.sqrt(tf.maximum(1 - samples_dim0**2, 0.)) * unit_otherdims
            ],
            axis=-1)
        samples = tf.nn.l2_normalize(samples, axis=-1)
        if not self._allow_nan_stats:
            samples = tf.debugging.check_numerics(samples, 'samples')

        # Runtime assert that samples are unit length.
        if not self._allow_nan_stats:
            worst, idx = tf.nn.top_k(
                tf.reshape(tf.abs(1 - tf.linalg.norm(tensor=samples, axis=-1)),
                           [-1]))
            with tf.control_dependencies([
                    assert_util.assert_near(
                        self.dtype.as_numpy_dtype(0),
                        worst,
                        data=[
                            worst, idx,
                            tf.gather(tf.reshape(samples, [-1, event_dim]),
                                      idx)
                        ],
                        atol=1e-4,
                        summarize=100)
            ]):
                samples = tf.identity(samples)
        # The samples generated are symmetric around a mode at (1, 0, 0, ...., 0).
        # Now, we move the mode to `self.mean_direction` using a rotation matrix.
        if not self._allow_nan_stats:
            # Assert that the basis vector rotates to the mean direction, as expected.
            basis = tf.cast(
                tf.concat([[1.], tf.zeros([event_dim - 1])], axis=0),
                self.dtype)
            with tf.control_dependencies([
                    assert_util.assert_less(
                        tf.linalg.norm(
                            tensor=self._rotate(basis) - self.mean_direction,
                            axis=-1), self.dtype.as_numpy_dtype(1e-5))
            ]):
                return self._rotate(samples)
        return self._rotate(samples)
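The rejection loop above implements Wood's (1994) envelope. A plain-numpy sketch of the same sampler for a single scalar kappa, mirroring the b, x, c definitions and the acceptance test in body_fn (vectorized over the number of requested samples; names illustrative):

import numpy as np

def sample_vmf_x(kappa, event_dim, n, rng=np.random.default_rng()):
    """Rejection-sample the coordinate along the mean direction (Wood, 1994)."""
    dim = event_dim - 1
    b = dim / (2.0 * kappa + np.sqrt(4.0 * kappa**2 + dim**2))
    x = (1.0 - b) / (1.0 + b)
    c = kappa * x + dim * np.log1p(-x**2)

    out = np.empty(n)
    todo = np.arange(n)  # indices still awaiting an accepted draw
    while todo.size:
        z = rng.beta(dim / 2.0, dim / 2.0, size=todo.size)
        w = (1.0 - (1.0 + b) * z) / (1.0 - (1.0 - b) * z)
        accept = (kappa * w + dim * np.log1p(-x * w) - c
                  >= np.log(rng.uniform(size=todo.size)))
        out[todo[accept]] = w[accept]
        todo = todo[~accept]
    return out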
Example No. 38
    def __batch_while_loop(
            self, truth_padded_data: padder.PlaceholderPaddedData,
            initial_hidden_vector_input: tf.Tensor,
            num_batches: int) -> (tf.TensorArray, tf.TensorArray, tf.TensorArray, tf.TensorArray):
        def body(step, batch_states_ta: tf.TensorArray,
                 batch_outputs_ta: tf.TensorArray,
                 batch_outputs_counts_ta: tf.TensorArray,
                 batch_step_counts_ta: tf.TensorArray):

            with tf.variable_scope("initial_hidden_vector"):
                current_initial_hidden_vector_input = tf.gather(
                    initial_hidden_vector_input,
                    step,
                    name="current_initial_hidden_vector_input")
                current_hidden_vector = self.__create_fully_connected_layers(
                    current_initial_hidden_vector_input,
                    [self.hidden_vector_size])

            with tf.variable_scope("step_while_loop"):
                current_step_count = tf.gather(truth_padded_data.step_counts,
                                               step,
                                               name="current_step_count")
                current_outputs_padded = tf.gather(
                    truth_padded_data.outputs_padded,
                    step,
                    name="current_outputs_padded")
                current_outputs_counts = tf.gather(
                    truth_padded_data.outputs_counts,
                    step,
                    name="current_outputs_counts")

                current_states, current_outputs, current_outputs_counts, current_step_count = \
                    self.__step_while_loop(
                        current_step_count,
                        current_outputs_padded,
                        current_outputs_counts,
                        current_hidden_vector)

            return \
                step + 1, \
                batch_states_ta.write(step, current_states, "write_batch_states"), \
                batch_outputs_ta.write(step, current_outputs, "write_batch_outputs"), \
                batch_outputs_counts_ta.write(step, current_outputs_counts, "write_batch_outputs_counts"), \
                batch_step_counts_ta.write(step, current_step_count, "write_step_counts")

        def cond(step, *_):
            return step < num_batches

        *_, \
            generated_states_padded_ta, \
            generated_outputs_padded_ta, \
            generated_outputs_counts_padded_ta, \
            generated_step_counts_ta = tf.while_loop(
                cond=cond,
                body=body,
                loop_vars=[
                    tf.constant(0),
                    tf.TensorArray(dtype=tf.float32, size=num_batches, name="batch_states_ta"),
                    tf.TensorArray(dtype=tf.float32, size=num_batches, name="batch_outputs_ta"),
                    tf.TensorArray(dtype=tf.int32, size=num_batches, name="batch_outputs_counts_ta"),
                    tf.TensorArray(dtype=tf.int32, size=num_batches, name="batch_step_counts_ta")])

        return \
            generated_states_padded_ta, \
            generated_outputs_padded_ta, \
            generated_outputs_counts_padded_ta, \
            generated_step_counts_ta
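The caller (inside the same class, since the method name is mangled) typically materializes the four returned TensorArrays with stack(); a small sketch, with the surrounding variable names assumed:

states_ta, outputs_ta, outputs_counts_ta, step_counts_ta = \
    self.__batch_while_loop(truth_padded_data, initial_hidden_vector_input,
                            num_batches)

batch_states = states_ta.stack(name="batch_states")        # [num_batches, ...]
batch_outputs = outputs_ta.stack(name="batch_outputs")     # [num_batches, ...]
batch_outputs_counts = outputs_counts_ta.stack()           # [num_batches, ...]
batch_step_counts = step_counts_ta.stack()                 # [num_batches]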
Example No. 39
                    size), facts_0s)

        # Inverse attention mask, for what's retained in the state.
        retain = 1 - attend_to

        # GRU pass over the facts, according to the attention mask.
        while_valid_index = (lambda state, index: index < tf.shape(cs)[1])
        update_state = (
            lambda state, index: (attend_to[:, index, :] * attention_gru(
                cs[:, index, :], state)[0] + retain[:, index, :] * state))

        # start loop with most recent memory and at the first index
        memory.append(
            tuple(
                tf.while_loop(while_valid_index,
                              (lambda state, index:
                               (update_state(state, index), index + 1)),
                              loop_vars=[memory[-1], 0]))[0])

        attends.append(attend_to)

        # Reuse variables so the GRU pass uses the same variables every pass.
        scope.reuse_variables()

# Answer Module

# a0: Final memory state. (Input to answer module)
a0 = tf.concat([memory[-1], q], -1)

# fc_init: Initializer for the final fully connected layer's weights.
fc_init = tf.random_normal_initializer(stddev=0.1)
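The while_loop in this fragment performs a gated GRU pass over the facts: at index i the state becomes attend_to_i * GRU(c_i, state) + (1 - attend_to_i) * state. The same update, written out as a standalone function (attention_gru, cs and attend_to as in the fragment; other names illustrative):

def episode(state0, cs, attend_to, attention_gru):
    """One episodic-memory pass: gate each fact's GRU update by its attention."""
    retain = 1 - attend_to

    def cond(state, index):
        return index < tf.shape(cs)[1]

    def body(state, index):
        updated = attention_gru(cs[:, index, :], state)[0]
        state = attend_to[:, index, :] * updated + retain[:, index, :] * state
        return state, index + 1

    return tf.while_loop(cond, body, loop_vars=[state0, 0])[0]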
Example No. 40
def ExtractKeypointDescriptor(image, layer_name, image_scales, iou,
                              max_feature_num, abs_thres, model_fn):
    """Extract keypoint descriptor for input image.

  Args:
    image: An image tensor with shape [h, w, channels].
    layer_name: The endpoint of feature extraction layer.
    image_scales: A 1D float tensor which contains the scales.
    iou: A float scalar denoting the IOU threshold for NMS.
    max_feature_num: An int tensor denoting the maximum selected feature points.
    abs_thres: A float tensor denoting the score threshold for feature
      selection.
    model_fn: Model function. Follows the signature:

      * Args:
        * `images`: Image tensor which is re-scaled.
        * `normalized_image`: Whether or not the images are normalized.
        * `reuse`: Whether or not the layer and its variables should be reused.

      * Returns:
        * `attention`: Attention score after the non-linearity.
        * `feature_map`: Feature map obtained from the ResNet model.

  Returns:
    boxes: [N, 4] float tensor which denotes the selected receptive box. N is
      the number of final feature points which pass through keypoint selection
      and NMS steps.
    feature_scales: [N] float tensor. It is the inverse of the input image
      scales such that larger image scales correspond to larger image regions,
      which is compatible with scale-space keypoint detection convention.
    features: [N, depth] float tensor with feature descriptors.
    scores: [N, 1] float tensor denoting the attention score.

  Raises:
    ValueError: If the layer_name is unsupported.
  """
    original_image_shape_float = tf.gather(tf.to_float(tf.shape(image)),
                                           [0, 1])
    image_tensor = NormalizePixelValues(image)
    image_tensor = tf.expand_dims(image_tensor, 0, name='image/expand_dims')

    # Feature depth and receptive field parameters for each network version.
    if layer_name == 'resnet_v1_50/block3':
        feature_depth = 1024
        rf, stride, padding = [291.0, 32.0, 145.0]
    elif layer_name == 'resnet_v1_50/block4':
        feature_depth = 2048
        rf, stride, padding = [483.0, 32.0, 241.0]
    else:
        raise ValueError('Unsupported layer_name.')

    def _ProcessSingleScale(scale_index,
                            boxes,
                            features,
                            scales,
                            scores,
                            reuse=True):
        """Resize the image and run feature extraction and keypoint selection.

       This function is passed into tf.while_loop() and called repeatedly.
       The input boxes are those collected from the previous iterations
       [0: scale_index - 1]. We look up the current scale via
       image_scales[scale_index], then run image resizing, feature extraction
       and keypoint selection to obtain a new set of selected_boxes for the
       current scale. Finally, we concatenate the previous boxes with the
       current selected_boxes to form the output.

    Args:
      scale_index: A valid index in the image_scales.
      boxes: Box tensor with the shape of [N, 4].
      features: Feature tensor with the shape of [N, depth].
      scales: Scale tensor with the shape of [N].
      scores: Attention score tensor with the shape of [N].
      reuse: Whether or not the layer and its variables should be reused.

    Returns:
      scale_index: The next scale index for processing.
      boxes: Concatenated box tensor with the shape of [K, 4]. K >= N.
      features: Concatenated feature tensor with the shape of [K, depth].
      scales: Concatenated scale tensor with the shape of [K].
      scores: Concatenated attention score tensor with the shape of [K].
    """
        scale = tf.gather(image_scales, scale_index)
        new_image_size = tf.to_int32(
            tf.round(original_image_shape_float * scale))
        resized_image = tf.image.resize_bilinear(image_tensor, new_image_size)

        attention, feature_map = model_fn(resized_image,
                                          normalized_image=True,
                                          reuse=reuse)

        rf_boxes = CalculateReceptiveBoxes(
            tf.shape(feature_map)[1],
            tf.shape(feature_map)[2], rf, stride, padding)
        # Re-project back to the original image space.
        rf_boxes = tf.divide(rf_boxes, scale)
        attention = tf.reshape(attention, [-1])
        feature_map = tf.reshape(feature_map, [-1, feature_depth])

        # Use attention score to select feature vectors.
        indices = tf.reshape(tf.where(attention >= abs_thres), [-1])
        selected_boxes = tf.gather(rf_boxes, indices)
        selected_features = tf.gather(feature_map, indices)
        selected_scores = tf.gather(attention, indices)
        selected_scales = tf.ones_like(selected_scores, tf.float32) / scale

        # Concat with the previous result from different scales.
        boxes = tf.concat([boxes, selected_boxes], 0)
        features = tf.concat([features, selected_features], 0)
        scales = tf.concat([scales, selected_scales], 0)
        scores = tf.concat([scores, selected_scores], 0)

        return scale_index + 1, boxes, features, scales, scores

    output_boxes = tf.zeros([0, 4], dtype=tf.float32)
    output_features = tf.zeros([0, feature_depth], dtype=tf.float32)
    output_scales = tf.zeros([0], dtype=tf.float32)
    output_scores = tf.zeros([0], dtype=tf.float32)

    # Process the first scale separately, the following scales will reuse the
    # graph variables.
    (_, output_boxes, output_features, output_scales,
     output_scores) = _ProcessSingleScale(0,
                                          output_boxes,
                                          output_features,
                                          output_scales,
                                          output_scores,
                                          reuse=False)
    i = tf.constant(1, dtype=tf.int32)
    num_scales = tf.shape(image_scales)[0]
    keep_going = lambda j, boxes, features, scales, scores: tf.less(
        j, num_scales)

    (_, output_boxes, output_features, output_scales,
     output_scores) = tf.while_loop(cond=keep_going,
                                    body=_ProcessSingleScale,
                                    loop_vars=[
                                        i, output_boxes, output_features,
                                        output_scales, output_scores
                                    ],
                                    shape_invariants=[
                                        i.get_shape(),
                                        tf.TensorShape([None, 4]),
                                        tf.TensorShape([None, feature_depth]),
                                        tf.TensorShape([None]),
                                        tf.TensorShape([None])
                                    ],
                                    back_prop=False)

    feature_boxes = box_list.BoxList(output_boxes)
    feature_boxes.add_field('features', output_features)
    feature_boxes.add_field('scales', output_scales)
    feature_boxes.add_field('scores', output_scores)

    nms_max_boxes = tf.minimum(max_feature_num, feature_boxes.num_boxes())
    final_boxes = box_list_ops.non_max_suppression(feature_boxes, iou,
                                                   nms_max_boxes)

    return (final_boxes.get(), final_boxes.get_field('scales'),
            final_boxes.get_field('features'),
            tf.expand_dims(final_boxes.get_field('scores'), 1))
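CalculateReceptiveBoxes is not shown here. Under the usual DELF convention, each feature-map cell (y, x) maps to a receptive field of size rf in input coordinates, positioned by the stride and padding above; a sketch of that computation, offered as an assumption about the helper's behaviour:

def calculate_receptive_boxes(height, width, rf, stride, padding):
    """[H*W, 4] boxes (ymin, xmin, ymax, xmax) in input-image coordinates."""
    x, y = tf.meshgrid(tf.range(width), tf.range(height))
    coordinates = tf.reshape(tf.stack([y, x], axis=2), [-1, 2])
    # treat each feature cell as a point, then expand it to its receptive field
    point_boxes = tf.cast(tf.concat([coordinates, coordinates], 1), tf.float32)
    bias = [-padding, -padding, -padding + rf - 1, -padding + rf - 1]
    return stride * point_boxes + bias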
Example No. 41
def inference_winner_take_all(images,
                              cams,
                              depth_num,
                              depth_start,
                              depth_end,
                              is_master_gpu=True,
                              reg_type='GRU',
                              inverse_depth=False):
    """ infer disparity image from stereo images and cameras """

    if not inverse_depth:
        depth_interval = (depth_end -
                          depth_start) / (tf.cast(depth_num, tf.float32) - 1)

    # reference image
    ref_image = tf.squeeze(tf.slice(images, [0, 0, 0, 0, 0],
                                    [-1, 1, -1, -1, 3]),
                           axis=1)
    ref_cam = tf.squeeze(tf.slice(cams, [0, 0, 0, 0, 0], [-1, 1, 2, 4, 4]),
                         axis=1)

    # image feature extraction
    if is_master_gpu:
        ref_tower = UNetDS2GN({'data': ref_image},
                              is_training=True,
                              reuse=False)
    else:
        ref_tower = UNetDS2GN({'data': ref_image},
                              is_training=True,
                              reuse=True)
    view_towers = []
    for view in range(1, FLAGS.view_num):
        view_image = tf.squeeze(tf.slice(images, [0, view, 0, 0, 0],
                                         [-1, 1, -1, -1, -1]),
                                axis=1)
        view_tower = UNetDS2GN({'data': view_image},
                               is_training=True,
                               reuse=True)
        view_towers.append(view_tower)

    # get all homographies
    view_homographies = []
    for view in range(1, FLAGS.view_num):
        view_cam = tf.squeeze(tf.slice(cams, [0, view, 0, 0, 0],
                                       [-1, 1, 2, 4, 4]),
                              axis=1)
        if inverse_depth:
            homographies = get_homographies_inv_depth(ref_cam,
                                                      view_cam,
                                                      depth_num=depth_num,
                                                      depth_start=depth_start,
                                                      depth_end=depth_end)
        else:
            homographies = get_homographies(ref_cam,
                                            view_cam,
                                            depth_num=depth_num,
                                            depth_start=depth_start,
                                            depth_interval=depth_interval)
        view_homographies.append(homographies)

    # gru unit
    gru1_filters = 16
    gru2_filters = 4
    gru3_filters = 2
    feature_shape = [FLAGS.batch_size, FLAGS.max_h // 4, FLAGS.max_w // 4, 32]  # integer division: shape entries must be ints
    gru_input_shape = [feature_shape[1], feature_shape[2]]
    state1 = tf.zeros(
        [FLAGS.batch_size, feature_shape[1], feature_shape[2], gru1_filters])
    state2 = tf.zeros(
        [FLAGS.batch_size, feature_shape[1], feature_shape[2], gru2_filters])
    state3 = tf.zeros(
        [FLAGS.batch_size, feature_shape[1], feature_shape[2], gru3_filters])
    conv_gru1 = ConvGRUCell(shape=gru_input_shape,
                            kernel=[3, 3],
                            filters=gru1_filters)
    conv_gru2 = ConvGRUCell(shape=gru_input_shape,
                            kernel=[3, 3],
                            filters=gru2_filters)
    conv_gru3 = ConvGRUCell(shape=gru_input_shape,
                            kernel=[3, 3],
                            filters=gru3_filters)

    # initialize variables
    exp_sum = tf.Variable(tf.zeros(
        [FLAGS.batch_size, feature_shape[1], feature_shape[2], 1]),
                          name='exp_sum',
                          trainable=False,
                          collections=[tf.GraphKeys.LOCAL_VARIABLES])
    depth_image = tf.Variable(tf.zeros(
        [FLAGS.batch_size, feature_shape[1], feature_shape[2], 1]),
                              name='depth_image',
                              trainable=False,
                              collections=[tf.GraphKeys.LOCAL_VARIABLES])
    max_prob_image = tf.Variable(tf.zeros(
        [FLAGS.batch_size, feature_shape[1], feature_shape[2], 1]),
                                 name='max_prob_image',
                                 trainable=False,
                                 collections=[tf.GraphKeys.LOCAL_VARIABLES])
    init_map = tf.zeros(
        [FLAGS.batch_size, feature_shape[1], feature_shape[2], 1])

    # define winner take all loop
    def body(depth_index, state1, state2, state3, depth_image, max_prob_image,
             exp_sum, incre):
        """Loop body."""

        # calculate cost
        ave_feature = ref_tower.get_output()
        ave_feature2 = tf.square(ref_tower.get_output())
        for view in range(0, FLAGS.view_num - 1):
            homographies = view_homographies[view]
            homographies = tf.transpose(homographies, perm=[1, 0, 2, 3])
            homography = homographies[depth_index]
            # warped_view_feature = homography_warping(view_towers[view].get_output(), homography)
            warped_view_feature = tf_transform_homography(
                view_towers[view].get_output(), homography)
            ave_feature = ave_feature + warped_view_feature
            ave_feature2 = ave_feature2 + tf.square(warped_view_feature)
        ave_feature = ave_feature / FLAGS.view_num
        ave_feature2 = ave_feature2 / FLAGS.view_num
        cost = ave_feature2 - tf.square(ave_feature)
        cost.set_shape(
            [FLAGS.batch_size, feature_shape[1], feature_shape[2], 32])

        # gru
        reg_cost1, state1 = conv_gru1(-cost, state1, scope='conv_gru1')
        reg_cost2, state2 = conv_gru2(reg_cost1, state2, scope='conv_gru2')
        reg_cost3, state3 = conv_gru3(reg_cost2, state3, scope='conv_gru3')
        reg_cost = tf.layers.conv2d(reg_cost3,
                                    1,
                                    3,
                                    padding='same',
                                    reuse=tf.AUTO_REUSE,
                                    name='prob_conv')
        prob = tf.exp(reg_cost)

        # index
        d_idx = tf.cast(depth_index, tf.float32)
        if inverse_depth:
            inv_depth_start = tf.div(1.0, depth_start)
            inv_depth_end = tf.div(1.0, depth_end)
            inv_interval = (inv_depth_start - inv_depth_end) / (
                tf.cast(depth_num, 'float32') - 1)
            inv_depth = inv_depth_start - d_idx * inv_interval
            depth = tf.div(1.0, inv_depth)
        else:
            depth = depth_start + d_idx * depth_interval
        temp_depth_image = tf.reshape(depth, [FLAGS.batch_size, 1, 1, 1])
        temp_depth_image = tf.tile(
            temp_depth_image,
            [1, int(feature_shape[1]),
             int(feature_shape[2]), 1
             ])  # modified by zhantao deng @ 26-07-2019, float -> int

        # update the best
        update_flag_image = tf.cast(tf.less(max_prob_image, prob),
                                    dtype='float32')
        new_max_prob_image = update_flag_image * prob + (
            1 - update_flag_image) * max_prob_image
        new_depth_image = update_flag_image * temp_depth_image + (
            1 - update_flag_image) * depth_image
        max_prob_image = tf.assign(max_prob_image, new_max_prob_image)
        depth_image = tf.assign(depth_image, new_depth_image)

        # update counter
        exp_sum = tf.assign_add(exp_sum, prob)
        depth_index = tf.add(depth_index, incre)

        return depth_index, state1, state2, state3, depth_image, max_prob_image, exp_sum, incre

    # run forward loop
    exp_sum = tf.assign(exp_sum, init_map)
    depth_image = tf.assign(depth_image, init_map)
    max_prob_image = tf.assign(max_prob_image, init_map)
    depth_index = tf.constant(0)
    incre = tf.constant(1)
    cond = lambda depth_index, *_: tf.less(depth_index, depth_num)
    _, state1, state2, state3, depth_image, max_prob_image, exp_sum, incre = tf.while_loop(
        cond,
        body, [
            depth_index, state1, state2, state3, depth_image, max_prob_image,
            exp_sum, incre
        ],
        back_prop=False,
        parallel_iterations=1)

    # get output
    forward_exp_sum = exp_sum + 1e-7
    forward_depth_map = depth_image
    return forward_depth_map, max_prob_image / forward_exp_sum
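Stripped of the GRU regularisation and the Variable plumbing, the body's update is a running per-pixel argmax over depth planes (names illustrative):

def winner_take_all_update(prob, depth, max_prob_image, depth_image):
    """Keep, per pixel, the depth of the most probable plane seen so far."""
    update = tf.cast(tf.less(max_prob_image, prob), tf.float32)
    new_max_prob_image = update * prob + (1 - update) * max_prob_image
    new_depth_image = update * depth + (1 - update) * depth_image
    return new_max_prob_image, new_depth_image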
Example No. 42
def max_margin(labels, mentions, scores):
    batch = 100

    def different(indices):
        # A mention boundary was crossed: score the accumulated group.
        labels_to_process = tf.gather(labels, indices)
        predictions_to_process = tf.gather(scores, indices)
        return process_antecedents(labels_to_process, predictions_to_process)

    mentions_arr = tf.unstack(mentions, num=batch)

    def mention_group_loss(i, prev_mention, indices):
        # Zero loss while still inside the same mention's group.
        condition = tf.equal(tf.gather(mentions, i), prev_mention)
        return tf.cond(condition, lambda: K.constant(0.), lambda: different(indices))

    def set_indices(i, prev_mention, indices):
        # Extend the current group, or reset it when the mention changes.
        condition = tf.equal(tf.gather(mentions, i), prev_mention)
        return tf.cond(condition, lambda: tf.concat([indices, [i]], axis=0),
                       lambda: tf.zeros([0], tf.int32))

    i0 = tf.constant(0)
    prev_mention0 = mentions_arr[0]
    indices0 = tf.zeros([0], tf.int32)
    loss0 = tf.constant(0.)

    c = lambda i, prev_mention, indices, loss: tf.less(i, batch)
    b = lambda i, prev_mention, indices, loss: [i + 1, tf.gather(mentions, i),
                                                set_indices(i, prev_mention, indices),
                                                loss + mention_group_loss(i, prev_mention, indices)]

    # indices grows and shrinks dynamically, hence the [None] shape invariant.
    loop = tf.while_loop(c, b, [i0, prev_mention0, indices0, loss0],
                         shape_invariants=[i0.get_shape(), prev_mention0.get_shape(),
                                           tf.TensorShape([None]), loss0.get_shape()])

    return loop[3]
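process_antecedents is defined elsewhere. One common max-margin formulation over a mention's candidate antecedents penalises the margin between the best wrongly-scored and the best correctly-scored candidate; a sketch under that assumption (not the actual implementation):

def process_antecedents(group_labels, group_scores):
    # assumption: labels are 1.0 for true antecedents, 0.0 otherwise
    correct = tf.boolean_mask(group_scores, group_labels > 0.5)
    wrong = tf.boolean_mask(group_scores, group_labels <= 0.5)
    # pad with a large negative so an empty side contributes nothing
    best_correct = tf.reduce_max(tf.concat([correct, [-1e9]], axis=0))
    best_wrong = tf.reduce_max(tf.concat([wrong, [-1e9]], axis=0))
    return tf.maximum(0., 1. + best_wrong - best_correct)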
Example No. 43
def sample_sequence(*,
                    hparams,
                    length,
                    start_token=None,
                    batch_size=None,
                    context=None,
                    temperature=1,
                    top_k=0):
    if start_token is None:
        assert context is not None, 'Specify exactly one of start_token and context!'
    else:
        assert context is None, 'Specify exactly one of start_token and context!'
        context = tf.fill([batch_size, 1], start_token)

    def step(hparams, tokens, past=None):
        lm_output = model.model(hparams=hparams,
                                X=tokens,
                                past=past,
                                reuse=tf.AUTO_REUSE)

        logits = lm_output['logits'][:, :, :hparams.n_vocab]
        presents = lm_output['present']
        presents.set_shape(
            model.past_shape(hparams=hparams, batch_size=batch_size))
        return {
            'logits': logits,
            'presents': presents,
        }

    with tf.name_scope('sample_sequence'):
        # Don't feed the last context token -- leave that to the loop below
        # TODO: Would be slightly faster if we called step on the entire context,
        # rather than leaving the last token transformer calculation to the while loop.
        context_output = step(hparams, context[:, :-1])

        def body(past, prev, output):
            next_outputs = step(hparams, prev[:, tf.newaxis], past=past)
            logits = next_outputs['logits'][:,
                                            -1, :] / tf.to_float(temperature)
            logits = top_k_logits(logits, k=top_k)
            samples = tf.multinomial(logits,
                                     num_samples=1,
                                     output_dtype=tf.int32)
            return [
                tf.concat([past, next_outputs['presents']], axis=-2),
                tf.squeeze(samples, axis=[1]),
                tf.concat([output, samples], axis=1),
            ]

        def cond(*args):
            return True

        _, _, tokens = tf.while_loop(
            cond=cond,
            body=body,
            maximum_iterations=length,
            loop_vars=[
                context_output['presents'],
                context[:, -1],
                context,
            ],
            shape_invariants=[
                tf.TensorShape(
                    model.past_shape(hparams=hparams, batch_size=batch_size)),
                tf.TensorShape([batch_size]),
                tf.TensorShape([batch_size, None]),
            ],
            back_prop=False,
        )

        return tokens
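top_k_logits is imported from the sampling utilities; in the original GPT-2 code it keeps only the k highest logits per row, with k=0 meaning no filtering. A sketch to that effect:

def top_k_logits(logits, k):
    """Mask all but the k largest logits per row with a large negative value."""
    if k == 0:
        return logits  # no truncation
    values, _ = tf.nn.top_k(logits, k=k)
    min_values = values[:, -1, tf.newaxis]  # k-th largest logit per row
    return tf.where(logits < min_values,
                    tf.ones_like(logits) * -1e10,
                    logits)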
Example No. 44
def _dynamic_unroll_multi_step(cell, inputs, reset_mask, initial_state,
                               zero_state, dtype, mask_fn, parallel_iterations,
                               swap_memory, iterations, batch_size,
                               const_batch_size):
    """Helper for dynamic_unroll which uses a tf.while_loop."""

    # Convert all inputs to TensorArrays
    def ta_and_unstack(x):
        return (tf.TensorArray(dtype=x.dtype,
                               size=iterations,
                               element_shape=x.shape[1:]).unstack(x))

    inputs_tas = nest.map_structure(ta_and_unstack, inputs)
    reset_mask_ta = ta_and_unstack(reset_mask)

    # Create a TensorArray for each output
    def create_output_ta(s):
        return tf.TensorArray(dtype=_infer_state_dtype(dtype, initial_state),
                              size=iterations,
                              element_shape=(tf.TensorShape(
                                  [const_batch_size]).concatenate(
                                      _maybe_tensor_shape_from_tensor(s))))

    output_tas = nest.map_structure(create_output_ta, cell.output_size)

    if mask_fn:
        masks_ta = tf.TensorArray(dtype=tf.float32,
                                  size=iterations,
                                  element_shape=tf.TensorShape(
                                      [const_batch_size]))
    else:
        masks_ta = ()

    def pred(time, *unused_args):
        return time < iterations

    def body(time, time_since_reset, state, output_tas, masks_ta):
        """Internal while_loop body.

    Args:
      time: time
      time_since_reset: time since last prev_time_steps.is_first() == true.
        (only accurate / valid when mask_fn is not None).
      state: rnn state @ time
      output_tas: output tensorarrays
      masks_ta: optional mask tensorarray

    Returns:
      - time + 1
      - time_since_reset (next value)
      - state: rnn state @ time + 1
      - output_tas: output tensorarrays with values written @ time
      - masks_ta: optional mask tensorarray with mask written @ time
    """
        input_ = nest.map_structure(lambda ta: ta.read(time), inputs_tas)
        is_reset = reset_mask_ta.read(time)
        state = nest.map_structure(
            lambda s_zero, s: _maybe_reset_state(is_reset, s_zero, s),
            zero_state, state)

        outputs, next_state = cell(input_, state)

        output_tas = nest.map_structure(lambda ta, x: ta.write(time, x),
                                        output_tas, outputs)

        if mask_fn:
            time_since_reset = tf.where(is_reset,
                                        tf.zeros_like(time_since_reset),
                                        time_since_reset + 1,
                                        name="time_since_reset")
            masks_ta = masks_ta.write(time, mask_fn(time, time_since_reset))

        return (time + 1, time_since_reset, next_state, output_tas, masks_ta)

    # Create a new scope in which the caching device is either
    # determined by the parent scope, or is set to place the cached
    # Variable using the same placement as for the rest of the RNN.
    with tf.variable_scope(tf.get_variable_scope()) as varscope:
        if (not tf.contrib.eager.executing_eagerly()
                and varscope.caching_device is None):
            varscope.set_caching_device(lambda op: op.device)

        _, _, final_state, output_tas, masks_ta = (tf.while_loop(
            pred,
            body,
            (tf.constant(0, name="time"),
             tf.zeros((batch_size, ), dtype=tf.int32, name="time_since_reset"),
             initial_state, output_tas, masks_ta),
            parallel_iterations=parallel_iterations,
            swap_memory=swap_memory,
            maximum_iterations=iterations))

    outputs = nest.map_structure(lambda ta: ta.stack(), output_tas)

    if mask_fn:
        mask = masks_ta.stack()
    else:
        mask = None

    if isinstance(iterations, int):
        # TensorArray.stack() doesn't set a static value for dimension 0,
        # even if the size is known. Set the shapes here.
        iterations_shape = tf.TensorShape([iterations])
        for tensor in nest.flatten(outputs) + ([mask] if mask_fn else []):
            tensor.set_shape(iterations_shape.concatenate(tensor.shape[1:]))

    # Convert everything back to batch major
    outputs = nest.map_structure(tf.contrib.rnn.transpose_batch_time, outputs)
    if mask is not None:
        mask = tf.transpose(mask)

    return (outputs, final_state, mask)
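_maybe_reset_state is not shown; its expected behaviour at the call site is to substitute the zero state for batch entries whose reset flag is set. A sketch of that contract:

def _maybe_reset_state(is_reset, s_zero, s):
    """Replace batch entry b of s with s_zero[b] wherever is_reset[b] is True."""
    return tf.where(is_reset, s_zero, s)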
Example No. 45
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):
    """Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(N, 2), wh
    num_classes: integer
    ignore_thresh: float, the iou threshold whether to ignore object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)
    """
    num_layers = len(anchors) // 3  # default setting
    yolo_outputs = args[:num_layers]
    y_true = args[num_layers:]
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]
                   ] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]
    input_shape = K.cast(
        K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [
        K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0]))
        for l in range(num_layers)
    ]
    loss = 0
    m = K.shape(yolo_outputs[0])[0]  # batch size, tensor
    mf = K.cast(m, K.dtype(yolo_outputs[0]))

    for l in range(num_layers):
        object_mask = y_true[l][..., 4:5]
        true_class_probs = y_true[l][..., 5:]

        grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
                                                     anchors[anchor_mask[l]],
                                                     num_classes,
                                                     input_shape,
                                                     calc_loss=True)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet raw box to calculate loss.
        raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid
        raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] *
                            input_shape[::-1])
        raw_true_wh = K.switch(object_mask, raw_true_wh,
                               K.zeros_like(raw_true_wh))  # avoid log(0)=-inf
        box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]),
                                     size=1,
                                     dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')

        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b, ..., 0:4],
                                       object_mask_bool[b, ..., 0])
            iou = box_iou(pred_box[b], true_box)
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(
                b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
            return b + 1, ignore_mask

        _, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body,
                                       [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        # K.binary_crossentropy is helpful to avoid exp overflow.
        xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(
            raw_true_xy, raw_pred[..., 0:2], from_logits=True)
        wh_loss = object_mask * box_loss_scale * 0.5 * K.square(
            raw_true_wh - raw_pred[..., 2:4])
        confidence_loss = object_mask * K.binary_crossentropy(
            object_mask, raw_pred[..., 4:5], from_logits=True) + (
                (1 - object_mask) * K.binary_crossentropy(
                    object_mask, raw_pred[..., 4:5], from_logits=True) *
                ignore_mask)
        class_loss = object_mask * K.binary_crossentropy(
            true_class_probs, raw_pred[..., 5:], from_logits=True)

        xy_loss = K.sum(xy_loss) / mf
        wh_loss = K.sum(wh_loss) / mf
        confidence_loss = K.sum(confidence_loss) / mf
        class_loss = K.sum(class_loss) / mf
        loss += xy_loss + wh_loss + confidence_loss + class_loss
        if print_loss:
            loss = tf.Print(loss, [
                loss, xy_loss, wh_loss, confidence_loss, class_loss,
                K.sum(ignore_mask)
            ],
                            message="loss: ")
    return loss
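box_iou is provided by the model utilities; for reference, a sketch of the standard pairwise IoU it is expected to compute on (x, y, w, h) boxes:

from keras import backend as K

def box_iou(b1, b2):
    """Pairwise IoU between b1: [..., 4] and b2: [j, 4] boxes in xywh form."""
    b1 = K.expand_dims(b1, -2)
    b1_mins = b1[..., :2] - b1[..., 2:4] / 2.
    b1_maxes = b1[..., :2] + b1[..., 2:4] / 2.

    b2 = K.expand_dims(b2, 0)
    b2_mins = b2[..., :2] - b2[..., 2:4] / 2.
    b2_maxes = b2[..., :2] + b2[..., 2:4] / 2.

    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1[..., 2] * b1[..., 3]
    b2_area = b2[..., 2] * b2[..., 3]
    return intersect_area / (b1_area + b2_area - intersect_area)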
Example No. 46
def inference_mem(images,
                  cams,
                  depth_num,
                  depth_start,
                  depth_interval,
                  is_master_gpu=True):
    """ infer depth image from multi-view images and cameras """

    # dynamic gpu params
    depth_end = depth_start + (tf.cast(depth_num, tf.float32) -
                               1) * depth_interval
    feature_c = 32
    feature_h = FLAGS.max_h // 4  # integer division: used as tensor shapes
    feature_w = FLAGS.max_w // 4

    # reference image
    ref_image = tf.squeeze(tf.slice(images, [0, 0, 0, 0, 0],
                                    [-1, 1, -1, -1, 3]),
                           axis=1)
    ref_cam = tf.squeeze(tf.slice(cams, [0, 0, 0, 0, 0], [-1, 1, 2, 4, 4]),
                         axis=1)

    # image feature extraction
    if is_master_gpu:
        ref_tower = UNetDS2GN({'data': ref_image},
                              is_training=True,
                              reuse=False)
    else:
        ref_tower = UNetDS2GN({'data': ref_image},
                              is_training=True,
                              reuse=True)
    ref_feature = ref_tower.get_output()
    ref_feature2 = tf.square(ref_feature)

    view_features = []
    for view in range(1, FLAGS.view_num):
        view_image = tf.squeeze(tf.slice(images, [0, view, 0, 0, 0],
                                         [-1, 1, -1, -1, -1]),
                                axis=1)
        view_tower = UNetDS2GN({'data': view_image},
                               is_training=True,
                               reuse=True)
        view_features.append(view_tower.get_output())
    view_features = tf.stack(view_features, axis=0)

    # get all homographies
    view_homographies = []
    for view in range(1, FLAGS.view_num):
        view_cam = tf.squeeze(tf.slice(cams, [0, view, 0, 0, 0],
                                       [-1, 1, 2, 4, 4]),
                              axis=1)
        homographies = get_homographies(ref_cam,
                                        view_cam,
                                        depth_num=depth_num,
                                        depth_start=depth_start,
                                        depth_interval=depth_interval)
        view_homographies.append(homographies)
    view_homographies = tf.stack(view_homographies, axis=0)

    # build cost volume by differentiable homography
    with tf.name_scope('cost_volume_homography'):
        depth_costs = []

        for d in range(depth_num):
            # compute cost (standard deviation feature)
            ave_feature = tf.Variable(
                tf.zeros([FLAGS.batch_size, feature_h, feature_w, feature_c]),
                name='ave',
                trainable=False,
                collections=[tf.GraphKeys.LOCAL_VARIABLES])
            ave_feature2 = tf.Variable(
                tf.zeros([FLAGS.batch_size, feature_h, feature_w, feature_c]),
                name='ave2',
                trainable=False,
                collections=[tf.GraphKeys.LOCAL_VARIABLES])
            ave_feature = tf.assign(ave_feature, ref_feature)
            ave_feature2 = tf.assign(ave_feature2, ref_feature2)

            def body(view, ave_feature, ave_feature2):
                """Loop body."""
                homography = tf.slice(view_homographies[view],
                                      begin=[0, d, 0, 0],
                                      size=[-1, 1, 3, 3])
                homography = tf.squeeze(homography, axis=1)
                # warped_view_feature = homography_warping(view_features[view], homography)
                warped_view_feature = tf_transform_homography(
                    view_features[view], homography)
                ave_feature = tf.assign_add(ave_feature, warped_view_feature)
                ave_feature2 = tf.assign_add(ave_feature2,
                                             tf.square(warped_view_feature))
                view = tf.add(view, 1)
                return view, ave_feature, ave_feature2

            view = tf.constant(0)
            cond = lambda view, *_: tf.less(view, FLAGS.view_num - 1)
            _, ave_feature, ave_feature2 = tf.while_loop(
                cond,
                body, [view, ave_feature, ave_feature2],
                back_prop=False,
                parallel_iterations=1)

            ave_feature = tf.assign(
                ave_feature,
                tf.square(ave_feature) / (FLAGS.view_num * FLAGS.view_num))
            ave_feature2 = tf.assign(
                ave_feature2, ave_feature2 / FLAGS.view_num - ave_feature)
            depth_costs.append(ave_feature2)
        cost_volume = tf.stack(depth_costs, axis=1)

    # filtered cost volume, size of (B, D, H, W, 1)
    if is_master_gpu:
        filtered_cost_volume_tower = RegNetUS0({'data': cost_volume},
                                               is_training=True,
                                               reuse=False)
    else:
        filtered_cost_volume_tower = RegNetUS0({'data': cost_volume},
                                               is_training=True,
                                               reuse=True)
    filtered_cost_volume = tf.squeeze(filtered_cost_volume_tower.get_output(),
                                      axis=-1)

    # depth map by softArgmin
    with tf.name_scope('soft_arg_min'):
        # probability volume by soft max
        probability_volume = tf.nn.softmax(tf.scalar_mul(
            -1, filtered_cost_volume),
                                           axis=1,
                                           name='prob_volume')

        # depth image by soft argmin
        volume_shape = tf.shape(probability_volume)
        soft_2d = []
        for i in range(FLAGS.batch_size):
            soft_1d = tf.linspace(depth_start[i], depth_end[i],
                                  tf.cast(depth_num, tf.int32))
            soft_2d.append(soft_1d)
        soft_2d = tf.reshape(tf.stack(soft_2d, axis=0),
                             [volume_shape[0], volume_shape[1], 1, 1])
        soft_4d = tf.tile(soft_2d, [1, 1, volume_shape[2], volume_shape[3]])
        estimated_depth_map = tf.reduce_sum(soft_4d * probability_volume,
                                            axis=1)
        estimated_depth_map = tf.expand_dims(estimated_depth_map, axis=3)

    # probability map
    prob_map = get_propability_map(probability_volume, estimated_depth_map,
                                   depth_start, depth_interval)

    # return filtered_depth_map,
    return estimated_depth_map, prob_map
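
The per-depth cost above is the feature variance across views, accumulated incrementally as E[x²] − (E[x])². A quick NumPy check of that identity (independent of MVSNet itself; shapes here are arbitrary):

import numpy as np

views = np.random.rand(5, 4, 4, 8).astype(np.float32)  # (num_views, h, w, c)
mean = views.mean(axis=0)
mean_sq = (views ** 2).mean(axis=0)
incremental_var = mean_sq - mean ** 2   # running-sum form used above
direct_var = views.var(axis=0)          # definition of variance
assert np.allclose(incremental_var, direct_var, atol=1e-5)
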
Ejemplo n.º 47
0
def ssd_crop(image, boxes, classes):
  """IoU biassed random crop.

  Reference: https://github.com/chauhan-utk/ssd.DomainAdaptation
  """

  num_boxes = tf.shape(boxes)[0]

  def no_crop_check():
    return (tf.random_uniform(shape=(), minval=0, maxval=1, dtype=tf.float32)
            < ssd_constants.P_NO_CROP_PER_PASS)

  def no_crop_proposal():
    return (
        tf.ones((), tf.bool),
        tf.convert_to_tensor([0, 0, 1, 1], dtype=tf.float32),
        tf.ones((num_boxes,), tf.bool),
    )

  def crop_proposal():
    rand_vec = lambda minval, maxval: tf.random_uniform(
        shape=(ssd_constants.NUM_CROP_PASSES, 1), minval=minval, maxval=maxval,
        dtype=tf.float32)

    width, height = rand_vec(0.3, 1), rand_vec(0.3, 1)
    left, top = rand_vec(0, 1-width), rand_vec(0, 1-height)

    right = left + width
    bottom = top + height

    ltrb = tf.concat([left, top, right, bottom], axis=1)

    min_iou = tf.random_shuffle(ssd_constants.CROP_MIN_IOU_CHOICES)[0]
    ious = calc_iou_tensor(ltrb, boxes)

    # discard any bboxes whose center is not in the cropped image
    xc, yc = [tf.tile(0.5 * (boxes[:, i + 0] + boxes[:, i + 2])[tf.newaxis, :],
                      (ssd_constants.NUM_CROP_PASSES, 1)) for i in range(2)]

    masks = tf.reduce_all(tf.stack([
        tf.greater(xc, tf.tile(left, (1, num_boxes))),
        tf.less(xc, tf.tile(right, (1, num_boxes))),
        tf.greater(yc, tf.tile(top, (1, num_boxes))),
        tf.less(yc, tf.tile(bottom, (1, num_boxes))),
    ], axis=2), axis=2)

    # Check whether a crop candidate is valid.
    valid_aspect = tf.logical_and(tf.less(height/width, 2),
                                  tf.less(width/height, 2))
    valid_ious = tf.reduce_all(tf.greater(ious, min_iou), axis=1, keepdims=True)
    valid_masks = tf.reduce_any(masks, axis=1, keepdims=True)

    valid_all = tf.cast(tf.reduce_all(tf.concat(
        [valid_aspect, valid_ious, valid_masks], axis=1), axis=1), tf.int32)

    # One indexed, as zero is needed for the case of no matches.
    index = tf.range(1, 1 + ssd_constants.NUM_CROP_PASSES, dtype=tf.int32)

    # Either one-hot, or zeros if there is no valid crop.
    selection = tf.equal(tf.reduce_max(index * valid_all), index)

    use_crop = tf.reduce_any(selection)
    output_ltrb = tf.reduce_sum(tf.multiply(ltrb, tf.tile(tf.cast(
        selection, tf.float32)[:, tf.newaxis], (1, 4))), axis=0)
    output_masks = tf.reduce_any(tf.logical_and(masks, tf.tile(
        selection[:, tf.newaxis], (1, num_boxes))), axis=0)

    return use_crop, output_ltrb, output_masks

  def proposal(*args):
    return tf.cond(
        pred=no_crop_check(),
        true_fn=no_crop_proposal,
        false_fn=crop_proposal,
    )

  _, crop_bounds, box_masks = tf.while_loop(
      cond=lambda x, *_: tf.logical_not(x),
      body=proposal,
      loop_vars=[tf.zeros((), tf.bool), tf.zeros((4,), tf.float32), tf.zeros((num_boxes,), tf.bool)],
  )

  filtered_boxes = tf.boolean_mask(boxes, box_masks, axis=0)
  num_filtered_boxes = tf.shape(filtered_boxes)[0]

  # Clip boxes to the cropped region.
  filtered_boxes = tf.stack([
      tf.maximum(filtered_boxes[:, 0], crop_bounds[0]),
      tf.maximum(filtered_boxes[:, 1], crop_bounds[1]),
      tf.minimum(filtered_boxes[:, 2], crop_bounds[2]),
      tf.minimum(filtered_boxes[:, 3], crop_bounds[3]),
  ], axis=1)

  left = crop_bounds[0]
  top = crop_bounds[1]
  width = crop_bounds[2] - left
  height = crop_bounds[3] - top

  cropped_boxes = tf.stack([
      (filtered_boxes[:, 0] - left) / width,
      (filtered_boxes[:, 1] - top) / height,
      (filtered_boxes[:, 2] - left) / width,
      (filtered_boxes[:, 3] - top) / height,
  ], axis=1)

  cropped_image = tf.image.crop_and_resize(
      image=image[tf.newaxis, :, :, :],
      boxes=crop_bounds[tf.newaxis, :],
      box_ind=tf.zeros((1,), tf.int32),
      crop_size=(ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE),
  )[0, :, :, :]

  cropped_classes = tf.boolean_mask(classes, box_masks, axis=0)

  return cropped_image, cropped_boxes, cropped_classes
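
The crop selection above is a retry loop: tf.while_loop keeps calling proposal until the accept flag flips to True. A minimal sketch of the retry-until-valid pattern, assuming TF 1.x and with a coin flip as a hypothetical stand-in for the IoU/aspect/mask checks:

import tensorflow as tf

def propose(accepted, value):
    new_value = tf.random_uniform((), 0., 1.)
    new_accepted = new_value > 0.5  # stand-in for the real validity checks
    return new_accepted, new_value

accepted, value = tf.while_loop(
    cond=lambda ok, _: tf.logical_not(ok),
    body=propose,
    loop_vars=[tf.zeros((), tf.bool), tf.zeros(())],
)
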
Ejemplo n.º 48
0
def beam_search(symbols_to_logits_fn,
                initial_ids,
                beam_size,
                decode_length,
                vocab_size,
                alpha,
                states=None,
                eos_id=EOS_ID):
    """Beam search with length penalties.

  Requires a function that can take the currently decoded symbols and return
  the logits for the next symbol. The implementation is inspired by
  https://arxiv.org/abs/1609.08144.

  When running, the beam search steps can be visualized by using tfdbg to watch
  the operations generating the output ids for each beam step.  These operations
  have the pattern:
    (alive|finished)_topk_(seq,scores)

  Operations marked `alive` represent the new beam sequences that will be
  processed in the next step.  Operations marked `finished` represent the
  completed beam sequences, which may be padded with 0s if no beams finished.

  Operations marked `seq` store the full beam sequence for the time step.
  Operations marked `scores` store the sequence's final log scores.

  The beam search steps will be processed sequentially in order, so when
  capturing tensors observed from these operations, clients can make
  assumptions about which step is being recorded.

  Args:
    symbols_to_logits_fn: Interface to the model, to provide logits.
        Should take [batch_size, decoded_ids] and return [batch_size, vocab_size]
    initial_ids: Ids to start off the decoding, this will be the first thing
        handed to symbols_to_logits_fn (after expanding to beam size)
        [batch_size]
    beam_size: Size of the beam.
    decode_length: Number of steps to decode for.
    vocab_size: Size of the vocab, must equal the size of the logits returned by
        symbols_to_logits_fn
    alpha: alpha for length penalty.
    states: dict (possibly nested) of decoding states.
    eos_id: ID for end of sentence.
  Returns:
    Tuple of
    (decoded beams [batch_size, beam_size, decode_length]
     decoding probabilities [batch_size, beam_size])
  """
    batch_size = tf.shape(initial_ids)[0]

    # Assume initial_ids are prob 1.0
    initial_log_probs = tf.constant([[0.] + [-float("inf")] * (beam_size - 1)])
    # Expand to beam_size (batch_size, beam_size)
    alive_log_probs = tf.tile(initial_log_probs, [batch_size, 1])

    # Expand each batch and state to beam_size
    alive_seq = expand_to_beam_size(initial_ids, beam_size)
    alive_seq = tf.expand_dims(alive_seq, axis=2)  # (batch_size, beam_size, 1)
    if states:
        states = nest.map_structure(
            lambda state: expand_to_beam_size(state, beam_size), states)
    else:
        states = {}

    # Finished will keep track of all the sequences that have finished so far
    # Finished log probs will be negative infinity in the beginning
    # finished_flags will keep track of which sequences have finished
    finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)
    # Setting the scores of the initial to negative infinity.
    finished_scores = tf.ones([batch_size, beam_size]) * -INF
    finished_flags = tf.zeros([batch_size, beam_size], tf.bool)

    def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq,
                      curr_scores, curr_finished):
        """Given sequences and scores, will gather the top k=beam size sequences.

    Args:
      finished_seq: Current finished sequences.
        [batch_size, beam_size, current_decoded_length]
      finished_scores: scores for each of these sequences.
        [batch_size, beam_size]
      finished_flags: finished bools for each of these sequences.
        [batch_size, beam_size]
      curr_seq: current topk sequence that has been grown by one position.
        [batch_size, beam_size, current_decoded_length]
      curr_scores: scores for each of these sequences. [batch_size, beam_size]
      curr_finished: Finished flags for each of these sequences.
        [batch_size, beam_size]
    Returns:
      Tuple of
        (Topk sequences based on scores,
         log probs of these sequences,
         Finished flags of these sequences)
    """
        # First append a column of zero ids to finished_seq so that it has the
        # same length as the newly grown curr_seq
        finished_seq = tf.concat(
            [finished_seq,
             tf.zeros([batch_size, beam_size, 1], tf.int32)],
            axis=2)

        # Set the scores of the unfinished seq in curr_seq to large negative
        # values
        curr_scores += (1. - tf.to_float(curr_finished)) * -INF
        # concatenating the sequences and scores along beam axis
        curr_finished_seq = tf.concat([finished_seq, curr_seq], axis=1)
        curr_finished_scores = tf.concat([finished_scores, curr_scores],
                                         axis=1)
        curr_finished_flags = tf.concat([finished_flags, curr_finished],
                                        axis=1)
        return compute_topk_scores_and_seq(curr_finished_seq,
                                           curr_finished_scores,
                                           curr_finished_scores,
                                           curr_finished_flags, beam_size,
                                           batch_size, "grow_finished")

    def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished,
                   states):
        """Given sequences and scores, will gather the top k=beam size sequences.

    Args:
      curr_seq: current topk sequence that has been grown by one position.
        [batch_size, beam_size, i+1]
      curr_scores: scores for each of these sequences. [batch_size, beam_size]
      curr_log_probs: log probs for each of these sequences.
        [batch_size, beam_size]
      curr_finished: Finished flags for each of these sequences.
        [batch_size, beam_size]
      states: dict (possibly nested) of decoding states.
    Returns:
      Tuple of
        (Topk sequences based on scores,
         log probs of these sequences,
         Finished flags of these sequences)
    """
        # Set the scores of the finished seq in curr_seq to large negative
        # values
        curr_scores += tf.to_float(curr_finished) * -INF
        return compute_topk_scores_and_seq(curr_seq, curr_scores,
                                           curr_log_probs, curr_finished,
                                           beam_size, batch_size, "grow_alive",
                                           states)

    def grow_topk(i, alive_seq, alive_log_probs, states):
        r"""Inner beam seach loop.

    This function takes the current alive sequences, and grows them to topk
    sequences where k = 2*beam. We use 2*beam because we could have beam_size
    sequences that hit <EOS>, leaving no alive sequences to continue. With
    2*beam_size this will not happen. This relies on the assumption that the
    vocab size is > beam_size: if that holds, we'll have at least beam_size
    non-<EOS> extensions among the next top 2*beam words.
    The length penalty is given by ((5 + len(decode)) / 6) ^ alpha, and the
    log probs are divided by it. Please refer to
    https://arxiv.org/abs/1609.08144.

    Args:
      i: loop index
      alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
      alive_log_probs: probabilities of these sequences. [batch_size, beam_size]
      states: dict (possibly nested) of decoding states.
    Returns:
      Tuple of
        (Topk sequences extended by the next word,
         The log probs of these sequences,
         The scores with length penalty of these sequences,
         Flags indicating which of these sequences have finished decoding,
         dict of transformed decoding states)
    """
        # Get the logits for all the possible next symbols
        flat_ids = tf.reshape(alive_seq, [batch_size * beam_size, -1])

        # (batch_size * beam_size, decoded_length)
        if states:
            flat_states = nest.map_structure(
                lambda state: tf.reshape(state, [batch_size * beam_size, -1]),
                states)
            flat_logits, flat_states = symbols_to_logits_fn(
                flat_ids, flat_states)
            states = nest.map_structure(
                lambda state: tf.reshape(state, [batch_size, beam_size, -1]),
                flat_states)
        else:
            flat_logits = symbols_to_logits_fn(flat_ids)
        logits = tf.reshape(flat_logits, (batch_size, beam_size, -1))

        # Convert logits to normalized log probs
        candidate_log_probs = log_prob_from_logits(logits)

        # Multiply the probabilities by the current probabilities of the beam,
        # i.e. add their log probs.
        # (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1)
        log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs,
                                                         axis=2)

        length_penalty = tf.pow(((5. + tf.to_float(i + 1)) / 6.), alpha)

        curr_scores = log_probs / length_penalty
        # Flatten out (beam_size, vocab_size) probs into a list of possibilities
        flat_curr_scores = tf.reshape(curr_scores,
                                      [-1, beam_size * vocab_size])

        topk_scores, topk_ids = tf.nn.top_k(flat_curr_scores, k=beam_size * 2)

        # Recovering the log probs because we will need to send them back
        topk_log_probs = topk_scores * length_penalty

        # Work out what beam the top probs are in.
        topk_beam_index = topk_ids // vocab_size
        topk_ids %= vocab_size  # Unflatten the ids

        # The next three steps create coordinates for tf.gather_nd to pull
        # out the correct sequences from the ids that we need to grow.
        # We will also use the coordinates to gather the booleans of the beam items
        # that survived.
        batch_pos = compute_batch_indices(batch_size, beam_size * 2)

        # top beams will give us the actual coordinates to do the gather.
        # stacking will create a tensor of dimension batch * beam * 2, where the
        # last dimension contains the i,j gathering coordinates.
        topk_coordinates = tf.stack([batch_pos, topk_beam_index], axis=2)

        # Gather up the most probable 2*beams both for the ids and finished_in_alive
        # bools
        topk_seq = tf.gather_nd(alive_seq, topk_coordinates)
        if states:
            states = nest.map_structure(
                lambda state: tf.gather_nd(state, topk_coordinates), states)

        # Append the most probable alive
        topk_seq = tf.concat(
            [topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2)

        topk_finished = tf.equal(topk_ids, eos_id)

        return topk_seq, topk_log_probs, topk_scores, topk_finished, states

    def inner_loop(i, alive_seq, alive_log_probs, finished_seq,
                   finished_scores, finished_flags, states):
        """Inner beam seach loop.

    There are three groups of tensors: alive, finished, and topk.
    The alive group contains information about the current alive sequences.
    The topk group contains information about alive + topk current decoded
    words. The finished group contains information about finished sentences,
    that is, the ones that have decoded to <EOS>. These are what we return.
    The general beam search algorithm is as follows:
    While we haven't terminated (please see the termination condition):
      1. Grow the current alive to get beam*2 topk sequences.
      2. Among the topk, keep the top beam_size ones that haven't reached EOS
      in alive.
      3. Among the topk, keep the top beam_size ones that have reached EOS in
      finished.
    Repeat.
    To keep things simple with fixed-size tensors, we end up inserting
    unfinished sequences into finished at the beginning. To counteract that,
    we add negative INF to the score of each unfinished sequence, so that when
    a truly finished sequence does appear, it will have a higher score than
    all the unfinished ones.

    Args:
      i: loop index
      alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
      alive_log_probs: probabilities of the beams. [batch_size, beam_size]
      finished_seq: Current finished sequences.
        [batch_size, beam_size, i+1]
      finished_scores: scores for each of these sequences.
        [batch_size, beam_size]
      finished_flags: finished bools for each of these sequences.
        [batch_size, beam_size]
      states: dict (possibly nested) of decoding states.

    Returns:
      Tuple of
        (Incremented loop index
         New alive sequences,
         Log probs of the alive sequences,
         New finished sequences,
         Scores of the new finished sequences,
         Flags indicating which sequences in finished have reached EOS,
         dict of final decoding states)
    """

        # Each inner loop, we carry out three steps:
        # 1. Get the current topk items.
        # 2. Extract the ones that have finished and haven't finished
        # 3. Recompute the contents of finished based on scores.
        topk_seq, topk_log_probs, topk_scores, topk_finished, states = grow_topk(
            i, alive_seq, alive_log_probs, states)
        alive_seq, alive_log_probs, _, states = grow_alive(
            topk_seq, topk_scores, topk_log_probs, topk_finished, states)
        finished_seq, finished_scores, finished_flags, _ = grow_finished(
            finished_seq, finished_scores, finished_flags, topk_seq,
            topk_scores, topk_finished)

        return (i + 1, alive_seq, alive_log_probs, finished_seq,
                finished_scores, finished_flags, states)

    def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq,
                     finished_scores, finished_in_finished, unused_states):
        """Checking termination condition.

    We terminate when we have decoded up to decode_length, or when the lowest
    scoring item in finished has a greater score than the highest-probability
    item in alive divided by the maximum length penalty.

    Args:
      i: loop index
      alive_log_probs: probabilities of the beams. [batch_size, beam_size]
      finished_scores: scores for each of these sequences.
        [batch_size, beam_size]
      finished_in_finished: finished bools for each of these sequences.
        [batch_size, beam_size]

    Returns:
      Bool.
    """
        max_length_penalty = tf.pow(((5. + tf.to_float(decode_length)) / 6.),
                                    alpha)
        # The best possible score of the most likely alive sequence
        lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty

        # Now compute the lowest score of a finished sequence in finished.
        # If a sequence isn't finished, we multiply its score by 0. Since
        # scores are all negative, taking the min gives us the score of the
        # lowest finished item.
        lowest_score_of_finished_in_finished = tf.reduce_min(
            finished_scores * tf.to_float(finished_in_finished), axis=1)
        # If none of the sequences have finished, the min will be 0, so we
        # replace it with negative INF in that case. The score of any sequence
        # in alive will be much higher than negative INF, and the termination
        # condition will not be met.
        lowest_score_of_finished_in_finished += (
            (1. - tf.to_float(tf.reduce_any(finished_in_finished, 1))) * -INF)

        bound_is_met = tf.reduce_all(
            tf.greater(lowest_score_of_finished_in_finished,
                       lower_bound_alive_scores))

        return tf.logical_and(tf.less(i, decode_length),
                              tf.logical_not(bound_is_met))

    (_, alive_seq, alive_log_probs, finished_seq, finished_scores,
     finished_flags, _) = tf.while_loop(
         _is_finished,
         inner_loop, [
             tf.constant(0), alive_seq, alive_log_probs, finished_seq,
             finished_scores, finished_flags, states
         ],
         shape_invariants=[
             tf.TensorShape([]),
             tf.TensorShape([None, None, None]),
             alive_log_probs.get_shape(),
             tf.TensorShape([None, None, None]),
             finished_scores.get_shape(),
             finished_flags.get_shape(),
             nest.map_structure(
                 lambda tensor: tf.TensorShape([None] * tensor.shape.ndims),
                 states),
         ],
         parallel_iterations=1,
         back_prop=False)

    alive_seq.set_shape((None, beam_size, None))
    finished_seq.set_shape((None, beam_size, None))

    # Accounting for a corner case: it's possible that no sequence in alive
    # for a particular batch item ever reached EOS. In that case, we should
    # just copy the contents of alive for that batch item. If
    # tf.reduce_any(finished_flags, 1) is False, no sequence for that batch
    # index reached EOS. We need to do the same for the scores as well.
    finished_seq = tf.where(tf.reduce_any(finished_flags, 1), finished_seq,
                            alive_seq)
    finished_scores = tf.where(tf.reduce_any(finished_flags, 1),
                               finished_scores, alive_log_probs)
    return finished_seq, finished_scores
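
The length penalty used throughout (length_penalty = ((5 + length) / 6) ** alpha, with scores = log_probs / length_penalty) can be sanity-checked with plain numbers: dividing a negative log prob by a larger penalty moves the score toward zero, favoring longer hypotheses, and alpha = 0 disables the penalty entirely.

def length_penalty(length, alpha):
    return ((5.0 + length) / 6.0) ** alpha

log_prob = -4.0
for length, alpha in [(1, 0.6), (10, 0.6), (10, 0.0)]:
    score = log_prob / length_penalty(length, alpha)
    print(length, alpha, round(score, 4))
# (1, 0.6)  -> -4.0     (penalty = 1.0)
# (10, 0.6) -> -2.3081  (penalty ~ 1.7330)
# (10, 0.0) -> -4.0     (penalty = 1.0)
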
Ejemplo n.º 49
0
def structural_loss(fake1, fake2):

    eps = 1e-4
    win_rad = 1

    win_size = (win_rad * 2 + 1)**2
    b, d, h, w = fake1.get_shape().as_list()

    c_h, c_w = h - 2 * win_rad, w - 2 * win_rad
    win_diam = win_rad * 2 + 1

    indsM = tf.reshape(tf.range(h * w), [1, h, w, 1])
    win_ids = extract_patches(indsM, win_rad * 2 + 1, h, w)

    A = tf.reshape(win_ids, [-1, 9, 1])
    A = tf.tile(A, [1, 1, 9])

    col = tf.transpose(A, [0, 2, 1])
    col = tf.cast(tf.reshape(col, [-1]), tf.int64)

    row = tf.cast(tf.reshape(A, [-1]), tf.int64)

    def laplacian_matrix(img):

        temp = tf.transpose(img, [0, 2, 3, 1])

        winI = extract_patches(temp, win_rad * 2 + 1, h, w)
        winI = tf.reshape(winI, [-1, (h - 2) * (w - 2), 9, 3])

        win_mu = tf.reduce_mean(winI, axis=2, keep_dims=True)

        term1 = tf.matmul(tf.transpose(winI, [0, 1, 3, 2]), winI)
        term2 = tf.matmul(tf.transpose(win_mu, [0, 1, 3, 2]), win_mu)

        win_var = term1 / win_size - term2

        inv = tf.linalg.inv(win_var + (eps / win_size) * tf.eye(3))

        X = tf.matmul(winI - win_mu, inv)

        vals = tf.eye(win_size) - (1.0 / win_size) * (
            1 + tf.matmul(X, tf.transpose(winI - win_mu, [0, 1, 3, 2])))

        vals = tf.layers.flatten(vals)

        SM = tf.SparseTensor(indices=tf.stack([row, col], 1),
                             values=vals[0],
                             dense_shape=[h * w, h * w])

        return SM

    def condition(i, loss):
        return tf.less(i, tf.shape(fake1)[0])

    def action(i, loss):

        slice1 = fake1[i:i + 1]
        slice2 = fake2[i:i + 1]

        L1 = laplacian_matrix(slice1)
        L2 = laplacian_matrix(slice2)

        size = fake1.get_shape().as_list()[2]

        temp = tf.reshape(slice1, [3, -1])
        covariance = tf.matmul(
            temp, tf.sparse_tensor_dense_matmul(L2,
                                                tf.transpose(temp))) / size**2
        str_loss_1 = tf.trace(covariance)

        temp = tf.reshape(slice2, [3, -1])
        covariance = tf.matmul(
            temp, tf.sparse_tensor_dense_matmul(L1,
                                                tf.transpose(temp))) / size**2
        str_loss_2 = tf.trace(covariance)

        str_loss_1 = tf.reshape(str_loss_1, [1, 1])
        str_loss_2 = tf.reshape(str_loss_2, [1, 1])

        loss = tf.concat([loss, str_loss_1, str_loss_2], axis=0)

        return tf.add(i, 1), loss

    i = tf.constant(0)
    loss = tf.zeros((0, 1), dtype=tf.float32)  # grows inside the loop; no Variable needed

    final_index, loss = tf.while_loop(
        condition,
        action, [i, loss],
        shape_invariants=[i.get_shape(),
                          tf.TensorShape([None, 1])])

    return loss
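
loss above grows by two rows per image, which is why the while_loop declares tf.TensorShape([None, 1]) in shape_invariants: any loop variable whose shape changes across iterations must mark the changing axis as None. A minimal sketch of that pattern, assuming TF 1.x:

import tensorflow as tf

i0 = tf.constant(0)
acc0 = tf.zeros((0, 1))  # starts empty and grows one row per iteration

def body(i, acc):
    row = tf.fill([1, 1], tf.cast(i, tf.float32))
    return i + 1, tf.concat([acc, row], axis=0)

_, acc = tf.while_loop(
    lambda i, _: i < 4,
    body,
    [i0, acc0],
    shape_invariants=[i0.get_shape(), tf.TensorShape([None, 1])],
)
# acc evaluates to [[0.], [1.], [2.], [3.]]
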
Ejemplo n.º 50
0
def sample_annealed_importance_chain(num_steps,
                                     proposal_log_prob_fn,
                                     target_log_prob_fn,
                                     current_state,
                                     make_kernel_fn,
                                     parallel_iterations=10,
                                     name=None):
    """Runs annealed importance sampling (AIS) to estimate normalizing constants.

  This function uses Hamiltonian Monte Carlo to sample from a series of
  distributions that slowly interpolates between an initial "proposal"
  distribution:

  `exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`

  and the target distribution:

  `exp(target_log_prob_fn(x) - target_log_normalizer)`,

  accumulating importance weights along the way. The product of these
  importance weights gives an unbiased estimate of the ratio of the
  normalizing constants of the initial distribution and the target
  distribution:

  `E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.

  Note: `proposal_log_prob_fn` and `target_log_prob_fn` are called exactly three
  times (although this may be reduced to two times, in the future).

  Args:
    num_steps: Integer number of Markov chain updates to run. More
      iterations means more expense, but smoother annealing between q
      and p, which in turn means exponentially lower variance for the
      normalizing constant estimator.
    proposal_log_prob_fn: Python callable that returns the log density of the
      initial distribution.
    target_log_prob_fn: Python callable which takes an argument like
      `current_state` (or `*current_state` if it's a list) and returns its
      (possibly unnormalized) log-density under the target distribution.
    current_state: `Tensor` or Python `list` of `Tensor`s representing the
      current state(s) of the Markov chain(s). The first `r` dimensions index
      independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
    make_kernel_fn: Python `callable` which returns a `TransitionKernel`-like
      object. Must take one argument representing the `TransitionKernel`'s
      `target_log_prob_fn`. The `target_log_prob_fn` argument represents the
      `TransitionKernel`'s target log distribution. Note:
      `sample_annealed_importance_chain` creates a new `target_log_prob_fn`
      which is an interpolation between the supplied `target_log_prob_fn` and
      `proposal_log_prob_fn`; it is this interpolated function which is used
      as an argument to `make_kernel_fn`.
    parallel_iterations: The number of iterations allowed to run in parallel.
      It must be a positive integer. See `tf.while_loop` for more details.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "sample_annealed_importance_chain").

  Returns:
    next_state: `Tensor` or Python list of `Tensor`s representing the
      state(s) of the Markov chain(s) at the final iteration. Has same shape as
      input `current_state`.
    ais_weights: Tensor with the estimated weight(s). Has shape matching
      `target_log_prob_fn(current_state)`.
    kernel_results: `collections.namedtuple` of internal calculations used to
      advance the chain.

  #### Examples

  ##### Estimate the normalizing constant of a log-gamma distribution.

  ```python
  tfd = tf.contrib.distributions

  # Run 100 AIS chains in parallel
  num_chains = 100
  dims = 20
  dtype = np.float32

  proposal = tfd.MultivariateNormalDiag(
     loc=tf.zeros([dims], dtype=dtype))

  target = tfd.TransformedDistribution(
    distribution=tfd.Gamma(concentration=dtype(2),
                           rate=dtype(3)),
    bijector=tfd.bijectors.Invert(tfd.bijectors.Exp()),
    event_shape=[dims])

  chains_state, ais_weights, kernels_results = (
      tfp.mcmc.sample_annealed_importance_chain(
          num_steps=1000,
          proposal_log_prob_fn=proposal.log_prob,
          target_log_prob_fn=target.log_prob,
          current_state=proposal.sample(num_chains),
          make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=tlp_fn,
            step_size=0.2,
            num_leapfrog_steps=2)))

  log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
                              - np.log(num_chains))
  log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)
  ```

  ##### Estimate marginal likelihood of a Bayesian regression model.

  ```python
  tfd = tf.contrib.distributions

  def make_prior(dims, dtype):
    return tfd.MultivariateNormalDiag(
        loc=tf.zeros(dims, dtype))

  def make_likelihood(weights, x):
    return tfd.MultivariateNormalDiag(
        loc=tf.tensordot(weights, x, axes=[[0], [-1]]))

  # Run 100 AIS chains in parallel
  num_chains = 100
  dims = 10
  dtype = np.float32

  # Make training data.
  x = np.random.randn(num_chains, dims).astype(dtype)
  true_weights = np.random.randn(dims).astype(dtype)
  y = np.dot(x, true_weights) + np.random.randn(num_chains)

  # Setup model.
  prior = make_prior(dims, dtype)
  def target_log_prob_fn(weights):
    return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)

  proposal = tfd.MultivariateNormalDiag(
      loc=tf.zeros(dims, dtype))

  weight_samples, ais_weights, kernel_results = (
      tfp.mcmc.sample_annealed_importance_chain(
        num_steps=1000,
        proposal_log_prob_fn=proposal.log_prob,
        target_log_prob_fn=target_log_prob_fn,
        current_state=tf.zeros([num_chains, dims], dtype),
        make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
          target_log_prob_fn=tlp_fn,
          step_size=0.1,
          num_leapfrog_steps=2)))
  log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)
                             - np.log(num_chains))
  ```

  """
    with tf.name_scope(name, "sample_annealed_importance_chain",
                       [num_steps, current_state]):
        num_steps = tf.convert_to_tensor(num_steps,
                                         dtype=tf.int32,
                                         name="num_steps")
        if mcmc_util.is_list_like(current_state):
            current_state = [
                tf.convert_to_tensor(s, name="current_state")
                for s in current_state
            ]
        else:
            current_state = tf.convert_to_tensor(current_state,
                                                 name="current_state")

        def _make_convex_combined_log_prob_fn(iter_):
            def _fn(*args):
                p = tf.identity(proposal_log_prob_fn(*args),
                                name="proposal_log_prob")
                t = tf.identity(target_log_prob_fn(*args),
                                name="target_log_prob")
                dtype = p.dtype.base_dtype
                beta = tf.cast(iter_ + 1, dtype) / tf.cast(num_steps, dtype)
                return tf.identity(beta * t + (1. - beta) * p,
                                   name="convex_combined_log_prob")

            return _fn

        def _loop_body(iter_, ais_weights, current_state, kernel_results):
            """Closure which implements `tf.while_loop` body."""
            x = (current_state
                 if mcmc_util.is_list_like(current_state) else [current_state])
            proposal_log_prob = proposal_log_prob_fn(*x)
            target_log_prob = target_log_prob_fn(*x)
            ais_weights += ((target_log_prob - proposal_log_prob) /
                            tf.cast(num_steps, ais_weights.dtype))
            kernel = make_kernel_fn(_make_convex_combined_log_prob_fn(iter_))
            next_state, inner_results = kernel.one_step(
                current_state, kernel_results.inner_results)
            kernel_results = AISResults(
                proposal_log_prob=proposal_log_prob,
                target_log_prob=target_log_prob,
                inner_results=inner_results,
            )
            return [iter_ + 1, ais_weights, next_state, kernel_results]

        def _bootstrap_results(init_state):
            """Creates first version of `previous_kernel_results`."""
            kernel = make_kernel_fn(_make_convex_combined_log_prob_fn(iter_=0))
            inner_results = kernel.bootstrap_results(init_state)

            convex_combined_log_prob = inner_results.accepted_results.target_log_prob
            dtype = convex_combined_log_prob.dtype.as_numpy_dtype
            shape = tf.shape(convex_combined_log_prob)
            proposal_log_prob = tf.fill(shape,
                                        dtype(np.nan),
                                        name="bootstrap_proposal_log_prob")
            target_log_prob = tf.fill(shape,
                                      dtype(np.nan),
                                      name="target_target_log_prob")

            return AISResults(
                proposal_log_prob=proposal_log_prob,
                target_log_prob=target_log_prob,
                inner_results=inner_results,
            )

        previous_kernel_results = _bootstrap_results(current_state)
        inner_results = previous_kernel_results.inner_results

        ais_weights = tf.zeros(shape=tf.broadcast_dynamic_shape(
            tf.shape(inner_results.proposed_results.target_log_prob),
            tf.shape(inner_results.accepted_results.target_log_prob)),
                               dtype=inner_results.proposed_results.
                               target_log_prob.dtype.base_dtype)

        [_, ais_weights, current_state, kernel_results] = tf.while_loop(
            cond=lambda iter_, *args: iter_ < num_steps,
            body=_loop_body,
            loop_vars=[
                np.int32(0),  # iter_
                ais_weights,
                current_state,
                previous_kernel_results,
            ],
            parallel_iterations=parallel_iterations)

        return [current_state, ais_weights, kernel_results]
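
The loop body above adds (target_log_prob − proposal_log_prob) / num_steps at every step while the kernel samples from the convex-combined density. A NumPy sketch of the weight bookkeeping, assuming 1-D densities and deliberately omitting the MCMC transitions (which makes it degenerate to plain importance sampling, but shows how the pieces fit):

import numpy as np

def proposal_log_prob(x):   # standard normal, normalized
    return -0.5 * x**2 - 0.5 * np.log(2 * np.pi)

def target_log_prob(x):     # unnormalized exp(-x^2 / 4)
    return -0.25 * x**2

num_steps, num_chains = 1000, 5000
x = np.random.randn(num_chains)       # exact samples from the proposal
ais_weights = np.zeros(num_chains)
for _ in range(num_steps):
    ais_weights += (target_log_prob(x) - proposal_log_prob(x)) / num_steps
    # a real chain would now take an MCMC step targeting
    # beta * target + (1 - beta) * proposal; omitted in this sketch
log_normalizer_estimate = np.log(np.mean(np.exp(ais_weights)))
# true value: log sqrt(4 * pi) ~ 1.2655
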
Ejemplo n.º 51
0
def dynamic_decode(decoder: Union[Decoder, BaseDecoder],
                   output_time_major: bool = False,
                   impute_finished: bool = False,
                   maximum_iterations: Optional[TensorLike] = None,
                   parallel_iterations: int = 32,
                   swap_memory: bool = False,
                   training: Optional[bool] = None,
                   scope: Optional[str] = None,
                   **kwargs) -> Tuple[Any, Any, Any]:
    """Perform dynamic decoding with `decoder`.

    Calls initialize() once and step() repeatedly on the Decoder object.

    Args:
      decoder: A `Decoder` instance.
      output_time_major: Python boolean.  Default: `False` (batch major). If
        `True`, outputs are returned as time major tensors (this mode is
        faster). Otherwise, outputs are returned as batch major tensors (this
        adds extra time to the computation).
      impute_finished: Python boolean.  If `True`, then states for batch
        entries which are marked as finished get copied through and the
        corresponding outputs get zeroed out.  This causes some slowdown at
        each time step, but ensures that the final state and outputs have
        the correct values and that backprop ignores time steps that were
        marked as finished.
      maximum_iterations: `int32` scalar, maximum allowed number of decoding
         steps.  Default is `None` (decode until the decoder is fully done).
      parallel_iterations: Argument passed to `tf.while_loop`.
      swap_memory: Argument passed to `tf.while_loop`.
      training: Python boolean. Indicates whether the layer should behave
          in training mode or in inference mode. Only relevant
          when `dropout` or `recurrent_dropout` is used.
      scope: Optional variable scope to use.
      **kwargs: dict, other keyword arguments for dynamic_decode. It might
        contain arguments for `BaseDecoder` to initialize, which takes all
        tensor inputs during call().

    Returns:
      `(final_outputs, final_state, final_sequence_lengths)`.

    Raises:
      ValueError: if `maximum_iterations` is provided but is not a scalar.
    """
    with tf.compat.v1.variable_scope(scope, "decoder") as varscope:
        # Determine context types.
        ctxt = tf.compat.v1.get_default_graph()._get_control_flow_context()
        is_xla = control_flow_util.GetContainingXLAContext(ctxt) is not None
        in_while_loop = control_flow_util.GetContainingWhileContext(
            ctxt) is not None
        # Properly cache variable values inside the while_loop.
        # Don't set a caching device when running in a loop, since it is
        # possible that train steps could be wrapped in a tf.while_loop. In that
        # scenario caching prevents forward computations in loop iterations from
        # re-reading the updated weights.
        if not tf.executing_eagerly() and not in_while_loop:
            if varscope.caching_device is None:
                varscope.set_caching_device(lambda op: op.device)

        if maximum_iterations is not None:
            maximum_iterations = tf.convert_to_tensor(
                maximum_iterations, dtype=tf.int32, name="maximum_iterations")
            if maximum_iterations.shape.ndims != 0:
                raise ValueError("maximum_iterations must be a scalar")

        if isinstance(decoder, Decoder):
            initial_finished, initial_inputs, initial_state = decoder.initialize(
            )
        else:
            # For BaseDecoder that takes tensor inputs during call.
            decoder_init_input = kwargs.pop("decoder_init_input", None)
            decoder_init_kwargs = kwargs.pop("decoder_init_kwargs", {})
            initial_finished, initial_inputs, initial_state = decoder.initialize(
                decoder_init_input, **decoder_init_kwargs)

        zero_outputs = tf.nest.map_structure(
            lambda shape, dtype: tf.zeros(
                _prepend_batch(decoder.batch_size, shape), dtype=dtype),
            decoder.output_size,
            decoder.output_dtype,
        )

        if is_xla and maximum_iterations is None:
            raise ValueError(
                "maximum_iterations is required for XLA compilation.")
        if maximum_iterations is not None:
            initial_finished = tf.logical_or(initial_finished,
                                             0 >= maximum_iterations)
        initial_sequence_lengths = tf.zeros_like(initial_finished,
                                                 dtype=tf.int32)
        initial_time = tf.constant(0, dtype=tf.int32)

        def _shape(batch_size, from_shape):
            if not isinstance(from_shape,
                              tf.TensorShape) or from_shape.ndims == 0:
                return None
            else:
                batch_size = tf.get_static_value(
                    tf.convert_to_tensor(batch_size, name="batch_size"))
                return tf.TensorShape([batch_size]).concatenate(from_shape)

        dynamic_size = maximum_iterations is None or not is_xla

        def _create_ta(s, d):
            return tf.TensorArray(
                dtype=d,
                size=0 if dynamic_size else maximum_iterations,
                dynamic_size=dynamic_size,
                element_shape=_shape(decoder.batch_size, s),
            )

        initial_outputs_ta = tf.nest.map_structure(_create_ta,
                                                   decoder.output_size,
                                                   decoder.output_dtype)

        def condition(
            unused_time,
            unused_outputs_ta,
            unused_state,
            unused_inputs,
            finished,
            unused_sequence_lengths,
        ):
            return tf.logical_not(tf.reduce_all(finished))

        def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
            """Internal while_loop body.

            Args:
              time: scalar int32 tensor.
              outputs_ta: structure of TensorArray.
              state: (structure of) state tensors and TensorArrays.
              inputs: (structure of) input tensors.
              finished: bool tensor (keeping track of what's finished).
              sequence_lengths: int32 tensor (keeping track of time of finish).

            Returns:
              `(time + 1, outputs_ta, next_state, next_inputs, next_finished,
                next_sequence_lengths)`.
            """
            (next_outputs, decoder_state, next_inputs,
             decoder_finished) = decoder.step(time, inputs, state, training)
            decoder_state_sequence_lengths = False
            if decoder.tracks_own_finished:
                next_finished = decoder_finished
                lengths = getattr(decoder_state, "lengths", None)
                if lengths is not None:
                    # sequence lengths are provided by decoder_state.lengths;
                    # overwrite our sequence lengths.
                    decoder_state_sequence_lengths = True
                    sequence_lengths = tf.cast(lengths, tf.int32)
            else:
                next_finished = tf.logical_or(decoder_finished, finished)

            if decoder_state_sequence_lengths:
                # Just pass something through the loop; at the next iteration
                # we'll pull the sequence lengths from the decoder_state again.
                next_sequence_lengths = sequence_lengths
            else:
                next_sequence_lengths = tf.where(
                    tf.logical_not(finished),
                    tf.fill(tf.shape(sequence_lengths), time + 1),
                    sequence_lengths,
                )

            tf.nest.assert_same_structure(state, decoder_state)
            tf.nest.assert_same_structure(outputs_ta, next_outputs)
            tf.nest.assert_same_structure(inputs, next_inputs)

            # Zero out output values past finish
            if impute_finished:

                def zero_out_finished(out, zero):
                    if finished.shape.rank < zero.shape.rank:
                        broadcast_finished = tf.broadcast_to(
                            tf.expand_dims(finished, axis=-1), zero.shape)
                        return tf.where(broadcast_finished, zero, out)
                    else:
                        return tf.where(finished, zero, out)

                emit = tf.nest.map_structure(zero_out_finished, next_outputs,
                                             zero_outputs)
            else:
                emit = next_outputs

            # Copy through states past finish
            def _maybe_copy_state(new, cur):
                # TensorArrays and scalar states get passed through.
                if isinstance(cur, tf.TensorArray):
                    pass_through = True
                else:
                    new.set_shape(cur.shape)
                    pass_through = new.shape.ndims == 0
                if not pass_through:
                    broadcast_finished = tf.broadcast_to(
                        tf.expand_dims(finished, axis=-1), new.shape)
                    return tf.where(broadcast_finished, cur, new)
                else:
                    return new

            if impute_finished:
                next_state = tf.nest.map_structure(_maybe_copy_state,
                                                   decoder_state, state)
            else:
                next_state = decoder_state

            outputs_ta = tf.nest.map_structure(
                lambda ta, out: ta.write(time, out), outputs_ta, emit)
            return (
                time + 1,
                outputs_ta,
                next_state,
                next_inputs,
                next_finished,
                next_sequence_lengths,
            )

        res = tf.while_loop(
            condition,
            body,
            loop_vars=(
                initial_time,
                initial_outputs_ta,
                initial_state,
                initial_inputs,
                initial_finished,
                initial_sequence_lengths,
            ),
            parallel_iterations=parallel_iterations,
            maximum_iterations=maximum_iterations,
            swap_memory=swap_memory,
        )

        final_outputs_ta = res[1]
        final_state = res[2]
        final_sequence_lengths = res[5]

        final_outputs = tf.nest.map_structure(lambda ta: ta.stack(),
                                              final_outputs_ta)

        try:
            final_outputs, final_state = decoder.finalize(
                final_outputs, final_state, final_sequence_lengths)
        except NotImplementedError:
            pass

        if not output_time_major:
            final_outputs = tf.nest.map_structure(_transpose_batch_time,
                                                  final_outputs)

    return final_outputs, final_state, final_sequence_lengths
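
Stripped of imputation, XLA handling, and state bookkeeping, the loop above reduces to: step, write the emitted output into a dynamically sized TensorArray, record when each sequence finished, and stop once all have. A bare-bones sketch of that skeleton, with step_fn as a hypothetical stand-in for decoder.step:

import tensorflow as tf

batch_size, max_steps = 3, 10

def step_fn(time, inputs):
    outputs = inputs + 1.0
    step_finished = tf.fill([batch_size], time + 1 >= max_steps)
    return outputs, outputs, step_finished  # (outputs, next_inputs, finished)

def body(time, ta, inputs, finished, lengths):
    outputs, next_inputs, step_finished = step_fn(time, inputs)
    next_finished = tf.logical_or(finished, step_finished)
    # Freeze the recorded length of sequences that had already finished.
    lengths = tf.where(finished, lengths, tf.fill([batch_size], time + 1))
    return time + 1, ta.write(time, outputs), next_inputs, next_finished, lengths

loop_out = tf.while_loop(
    lambda t, ta, i, finished, l: tf.logical_not(tf.reduce_all(finished)),
    body,
    [tf.constant(0),
     tf.TensorArray(tf.float32, size=0, dynamic_size=True),
     tf.zeros([batch_size]),
     tf.zeros([batch_size], tf.bool),
     tf.zeros([batch_size], tf.int32)],
)
final_outputs = loop_out[1].stack()  # time-major: (time, batch_size)
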
Ejemplo n.º 52
0
    def __init__(self, batch_size, vocab_size, embedding_dim, hidden_size, max_anslen, beam_search_size, dropout_rate,
                 initial_learning_rate, mode='train'):
        # inputs
        self.inputs_q = tf.placeholder(tf.int32, shape=[batch_size, None], name='inputs_q')  # already word indices
        self.inputs_actual_length_q = tf.placeholder(tf.int32, [batch_size], name='inputs_actual_length_q')
        self.inputs_p = tf.placeholder(tf.int32, shape=[batch_size, None], name='inputs_p')
        self.inputs_actual_length_p = tf.placeholder(tf.int32, [batch_size], name='inputs_actual_length_p')
        self.starts = tf.placeholder(tf.float32, [batch_size, None, 1])  # the paper later mentions a dimension of 50 -- unclear; shouldn't this just be a single number per word, i.e. 0 or 1?
        self.ends = tf.placeholder(tf.float32, [batch_size, None, 1])

        # targets
        if mode == 'train':
            self.targets_a = tf.placeholder(tf.int32, shape=[batch_size, None], name='targets_a')
            self.targets_actual_length_a = tf.placeholder(tf.int32, [batch_size], name='targets_actual_length_a')

        # embeddings
        # to handle punctuation we couldn't just look up: we'd have to maintain our own vocabulary embedding W and update it. Lookup is used here for convenience.
        with tf.variable_scope('embedding', reuse=tf.AUTO_REUSE):
            embedding = tf.get_variable(name='embedding',
                                        initializer=tf.truncated_normal(shape=[vocab_size, embedding_dim],
                                                                        stddev=0.1))
            # truncated normal initializer: values more than two standard deviations from the mean are redrawn

        inputs_embedded_q = tf.nn.embedding_lookup(embedding, self.inputs_q)
        inputs_embedded_p = tf.nn.embedding_lookup(embedding, self.inputs_p)
        if mode == 'train':
            targets_embedded_a = tf.nn.embedding_lookup(embedding, self.targets_a)

        inputs_concat_pos = tf.concat([self.starts, self.ends], axis=2)
        inputs_concat_p = tf.concat([inputs_embedded_p, inputs_concat_pos], axis=2)
        # print inputs_concat_pos, inputs_concat_p

        # question encoder
        with tf.variable_scope("q_encoder", reuse=tf.AUTO_REUSE):
            fcell_q = tf.nn.rnn_cell.GRUCell(hidden_size)
            bcell_q = tf.nn.rnn_cell.GRUCell(hidden_size)
            fcell_q = tf.contrib.rnn.DropoutWrapper(fcell_q, output_keep_prob=1 - dropout_rate)  # there are three dropout options; which one should be used?
            bcell_q = tf.contrib.rnn.DropoutWrapper(bcell_q, output_keep_prob=1 - dropout_rate)
            (fw_outputs_q, bw_outputs_q), (fw_final_state_q, bw_final_state_q) = \
                tf.nn.bidirectional_dynamic_rnn(cell_fw=fcell_q,
                                                cell_bw=bcell_q,
                                                inputs=inputs_embedded_q,
                                                sequence_length=self.inputs_actual_length_q,
                                                dtype=tf.float32)
            h_q = tf.concat((fw_outputs_q, bw_outputs_q), 2)
            # print h_q  # outputs are scoped root, root_1; the embedding above is shared
            # print 'bw_outputs_q', bw_outputs_q

        # passage encoder
        with tf.variable_scope("p_encoder", reuse=tf.AUTO_REUSE):
            fcell_p = tf.nn.rnn_cell.GRUCell(hidden_size)
            bcell_p = tf.nn.rnn_cell.GRUCell(hidden_size)
            fcell_p = tf.contrib.rnn.DropoutWrapper(fcell_p, output_keep_prob=1 - dropout_rate)  # there are three dropout options; which one should be used?
            bcell_p = tf.contrib.rnn.DropoutWrapper(bcell_p, output_keep_prob=1 - dropout_rate)
            (fw_outputs_p, bw_outputs_p), (fw_final_state_p, bw_final_state_p) = \
                tf.nn.bidirectional_dynamic_rnn(cell_fw=fcell_p,
                                                cell_bw=bcell_p,
                                                inputs=inputs_concat_p,
                                                sequence_length=self.inputs_actual_length_p,
                                                dtype=tf.float32)
            h_p = tf.concat((fw_outputs_p, bw_outputs_p), 2)
            # print h_p
            # print 'bw_outputs_p', bw_outputs_p

        # decoder
        with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
            with tf.variable_scope("initial_state"):
                w_d = tf.get_variable(name='w_d', shape=[hidden_size * 2, hidden_size * 2])
                bias = tf.get_variable(name='bias', shape=[1, hidden_size * 2])

                def compute_d_0(i, bw_p, bw_q, d_0_b):
                    bw_p_0_i = tf.reshape(bw_p[i][0], shape=[1, hidden_size])  # is index 0 the right (first) backward output?
                    bw_q_0_i = tf.reshape(bw_q[i][0], shape=[1, hidden_size])
                    h_concat_i = tf.concat([bw_p_0_i, bw_q_0_i], axis=1)
                    d_0_i = tf.tanh(tf.matmul(h_concat_i, w_d) + bias)  # the dimension of d_0 is arbitrary and can be adjusted via w_d; per the paper it should probably be 150
                    d_0_b = d_0_b.write(i, d_0_i)  # TensorArray.write returns a new TensorArray; reassign or the write is lost
                    i = tf.add(i, 1)
                    return i, bw_p, bw_q, d_0_b

                d_0_b = tf.TensorArray(dtype=tf.float32, size=batch_size)
                c = lambda x, y, z, w: tf.less(x, batch_size)  # loop over the batch
                b = lambda x, y, z, w: compute_d_0(x, y, z, w)
                i = tf.constant(0)  # batch index
                d_res = tf.while_loop(cond=c, body=b, loop_vars=(i, bw_outputs_p, bw_outputs_q, d_0_b))
                d_0 = d_res[-1].stack()
                # print 'd_0', d_0

            with tf.variable_scope("attention_decoder_ouput", reuse=tf.AUTO_REUSE):
                w_a = tf.get_variable(name='w_a', shape=[hidden_size * 2, hidden_size * 2])
                u_a = tf.get_variable(name='u_a', shape=[hidden_size * 2, hidden_size * 2])
                v_a = tf.get_variable(name='v_a', shape=[hidden_size * 2, 1])

                cell_d = tf.nn.rnn_cell.GRUCell(hidden_size * 2)  # unsure about this dimension: how are w, c, d combined? d should just be hidden_size, no?

                w_r = tf.get_variable(name='w_r', shape=[embedding_dim, hidden_size * 2])  # note: multiplied with the word embedding, whose dimension differs
                u_r = tf.get_variable(name='u_r', shape=[hidden_size * 2, hidden_size * 2])
                v_r = tf.get_variable(name='v_r', shape=[hidden_size * 2, hidden_size * 2])
                w_o = tf.get_variable(name='w_o', shape=[hidden_size, vocab_size])  # is this where it gets adjusted?

                def attention_step(h_p_i, h_q_i, d_t_1):  # w_t should be the embedding of the current input word, i.e. the previously emitted word
                    h = tf.concat([h_p_i, h_q_i], axis=0)  # the word counts differ, so how are these concatenated? Simply along the first axis -- that is the approach taken here
                    # the idea: concatenate p and q, then attend over every word, so everything can be multiplied at once
                    # print d_t_1, h
                    sum_d = tf.matmul(d_t_1, w_a) + tf.matmul(h, u_a)  # now a matrix; h has p+q words as rows and the vector dimension as columns
                    # print 'h, sum_d', h, sum_d
                    s_t = tf.matmul(tf.tanh(sum_d), v_a)  # column vector
                    a_t = tf.nn.softmax(s_t)
                    a_t = tf.transpose(a_t)  # transpose to a row vector
                    c_t = tf.matmul(a_t, h)
                    # print 'c_t', c_t
                    return c_t

                def compute_m_t_j(j, r_t, m_t):
                    r_m = tf.maximum(r_t[0][2 * j], r_t[0][2 * j + 1])  # maxout over adjacent pairs of r_t entries
                    # print r_m
                    m_t = m_t.write(j, r_m)  # reassign: TensorArray.write returns a new TensorArray
                    j = tf.add(j, 1)
                    return j, r_t, m_t

                def maxout_hidden_layer(r_t):
                    m_t = tf.TensorArray(dtype=tf.float32, size=hidden_size)
                    c = lambda x, y, z: tf.less(x, hidden_size)  # hidden_size maxout units, each over one pair of the 2*hidden_size entries of r_t
                    b = lambda x, y, z: compute_m_t_j(x, y, z)
                    j = tf.constant(0)
                    m_t_res = tf.while_loop(cond=c, body=b, loop_vars=(j, r_t, m_t))
                    m_t = m_t_res[-1].stack()
                    m_t = tf.reshape(m_t, shape=[1, hidden_size])
                    # print 'm_t', m_t
                    return m_t
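
                # A minimal vectorized sketch of the same maxout, assuming r_t
                # has shape [1, hidden_size * 2]: group adjacent units in pairs
                # and take the elementwise maximum (unused; for illustration).
                def maxout_hidden_layer_vectorized(r_t):
                    pairs = tf.reshape(r_t, shape=[1, hidden_size, 2])
                    return tf.reduce_max(pairs, axis=2)  # shape [1, hidden_size]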

                # in training the steps can be built independently; at test time each step depends on the previous output, so they cannot
                if mode == 'train':
    
                    def output_step_train(w_t_1, c_t, d_t):
                        r_t = tf.matmul(w_t_1, w_r) + tf.matmul(c_t, u_r) + tf.matmul(d_t, v_r)  # w, c, d need not share a dimension; the parameter matrices adjust it. The paper says r_t is 2d-dimensional, but the exact size is unclear
                        # print 'r_t', r_t
                        m_t = maxout_hidden_layer(r_t)  # maxout hidden layer
                        prob_t = tf.nn.softmax(tf.matmul(m_t, w_o))  # row vector
                        # w_t_index = tf.argmax(prob_t, axis=1)  # argmax?? get the id, then embedding[id]
                        # print 'w_t_index', w_t_index
                        # w_t = tf.reshape(embedding[w_t_index[0]], shape=[1, -1])
                        return prob_t
    
                    def one_step_train(t, h_p_i, h_q_i, ta_i, d_t, w_t, prob_a):  # note: during training w_t is the ground-truth target word that is fed in
                        c_t = attention_step(h_p_i, h_q_i, d_t)
    
                        # print 'w_t, c_t, d_t ', w_t, c_t, d_t
                        w_c_d = tf.concat([w_t, c_t, d_t], axis=1)
                        # print 'w_c_d, d_t', w_c_d, d_t
                        out, d_tp1 = cell_d(inputs=w_c_d, state=d_t)
    
                        prob_t = output_step_train(w_t, c_t, d_tp1)
                        prob_a = prob_a.write(t, prob_t)  # keep the returned TensorArray
                        # print 'prob_t', prob_t

                        # print ta_i[t]
                        w_tp1 = tf.reshape(ta_i[t], shape=[1, -1])  # index -1 holds SOS, so the answer tokens start at 0
    
                        t = tf.add(t, 1)
    
                        return t, h_p_i, h_q_i, ta_i, d_tp1, w_tp1, prob_a
    
                    def one_answer_train(i, prob_b):
                        c = lambda x, y, z, m, n, p, q: tf.less(x, self.targets_actual_length_a[i])  # the bound is the answer length, not the passage length; ideally termination would depend on the generated word, but that is simplified away here
                        b = lambda x, y, z, m, n, p, q: one_step_train(x, y, z, m, n, p, q)
                        t = tf.constant(0)  # batch号
                        h_p_i = h_p[i]
                        h_q_i = h_q[i]
                        d_0_i = d_0[i]
                        ta_i = targets_embedded_a[i]
                        # print 'd_0_i', d_0_i
                        w_0 = tf.reshape(embedding[SOS_ID], shape=[1, -1])  # the embedding of the start token
                        prob_a = tf.TensorArray(dtype=tf.float32, size=self.targets_actual_length_a[i])
                        # print prob_a
                        res = tf.while_loop(cond=c, body=b, loop_vars=(t, h_p_i, h_q_i, ta_i, d_0_i, w_0, prob_a))
                        temp = tf.squeeze(res[-1].stack(), axis=1)  # stack -> [T, 1, vocab_size]; squeeze drops the singleton axis -> [T, vocab_size]
                        # print res[-1].stack(), temp
                        prob_b = prob_b.write(i, temp)  # keep the returned TensorArray
                        i = tf.add(i, 1)
                        return i, prob_b
    
                    prob_b = tf.TensorArray(dtype=tf.float32, size=batch_size)  # per example: [answer_length, vocab_size]
                    c = lambda x, y: tf.less(x, batch_size)  # loop over the batch
                    b = lambda x, y: one_answer_train(x, y)
                    i = tf.constant(0)  # batch index
                    prob_b_res = tf.while_loop(cond=c, body=b, loop_vars=(i, prob_b))
                    prob = prob_b_res[-1].stack()
                    # print 'prob', prob

                if mode == 'test':  # should use beam search; for now simply keep the highest-probability sequence at the end

                    def output_step_test(w_t_1, c_t, d_t, seq_w_ts_j, seq_w_ts_indices_j, seq_prob_ts_j):
                        # the input must be checked here: has this beam already emitted EOS?
                        # if the input is the end token, just keep emitting EOS

                        def eos():
                            w_tp1s_j = tf.reshape(embedding[EOS_ID], shape=[1, -1])  # keep appending EOS
                            seq_w_tp1s_j = tf.concat([seq_w_ts_j, w_tp1s_j], axis=0)
                            seq_w_tp1s_j = tf.expand_dims(seq_w_tp1s_j, 0)
                            w_tp1s_indices_j = tf.reshape([EOS_ID], shape=[1, 1])
                            seq_w_tp1s_indices_j = tf.expand_dims(seq_w_ts_indices_j, 0)
                            seq_w_tp1s_indices_j = tf.concat([seq_w_tp1s_indices_j, w_tp1s_indices_j], axis=1)
                            seq_prob_tp1s_j = tf.reshape(seq_prob_ts_j, shape=[1, 1])
                            # print seq_w_tp1s_j, seq_w_tp1s_indices_j, seq_prob_tp1s_j
                            return seq_w_tp1s_j, seq_w_tp1s_indices_j, seq_prob_tp1s_j

                        def not_eos():
                            r_t = tf.matmul(w_t_1, w_r) + tf.matmul(c_t, u_r) + tf.matmul(d_t, v_r)
                            # print 'r_t', r_t
                            m_t = maxout_hidden_layer(r_t)  # maxout hidden layer
                            prob_t = tf.nn.softmax(tf.matmul(m_t, w_o))  # 行向量

                            # multiply the running sequence probability into the step distribution, then keep the top beam_size
                            seq_prob_tp1s_all = seq_prob_ts_j[0] * prob_t  # elementwise over the vocabulary
                            rs = tf.nn.top_k(seq_prob_tp1s_all, beam_search_size)
                            # filter after the product
                            # prob
                            seq_prob_tp1s = tf.reshape(rs.values, shape=[-1, 1])  # replace directly, as a column vector
                            # id
                            seq_w_tp1_indices = tf.concat([tf.reshape(seq_w_ts_indices_j, shape=[1, -1])]
                                                          * beam_search_size, axis=0)  # make a row vector, tile it beam_search_size times, then concat
                            seq_w_tp1_indices = tf.concat([seq_w_tp1_indices, tf.transpose(rs.indices)], axis=1)
                            # embedding
                            seq_w_tp1s = tf.concat([tf.expand_dims(seq_w_ts_j, 0)] * beam_search_size,
                                                   axis=0)  # there may be a cleaner way (e.g. tf.tile); this looks odd
                            w_tp1s = tf.nn.embedding_lookup(embedding, rs.indices[0])
                            w_tp1s = tf.expand_dims(w_tp1s, 1)  # add a new axis at dim 1
                            seq_w_tp1s = tf.concat([seq_w_tp1s, w_tp1s], axis=1)
                            # print 'seq_w_tp1s, seq_w_tp1_indices, seq_prob_tp1s', seq_w_tp1s, seq_w_tp1_indices, seq_prob_tp1s

                            return seq_w_tp1s, seq_w_tp1_indices, seq_prob_tp1s

                        result = tf.cond(tf.equal(seq_w_ts_indices_j[-1], tf.constant(EOS_ID)),
                                         lambda: eos(), lambda: not_eos())
                        # print 'result', result
                        return result

                    def get_best_seqs(seq_w_ts, seq_w_ts_indices, seq_prob_ts):
                        # one option: build a dict, sort for the max, and recover the indices;
                        # or concatenate, sort, and split apart again.
                        # tf.nn.top_k already does exactly this.
                        seq_prob_ts = tf.reshape(seq_prob_ts, shape=[1, -1])
                        rs = tf.nn.top_k(seq_prob_ts, beam_search_size)  # top_k needs a row vector here
                        # print rs.indices
                        # slice by the returned indices
                        best_seq_prob_ts = tf.reshape(rs.values, shape=[-1, 1])  # reshape to a column vector
                        best_seq_w_ts = tf.gather(seq_w_ts, rs.indices[0])  # all gathers are along dim 0
                        best_seq_w_ts_indices = tf.gather(seq_w_ts_indices, rs.indices[0])
                        # print 'best_seq_w_ts, best_seq_w_ts_indices, best_seq_prob_ts', \
                        #     best_seq_w_ts, best_seq_w_ts_indices, best_seq_prob_ts
                        return best_seq_w_ts, best_seq_w_ts_indices, best_seq_prob_ts

                    def one_step_test(t, h_p_i, h_q_i, d_ts, best_seq_w_ts, best_seq_w_ts_indices, best_seq_prob_ts):
                        d_tp1s = []
                        seq_w_tp1s_list = []
                        seq_w_tp1_indices_list = []
                        seq_prob_tp1s_list = []

                        for j in range(beam_search_size):  # this Python loop could also become a tf while_loop, which would need TensorArray writes
                            seq_w_ts_j = best_seq_w_ts[j]
                            seq_w_ts_indices_j = best_seq_w_ts_indices[j]
                            seq_prob_ts_j = best_seq_prob_ts[j]  # keep as a tensor for now
                            # print 'seq_w_ts_j, seq_w_ts_indices_j, seq_prob_ts_j ', seq_w_ts_j, seq_w_ts_indices_j, seq_prob_ts_j
                            w_t = tf.reshape(seq_w_ts_j[-1], shape=[1, -1])  # the last word of the sequence is the next input
                            d_t = tf.reshape(d_ts[j], shape=[1, -1])  # one decoder state per beam
                            c_t = attention_step(h_p_i, h_q_i, d_t)
                            w_c_d = tf.concat([w_t, c_t, d_t], axis=1)
                            # print 'w_t, c_t, d_t, w_c_d ', w_t, c_t, d_t, w_c_d
                            out, d_tp1 = cell_d(inputs=w_c_d, state=d_t)  # each beam advances its own state d
                            d_tp1s.append(d_tp1)
                            seq_w_tp1s, seq_w_tp1_indices, seq_prob_tp1s = \
                                output_step_test(w_t, c_t, d_tp1, seq_w_ts_j, seq_w_ts_indices_j, seq_prob_ts_j)
                            # each beam yields beam_size candidates (beam_size^2 total), which are then filtered
                            # back down to beam_size; note w must carry the whole sequence so far, not just the current word.
                            # beam_size per beam suffices: a candidate outside its own beam's top
                            # beam_size cannot enter the global top beam_size either.
                            seq_w_tp1s_list.append(seq_w_tp1s)
                            seq_w_tp1_indices_list.append(seq_w_tp1_indices)
                            seq_prob_tp1s_list.append(seq_prob_tp1s)
                        # concatenate across beams
                        d_tp1s = tf.concat(d_tp1s, axis=0)
                        seq_w_tp1s = tf.concat(seq_w_tp1s_list, axis=0)
                        seq_w_tp1_indices = tf.concat(seq_w_tp1_indices_list, axis=0)
                        seq_prob_tp1s = tf.concat(seq_prob_tp1s_list, axis=0)  # column vectors concatenate this way
                        # filter
                        best_seq_w_tp1s, best_seq_w_tp1_indices, best_seq_prob_tp1s = \
                            get_best_seqs(seq_w_tp1s, seq_w_tp1_indices, seq_prob_tp1s)

                        # an "all EOS" check could be done here (is every beam's last word EOS?);
                        # it is handled in stop_condition instead

                        t = tf.add(t, 1)
                        return t, h_p_i, h_q_i, d_tp1s, best_seq_w_tp1s, best_seq_w_tp1_indices, best_seq_prob_tp1s
                    # each step should return the beam_size most probable sequences so far (ids and embeddings) and their probabilities
                    # the first step feeds a single SOS; afterwards there are beam_size inputs (possibly with repeated words); t=0 is handled separately

                    def output_step_test_t_0(w_0, c_0, d_1):
                        r_0 = tf.matmul(w_0, w_r) + tf.matmul(c_0, u_r) + tf.matmul(d_1, v_r)
                        # w, c, d need not share a dimension; the parameter matrices adjust it. The paper says r_t is 2d-dimensional, but the exact size is unclear
                        # print 'r_0', r_0
                        m_t = maxout_hidden_layer(r_0)  # maxout hidden layer
                        prob_0 = tf.nn.softmax(tf.matmul(m_t, w_o))  # row vector

                        rs = tf.nn.top_k(prob_0, beam_search_size)
                        # prob
                        seq_prob_1s = tf.reshape(rs.values, shape=[-1, 1])
                        # print 'seq_prob_1s', seq_prob_1s
                        # id
                        seq_w_0_indices = tf.reshape(tf.constant([SOS_ID] * beam_search_size), shape=[-1, 1])  # make a column vector
                        # print 'seq_w_0_indices', seq_w_0_indices
                        seq_w_1_indices = tf.concat([seq_w_0_indices, tf.transpose(rs.indices)], axis=1)  # 2-D tensor
                        # embedding
                        seq_w_0s = tf.concat([w_0] * beam_search_size, axis=0)
                        seq_w_0s = tf.expand_dims(seq_w_0s, 1)  # add a new axis at dim 1
                        w_1s = tf.nn.embedding_lookup(embedding, rs.indices[0])
                        w_1s = tf.expand_dims(w_1s, 1)  # add a new axis at dim 1
                        seq_w_1s = tf.concat([seq_w_0s, w_1s], axis=1)  # SOS concatenated with each candidate word
                        # print 'seq_w_1', seq_w_1s

                        return seq_w_1s, seq_w_1_indices, seq_prob_1s

                    def one_step_test_t_0(w_0, d_0, h_p_i, h_q_i):
                        c_0 = attention_step(h_p_i, h_q_i, d_0)
                        w_c_d = tf.concat([w_0, c_0, d_0], axis=1)
                        # print 't==0 ', w_0, c_0, d_0, w_c_d
                        out, d_1 = cell_d(inputs=w_c_d, state=d_0)
                        best_seq_w_1s, best_seq_w_1_indices, best_seq_prob_1s \
                            = output_step_test_t_0(w_0, c_0, d_1)
                        d_1s = tf.concat([d_1] * beam_search_size, axis=0)  # tile the state once per beam (row-wise concat)
                        return d_1s, best_seq_w_1s, best_seq_w_1_indices, best_seq_prob_1s

                    def stop_condition(t, best_seq_w_t_indices):  # stop at the maximum answer length, or once all top beam_size sequences end with EOS
                        w_t_indices = best_seq_w_t_indices[:, -1]  # slice out the last word id of every sequence
                        all_eos = tf.constant([EOS_ID] * beam_search_size)  # a vector of EOS ids
                        cond1 = tf.equal(w_t_indices, all_eos)  # elementwise comparison
                        cond1 = tf.logical_not(tf.reduce_all(cond1))  # True: keep decoding; False: stop
                        cond2 = t < max_anslen  # True: keep decoding; False: stop
                        cond = tf.logical_and(cond1, cond2)  # continue only while both conditions hold
                        # print cond
                        return cond

                    def one_answer(i, a_ids_b):
                        # handle t = 0 separately
                        h_p_i = h_p[i]
                        h_q_i = h_q[i]
                        d_0_i = d_0[i]
                        w_0 = tf.reshape(embedding[SOS_ID], shape=[1, -1])
                        d_1s, best_seq_w_1s, best_seq_w_1_indices, best_seq_prob_1s = one_step_test_t_0(w_0, d_0_i, h_p_i, h_q_i)
                        # t=1开始
                        t = tf.constant(1)
                        c = lambda x, y, z, m, n, p, q: stop_condition(x, p)
                        b = lambda x, y, z, m, n, p, q: one_step_test(x, y, z, m, n, p, q)
                        res = tf.while_loop(cond=c, body=b, loop_vars=[t, h_p_i, h_q_i, d_1s,
                                                                       best_seq_w_1s, best_seq_w_1_indices,
                                                                       best_seq_prob_1s],
                                            shape_invariants=[t.get_shape(), h_p_i.get_shape(),
                                                              h_q_i.get_shape(), d_1s.get_shape(),
                                                              tf.TensorShape([beam_search_size, None, embedding_dim]),
                                                              tf.TensorShape([beam_search_size, None]),
                                                              tf.TensorShape([beam_search_size, 1])])
                                            # possible remaining issue: finished beams may need padding so every sequence keeps the same length
                        # at the last step, take the most probable sequence and store it in a_ids_b
                        seq_probs = res[-1]
                        # w_ts = res[-3].stack()
                        w_ts_indices = res[-2]
                        best_seq_no = tf.argmax(seq_probs)
                        best_seq_no = best_seq_no[0]
                        # best_w_ts = w_ts[best_seq_no]
                        best_a_ids = w_ts_indices[best_seq_no]
                        print 'best_seq_no, best_a_ids ', best_seq_no, best_a_ids
                        a_ids_b = a_ids_b.write(i, best_a_ids)  # keep the returned TensorArray
                        i = tf.add(i, 1)
                        return i, a_ids_b

                    answers_ids_b = tf.TensorArray(dtype=tf.int32, size=batch_size)
                    c = lambda x, y: tf.less(x, batch_size)  # loop over the batch
                    b = lambda x, y: one_answer(x, y)
                    i = tf.constant(0)  # batch index
                    answers_ids_b_res = tf.while_loop(cond=c, body=b, loop_vars=(i, answers_ids_b))
                    # print 'prob', prob
                    self.answers_ids = answers_ids_b_res[-1].stack()
                    print 'answers_ids', self.answers_ids

        # loss, train_op
        if mode == 'train':
            with tf.variable_scope('loss', reuse=tf.AUTO_REUSE):

                def compute_loss_step(t, ta_index_i, prob_i, loss_a):
                    ta_index_t = ta_index_i[t]
                    prob_t = prob_i[t][ta_index_t]
                    # print prob_t
                    loss_a += tf.log(prob_t + 1e-12)  # sum log-probabilities; log space avoids underflow
                    t = tf.add(t, 1)
                    return t, ta_index_i, prob_i, loss_a

                def compute_loss_batch(i, loss):
                    t = tf.constant(0)
                    prob_i = prob[i]
                    ta_index_i = self.targets_a[i]
                    loss_a = tf.constant(0.0)
                    c = lambda x, y, z, m: tf.less(x, self.targets_actual_length_a[i])
                    b = lambda x, y, z, m: compute_loss_step(x, y, z, m)
                    loss_res = tf.while_loop(cond=c, body=b, loop_vars=(t, ta_index_i, prob_i, loss_a))
                    loss_a = loss_res[-1]
                    loss -= loss_a  # accumulate the negative log-likelihood of this answer
                    # print loss_a, loss
                    i = tf.add(i, 1)
                    return i, loss

                loss = tf.constant(0.0)
                c = lambda x, y: tf.less(x, batch_size)
                b = lambda x, y: compute_loss_batch(x, y)
                i = tf.constant(0)  # fresh batch counter rather than reusing the decoder's
                loss_res = tf.while_loop(cond=c, body=b, loop_vars=(i, loss))
                self.loss = loss_res[-1]
                # print 'loss', self.loss
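
                # A minimal vectorized sketch of the same quantity, assuming the
                # answers share one fixed length T, prob has shape
                # [batch, T, vocab_size], and the target ids have shape [batch, T]
                # (unused; for illustration):
                def sequence_nll_sketch(prob_btv, target_ids):
                    # probability the model assigns to each gold token
                    gold_probs = tf.reduce_sum(
                        prob_btv * tf.one_hot(target_ids, vocab_size), axis=-1)
                    # total negative log-likelihood over time and batch
                    return -tf.reduce_sum(tf.log(gold_probs + 1e-12))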

            with tf.variable_scope('train_op', reuse=tf.AUTO_REUSE):
                tvars = tf.trainable_variables()
                # print tvars
                # grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), grad_clip)
                optimizer = tf.train.AdamOptimizer(initial_learning_rate)
                grads_and_vars = optimizer.compute_gradients(self.loss)
                self.train_op = optimizer.apply_gradients(grads_and_vars)  # still raises the same error
                # self.train_op = optimizer.apply_gradients(zip(grads, tvars))
                print self.train_op
Ejemplo n.º 53
0
def swap_strongest(colonies, empires, empires_numbers, num_of_colonies,
                   cost_function):
    colonies_power = helpers.evaluate_countries_power(colonies, cost_function)
    colonies_indexes = helpers.create_index_column(colonies_power)
    colonies_power_with_index = helpers.concatenate_tensor_with_index(
        colonies_power, colonies_indexes, num_of_colonies)

    empires_power = helpers.evaluate_countries_power(empires, cost_function)
    empires_numbers_to_check, _ = tf.unique(empires_numbers)
    swap_initial_params = (constants.int_zero, empires_numbers_to_check,
                           colonies, empires, empires_numbers, empires_power,
                           colonies_power, tf.squeeze(colonies_indexes),
                           colonies_power_with_index)

    _, _, new_colonies, new_empires, _, new_empires_power, new_colonies_power, _, _ = tf.while_loop(
        condition_swap_strongest, body_swap_strongest, swap_initial_params)
    return new_colonies, new_empires, new_empires_power, new_colonies_power
Ejemplo n.º 54
0
def constrained_l2_proj_distance_save_memory(x, grads, biases):
    u, w, b = x, grads, -biases[:, :, None]

    ndim = K.int_shape(u)[1]
    neurons = K.int_shape(w)[1]
    w = K.reshape(w, (-1, ndim))
    b = K.reshape(b, (-1, 1))

    # Handy naming to make code a bit more readable.
    def is_(x):
        return K.cast(x, 'float32')

    def where(x):
        return K.cast(x, 'float32')

    # Check feasibility.
    # The problem is infeasible if `q` (defined below) can be brought to
    # infinity as `lam` -> +/-infinity. We see that as `lam` -> +infinity,
    # `x_star[i]` becomes 0 if `w[i]` is positive and 1 if `w[i]` is negative.
    # Thus, dq/dlam = -b + sum_{i : w_i < 0}{w_i}. If dq/dlam is positive, then
    # `q` will go to infinity. The other case comes from the symmetric case for
    # when `lam` -> -infinity.
    infeasible = (is_(K.sum(w * where(w > 0), axis=1) < b[:, 0]) +
                  is_(K.sum(w * where(w < 0), axis=1) > b[:, 0]))

    feasible = 1. - infeasible

    # Get the order (as lambda goes from -infinity to 0) in which each dimension
    # transitions to the I stage.
    I_in_order = tf.argsort(u / w * where(w < 0) + (u - 1) / w * where(w > 0))

    # Get the order (as lambda goes from 0 to +infinity) in which each dimension
    # transitions out of the I stage.
    I_out_order = tf.argsort(u / w * where(w > 0) + (u - 1) / w * where(w < 0))

    w_I_in = tf.gather(w, I_in_order, batch_dims=1)
    u_I_in = tf.gather(u, I_in_order, batch_dims=1)
    w_1_in = tf.gather(w * where(w > 0), I_in_order, batch_dims=1)

    in_nums = w_I_in * u_I_in - w_1_in
    in_denoms = w_I_in**2

    w_I_out = tf.gather(w, I_out_order, batch_dims=1)
    u_I_out = tf.gather(u, I_out_order, batch_dims=1)
    w_1_out = tf.gather(w * where(w < 0), I_out_order, batch_dims=1)

    out_nums = -w_I_out * u_I_out + w_1_out  # mirror of in_nums for dimensions leaving I
    out_denoms = -w_I_out**2

    nums = (K.sum(w * where(w > 0), axis=1)[:, None] +
            K.cumsum(K.concatenate(
                (in_nums, out_nums), axis=1), axis=1)[:, :-1] - b)

    denoms = K.cumsum(K.concatenate((in_denoms, out_denoms), axis=1),
                      axis=1)[:, :-1]

    argmaxes = nums / denoms

    # Find the inflection points in `q`.
    inflections = K.concatenate(((u - 1) / w, u / w))

    lam = K.concatenate((argmaxes, inflections))

    i0 = tf.constant(0)
    m0 = K.zeros((0, ndim))

    loop_condition = lambda i, m: i < K.shape(lam)[0]

    def loop_body(i, m):
        x_star_candidate_i = K.clip(u[0, None] - lam[i, :, None] * w[i, None],
                                    0., 1.)

        max_q_candidate_i = (
            K.sum(.5 * (x_star_candidate_i - u[0, None])**2 +
                  lam[i, :, None] * w[i, None] * x_star_candidate_i,
                  axis=-1) - lam[i] * b[i])

        opt_i = K.cast(K.argmax(max_q_candidate_i, axis=0), 'int32')

        return [i + 1, tf.concat((m, x_star_candidate_i[opt_i, None]), axis=0)]

    x_star = tf.while_loop(
        loop_condition,
        loop_body,
        loop_vars=[i0, m0],
        shape_invariants=[i0.get_shape(),
                          tf.TensorShape([None, ndim])])[1]

    d = tf.norm(x_star - u, axis=1) / feasible

    return K.reshape(d, (-1, neurons))
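
# Hedged shape notes (inferred from the reshapes above; the broadcasting in
# `inflections` and in the final `x_star - u` suggests a single input point):
#   x:      [1, ndim]          -- the point being projected
#   grads:  [1, neurons, ndim] -- one constraint normal per neuron
#   biases: [1, neurons]       -- constraint offsets
# The returned distances are then reshaped to [1, neurons].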
Ejemplo n.º 55
0
def inference(data, sequence_length, keep_prob):
    """
    The inference function.
    :param data: [batch_size, 107, 20]
    :param sequence_length: [batch_size]
    :param keep_prob: the parameter for dropout layer.
    :return: the logits.
    """
    tf.set_random_seed(0)
    batch_size_op = tf.shape(data)[0]
    max_length_op = tf.shape(data)[1]

    with tf.variable_scope('lstm_variable_sequence') as scope:
        # For the LSTM weight and biases initialization.
        cell_fw = tf.nn.rnn_cell.LSTMCell(
            num_units=64, initializer=tf.glorot_normal_initializer(seed=0))
        cell_fw = tf.nn.rnn_cell.DropoutWrapper(cell_fw,
                                                output_keep_prob=keep_prob,
                                                seed=0)
        cell_bw = tf.nn.rnn_cell.LSTMCell(
            num_units=64, initializer=tf.glorot_normal_initializer(seed=0))
        cell_bw = tf.nn.rnn_cell.DropoutWrapper(cell_bw,
                                                output_keep_prob=keep_prob,
                                                seed=0)
        init_fw = cell_fw.zero_state(batch_size_op, dtype=tf.float32)
        init_bw = cell_bw.zero_state(batch_size_op, dtype=tf.float32)

        bidrnn_outputs, final_states = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=cell_fw,
            cell_bw=cell_bw,
            inputs=data,
            sequence_length=sequence_length,
            initial_state_fw=init_fw,
            initial_state_bw=init_bw)

        fw_lstm_outputs = final_states[0][1]
        bw_lstm_outputs = final_states[1][1]
        lstm_outputs = tf.concat((fw_lstm_outputs, bw_lstm_outputs),
                                 axis=1)  # shape = [batch_size, 128]

    with tf.variable_scope('conv_pssm') as scope:
        matrix = sio.loadmat('./program/model_dir/pssm.mat')['pssm']
        initializer_filters = tf.reshape(tf.constant(matrix, dtype=tf.float32),
                                         [1, 20, 1, 20])
        initializer_biases = tf.constant_initializer(0)
        filters = tf.get_variable('filters',
                                  initializer=initializer_filters,
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [20],
                                 initializer=initializer_biases,
                                 dtype=tf.float32,
                                 trainable=False)

        input = tf.reshape(data, [batch_size_op, max_length_op, 20, 1])
        temp = tf.nn.conv2d(input,
                            filters,
                            strides=[1, 1, 20, 1],
                            padding='SAME')
        temp_b = tf.nn.bias_add(temp, biases)
        conv_pssm = temp_b  # shape= [batch_size, 107, 1, 20]

        bandwidth = tf.floor(tf.divide(sequence_length, 4))
        width = tf.cast(tf.multiply(bandwidth, 4), tf.int32)

        Tensor_array = tf.TensorArray(tf.float32, batch_size_op)

        def cond(i, array):
            return i < batch_size_op

        def body(i, array):
            avblock_temp = tf.reshape(conv_pssm[i][0:width[i]], [4, -1, 20])
            avblock = tf.reshape(tf.reduce_mean(avblock_temp, axis=1), [4, 20])
            array = array.write(i, avblock)
            return i + 1, array

        i, array = tf.while_loop(cond, body, (0, Tensor_array))
        outputs = array.stack()

    with tf.variable_scope('conv_feature_extraction') as scope:
        initializer_filters = tf.truncated_normal_initializer(stddev=0.4,
                                                              seed=0)
        initializer_biases = tf.constant_initializer(0)
        filters = tf.get_variable('filters', [4, 4, 1, 20],
                                  initializer=initializer_filters,
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [20],
                                 initializer=initializer_biases,
                                 dtype=tf.float32)

        input = tf.reshape(outputs, [batch_size_op, 4, 20, 1])
        temp = tf.nn.conv2d(input,
                            filters,
                            strides=[1, 4, 4, 1],
                            padding='SAME')
        temp_b = tf.nn.bias_add(temp, biases)
        conv_feature_extraction = tf.nn.relu(
            temp_b)  # shape= [batch_size, 1, 5, 20]

    with tf.variable_scope('dropout') as scope:
        dropout = tf.nn.dropout(conv_feature_extraction,
                                keep_prob=keep_prob,
                                seed=0)

    with tf.variable_scope('Merge_features') as scope:
        conv = tf.reshape(dropout, [batch_size_op, 100])
        merge_features = tf.concat([lstm_outputs, conv],
                                   axis=1)  # shape = [batch_size, 228]

    with tf.variable_scope('fully_connected_1') as scope:
        initializer_weights = tf.truncated_normal_initializer(stddev=0.4,
                                                              seed=0)
        initializer_biases = tf.constant_initializer(0.1)
        weights = tf.get_variable('weight', [228, 100],
                                  initializer=initializer_weights,
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [100],
                                 initializer=initializer_biases,
                                 dtype=tf.float32)
        f1_l2_loss = tf.multiply(tf.nn.l2_loss(weights),
                                 0.2,
                                 name='f1_weight_loss')
        tf.add_to_collection('losses', f1_l2_loss)
        temp = tf.nn.xw_plus_b(merge_features, weights, biases)
        fc1 = tf.nn.relu(temp)

    with tf.variable_scope('fully_connected_2') as scope:
        initializer_weights = tf.truncated_normal_initializer(stddev=0.4,
                                                              seed=0)
        initializer_biases = tf.constant_initializer(0.1)
        weights = tf.get_variable('weight', [100, 2],
                                  initializer=initializer_weights,
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [2],
                                 initializer=initializer_biases,
                                 dtype=tf.float32)
        f2_l2_loss = tf.multiply(tf.nn.l2_loss(weights),
                                 0.2,
                                 name='f2_weight_loss')
        tf.add_to_collection('losses', f2_l2_loss)
        logits = tf.nn.xw_plus_b(fc1, weights, biases)

    return logits
Ejemplo n.º 56
0
def yolo_loss(args,
              anchors,
              ignore_thresh=.5,
              seg_loss_weight=0.1,
              print_loss=False):
    '''Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(N, 2), wh
    ignore_thresh: float, the iou threshold whether to ignore object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)

    '''
    num_layers = len(anchors) // 3  # default setting
    yolo_outputs = args[:1]
    mask_prob = args[1]
    co_enegy = args[2]
    y_true = args[3:4]
    mask_gt = args[4]
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [
        [0, 1, 2]
    ]  # with two scales removed, [[6, 7, 8], [3, 4, 5], [0, 1, 2]] reduces to [[0, 1, 2]]
    input_shape = K.cast(
        K.shape(yolo_outputs[0])[1:3] * 32,
        K.dtype(y_true[0]))  # x32 is original size
    grid_shapes = [
        K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0]))
        for l in range(num_layers)
    ]  # grid shape for each output scale
    loss = 0
    m = K.shape(yolo_outputs[0])[0]  # batch size, tensor
    mf = K.cast(m, K.dtype(yolo_outputs[0]))

    for l in range(num_layers):
        object_mask = y_true[l][..., 4:5]
        # true_class_probs = y_true[l][..., 5:]  # "..." keeps all leading dimensions

        grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
                                                     anchors[anchor_mask[l]],
                                                     input_shape,
                                                     calc_loss=True)
        pred_box = K.concatenate([pred_xy, pred_wh])
        # Darknet raw box to calculate loss.
        raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid
        raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] *
                            input_shape[::-1])
        raw_true_wh = K.switch(object_mask, raw_true_wh,
                               K.zeros_like(raw_true_wh))  # avoid log(0)=-inf
        box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]),
                                     size=1,
                                     dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')

        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b, ..., 0:4],
                                       object_mask_bool[b, ..., 0])
            iou = box_iou(pred_box[b], true_box)
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(
                b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
            return b + 1, ignore_mask

        _, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body,
                                       [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        def smooth_L1(y_true, y_pred, sigma=3.0):
            """ Create a smooth L1 loss functor.

            Args
                sigma: This argument defines the point where the loss changes from L2 to L1.

            Returns
                A functor for computing the smooth L1 loss given target data and predicted data.
            """
            sigma_squared = sigma**2

            # compute smooth L1 loss
            # f(x) = 0.5 * (sigma * x)^2          if |x| < 1 / sigma / sigma
            #        |x| - 0.5 / sigma / sigma    otherwise
            regression_diff = y_true - y_pred
            regression_diff = K.abs(regression_diff)
            regression_loss = tf.where(
                K.less(regression_diff, 1.0 / sigma_squared),
                0.5 * sigma_squared * K.pow(regression_diff, 2),
                regression_diff - 0.5 / sigma_squared)
            return regression_loss
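
        # Quick sanity check of the switch point (sigma = 3, so sigma^2 = 9):
        # at |x| = 1/9 the L2 branch gives 0.5 * 9 * (1/9)^2 = 1/18 and the
        # L1 branch gives 1/9 - 0.5/9 = 1/18, so the loss is continuous there.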

        # K.binary_crossentropy is helpful to avoid exp overflow.
        xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(
            raw_true_xy, raw_pred[..., 0:2], from_logits=True)
        wh_loss = object_mask * box_loss_scale * 0.5 * smooth_L1(
            raw_true_wh, raw_pred[..., 2:4])
        confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \
            (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask
        seg_loss = K.binary_crossentropy(mask_gt, mask_prob, from_logits=True)

        xy_loss = K.sum(xy_loss) / mf
        wh_loss = K.sum(wh_loss) / mf
        confidence_loss = K.sum(confidence_loss) / mf
        seg_loss = K.sum(seg_loss) / mf
        co_enegy_loss = cem_loss(co_enegy) / mf

        loss += xy_loss + wh_loss + confidence_loss + seg_loss * seg_loss_weight + co_enegy_loss
        if print_loss:
            loss = tf.Print(loss, [
                '\n'
                'co_peak_loss: ', co_enegy_loss, 'co_peak_energy: ',
                K.sum(co_enegy) / mf
            ],
                            message='loss: ')
    return K.expand_dims(loss, axis=0)
Ejemplo n.º 57
0
  def map_fn(x):
    """Internal function to flat_map over.

    Consumes a batch of input examples and produces a variable number of output
    examples.

    Args:
      x: a single example
    Returns:
      a tf.data.Dataset
    """
    partial = empty_example.copy()
    i = tf.zeros([], dtype=tf.int32)
    dynamic_batch_size = tf.shape(x[keys[0]])[0]
    outputs = {}
    for k in keys:
      outputs[k] = tf.TensorArray(
          tf.int32, size=0, dynamic_size=True, element_shape=[length])
      outputs[k + "_position"] = tf.TensorArray(
          tf.int32, size=0, dynamic_size=True, element_shape=[length])
    def cond_fn(i, partial, outputs):
      del partial, outputs
      return i < dynamic_batch_size
    def body_fn(i, partial, outputs):
      """Body function for while_loop.

      Args:
        i: integer scalar
        partial: dictionary of Tensor (partially-constructed example)
        outputs: dictionary of TensorArray
      Returns:
        A triple containing the new values of the inputs.
      """
      can_append = True
      one_example = {}
      for k in keys:
        val = tf.cast(x[k][i], tf.int32)
        val = val[:tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))]
        one_example[k] = val
      for k in keys:
        can_append = tf.logical_and(
            can_append,
            tf.less_equal(
                tf.size(partial[k]) + tf.size(one_example[k]), length))
      def false_fn():
        return write_packed_example(partial, outputs)
      def true_fn():
        return partial, outputs
      partial, outputs = tf.cond(can_append, true_fn, false_fn)
      new_partial = {}
      for k in keys:
        new_seq = one_example[k][:length]
        new_seq_len = tf.size(new_seq)
        new_partial[k] = tf.concat([partial[k], new_seq], 0)
        new_partial[k + "_position"] = tf.concat(
            [partial[k + "_position"],
             tf.range(new_seq_len, dtype=tf.int32)], 0)
      partial = new_partial
      return i+1, partial, outputs

    i, partial, outputs = tf.while_loop(
        cond_fn, body_fn, (i, partial, outputs),
        back_prop=False,
        shape_invariants=(
            tf.TensorShape([]),
            {k: tf.TensorShape([None]) for k in keys_etc},
            {k: tf.TensorShape(None) for k in keys_etc},
            ))
    partial, outputs = write_packed_example(partial, outputs)
    packed = {k: outputs[k].stack() for k in keys_etc}
    for k in keys:
      packed[k + "_segmentation"] = (
          tf.cumsum(
              tf.cast(tf.equal(packed[k + "_position"], 0), tf.int32), axis=1) *
          tf.cast(tf.not_equal(packed[k], 0), tf.int32))
    return packed
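
  # A small worked illustration of the packed outputs (assuming a key named
  # "inputs" and two examples [3, 8, 1] and [9, 4] packed into length 6):
  #   inputs:              [3, 8, 1, 9, 4, 0]
  #   inputs_position:     [0, 1, 2, 0, 1, 0]
  #   inputs_segmentation: [1, 1, 1, 2, 2, 0]
  # "_position" restarts within each packed example; "_segmentation" numbers
  # the examples in a row and is 0 on padding.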
Ejemplo n.º 58
0
def sample_sequence(*,
                    hparams,
                    length,
                    start_token=None,
                    batch_size=None,
                    context=None,
                    sampler='k',
                    temperature=1,
                    top_k=0,
                    alpha=0.05,
                    nuc_prob=0.25,
                    flat_prob=0.02,
                    k_window_size=None,
                    window_weights=None):
    if start_token is None:
        assert context is not None, 'Specify exactly one of start_token and context!'  # this is where the whole context is already given into the model.
        # it is the primer that I write for it!
    else:
        assert context is None, 'Specify exactly one of start_token and context!'
        context = tf.fill([batch_size, 1],
                          start_token)  # this is not used in my case!

    def step(hparams, tokens, past=None):
        lm_output = model.model(hparams=hparams,
                                X=tokens,
                                past=past,
                                reuse=tf.AUTO_REUSE)

        logits = lm_output['logits'][:, :, :hparams.n_vocab]
        presents = lm_output['present']
        presents.set_shape(
            model.past_shape(hparams=hparams, batch_size=batch_size))
        return {
            'logits': logits,
            'presents': presents,
        }

    with tf.name_scope('sample_sequence'):

        # this will store all of the logits for a specific sampling run.
        # ultimately I want this to be a:  samples * prompts (batch size) * words * length  sized matrix.

        #all_logits = tf.Variable(tf.zeros([batch_size, 50257, length]), dtype=tf.float32)
        #all_logits = tf.Variable(name='all_logits', shape=[batch_size, 50257, length], initializer= ,dtype=tf.float32, trainable=False)

        def body(past, prev, output, all_logits):
            next_outputs = step(hparams, prev, past=past)

            logits = next_outputs['logits'][:,
                                            -1, :] / tf.to_float(temperature)
            if sampler == 'k':
                print('using top k')
                logits = top_k_logits(logits, k=top_k)
            elif sampler == 'n':
                print('using nucleus')
                logits = nucleus(logits, p=nuc_prob)
            elif sampler == 'tfs':
                print('using tail free sampling')
                logits = tail_free(logits,
                                   alpha)  #, k_window_size, window_weights)
            elif sampler == 'flat':
                print('using flat percentage sampling')
                logits = flat_perc(logits, flat_prob)
            else:
                print('defaulting to top k sampling')
                logits = top_k_logits(logits, k=top_k)
            #print('the logits shape post processing is: ', logits.shape)
            samples = tf.multinomial(logits,
                                     num_samples=1,
                                     output_dtype=tf.int32)
            #print('the samples shape is: ', samples.shape)
            return [
                next_outputs['presents'] if past is None else tf.concat(
                    [past, next_outputs['presents']], axis=-2),
                tf.reshape(samples, [batch_size, 1]),
                tf.concat([output, samples], axis=1),
                tf.expand_dims(next_outputs['logits'][:, -1, :], axis=2)
                if all_logits is None else tf.concat([
                    all_logits,
                    tf.expand_dims(next_outputs['logits'][:, -1, :], axis=2)
                ],
                                                     axis=2)
                #tf.concat([all_logits, tf.expand_dims(next_outputs['logits'][:, -1, :], axis=2)], axis=2)
                #all_logits[:,:,tf.shape(output)[1]+1].assign(tf.expand_dims(next_outputs['logits'][:, -1, :], axis=2) )
            ]

        past, prev, output, all_logits = body(
            None, context, context, None
        )  # for the first run the output and previous are both the context.

        def cond(*args):
            return True

        _, _, tokens, all_logits_out = tf.while_loop(
            cond=cond,
            body=body,
            maximum_iterations=length - 1,
            loop_vars=[past, prev, output, all_logits],
            #changed the 2nd shape invariant so that it can handle the ? shape (which is actually batch size) for the TFS sampling.
            shape_invariants=[
                tf.TensorShape(
                    model.past_shape(hparams=hparams, batch_size=batch_size)),
                tf.TensorShape([batch_size, None]),
                tf.TensorShape([batch_size, None]),
                tf.TensorShape([batch_size, 50257, None])  #batch size
            ],
            back_prop=False,
        )

        return (tokens, all_logits_out)
Ejemplo n.º 59
0
    def __init__(self,
                 inp,
                 inp_mask,
                 inp_id,
                 seq2seq_gtruth,
                 post_gtruth,
                 hyper_params=None,
                 training=True,
                 name='Tacotron',
                 reuse=False):
        """
        Build the computational graph.
        :param inp:
        :param inp_mask:
        :param seq2seq_gtruth:
        :param post_gtruth:
        :param hyper_params:
        :param training:
        :param name:
        """
        super(Tacotron, self).__init__(name)
        self.hyper_params = HyperParams(
        ) if hyper_params is None else hyper_params
        with tf.variable_scope(name, reuse=reuse):
            self.global_step = tf.Variable(0,
                                           name='global_step',
                                           trainable=False)
            self.learning_rate = tf.Variable(
                self.hyper_params.learning_rate[0],
                name='learning_rate',
                trainable=False,
                dtype=tf.float32)

            batch_size = tf.shape(inp)[0]
            input_time_steps = tf.shape(inp)[1]
            output_time_steps = tf.shape(seq2seq_gtruth)[1]

            ### Encoder [begin]
            with tf.variable_scope('character_embedding'):
                embed_inp = EmbeddingLayer(self.hyper_params.embed_class,
                                           self.hyper_params.embed_dim)(inp)
            with tf.variable_scope("changeToVarible"):
                self.single_style_token = tf.get_variable(
                    'style_token', (1, self.hyper_params.styles_kind,
                                    self.hyper_params.style_dim),
                    dtype=tf.float32)
                # self.Popt = tf.Print(self.single_style_token, [self.single_style_token], message='style:')
                self.style_token = tf.tile(self.single_style_token,
                                           (batch_size, 1, 1))
            with tf.variable_scope("inp_att_onehot"):
                inp_att = tf.one_hot(inp_id,
                                     depth=self.hyper_params.styles_kind,
                                     dtype=tf.float32)
                self.inp_att = inp_att
                # inp_att = tf.Print(inp_att, [inp_att], message='input_att:', summarize=10)
                # print(inp_att)
            with tf.variable_scope('encoder_pre_net'):
                pre_ed_inp = tf.layers.dropout(tf.layers.dense(
                    embed_inp, 256, tf.nn.relu),
                                               training=training)
                pre_ed_inp = tf.layers.dropout(tf.layers.dense(
                    pre_ed_inp, 128, tf.nn.relu),
                                               training=training)
            encoder_output = modules.cbhg(pre_ed_inp,
                                          training=training,
                                          k=16,
                                          bank_filters=128,
                                          projection_filters=(128, 128),
                                          highway_layers=4,
                                          highway_units=128,
                                          bi_gru_units=128,
                                          sequence_length=inp_mask,
                                          name='encoder_cbhg',
                                          reuse=False)
            # with tf.variable_scope('post_text'):
            #     all_outputs, _ = tf.nn.dynamic_rnn(cell=GRUCell(256), inputs=encoder_output, sequence_length=inp_mask,
            #                                    dtype=encoder_output.dtype, parallel_iterations=unkonwn_parallel_iterations)
            #     all_outputs = tf.transpose(all_outputs, [1, 0, 2])
            #     static_encoder_output = all_outputs[-1]
            # ### Encoder [end]
            #
            # sentence_style_att = tf.layers.dense(static_encoder_output, 256, tf.nn.relu)
            # sentence_style_att = tf.layers.dense(sentence_style_att, 64, tf.nn.relu)
            # sentence_style = tf.layers.dense(sentence_style_att, 10, tf.nn.softmax)
            # sentence_style = tf.Print(sentence_style, [sentence_style[0]], message='sentence', summarize=10)
            sentence_style = tf.reduce_sum(tf.expand_dims(inp_att, axis=-1) *
                                           self.style_token,
                                           axis=1)
            # sentence_style = tf.Print(sentence_style, [sentence_style], message='sentence_style', summarize=10)

            ### Attention Module
            with tf.variable_scope('attention'):
                att_module = AttentionModule(256,
                                             encoder_output,
                                             sequence_length=inp_mask,
                                             time_major=False)
            # with tf.variable_scope("attention_style"):
            #     att_module_style = AttentionModule(256, self.style_token, time_major=False)

            ### Decoder [begin]
            att_cell = GRUCell(256)
            dec_cell = MultiRNNCell(
                [ResidualWrapper(GRUCell(256)) for _ in range(2)])
            # prepare output alpha TensorArray
            with tf.variable_scope('prepare_decode'):
                reduc = self.hyper_params.reduction_rate
                reduced_time_steps = tf.div(output_time_steps, reduc)
                init_att_cell_state = att_cell.zero_state(
                    batch_size, tf.float32)
                init_dec_cell_state = dec_cell.zero_state(
                    batch_size, tf.float32)
                init_state_tup = tuple(
                    [init_att_cell_state, init_dec_cell_state])
                init_output_ta = tf.TensorArray(size=reduced_time_steps,
                                                dtype=tf.float32)
                init_alpha_ta = tf.TensorArray(size=reduced_time_steps,
                                               dtype=tf.float32)
                init_weight_ta = tf.TensorArray(size=reduced_time_steps,
                                                dtype=tf.float32)
                init_weight_per_ta = tf.TensorArray(size=reduced_time_steps,
                                                    dtype=tf.float32)
                # init_alpha_style_ta = tf.TensorArray(size=reduced_time_steps, dtype=tf.float32)
                time_major_seq2seq_gtruth = tf.transpose(seq2seq_gtruth,
                                                         perm=(1, 0, 2))
                indic_array = tf.concat([
                    tf.zeros([
                        reduc, batch_size, self.hyper_params.seq2seq_dim
                    ]), time_major_seq2seq_gtruth
                ],
                                        axis=0)
                init_context = tf.zeros([batch_size, 256], dtype=tf.float32)
                # init_context_style = tf.zeros([batch_size, 256], dtype=tf.float32)
                init_time = tf.constant(0, dtype=tf.int32)
            cond = lambda x, *_: tf.less(x, reduced_time_steps)

            def body(this_time, old_context, old_output_ta, old_alpha_ta,
                     old_weight_per_ta, old_state_tup):
                with tf.variable_scope('decoder_pre_net'):
                    dec_pre_ed_inp = indic_array[reduc * this_time + reduc - 1]
                    dec_pre_ed_inp = tf.layers.dropout(tf.layers.dense(
                        dec_pre_ed_inp, 256, tf.nn.relu),
                                                       training=training)
                    dec_pre_ed_inp = tf.layers.dropout(tf.layers.dense(
                        dec_pre_ed_inp, 128, tf.nn.relu),
                                                       training=training)
                with tf.variable_scope('attention_rnn'):
                    att_cell_inp = tf.concat([old_context, dec_pre_ed_inp],
                                             axis=-1)
                    att_cell_out, att_cell_state = att_cell(
                        att_cell_inp, old_state_tup[0])
                with tf.variable_scope('attention'):
                    query = att_cell_state[0]
                    context, alpha = att_module(query)
                    new_alpha_ta = old_alpha_ta.write(this_time, alpha)
                with tf.variable_scope('decoder_rnn'):
                    # context = tf.Print(context, [context], message='context', summarize=10)
                    weighting_context = context + sentence_style
                    weight_per = tf.reduce_mean(
                        tf.abs(sentence_style[0]) /
                        (tf.abs(context[0]) + tf.abs(sentence_style[0])))
                    # weight_per = tf.Print(weight_per, [weight_per], message='weight', summarize=10)
                    new_weight_per_ta = old_weight_per_ta.write(
                        this_time, weight_per)
                    dec_input = tf.layers.dense(
                        tf.concat([att_cell_out, weighting_context], axis=-1),
                        256)
                    dec_cell_out, dec_cell_state = dec_cell(
                        dec_input, old_state_tup[1])
                    dense_out = tf.layers.dense(
                        dec_cell_out, self.hyper_params.seq2seq_dim * reduc)
                    new_output_ta = old_output_ta.write(this_time, dense_out)
                new_state_tup = tuple([att_cell_state, dec_cell_state])
                return tf.add(
                    this_time, 1
                ), context, new_output_ta, new_alpha_ta, new_weight_per_ta, new_state_tup

            # run loop
            _, _, seq2seq_output_ta, alpha_ta, weight_per_ta, *_ = tf.while_loop(
                cond,
                body, [
                    init_time, init_context, init_output_ta, init_alpha_ta,
                    init_weight_per_ta, init_state_tup
                ],
                parallel_iterations=unkonwn_parallel_iterations)

            with tf.variable_scope('reshape_decode'):
                seq2seq_output = tf.reshape(
                    seq2seq_output_ta.stack(),
                    shape=(reduced_time_steps, batch_size,
                           self.hyper_params.seq2seq_dim * reduc))
                seq2seq_output = tf.reshape(
                    tf.transpose(seq2seq_output, perm=(1, 0, 2)),
                    shape=(batch_size, output_time_steps,
                           self.hyper_params.seq2seq_dim))
                self.seq2seq_output = seq2seq_output

                alpha_output = tf.reshape(alpha_ta.stack(),
                                          shape=(reduced_time_steps,
                                                 batch_size, input_time_steps))
                alpha_output = tf.expand_dims(
                    tf.transpose(alpha_output, perm=(1, 0, 2)), -1)
                self.alpha_output = alpha_output

                alpha_style_att = inp_att
                alpha_style_att = tf.expand_dims(alpha_style_att,
                                                 -1)  # batch major
                alpha_style_att = tf.expand_dims(alpha_style_att,
                                                 -1)  # batch major
                alpha_style_att = tf.transpose(alpha_style_att,
                                               perm=(0, 2, 1, 3))
                self.alpha_style_att = alpha_style_att

                weight_per_ta = tf.reshape(weight_per_ta.stack(),
                                           shape=(reduced_time_steps, 1))
                self.weight_per_ta = weight_per_ta
            ### Decoder [end]

            ### PostNet [begin]
            post_output = modules.cbhg(
                seq2seq_output,
                training=training,
                k=8,
                bank_filters=128,
                projection_filters=(256, self.hyper_params.seq2seq_dim),
                highway_layers=4,
                highway_units=128,
                bi_gru_units=128,
                sequence_length=None,
                name='decoder_cbhg',
                reuse=False)
            post_output = tf.layers.dense(post_output,
                                          self.hyper_params.post_dim,
                                          name='post_linear_transform')
            self.post_output = post_output
            ### PostNet [end]

        ### Loss
        with tf.variable_scope('loss'):
            self.seq2seq_loss = l1_loss(seq2seq_gtruth, seq2seq_output)
            self.post_loss = l1_loss(post_gtruth, post_output)
            self.loss = self.seq2seq_loss + self.post_loss
Ejemplo n.º 60
0
    def __step_while_loop(self, step_count: int,
                          truth_outputs_padded: tf.Tensor,
                          truth_outputs_counts: tf.Tensor,
                          initial_hidden_vector: tf.Tensor):
        def create_guess_layers(parent_hidden_vector: tf.Tensor,
                                child_hidden_vector: tf.Tensor,
                                inner_hidden_vector: tf.Tensor,
                                state_number: int):
            state = self.object_type.get_all_states()[state_number]
            return self.hidden_vector_network(parent_hidden_vector,
                                              child_hidden_vector,
                                              inner_hidden_vector, state)

        def body(step: int, stack_1, stack_2, states_ta: tf.TensorArray,
                 outputs_ta: tf.TensorArray, outputs_counts_ta: tf.TensorArray,
                 return_value: tf.Tensor):

            # stack_1 = tf.Print(stack_1, [step, tf.slice(stack_1, [0, 1], [-1, 2])], "stack: ", summarize=100)

            # Rebuild `stack` tuple
            # TODO: Find way to avoid this by putting tuples into `tf.while_loop` arguments
            stack = stack_1, stack_2

            # Get the state and hidden vector we have to deal with for this iteration
            state, hidden_vector, stack = state_stack.pop(stack)

            # Get the summary for all hidden vectors excluding this one
            hidden_vector_summary = state_stack.get_hidden_vector_summary(
                stack)

            # Get the number of outputs for padding
            # TODO: Doing this twice, once here, and once when calling create_guess_layers. Fix this
            num_outputs = tf.case(pred_fn_pairs=[
                (tf.equal(state, i), lambda i=i: tf.constant(
                    self.object_type.get_all_states()[i].num_outputs))
                for i in range(len(self.object_type.get_all_states()))
            ],
                                  default=lambda: tf.constant(0))

            # Call `create_guess_layers(...)` depending on what state we're in.
            # The default branch deliberately evaluates 0/0 so that an
            # out-of-range state surfaces as NaN downstream.
            next_hidden_vector, current_choice = tf.case(
                pred_fn_pairs=[
                    (tf.equal(state, i), lambda i=i: create_guess_layers(
                        hidden_vector_summary, return_value, hidden_vector, i))
                    for i in range(len(self.object_type.get_all_states()))
                ],
                default=lambda: (hidden_vector,
                                 tf.constant(0, dtype=tf.float32) /
                                 tf.constant(0, dtype=tf.float32)))

            # Zero pad the current choice
            current_choice = tf.concat(
                [current_choice,
                 tf.zeros([self.max_outputs - tf.shape(current_choice)[0]])],
                axis=0,
                name="current_choice_zero_padded")

            # Reshape the hidden vector so we know what size it is
            next_hidden_vector = tf.reshape(next_hidden_vector,
                                            [self.hidden_vector_size],
                                            name="next_hidden_vector_reshaped")

            if self.training:
                # If we're training, the choice we send to the update_state_stack_fn should be determined by the truth
                stack_update_choice = tf.gather(truth_outputs_padded,
                                                step,
                                                name="choice_from_input")
            else:
                # Otherwise, the choice should be what we outputted
                stack_update_choice = current_choice

            # stack = (tf.Print(stack[0], [step, stack[0][:stack_2]], "estack: ", summarize=100), stack[1])

            # Update the state stack
            stack, return_value = self.__update_state_stack(
                state, stack, next_hidden_vector, stack_update_choice)

            return (
                step + 1,
                *stack,
                states_ta.write(step, state, "write_state"),
                outputs_ta.write(step, current_choice, "write_outputs"),
                outputs_counts_ta.write(step, num_outputs, "write_outputs_count"),
                return_value)

        def cond(step, stack_1, stack_2, *_):
            # Rebuild `stack` tuple
            stack = stack_1, stack_2

            if self.training:
                return step < step_count
            else:
                return tf.logical_and(
                    step < self.max_steps,
                    tf.logical_not(state_stack.check_size(stack, 1)))

        # Create the initial stack with initial hidden vector and state
        initial_stack = state_stack.create(
            max_size=self.max_steps,
            hidden_vector_size=self.hidden_vector_size)
        initial_stack = state_stack.push(initial_stack, -1,
                                         initial_hidden_vector)
        initial_stack = state_stack.push(
            initial_stack,
            self.object_type.get_initial_state().id, initial_hidden_vector)
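        # `state_stack` is a project-specific module not shown here. Judging by
        # the shape invariants passed to `tf.while_loop` below, it appears to
        # represent the stack as a pair: a [None, hidden_vector_size + 1]
        # tensor of rows (a state id alongside a hidden vector) plus a scalar
        # count of live entries.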

        # If we're training, we know the output sizes and don't need larger-than-necessary `TensorArray`s
        output_sizes = step_count if self.training else self.max_steps

        # Create the `tf.TensorArray` to hold all outputs
        initial_states_ta = tf.TensorArray(dtype=tf.float32,
                                           size=output_sizes,
                                           name="initial_states_ta")
        initial_outputs_ta = tf.TensorArray(dtype=tf.float32,
                                            size=output_sizes,
                                            name="initial_outputs_ta")
        initial_outputs_counts_ta = tf.TensorArray(
            dtype=tf.int32,
            size=output_sizes,
            name="initial_outputs_counts_ta")

        initial_return_value = tf.constant(np.nan,
                                           dtype=tf.float32,
                                           shape=[self.hidden_vector_size])

        final_step, *_, final_states_ta, final_outputs_ta, final_outputs_counts_ta, _ = tf.while_loop(
            cond=cond,
            body=body,
            loop_vars=[
                0, *initial_stack, initial_states_ta, initial_outputs_ta,
                initial_outputs_counts_ta, initial_return_value
            ],
            shape_invariants=[
                tf.TensorShape([]),
                tf.TensorShape([None, self.hidden_vector_size + 1]),
                tf.TensorShape([]),
                tf.TensorShape(None),
                tf.TensorShape(None),
                tf.TensorShape(None),
                tf.TensorShape([self.hidden_vector_size])
            ],
            name="step_while_loop")

        # If in test mode, we don't know the number of steps we will execute, so we need to resize the
        # `tf.TensorArray`s to match the actual output
        # TODO: Check if we can overcome this by using padding
        if not self.training:
            final_states_ta = tf_utils.resize_tensor_array(
                final_states_ta, final_step)
            final_outputs_ta = tf_utils.resize_tensor_array(
                final_outputs_ta, final_step)
            final_outputs_counts_ta = tf_utils.resize_tensor_array(
                final_outputs_counts_ta, final_step)

        return \
            self.__stack_and_pad(final_states_ta, self.max_steps), \
            self.__stack_and_pad(final_outputs_ta, self.max_steps), \
            self.__stack_and_pad(final_outputs_counts_ta, self.max_steps), \
            final_step
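
`tf_utils.resize_tensor_array` and `__stack_and_pad` are project helpers that are not included in this snippet. A plausible sketch under stated assumptions — the former copies the first `size` elements into a fresh `tf.TensorArray`, the latter stacks an array and zero-pads its leading dimension to a fixed length; both signatures are inferred from the call sites above, not confirmed:

import tensorflow as tf

def resize_tensor_array(ta, size):
    # Copy the first `size` elements into a new TensorArray of exactly that
    # size, so that stack() does not fail on unwritten slots.
    new_ta = tf.TensorArray(dtype=ta.dtype, size=size)

    def body(i, out_ta):
        return i + 1, out_ta.write(i, ta.read(i))

    _, new_ta = tf.while_loop(lambda i, _: i < size, body, [0, new_ta])
    return new_ta

def stack_and_pad(ta, length):
    # Stack the TensorArray, then zero-pad its leading dimension to `length`.
    stacked = ta.stack()
    pad_amount = length - tf.shape(stacked)[0]
    pad_shape = tf.concat([[pad_amount], tf.shape(stacked)[1:]], axis=0)
    return tf.concat([stacked, tf.zeros(pad_shape, dtype=stacked.dtype)],
                     axis=0)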