Code example #1
    def get_states_b(self):
        """
        Iterates through time/sequence to get all hidden states.
        """

        all_hidden_states, all_memory_states = self.get_states_f()

        # Reversing the hidden and memory states to get the final hidden and
        # memory states
        last_hidden_states = tf.reverse(
            all_hidden_states, [True, False, False])[0, :, :]
        last_memory_states = tf.reverse(
            all_memory_states, [True, False, False])[0, :, :]

        # For backward pass using the last hidden and memory of the forward
        # pass
        initial_hidden = tf.pack([last_hidden_states, last_memory_states])

        # Getting all hidden states through time
        all_hidden_memory_states = tf.scan(self.Lstm_b,
                                           self.processed_input_rev,
                                           initializer=initial_hidden,
                                           name='states')

        # Now reversing the states to keep those in original order
        all_hidden_states = tf.reverse(all_hidden_memory_states[
                                       :, 0, :, :], [True, False, False])
        all_memory_states = tf.reverse(all_hidden_memory_states[
                                       :, 1, :, :], [True, False, False])

        return all_hidden_states, all_memory_states
Code example #2
File: math.py Project: bowrian/SSD-Tensorflow
def cummax(x, reverse=False, name=None):
    """Compute the cumulative maximum of the tensor `x` along `axis`. This
    operation is similar to the more classic `cumsum`. Only support 1D Tensor
    for now.

    Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
       axis: A `Tensor` of type `int32` (default: 0).
       reverse: A `bool` (default: False).
       name: A name for the operation (optional).
    Returns:
    A `Tensor`. Has the same type as `x`.
    """
    with ops.name_scope(name, "Cummax", [x]) as name:
        x = ops.convert_to_tensor(x, name="x")
        # Not very optimal: should directly integrate reverse into tf.scan.
        if reverse:
            x = tf.reverse(x, axis=[0])
        # 'Accumulating' maximum: ensure it is always increasing.
        cmax = tf.scan(lambda a, y: tf.maximum(a, y), x,
                       initializer=None, parallel_iterations=1,
                       back_prop=False, swap_memory=False)
        if reverse:
            cmax = tf.reverse(cmax, axis=[0])
        return cmax
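As a quick illustration of how the scan accumulates, here is a minimal usage sketch (my addition; it assumes a TF 1.x session and the `tensorflow as tf` import the file already relies on):

# Hedged usage sketch for cummax on a 1-D tensor.
x = tf.constant([1.0, 3.0, 2.0, 5.0, 4.0])
with tf.Session() as sess:
    print(sess.run(cummax(x)))                # [1. 3. 3. 5. 5.]
    print(sess.run(cummax(x, reverse=True)))  # [5. 5. 5. 5. 4.]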
Code example #3
File: layers.py Project: rsennrich/nematus
    def forward(self, x, x_mask=None, context_layer=None):

        assert not (self.reverse_alternation and x_mask is None)

#        assert (context_layer == None or
#                tf.shape(context_layer)[-1] == self.context_state_size)

        def create_step_fun(gru):
            def step_fn(prev_state, x):
                gates_x2d, proposal_x2d = x[0], x[1]
                new_state = gru.forward(prev_state,
                                        gates_x=gates_x2d,
                                        proposal_x=proposal_x2d)
                if len(x) > 2:
                    mask = x[2]
                    new_state *= mask # batch x 1
                    # The first few states of the reversed encoder should be
                    # zero, which is why we multiply by the mask: when the
                    # reversed encoder reaches actual words, the state is
                    # zeros rather than accumulated garbage.
                return new_state
            return step_fn

        init_state = tf.zeros(shape=[self.batch_size, self.state_size],
                              dtype=tf.float32)
        if x_mask is not None:
            x_mask_r = tf.reverse(x_mask, axis=[0])
            x_mask_bwd = tf.expand_dims(x_mask_r, axis=[2]) #seqLen x batch x 1

        for i, gru in enumerate(self.grus):
            layer = RecurrentLayer(initial_state=init_state,
                                   step_fn=create_step_fun(gru))
            if context_layer is None:
                x2 = x
            else:
                x2 = tf.concat([x, context_layer], axis=-1)
            if not self.alternating:
                left_to_right = True
            else:
                if self.reverse_alternation:
                    left_to_right = (i % 2 == 1)
                else:
                    left_to_right = (i % 2 == 0)
            if left_to_right:
                # Recurrent state flows from left to right in this layer.
                gates_x, proposal_x = gru.precompute_from_x(x2)
                h = layer.forward((gates_x, proposal_x))
            else:
                # Recurrent state flows from right to left in this layer.
                x2_reversed = tf.reverse(x2, axis=[0])
                gates_x, proposal_x = gru.precompute_from_x(x2_reversed)
                h_reversed = layer.forward((gates_x, proposal_x, x_mask_bwd))
                h = tf.reverse(h_reversed, axis=[0])
            # Compute the word states, which will become the input for the
            # next layer (or the output of the stack if we're at the top).
            if i == 0:
                x = h
            else:
                x += h # Residual connection
        return x
Code example #4
File: tfutils.py Project: hedgefair/pycodesuggest
def sparse_transpose(sp_input):
    transposed_indices = tf.reverse(tf.cast(sp_input.indices, tf.int32), [False, True])
    transposed_values = sp_input.values
    transposed_shape = tf.reverse(tf.cast(sp_input.shape, tf.int32), [True])
    sp_output = tf.SparseTensor(tf.cast(transposed_indices, tf.int64), transposed_values, tf.cast(transposed_shape, tf.int64))
    sp_output = tf.sparse_reorder(sp_output)
    return sp_output
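This targets the pre-1.0 API (boolean masks for `tf.reverse`, the `shape` attribute on `SparseTensor`). For comparison, a sketch of the same idea against the axis-based API, under the assumption of a 2-D sparse input; current TensorFlow also ships `tf.sparse.transpose`, which would be the idiomatic choice:

def sparse_transpose_v2(sp_input):
    # Swap each (row, col) index pair and the (rows, cols) shape, then
    # restore canonical row-major ordering of the entries.
    transposed_indices = tf.reverse(sp_input.indices, axis=[1])
    transposed_shape = tf.reverse(sp_input.dense_shape, axis=[0])
    sp_output = tf.SparseTensor(transposed_indices, sp_input.values,
                                transposed_shape)
    return tf.sparse.reorder(sp_output)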
Code example #5
File: model.py Project: siddk/lang2program
 def embed_sequences(self, embed_sequence_batch):
     """Return sentence embeddings as a tensor with with shape
     [batch_size, hidden_size * 2]
     """
     forward_values = embed_sequence_batch.values
     forward_mask = embed_sequence_batch.mask
     backward_values = tf.reverse(forward_values, [False, True, False])
     backward_mask = tf.reverse(forward_mask, [False, True])
     # Initialize LSTMs
     self._forward_lstm = LSTM(self.hidden_size, return_sequences=True)
     self._backward_lstm = LSTM(self.hidden_size, return_sequences=True)
     # Pass input through the LSTMs
     # Shape: (batch_size, seq_length, hidden_size)
     forward_seq = self._forward_lstm(forward_values, forward_mask)
     forward_seq.set_shape((None, self.seq_length, self.hidden_size))
     backward_seq = self._backward_lstm(backward_values, backward_mask)
     backward_seq.set_shape((None, self.seq_length, self.hidden_size))
     # Stitch the outputs together --> hidden states (for computing attention)
     # Final dimension: (batch_size, seq_length, hidden_size * 2)
     lstm_states = tf.concat(2, [forward_seq, tf.reverse(backward_seq, [False, True, False])])
     self._hidden_states = SequenceBatch(lstm_states, forward_mask)
     # Stitch the final outputs together --> sequence embedding
     # Final dimension: (batch_size, hidden_size * 2)
     seq_length = tf.shape(forward_values)[1]
     forward_final = tf.slice(forward_seq, [0, seq_length - 1, 0], [-1, 1, self.hidden_size])
     backward_final = tf.slice(backward_seq, [0, seq_length - 1, 0], [-1, 1, self.hidden_size])
     return tf.squeeze(tf.concat(2, [forward_final, backward_final]), [1])
Code example #6
File: lstm1d.py Project: brchiu/tensorflow
def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with tf.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
    state = tf.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = tf.to_int64(tf.fill([batch_size], sequence_length))
    if reverse:
      inputs = tf.reverse(inputs, [True, False, False])
    outputs, _ = tf.nn.dynamic_rnn(lstm_cell,
                                   inputs,
                                   sequence_lengths,
                                   state,
                                   time_major=True)
    if reverse:
      outputs = tf.reverse(outputs, [True, False, False])
    return outputs
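Because every example here shares one static `sequence_length`, a whole-tensor `tf.reverse` on the time axis suffices. For the dynamic lengths mentioned in the TODO, the usual tool is `tf.reverse_sequence`, which flips only the valid prefix of each example; a sketch assuming time-major `inputs` and a per-example `lengths` vector (keyword names vary across 1.x releases, older ones use `seq_dim`/`batch_dim`):

# Hedged sketch: reverse only the first lengths[b] steps of each example b.
reversed_inputs = tf.reverse_sequence(inputs, lengths,
                                      seq_axis=0, batch_axis=1)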
Code example #7
File: image_ops.py Project: lrjconan/img-count
def random_transformation2(x, y, padding, phase_train, rnd_vflip=True, rnd_hflip=True, rnd_transpose=True, rnd_colour=False):
    """
    Perform random crop, flip, transpose, hue, saturation, brightness, contrast.

    Args:
        x: [B, H, W, 3]
        y: [B, T, H, W]
        padding: int
        phase_train: bool
    """
    # Random image transformation layers.
    phase_train_f = tf.to_float(phase_train)
    x_shape = tf.shape(x)
    y_shape = tf.shape(y)
    num_ex = x_shape[0]
    inp_height = x_shape[1]
    inp_width = x_shape[2]
    inp_depth_x = x_shape[3]
    inp_depth_y = y_shape[3]

    # Add padding
    x_pad = tf.pad(x, [[0, 0], [padding, padding], [padding, padding], [0, 0]])
    y_pad = tf.pad(y, [[0, 0], [padding, padding], [padding, padding], [0, 0]])

    # Random crop
    offset = tf.random_uniform([2], dtype='int32', maxval=padding * 2)
    x_rand = tf.slice(x_pad, tf.pack([0, offset[0], offset[1], 0]),
                      tf.pack([-1, inp_height, inp_width, inp_depth_x]))
    y_rand = tf.slice(y_pad, tf.pack([0, offset[0], offset[1], 0]),
                      tf.pack([-1, inp_height, inp_width, inp_depth_y]))

    # Center slices (for inference)
    x_ctr = tf.slice(x_pad, [0, padding, padding, 0],
                     tf.pack([-1, inp_height, inp_width, -1]))
    y_ctr = tf.slice(y_pad, [0, padding, padding, 0],
                     tf.pack([-1, inp_height, inp_width, -1]))

    # Random horizontal & vertical flip & transpose
    rand_h = tf.random_uniform([1], 1.0 - float(rnd_hflip), 1.0)
    rand_v = tf.random_uniform([1], 1.0 - float(rnd_vflip), 1.0)
    mirror = tf.pack([1.0, rand_v[0], rand_h[0], 1.0]) < 0.5
    x_rand = tf.reverse(x_rand, mirror)
    y_rand = tf.reverse(y_rand, mirror)
    rand_t = tf.random_uniform([1], 1.0 - float(rnd_transpose), 1.0)
    do_tr = tf.cast(rand_t[0] < 0.5, 'int32')
    x_rand = tf.transpose(x_rand, tf.pack([0, 1 + do_tr, 2 - do_tr, 3]))
    y_rand = tf.transpose(y_rand, tf.pack([0, 1 + do_tr, 2 - do_tr, 3]))

    # Random hue, saturation, brightness, contrast
    if rnd_colour:
        x_rand = random_hue(x_rand, 0.1)
        x_rand = random_saturation(x_rand, 0.9, 1.1)
        x_rand = tf.image.random_brightness(x_rand, 0.1)
        x_rand = tf.image.random_contrast(x_rand, 0.9, 1.1)

    x = (1.0 - phase_train_f) * x_ctr + phase_train_f * x_rand
    y = (1.0 - phase_train_f) * y_ctr + phase_train_f * y_rand

    return x, y
Code example #8
File: nerveseg_input.py Project: mtourne/nerveseg
def image_distortions(image, distortions):
    distort_left_right_random = distortions[0]
    mirror = tf.less(tf.pack([1.0, distort_left_right_random, 1.0]), 0.5)
    image = tf.reverse(image, mirror)
    distort_up_down_random = distortions[1]
    mirror = tf.less(tf.pack([distort_up_down_random, 1.0, 1.0]), 0.5)
    image = tf.reverse(image, mirror)
    return image
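The boolean-mask `tf.reverse` and `tf.pack` are pre-1.0 API. A sketch of the same behavior against the axis-based API, assuming an HWC `image` (which the mask layout above implies):

def image_distortions_v2(image, distortions):
    # Flip left-right when the first draw is below 0.5, up-down for the second.
    image = tf.cond(distortions[0] < 0.5,
                    lambda: tf.reverse(image, axis=[1]),
                    lambda: image)
    image = tf.cond(distortions[1] < 0.5,
                    lambda: tf.reverse(image, axis=[0]),
                    lambda: image)
    return image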
Code example #9
 def _compute_rnn_outputs(self):
     reversed_inputs = tf.reverse(self.inputs, [False, True, False])
     reversed_resets = tf.reverse(self.resets, [False, True, False])
     self._rv_lstm = LSTM(reversed_inputs, reversed_resets, self.training,
                          self.num_layers, self.hidden_layer_size,
                          self.init_scale, self.dropout_keep_prob)
     outputs = tf.reverse(self._rv_lstm.outputs, [False, True, False])
     return outputs
Code example #10
  def reconstruct(self, inputs, samples=1, sample_static=False,
                  sample_dynamic=False, swap_static=False, swap_dynamic=False,
                  fix_static=False, fix_dynamic=False):
    """Reconstruct the given input sequences.

    Args:
      inputs: A batch of image sequences `x_{1:T}` of shape
        `[batch_size, timesteps, height, width, channels]`.
      samples: Number of samples to draw from the latent distributions.
      sample_static: Boolean for whether or not to randomly sample the
        static latent variable `f` from its prior distribution.
      sample_dynamic: Boolean for whether or not to randomly sample the
        dynamic latent variable `z_{1:T}` from its prior distribution.
      swap_static: Boolean for whether or not to swap the encodings for
        the static latent variable `f` between the examples.
      swap_dynamic: Boolean for whether or not to swap the encodings for
        the dynamic latent variable `z_{1:T}` between the examples.
      fix_static: Boolean for whether or not to share the same random
        sample of the static latent variable `f` from its prior across
        all examples.
      fix_dynamic: Boolean for whether or not to share the same random
        sample of the dynamic latent variable `z_{1:T}` from its prior
        across all examples.

    Returns:
      A batched Independent distribution wrapping a set of Normal
      distributions over the pixels of the reconstruction of the input,
      where the Independent distribution has event shape [height, width,
      channels], batch shape [samples, batch_size, timesteps], and
      sample shape [sample_shape, samples, batch_size, timesteps,
      height, width, channels].
    """
    batch_size = tf.shape(inputs)[-5]
    length = len(tf.unstack(inputs, axis=-4))  # hack for graph mode

    features = self.compressor(inputs)  # (..., batch, timesteps, hidden)

    if sample_static:
      static_sample, _ = self.sample_static_prior(
          samples, batch_size, fix_static)
    else:
      static_sample, _ = self.sample_static_posterior(features, samples)

    if swap_static:
      static_sample = tf.reverse(static_sample, axis=[1])

    if sample_dynamic:
      dynamic_sample, _ = self.sample_dynamic_prior(
          samples, batch_size, length, fix_dynamic)
    else:
      dynamic_sample, _ = self.sample_dynamic_posterior(
          features, samples, static_sample)

    if swap_dynamic:
      dynamic_sample = tf.reverse(dynamic_sample, axis=[1])

    likelihood = self.decoder((dynamic_sample, static_sample))
    return likelihood
Code example #11
File: tf_utils.py Project: rohitjun08/rasa_core
    def __call__(self, cell_output, scores, scores_state, ignore_mask):
        # apply an exponential moving average, with the interpolation gate
        # weight, to the scores from previous time (which equal the probs at
        # this point); this differs from the original NTM, where it is
        # applied after the softmax
        i_g = self.inter_gate(cell_output)

        # scores limited by time
        scores = tf.concat([i_g * scores[:, :-1] + (1 - i_g) * scores_state,
                            scores[:, -1:]], 1)
        next_scores_state = scores

        # create probabilities for attention
        if self.sparse_attention:
            probs = tf.contrib.sparsemax.sparsemax(scores)
        else:
            probs = tf.nn.softmax(scores)

        if self.shift_weight is not None:
            s_w = self.shift_weight(cell_output)

            # we want to go back in time during convolution
            conv_probs = tf.reverse(probs, axis=[1])

            # prepare probs for tf.nn.depthwise_conv2d
            # [in_width, in_channels=batch]
            conv_probs = tf.transpose(conv_probs, [1, 0])
            # [batch=1, in_height=1, in_width=time+1, in_channels=batch]
            conv_probs = conv_probs[tf.newaxis, tf.newaxis, :, :]

            # [filter_height=1, filter_width=2*attn_shift_range+1,
            #   in_channels=batch, channel_multiplier=1]
            conv_s_w = tf.transpose(s_w, [1, 0])
            conv_s_w = conv_s_w[tf.newaxis, :, :, tf.newaxis]

            # perform 1d convolution
            # [batch=1, out_height=1, out_width=time+1, out_channels=batch]
            conv_probs = tf.nn.depthwise_conv2d_native(conv_probs, conv_s_w,
                                                       [1, 1, 1, 1], 'SAME')
            conv_probs = conv_probs[0, 0, :, :]
            conv_probs = tf.transpose(conv_probs, [1, 0])

            probs = tf.reverse(conv_probs, axis=[1])

        # Sharpening
        g_sh = self.gamma_sharp(cell_output)

        powed_probs = tf.pow(probs, g_sh)
        probs = powed_probs / (
                tf.reduce_sum(powed_probs, 1, keepdims=True) + 1e-32)

        # set probs for no intents and action_listens to zero
        if ignore_mask is not None:
            probs = tf.concat([tf.where(ignore_mask,
                                        tf.zeros_like(probs[:, :-1]),
                                        probs[:, :-1]),
                               probs[:, -1:]], 1)
        return probs, next_scores_state
Code example #12
File: utility.py Project: shamanez/agents
def discounted_return(reward, length, discount):
  """Discounted Monte-Carlo returns."""
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  return_ = tf.reverse(tf.transpose(tf.scan(
      lambda agg, cur: cur + discount * agg,
      tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
      tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
  return tf.check_numerics(tf.stop_gradient(return_), 'return')
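The nested transpose/reverse/scan is dense, but it only evaluates the right-to-left recursion return_[t] = reward[t] + discount * return_[t+1] per batch row. A plain-NumPy sketch of that recursion for a single unmasked sequence (illustrative values are my own):

import numpy as np

def discounted_return_np(reward, discount):
    # Accumulate from the last timestep backwards.
    out = np.zeros_like(reward)
    acc = 0.0
    for t in reversed(range(len(reward))):
        acc = reward[t] + discount * acc
        out[t] = acc
    return out

print(discounted_return_np(np.array([1.0, 1.0, 1.0]), 0.9))  # [2.71 1.9  1.  ]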
Code example #13
File: multi_event_rnn_lib.py Project: danabo/magenta
def ni_slice(sub_values, last_ind, axis=0):
  # TODO: Allow both to be negative indexed...
  ndims = len(shape(sub_values))
  im1 = 0 + abs(last_ind)
  i = [[None, None]] * ndims
  i[axis] = [im1, None]
  am = [False] * ndims
  am[axis] = True
  sl = [slice(*ii) for ii in i]
  ti = tf.reverse(sub_values, am)[sl]
  return tf.reverse(ti, am)
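Reading the slicing: reversing along `axis`, dropping the first abs(last_ind) entries, and reversing back is the same as dropping the last abs(last_ind) entries, i.e. `sub_values[:-last_ind]`, without needing negative indices in the slice. A one-line NumPy check of the identity (my illustration):

import numpy as np

x, n = np.arange(5), 2                        # [0 1 2 3 4]
assert (x[::-1][n:][::-1] == x[:-n]).all()    # both give [0 1 2]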
Code example #14
File: utility.py Project: shamanez/agents
def lambda_advantage(reward, value, length, discount):
  """Generalized Advantage Estimation."""
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)
  delta = reward + discount * next_value - value
  advantage = tf.reverse(tf.transpose(tf.scan(
      lambda agg, cur: cur + discount * agg,
      tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]),
      tf.zeros_like(delta[:, -1]), 1, False), [1, 0]), [1])
  return tf.check_numerics(tf.stop_gradient(advantage), 'advantage')
Code example #15
File: utility.py Project: shamanez/agents
def lambda_return(reward, value, length, discount, lambda_):
  """TD-lambda returns."""
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  sequence = mask * reward + discount * value * (1 - lambda_)
  discount = mask * discount * lambda_
  sequence = tf.stack([sequence, discount], 2)
  return_ = tf.reverse(tf.transpose(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * agg,
      tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
      tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
  return tf.check_numerics(tf.stop_gradient(return_), 'return')
Code example #16
  def fix_variables(self, sess, pretrained_model):
    print('Fix Resnet V1 layers..')
    with tf.variable_scope('Fix_Resnet_V1') as scope:
      with tf.device("/cpu:0"):
        # fix RGB to BGR
        conv1_rgb = tf.get_variable("conv1_rgb", [7, 7, 3, 64], trainable=False)
        restorer_fc = tf.train.Saver({self._resnet_scope + "/conv1/weights": conv1_rgb})
        restorer_fc.restore(sess, pretrained_model)

        sess.run(tf.assign(self._variables_to_fix['rpn_network/'+self._resnet_scope + '/conv1/weights:0'],
                           tf.reverse(conv1_rgb, [2])))
        sess.run(tf.assign(self._variables_to_fix['rfcn_network/'+self._resnet_scope + '/conv1/weights:0'],
                           tf.reverse(conv1_rgb, [2])))
Code example #17
def image_mirroring(img, label):
    """
    Randomly mirrors the images.

    Args:
      img: Training image to mirror.
      label: Segmentation mask to mirror.
    """
    
    distort_left_right_random = tf.random_uniform([1], 0, 1.0, dtype=tf.float32)[0]
    mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5)
    mirror = tf.boolean_mask([0, 1, 2], mirror)
    img = tf.reverse(img, mirror)
    label = tf.reverse(label, mirror)
    return img, label
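Here `tf.boolean_mask` converts the boolean mask into a list of axis indices for `tf.reverse`; since only position 1 of the stacked vector can fall below 0.5, only axis 1 (the width axis of an HWC image) is ever flipped, so the mirroring is horizontal-only. A standalone sketch of the mask-to-axes step (values are my own):

mirror = tf.less(tf.stack([1.0, 0.3, 1.0]), 0.5)  # [False, True, False]
axes = tf.boolean_mask([0, 1, 2], mirror)         # -> [1]: flip width only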
Code example #18
def process_reals(x, lod, mirror_augment, drange_data, drange_net):
    with tf.name_scope('ProcessReals'):
        with tf.name_scope('DynamicRange'):
            x = tf.cast(x, tf.float32)
            x = misc.adjust_dynamic_range(x, drange_data, drange_net)
        if mirror_augment:
            with tf.name_scope('MirrorAugment'):
                s = tf.shape(x)
                mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
                mask = tf.tile(mask, [1, s[1], s[2], s[3]])
                x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
        with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
            s = tf.shape(x)
            y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
            y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
            y = tf.tile(y, [1, 1, 1, 2, 1, 2])
            y = tf.reshape(y, [-1, s[1], s[2], s[3]])
            x = tfutil.lerp(x, y, lod - tf.floor(lod))
        with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
            s = tf.shape(x)
            factor = tf.cast(2 ** tf.floor(lod), tf.int32)
            x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
            x = tf.tile(x, [1, 1, 1, factor, 1, factor])
            x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
        return x
Code example #19
File: train.py Project: wu-yy/tensorpack
    def fastrcnn_training(self, image,
                          rcnn_labels, fg_rcnn_boxes, gt_boxes_per_fg,
                          rcnn_label_logits, fg_rcnn_box_logits):
        """
        Args:
            image (NCHW):
            rcnn_labels (n): labels for each sampled target
            fg_rcnn_boxes (fg x 4): proposal boxes for each sampled foreground target
            gt_boxes_per_fg (fg x 4): matching gt boxes for each sampled foreground target
            rcnn_label_logits (n): label logits for each sampled target
            fg_rcnn_box_logits (fg x 4): box logits for each sampled foreground target
        """

        with tf.name_scope('fg_sample_patch_viz'):
            fg_sampled_patches = crop_and_resize(
                image, fg_rcnn_boxes,
                tf.zeros(tf.shape(fg_rcnn_boxes)[0], dtype=tf.int32), 300)
            fg_sampled_patches = tf.transpose(fg_sampled_patches, [0, 2, 3, 1])
            fg_sampled_patches = tf.reverse(fg_sampled_patches, axis=[-1])  # BGR->RGB
            tf.summary.image('viz', fg_sampled_patches, max_outputs=30)

        encoded_boxes = encode_bbox_target(
            gt_boxes_per_fg, fg_rcnn_boxes) * tf.constant(config.FASTRCNN_BBOX_REG_WEIGHTS)
        fastrcnn_label_loss, fastrcnn_box_loss = fastrcnn_losses(
            rcnn_labels, rcnn_label_logits,
            encoded_boxes,
            fg_rcnn_box_logits)
        return fastrcnn_label_loss, fastrcnn_box_loss
Code example #20
File: models.py Project: nivwusquorum/tf_nlp
 def final_hidden(self, input_idxes):
     rnn_hiddens = self.hiddens(input_idxes)
     # execute our rnn using the scan function and
     # extract the final timestep's hidden state
     rnn_hiddens_reverse = tf.reverse(rnn_hiddens, [True,False,False])
     rnn_final_hidden = rnn_hiddens_reverse[0,:,:]
     return rnn_final_hidden
Code example #21
File: ptest.py Project: mingkaic/tenncor
    def test_flip(self):
        shape = [3, 4, 5]
        data = np.random.rand(*shape)
        var = llo.variable(data, 'var')
        tf_var = tf.Variable(data)

        sess = tf.Session()
        sess.run(tf_var.initializer)

        out = age.flip(var, 1)
        tf_out = tf.reverse(tf_var, [1])

        fout = out.evaluate(dtype=np.dtype(float))
        tf_fout = sess.run(tf_out)

        self._array_close(tf_fout, fout)

        var2 = llo.variable(data, 'var2')
        zero = llo.derive(out, var2)
        ex = llo.derive(out, var)

        rej = zero.evaluate()
        der = ex.evaluate()

        tf_grad = tf.gradients(tf_fout, [tf_var])[0]
        self.assertEqual(None, tf_grad)

        data0 = np.zeros(shape, dtype=float)
        data1 = np.ones(shape, dtype=float)
        self._array_eq(data0, rej)
        self._array_eq(data1, der)
Code example #22
File: esn.py Project: andreasjansson/cr
    def compute_states(self):
        states_list = []
        state = self.zero_state()

        timesteps = xrange(self.esn.n_steps)
        if self.backwards:
            timesteps = reversed(timesteps)

        for i in timesteps:
            prev_state = state

            state = (tf.matmul(self.esn.scaled_x[:, i, :], self.w_xh) +
                     tf.matmul(prev_state, self.w_hh, b_is_sparse=True))

            state = (1 - self.leakage) * tf.tanh(state) + self.leakage * prev_state

            states_list.append(state)

        states = tf.reshape(tf.concat(
            1, states_list), [-1, self.esn.n_steps, self.esn.n_hidden])

        if self.backwards:
            states = tf.reverse(states, [False, True, False])

        return states
Code example #23
File: lowlevel.py Project: rjw57/dtcwt
def _conv_2d(X, h, strides=[1,1,1,1]):
    """
    Perform 2d convolution in tensorflow.

    X will be manipulated to be of shape [batch, height, width, ch],
    and h to be of shape [height, width, ch, num]. This function does the
    necessary reshaping before calling the conv2d function, and does the
    reshaping on the output, returning Y of shape [batch, height, width]
    """

    # Check the shape of X is what we expect
    if len(X.shape) != 3:
        raise ValueError('X needs to be of shape [batch, height, width] ' +
                         'for conv_2d')

    # Check the shape of h is what we expect
    if len(h.shape) != 2:
        raise ValueError('Filter inputs must only have height and width ' +
                         'for conv_2d')

    # Add in the unit dimensions for conv
    X = tf.expand_dims(X, axis=-1)
    h = tf.expand_dims(tf.expand_dims(h, axis=-1),axis=-1)

    # Have to reverse h as tensorflow 2d conv is actually cross-correlation
    h = tf.reverse(h, axis=[0,1])
    Y = tf.nn.conv2d(X, h, strides=strides, padding='VALID')

    # Remove the final dimension, returning a result of shape
    # [batch, height, width]
    Y = tf.squeeze(Y, axis=-1)

    return Y
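The flip matters because `tf.nn.conv2d` computes cross-correlation; reversing `h` along both spatial axes recovers true convolution. A tiny 1-D NumPy check of the distinction (numbers are my own):

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
h = np.array([1.0, 0.0, -1.0])
# Cross-correlation (what conv2d computes): sum_k x[n + k] * h[k]
xcorr = np.array([np.dot(x[i:i + 3], h) for i in range(2)])       # [-2. -2.]
# True convolution flips the filter first.
conv = np.array([np.dot(x[i:i + 3], h[::-1]) for i in range(2)])  # [ 2.  2.]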
Code example #24
def model(voxels, transform_matrix, params, is_training):
  """Model transforming the 3D voxels into 2D projections.

  Args:
    voxels: A tensor of size [batch, depth, height, width, channel]
      representing the input of projection layer (tf.float32).
    transform_matrix: A tensor of size [batch, 16] representing
      the flattened 4-by-4 matrix for transformation (tf.float32).
    params: Model parameters (dict).
    is_training: Set to True if while training (boolean).

  Returns:
    A transformed tensor (tf.float32)

  """
  del is_training  # Doesn't make a difference for projector
  # Rearrangement (batch, z, y, x, channel) --> (batch, y, z, x, channel).
  # By the standard, projection happens along the z-axis, but the voxels
  # are stored in a different way, so we need to swap the y and z
  # axes for the transformation operation.
  voxels = tf.transpose(voxels, [0, 2, 1, 3, 4])
  z_near = params.focal_length
  z_far = params.focal_length + params.focal_range
  transformed_voxels = perspective_transform.transformer(
      voxels, transform_matrix, [params.vox_size] * 3, z_near, z_far)
  views = tf.reduce_max(transformed_voxels, [1])
  views = tf.reverse(views, [1])
  return views
Code example #25
File: audio_demo.py Project: jlewi/tensorboard
def bisine_wahwah_wave(frequency):
  """Emit two sine waves with balance oscillating left and right."""
  #
  # This is clearly intended to build on the bisine wave defined above,
  # so we can start by generating that.
  waves_a = bisine_wave(frequency)
  #
  # Then, by reversing axis 2, we swap the stereo channels. By mixing
  # this with `waves_a`, we'll be able to create the desired effect.
  waves_b = tf.reverse(waves_a, axis=[2])
  #
  # Let's have the balance oscillate from left to right four times.
  iterations = 4
  #
  # Now, we compute the balance for each sample: `ts` has values
  # in [0, 1] that indicate how much we should use `waves_a`.
  xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
  thetas = xs / _samples() * iterations
  ts = (tf.sin(math.pi * 2 * thetas) + 1) / 2
  #
  # Finally, we can mix the two together, and we're done.
  wave = ts * waves_a + (1.0 - ts) * waves_b
  #
  # Alternately, we can make the effect more pronounced by exaggerating
  # the sample data. Let's emit both variations.
  exaggerated_wave = wave ** 3.0
  return tf.concat([wave, exaggerated_wave], axis=0)
Code example #26
File: coco_v1.py Project: Kairobo/FastMaskRCNN
def preprocess_for_test(image, gt_boxes, gt_masks):


    ih, iw = tf.shape(image)[0], tf.shape(image)[1]

    ## min size resizing
    new_ih, new_iw = preprocess_utils._smallest_size_at_least(ih, iw, cfg.FLAGS.image_min_size)
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, [new_ih, new_iw], align_corners=False)
    image = tf.squeeze(image, axis=[0])

    gt_masks = tf.expand_dims(gt_masks, -1)
    gt_masks = tf.cast(gt_masks, tf.float32)
    gt_masks = tf.image.resize_nearest_neighbor(gt_masks, [new_ih, new_iw], align_corners=False)
    gt_masks = tf.cast(gt_masks, tf.int32)
    gt_masks = tf.squeeze(gt_masks, axis=[-1])

    scale_ratio = tf.to_float(new_ih) / tf.to_float(ih)
    gt_boxes = preprocess_utils.resize_gt_boxes(gt_boxes, scale_ratio)
    
    ## scale image to [-1, 1]
    image = tf.cast(image, tf.float32)
    image = image / 256.0
    image = (image - 0.5) * 2.0
    image = tf.expand_dims(image, axis=0)

    ## rgb to bgr
    image = tf.reverse(image, axis=[-1])

    return image, gt_boxes, gt_masks 
Code example #27
def get_test_iterator(src_dataset, src_vocab_table, batch_size, config):
    src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(config.eos)), tf.int32)
    src_dataset = src_dataset.map(lambda src: tf.string_split([src]).values)

    src_dataset = src_dataset.map(lambda src: src[:config.src_max_len])

    src_dataset = src_dataset.map(
        lambda src: tf.cast(src_vocab_table.lookup(src), tf.int32))

    if config.reverse_src:
        src_dataset = src_dataset.map(lambda src: tf.reverse(src, axis=[0]))

    src_dataset = src_dataset.map(lambda src: (src, tf.size(src)))

    def batching_func(x):
        return x.padded_batch(
            config.batch_size,
            padded_shapes=(tf.TensorShape([None]),
                           tf.TensorShape([])),
            padding_values=(src_eos_id,
                            0))

    batched_dataset = batching_func(src_dataset)
    batched_iter = batched_dataset.make_initializable_iterator()
    src_ids, src_seq_len = batched_iter.get_next()
    return BatchedInput(
        initializer=batched_iter.initializer,
        source=src_ids,
        target_input=None,
        target_output=None,
        source_sequence_length=src_seq_len,
        target_sequence_length=None)
Code example #28
    def forward_backward(self, obs_prob_seq):
        """
        Runs the forward-backward algorithm on an observation sequence.

        Arguments
        ---------
        - obs_prob_seq : matrix of size N by S, where N is the number of timesteps and
            S is the number of states

        Returns
        -------
        - forward : matrix of size N by S representing
            the forward probability of each state at each time step
        - backward : matrix of size N by S representing
            the backward probability of each state at each time step
        - posterior : matrix of size N by S representing
            the posterior probability of each state at each time step
        """
        obs_prob_list_for = tf.split(0, self.N, obs_prob_seq)
        
        with tf.name_scope('forward_belief_propagation'):
            # forward belief propagation
            self._forward(obs_prob_list_for)

        obs_prob_seq_rev = tf.reverse(obs_prob_seq, [True, False])
        obs_prob_list_back = tf.split(0, self.N, obs_prob_seq_rev)

        with tf.name_scope('backward_belief_propagation'):
            # backward belief propagation
            self._backward(obs_prob_list_back)
Code example #29
def _rnd_img_transformation(x, y_gt, padding, phase_train):
    """
    Perform random crop, flip, transpose, hue, saturation, brightness, contrast.
    """
    # Random image transformation layers.
    phase_train_f = tf.to_float(phase_train)
    x_shape = tf.shape(x)
    num_ex = x_shape[0]
    full_height = x_shape[1]
    full_width = x_shape[2]
    inp_height = full_height - 2 * padding
    inp_width = full_width - 2 * padding
    inp_depth = x_shape[3]

    # Random crop
    offset = tf.random_uniform([2], dtype='int32', maxval=padding * 2)
    x_rand = tf.slice(x, tf.pack([0, offset[0], offset[1], 0]),
                      tf.pack([-1, inp_height, inp_width, inp_depth]))
    y_rand = tf.slice(y_gt, tf.pack([0, 0, offset[0], offset[1]]),
                      tf.pack([-1, -1, inp_height, inp_width]))

    # Center slices (for inference)
    x_ctr = tf.slice(x, [0, padding, padding, 0],
                     tf.pack([-1, inp_height, inp_width, -1]))
    y_ctr = tf.slice(y_gt, [0, 0, padding, padding],
                     tf.pack([-1, -1, inp_height, inp_width]))

    # Random horizontal & vertical flip & transpose
    rand = tf.random_uniform([3], 0, 1.0)
    mirror_x = tf.pack([1.0, rand[0], rand[1], 1.0]) < 0.5
    mirror_y = tf.pack([1.0, 1.0, rand[0], rand[1]]) < 0.5
    x_rand = tf.reverse(x_rand, mirror_x)
    y_rand = tf.reverse(y_rand, mirror_y)
    do_tr = tf.cast(rand[2] > 0.5, 'int32')
    x_rand = tf.transpose(x_rand, tf.pack([0, 1 + do_tr, 2 - do_tr, 3]))
    y_rand = tf.transpose(y_rand, tf.pack([0, 1, 2 + do_tr, 3 - do_tr]))

    # Random hue, saturation, brightness, contrast
    # x_rand = img.random_hue(x_rand, 0.5)
    # x_rand = img.random_saturation(x_rand, 0.5, 2.0)
    # x_rand = tf.image.random_brightness(x_rand, 0.5)
    # x_rand = tf.image.random_contrast(x_rand, 0.5, 2.0)

    x = (1.0 - phase_train_f) * x_ctr + phase_train_f * x_rand
    y_gt = (1.0 - phase_train_f) * y_ctr + phase_train_f * y_rand

    return x, y_gt
Code example #30
File: tf_backend.py Project: trungnt13/odin_old
def reverse(x, axis=-1):
    '''Apply [::-1] to appropriate axis'''
    ndim = len(x.get_shape())
    dims = [False] * ndim
    if axis < 0:
        axis = axis % ndim
    dims[axis] = True
    return tf.reverse(x, dims)
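The boolean `dims` mask is the pre-1.0 `tf.reverse` signature. Against the axis-based API the same helper collapses to a single call, since negative axes are accepted directly (a sketch, not part of the original backend):

def reverse_v2(x, axis=-1):
    '''Apply [::-1] to the appropriate axis.'''
    return tf.reverse(x, axis=[axis])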
Code example #31
(source_int_text,
 target_int_text), (source_vocab_to_int,
                    target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max(
    [len(sentence) for sentence in source_int_text])

train_graph = tf.Graph()
with train_graph.as_default():
    input_data, targets, lr, keep_prob = model_inputs()
    sequence_length = tf.placeholder_with_default(max_source_sentence_length,
                                                  None,
                                                  name='sequence_length')
    input_shape = tf.shape(input_data)

    train_logits, inference_logits = seq2seq_model(
        tf.reverse(input_data, [-1]), targets, keep_prob,
        batch_size, sequence_length, len(source_vocab_to_int),
        len(target_vocab_to_int), encoding_embedding_size,
        decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)

    tf.identity(inference_logits, 'logits')
    with tf.name_scope("optimization"):
        # Loss function
        cost = tf.contrib.seq2seq.sequence_loss(
            train_logits, targets, tf.ones([input_shape[0], sequence_length]))

        # Optimizer
        optimizer = tf.train.AdamOptimizer(lr)

        # Gradient Clipping
        gradients = optimizer.compute_gradients(cost)
Code example #32
def ae_transformer_internal(inputs,
                            targets,
                            target_space,
                            hparams,
                            cache=None,
                            predict_mask=1.0,
                            means=None,
                            ema_count=None,
                            ema_means=None):
  """AE Transformer, main step used for training."""
  # Summaries break with the do_refine cond, turn them off in that case.
  global _DO_SUMMARIES
  if hparams.do_refine:
    _DO_SUMMARIES = False

  # Prepare.
  batch_size = common_layers.shape_list(inputs)[0]
  targets = tf.reshape(targets, [batch_size, -1, 1, hparams.hidden_size])

  # Encoder.
  if inputs is not None:
    inputs = common_layers.flatten4d3d(inputs)
    inputs, ed = encode(inputs, target_space, hparams, "input_enc")
  else:
    ed = None

  # Autoencoding.
  losses = {"extra": tf.constant(0.0), "latent_pred": tf.constant(0.0)}
  if hparams.do_ae:
    max_targets_len_from_inputs = tf.concat([inputs, inputs], axis=1)
    targets, _ = common_layers.pad_to_same_length(
        targets, max_targets_len_from_inputs,
        final_length_divisible_by=2**hparams.num_compress_steps)
    targets_c = compress(targets, inputs, False, hparams, "compress")
    if hparams.mode != tf.estimator.ModeKeys.PREDICT:
      # Compress and bottleneck.
      latents_dense, latents_discrete, extra_loss, _ = bottleneck(
          targets_c, hparams, 2 * 2048, "vc", means, ema_count, ema_means)
      if _DO_SUMMARIES:
        tf.summary.histogram("b0", tf.reshape(latents_discrete[:, 0, :], [-1]))
      pc = common_layers.inverse_exp_decay(hparams.startup_steps)
      pc = pc if hparams.mode == tf.estimator.ModeKeys.TRAIN else 1.0
      cond = tf.less(tf.random_uniform([batch_size]), pc)
      latents_dense = tf.where(cond, latents_dense, targets_c)
      # TODO(lukaszkaiser): return extra losses batchwise, multiply before mean.
      losses["extra"] = extra_loss * tf.reduce_mean(tf.to_float(cond))
      # Extra loss predicting latent code from input. Discrete only.
      if hparams.bottleneck_kind not in ["dense", "vae"]:
        latents_pred = decode_transformer(
            tf.stop_gradient(inputs), tf.stop_gradient(ed),
            tf.stop_gradient(latents_dense), hparams, "extra")
        _, latent_pred_loss = ae_latent_softmax(
            latents_pred, latents_discrete, hparams)
        losses["latent_pred"] = tf.reduce_mean(
            latent_pred_loss * 0.5 * tf.to_float(cond))
      else:
        inputs_c = decode_transformer(inputs, ed, targets_c, hparams, "dec_c")
        losses["latent_pred"] = tf.reduce_mean((inputs_c - targets_c)**2) * 20
        def bn_inputs():
          with tf.variable_scope(tf.get_variable_scope(), reuse=True):
            bn, _, _, _ = bottleneck(inputs_c, hparams, 2 * 2048, "vc", means,
                                     ema_count, ema_means)
          return bn
        pbn = 0.8 if hparams.mode == tf.estimator.ModeKeys.TRAIN else 1.0
        inputs_c = tf.cond(tf.less(tf.random_uniform([]), pbn),
                           bn_inputs, lambda: inputs_c)
        ptc = 1.0 - common_layers.inverse_lin_decay(200000) * 0.5
        ptc = ptc if hparams.mode == tf.estimator.ModeKeys.TRAIN else 1.0
        latents_dense = tf.where(tf.less(tf.random_uniform([batch_size]), ptc),
                                 latents_dense, inputs_c)
    else:
      if hparams.bottleneck_kind in ["dense", "vae"]:
        inputs_c = decode_transformer(inputs, ed, targets_c, hparams, "dec_c")
        latents_dense, _, _, _ = bottleneck(inputs_c, hparams, 2 * 2048, "vc",
                                            means, ema_count, ema_means)
      else:
        latent_len = common_layers.shape_list(targets_c)[1]
        _, _, _, embed = bottleneck(targets_c, hparams, 2 * 2048, "vc", means,
                                    ema_count, ema_means)
        latents_dense = tf.zeros_like(targets_c[:, :latent_len, :, :])
        if cache is None:
          cache = ae_latent_sample(latents_dense, inputs, ed, embed, 8, hparams)
        latents_dense = embed(cache)
    # Postprocess.
    d = latents_dense
    pos = tf.get_variable("pos", [1, 1000, 1, hparams.hidden_size])
    pos = pos[:, :common_layers.shape_list(latents_dense)[1] + 1, :, :]
    latents_dense = tf.pad(latents_dense,
                           [[0, 0], [1, 0], [0, 0], [0, 0]]) + pos

    # Masking.
    if hparams.do_mask:
      masking = common_layers.inverse_lin_decay(hparams.mask_startup_steps)
      masking *= common_layers.inverse_exp_decay(
          hparams.mask_startup_steps // 4)  # Not much at start.
      if not hparams.do_refine:
        masking -= tf.random_uniform([]) * hparams.unmasked_percentage
      masking = tf.minimum(tf.maximum(masking, 0.0), 1.0)
      if hparams.mode == tf.estimator.ModeKeys.PREDICT:
        masking = predict_mask
      mask = tf.less(masking, tf.random_uniform(
          common_layers.shape_list(targets)[:-1]))
      mask = tf.expand_dims(tf.to_float(mask), 3)
      for i in xrange(hparams.num_compress_steps):
        j = hparams.num_compress_steps - i - 1
        d = residual_conv(d, 1, (3, 1), hparams, "decompress_rc_%d" % j)
        if hparams.do_attend_decompress:
          d = attend(d, inputs, hparams, "decompress_attend_%d" % j)
        d = decompress_step(d, hparams, i > 0, False, "decompress_%d" % j)
      targets = mask * targets + (1.0 - mask) * d
    targets = tf.concat([tf.reverse(latents_dense, [1]), targets], axis=1)

  res = decode_transformer(inputs, ed, targets, hparams, "decoder")
  if hparams.do_ae:
    res = res[:, common_layers.shape_list(latents_dense)[1]:, :, :]
    if hparams.do_mask and hparams.do_refine:
      def refine_res():
        # return residual_conv(res, 1, (5, 1), hparams, "refine")
        r, _ = encode(tf.squeeze(res, axis=[2]),
                      target_space, hparams, "refine_enc")
        return tf.expand_dims(r, axis=2)
      masked_batches = tf.reduce_sum(mask, axis=[1, 2, 3])
      all_masked = tf.less(masked_batches, 0.1)
      res = tf.where(all_masked, refine_res(), res)
    # We'll start training the extra model of latents after mask_startup_steps.
    latent_time = tf.less(hparams.mask_startup_steps,
                          tf.to_int32(tf.train.get_global_step()))
    losses["latent_pred"] *= tf.to_float(latent_time)
  return res, losses, cache
Code example #33
def rot90(images):
    return tf.transpose(tf.reverse(images, [2]), [0, 2, 1, 3])
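Reversing the width axis and then swapping height and width rotates each NHWC image 90 degrees counter-clockwise. A quick sanity-check sketch (TF 1.x session, values my own):

import numpy as np

img = np.arange(4, dtype=np.float32).reshape(1, 2, 2, 1)  # [[0 1] [2 3]]
with tf.Session() as sess:
    print(sess.run(rot90(tf.constant(img)))[0, :, :, 0])
# [[1. 3.]
#  [0. 2.]]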
Code example #34
def _parse_eval_(image):
  image = (image*1.0+1.0)*127.5
  image = tf.reverse(image,[-1])
  image = tf.clip_by_value(image,0,255)
  return image
Code example #35
def build_bert_inputs(example):
    """Convert example <Tensor [30, 70]> into bert inputs."""
    k_size = FLAGS.k_size

    CLS_ID = tf.constant([101], dtype=tf.int64)  # pylint: disable=invalid-name
    SEP_ID = tf.constant([102], dtype=tf.int64)  # pylint: disable=invalid-name
    max_len = tf.constant([FLAGS.max_para_length])
    context_size = tf.constant([FLAGS.context_size])

    intermediate_examples_tensor = tf.reduce_sum(tf.abs(example), 1)
    examples_zero_vector = tf.zeros(shape=(1, 1), dtype=tf.int64)
    examples_bool_mask = tf.squeeze(
        tf.not_equal(intermediate_examples_tensor, examples_zero_vector))
    paragraph_len = tf.reduce_sum(tf.cast(examples_bool_mask, tf.int32))

    start = tf.random.uniform([1],
                              0,
                              tf.reshape(paragraph_len, []) -
                              tf.reshape(context_size, []) + 1,
                              dtype=tf.int32)

    # Slice the document into the before, after and context.
    # Discard the zero padding.
    sizes = tf.squeeze(
        tf.concat([[
            start, context_size, paragraph_len - context_size - start,
            max_len - paragraph_len
        ]], 0))
    before, context, after, _ = tf.split(example, sizes, axis=0)

    # Gather the context removing zero padding at end of sentences.
    non_zeros = tf.where(tf.not_equal(context, tf.zeros_like(context)))
    context_gathered = tf.gather_nd(context, non_zeros)

    # Flip before so we select the 4 sentences closest to target
    before = tf.reverse(before, axis=[0])

    # pad both to longer than needed
    paddings = tf.constant([[0, 8], [0, 0]])
    before = tf.pad(before, paddings)
    after = tf.pad(after, paddings)

    # Extend targets to 3 sentences
    # pad both
    before_minus_one = before[1:][:k_size]
    before_minus_two = before[2:][:k_size]
    after_plus_one = after[1:][:k_size]
    after_plus_two = after[2:][:k_size]
    before = before[:k_size]
    after = after[:k_size]

    before = tf.concat([before_minus_two, before_minus_one, before], axis=1)
    after = tf.concat([after, after_plus_one, after_plus_two], axis=1)
    ############################################################################

    # These 8 sentences are the 8 surrounding targets. Some are padding.
    targets = tf.concat([before, after], axis=0)

    # Remove the padding from the surrounding sentences
    # E.g. if the context starts at the beginning of the paragraph, before is all padding
    intermediate_tensor = tf.reduce_sum(tf.abs(targets), 1)
    zero_vector = tf.zeros(shape=(1, 1), dtype=tf.int64)
    bool_mask = tf.squeeze(tf.not_equal(intermediate_tensor, zero_vector))
    bool_mask.set_shape([None])
    targets = tf.boolean_mask(targets, bool_mask)

    # Randomly select 4 targets
    # We will also select the label_types for each selected target
    indices = tf.range(0, limit=tf.shape(targets)[0], dtype=tf.int32)
    shuffled_indices = tf.random.shuffle(indices)[:k_size]

    targets = tf.gather(targets, shuffled_indices)
    if k_size == 4:
        full_labels = tf.concat([tf.range(3, -1, -1), tf.range(4, 8)], axis=0)
    elif k_size == 3:
        full_labels = tf.concat([tf.range(2, -1, -1), tf.range(3, 6)], axis=0)
    elif k_size == 2:
        full_labels = tf.concat([tf.range(1, -1, -1), tf.range(2, 4)], axis=0)
    elif k_size == 1:
        full_labels = tf.concat([tf.range(0, -1, -1), tf.range(1, 2)], axis=0)
    label_types = tf.boolean_mask(full_labels, bool_mask)
    label_types = tf.gather(label_types, shuffled_indices)

    # create inputs
    bert_inputs = []
    input_masks = []
    segment_ids = []

    # make context
    ctx_segment_id = tf.concat([
        tf.zeros_like(CLS_ID, dtype=tf.int64),
        tf.zeros_like(context_gathered),
        tf.zeros_like(SEP_ID, dtype=tf.int64)
    ],
                               axis=0)
    ctx_segment_id = pad_and_cut(ctx_segment_id, FLAGS.max_seq_length)
    segment_ids.append(ctx_segment_id)

    new_ctx_input = tf.concat([CLS_ID, context_gathered, SEP_ID], axis=0)
    ctx_input_mask = tf.ones_like(new_ctx_input)
    ctx_input_mask = pad_and_cut(ctx_input_mask, FLAGS.max_seq_length)
    input_masks.append(ctx_input_mask)
    padded_new_ctx_input = pad_and_cut(new_ctx_input, FLAGS.max_seq_length)
    bert_inputs.append(padded_new_ctx_input)

    for i in range(k_size):
        target_non_zero = tf.where(
            tf.not_equal(targets[i], tf.zeros_like(targets[i])))
        targets_stripped = tf.gather_nd(targets[i], target_non_zero)
        if FLAGS.include_context:
            segment_id = tf.concat([
                tf.zeros_like(CLS_ID, dtype=tf.int64),
                tf.zeros_like(context_gathered),
                tf.zeros_like(SEP_ID, dtype=tf.int64),
                tf.ones_like(targets_stripped),
                tf.ones_like(SEP_ID, dtype=tf.int64)
            ],
                                   axis=0)
        else:
            segment_id = tf.concat([
                tf.zeros_like(CLS_ID, dtype=tf.int64),
                tf.zeros_like(targets_stripped),
                tf.zeros_like(SEP_ID, dtype=tf.int64)
            ],
                                   axis=0)
        segment_id = pad_and_cut(segment_id, FLAGS.max_seq_length)
        segment_ids.append(segment_id)
        if FLAGS.include_context:
            new_input = tf.concat(
                [CLS_ID, context_gathered, SEP_ID, targets_stripped, SEP_ID],
                axis=0)
        else:
            new_input = tf.concat([CLS_ID, targets_stripped, SEP_ID], axis=0)
        input_mask = tf.ones_like(new_input)
        input_mask = pad_and_cut(input_mask, FLAGS.max_seq_length)
        input_masks.append(input_mask)
        padded_new_input = pad_and_cut(new_input, FLAGS.max_seq_length)
        bert_inputs.append(padded_new_input)
    bert_inputs = tf.stack(bert_inputs, axis=0)
    input_masks = tf.stack(input_masks, axis=0)
    segment_ids = tf.stack(segment_ids, axis=0)

    out = Outputs_And_Context(bert_inputs, input_masks, segment_ids,
                              label_types, context_gathered)

    return out
Code example #36
File: transformation.py Project: waffletower/stftGAN
def tf_flip_slices_2d(dl, ur, ul):
    flip_dl = tf.reverse(dl, axis=[1])
    flip_ur = tf.reverse(ur, axis=[2])
    flip_ul = tf.reverse(ul, axis=[1, 2])
    return flip_dl, flip_ur, flip_ul
Code example #37
File: transformation.py Project: waffletower/stftGAN
def tf_flip_slices_1d(l):
    return tf.reverse(l, axis=[1])
Code example #38
File: chatbot.py Project: rkopec91/Chatbot
learning_rate = 0.001
learning_rate_decay = 0.9
min_learning_rate = 0.0001
keep_probability = 0.5

tf.reset_default_graph()
session = tf.InteractiveSession()

inputs, targets, lr, keep_prob = model_inputs()

sequence_length = tf.placeholder_with_default(25, None, name='sequence_length')

input_shape = tf.shape(inputs)

training_predictions, test_predictions = seq2seq_model(
    tf.reverse(inputs, [-1]), targets, keep_prob, batch_size, sequence_length,
    len(answerswords2int), len(questionswords2int), encoding_embedding_size,
    decoding_embedding_size, rnn_size, num_layers, questionswords2int)

with tf.name_scope("optimization"):
    loss_error = tf.contrib.seq2seq.sequence_loss(
        training_predictions, targets,
        tf.ones([input_shape[0], sequence_length]))
    optimizer = tf.train.AdamOptimizer(learning_rate)
    gradients = optimizer.compute_gradients(loss_error)
    clipped_gradients = [(tf.clip_by_value(grad_tensor, -5.,
                                           5.), grad_variable)
                         for grad_tensor, grad_variable in gradients
                         if grad_tensor is not None]
    optimizer_gradient_clipping = optimizer.apply_gradients(clipped_gradients)
Code example #39
def trainSurvivalNet(mat_file_path, n_hidden, num_steps, num_shuffles,
                     penaltyLambdaArray, alphaArray, prefix):
    """ This function is to train SurvivalNet with Tensorflow.
        :type mat_file_path: string
        :param mat_file_path: path to the file that stores data in .mat format
        
        :type n_hidden: integer
        :param n_hidden: number of hidden nodes in a layer
        
        :type num_steps: integer
        :param num_steps: number of iterations to run

        :type num_shuffles: integer
        :param num_shuffles: number of shuffles to run

        :type penaltyLambdaArray: np.float32 array
        :param penaltyLambdaArray: array of lambda (regularization parameters) to train the model

        :type alphaArray: np.float32 array
        :param alphaArray: array of alpha (balancing factor between L1 and L2 in elastic net) to train the model

        :type prefix: string
        :param prefix: prefix of output file that stores all results
        
        """

    p = os.path.join(os.getcwd(), mat_file_path)
    Brain_C = sio.loadmat(p)

    data = Brain_C['Integ_X']
    censor = np.asarray([c[0] for c in Brain_C['Censored']])
    survival = np.asarray([t[0] for t in Brain_C['Survival']])

    T = np.asarray([t[0] for t in Brain_C['Survival']])
    O = 1 - np.asarray([c[0] for c in Brain_C['Censored']])
    X = Brain_C['Integ_X']

    # Use the whole dataset for pretraining
    pretrain_set = X

    # fold_size denotes the amount of data used for testing. The same amount
    # of data is used for model selection. The rest is used for training.
    fold_size = int(len(X) / 10)

    train_set = {}
    test_set = {}
    final_set = {}

    # calculate the risk group for every patient i: patients who die after i
    sa = SurvivalAnalysis()
    train_set['X'], train_set['T'], train_set['O'], train_set[
        'A'] = sa.calc_at_risk(X[0:fold_size * 6, ], T[0:fold_size * 6],
                               O[0:fold_size * 6])
    test_set['X'], test_set['T'], test_set['O'], test_set[
        'A'] = sa.calc_at_risk(X[fold_size * 6:fold_size * 8, ],
                               T[fold_size * 6:fold_size * 8],
                               O[fold_size * 6:fold_size * 8])
    final_set['X'], final_set['T'], final_set['O'], final_set[
        'A'] = sa.calc_at_risk(X[fold_size * 8:, ], T[fold_size * 8:],
                               O[fold_size * 8:])

    ## initialization
    n_obs = train_set['X'].shape[0]  # 302
    n_in = train_set['X'].shape[1]  # 201

    test_obs = test_set['X'].shape[0]  # 64
    test_in = test_set['X'].shape[1]  # 201

    n_out = 1

    #### tensorflow implementation
    def cumsum(x, observations):
        x = tf.reshape(x, (1, observations))
        values = tf.split(1, x.get_shape()[1], x)
        out = []
        prev = tf.zeros_like(values[0])
        for val in values:
            s = prev + val
            out.append(s)
            prev = s
        cumsum = tf.concat(1, out)
        cumsum = tf.reshape(cumsum, (observations, 1))
        return cumsum

    with tf.device('/gpu:1'):
        ## dropout
        keep_prob = tf.placeholder(tf.float32)

        ## penaltyLambda
        penaltyLambda = tf.placeholder(tf.float32)

        ## alpha
        alpha = tf.placeholder(tf.float32)

        ## data
        input = tf.placeholder(tf.float32, [n_obs, n_in])
        at_risk = tf.placeholder(tf.int32, [
            n_obs,
        ])
        observed = tf.placeholder(tf.float32, [
            n_obs,
        ])

        # testing data
        test_input = tf.placeholder(tf.float32, [test_obs, test_in])
        prediction_at_risk = tf.placeholder(tf.int32, [
            test_obs,
        ])
        prediction_observed = tf.placeholder(tf.float32, [
            test_obs,
        ])

        ## layer_1
        w_1 = tf.Variable(
            tf.truncated_normal([n_in, n_hidden], dtype=tf.float32) / 20)
        output_layer1 = tf.nn.relu(tf.matmul(input, w_1))
        output_layer1_drop = tf.nn.dropout(output_layer1, keep_prob)
        prediciton_layer1 = tf.nn.relu(tf.matmul(test_input, w_1))

        ## layer_2
        w_2 = tf.Variable(
            tf.truncated_normal([n_hidden, n_hidden], dtype=tf.float32) / 20)
        output_layer2 = tf.nn.relu(tf.matmul(output_layer1_drop, w_2))
        output_layer2_drop = tf.nn.dropout(output_layer2, keep_prob)
        prediciton_layer2 = tf.nn.relu(tf.matmul(prediciton_layer1, w_2))

        ## layer_3
        w_3 = tf.Variable(
            tf.truncated_normal([n_hidden, n_hidden], dtype=tf.float32) / 20)
        output_layer3 = tf.nn.relu(tf.matmul(output_layer2_drop, w_3))
        output_layer3_drop = tf.nn.dropout(output_layer3, keep_prob)
        prediciton_layer3 = tf.nn.relu(tf.matmul(prediciton_layer2, w_3))

        ## layer_4
        w_4 = tf.Variable(
            tf.truncated_normal([n_hidden, n_hidden], dtype=tf.float32) / 20)
        output_layer4 = tf.nn.relu(tf.matmul(output_layer3_drop, w_4))
        output_layer4_drop = tf.nn.dropout(output_layer4, keep_prob)
        prediciton_layer4 = tf.nn.relu(tf.matmul(prediciton_layer3, w_4))

        # layer_5
        w_5 = tf.Variable(
            tf.truncated_normal([n_hidden, n_hidden], dtype=tf.float32) / 20)
        output_layer5 = tf.nn.relu(tf.matmul(output_layer4_drop, w_5))
        output_layer5_drop = tf.nn.dropout(output_layer5, keep_prob)
        prediciton_layer5 = tf.nn.relu(tf.matmul(prediciton_layer4, w_5))

        ## output layer
        w_6 = tf.Variable(
            tf.truncated_normal([n_hidden, n_out], dtype=tf.float32) / 20)
        output = tf.matmul(output_layer5_drop, w_6)

        prediction_output = tf.matmul(prediction_layer5, w_6)

        # Cox negative log partial likelihood: for each sample we need
        # log(sum of exp(output) over its risk set). Reversing, taking the
        # cumulative sum, and reversing back yields sums-from-the-end over
        # the time-sorted batch.
        exp = tf.reverse(tf.exp(output), dims=[True, False])
        partial_sum_a = cumsum(exp, n_obs)
        partial_sum = tf.reverse(partial_sum_a, dims=[True, False]) + 1
        log_at_risk = tf.log(
            tf.gather(partial_sum, tf.reshape(at_risk, [-1])) + 1e-50)
        diff = tf.sub(output, log_at_risk)
        # only uncensored samples contribute to the likelihood
        times = tf.reshape(diff, [-1]) * observed
        # elastic-net penalty: alpha scales the L2 terms and (1 - alpha) the
        # L1 terms, both weighted by penaltyLambda; each weight matrix is
        # penalized exactly once
        weights = [w_1, w_2, w_3, w_4, w_5, w_6]
        l2_penalty = tf.add_n([tf.nn.l2_loss(w) for w in weights])
        l1_penalty = tf.add_n([tf.reduce_sum(tf.abs(w)) for w in weights])
        cost = -tf.reduce_sum(times) + penaltyLambda * (
            alpha * l2_penalty + (1 - alpha) * l1_penalty)

        weightSize = tf.nn.l2_loss(w_1) + tf.nn.l2_loss(w_2) + tf.nn.l2_loss(
            w_3) + tf.nn.l2_loss(w_4) + tf.nn.l2_loss(w_5) + tf.nn.l2_loss(w_6)

        ### prediction: the same negative log partial likelihood on the
        ### held-out data, without regularization
        prediction_exp = tf.reverse(tf.exp(prediction_output),
                                    dims=[True, False])
        prediction_partial_sum_a = cumsum(prediction_exp, test_obs)
        prediction_partial_sum = tf.reverse(prediction_partial_sum_a,
                                            dims=[True, False]) + 1
        prediction_log_at_risk = tf.log(
            tf.gather(prediction_partial_sum,
                      tf.reshape(prediction_at_risk, [-1])) + 1e-50)
        prediction_diff = tf.sub(prediction_output, prediction_log_at_risk)
        prediction_times = tf.reshape(prediction_diff,
                                      [-1]) * prediction_observed
        prediction_cost = -(tf.reduce_sum(prediction_times))

        global_step = tf.Variable(0, trainable=False)
        starter_learning_rate = 0.0001
        learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                                   global_step,
                                                   100000,
                                                   0.989,
                                                   staircase=True)
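        # with staircase=True the effective rate is
        #   0.0001 * 0.989 ** (global_step // 100000),
        # i.e. it drops only once every 100000 steps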

        # optimizer
        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
            cost)

    for alphaArrayIndex in range(len(alphaArray)):
        print("alpha: " + str(alphaArray[alphaArrayIndex]))

        for penaltyLambdaIndex in range(len(penaltyLambdaArray)):
            print("lambda: " + str(penaltyLambdaArray[penaltyLambdaIndex]))

            targetFile = prefix + ".lambda." + str(
                penaltyLambdaArray[penaltyLambdaIndex]) + ".alpha." + str(
                    alphaArray[alphaArrayIndex]) + ".txt"

            target = open(targetFile, "w")
            finalTestingAcc = np.zeros(num_shuffles)
            testingAcc = np.zeros(num_shuffles)

            bestAccInOneShuffle = np.zeros(num_steps)

            session = tf.InteractiveSession()

            header = prefix + ".lambda." + str(
                penaltyLambdaArray[penaltyLambdaIndex]) + ".alpha." + str(
                    alphaArray[alphaArrayIndex])

            for shuffle in range(num_shuffles):
                outputFile = header + "." + str(shuffle) + ".txt"
                outputFH = open(outputFile, "w")
                outputFH.write("trainCost" + "\t" + "testCost" + "\t" +
                               "trainCIndex" + "\t" + "testCIndex" + "\t" +
                               "weightSize" + "\n")

                tf.initialize_all_variables().run()
                index = np.arange(data.shape[0])
                random.shuffle(index)

                X = X[index, :]
                O = O[index]
                T = T[index]

                fold_size = int(len(X) / 10)
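                # 10 folds: 6 for training, 2 for validation, 2 held out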

                train_set = {}
                test_set = {}
                final_set = {}

                sa = SurvivalAnalysis()
                train_set['X'], train_set['T'], train_set['O'], train_set[
                    'A'] = sa.calc_at_risk(X[0:fold_size * 6, ],
                                           T[0:fold_size * 6],
                                           O[0:fold_size * 6])
                test_set['X'], test_set['T'], test_set['O'], test_set[
                    'A'] = sa.calc_at_risk(X[fold_size * 6:fold_size * 8, ],
                                           T[fold_size * 6:fold_size * 8],
                                           O[fold_size * 6:fold_size * 8])
                final_set['X'], final_set['T'], final_set['O'], final_set[
                    'A'] = sa.calc_at_risk(X[fold_size * 8:fold_size * 10, ],
                                           T[fold_size * 8:fold_size * 10],
                                           O[fold_size * 8:fold_size * 10])

                number_of_range = 0
                sum_of_test_c_index = np.zeros(15)
                for step in range(num_steps):
                    feed_dict = {
                        input: train_set['X'],
                        at_risk: train_set['A'],
                        observed: train_set['O'],
                        test_input: test_set['X'],
                        prediction_at_risk: test_set['A'],
                        prediction_observed: test_set['O'],
                        keep_prob: 1,
                        penaltyLambda: penaltyLambdaArray[penaltyLambdaIndex],
                        alpha: alphaArray[alphaArrayIndex]
                    }

                    timesV, _, test_outputV, outputV, costV, expV, partialV, logV, diffV, w1V, costTestV, weightSizeV = session.run(
                        [
                            times, optimizer, prediction_output, output, cost,
                            exp, partial_sum, log_at_risk, diff, w_1,
                            prediction_cost, weightSize
                        ],
                        feed_dict=feed_dict)
                    train_c_index = _naive_concordance_index(
                        train_set['T'], -outputV, train_set['O'])
                    test_c_index = _naive_concordance_index(
                        test_set['T'], -test_outputV, test_set['O'])

                    bestAccInOneShuffle[step] = test_c_index

                    outputFH.write(
                        str(costV) + "\t" + str(costTestV) + "\t" +
                        str(train_c_index) + "\t" + str(test_c_index) + "\t" +
                        str(weightSizeV) + "\n")

                    if (step % 10 == 0):
                        print("step: " + str(step) + ", cost: " + str(costV))
                        print("train cIndex: " + str(train_c_index) +
                              ", test cIndex: " + str(test_c_index))

                    if (step == num_steps - 1):
                        print("best result: " +
                              str(np.max(bestAccInOneShuffle)))
                        feed_dict = {
                            input: train_set['X'],
                            at_risk: train_set['A'],
                            observed: train_set['O'],
                            test_input: final_set['X'],
                            keep_prob: 1,
                            penaltyLambda:
                            penaltyLambdaArray[penaltyLambdaIndex],
                            alpha: alphaArray[alphaArrayIndex]
                        }

                        final_outputV = session.run(prediction_output,
                                                    feed_dict=feed_dict)
                        final_c_index = _naive_concordance_index(
                            final_set['T'], -final_outputV, final_set['O'])
                        finalTestingAcc[shuffle] = final_c_index
                        testingAcc[shuffle] = test_c_index

                outputFH.close()

            target.write("final mean: " + str(np.mean(finalTestingAcc)) + "\n")
            target.write("final sd: " + str(np.std(finalTestingAcc)) + "\n")

            target.write("---\n")

            target.write("validation mean: " + str(np.mean(testingAcc)) + "\n")
            target.write("validation sd: " + str(np.std(testingAcc)) + "\n")

            target.close()
Code example #40
def tf_discount_rewards(tf_r):  # tf_r ~ [game_steps,1]
    # gamma (the discount factor) is read from the enclosing scope
    discount_f = lambda a, v: a * gamma + v
    tf_r_reverse = tf.scan(discount_f, tf.reverse(tf_r, [True, False]))
    tf_discounted_r = tf.reverse(tf_r_reverse, [True, False])
    return tf_discounted_r
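
As a sanity check, here is a minimal NumPy sketch of the same backward
recurrence (gamma comes from the enclosing scope in the snippet above;
0.99 below is an assumed value):

import numpy as np

def discount_rewards_np(r, gamma=0.99):
    # walk the rewards backwards; each step accumulates the discounted
    # future return, matching the tf.scan over the reversed tensor
    out = np.zeros(len(r))
    running = 0.0
    for t in reversed(range(len(r))):
        running = running * gamma + r[t]
        out[t] = running
    return out

# discount_rewards_np([0.0, 0.0, 1.0]) -> [0.9801, 0.99, 1.0]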
Code example #41
def pixel_wise_softmax(output_map):
    # softmax over the channel axis of a two-channel map: adding the
    # channel-reversed exponentials yields exp(a) + exp(b) at every pixel
    exponential_map = tf.exp(output_map)
    evidence = tf.add(exponential_map,
                      tf.reverse(exponential_map, [False, False, False, True]))
    return tf.div(exponential_map, evidence, name="pixel_wise_softmax")
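
Note that adding a channel-reversed copy of the exponentials only produces
the correct normalizer for exactly two channels. A channel-count-agnostic
sketch (assuming a TF 1.x-style API with channels on the last axis) might
look like:

def pixel_wise_softmax_nd(output_map):
    # subtract the per-pixel max for numerical stability, then normalize
    # over the channel (last) axis
    stable = output_map - tf.reduce_max(output_map, axis=-1, keep_dims=True)
    exponential_map = tf.exp(stable)
    normalizer = tf.reduce_sum(exponential_map, axis=-1, keep_dims=True)
    return tf.div(exponential_map, normalizer, name="pixel_wise_softmax_nd")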
Code example #42
 
# Defining a session
tf.reset_default_graph()
session = tf.InteractiveSession()
 
# Loading the model inputs
inputs, targets, lr, keep_prob = model_inputs()
 
# Setting the sequence length
sequence_length = tf.placeholder_with_default(25, None, name='sequence_length')
 
# Getting the shape of the inputs tensor
input_shape = tf.shape(inputs)
 
# Getting the training and test predictions
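# The inputs are reversed along their last (sequence) axis before encoding;
# feeding the source sequence backwards is a common seq2seq trick that
# shortens the path from early source tokens to early target tokens.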
training_predictions, test_predictions = seq2seq_model(tf.reverse(inputs, [-1]),
                                                       targets,
                                                       keep_prob,
                                                       batch_size,
                                                       sequence_length,
                                                       len(answerswords2int),
                                                       len(questionswords2int),
                                                       encoding_embedding_size,
                                                       decoding_embedding_size,
                                                       rnn_size,
                                                       num_layers,
                                                       questionswords2int)
 
# Setting up the Loss Error, the Optimizer and Gradient Clipping
with tf.name_scope("optimization"):
    loss_error = tf.contrib.seq2seq.sequence_loss(training_predictions,