Example #1
  def _noisy_identity_kernel_initializer(shape,
                                         dtype=tf.float32,
                                         partition_info=None):
    """Constructs a noisy identity kernel.

    Args:
      shape: List of integers. Represents shape of result.
      dtype: data type for values in result.
      partition_info: Partition information for initializer functions. Ignored.

    Returns:
      Tensor of desired shape and dtype such that applying it as a convolution
        kernel results in a noisy near-identity operation.

    Raises:
      ValueError: If shape does not define a valid kernel.
                  If filter width and height differ.
                  If filter width and height are not odd numbers.
                  If number of input and output channels are not multiples of
                    base_num_channels.
    """
    if len(shape) != 4:
      raise ValueError("Convolution kernels must be rank 4.")

    filter_height, filter_width, in_channels, out_channels = shape

    if filter_width != filter_height:
      raise ValueError(
          "Noisy identity initializer only works for square filters.")
    if filter_width % 2 != 1:
      raise ValueError(
          "Noisy identity initializer requires filters have odd height and "
          "width.")
    if (in_channels % base_num_channels != 0 or
        out_channels % base_num_channels != 0):
      raise ValueError("in_channels and out_channels must both be multiples of "
                       "base_num_channels.")

    middle_pixel = filter_height // 2
    is_middle_pixel = tf.logical_and(
        tf.equal(_range_along_dimension(0, shape), middle_pixel),
        tf.equal(_range_along_dimension(1, shape), middle_pixel))
    is_same_channel_multiple = tf.equal(
        tf.floordiv(
            _range_along_dimension(2, shape) * base_num_channels, in_channels),
        tf.floordiv(
            _range_along_dimension(3, shape) * base_num_channels, out_channels))
    noise = tf.truncated_normal(shape, stddev=stddev, dtype=dtype)
    return tf.where(
        tf.logical_and(is_same_channel_multiple, is_middle_pixel),
        tf.ones(
            shape, dtype=dtype) * (base_num_channels / out_channels),
        noise)
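A quick way to see which kernel entries the floor-division test above treats as "identity" entries: it keeps input/output channel pairs that fall into the same base-channel group. A minimal NumPy sketch (standalone; base_num_channels, in_channels and out_channels below are illustrative values, not taken from the surrounding module):

import numpy as np

base_num_channels, in_channels, out_channels = 2, 4, 8  # hypothetical sizes
in_idx = np.arange(in_channels)[:, None]    # [in_channels, 1]
out_idx = np.arange(out_channels)[None, :]  # [1, out_channels]

# Same test as is_same_channel_multiple in the initializer above.
is_same_multiple = (in_idx * base_num_channels // in_channels ==
                    out_idx * base_num_channels // out_channels)
print(is_same_multiple.astype(int))
# Each input channel lines up with the block of output channels in its group; at the
# middle pixel those entries get base_num_channels / out_channels, the rest get noise.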
Example #2
    def _extract_feature(inputs, idxs):

        idxs = tf.expand_dims(idxs,1)

        idx_i = tf.floordiv(idxs, map_h)
        idx_j = tf.mod(idxs, map_h)

        # NOTE: 
        # calculate the center of input batches
        # this depends on coarse layer's architecture
        origin_i = 2*(2*idx_i+1)+3
        origin_j = 2*(2*idx_j+1)+3

        origin_centers = tf.concat(1,[origin_i,origin_j])
        origin_centers = tf.to_float(origin_centers)

        # NOTE: size also depends on the architecture
        patches = tf.image.extract_glimpse(inputs, size=[14,14], offsets=origin_centers, 
                                           centered=False, normalized=False)

        fine_features = fine_layers(patches)

        # reuse variables
        tf.get_variable_scope().reuse_variables()
        
        src_idxs = tf.concat(1,[idx_i,idx_j])

        return fine_features, src_idxs
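The floordiv/mod pair above unflattens a position index on the coarse feature map into two coordinates (assuming, as the snippet does, that dividing by map_h recovers the first coordinate). A tiny standalone check in plain Python with an illustrative map_h:

map_h = 7  # illustrative value
for flat_idx in (0, 6, 7, 48):
    idx_i = flat_idx // map_h  # same arithmetic as tf.floordiv(idxs, map_h)
    idx_j = flat_idx % map_h   # same arithmetic as tf.mod(idxs, map_h)
    print(flat_idx, "->", (idx_i, idx_j))
# 0 -> (0, 0), 6 -> (0, 6), 7 -> (1, 0), 48 -> (6, 6)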
Example #3
def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length):
  """Computes the time_steps/ctc_input_length after convolution.

  Suppose that the original feature contains two parts:
  1) Real spectrogram signals, spanning input_length steps.
  2) Padded part with all 0s.
  The total length of those two parts is denoted as max_time_steps, which is
  the padded length of the current batch. After convolution layers, the time
  steps of a spectrogram feature are reduced. Since we know what fraction of
  the padded length the real signal occupies, we can compute the time steps of
  the signal after the conv layers (denoted ctc_input_length) as:
  ctc_input_length = (input_length / max_time_steps) * ctc_time_steps.
  This length is then fed into ctc loss function to compute loss.

  Args:
    max_time_steps: max_time_steps for the batch, after padding.
    ctc_time_steps: number of timesteps after convolution.
    input_length: actual length of the original spectrogram, without padding.

  Returns:
    the ctc_input_length after convolution layer.
  """
  ctc_input_length = tf.to_float(tf.multiply(
      input_length, ctc_time_steps))
  return tf.to_int32(tf.floordiv(
      ctc_input_length, tf.to_float(max_time_steps)))
Example #4
def int_to_bit(x_int, nbits):
  """Turn x_int representing numbers into a bitwise (lower-endian) tensor."""
  x_l = tf.expand_dims(x_int, axis=-1)
  x_labels = []
  for i in range(nbits):
    x_labels.append(tf.floormod(tf.floordiv(x_l, 2**i), 2))
  res = tf.concat(x_labels, axis=-1)
  return tf.to_float(res)
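int_to_bit extracts bit i as (x // 2**i) % 2, which yields a lower-endian bit vector. A standalone check of that arithmetic in plain Python (no session needed; values are illustrative):

x_int, nbits = 11, 4  # illustrative values
bits = [(x_int // 2**i) % 2 for i in range(nbits)]
print(bits)  # [1, 1, 0, 1] -- 11 = 1*1 + 1*2 + 0*4 + 1*8, least-significant bit first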
Example #5
 def get_staves(self, page):
   filt = staff_center_filter(page)
   n = self.num_slices
   increment = tf.floordiv(tf.shape(filt)[1], tf.constant(n + 1, tf.int32))
   staff_sections = []
   for i in range(n):
     img = filt[:, i*increment:(i+1)*increment if i + 1 < n else None]
     staff_sections.append(self.detect(img))
   results = tf.py_func(_join_staves, [page.staff_dist] + staff_sections,
                        [staff_sections[0].dtype])
   return results
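increment is the image width floor-divided by num_slices + 1, so each of the n slices covers one (n + 1)-th of the width and the final slice runs to the right edge. A plain-Python sketch of the resulting column ranges (width and num_slices below are hypothetical, not from the original class):

width, n = 100, 3  # hypothetical image width and num_slices
increment = width // (n + 1)  # matches tf.floordiv(tf.shape(filt)[1], n + 1)
for i in range(n):
    stop = (i + 1) * increment if i + 1 < n else width
    print("slice", i, "-> columns", i * increment, "to", stop)
# slice 0 -> columns 0 to 25, slice 1 -> columns 25 to 50, slice 2 -> columns 50 to 100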
Example #6
def ae_latent_softmax(latents_pred, latents_discrete, hparams):
  """Latent prediction and loss."""
  vocab_size = 2 ** hparams.z_size
  if hparams.num_decode_blocks < 2:
    latents_logits = tf.layers.dense(latents_pred, vocab_size,
                                     name="extra_logits")
    if hparams.logit_normalization:
      latents_logits *= tf.rsqrt(1e-8 +
                                 tf.reduce_mean(tf.square(latents_logits)))

    loss = None
    if latents_discrete is not None:
      if hparams.soft_em:
        # latents_discrete is actually one-hot of multinomial samples
        assert hparams.num_decode_blocks == 1
        loss = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=latents_discrete, logits=latents_logits)
      else:
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=latents_discrete, logits=latents_logits)
    sample = multinomial_sample(
        latents_logits, vocab_size, hparams.sampling_temp)
    return sample, loss

  # Multi-block case.
  vocab_bits = int(math.log(vocab_size, 2))
  assert vocab_size == 2**vocab_bits
  assert vocab_bits % hparams.num_decode_blocks == 0
  block_vocab_size = 2**(vocab_bits // hparams.num_decode_blocks)
  latents_logits = [
      tf.layers.dense(
          latents_pred, block_vocab_size, name="extra_logits_%d" % i)
      for i in range(hparams.num_decode_blocks)
  ]
  loss = None
  if latents_discrete is not None:
    losses = []
    for i in range(hparams.num_decode_blocks):
      d = tf.floormod(tf.floordiv(latents_discrete,
                                  block_vocab_size**i), block_vocab_size)
      losses.append(tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=d, logits=latents_logits[i]))
    loss = sum(losses)
  samples = [multinomial_sample(l, block_vocab_size, hparams.sampling_temp)
             for l in latents_logits]
  sample = sum([s * block_vocab_size**i for i, s in enumerate(samples)])
  return sample, loss
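In the multi-block branch, a latent id is treated as a number written in base block_vocab_size: block i is trained against digit (id // block_vocab_size**i) % block_vocab_size, and sampling reassembles the id as the weighted sum of the per-block samples. A plain-Python check of that split and round trip (sizes are illustrative):

block_vocab_size, num_decode_blocks = 16, 3  # e.g. vocab_size = 16**3 = 4096
latent_id = 2579
digits = [(latent_id // block_vocab_size**i) % block_vocab_size
          for i in range(num_decode_blocks)]
print(digits)  # [3, 1, 10] because 2579 = 3 + 1*16 + 10*256
print(sum(d * block_vocab_size**i for i, d in enumerate(digits)))  # 2579, round trip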
Example #7
def get_angle(page):
  img = tf.cast(page.image, tf.float32)
  square = get_square(img)
  f = tf.complex_abs(tf.fft2d(tf.cast(square, tf.complex64))[:MAX_SIZE//2, :])
  x_arr = (
      tf.cast(tf.concat(0,
                        [tf.range(MAX_SIZE // 2),
                         tf.range(1, MAX_SIZE // 2 + 1)[::-1]]),
              tf.float32))[None, :]
  y_arr = tf.cast(tf.range(MAX_SIZE // 2), tf.float32)[:, None]
  f = tf.select(x_arr * x_arr + y_arr * y_arr < 32 * 32, tf.zeros_like(f), f)
  m = tf.argmax(tf.reshape(f, [-1]), dimension=0)
  x = tf.cast((m + MAX_SIZE // 4) % (MAX_SIZE // 2) - (MAX_SIZE // 4), tf.float32)
  y = tf.cast(tf.floordiv(m, MAX_SIZE // 2), tf.float32)
  return(tf.cond(
      y > 0, lambda: tf.atan(x / y), lambda: tf.constant(np.nan, tf.float32)),
      square)
Example #8
def compute_progress(current_image_id, stable_stage_num_images,
                     transition_stage_num_images, num_blocks):
  """Computes the training progress.

  The training alternates between a stable phase and a transition phase.
  The `progress` value indicates how far training has advanced, i.e. the
  training is at
  - a stable phase p if progress = p
  - a transition phase between p and p + 1 if progress = p + fraction
  where p = 0, 1, 2, ...

  Note the max value of progress is `num_blocks` - 1.

  In terms of LOD (of the original implementation):
  progress = `num_blocks` - 1 - LOD

  Args:
    current_image_id: A scalar integer `Tensor` of the current image id,
        counting from 0.
    stable_stage_num_images: An integer representing the number of images in
        each stable stage.
    transition_stage_num_images: An integer representing the number of images in
        each transition stage.
    num_blocks: Number of network blocks.

  Returns:
    A scalar float `Tensor` of the training progress.
  """
  # Note when current_image_id >= min_total_num_images - 1 (which means we
  # are already at the highest resolution), we want to keep progress constant.
  # Therefore, cap current_image_id here.
  capped_current_image_id = tf.minimum(
      current_image_id,
      min_total_num_images(stable_stage_num_images, transition_stage_num_images,
                           num_blocks) - 1)

  stage_num_images = stable_stage_num_images + transition_stage_num_images
  progress_integer = tf.floordiv(capped_current_image_id, stage_num_images)
  progress_fraction = tf.maximum(
      0.0,
      tf.to_float(
          tf.mod(capped_current_image_id, stage_num_images) -
          stable_stage_num_images) / tf.to_float(transition_stage_num_images))
  return tf.to_float(progress_integer) + progress_fraction
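Putting the floordiv and the capped fraction together: progress stays flat during each stable stage and ramps linearly from p to p + 1 during each transition stage. A worked plain-Python trace (stage sizes are hypothetical; the min_total_num_images cap is omitted for brevity):

stable, transition = 100, 100  # hypothetical stage sizes
stage = stable + transition
for image_id in (0, 99, 150, 199, 200, 350):
    progress_integer = image_id // stage  # the tf.floordiv part
    progress_fraction = max(0.0, ((image_id % stage) - stable) / transition)
    print(image_id, "->", progress_integer + progress_fraction)
# 0 -> 0.0, 99 -> 0.0, 150 -> 0.5, 199 -> 0.99, 200 -> 1.0, 350 -> 1.5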
Example #9
    def build_network(self): # build the network for one BP iteration
        # BP initialization
        llr_into_bp_net = tf.Variable(np.ones([self.v_node_num, self.batch_size], dtype=np.float32))
        xe_0 = tf.matmul(self.H_x_to_xe0, llr_into_bp_net)
        xe_v2c_pre_iter = tf.Variable(np.ones([self.num_all_edges, self.batch_size], dtype=np.float32)) # the v->c messages of the previous iteration
        xe_v2c_pre_iter_assign = xe_v2c_pre_iter.assign(xe_0)

        # one iteration
        H_sumC_to_V = tf.constant(self.H_sumC_to_V, dtype=tf.float32)
        H_sumV_to_C = tf.constant(self.H_sumV_to_C, dtype=tf.float32)
        xe_v_sumc, xe_c_sumv = self.one_bp_iteration(xe_v2c_pre_iter, H_sumC_to_V, H_sumV_to_C, xe_0)

        # start the next iteration
        start_next_iteration = xe_v2c_pre_iter.assign(xe_c_sumv)

        # get the final marginal probability and decoded results
        bp_out_llr = tf.add(llr_into_bp_net, tf.matmul(self.H_xe_v_sumc_to_y, xe_v_sumc))
        dec_out = tf.transpose(tf.floordiv(1-tf.to_int32(tf.sign(bp_out_llr)), 2))

        return llr_into_bp_net, xe_0, xe_v2c_pre_iter_assign, start_next_iteration, dec_out
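The dec_out line turns each output LLR into a hard bit: a positive LLR (sign +1) decodes to 0 and a negative LLR (sign -1) decodes to 1, via (1 - sign) // 2. A one-line standalone check of that mapping:

for sign in (1, -1):
    print(sign, "->", (1 - sign) // 2)  # +1 -> 0, -1 -> 1, matching tf.floordiv(1 - sign, 2)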
Example #10
def int_to_bit(x_int, num_bits, base=2):
  """Turn x_int representing numbers into a bitwise (lower-endian) tensor.

  Args:
    x_int: Tensor containing integer to be converted into base notation.
    num_bits: Number of bits in the representation.
    base: Base of the representation.

  Returns:
    Corresponding number expressed in base.
  """
  x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1))
  x_labels = []
  for i in range(num_bits):
    x_labels.append(
        tf.floormod(
            tf.floordiv(tf.to_int32(x_l),
                        tf.to_int32(base)**i), tf.to_int32(base)))
  res = tf.concat(x_labels, axis=-1)
  return tf.to_float(res)
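This generalizes the binary version above: digit i in base `base` is (x // base**i) % base. A quick plain-Python check in base 3 (values are illustrative):

x_int, num_bits, base = 17, 4, 3  # illustrative values
digits = [(x_int // base**i) % base for i in range(num_bits)]
print(digits)  # [2, 2, 1, 0] because 17 = 2 + 2*3 + 1*9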
Example #11
    def beam_step(time, beam_probs, beam_seqs, cand_probs, cand_seqs, *states):
      batch_size = tf.shape(beam_probs)[0]
      inputs = tf.reshape(tf.slice(beam_seqs, [0, time], [batch_size, 1]), [batch_size])
      decoder_input = embedding_ops.embedding_lookup(self.L_dec, inputs)
      decoder_output, state_output = self.decoder_graph(decoder_input, states)

      with vs.variable_scope("Logistic", reuse=True):
        do2d = tf.reshape(decoder_output, [-1, self.size])
        logits2d = rnn_cell._linear(do2d, self.vocab_size, True, 1.0)
        logprobs2d = tf.nn.log_softmax(logits2d)

      total_probs = logprobs2d + tf.reshape(beam_probs, [-1, 1])
      total_probs_noEOS = tf.concat(1, [tf.slice(total_probs, [0, 0], [batch_size, nlc_data.EOS_ID]),
                                        tf.tile([[-3e38]], [batch_size, 1]),
                                        tf.slice(total_probs, [0, nlc_data.EOS_ID + 1],
                                                 [batch_size, self.vocab_size - nlc_data.EOS_ID - 1])])

      flat_total_probs = tf.reshape(total_probs_noEOS, [-1])
      beam_k = tf.minimum(tf.size(flat_total_probs), self.beam_size)
      next_beam_probs, top_indices = tf.nn.top_k(flat_total_probs, k=beam_k)

      next_bases = tf.floordiv(top_indices, self.vocab_size)
      next_mods = tf.mod(top_indices, self.vocab_size)

      next_states = [tf.gather(state, next_bases) for state in state_output]
      next_beam_seqs = tf.concat(1, [tf.gather(beam_seqs, next_bases),
                                     tf.reshape(next_mods, [-1, 1])])

      cand_seqs_pad = tf.pad(cand_seqs, [[0, 0], [0, 1]])
      beam_seqs_EOS = tf.pad(beam_seqs, [[0, 0], [0, 1]])
      new_cand_seqs = tf.concat(0, [cand_seqs_pad, beam_seqs_EOS])
      EOS_probs = tf.slice(total_probs, [0, nlc_data.EOS_ID], [batch_size, 1])
      new_cand_probs = tf.concat(0, [cand_probs, tf.reshape(EOS_probs, [-1])])

      cand_k = tf.minimum(tf.size(new_cand_probs), self.beam_size)
      next_cand_probs, next_cand_indices = tf.nn.top_k(new_cand_probs, k=cand_k)
      next_cand_seqs = tf.gather(new_cand_seqs, next_cand_indices)

      return [time + 1, next_beam_probs, next_beam_seqs, next_cand_probs, next_cand_seqs] + next_states
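Each flattened top-k index over total_probs_noEOS encodes (beam row, token id) as row * vocab_size + token, so the floordiv recovers which beam hypothesis to extend and the mod recovers the extending token. A small standalone sketch (vocab_size is illustrative):

vocab_size = 20  # illustrative
for flat_index in (0, 19, 20, 57):
    beam_row = flat_index // vocab_size  # tf.floordiv(top_indices, self.vocab_size)
    token_id = flat_index % vocab_size   # tf.mod(top_indices, self.vocab_size)
    print(flat_index, "->", (beam_row, token_id))
# 57 -> (2, 17): extend beam 2 with token 17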
Example #12
def ae_latent_softmax(latents_pred, latents_discrete, hparams):
  """Latent prediction and loss."""
  vocab_size = hparams.v_size
  if hparams.bottleneck_kind == "semhash":
    vocab_size = 2**hparams.z_size
  if hparams.num_decode_blocks < 2:
    latents_logits = tf.layers.dense(latents_pred, vocab_size,
                                     name="extra_logits")
    loss = None
    if latents_discrete is not None:
      loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=latents_discrete, logits=latents_logits)
    sample = multinomial_sample(
        latents_logits, vocab_size, hparams.sampling_temp)
    return sample, loss

  # Multi-block case.
  vocab_bits = int(math.log(vocab_size, 2))
  assert vocab_size == 2**vocab_bits
  assert vocab_bits % hparams.num_decode_blocks == 0
  block_vocab_size = 2**(vocab_bits // hparams.num_decode_blocks)
  latents_logits = [
      tf.layers.dense(
          latents_pred, block_vocab_size, name="extra_logits_%d" % i)
      for i in xrange(hparams.num_decode_blocks)
  ]
  loss = None
  if latents_discrete is not None:
    losses = []
    for i in xrange(hparams.num_decode_blocks):
      d = tf.floormod(tf.floordiv(latents_discrete,
                                  block_vocab_size**i), block_vocab_size)
      losses.append(tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=d, logits=latents_logits[i]))
    loss = sum(losses)
  samples = [multinomial_sample(l, block_vocab_size, hparams.sampling_temp)
             for l in latents_logits]
  sample = sum([s * block_vocab_size**i for i, s in enumerate(samples)])
  return sample, loss
Example #13
def _calc_ctc_input_length(args):
  """Compute the actual input length after convolution for ctc_loss function.

  Basically, we need to know the scaled input_length after conv layers.
  new_input_length = old_input_length * ctc_time_steps / max_time_steps

  Args:
    args: the input args to compute ctc input length.

  Returns:
    ctc_input_length, which is required for ctc loss calculation.
  """
  # py2 needs explicit tf import for keras Lambda layer
  import tensorflow as tf

  input_length, input_data, y_pred = args
  max_time_steps = tf.shape(input_data)[1]
  ctc_time_steps = tf.shape(y_pred)[1]
  ctc_input_length = tf.multiply(
      tf.to_float(input_length), tf.to_float(ctc_time_steps))
  ctc_input_length = tf.to_int32(tf.floordiv(
      ctc_input_length, tf.to_float(max_time_steps)))
  return ctc_input_length
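The returned value is floor(input_length * ctc_time_steps / max_time_steps): the unpadded fraction of the input applied to the post-convolution length. A numeric check in plain Python (values are illustrative):

input_length, max_time_steps, ctc_time_steps = 300, 400, 100  # illustrative
ctc_input_length = (input_length * ctc_time_steps) // max_time_steps
print(ctc_input_length)  # 75 -- the real signal covered 3/4 of the padded input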
Example #14
    def _extract_feature(inputs, idxs):

        idxs = tf.expand_dims(idxs,1)

        idx_i = tf.floordiv(idxs, map_h)
        idx_j = tf.mod(idxs, map_h)

        # NOTE: the origins below are starting points, not centers!
        origin_i = 2*(2*idx_i+1)+3 - 5 + 2
        origin_j = 2*(2*idx_j+1)+3 - 5 + 2

        origin_centers = tf.concat(1,[origin_i,origin_j])

        # NOTE: size also depends on the architecture
        #patches = tf.image.extract_glimpse(inputs, size=[14,14], offsets=origin_centers, 
        #                                   centered=False, normalized=False)
        patches = extract_patches(inputs, size=[14,14], offsets=origin_centers)
        
        #fine_features = fine_layers(patches)
        fine_features = []

        src_idxs = tf.concat(1,[idx_i,idx_j])

        return fine_features, src_idxs, patches
Example #15
def modgrad(op, grad):
    x = op.inputs[
        0]  # the first argument (normally you need those to calculate the gradient, like the gradient of x^2 is 2x. )
    y = op.inputs[1]  # the second argument

    return grad * 1, grad * tf.negative(tf.floordiv(x, y))
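This is consistent with writing mod(x, y) = x - y * floor(x / y) and treating floor(x / y) as locally constant: d/dx = 1 and d/dy = -floor(x / y). A finite-difference sanity check away from the discontinuities of mod (values are illustrative; math.fmod equals floor-mod for positive arguments):

import math

x, y, eps = 7.3, 2.0, 1e-5
numeric = (math.fmod(x, y + eps) - math.fmod(x, y - eps)) / (2 * eps)
print(numeric, -math.floor(x / y))  # both approximately -3.0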
Example #16
def resize_real_images(image, params):
    """Resizes real images to match the GAN's current size.

    Args:
        image: tensor, original image.
        params: dict, user passed parameters.

    Returns:
        Resized image tensor.
    """
    func_name = "resize_real_images"
    print_obj("\n" + func_name, "image", image)
    # Resize real image for each block.
    if len(params["conv_num_filters"]) == 1:
        print("\n: NEVER GOING TO GROW, SKIP SWITCH CASE!".format(func_name))
        # If we never are going to grow, no sense using the switch case.
        # 4x4
        resized_image = resize_real_image(image=image,
                                          params=params,
                                          block_idx=0)
    else:
        if params["growth_idx"] is not None:
            block_idx = min((params["growth_idx"] - 1) // 2 + 1,
                            len(params["conv_num_filters"]) - 1)
            resized_image = resize_real_image(image=image,
                                              params=params,
                                              block_idx=block_idx)
        else:
            # Find growth index based on global step and growth frequency.
            growth_index = tf.add(
                x=tf.floordiv(
                    x=tf.minimum(
                        x=tf.cast(
                            x=tf.floordiv(
                                x=tf.train.get_or_create_global_step() - 1,
                                y=params["num_steps_until_growth"],
                                name="{}_global_step_floordiv".format(func_name)),
                            dtype=tf.int32),
                        y=(len(params["conv_num_filters"]) - 1) * 2) - 1,
                    y=2),
                y=1,
                name="{}_growth_index".format(func_name))

            # Switch to case based on number of steps for resized image.
            resized_image = tf.switch_case(
                branch_index=growth_index,
                branch_fns=[
                    # 4x4
                    lambda: resize_real_image(
                        image=image, params=params, block_idx=0),
                    # 8x8
                    lambda: resize_real_image(
                        image=image,
                        params=params,
                        block_idx=min(1,
                                      len(params["conv_num_filters"]) - 1)),
                    # 16x16
                    lambda: resize_real_image(
                        image=image,
                        params=params,
                        block_idx=min(2,
                                      len(params["conv_num_filters"]) - 1)),
                    # 32x32
                    lambda: resize_real_image(
                        image=image,
                        params=params,
                        block_idx=min(3,
                                      len(params["conv_num_filters"]) - 1)),
                    # 64x64
                    lambda: resize_real_image(
                        image=image,
                        params=params,
                        block_idx=min(4,
                                      len(params["conv_num_filters"]) - 1)),
                    # 128x128
                    lambda: resize_real_image(
                        image=image,
                        params=params,
                        block_idx=min(5,
                                      len(params["conv_num_filters"]) - 1)),
                    # 256x256
                    lambda: resize_real_image(
                        image=image,
                        params=params,
                        block_idx=min(6,
                                      len(params["conv_num_filters"]) - 1)),
                    # 512x512
                    lambda: resize_real_image(
                        image=image,
                        params=params,
                        block_idx=min(7,
                                      len(params["conv_num_filters"]) - 1)),
                    # 1024x1024
                    lambda: resize_real_image(
                        image=image,
                        params=params,
                        block_idx=min(8,
                                      len(params["conv_num_filters"]) - 1))
                ],
                name="{}_switch_case_resized_image".format(func_name))
    print_obj(func_name, "resized_image", resized_image)

    return resized_image
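Unrolled, the growth_index graph computes ((min((global_step - 1) // num_steps_until_growth, 2 * (num_blocks - 1)) - 1) // 2) + 1, so the index advances one block every two growth phases (one transition phase plus one stable phase). A plain-Python trace with hypothetical values (9 resolutions, 1000 steps per phase):

num_blocks, num_steps_until_growth = 9, 1000  # hypothetical values
for global_step in (1, 500, 1001, 2001, 3001, 4001, 50000):
    phase = min((global_step - 1) // num_steps_until_growth, (num_blocks - 1) * 2)
    growth_index = (phase - 1) // 2 + 1  # floor division, matching tf.floordiv
    print(global_step, "->", growth_index)
# 1 -> 0, 500 -> 0, 1001 -> 1, 2001 -> 1, 3001 -> 2, 4001 -> 2, 50000 -> 8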
Example #17
def hier_homography_estimator(inputs,
                              num_param=8,
                              num_layer=7,
                              num_level=3,
                              dropout_keep_prob=0.8,
                              reuse=None,
                              is_training=True,
                              trainable=True,
                              final_endpoint=None,
                              scope='hier_hmg'):
    """A hierarchical VGG-style neural network for homograhy estimation.

  Args:
    inputs: batch of input image pairs of data type float32 and of shape
      [batch_size, height, width, 2]
    num_param: the number of parameters for homography (default 8)
    num_layer: the number of convolutional layers in the motion feature network
    num_level: the number of hierarchical levels
    dropout_keep_prob: the percentage of activation values that are kept
    reuse: whether to reuse this network weights
    is_training: whether used for training or testing
    trainable: whether this network is to be trained or not
    final_endpoint: specifies the endpoint to construct the network up to
    scope: the scope of variables in this function

  Returns:
    a list of homographies at each level and motion feature maps if
    final_endpoint='mfeature'; otherwise a list of images warped by the list of
    corresponding homographies
  """
    _, h_input, w_input = inputs.get_shape().as_list()[0:3]
    hmgs_list = []
    warped_list = []
    with tf.variable_scope(scope, [inputs], reuse=reuse):
        for level_index in range(num_level):
            scale = 2**(num_level - 1 - level_index)
            h = tf.to_float(tf.floordiv(h_input, scale))
            w = tf.to_float(tf.floordiv(w_input, scale))
            inputs_il = tf.image.resize_images(inputs, tf.to_int32([h, w]))
            if level_index == 0:
                mfeature = hier_base_layers(inputs_il,
                                            num_layer + 1 - num_level +
                                            level_index,
                                            level_index,
                                            is_training=is_training,
                                            trainable=trainable)
                hmgs_il = homography_regression(
                    mfeature,
                    num_param,
                    level_index,
                    dropout_keep_prob=dropout_keep_prob,
                    is_training=is_training,
                    trainable=trainable)
                hmgs_list.append(hmgs_il)
            else:
                warped, _ = hmg_util.homography_scale_warp_per_batch(
                    inputs_il[:, :, :, 0], w / 2, h / 2,
                    hmgs_list[level_index - 1])
                pre_warped_inputs_il = tf.stack(
                    [warped, inputs_il[:, :, :, 1]], -1)
                warped_list.append(pre_warped_inputs_il)
                if level_index == num_level - 1 and final_endpoint == 'mfeature':
                    mfeature = hier_base_layers(pre_warped_inputs_il,
                                                num_layer - num_level +
                                                level_index,
                                                level_index,
                                                is_training=is_training,
                                                trainable=trainable)
                    return hmgs_list, mfeature
                else:
                    mfeature = hier_base_layers(pre_warped_inputs_il,
                                                num_layer + 1 - num_level +
                                                level_index,
                                                level_index,
                                                is_training=is_training,
                                                trainable=trainable)
                hmgs_il = homography_regression(
                    mfeature,
                    num_param,
                    level_index,
                    dropout_keep_prob=dropout_keep_prob,
                    is_training=is_training,
                    trainable=trainable)
                new_hmgs_il = hmg_util.homography_shift_mult_batch(
                    hmgs_list[level_index - 1], w / 2, h / 2, hmgs_il, w, h, w,
                    h)
                hmgs_list.append(new_hmgs_il)
    return hmgs_list, warped_list
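Each pyramid level works at a resolution divided by 2**(num_level - 1 - level_index), so level 0 sees the coarsest image and the last level the full resolution. A tiny plain-Python illustration (num_level and the input size are hypothetical):

num_level, h_input, w_input = 3, 192, 256  # hypothetical
for level_index in range(num_level):
    scale = 2 ** (num_level - 1 - level_index)
    print(level_index, "->", (h_input // scale, w_input // scale))
# 0 -> (48, 64), 1 -> (96, 128), 2 -> (192, 256)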
Example #18
def build_neurosat(d):

    # Hyperparameters
    learning_rate = 2e-5
    parameter_l2norm_scaling = 1e-10
    global_norm_gradient_clipping_ratio = 0.65

    # Define placeholder for satisfiability statuses (one per problem)
    instance_SAT = tf.placeholder(tf.float32, [None], name="instance_SAT")
    time_steps = tf.placeholder(tf.int32, shape=(), name='time_steps')
    matrix_placeholder = tf.placeholder(tf.float32, [None, None], name="M")
    num_vars_on_instance = tf.placeholder(tf.int32, [None], name="instance_n")

    # Literals
    s = tf.shape(matrix_placeholder)
    l = s[0]
    m = s[1]
    n = tf.floordiv(l, tf.constant(2))
    # Compute number of problems
    p = tf.shape(instance_SAT)[0]

    # Define INV, a tf function to exchange positive and negative literal embeddings
    def INV(Lh):
        l = tf.shape(Lh)[0]
        n = tf.div(l, tf.constant(2))
        # Send messages from negated literals to positive ones, and vice-versa
        Lh_pos = tf.gather(Lh, tf.range(tf.constant(0), n))
        Lh_neg = tf.gather(Lh, tf.range(n, l))
        Lh_inverted = tf.concat([Lh_neg, Lh_pos], axis=0)
        return Lh_inverted

    #end

    var = {"L": d, "C": d}
    s = tf.shape(matrix_placeholder)
    num_vars = {"L": l, "C": m}
    initial_embeddings = {
        v: tf.get_variable(initializer=tf.random_normal((1, d)),
                           dtype=tf.float32,
                           name='{v}_init'.format(v=v))
        for (v, d) in var.items()
    }
    tiled_and_normalized_initial_embeddings = {
        v: tf.tile(tf.div(init, tf.sqrt(tf.cast(var[v], tf.float32))),
                   [num_vars[v], 1])
        for v, init in initial_embeddings.items()
    }

    # Define Graph neural network
    gnn = GraphNN(var, {"M": ("L", "C")}, {
        "Lmsg": ("L", "C"),
        "Cmsg": ("C", "L")
    }, {
        "L": [{
            "fun": INV,
            "var": "L"
        }, {
            "mat": "M",
            "msg": "Cmsg",
            "var": "C"
        }],
        "C": [{
            "mat": "M",
            "transpose?": True,
            "msg": "Lmsg",
            "var": "L"
        }]
    },
                  name="NeuroSAT")

    # Define L_vote
    L_vote_MLP = Mlp(layer_sizes=[d for _ in range(3)],
                     activations=[tf.nn.relu for _ in range(3)],
                     output_size=1,
                     name="L_vote",
                     name_internal_layers=True,
                     kernel_initializer=tf.contrib.layers.xavier_initializer(),
                     bias_initializer=tf.zeros_initializer())

    # Get the last embeddings
    L_n = gnn({"M": matrix_placeholder},
              tiled_and_normalized_initial_embeddings, time_steps)["L"].h
    L_vote = L_vote_MLP(L_n)

    # Reorganize votes' result to obtain a prediction for each problem instance
    def _vote_while_cond(i, p, n_acc, n, n_var_list, predicted_sat, L_vote):
        return tf.less(i, p)

    #end _vote_while_cond

    def _vote_while_body(i, p, n_acc, n, n_var_list, predicted_SAT, L_vote):
        # Helper for the amount of variables in this problem
        i_n = n_var_list[i]
        # Gather the positive and negative literals for that problem
        pos_lits = tf.gather(L_vote, tf.range(n_acc, tf.add(n_acc, i_n)))
        neg_lits = tf.gather(
            L_vote, tf.range(tf.add(n, n_acc), tf.add(n, tf.add(n_acc, i_n))))
        # Concatenate positive and negative literals and average their vote values
        problem_predicted_SAT = tf.reduce_mean(
            tf.concat([pos_lits, neg_lits], axis=1))
        # Update TensorArray
        predicted_SAT = predicted_SAT.write(i, problem_predicted_SAT)
        return tf.add(i, tf.constant(1)), p, tf.add(
            n_acc, i_n), n, n_var_list, predicted_SAT, L_vote

    #end _vote_while_body

    predicted_SAT = tf.TensorArray(size=p, dtype=tf.float32)
    _, _, _, _, _, predicted_SAT, _ = tf.while_loop(
        _vote_while_cond, _vote_while_body, [
            tf.constant(0, dtype=tf.int32), p,
            tf.constant(0, dtype=tf.int32), n, num_vars_on_instance,
            predicted_SAT, L_vote
        ])
    predicted_SAT = predicted_SAT.stack()

    # Define loss, accuracy
    predict_costs = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=instance_SAT, logits=predicted_SAT)
    predict_cost = tf.reduce_mean(predict_costs)
    vars_cost = tf.zeros([])
    tvars = tf.trainable_variables()
    for var in tvars:
        vars_cost = tf.add(vars_cost, tf.nn.l2_loss(var))
    #end for
    loss = tf.add(predict_cost, tf.multiply(vars_cost,
                                            parameter_l2norm_scaling))
    optimizer = tf.train.AdamOptimizer(name="Adam",
                                       learning_rate=learning_rate)
    grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                      global_norm_gradient_clipping_ratio)
    train_step = optimizer.apply_gradients(zip(grads, tvars))

    accuracy = tf.reduce_mean(
        tf.cast(
            tf.equal(tf.cast(instance_SAT, tf.bool),
                     tf.cast(tf.round(tf.nn.sigmoid(predicted_SAT)), tf.bool)),
            tf.float32))

    # Define neurosat dictionary
    neurosat = {}
    neurosat["M"] = matrix_placeholder
    neurosat["time_steps"] = time_steps
    neurosat["gnn"] = gnn
    neurosat["instance_SAT"] = instance_SAT
    neurosat["predicted_SAT"] = predicted_SAT
    neurosat["num_vars_on_instance"] = num_vars_on_instance
    neurosat["loss"] = loss
    neurosat["accuracy"] = accuracy
    neurosat["train_step"] = train_step

    return neurosat
Example #19
def VIN(state_input, state_dim, action_dim, config, weights=None):
    numactions = action_dim
    numstates = config.numstates
    k = config.k
    width = config.width
    assert width % 2 == 1
    ch_i = config.ch_i
    ch_h = config.ch_h
    ch_q = config.ch_q
    hiddenUnits = config.hidden1
    state_batch_size = tf.shape(state_input)[0]

    if weights is None:
        #Weights for each action's reward
        w     = tf.Variable(np.random.randn(width, 1, ch_q) * 0.001, dtype=tf.float32)

        # feedback weights from v layer into q layer (~transition probabilities in Bellman equation)
        w_fb  = tf.Variable(np.random.randn(width, 1, ch_q) * 0.001, dtype=tf.float32)

        #Output weights
        bias1 = tf.Variable(np.random.randn(1, hiddenUnits) * 0.001, dtype=tf.float32)
        w_h1   = tf.Variable(np.random.randn(ch_q + state_dim, hiddenUnits) * 0.001, dtype=tf.float32)
        bias_o = tf.Variable(np.random.randn(1, numactions) * 0.001, dtype=tf.float32)
        w_o = tf.Variable(np.random.randn(hiddenUnits, numactions) * 0.001, dtype=tf.float32)

        #Reward Map
        r = tf.Variable(np.random.randn(config.batchsize, config.numstates, config.ch_i) * 0.001, dtype=tf.float32)
    else:
        w     = weights[0]
        w_fb  = weights[1]
        bias1 = weights[2]
        w_h1   = weights[3]
        bias_o = weights[4]
        w_o = weights[5]
        r = weights[6]

    q = circularConv(r, w)
    v = tf.reduce_max(q, axis=2, keep_dims=True, name="v")
    wwfb = tf.concat([w, w_fb], 1)

    #Value Iteration
    for i in range(0, k-1):
        rv = tf.concat([r, v], 2)
        q = circularConv(rv, wwfb)
        v = tf.reduce_max(q, axis=2, keep_dims=True, name="v")

    # do one last convolution
    q = circularConv(tf.concat([r, v], 2), wwfb) #tf.nn.conv1d(tf.concat([r, v], 2), wwfb, stride=1, padding='SAME', name="q")

    # Select the conv-net channels at the state position

    position = tf.transpose(state_input, perm=[1,0])
    angle = theta(position[0], position[1])
    S1 = tf.cast(tf.floordiv(angle, 2*math.pi / numstates), tf.int32)
    ins1 = tf.zeros(tf.shape(S1), tf.int32)
    idx_in = tf.transpose(tf.stack([ins1, S1]), [1,0])

    #Concat action values to observations
    q_out = tf.gather_nd(q, idx_in, name="q_out")
    inputs = tf.concat([state_input, q_out], axis=1)

    #Output action
    hiddenLayer1 = tf.nn.relu(tf.matmul(inputs, w_h1) + bias1)
    output = tf.nn.tanh(tf.matmul(hiddenLayer1, w_o) + bias_o)
    return state_input, output, [w, w_fb, bias1, w_h1, bias_o, w_o, r]
Example #20
x = tf.add(a, b, name='add')
writer = tf.summary.FileWriter('./graphs/simple', tf.get_default_graph()) 
with tf.Session() as sess:
    # writer = tf.summary.FileWriter('./graphs', sess.graph) 
    print(sess.run(x))
writer.close() # close the writer when you’re done using it

# Example 2: The wonderful wizard of div
a = tf.constant([2, 2], name='a')
b = tf.constant([[0, 1], [2, 3]], name='b')

with tf.Session() as sess:
    print(sess.run(tf.div(b, a)))
    print(sess.run(tf.divide(b, a)))
    print(sess.run(tf.truediv(b, a)))
    print(sess.run(tf.floordiv(b, a)))
    # print(sess.run(tf.realdiv(b, a)))
    print(sess.run(tf.truncatediv(b, a)))
    print(sess.run(tf.floor_div(b, a)))

# Example 3: multiplying tensors
a = tf.constant([10, 20], name='a')
b = tf.constant([2, 3], name='b')

with tf.Session() as sess:
    print(sess.run(tf.multiply(a, b)))
    print(sess.run(tf.tensordot(a, b, 1)))

# Example 4: Python native type
t_0 = 19 
x = tf.zeros_like(t_0) 					# ==> 0
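For the non-negative integers in "Example 2: The wonderful wizard of div" above, tf.div, tf.floordiv, tf.floor_div and tf.truncatediv should all print [[0, 0], [1, 1]], while tf.divide and tf.truediv print [[0., 0.5], [1., 1.5]]. The integer variants only disagree on negative operands, where floor division rounds toward negative infinity and truncating division rounds toward zero:

print(-7 // 2)      # -4: floor division, what tf.floordiv computes
print(int(-7 / 2))  # -3: truncation toward zero, what tf.truncatediv computes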
Example #21
def mean_shift(X, bandwidth, max_iter):

    (m,n) = X.shape
    print m,n
    graph = tf.Graph()
    with graph.as_default():

        with tf.name_scope("input") as scope:
            data = tf.constant(X, name="data_points")
            b = tf.constant(bandwidth,dtype=tf.float32, name="bandwidth")
            m = tf.constant(max_iter, name="maximum_iteration")
            # n_samples = tf.constant(m, name="no_of_samples")
            # n_features = tf.constant(n, name="no_of_features")

        # with tf.name_scope("seeding") as scope:
        #     seed = tf.placeholder(tf.float32, [5], name="seed")

        with tf.name_scope("mean_shifting") as scope:
            old_mean = tf.placeholder(tf.float32, [n], name="old_mean")
            neighbors = tf.placeholder(tf.float32, [None,n], name="neighbors")
            new_mean = tf.reduce_mean(neighbors,0)

            euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(old_mean, new_mean), 2)), name="mean_distance")


        center_intensity_dict = {}
        nbrs = NearestNeighbors(radius=bandwidth).fit(X)

        sess = tf.Session()
        init = tf.initialize_all_variables()
        sess.run(init)
        writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph_def)

        bin_sizes = defaultdict(int)

        data_point = tf.placeholder(tf.float32, [n],"data_point")
        binned_point = tf.floordiv(data_point,b)

        for point in X:
            feed={data_point:point}
            bp = sess.run(binned_point,feed_dict=feed)
            bin_sizes[tuple(bp)] +=1

        bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if freq >= 1], dtype=np.float32)

        bin_seeds = bin_seeds*bandwidth

        print len(bin_seeds)


        j=0

        for x in bin_seeds:
            print "Seed ",j,": ",x
            i = 0
            o_mean=x

            while True:
                i_nbrs = nbrs.radius_neighbors([o_mean], bandwidth, return_distance=False)[0]
                points_within = X[i_nbrs]

                feed = {neighbors: points_within}
                n_mean = sess.run(new_mean, feed_dict=feed)

                feed = {new_mean: n_mean, old_mean: o_mean}
                dist = sess.run(euclid_dist, feed_dict=feed)

                if dist < 1e-3*bandwidth or i==max_iter:
                    center_intensity_dict[tuple(n_mean)] = len(i_nbrs)
                    break
                else:
                    o_mean = n_mean

                print "\t",i,dist,len(i_nbrs)

                i+=1

            # if j>10:
            #     break

            j+=1

        print center_intensity_dict

        sorted_by_intensity = sorted(center_intensity_dict.items(),key=lambda tup: tup[1], reverse=True)
        sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
        unique = np.ones(len(sorted_centers), dtype=np.bool)
        nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
        for i, center in enumerate(sorted_centers):
            if unique[i]:
                neighbor_idxs = nbrs.radius_neighbors([center],return_distance=False)[0]
                unique[neighbor_idxs] = 0
                unique[i] = 1  # leave the current point as unique
        cluster_centers = sorted_centers[unique]

        nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
        labels = np.zeros(154401, dtype=np.int)
        distances, idxs = nbrs.kneighbors(X)

        labels = idxs.flatten()
        return cluster_centers, labels
Example #22
 def test_FloorDiv(self):
     t = tf.floordiv(*self.random((3, 5), (3, 5)))
     self.check(t)
Example #23
def build_attention_model(params,
                          src_vocab,
                          trg_vocab,
                          source_placeholders,
                          target_placeholders,
                          beam_size=1,
                          mode=MODE.TRAIN,
                          burn_in_step=100000,
                          increment_step=10000,
                          teacher_rate=1.0,
                          max_step=100):
    """
    Build a model.

    :param params: dict.
     {encoder: {rnn_cell: {},
                ...},
      decoder: {rnn_cell: {},
                ...}}
      for example:
        {'encoder': {'rnn_cell': {'state_size': 512,
                                   'cell_name': 'BasicLSTMCell',
                                   'num_layers': 2,
                                   'input_keep_prob': 1.0,
                                   'output_keep_prob': 1.0},
                      'attention_key_size': attention_size},
        'decoder':  {'rnn_cell': {'cell_name': 'BasicLSTMCell',
                                   'state_size': 512,
                                   'num_layers': 1,
                                   'input_keep_prob': 1.0,
                                   'output_keep_prob': 1.0},
                      'trg_vocab_size': trg_vocab_size}}
    :param src_vocab: Vocab of source symbols.
    :param trg_vocab: Vocab of target symbols.
    :param source_placeholders: dict of source placeholders
        (ids, sequence lengths, sample matrix, word sequence lengths).
    :param target_placeholders: dict of target placeholders (ids, sequence lengths).
    :param beam_size: used in beam inference
    :param mode:
    :return:
    """
    if mode != MODE.TRAIN:
        params = sq.disable_dropout(params)

    tf.logging.info(json.dumps(params, indent=4))

    decoder_params = params['decoder']
    # parameters
    source_ids = source_placeholders['src']
    source_seq_length = source_placeholders['src_len']
    source_sample_matrix = source_placeholders['src_sample_matrix']
    source_word_seq_length = source_placeholders['src_word_len']

    target_ids = target_placeholders['trg']
    target_seq_length = target_placeholders['trg_len']

    # Because source encoder is different to the target feedback,
    # we construct source_embedding_table manually
    source_char_embedding_table = sq.LookUpOp(src_vocab.vocab_size,
                                              src_vocab.embedding_dim,
                                              name='source')
    source_char_embedded = source_char_embedding_table(source_ids)

    # encode char to word
    char_encoder = sq.StackRNNEncoder(params['char_encoder'],
                                      params['attention_key_size']['char'],
                                      name='char_rnn',
                                      mode=mode)

    # char_encoder_outputs: T_c B F
    char_encoded_representation = char_encoder.encode(source_char_embedded,
                                                      source_seq_length)
    char_encoder_outputs = char_encoded_representation.outputs
    #dynamical_batch_size = tf.shape(char_encoder_outputs)[1]
    #space_indices = tf.where(tf.equal(tf.transpose(source_ids), src_vocab.space_id))
    ##space_indices = tf.transpose(tf.gather_nd(tf.transpose(space_indices), [[1], [0]]))
    #space_indices = tf.concat(tf.split(space_indices, 2, axis=1)[::-1], axis=1)
    #space_indices = tf.transpose(tf.reshape(space_indices, [dynamical_batch_size, -1, 2]),
    #                             [1, 0, 2])
    ## T_w * B * F
    #source_embedded = tf.gather_nd(char_encoder_outputs, space_indices)

    # must be time major
    char_encoder_outputs = tf.transpose(char_encoder_outputs, perm=(1, 0, 2))
    sampled_word_embedded = tf.matmul(source_sample_matrix,
                                      char_encoder_outputs)
    source_embedded = tf.transpose(sampled_word_embedded, perm=(1, 0, 2))

    encoder = sq.StackBidirectionalRNNEncoder(
        params['encoder'],
        params['attention_key_size']['word'],
        name='stack_rnn',
        mode=mode)
    encoded_representation = encoder.encode(source_embedded,
                                            source_word_seq_length)
    attention_keys = encoded_representation.attention_keys
    attention_values = encoded_representation.attention_values
    attention_length = encoded_representation.attention_length
    encoder_final_states_bw = encoded_representation.final_state[-1][-1].h

    # feedback
    if mode == MODE.RL:
        tf.logging.info('BUILDING RL TRAIN FEEDBACK......')
        dynamical_batch_size = tf.shape(attention_keys)[1]
        feedback = sq.RLTrainingFeedBack(target_ids,
                                         target_seq_length,
                                         trg_vocab,
                                         dynamical_batch_size,
                                         burn_in_step=burn_in_step,
                                         increment_step=increment_step,
                                         max_step=max_step)
    elif mode == MODE.TRAIN:

        tf.logging.info('BUILDING TRAIN FEEDBACK WITH {} TEACHER_RATE'
                        '......'.format(teacher_rate))
        feedback = sq.TrainingFeedBack(target_ids,
                                       target_seq_length,
                                       trg_vocab,
                                       teacher_rate,
                                       max_step=max_step)
    elif mode == MODE.EVAL:
        tf.logging.info('BUILDING EVAL FEEDBACK ......')
        feedback = sq.TrainingFeedBack(target_ids,
                                       target_seq_length,
                                       trg_vocab,
                                       0.,
                                       max_step=max_step)
    else:
        tf.logging.info('BUILDING INFER FEEDBACK WITH BEAM_SIZE {}'
                        '......'.format(beam_size))
        infer_key_size = attention_keys.get_shape().as_list()[-1]
        infer_value_size = attention_values.get_shape().as_list()[-1]
        infer_states_bw_shape = encoder_final_states_bw.get_shape().as_list(
        )[-1]

        encoder_final_states_bw = tf.reshape(
            tf.tile(encoder_final_states_bw, [1, beam_size]),
            [-1, infer_states_bw_shape])

        # expand beam
        if TIME_MAJOR:
            # batch size should be dynamical
            dynamical_batch_size = tf.shape(attention_keys)[1]
            final_key_shape = [
                -1, dynamical_batch_size * beam_size, infer_key_size
            ]
            final_value_shape = [
                -1, dynamical_batch_size * beam_size, infer_value_size
            ]
            attention_keys = tf.reshape(
                (tf.tile(attention_keys, [1, 1, beam_size])), final_key_shape)
            attention_values = tf.reshape(
                (tf.tile(attention_values, [1, 1, beam_size])),
                final_value_shape)
        else:
            dynamical_batch_size = tf.shape(attention_keys)[0]
            final_key_shape = [
                dynamical_batch_size * beam_size, -1, infer_key_size
            ]
            final_value_shape = [
                dynamical_batch_size * beam_size, -1, infer_value_size
            ]
            attention_keys = tf.reshape(
                (tf.tile(attention_keys, [1, beam_size, 1])), final_key_shape)
            attention_values = tf.reshape(
                (tf.tile(attention_values, [1, beam_size, 1])),
                final_value_shape)

        attention_length = tf.reshape(
            tf.transpose(tf.tile([attention_length], [beam_size, 1])), [-1])

        feedback = sq.BeamFeedBack(trg_vocab,
                                   beam_size,
                                   dynamical_batch_size,
                                   max_step=max_step)

    encoder_decoder_bridge = EncoderDecoderBridge(
        encoder_final_states_bw.get_shape().as_list()[-1],
        decoder_params['rnn_cell'])
    decoder_state_size = decoder_params['rnn_cell']['state_size']
    # attention
    attention = sq.Attention(decoder_state_size, attention_keys,
                             attention_values, attention_length)
    context_size = attention.context_size

    with tf.variable_scope('logits_func'):
        attention_mix = LinearOp(context_size + feedback.embedding_dim +
                                 decoder_state_size,
                                 decoder_state_size,
                                 name='attention_mix')
        attention_mix_middle = LinearOp(decoder_state_size,
                                        decoder_state_size // 2,
                                        name='attention_mix_middle')
        logits_trans = LinearOp(decoder_state_size // 2,
                                feedback.vocab_size,
                                name='logits_trans')
        logits_func = lambda _softmax: logits_trans(
            tf.nn.relu(
                attention_mix_middle(tf.nn.relu(attention_mix(_softmax)))))

    # decoder
    decoder = sq.AttentionRNNDecoder(
        decoder_params,
        attention,
        feedback,
        logits_func=logits_func,
        init_state=encoder_decoder_bridge(encoder_final_states_bw),
        mode=mode)
    decoder_output, decoder_final_state = sq.dynamic_decode(decoder,
                                                            swap_memory=True,
                                                            scope='decoder')

    # not training
    if mode == MODE.EVAL or mode == MODE.INFER:
        return decoder_output, decoder_final_state

    # bos is added in feedback
    # so target_ids is predict_ids
    if not TIME_MAJOR:
        ground_truth_ids = tf.transpose(target_ids, [1, 0])
    else:
        ground_truth_ids = target_ids

    # construct the loss
    if mode == MODE.RL:
        # Creates a variable to hold the global_step.
        global_step_tensor = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                               scope='global_step')[0]
        rl_time_steps = tf.floordiv(
            tf.maximum(global_step_tensor - burn_in_step, 0), increment_step)
        start_rl_step = target_seq_length - rl_time_steps

        baseline_states = tf.stop_gradient(decoder_output.baseline_states)
        predict_ids = tf.stop_gradient(decoder_output.predicted_ids)

        # TODO: bug in tensorflow
        ground_or_predict_ids = tf.cond(tf.greater(rl_time_steps,
                                                   0), lambda: predict_ids,
                                        lambda: ground_truth_ids)

        reward, sequence_length = tf.py_func(
            func=_py_func,
            inp=[ground_or_predict_ids, ground_truth_ids, trg_vocab.eos_id],
            Tout=[tf.float32, tf.int32],
            name='reward')
        sequence_length.set_shape((None, ))

        total_loss_avg, entropy_loss_avg, reward_loss_rmse, reward_predicted \
            = rl_sequence_loss(
            logits=decoder_output.logits,
            predict_ids=predict_ids,
            sequence_length=sequence_length,
            baseline_states=baseline_states,
            start_rl_step=start_rl_step,
            reward=reward)
        return decoder_output, total_loss_avg, entropy_loss_avg, \
               reward_loss_rmse, reward_predicted
    else:
        total_loss_avg = cross_entropy_sequence_loss(
            logits=decoder_output.logits,
            targets=ground_truth_ids,
            sequence_length=target_seq_length)
        return decoder_output, total_loss_avg, total_loss_avg, \
               tf.to_float(0.), tf.to_float(0.)
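rl_time_steps grows by one every increment_step steps once global_step passes burn_in_step, and start_rl_step = target_seq_length - rl_time_steps moves the RL portion backwards from the end of the sequence. A plain-Python trace (burn-in and increment match the function defaults; the target length is illustrative):

burn_in_step, increment_step, target_seq_length = 100000, 10000, 30
for global_step in (50000, 100000, 110000, 150000):
    rl_time_steps = max(global_step - burn_in_step, 0) // increment_step
    print(global_step, "->", rl_time_steps, "start_rl_step", target_seq_length - rl_time_steps)
# e.g. 110000 -> 1 start_rl_step 29, 150000 -> 5 start_rl_step 25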
Example #24
def generate_detections_per_image_op(
    cls_outputs, box_outputs, anchor_boxes, image_id, image_info,
    num_detections=100, pre_nms_num_detections=1000, nms_threshold=0.3,
    bbox_reg_weights=(10., 10., 5., 5.)):
  """Generates detections with model outputs and anchors.

  Args:
    cls_outputs: a Tensor with shape [N, num_classes], which stacks class
      logit outputs on all feature levels. The N is the number of total anchors
      on all levels. The num_classes is the number of classes predicted by the
      model. Note that the cls_outputs should be the output of softmax().
    box_outputs: a Tensor with shape [N, num_classes*4], which stacks
      box regression outputs on all feature levels. The N is the number of total
      anchors on all levels.
    anchor_boxes: a Tensor with shape [N, 4], which stacks anchors on all
      feature levels. The N is the number of total anchors on all levels.
    image_id: an integer number to specify the image id.
    image_info: a tensor of shape [5] which encodes the input image's [height,
      width, scale, original_height, original_width]
    num_detections: Number of detections after NMS.
    pre_nms_num_detections: Number of candidates before NMS.
    nms_threshold: a float number to specify the threshold of NMS.
    bbox_reg_weights: a list of 4 float scalars, which are default weights on
      (dx, dy, dw, dh) for normalizing bbox regression targets.
  Returns:
    detections: detection results in a tensor with each row representing
      [image_id, ymin, xmin, ymax, xmax, score, class]
  """
  num_boxes, num_classes = cls_outputs.get_shape().as_list()

  # Removes background class scores.
  cls_outputs = cls_outputs[:, 1:num_classes]
  top_k_scores, top_k_indices_with_classes = tf.nn.top_k(
      tf.reshape(cls_outputs, [-1]),
      k=pre_nms_num_detections,
      sorted=True)
  classes = tf.mod(top_k_indices_with_classes, num_classes - 1)
  top_k_indices = tf.floordiv(top_k_indices_with_classes, num_classes - 1)

  anchor_boxes = tf.gather(anchor_boxes, top_k_indices)
  box_outputs = tf.reshape(
      box_outputs, [num_boxes, num_classes, 4])[:, 1:num_classes, :]
  box_outputs = tf.gather_nd(box_outputs,
                             tf.stack([top_k_indices, classes], axis=1))

  # Applies bounding box regression to anchors.
  boxes = box_utils.batch_decode_box_outputs_op(
      tf.expand_dims(anchor_boxes, axis=0),
      tf.expand_dims(box_outputs, axis=0),
      bbox_reg_weights)[0]
  boxes = box_utils.clip_boxes(
      tf.expand_dims(boxes, axis=0), tf.expand_dims(image_info[:2], axis=0))[0]

  classes = tf.tile(tf.reshape(classes, [1, pre_nms_num_detections]),
                    [num_classes - 1, 1])
  scores = tf.tile(tf.reshape(top_k_scores, [1, pre_nms_num_detections]),
                   [num_classes - 1, 1])
  boxes = tf.tile(tf.reshape(boxes, [1, pre_nms_num_detections, 4]),
                  [num_classes - 1, 1, 1])

  class_bitmask = tf.tile(
      tf.reshape(tf.range(num_classes-1), [num_classes - 1, 1]),
      [1, pre_nms_num_detections])
  scores = tf.where(tf.equal(classes, class_bitmask), scores,
                    tf.zeros_like(scores))
  scores = tf.where(tf.greater(scores, 0.05), scores, tf.zeros_like(scores))
  # Reshape classes to be compatible with the top_k function.
  classes = tf.reshape(classes, [num_classes -1, pre_nms_num_detections, 1])
  scores, sorted_tensors = box_utils.top_k(
      scores, k=pre_nms_num_detections, tensors=[boxes, classes])
  boxes = sorted_tensors[0]
  classes = tf.reshape(sorted_tensors[1],
                       [num_classes - 1, pre_nms_num_detections])

  (post_nms_scores,
   post_nms_boxes, idx) = non_max_suppression.non_max_suppression_padded(
       scores, boxes, max_output_size=num_detections,
       iou_threshold=nms_threshold, level=0)

  # Sorts all results.
  sorted_scores, sorted_indices = tf.nn.top_k(
      tf.to_float(tf.reshape(post_nms_scores, [-1])),
      k=num_detections,
      sorted=True)
  post_nms_boxes = tf.gather(tf.reshape(post_nms_boxes, [-1, 4]),
                             sorted_indices)
  classes = tf.batch_gather(classes, idx)
  post_nms_classes = tf.gather(tf.reshape(classes, [-1]), sorted_indices) + 1

  if isinstance(image_id, int):
    image_id = tf.constant(image_id)
  image_id = tf.reshape(image_id, [])
  detections_result = tf.stack(
      [
          tf.to_float(tf.fill(tf.shape(sorted_scores), image_id)),
          post_nms_boxes[:, 0],
          post_nms_boxes[:, 1],
          post_nms_boxes[:, 2],
          post_nms_boxes[:, 3],
          sorted_scores,
          tf.to_float(post_nms_classes),
      ],
      axis=1)
  return detections_result
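After the [N, num_classes - 1] score matrix is flattened, each top-k index decomposes back into an anchor index (floordiv by num_classes - 1) and a foreground class (mod by num_classes - 1). A small standalone check (3 foreground classes, illustrative):

num_foreground = 3  # num_classes - 1 after dropping the background column
for flat_index in (0, 2, 3, 7):
    anchor = flat_index // num_foreground  # tf.floordiv(top_k_indices_with_classes, ...)
    cls = flat_index % num_foreground      # tf.mod(top_k_indices_with_classes, ...)
    print(flat_index, "-> anchor", anchor, "class", cls)
# 7 -> anchor 2, class 1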
def generate_detections_per_image_tpu(cls_outputs,
                                      box_outputs,
                                      anchor_boxes,
                                      image_id,
                                      image_info,
                                      pre_nms_num_detections=1000,
                                      post_nms_num_detections=100,
                                      nms_threshold=0.3,
                                      bbox_reg_weights=(10., 10., 5., 5.)):
    """Generate the final detections per image given the model outputs.

  Args:
    cls_outputs: a tensor with shape [N, num_classes], which stacks class
      logit outputs on all feature levels. N is the total number of anchors on
      all levels and num_classes is the number of classes predicted by the
      model. Note that cls_outputs should be the output of softmax().
    box_outputs: a tensor with shape [N, num_classes*4], which stacks box
      regression outputs on all feature levels. N is the total number of
      anchors on all levels.
    anchor_boxes: a tensor with shape [N, 4], which stacks anchors on all
      feature levels. N is the total number of anchors on all levels.
    image_id: an integer number to specify the image id.
    image_info: a tensor of shape [5] which encodes the input image's [height,
      width, scale, original_height, original_width]
    pre_nms_num_detections: an integer that specifies the number of candidates
      before NMS.
    post_nms_num_detections: an integer that specifies the number of candidates
      after NMS.
    nms_threshold: a float number to specify the IOU threshold of NMS.
    bbox_reg_weights: a list of 4 float scalars, which are default weights on
      (dx, dy, dw, dh) for normalizing bbox regression targets.

  Returns:
    detections: detection results in a tensor with each row representing
      [image_id, ymin, xmin, ymax, xmax, score, class]
  """
    num_boxes, num_classes = cls_outputs.get_shape().as_list()

    # Remove background class scores.
    cls_outputs = cls_outputs[:, 1:num_classes]
    top_k_scores, top_k_indices_with_classes = tf.nn.top_k(
        tf.reshape(cls_outputs, [-1]), k=pre_nms_num_detections, sorted=False)
    classes = tf.mod(top_k_indices_with_classes, num_classes - 1)
    top_k_indices = tf.floordiv(top_k_indices_with_classes, num_classes - 1)

    anchor_boxes = tf.gather(anchor_boxes, top_k_indices)
    box_outputs = tf.reshape(box_outputs,
                             [num_boxes, num_classes, 4])[:, 1:num_classes, :]
    class_indices = classes
    box_outputs = tf.gather_nd(
        box_outputs, tf.stack([top_k_indices, class_indices], axis=1))

    # apply bounding box regression to anchors
    boxes = box_utils.decode_boxes(box_outputs, anchor_boxes, bbox_reg_weights)
    boxes = box_utils.clip_boxes(boxes, image_info[0], image_info[1])

    list_of_all_boxes = []
    list_of_all_scores = []
    list_of_all_classes = []
    # Skip background class.
    for class_i in range(num_classes):
        # Compute bitmask for the given classes.
        class_i_bitmask = tf.cast(tf.equal(classes, class_i),
                                  top_k_scores.dtype)
        # This works because score is in [0, 1].
        class_i_scores = top_k_scores * class_i_bitmask
        # The TPU and CPU have different behaviors for
        # tf.image.non_max_suppression_padded (b/116754376).
        (class_i_post_nms_indices,
         class_i_nms_num_valid) = tf.image.non_max_suppression_padded(
             tf.to_float(boxes),
             tf.to_float(class_i_scores),
             post_nms_num_detections,
             iou_threshold=nms_threshold,
             score_threshold=0.05,
             pad_to_max_output_size=True,
             name='nms_detections_' + str(class_i))
        class_i_post_nms_boxes = tf.gather(boxes, class_i_post_nms_indices)
        class_i_post_nms_scores = tf.gather(class_i_scores,
                                            class_i_post_nms_indices)
        mask = tf.less(tf.range(post_nms_num_detections),
                       [class_i_nms_num_valid])
        class_i_post_nms_scores = tf.where(
            mask, class_i_post_nms_scores,
            tf.zeros_like(class_i_post_nms_scores))
        class_i_classes = tf.fill(tf.shape(class_i_post_nms_scores),
                                  class_i + 1)
        list_of_all_boxes.append(class_i_post_nms_boxes)
        list_of_all_scores.append(class_i_post_nms_scores)
        list_of_all_classes.append(class_i_classes)

    post_nms_boxes = tf.concat(list_of_all_boxes, axis=0)
    post_nms_scores = tf.concat(list_of_all_scores, axis=0)
    post_nms_classes = tf.concat(list_of_all_classes, axis=0)

    # sort all results.
    post_nms_scores, sorted_indices = tf.nn.top_k(tf.to_float(post_nms_scores),
                                                  k=post_nms_num_detections,
                                                  sorted=True)

    post_nms_boxes = tf.gather(post_nms_boxes, sorted_indices)
    post_nms_classes = tf.gather(post_nms_classes, sorted_indices)

    if isinstance(image_id, int):
        image_id = tf.constant(image_id)
    image_id = tf.reshape(image_id, [])
    detections_result = tf.stack([
        tf.to_float(tf.fill(tf.shape(post_nms_scores), image_id)),
        post_nms_boxes[:, 0],
        post_nms_boxes[:, 1],
        post_nms_boxes[:, 2],
        post_nms_boxes[:, 3],
        post_nms_scores,
        tf.to_float(post_nms_classes),
    ],
                                 axis=1)
    return detections_result
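
# A minimal standalone sketch (plain Python, made-up numbers) of the
# floordiv/mod pair used above to recover (anchor index, class index) from the
# flattened top-k index: flattening a [N, num_classes - 1] score matrix
# row-major means flat_index = anchor_index * (num_classes - 1) + class_index.
num_classes = 4            # assumed: 1 background + 3 foreground classes
flat_index = 10            # assumed position returned by top_k
anchor_index = flat_index // (num_classes - 1)   # -> 3
class_index = flat_index % (num_classes - 1)     # -> 1
assert flat_index == anchor_index * (num_classes - 1) + class_index
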
Example #26
0
import tensorflow as tf

a = tf.constant(0, dtype=tf.float32, name="a")
b = tf.constant(1, dtype=tf.float32, name="b")
c = tf.constant(2, dtype=tf.float32, name="c")

with tf.control_dependencies([a, c]):
    d = tf.add(a, b, name="d")
    e = tf.subtract(b, c, name="e")
    f = tf.multiply(d, e, name="f")
    with tf.control_dependencies([d, f]):
        g = tf.floordiv(b, d, name="g")

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)

    writer = tf.summary.FileWriter("logs", sess.graph)
    print(g.eval())
    writer.close()
    print()

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 16, 16, 3], name='x')
y = tf.placeholder(tf.float32, shape=[None, 16, 16, 3], name='y')
xl = tf.placeholder(tf.bool, shape=[None, 16, 16, 3], name='xl')
yl = tf.placeholder(tf.bool, shape=[None, 16, 16, 3], name='yl')
c = tf.placeholder(tf.float32, shape=[1], name='c')
b = tf.placeholder(tf.bool, shape=[1], name='b')

g = tf.greater(x, y, name='g')
ge = tf.greater_equal(x, y, name='ge')
l = tf.less(x, y, name='l')
le = tf.less_equal(x, y, name='l3')
ne = tf.not_equal(x, y, name='ne')
p = tf.pow(x, y)
fd = tf.floordiv(x, y, name='fd')
lo = tf.logical_or(xl, yl, name='lo')
s = tf.where(b, x, y, name='s')
m = tf.minimum(x, y, name='m')

sess = tf.Session()
sess.run(tf.global_variables_initializer())

c_g = tf.contrib.lite.TFLiteConverter.from_session(sess, [x, y], [g])
c_g_lm = c_g.convert()
open('greater.tflite', 'wb').write(c_g_lm)

c_ge = tf.contrib.lite.TFLiteConverter.from_session(sess, [x, y], [ge])
c_ge_lm = c_ge.convert()
open('greater_equal.tflite', 'wb').write(c_ge_lm)
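
# A hedged sketch of reading one of the converted files back, using the same
# TF 1.x contrib.lite API as above; the file name and random inputs are only
# an illustration, and it assumes the converter fixed the unknown batch
# dimension to 1 (otherwise interpreter.resize_tensor_input would be needed).
import numpy as np

interpreter = tf.contrib.lite.Interpreter(model_path='greater.tflite')
interpreter.allocate_tensors()
for detail in interpreter.get_input_details():
    interpreter.set_tensor(detail['index'],
                           np.random.rand(1, 16, 16, 3).astype(np.float32))
interpreter.invoke()
out = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])
print(out.shape)  # elementwise greater(x, y), so the input shape is preserved
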
Example #28
0
def hier_homography_fmask_estimator(color_inputs,
                                    num_param=8,
                                    num_layer=7,
                                    num_level=3,
                                    dropout_keep_prob=0.8,
                                    reuse=None,
                                    is_training=True,
                                    trainable=True,
                                    scope='hier_hmg'):
    """A hierarchical neural network with mask for homograhy estimation.

  Args:
    color_inputs: batch of input image pairs of data type float32 and of shape
      [batch_size, height, width, 6]
    num_param: the number of parameters for homography (default 8)
    num_layer: the number of convolutional layers in the motion feature network
    num_level: the number of hierarchical levels
    dropout_keep_prob: the percentage of activation values that are kept
    reuse: whether to reuse this network weights
    is_training: whether used for training or testing
    trainable: whether this network is to be trained or not
    scope: the scope of variables in this function

  Returns:
    a list of homographies at each level and motion feature maps if
    final_endpoint='mfeature'; otherwise a list of images warped by the list of
    corresponding homographies
  """
    _, h_input, w_input = color_inputs.get_shape().as_list()[0:3]
    vgg_inputs = (color_inputs[Ellipsis, 3:6] * 256 + 128) - VGG_MEANS

    with slim.arg_scope([slim.conv2d, slim.max_pool2d], padding='SAME'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            trainable=False):
            with slim.arg_scope([slim.conv2d], normalizer_fn=None):
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    sfeature, _ = vgg.vgg_16(vgg_inputs,
                                             1000,
                                             predictions_fn=slim.softmax,
                                             global_pool=False,
                                             is_training=False,
                                             reuse=reuse,
                                             spatial_squeeze=True,
                                             final_endpoint='pool5',
                                             scope='vgg_16')

    gray_image1 = tf.image.rgb_to_grayscale(color_inputs[Ellipsis, 0:3])
    gray_image2 = tf.image.rgb_to_grayscale(color_inputs[Ellipsis, 3:6])
    inputs = tf.concat([gray_image1, gray_image2], 3)

    hmgs_list = []
    warped_list = []
    with tf.variable_scope(scope, [inputs], reuse=reuse):
        for level_index in range(num_level):
            scale = 2**(num_level - 1 - level_index)
            h = tf.to_float(tf.floordiv(h_input, scale))
            w = tf.to_float(tf.floordiv(w_input, scale))
            inputs_il = tf.image.resize_images(inputs, tf.to_int32([h, w]))
            if level_index == 0:
                mfeature = hier_base_layers(inputs_il,
                                            num_layer + 1 - num_level +
                                            level_index,
                                            level_index,
                                            is_training=is_training,
                                            trainable=trainable)
                hmgs_il = homography_regression(
                    mfeature,
                    num_param,
                    level_index,
                    dropout_keep_prob=dropout_keep_prob,
                    is_training=is_training,
                    trainable=trainable)
                hmgs_list.append(hmgs_il)
            else:
                warped, _ = hmg_util.homography_scale_warp_per_batch(
                    inputs_il[:, :, :, 0], w / 2, h / 2,
                    hmgs_list[level_index - 1])
                pre_warped_inputs_il = tf.stack(
                    [warped, inputs_il[:, :, :, 1]], -1)
                warped_list.append(pre_warped_inputs_il)
                mfeature = hier_base_layers(pre_warped_inputs_il,
                                            num_layer + 1 - num_level +
                                            level_index,
                                            level_index,
                                            is_training=is_training,
                                            trainable=trainable)
                if level_index == num_level - 1:
                    mfeature = fmask_layers_semantic(mfeature,
                                                     sfeature,
                                                     level_index,
                                                     is_training=is_training,
                                                     trainable=trainable)
                hmgs_il = homography_regression(
                    mfeature,
                    num_param,
                    level_index,
                    dropout_keep_prob=dropout_keep_prob,
                    is_training=is_training,
                    trainable=trainable)
                new_hmgs_il = hmg_util.homography_shift_mult_batch(
                    hmgs_list[level_index - 1], w / 2, h / 2, hmgs_il, w, h, w,
                    h)
                hmgs_list.append(new_hmgs_il)
    return hmgs_list, warped_list
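
# A small plain-Python sketch (hypothetical input size) of the coarse-to-fine
# pyramid used above: level 0 sees the most downscaled image and the last
# level sees roughly the full resolution.
h_input, w_input, num_level = 128, 192, 3   # assumed values
for level_index in range(num_level):
    scale = 2 ** (num_level - 1 - level_index)
    print(level_index, h_input // scale, w_input // scale)
# 0 32 48
# 1 64 96
# 2 128 192
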
Example #29
0
        def beam_step(beam_seqs, beam_probs, cand_seqs, cand_probs, states,
                      time):
            '''
            beam_seqs : [beam_size, time]
            beam_probs: [beam_size, ]
            cand_seqs : [beam_size, time]
            cand_probs: [beam_size, ]
            states : [beam_size * hidden_size, beam_size * hidden_size]
            '''
            inputs = tf.reshape(tf.slice(beam_seqs, [0, time], [beam_size, 1]),
                                [beam_size])
            # print inputs.get_shape().as_list()
            x_t = tf.nn.embedding_lookup(self.embedding, inputs)
            # print(x_t.get_shape().as_list())
            o_t, s_nt = self.dec_lstm(x_t, states)
            o_t, w_t = self.att_layer(o_t)
            o_t = self.dec_out(o_t)
            logprobs2d = tf.nn.log_softmax(o_t)
            print(logprobs2d.get_shape().as_list())
            total_probs = logprobs2d + tf.reshape(beam_probs, [-1, 1])
            print(total_probs.get_shape().as_list())
            total_probs_noEOS = tf.concat([
                tf.slice(total_probs, [0, 0], [beam_size, self.stop_token]),
                tf.tile([[-3e38]], [beam_size, 1]),
                tf.slice(total_probs, [0, self.stop_token + 1],
                         [beam_size, self.target_vocab - self.stop_token - 1])
            ], 1)
            print(total_probs_noEOS.get_shape().as_list())
            flat_total_probs = tf.reshape(total_probs_noEOS, [-1])
            print(flat_total_probs.get_shape().as_list())

            beam_k = tf.minimum(tf.size(flat_total_probs), beam_size)
            next_beam_probs, top_indices = tf.nn.top_k(flat_total_probs,
                                                       k=beam_k)
            print(next_beam_probs.get_shape().as_list())

            next_bases = tf.floordiv(top_indices, self.target_vocab)
            next_mods = tf.mod(top_indices, self.target_vocab)
            print(next_mods.get_shape().as_list())

            next_beam_seqs = tf.concat([
                tf.gather(beam_seqs, next_bases),
                tf.reshape(next_mods, [-1, 1])
            ], 1)
            next_states = (tf.gather(s_nt[0], next_bases),
                           tf.gather(s_nt[1], next_bases))
            print(next_beam_seqs.get_shape().as_list())

            cand_seqs_pad = tf.pad(cand_seqs, [[0, 0], [0, 1]])
            beam_seqs_EOS = tf.pad(beam_seqs, [[0, 0], [0, 1]])
            new_cand_seqs = tf.concat([cand_seqs_pad, beam_seqs_EOS], 0)
            print(new_cand_seqs.get_shape().as_list())

            EOS_probs = tf.slice(total_probs, [0, self.stop_token],
                                 [beam_size, 1])
            new_cand_probs = tf.concat(
                [cand_probs, tf.reshape(EOS_probs, [-1])], 0)
            cand_k = tf.minimum(tf.size(new_cand_probs), self.beam_size)
            next_cand_probs, next_cand_indices = tf.nn.top_k(new_cand_probs,
                                                             k=cand_k)
            next_cand_seqs = tf.gather(new_cand_seqs, next_cand_indices)

            return next_beam_seqs, next_beam_probs, next_cand_seqs, next_cand_probs, next_states, time + 1
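
# A minimal sketch (plain Python, made-up numbers) of the floordiv/mod decode
# in beam_step above: the flattened [beam_size * target_vocab] score vector is
# laid out row-major, so index = beam * target_vocab + token.
target_vocab = 1000                  # assumed vocabulary size
top_index = 2345                     # assumed index returned by top_k
beam = top_index // target_vocab     # -> 2, which hypothesis is extended
token = top_index % target_vocab     # -> 345, which word is appended
assert top_index == beam * target_vocab + token
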
Example #30
0
    def __call__(self, inputs, state, scope=None):
        (
            past_cand_symbols, # [batch_size, max_len]
            past_cand_logprobs,# [batch_size]
            past_beam_symbols, # [batch_size*self.beam_size, max_len], right-aligned!!!
            past_beam_logprobs,# [batch_size*self.beam_size]
            past_cell_state,
                ) = state

        batch_size = tf.shape(past_cand_logprobs)[0] # TODO: get as int, if possible

        full_size = batch_size * self.beam_size

        cell_inputs = inputs
        cell_outputs, raw_cell_state = self.cell(cell_inputs, past_cell_state)

        logprobs = tf.nn.log_softmax(cell_outputs)

        logprobs_batched = tf.reshape(logprobs + tf.expand_dims(past_beam_logprobs, 1),
                                      [-1, self.beam_size * self.num_classes])
        logprobs_batched.set_shape((None, self.beam_size * self.num_classes))

        # prints and asserts
        tf.assert_less_equal(logprobs, 0.0)
        tf.assert_less_equal(past_beam_logprobs, 0.0)

        masked_logprobs = tf.reshape(logprobs_batched, [-1, self.beam_size * self.num_classes])
        # print masked_logprobs.get_shape()

        beam_logprobs, indices = tf.nn.top_k(
            masked_logprobs,
            self.beam_size
        )

        beam_logprobs = tf.reshape(beam_logprobs, [-1])

        # For continuing to the next symbols
        symbols = indices % self.num_classes # [batch_size, self.beam_size]
        parent_refs = tf.reshape(indices // self.num_classes, [-1]) # [batch_size*self.beam_size]

        # TODO: this technically doesn't need to be recalculated every loop
        parent_refs_offsets = tf.mul(tf.floordiv(tf.range(full_size), self.beam_size), self.beam_size)
        parent_refs = parent_refs + parent_refs_offsets

        if past_beam_symbols is not None:
            symbols_history = tf.gather(past_beam_symbols, parent_refs)
            beam_symbols = tf.concat(1, [tf.reshape(symbols, [-1, 1]), symbols_history])
        else:
            beam_symbols = tf.reshape(symbols, [-1, 1])

        # Above ends up outputting reversed. Below doesn't work though because tf doesn't support negative indexing.
        # last = past_beam_symbols.get_shape()[1]
        # symbols_history = tf.gather(past_beam_symbols[:,last - 1], parent_refs)
        # beam_symbols = tf.concat(1, [past_beam_symbols[:,:last-1], tf.reshape(symbols_history, [-1, 1]), tf.reshape(symbols, [-1, 1]), ])

        # Handle the output and the cell state shuffling
        outputs = tf.reshape(symbols, [-1]) # [batch_size*beam_size, 1]
        cell_state = nest_map(
            lambda element: tf.gather(element, parent_refs),
            raw_cell_state
        )

        # Handling for getting a done token
        # logprobs_done = tf.reshape(logprobs_batched, [-1, self.beam_size, self.num_classes])[:,:,self.stop_token]
        # done_parent_refs = tf.to_int32(tf.argmax(logprobs_done, 1))
        # done_parent_refs_offsets = tf.range(batch_size) * self.beam_size
        # done_symbols = tf.gather(past_beam_symbols, done_parent_refs + done_parent_refs_offsets)

        # logprobs_done_max = tf.reduce_max(logprobs_done, 1)
        # cand_symbols = tf.select(logprobs_done_max > past_cand_logprobs,
        #                         done_symbols,
        #                         past_cand_symbols)
        # cand_logprobs = tf.maximum(logprobs_done_max, past_cand_logprobs)
        cand_symbols = past_cand_symbols # current last symbol in the beam [batch_size*self.beam_size]
        cand_logprobs = past_cand_logprobs

        return outputs, (
            cand_symbols,
            cand_logprobs,
            beam_symbols,
            beam_logprobs,
            cell_state,
        )
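
# A sketch (plain Python, assumed sizes) of parent_refs_offsets above: each of
# the batch_size * beam_size rows is offset to the start of its batch's block,
# so the per-batch parent indices returned by top_k become flat gather indices.
batch_size, beam_size = 2, 3
full_size = batch_size * beam_size
offsets = [(i // beam_size) * beam_size for i in range(full_size)]
print(offsets)   # [0, 0, 0, 3, 3, 3]
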
Example #31
0
        def beam_init():
            # return beam_seqs_1 beam_probs_1 cand_seqs_1 cand_prob_1 next_states time
            time_1 = tf.constant(1, dtype=tf.int32)
            beam_seqs_0 = tf.constant([[self.start_token]] * beam_size)
            beam_probs_0 = tf.constant([0.] * beam_size)

            cand_seqs_0 = tf.constant([[self.start_token]])
            cand_probs_0 = tf.constant([-3e38])

            beam_seqs_0._shape = tf.TensorShape((None, None))
            beam_probs_0._shape = tf.TensorShape((None, ))
            cand_seqs_0._shape = tf.TensorShape((None, None))
            cand_probs_0._shape = tf.TensorShape((None, ))

            inputs = [self.start_token]
            x_t = tf.nn.embedding_lookup(self.embedding, inputs)
            print(x_t.get_shape().as_list())
            o_t, s_nt = self.dec_lstm(x_t, initial_state)
            o_t, w_t = self.att_layer(o_t)
            o_t = self.dec_out(o_t)
            print(s_nt[0].get_shape().as_list())
            # initial_state = tf.reshape(initial_state, [1,-1])
            logprobs2d = tf.nn.log_softmax(o_t)
            total_probs = logprobs2d + tf.reshape(beam_probs_0, [-1, 1])
            total_probs_noEOS = tf.concat([
                tf.slice(total_probs, [0, 0], [1, self.stop_token]),
                tf.tile([[-3e38]], [1, 1]),
                tf.slice(total_probs, [0, self.stop_token + 1],
                         [1, self.target_vocab - self.stop_token - 1])
            ], 1)
            flat_total_probs = tf.reshape(total_probs_noEOS, [-1])
            print(flat_total_probs.get_shape().as_list())

            beam_k = tf.minimum(tf.size(flat_total_probs), beam_size)
            next_beam_probs, top_indices = tf.nn.top_k(flat_total_probs,
                                                       k=beam_k)

            next_bases = tf.floordiv(top_indices, self.target_vocab)
            next_mods = tf.mod(top_indices, self.target_vocab)

            next_beam_seqs = tf.concat([
                tf.gather(beam_seqs_0, next_bases),
                tf.reshape(next_mods, [-1, 1])
            ], 1)

            cand_seqs_pad = tf.pad(cand_seqs_0, [[0, 0], [0, 1]])
            beam_seqs_EOS = tf.pad(beam_seqs_0, [[0, 0], [0, 1]])
            new_cand_seqs = tf.concat([cand_seqs_pad, beam_seqs_EOS], 0)
            print(new_cand_seqs.get_shape().as_list())

            EOS_probs = tf.slice(total_probs, [0, self.stop_token],
                                 [beam_size, 1])
            new_cand_probs = tf.concat(
                [cand_probs_0, tf.reshape(EOS_probs, [-1])], 0)
            cand_k = tf.minimum(tf.size(new_cand_probs), self.beam_size)
            next_cand_probs, next_cand_indices = tf.nn.top_k(new_cand_probs,
                                                             k=cand_k)
            next_cand_seqs = tf.gather(new_cand_seqs, next_cand_indices)

            part_state_0 = tf.reshape(tf.stack([s_nt[0]] * beam_size),
                                      [beam_size, self.hidden_size])
            part_state_1 = tf.reshape(tf.stack([s_nt[1]] * beam_size),
                                      [beam_size, self.hidden_size])
            part_state_0._shape = tf.TensorShape((None, None))
            part_state_1._shape = tf.TensorShape((None, None))
            next_states = (part_state_0, part_state_1)
            print(next_states[0].get_shape().as_list())
            return next_beam_seqs, next_beam_probs, next_cand_seqs, next_cand_probs, next_states, time_1
Example #32
0
def build_attention_model(params,
                          src_vocab,
                          trg_vocab,
                          source_ids,
                          source_seq_length,
                          target_ids,
                          target_seq_length,
                          beam_size=1,
                          mode=MODE.TRAIN,
                          burn_in_step=100000,
                          increment_step=10000,
                          teacher_rate=1.0,
                          max_step=100):
    """
    Build a model.

    :param params: dict.
     {encoder: {rnn_cell: {},
                ...},
      decoder: {rnn_cell: {},
                ...}}
      for example:
        {'encoder': {'rnn_cell': {'state_size': 512,
                                   'cell_name': 'BasicLSTMCell',
                                   'num_layers': 2,
                                   'input_keep_prob': 1.0,
                                   'output_keep_prob': 1.0},
                      'attention_key_size': attention_size},
        'decoder':  {'rnn_cell': {'cell_name': 'BasicLSTMCell',
                                   'state_size': 512,
                                   'num_layers': 1,
                                   'input_keep_prob': 1.0,
                                   'output_keep_prob': 1.0},
                      'trg_vocab_size': trg_vocab_size}}
    :param src_vocab: Vocab of source symbols.
    :param trg_vocab: Vocab of target symbols.
    :param source_ids: placeholder
    :param source_seq_length: placeholder
    :param target_ids: placeholder
    :param target_seq_length: placeholder
    :param beam_size: used in beam inference
    :param mode:
    :return:
    """
    if mode != MODE.TRAIN:
        params = sq.disable_dropout(params)

    tf.logging.info(json.dumps(params, indent=4))

    # parameters
    encoder_params = params['encoder']
    decoder_params = params['decoder']

    # Because the source encoder is different from the target feedback,
    # we construct source_embedding_table manually.
    source_embedding_table = sq.LookUpOp(src_vocab.vocab_size,
                                         src_vocab.embedding_dim,
                                         name='source')
    source_embedded = source_embedding_table(source_ids)

    encoder = sq.StackBidirectionalRNNEncoder(encoder_params,
                                              name='stack_rnn',
                                              mode=mode)
    encoded_representation = encoder.encode(source_embedded, source_seq_length)
    attention_keys = encoded_representation.attention_keys
    attention_values = encoded_representation.attention_values
    attention_length = encoded_representation.attention_length

    # feedback
    if mode == MODE.RL:
        tf.logging.info('BUILDING RL TRAIN FEEDBACK......')
        dynamical_batch_size = tf.shape(attention_keys)[1]
        feedback = sq.RLTrainingFeedBack(target_ids,
                                         target_seq_length,
                                         trg_vocab,
                                         dynamical_batch_size,
                                         burn_in_step=burn_in_step,
                                         increment_step=increment_step,
                                         max_step=max_step)
    elif mode == MODE.TRAIN:
        tf.logging.info('BUILDING TRAIN FEEDBACK WITH {} TEACHER_RATE'
                        '......'.format(teacher_rate))
        feedback = sq.TrainingFeedBack(target_ids,
                                       target_seq_length,
                                       trg_vocab,
                                       teacher_rate,
                                       max_step=max_step)
    elif mode == MODE.EVAL:
        tf.logging.info('BUILDING EVAL FEEDBACK ......')
        feedback = sq.TrainingFeedBack(target_ids,
                                       target_seq_length,
                                       trg_vocab,
                                       0.,
                                       max_step=max_step)
    else:
        tf.logging.info('BUILDING INFER FEEDBACK WITH BEAM_SIZE {}'
                        '......'.format(beam_size))
        infer_key_size = attention_keys.get_shape().as_list()[-1]
        infer_value_size = attention_values.get_shape().as_list()[-1]

        # expand beam
        if TIME_MAJOR:
            # batch size should be dynamical
            dynamical_batch_size = tf.shape(attention_keys)[1]
            final_key_shape = [
                -1, dynamical_batch_size * beam_size, infer_key_size
            ]
            final_value_shape = [
                -1, dynamical_batch_size * beam_size, infer_value_size
            ]
            attention_keys = tf.reshape(
                (tf.tile(attention_keys, [1, 1, beam_size])), final_key_shape)
            attention_values = tf.reshape(
                (tf.tile(attention_values, [1, 1, beam_size])),
                final_value_shape)
        else:
            dynamical_batch_size = tf.shape(attention_keys)[0]
            final_key_shape = [
                dynamical_batch_size * beam_size, -1, infer_key_size
            ]
            final_value_shape = [
                dynamical_batch_size * beam_size, -1, infer_value_size
            ]
            attention_keys = tf.reshape(
                (tf.tile(attention_keys, [1, beam_size, 1])), final_key_shape)
            attention_values = tf.reshape(
                (tf.tile(attention_values, [1, beam_size, 1])),
                final_value_shape)

        attention_length = tf.reshape(
            tf.transpose(tf.tile([attention_length], [beam_size, 1])), [-1])

        feedback = sq.BeamFeedBack(trg_vocab,
                                   beam_size,
                                   dynamical_batch_size,
                                   max_step=max_step)

    # attention
    attention = sq.Attention(decoder_params['rnn_cell']['state_size'],
                             attention_keys, attention_values,
                             attention_length)

    # decoder
    decoder = sq.AttentionRNNDecoder(decoder_params,
                                     attention,
                                     feedback,
                                     mode=mode)
    decoder_output, decoder_final_state = sq.dynamic_decode(decoder,
                                                            swap_memory=True,
                                                            scope='decoder')

    # not training
    if mode == MODE.EVAL or mode == MODE.INFER:
        return decoder_output, decoder_final_state

    # bos is added in feedback
    # so target_ids is predict_ids
    if not TIME_MAJOR:
        ground_truth_ids = tf.transpose(target_ids, [1, 0])
    else:
        ground_truth_ids = target_ids

    # construct the loss
    if mode == MODE.RL:
        # Creates a variable to hold the global_step.
        global_step_tensor = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                               scope='global_step')[0]
        rl_time_steps = tf.floordiv(
            tf.maximum(global_step_tensor - burn_in_step, 0), increment_step)
        start_rl_step = target_seq_length - rl_time_steps

        baseline_states = tf.stop_gradient(decoder_output.baseline_states)
        predict_ids = tf.stop_gradient(decoder_output.predicted_ids)

        # TODO: bug in tensorflow
        ground_or_predict_ids = tf.cond(tf.greater(rl_time_steps,
                                                   0), lambda: predict_ids,
                                        lambda: ground_truth_ids)

        reward, sequence_length = tf.py_func(
            func=_py_func,
            inp=[ground_or_predict_ids, ground_truth_ids, trg_vocab.eos_id],
            Tout=[tf.float32, tf.int32],
            name='reward')
        sequence_length.set_shape((None, ))

        total_loss_avg, entropy_loss_avg, reward_loss_rmse, reward_predicted \
            = rl_sequence_loss(
            logits=decoder_output.logits,
            predict_ids=predict_ids,
            sequence_length=sequence_length,
            baseline_states=baseline_states,
            start_rl_step=start_rl_step,
            reward=reward)
        return decoder_output, total_loss_avg, entropy_loss_avg, \
               reward_loss_rmse, reward_predicted
    else:
        total_loss_avg = cross_entropy_sequence_loss(
            logits=decoder_output.logits,
            targets=ground_truth_ids,
            sequence_length=target_seq_length)
        return decoder_output, total_loss_avg, total_loss_avg, \
               tf.to_float(0.), tf.to_float(0.)
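
# A sketch (plain Python, hyper-parameters taken from the signature defaults)
# of the RL schedule used above: no RL time steps until burn_in_step, then one
# more RL time step for every increment_step global steps.
burn_in_step, increment_step = 100000, 10000
for global_step in (50000, 100000, 125000, 180000):
    rl_time_steps = max(global_step - burn_in_step, 0) // increment_step
    print(global_step, rl_time_steps)
# 50000 0
# 100000 0
# 125000 2
# 180000 8
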
Example #33
0
File: Wall_NN.py Project: tebesk/NN
#Readout Layer
with tf.name_scope('fc2') as scope:
	W_fc2 = weight_variable([1024, NUM_CLASSES])
	b_fc2 = bias_variable([NUM_CLASSES])
	
with tf.name_scope('RELU') as scope:
	y_conv = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

#Train and Evaluate the Model############################################################

# Set up the evaluation ops
cross_entropy = -tf.reduce_sum(tf.square(y_ - y_conv))  # -tf.reduce_sum(y_*tf.log(y_conv)) did not work well
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

divide = tf.constant([0.7])
correct_prediction = tf.equal(tf.floordiv(y_conv, divide), y_)  # map to 0/1: divide by `divide` and floor
accuracy = tf.reduce_sum(tf.cast(correct_prediction, tf.float32), name = "accuracy")
sess.run(tf.initialize_all_variables())

# for writing TensorBoard summaries
tf.scalar_summary("accuracy" , accuracy)

print "befor merge all"
# 全ての要約をマージしてそれらを /tmp/mnist_logs に書き出します。
merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("/tmp/logs",sess.graph)




print "NetWorkMaking Fin"
Example #34
0
    def _model_fn(self, model_inputs, model_targets, mode):
        inputs = {}
        if self._hparams.context_actions:
            inputs['context_frames'] = model_inputs['images'][:, :self._hparams
                                                              .context_actions]
        inputs['start_images'] = model_inputs['images'][:, self._hparams.
                                                        context_actions]
        inputs['goal_images'] = model_inputs['images'][:, -1]

        n_xy = (len(self._hparams.pivots[0]) +
                1) * (len(self._hparams.pivots[1]) + 1)
        n_z = len(self._hparams.pivots[2]) + 1
        n_theta = len(self._hparams.pivots[3]) + 1

        if mode == tf.estimator.ModeKeys.TRAIN:
            one_hot_actions = _binarize(model_targets['actions'],
                                        self._hparams.pivots)
            if self._hparams.context_actions:
                inputs['context_actions'] = tf.concat([
                    x[:, :self._hparams.context_actions]
                    for x in one_hot_actions
                ], -1)
            real_pred_actions = [
                x[:, self._hparams.context_actions:] for x in one_hot_actions
            ]
            inputs['real_actions'] = tf.concat(real_pred_actions, -1)
            inputs['T'] = model_targets['actions'].get_shape().as_list(
            )[1] - self._hparams.context_actions
        else:
            assert model_inputs[
                'adim'] == 4, "only supports [x,y,z,theta] action space for now!"
            inputs['T'] = model_inputs['T'] - self._hparams.context_actions
            if self._hparams.context_actions:
                one_hot_actions = _binarize(model_inputs['context_actions'],
                                            self._hparams.pivots)
                inputs['context_actions'] = tf.concat([
                    x[:, :self._hparams.context_actions]
                    for x in one_hot_actions
                ], -1)

        inputs['adim'] = (len(self._hparams.pivots[0]) + 1) * (
            len(self._hparams.pivots[1]) + 1) + sum(
                [len(arr) + 1 for arr in self._hparams.pivots[2:]])

        # build the graph
        self._model_graph = model_graph = self._graph_class()
        if self._num_gpus <= 1:
            outputs = model_graph.build_graph(mode, inputs, self._hparams,
                                              self._graph_scope)
        else:
            # TODO: add multi-gpu support
            raise NotImplementedError

        # train
        if mode == tf.estimator.ModeKeys.TRAIN:
            global_step = tf.train.get_or_create_global_step()
            lr, optimizer = tf_utils.build_optimizer(self._hparams.lr,
                                                     self._hparams.beta1,
                                                     self._hparams.beta2,
                                                     global_step=global_step)
            pred_xy = outputs['pred_actions'][:, :, :n_xy]
            pred_z = outputs['pred_actions'][:, :, n_xy:n_z + n_xy]
            pred_theta = outputs['pred_actions'][:, :, n_z + n_xy:]
            pred_one_hots = [pred_xy, pred_z, pred_theta]

            losses = [
                tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits_v2(real, pred))
                for real, pred in zip(real_pred_actions, pred_one_hots)
            ]
            loss = sum(losses)

            print('computing gradient and train_op')
            g_train_op = optimizer.minimize(loss, global_step=global_step)

            est = tf.estimator.EstimatorSpec(mode,
                                             loss=loss,
                                             train_op=g_train_op)
            scalar_summaries = {}
            if 'ground_truth_sampling_mean' in outputs:
                scalar_summaries['ground_truth_sampling_mean'] = outputs[
                    'ground_truth_sampling_mean']

            for k, loss in zip(['xy_loss', 'z_loss', 'theta_loss'], losses):
                scalar_summaries[k] = loss
            return est, scalar_summaries, {}

        #test
        means = tf.convert_to_tensor(self._hparams.means)
        pred_xy = outputs['pred_actions'][:, :, :n_xy]
        pred_z = outputs['pred_actions'][:, :, n_xy:n_z + n_xy]
        pred_theta = outputs['pred_actions'][:, :, n_z + n_xy:]

        pred_xy = tf.reshape(
            tf.random.categorical(tf.reshape(pred_xy, (-1, n_xy)),
                                  1,
                                  dtype=tf.int32), (-1, inputs['T']))
        pred_x, pred_y = tf.mod(pred_xy,
                                len(self._hparams.pivots[0]) + 1), tf.floordiv(
                                    pred_xy,
                                    len(self._hparams.pivots[0]) + 1)
        pred_z = tf.reshape(
            tf.random.categorical(tf.reshape(pred_z, (-1, n_z)),
                                  1,
                                  dtype=tf.int32), (-1, inputs['T']))
        pred_theta = tf.reshape(
            tf.random.categorical(tf.reshape(pred_theta, (-1, n_theta)),
                                  1,
                                  dtype=tf.int32), (-1, inputs['T']))

        outputs['pred_actions'] = tf.concat([
            tf.gather(means[i], indices)[:, :, None]
            for i, indices in enumerate([pred_x, pred_y, pred_z, pred_theta])
        ],
                                            axis=-1)
        return outputs['pred_actions']
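
# A sketch (plain Python, hypothetical pivot counts) mirroring the x/y decode
# above: the xy action is sampled as a single categorical over an
# (n_x_bins * n_y_bins) grid, and mod/floordiv split it back into two bins.
n_x_bins = 5                      # assumed: len(pivots[0]) + 1
joint_index = 13                  # assumed sampled category
x_bin = joint_index % n_x_bins    # -> 3
y_bin = joint_index // n_x_bins   # -> 2
assert joint_index == y_bin * n_x_bins + x_bin
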
Example #35
0
x = tf.add(a, b, name='add')
writer = tf.summary.FileWriter('./graphs/simple', tf.get_default_graph())
with tf.Session() as sess:
    # writer = tf.summary.FileWriter('./graphs', sess.graph)
    print(sess.run(x))
writer.close()  # close the writer when you’re done using it

# Example 2: The wonderful wizard of div
a = tf.constant([2, 2], name='a')
b = tf.constant([[0, 1], [2, 3]], name='b')

with tf.Session() as sess:
    print(sess.run(tf.div(b, a)))
    print(sess.run(tf.divide(b, a)))
    print(sess.run(tf.truediv(b, a)))
    print(sess.run(tf.floordiv(b, a)))
    # print(sess.run(tf.realdiv(b, a)))
    print(sess.run(tf.truncatediv(b, a)))
    print(sess.run(tf.floor_div(b, a)))

# Example 3: multiplying tensors
a = tf.constant([10, 20], name='a')
b = tf.constant([2, 3], name='b')

with tf.Session() as sess:
    print(sess.run(tf.multiply(a, b)))
    print(sess.run(tf.tensordot(a, b, 1)))

# Example 4: Python native type
t_0 = 19
x = tf.zeros_like(t_0)  # ==> 0
 def _pad_top_bottom():
     pad_top = tf.floordiv(width - height, 2)
     pad_bottom = width - height - pad_top
     return [[pad_top, pad_bottom], [0, 0], [0, 0]]
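
# A sketch (plain Python, assumed frame sizes) of the padding split above:
# floordiv gives the top half and the remainder goes to the bottom, so odd
# differences still pad to exactly width - height rows in total.
width, height = 11, 6                   # assumed dimensions
pad_top = (width - height) // 2         # -> 2
pad_bottom = width - height - pad_top   # -> 3
assert pad_top + pad_bottom == width - height
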
Example #37
0
import tensorflow as tf
import numpy as np

# Other operations on the computation graph

sess = tf.Session()
# div() returns a result with the same dtype as its inputs
print(sess.run(tf.div(3, 4)))

# truediv() casts the result to floating point
print(sess.run(tf.truediv(3, 4)))

# floordiv() rounds the result down (floor)
print(sess.run(tf.floordiv(5, 4)))

# mod() computes the remainder of the division
print(sess.run(tf.mod(22.0, 5.0)))

# cross() computes the cross product of two tensors; it is only defined for 3-D vectors.
print(sess.run(tf.cross([1., 0., 0.], [0., 1., 0.])))

# Compose the primitive ops above into a custom function
print(sess.run(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.))))


# Further study
# Create a custom quadratic polynomial function: 3x^2 - x + 10
def custom_polynomial(value):
    return (tf.subtract(3 * tf.square(value), value) + 10)
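
# A quick usage check of custom_polynomial (the input value 11 is arbitrary):
# 3 * 11**2 - 11 + 10 = 362.
print(sess.run(custom_polynomial(11)))   # 362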

Example #38
0
 def _expand_to_binary_form(self, x, input_bits):
   """Expands input into binary representation."""
   # This operation is the inverse of self._pack_binary_form, except padded zeros
   # are not removed.
   expand_vector = tf.constant([2**i for i in range(input_bits)], tf.int32)
   return tf.reshape(tf.mod(tf.floordiv(x, expand_vector), 2), [-1])
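
# A sketch (plain Python, assumed values) of the expansion above: dividing by
# [1, 2, 4, ...] and taking mod 2 reads off the bits of x, least significant
# bit first.
input_bits = 4
x = 6
print([(x // 2**i) % 2 for i in range(input_bits)])   # [0, 1, 1, 0]
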
def mean_shift(X, bandwidth, max_iter):

    (m, n) = X.shape
    print(m, n)
    graph = tf.Graph()
    with graph.as_default():

        with tf.name_scope("input") as scope:
            data = tf.constant(X, name="data_points")
            b = tf.constant(bandwidth, dtype=tf.float32, name="bandwidth")
            m = tf.constant(max_iter, name="maximum_iteration")
            # n_samples = tf.constant(m, name="no_of_samples")
            # n_features = tf.constant(n, name="no_of_features")

        # with tf.name_scope("seeding") as scope:
        #     seed = tf.placeholder(tf.float32, [5], name="seed")

        with tf.name_scope("mean_shifting") as scope:
            old_mean = tf.placeholder(tf.float32, [n], name="old_mean")
            neighbors = tf.placeholder(tf.float32, [None, n], name="neighbors")
            new_mean = tf.reduce_mean(neighbors, 0)

            euclid_dist = tf.sqrt(tf.reduce_sum(
                tf.pow(tf.sub(old_mean, new_mean), 2)),
                                  name="mean_distance")

        center_intensity_dict = {}
        nbrs = NearestNeighbors(radius=bandwidth).fit(X)

        sess = tf.Session()
        init = tf.initialize_all_variables()
        sess.run(init)
        writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph_def)

        bin_sizes = defaultdict(int)

        data_point = tf.placeholder(tf.float32, [n], "data_point")
        binned_point = tf.floordiv(data_point, b)

        for point in X:
            feed = {data_point: point}
            bp = sess.run(binned_point, feed_dict=feed)
            bin_sizes[tuple(bp)] += 1

        bin_seeds = np.array(
            [point for point, freq in six.iteritems(bin_sizes) if freq >= 1],
            dtype=np.float32)

        bin_seeds = bin_seeds * bandwidth

        print(len(bin_seeds))

        j = 0

        for x in bin_seeds:
            print "Seed ", j, ": ", x
            i = 0
            o_mean = x

            while True:
                i_nbrs = nbrs.radius_neighbors([o_mean],
                                               bandwidth,
                                               return_distance=False)[0]
                points_within = X[i_nbrs]

                feed = {neighbors: points_within}
                n_mean = sess.run(new_mean, feed_dict=feed)

                feed = {new_mean: n_mean, old_mean: o_mean}
                dist = sess.run(euclid_dist, feed_dict=feed)

                if dist < 1e-3 * bandwidth or i == max_iter:
                    center_intensity_dict[tuple(n_mean)] = len(i_nbrs)
                    break
                else:
                    o_mean = n_mean

                print "\t", i, dist, len(i_nbrs)

                i += 1

            # if j>10:
            #     break

            j += 1

        print(center_intensity_dict)

        sorted_by_intensity = sorted(center_intensity_dict.items(),
                                     key=lambda tup: tup[1],
                                     reverse=True)
        sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
        unique = np.ones(len(sorted_centers), dtype=np.bool)
        nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
        for i, center in enumerate(sorted_centers):
            if unique[i]:
                neighbor_idxs = nbrs.radius_neighbors([center],
                                                      return_distance=False)[0]
                unique[neighbor_idxs] = 0
                unique[i] = 1  # leave the current point as unique
        cluster_centers = sorted_centers[unique]

        nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
        labels = np.zeros(154401, dtype=np.int)
        distances, idxs = nbrs.kneighbors(X)

        labels = idxs.flatten()
        return cluster_centers, labels
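
# A sketch (NumPy, made-up points) of the seeding step above: points are
# binned by floor-dividing their coordinates by the bandwidth, and each
# occupied bin, scaled back up, becomes one seed for the mean-shift loop.
import numpy as np
bandwidth_example = 2.0
points = np.array([[0.5, 3.1], [0.9, 3.9], [4.2, 0.1]])
bins = np.floor_divide(points, bandwidth_example)
print(np.unique(bins, axis=0) * bandwidth_example)   # [[0. 2.], [4. 0.]]
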
Example #40
0
def network(batch_size=1,
            class_num=5990,
            character_height=32,
            character_width=32,
            character_step=8,
            is_train=False):
    network = {}

    network["inputs"] = tf.placeholder(tf.float32, [batch_size, 32, None, 1],
                                       name='inputs')
    network["seq_len"] = tf.multiply(
        tf.ones(shape=[batch_size], dtype=tf.int32),
        tf.floordiv(
            tf.shape(network["inputs"])[2] - character_width, character_step))

    network["inputs_batch"] = tf.py_func(
        sliding_generate_batch_layer,
        [network["inputs"], character_width, character_step], tf.float32)
    network["inputs_batch"] = tf.reshape(
        network["inputs_batch"], [-1, character_height, character_width, 1])

    network["conv1"] = tf.layers.conv2d(inputs=network["inputs_batch"],
                                        filters=50,
                                        kernel_size=(3, 3),
                                        padding="same",
                                        activation=None)
    network["batch_norm1"] = tf.contrib.layers.batch_norm(network["conv1"],
                                                          decay=0.9,
                                                          center=True,
                                                          scale=True,
                                                          epsilon=0.001,
                                                          is_training=is_train)
    network["batch_norm1"] = tf.nn.relu(network["batch_norm1"])
    network["conv2"] = tf.layers.conv2d(inputs=network["batch_norm1"],
                                        filters=100,
                                        kernel_size=(3, 3),
                                        padding="same",
                                        activation=tf.nn.relu)
    network["dropout2"] = tf.layers.dropout(inputs=network["conv2"], rate=0.1)
    network["conv3"] = tf.layers.conv2d(inputs=network["dropout2"],
                                        filters=100,
                                        kernel_size=(3, 3),
                                        padding="same",
                                        activation=None)
    network["dropout3"] = tf.layers.dropout(inputs=network["conv3"], rate=0.1)
    network["batch_norm3"] = tf.contrib.layers.batch_norm(network["dropout3"],
                                                          decay=0.9,
                                                          center=True,
                                                          scale=True,
                                                          epsilon=0.001,
                                                          is_training=is_train)
    network["batch_norm3"] = tf.nn.relu(network["batch_norm3"])
    network["pool3"] = tf.layers.max_pooling2d(inputs=network["batch_norm3"],
                                               pool_size=[2, 2],
                                               strides=2)
    network["conv4"] = tf.layers.conv2d(inputs=network["pool3"],
                                        filters=150,
                                        kernel_size=(3, 3),
                                        padding="same",
                                        activation=None)
    network["dropout4"] = tf.layers.dropout(inputs=network["conv4"], rate=0.2)
    network["batch_norm4"] = tf.contrib.layers.batch_norm(network["dropout4"],
                                                          decay=0.9,
                                                          center=True,
                                                          scale=True,
                                                          epsilon=0.001,
                                                          is_training=is_train)
    network["batch_norm4"] = tf.nn.relu(network["batch_norm4"])
    network["conv5"] = tf.layers.conv2d(inputs=network["batch_norm4"],
                                        filters=200,
                                        kernel_size=(3, 3),
                                        padding="same",
                                        activation=tf.nn.relu)
    network["dropout5"] = tf.layers.dropout(inputs=network["conv5"], rate=0.2)
    network["conv6"] = tf.layers.conv2d(inputs=network["dropout5"],
                                        filters=200,
                                        kernel_size=(3, 3),
                                        padding="same",
                                        activation=None)
    network["dropout6"] = tf.layers.dropout(inputs=network["conv6"], rate=0.2)
    network["batch_norm6"] = tf.contrib.layers.batch_norm(network["dropout6"],
                                                          decay=0.9,
                                                          center=True,
                                                          scale=True,
                                                          epsilon=0.001,
                                                          is_training=is_train)
    network["batch_norm6"] = tf.nn.relu(network["batch_norm6"])
    network["pool6"] = tf.layers.max_pooling2d(inputs=network["batch_norm6"],
                                               pool_size=[2, 2],
                                               strides=2)
    network["conv7"] = tf.layers.conv2d(inputs=network["pool6"],
                                        filters=250,
                                        kernel_size=(3, 3),
                                        padding="same",
                                        activation=None)
    network["dropout7"] = tf.layers.dropout(inputs=network["conv7"], rate=0.3)
    network["batch_norm7"] = tf.contrib.layers.batch_norm(network["dropout7"],
                                                          decay=0.9,
                                                          center=True,
                                                          scale=True,
                                                          epsilon=0.001,
                                                          is_training=is_train)
    network["batch_norm7"] = tf.nn.relu(network["batch_norm7"])
    network["conv8"] = tf.layers.conv2d(inputs=network["batch_norm7"],
                                        filters=300,
                                        kernel_size=(3, 3),
                                        padding="same",
                                        activation=tf.nn.relu)
    network["dropout8"] = tf.layers.dropout(inputs=network["conv8"], rate=0.3)
    network["conv9"] = tf.layers.conv2d(inputs=network["dropout8"],
                                        filters=300,
                                        kernel_size=(3, 3),
                                        padding="same",
                                        activation=None)
    network["dropout9"] = tf.layers.dropout(inputs=network["conv9"], rate=0.3)
    network["batch_norm9"] = tf.contrib.layers.batch_norm(network["dropout9"],
                                                          decay=0.9,
                                                          center=True,
                                                          scale=True,
                                                          epsilon=0.001,
                                                          is_training=is_train)
    network["batch_norm9"] = tf.nn.relu(network["batch_norm9"])
    network["pool9"] = tf.layers.max_pooling2d(inputs=network["batch_norm9"],
                                               pool_size=[2, 2],
                                               strides=2)
    network["conv10"] = tf.layers.conv2d(inputs=network["pool9"],
                                         filters=350,
                                         kernel_size=(3, 3),
                                         padding="same",
                                         activation=None)
    network["dropout10"] = tf.layers.dropout(inputs=network["conv10"],
                                             rate=0.4)
    network["batch_norm10"] = tf.contrib.layers.batch_norm(
        network["dropout10"],
        decay=0.9,
        center=True,
        scale=True,
        epsilon=0.001,
        is_training=is_train)
    network["batch_norm10"] = tf.nn.relu(network["batch_norm10"])
    network["conv11"] = tf.layers.conv2d(inputs=network["batch_norm10"],
                                         filters=400,
                                         kernel_size=(3, 3),
                                         padding="same",
                                         activation=tf.nn.relu)
    network["dropout11"] = tf.layers.dropout(inputs=network["conv11"],
                                             rate=0.4)
    network["conv12"] = tf.layers.conv2d(inputs=network["dropout11"],
                                         filters=400,
                                         kernel_size=(3, 3),
                                         padding="same",
                                         activation=None)
    network["dropout12"] = tf.layers.dropout(inputs=network["conv12"],
                                             rate=0.4)
    network["batch_norm12"] = tf.contrib.layers.batch_norm(
        network["dropout12"],
        decay=0.9,
        center=True,
        scale=True,
        epsilon=0.001,
        is_training=is_train)
    network["batch_norm12"] = tf.nn.relu(network["batch_norm12"])
    #2*2*400
    network["pool12"] = tf.layers.max_pooling2d(inputs=network["batch_norm12"],
                                                pool_size=[2, 2],
                                                strides=2)
    network["flatten"] = tf.contrib.layers.flatten(network["pool12"])

    network["fc1"] = tf.contrib.layers.fully_connected(
        inputs=network["flatten"], num_outputs=900, activation_fn=tf.nn.relu)
    network["dropout_fc1"] = tf.layers.dropout(inputs=network["fc1"], rate=0.5)
    network["fc2"] = tf.contrib.layers.fully_connected(
        inputs=network["dropout_fc1"],
        num_outputs=200,
        activation_fn=tf.nn.relu)
    if is_train:
        network["fc3"] = tf.contrib.layers.fully_connected(
            inputs=network["fc2"], num_outputs=class_num, activation_fn=None)
    else:
        network["fc3"] = tf.contrib.layers.fully_connected(
            inputs=network["fc2"],
            num_outputs=class_num,
            activation_fn=tf.nn.sigmoid)
    network["outputs"] = tf.reshape(network["fc3"],
                                    [batch_size, -1, class_num])
    network["outputs"] = tf.transpose(network["outputs"], (1, 0, 2))

    return network
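
# A sketch (plain Python, assumed image widths) of the seq_len computation
# above: for a 32-pixel-high line image of width W, the network emits
# (W - character_width) // character_step sliding-window predictions.
character_width, character_step = 32, 8
for W in (32, 64, 100):
    print(W, (W - character_width) // character_step)
# 32 0
# 64 4
# 100 8
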
Example #41
0
 def test_FloorDiv(self):
     t = tf.floordiv(*self.random((3, 5), (3, 5)))
     self.check(t)
def multilevel_crop_and_resize(features,
                               boxes,
                               output_size=7,
                               is_gpu_inference=False):
    """Crop and resize on multilevel feature pyramid.

  Generate the (output_size, output_size) set of pixels for each input box
  by first assigning the box to the correct feature level, and then cropping
  and resizing it using the corresponding feature map of that level.

  Args:
    features: A dictionary with key as pyramid level and value as features. The
      features are in shape of [batch_size, height_l, width_l, num_filters].
    boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row represents
      a box with [y1, x1, y2, x2] in un-normalized coordinates.
    output_size: A scalar to indicate the output crop size.
    is_gpu_inference: whether to build the model for GPU inference.

  Returns:
    A 5-D tensor representing feature crop of shape
    [batch_size, num_boxes, output_size, output_size, num_filters].
  """
    with tf.name_scope('multilevel_crop_and_resize'):
        levels = features.keys()
        min_level = min(levels)
        max_level = max(levels)
        _, max_feature_height, max_feature_width, _ = (
            features[min_level].get_shape().as_list())
        # Stack feature pyramid into a features_all of shape
        # [batch_size, levels, height, width, num_filters].
        features_all = []
        for level in range(min_level, max_level + 1):
            features_all.append(
                tf.image.pad_to_bounding_box(features[level], 0, 0,
                                             max_feature_height,
                                             max_feature_width))
        features_all = tf.stack(features_all, axis=1)

        # Assign boxes to the right level.
        box_width = tf.squeeze(boxes[:, :, 3:4] - boxes[:, :, 1:2], axis=-1)
        box_height = tf.squeeze(boxes[:, :, 2:3] - boxes[:, :, 0:1], axis=-1)
        areas_sqrt = tf.sqrt(box_height * box_width)
        levels = tf.floordiv(tf.log(tf.div(areas_sqrt, 224.0)),
                             tf.log(2.0)) + 4.0
        if not is_gpu_inference:
            levels = tf.cast(levels, dtype=tf.int32)

        # Map levels between [min_level, max_level].
        levels = tf.minimum(
            float(max_level) if is_gpu_inference else max_level,
            tf.maximum(levels,
                       float(min_level) if is_gpu_inference else min_level))

        # Project box location and sizes to corresponding feature levels.
        scale_to_level = tf.cast(tf.pow(
            tf.constant(2.0),
            levels if is_gpu_inference else tf.cast(levels, tf.float32)),
                                 dtype=boxes.dtype)
        boxes /= tf.expand_dims(scale_to_level, axis=2)
        box_width /= scale_to_level
        box_height /= scale_to_level
        boxes = tf.concat([
            boxes[:, :, 0:2],
            tf.expand_dims(box_height, -1),
            tf.expand_dims(box_width, -1)
        ],
                          axis=-1)

        # Map levels to [0, max_level-min_level].
        levels -= min_level
        level_strides = tf.pow(
            [[2.0]],
            levels if is_gpu_inference else tf.cast(levels, tf.float32))
        boundary = tf.cast(
            tf.concat([
                tf.expand_dims([[tf.cast(max_feature_height, tf.float32)]] /
                               level_strides - 1,
                               axis=-1),
                tf.expand_dims([[tf.cast(max_feature_width, tf.float32)]] /
                               level_strides - 1,
                               axis=-1),
            ],
                      axis=-1), boxes.dtype)

        return selective_crop_and_resize(features_all, boxes, levels, boundary,
                                         output_size, is_gpu_inference)
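A minimal usage sketch for multilevel_crop_and_resize (assumptions: an FPN that
produces levels 2-5 with 256 filters each, and selective_crop_and_resize
available from the same module; the shapes below are illustrative only):

import tensorflow as tf

batch_size = 2
# Hypothetical FPN outputs: level l has spatial size 256 / 2**(l - 2).
features = {
    level: tf.random_normal(
        [batch_size, 256 // 2 ** (level - 2), 256 // 2 ** (level - 2), 256])
    for level in range(2, 6)
}
# One 100x100 box per image, in un-normalized [y1, x1, y2, x2] coordinates.
boxes = tf.constant([[[10.0, 10.0, 110.0, 110.0]]] * batch_size)
crops = multilevel_crop_and_resize(features, boxes, output_size=7)
# crops has shape [batch_size, 1, 7, 7, 256].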
Example #43
0
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr

        completed_updates = K.cast(
            tf.floordiv(self.iterations, self.accum_iters), K.floatx())

        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * completed_updates))

        t = completed_updates + 1

        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))

        # self.iterations incremented after processing a batch
        # batch:              1 2 3 4 5 6 7 8 9
        # self.iterations:    0 1 2 3 4 5 6 7 8
        # update_switch = 1:        x       x    (if accum_iters=4)
        update_switch = K.equal((self.iterations + 1) % self.accum_iters, 0)
        update_switch = K.cast(update_switch, K.floatx())

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        gs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]

        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        else:
            vhats = [K.zeros(1) for _ in params]

        self.weights = [self.iterations] + ms + vs + vhats

        for p, g, m, v, vhat, tg in zip(params, grads, ms, vs, vhats, gs):

            sum_grad = tg + g
            avg_grad = sum_grad / self.accum_iters_float

            m_t = (self.beta_1 * m) + (1. - self.beta_1) * avg_grad
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(avg_grad)

            if self.amsgrad:
                vhat_t = K.maximum(vhat, v_t)
                p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
                self.updates.append(
                    K.update(vhat, (1 - update_switch) * vhat +
                             update_switch * vhat_t))
            else:
                p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append(
                K.update(m, (1 - update_switch) * m + update_switch * m_t))
            self.updates.append(
                K.update(v, (1 - update_switch) * v + update_switch * v_t))
            self.updates.append(K.update(tg, (1 - update_switch) * sum_grad))
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(
                K.update(p, (1 - update_switch) * p + update_switch * new_p))
        return self.updates
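The update_switch schedule in the comment above can be checked in isolation; a
small standalone sketch (NumPy only; the optimizer class wrapping get_updates is
not shown in this excerpt):

import numpy as np

accum_iters = 4
iterations = np.arange(9)          # 0 1 2 3 4 5 6 7 8
update_switch = ((iterations + 1) % accum_iters == 0).astype(float)
print(update_switch)               # [0. 0. 0. 1. 0. 0. 0. 1. 0.]
# Parameters change only on batches 4 and 8, matching the table above.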
Example #44
0
 def __rfloordiv__(self, other):
     return tf.floordiv(other, self)
  def body(x_in, y_in, domain_in, i_in, cond_in):
    # Create graph for model logits and predictions
    logits = model.get_logits(x_in)
    preds = tf.nn.softmax(logits)
    preds_onehot = tf.one_hot(tf.argmax(preds, axis=-1), depth=nb_classes)

    # create the Jacobian graph
    list_derivatives = []
    for class_ind in xrange(nb_classes):
      derivatives = tf.gradients(logits[:, class_ind], x_in)
      list_derivatives.append(derivatives[0])
    grads = tf.reshape(tf.stack(list_derivatives), shape=[nb_classes, -1, nb_features])

    # Compute the Jacobian components
    # To help with the computation later, reshape the target_class and other_class to [nb_classes, -1, 1].
    # The last dimension is added to allow broadcasting later.
    target_class = tf.reshape(tf.transpose(y_in, perm=[1, 0]), shape=[nb_classes, -1, 1])
    other_classes = tf.cast(tf.not_equal(target_class, 1), tf_dtype)

    grads_target = reduce_sum(grads * target_class, axis=0)
    grads_other = reduce_sum(grads * other_classes, axis=0)

    # Remove the already-used input features from the search space
    # Subtract 2 times the maximum value from those value so that
    # they won't be picked later
    increase_coef = (4 * int(increase) - 2) * tf.cast(tf.equal(domain_in, 0), tf_dtype)

    target_tmp = grads_target
    target_tmp -= increase_coef * reduce_max(tf.abs(grads_target), axis=-1, keepdims=True)
    target_sum = tf.reshape(target_tmp, shape=[-1, nb_features, 1]) + tf.reshape(target_tmp, shape=[-1, 1, nb_features])

    other_tmp = grads_other
    other_tmp += increase_coef * reduce_max(tf.abs(grads_other), axis=-1, keepdims=True)
    other_sum = tf.reshape(other_tmp, shape=[-1, nb_features, 1]) + tf.reshape(other_tmp, shape=[-1, 1, nb_features])

    # Create a mask to only keep features that match conditions
    if increase:
      scores_mask = ((target_sum > 0) & (other_sum < 0))
    else:
      scores_mask = ((target_sum < 0) & (other_sum > 0))

    # Create a 2D numpy array of scores for each pair of candidate features
    scores = tf.cast(scores_mask, tf_dtype) * (-target_sum * other_sum) * zero_diagonal

    # Extract the best two pixels
    best = tf.argmax(tf.reshape(scores, shape=[-1, nb_features * nb_features]), axis=-1)

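    # `best` indexes the flattened nb_features x nb_features score matrix;
    # mod/floordiv recover the pixel pair (p1 = column, p2 = row).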
    p1 = tf.mod(best, nb_features)
    p2 = tf.floordiv(best, nb_features)
    p1_one_hot = tf.one_hot(p1, depth=nb_features)
    p2_one_hot = tf.one_hot(p2, depth=nb_features)

    # Check if more modification is needed for each sample
    mod_not_done = tf.equal(reduce_sum(y_in * preds_onehot, axis=-1), 0)
    cond = mod_not_done & (reduce_sum(domain_in, axis=-1) >= 2)

    # Update the search domain
    cond_float = tf.reshape(tf.cast(cond, tf_dtype), shape=[-1, 1])
    to_mod = (p1_one_hot + p2_one_hot) * cond_float

    domain_out = domain_in - to_mod

    # Apply the modification to the images
    to_mod_reshape = tf.reshape(to_mod, shape=([-1] + x_in.shape[1:].as_list()))
    if increase:
      x_out = tf.minimum(clip_max, x_in + to_mod_reshape * theta)
    else:
      x_out = tf.maximum(clip_min, x_in - to_mod_reshape * theta)

    # Increase the iterator, and check if all misclassifications are done
    i_out = tf.add(i_in, 1)
    cond_out = reduce_any(cond)

    return x_out, y_in, domain_out, i_out, cond_out
 def _pad_left_right():
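     # Split the (height - width) difference into left/right padding with
     # floordiv; when the difference is odd, the extra column goes to pad_right.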
     pad_left = tf.floordiv(height - width, 2)
     pad_right = height - width - pad_left
     return [[0, 0], [pad_left, pad_right], [0, 0]]
    print('*9*'.center(50, '-'))
    print(sess.run(A))
    print(sess.run(A + B))
    print(sess.run(tf.add(A, B)))
    print(sess.run(A - B))
    print(sess.run(tf.subtract(A, B)))
    print(sess.run(tf.matmul(B, identity_matrix)))
    print(sess.run(tf.transpose(C)))
    print(sess.run(tf.matrix_determinant(D)))
    print(sess.run(tf.matrix_inverse(D)))
    print(sess.run(tf.cholesky(identity_matrix)))
    print(sess.run(tf.self_adjoint_eig(D)))
    print('*10*'.center(50, '-'))
    # print(sess.run(tf.div(3,4)))
    print(sess.run(tf.truediv(3, 4)))
    print(sess.run(tf.floordiv(3.0, 4.0)))
    print(sess.run(tf.mod(22.0, 5.0)))
    print(sess.run(tf.cross([1., 0., 0.], [0., 1., 0.])))
    print('*12*'.center(50, '-'))
    print(sess.run(output, feed_dict={input1: [3.], input2: [5]}))

"""
TensorFlow computes with data flow graphs, so we first have to build a data flow
graph and then place our data (which exists in the form of tensors) into that
graph for computation. Nodes in the graph represent mathematical operations, and
the edges represent the multi-dimensional data arrays, i.e. tensors, passed
between nodes. During training, tensors keep flowing from one node of the graph
to another, which is where the name TensorFlow comes from.

The meaning of "tensor"

Tensor:
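A minimal sketch of the data-flow idea described above (TF1-style: build the
graph first, then run it inside a session):

import tensorflow as tf

a = tf.constant(3.0)           # nodes produce tensors
b = tf.constant(4.0)
c = tf.floordiv(a + b, 2.0)    # ops are nodes; edges carry tensors between them

with tf.Session() as sess:     # tensors only "flow" when the graph is run
    print(sess.run(c))         # prints 3.0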
Example #48
0
 def __floordiv__(self, other):
     return tf.floordiv(self, other)
# in Tensorflow

# Declaring Operations
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()

# Open graph session
sess = tf.Session()

# div() vs truediv() vs floordiv()
print(sess.run(tf.div(3,4)))
print(sess.run(tf.truediv(3,4)))
print(sess.run(tf.floordiv(3.0,4.0)))
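# Expected outputs: tf.div(3, 4) does integer division -> 0;
# tf.truediv(3, 4) -> 0.75; tf.floordiv(3.0, 4.0) -> 0.0.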

# Mod function
print(sess.run(tf.mod(22.0,5.0)))

# Cross Product
print(sess.run(tf.cross([1.,0.,0.],[0.,1.,0.])))

# Trig functions
print(sess.run(tf.sin(3.1416)))
print(sess.run(tf.cos(3.1416)))
# Tangent
print(sess.run(tf.div(tf.sin(3.1416/4.), tf.cos(3.1416/4.))))

# Custom operation
test_nums = range(15)
Example #50
0
# -------------- 1.3 Basic operations -------------------
# Declaring Operations
import matplotlib.pyplot as plt
import tensorflow as tf

# Open graph session
sess = tf.Session()

'''Basic operations
add()      addition
subtract()
multiply()
matmul()   matrix multiplication
div()      division

'''
# div() vs truediv() vs floordiv()
print(sess.run(tf.div(3, 4)))
print(sess.run(tf.truediv(3, 4)))  # floating-point division
print(sess.run(tf.floordiv(3.0, 4.0)))  # floor (integer-style) division on floats
# Mod function (remainder)
print(sess.run(tf.mod(22.0, 5.0)))
# Cross product; only defined for three-dimensional vectors
print(sess.run(tf.cross([1., 0., 0.], [0., 1., 0.])))

# Trig functions
print(sess.run(tf.sin(3.1416)))
print(sess.run(tf.cos(3.1416)))
# Tangent
print(sess.run(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.))))

# Custom operation
test_nums = range(15)