Example #1
def get2d_deconv_output_size(input_height, input_width, filter_height,
                             filter_width, row_stride, col_stride,
                             padding_type):
    """Returns the number of rows and columns in a convolution/pooling output."""
    input_height = tensor_shape.as_dimension(input_height)
    input_width = tensor_shape.as_dimension(input_width)
    filter_height = tensor_shape.as_dimension(filter_height)
    filter_width = tensor_shape.as_dimension(filter_width)
    row_stride = int(row_stride)
    col_stride = int(col_stride)

    # Compute number of rows in the output, based on the padding.
    if input_height.value is None or filter_height.value is None:
        out_rows = None
    elif padding_type == "VALID":
        out_rows = (input_height.value - 1) * row_stride + filter_height.value
    elif padding_type == "SAME":
        out_rows = input_height.value * row_stride
    else:
        raise ValueError("Invalid value for padding: %r" % padding_type)

    # Compute number of columns in the output, based on the padding.
    if input_width.value is None or filter_width.value is None:
        out_cols = None
    elif padding_type == "VALID":
        out_cols = (input_width.value - 1) * col_stride + filter_width.value
    elif padding_type == "SAME":
        out_cols = input_width.value * col_stride

    return out_rows, out_cols
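A minimal usage sketch of the function above (it assumes tensor_shape is imported from tensorflow.python.framework in the enclosing module). With "SAME" padding the deconvolution output is simply input * stride; with "VALID" it adds the filter overhang, (input - 1) * stride + filter:

rows, cols = get2d_deconv_output_size(8, 8, 3, 3, 2, 2, "SAME")
assert (rows, cols) == (16, 16)   # 8 * 2

rows, cols = get2d_deconv_output_size(8, 8, 3, 3, 2, 2, "VALID")
assert (rows, cols) == (17, 17)   # (8 - 1) * 2 + 3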
Example #2
def get2d_deconv_output_size(input_height, input_width, filter_height,
                           filter_width, row_stride, col_stride, padding_type):
    """Returns the number of rows and columns in a convolution/pooling output."""
    input_height = tensor_shape.as_dimension(input_height)
    input_width = tensor_shape.as_dimension(input_width)
    filter_height = tensor_shape.as_dimension(filter_height)
    filter_width = tensor_shape.as_dimension(filter_width)
    row_stride = int(row_stride)
    col_stride = int(col_stride)

    # Compute number of rows in the output, based on the padding.
    if input_height.value is None or filter_height.value is None:
      out_rows = None
    elif padding_type == "VALID":
      out_rows = (input_height.value - 1) * row_stride + filter_height.value
    elif padding_type == "SAME":
      out_rows = input_height.value * row_stride
    else:
      raise ValueError("Invalid value for padding: %r" % padding_type)

    # Compute number of columns in the output, based on the padding.
    if input_width.value is None or filter_width.value is None:
      out_cols = None
    elif padding_type == "VALID":
      out_cols = (input_width.value - 1) * col_stride + filter_width.value
    elif padding_type == "SAME":
      out_cols = input_width.value * col_stride

    return out_rows, out_cols
Example #3
 def testAsDimension(self):
     self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(tensor_shape.Dimension(12)))
     self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(12))
     self.assertEqual(
         tensor_shape.Dimension(None).value, tensor_shape.as_dimension(tensor_shape.Dimension(None)).value
     )
     self.assertEqual(tensor_shape.Dimension(None).value, tensor_shape.as_dimension(None).value)
Example #4
 def testAsDimension(self):
   self.assertEqual(tensor_shape.Dimension(12),
                    tensor_shape.as_dimension(tensor_shape.Dimension(12)))
   self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(12))
   self.assertEqual(
       tensor_shape.Dimension(None).value,
       tensor_shape.as_dimension(tensor_shape.Dimension(None)).value)
   self.assertEqual(tensor_shape.Dimension(None).value,
                    tensor_shape.as_dimension(None).value)
Example #5
def get2d_conv_output_size(input_height, input_width, filter_height,
                           filter_width, row_stride, col_stride, padding_type):
    """Returns the number of rows and columns in a convolution/pooling output."""
    input_height = tensor_shape.as_dimension(input_height)
    input_width = tensor_shape.as_dimension(input_width)
    filter_height = tensor_shape.as_dimension(filter_height)
    filter_width = tensor_shape.as_dimension(filter_width)
    row_stride = int(row_stride)
    col_stride = int(col_stride)

    if filter_height.value == 1 and filter_width.value == 1 and (
            row_stride == 1 and col_stride == 1):
        return input_height, input_width
    else:
        if filter_height > input_height or filter_width > input_width:
            raise ValueError(
                "filter must not be larger than the input: "
                "Filter: [%sx%s] Input: [%sx%s]" %
                (filter_height, filter_width, input_height, input_width))
        if row_stride > filter_height or col_stride > filter_width:
            raise ValueError(
                "stride must be less than or equal to filter size: "
                "stride: [%sx%s] filter: [%sx%s]" %
                (row_stride, col_stride, filter_height, filter_width))

        # Compute number of rows in the output, based on the padding.
        if input_height.value is None or filter_height.value is None:
            out_rows = None
        elif padding_type == b"VALID":
            out_rows = (
                (input_height.value - filter_height.value + row_stride) //
                row_stride)
        elif padding_type == b"SAME":
            out_rows = (input_height.value + row_stride - 1) // row_stride
        else:
            raise ValueError("Invalid value for padding: %r" % padding_type)

        # Compute number of columns in the output, based on the padding.
        if input_width.value is None or filter_width.value is None:
            out_cols = None
        elif padding_type == b"VALID":
            out_cols = (
                (input_width.value - filter_width.value + col_stride) //
                col_stride)
        elif padding_type == b"SAME":
            out_cols = (input_width.value + col_stride - 1) // col_stride

        return out_rows, out_cols
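A hedged usage sketch of the integer formulas above; note this variant compares padding_type against bytes literals, since it receives the raw serialized op attribute:

# VALID: (in - filter + stride) // stride
rows, cols = get2d_conv_output_size(10, 10, 3, 3, 2, 2, b"VALID")
assert (rows, cols) == (4, 4)

# SAME: (in + stride - 1) // stride
rows, cols = get2d_conv_output_size(10, 10, 3, 3, 2, 2, b"SAME")
assert (rows, cols) == (5, 5)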
Example #6
def _Get2DOutputSize(input_height, input_width, filter_height, filter_width,
                     row_stride, col_stride, padding_type):
    """Returns the number of rows and columns in a convolution/pooling output."""
    input_height = tensor_shape.as_dimension(input_height)
    input_width = tensor_shape.as_dimension(input_width)
    filter_height = tensor_shape.as_dimension(filter_height)
    filter_width = tensor_shape.as_dimension(filter_width)
    row_stride = int(row_stride)
    col_stride = int(col_stride)

    if filter_height.value == 1 and filter_width.value == 1 and (
            row_stride == 1 and col_stride == 1):
        return input_height, input_width
    else:
        if filter_height > input_height or filter_width > input_width:
            raise ValueError(
                "filter must not be larger than the input: "
                "Filter: [%sx%s] Input: [%sx%s]" %
                (filter_height, filter_width, input_height, input_width))
        if row_stride > filter_height or col_stride > filter_width:
            raise ValueError(
                "stride must be less than or equal to filter size: "
                "stride: [%sx%s] filter: [%sx%s]" %
                (row_stride, col_stride, filter_height, filter_width))

        # Compute number of rows in the output, based on the padding.
        if input_height.value is None or filter_height.value is None:
            out_rows = None
        elif padding_type == "VALID":
            out_rows = int(
                math.ceil((input_height.value - filter_height.value + 1.0) /
                          row_stride))
        elif padding_type == "SAME":
            out_rows = int(math.ceil(input_height.value * 1.0 / row_stride))
        else:
            raise ValueError("Invalid value for padding: %r" % padding_type)

        # Compute number of columns in the output, based on the padding.
        if input_width.value is None or filter_width.value is None:
            out_cols = None
        elif padding_type == "VALID":
            out_cols = int(
                math.ceil((input_width.value - filter_width.value + 1.0) /
                          col_stride))
        elif padding_type == "SAME":
            out_cols = int(math.ceil(input_width.value * 1.0 / col_stride))

        return out_rows, out_cols
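The math.ceil expressions here compute the same values as the pure-integer formulas in get2d_conv_output_size above; a quick standalone check (plain Python, no TensorFlow required):

import math

for in_dim in range(1, 32):
    for k_dim in range(1, in_dim + 1):
        for stride in range(1, k_dim + 1):
            # VALID: ceil((in - k + 1) / s) == (in - k + s) // s
            assert (int(math.ceil((in_dim - k_dim + 1.0) / stride))
                    == (in_dim - k_dim + stride) // stride)
            # SAME: ceil(in / s) == (in + s - 1) // s
            assert (int(math.ceil(in_dim * 1.0 / stride))
                    == (in_dim + stride - 1) // stride)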
Example #7
def _Get2DOutputSize(input_height, input_width, filter_height, filter_width,
                     row_stride, col_stride, padding_type):
  """Returns the number of rows and columns in a convolution/pooling output."""
  input_height = tensor_shape.as_dimension(input_height)
  input_width = tensor_shape.as_dimension(input_width)
  filter_height = tensor_shape.as_dimension(filter_height)
  filter_width = tensor_shape.as_dimension(filter_width)
  row_stride = int(row_stride)
  col_stride = int(col_stride)

  if filter_height.value == 1 and filter_width.value == 1 and (
      row_stride == 1 and col_stride == 1):
    return input_height, input_width
  else:
    if filter_height > input_height or filter_width > input_width:
      raise ValueError("filter must not be larger than the input: "
                       "Filter: [%sx%s] Input: [%sx%s]" %
                       (filter_height, filter_width, input_height, input_width))
    if row_stride > filter_height or col_stride > filter_width:
      raise ValueError("stride must be less than or equal to filter size: "
                       "stride: [%sx%s] filter: [%sx%s]" %
                       (row_stride, col_stride, filter_height, filter_width))

    # Compute number of rows in the output, based on the padding.
    if input_height.value is None or filter_height.value is None:
      out_rows = None
    elif padding_type == "VALID":
      out_rows = int(
          math.ceil((input_height.value - filter_height.value + 1.0)
                    / row_stride))
    elif padding_type == "SAME":
      out_rows = int(math.ceil(input_height.value * 1.0
                               / row_stride))
    else:
      raise ValueError("Invalid value for padding: %r" % padding_type)

    # Compute number of columns in the output, based on the padding.
    if input_width.value is None or filter_width.value is None:
      out_cols = None
    elif padding_type == "VALID":
      out_cols = int(
          math.ceil((input_width.value - filter_width.value + 1.0)
                    / col_stride))
    elif padding_type == "SAME":
      out_cols = int(math.ceil(input_width.value * 1.0 / col_stride))

    return out_rows, out_cols
Example #8
    def __cond_encoder(self,
                       scope,
                       input_tensor,
                       bn_is_training,
                       keep_prob,
                       in_nch=1,
                       reuse=False):

        lf = self.layer_factory
        input_tensor2d = tf.reshape(input_tensor, [self.flags.batch_size, \
         self.flags.img_height, self.flags.img_width, 1])
        nch = tensor_shape.as_dimension(input_tensor2d.get_shape()[3]).value
        nout = self.flags.hidden_size

        if not reuse:
            W_conv1 = lf.weight_variable(name='W_conv1_cond',
                                         shape=[5, 5, nch, 128])
            W_conv2 = lf.weight_variable(name='W_conv2_cond',
                                         shape=[5, 5, 128, 256])
            W_conv3 = lf.weight_variable(name='W_conv3_cond',
                                         shape=[5, 5, 256, 512])
            W_conv4 = lf.weight_variable(
                name='W_conv4_cond', shape=[4, 4, 512, self.flags.hidden_size])

            b_conv1 = lf.bias_variable(name='b_conv1_cond', shape=[128])
            b_conv2 = lf.bias_variable(name='b_conv2_cond', shape=[256])
            b_conv3 = lf.bias_variable(name='b_conv3_cond', shape=[512])
            b_conv4 = lf.bias_variable(name='b_conv4_cond',
                                       shape=[self.flags.hidden_size])
        else:
            W_conv1 = lf.weight_variable(name='W_conv1_cond')
            W_conv2 = lf.weight_variable(name='W_conv2_cond')
            W_conv3 = lf.weight_variable(name='W_conv3_cond')
            W_conv4 = lf.weight_variable(name='W_conv4_cond')

            b_conv1 = lf.bias_variable(name='b_conv1_cond')
            b_conv2 = lf.bias_variable(name='b_conv2_cond')
            b_conv3 = lf.bias_variable(name='b_conv3_cond')
            b_conv4 = lf.bias_variable(name='b_conv4_cond')

        conv1 = tf.nn.relu(
            lf.conv2d(input_tensor2d, W_conv1, stride=2) + b_conv1)
        conv1_norm = lf.batch_norm_aiuiuc_wrapper(conv1, bn_is_training, \
          'BN1_cond', reuse_vars=reuse)

        conv2 = tf.nn.relu(lf.conv2d(conv1_norm, W_conv2, stride=2) + b_conv2)
        conv2_norm = lf.batch_norm_aiuiuc_wrapper(conv2, bn_is_training, \
          'BN2_cond', reuse_vars=reuse)

        conv3 = tf.nn.relu(lf.conv2d(conv2_norm, W_conv3, stride=2) + b_conv3)
        conv3_norm = lf.batch_norm_aiuiuc_wrapper(conv3, bn_is_training, \
          'BN3_cond', reuse_vars=reuse)

        conv4 = tf.nn.relu(lf.conv2d(conv3_norm, W_conv4, stride=2) + b_conv4)
        conv4_norm = lf.batch_norm_aiuiuc_wrapper(conv4, bn_is_training, \
          'BN4_cond', reuse_vars=reuse)

        return conv1_norm, conv2_norm, conv3_norm, conv4_norm
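The as_dimension(...).value idiom above is how the encoder reads a static channel count off a tensor's shape. A minimal standalone sketch of the same idiom (a hypothetical tensor, not part of the original class):

import tensorflow as tf
from tensorflow.python.framework import tensor_shape

images = tf.zeros([4, 64, 64, 3])
nch = tensor_shape.as_dimension(images.get_shape()[3]).value
assert nch == 3   # a concrete int, or None when the dimension is unknown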
Example #9
def get2d_conv_output_size(input_height, input_width, filter_height,
                           filter_width, row_stride, col_stride, padding_type):
  """Returns the number of rows and columns in a convolution/pooling output."""
  input_height = tensor_shape.as_dimension(input_height)
  input_width = tensor_shape.as_dimension(input_width)
  filter_height = tensor_shape.as_dimension(filter_height)
  filter_width = tensor_shape.as_dimension(filter_width)
  row_stride = int(row_stride)
  col_stride = int(col_stride)

  if filter_height.value == 1 and filter_width.value == 1 and (
      row_stride == 1 and col_stride == 1):
    return input_height, input_width
  else:
    if filter_height > input_height or filter_width > input_width:
      raise ValueError(
          "filter must not be larger than the input: "
          "Filter: [%sx%s] Input: [%sx%s]"
          % (filter_height, filter_width, input_height, input_width))
    if row_stride > filter_height or col_stride > filter_width:
      raise ValueError("stride must be less than or equal to filter size: "
                       "stride: [%sx%s] filter: [%sx%s]" %
                       (row_stride, col_stride, filter_height, filter_width))

    # Compute number of rows in the output, based on the padding.
    if input_height.value is None or filter_height.value is None:
      out_rows = None
    elif padding_type == b"VALID":
      out_rows = ((input_height.value - filter_height.value + row_stride) //
                  row_stride)
    elif padding_type == b"SAME":
      out_rows = (input_height.value + row_stride - 1) // row_stride
    else:
      raise ValueError("Invalid value for padding: %r" % padding_type)

    # Compute number of columns in the output, based on the padding.
    if input_width.value is None or filter_width.value is None:
      out_cols = None
    elif padding_type == b"VALID":
      out_cols = ((input_width.value - filter_width.value + col_stride) //
                  col_stride)
    elif padding_type == b"SAME":
      out_cols = (input_width.value + col_stride - 1) // col_stride

    return out_rows, out_cols
Example #10
 def __init__(self, dims, obj=None):
     super().__init__(obj=obj)
     self.dims = dims
     if self.dims is not None and not isvar(self.dims):
         # TODO: Just like the comment in `TFlowMetaTensor.inputs`,
         # `self.dims` should be something like `cons(var(), ...)` and not a
         # straight logic variable.
         self.dims = tuple(
             tensor_shape.as_dimension(d).value for d in self.dims)
Example #11
    def zero_state(self, batch_size, dtype):
        state = super().zero_state(batch_size, dtype)

        assert isinstance(batch_size, list)

        input_shape = tensor_shape.as_shape(batch_size + [tensor_shape.as_dimension(self.input_size)])
        if self._keep_i < 1.0:
            noise_i = tf.random.uniform(shape=input_shape, dtype=tf.float32) / self._keep_i
        else:
            noise_i = tf.ones(shape=input_shape, dtype=tf.float32)

        state_shape = tensor_shape.as_shape(batch_size + [tensor_shape.as_dimension(self._num_units)])
        if self._keep_h < 1.0:
            noise_h = tf.random.uniform(shape=state_shape, dtype=tf.float32) / self._keep_h
        else:
            noise_h = tf.ones(shape=state_shape, dtype=tf.float32)

        return [state, noise_i, noise_h]
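A hedged sketch of the shape composition used in zero_state above: as_shape accepts a mixed list of ints and Dimensions, so the batch dimensions can be concatenated with an as_dimension-wrapped feature size (the values below are illustrative):

from tensorflow.python.framework import tensor_shape

batch_size = [32]
input_shape = tensor_shape.as_shape(
    batch_size + [tensor_shape.as_dimension(128)])
assert input_shape.as_list() == [32, 128]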
Example #12
def get_conv_output_size(input_size, filter_size, strides, padding_type):
    """Returns the spatial size of a n-d convolution/pooling output."""
    input_size = tuple(
        [tensor_shape.as_dimension(x).value for x in input_size])
    filter_size = tuple(
        [tensor_shape.as_dimension(x).value for x in filter_size])
    strides = [int(x) for x in strides]

    if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
        return input_size

    if any(x is not None and y is not None and x > y
           for x, y in zip(filter_size, input_size)):
        raise ValueError("Filter must not be larger than the input: "
                         "Filter: %r Input: %r" % (filter_size, input_size))

    if padding_type == b"VALID":

        def _valid(in_dim, k_dim, s_dim):
            if in_dim is not None and k_dim is not None:
                return (in_dim - k_dim + s_dim) // s_dim
            else:
                return None

        output_size = [
            _valid(in_dim, k_dim, s_dim)
            for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
        ]
    elif padding_type == b"SAME":

        def _same(in_dim, s_dim):
            if in_dim is not None:
                return (in_dim + s_dim - 1) // s_dim
            else:
                return None

        output_size = [
            _same(in_dim, s_dim) for in_dim, s_dim in zip(input_size, strides)
        ]
    else:
        raise ValueError("Invalid padding: %r" % padding_type)

    return tuple(output_size)
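A short usage sketch of the n-d variant; unknown dimensions (None) propagate to the output instead of raising:

out = get_conv_output_size((10, 10, 10), (3, 3, 3), (2, 2, 2), b"VALID")
assert out == (4, 4, 4)    # per axis: (10 - 3 + 2) // 2

out = get_conv_output_size((None, 10), (3, 3), (1, 1), b"SAME")
assert out == (None, 10)   # unknown input dim stays unknown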
Example #13
def _cudnn_rnn_forward_shape(op):
    """Shape function for the CudnnRNN forward operation.

  Args:
    op: the forward op.
  Returns:
    A list of shapes for the forward operation.
  """
    input_shape = op.inputs[0].get_shape()
    input_h_shape = op.inputs[1].get_shape()
    seq_length = input_shape[0]
    batch_size = input_shape[1]
    num_units = input_h_shape[2]
    direction = op.get_attr("direction")
    rnn_mode = op.get_attr("rnn_mode")
    dir_count = (tensor_shape.as_dimension(2) if direction == "bidirectional"
                 else tensor_shape.as_dimension(1))
    output_shape = [seq_length, batch_size, dir_count * num_units]
    output_h_shape = input_h_shape
    output_c_shape = output_h_shape if rnn_mode == "lstm" else []
    return [output_shape, output_h_shape, output_c_shape, None]
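Wrapping dir_count with as_dimension keeps dir_count * num_units well-defined Dimension arithmetic even when num_units is unknown; a hedged illustration:

from tensorflow.python.framework import tensor_shape

dir_count = tensor_shape.as_dimension(2)
assert (dir_count * tensor_shape.Dimension(128)).value == 256
assert (dir_count * tensor_shape.Dimension(None)).value is None   # unknown propagates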
Example #14
def _cudnn_rnn_forward_shape(op):
  """Shape function for the CudnnRNN forward operation.

  Args:
    op: the forward op.
  Returns:
    A list of shapes for the forward operation.
  """
  input_shape = op.inputs[0].get_shape()
  input_h_shape = op.inputs[1].get_shape()
  seq_length = input_shape[0]
  batch_size = input_shape[1]
  num_units = input_h_shape[2]
  direction = op.get_attr("direction")
  rnn_mode = op.get_attr("rnn_mode")
  dir_count = tensor_shape.as_dimension(
      2) if direction == "bidirectional" else tensor_shape.as_dimension(1)
  output_shape = [seq_length, batch_size, dir_count * num_units]
  output_h_shape = input_h_shape
  output_c_shape = output_h_shape if rnn_mode == "lstm" else []
  return [output_shape, output_h_shape, output_c_shape, None]
Example #15
def get_conv_output_size(input_size, filter_size, strides, padding_type):
  """Returns the spatial size of a n-d convolution/pooling output."""
  input_size = tuple([tensor_shape.as_dimension(x).value for x in input_size])
  filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])
  strides = [int(x) for x in strides]

  if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
    return input_size

  if any(x is not None and y is not None and x > y for x, y in
         zip(filter_size, input_size)):
    raise ValueError("Filter must not be larger than the input: "
                     "Filter: %r Input: %r" % (filter_size, input_size))

  if padding_type == b"VALID":

    def _valid(in_dim, k_dim, s_dim):
      if in_dim is not None and k_dim is not None:
        return (in_dim - k_dim + s_dim) // s_dim
      else:
        return None

    output_size = [
        _valid(in_dim, k_dim, s_dim)
        for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
    ]
  elif padding_type == b"SAME":

    def _same(in_dim, s_dim):
      if in_dim is not None:
        return (in_dim + s_dim - 1) // s_dim
      else:
        return None

    output_size = [_same(in_dim, s_dim)
                   for in_dim, s_dim in zip(input_size, strides)]
  else:
    raise ValueError("Invalid padding: %r" % padding_type)

  return tuple(output_size)
Example #16
def get_static_batch_size(layer):
  """Gets the static batch size of a Layer.

  Arguments:
    layer: a `Layer` instance.

  Returns:
    The static batch size of a Layer.
  """
  batch_input_shape, _ = get_input_shape_and_dtype(layer)
  if batch_input_shape is not None:
    return tensor_shape.as_dimension(batch_input_shape[0]).value
  return None
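The helper relies on as_dimension normalizing whatever sits in batch_input_shape[0] (an int, None, or a Dimension) into a Dimension whose .value is a concrete int or None; a quick sketch:

from tensorflow.python.framework import tensor_shape

assert tensor_shape.as_dimension(32).value == 32      # fixed batch size
assert tensor_shape.as_dimension(None).value is None  # dynamic batch size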
Example #17
def get_static_batch_size(layer):
    """Gets the static batch size of a Layer.

  Arguments:
    layer: a `Layer` instance.

  Returns:
    The static batch size of a Layer.
  """
    batch_input_shape, _ = get_input_shape_and_dtype(layer)
    if batch_input_shape is not None:
        return tensor_shape.as_dimension(batch_input_shape[0]).value
    return None
Example #18
  def set_shard_dimension(self, shard_dimension):
    """Sets the shard dimension for the current policy.

    If the policy has been frozen then shard_dimension must match the
    existing setting.

    Args:
      shard_dimension: The shard dimension to use in the policy.

    Raises:
      ValueError: If the policy has been frozen and shard_dimension
        differs from the frozen value, or shard_dimension can't be
        interpreted as a Dimension.
    """
    if self._frozen:
      if self._shard_dimension != shard_dimension:
        raise ValueError(
            "Can't set shard dimension to %d since it has been frozen to "
            "use %d." % (shard_dimension, self._shard_dimension))
    else:
      self._shard_dimension = tensor_shape.as_dimension(shard_dimension)
Example #19
 def _fill_default_values(self):
   if self._number_of_shards is None:
     self._number_of_shards = _DEFAULT_NUMBER_OF_SHARDS
   if self._shard_dimension is None:
     self._shard_dimension = tensor_shape.as_dimension(
         _DEFAULT_SHARD_DIMENSION)
Example #20
 def _is_shape_component(element):
     value = tensor_shape.as_dimension(element).value
     return value is None or isinstance(value, int)
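Hedged usage of the predicate; any value that as_dimension accepts as a single shape entry passes:

from tensorflow.python.framework import tensor_shape

assert _is_shape_component(4)                          # plain int
assert _is_shape_component(None)                       # unknown dimension
assert _is_shape_component(tensor_shape.Dimension(7))  # Dimension wrapper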
Example #21
    def __encoder(self,
                  scope,
                  input_tensor,
                  bn_is_training,
                  keep_prob,
                  in_nch=2,
                  reuse=False):

        lf = self.layer_factory

        input_tensor2d = tf.reshape(input_tensor, [self.flags.batch_size, \
          self.flags.img_height, self.flags.img_width, in_nch])

        nch = tensor_shape.as_dimension(input_tensor2d.get_shape()[3]).value

        if not reuse:
            W_conv1 = lf.weight_variable(name='W_conv1',
                                         shape=[5, 5, nch, 128])
            W_conv2 = lf.weight_variable(name='W_conv2',
                                         shape=[5, 5, 128, 256])
            W_conv3 = lf.weight_variable(name='W_conv3',
                                         shape=[5, 5, 256, 512])
            W_conv4 = lf.weight_variable(name='W_conv4',
                                         shape=[4, 4, 512, 1024])
            W_fc1 = lf.weight_variable(
                name='W_fc1', shape=[4 * 4 * 1024, self.flags.hidden_size * 2])

            b_conv1 = lf.bias_variable(name='b_conv1', shape=[128])
            b_conv2 = lf.bias_variable(name='b_conv2', shape=[256])
            b_conv3 = lf.bias_variable(name='b_conv3', shape=[512])
            b_conv4 = lf.bias_variable(name='b_conv4', shape=[1024])
            b_fc1 = lf.bias_variable(name='b_fc1',
                                     shape=[self.flags.hidden_size * 2])
        else:
            W_conv1 = lf.weight_variable(name='W_conv1')
            W_conv2 = lf.weight_variable(name='W_conv2')
            W_conv3 = lf.weight_variable(name='W_conv3')
            W_conv4 = lf.weight_variable(name='W_conv4')
            W_fc1 = lf.weight_variable(name='W_fc1')

            b_conv1 = lf.bias_variable(name='b_conv1')
            b_conv2 = lf.bias_variable(name='b_conv2')
            b_conv3 = lf.bias_variable(name='b_conv3')
            b_conv4 = lf.bias_variable(name='b_conv4')
            b_fc1 = lf.bias_variable(name='b_fc1')

        conv1 = tf.nn.relu(
            lf.conv2d(input_tensor2d, W_conv1, stride=2) + b_conv1)
        conv1_norm = lf.batch_norm_aiuiuc_wrapper(conv1, bn_is_training, \
         'BN1', reuse_vars=reuse)

        conv2 = tf.nn.relu(lf.conv2d(conv1_norm, W_conv2, stride=2) + b_conv2)
        conv2_norm = lf.batch_norm_aiuiuc_wrapper(conv2, bn_is_training, \
         'BN2', reuse_vars=reuse)

        conv3 = tf.nn.relu(lf.conv2d(conv2_norm, W_conv3, stride=2) + b_conv3)
        conv3_norm = lf.batch_norm_aiuiuc_wrapper(conv3, bn_is_training, \
         'BN3', reuse_vars=reuse)

        conv4 = tf.nn.relu(lf.conv2d(conv3_norm, W_conv4, stride=2) + b_conv4)
        conv4_norm = lf.batch_norm_aiuiuc_wrapper(conv4, bn_is_training, \
         'BN4', reuse_vars=reuse)

        dropout1 = tf.nn.dropout(conv4_norm, keep_prob)
        flatten1 = tf.reshape(dropout1, [-1, 4 * 4 * 1024])

        fc1 = tf.matmul(flatten1, W_fc1) + b_fc1

        return fc1
Example #22
 def _is_shape_component(element):
   value = tensor_shape.as_dimension(element).value
   return value is None or isinstance(value, int)