Example #1
def block_m3(inputs, filters, training, strides, data_format):
    """Bottleneck block with a downsampling projection shortcut.

    The shortcut average-pools the input and then applies a 1x1 convolution
    to 1664 channels; the main path uses 256 bottleneck filters. The
    `filters` argument is kept for interface compatibility but is unused.
    """

    resnet_log_helper.log_begin_block(input_tensor=inputs,
                                      block_type=mlperf_log.BOTTLENECK_BLOCK)

    avg_pool = tf.layers.average_pooling2d(inputs=inputs,
                                           pool_size=strides,
                                           strides=strides,
                                           padding='SAME',
                                           data_format=data_format)
    shortcut = conv2d_fixed_padding(inputs=avg_pool,
                                    filters=1664,
                                    kernel_size=1,
                                    strides=1,
                                    data_format=data_format)
    resnet_log_helper.log_projection(input_tensor=inputs,
                                     output_tensor=shortcut)
    shortcut = batch_norm(inputs=shortcut,
                          training=training,
                          data_format=data_format)

    inputs = conv2d_fixed_padding(inputs=inputs,
                                  filters=256,
                                  kernel_size=1,
                                  strides=1,
                                  data_format=data_format)
    inputs = batch_norm(inputs, training, data_format)

    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
    inputs = tf.nn.relu(inputs)

    inputs = conv2d_fixed_padding(inputs=inputs,
                                  filters=256,
                                  kernel_size=3,
                                  strides=strides,
                                  data_format=data_format)
    inputs = batch_norm(inputs, training, data_format)

    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
    inputs = tf.nn.relu(inputs)

    inputs = conv2d_fixed_padding(inputs=inputs,
                                  filters=1664,
                                  kernel_size=1,
                                  strides=1,
                                  data_format=data_format)
    inputs = batch_norm(inputs, training, data_format)

    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_SHORTCUT_ADD)
    inputs += shortcut

    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
    inputs = tf.nn.relu(inputs)

    resnet_log_helper.log_end_block(output_tensor=inputs)
    return inputs
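
What sets this block apart is the projection shortcut: rather than a strided 1x1 convolution, it average-pools the input down to the target resolution and then applies a stride-1 1x1 convolution to reach 1664 channels. A minimal, self-contained sketch of just that shortcut, written with plain tf.keras layers as stand-ins for conv2d_fixed_padding and batch_norm (which are not shown in this example):

import tensorflow as tf

def avgpool_projection(inputs, filters_out, strides, data_format='channels_last'):
    """Average-pool to the target resolution, then 1x1-convolve to match channels."""
    axis = 1 if data_format == 'channels_first' else -1
    x = tf.keras.layers.AveragePooling2D(
        pool_size=strides, strides=strides, padding='same',
        data_format=data_format)(inputs)
    x = tf.keras.layers.Conv2D(
        filters_out, kernel_size=1, strides=1, padding='same',
        use_bias=False, data_format=data_format)(x)
    # Batch-normalize the shortcut branch, mirroring the block above.
    return tf.keras.layers.BatchNormalization(axis=axis)(x)

The pooling step handles the spatial downsampling, so the 1x1 convolution only changes the channel count; this avoids the strided 1x1 convolution that would otherwise discard three quarters of the shortcut's activations.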
Example #2
def block_m4(inputs, filters, training, strides, data_format):
    """Bottleneck block with an identity shortcut.

    The main path uses 384 bottleneck filters and expands to 1664 output
    channels. The `filters` argument is kept for interface compatibility
    but is unused.
    """

    resnet_log_helper.log_begin_block(input_tensor=inputs,
                                      block_type=mlperf_log.BOTTLENECK_BLOCK)

    shortcut = inputs

    inputs = conv2d_fixed_padding(inputs=inputs,
                                  filters=384,
                                  kernel_size=1,
                                  strides=1,
                                  data_format=data_format)
    inputs = batch_norm(inputs, training, data_format)

    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
    inputs = tf.nn.relu(inputs)

    inputs = conv2d_fixed_padding(
        inputs=inputs,
        filters=384,
        kernel_size=3,
        strides=strides,
        data_format=data_format)
    inputs = batch_norm(inputs, training, data_format)

    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
    inputs = tf.nn.relu(inputs)

    inputs = conv2d_fixed_padding(inputs=inputs,
                                  filters=1664,
                                  kernel_size=1,
                                  strides=1,
                                  data_format=data_format)
    inputs = batch_norm(inputs, training, data_format)

    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_SHORTCUT_ADD)
    inputs += shortcut

    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
    inputs = tf.nn.relu(inputs)

    resnet_log_helper.log_end_block(output_tensor=inputs)
    return inputs
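
Both examples above call conv2d_fixed_padding, which is not defined here. In the TensorFlow ResNet reference code, this helper pads the input explicitly whenever strides > 1, so the output size depends only on the kernel size rather than on the input's parity, and then convolves with 'valid' padding. A rough, self-contained sketch of that behavior (assuming TensorFlow 2.x; illustrative, not the original implementation):

import tensorflow as tf

def fixed_padding(inputs, kernel_size, data_format='channels_last'):
    # Pad symmetrically (favoring the end) so a strided conv sees full windows.
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    if data_format == 'channels_first':
        paddings = [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]
    else:
        paddings = [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]
    return tf.pad(inputs, paddings)

def conv2d_fixed_padding(inputs, filters, kernel_size, strides,
                         data_format='channels_last'):
    # Explicit padding only when downsampling; otherwise 'same' padding suffices.
    if strides > 1:
        inputs = fixed_padding(inputs, kernel_size, data_format)
    return tf.keras.layers.Conv2D(
        filters=filters, kernel_size=kernel_size, strides=strides,
        padding='same' if strides == 1 else 'valid',
        use_bias=False, data_format=data_format)(inputs)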
Example #3
def _bottleneck_block_v1(inputs, filters, training, projection_shortcut,
                         strides, data_format):
  """A single block for ResNet v1, with a bottleneck.

  Similar to _building_block_v1(), except using the "bottleneck" blocks
  (convolution, then batch normalization, then ReLU) described in:
    Deep Residual Learning for Image Recognition
    https://arxiv.org/pdf/1512.03385.pdf
    by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the convolutions.
    training: A Boolean for whether the model is in training or inference
      mode. Needed for batch normalization.
    projection_shortcut: The function to use for projection shortcuts
      (typically a 1x1 convolution when downsampling the input).
    strides: The block's stride. If greater than 1, this block will ultimately
      downsample the input.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    The output tensor of the block; its shape matches `inputs` unless a
    projection shortcut changes the spatial size or channel count.
  """
  resnet_log_helper.log_begin_block(
      input_tensor=inputs, block_type=mlperf_log.BOTTLENECK_BLOCK)

  shortcut = inputs

  if projection_shortcut is not None:
    shortcut = projection_shortcut(inputs)
    resnet_log_helper.log_projection(input_tensor=inputs,
                                     output_tensor=shortcut)
    shortcut = batch_norm(inputs=shortcut, training=training,
                          data_format=data_format)

  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=1, strides=1,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)

  mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
  inputs = tf.nn.relu(inputs)

  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)

  mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
  inputs = tf.nn.relu(inputs)

  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)

  mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_SHORTCUT_ADD)
  inputs += shortcut

  mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
  inputs = tf.nn.relu(inputs)

  resnet_log_helper.log_end_block(output_tensor=inputs)
  return inputs
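
In the reference model this block is not called directly; a block-layer helper builds a projection shortcut for the first block of each group (which may change the stride and channel count) and passes None for the remaining blocks so they keep the identity shortcut. A simplified sketch of that calling pattern, reusing _bottleneck_block_v1 and conv2d_fixed_padding from above (the real helper also takes the block function and a name):

def block_layer(inputs, filters, blocks, strides, training, data_format):
  # Bottleneck blocks end with four times the number of bottleneck filters.
  filters_out = 4 * filters

  def projection_shortcut(inputs):
    return conv2d_fixed_padding(
        inputs=inputs, filters=filters_out, kernel_size=1,
        strides=strides, data_format=data_format)

  # Only the first block of the group projects and (possibly) downsamples.
  inputs = _bottleneck_block_v1(inputs, filters, training,
                                projection_shortcut, strides, data_format)
  for _ in range(1, blocks):
    inputs = _bottleneck_block_v1(inputs, filters, training, None, 1,
                                  data_format)
  return inputs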
Example #4
def bottleneck_block(inputs, filters, is_training, strides,
                     use_projection=False, data_format='channels_first'):
  """Bottleneck block variant for residual networks with BN after convolutions.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]`.
    filters: `int` number of filters for the first two convolutions. Note that
        the third and final convolution will use 4 times as many filters.
    is_training: `bool` for whether the model is in training.
    strides: `int` block stride. If greater than 1, this block will ultimately
        downsample the input.
    use_projection: `bool` for whether this block should use a projection
        shortcut (versus the default identity shortcut). This is usually `True`
        for the first block of a block group, which may change the number of
        filters and the resolution.
    data_format: `str` either "channels_first" for `[batch, channels, height,
        width]` or "channels_last" for `[batch, height, width, channels]`.

  Returns:
    The output `Tensor` of the block.
  """
  if is_training and FLAGS.mlperf_logging:
    mlperf_log.resnet_print(
        key=mlperf_log.MODEL_HP_BLOCK_TYPE, value=mlperf_log.BOTTLENECK_BLOCK)
    resnet_log_helper.log_begin_block(
        input_tensor=inputs, block_type=mlperf_log.BOTTLENECK_BLOCK)
  shortcut = inputs
  if use_projection:
    # Projection shortcut only in first block within a group. Bottleneck blocks
    # end with 4 times the number of filters.
    filters_out = 4 * filters
    shortcut = conv2d_fixed_padding(
        inputs=inputs,
        filters=filters_out,
        kernel_size=1,
        strides=strides,
        is_training=is_training,
        data_format=data_format)
    shortcut = batch_norm_relu(shortcut, is_training, relu=False,
                               data_format=data_format)
    if is_training and FLAGS.mlperf_logging:
      resnet_log_helper.log_projection(
          input_tensor=inputs, output_tensor=shortcut)

  inputs = conv2d_fixed_padding(
      inputs=inputs,
      filters=filters,
      kernel_size=1,
      strides=1,
      is_training=is_training,
      data_format=data_format)
  inputs = batch_norm_relu(inputs, is_training, data_format=data_format)

  inputs = conv2d_fixed_padding(
      inputs=inputs,
      filters=filters,
      kernel_size=3,
      strides=strides,
      is_training=is_training,
      data_format=data_format)
  inputs = batch_norm_relu(inputs, is_training, data_format=data_format)

  inputs = conv2d_fixed_padding(
      inputs=inputs,
      filters=4 * filters,
      kernel_size=1,
      strides=1,
      is_training=is_training,
      data_format=data_format)
  inputs = batch_norm_relu(inputs, is_training, relu=False, init_zero=True,
                           data_format=data_format)

  output = tf.nn.relu(inputs + shortcut)
  if is_training and FLAGS.mlperf_logging:
    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_SHORTCUT_ADD)
    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
    resnet_log_helper.log_end_block(output_tensor=output)

  return output
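
This variant leans on a batch_norm_relu helper that is not shown. The relu=False, init_zero=True call after the last convolution is the usual zero-gamma trick: the final batch norm's scale is initialized to zero, so each residual branch starts out as an identity mapping and the shortcut addition happens before the closing ReLU. A minimal sketch of such a helper, assuming tf.keras (names and defaults are illustrative, not the original code):

import tensorflow as tf

def batch_norm_relu(inputs, is_training, relu=True, init_zero=False,
                    data_format='channels_first'):
  # Zero-initialized gamma makes the residual branch a no-op at initialization.
  gamma_initializer = 'zeros' if init_zero else 'ones'
  axis = 1 if data_format == 'channels_first' else -1
  inputs = tf.keras.layers.BatchNormalization(
      axis=axis, momentum=0.9, epsilon=1e-5,
      gamma_initializer=gamma_initializer)(inputs, training=is_training)
  return tf.nn.relu(inputs) if relu else inputs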
Example #5
def log_begin_block(self, input_tensor, block_type):
    if self.model == 'resnet50':
        resnet_log_helper.log_begin_block(input_tensor, block_type)