Example #1
def _shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward,
                        is_training):
  """Building a 2 branching convnet."""
  x = tf.nn.relu(x)
  x = ops.conv2d(x, output_filters, 3, stride=stride, scope='conv1')
  x = ops.batch_norm(x, scope='bn1')
  x = tf.nn.relu(x)
  x = ops.conv2d(x, output_filters, 3, scope='conv2')
  x = ops.batch_norm(x, scope='bn2')
  if is_training:
    # Forward pass scales the branch by rand_forward, while gradients flow
    # through rand_backward only, via the tf.stop_gradient trick.
    x = x * rand_backward + tf.stop_gradient(x * rand_forward -
                                             x * rand_backward)
  else:
    # At eval time, scale by the expected shake coefficient (0.5).
    x *= 1.0 / 2
  return x
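For context, a caller is expected to sample per-example shake coefficients and build two such branches. A minimal sketch of such a caller, assuming TF 1.x; the sampling and names shown here are illustrative, not taken from the code above:

batch_size = tf.shape(x)[0]
# Per-example coefficients in [0, 1); the singleton dims broadcast over HWC.
rand_forward = tf.random_uniform(
    [batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
rand_backward = tf.random_uniform(
    [batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
with tf.variable_scope('branch_1'):
  branch1 = _shake_shake_branch(x, output_filters, stride,
                                rand_forward, rand_backward, is_training)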
Example #2
def _shake_shake_skip_connection(x, output_filters, stride):
  """Adds a residual connection to the filter x for the shake-shake model."""
  curr_filters = int(x.shape[3])
  if curr_filters == output_filters:
    return x
  stride_spec = ops.stride_arr(stride, stride)
  # Skip path 1
  path1 = tf.nn.avg_pool(
      x, [1, 1, 1, 1], stride_spec, 'VALID', data_format='NHWC')
  path1 = ops.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')

  # Skip path 2
  # Shift the input by one pixel: pad with zeros, then crop, so the strided
  # average pool in path 2 samples pixels that path 1 misses.
  pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
  path2 = tf.pad(x, pad_arr)[:, 1:, 1:, :]
  concat_axis = 3

  path2 = tf.nn.avg_pool(
      path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format='NHWC')
  path2 = ops.conv2d(path2, int(output_filters / 2), 1, scope='path2_conv')

  # Concat and apply BN
  final_path = tf.concat(values=[path1, path2], axis=concat_axis)
  final_path = ops.batch_norm(final_path, scope='final_path_bn')
  return final_path
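A block then typically adds the two shake-scaled branches from Example #1 to this skip path. A hypothetical sketch, where branch1 and branch2 are assumed to be the outputs of _shake_shake_branch built under two variable scopes:

res = _shake_shake_skip_connection(x, output_filters, stride)
x = res + branch1 + branch2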
Example #3
def build_shake_shake_model(images, num_classes, hparams, is_training):
  """Builds the Shake-Shake model.

  Build the Shake-Shake model from https://arxiv.org/abs/1705.07485.

  Args:
    images: Tensor of images that will be fed into the Shake-Shake model.
    num_classes: Number of classes that the model needs to predict.
    hparams: tf.HParams object that contains additional hparams needed to
      construct the model. In this case it is the `shake_shake_widen_factor`
      that is used to determine how many filters the model has.
    is_training: Whether the model is training.

  Returns:
    The logits of the Shake-Shake model.
  """
  depth = 26
  k = hparams.shake_shake_widen_factor  # The widen factor
  n = int((depth - 2) / 6)
  x = images

  x = ops.conv2d(x, 16, 3, scope='init_conv')
  x = ops.batch_norm(x, scope='init_bn')
  with tf.variable_scope('L1'):
    x = _shake_shake_layer(x, 16 * k, n, 1, is_training)
  with tf.variable_scope('L2'):
    x = _shake_shake_layer(x, 32 * k, n, 2, is_training)
  with tf.variable_scope('L3'):
    x = _shake_shake_layer(x, 64 * k, n, 2, is_training)
  x = tf.nn.relu(x)
  x = ops.global_avg_pool(x)

  # Fully connected
  logits = ops.fc(x, num_classes)
  return logits
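A minimal usage sketch, assuming TF 1.x and CIFAR-sized inputs. Constructing the hparams via tf.contrib.training.HParams is an assumption here, since only shake_shake_widen_factor is read above:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
hparams = tf.contrib.training.HParams(shake_shake_widen_factor=2)
# depth 26 with k = 2 gives the Shake-Shake-26 2x32d configuration.
logits = build_shake_shake_model(images, num_classes=10, hparams=hparams,
                                 is_training=True)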
Example #4
def bottleneck_layer(x, n, stride, prob, is_training, alpha, beta):
    """Bottleneck layer for shake drop model."""
    assert alpha[1] > alpha[0]
    assert beta[1] > beta[0]
    with tf.variable_scope('bottleneck_{}'.format(prob)):
        input_layer = x
        x = ops.batch_norm(x, scope='bn_1_pre')
        x = ops.conv2d(x, n, 1, scope='1x1_conv_contract')
        x = ops.batch_norm(x, scope='bn_1_post')
        x = tf.nn.relu(x)
        x = ops.conv2d(x, n, 3, stride=stride, scope='3x3')
        x = ops.batch_norm(x, scope='bn_2')
        x = tf.nn.relu(x)
        x = ops.conv2d(x, n * 4, 1, scope='1x1_conv_expand')
        x = ops.batch_norm(x, scope='bn_3')

        # Apply ShakeDrop regularization.
        # Sample a per-example Bernoulli gate that is 1 with probability
        # `prob`: floor(prob + U[0, 1)) equals 1 iff the uniform draw
        # is at least 1 - prob.
        if is_training:
            batch_size = tf.shape(x)[0]
            bern_shape = [batch_size, 1, 1, 1]
            random_tensor = prob
            random_tensor += tf.random_uniform(bern_shape, dtype=tf.float32)
            binary_tensor = tf.floor(random_tensor)

            alpha_values = tf.random_uniform([batch_size, 1, 1, 1],
                                             minval=alpha[0],
                                             maxval=alpha[1],
                                             dtype=tf.float32)
            beta_values = tf.random_uniform([batch_size, 1, 1, 1],
                                            minval=beta[0],
                                            maxval=beta[1],
                                            dtype=tf.float32)
            # When the gate is 1 the branch passes through unscaled; when it
            # is 0 the branch is scaled by alpha (forward) / beta (backward).
            rand_forward = (binary_tensor + alpha_values -
                            binary_tensor * alpha_values)
            rand_backward = (binary_tensor + beta_values -
                             binary_tensor * beta_values)
            # Forward pass uses rand_forward; gradients flow only through
            # rand_backward, via the tf.stop_gradient trick.
            x = x * rand_backward + tf.stop_gradient(x * rand_forward -
                                                     x * rand_backward)
        else:
            # prob is the expectation of the Bernoulli gate, so scale by the
            # expected value of the forward coefficient.
            expected_alpha = (alpha[1] + alpha[0]) / 2
            x = (prob + expected_alpha - prob * expected_alpha) * x

        res = shortcut(input_layer, n * 4, stride)
        return x + res
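Note how the eval branch scales by the expected gate value, E[gate] = prob + E[alpha] * (1 - prob); with the paper's alpha range [-1, 1] the expectation of alpha is 0, so the scale reduces to prob. A hypothetical single call (the shortcut helper referenced above is not shown in these examples, but is presumably a projection similar to _shake_shake_skip_connection in Example #2):

out = bottleneck_layer(x, n=16, stride=2, prob=0.9, is_training=True,
                       alpha=[-1, 1], beta=[0, 1])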
Example #5
def build_shake_drop_model(images, num_classes, is_training):
    """Builds the PyramidNet Shake-Drop model.

  Build the PyramidNet Shake-Drop model from https://arxiv.org/abs/1802.02375.

  Args:
    images: Tensor of images that will be fed into the Wide ResNet Model.
    num_classes: Number of classed that the model needs to predict.
    is_training: Is the model training or not.

  Returns:
    The logits of the PyramidNet Shake-Drop model.
  """

    # ShakeDrop Hparams
    p_l = 0.5
    alpha_shake = [-1, 1]
    beta_shake = [0, 1]

    # PyramidNet Hparams
    alpha = 200
    depth = 272
    # This is for the bottleneck architecture specifically
    n = int((depth - 2) / 9)
    start_channel = 16
    add_channel = alpha / (3 * n)

    # Building the models
    x = images
    x = ops.conv2d(x, 16, 3, scope='init_conv')
    x = ops.batch_norm(x, scope='init_bn')

    layer_num = 1
    total_layers = n * 3
    start_channel += add_channel
    prob = calc_prob(layer_num, total_layers, p_l)
    x = bottleneck_layer(x, round_int(start_channel), 1, prob, is_training,
                         alpha_shake, beta_shake)
    layer_num += 1
    for _ in range(1, n):
        start_channel += add_channel
        prob = calc_prob(layer_num, total_layers, p_l)
        x = bottleneck_layer(x, round_int(start_channel), 1, prob, is_training,
                             alpha_shake, beta_shake)
        layer_num += 1

    start_channel += add_channel
    prob = calc_prob(layer_num, total_layers, p_l)
    x = bottleneck_layer(x, round_int(start_channel), 2, prob, is_training,
                         alpha_shake, beta_shake)
    layer_num += 1
    for _ in range(1, n):
        start_channel += add_channel
        prob = calc_prob(layer_num, total_layers, p_l)
        x = bottleneck_layer(x, round_int(start_channel), 1, prob, is_training,
                             alpha_shake, beta_shake)
        layer_num += 1

    start_channel += add_channel
    prob = calc_prob(layer_num, total_layers, p_l)
    x = bottleneck_layer(x, round_int(start_channel), 2, prob, is_training,
                         alpha_shake, beta_shake)
    layer_num += 1
    for _ in range(1, n):
        start_channel += add_channel
        prob = calc_prob(layer_num, total_layers, p_l)
        x = bottleneck_layer(x, round_int(start_channel), 1, prob, is_training,
                             alpha_shake, beta_shake)
        layer_num += 1

    assert layer_num - 1 == total_layers
    x = ops.batch_norm(x, scope='final_bn')
    x = tf.nn.relu(x)
    x = ops.global_avg_pool(x)
    # Fully connected
    logits = ops.fc(x, num_classes)
    return logits
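The helpers calc_prob and round_int are called above but not defined in these examples. A plausible sketch, consistent with how they are used here (the survival probability decays linearly with depth, as in the ShakeDrop paper); treat the exact forms as assumptions:

import numpy as np

def round_int(x):
    # Round a float channel count to the nearest integer.
    return int(np.round(x))

def calc_prob(curr_layer, total_layers, p_l):
    # Survival probability decays linearly from ~1 at the first layer
    # down to 1 - p_l at the deepest layer.
    return 1 - (float(curr_layer) / total_layers) * p_l

With these in place, a call mirrors Example #3:

logits = build_shake_drop_model(images, num_classes=10, is_training=True)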