Example #1
def build_shake_shake_model(images, num_classes, hparams, is_training):
  """Builds the Shake-Shake model.

    Build the Shake-Shake model from https://arxiv.org/abs/1705.07485.

    Args:
      images: Tensor of images that will be fed into the Wide ResNet Model.
      num_classes: Number of classed that the model needs to predict.
      hparams: tf.HParams object that contains additional hparams needed to
        construct the model. In this case it is the `shake_shake_widen_factor`
        that is used to determine how many filters the model has.
      is_training: Is the model training or not.

    Returns:
      The logits of the Shake-Shake model.
    """
  depth = 26  # Shake-Shake-26: depth = 6 * n + 2
  k = hparams.shake_shake_widen_factor  # The widen factor
  n = int((depth - 2) / 6)  # Residual blocks per stage (4 here)
  x = images

  x = ops.conv2d(x, 16, 3, scope='init_conv')
  x = ops.batch_norm(x, scope='init_bn')
  with tf.variable_scope('L1'):
    x = _shake_shake_layer(x, 16 * k, n, 1, is_training)
  with tf.variable_scope('L2'):
    x = _shake_shake_layer(x, 32 * k, n, 2, is_training)
  with tf.variable_scope('L3'):
    x = _shake_shake_layer(x, 64 * k, n, 2, is_training)
  x = tf.nn.relu(x)
  x = ops.global_avg_pool(x)

  # Fully connected
  logits = ops.fc(x, num_classes)
  return logits
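
A minimal, hypothetical usage sketch for this builder, assuming TF1-style graph mode and CIFAR-sized 32x32 inputs (the shape, class count, and widen factor are assumptions; `ops` and `_shake_shake_layer` are project-local helpers that must already be importable):

import tensorflow as tf

# HParams container from TF1; only shake_shake_widen_factor is read here.
hparams = tf.contrib.training.HParams(shake_shake_widen_factor=2)

# Placeholder for a batch of CIFAR-sized images (shape is an assumption).
images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
logits = build_shake_shake_model(images, num_classes=10, hparams=hparams,
                                 is_training=True)
# logits: [batch_size, 10] tensor suitable for softmax cross-entropy.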
Example #2
def build_wrn_model(images, num_classes, wrn_size, update_bn=True):
    """Builds the WRN model.

    Build the Wide ResNet model from https://arxiv.org/abs/1605.07146.

    Args:
        images: Tensor of images that will be fed into the Wide ResNet Model.
        num_classes: Number of classed that the model needs to predict.
        wrn_size: Parameter that scales the number of filters in the Wide ResNet
            model.

    Returns:
        The logits of the Wide ResNet model.
    """
    # wrn_size = 16 * widening factor k
    # Note: despite its name, `kernel_size` holds the base number of filters,
    # not a spatial kernel size; the 3x3 spatial kernel is `filter_size`.
    kernel_size = wrn_size
    filter_size = 3
    # depth = num_blocks_per_resnet * 6 + 4 = 28
    num_blocks_per_resnet = 4
    filters = [
        min(kernel_size, 16), kernel_size, kernel_size * 2, kernel_size * 4
    ]
    strides = [1, 2, 2]  # stride for each resblock

    # Run the first conv
    with tf.variable_scope('init'):
        x = images
        output_filters = filters[0]
        x = ops.conv2d(x, output_filters, filter_size, scope='init_conv')

    first_x = x  # Res from the beginning
    orig_x = x  # Res from previous block

    for block_num in range(1, 4):
        with tf.variable_scope('unit_{}_0'.format(block_num)):
            activate_before_residual = True if block_num == 1 else False
            x = residual_block(
                x,
                filters[block_num - 1],
                filters[block_num],
                strides[block_num - 1],
                update_bn=update_bn,
                activate_before_residual=activate_before_residual)
        for i in range(1, num_blocks_per_resnet):
            with tf.variable_scope('unit_{}_{}'.format(block_num, i)):
                x = residual_block(
                    x,
                    filters[block_num],
                    filters[block_num],
                    1,
                    update_bn=update_bn,
                    activate_before_residual=False)
        x, orig_x = _res_add(filters[block_num - 1], filters[block_num],
                             strides[block_num - 1], x, orig_x)
    final_stride_val = np.prod(strides)
    x, _ = _res_add(filters[0], filters[3], final_stride_val, x, first_x)
    with tf.variable_scope('unit_last'):
        x = ops.batch_norm(x, scope='final_bn')
        x = tf.nn.relu(x)
        x = ops.global_avg_pool(x)
        logits = ops.fc(x, num_classes)
    return logits
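
As a quick, hypothetical call site (the input shape and sizes are assumptions; since wrn_size = 16 * k, k = 10 gives the common WRN-28-10 configuration):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
logits = build_wrn_model(images, num_classes=10, wrn_size=160, update_bn=True)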
Example #3
def build_shake_drop_model(images, num_classes, is_training):
    """Builds the PyramidNet Shake-Drop model.

    Build the PyramidNet Shake-Drop model from https://arxiv.org/abs/1802.02375.

    Args:
      images: Tensor of images that will be fed into the Wide ResNet Model.
      num_classes: Number of classed that the model needs to predict.
      is_training: Is the model training or not.

    Returns:
      The logits of the PyramidNet Shake-Drop model.
    """

    # ShakeDrop Hparams
    p_l = 0.5
    alpha_shake = [-1, 1]
    beta_shake = [0, 1]
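    # calc_prob is assumed to follow the ShakeDrop paper's linear decay rule:
    # the survival probability falls from 1.0 at the first block to p_l at
    # the last block.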

    # PyramidNet Hparams
    alpha = 200  # Total increase in channels across the whole network
    depth = 272
    # This is for the bottleneck architecture specifically
    n = int((depth - 2) / 9)
    start_channel = 16
    add_channel = alpha / (3 * n)  # Per-block channel increment

    # Building the models
    x = images
    x = ops.conv2d(x, 16, 3, scope='init_conv')
    x = ops.batch_norm(x, scope='init_bn')

    layer_num = 1
    total_layers = n * 3
    # Three stages of n bottleneck blocks each. The first block of stages 2
    # and 3 downsamples with stride 2; every block widens by add_channel.
    for stride in [1, 2, 2]:
        for block_num in range(n):
            start_channel += add_channel
            prob = calc_prob(layer_num, total_layers, p_l)
            block_stride = stride if block_num == 0 else 1
            x = bottleneck_layer(x, round_int(start_channel), block_stride,
                                 prob, is_training, alpha_shake, beta_shake)
            layer_num += 1

    assert layer_num - 1 == total_layers
    x = ops.batch_norm(x, scope='final_bn')
    x = tf.nn.relu(x)
    x = ops.global_avg_pool(x)
    # Fully connected
    logits = ops.fc(x, num_classes)
    return logits
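
A minimal, hypothetical call site (the input shape and class count are assumptions; note the fixed depth of 272 makes this a large model):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
logits = build_shake_drop_model(images, num_classes=10, is_training=True)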