Code Example #1
File: features.py  Project: sebastiani/versa
def extract_features(images, output_size, use_batch_norm, dropout_keep_prob):
    """
    Based on the architecture described in 'Matching Networks for One-Shot Learning'
    http://arxiv.org/abs/1606.04080.pdf.

    :param images: batch of images.
    :param output_size: dimensionality of the output features.
    :param use_batch_norm: whether to use batch normalization or not.
    :param dropout_keep_prob: keep probability parameter for dropout.
    :return: features.
    """

    # 4X conv2d + pool blocks
    h = conv2d_pool_block(images, use_batch_norm, dropout_keep_prob,
                          'fe_block_1')
    h = conv2d_pool_block(h, use_batch_norm, dropout_keep_prob, 'fe_block_2')
    h = conv2d_pool_block(h, use_batch_norm, dropout_keep_prob, 'fe_block_3')
    h = conv2d_pool_block(h, use_batch_norm, dropout_keep_prob, 'fe_block_4')

    # flatten output
    h = tf.contrib.layers.flatten(h)

    # dense layer
    h = dense_block(h, output_size, use_batch_norm, dropout_keep_prob,
                    'fe_dense')

    return h
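
A minimal usage sketch for this snippet. It assumes TensorFlow 1.x graph mode (implied by the `tf.contrib` call), `import tensorflow as tf`, and the `conv2d_pool_block`/`dense_block` helpers defined elsewhere in the project's features.py; the input shape (28x28x1) and the hyperparameter values are illustrative assumptions, not taken from the project.

import tensorflow as tf

# Placeholder for a batch of single-channel 28x28 images (assumed shape).
images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='images')

# Build the feature-extractor graph; returns a [batch_size, output_size] tensor.
features = extract_features(images,
                            output_size=64,         # assumed embedding size
                            use_batch_norm=True,
                            dropout_keep_prob=0.9)  # keep probability: keep 90% of activations

Note that `dropout_keep_prob` is a keep probability, so a value of 0.9 drops roughly 10% of activations during training.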
Code Example #2
File: features.py  Project: philippeitis/versa
def extract_features_mini_imagenet(images, output_size, use_batch_norm, rate):
    """
    Based on the architecture described in 'Matching Networks for One-Shot Learning'
    http://arxiv.org/abs/1606.04080.pdf.

    :param images: batch of images.
    :param output_size: dimensionality of the output features.
    :param use_batch_norm: whether to use batch normalization or not.
    :param rate: drop probability parameter for dropout.
    :return: features.
    """

    # 5X conv2d + pool blocks
    h = conv2d_pool_block(images, use_batch_norm, rate, 'valid', 'fe_block_1')
    h = conv2d_pool_block(h, use_batch_norm, rate, 'valid', 'fe_block_2')
    h = conv2d_pool_block(h, use_batch_norm, rate, 'valid', 'fe_block_3')
    h = conv2d_pool_block(h, use_batch_norm, rate, 'valid', 'fe_block_4')
    h = conv2d_pool_block(h, use_batch_norm, rate, 'valid', 'fe_block_5')

    # flatten output
    h = tf.compat.v1.layers.flatten(h)

    return h
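
A similar usage sketch for the mini-ImageNet variant, assuming TF 1.x compatibility mode (`tf.compat.v1`), `import tensorflow as tf`, and the five-argument `conv2d_pool_block` helper (with the extra 'valid' padding argument) from the same features.py; the 84x84x3 input shape and the parameter values are assumed for illustration. Unlike Example #1, `rate` here is a drop probability rather than a keep probability, and `output_size` is documented but not used in this variant, since the flattened convolutional output is returned without a final dense block.

import tensorflow as tf

# Graph-mode placeholders require eager execution to be off under TF 2.x.
tf.compat.v1.disable_eager_execution()

# Placeholder for a batch of RGB 84x84 images (assumed mini-ImageNet shape).
images = tf.compat.v1.placeholder(tf.float32, shape=[None, 84, 84, 3], name='images')

# Build the feature-extractor graph; returns the flattened conv-block features.
features = extract_features_mini_imagenet(images,
                                          output_size=256,    # documented but unused in this variant
                                          use_batch_norm=True,
                                          rate=0.1)           # drop probability: drop 10% of activations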