Example #1
def task(x, activation='relu', output_dim=256, scope='task_network', norm='layer', b_train=False):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        if activation == 'swish':
            act_func = util.swish
        elif activation == 'relu':
            act_func = tf.nn.relu
        elif activation == 'lrelu':
            act_func = tf.nn.leaky_relu
        else:
            act_func = tf.nn.sigmoid

        print('Task Layer1: ' + str(x.get_shape().as_list()))

        block_depth = dense_block_depth
        l = x
        l = layers.conv(l, scope='conv1', filter_dims=[3, 3, block_depth], stride_dims=[1, 1],
                        non_linear_fn=None, bias=False, dilation=[1, 1, 1, 1])

        if norm == 'layer':
            l = layers.layer_norm(l, scope='ln1')
        elif norm == 'batch':
            l = layers.batch_norm_conv(l, b_train=b_train, scope='bn1')

        l = act_func(l)

        for i in range(15):
            l = layers.add_residual_block(l, filter_dims=[3, 3, block_depth], num_layers=2, act_func=act_func,
                                          norm=norm, b_train=b_train, scope='block1_' + str(i))

        latent = layers.global_avg_pool(l, output_length=output_dim)

    return latent
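The latent vector above comes from layers.global_avg_pool with a fixed output_length. As a minimal TF 2 / Keras sketch, one common way to get such a fixed-length latent is a 1x1 projection to output_dim channels followed by spatial averaging; this is an assumption for illustration only, not the project's actual global_avg_pool implementation.

import tensorflow as tf

def global_avg_pool_sketch(x, output_length):
    # Project channels to the requested length with a 1x1 conv, then average
    # over the spatial dimensions (an assumed stand-in for layers.global_avg_pool).
    x = tf.keras.layers.Conv2D(output_length, 1)(x)
    return tf.reduce_mean(x, axis=[1, 2])

feat = tf.random.normal([2, 8, 8, 64])
latent = global_avg_pool_sketch(feat, 256)
print(latent.shape)  # (2, 256)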
Example #2
def first_block(x,
                target_size,
                noise_dim,
                upsampling='deconv',
                normalization='batch',
                is_training=True):
    if upsampling == 'deconv':
        _x = reshape(x, (1, 1, noise_dim))
        _x = conv2d_transpose(_x,
                              1024,
                              target_size,
                              strides=(1, 1),
                              padding='valid')
    elif upsampling == 'dense':
        _x = dense(x, target_size[0] * target_size[1] * 1024)
        _x = reshape(_x, (target_size[1], target_size[0], 1024))
    else:
        raise ValueError('unknown upsampling: {}'.format(upsampling))

    if normalization == 'batch':
        _x = batch_norm(_x, is_training=is_training)
    elif normalization == 'layer':
        _x = layer_norm(_x, is_training=is_training)
    elif normalization is None:
        pass
    else:
        raise ValueError('unknown normalization: {}'.format(normalization))
    _x = activation(_x, 'relu')
    return _x
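Both branches of first_block end with a target_size feature map of 1024 channels. A small, self-contained TF 2 / Keras sketch (the 128-dim noise and 4x4 target size are made-up values) showing that the transposed-conv path and the dense-and-reshape path produce the same output shape:

import tensorflow as tf

noise_dim = 128
target_size = (4, 4)  # (width, height), matching the reshape order above
z = tf.random.normal([8, noise_dim])

# 'deconv' path: a 1x1 spatial map, then a transposed conv whose kernel equals
# the target size with VALID padding, producing exactly target_size output.
x = tf.reshape(z, [-1, 1, 1, noise_dim])
deconv = tf.keras.layers.Conv2DTranspose(1024, kernel_size=target_size,
                                         strides=1, padding='valid')(x)

# 'dense' path: project to H * W * 1024 features and reshape.
d = tf.keras.layers.Dense(target_size[0] * target_size[1] * 1024)(z)
dense = tf.reshape(d, [-1, target_size[1], target_size[0], 1024])

print(deconv.shape, dense.shape)  # both (8, 4, 4, 1024)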
Example #3
def layer_norm(input_tensor, name=None):
    """Run layer normalization on the last dimension of the tensor."""
    # return tf.contrib.layers.layer_norm(inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
    return layers.layer_norm(input_tensor,
                             begin_norm_axis=-1,
                             begin_params_axis=-1,
                             scope=name)
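For reference, a NumPy sketch of what "normalize over the last dimension" means here; gamma and beta stand in for the learned scale and shift that the layer_norm op creates internally:

import numpy as np

def layer_norm_reference(x, gamma=None, beta=None, eps=1e-6):
    # Normalize over the last dimension only (begin_norm_axis=-1): every position
    # gets zero mean and unit variance across its feature vector.
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    normed = (x - mean) / np.sqrt(var + eps)
    if gamma is not None:
        normed = normed * gamma  # learned per-feature scale
    if beta is not None:
        normed = normed + beta   # learned per-feature shift
    return normed

x = np.random.randn(2, 4, 8).astype(np.float32)
y = layer_norm_reference(x)
print(np.allclose(y.mean(axis=-1), 0.0, atol=1e-5))  # True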
Example #4
def discriminator_block(x,
                        filters,
                        activation_='lrelu',
                        kernel_size=(3, 3),
                        is_training=True,
                        normalization=None,
                        residual=True):
    with tf.variable_scope(None, discriminator_block.__name__):
        _x = conv_block(x, filters, activation_, kernel_size, is_training,
                        'same', normalization, 0., 'conv_first')
        _x = conv_block(_x, filters, None, kernel_size, is_training, 'same',
                        None, 0., 'conv_first')
        if residual:
            _x += x
        _x = activation(_x, activation_)
        if normalization == 'layer':
            _x = layer_norm(_x, is_training=is_training)
        return _x
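A self-contained TF 2 / Keras sketch of the same conv-conv-shortcut pattern, with the project-specific conv_block helper and the normalization options replaced by plain Keras layers purely for illustration:

import tensorflow as tf

def discriminator_block_sketch(x, filters):
    # conv -> LeakyReLU -> conv, identity shortcut, then the outer activation,
    # mirroring discriminator_block above (normalization omitted for brevity).
    h = tf.keras.layers.Conv2D(filters, 3, padding='same')(x)
    h = tf.keras.layers.LeakyReLU(0.2)(h)
    h = tf.keras.layers.Conv2D(filters, 3, padding='same')(h)
    h = h + x  # residual=True requires x to already have `filters` channels
    return tf.keras.layers.LeakyReLU(0.2)(h)

inp = tf.random.normal([1, 32, 32, 64])
print(discriminator_block_sketch(inp, 64).shape)  # (1, 32, 32, 64)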
Example #5
def encoder(x, activation='relu', scope='encoder_network', norm='layer', b_train=False):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):

        if activation == 'swish':
            act_func = util.swish
        elif activation == 'relu':
            act_func = tf.nn.relu
        elif activation == 'lrelu':
            act_func = tf.nn.leaky_relu
        else:
            act_func = tf.nn.sigmoid

        # [192 x 192]
        block_depth = dense_block_depth // 4

        l = layers.conv(x, scope='conv1', filter_dims=[5, 5, block_depth], stride_dims=[1, 1],
                        non_linear_fn=None, bias=False, dilation=[1, 1, 1, 1])

        if norm == 'layer':
            l = layers.layer_norm(l, scope='ln0')
        elif norm == 'batch':
            l = layers.batch_norm_conv(l, b_train=b_train, scope='bn0')

        l = act_func(l)

        for i in range(4):
            l = layers.add_residual_dense_block(l, filter_dims=[3, 3, block_depth], num_layers=2,
                                                act_func=act_func, norm=norm, b_train=b_train,
                                                scope='dense_block_1_' + str(i))

        # [64 x 64]
        block_depth = block_depth * 2

        l = layers.conv(l, scope='tr1', filter_dims=[3, 3, block_depth], stride_dims=[2, 2], non_linear_fn=None)

        if norm == 'layer':
            l = layers.layer_norm(l, scope='ln1')
        elif norm == 'batch':
            l = layers.batch_norm_conv(l, b_train=b_train, scope='bn1')

        l = act_func(l)

        print('Encoder Block 1: ' + str(l.get_shape().as_list()))

        for i in range(2):
            l = layers.add_residual_block(l, filter_dims=[3, 3, block_depth], num_layers=2, act_func=act_func,
                                          norm=norm, b_train=b_train, scope='res_block_1_' + str(i))

        # [32 x 32]
        block_depth = block_depth * 2

        l = layers.conv(l, scope='tr2', filter_dims=[3, 3, block_depth], stride_dims=[2, 2], non_linear_fn=None)

        if norm == 'layer':
            l = layers.layer_norm(l, scope='ln2')
        elif norm == 'batch':
            l = layers.batch_norm_conv(l, b_train=b_train, scope='bn2')

        l = act_func(l)

        print('Encoder Block 2: ' + str(l.get_shape().as_list()))

        for i in range(2):
            l = layers.add_residual_block(l, filter_dims=[3, 3, block_depth], num_layers=2, act_func=act_func,
                                          norm=norm, b_train=b_train, scope='res_block_2_' + str(i))

        # [16 x 16]
        block_depth = block_depth * 2

        l = layers.conv(l, scope='tr3', filter_dims=[3, 3, block_depth], stride_dims=[2, 2], non_linear_fn=None)

        if norm == 'layer':
            l = layers.layer_norm(l, scope='ln3')
        elif norm == 'batch':
            l = layers.batch_norm_conv(l, b_train=b_train, scope='bn3')

        l = act_func(l)

        print('Encoder Block 3: ' + str(l.get_shape().as_list()))

        for i in range(2):
            l = layers.add_residual_block(l, filter_dims=[3, 3, block_depth], num_layers=2, act_func=act_func,
                                          norm=norm, b_train=b_train, scope='res_block_3_' + str(i))

        # [8 x 8]
        block_depth = block_depth * 2
        l = layers.conv(l, scope='tr4', filter_dims=[3, 3, block_depth], stride_dims=[2, 2], non_linear_fn=None)

        if norm == 'layer':
            l = layers.layer_norm(l, scope='ln4')
        elif norm == 'batch':
            l = layers.batch_norm_conv(l, b_train=b_train, scope='bn4')

        l = act_func(l)

        print('Encoder Block 4: ' + str(l.get_shape().as_list()))

        for i in range(2):
            l = layers.add_residual_block(l, filter_dims=[3, 3, block_depth], num_layers=2, act_func=act_func,
                                          norm=norm, b_train=b_train, use_dilation=True, scope='res_block_4_' + str(i))

        # [4 x 4]
        block_depth = block_depth * 2
        l = layers.conv(l, scope='tr5', filter_dims=[3, 3, block_depth], stride_dims=[2, 2], non_linear_fn=None)
        print('Encoder Block 5: ' + str(l.get_shape().as_list()))

        if norm == 'layer':
            l = layers.layer_norm(l, scope='ln5')
        elif norm == 'batch':
            l = layers.batch_norm_conv(l, b_train=b_train, scope='bn5')

        l = act_func(l)

        for i in range(2):
            l = layers.add_residual_block(l, filter_dims=[3, 3, block_depth], num_layers=2, act_func=act_func,
                                          norm=norm, b_train=b_train, use_dilation=True, scope='res_block_5_' + str(i))

        last_layer = l

        context = layers.global_avg_pool(last_layer, output_length=representation_dim, use_bias=True, scope='gp')
        print('Encoder GP Dims: ' + str(context.get_shape().as_list()))

        context = tf.reshape(context, [batch_size, num_context_patches, num_context_patches, -1])
        print('Context Dims: ' + str(context.get_shape().as_list()))

    return context
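The encoder alternates residual blocks with stride-2 transition convolutions (tr1..tr5). A short sketch, using a made-up 128x128 input purely for illustration, of how the spatial size and channel depth evolve through such stride-2, 'same'-padded convolutions:

import tensorflow as tf

x = tf.random.normal([1, 128, 128, 16])  # hypothetical input; real size depends on the dataset
for i in range(5):
    x = tf.keras.layers.Conv2D(int(x.shape[-1]) * 2, 3, strides=2, padding='same')(x)
    print('after tr%d:' % (i + 1), x.shape)
# Each stride-2 transition halves the spatial size and doubles the channel depth,
# matching the tr1..tr5 convolutions above.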
Example #6
File: srl_model.py Project: xhades/unisrl
def _residual_fn(x, y, residual_dropout):
    if residual_dropout > 0.0:
        y = tf.nn.dropout(y, 1.0 - residual_dropout)
    return layer_norm(x + y)
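A NumPy sketch of the add-dropout-normalize pattern this helper implements (a post-norm residual connection); the dropout mask and layer norm are re-implemented by hand so the example is self-contained:

import numpy as np

rng = np.random.default_rng(0)

def layer_norm_np(t, eps=1e-6):
    mean = t.mean(axis=-1, keepdims=True)
    var = t.var(axis=-1, keepdims=True)
    return (t - mean) / np.sqrt(var + eps)

def residual_fn_np(x, y, residual_dropout=0.1, training=True):
    # Inverted dropout on the sublayer output, then add-and-normalize, mirroring
    # _residual_fn above (tf.nn.dropout's second argument is the keep probability).
    if training and residual_dropout > 0.0:
        keep_prob = 1.0 - residual_dropout
        y = y * rng.binomial(1, keep_prob, size=y.shape) / keep_prob
    return layer_norm_np(x + y)

x = rng.standard_normal((2, 5, 16))
y = rng.standard_normal((2, 5, 16))
print(residual_fn_np(x, y).shape)  # (2, 5, 16)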