Example #1
def add_residual_dense_block(in_layer, filter_dims, num_layers, act_func=tf.nn.relu, bn_phaze=False, scope='residual_dense_block'):
    with tf.variable_scope(scope):
        l = in_layer
        input_dims = in_layer.get_shape().as_list()
        num_channel_out = input_dims[-1]  # keep output channels equal to the input so the residual add is shape-compatible

        for i in range(num_layers):
            l = layers.add_dense_layer(l, filter_dims=filter_dims, act_func=act_func, bn_phaze=bn_phaze,
                                       scope='layer' + str(i))
        l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, num_channel_out], act_func=act_func,
                                              scope='dense_transition_1', bn_phaze=bn_phaze, use_pool=False)

        l = tf.add(l, in_layer)

    return l
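
A minimal usage sketch for this block, assuming TensorFlow 1.x graph mode and the project's own layers module; the placeholder names x and is_train below are illustrative only and not part of the original code.

import tensorflow as tf

# Illustrative driver code: build one residual dense block on a dummy NHWC input.
x = tf.placeholder(tf.float32, [None, 96, 96, 32], name='x')
is_train = tf.placeholder(tf.bool, name='is_train')  # feeds bn_phaze

out = add_residual_dense_block(x, filter_dims=[3, 3, 32], num_layers=2,
                               act_func=tf.nn.relu, bn_phaze=is_train,
                               scope='block_demo')
# The 1x1 transition restores the input channel count, so the residual add keeps
# the output shape identical to the input: [None, 96, 96, 32].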
Example #2
def decoder_network(latent, anchor_layer=None, activation='swish', scope='g_decoder_network', bn_phaze=False):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        if activation == 'swish':
            act_func = util.swish
        elif activation == 'relu':
            act_func = tf.nn.relu
        elif activation == 'lrelu':
            act_func = tf.nn.leaky_relu
        else:
            act_func = tf.nn.sigmoid

        # The multi-modal noise branch is disabled; both cond branches returned the
        # latent unchanged, so pass it through directly.
        # l = tf.cond(bn_phaze, lambda: latent, lambda: make_multi_modal_noise(8))
        l = latent

        l = layers.fc(l, 6*6*32, non_linear_fn=act_func)

        print('decoder input:', str(latent.get_shape().as_list()))
        l = tf.reshape(l, shape=[-1, 6, 6, 32])

        l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth*4], num_layers=4,
                               act_func=act_func, bn_phaze=bn_phaze, use_residual=False, scope='block_0')

        print('block 0:', str(l.get_shape().as_list()))

        l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn1')
        l = act_func(l)

        # 12 x 12
        l = layers.deconv(l, b_size=batch_size, scope='g_dec_deconv1', filter_dims=[3, 3, g_dense_block_depth * 3],
                          stride_dims=[2, 2], padding='SAME', non_linear_fn=None)

        print('deconv1:', str(l.get_shape().as_list()))

        l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth * 3], num_layers=4,
                               act_func=act_func, bn_phaze=bn_phaze, use_residual=False,
                               scope='block_1', use_dilation=True)

        l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn2')
        l = act_func(l)

        # 24 x 24
        l = layers.deconv(l, b_size=batch_size, scope='g_dec_deconv2', filter_dims=[3, 3, g_dense_block_depth * 2],
                          stride_dims=[2, 2], padding='SAME', non_linear_fn=None)

        print('deconv2:', str(l.get_shape().as_list()))

        l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth * 2], num_layers=4,
                               act_func=act_func, bn_phaze=bn_phaze, use_residual=False,
                               scope='block_2', use_dilation=True)

        l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn3')
        l = act_func(l)

        # 48 x 48
        l = layers.deconv(l, b_size=batch_size, scope='g_dec_deconv3', filter_dims=[3, 3, g_dense_block_depth],
                          stride_dims=[2, 2], padding='SAME', non_linear_fn=None)

        print('deconv3:', str(l.get_shape().as_list()))

        l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=4,
                               act_func=act_func, bn_phaze=bn_phaze, use_residual=False,
                               scope='block_3', use_dilation=True)

        l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn4')
        l = act_func(l)

        l = layers.self_attention(l, g_dense_block_depth, act_func=act_func)

        if anchor_layer is not None:
            l = tf.concat([l, anchor_layer], axis=3)

        # 96 x 96
        l = layers.deconv(l, b_size=batch_size, scope='g_dec_deconv4', filter_dims=[3, 3, g_dense_block_depth],
                          stride_dims=[2, 2], padding='SAME', non_linear_fn=None)

        l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
                               act_func=act_func, bn_phaze=bn_phaze, use_residual=False,
                               scope='block_4', use_dilation=True)

        l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, 3], act_func=act_func,
                                              scope='dense_transition_1', bn_phaze=bn_phaze, use_pool=False)

        l = add_residual_block(l, filter_dims=[3, 3, 3], num_layers=2,
                               act_func=act_func, bn_phaze=bn_phaze, use_residual=False,
                               scope='block_5', use_dilation=True)

        l = tf.nn.tanh(l)

        print('final:', str(l.get_shape().as_list()))

        return l
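
The decoder expands a 6x6x32 seed through four stride-2 deconvolutions (6 -> 12 -> 24 -> 48 -> 96) and finishes with tanh, so outputs land in [-1, 1]. Below is a hedged wiring sketch, assuming the module-level globals (batch_size, g_dense_block_depth, representation_dim) are already defined; latent_in and b_train are assumed placeholder names.

# Illustrative graph construction only; not part of the original module.
latent_in = tf.placeholder(tf.float32, [batch_size, representation_dim], name='latent_in')
b_train = tf.placeholder(tf.bool, name='b_train')  # drives the batch-norm phase

fake_img = decoder_network(latent_in, anchor_layer=None, activation='relu',
                           scope='g_decoder_network', bn_phaze=b_train)
# fake_img shape: [batch_size, 96, 96, 3], values in [-1, 1] from the final tanh.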
Example #3
def encoder_network(x, activation='relu', scope='encoder_network', reuse=False, bn_phaze=False, keep_prob=0.5):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # if reuse:
        #    tf.get_variable_scope().reuse_variables()

        if activation == 'swish':
            act_func = util.swish
        elif activation == 'relu':
            act_func = tf.nn.relu
        elif activation == 'lrelu':
            act_func = tf.nn.leaky_relu
        else:
            act_func = tf.nn.sigmoid

        # [96 x 96]
        l = layers.conv(x, scope='conv1', filter_dims=[3, 3, g_dense_block_depth], stride_dims=[1, 1],
                        non_linear_fn=None, bias=False, dilation=[1, 1, 1, 1])

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_0')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_1')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_1_1')

        l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn1')
        l = act_func(l)

        # [48 x 48]
        #l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        l = layers.conv(l, scope='conv2', filter_dims=[3, 3, g_dense_block_depth], stride_dims=[2, 2],
                        non_linear_fn=act_func, bias=False, dilation=[1, 1, 1, 1])

        l = layers.self_attention(l, g_dense_block_depth)

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_2')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_3')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_3_1')

        l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn2')
        l = act_func(l)

        l_share = l

        # [24 x 24]
        #l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        l = layers.conv(l, scope='conv3', filter_dims=[3, 3, g_dense_block_depth * 2], stride_dims=[2, 2],
                        non_linear_fn=None, bias=False, dilation=[1, 1, 1, 1])

        l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth * 2],
                                              act_func=act_func,
                                              scope='dense_transition_24', bn_phaze=bn_phaze,
                                              use_pool=False)

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth * 2], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_4')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth * 2], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_5')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth * 2], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_5_1')

        l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn3')
        l = act_func(l)

        # [12 x 12]
        #l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        l = layers.conv(l, scope='conv4', filter_dims=[3, 3, g_dense_block_depth * 3], stride_dims=[2, 2],
                        non_linear_fn=None, bias=False, dilation=[1, 1, 1, 1])

        l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth * 3],
                                              act_func=act_func,
                                              scope='dense_transition_12', bn_phaze=bn_phaze,
                                              use_pool=False)

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth * 3], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_6')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth * 3], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_7')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth * 3], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_7_1')

        l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn4')
        l = act_func(l)

        # [6 x 6]
        #l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        l = layers.conv(l, scope='conv5', filter_dims=[3, 3, g_dense_block_depth * 4], stride_dims=[2, 2],
                        non_linear_fn=None, bias=False, dilation=[1, 1, 1, 1])

        l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth * 4],
                                              act_func=act_func,
                                              scope='dense_transition_6', bn_phaze=bn_phaze,
                                              use_pool=False)

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth * 4], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_8')

        #l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth * 4], num_layers=3,
        #                             act_func=act_func, bn_phaze=bn_phaze, scope='block_9')

        #l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth * 4], num_layers=3,
        #                             act_func=act_func, bn_phaze=bn_phaze, scope='block_10')

        with tf.variable_scope('dense_block_last'):
            scale_layer = layers.add_dense_transition_layer(l, filter_dims=[1, 1, representation_dim],
                                                            act_func=act_func,
                                                            scope='dense_transition_1', bn_phaze=bn_phaze,
                                                            use_pool=False)
            last_dense_layer = layers.add_dense_transition_layer(l, filter_dims=[1, 1, representation_dim],
                                                                 act_func=act_func,
                                                                 scope='dense_transition_2', bn_phaze=bn_phaze,
                                                                 use_pool=False)
            scale_layer = act_func(scale_layer)
            last_dense_layer = act_func(last_dense_layer)

    return last_dense_layer, scale_layer, l_share
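
The encoder mirrors the decoder: stride-2 convolutions reduce 96x96 inputs to 48, 24, 12, and finally 6x6 feature maps, and it returns two representation tensors plus l_share, the 48x48 feature map whose resolution matches the stage at which decoder_network concatenates its anchor_layer. A hedged usage sketch follows; images and b_train are assumed placeholder names.

# Illustrative only; assumes batch_size, representation_dim and the layers module exist.
images = tf.placeholder(tf.float32, [batch_size, 96, 96, 3], name='images')
b_train = tf.placeholder(tf.bool, name='b_train')

feature, scale, shared = encoder_network(images, activation='relu',
                                         bn_phaze=b_train)
# feature and scale are 6x6 maps with representation_dim channels after the 1x1
# transitions; shared (l_share) is the 48x48 map that can serve as the decoder's anchor_layer.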
Example #4
def cnn_network(x, activation='swish', scope='cnn_network', reuse=False, bn_phaze=False, keep_prob=0.5):
    with tf.variable_scope(scope):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        if activation == 'swish':
            act_func = util.swish
        elif activation == 'relu':
            act_func = tf.nn.relu
        else:
            act_func = tf.nn.sigmoid

        l = layers.conv(x, scope='conv1', filter_dims=[5, 5, 256], stride_dims=[1, 1], non_linear_fn=None, bias=False)

        with tf.variable_scope('dense_block_1'):
            for i in range(g_dense_block_layers):
                l = layers.add_dense_layer(l, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                           bn_phaze=bn_phaze, scope='layer' + str(i))
            l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth], act_func=act_func,
                                                  scope='dense_transition_1', bn_phaze=bn_phaze)

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_1')

        l = tf.nn.max_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_2')

        l = tf.nn.max_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_3')

        l = tf.nn.max_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_4')

        l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=3,
                                     act_func=act_func, bn_phaze=bn_phaze, scope='block_5')

        with tf.variable_scope('dense_block_last'):
            for i in range(g_dense_block_layers):
                l = layers.add_dense_layer(l, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                           bn_phaze=bn_phaze, scope='layer' + str(i))
            scale_layer = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth],
                                                            act_func=act_func,
                                                            scope='dense_transition_1', bn_phaze=bn_phaze,
                                                            use_pool=False)
            last_dense_layer = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth],
                                                                 act_func=act_func,
                                                                 scope='dense_transition_2', bn_phaze=bn_phaze,
                                                                 use_pool=False)
            scale_layer = act_func(scale_layer)
            last_dense_layer = act_func(last_dense_layer)

        '''
        with tf.variable_scope('residual_block_1'):
            r = layers.add_residual_layer(l, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                          bn_phaze=bn_phaze, scope='layer1')
            r = layers.add_residual_layer(r, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                          bn_phaze=bn_phaze, scope='layer2')
            l = tf.add(l, r)

        with tf.variable_scope('dense_block_2'):
            for i in range(g_dense_block_layers):
                l = layers.add_dense_layer(l, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func, bn_phaze=bn_phaze,
                                    scope='layer' + str(i))
            l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth], act_func=act_func,
                                     scope='dense_transition_1', bn_phaze=bn_phaze)

        with tf.variable_scope('residual_block_2'):
            r = layers.add_residual_layer(l, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                          bn_phaze=bn_phaze, scope='layer1')
            r = layers.add_residual_layer(r, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                          bn_phaze=bn_phaze, scope='layer2')
            l = tf.add(l, r)

        with tf.variable_scope('dense_block_3'):
            for i in range(g_dense_block_layers):
                l = layers.add_dense_layer(l, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func, bn_phaze=bn_phaze,
                                    scope='layer' + str(i))
            l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth], act_func=act_func,
                                    scope='dense_transition_1', bn_phaze=bn_phaze)

        with tf.variable_scope('residual_block_3'):
            r = layers.add_residual_layer(l, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                          bn_phaze=bn_phaze, scope='layer1')
            r = layers.add_residual_layer(r, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                          bn_phaze=bn_phaze, scope='layer2')
            l = tf.add(l, r)

        with tf.variable_scope('dense_block_4'):
            for i in range(g_dense_block_layers):
                l = layers.add_dense_layer(l, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func, bn_phaze=bn_phaze,
                                           scope='layer' + str(i))
            l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth], act_func=act_func,
                                                  scope='dense_transition_1', bn_phaze=bn_phaze)

        with tf.variable_scope('residual_block_4'):
            r = layers.add_residual_layer(l, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                          bn_phaze=bn_phaze, scope='layer1')
            r = layers.add_residual_layer(r, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                          bn_phaze=bn_phaze, scope='layer2')
            l = tf.add(l, r)
        
        with tf.variable_scope('dense_block_last'):
            for i in range(g_dense_block_layers):
                l = layers.add_dense_layer(l, filter_dims=[3, 3, g_dense_block_depth], act_func=act_func,
                                    bn_phaze=bn_phaze, scope='layer' + str(i))
            scale_layer = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth], act_func=act_func,
                                                  scope='dense_transition_1', bn_phaze=bn_phaze, use_pool=False)
            last_dense_layer = layers.add_dense_transition_layer(l, filter_dims=[1, 1, g_dense_block_depth],
                                                            act_func=act_func,
                                                            scope='dense_transition_2', bn_phaze=bn_phaze, use_pool=False)
            scale_layer = act_func(scale_layer)
            last_dense_layer = act_func(last_dense_layer)
        '''

        return last_dense_layer, scale_layer
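
cnn_network stacks a 5x5 stem convolution, a dense block, and residual dense blocks interleaved with three 2x2 max-pool stages, so spatial resolution drops by a factor of 8 before the two activated feature tensors are returned. A hedged usage sketch with assumed placeholder names:

# Illustrative only; x_in and b_train are not defined in the original module.
x_in = tf.placeholder(tf.float32, [None, 96, 96, 3], name='x_in')
b_train = tf.placeholder(tf.bool, name='b_train')

features, scale = cnn_network(x_in, activation='swish', bn_phaze=b_train)
# The three 2x2 max-pool stages reduce spatial resolution by 8x (e.g. 96x96 -> 12x12),
# and both returned tensors carry g_dense_block_depth channels after the 1x1 transitions.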