Example #1
0
    def __call__(self, input):
        """Build a PatchGAN-style discriminator over `input`.

        Returns the per-image score: the mean of the final 1-channel
        feature map over its spatial and channel axes. Sets self._reuse
        so later calls share variables, and records the scope's
        trainable variables in self.var_list.
        """
        with tf.compat.v1.variable_scope(self.name, reuse=self._reuse):
            # Stem: no normalization on the first layer.
            net = ops.conv_block(input, 64, 'C64', 4, 2, self._is_train,
                                 self._reuse, norm=None,
                                 activation=self._activation)
            # Three strided conv blocks with normalization.
            for filters, label in ((128, 'C128'), (256, 'C256'),
                                   (512, 'C512')):
                net = ops.conv_block(net, filters, label, 4, 2,
                                     self._is_train, self._reuse,
                                     self._norm, self._activation)
            # Head: single-channel conv, no norm/activation, biased.
            net = ops.conv_block(net, 1, 'C1', 4, 1, self._is_train,
                                 self._reuse, norm=None, activation=None,
                                 bias=True)
            net = tf.reduce_mean(net, axis=[1, 2, 3])

            self._reuse = True
            self.var_list = tf.compat.v1.get_collection(
                tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return net
Example #2
0
def discriminator(x, reuse=False):
    """Discriminator: five strided conv blocks, then a dense layer to one logit.

    Args:
        x: input image batch.
        reuse: when True, reuse variables in the current variable scope.

    Returns:
        The dense layer's output tensor ('d_output').
    """
    if reuse:
        tf.get_variable_scope().reuse_variables()
    # Identical 4x4/stride-2 conv blocks, differing only in map count.
    net = x
    for idx, maps in enumerate((8, 16, 32, 64, 1), start=1):
        net = ops.conv_block(net,
                             filter_size=4,
                             stride_length=2,
                             n_maps=maps,
                             name='d_conv_b_{}'.format(idx))
    # Flatten the final 11x9x1 feature map before the dense head.
    flat = tf.reshape(net, [-1, 11 * 9 * 1], name='d_reshape')
    return ops.dense(flat, 11 * 9, 1, name='d_output')
Example #3
0
    def __call__(self, input_op):
        """Build the discriminator over `input_op`.

        Stores the mean patch score in self.d_out (returns None), flips
        self.reuse for variable sharing, and records the scope's
        trainable variables in self.var_list.
        """
        with tf.variable_scope(self.name, reuse=self.reuse):
            # First layer is unnormalized.
            net = ops.conv_block(input_op, 64, 'C64', 4, 2, self.is_train,
                                 self.reuse, norm=None,
                                 activation=self.activation)
            for n in (128, 256, 512):
                net = ops.conv_block(net, n, 'C{}'.format(n), 4, 2,
                                     self.is_train, self.reuse, self.norm,
                                     self.activation)
            # 1-channel head without norm/activation, then spatial mean.
            net = ops.conv_block(net, 1, 'C1', 4, 1, self.is_train,
                                 self.reuse, norm=None, activation=None,
                                 bias=True)
            self.d_out = tf.reduce_mean(net, axis=[1, 2, 3])

            self.reuse = True
            self.var_list = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
    def __call__(self, input, z):
        """U-Net generator conditioned on a latent code.

        `z` is tiled across the spatial grid and concatenated to `input`
        as extra channels; an encoder/decoder with skip connections then
        produces a 3-channel tanh image of the same spatial size.
        Requires a statically-known batch size on `input`.
        """
        with tf.variable_scope(self.name, reuse=self._reuse):
            batch_size = int(input.get_shape()[0])
            latent_dim = int(z.get_shape()[-1])
            num_filters = [64, 128, 256, 512, 512, 512, 512]
            # One extra downsampling stage for 256px inputs.
            if self._image_size == 256:
                num_filters.append(512)

            layers = []
            G = input
            # Broadcast z over the full spatial grid and append as channels.
            z = tf.reshape(z, [batch_size, 1, 1, latent_dim])
            z = tf.tile(z, [1, self._image_size, self._image_size, 1])
            G = tf.concat([G, z], axis=3)
            # Encoder: the first layer is unnormalized (norm=None when i == 0).
            for i, n in enumerate(num_filters):
                G = ops.conv_block(G,
                                   n,
                                   'C{}_{}'.format(n, i),
                                   4,
                                   2,
                                   self._is_train,
                                   self._reuse,
                                   norm=self._norm if i else None,
                                   activation='leaky')
                layers.append(G)

            # The bottleneck output is already G, so drop it from the skip
            # list, and mirror the remaining filter counts for the decoder.
            layers.pop()
            num_filters.pop()
            num_filters.reverse()

            # Decoder: each upsample is concatenated with the matching
            # encoder feature map (popped in reverse order).
            for i, n in enumerate(num_filters):
                G = ops.deconv_block(G,
                                     n,
                                     'CD{}_{}'.format(n, i),
                                     4,
                                     2,
                                     self._is_train,
                                     self._reuse,
                                     norm=self._norm,
                                     activation='relu')
                G = tf.concat([G, layers.pop()], axis=3)
            # Final upsample to 3 channels with tanh output range.
            G = ops.deconv_block(G,
                                 3,
                                 'last_layer',
                                 4,
                                 2,
                                 self._is_train,
                                 self._reuse,
                                 norm=None,
                                 activation='tanh')

            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              self.name)

            return G
 def __init__(self, num_classes=8):
     """Quadruplet ResNet: four shared-architecture feature extractors
     whose fused features feed a 1x1 conv selector and a linear classifier.

     Args:
         num_classes: number of output classes for the final linear layer.
     """
     super(resnet_quadruplets, self).__init__()
     self.num_classes = num_classes
     self.avgpool = nn.AvgPool2d(7)
     self.relu = nn.ReLU(inplace=True)
     # BUG FIX: the original assigned nn.Linear(1024 * 2, 1024) to this
     # attribute and immediately overwrote it on the next line; the dead
     # Linear assignment is removed. The attribute name's typo
     # ('selction') is kept so external callers keep working.
     self.feature_selction = conv_block(1024 * 2, 1024, 1, 1, 0)
     self.fc = nn.Linear(1024 * 2, num_classes)
     self.modelA = feature_extract(BasicBlock, [1, 1, 1, 1])
     self.modelB = feature_extract(BasicBlock, [1, 1, 1, 1])
     self.modelC = feature_extract(BasicBlock, [1, 1, 1, 1])
     self.modelD = feature_extract(BasicBlock, [1, 1, 1, 1])
Example #6
0
    def __call__(self, input_op):
        """Generate an image from `input_op`: encoder convs, residual
        blocks, decoder deconvs, and a final tanh conv.

        Stores the result in self.gen (returns None), flips self.reuse
        for variable sharing, and records trainable variables.
        """
        # Implemented as __call__ for concise invocation.
        with tf.variable_scope(self.name):
            net = ops.conv_block(input_op, 32, 'conv1', 7, 1, self.is_train,
                                 self.reuse, self.norm, self.activation,
                                 pad='REFLECT')
            # Two downsampling stages.
            net = ops.conv_block(net, 64, 'conv2', 3, 2, self.is_train,
                                 self.reuse, self.norm, self.activation)
            net = ops.conv_block(net, 128, 'conv3', 3, 2, self.is_train,
                                 self.reuse, self.norm, self.activation)
            # Residual core at 128 channels.
            for idx in range(self.block_size):
                net = ops.residual_block(net, 128, 'res' + str(idx),
                                         self.is_train, self.reuse,
                                         self.norm)
            # Two upsampling stages mirroring the encoder.
            net = ops.deconv_block(net, 64, 'deconv1', 3, 2, self.is_train,
                                   self.reuse, self.norm, self.activation)
            net = ops.deconv_block(net, 32, 'deconv2', 3, 2, self.is_train,
                                   self.reuse, self.norm, self.activation)
            # Output head: 3 channels, tanh range, no normalization.
            self.gen = ops.conv_block(net, 3, 'conv_end', 7, 1,
                                      self.is_train, self.reuse, norm=None,
                                      activation=tf.nn.tanh, pad='REFLECT')

            self.reuse = True

            self.var_list = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
Example #7
0
    def _resnet(self, input):
        """Residual encoder mapping `input` to a Gaussian latent sample.

        Returns:
            (z, mu, log_sigma): a reparameterized latent sample plus the
            mean and log-std predicted by the two MLP heads.
        """
        with tf.variable_scope(self.name, reuse=self._reuse):
            num_filters = [128, 256, 512, 512]
            if self._image_size == 256:
                num_filters.append(512)

            E = input
            # Stem conv: no normalization on the first layer.
            E = ops.conv_block(E,
                               64,
                               'C{}_{}'.format(64, 0),
                               4,
                               2,
                               self._is_train,
                               self._reuse,
                               norm=None,
                               activation='leaky',
                               bias=True)
            # Residual blocks, each followed by 2x2 average pooling.
            for i, n in enumerate(num_filters):
                E = ops.residual(E,
                                 n,
                                 'res{}_{}'.format(n, i + 1),
                                 self._is_train,
                                 self._reuse,
                                 norm=self._norm,
                                 bias=True)
                E = tf.nn.avg_pool(E, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
            E = tf.nn.relu(E)
            E = tf.nn.avg_pool(E, [1, 8, 8, 1], [1, 8, 8, 1], 'SAME')
            E = ops.flatten(E)
            mu = ops.mlp(E,
                         self._latent_dim,
                         'FC8_mu',
                         self._is_train,
                         self._reuse,
                         norm=None,
                         activation=None)
            log_sigma = ops.mlp(E,
                                self._latent_dim,
                                'FC8_sigma',
                                self._is_train,
                                self._reuse,
                                norm=None,
                                activation=None)

            # BUG FIX: the original used shape=tf.shape(self._latent_dim),
            # i.e. the shape of a scalar int, which yields a single scalar
            # noise value broadcast across every latent dimension.
            # Reparameterization noise must be element-wise, matching mu.
            z = mu + tf.random_normal(shape=tf.shape(mu)) * tf.exp(log_sigma)

            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              self.name)
            return z, mu, log_sigma
    def __call__(self, input):
        """CycleGAN-style generator: c7s1-32 stem, two downsampling convs,
        a residual core, two upsampling deconvs, and a c7s1-3 tanh head.

        Returns the generated 3-channel image; flips self._reuse and
        records the scope's trainable variables in self.var_list.
        """
        with tf.variable_scope(self.name, reuse=self._reuse):
            net = ops.conv_block(input, 32, 'c7s1-32', 7, 1, self._is_train,
                                 self._reuse, self._norm, self._activation,
                                 pad='REFLECT')
            # Downsampling stages.
            for filters, label in ((64, 'd64'), (128, 'd128')):
                net = ops.conv_block(net, filters, label, 3, 2,
                                     self._is_train, self._reuse, self._norm,
                                     self._activation)
            # Residual core at 128 channels.
            for i in range(self._num_res_block):
                net = ops.residual(net, 128, 'R128_{}'.format(i),
                                   self._is_train, self._reuse, self._norm)
            # Upsampling stages mirroring the encoder.
            for filters, label in ((64, 'u64'), (32, 'u32')):
                net = ops.deconv_block(net, filters, label, 3, 2,
                                       self._is_train, self._reuse,
                                       self._norm, self._activation)
            # Output head: 3 channels, tanh range, no normalization.
            net = ops.conv_block(net, 3, 'c7s1-3', 7, 1, self._is_train,
                                 self._reuse, norm=None, activation='tanh',
                                 pad='REFLECT')

            self._reuse = True
            self.var_list = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return net
Example #9
0
    def __call__(self, input):
        """Score `input` with a PatchGAN discriminator of variable depth.

        Returns the mean of the final 1-channel feature map per image;
        flips self._reuse and records trainable variables.
        """
        with tf.variable_scope(self.name, reuse=self._reuse):
            # Stem: no normalization on the first layer.
            net = ops.conv_block(input, 64, 'C64', 4, 2, self._is_train,
                                 self._reuse, norm=None,
                                 activation=self._activation)
            for filters in (128, 256):
                net = ops.conv_block(net, filters, 'C{}'.format(filters), 4,
                                     2, self._is_train, self._reuse,
                                     self._norm, self._activation)
            # Deeper 512-channel stack for 256px inputs, one block otherwise.
            num_layers = 3 if self._image_size == 256 else 1
            for i in range(num_layers):
                net = ops.conv_block(net, 512, 'C512_{}'.format(i), 4, 2,
                                     self._is_train, self._reuse, self._norm,
                                     self._activation)
            # Head: single channel, no norm/activation, biased.
            net = ops.conv_block(net, 1, 'C1', 4, 1, self._is_train,
                                 self._reuse, norm=None, activation=None,
                                 bias=True)
            net = tf.reduce_mean(net, axis=[1, 2, 3])

            self._reuse = True
            self.var_list = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return net
Example #10
0
    def _convnet(self, input):
        """Strided-conv encoder mapping `input` to a Gaussian latent sample.

        Returns:
            (z, mu, log_sigma): a reparameterized latent sample plus the
            mean and log-std predicted by the two MLP heads.
        """
        with tf.variable_scope(self.name, reuse=self._reuse):
            num_filters = [64, 128, 256, 512, 512, 512, 512]
            if self._image_size == 256:
                num_filters.append(512)

            E = input
            # Encoder: first layer unnormalized (norm=None when i == 0).
            for i, n in enumerate(num_filters):
                E = ops.conv_block(E,
                                   n,
                                   'C{}_{}'.format(n, i),
                                   4,
                                   2,
                                   self._is_train,
                                   self._reuse,
                                   norm=self._norm if i else None,
                                   activation='leaky')
            E = ops.flatten(E)
            mu = ops.mlp(E,
                         self._latent_dim,
                         'FC8_mu',
                         self._is_train,
                         self._reuse,
                         norm=None,
                         activation=None)
            log_sigma = ops.mlp(E,
                                self._latent_dim,
                                'FC8_sigma',
                                self._is_train,
                                self._reuse,
                                norm=None,
                                activation=None)

            # BUG FIX: the original used shape=tf.shape(self._latent_dim),
            # i.e. the shape of a scalar int, which yields a single scalar
            # noise value broadcast across every latent dimension.
            # Reparameterization noise must be element-wise, matching mu.
            z = mu + tf.random_normal(shape=tf.shape(mu)) * tf.exp(log_sigma)

            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              self.name)
            return z, mu, log_sigma
Example #11
0
def generator(x, reuse=False):
    # TODO: Add dropout??
    if reuse:
        tf.get_variable_scope().reuse_variables()
    # Encoder
    conv_b_1 = ops.conv_block(x,
                              filter_size=3,
                              stride_length=2,
                              n_maps=32,
                              name='g_conv_b_1')
    conv_b_2 = ops.conv_block(conv_b_1,
                              filter_size=3,
                              stride_length=2,
                              n_maps=64,
                              name='g_conv_b_2')
    conv_b_3 = ops.conv_block(conv_b_2,
                              filter_size=3,
                              stride_length=2,
                              n_maps=64,
                              name='g_conv_b_3')
    conv_b_4 = ops.conv_block(conv_b_3,
                              filter_size=3,
                              stride_length=2,
                              n_maps=128,
                              name='g_conv_b_4')

    # Decoder
    conv_tb_1 = ops.conv_t_block(conv_b_4,
                                 filter_size=4,
                                 stride_length=2,
                                 n_maps=128,
                                 output_shape=[
                                     mc.batch_size,
                                     conv_b_4.get_shape()[1].value * 2,
                                     conv_b_4.get_shape()[2].value * 2, 128
                                 ],
                                 name='g_conv_tb_1')
    conv_tb_1 = tf.concat([conv_tb_1, conv_b_3], axis=3)
    conv_tb_2 = ops.conv_t_block(conv_tb_1,
                                 filter_size=4,
                                 stride_length=2,
                                 n_maps=64,
                                 output_shape=[
                                     mc.batch_size,
                                     conv_tb_1.get_shape()[1].value * 2,
                                     conv_tb_1.get_shape()[2].value * 2, 64
                                 ],
                                 name='g_conv_tb_2')
    conv_tb_2 = tf.concat([conv_tb_2, conv_b_2], axis=3)
    conv_tb_3 = ops.conv_t_block(conv_tb_2,
                                 filter_size=4,
                                 stride_length=2,
                                 n_maps=64,
                                 output_shape=[
                                     mc.batch_size,
                                     conv_tb_2.get_shape()[1].value * 2,
                                     conv_tb_2.get_shape()[2].value * 2, 64
                                 ],
                                 name='g_conv_tb_3')
    conv_tb_3 = tf.concat([conv_tb_3, conv_b_1], axis=3)
    conv_tb_4 = ops.conv_t_block(conv_tb_3,
                                 filter_size=4,
                                 stride_length=2,
                                 n_maps=32,
                                 output_shape=[
                                     mc.batch_size,
                                     conv_tb_3.get_shape()[1].value * 2,
                                     conv_tb_3.get_shape()[2].value * 2, 32
                                 ],
                                 name='g_conv_tb_4')

    output = ops.cnn_2d_trans(
        conv_tb_4,
        weight_shape=[4, 4, 3, conv_tb_4.get_shape()[-1].value],
        strides=[1, 1, 1, 1],
        output_shape=[
            mc.batch_size,
            conv_tb_4.get_shape()[1].value,
            conv_tb_4.get_shape()[2].value, 3
        ],
        name='g_output')
    return output