Example #1
0
    def generator(self, reuse=False):
        """Returns model generator, which is a DeConvNet.

        Assumed properties:
            gen_input - a scalar
            batch_size
            dimensions of filters and other hyperparameters.
            ...
        """
        with tf.variable_scope("conv1"):
            if self.test_images is not None:
                h = conv_block(self.test_images, relu=True, reuse=reuse)
            else:
                # noise = tf.random_normal(self.g_images.get_shape(), stddev=.03 * 255)
                # BUG FIX: removed the dead assignment `h = self.g_images`,
                # which was immediately overwritten by the conv_block below.
                h = conv_block(self.g_images, relu=True, reuse=reuse)

        # 15 residual blocks at constant resolution.
        for i in range(1, 16):
            with tf.variable_scope("res" + str(i)):
                h = res_block(h, self.is_training, reuse=reuse)

        # Two deconvolutions upsample back toward the output resolution.
        with tf.variable_scope("deconv1"):
            h = deconv_block(h)

        with tf.variable_scope("deconv2"):
            h = deconv_block(h)

        # Final conv projects the features to a 3-channel image.
        with tf.variable_scope("conv2"):
            h = conv_block(h, output_channels=3, reuse=reuse)

        return h
    def __init__(self, output_shape, n):
        """Build a ResNet-6n: stem conv, three residual stages, final fc.

        Args:
            output_shape: number of output units of the final linear layer.
            n: number of identity blocks in the first stage (later stages
               use n - 1 because a conv block opens each of them).
        """
        super(ResNet6n, self).__init__()
        self.output_shape = output_shape
        self.n = n

        # Register the first conv layer, its batch norm, and the last fc layer.
        self.conv_a = nn.Conv2d(3, 16, kernel_size=3, padding=1)
        self.bn_a = nn.BatchNorm2d(16)
        self.fc = nn.Linear(64, output_shape, bias=True)

        # Register the conv blocks that transition between channel widths.
        self.conv_block_b = conv_block(16, 32)
        self.conv_block_c = conv_block(32, 64)

        # Register the identity (residual) blocks for each stage.
        self.iden_blocks_a = nn.ModuleList(iden_block(16, 16) for _ in range(n))
        self.iden_blocks_b = nn.ModuleList(iden_block(32, 32) for _ in range(n - 1))
        self.iden_blocks_c = nn.ModuleList(iden_block(64, 64) for _ in range(n - 1))

        # Initialize all weights.
        self.weights_init()
Example #3
0
def discriminator_block(x,
                        is_training,
                        filters,
                        activation_='lrelu',
                        kernel_size=(3, 3),
                        normalization='spectral',
                        residual=True):
    """Residual discriminator block: two conv_blocks, optional skip, activation.

    The second conv has no activation; the activation is applied after the
    (optional) residual addition.
    """
    with tf.variable_scope(None, discriminator_block.__name__):
        _x = conv_block(
            x,
            filters,
            activation_,
            kernel_size,
            'same',
            normalization,
            is_training,
            0.,
        )
        # No activation here — it is applied after the residual sum below.
        _x = conv_block(
            _x,
            filters,
            None,
            kernel_size,
            'same',
            normalization,
            is_training,
            0.,
        )
        if residual:
            # Residual add requires x and _x to have matching channel counts.
            _x += x
        # BUG FIX: honor the activation_ argument instead of the hard-coded
        # 'lrelu' (consistent with the sibling discriminator_block; the
        # default value keeps existing callers unchanged).
        _x = activation(_x, activation_)

        return _x
Example #4
0
    def __call__(self, x, reuse=False):
        """U-Net style encoder-decoder forward pass with skip connections."""
        iterations = [2, 2, 2, 2, 3, 3, 3]
        base_filters = 16
        skips = []
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            # Encoder: each block halves the resolution and stores the
            # pre-pooling features for the matching decoder skip connection.
            with tf.variable_scope('Encoder'):
                for depth, n_convs in enumerate(iterations):
                    skip, x = _down_block(x, base_filters * (2 ** depth),
                                          n_convs, **self.conv_kwargs)
                    skips.append(skip)

            # Decoder mirrors the encoder, deepest stage first; the
            # shallowest stage is handled separately below.
            with tf.variable_scope('Decoder'):
                for depth in range(len(iterations) - 1, 0, -1):
                    x = _up_block(x, skips.pop(),
                                  base_filters * (2 ** depth),
                                  self.upsampling, iterations[depth],
                                  **self.conv_kwargs)
                # Last up block ends with the output projection
                # (no normalization, configurable final activation).
                with tf.variable_scope(None, 'Up'):
                    x = conv_block(x,
                                   base_filters,
                                   sampling=self.upsampling,
                                   **self.conv_kwargs)
                    x = tf.concat([x, skips.pop()], axis=-1)
                    x = conv_block(x, base_filters, sampling='same',
                                   **self.conv_kwargs)
                    x = conv_block(x,
                                   self.channel,
                                   sampling='same',
                                   normalization=None,
                                   activation_=self.last_activation)
        return x
Example #5
0
    def __call__(self, x, reuse=False, is_training=True):
        """Generator forward pass: dense stem to 4x4, log2 upsampling stages,
        then a 9x9 conv projecting to the output channels."""
        n_up = int(np.log2(self.target_size[0] // 4))
        with tf.variable_scope(self.name, reuse=reuse):
            # Project the noise vector to a 4x4 feature map.
            h = first_block(x, (4, 4), self.noise_dim, 'dense',
                            self.normalization, is_training)

            # Each stage doubles the resolution and halves the filter count,
            # starting at 512 (= 1024 // 2).
            for stage in range(n_up):
                with tf.variable_scope(None, 'conv_blocks'):
                    h = conv_block(h,
                                   is_training=is_training,
                                   filters=1024 // (2 ** (stage + 1)),
                                   activation_='relu',
                                   sampling=self.upsampling,
                                   normalization=self.normalization,
                                   dropout_rate=0.,
                                   mode='conv_first')

            # Output projection: large kernel, no normalization.
            h = conv_block(h,
                           is_training=is_training,
                           kernel_size=(9, 9),
                           filters=self.channel,
                           activation_=self.last_activation,
                           sampling='same',
                           normalization=None,
                           dropout_rate=0.,
                           mode='conv_first')
            return h
Example #6
0
def _up_block(x, x_e, filters, upsampling, conv_iteration, **conv_params):
    """Upsample x, concatenate the encoder skip x_e, run conv_iteration convs.

    The last conv halves the filter count in preparation for the next
    (shallower) decoder stage.
    """
    with tf.variable_scope(None, 'Up'):
        x = conv_block(x, filters, sampling=upsampling, **conv_params)
        x = tf.concat([x, x_e], axis=-1)
        for step in range(conv_iteration):
            is_last = step == conv_iteration - 1
            x = conv_block(x, filters // 2 if is_last else filters,
                           sampling='same', **conv_params)
    return x
Example #7
0
 def create_transform_network():
     """Build the image-transform network and return (output, input) tensors.

     Also stores the output tensor on self.tf_model.
     """
     # BUG FIX: every layer below previously consumed undefined names
     # (`inp`, `x`) and discarded its output into `tf_model`; the network
     # is now correctly chained input -> padding -> convs -> residual
     # blocks -> upsampling -> output.
     tf_input = Input(self.input_shape)
     x = ReflectionPadding2d((40, 40))(tf_input)
     x = conv_block(x, 32, size=(9, 9), strides=(1, 1))
     x = conv_block(x, 64, size=(3, 3), strides=(2, 2))
     x = conv_block(x, 128, size=(3, 3), strides=(2, 2))
     for i in range(5):
         x = res_crop_block(x, 128, i)
     x = up_block(x, 64, size=(3, 3))
     x = up_block(x, 32, size=(3, 3))
     # x = deconv_block(x, 3, size=(9,9), strides=(1,1))
     x = Conv2D(3, (9, 9), activation='tanh', padding='same')(x)
     # Rescale the tanh output from [-1, 1] to [0, 255].
     self.tf_model = Lambda(lambda t: (t + 1) * 127.5)(x)
     return self.tf_model, tf_input
Example #8
0
    def __call__(self, x, reuse=True, is_feature=False, is_training=True):
        """Discriminator forward pass.

        Returns a single logit, or the last conv feature map when
        is_feature is True.
        """
        n_down = int(np.log2(self.input_shape[0] // 4))
        # Number of residual discriminator blocks per resolution stage.
        blocks_per_stage = [2]
        if n_down > 2:
            blocks_per_stage.extend([4] * (n_down - 2))

        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            base_channels = 32
            h = conv_block(x,
                           is_training=is_training,
                           filters=base_channels,
                           activation_='lrelu',
                           kernel_size=(4, 4),
                           sampling='down',
                           normalization=self.normalization)

            for stage, n_blocks in enumerate(blocks_per_stage):
                for _ in range(n_blocks):
                    h = discriminator_block(h,
                                            is_training=is_training,
                                            filters=base_channels *
                                            (2 ** stage),
                                            activation_='lrelu',
                                            kernel_size=(3, 3),
                                            normalization=self.normalization,
                                            residual=True)
                # Strided conv into the next stage, doubling the channels.
                h = conv_block(h,
                               is_training=is_training,
                               filters=base_channels * (2 ** (stage + 1)),
                               activation_='lrelu',
                               kernel_size=(4, 4) if stage < 2 else (3, 3),
                               sampling='down',
                               normalization=self.normalization)

            if is_feature:
                return h

            h = flatten(h)

            # Spectral normalization requires the SN-aware dense layer.
            if self.normalization == 'spectral':
                return sn_dense(h,
                                is_training=is_training,
                                units=1,
                                activation_=None)
            return dense(h, units=1, activation_=None)
Example #9
0
    def __call__(self, x, is_training=True, reuse=False, *args, **kwargs):
        """Small CNN classifier: two conv/pool stages then two dense layers."""
        with tf.variable_scope(self.__class__.__name__) as vs:
            if reuse:
                vs.reuse_variables()
            shared = {'is_training': is_training, 'activation_': 'relu'}

            # Two stages of (conv, conv+pool) at widths 16 then 32.
            for width in (16, 32):
                x = conv_block(x, width, **shared)
                x = conv_block(x, width, **shared, sampling='pool')

            x = flatten(x)
            x = dense(x, 512, activation_='relu')
            # Raw logits: no activation, so callers apply softmax/loss.
            x = dense(x, self.nb_classes)
            return x
Example #10
0
    def __call__(self, x, reuse=True, is_feature=False, is_training=True):
        """Discriminator: repeated strided convs, then a single dense logit.

        Returns the last conv feature map when is_feature is True.
        """
        nb_downsampling = int(np.log2(self.input_shape[0] // 4))
        with tf.variable_scope(self.name, reuse=reuse) as vs:
            # NOTE(review): reuse is applied both via the scope argument and
            # reuse_variables(); redundant but harmless, kept as-is.
            if reuse:
                vs.reuse_variables()
            _x = x
            first_filters = 32
            for i in range(nb_downsampling):
                filters = first_filters * (2**i)
                _x = conv_block(_x,
                                is_training=is_training,
                                filters=filters,
                                activation_='lrelu',
                                sampling='down',
                                normalization=self.normalization)

            # BUG FIX: is_feature was accepted but silently ignored; return
            # the conv features here, consistent with the sibling
            # discriminators in this codebase.
            if is_feature:
                return _x

            _x = flatten(_x)

            if self.normalization == 'spectral':
                _x = sn_dense(_x,
                              is_training=is_training,
                              units=1,
                              activation_=None)
            else:
                _x = dense(_x, units=1, activation_=None)
            return _x
    def __call__(self, x, reuse=False):
        """Convolutional autoencoder; returns (latent_code, reconstruction)."""
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            with tf.variable_scope('Encoder'):
                h = conv_block(x, filters=16, sampling='same',
                               **self.conv_block_params)
                h = conv_block(h, filters=16, sampling='down',
                               **self.conv_block_params)
                h = conv_block(h, filters=32, sampling='same',
                               **self.conv_block_params)
                h = conv_block(h, filters=32, sampling='down',
                               **self.conv_block_params)

                # Remember the conv feature shape so the decoder can undo
                # the flatten below.
                conv_shape = h.get_shape().as_list()[1:]
                h = flatten(h)
                h = dense(h, 512, activation_='lrelu')
                encoded = dense(h, self.latent_dim)

            with tf.variable_scope('Decoder'):
                h = dense(encoded, 512, activation_='lrelu')
                h = dense(h,
                          conv_shape[0] * conv_shape[1] * conv_shape[2],
                          activation_='lrelu')
                h = reshape(h, conv_shape)

                h = conv_block(h, filters=32, sampling=self.upsampling,
                               **self.conv_block_params)
                h = conv_block(h, filters=16, sampling='same',
                               **self.conv_block_params)
                h = conv_block(h, filters=16, sampling=self.upsampling,
                               **self.conv_block_params)
                # Final conv uses the dedicated output parameters.
                h = conv_block(h, filters=self.channel, sampling='same',
                               **self.last_conv_block_params)

            return encoded, h
def discriminator_block(x,
                        filters,
                        activation_='lrelu',
                        kernel_size=(3, 3),
                        is_training=True,
                        normalization=None,
                        residual=True):
    """Residual discriminator block: two convs, optional skip, then activation.

    NOTE(review): the second conv_block passes normalization=None while the
    first uses the `normalization` argument — confirm this asymmetry is
    intentional. The positional conv_block arguments appear to be
    (x, filters, activation, kernel_size, is_training, padding,
    normalization, dropout_rate, mode) — verify against conv_block's
    definition, which is not visible here.
    """
    with tf.variable_scope(None, discriminator_block.__name__):
        _x = conv_block(x, filters, activation_, kernel_size, is_training,
                        'same', normalization, 0., 'conv_first')
        # Second conv: no activation (it is applied after the residual sum).
        _x = conv_block(_x, filters, None, kernel_size, is_training, 'same',
                        None, 0., 'conv_first')
        if residual:
            # Residual add requires x and _x to have the same channel count.
            _x += x
        _x = activation(_x, activation_)
        # Layer norm (when requested) is applied after the activation.
        if normalization == 'layer':
            _x = layer_norm(_x, is_training=is_training)
        return _x
Example #13
0
    def discriminator(self, inp, reuse=False):
        """Returns model discriminator.
        Assumed properties:
            disc_input - an image tensor
            G - a generator
            ...
        """
        # The first conv has no batch norm; all later convs share the same
        # leaky-relu + batch-norm configuration and differ only in the
        # per-layer kwargs listed below.
        with tf.variable_scope("conv1"):
            h = conv_block(inp, leaky_relu=True, reuse=reuse)

        layer_specs = [
            ("conv2", {"stride": 2}),
            ("conv3", {"output_channels": 128}),
            ("conv4", {"output_channels": 128, "stride": 2}),
            ("conv5", {"output_channels": 256, "stride": 1}),
            ("conv6", {"output_channels": 256, "stride": 2}),
            ("conv7", {"output_channels": 512, "stride": 1}),
            ("conv8", {"output_channels": 512, "stride": 2}),
        ]
        for scope_name, extra in layer_specs:
            with tf.variable_scope(scope_name):
                h = conv_block(h, leaky_relu=True, bn=True,
                               is_training_cond=self.is_training,
                               reuse=reuse, **extra)

        with tf.variable_scope("dense1"):
            h = dense_block(h, leaky_relu=True, output_size=1024)

        # Single logit output.
        with tf.variable_scope("dense2"):
            h = dense_block(h, output_size=1)

        return h
    def __call__(self, x, reuse=True, is_feature=False):
        """Discriminator: residual block stages alternating with strided
        convs, then global average pooling and one logit (or the conv
        features when is_feature is True)."""
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            x = conv_block(x,
                           32,
                           kernel_size=(4, 4),
                           sampling='down',
                           **self.conv_kwargs)

            # (residual blocks at the current width, then a strided conv
            # that doubles the width).
            for n_blocks, width in ((2, 32), (4, 64), (4, 128),
                                    (4, 256), (4, 512)):
                for _ in range(n_blocks):
                    x = discriminator_block(x, width, **self.conv_kwargs)
                x = conv_block(x,
                               width * 2,
                               kernel_size=(4, 4),
                               sampling='down',
                               **self.conv_kwargs)

            if is_feature:
                return x

            x = global_average_pool2d(x)
            return dense(x, units=1, activation_=None)
Example #15
0
    def __call__(self, x, is_training=True, reuse=False, *args, **kwargs):
        """VGG-style classifier with attention: the pooled feature maps of
        the three deepest stages feed attention_module along with the
        global feature vector."""
        with tf.variable_scope(self.__class__.__name__) as vs:
            if reuse:
                vs.reuse_variables()
            shared = {
                'is_training': is_training,
                'activation_': 'relu',
                'normalization': 'batch'
            }
            # (filters, dropout_rate, ends_with_pool); pooled stages also
            # contribute a feature map to the attention module.
            stages = [(64, 0.3, False), (128, 0.4, False), (256, 0.4, True),
                      (512, 0.4, True), (512, 0.4, True)]
            attention_inputs = []
            for filters, rate, pooled in stages:
                x = conv_block(x, filters, **shared, dropout_rate=rate)
                x = conv_block(x, filters, **shared, dropout_rate=rate)
                if pooled:
                    x = conv_block(x, filters, **shared, sampling='pool')
                    attention_inputs.append(x)

            # Two extra pooling convs shrink the map before flattening.
            x = conv_block(x, 512, **shared, sampling='pool')
            x = conv_block(x, 512, **shared, sampling='pool')

            x = flatten(x)
            g = dense(x, 512, activation_='relu')

            x, attentions = attention_module(attention_inputs, g)
            x = dense(x, self.nb_classes)
            return x, attentions
Example #16
0
def _down_block(x, filters, conv_iteration, **conv_params):
    """Run conv_iteration same-resolution convs, then average-pool.

    Returns (pre_pool, pooled): the pre-pool tensor is kept by the caller
    for the decoder's skip connection.
    """
    with tf.variable_scope(None, 'Down'):
        for _ in range(conv_iteration):
            x = conv_block(x, filters, sampling='same', **conv_params)
        pooled = average_pool2d(x)
    return x, pooled
Example #17
0
    def __call__(self, x, reuse=False, is_training=True):
        """SRGAN-style generator: a 16-block residual trunk with a long skip,
        upsampling stages, and a 9x9 output conv."""
        n_up = int(np.log2(self.target_size[0] // 16))
        # Parameters shared by every 64-filter same-resolution conv here.
        trunk = dict(is_training=is_training,
                     filters=64,
                     activation_='relu',
                     sampling='same',
                     normalization=self.normalization,
                     dropout_rate=0.,
                     mode='conv_first')
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            _x = first_block(x, (16, 16), self.noise_dim, 'deconv',
                             self.normalization, is_training)

            # Saved for the long skip connection around the residual trunk.
            residual_inputs = conv_block(_x, **trunk)

            with tf.variable_scope('residual_blocks'):
                for _ in range(16):
                    _x = residual_block(_x, **trunk)

            _x = conv_block(_x, **trunk)
            _x += residual_inputs

            with tf.variable_scope('upsampling_blocks'):
                for _ in range(n_up):
                    _x = conv_block(_x, **dict(trunk,
                                               sampling=self.upsampling))

            # Short three-conv branch added back onto the main path.
            branch = _x
            for _ in range(3):
                branch = conv_block(branch, **trunk)
            _x += branch

            # Output projection: large kernel, no normalization.
            _x = conv_block(_x,
                            is_training=is_training,
                            kernel_size=(9, 9),
                            filters=self.channel,
                            activation_=self.last_activation,
                            sampling='same',
                            normalization=None,
                            dropout_rate=0.,
                            mode='conv_first')
            return _x
Example #18
0
    def __call__(self, x, reuse=True, is_feature=False):
        """Deep discriminator: residual block stages alternating with
        strided convs, ending in a single dense logit (or the conv features
        when is_feature is True)."""
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            _x = conv_block(x,
                            filters=32,
                            kernel_size=(4, 4),
                            sampling='down',
                            activation_='lrelu',
                            normalization=self.normalization)

            # (number of residual blocks, width of those blocks); each stage
            # ends with a strided conv that doubles the width.
            for n_blocks, width in ((2, 32), (4, 64), (4, 128),
                                    (4, 256), (4, 512)):
                for _ in range(n_blocks):
                    _x = discriminator_block(_x,
                                             filters=width,
                                             is_training=self.is_training)
                _x = conv_block(_x,
                                filters=width * 2,
                                kernel_size=(4, 4),
                                sampling='down',
                                activation_='lrelu',
                                normalization=self.normalization)

            if is_feature:
                return _x

            _x = flatten(_x)
            return dense(_x, units=1, activation_=None)