Code Example #1
    def discriminator(self, data, name='d_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()

            # 64 -> 32
            h0_conv = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # 32 -> 16
            h1_conv = tf_utils.conv2d(h0_lrelu, self.dis_c[1], name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv, name='h1_batchnorm', _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 16 -> 8
            h2_conv = tf_utils.conv2d(h1_lrelu, self.dis_c[2], name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv, name='h2_batchnorm', _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            # 8 -> 4
            h3_conv = tf_utils.conv2d(h2_lrelu, self.dis_c[3], name='h3_conv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv, name='h3_batchnorm', _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            h3_flatten = flatten(h3_lrelu)
            h4_linear = tf_utils.linear(h3_flatten, 1, name='h4_linear')

            return tf.nn.sigmoid(h4_linear), h4_linear
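
Note: the discriminator above returns both the sigmoid probability and the raw logits, and the `is_reuse` flag lets a second call share the same variables. A minimal TF1-style sketch of how it might be driven inside the model's graph-building method follows; `real_images`, `fake_images`, and the loss wiring are illustrative assumptions, not part of the original source.

    # Hedged sketch: call the discriminator twice so real and fake passes share weights.
    d_real_prob, d_real_logits = self.discriminator(real_images, name='d_')
    d_fake_prob, d_fake_logits = self.discriminator(fake_images, name='d_', is_reuse=True)

    # The raw logits feed the numerically stable sigmoid cross-entropy loss.
    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_real_logits), logits=d_real_logits))
    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(d_fake_logits), logits=d_fake_logits))
    d_loss = d_loss_real + d_loss_fake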
Code Example #2
    def encoder(self, data, name='enc_'):
        with tf.variable_scope(name):
            # 64 -> 32 or 32 -> 16
            h0_conv = tf_utils.conv3d(data, self.dis_c[0], name='h0_conv3d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # 32 -> 16 or 16 -> 8
            h1_conv = tf_utils.conv3d(h0_lrelu, self.dis_c[1], name='h1_conv3d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv, name='h1_batchnorm', _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 16 -> 8 or 8 -> 4
            h2_conv = tf_utils.conv3d(h1_lrelu, self.dis_c[2], name='h2_conv3d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv, name='h2_batchnorm', _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            h3_conv = tf_utils.conv3d(h2_lrelu, self.dis_c[3], name='h3_conv3d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv, name='h3_batchnorm', _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            h4_conv = tf_utils.conv3d(h3_lrelu, self.dis_c[3],k_d=1,k_h=2,k_w=2, name='h4_conv3d')
            h4_batchnorm = tf_utils.batch_norm(h4_conv, name='h4_batchnorm', _ops=self._dis_train_ops)
            h4_lrelu = tf_utils.lrelu(h4_batchnorm, name='h4_lrelu')

            h4_flatten = flatten(h4_lrelu)
            h5_linear = tf_utils.linear(h4_flatten, 1024, name='h5_linear')
            return tf.nn.sigmoid(h5_linear), h5_linear
Code Example #3
    def basicDiscriminator(self, data, name='d_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()
            tf_utils.print_activations(data)

            # from (N, 32, 32, 1) to (N, 16, 16, 64)
            h0_conv = tf_utils.conv2d(data,
                                      self.dis_c[0],
                                      k_h=5,
                                      k_w=5,
                                      name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # from (N, 16, 16, 64) to (N, 8, 8, 128)
            h1_conv = tf_utils.conv2d(h0_lrelu,
                                      self.dis_c[1],
                                      k_h=5,
                                      k_w=5,
                                      name='h1_conv2d')
            h1_lrelu = tf_utils.lrelu(h1_conv, name='h1_lrelu')

            # from (N, 8, 8, 128) to (N, 4, 4, 256)
            h2_conv = tf_utils.conv2d(h1_lrelu,
                                      self.dis_c[2],
                                      k_h=5,
                                      k_w=5,
                                      name='h2_conv2d')
            h2_lrelu = tf_utils.lrelu(h2_conv, name='h2_lrelu')

            # from (N, 4, 4, 256) to (N, 4096) and to (N, 1)
            h2_flatten = flatten(h2_lrelu)
            h3_linear = tf_utils.linear(h2_flatten, 1, name='h3_linear')

            return tf.nn.sigmoid(h3_linear), h3_linear
Code Example #4
File: pix2pix.py, Project: takerujason/SpineC2M
    def __call__(self, x):
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # 200 -> 100
            h0_conv2d = tf_utils.conv2d(x, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv2d, name='h0_lrelu')

            # 100 -> 50
            h1_conv2d = tf_utils.conv2d(h0_lrelu,
                                        self.dis_c[1],
                                        name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv2d,
                                               name='h1_batchnorm',
                                               _ops=self._ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 50 -> 25
            h2_conv2d = tf_utils.conv2d(h1_lrelu,
                                        self.dis_c[2],
                                        name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv2d,
                                               name='h2_batchnorm',
                                               _ops=self._ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            # 25 -> 13
            h3_conv2d = tf_utils.conv2d(h2_lrelu,
                                        self.dis_c[3],
                                        name='h3_conv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv2d,
                                               name='h3_batchnorm',
                                               _ops=self._ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            # Patch GAN: 13 -> 13
            output = tf_utils.conv2d(h3_lrelu,
                                     self.dis_c[4],
                                     k_h=3,
                                     k_w=3,
                                     d_h=1,
                                     d_w=1,
                                     name='output_conv2d')

            # set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return output
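
Note: this PatchGAN discriminator is written as a callable object; the first call builds the variables under `self.name`, then `self.reuse` is flipped so later calls share them, and `self.variables` collects the trainable variables for the optimizer. A minimal driving sketch, assuming a wrapper class `Discriminator` that sets `self.reuse = False` in its `__init__` (the constructor, tensor names, and loss here are illustrative, not from the source):

    # Hedged sketch: the same instance is applied to real and fake conditional pairs.
    dis = Discriminator(name='dis')                      # hypothetical constructor
    d_real = dis(tf.concat([x_img, y_real], axis=3))     # first call creates variables
    d_fake = dis(tf.concat([x_img, y_fake], axis=3))     # second call reuses them
    # d_loss is assumed to be built from d_real / d_fake as in a standard GAN.
    d_optim = tf.train.AdamOptimizer(2e-4).minimize(d_loss, var_list=dis.variables)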
Code Example #5
File: discoGAN.py, Project: ChengBinJin/GANFromAtoZ
    def __call__(self, x):
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # conv: (N, H, W, 3) -> (N, H/2, W/2, 64)
            output = tf_utils.conv2d(x,
                                     self.ndf,
                                     k_h=4,
                                     k_w=4,
                                     d_h=2,
                                     d_w=2,
                                     padding='SAME',
                                     name='conv0_conv2d')
            output = tf_utils.lrelu(output, name='conv0_lrelu', is_print=True)

            for idx, hidden_dim in enumerate(self.hidden_dims[1:]):
                # conv: (N, H/2, W/2, C) -> (N, H/4, W/4, 2C)
                output = tf_utils.conv2d(output,
                                         hidden_dim,
                                         k_h=4,
                                         k_w=4,
                                         d_h=2,
                                         d_w=2,
                                         padding='SAME',
                                         name='conv{}_conv2d'.format(idx + 1))
                output = tf_utils.norm(output,
                                       _type=self.norm,
                                       _ops=self._ops,
                                       name='conv{}_norm'.format(idx + 1))
                output = tf_utils.lrelu(output,
                                        name='conv{}_lrelu'.format(idx + 1),
                                        is_print=True)

            # conv: (N, H/16, W/16, 512) -> (N, H/16, W/16, 1)
            output = tf_utils.conv2d(output,
                                     1,
                                     k_h=4,
                                     k_w=4,
                                     d_h=1,
                                     d_w=1,
                                     padding='SAME',
                                     name='conv4_conv2d')

            # set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return tf_utils.sigmoid(output), output
Code Example #6
    def model_g(self, x):
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # (N, H, W, C) -> (N, H/2, W/2, 64)
            conv1 = tf_utils.conv2d(x, self.ndf, k_h=4, k_w=4, d_h=2, d_w=2, padding='SAME',
                                    name='conv1_conv')
            conv1 = tf_utils.lrelu(conv1, name='conv1_lrelu', is_print=True)

            # (N, H/2, W/2, 64) -> (N, H/4, W/4, 128)
            conv2 = tf_utils.conv_norm_lrelu(conv1, 2 * self.ndf, k_h=4, k_w=4, d_h=2, d_w=2, padding='SAME',
                                             name='conv2_conv', ops=self._ops)

            # (N, H/4, W/4, 128) -> (N, H/8, W/8, 256)
            conv3 = tf_utils.conv_norm_lrelu(conv2, 4 * self.ndf, k_h=4, k_w=4, d_h=2, d_w=2, padding='SAME',
                                             name='conv3_conv', ops=self._ops)

            # (N, H/8, W/8, 256) -> (N, H/16, W/16, 512)
            conv4 = tf_utils.conv2d(conv3, 8 * self.ndf, k_h=4, k_w=4, d_h=2, d_w=2, padding='SAME',
                                    name='conv4_conv')

            # (N, H/16, W/16, 512) -> (N, H/16, W/16, 1)
            conv5 = tf_utils.conv2d(conv4, 1, k_h=4, k_w=4, d_h=1, d_w=1, padding='SAME',
                                    name='conv5_conv', is_print=True)

            output = tf.identity(conv5, name='output_without_sigmoid')

            # set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return output
Code Example #7
    def discriminator(self, data, name='d_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()

            # 256 -> 128
            h0_conv2d = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv2d, name='h0_lrelu')

            # 128 -> 64
            h1_conv2d = tf_utils.conv2d(h0_lrelu,
                                        self.dis_c[1],
                                        name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv2d,
                                               name='h1_batchnorm',
                                               _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # 64 -> 32
            h2_conv2d = tf_utils.conv2d(h1_lrelu,
                                        self.dis_c[2],
                                        name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv2d,
                                               name='h2_batchnorm',
                                               _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            # 32 -> 16
            h3_conv2d = tf_utils.conv2d(h2_lrelu,
                                        self.dis_c[3],
                                        name='h3_conv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv2d,
                                               name='h3_batchnorm',
                                               _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            # Patch GAN: 16 -> 16
            h4_conv2d = tf_utils.conv2d(h3_lrelu,
                                        self.dis_c[4],
                                        k_h=3,
                                        k_w=3,
                                        d_h=1,
                                        d_w=1,
                                        name='h4_conv2d')

            return tf.nn.sigmoid(h4_conv2d), h4_conv2d
Code Example #8
File: wgan.py, Project: ChengBinJin/GANFromAtoZ
    def discriminator(self, data, name='d_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()

            # (128, 256) -> (64, 128)
            h0_conv = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

            # (64, 128) -> (32, 64)
            h1_conv = tf_utils.conv2d(h0_lrelu,
                                      self.dis_c[1],
                                      name='h1_conv2d')
            h1_batchnorm = tf_utils.batch_norm(h1_conv,
                                               name='h1_batchnorm',
                                               _ops=self._dis_train_ops)
            h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

            # (32, 64) -> (16, 32)
            h2_conv = tf_utils.conv2d(h1_lrelu,
                                      self.dis_c[2],
                                      name='h2_conv2d')
            h2_batchnorm = tf_utils.batch_norm(h2_conv,
                                               name='h2_batchnorm',
                                               _ops=self._dis_train_ops)
            h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

            # (16, 32) -> (8, 16)
            h3_conv = tf_utils.conv2d(h2_lrelu,
                                      self.dis_c[3],
                                      name='h3_conv2d')
            h3_batchnorm = tf_utils.batch_norm(h3_conv,
                                               name='h3_batchnorm',
                                               _ops=self._dis_train_ops)
            h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

            # (8, 16) -> (4, 8)
            h4_conv = tf_utils.conv2d(h3_lrelu,
                                      self.dis_c[4],
                                      name='h4_conv2d')
            h4_batchnorm = tf_utils.batch_norm(h4_conv,
                                               name='h4_batchnorm',
                                               _ops=self._dis_train_ops)
            h4_lrelu = tf_utils.lrelu(h4_batchnorm, name='h4_lrelu')

            # (4, 8) -> (2, 4)
            h5_conv = tf_utils.conv2d(h4_lrelu,
                                      self.dis_c[5],
                                      name='h5_conv2d')
            h5_batchnorm = tf_utils.batch_norm(h5_conv,
                                               name='h5_batchnorm',
                                               _ops=self._dis_train_ops)
            h5_lrelu = tf_utils.lrelu(h5_batchnorm, name='h5_lrelu')

            h5_flatten = flatten(h5_lrelu)
            h6_linear = tf_utils.linear(h5_flatten, 1, name='h6_linear')

            return tf.nn.sigmoid(h6_linear), h6_linear
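
Note: although this wgan.py discriminator also returns a sigmoid, a WGAN critic normally uses only the unbounded linear output as its score. A hedged sketch of the standard WGAN losses built from the second return value (the tensor names are illustrative, not from the original file):

    # Hedged sketch of WGAN-style losses using the raw linear output.
    _, critic_real = self.discriminator(real_imgs, name='d_')
    _, critic_fake = self.discriminator(fake_imgs, name='d_', is_reuse=True)
    d_loss = tf.reduce_mean(critic_fake) - tf.reduce_mean(critic_real)
    g_loss = -tf.reduce_mean(critic_fake)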
Code Example #9
File: discoGAN.py, Project: ChengBinJin/GANFromAtoZ
    def __call__(self, x):
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # conv: (N, H, W, C) -> (N, H/2, W/2, 64)
            output = tf_utils.conv2d(x,
                                     self.conv_dims[0],
                                     k_h=4,
                                     k_w=4,
                                     d_h=2,
                                     d_w=2,
                                     padding='SAME',
                                     name='conv0_conv2d')
            output = tf_utils.lrelu(output, name='conv0_lrelu', is_print=True)

            for idx, conv_dim in enumerate(self.conv_dims[1:]):
                # conv: (N, H/2, W/2, C) -> (N, H/4, W/4, 2C)
                output = tf_utils.conv2d(output,
                                         conv_dim,
                                         k_h=4,
                                         k_w=4,
                                         d_h=2,
                                         d_w=2,
                                         padding='SAME',
                                         name='conv{}_conv2d'.format(idx + 1))
                output = tf_utils.norm(output,
                                       _type=self.norm,
                                       _ops=self._ops,
                                       name='conv{}_norm'.format(idx + 1))
                output = tf_utils.lrelu(output,
                                        name='conv{}_lrelu'.format(idx + 1),
                                        is_print=True)

            for idx, deconv_dim in enumerate(self.deconv_dims):
                # deconv: (N, H/16, W/16, C) -> (N, H/8, W/8, C/2)
                output = tf_utils.deconv2d(output,
                                           deconv_dim,
                                           k_h=4,
                                           k_w=4,
                                           name='deconv{}_conv2d'.format(idx))
                output = tf_utils.norm(output,
                                       _type=self.norm,
                                       _ops=self._ops,
                                       name='deconv{}_norm'.format(idx))
                output = tf_utils.relu(output,
                                       name='deconv{}_relu'.format(idx),
                                       is_print=True)

            # deconv: (N, H/2, W/2, 64) -> (N, H, W, 3)
            output = tf_utils.deconv2d(output,
                                       self.output_channel,
                                       k_h=4,
                                       k_w=4,
                                       name='conv3_deconv2d')
            output = tf_utils.tanh(output, name='conv4_tanh', is_print=True)

            # set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
            return output
Code Example #10
    def __call__(self, x):
        with tf.compat.v1.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x, logger=self.logger)

            # H1: (320, 200) -> (160, 100)
            h0_conv2d = tf_utils.conv2d(x,
                                        output_dim=self.dis_c[0],
                                        initializer='He',
                                        logger=self.logger,
                                        name='h0_conv2d')
            h0_lrelu = tf_utils.lrelu(h0_conv2d,
                                      logger=self.logger,
                                      name='h0_lrelu')

            # H2: (160, 100) -> (80, 50)
            h1_conv2d = tf_utils.conv2d(h0_lrelu,
                                        output_dim=self.dis_c[1],
                                        initializer='He',
                                        logger=self.logger,
                                        name='h1_conv2d')
            h1_norm = tf_utils.norm(h1_conv2d,
                                    _type=self.norm,
                                    _ops=self._ops,
                                    logger=self.logger,
                                    name='h1_norm')
            h1_lrelu = tf_utils.lrelu(h1_norm,
                                      logger=self.logger,
                                      name='h1_lrelu')

            # H3: (80, 50) -> (40, 25)
            h2_conv2d = tf_utils.conv2d(h1_lrelu,
                                        output_dim=self.dis_c[2],
                                        initializer='He',
                                        logger=self.logger,
                                        name='h2_conv2d')
            h2_norm = tf_utils.norm(h2_conv2d,
                                    _type=self.norm,
                                    _ops=self._ops,
                                    logger=self.logger,
                                    name='h2_norm')
            h2_lrelu = tf_utils.lrelu(h2_norm,
                                      logger=self.logger,
                                      name='h2_lrelu')

            # H4: (40, 25) -> (20, 13)
            h3_conv2d = tf_utils.conv2d(h2_lrelu,
                                        output_dim=self.dis_c[3],
                                        initializer='He',
                                        logger=self.logger,
                                        name='h3_conv2d')
            h3_norm = tf_utils.norm(h3_conv2d,
                                    _type=self.norm,
                                    _ops=self._ops,
                                    logger=self.logger,
                                    name='h3_norm')
            h3_lrelu = tf_utils.lrelu(h3_norm,
                                      logger=self.logger,
                                      name='h3_lrelu')

            # H5: (20, 13) -> (20, 13)
            output = tf_utils.conv2d(h3_lrelu,
                                     output_dim=self.dis_c[4],
                                     k_h=3,
                                     k_w=3,
                                     d_h=1,
                                     d_w=1,
                                     initializer='He',
                                     logger=self.logger,
                                     name='output_conv2d')

            # set reuse=True for next call
            self.reuse = True
            self.variables = tf.compat.v1.get_collection(
                tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

        return output
Code Example #11
    def __call__(self, x, keep_rate=0.5):
        with tf.compat.v1.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x, logger=self.logger)

            # E0: (320, 200) -> (160, 100)
            e0_conv2d = tf_utils.conv2d(x,
                                        output_dim=self.gen_c[0],
                                        initializer='He',
                                        logger=self.logger,
                                        name='e0_conv2d')
            e0_lrelu = tf_utils.lrelu(e0_conv2d,
                                      logger=self.logger,
                                      name='e0_lrelu')

            # E1: (160, 100) -> (80, 50)
            e1_conv2d = tf_utils.conv2d(e0_lrelu,
                                        output_dim=self.gen_c[1],
                                        initializer='He',
                                        logger=self.logger,
                                        name='e1_conv2d')
            e1_batchnorm = tf_utils.norm(e1_conv2d,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='e1_norm')
            e1_lrelu = tf_utils.lrelu(e1_batchnorm,
                                      logger=self.logger,
                                      name='e1_lrelu')

            # E2: (80, 50) -> (40, 25)
            e2_conv2d = tf_utils.conv2d(e1_lrelu,
                                        output_dim=self.gen_c[2],
                                        initializer='He',
                                        logger=self.logger,
                                        name='e2_conv2d')
            e2_batchnorm = tf_utils.norm(e2_conv2d,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='e2_norm')
            e2_lrelu = tf_utils.lrelu(e2_batchnorm,
                                      logger=self.logger,
                                      name='e2_lrelu')

            # E3: (40, 25) -> (20, 13)
            e3_conv2d = tf_utils.conv2d(e2_lrelu,
                                        output_dim=self.gen_c[3],
                                        initializer='He',
                                        logger=self.logger,
                                        name='e3_conv2d')
            e3_batchnorm = tf_utils.norm(e3_conv2d,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='e3_norm')
            e3_lrelu = tf_utils.lrelu(e3_batchnorm,
                                      logger=self.logger,
                                      name='e3_lrelu')

            # E4: (20, 13) -> (10, 7)
            e4_conv2d = tf_utils.conv2d(e3_lrelu,
                                        output_dim=self.gen_c[4],
                                        initializer='He',
                                        logger=self.logger,
                                        name='e4_conv2d')
            e4_batchnorm = tf_utils.norm(e4_conv2d,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='e4_norm')
            e4_lrelu = tf_utils.lrelu(e4_batchnorm,
                                      logger=self.logger,
                                      name='e4_lrelu')

            # E5: (10, 7) -> (5, 4)
            e5_conv2d = tf_utils.conv2d(e4_lrelu,
                                        output_dim=self.gen_c[5],
                                        initializer='He',
                                        logger=self.logger,
                                        name='e5_conv2d')
            e5_batchnorm = tf_utils.norm(e5_conv2d,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='e5_norm')
            e5_lrelu = tf_utils.lrelu(e5_batchnorm,
                                      logger=self.logger,
                                      name='e5_lrelu')

            # E6: (5, 4) -> (3, 2)
            e6_conv2d = tf_utils.conv2d(e5_lrelu,
                                        output_dim=self.gen_c[6],
                                        initializer='He',
                                        logger=self.logger,
                                        name='e6_conv2d')
            e6_batchnorm = tf_utils.norm(e6_conv2d,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='e6_norm')
            e6_lrelu = tf_utils.lrelu(e6_batchnorm,
                                      logger=self.logger,
                                      name='e6_lrelu')

            # E7: (3, 2) -> (2, 1)
            e7_conv2d = tf_utils.conv2d(e6_lrelu,
                                        output_dim=self.gen_c[7],
                                        initializer='He',
                                        logger=self.logger,
                                        name='e7_conv2d')
            e7_batchnorm = tf_utils.norm(e7_conv2d,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='e7_norm')
            e7_relu = tf_utils.lrelu(e7_batchnorm,
                                     logger=self.logger,
                                     name='e7_relu')

            # D0: (2, 1) -> (3, 2)
            # Stage1: (2, 1) -> (4, 2)
            d0_deconv = tf_utils.deconv2d(e7_relu,
                                          output_dim=self.gen_c[8],
                                          initializer='He',
                                          logger=self.logger,
                                          name='d0_deconv2d')
            # Stage2: (4, 2) -> (3, 2)
            shapeA = e6_conv2d.get_shape().as_list()[1]
            shapeB = d0_deconv.get_shape().as_list()[1] - e6_conv2d.get_shape(
            ).as_list()[1]
            d0_split, _ = tf.split(d0_deconv, [shapeA, shapeB],
                                   axis=1,
                                   name='d0_split')
            tf_utils.print_activations(d0_split, logger=self.logger)
            # Stage3: Batch norm, dropout, concatenation, and relu
            d0_batchnorm = tf_utils.norm(d0_split,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='d0_norm')
            d0_drop = tf_utils.dropout(d0_batchnorm,
                                       keep_prob=keep_rate,
                                       logger=self.logger,
                                       name='d0_dropout')
            d0_concat = tf.concat([d0_drop, e6_batchnorm],
                                  axis=3,
                                  name='d0_concat')
            d0_relu = tf_utils.relu(d0_concat,
                                    logger=self.logger,
                                    name='d0_relu')

            # D1: (3, 2) -> (5, 4)
            # Stage1: (3, 2) -> (6, 4)
            d1_deconv = tf_utils.deconv2d(d0_relu,
                                          output_dim=self.gen_c[9],
                                          initializer='He',
                                          logger=self.logger,
                                          name='d1_deconv2d')
            # Stage2: (6, 4) -> (5, 4)
            shapeA = e5_batchnorm.get_shape().as_list()[1]
            shapeB = d1_deconv.get_shape().as_list(
            )[1] - e5_batchnorm.get_shape().as_list()[1]
            d1_split, _ = tf.split(d1_deconv, [shapeA, shapeB],
                                   axis=1,
                                   name='d1_split')
            tf_utils.print_activations(d1_split, logger=self.logger)
            # Stage3: Batch norm, dropout, concatenation, and relu
            d1_batchnorm = tf_utils.norm(d1_split,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='d1_norm')
            d1_drop = tf_utils.dropout(d1_batchnorm,
                                       keep_prob=keep_rate,
                                       logger=self.logger,
                                       name='d1_dropout')
            d1_concat = tf.concat([d1_drop, e5_batchnorm],
                                  axis=3,
                                  name='d1_concat')
            d1_relu = tf_utils.relu(d1_concat,
                                    logger=self.logger,
                                    name='d1_relu')

            # D2: (5, 4) -> (10, 7)
            # Stage1: (5, 4) -> (10, 8)
            d2_deconv = tf_utils.deconv2d(d1_relu,
                                          output_dim=self.gen_c[10],
                                          initializer='He',
                                          logger=self.logger,
                                          name='d2_deconv2d')
            # Stage2: (10, 8) -> (10, 7)
            shapeA = e4_batchnorm.get_shape().as_list()[2]
            shapeB = d2_deconv.get_shape().as_list(
            )[2] - e4_batchnorm.get_shape().as_list()[2]
            d2_split, _ = tf.split(d2_deconv, [shapeA, shapeB],
                                   axis=2,
                                   name='d2_split')
            tf_utils.print_activations(d2_split, logger=self.logger)
            # Stage3: Batch norm, dropout, concatenation, and relu
            d2_batchnorm = tf_utils.norm(d2_split,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='d2_norm')
            d2_drop = tf_utils.dropout(d2_batchnorm,
                                       keep_prob=keep_rate,
                                       logger=self.logger,
                                       name='d2_dropout')
            d2_concat = tf.concat([d2_drop, e4_batchnorm],
                                  axis=3,
                                  name='d2_concat')
            d2_relu = tf_utils.relu(d2_concat,
                                    logger=self.logger,
                                    name='d2_relu')

            # D3: (10, 7) -> (20, 13)
            # Stage1: (10, 7) -> (20, 14)
            d3_deconv = tf_utils.deconv2d(d2_relu,
                                          output_dim=self.gen_c[11],
                                          initializer='He',
                                          logger=self.logger,
                                          name='d3_deconv2d')
            # Stage2: (20, 14) -> (20, 13)
            shapeA = e3_batchnorm.get_shape().as_list()[2]
            shapeB = d3_deconv.get_shape().as_list(
            )[2] - e3_batchnorm.get_shape().as_list()[2]
            d3_split, _ = tf.split(d3_deconv, [shapeA, shapeB],
                                   axis=2,
                                   name='d3_split_2')
            tf_utils.print_activations(d3_split, logger=self.logger)
            # Stage3: Batch norm, concatenation, and relu
            d3_batchnorm = tf_utils.norm(d3_split,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='d3_norm')
            d3_concat = tf.concat([d3_batchnorm, e3_batchnorm],
                                  axis=3,
                                  name='d3_concat')
            d3_relu = tf_utils.relu(d3_concat,
                                    logger=self.logger,
                                    name='d3_relu')

            # D4: (20, 13) -> (40, 25)
            # Stage1: (20, 13) -> (40, 26)
            d4_deconv = tf_utils.deconv2d(d3_relu,
                                          output_dim=self.gen_c[12],
                                          initializer='He',
                                          logger=self.logger,
                                          name='d4_deconv2d')
            # Stage2: (40, 26) -> (40, 25)
            shapeA = e2_batchnorm.get_shape().as_list()[2]
            shapeB = d4_deconv.get_shape().as_list(
            )[2] - e2_batchnorm.get_shape().as_list()[2]
            d4_split, _ = tf.split(d4_deconv, [shapeA, shapeB],
                                   axis=2,
                                   name='d4_split')
            tf_utils.print_activations(d4_split, logger=self.logger)
            # Stage3: Batch norm, concatenation, and relu
            d4_batchnorm = tf_utils.norm(d4_split,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='d4_norm')
            d4_concat = tf.concat([d4_batchnorm, e2_batchnorm],
                                  axis=3,
                                  name='d4_concat')
            d4_relu = tf_utils.relu(d4_concat,
                                    logger=self.logger,
                                    name='d4_relu')

            # D5: (40, 25, 256) -> (80, 50, 128)
            d5_deconv = tf_utils.deconv2d(d4_relu,
                                          output_dim=self.gen_c[13],
                                          initializer='He',
                                          logger=self.logger,
                                          name='d5_deconv2d')
            d5_batchnorm = tf_utils.norm(d5_deconv,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='d5_norm')
            d5_concat = tf.concat([d5_batchnorm, e1_batchnorm],
                                  axis=3,
                                  name='d5_concat')
            d5_relu = tf_utils.relu(d5_concat,
                                    logger=self.logger,
                                    name='d5_relu')

            # D6: (80, 50, 128) -> (160, 100, 64)
            d6_deconv = tf_utils.deconv2d(d5_relu,
                                          output_dim=self.gen_c[14],
                                          initializer='He',
                                          logger=self.logger,
                                          name='d6_deconv2d')
            d6_batchnorm = tf_utils.norm(d6_deconv,
                                         _type=self.norm,
                                         _ops=self._ops,
                                         logger=self.logger,
                                         name='d6_norm')
            d6_concat = tf.concat([d6_batchnorm, e0_conv2d],
                                  axis=3,
                                  name='d6_concat')
            d6_relu = tf_utils.relu(d6_concat,
                                    logger=self.logger,
                                    name='d6_relu')

            # D7: (160, 100, 64) -> (320, 200, 1)
            d7_deconv = tf_utils.deconv2d(d6_relu,
                                          output_dim=self.gen_c[15],
                                          initializer='He',
                                          logger=self.logger,
                                          name='d7_deconv2d')
            output = tf_utils.tanh(d7_deconv,
                                   logger=self.logger,
                                   name='output_tanh')

            # Set reuse=True for next call
            self.reuse = True
            self.variables = tf.compat.v1.get_collection(
                tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

        return output
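
Note: the tf.split calls in the decoder above crop each deconvolution output back to the size of its encoder skip connection when a spatial dimension is odd (for example, a width of 13 doubles to 26 under deconvolution, while the matching encoder feature map is only 25 wide). A self-contained illustration of the same crop-by-split trick, with made-up shapes:

    # Hedged illustration of the crop-by-split trick used in the decoder above.
    feat = tf.zeros([1, 26, 25, 8])        # stand-in for a deconv output, height 26
    target_h = 25                          # height of the encoder skip tensor
    extra_h = feat.get_shape().as_list()[1] - target_h
    kept, _ = tf.split(feat, [target_h, extra_h], axis=1)
    # kept has shape (1, 25, 25, 8) and can now be concatenated with the skip tensor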
Code Example #12
    def __call__(self, x):
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # conv: (N, H, W, C) -> (N, H/2, W/2, 64)
            output = tf_utils.conv2d(x,
                                     self.conv_dims[0],
                                     k_h=4,
                                     k_w=4,
                                     d_h=2,
                                     d_w=2,
                                     padding='SAME',
                                     name='conv0_conv2d')
            output = tf_utils.lrelu(output, name='conv0_lrelu', is_print=True)

            for idx, conv_dim in enumerate(self.conv_dims[1:]):
                # conv: (N, H/2, W/2, C) -> (N, H/4, W/4, 2C)
                output = tf_utils.conv2d(output,
                                         conv_dim,
                                         k_h=4,
                                         k_w=4,
                                         d_h=2,
                                         d_w=2,
                                         padding='SAME',
                                         name='conv{}_conv2d'.format(idx + 1))
                output = tf_utils.norm(output,
                                       _type=self.norm,
                                       _ops=self._ops,
                                       name='conv{}_norm'.format(idx + 1))
                output = tf_utils.lrelu(output,
                                        name='conv{}_lrelu'.format(idx + 1),
                                        is_print=True)

            for idx, deconv_dim in enumerate(self.deconv_dims):
                # deconv: (N, H/16, W/16, C) -> (N, H/8, W/8, C/2)
                output = tf_utils.deconv2d(output,
                                           deconv_dim,
                                           k_h=4,
                                           k_w=4,
                                           name='deconv{}_conv2d'.format(idx))
                output = tf_utils.norm(output,
                                       _type=self.norm,
                                       _ops=self._ops,
                                       name='deconv{}_norm'.format(idx))
                output = tf_utils.relu(output,
                                       name='deconv{}_relu'.format(idx),
                                       is_print=True)

            # split (N, 152, 104, 64) to (N, 150, 104, 64)
            shapeA = int(self.img_size[0] / 2)
            shapeB = output.get_shape().as_list()[1] - shapeA
            output, _ = tf.split(output, [shapeA, shapeB],
                                 axis=1,
                                 name='split_0')
            tf_utils.print_activations(output)
            # split (N, 150, 104, 64) to (N, 150, 100, 64)
            shapeA = int(self.img_size[1] / 2)
            shapeB = output.get_shape().as_list()[2] - shapeA
            output, _ = tf.split(output, [shapeA, shapeB],
                                 axis=2,
                                 name='split_1')
            tf_utils.print_activations(output)

            # deconv: (N, H/2, W/2, 64) -> (N, H, W, C)
            output = tf_utils.deconv2d(output,
                                       self.img_size[2],
                                       k_h=4,
                                       k_w=4,
                                       name='conv3_deconv2d')
            output = tf_utils.tanh(output, name='conv4_tanh', is_print=True)

            # set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
            return output
Code Example #13
    def __call__(self, x, is_train=True):
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # (N, 120, 160, 1) -> (N, 60, 80, 64)
            h0_conv = tf_utils.conv2d(
                x,
                output_dim=self.dims[0],
                initializer='he',
                name='h0_conv',
                logger=self.logger if is_train is True else None)
            h0_lrelu = tf_utils.lrelu(
                h0_conv,
                name='h0_lrelu',
                logger=self.logger if is_train is True else None)

            # (N, 60, 80, 64) -> (N, 30, 40, 128)
            h1_conv = tf_utils.conv2d(
                h0_lrelu,
                output_dim=self.dims[1],
                initializer='he',
                name='h1_conv',
                logger=self.logger if is_train is True else None)
            h1_norm = tf_utils.norm(
                h1_conv,
                name='h1_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h1_lrelu = tf_utils.lrelu(
                h1_norm,
                name='h1_lrelu',
                logger=self.logger if is_train is True else None)

            # (N, 30, 40, 128) -> (N, 15, 20, 256)
            h2_conv = tf_utils.conv2d(
                h1_lrelu,
                output_dim=self.dims[2],
                initializer='he',
                name='h2_conv',
                logger=self.logger if is_train is True else None)
            h2_norm = tf_utils.norm(
                h2_conv,
                name='h2_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h2_lrelu = tf_utils.lrelu(
                h2_norm,
                name='h2_lrelu',
                logger=self.logger if is_train is True else None)

            # (N, 15, 20, 256) -> (N, 8, 10, 512)
            h3_conv = tf_utils.conv2d(
                h2_lrelu,
                output_dim=self.dims[3],
                initializer='he',
                name='h3_conv',
                logger=self.logger if is_train is True else None)
            h3_norm = tf_utils.norm(
                h3_conv,
                name='h3_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h3_lrelu = tf_utils.lrelu(
                h3_norm,
                name='h3_lrelu',
                logger=self.logger if is_train is True else None)

            # (N, 8, 10, 512) -> (N, 4, 5, 1024)
            h4_conv = tf_utils.conv2d(
                h3_lrelu,
                output_dim=self.dims[4],
                initializer='he',
                name='h4_conv',
                logger=self.logger if is_train is True else None)
            h4_norm = tf_utils.norm(
                h4_conv,
                name='h4_batch',
                _type='batch',
                _ops=self._ops,
                is_train=is_train,
                logger=self.logger if is_train is True else None)
            h4_lrelu = tf_utils.lrelu(
                h4_norm,
                name='h4_lrelu',
                logger=self.logger if is_train is True else None)
            # (N, 4, 5, 1024) -> (N, 4*5*1024)
            h4_flatten = tf_utils.flatten(
                h4_lrelu,
                name='h4_flatten',
                logger=self.logger if is_train is True else None)

            # (N, 4*5*1024) -> (N, 1)
            output = tf_utils.linear(
                h4_flatten,
                output_size=self.dims[5],
                initializer='he',
                name='output',
                logger=self.logger if is_train is True else None)

            # Set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return output
Code Example #14
File: pix2pix.py, Project: takerujason/SpineC2M
    def __call__(self, x):
        with tf.variable_scope(self.name, reuse=self.reuse):
            tf_utils.print_activations(x)

            # (300, 200) -> (150, 100)
            e0_conv2d = tf_utils.conv2d(x, self.gen_c[0], name='e0_conv2d')
            e0_lrelu = tf_utils.lrelu(e0_conv2d, name='e0_lrelu')

            # (150, 100) -> (75, 50)
            e1_conv2d = tf_utils.conv2d(e0_lrelu,
                                        self.gen_c[1],
                                        name='e1_conv2d')
            e1_batchnorm = tf_utils.batch_norm(e1_conv2d,
                                               name='e1_batchnorm',
                                               _ops=self._ops)
            e1_lrelu = tf_utils.lrelu(e1_batchnorm, name='e1_lrelu')

            # (75, 50) -> (38, 25)
            e2_conv2d = tf_utils.conv2d(e1_lrelu,
                                        self.gen_c[2],
                                        name='e2_conv2d')
            e2_batchnorm = tf_utils.batch_norm(e2_conv2d,
                                               name='e2_batchnorm',
                                               _ops=self._ops)
            e2_lrelu = tf_utils.lrelu(e2_batchnorm, name='e2_lrelu')

            # (38, 25) -> (19, 13)
            e3_conv2d = tf_utils.conv2d(e2_lrelu,
                                        self.gen_c[3],
                                        name='e3_conv2d')
            e3_batchnorm = tf_utils.batch_norm(e3_conv2d,
                                               name='e3_batchnorm',
                                               _ops=self._ops)
            e3_lrelu = tf_utils.lrelu(e3_batchnorm, name='e3_lrelu')

            # (19, 13) -> (10, 7)
            e4_conv2d = tf_utils.conv2d(e3_lrelu,
                                        self.gen_c[4],
                                        name='e4_conv2d')
            e4_batchnorm = tf_utils.batch_norm(e4_conv2d,
                                               name='e4_batchnorm',
                                               _ops=self._ops)
            e4_lrelu = tf_utils.lrelu(e4_batchnorm, name='e4_lrelu')

            # (10, 7) -> (5, 4)
            e5_conv2d = tf_utils.conv2d(e4_lrelu,
                                        self.gen_c[5],
                                        name='e5_conv2d')
            e5_batchnorm = tf_utils.batch_norm(e5_conv2d,
                                               name='e5_batchnorm',
                                               _ops=self._ops)
            e5_lrelu = tf_utils.lrelu(e5_batchnorm, name='e5_lrelu')

            # (5, 4) -> (3, 2)
            e6_conv2d = tf_utils.conv2d(e5_lrelu,
                                        self.gen_c[6],
                                        name='e6_conv2d')
            e6_batchnorm = tf_utils.batch_norm(e6_conv2d,
                                               name='e6_batchnorm',
                                               _ops=self._ops)
            e6_lrelu = tf_utils.lrelu(e6_batchnorm, name='e6_lrelu')

            # (3, 2) -> (2, 1)
            e7_conv2d = tf_utils.conv2d(e6_lrelu,
                                        self.gen_c[7],
                                        name='e7_conv2d')
            e7_batchnorm = tf_utils.batch_norm(e7_conv2d,
                                               name='e7_batchnorm',
                                               _ops=self._ops)
            e7_relu = tf_utils.relu(e7_batchnorm, name='e7_relu')

            # (2, 1) -> (4, 2)
            d0_deconv = tf_utils.deconv2d(e7_relu,
                                          self.gen_c[8],
                                          name='d0_deconv2d')
            shapeA = e6_conv2d.get_shape().as_list()[1]
            shapeB = d0_deconv.get_shape().as_list()[1] - e6_conv2d.get_shape(
            ).as_list()[1]
            # (4, 2) -> (3, 2)
            d0_split, _ = tf.split(d0_deconv, [shapeA, shapeB],
                                   axis=1,
                                   name='d0_split')
            tf_utils.print_activations(d0_split)
            d0_batchnorm = tf_utils.batch_norm(d0_split,
                                               name='d0_batchnorm',
                                               _ops=self._ops)
            d0_drop = tf.nn.dropout(d0_batchnorm,
                                    keep_prob=0.5,
                                    name='d0_dropout')
            d0_concat = tf.concat([d0_drop, e6_batchnorm],
                                  axis=3,
                                  name='d0_concat')
            d0_relu = tf_utils.relu(d0_concat, name='d0_relu')

            # (3, 2) -> (6, 4)
            d1_deconv = tf_utils.deconv2d(d0_relu,
                                          self.gen_c[9],
                                          name='d1_deconv2d')
            # (6, 4) -> (5, 4)
            shapeA = e5_batchnorm.get_shape().as_list()[1]
            shapeB = d1_deconv.get_shape().as_list(
            )[1] - e5_batchnorm.get_shape().as_list()[1]
            d1_split, _ = tf.split(d1_deconv, [shapeA, shapeB],
                                   axis=1,
                                   name='d1_split')
            tf_utils.print_activations(d1_split)
            d1_batchnorm = tf_utils.batch_norm(d1_split,
                                               name='d1_batchnorm',
                                               _ops=self._ops)
            d1_drop = tf.nn.dropout(d1_batchnorm,
                                    keep_prob=0.5,
                                    name='d1_dropout')
            d1_concat = tf.concat([d1_drop, e5_batchnorm],
                                  axis=3,
                                  name='d1_concat')
            d1_relu = tf_utils.relu(d1_concat, name='d1_relu')

            # (5, 4) -> (10, 8)
            d2_deconv = tf_utils.deconv2d(d1_relu,
                                          self.gen_c[10],
                                          name='d2_deconv2d')
            # (10, 8) -> (10, 7)
            shapeA = e4_batchnorm.get_shape().as_list()[2]
            shapeB = d2_deconv.get_shape().as_list(
            )[2] - e4_batchnorm.get_shape().as_list()[2]
            d2_split, _ = tf.split(d2_deconv, [shapeA, shapeB],
                                   axis=2,
                                   name='d2_split')
            tf_utils.print_activations(d2_split)
            d2_batchnorm = tf_utils.batch_norm(d2_split,
                                               name='d2_batchnorm',
                                               _ops=self._ops)
            d2_drop = tf.nn.dropout(d2_batchnorm,
                                    keep_prob=0.5,
                                    name='d2_dropout')
            d2_concat = tf.concat([d2_drop, e4_batchnorm],
                                  axis=3,
                                  name='d2_concat')
            d2_relu = tf_utils.relu(d2_concat, name='d2_relu')

            # (10, 7) -> (20, 14)
            d3_deconv = tf_utils.deconv2d(d2_relu,
                                          self.gen_c[11],
                                          name='d3_deconv2d')
            # (20, 14) -> (19, 14)
            shapeA = e3_batchnorm.get_shape().as_list()[1]
            shapeB = d3_deconv.get_shape().as_list(
            )[1] - e3_batchnorm.get_shape().as_list()[1]
            d3_split_1, _ = tf.split(d3_deconv, [shapeA, shapeB],
                                     axis=1,
                                     name='d3_split_1')
            tf_utils.print_activations(d3_split_1)
            # (19, 14) -> (19, 13)
            shapeA = e3_batchnorm.get_shape().as_list()[2]
            shapeB = d3_split_1.get_shape().as_list(
            )[2] - e3_batchnorm.get_shape().as_list()[2]
            d3_split_2, _ = tf.split(d3_split_1, [shapeA, shapeB],
                                     axis=2,
                                     name='d3_split_2')
            tf_utils.print_activations(d3_split_2)
            d3_batchnorm = tf_utils.batch_norm(d3_split_2,
                                               name='d3_batchnorm',
                                               _ops=self._ops)
            d3_concat = tf.concat([d3_batchnorm, e3_batchnorm],
                                  axis=3,
                                  name='d3_concat')
            d3_relu = tf_utils.relu(d3_concat, name='d3_relu')

            # (19, 13) -> (38, 26)
            d4_deconv = tf_utils.deconv2d(d3_relu,
                                          self.gen_c[12],
                                          name='d4_deconv2d')
            # (38, 26) -> (38, 25)
            shapeA = e2_batchnorm.get_shape().as_list()[2]
            shapeB = d4_deconv.get_shape().as_list(
            )[2] - e2_batchnorm.get_shape().as_list()[2]
            d4_split, _ = tf.split(d4_deconv, [shapeA, shapeB],
                                   axis=2,
                                   name='d4_split')
            tf_utils.print_activations(d4_split)
            d4_batchnorm = tf_utils.batch_norm(d4_split,
                                               name='d4_batchnorm',
                                               _ops=self._ops)
            d4_concat = tf.concat([d4_batchnorm, e2_batchnorm],
                                  axis=3,
                                  name='d4_concat')
            d4_relu = tf_utils.relu(d4_concat, name='d4_relu')

            # (38, 25) -> (76, 50)
            d5_deconv = tf_utils.deconv2d(d4_relu,
                                          self.gen_c[13],
                                          name='d5_deconv2d')
            # (76, 50) -> (75, 50)
            shapeA = e1_batchnorm.get_shape().as_list()[1]
            shapeB = d5_deconv.get_shape().as_list()[1] - shapeA
            d5_split, _ = tf.split(d5_deconv, [shapeA, shapeB],
                                   axis=1,
                                   name='d5_split')
            tf_utils.print_activations(d5_split)
            d5_batchnorm = tf_utils.batch_norm(d5_split,
                                               name='d5_batchnorm',
                                               _ops=self._ops)
            d5_concat = tf.concat([d5_batchnorm, e1_batchnorm],
                                  axis=3,
                                  name='d5_concat')
            d5_relu = tf_utils.relu(d5_concat, name='d5_relu')

            # (75, 50) -> (150, 100)
            d6_deconv = tf_utils.deconv2d(d5_relu,
                                          self.gen_c[14],
                                          name='d6_deconv2d')
            d6_batchnorm = tf_utils.batch_norm(d6_deconv,
                                               name='d6_batchnorm',
                                               _ops=self._ops)
            d6_concat = tf.concat([d6_batchnorm, e0_conv2d],
                                  axis=3,
                                  name='d6_concat')
            d6_relu = tf_utils.relu(d6_concat, name='d6_relu')

            # (150, 100) -> (300, 200)
            d7_deconv = tf_utils.deconv2d(d6_relu,
                                          self.gen_c[15],
                                          name='d7_deconv2d')
            output = tf_utils.tanh(d7_deconv, name='output_tanh')

            # set reuse=True for next call
            self.reuse = True
            self.variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)

            return output
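The split-and-discard pattern above can be factored into a small helper that crops a decoder tensor to the spatial size of its skip connection. The sketch below is not part of the original code; `crop_to_match` is a hypothetical name, and it assumes NHWC tensors with statically known height and width.

import tensorflow as tf

def crop_to_match(decoded, skip, name='crop'):
    # Crop `decoded` (NHWC) so its height/width match `skip`, exactly as the
    # per-axis tf.split calls above do (assumes static H/W shapes).
    dec_h, dec_w = decoded.get_shape().as_list()[1:3]
    skip_h, skip_w = skip.get_shape().as_list()[1:3]
    with tf.name_scope(name):
        if dec_h > skip_h:
            decoded, _ = tf.split(decoded, [skip_h, dec_h - skip_h], axis=1)
        if dec_w > skip_w:
            decoded, _ = tf.split(decoded, [skip_w, dec_w - skip_w], axis=2)
        return decoded

# Example: d3_cropped = crop_to_match(d3_deconv, e3_batchnorm, name='d3_crop')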
Code Example #15
    def discriminator(self, data, y, name='d_', is_reuse=False):
        with tf.variable_scope(name) as scope:
            if is_reuse is True:
                scope.reuse_variables()

            if not self.flags.y_dim:
                # 64 -> 32
                h0_conv = tf_utils.conv2d(data, self.dis_c[0], name='h0_conv2d')
                h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')

                # 32 -> 16
                h1_conv = tf_utils.conv2d(h0_lrelu, self.dis_c[1], name='h1_conv2d')
                h1_batchnorm = tf_utils.batch_norm(h1_conv, name='h1_batchnorm', _ops=self._dis_train_ops)
                h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')

                # 16 -> 8
                h2_conv = tf_utils.conv2d(h1_lrelu, self.dis_c[2], name='h2_conv2d')
                h2_batchnorm = tf_utils.batch_norm(h2_conv, name='h2_batchnorm', _ops=self._dis_train_ops)
                h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')

                # 8 -> 4
                h3_conv = tf_utils.conv2d(h2_lrelu, self.dis_c[3], name='h3_conv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_conv, name='h3_batchnorm', _ops=self._dis_train_ops)
                h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

                h3_flatten = flatten(h3_lrelu)
                h4_linear = tf_utils.linear(h3_flatten, 1, name='h4_linear')

                return tf.nn.sigmoid(h4_linear), h4_linear

            else:
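                # Conditional branch: reshape the label vector y into a
                # (N, 1, 1, y_dim) map and append it to the input and to each
                # hidden layer (conv_cond_concat tiles it spatially first).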
                yb = tf.reshape(y, [tf.shape(y)[0], 1, 1, self.flags.y_dim])
                x = conv_cond_concat(data, yb)

                h0_conv = tf_utils.conv2d(x, self.dis_c[0] + self.flags.y_dim, name='h0_conv2d')
                h0_lrelu = tf_utils.lrelu(h0_conv, name='h0_lrelu')
                h0 = conv_cond_concat(h0_lrelu, yb)

                # 32 -> 16
                h1_conv = tf_utils.conv2d(h0, self.dis_c[1] + self.flags.y_dim, name='h1_conv2d')
                h1_batchnorm = tf_utils.batch_norm(h1_conv, name='h1_batchnorm', _ops=self._dis_train_ops)
                h1_lrelu = tf_utils.lrelu(h1_batchnorm, name='h1_lrelu')
                h1 = conv_cond_concat(h1_lrelu, yb)

                # 16 -> 8
                h2_conv = tf_utils.conv2d(h1, self.dis_c[2], name='h2_conv2d')
                h2_batchnorm = tf_utils.batch_norm(h2_conv, name='h2_batchnorm', _ops=self._dis_train_ops)
                h2_lrelu = tf_utils.lrelu(h2_batchnorm, name='h2_lrelu')
                h2 = conv_cond_concat(h2_lrelu, yb)

                # 8 -> 4
                h3_conv = tf_utils.conv2d(h2, self.dis_c[3], name='h3_conv2d')
                h3_batchnorm = tf_utils.batch_norm(h3_conv, name='h3_batchnorm', _ops=self._dis_train_ops)
                h3_lrelu = tf_utils.lrelu(h3_batchnorm, name='h3_lrelu')

                h3_flatten = flatten(h3_lrelu)
                h3 = concat([h3_flatten, y], 1)

                h4_linear = tf_utils.linear(h3, 1, name='h4_linear')

                return tf.nn.sigmoid(h4_linear), h4_linear
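The `conv_cond_concat` helper used above is not shown in this example. A common implementation of the pattern (this is an assumption, not the original helper) tiles the (N, 1, 1, y_dim) condition map over the spatial dimensions and appends it as extra channels:

import tensorflow as tf

def conv_cond_concat(x, y):
    # Tile the (N, 1, 1, y_dim) condition map `y` over the spatial dims of `x`
    # and concatenate it along the channel axis (assumes NHWC layout).
    x_shape = tf.shape(x)
    multiples = tf.stack([1, x_shape[1], x_shape[2], 1])
    return tf.concat([x, tf.tile(y, multiples)], axis=3)

Each call appends `y_dim` constant feature channels before the next convolution.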
Code Example #16
    def generator(self, data, name='g_'):
        with tf.variable_scope(name):
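            # U-Net style generator: an 8-step encoder (256 -> 1) followed by an
            # 8-step decoder with skip connections; dropout in the first three
            # decoder blocks acts as the noise source (pix2pix style).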
            # 256 -> 128
            e0_conv2d = tf_utils.conv2d(data, self.gen_c[0], name='e0_conv2d')
            e0_lrelu = tf_utils.lrelu(e0_conv2d, name='e0_lrelu')

            # 128 -> 64
            e1_conv2d = tf_utils.conv2d(e0_lrelu,
                                        self.gen_c[1],
                                        name='e1_conv2d')
            e1_batchnorm = tf_utils.batch_norm(e1_conv2d,
                                               name='e1_batchnorm',
                                               _ops=self._gen_train_ops)
            e1_lrelu = tf_utils.lrelu(e1_batchnorm, name='e1_lrelu')

            # 64 -> 32
            e2_conv2d = tf_utils.conv2d(e1_lrelu,
                                        self.gen_c[2],
                                        name='e2_conv2d')
            e2_batchnorm = tf_utils.batch_norm(e2_conv2d,
                                               name='e2_batchnorm',
                                               _ops=self._gen_train_ops)
            e2_lrelu = tf_utils.lrelu(e2_batchnorm, name='e2_lrelu')

            # 32 -> 16
            e3_conv2d = tf_utils.conv2d(e2_lrelu,
                                        self.gen_c[3],
                                        name='e3_conv2d')
            e3_batchnorm = tf_utils.batch_norm(e3_conv2d,
                                               name='e3_batchnorm',
                                               _ops=self._gen_train_ops)
            e3_lrelu = tf_utils.lrelu(e3_batchnorm, name='e3_lrelu')

            # 16 -> 8
            e4_conv2d = tf_utils.conv2d(e3_lrelu,
                                        self.gen_c[4],
                                        name='e4_conv2d')
            e4_batchnorm = tf_utils.batch_norm(e4_conv2d,
                                               name='e4_batchnorm',
                                               _ops=self._gen_train_ops)
            e4_lrelu = tf_utils.lrelu(e4_batchnorm, name='e4_lrelu')

            # 8 -> 4
            e5_conv2d = tf_utils.conv2d(e4_lrelu,
                                        self.gen_c[5],
                                        name='e5_conv2d')
            e5_batchnorm = tf_utils.batch_norm(e5_conv2d,
                                               name='e5_batchnorm',
                                               _ops=self._gen_train_ops)
            e5_lrelu = tf_utils.lrelu(e5_batchnorm, name='e5_lrelu')

            # 4 -> 2
            e6_conv2d = tf_utils.conv2d(e5_lrelu,
                                        self.gen_c[6],
                                        name='e6_conv2d')
            e6_batchnorm = tf_utils.batch_norm(e6_conv2d,
                                               name='e6_batchnorm',
                                               _ops=self._gen_train_ops)
            e6_lrelu = tf_utils.lrelu(e6_batchnorm, name='e6_lrelu')

            # 2 -> 1
            e7_conv2d = tf_utils.conv2d(e6_lrelu,
                                        self.gen_c[7],
                                        name='e7_conv2d')
            e7_batchnorm = tf_utils.batch_norm(e7_conv2d,
                                               name='e7_batchnorm',
                                               _ops=self._gen_train_ops)
            e7_relu = tf.nn.relu(e7_batchnorm, name='e7_relu')

            # 1 -> 2
            d0_deconv = tf_utils.deconv2d(e7_relu,
                                          self.gen_c[8],
                                          name='d0_deconv2d')
            d0_batchnorm = tf_utils.batch_norm(d0_deconv,
                                               name='d0_batchnorm',
                                               _ops=self._gen_train_ops)
            d0_drop = tf.nn.dropout(d0_batchnorm,
                                    keep_prob=0.5,
                                    name='d0_dropout')
            d0_concat = tf.concat([d0_drop, e6_batchnorm],
                                  axis=3,
                                  name='d0_concat')
            d0_relu = tf.nn.relu(d0_concat, name='d0_relu')

            # 2 -> 4
            d1_deconv = tf_utils.deconv2d(d0_relu,
                                          self.gen_c[9],
                                          name='d1_deconv2d')
            d1_batchnorm = tf_utils.batch_norm(d1_deconv,
                                               name='d1_batchnorm',
                                               _ops=self._gen_train_ops)
            d1_drop = tf.nn.dropout(d1_batchnorm,
                                    keep_prob=0.5,
                                    name='d1_dropout')
            d1_concat = tf.concat([d1_drop, e5_batchnorm],
                                  axis=3,
                                  name='d1_concat')
            d1_relu = tf.nn.relu(d1_concat, name='d1_relu')

            # 4 -> 8
            d2_deconv = tf_utils.deconv2d(d1_relu,
                                          self.gen_c[10],
                                          name='d2_deconv2d')
            d2_batchnorm = tf_utils.batch_norm(d2_deconv,
                                               name='d2_batchnorm',
                                               _ops=self._gen_train_ops)
            d2_drop = tf.nn.dropout(d2_batchnorm,
                                    keep_prob=0.5,
                                    name='d2_dropout')
            d2_concat = tf.concat([d2_drop, e4_batchnorm],
                                  axis=3,
                                  name='d2_concat')
            d2_relu = tf.nn.relu(d2_concat, name='d2_relu')

            # 8 -> 16
            d3_deconv = tf_utils.deconv2d(d2_relu,
                                          self.gen_c[11],
                                          name='d3_deconv2d')
            d3_batchnorm = tf_utils.batch_norm(d3_deconv,
                                               name='d3_batchnorm',
                                               _ops=self._gen_train_ops)
            d3_concat = tf.concat([d3_batchnorm, e3_batchnorm],
                                  axis=3,
                                  name='d3_concat')
            d3_relu = tf.nn.relu(d3_concat, name='d3_relu')

            # 16 -> 32
            d4_deconv = tf_utils.deconv2d(d3_relu,
                                          self.gen_c[12],
                                          name='d4_deconv2d')
            d4_batchnorm = tf_utils.batch_norm(d4_deconv,
                                               name='d4_batchnorm',
                                               _ops=self._gen_train_ops)
            d4_concat = tf.concat([d4_batchnorm, e2_batchnorm],
                                  axis=3,
                                  name='d4_concat')
            d4_relu = tf.nn.relu(d4_concat, name='d4_relu')

            # 32 -> 64
            d5_deconv = tf_utils.deconv2d(d4_relu,
                                          self.gen_c[13],
                                          name='d5_deconv2d')
            d5_batchnorm = tf_utils.batch_norm(d5_deconv,
                                               name='d5_batchnorm',
                                               _ops=self._gen_train_ops)
            d5_concat = tf.concat([d5_batchnorm, e1_batchnorm],
                                  axis=3,
                                  name='d5_concat')
            d5_relu = tf.nn.relu(d5_concat, name='d5_relu')

            # 64 -> 128
            d6_deconv = tf_utils.deconv2d(d5_relu,
                                          self.gen_c[14],
                                          name='d6_deconv2d')
            d6_batchnorm = tf_utils.batch_norm(d6_deconv,
                                               name='d6_batchnorm',
                                               _ops=self._gen_train_ops)
            d6_concat = tf.concat([d6_batchnorm, e0_conv2d],
                                  axis=3,
                                  name='d6_concat')
            d6_relu = tf.nn.relu(d6_concat, name='d6_relu')

            # 128 -> 256
            d7_deconv = tf_utils.deconv2d(d6_relu,
                                          self.gen_c[15],
                                          name='d7_deconv2d')

            return tf.nn.tanh(d7_deconv)
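The examples above pass a list such as `self._gen_train_ops` or `self._dis_train_ops` into `tf_utils.batch_norm`. That wrapper is not shown; a minimal sketch of the pattern (an assumption, not the original `tf_utils` code) appends the moving-average update ops to the caller's list so they can later be grouped with the corresponding optimizer step:

import tensorflow as tf

def batch_norm(x, name, _ops, momentum=0.99, eps=1e-5, is_train=True):
    # Batch normalization whose moving-average updates are appended to the
    # caller-supplied list `_ops` instead of tf.GraphKeys.UPDATE_OPS.
    with tf.variable_scope(name):
        c = x.get_shape().as_list()[-1]
        beta = tf.get_variable('beta', [c], initializer=tf.zeros_initializer())
        gamma = tf.get_variable('gamma', [c], initializer=tf.ones_initializer())
        mov_mean = tf.get_variable('moving_mean', [c], trainable=False,
                                   initializer=tf.zeros_initializer())
        mov_var = tf.get_variable('moving_variance', [c], trainable=False,
                                  initializer=tf.ones_initializer())
        if is_train:
            axes = list(range(len(x.get_shape()) - 1))  # e.g. [0, 1, 2] for NHWC
            mean, var = tf.nn.moments(x, axes=axes)
            _ops.append(tf.assign(mov_mean, momentum * mov_mean + (1. - momentum) * mean))
            _ops.append(tf.assign(mov_var, momentum * mov_var + (1. - momentum) * var))
        else:
            mean, var = mov_mean, mov_var
        return tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)

# The collected ops are then run together with the optimizer step, e.g.:
# gen_step = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(gen_loss, var_list=gen_vars)
# gen_train_op = tf.group(gen_step, *self._gen_train_ops)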