Example #1
    def Gx(self, input_x, img_mask, fp, use_sp=False):
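        # Masked-region generator: conv encoder -> FC bottleneck conditioned on
        # the feature vector fp -> deconv decoder with skip connections.
        # Assumes module-level imports of functools, numpy as np and
        # tensorflow as tf (1.x), plus ops helpers conv2d, de_conv,
        # fully_connect, lrelu and IN (instance norm) from the project.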

        conv2d_first = functools.partial(conv2d, kernel=7, stride=1, use_sp=use_sp)
        conv2d_base = functools.partial(conv2d, kernel=4, stride=2, use_sp=use_sp)
        fc = functools.partial(fully_connect, use_sp=use_sp)
        conv2d_final = functools.partial(conv2d, kernel=7, stride=1, use_sp=use_sp, output_dim=self.opt.output_nc)
        with tf.variable_scope("Gx", reuse=tf.AUTO_REUSE):

            x = tf.concat([input_x, img_mask], axis=3)
            u_fp_list = []
            x = lrelu(IN(conv2d_first(x, output_dim=self.opt.ngf, scope='conv'), scope='IN'))
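            # Encoder: stride-2 convs halve the spatial size each step; cache
            # every activation for the skip connections used in the decoder.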
            for i in range(self.opt.n_layers_g):
                c_dim = np.minimum(self.opt.ngf * np.power(2, i + 1), 256)
                x = lrelu(IN(conv2d_base(x, output_dim=c_dim, scope='conv{}'.format(i)), scope='IN{}'.format(i)))
                u_fp_list.append(x)

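            # Flatten to a bottleneck vector and condition it on the feature vector fp.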
            bottleneck = tf.reshape(x, shape=[self.opt.batch_size, -1])
            bottleneck = fc(bottleneck, output_size=256, scope='FC1')

            bottleneck = tf.concat([bottleneck, fp], axis=-1)
            h, w = x.shape.as_list()[-3], x.shape.as_list()[-2]
            de_x = lrelu(fc(bottleneck, output_size=256 * h * w, scope='FC2'))
            de_x = tf.reshape(de_x, shape=[self.opt.batch_size, h, w, 256])
            ngf = c_dim
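            # Decoder: halve the channel count each step and fuse the matching
            # encoder activation (U-Net style skip connections).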
            for i in range(self.opt.n_layers_g):
                c_dim = np.maximum(int(ngf / np.power(2, i)), 16)
                de_x = tf.concat([de_x, u_fp_list[len(u_fp_list) - (i + 1)]], axis=3)
                de_x = tf.nn.relu(IN(de_conv(de_x,
                                             output_shape=[self.opt.batch_size,
                                                           h * pow(2, i + 1),
                                                           w * pow(2, i + 1), c_dim],
                                             use_sp=use_sp,
                                             scope='deconv{}'.format(i)),
                                     scope='IN_{}'.format(i)))

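            # 7x7 output conv; the tanh residual is written back only inside the masked region.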
            de_x = conv2d_final(de_x, scope='output_conv')
            return input_x + tf.nn.tanh(de_x) * img_mask
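
The IN helper used throughout these examples is not shown in the snippets. A minimal sketch of what it presumably does, assuming a standard TF1-style instance normalization (the variable names and the epsilon are illustrative, not the project's actual code):

def IN(x, scope='IN'):
    # Hypothetical instance norm: normalize each sample over its spatial
    # dimensions, then apply a learned per-channel scale and offset.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        mean, var = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
        scale = tf.get_variable('scale', [x.shape.as_list()[-1]],
                                initializer=tf.ones_initializer())
        offset = tf.get_variable('offset', [x.shape.as_list()[-1]],
                                 initializer=tf.zeros_initializer())
        return scale * (x - mean) * tf.rsqrt(var + 1e-5) + offset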
Example #2
    def encode(self, x):
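        # Conv encoder mapping x to a 2-D content code; nef and n_layers_e come
        # from self.opt, with the same assumed ops helpers as above.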

        conv2d_first = functools.partial(conv2d, kernel=7, stride=1)
        conv2d_base = functools.partial(conv2d, kernel=4, stride=2)
        with tf.variable_scope("encode", reuse=tf.AUTO_REUSE):
            nef = self.opt.nef
            x = tf.nn.relu(IN(conv2d_first(x, output_dim=nef, scope='e_c1'), scope='e_in1'))
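            # Downsample with stride-2 convs, doubling channels up to a cap of 128.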
            for i in range(self.opt.n_layers_e):
                x = tf.nn.relu(IN(conv2d_base(x, output_dim=min(nef * pow(2, i + 1), 128), scope='e_c{}'.format(i + 2)),
                                  scope='e_in{}'.format(i + 2)))
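            # Flatten and project to a 2-dimensional content code.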
            bottleneck = tf.reshape(x, [self.opt.batch_size, -1])
            content = fully_connect(bottleneck, output_size=2, scope='e_ful1')

            return content
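
lrelu is likewise assumed from the same ops module; the usual TF1 one-liner, with the common 0.2 slope as an assumption:

def lrelu(x, alpha=0.2):
    # Hypothetical leaky ReLU as commonly defined in TF1 GAN codebases.
    return tf.maximum(x, alpha * x)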
Example #3
    def Gr(self, input_x, use_sp=False):
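        # Conv encoder mapping input_x to a 256-d bottleneck vector; again uses
        # the assumed ops helpers (conv2d, fully_connect, lrelu, IN).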
        print(input_x.shape)
        conv2d_first = functools.partial(conv2d, kernel=7, stride=1, use_sp=use_sp)
        conv2d_base = functools.partial(conv2d, kernel=4, stride=2, use_sp=use_sp)
        fc = functools.partial(fully_connect, use_sp=use_sp)
        with tf.variable_scope("Gr", reuse=tf.AUTO_REUSE):
            x = input_x
            x = lrelu(IN(conv2d_first(x, output_dim=self.opt.ngf, scope='conv'), scope='IN'))
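            # Downsample, doubling channels up to a cap of 128.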
            for i in range(self.opt.n_layers_r):
                c_dim = np.minimum(self.opt.ngf * np.power(2, i + 1), 128)
                x = lrelu(IN(conv2d_base(x, output_dim=c_dim, scope='conv{}'.format(i)), scope='IN{}'.format(i)))

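            # Flatten and project to the 256-d bottleneck.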
            bottleneck = tf.reshape(x, shape=[self.opt.batch_size, -1])
            bottleneck = fc(bottleneck, output_size=256, scope='FC1')

            return bottleneck