Example no. 1
 def discriminator(self, mri, pet, reuse=False):
     with tf.variable_scope('discriminator') as scope:
         if reuse:
             scope.reuse_variables()
         # concatenate the MRI and PET volumes along the channel axis
         inputs = tf.concat([mri, pet], self.channel_axis, name='/concat')
         print('input', ' ', inputs.shape)
         conv1 = ops.conv(inputs, 16, (3, 3, 3), '/conv1', self.conf.data_type)
         conv1 = ops.batch_norm(conv1, '/batch1', ac_fn=ops.leaky_relu)
         print('conv1 ', conv1.shape)
         conv1 = tf.nn.dropout(conv1, 0.5, name='drop1')
         conv2 = ops.conv(conv1, 32, (3, 3, 3), '/conv2', self.conf.data_type, 2)
         conv2 = ops.batch_norm(conv2, '/batch2', ac_fn=ops.leaky_relu)
         print('conv2 ', conv2.shape)
         conv2 = tf.nn.dropout(conv2, 0.5, name='drop2')
         conv3 = ops.conv(conv2, 64, (3, 3, 3), '/conv3', self.conf.data_type)
         conv3 = ops.batch_norm(conv3, '/batch3', ac_fn=ops.leaky_relu)
         print('conv3 ', conv3.shape)
         conv3 = tf.nn.dropout(conv3, 0.5, name='drop3')
         conv4 = ops.conv(conv3, 128, (3, 3, 3), '/conv4', self.conf.data_type, 2)
         conv4 = ops.batch_norm(conv4, '/batch4', ac_fn=ops.leaky_relu)
         conv4 = tf.nn.dropout(conv4, 0.5, name='drop4')
         print('conv4 ', conv4.shape)
         flatten = tf.contrib.layers.flatten(conv4)
         # single 5-unit head: column 0 is the real/fake logit, columns 1-4 are auxiliary (class) logits
         logits = tf.contrib.layers.fully_connected(flatten, 5, activation_fn=None, scope='/fully')
         print('flatten ', flatten.shape)
         print('logits ', logits.shape)
         d = tf.nn.sigmoid(logits)
         return d[:, 0], logits[:, 0], d[:, 1:5], logits[:, 1:5]
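The four values returned above (real/fake probability and logit, plus four auxiliary columns) are typically fed into a GAN loss combined with an auxiliary classification loss. The sketch below is not from the source: the function name, the argument names, and the assumption that columns 1-4 are one-hot class logits are illustrative only.

import tensorflow as tf

def combined_d_loss(real_gan_logit, fake_gan_logit, real_cls_logits, cls_labels):
    # real_gan_logit / fake_gan_logit: [batch] real/fake logits (column 0 above)
    # real_cls_logits: [batch, 4] auxiliary logits (columns 1-4 above)
    # cls_labels: [batch, 4] one-hot labels (assumed)
    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(real_gan_logit), logits=real_gan_logit))
    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(fake_gan_logit), logits=fake_gan_logit))
    cls_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=cls_labels, logits=real_cls_logits))
    return d_loss_real + d_loss_fake + cls_loss

The generator loss would reuse fake_gan_logit in the same way, but with all-ones labels.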
Example no. 2
    def discriminator(self, image, is_training, reuse=False):
        with tf.variable_scope("discriminator"):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            # [batch,256,256,1] -> [batch,128,128,64]
            h0 = lrelu(conv2d(image, self.discriminator_dim,
                              scope="d_h0_conv"))
            # [batch,128,128,64] -> [batch,64,64,64*2]
            h1 = lrelu(
                batch_norm(conv2d(h0,
                                  self.discriminator_dim * 2,
                                  scope="d_h1_conv"),
                           is_training,
                           scope="d_bn_1"))
            # [batch,64,64,64*2] -> [batch,32,32,64*4]
            h2 = lrelu(
                batch_norm(conv2d(h1,
                                  self.discriminator_dim * 4,
                                  scope="d_h2_conv"),
                           is_training,
                           scope="d_bn_2"))
            # [batch,32,32,64*4] -> [batch,31,31,64*8]
            h3 = lrelu(
                batch_norm(conv2d(h2,
                                  self.discriminator_dim * 8,
                                  sh=1,
                                  sw=1,
                                  scope="d_h3_conv"),
                           is_training,
                           scope="d_bn_3"))

            # real or fake binary loss
            fc1 = fc(tf.reshape(h3, [self.batch_size, -1]), 1, scope="d_fc1")

            return tf.sigmoid(fc1), fc1
Example no. 3
 def discriminator(self, mri, pet, reuse=False, disc_type=1):
     with tf.variable_scope('discriminator') as scope:
         if reuse:
             scope.reuse_variables()
         # concatenate the MRI and PET volumes along the channel axis
         inputs = tf.concat([mri, pet], self.channel_axis, name='/concat')
         conv1 = ops.conv(inputs, 16, 3, '/conv1', self.conf.data_type)
         conv1 = ops.batch_norm(conv1, '/batch1', ac_fn=ops.leaky_relu)
         conv2 = ops.conv(conv1, 32, 3, '/conv2', self.conf.data_type, 2)
         conv2 = ops.batch_norm(conv2, '/batch2', ac_fn=ops.leaky_relu)
         conv3 = ops.conv(conv2, 64, 3, '/conv3', self.conf.data_type)
         conv3 = ops.batch_norm(conv3, '/batch3', ac_fn=ops.leaky_relu)
         conv4 = ops.conv(conv3, 128, 3, '/conv4', self.conf.data_type, 2)
         conv4 = ops.batch_norm(conv4, '/batch4', ac_fn=ops.leaky_relu)
         flatten = tf.contrib.layers.flatten(conv4)
         # two heads: a single real/fake logit and a 20-dimensional feature vector
         logits = tf.contrib.layers.fully_connected(flatten, 1, activation_fn=None, scope='/fully1')
         features = tf.contrib.layers.fully_connected(flatten, 20, activation_fn=None, scope='/fully2')
         if self.conf.model_option.endswith('metric'):
             features = ops.batch_norm(features, '/batch5', ac_fn=None)
         return logits, features
Example no. 4
	def generator(self, z, embed, is_training=True, reuse=False, cond_noise=True):
		s = self.output_size
		s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)

		with tf.variable_scope("g_net", reuse=reuse):
			# Sample from the multivariate normal distribution of the embeddings
			mean, log_sigma = self.generate_conditionals(embed)
			net_embed = self.sample_normal_conditional(mean, log_sigma, cond_noise)
			# --------------------------------------------------------

			# Concatenate the sampled embedding with the z vector
			net_input = tf.concat([z, net_embed], 1)
			net_h0 = tf.layers.dense(net_input, units=self.gf_dim*8*s16*s16, activation=None,
									 kernel_initializer=self.w_init)
			net_h0 = batch_norm(net_h0, train=is_training, init=self.batch_norm_init, act=None)
			net_h0 = tf.reshape(net_h0, [-1, s16, s16, self.gf_dim * 8])

			# Residual layer
			net = conv2d(net_h0, self.gf_dim * 2, ks=(1, 1), s=(1, 1), padding='valid',  init=self.w_init)
			net = batch_norm(net, train=is_training, init=self.batch_norm_init, act=tf.nn.relu)
			net = conv2d(net, self.gf_dim * 2, ks=(3, 3), s=(1, 1),  init=self.w_init)
			net = batch_norm(net, train=is_training, init=self.batch_norm_init, act=tf.nn.relu)
			net = conv2d(net, self.gf_dim * 8, ks=(3, 3), s=(1, 1), padding='same',  init=self.w_init)
			net = batch_norm(net, train=is_training, init=self.batch_norm_init, act=None)
			net_h1 = tf.add(net_h0, net)
			net_h1 = tf.nn.relu(net_h1)
			# --------------------------------------------------------

			net_h2 = conv2d_transpose(net_h1, self.gf_dim*4, ks=(4, 4), s=(2, 2),  init=self.w_init)
			net_h2 = conv2d(net_h2, self.gf_dim*4, ks=(3, 3), s=(1, 1),  init=self.w_init)
			net_h2 = batch_norm(net_h2, train=is_training, init=self.batch_norm_init, act=None)
			# --------------------------------------------------------

			# Residual layer
			net = conv2d(net_h2, self.gf_dim, ks=(1, 1), s=(1, 1), padding='valid', init=self.w_init)
			net = batch_norm(net, train=is_training, init=self.batch_norm_init, act=tf.nn.relu)
			net = conv2d(net, self.gf_dim, ks=(3, 3), s=(1, 1),  init=self.w_init)
			net = batch_norm(net, train=is_training, init=self.batch_norm_init, act=tf.nn.relu)
			net = conv2d(net, self.gf_dim*4, ks=(3, 3), s=(1, 1),  init=self.w_init)
			net = batch_norm(net, train=is_training, init=self.batch_norm_init, act=None)
			net_h3 = tf.add(net_h2, net)
			net_h3 = tf.nn.relu(net_h3)
			# --------------------------------------------------------

			net_h4 = conv2d_transpose(net_h3, self.gf_dim*2, ks=(4, 4), s=(2, 2), init=self.w_init)
			net_h4 = conv2d(net_h4, self.gf_dim*2, ks=(3, 3), s=(1, 1), init=self.w_init)
			net_h4 = batch_norm(net_h4, train=is_training, init=self.batch_norm_init, act=tf.nn.relu)

			net_h5 = conv2d_transpose(net_h4, self.gf_dim, ks=(4, 4), s=(2, 2),  init=self.w_init)
			net_h5 = conv2d(net_h5, self.gf_dim, ks=(3, 3), s=(1, 1),  init=self.w_init)
			net_h5 = batch_norm(net_h5, train=is_training, init=self.batch_norm_init, act=tf.nn.relu)

			net_logits = conv2d_transpose(net_h5, self.image_dims[-1], ks=(4, 4), s=(2, 2),  init=self.w_init)
			net_logits = conv2d(net_logits, self.image_dims[-1], ks=(3, 3), s=(1, 1),  init=self.w_init)

			net_output = tf.nn.tanh(net_logits)
			return net_output, mean, log_sigma
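generate_conditionals and sample_normal_conditional are called above but not shown in these snippets. A minimal sketch of the StackGAN-style conditioning augmentation they usually implement is given below; this is an assumption rather than the source code, and the names embed_dim and w_init are illustrative.

import tensorflow as tf

def generate_conditionals(embed, embed_dim, w_init=None):
    # project the text embedding to a mean and a log-sigma vector
    lrelu = lambda l: tf.nn.leaky_relu(l, 0.2)
    mean = tf.layers.dense(embed, units=embed_dim, activation=lrelu,
                           kernel_initializer=w_init, name='ca_mean')
    log_sigma = tf.layers.dense(embed, units=embed_dim, activation=lrelu,
                                kernel_initializer=w_init, name='ca_log_sigma')
    return mean, log_sigma

def sample_normal_conditional(mean, log_sigma, cond_noise=True):
    # reparameterized sample: mean + exp(log_sigma) * eps, with eps ~ N(0, I)
    if not cond_noise:
        return mean
    epsilon = tf.random_normal(tf.shape(mean))
    return mean + tf.exp(log_sigma) * epsilon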
Example no. 5
 def build_bottom_block(self, inputs, name):
     out_num = inputs.shape[self.channel_axis].value
     conv1 = ops.conv(inputs, 2 * out_num, self.conv_size, name + '/conv1',
                      self.conf.data_type)
     conv1 = ops.batch_norm(conv1, name + '/batch1')
     conv2 = ops.conv(conv1, out_num, self.conv_size, name + '/conv2',
                      self.conf.data_type)
     conv2 = ops.batch_norm(conv2, name + '/batch2')
     return conv2
Example no. 6
    def generator_residual_layer(self, input_layer, is_training=True):
        net_h0 = input_layer

        net_h1 = conv2d(net_h0, self.gf_dim * 4, ks=(4, 4), s=(1, 1))
        net_h1 = batch_norm(net_h1, train=is_training, init=self.batch_norm_init, act=tf.nn.relu)

        net_h2 = conv2d(net_h1, self.gf_dim * 4, ks=(4, 4), s=(1, 1))
        net_h2 = batch_norm(net_h2, train=is_training, init=self.batch_norm_init)

        return tf.nn.relu(tf.add(net_h0, net_h2))
Example no. 7
    def generator_encode_image(self, image, is_training=True):
        net_h0 = conv2d(image, self.gf_dim, ks=(3, 3), s=(1, 1), act=tf.nn.relu)

        net_h1 = conv2d(net_h0, self.gf_dim * 2, ks=(4, 4), s=(2, 2))
        net_h1 = batch_norm(net_h1, train=is_training, init=self.batch_norm_init, act=tf.nn.relu)

        output_tensor = conv2d(net_h1, self.gf_dim * 4, ks=(4, 4), s=(2, 2))
        output_tensor = batch_norm(output_tensor, train=is_training, init=self.batch_norm_init, act=tf.nn.relu)

        return output_tensor
Example no. 8
 def build_down_block(self, inputs, name, down_outputs, first=False):
     out_num = self.conf.start_channel_num if first else 2 * \
         inputs.shape[self.channel_axis].value
     conv1 = ops.conv(inputs, out_num, self.conv_size,
                      name+'/conv1', self.conf.data_type)
     conv1 = ops.batch_norm(conv1, name+'/batch1')
     conv2 = ops.conv(conv1, out_num, self.conv_size,
                      name+'/conv2', self.conf.data_type,2)
     conv2 = ops.batch_norm(conv2, name+'/batch2')
     down_outputs.append(conv1)
     return conv2
Example no. 9
 def _make_descriminator(self, input, phase_train):
     conv1 = ops.batch_norm(ops.conv2d(input, self.df_dim,
                                       name='d_h0_conv'),
                            name='d_bn0',
                            phase_train=phase_train)
     h0 = ops.lrelu(conv1)
     h1 = ops.lrelu(
         ops.batch_norm(ops.conv2d(h0, self.df_dim * 2, name='d_h1_conv'),
                        name='d_bn1',
                        phase_train=phase_train))
     #h2 = ops.lrelu(ops.batch_norm(ops.conv2d(h1, self.df_dim*4, name='d_h2_conv'), name='d_bn2'))
     #h3 = ops.lrelu(ops.batch_norm(ops.conv2d(h2, self.df_dim*8, name='d_h3_conv'), name='d_bn3'))
     # flatten and project to a single real/fake logit
     h2 = ops.linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h1_lin')
     return h2
Example no. 10
    def generator(self, image, embed, is_training=True, reuse=False, sampler=False):
        s = 64
        s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)

        with tf.variable_scope("stageII_g_net", reuse=reuse):
            encoded_img = self.generator_encode_image(image, is_training=is_training)

            # Sample from the multivariate normal distribution of the embeddings
            mean, log_sigma = self.generate_conditionals(embed)
            net_embed = self.sample_normal_conditional(mean, log_sigma)
            # --------------------------------------------------------

            # Concatenate the encoded image and the embeddings
            net_embed = tf.expand_dims(tf.expand_dims(net_embed, 1), 1)
            net_embed = tf.tile(net_embed, [1, s4, s4, 1])
            imgenc_embed = tf.concat([encoded_img, net_embed], 3)

            pre_res = conv2d(imgenc_embed, self.gf_dim * 4, ks=(3, 3), s=(1, 1))
            pre_res = batch_norm(pre_res, train=is_training, init=self.batch_norm_init, act=tf.nn.relu)

            r_block1 = self.generator_residual_layer(pre_res, is_training=is_training)
            r_block2 = self.generator_residual_layer(r_block1, is_training=is_training)
            r_block3 = self.generator_residual_layer(r_block2, is_training=is_training)
            r_block4 = self.generator_residual_layer(r_block3, is_training=is_training)

            return self.generator_upsample(r_block4, is_training=is_training), mean, log_sigma
Example no. 11
    def _make_generator(self, input, phase_train):
        s_h, s_w = self.img_size, self.img_size
        s_h2, s_w2 = self._conv_out_size_same(s_h, 2), self._conv_out_size_same(s_w, 2)
        s_h4, s_w4 = self._conv_out_size_same(s_h2, 2), self._conv_out_size_same(s_w2, 2)
        #s_h8, s_w8 = self._conv_out_size_same(s_h4, 2), self._conv_out_size_same(s_w4, 2)
        #s_h16, s_w16 = self._conv_out_size_same(s_h8, 2), self._conv_out_size_same(s_w8, 2)
        # project `z` and reshape
        self.z_, self.h0_w, self.h0_b = ops.linear(input,
                                                   self.gf_dim * 8 * s_h4 *
                                                   s_w4,
                                                   'g_h0_lin',
                                                   with_w=True)
        normalized_value = ops.batch_norm(self.z_,
                                          name='g_bn0',
                                          axes=[0],
                                          phase_train=phase_train)

        self.h0 = tf.reshape(normalized_value,
                             [-1, s_h4, s_w4, self.gf_dim * 8])

        h0 = ops.lrelu(self.h0)

        self.h1, self.h1_w, self.h1_b = ops.deconv2d(
            h0, [self.batch_size, s_h2, s_w2, self.gf_dim * 4],
            name='g_h1',
            with_w=True)
        h1 = ops.lrelu(
            ops.batch_norm(self.h1, name='g_bn1', phase_train=phase_train))

        # h2, self.h2_w, self.h2_b = ops.deconv2d(
        #     h1, [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2', with_w=True)
        # h2 = tf.nn.relu(ops.batch_norm(h2, name='g_bn2'))
        #
        # h3, self.h3_w, self.h3_b = ops.deconv2d(
        #     h2, [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3', with_w=True)
        # h3 = tf.nn.relu(ops.batch_norm(h3, name='g_bn3'))

        h2, self.h2_w, self.h2_b = ops.deconv2d(
            h1, [self.batch_size, s_h, s_w, self.c_dim],
            name='g_h4',
            with_w=True)
        h2_non_linear = ops.lrelu(h2, leak=0)
        return h2_non_linear
Example no. 12
 def encode_layer(x, output_filters, layer):
     act = lrelu(x)
     conv = conv2d(act,
                   output_filters=output_filters,
                   scope="g_e%d_conv" % layer)
     enc = batch_norm(conv, is_training, scope="g_e%d_bn" % layer)
     encode_layers["e%d" % layer] = enc
     return enc
Example no. 13
    def generator_upsample(self, input_layer, is_training=True):
        net_h0 = conv2d_transpose(input_layer,
                                  self.gf_dim * 2,
                                  ks=(4, 4),
                                  init=self.w_init)
        net_h0 = conv2d(net_h0, self.gf_dim * 2, ks=(3, 3), s=(1, 1))
        net_h0 = batch_norm(net_h0,
                            train=is_training,
                            init=self.batch_norm_init,
                            act=tf.nn.relu)

        net_h1 = conv2d_transpose(net_h0,
                                  self.gf_dim,
                                  ks=(4, 4),
                                  init=self.w_init)
        net_h1 = conv2d(net_h1, self.gf_dim, ks=(3, 3), s=(1, 1))
        net_h1 = batch_norm(net_h1,
                            train=is_training,
                            init=self.batch_norm_init,
                            act=tf.nn.relu)

        net_h2 = conv2d_transpose(net_h1,
                                  self.gf_dim // 2,
                                  ks=(4, 4),
                                  init=self.w_init)
        net_h2 = conv2d(net_h2, self.gf_dim // 2, ks=(3, 3), s=(1, 1))
        net_h2 = batch_norm(net_h2,
                            train=is_training,
                            init=self.batch_norm_init,
                            act=tf.nn.relu)

        net_h3 = conv2d_transpose(net_h2,
                                  self.gf_dim // 4,
                                  ks=(4, 4),
                                  init=self.w_init)
        net_h3 = conv2d(net_h3, self.gf_dim // 4, ks=(3, 3), s=(1, 1))
        net_h3 = batch_norm(net_h3,
                            train=is_training,
                            init=self.batch_norm_init,
                            act=tf.nn.relu)

        return conv2d(net_h3,
                      self.image_dims[-1],
                      ks=(3, 3),
                      s=(1, 1),
                      act=tf.nn.tanh)
Example no. 14
 def build_up_block(self, inputs, down_inputs, name, final=False):
     out_num = inputs.shape[self.channel_axis].value
     conv1 = self.deconv_func()(
         inputs, out_num, self.conv_size, name+'/conv1',
         self.conf.data_type, action=self.conf.action)
     conv1 = ops.batch_norm(conv1, name+'/batch1')
     conv1 = tf.concat(
         [conv1, down_inputs], self.channel_axis, name=name+'/concat')
     conv2 = self.conv_func()(
         conv1, out_num, self.conv_size, name+'/conv2', self.conf.data_type)
     conv2 = ops.batch_norm(conv2, name+'/batch2')
     # the final block maps to class_num channels; intermediate blocks halve the channel count
     out_num = self.conf.class_num if final else out_num // 2
     conv3 = ops.conv(conv2, out_num, self.conv_size, name+'/conv3', self.conf.data_type)
     if not final:
         conv3 = ops.batch_norm(conv3, name+'/batch3')
     return conv3
Example no. 15
            def encode_layer(x, output_filters, layer, keep_rate=1.0):
                # act = lrelu(x)
                enc = tf.nn.relu(x)
                enc = tf.nn.dropout(enc, keep_rate)
                enc = conv2d(enc,
                             output_filters=output_filters,
                             scope="g_e%d_conv" % layer)

                # batch norm is important for the AE; without it the autoencoder would output nothing
                enc = batch_norm(enc, is_training, scope="g_e%d_bn" % layer)
                encode_layers["e%d" % layer] = enc
                return enc
Example no. 16
    def discriminator(self, inputs, embed, is_training=True, reuse=False):
        s16 = self.output_size // 16  # integer division: s16 is used as a kernel/stride size below
        with tf.variable_scope("d_net", reuse=reuse):
            net_ho = tf.layers.conv2d(inputs=inputs, filters=self.df_dim, kernel_size=(4, 4), strides=(2, 2),
                                      padding='same', activation=lambda l: tf.nn.leaky_relu(l, 0.2), 
                                      kernel_initializer=self.w_init)
            net_h1 = tf.layers.conv2d(inputs=net_ho, filters=self.df_dim * 2, kernel_size=(4, 4), strides=(2, 2),
                                      padding='same', activation=None, kernel_initializer=self.w_init)
            net_h1 = batch_norm(net_h1, train=is_training, init=self.batch_norm_init,
                                act=lambda l: tf.nn.leaky_relu(l, 0.2))
            net_h2 = tf.layers.conv2d(inputs=net_h1, filters=self.df_dim * 4, kernel_size=(4, 4), strides=(2, 2),
                                      padding='same', activation=None, kernel_initializer=self.w_init)
            net_h2 = batch_norm(net_h2, train=is_training, init=self.batch_norm_init,
                                act=lambda l: tf.nn.leaky_relu(l, 0.2))
            net_h3 = tf.layers.conv2d(inputs=net_h2, filters=self.df_dim * 8, kernel_size=(4, 4), strides=(2, 2),
                                      padding='same', activation=None, kernel_initializer=self.w_init)
            net_h3 = batch_norm(net_h3, train=is_training, init=self.batch_norm_init,
                                act=None)
            # --------------------------------------------------------

            # Residual layer
            net = tf.layers.conv2d(inputs=net_h3, filters=self.df_dim * 2, kernel_size=(1, 1), strides=(1, 1),
                                   padding='valid', activation=None, kernel_initializer=self.w_init)
            net = batch_norm(net, train=is_training, init=self.batch_norm_init,
                             act=lambda l: tf.nn.leaky_relu(l, 0.2))
            net = tf.layers.conv2d(inputs=net, filters=self.df_dim * 2, kernel_size=(3, 3), strides=(1, 1),
                                   padding='same', activation=None, kernel_initializer=self.w_init)
            net = batch_norm(net, train=is_training, init=self.batch_norm_init,
                             act=lambda l: tf.nn.leaky_relu(l, 0.2))
            net = tf.layers.conv2d(inputs=net, filters=self.df_dim * 8, kernel_size=(3, 3), strides=(1, 1),
                                   padding='same', activation=None, kernel_initializer=self.w_init)
            net = batch_norm(net, train=is_training, init=self.batch_norm_init,
                             act=None)
            net_h4 = tf.add(net_h3, net)
            net_h4 = tf.nn.leaky_relu(net_h4, 0.2)
            # --------------------------------------------------------

            # Compress embeddings
            net_embed = tf.layers.dense(inputs=embed, units=self.compressed_embed_dim,
                                        activation=lambda l: tf.nn.leaky_relu(l, 0.2))

            # Append embeddings in depth
            net_embed = tf.reshape(net_embed, [self.batch_size, 4, 4, -1])
            net_h4_concat = tf.concat([net_h4, net_embed], 3)

            net_h4 = tf.layers.conv2d(inputs=net_h4_concat, filters=self.df_dim * 8, kernel_size=(1, 1), strides=(1, 1),
                                      padding='valid', activation=None, kernel_initializer=self.w_init)
            net_h4 = batch_norm(net_h4, train=is_training, init=self.batch_norm_init,
                                act=lambda l: tf.nn.leaky_relu(l, 0.2))

            net_logits = tf.layers.conv2d(inputs=net_h4, filters=1, kernel_size=(s16, s16), strides=(s16, s16),
                                          padding='valid', kernel_initializer=self.w_init)

            return tf.nn.sigmoid(net_logits), net_logits
Example no. 17
            def decode_layer(x,
                             output_width,
                             output_filters,
                             layer,
                             enc_layer,
                             keep_rate=1.0):
                dec = deconv2d(tf.nn.relu(x), [
                    self.batch_size, output_width, output_width, output_filters
                ],
                               scope="g_d%d_deconv" % layer)

                if layer != 8:
                    # batch norm every layer except the last (layer 8); normalizing the output layer makes the GAN unstable
                    dec = batch_norm(dec,
                                     is_training,
                                     scope="g_d%d_bn" % layer)
                dec = tf.nn.dropout(dec, keep_prob=keep_rate)
                return dec
Example no. 18
 def decode_layer(x,
                  output_width,
                  output_filters,
                  layer,
                  enc_layer,
                  dropout=False,
                  do_concat=True):
     dec = deconv2d(tf.nn.relu(x), [
         self.batch_size, output_width, output_width, output_filters
     ],
                    scope="g_d%d_deconv" % layer)
     if layer != 8:
         # batch norm every layer except the last (layer 8); normalizing the output layer makes the GAN unstable
         dec = batch_norm(dec,
                          is_training,
                          scope="g_d%d_bn" % layer)
     if dropout:
         dec = tf.nn.dropout(dec, 0.5)
     if do_concat:
         dec = tf.concat([dec, enc_layer], 3)
     return dec
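The encode_layer/decode_layer helpers above come from a U-Net style image-to-image generator: each decoder stage receives the activation of the matching encoder stage through enc_layer and do_concat, and batch normalization is skipped on the output layer. The sketch below shows that wiring in a self-contained form; it uses plain tf.layers instead of the source's conv2d/deconv2d/batch_norm helpers, so the layer counts and names are illustrative only.

import tensorflow as tf

def tiny_unet_generator(images, is_training, base_filters=64):
    def enc(x, filters, name):
        # downsample by 2 and normalize, as in encode_layer above
        with tf.variable_scope(name):
            x = tf.layers.conv2d(x, filters, 4, strides=2, padding='same')
            x = tf.layers.batch_normalization(x, training=is_training)
            return tf.nn.leaky_relu(x, 0.2)

    def dec(x, skip, filters, name, last=False):
        # upsample by 2; skip normalization on the last layer and concatenate
        # the matching encoder activation, as in decode_layer above
        with tf.variable_scope(name):
            x = tf.layers.conv2d_transpose(tf.nn.relu(x), filters, 4,
                                           strides=2, padding='same')
            if not last:
                x = tf.layers.batch_normalization(x, training=is_training)
            if skip is not None:
                x = tf.concat([x, skip], axis=3)
            return x

    out_channels = images.get_shape().as_list()[-1]
    e1 = enc(images, base_filters, 'e1')          # H/2
    e2 = enc(e1, base_filters * 2, 'e2')          # H/4
    e3 = enc(e2, base_filters * 4, 'e3')          # H/8
    d1 = dec(e3, e2, base_filters * 2, 'd1')      # H/4, concat with e2
    d2 = dec(d1, e1, base_filters, 'd2')          # H/2, concat with e1
    out = dec(d2, None, out_channels, 'd3', last=True)
    return tf.nn.tanh(out)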
Example no. 19
	def discriminator(self, inputs, embed, is_training=True, reuse=False):
		s16 = self.output_size // 16  # integer division: s16 is used as a kernel/stride size below
		lrelu = lambda l: tf.nn.leaky_relu(l, 0.2)
		
		with tf.variable_scope("d_net", reuse=reuse):
			net_ho = conv2d(inputs, self.df_dim, ks=(4, 4), s=(2, 2), act=lrelu, init=self.w_init)
			net_h1 = conv2d(net_ho, self.df_dim * 2, ks=(4, 4), s=(2, 2), init=self.w_init)
			net_h1 = batch_norm(net_h1, train=is_training, init=self.batch_norm_init, act=lrelu)
			net_h2 = conv2d(net_h1, self.df_dim * 4, ks=(4, 4), s=(2, 2), init=self.w_init)
			net_h2 = batch_norm(net_h2, train=is_training, init=self.batch_norm_init, act=lrelu)
			net_h3 = conv2d(net_h2, self.df_dim * 8, ks=(4, 4), s=(2, 2), init=self.w_init)
			net_h3 = batch_norm(net_h3, train=is_training, init=self.batch_norm_init)
			# --------------------------------------------------------

			# Residual layer
			net = conv2d(net_h3, self.df_dim * 2, ks=(1, 1), s=(1, 1), padding='valid', init=self.w_init)
			net = batch_norm(net, train=is_training, init=self.batch_norm_init, act=lrelu)
			net = conv2d(net, self.df_dim * 2, ks=(3, 3), s=(1, 1), init=self.w_init)
			net = batch_norm(net, train=is_training, init=self.batch_norm_init, act=lrelu)
			net = conv2d(net, self.df_dim * 8, ks=(3, 3), s=(1, 1), init=self.w_init)
			net = batch_norm(net, train=is_training, init=self.batch_norm_init)
			net_h4 = tf.add(net_h3, net)
			net_h4 = tf.nn.leaky_relu(net_h4, 0.2)
			# --------------------------------------------------------

			# Compress embeddings
			net_embed = tf.layers.dense(embed, units=self.compressed_embed_dim, activation=lrelu)

			# Append embeddings in depth
			net_embed = tf.expand_dims(tf.expand_dims(net_embed, 1), 1)
			net_embed = tf.tile(net_embed, [1, 4, 4, 1])
			net_h4_concat = tf.concat([net_h4, net_embed], 3)

			net_h4 = conv2d(net_h4_concat, self.df_dim * 8, ks=(1, 1), s=(1, 1), padding='valid', init=self.w_init)
			net_h4 = batch_norm(net_h4, train=is_training, init=self.batch_norm_init, act=lrelu)

			net_logits = conv2d(net_h4, 1, ks=(s16, s16), s=(s16, s16), padding='valid', init=self.w_init)
			return tf.nn.sigmoid(net_logits), net_logits
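A discriminator like the two above, which takes both an image and a text embedding, is usually trained with a matching-aware loss: a real image with matching text counts as real, while a generated image with matching text and a real image with mismatching text both count as fake. The sketch below illustrates that scheme; the function and argument names are not from the source.

import tensorflow as tf

def matching_aware_d_loss(d_real_logit, d_fake_logit, d_mismatch_logit):
    # d_real_logit: D(real image, matching text)
    # d_fake_logit: D(generated image, matching text)
    # d_mismatch_logit: D(real image, mismatching text)
    bce = tf.nn.sigmoid_cross_entropy_with_logits
    loss_real = tf.reduce_mean(bce(labels=tf.ones_like(d_real_logit),
                                   logits=d_real_logit))
    loss_fake = tf.reduce_mean(bce(labels=tf.zeros_like(d_fake_logit),
                                   logits=d_fake_logit))
    loss_mismatch = tf.reduce_mean(bce(labels=tf.zeros_like(d_mismatch_logit),
                                       logits=d_mismatch_logit))
    # the two "fake" terms are commonly averaged
    return loss_real + 0.5 * (loss_fake + loss_mismatch)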