Example #1
    def build_model(self):

        if not self.use_queue:

            self.rgb_images = tf.placeholder(tf.float32, [self.batch_size] +
                                             self.rgb_shape,
                                             name='rgb_images')
            self.depth_images = tf.placeholder(tf.int32, [self.batch_size] +
                                               self.depth_shape,
                                               name='depth_images')
        else:
            print(' using queue loading')
            self.rgb_single = tf.placeholder(tf.float32, shape=self.rgb_shape)
            self.depth_single = tf.placeholder(tf.int32,
                                               shape=self.depth_shape)
            q = tf.FIFOQueue(4000, [tf.float32, tf.int32],
                             [[self.rgb_shape[0], self.rgb_shape[1], 3],
                              [self.depth_shape[0], self.depth_shape[1], 1]])
            self.enqueue_op = q.enqueue([self.rgb_single, self.depth_single])
            self.rgb_images, self.depth_images = q.dequeue_many(
                self.batch_size)

        self.keep_prob = tf.placeholder(tf.float32)
        net = networks(self.vgg_path)
        self.pred_seg, self.logits = net.inference(self.rgb_images,
                                                   self.keep_prob)
        #net  = networks(self.batch_size,self.df_dim,self.dropout)
        #self.pred_seg, self.logits = net.FCN8(self.rgb_images)
        self.loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=self.logits,
                labels=tf.squeeze(self.depth_images, axis=[3]),
                name='entropy'))
        self.saver = tf.train.Saver(max_to_keep=10)
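
Note: the queue branch above only defines graph ops; at run time a background thread has to feed enqueue_op while the training loop consumes dequeued batches. A minimal driver sketch (not part of the original file), assuming a model object exposing the attributes built above and a hypothetical load_pairs() generator yielding (rgb, depth) arrays shaped like rgb_shape / depth_shape:

    import threading

    def start_enqueue_thread(sess, model, load_pairs):
        # load_pairs() is a hypothetical generator of (rgb, depth) arrays.
        def worker():
            for rgb, depth in load_pairs():
                sess.run(model.enqueue_op,
                         feed_dict={model.rgb_single: rgb,
                                    model.depth_single: depth})
        t = threading.Thread(target=worker)
        t.daemon = True  # don't block interpreter exit
        t.start()
        return t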
Example #2
    def build_model(self):

        self.images = tf.placeholder(tf.float32,
                                     shape=[
                                         self.batch_size,
                                         self.ir_image_shape[0],
                                         self.ir_image_shape[1], 3
                                     ])
        self.normal_images = tf.placeholder(tf.float32,
                                            shape=[
                                                self.batch_size,
                                                self.normal_image_shape[0],
                                                self.normal_image_shape[1], 3
                                            ])
        self.keep_prob = tf.placeholder(tf.float32)
        net = networks(64, self.df_dim)
        self.G = net.generator(self.images)
        self.G = self.G[-1]
        ################ Discriminator Loss ######################
        self.D = net.discriminator(self.normal_images, self.keep_prob)
        self.D_ = net.discriminator(self.G, self.keep_prob, reuse=True)

        self.d_loss_real = binary_cross_entropy_with_logits(
            tf.ones_like(self.D[-1]), self.D[-1])
        self.d_loss_fake = binary_cross_entropy_with_logits(
            tf.zeros_like(self.D_[-1]), self.D_[-1])
        self.d_loss = self.d_loss_real + self.d_loss_fake
        self.d_loss_real_sum = tf.summary.scalar("d_loss_real",
                                                 self.d_loss_real)
        self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake",
                                                 self.d_loss_fake)
        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)

        ########################## Generative loss ################################
        self.ang_loss = ang_loss.ang_error(self.G, self.normal_images)
        self.ang_loss_sum = tf.summary.scalar("ang_loss", self.ang_loss)

        if self.loss == 'L1':
            self.L_loss = tf.reduce_mean(
                tf.abs(tf.subtract(self.G, self.normal_images)))
            self.L_loss_sum = tf.summary.scalar("L1_loss", self.L_loss)
        else:
            self.L_loss = tf.reduce_mean(tf.square(self.G -
                                                   self.normal_images))
            self.L_loss_sum = tf.summary.scalar("L2_loss", self.L_loss)

        self.g_loss = binary_cross_entropy_with_logits(
            tf.ones_like(self.D_[-1]), self.D_[-1])
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.gen_loss = self.g_loss + (self.L_loss +
                                       self.ang_loss) * self.lambda_d
        self.gen_loss_sum = tf.summary.scalar("gen_loss", self.gen_loss)
        t_vars = tf.trainable_variables()
        self.g_vars = [var for var in t_vars if 'g' in var.name]
        self.d_vars = [var for var in t_vars if 'dis' in var.name]

        self.saver = tf.train.Saver(max_to_keep=20)
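
Note: binary_cross_entropy_with_logits is a project-local helper, not a TensorFlow op. A minimal sketch of the usual definition (an assumption, since the helper's source is not shown), treating the discriminator's final output as a sigmoid probability, with an epsilon for numerical stability:

    def binary_cross_entropy_with_logits(targets, preds, eps=1e-12):
        # mean binary cross-entropy; preds are assumed to lie in (0, 1)
        return tf.reduce_mean(
            -(targets * tf.log(preds + eps) +
              (1. - targets) * tf.log(1. - preds + eps)))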
Example #3
    def build_model(self):

        self.rgb_images = tf.placeholder(tf.float32,
                                         [self.batch_size] + self.rgb_image_shape,
                                         name='rgb_images')

        self.keep_prob = tf.placeholder(tf.float32)
        net = networks(self.vgg_path)
        self.pred_seg, self.logits = net.inference(self.rgb_images, self.keep_prob)
        self.saver = tf.train.Saver()
Example #4
    def build_model(self):

        self.ir_images = tf.placeholder(tf.float32,
                                        [self.batch_size] + self.ir_image_shape,
                                        name='ir_images')
        self.normal_images = tf.placeholder(tf.float32,
                                            [self.batch_size] + self.normal_image_shape,
                                            name='normal_images')

        net = networks(self.num_block, self.batch_size, self.df_dim)
        self.G = net.generator(self.ir_images)
        self.sampler = net.sampler(self.ir_images)
        self.saver = tf.train.Saver()
Example #5
    def build_model(self):

        if not self.use_queue:

            self.ir_images = tf.placeholder(tf.float32, [self.batch_size] +
                                            self.ir_image_shape,
                                            name='ir_images')
            self.normal_images = tf.placeholder(tf.float32, [self.batch_size] +
                                                self.normal_image_shape,
                                                name='normal_images')
        else:
            print(' using queue loading')
            self.ir_image_single = tf.placeholder(tf.float32,
                                                  shape=self.ir_image_shape)
            self.normal_image_single = tf.placeholder(
                tf.float32, shape=self.normal_image_shape)
            q = tf.FIFOQueue(
                10000, [tf.float32, tf.float32],
                [[self.ir_image_shape[0], self.ir_image_shape[1], 1],
                 [self.normal_image_shape[0], self.normal_image_shape[1], 3]])
            self.enqueue_op = q.enqueue(
                [self.ir_image_single, self.normal_image_single])
            self.ir_images, self.normal_images = q.dequeue_many(
                self.batch_size)
        """
        self.ir_test = tf.placeholder(tf.float32, [1,600,800,1],name='ir_test')
        self.gt_test = tf.placeholder(tf.float32, [1,600,800,3],name='gt_test')
	"""
        net = networks(self.num_block, self.batch_size, self.df_dim)
        self.G = net.generator(self.ir_images)
        self.D = net.discriminator(
            tf.concat([self.normal_images, self.ir_images], axis=3))
        self.D_ = net.discriminator(
            tf.concat([self.G, self.ir_images], axis=3), reuse=True)

        # generated surface normal
        self.d_loss_real = binary_cross_entropy_with_logits(
            tf.ones_like(self.D), self.D)
        self.d_loss_fake = binary_cross_entropy_with_logits(
            tf.zeros_like(self.D_), self.D_)
        self.d_loss = self.d_loss_real + self.d_loss_fake
        # Note: despite the name, this is a squared-error (L2) term.
        self.L1_loss = tf.reduce_mean(
            tf.square(tf.subtract(self.G, self.normal_images)))
        self.g_loss = binary_cross_entropy_with_logits(tf.ones_like(self.D_),
                                                       self.D_)
        self.gen_loss = self.g_loss + self.L1_loss

        self.saver = tf.train.Saver(max_to_keep=0)
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
Example #6
    def build_model(self):

        self.nondetail_images = tf.placeholder(tf.float32, [self.batch_size] +
                                               self.ir_image_shape,
                                               name='nondetail_images')
        self.detail_images = tf.placeholder(tf.float32, [self.batch_size] +
                                            self.ir_image_shape,
                                            name='detail_images')

        net = networks(64, 64)
        self.nondetail_G, self.detail_G = net.multi_freq_generator_skip(
            self.nondetail_images, self.detail_images)
        self.G = self.nondetail_G[-1] + self.detail_G[-1]
        self.saver = tf.train.Saver()
Example #7
    def build_model(self):

        self.images = tf.placeholder(tf.float32,
                                     shape=[
                                         self.batch_size,
                                         self.ir_image_shape[0],
                                         self.ir_image_shape[1], 1
                                     ])
        self.detail_images = tf.placeholder(tf.float32,
                                            shape=[
                                                self.batch_size,
                                                self.ir_image_shape[0],
                                                self.ir_image_shape[1], 1
                                            ])
        self.nondetail_images = tf.placeholder(tf.float32,
                                               shape=[
                                                   self.batch_size,
                                                   self.ir_image_shape[0],
                                                   self.ir_image_shape[1], 1
                                               ])
        self.normal_images = tf.placeholder(tf.float32,
                                            shape=[
                                                self.batch_size,
                                                self.normal_image_shape[0],
                                                self.normal_image_shape[1], 3
                                            ])
        self.nondetail_normal = tf.placeholder(tf.float32,
                                               shape=[
                                                   self.batch_size,
                                                   self.normal_image_shape[0],
                                                   self.normal_image_shape[1],
                                                   3
                                               ])
        self.keep_prob = tf.placeholder(tf.float32)
        net = networks(64, self.df_dim)
        self.nondetail_G, self.G = net.multi_freq_generator_skip(
            self.nondetail_images, self.detail_images)
        self.G = self.G[-1]

        ################ Discriminator Loss ######################
        if self.pair:
            self.nondetail_D = net.discriminator_low(
                tf.concat([self.nondetail_images, self.nondetail_normal], axis=3),
                self.keep_prob)
            self.nondetail_D_ = net.discriminator_low(
                tf.concat([self.nondetail_images, self.nondetail_G[-1]], axis=3),
                self.keep_prob,
                reuse=True)
            self.D = net.discriminator(self.normal_images, self.keep_prob)
            self.D_ = net.discriminator(self.G, self.keep_prob, reuse=True)
        else:
            self.nondetail_D = net.discriminator_low(self.nondetail_normal,
                                                     self.keep_prob)
            self.nondetail_D_ = net.discriminator_low(self.nondetail_G[-1],
                                                      self.keep_prob,
                                                      reuse=True)
            self.D = net.discriminator(self.normal_images, self.keep_prob)
            self.D_ = net.discriminator(self.G, self.keep_prob, reuse=True)

        #### nondetail resolution ####
        self.nondetail_d_loss_real = binary_cross_entropy_with_logits(
            tf.random_uniform(self.nondetail_D[-1].get_shape(),
                              minval=0.7,
                              maxval=1.2,
                              dtype=tf.float32,
                              seed=0), self.nondetail_D[-1])
        self.nondetail_d_loss_fake = binary_cross_entropy_with_logits(
            tf.random_uniform(self.nondetail_D[-1].get_shape(),
                              minval=0.0,
                              maxval=0.3,
                              dtype=tf.float32,
                              seed=0), self.nondetail_D_[-1])
        self.nondetail_d_loss = self.nondetail_d_loss_real + self.nondetail_d_loss_fake

        self.nondetail_d_loss_real_sum = tf.summary.scalar(
            "nondetail_d_loss_real", self.nondetail_d_loss_real)
        self.nondetail_d_loss_fake_sum = tf.summary.scalar(
            "nondetail_d_loss_fake", self.nondetail_d_loss_fake)
        self.nondetail_d_loss_sum = tf.summary.scalar("nondetail_d_loss",
                                                      self.nondetail_d_loss)

        #### detail resolution ####
        self.d_loss_real = binary_cross_entropy_with_logits(
            tf.ones_like(self.D[-1]), self.D[-1])
        self.d_loss_fake = binary_cross_entropy_with_logits(
            tf.zeros_like(self.D_[-1]), self.D_[-1])
        self.d_loss = self.d_loss_real + self.d_loss_fake
        self.d_loss_real_sum = tf.summary.scalar("d_loss_real",
                                                 self.d_loss_real)
        self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake",
                                                 self.d_loss_fake)
        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)

        ########################## Generative loss ################################
        self.ang_loss = ang_loss.ang_error(self.G, self.normal_images)
        self.ang_loss_sum = tf.summary.scalar("ang_loss", self.ang_loss)

        if self.loss == 'L1':
            self.nondetail_L_loss = tf.reduce_mean(
                tf.abs(tf.subtract(self.nondetail_G[-1],
                                   self.nondetail_normal)))
            self.L_loss = tf.reduce_mean(
                tf.abs(tf.subtract(self.G, self.normal_images)))
            self.nondetail_L_loss_sum = tf.summary.scalar(
                "nondetail_L1_loss", self.nondetail_L_loss)
            self.L_loss_sum = tf.summary.scalar("L1_loss", self.L_loss)
        else:
            self.nondetail_L_loss = tf.reduce_mean(
                tf.square(self.nondetail_G[-1] - self.nondetail_normal))
            self.L_loss = tf.reduce_mean(tf.square(self.G -
                                                   self.normal_images))

            self.nondetail_L_loss_sum = tf.summary.scalar(
                "nondetail_L2_loss", self.nondetail_L_loss)
            self.L_loss_sum = tf.summary.scalar("L2_loss", self.L_loss)

        self.nondetail_g_loss = binary_cross_entropy_with_logits(
            tf.ones_like(self.nondetail_D_[-1]), self.nondetail_D_[-1])
        self.nondetail_g_sum = tf.summary.scalar("low_g_loss",
                                                 self.nondetail_g_loss)

        self.g_loss = binary_cross_entropy_with_logits(
            tf.ones_like(self.D_[-1]), self.D_[-1])
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.nondetail_gen_loss = self.nondetail_g_loss + self.g_loss + (
            self.nondetail_L_loss + self.L_loss +
            self.ang_loss) * self.lambda_g_non
        self.nondetail_gen_loss_sum = tf.summary.scalar(
            "nondetail_gen_loss", self.nondetail_gen_loss)
        self.gen_loss = self.g_loss + self.L_loss + self.ang_loss
        self.gen_loss_sum = tf.summary.scalar("gen_loss", self.gen_loss)

        t_vars = tf.trainable_variables()
        self.nondetail_d_vars = [
            var for var in t_vars if 'low_dis' in var.name
        ]
        self.nondetail_g_vars = [var for var in t_vars if 'low_g' in var.name]
        self.detail_d_vars = [var for var in t_vars if 'high_dis' in var.name]
        self.detail_g_vars = [var for var in t_vars if 'high_g' in var.name]

        self.saver = tf.train.Saver(max_to_keep=20)
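
Note: instead of hard 0/1 targets, the nondetail discriminator above is trained against noisy labels drawn from U(0.7, 1.2) for real samples and U(0.0, 0.3) for fakes, a common GAN stabilization trick (label smoothing plus label noise). The same pattern, factored into a helper for illustration:

    def smoothed_gan_targets(scores, real, seed=0):
        # noisy targets: ~U(0.7, 1.2) for real samples, ~U(0.0, 0.3) for fakes
        lo, hi = (0.7, 1.2) if real else (0.0, 0.3)
        return tf.random_uniform(scores.get_shape(), minval=lo, maxval=hi,
                                 dtype=tf.float32, seed=seed)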
Example #8
    def build_model(self):

        if not self.use_queue:

            self.low_ir_images = tf.placeholder(tf.float32, [self.batch_size] +
                                                self.low_ir_image_shape,
                                                name='low_ir_images')
            self.low_normal_images = tf.placeholder(
                tf.float32, [self.batch_size] + self.low_normal_image_shape,
                name='low_normal_images')
        else:
            print(' using queue loading')
            if self.input_type == 'single':
                self.image_single = tf.placeholder(tf.float32,
                                                   shape=self.ir_image_shape)
                self.normal_image_single = tf.placeholder(
                    tf.float32, shape=self.normal_image_shape)
                q = tf.RandomShuffleQueue(
                    1000, 100, [tf.float32, tf.float32],
                    [[self.ir_image_shape[0], self.ir_image_shape[1], 1],
                     [self.normal_image_shape[0], self.normal_image_shape[1], 3]])
                self.enqueue_op = q.enqueue(
                    [self.image_single, self.normal_image_single])
                self.images, self.normal_images = q.dequeue_many(
                    self.batch_size)

            else:
                self.nondetail_image_single = tf.placeholder(
                    tf.float32, shape=self.ir_image_shape)
                self.detail_image_single = tf.placeholder(
                    tf.float32, shape=self.ir_image_shape)
                self.nondetailnormal_image_single = tf.placeholder(
                    tf.float32, shape=self.normal_image_shape)
                self.detailnormal_image_single = tf.placeholder(
                    tf.float32, shape=self.normal_image_shape)
                self.normal_image_single = tf.placeholder(
                    tf.float32, shape=self.normal_image_shape)

                q = tf.RandomShuffleQueue(
                    1000, 100,
                    [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32],
                    [[self.ir_image_shape[0], self.ir_image_shape[1], 1],
                     [self.ir_image_shape[0], self.ir_image_shape[1], 1],
                     [self.normal_image_shape[0], self.normal_image_shape[1], 3],
                     [self.normal_image_shape[0], self.normal_image_shape[1], 3],
                     [self.normal_image_shape[0], self.normal_image_shape[1], 3]])
                self.enqueue_op = q.enqueue([
                    self.nondetail_image_single, self.detail_image_single,
                    self.nondetailnormal_image_single,
                    self.detailnormal_image_single, self.normal_image_single
                ])
                self.nondetail_images, self.detail_images, self.nondetail_normal, self.detail_normal, self.normal_images = q.dequeue_many(
                    self.batch_size)

        self.keep_prob = tf.placeholder(tf.float32)
        net = networks(64, self.df_dim)
        if self.input_type == 'single':
            self.G = net.generator(self.images)
            self.G = self.G[-1]
        else:
            self.nondetail_G, self.detail_G = net.multi_freq_generator(
                self.nondetail_images, self.detail_images)
            self.G = self.nondetail_G[-1] + self.detail_G[-1]

        ######## evaluation #######
        '''
        if self.input_type == 'single':
            self.sample_G = tf.placeholder(tf.float32, shape=[1,600,800,1], name='sampler')
        else:
            self.sample_low = tf.placeholder(tf.float32, shape=[1,600,800,1], name='sampler_low')
            self.sample_high = tf.placeholder(tf.float32, shape=[1,600,800,1], name='sampler_high')
            self.sample_low_G, self.sample_high_G = net.multi_freq_sampler(self.sample_low, self.sample_high)
            self.sample_G = self.sample_low_G[-1] + self.sample_high_G[-1]
        '''
        ################ Discriminator Loss ######################
        self.detail_D = net.discriminator_high(self.detail_normal,
                                               self.keep_prob)
        self.detail_D_ = net.discriminator_high(self.detail_G[-1],
                                                self.keep_prob,
                                                reuse=True)
        self.nondetail_D = net.discriminator_low(self.nondetail_normal,
                                                 self.keep_prob)
        self.nondetail_D_ = net.discriminator_low(self.nondetail_G[-1],
                                                  self.keep_prob,
                                                  reuse=True)
        '''
        if self.input_type == 'single':
            self.D = net.discriminator(tf.concat([self.images, self.normal], axis=3), self.keep_prob)
            self.D_ = net.discriminator(tf.concat([self.images, self.G[-1]], axis=3), self.keep_prob, reuse=True)
        else:
            self.detail_D = net.discriminator_high(self.detail_normal, self.keep_prob)
            self.detail_D_ = net.discriminator_high(self.detail_G[-1], self.keep_prob, reuse=True)
            self.nondetail_D = net.discriminator_low(self.nondetail_normal, self.keep_prob)
            self.nondetail_D_ = net.discriminator_low(self.nondetail_G[-1], self.keep_prob, reuse=True)
        '''
        '''
        if self.pair:
            self.nondetail_D = net.discriminator_low(tf.concat([self.nondetail_images, self.nondetail_normal], axis=3), self.keep_prob)
            self.nondetail_D_ = net.discriminator_low(tf.concat([self.nondetail_images, self.nondetail_G[-1]], axis=3), self.keep_prob, reuse=True)
            self.detail_D = net.discriminator_high(tf.concat([self.detail_images, self.detail_normal], axis=3), self.keep_prob)
            self.detail_D_ = net.discriminator_high(tf.concat([self.detail_images, self.detail_G[-1]], axis=3), self.keep_prob, reuse=True)
        else:
            self.nondetail_D = net.discriminator_low(self.nondetail_normal, self.keep_prob)
            self.nondetail_D_ = net.discriminator_low(self.nondetail_G[-1], self.keep_prob, reuse=True)
            self.detail_D = net.discriminator_high(self.detail_normal, self.keep_prob)
            self.detail_D_ = net.discriminator_high(self.detail_G[-1], self.keep_prob, reuse=True)
        '''
        #### entire resolution ####
        if self.input_type == 'single':
            self.d_loss_real = binary_cross_entropy_with_logits(
                tf.ones_like(self.D[-1]), self.D[-1])
            self.d_loss_fake = binary_cross_entropy_with_logits(
                tf.zeros_like(self.D_[-1]), self.D_[-1])
            self.d_loss = self.d_loss_real + self.d_loss_fake

        else:
            #### nondetail resolution ####
            self.nondetail_d_loss_real = binary_cross_entropy_with_logits(
                tf.random_uniform(self.nondetail_D[-1].get_shape(),
                                  minval=0.7,
                                  maxval=1.2,
                                  dtype=tf.float32,
                                  seed=0), self.nondetail_D[-1])
            self.nondetail_d_loss_fake = binary_cross_entropy_with_logits(
                tf.random_uniform(self.nondetail_D[-1].get_shape(),
                                  minval=0.0,
                                  maxval=0.3,
                                  dtype=tf.float32,
                                  seed=0), self.nondetail_D_[-1])
            self.nondetail_d_loss = self.nondetail_d_loss_real + self.nondetail_d_loss_fake

            #### detail resolution ####
            self.detail_d_loss_real = binary_cross_entropy_with_logits(
                tf.random_uniform(self.detail_D[-1].get_shape(),
                                  minval=0.7,
                                  maxval=1.2,
                                  dtype=tf.float32,
                                  seed=0), self.detail_D[-1])
            self.detail_d_loss_fake = binary_cross_entropy_with_logits(
                tf.random_uniform(self.detail_D[-1].get_shape(),
                                  minval=0.0,
                                  maxval=0.3,
                                  dtype=tf.float32,
                                  seed=0), self.detail_D_[-1])
            self.detail_d_loss = self.detail_d_loss_real + self.detail_d_loss_fake

        ########################## Generative loss ################################
        self.ang_loss = ang_loss.ang_error(self.G, self.normal_images)

        if self.loss == 'L1':
            if self.input_type == 'single':
                self.L_loss = tf.reduce_mean(
                    tf.abs(tf.subtract(self.G, self.normal_images)))

            else:
                self.nondetail_L_loss = tf.reduce_mean(
                    tf.abs(
                        tf.subtract(self.nondetail_G[-1],
                                    self.nondetail_normal)))
                self.detail_L_loss = tf.reduce_mean(
                    tf.abs(tf.subtract(self.detail_G[-1], self.detail_normal)))
                self.L_loss = tf.reduce_mean(
                    tf.abs(tf.subtract(self.G, self.normal_images)))
        else:
            if self.input_type == 'single':
                self.L_loss = tf.reduce_mean(
                    tf.square(self.G - self.normal_images))
            else:
                self.nondetail_L_loss = tf.reduce_mean(
                    tf.square(self.nondetail_G[-1] - self.nondetail_normal))
                self.detail_L_loss = tf.reduce_mean(
                    tf.square(self.detail_G[-1] - self.detail_normal))
                self.L_loss = tf.reduce_mean(
                    tf.square(self.G - self.normal_images))

        if self.input_type == 'single':
            self.g_loss = binary_cross_entropy_with_logits(
                tf.ones_like(self.D_[-1]), self.D_[-1])
            self.gen_loss = self.g_loss + (self.L_loss + self.ang_loss) * 100
            t_vars = tf.trainable_variables()
            self.g_vars = [var for var in t_vars if 'g' in var.name]
            self.d_vars = [var for var in t_vars if 'dis' in var.name]

        else:
            self.nondetail_g_loss = binary_cross_entropy_with_logits(
                tf.ones_like(self.nondetail_D_[-1]), self.nondetail_D_[-1])
            self.detail_g_loss = binary_cross_entropy_with_logits(
                tf.ones_like(self.detail_D_[-1]), self.detail_D_[-1])
            self.nondetail_gen_loss = self.nondetail_g_loss + (
                self.nondetail_L_loss + self.L_loss + self.ang_loss) * 100
            self.detail_gen_loss = self.detail_g_loss + (
                self.detail_L_loss) * 1000 + (self.L_loss +
                                              self.ang_loss) * 1000

            t_vars = tf.trainable_variables()
            self.nondetail_d_vars = [
                var for var in t_vars if 'low_dis' in var.name
            ]
            self.nondetail_g_vars = [
                var for var in t_vars if 'low_g' in var.name
            ]
            self.detail_d_vars = [
                var for var in t_vars if 'high_dis' in var.name
            ]
            self.detail_g_vars = [
                var for var in t_vars if 'high_g' in var.name
            ]

        self.saver = tf.train.Saver(max_to_keep=20)
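
Note: the four variable lists above exist so that each branch can be trained by its own optimizer restricted to the matching var_list. A minimal sketch of that wiring (continuing inside build_model; self.lr is a hypothetical learning-rate attribute, not defined in the original):

        self.d_optim_low = tf.train.AdamOptimizer(self.lr).minimize(
            self.nondetail_d_loss, var_list=self.nondetail_d_vars)
        self.g_optim_low = tf.train.AdamOptimizer(self.lr).minimize(
            self.nondetail_gen_loss, var_list=self.nondetail_g_vars)
        self.d_optim_high = tf.train.AdamOptimizer(self.lr).minimize(
            self.detail_d_loss, var_list=self.detail_d_vars)
        self.g_optim_high = tf.train.AdamOptimizer(self.lr).minimize(
            self.detail_gen_loss, var_list=self.detail_g_vars)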
Example #9
    def build_model(self):


        if not self.use_queue:

            self.ir_images = tf.placeholder(tf.float32,
                                            [self.batch_size] + self.ir_image_shape,
                                            name='ir_images')
            self.normal_images = tf.placeholder(tf.float32,
                                                [self.batch_size] + self.normal_image_shape,
                                                name='normal_images')
        else:
            print(' using queue loading')
            self.ir_image_single = tf.placeholder(tf.float32, shape=self.ir_image_shape)
            self.normal_image_single = tf.placeholder(tf.float32, shape=self.normal_image_shape)
            q = tf.FIFOQueue(
                4000, [tf.float32, tf.float32],
                [[self.ir_image_shape[0], self.ir_image_shape[1], 1],
                 [self.normal_image_shape[0], self.normal_image_shape[1], 3]])
            self.enqueue_op = q.enqueue([self.ir_image_single, self.normal_image_single])
            self.ir_images, self.normal_images = q.dequeue_many(self.batch_size)

        #self.ir_test = tf.placeholder(tf.float32, [1,600,800,1],name='ir_test')
        self.noise = tf.placeholder(tf.float32,
                                    [self.batch_size] + self.ir_image_shape,
                                    name='noise')
        self.keep_prob = tf.placeholder(tf.float32)
        net = networks(self.batch_size, self.df_dim)
        self.G, self.G2 = net.generator(self.ir_images)
        print('Loading VGG network \n')
        vgg_pretrained = VGG(self.vgg_model)
        # Perceptual (VGG feature) losses, normalized by the squared feature size.
        self.G_low, self.G_high = vgg_pretrained.vgg_net(self.G2)
        self.real_low, self.real_high = vgg_pretrained.vgg_net(self.normal_images, reuse=True)
        size = tf.to_float(tf.size(self.G_low))
        self.low_loss = tf.nn.l2_loss(self.G_low - self.real_low) / size**2
        size = tf.to_float(tf.size(self.G_high))
        self.high_loss = tf.nn.l2_loss(self.G_high - self.real_high) / size**2

        if self.dis_loss:
            if self.pair:
                self.D = net.discriminator(
                    tf.concat([self.normal_images, self.ir_images], axis=3),
                    self.keep_prob)
                self.D_ = net.discriminator(
                    tf.concat([self.G, self.ir_images], axis=3),
                    self.keep_prob, reuse=True)
            else:
                self.D = net.discriminator(self.normal_images, self.keep_prob)
                self.D_ = net.discriminator(self.G, self.keep_prob, reuse=True)

            # Discriminator loss with noisy (smoothed) targets
            self.d_loss_real = binary_cross_entropy_with_logits(
                tf.random_uniform(self.D.get_shape(), minval=0.7, maxval=1.2,
                                  dtype=tf.float32, seed=0), self.D)
            self.d_loss_fake = binary_cross_entropy_with_logits(
                tf.random_uniform(self.D.get_shape(), minval=0.0, maxval=0.3,
                                  dtype=tf.float32, seed=0), self.D_)
            self.d_loss = self.d_loss_real + self.d_loss_fake

        # generator loss #
        # reuse the discriminator variables if they were already built above
        self.D_ = net.discriminator(
            tf.concat([self.G, self.ir_images], axis=3),
            self.keep_prob, reuse=self.dis_loss)
        if self.loss == 'L1':
            self.L_loss = tf.reduce_mean(tf.abs(tf.subtract(self.G, self.normal_images)))
        else:
            self.L_loss = tf.reduce_mean(tf.square(tf.subtract(self.G, self.normal_images)))
        self.g_loss = binary_cross_entropy_with_logits(tf.ones_like(self.D_), self.D_)

        self.ang_loss = norm_(self.G, self.normal_images)

        self.gen_loss = self.g_loss + self.L_loss + self.ang_loss + self.high_loss
        #self.gen_loss = self.g_loss + self.L_loss + self.ang_loss + self.low_loss + self.high_loss

        self.saver = tf.train.Saver(max_to_keep=10)
        t_vars = tf.trainable_variables()
        if self.dis_loss:
            self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
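
Note: the perceptual terms low_loss / high_loss compare VGG feature maps of the generated and real normal maps; tf.nn.l2_loss already includes a 1/2 factor, so each term equals ||f_G - f_real||^2 / (2 * n^2) for a feature tensor with n elements. The same computation as a standalone helper:

    def perceptual_term(feat_g, feat_real):
        # ||feat_g - feat_real||^2 / 2, scaled by 1 / n^2
        n = tf.to_float(tf.size(feat_g))
        return tf.nn.l2_loss(feat_g - feat_real) / n**2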