Example #1
def test(config, load_path, acc=0.3):
    torch.backends.cudnn.benchmark = True
    dataset = generator(config, train_flg=False)
    # merge the dataset's image shape into the (project-specific) transforms config
    config.transforms += {'img_shape': dataset.get_img_shape()}
    model = net[config.struct.name](config)
    # restore trained weights; load_path was accepted but never used
    model.load(load_path)
    if model.cuda_flg and (torch.cuda.device_count() > 1):
        model = nn.DataParallel(model)
        model.cuda()
    model.test(dataset, acc)
Example #2
def train(config, save_path):
    torch.backends.cudnn.benchmark = True
    dataset = generator(config, train_flg=True)
    config.transforms += {'img_shape': dataset.get_img_shape()}
    model = net[config.struct.name](config)
    # replicate across GPUs when more than one is available
    if model.cuda_flg and (torch.cuda.device_count() > 1):
        model = nn.DataParallel(model)
        model.cuda()
    model.train(dataset)   # project-specific training loop (not nn.Module.train)
    model.save(save_path)
    model.sample_image()   # write sample generations for inspection
Example #3
import torch
import torch.nn as nn

import generator       # project modules defining netG and netD
import discriminator

# mixing weight between the reconstruction (L2) and adversarial losses
wtl2 = 0.999


# custom weights initialization called on netG and netD
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


resume_epoch = 0  # epoch to resume training from (0 = start fresh)

netG = generator.generator()
netG.apply(weights_init)

netD = discriminator.discriminator()
netD.apply(weights_init)

criterion = nn.BCELoss()     # adversarial (real/fake) loss
criterionMSE = nn.MSELoss()  # reconstruction (L2) loss

# pre-allocated batch buffers (legacy, pre-0.4 PyTorch style); Batch_Size is
# assumed to be defined earlier in the script
input_real = torch.FloatTensor(Batch_Size, 3, 128, 128)
input_cropped = torch.FloatTensor(Batch_Size, 3, 128, 128)
label = torch.FloatTensor(Batch_Size)
real_label = 1
fake_label = 0

# ground-truth 64x64 center patch of each image
real_center = torch.FloatTensor(Batch_Size, 3, 64, 64)
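
The snippet above only declares the losses and buffers. A hedged sketch of how wtl2, criterion, and criterionMSE are typically combined in the generator update of an inpainting GAN; fake, output, errG_D, and errG_l2 are illustrative names, not from the original:

# hypothetical generator step: `fake` is netG's reconstruction of the masked
# center and `output` is netD's verdict on it
output = netD(fake)
label.fill_(real_label)                    # the generator wants D to say "real"
errG_D = criterion(output, label)          # adversarial term (BCE)
errG_l2 = criterionMSE(fake, real_center)  # reconstruction term (MSE)
errG = (1 - wtl2) * errG_D + wtl2 * errG_l2
errG.backward()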
Example #4
def test_show(config, load_path, acc=0.3, idx=None):
    dataset = generator(config, train_flg=False)
    config.transforms += {'img_shape': dataset.get_img_shape()}
    model = net[config.struct.name](config, train_flg=False)
    model.load(load_path)  # restore trained weights
    model.test_show(dataset, acc, idx=idx)  # idx optionally selects which samples to show
Example #5
    def build_cycle(self):
        self.X = tf.placeholder(
            tf.float32, shape=[None, self.size, self.size, self.channel])
        self.Y = tf.placeholder(
            tf.float32, shape=[None, self.size, self.size, self.channel])

        # 3rd positional argument is `reuse`: each generator's variables are
        # created on its first call and shared on the second
        self.fake_B = generator(self.X,
                                self.options,
                                False,
                                name="generatorA2B")
        self.fake_A_ = generator(self.fake_B,
                                 self.options,
                                 False,
                                 name="generatorB2A")
        self.fake_A = generator(self.Y,
                                self.options,
                                True,
                                name="generatorB2A")
        self.fake_B_ = generator(self.fake_A,
                                 self.options,
                                 True,
                                 name="generatorA2B")

        self.DB_fake = self.discriminator(self.fake_B,
                                          self.options,
                                          reuse=False,
                                          name="discriminatorB")
        self.DA_fake = self.discriminator(self.fake_A,
                                          self.options,
                                          reuse=False,
                                          name="discriminatorA")
        # Are the discriminator parameters updated here? No -- the var_list
        # argument of tf.train.AdamOptimizer.minimize() restricts which
        # variables each optimizer updates (see the solvers at the bottom).
        # Experimenting with a changed loss here (makes little difference).
        self.g_loss_a2b = self.mae_criterion(self.DB_fake, tf.ones_like(self.DB_fake)) \
                          + self.abs_criterion(self.X, self.fake_A_) \
                          + self.abs_criterion(self.Y, self.fake_B_)
        self.g_loss_b2a = self.mae_criterion(self.DA_fake, tf.ones_like(self.DA_fake)) \
                          + self.abs_criterion(self.X, self.fake_A_) \
                          + self.abs_criterion(self.Y, self.fake_B_)
        self.G_loss = self.mae_criterion(self.DA_fake, tf.ones_like(self.DA_fake)) \
                      + self.mae_criterion(self.DB_fake, tf.ones_like(self.DB_fake)) \
                      + self.abs_criterion(self.X, self.fake_A_) \
                      + self.abs_criterion(self.Y, self.fake_B_)

        # placeholders for a pool/history of generated samples; unused below,
        # since the discriminators are fed the live fakes instead
        self.fake_A_sample = tf.placeholder(
            tf.float32, [None, self.size, self.size, self.options.input_c_dim],
            name='fake_A_sample')
        self.fake_B_sample = tf.placeholder(
            tf.float32, [None, self.size, self.size, self.options.input_c_dim],
            name='fake_B_sample')
        self.DB_real = self.discriminator(self.Y,
                                          self.options,
                                          reuse=True,
                                          name="discriminatorB")
        self.DA_real = self.discriminator(self.X,
                                          self.options,
                                          reuse=True,
                                          name="discriminatorA")
        #self.DB_fake_sample = self.discriminator(self.fake_B_sample, self.options, reuse=True, name="discriminatorB")
        #self.DA_fake_sample = self.discriminator(self.fake_A_sample, self.options, reuse=True, name="discriminatorA")
        self.DB_fake_sample = self.discriminator(self.fake_B,
                                                 self.options,
                                                 reuse=True,
                                                 name="discriminatorB")
        self.DA_fake_sample = self.discriminator(self.fake_A,
                                                 self.options,
                                                 reuse=True,
                                                 name="discriminatorA")

        self.db_loss_real = self.mae_criterion(self.DB_real,
                                               tf.ones_like(self.DB_real))
        self.db_loss_fake = self.mae_criterion(
            self.DB_fake_sample, tf.zeros_like(self.DB_fake_sample))
        self.db_loss = (self.db_loss_real + self.db_loss_fake) / 2
        self.da_loss_real = self.mae_criterion(self.DA_real,
                                               tf.ones_like(self.DA_real))
        self.da_loss_fake = self.mae_criterion(
            self.DA_fake_sample, tf.zeros_like(self.DA_fake_sample))
        self.da_loss = (self.da_loss_real + self.da_loss_fake) / 2
        self.D_loss = self.da_loss + self.db_loss
        # disabled TensorBoard-summary and test-graph code; note it refers to
        # self.g_loss / self.d_loss, which here are named G_loss / D_loss
        '''
        self.g_loss_a2b_sum = tf.summary.scalar("g_loss_a2b", self.g_loss_a2b)
        self.g_loss_b2a_sum = tf.summary.scalar("g_loss_b2a", self.g_loss_b2a)
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.g_sum = tf.summary.merge([self.g_loss_a2b_sum, self.g_loss_b2a_sum, self.g_loss_sum])
        self.db_loss_sum = tf.summary.scalar("db_loss", self.db_loss)
        self.da_loss_sum = tf.summary.scalar("da_loss", self.da_loss)
        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
        self.db_loss_real_sum = tf.summary.scalar("db_loss_real", self.db_loss_real)
        self.db_loss_fake_sum = tf.summary.scalar("db_loss_fake", self.db_loss_fake)
        self.da_loss_real_sum = tf.summary.scalar("da_loss_real", self.da_loss_real)
        self.da_loss_fake_sum = tf.summary.scalar("da_loss_fake", self.da_loss_fake)
        self.d_sum = tf.summary.merge(
            [self.da_loss_sum, self.da_loss_real_sum, self.da_loss_fake_sum,
             self.db_loss_sum, self.db_loss_real_sum, self.db_loss_fake_sum,
             self.d_loss_sum]
        )

        self.test_A = tf.placeholder(tf.float32,
                                     [None, self.image_size, self.image_size,
                                      self.input_c_dim], name='test_A')
        self.test_B = tf.placeholder(tf.float32,
                                     [None, self.image_size, self.image_size,
                                      self.output_c_dim], name='test_B')
        self.testB = self.generator(self.test_A, self.options, True, name="generatorA2B")
        self.testA = self.generator(self.test_B, self.options, True, name="generatorB2A")
        '''
        # partition trainable variables so each optimizer touches only its own network
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'discriminator' in var.name]
        self.g_vars = [var for var in t_vars if 'generator' in var.name]
        #for var in t_vars: print(var.name)
        self.D_solver = tf.train.AdamOptimizer(0.0002, beta1=0.5) \
            .minimize(self.D_loss, var_list=self.d_vars)
        self.G_solver = tf.train.AdamOptimizer(0.0002, beta1=0.5) \
            .minimize(self.G_loss, var_list=self.g_vars)
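
The translated comment above is worth making concrete: in TF 1.x, minimize(..., var_list=...) only ever updates the variables in var_list, so passing fake images through the discriminator inside the generator's loss does not train the discriminator. A minimal, self-contained sketch (illustrative names, not part of the original):

import tensorflow as tf  # TF 1.x API

with tf.variable_scope("generator"):
    g_w = tf.get_variable("w", initializer=1.0)
with tf.variable_scope("discriminator"):
    d_w = tf.get_variable("w", initializer=1.0)
loss = tf.square(g_w) + tf.square(d_w)  # depends on both variables

t_vars = tf.trainable_variables()
g_vars = [v for v in t_vars if "generator" in v.name]

# only g_w is in var_list, so d_w receives no update from this op
g_step = tf.train.AdamOptimizer(0.1).minimize(loss, var_list=g_vars)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(g_step)
    print(sess.run([g_w, d_w]))  # g_w has moved; d_w is still 1.0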
Example #6
	session_conf = tf.ConfigProto(
		allow_soft_placement=FLAGS.allow_soft_placement,
		log_device_placement=FLAGS.log_device_placement,
		inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
		intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads)
	sess = tf.Session(config=session_conf)
	with sess.as_default():
		# Initialize model
		noise_input = tf.placeholder(tf.float32, shape=[None, 100], name="noise_input")
		true_score_images = tf.placeholder(tf.float32, shape=[None, 1000, 1000], name="true_score_images")
		true_lab_images = tf.placeholder(tf.float32, shape=[None, 1000, 1000], name="true_lab_images")
		true_labels = tf.placeholder(tf.int32, shape=[None], name="true_labels")
		true_scores = tf.placeholder(tf.int32, shape=[None], name="true_scores")

		# Get generator network definition from helper file
		output_images = generator(noise_input, FLAGS.batch_size)
		print(output_images.get_shape())  # sanity check

		# Get both discriminators' definitions from helper files
		logits_true = discriminator_label(true_lab_images)
		logits_fake = discriminator_label(output_images, reuse=True)
		score_true = discriminator_score(true_score_images)
		score_fake = discriminator_score(output_images, reuse=True)

		# Set the target labels for the generated images
		target = tf.tile([1], [FLAGS.batch_size])

		# Add the discriminators' losses
		loss_discr_label = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
			labels=true_labels, logits=logits_true))
		# cast: absolute_difference requires labels and predictions to share a dtype
		loss_discr_score = tf.losses.absolute_difference(
			labels=tf.cast(true_scores, tf.float32), predictions=score_true,
			reduction=tf.losses.Reduction.MEAN)
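		# `target` above is unused in this excerpt; presumably it feeds the
		# generator's adversarial loss against the label discriminator,
		# roughly as sketched here (loss_gen is an illustrative name, not
		# from the original)
		loss_gen = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
			labels=target, logits=logits_fake))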