Esempio n. 1
0
 def pretrain_gen(self):
     """
     Pretrain the generator with maximum likelihood.

     Training data is generated by ``self.target_lstm`` (the fixed
     "real" data source); each epoch the generator is trained on it,
     then generator samples are scored under ``self.target_lstm`` and
     both losses are printed.
     """
     # Draw the training corpus from target_lstm and persist it to disk.
     util.generate_samples(self.target_lstm, self.batch_size,
                           self.sequence_len, self.generate_sum,
                           self.real_file)
     gen_data = GenData(self.real_file)
     gen_data_loader = DataLoader(gen_data,
                                  batch_size=self.batch_size,
                                  shuffle=True,
                                  num_workers=8)
     # reduction='sum' replaces the deprecated size_average=False,
     # reduce=True pair — behavior is identical (summed loss).
     gen_criterion = util.to_cuda(nn.CrossEntropyLoss(reduction='sum'))
     gen_optim = torch.optim.Adam(self.generator.parameters(), self.lr)
     print('\nPretrain generator......')
     for epoch in range(self.pre_gen_epochs):
         train_loss = self.train_epoch(self.generator, gen_data_loader,
                                       gen_criterion, gen_optim)
         # Evaluation: generate samples and score them with target_lstm.
         util.generate_samples(self.generator, self.batch_size,
                               self.sequence_len, self.generate_sum,
                               self.eval_file)
         eval_data = GenData(self.eval_file)
         eval_data_loader = DataLoader(eval_data,
                                       batch_size=self.batch_size,
                                       shuffle=True,
                                       num_workers=8)
         eval_loss = self.eval_epoch(self.target_lstm, eval_data_loader,
                                     gen_criterion)
         print(
             'epoch: {:4d}, train_loss: {:6.4f}, eval_loss: {:6.4f}'.format(
                 epoch, train_loss, eval_loss))
Esempio n. 2
0
def run_test(n_samples=1000,
             grad=False,
             show_plot=True,
             save_plot=False,
             data_seed=0,
             model_seed=1):
    """
    Train and test every model configured in ``tests`` on fresh data.

    Parameters
    ----------
    n_samples : int
        Number of training samples (and, separately, test samples).
    grad : bool
        Whether autograd is enabled globally for the run.
    show_plot : bool
        If True, plot per-epoch training error percentages per model.
    save_plot : bool
        If True (and show_plot), save the figure to 'train_error.png'.
    data_seed, model_seed : int
        Seeds for data generation and model initialisation; re-seeding
        with model_seed before each model gives identical init weights.
    """
    torch.set_grad_enabled(grad)

    torch.manual_seed(data_seed)
    train_input, train_target = util.generate_samples(n_samples)
    test_input, test_target = util.generate_samples(n_samples)

    train_results = dict()
    test_results = dict()
    for name, (model, criterion, optimizer, learning_rate) in tests.items():
        # Re-seed so every model starts from the same initial weights.
        torch.manual_seed(model_seed)
        model = model()
        criterion = criterion()
        # NOTE(review): `model.parameters` is passed uncalled; this only
        # works if the project's optimizer API expects the bound method —
        # confirm it is not meant to be `model.parameters()`.
        optimizer = optimizer(model.parameters, learning_rate=learning_rate)
        print(name)
        print('Train ' + ('-' * (42 - len('Train '))))
        train_results[name] = batch_train(model,
                                          criterion,
                                          optimizer,
                                          train_input,
                                          train_target,
                                          verbose=True,
                                          nb_errors=True)
        model.eval()
        print('Test ' + ('-' * (42 - len('Test '))))
        test_results[name] = batch_test(model,
                                        criterion,
                                        test_input,
                                        test_target,
                                        verbose=True,
                                        nb_errors=True)
        print('=' * 42)
        print('')

    if show_plot:
        import matplotlib.pyplot as plt

        # Per-model training error percentage for each recorded epoch.
        train_errors = {
            name: [
                100 * compute_nb_errors(predictions.argmax(1), train_target) /
                len(train_target) for _, predictions in values
            ]
            for name, values in train_results.items()
        }
        # Plain loop: comprehensions are for building values, not for
        # side effects such as plotting.
        for name, values in train_errors.items():
            plt.plot(range(len(values)), values, label=name)
        plt.legend()
        plt.xlim((0, 100))
        plt.xlabel('Epoch')
        plt.ylabel('Error Percentage')
        if save_plot:
            plt.savefig('train_error.png')
        plt.show()
Esempio n. 3
0
	def pretrain_dis(self):
		"""
		Pretrain the discriminator on real vs. generated sequences.

		Each epoch: sample fake sequences from the generator into
		``self.fake_file``, pair them with ``self.real_file`` in a
		DisData dataset, and run one training epoch.
		"""
		dis_criterion = util.to_cuda(nn.BCEWithLogitsLoss(size_average=False))
		dis_optim = torch.optim.Adam(self.discriminator.parameters(), self.lr)
		# print() with a single argument is valid in both Python 2 and 3
		# (the original used Python-2-only print statements).
		print('\nPretrain discriminator......')
		for epoch in range(self.pre_dis_epochs):
			util.generate_samples(self.generator, self.batch_size, self.sequence_len, self.generate_sum, self.fake_file)
			dis_data = DisData(self.real_file, self.fake_file)
			dis_data_loader = DataLoader(dis_data, batch_size=self.batch_size, shuffle=True, num_workers=8)
			loss = self.train_epoch(self.discriminator, dis_data_loader, dis_criterion, dis_optim)
			print('epoch: [{0:d}], loss: [{1:.4f}]'.format(epoch, loss))
Esempio n. 4
0
	def pretrain_gen(self):
		"""
		Pretrain the generator with maximum likelihood.

		Each epoch trains the generator on data produced by
		``self.target_lstm``, then scores generator samples under
		``self.target_lstm`` ("true loss") and prints both losses.
		"""
		util.generate_samples(self.target_lstm, self.batch_size, self.sequence_len, self.generate_sum, self.real_file)
		gen_data = GenData(self.real_file)
		gen_data_loader = DataLoader(gen_data, batch_size=self.batch_size, shuffle=True, num_workers=8)
		gen_criterion = util.to_cuda(nn.CrossEntropyLoss(size_average=False, reduce=True))
		gen_optim = torch.optim.Adam(self.generator.parameters(), self.lr)
		# print() with a single argument is valid in both Python 2 and 3
		# (the original used Python-2-only print statements).
		print('\nPretrain generator......')
		for epoch in range(self.pre_gen_epochs):
			loss = self.train_epoch(self.generator, gen_data_loader, gen_criterion, gen_optim)
			print('epoch: [{0:d}], model loss: [{1:.4f}]'.format(epoch, loss))
			util.generate_samples(self.generator, self.batch_size, self.sequence_len, self.generate_sum, self.eval_file)
			eval_data = GenData(self.eval_file)
			eval_data_loader = DataLoader(eval_data, batch_size=self.batch_size, shuffle=True, num_workers=8)
			loss = self.eval_epoch(self.target_lstm, eval_data_loader, gen_criterion)
			print('epoch: [{0:d}], true loss: [{1:.4f}]'.format(epoch, loss))
Esempio n. 5
0
	def train_gan(self, backend):
		"""
		Adversarial training loop: alternately update the generator via
		policy gradient with rollout rewards, and the discriminator on
		real vs. freshly generated data.

		Parameters
		----------
		backend : str
			Label of the compute backend; only used in the timing output.
		"""
		rollout = Rollout(self.generator, self.discriminator, self.update_rate)
		print('\nStart Adversarial Training......')
		gen_optim, dis_optim = torch.optim.Adam(self.generator.parameters(), self.lr), torch.optim.Adam(self.discriminator.parameters(), self.lr)
		dis_criterion = util.to_cuda(nn.BCEWithLogitsLoss(size_average=False))
		gen_criterion = util.to_cuda(nn.CrossEntropyLoss(size_average=False, reduce=True))

		for epoch in range(self.gan_epochs):

			start = time.time()
			# --- generator update via policy gradient -------------------
			for _ in range(1):
				samples = self.generator.sample(self.batch_size, self.sequence_len) # (batch_size, sequence_len)
				zeros = util.to_var(torch.zeros(self.batch_size, 1).long()) # (batch_size, 1)
				# NOTE(review): cat([samples, zeros])[:, :-1] reduces to
				# `samples` itself (the appended zeros are dropped); the
				# usual teacher-forcing input is cat([zeros, samples])[:, :-1]
				# — confirm this is intended.
				inputs = torch.cat([samples, zeros], dim=1)[:, :-1] # (batch_size, sequence_len)
				rewards = rollout.reward(samples, 16) # (batch_size, sequence_len)
				rewards = util.to_var(torch.from_numpy(rewards))
				logits = self.generator(inputs) # (None, vocab_size, sequence_len)
				pg_loss = self.pg_loss(logits, samples, rewards)
				gen_optim.zero_grad()
				pg_loss.backward()
				gen_optim.step()

			print('generator updated via policy gradient......')

			# Periodically score generator samples under target_lstm.
			if epoch % 10 == 0:
				util.generate_samples(self.generator, self.batch_size, self.sequence_len, self.generate_sum, self.eval_file)
				eval_data = GenData(self.eval_file)
				eval_data_loader = DataLoader(eval_data, batch_size=self.batch_size, shuffle=True, num_workers=8)
				loss = self.eval_epoch(self.target_lstm, eval_data_loader, gen_criterion)
				print('epoch: [{0:d}], true loss: [{1:.4f}]'.format(epoch, loss))

			# --- discriminator update -----------------------------------
			for _ in range(1):
				util.generate_samples(self.generator, self.batch_size, self.sequence_len, self.generate_sum, self.fake_file)
				dis_data = DisData(self.real_file, self.fake_file)
				dis_data_loader = DataLoader(dis_data, batch_size=self.batch_size, shuffle=True, num_workers=8)
				for _ in range(1):
					loss = self.train_epoch(self.discriminator, dis_data_loader, dis_criterion, dis_optim)

			print('discriminator updated via gan loss......')

			rollout.update_params()

			end = time.time()

			print('time: [{:.3f}s/epoch] in {}'.format(end-start, backend))
Esempio n. 6
0
    def evaluate(self, ggen: GeometricVideoGenerator, cgen: ColorVideoGenerator):
        """
        Evaluate generated samples using evaluation metrics for GANs.
        Currently supporting Inception Score ("is"), Frechet Inception
        Distance ("fid") and precision/recall ("prd").

        Parameters
        ----------
        ggen : GeometricVideoGenerator
            The geometric information video generator.

        cgen : ColorVideoGenerator
            The color video generator.

        Raises
        ------
        ValueError
            If an entry of ``self.eval_metrics`` is not a known metric.
        """
        verbose = True

        # generate fake samples (color videos only, geometry skipped)
        _, xc = util.generate_samples(
            ggen,
            cgen,
            self.eval_num_samples,
            self.eval_batchsize,
            with_geo=False,
            desc=f"sampling {self.eval_num_samples} videos for evaluation",
            verbose=verbose,
        )
        # Move generators off the device while scoring runs.
        ggen, cgen = ggen.to("cpu"), cgen.to("cpu")

        # save the samples in a temporary directory as mp4 files
        temp = tempfile.TemporaryDirectory()
        temp_dir = Path(temp.name)
        save_paths = [temp_dir / f"{i}.mp4" for i in range(self.eval_num_samples)]
        dataio.write_videos_pararell(xc, save_paths)

        # directory holding the real color videos of the dataset
        dataset_dir = Path(self.dataloader.dataset.root_path) / "color"
        batchsize = 100
        for m in self.eval_metrics:
            if m == "is":
                score = evan.score.compute_inception_score(
                    temp_dir, batchsize=batchsize, verbose=verbose
                )
            elif m == "fid":
                score = evan.score.compute_frechet_distance(
                    temp_dir, dataset_dir, batchsize=batchsize, verbose=verbose
                )
            elif m == "prd":
                score = evan.score.compute_precision_recall(
                    temp_dir, dataset_dir, batchsize=batchsize, verbose=verbose
                )
            else:
                # Previously an unknown metric silently logged the stale
                # score from the preceding iteration (or hit a NameError
                # on the first one). Fail loudly instead.
                raise ValueError(f"unknown evaluation metric: {m}")

            self.logger.update(m, score)

        temp.cleanup()
        # Restore the generators to the training device.
        ggen, cgen = ggen.to(self.device), cgen.to(self.device)
Esempio n. 7
0
def main():
    """
    Sample videos from trained generators and write them out as mp4.

    CLI: result_dir iteration save_dir [--n_samples N] [--batchsize B].
    Color videos go to ``save_dir/color`` and geometric-information
    videos to ``save_dir/<geometric_info name from config>``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("result_dir", type=Path)
    parser.add_argument("iteration", type=int)
    parser.add_argument("save_dir", type=Path)
    parser.add_argument("--n_samples", "-n", type=int, default=10000)
    parser.add_argument("--batchsize", "-b", type=int, default=10)
    args = parser.parse_args()

    # read the experiment configuration
    with open(args.result_dir / "config.yml") as f:
        configs = yaml.load(f, Loader=yaml.FullLoader)

    # restore both generators: architecture file + iteration weights
    models_dir = args.result_dir / "models"
    ggen = load_model(
        models_dir / "ggen_model.pth",
        models_dir / f"ggen_params_{args.iteration:05d}.pth",
    )
    cgen = load_model(
        models_dir / "cgen_model.pth",
        models_dir / f"cgen_params_{args.iteration:05d}.pth",
    )

    # prepare the output directories
    color_dir = args.save_dir / "color"
    geo_dir = args.save_dir / configs["geometric_info"]["name"]
    for out_dir in (color_dir, geo_dir):
        out_dir.mkdir(parents=True, exist_ok=True)

    # sample in batches, numbering files consecutively across batches
    for offset in tqdm(range(0, args.n_samples, args.batchsize)):
        xg, xc = util.generate_samples(
            ggen, cgen, args.batchsize, args.batchsize, verbose=False
        )

        # (B, C, T, H, W) -> (B, T, H, W, C)
        xg = xg.transpose(0, 2, 3, 4, 1)
        xc = xc.transpose(0, 2, 3, 4, 1)

        dataio.write_videos_pararell(
            xg, [geo_dir / "{:06d}.mp4".format(offset + i) for i in range(len(xg))]
        )
        dataio.write_videos_pararell(
            xc, [color_dir / "{:06d}.mp4".format(offset + i) for i in range(len(xc))]
        )
Esempio n. 8
0
    def test_generate_samples(self):
        """generate_samples yields uint8 arrays shaped (N, 3, T, H, W)."""
        IMAGE_SIZE = 64
        VIDEO_LENGTH = 16

        for geometric_info, ch in zip(["depth", "optical-flow"], [1, 2]):
            # geometric-information generator
            ggen = GeometricVideoGenerator(
                dim_z_content=30,
                dim_z_motion=10,
                channel=ch,
                geometric_info=geometric_info,
                video_length=VIDEO_LENGTH,
            )

            # color video generator
            cgen = ColorVideoGenerator(
                in_ch=ch,
                dim_z=10,
                geometric_info=geometric_info,
            )

            # sample with several (num, batchsize) combinations
            for num, batchsize in [(3, 1), (3, 2), (3, 4)]:
                xg, xc = generate_samples(ggen, cgen, num, batchsize)

                expected = (num, 3, VIDEO_LENGTH, IMAGE_SIZE, IMAGE_SIZE)
                for videos in (xg, xc):
                    self.assertTrue(isinstance(videos, np.ndarray))
                    self.assertEqual(videos.dtype, np.uint8)
                    self.assertEqual(len(videos), num)
                    self.assertEqual(videos.shape, expected)
Esempio n. 9
0
def main():
    """
    Load a pretrained transformer for text generation, optionally emit
    sample generations, build a task-specific trainer, and train.

    The single CLI argument is the path to a config.json file that is
    parsed into a ``TrainingConfig`` by ``HfArgumentParser``.

    Raises
    ------
    ValueError
        If ``args.do_train`` is set but ``args.task`` matched no known
        task (previously this surfaced as an opaque NameError).
    """
    assert sys.argv[1].endswith(".json"), \
        "Need to specify the config.json file."

    parser = HfArgumentParser(TrainingConfig)
    args = parser.parse_json_file(os.path.abspath(sys.argv[1]))[0]

    # On the server, load from a local checkpoint directory; otherwise
    # pull from the hub into a shared cache directory.
    if args.server:
        config = AutoConfig.from_pretrained(args.model_name_or_path)
        tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)
    else:
        config = AutoConfig.from_pretrained(args.model_name_or_path,
                                            cache_dir=args.cached_dir)
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path,
                                                  cache_dir=args.cached_dir)

    # specify configuration
    config.num_labels = tokenizer.vocab_size
    config.type_vocab_size = args.type_vocab_size
    config.eos_token_id = 102  # presumably [SEP] in a BERT vocab — confirm
    config.return_dict = True

    model = MODEL_CLASSES[args.model_name_or_path](config)
    state_dict = torch.load(os.path.join(args.model_name_or_path, 'pytorch_model.bin')) \
        if args.server else load_cached_hf_parameters(args.model_name_or_path, args.cached_dir)

    # strict=False: the checkpoint may not cover every parameter of the
    # generation model; missing ones keep their fresh initialisation.
    model.load_state_dict(state_dict, strict=False)
    model.tie_weights()

    if args.do_test:

        from util import generate_samples, PROMPT_TEXT

        generate_samples(
            model,
            tokenizer,
            PROMPT_TEXT,
        )

    trainer = None  # assigned by exactly one task branch below

    if args.task == 'thuc':

        from bert_seq2seq.THUCNews import collate_fn
        import datasets

        # Reuse tokenized datasets from disk when both cache paths exist.
        if args.thuc_cache_dir and os.path.exists(
                args.thuc_cache_dir[0]) and os.path.exists(
                    args.thuc_cache_dir[1]):
            tokenized_train = datasets.load_from_disk(args.thuc_cache_dir[0])
            tokenized_test = datasets.load_from_disk(args.thuc_cache_dir[1])
        else:
            dataset = datasets.load_dataset('thuc_datasets.py',
                                            data_path=args.thuc_data_path,
                                            split='train')
            dataset = dataset.train_test_split(test_size=0.05, seed=42)

            train_dataset = dataset['train']
            test_dataset = dataset['test']

            def tokenize(examples):
                # 'content' is the source text, 'title' the target text
                return tokenizer(examples['content'], examples['title'])

            tokenized_train = train_dataset.map(
                tokenize,
                remove_columns=train_dataset.column_names,
                batched=True,
                num_proc=args.dataloader_num_workers,
            )

            tokenized_test = test_dataset.map(
                tokenize,
                remove_columns=test_dataset.column_names,
                batched=True,
                num_proc=args.dataloader_num_workers,
            )

            tokenized_train.save_to_disk('thuc.train.cache')
            tokenized_test.save_to_disk('thuc.test.cache')

        trainer = TextGenTrainer(
            model=model,
            args=args,
            train_dataset=tokenized_train,
            data_collator=collate_fn,
            tokenizer=tokenizer,
        )

    elif args.task == 'debatepedia':
        qsumm_dataset = QsummDataSet(args.qsumm_data_path, tokenizer)

        args.evaluation_strategy = 'epoch'
        trainer = Trainer(
            model=model,
            args=args,
            train_dataset=qsumm_dataset.train,
            eval_dataset=qsumm_dataset.valid,
            tokenizer=tokenizer,
        )

    if args.do_train:
        # Fail fast with a clear message instead of the NameError the
        # original raised when args.task selected no trainer.
        if trainer is None:
            raise ValueError('unknown task: {!r}'.format(args.task))
        trainer.train()
        trainer.save_model()
Esempio n. 10
0
    def log_samples(
        self, ggen: GeometricVideoGenerator, cgen: ColorVideoGenerator, iteration: int
    ):
        """
        Log generator samples into TensorBoard.

        Fake samples are drawn from the generators and real samples are
        taken from ``self.dataloader_log``; for both, a first-channel
        histogram and a grid video (geometry and color concatenated
        side by side) are logged.

        Parameters
        ----------
        ggen : GeometricVideoGenerator
            The geometric information video generator.

        cgen : ColorVideoGenerator
            The color video generator.

        iteration : int
            Iteration count (used as the logging step).
        """
        # NOTE(review): generators are switched to eval mode here and not
        # restored; confirm the caller puts them back into train mode.
        ggen.eval()
        cgen.eval()

        # fake samples
        # generate samples (dtype: int, axis: (B, C, T, H, W))
        xg_fake, xc_fake = util.generate_samples(ggen, cgen, self.num_log, self.num_log)

        # log histogram of channel 0 of the fake samples
        self.logger.tf_log_histgram(xg_fake[:, 0], "geospace_fake", iteration)
        self.logger.tf_log_histgram(xc_fake[:, 0], "colorspace_fake", iteration)

        # make a grid video; geometry and color are concatenated along
        # the last (width) axis
        xg_fake = util.make_video_grid(xg_fake, self.rows_log, self.cols_log)
        xc_fake = util.make_video_grid(xc_fake, self.rows_log, self.cols_log)
        x_fake = np.concatenate([xg_fake, xc_fake], axis=-1)  # concat

        # log fake samples (dtype: int, axis: (B, T, C, H, W))
        x_fake = x_fake.transpose(0, 2, 1, 3, 4)
        self.logger.tf_log_video(x_fake, "fake_samples", iteration)

        # real samples
        # take next batch: (dtype: float, axis: (B, C, T, H, W))
        batch = next(iter(self.dataloader_log))
        xg_real, xc_real = batch[self.geometric_info], batch["color"]

        # convert xc to np.ndarray: (dtype: int, axis: (B, C, T, H, W))
        xc_real = util.videos_to_numpy(xc_real)

        # convert xg to np.ndarray: (dtype: int, axis: (B, C, T, H, W))
        xg_real = xg_real.data.cpu().numpy()
        xg_real = util.geometric_info_in_color_format(xg_real, ggen.geometric_info)

        # log histogram of channel 0 of the real samples
        self.logger.tf_log_histgram(xg_real[:, 0], "geospace_real", iteration)
        self.logger.tf_log_histgram(xc_real[:, 0], "colorspace_real", iteration)

        # make a grid video
        xc_real = util.make_video_grid(xc_real, self.rows_log, self.cols_log)
        xg_real = util.make_video_grid(xg_real, self.rows_log, self.cols_log)
        x_real = np.concatenate([xg_real, xc_real], axis=-1)

        # log real samples (dtype: int, axis: (B, T, C, H, W))
        x_real = x_real.transpose(0, 2, 1, 3, 4)
        self.logger.tf_log_video(x_real, "real_samples", iteration)
Esempio n. 11
0
def train(pretrain=True, b_test=False):
	"""
	Build the TF1 graph of the LSTM/auto-encoder anomaly-detection GAN and
	either train it or only score samples.

	Parameters
	----------
	pretrain : bool
		If True, pretrain a stacked auto-encoder on inlier samples and use
		its weights/biases to initialise the G encoder.
	b_test : bool
		If False: train, then score a few inlier/outlier samples and save
		the checkpoint. If True: restore and score 100 sample pairs only.
	"""
	# Generate test sample
	inlier_sample, outlier_sample = util.generate_samples(150, 100000, 100)

	# Pretraining Stacked Auto Encoder
	if pretrain:
		stacked_auto_encoder_weights, stacked_auto_encoder_biases = g_encoder_pretrain(inlier_sample)
	else:
		stacked_auto_encoder_weights = None
		stacked_auto_encoder_biases = None

	# Placeholder switching batch norm between train and inference mode.
	bn_train = tf.placeholder(tf.bool)

	lstm_input = tf.placeholder(dtype=tf.float32, shape=[None, lstm_sequence_length, input_feature_dim])
	#lstm_linear_transform_weight = tf.Variable(
	# tf.truncated_normal([lstm_linear_transform_input_dim, lstm_z_sequence_dim], stddev=0.1, dtype=tf.float32))
	#lstm_linear_transform_bias = tf.Variable(tf.zeros([lstm_z_sequence_dim], dtype=tf.float32))

	g_encoder_input = tf.placeholder(dtype=tf.float32, shape=[None, input_feature_dim])
	d_input = tf.placeholder(dtype=tf.float32, shape=[None, input_feature_dim])

	# Z local: Encoder latent output
	z_local = g_encoder_network(g_encoder_input, pretrained=pretrain,
	weights=stacked_auto_encoder_weights, biases=stacked_auto_encoder_biases,
	activation='swish', scope='G_Encoder', bn_phaze=bn_train)

	# Z seq: LSTM sequence latent output
	z_seq = lstm_network(lstm_input, scope='LSTM')

	# Combined latent code: sequence latent concatenated with local latent.
	z_enc = tf.concat([z_seq, z_local], 1)

	# Reconstructed output
	decoder_output = g_decoder_network(z_enc, activation='swish', scope='G_Decoder', bn_phaze=bn_train)

	# Re-encoding the reconstructed output (for the conceptual loss)
	z_renc = r_encoder_network(decoder_output, activation='swish', scope='R_Encoder', bn_phaze=bn_train)

	# Discriminator output
	# - feature real/fake: Feature matching approach. Returns last feature layer
	feature_real, d_real, d_real_output = discriminator(d_input, activation='relu', scope='Discriminator', bn_phaze=bn_train)
	feature_fake, d_fake, d_fake_output = discriminator(decoder_output, activation='relu', scope='Discriminator', reuse=True, bn_phaze=bn_train)

	d_real_output = tf.squeeze(d_real_output)
	d_fake_output = tf.squeeze(d_fake_output)

	# Trainable variable lists, grouped per scope
	d_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator')
	r_encoder_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='R_Encoder')
	g_encoder_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='G_Encoder')
	# print('encoder vars:', g_encoder_var)
	g_decoder_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='G_Decoder')
	lstm_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='LSTM')

	generator_vars = lstm_var + g_encoder_var + g_decoder_var
	conceptual_vars = g_decoder_var + r_encoder_var

	# Joint loss term
	residual_loss = get_residual_loss(decoder_output, g_encoder_input, type='l1', gamma=1.0)
	feature_matching_loss = get_feature_matching_loss(feature_fake, feature_real, type='l2', gamma=1.0)
	alpha = 0.5
	generator_loss = alpha * residual_loss + (1-alpha) * feature_matching_loss
	conceptual_loss = get_conceptual_loss(z_renc, z_enc, type='l2', gamma=1.0)
	discriminator_loss = get_discriminator_loss(d_real, d_fake, type='ce', gamma=1.0)
	# discriminator_loss = get_discriminator_loss(d_real, d_fake, type='wgan', gamma=1.0)

	# For wgan loss.
	d_weight_clip = [p.assign(tf.clip_by_value(p, -0.01, 0.01)) for p in d_var]

	# training operation
	d_optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(discriminator_loss, var_list=d_var)
	# d_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(discriminator_loss, var_list=d_var)
	# g_res_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(residual_loss, var_list=generator_vars)
	# g_feature_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(feature_matching_loss,
	# var_list=generator_vars)
	g_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(generator_loss, var_list=generator_vars)
	r_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(conceptual_loss, var_list=[conceptual_vars, generator_vars])

	with tf.Session() as sess:
		sess.run(tf.global_variables_initializer())

		# Best-effort restore of a previous checkpoint.
		# NOTE(review): bare except swallows every error (including typos);
		# narrowing to tf.errors.NotFoundError/ValueError would be safer.
		try:
			saver = tf.train.Saver()
			saver.restore(sess, './model/SE_ADNet_L.ckpt')
		except:
			print('Restore failed')

		if b_test == False:
			num_itr = int(len(inlier_sample)/batch_size)
			num_epoch = 4
			early_stop = False
			f_loss_list = []

			for epoch in range(num_epoch):
				for itr in range(num_itr):
					batch_x, batch_seq = util.get_sequence_batch(inlier_sample, lstm_sequence_length, batch_size)

					# wgan
					#_, _, d_loss = sess.run([d_optimizer, d_weight_clip, discriminator_loss],
					# feed_dict={d_input: batch_x, g_encoder_input: batch_x, lstm_input: batch_seq, bn_train: True})

					# gan cross entropy. 2(discriminator):1(generator) training.
					sess.run([d_optimizer, discriminator_loss], feed_dict={d_input: batch_x, g_encoder_input: batch_x, lstm_input: batch_seq, bn_train: True})

					batch_x, batch_seq = util.get_sequence_batch(inlier_sample, lstm_sequence_length, batch_size)

					_, d_loss = sess.run([d_optimizer, discriminator_loss], feed_dict={d_input: batch_x, g_encoder_input: batch_x, lstm_input: batch_seq, bn_train:True})

					_, r_loss, f_loss = sess.run([g_optimizer, residual_loss, feature_matching_loss], feed_dict={d_input: batch_x, g_encoder_input: batch_x, lstm_input: batch_seq, bn_train: True})

					# Test.
					#_, r_loss = sess.run([g_optimizer, residual_loss],
					# feed_dict={d_input: batch_x, g_encoder_input: batch_x, lstm_input: batch_seq})

					_, c_loss = sess.run([r_optimizer, conceptual_loss], feed_dict={g_encoder_input: batch_x, lstm_input: batch_seq, bn_train: True})

					if itr % 200 == 0:
						print('epoch: {0}, itr: {1}, d_loss: {2}, r_loss: {3}, c_loss: {4}, f_loss: {5}'.format(epoch, itr, d_loss, r_loss, c_loss, f_loss))
						f_loss_list.append(f_loss)
						if len(f_loss_list) > 10:
							# NOTE(review): this assigns False, so early
							# stopping can never trigger; it looks like it
							# was meant to be `early_stop = True` — confirm.
							if sum(f_loss_list[-5:])/5 < 0.002:
								early_stop = False

					if early_stop:
						break
				if early_stop:
					break

			# Quick sanity scoring on 10 outlier/inlier pairs.
			for i in range(10):
				batch_x, batch_seq = util.get_sequence_batch(outlier_sample, lstm_sequence_length, 1)

				d_loss, r_loss, f_loss, c_loss = sess.run([d_fake_output, residual_loss, feature_matching_loss, conceptual_loss],
				feed_dict={d_input: batch_x, g_encoder_input: batch_x, lstm_input: batch_seq, bn_train: False})

				alpha = 1.0
				beta = 100
				# NOTE(review): the outlier score weights (1 - d_loss) by 100
				# here but by 10 everywhere else — confirm which is intended.
				score = (1.0 - d_loss) * 100 + alpha * r_loss + beta * c_loss
				print('outlier Anomaly Score:', score, ', d loss:', d_loss, ', r loss:', r_loss, ', c loss:', c_loss)

				batch_x, batch_seq = util.get_sequence_batch(inlier_sample, lstm_sequence_length, 1)

				d_loss, r_loss, f_loss, c_loss = sess.run([d_fake_output, residual_loss, feature_matching_loss, conceptual_loss],
				feed_dict={d_input: batch_x, g_encoder_input: batch_x,
				lstm_input: batch_seq, bn_train: False})
				score = (1.0 - d_loss) * 10 + alpha * r_loss + beta * c_loss
				print('inlier Anomaly Score:', score, ', d loss:', d_loss, ', r loss:', r_loss, ', c loss:', c_loss)

			# NOTE(review): bare except hides the real save error; narrow it.
			try:
				saver.save(sess, './model/SE_ADNet_L.ckpt')
			except:
				print('Save failed')
		else:
			# Test-only mode: score 100 outlier/inlier pairs.
			for i in range(100):
				batch_x, batch_seq = util.get_sequence_batch(outlier_sample, lstm_sequence_length, 1)

				# batch_x = np.ones_like(batch_x)

				d_loss, r_loss, f_loss, c_loss = sess.run([d_fake_output, residual_loss, feature_matching_loss, conceptual_loss],
				feed_dict={d_input: batch_x, g_encoder_input: batch_x,
				lstm_input: batch_seq, bn_train: False})
				alpha = 1.0
				beta = 100

				score = (1.0 - d_loss) * 10 + alpha * r_loss + beta * c_loss
				print('outlier Anomaly Score:', score, ', d loss:', d_loss, ', r loss:', r_loss, ', c loss:', c_loss)

				batch_x, batch_seq = util.get_sequence_batch(inlier_sample, lstm_sequence_length, 1)

				d_loss, r_loss, f_loss, c_loss = sess.run([d_fake_output, residual_loss, feature_matching_loss, conceptual_loss],
				feed_dict={d_input: batch_x, g_encoder_input: batch_x,
				lstm_input: batch_seq, bn_train: False})

				score = (1.0 - d_loss) * 10 + alpha * r_loss + beta * c_loss

				print('inlier Anomaly Score:', score, ', d loss:', d_loss , ', r loss:', r_loss, ', c loss:', c_loss)