def train(self, epoch):
        """Train the model for one epoch.

        Args:
            epoch: current epoch index (used for progress logging only).

        Returns:
            Tuple ``(top1_error_avg, loss_avg, top5_error_avg)`` averaged over
            the processed batches, or ``None`` when the global iteration
            budget (``self.settings.nIters``) is already exhausted.
        """
        # Respect the global iteration budget: skip the whole epoch if done.
        if self.iteration >= self.settings.nIters:
            return
        batch_time = utils.AverageMeter()      # per-batch compute time
        data_load_time = utils.AverageMeter()  # per-batch data-loading time
        losses = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        iters = len(self.train_loader)
        # switch to train mode
        self.model.train()

        end = time.time()
        for batch_idx, (data, target) in enumerate(self.train_loader):
            # The LR schedule is driven by the global iteration counter,
            # not the epoch index.
            self.update_lr(self.iteration)

            # measure data loading time
            data_load_time.update(time.time() - end)
            end = time.time()
            data, target = data.cuda(), target.cuda()
            output, loss = self.forward(data, target)
            # compute gradient and do SGD step
            self.backward(loss)
            # measure accuracy and record loss
            single_error, single_loss, single5_error = utils.compute_singlecrop(
                outputs=output, labels=target,
                loss=loss, top5_flag=True)
            # Normalize to per-sample rates -- assumes compute_singlecrop
            # returns summed error counts for the batch (TODO confirm).
            single_error /= target.size(0)
            single5_error /= target.size(0)

            losses.update(single_loss, data.size(0))
            top1.update(single_error, data.size(0))
            top5.update(single5_error, data.size(0))

            self.tensorboard_logger.scalar_summary(
                'train_loss', single_loss, self.iteration)
            self.tensorboard_logger.scalar_summary(
                'train_top1error', single_error, self.iteration)
            self.tensorboard_logger.scalar_summary(
                'train_top5error', single5_error, self.iteration)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            self.iteration = self.iteration + 1

            utils.print_result(epoch, self.settings.nEpochs, batch_idx + 1,
                               iters, self.lr_master.lr, data_load_time.avg, batch_time.avg,
                               single_error, single_loss, top5error=single5_error,
                               mode="Train",
                               logger=self.logger)

            # Stop when the iteration budget is hit; nEpochs == 1 is treated
            # as a single-batch smoke-test run.
            if self.iteration >= self.settings.nIters or self.settings.nEpochs == 1:
                break

        self.logger.info(
            "|===>Training Error: {:.4f} Loss: {:.4f}, Top5 Error: {:.4f}".format(top1.avg, losses.avg, top5.avg))
        return top1.avg, losses.avg, top5.avg
# ---- Example 2 ----
    def train(self, epoch):
        """Train the model for one epoch.

        Args:
            epoch: current epoch index (used for logging and for the
                ratio/threshold schedule).

        Returns:
            Tuple ``(top1_error_avg, loss_avg, top5_error_avg)``, or ``None``
            when the iteration budget (``self.n_iters``) is already exhausted.
        """
        if self.iteration >= self.n_iters:
            return
        batch_time = utils.AverageMeter()      # per-batch compute time
        data_load_time = utils.AverageMeter()  # per-batch data-loading time
        losses = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        iters = len(self.train_loader)
        # switch to train mode
        self.model.train()
        end = time.time()
        for batch_idx, (data, target) in enumerate(self.train_loader):
            # LR schedule is driven by the global iteration counter.
            self.update_lr(self.iteration)

            # measure data loading time
            data_load_time.update(time.time() - end)
            end = time.time()
            # NOTE(review): Variable is a no-op wrapper on modern PyTorch
            # (deprecated since 0.4).
            data_var, target_var = Variable(
            data.cuda()), Variable(target.cuda())
            output, loss = self.forward(data_var, target_var)
            # compute gradient and do SGD step
            self.backward(loss)
            # measure accuracy and record loss
            single_error, single_loss, single5_error = utils.compute_singlecrop(outputs=output, labels=target_var,
                                                                                loss=loss, top5_flag=True)
            # Normalize to per-sample rates -- assumes compute_singlecrop
            # returns summed error counts for the batch (TODO confirm).
            single_error /= target_var.size(0)
            single5_error /= target_var.size(0)

            losses.update(single_loss, data.size(0))
            top1.update(single_error, data.size(0))
            top5.update(single5_error, data.size(0))

            self.logger.scalar_summary(
                'train_loss', single_loss, self.iteration)
            self.logger.scalar_summary(
                'train_top1error', single_error, self.iteration)
            self.logger.scalar_summary(
                'train_top5error', single5_error, self.iteration)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            self.iteration = self.iteration + 1

            utils.print_result(epoch, self.n_epochs, batch_idx + 1,
                               iters, self.lr_master.lr, data_load_time.avg, batch_time.avg,
                               single_error, single_loss, top5error=single5_error,
                               mode="Train")

            if self.iteration >= self.n_iters or self.n_epochs == 1:
                break

            # NOTE(review): presumably ramps a ratio toward 0.9 over the first
            # 16 epochs before updating the model threshold -- confirm
            # compute_ratio semantics.  Skipped on the breaking iteration,
            # since the break above fires first.
            r = self.compute_ratio(0.9, epoch*iters+batch_idx+1, 16*iters)
            self.update_threshold(r)

        print("|===>Training Error: %.4f Loss: %.4f, Top5 Error:%.4f" % (top1.avg, losses.avg, top5.avg))
        return top1.avg, losses.avg, top5.avg
# ---- Example 3 ----
	def test(self, epoch):
		"""Evaluate the model on the test set under ``torch.no_grad()``.

		Args:
			epoch: current epoch index (used for progress printing only).

		Returns:
			Tuple ``(top1_error_avg, loss_avg, top5_error_avg)``.
		"""
		top1_error = utils.AverageMeter()
		top1_loss = utils.AverageMeter()
		top5_error = utils.AverageMeter()
		
		self.model.eval()
		# NOTE(review): the teacher is put in eval mode but not used in this
		# loop -- presumably to keep its BN/hook state consistent; confirm.
		self.model_teacher.eval()
		
		iters = len(self.test_loader)
		start_time = time.time()
		end_time = start_time

		with torch.no_grad():
			for i, (images, labels) in enumerate(self.test_loader):
				start_time = time.time()
				
				labels = labels.cuda()
				images = images.cuda()
				output = self.model(images)

				# Dummy loss: no criterion is evaluated during testing; a
				# constant is passed to satisfy compute_singlecrop's signature.
				loss = torch.ones(1)
				self.mean_list.clear()
				self.var_list.clear()

				single_error, single_loss, single5_error = utils.compute_singlecrop(
					outputs=output, loss=loss,
					labels=labels, top5_flag=True, mean_flag=True)

				top1_error.update(single_error, images.size(0))
				top1_loss.update(single_loss, images.size(0))
				top5_error.update(single5_error, images.size(0))
				
				end_time = time.time()
		
		# 'i' is the index of the last batch from the loop above.
		print(
			"[Epoch %d/%d] [Batch %d/%d] [acc: %.4f%%]"
			% (epoch + 1, self.settings.nEpochs, i + 1, iters, (100.00-top1_error.avg))
		)
		
		self.scalar_info['testing_top1error'] = top1_error.avg
		self.scalar_info['testing_top5error'] = top5_error.avg
		self.scalar_info['testing_loss'] = top1_loss.avg
		if self.tensorboard_logger is not None:
			for tag, value in self.scalar_info.items():
				self.tensorboard_logger.scalar_summary(tag, value, self.run_count)
			self.scalar_info = {}
		self.run_count += 1

		return top1_error.avg, top1_loss.avg, top5_error.avg
# ---- Example 4 ----
    def test(self, epoch):
        """Evaluate the model on the test set for one epoch.

        Supports single-crop and ten-crop evaluation, selected by
        ``self.settings.tenCrop``.

        Args:
            epoch: current epoch index (used for progress printing only).

        Returns:
            Tuple ``(top1_error_avg, loss_avg, top5_error_avg)``.
        """
        top1_error = utils.AverageMeter()
        top1_loss = utils.AverageMeter()
        top5_error = utils.AverageMeter()

        self.model.eval()

        iters = len(self.test_loader)
        start_time = time.time()
        end_time = start_time

        for i, (images, labels) in enumerate(self.test_loader):
            start_time = time.time()
            data_time = start_time - end_time

            labels = labels.cuda()
            # NOTE(review): volatile=True was removed in PyTorch 0.4; this
            # loop should eventually be wrapped in torch.no_grad() instead.
            labels_var = Variable(labels, volatile=True)
            if self.settings.tenCrop:
                image_size = images.size()
                # Fold the 10 crops into the batch dimension.
                # Fixed: use integer division so view() receives int sizes on
                # Python 3 (true division returns a float).
                images = images.view(image_size[0] * 10, image_size[1] // 10,
                                     image_size[2], image_size[3])
                images_tuple = images.split(image_size[0])
                output = None
                for img in images_tuple:
                    if self.settings.nGPU == 1:
                        img = img.cuda()
                    img_var = Variable(img, volatile=True)
                    temp_output, _ = self.forward(img_var)
                    if output is None:
                        output = temp_output.data
                    else:
                        output = torch.cat((output, temp_output.data))
                single_error, single_loss, single5_error = utils.compute_tencrop(
                    outputs=output, labels=labels_var)
            else:
                if self.settings.nGPU == 1:
                    images = images.cuda()
                images_var = Variable(images, volatile=True)
                output, loss = self.forward(images_var, labels_var)

                single_error, single_loss, single5_error = utils.compute_singlecrop(
                    outputs=output,
                    loss=loss,
                    labels=labels_var,
                    top5_flag=True,
                    mean_flag=True)

            # NOTE(review): in the ten-crop path `images` was reassigned to
            # the folded view, so this weight is batch*10 -- confirm intended.
            top1_error.update(single_error, images.size(0))
            top1_loss.update(single_loss, images.size(0))
            top5_error.update(single5_error, images.size(0))

            end_time = time.time()
            iter_time = end_time - start_time

            utils.print_result(epoch,
                               self.settings.nEpochs,
                               i + 1,
                               iters,
                               self.lr_master.lr,
                               data_time,
                               iter_time,
                               single_error,
                               single_loss,
                               top5error=single5_error,
                               mode="Test")

            # nEpochs == 1 is treated as a smoke-test run: stop after 50 batches.
            if self.settings.nEpochs == 1 and i + 1 >= 50:
                # Fixed: Python 2 print statement -> print() function.
                print("|===>Program testing for only 50 iterations")
                break

        self.scalar_info['testing_top1error'] = top1_error.avg
        self.scalar_info['testing_top5error'] = top5_error.avg
        self.scalar_info['testing_loss'] = top1_loss.avg
        if self.logger is not None:
            for tag, value in self.scalar_info.items():
                self.logger.scalar_summary(tag, value, self.run_count)
            self.scalar_info = {}
        self.run_count += 1
        # Fixed: Python 2 print statement -> print() function.
        print("|===>Testing Error: %.4f Loss: %.4f, Top5 Error: %.4f" % (
            top1_error.avg, top1_loss.avg, top5_error.avg))
        return top1_error.avg, top1_loss.avg, top5_error.avg
# ---- Example 5 ----
    def train(self, epoch):
        """Train the model for one epoch.

        Args:
            epoch: current epoch index (drives the per-epoch LR schedule).

        Returns:
            Tuple ``(top1_error_avg, loss_avg, top5_error_avg)``.
        """
        top1_error = utils.AverageMeter()
        top1_loss = utils.AverageMeter()
        top5_error = utils.AverageMeter()

        iters = len(self.train_loader)
        # LR is scheduled per epoch here (not per iteration).
        self.update_lr(epoch)
        self.model.train()

        start_time = time.time()
        end_time = start_time

        for i, (images, labels) in enumerate(self.train_loader):
            start_time = time.time()
            data_time = start_time - end_time

            # if we use multi-gpu, its more efficient to send input
            # to different gpu, instead of send it to the master gpu.

            if self.settings.nGPU == 1:
                images = images.cuda()
            labels = labels.cuda()
            images_var = Variable(images)
            labels_var = Variable(labels)

            output, loss = self.forward(images_var, labels_var)
            self.backward(loss)

            single_error, single_loss, single5_error = utils.compute_singlecrop(
                outputs=output,
                labels=labels_var,
                loss=loss,
                top5_flag=True,
                mean_flag=True)

            top1_error.update(single_error, images.size(0))
            top1_loss.update(single_loss, images.size(0))
            top5_error.update(single5_error, images.size(0))

            end_time = time.time()
            iter_time = end_time - start_time

            utils.print_result(epoch,
                               self.settings.nEpochs,
                               i + 1,
                               iters,
                               self.lr_master.lr,
                               data_time,
                               iter_time,
                               single_error,
                               single_loss,
                               top5error=single5_error,
                               mode="Train")

            # nEpochs == 1 is treated as a smoke-test run: stop after 50 batches.
            if self.settings.nEpochs == 1 and i + 1 >= 50:
                # Fixed: Python 2 print statement -> print() function.
                print("|===>Program testing for only 50 iterations")
                break

        self.scalar_info['training_top1error'] = top1_error.avg
        self.scalar_info['training_top5error'] = top5_error.avg
        self.scalar_info['training_loss'] = top1_loss.avg

        if self.logger is not None:
            for tag, value in self.scalar_info.items():
                self.logger.scalar_summary(tag, value, self.run_count)
            self.scalar_info = {}

        # Fixed: Python 2 print statement -> print() function.
        print("|===>Training Error: %.4f Loss: %.4f, Top5 Error:%.4f" %
              (top1_error.avg, top1_loss.avg, top5_error.avg))

        return top1_error.avg, top1_loss.avg, top5_error.avg
# ---- Example 6 ----
    def test(self, epoch):
        """Evaluate every network segment (plus its auxiliary classifier).

        Args:
            epoch: current epoch index (used for progress printing only).

        Returns:
            Tuple ``(top1_error_list, top1_loss_list, top5_error_list)``
            re-indexed by ``self._convert_results`` (the index order is
            inverted for easier comparison).
        """
        top1_error = []
        top5_error = []
        top1_loss = []
        num_segments = len(self.segments)
        # One meter per segment for each tracked statistic.
        for i in range(num_segments):
            self.segments[i].eval()
            self.auxfc[i].eval()
            top1_error.append(utils.AverageMeter())
            top5_error.append(utils.AverageMeter())
            top1_loss.append(utils.AverageMeter())

        iters = len(self.test_loader)

        start_time = time.time()
        end_time = start_time
        for i, (images, labels) in enumerate(self.test_loader):
            start_time = time.time()
            data_time = start_time - end_time

            # if we use multi-gpu, its more efficient to send input to different gpu,
            # instead of send it to the master gpu.
            if self.settings.nGPU == 1:
                images = images.cuda()
            # NOTE(review): volatile=True was removed in PyTorch 0.4; prefer
            # wrapping the loop in torch.no_grad().
            images_var = Variable(images, volatile=True)
            labels = labels.cuda()
            labels_var = Variable(labels, volatile=True)

            # forward
            outputs, losses = self.forward(images_var, labels_var)

            # compute loss and error rate (one entry per segment)
            single_error, single_loss, single5_error = utils.compute_singlecrop(
                outputs=outputs, labels=labels_var,
                loss=losses, top5_flag=True, mean_flag=True)

            for j in range(num_segments):
                top1_error[j].update(single_error[j], images.size(0))
                top5_error[j].update(single5_error[j], images.size(0))
                # Fixed: single_loss[j] was being *called* (single_loss[j]()),
                # which raises TypeError; sibling train/test loops use the
                # value directly.
                top1_loss[j].update(single_loss[j], images.size(0))

            end_time = time.time()
            iter_time = end_time - start_time

            utils.print_result(epoch, self.settings.nEpochs, i + 1,
                               iters, self.lr_master.lr, data_time, iter_time,
                               single_error,
                               single_loss,
                               mode="Test",)

        # warning: for better comparison, we inverse the index of data
        top1_error_list, top1_loss_list, top5_error_list = self._convert_results(
            top1_error=top1_error, top1_loss=top1_loss, top5_error=top5_error)
        if self.logger is not None:
            length = num_segments - 1
            for i in range(num_segments):
                self.logger.scalar_summary(
                    "test_top1_error_%d" % (length - i), top1_error[i].avg.item(), self.run_count)
                self.logger.scalar_summary(
                    "test_top5_error_%d" % (length - i), top5_error[i].avg.item(), self.run_count)
                self.logger.scalar_summary(
                    "test_loss_%d" % (length - i), top1_loss[i].avg, self.run_count)
        self.run_count += 1

        # Fixed: Python 2 print statement -> print() function.
        print("|===>Testing Error: %.4f/%.4f, Loss: %.4f" % (
            top1_error[-1].avg, top5_error[-1].avg, top1_loss[-1].avg))
        return top1_error_list, top1_loss_list, top5_error_list
# ---- Example 7 ----
    def train(self, epoch):
        """Train the model for one epoch, accumulating raw running sums.

        Unlike the AverageMeter-based variants, this version keeps plain
        running sums and normalizes once at the end of the epoch.

        Args:
            epoch: current epoch index (drives the per-epoch LR schedule).

        Returns:
            Tuple ``(top1_error, top1_loss, top5_error)`` where the errors
            are per-sample rates and the loss is the per-batch mean.
        """
        top1_error = 0
        top1_loss = 0
        top5_error = 0
        images_count = 0  # total samples seen this epoch
        iters = len(self.train_loader)

        # LR is scheduled per epoch here (not per iteration).
        self.update_lr(epoch)
        self.model.train()

        start_time = time.time()
        end_time = start_time

        for i, (images, labels) in enumerate(self.train_loader):
            start_time = time.time()
            data_time = start_time - end_time

            # if we use multi-gpu, its more efficient to send input to different gpu, instead of send it to the master gpu.
            if self.ngpu == 1:
                images = images.cuda()
            labels = labels.cuda()
            images_var = Variable(images)
            labels_var = Variable(labels)

            # forward_ts = time.time()
            output, loss = self.forward(images_var, labels_var)
            # forward_te = time.time()
            self.backward(loss)
            # backward_te = time.time()
            # print "forward time: %f, backward time: %f"%(forward_te-forward_ts, backward_te-forward_te)

            # Accumulate raw counts; normalized after the loop.
            single_error, single_loss, single5_error = utils.compute_singlecrop(
                outputs=output, labels=labels_var, loss=loss, top5_flag=True)
            top1_error += single_error
            top1_loss += single_loss
            top5_error += single5_error
            end_time = time.time()
            iter_time = end_time - start_time

            images_count += images.size(0)

            total_time, left_time = utils.print_result(
                epoch,
                self.n_epoch,
                i + 1,
                iters,
                self.lr_master.lr,
                data_time,
                iter_time,
                single_error / images.size(0),
                single_loss,
                top5error=single5_error / images.size(0),
                mode="Train")

            # n_epoch == 1 is treated as a smoke-test run: stop after 50 batches.
            if self.n_epoch == 1 and i + 1 >= 50:
                print("|===>Program testing for only 50 iterations")
                break

        # Loss is averaged per batch, errors per sample.
        # NOTE(review): if the 50-iteration break fired, dividing the loss by
        # the full `iters` under-reports it -- confirm intended.
        top1_loss /= iters
        top1_error /= images_count
        top5_error /= images_count

        self.scalar_info['training_top1error'] = top1_error
        self.scalar_info['training_top5error'] = top5_error
        self.scalar_info['training_loss'] = top1_loss

        if self.logger is not None:
            for tag, value in self.scalar_info.items():
                self.logger.scalar_summary(tag, value, self.run_count)
            self.scalar_info = {}

        print("|===>Training Error: %.4f Loss: %.4f, Top5 Error:%.4f" %
              (top1_error, top1_loss, top5_error))
        return top1_error, top1_loss, top5_error
# ---- Example 8 ----
    def test(self, epoch):
        """Evaluate the model on the test set (single- or ten-crop).

        Args:
            epoch: current epoch index (used for progress printing only).

        Returns:
            Tuple ``(top1_error, top1_loss, top5_error)`` with errors as
            per-sample rates and loss as the per-batch mean.
        """
        top1_error = 0
        top1_loss = 0
        top5_error = 0
        images_count = 0  # total samples seen

        self.model.eval()

        iters = len(self.test_loader)
        start_time = time.time()
        end_time = start_time

        for i, (images, labels) in enumerate(self.test_loader):
            start_time = time.time()
            data_time = start_time - end_time

            labels = labels.cuda()
            # NOTE(review): volatile=True was removed in PyTorch 0.4; prefer
            # wrapping the loop in torch.no_grad().
            labels_var = Variable(labels, volatile=True)
            if self.ten_crop:
                image_size = images.size()
                # Fold the 10 crops into the batch dimension.
                # Fixed: integer division so view() receives int sizes on
                # Python 3 (true division yields a float).
                images = images.view(image_size[0] * 10, image_size[1] // 10,
                                     image_size[2], image_size[3])
                images_tuple = images.split(image_size[0])
                output = None
                for img in images_tuple:
                    if self.ngpu == 1:
                        img = img.cuda()
                    img_var = Variable(img, volatile=True)
                    temp_output, _ = self.forward(img_var)
                    if output is None:
                        output = temp_output.data
                    else:
                        output = torch.cat((output, temp_output.data))
                # Fixed: `computetencrop` is undefined in this scope; sibling
                # methods call utils.compute_tencrop.
                single_error, single_loss, single5_error = utils.compute_tencrop(
                    outputs=output, labels=labels_var)
            else:
                if self.ngpu == 1:
                    images = images.cuda()
                images_var = Variable(images, volatile=True)
                output, loss = self.forward(images_var, labels_var)

                single_error, single_loss, single5_error = utils.compute_singlecrop(
                    outputs=output,
                    loss=loss,
                    labels=labels_var,
                    top5_flag=True)
            images_count += images.size(0)

            top1_loss += single_loss
            top1_error += single_error
            top5_error += single5_error

            end_time = time.time()
            iter_time = end_time - start_time

            total_time, left_time = utils.print_result(
                epoch,
                self.n_epoch,
                i + 1,
                iters,
                self.lr_master.lr,
                data_time,
                iter_time,
                single_error / images.size(0),
                single_loss,
                top5error=single5_error / images.size(0),
                mode="Test")

            # n_epoch == 1 is treated as a smoke-test run: stop after 50 batches.
            if self.n_epoch == 1 and i + 1 >= 50:
                print("|===>Program testing for only 50 iterations")
                break

        # Loss averaged per batch, errors per sample.
        top1_loss /= iters
        top1_error /= images_count
        top5_error /= images_count

        self.scalar_info['testing_top1error'] = top1_error
        self.scalar_info['testing_top5error'] = top5_error
        self.scalar_info['testing_loss'] = top1_loss
        if self.logger is not None:
            for tag, value in self.scalar_info.items():
                self.logger.scalar_summary(tag, value, self.run_count)
            self.scalar_info = {}
        self.run_count += 1
        print("|===>Testing Error: %.4f Loss: %.4f, Top5 Error: %.4f" %
              (top1_error, top1_loss, top5_error))
        return top1_error, top1_loss, top5_error
# ---- Example 9 ----
    def test(self, epoch):
        """Evaluate every network segment on the test set.

        Accumulates per-segment error/loss sums in numpy arrays and averages
        them over the number of batches.

        Args:
            epoch: current epoch index (used for progress printing only).

        Returns:
            Tuple of numpy arrays ``(top1_error, top1_loss, top5_error)``,
            one entry per segment.
        """
        top1_error = np.zeros(len(self.segments))
        top5_error = np.zeros(len(self.segments))
        top1_loss = np.zeros(len(self.segments))

        for i in range(len(self.segments)):
            self.segments[i].eval()
            self.auxfc[i].eval()
        iters = len(self.test_loader)

        start_time = time.time()
        end_time = start_time
        for i, (images, labels) in enumerate(self.test_loader):
            start_time = time.time()
            data_time = start_time - end_time

            # if we use multi-gpu, its more efficient to send input to different gpu,
            # instead of send it to the master gpu.
            if self.settings.nGPU == 1:
                images = images.cuda()
            # NOTE(review): volatile=True was removed in PyTorch 0.4; prefer
            # wrapping the loop in torch.no_grad().
            images_var = Variable(images, volatile=True)
            labels = labels.cuda()
            labels_var = Variable(labels, volatile=True)

            # forward
            outputs, losses = self.forward(images_var, labels_var)

            # compute loss and error rate (per-batch means, one per segment)
            single_error, single_loss, single5_error = utils.compute_singlecrop(
                outputs=outputs,
                labels=labels_var,
                loss=losses,
                top5_flag=True,
                mean_flag=True)

            top1_loss += single_loss
            top1_error += single_error
            top5_error += single5_error

            end_time = time.time()
            iter_time = end_time - start_time

            utils.print_result(
                epoch,
                self.settings.nEpochs,
                i + 1,
                iters,
                self.lr_master.lr,
                data_time,
                iter_time,
                single_error,
                single_loss,
                mode="Test",
            )

        # Average the per-batch sums over the number of batches.
        top1_loss /= iters
        top1_error /= iters
        top5_error /= iters
        # warning: for better comparison, we inverse the index of data
        if self.logger is not None:
            length = len(top1_error) - 1
            for i, item in enumerate(top1_error):
                self.logger.scalar_summary("test_top1_error_%d" % (length - i),
                                           item, self.run_count)
                self.logger.scalar_summary("test_top5_error_%d" % (length - i),
                                           top5_error[i], self.run_count)
                self.logger.scalar_summary("test_loss_%d" % (length - i),
                                           top1_loss[i], self.run_count)
        self.run_count += 1

        # Fixed: Python 2 print statement -> print() function.
        print("|===>Testing Error: %.4f Loss: %.4f" % (top1_error[-1],
                                                       top1_loss[-1]))
        return top1_error, top1_loss, top5_error
# ---- Example 10 ----
	def test_teacher(self, epoch):
		"""Evaluate the teacher network on the test set.

		Supports single-crop and ten-crop evaluation, selected by
		``self.settings.tenCrop``.

		Args:
			epoch: current epoch index (used for progress printing only).

		Returns:
			Tuple ``(top1_error_avg, loss_avg, top5_error_avg)``.
		"""
		top1_error = utils.AverageMeter()
		top1_loss = utils.AverageMeter()
		top5_error = utils.AverageMeter()

		self.model_teacher.eval()

		iters = len(self.test_loader)
		start_time = time.time()
		end_time = start_time

		with torch.no_grad():
			for i, (images, labels) in enumerate(self.test_loader):
				start_time = time.time()
				data_time = start_time - end_time

				labels = labels.cuda()
				if self.settings.tenCrop:
					image_size = images.size()
					# Fold the 10 crops into the batch dimension.
					# Fixed: integer division so view() receives int sizes on
					# Python 3 (true division yields a float).
					images = images.view(
						image_size[0] * 10, image_size[1] // 10, image_size[2], image_size[3])
					images_tuple = images.split(image_size[0])
					output = None
					for img in images_tuple:
						if self.settings.nGPU == 1:
							img = img.cuda()
						# NOTE(review): volatile=True was removed in PyTorch
						# 0.4 and is redundant inside torch.no_grad().
						img_var = Variable(img, volatile=True)
						temp_output, _ = self.forward(img_var)
						if output is None:
							output = temp_output.data
						else:
							output = torch.cat((output, temp_output.data))
					single_error, single_loss, single5_error = utils.compute_tencrop(
						outputs=output, labels=labels)
				else:
					if self.settings.nGPU == 1:
						images = images.cuda()

					output = self.model_teacher(images)

					# Dummy loss: no criterion is evaluated during testing.
					loss = torch.ones(1)
					self.mean_list.clear()
					self.var_list.clear()

					single_error, single_loss, single5_error = utils.compute_singlecrop(
						outputs=output, loss=loss,
						labels=labels, top5_flag=True, mean_flag=True)

				top1_error.update(single_error, images.size(0))
				top1_loss.update(single_loss, images.size(0))
				top5_error.update(single5_error, images.size(0))

				end_time = time.time()
				iter_time = end_time - start_time

		# 'i' is the index of the last processed batch.
		print(
				"Teacher network: [Epoch %d/%d] [Batch %d/%d] [acc: %.4f%%]"
				% (epoch + 1, self.settings.nEpochs, i + 1, iters, (100.00 - top1_error.avg))
		)

		self.run_count += 1

		return top1_error.avg, top1_loss.avg, top5_error.avg
# ---- Example 11 ----
	def train(self, epoch):
		"""Run one epoch of data-free generator/student training.

		The generator synthesizes images from noise plus random class labels;
		the teacher (kept in eval mode) scores them with a one-hot CE loss and
		a BN-statistics matching loss, and the student is updated from the
		detached teacher outputs after a warm-up period.

		Args:
			epoch: current epoch index.

		Returns:
			Tuple ``(top1_error_avg, loss_avg, top5_error_avg)`` of student
			statistics over the epoch.
		"""
		top1_error = utils.AverageMeter()
		top1_loss = utils.AverageMeter()
		top5_error = utils.AverageMeter()
		fp_acc = utils.AverageMeter()  # teacher accuracy on generated batches

		# Fixed number of generator iterations per epoch (no real data loader).
		iters = 200
		self.update_lr(epoch)

		# Student and teacher stay in eval mode; only the generator trains.
		self.model.eval()
		self.model_teacher.eval()
		self.generator.train()
		
		start_time = time.time()
		end_time = start_time
		
		# Register BN forward hooks once so per-layer batch statistics are
		# captured into self.mean_list / self.var_list.
		if epoch==0:
			for m in self.model_teacher.modules():
				if isinstance(m, nn.BatchNorm2d):
					m.register_forward_hook(self.hook_fn_forward)
		
		for i in range(iters):
			start_time = time.time()
			data_time = start_time - end_time

			# Sample latent noise and random class labels for the generator.
			z = Variable(torch.randn(self.settings.batchSize, self.settings.latent_dim)).cuda()

			# Get labels ranging from 0 to n_classes for n rows
			labels = Variable(torch.randint(0, self.settings.nClasses, (self.settings.batchSize,))).cuda()
			z = z.contiguous()
			labels = labels.contiguous()
			images = self.generator(z, labels)
		
			# Clear hook buffers so the teacher forward below refills them.
			self.mean_list.clear()
			self.var_list.clear()
			output_teacher_batch, output_teacher_1 = self.model_teacher(images, out_feature = True)

			# One hot loss
			loss_one_hot = self.criterion(output_teacher_batch, labels)

			# BN statistic loss: match the batch statistics of the generated
			# images to the teacher's stored running statistics.
			BNS_loss = torch.zeros(1).cuda()

			for num in range(len(self.mean_list)):
				BNS_loss += self.MSE_loss(self.mean_list[num], self.teacher_running_mean[num]) + self.MSE_loss(
					self.var_list[num], self.teacher_running_var[num])

			BNS_loss = BNS_loss / len(self.mean_list)

			# loss of Generator (0.1 weights the BN-statistics term)
			loss_G = loss_one_hot + 0.1 * BNS_loss

			self.backward_G(loss_G)

			# Student forward on detached images/teacher outputs so student
			# gradients do not flow back into the generator.
			output, loss_S = self.forward(images.detach(), output_teacher_batch.detach(), labels)
			
			# Student updates only start after the warm-up epochs.
			if epoch>= self.settings.warmup_epochs:
				self.backward_S(loss_S)


			single_error, single_loss, single5_error = utils.compute_singlecrop(
				outputs=output, labels=labels,
				loss=loss_S, top5_flag=True, mean_flag=True)
			
			top1_error.update(single_error, images.size(0))
			top1_loss.update(single_loss, images.size(0))
			top5_error.update(single5_error, images.size(0))
			
			end_time = time.time()
			
			# Teacher top-1 accuracy on the generated batch.
			gt = labels.data.cpu().numpy()
			d_acc = np.mean(np.argmax(output_teacher_batch.data.cpu().numpy(), axis=1) == gt)

			fp_acc.update(d_acc)

		print(
			"[Epoch %d/%d] [Batch %d/%d] [acc: %.4f%%] [G loss: %f] [One-hot loss: %f] [BNS_loss:%f] [S loss: %f] "
			% (epoch + 1, self.settings.nEpochs, i+1, iters, 100 * fp_acc.avg, loss_G.item(), loss_one_hot.item(), BNS_loss.item(),
			 loss_S.item())
		)

		# Only the last iteration's values are recorded for these scalars.
		self.scalar_info['accuracy every epoch'] = 100 * d_acc
		self.scalar_info['G loss every epoch'] = loss_G
		self.scalar_info['One-hot loss every epoch'] = loss_one_hot
		self.scalar_info['S loss every epoch'] = loss_S

		self.scalar_info['training_top1error'] = top1_error.avg
		self.scalar_info['training_top5error'] = top5_error.avg
		self.scalar_info['training_loss'] = top1_loss.avg
		
		if self.tensorboard_logger is not None:
			for tag, value in list(self.scalar_info.items()):
				self.tensorboard_logger.scalar_summary(tag, value, self.run_count)
			self.scalar_info = {}

		return top1_error.avg, top1_loss.avg, top5_error.avg