Example #1
import os

import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms

import models  # project-local module providing resnet50_DANN
import utils   # project-local module providing SeedlingDataset


def random_test(opt):

    # --------- Prepare Data
    images = []
    images_arr = []
    names = []
    for file in os.listdir(opt.pathDirData + "Resized/"):
        names.append(file)
        image = Image.open(opt.pathDirData + "Resized/" + file).convert("RGB")
        images.append(image)
        images_arr.append(np.asarray(image))

    trans_pipeline = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    processor = utils.SeedlingDataset(images_arr,
                                      opt.pathDirData,
                                      trans_pipeline=trans_pipeline)

    input_arr = []
    for sample in range(0, len(images)):

        image = images[sample]
        input = processor.get_processed(image)
        input = processor.trans_pipeline(input)
        input = torch.reshape(input, (3, 240, 300)).cuda()
        input_arr.append(input)

    input = torch.stack(input_arr).cuda()

    # ---------- Load Model
    test_model = models.resnet50_DANN(opt)
    modelCheckpoint = torch.load(opt.checkpoint)
    print('-' * 100)
    print('-' * 100)
    test_model.load_state_dict(modelCheckpoint['state_dict'])
    test_model = test_model.cuda().eval()  # the inputs are on the GPU, so the model must be too

    output, domain_output = test_model(input, alpha=0)
    output = output.reshape((len(output), 2, 2)).cpu().detach().numpy()
    domain_loss = sum(
        domain_output.cpu().detach().numpy()) / len(domain_output)
    print("Domain loss = " + str(domain_loss))

    for sample in range(0, len(input)):

        img = np.array(images[sample])  # copy: cv2.circle needs a writable array

        color = [(255, 255, 255), (0, 0, 0)]  # left point white, right point black
        for count, point in enumerate(output[sample]):
            point = point.astype(int)
            cv2.circle(img, (int(point[0]), int(point[1])), 5, color[count], -1)

        cv2.imwrite(opt.pathDirData + "Results/" + names[sample], img)
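
A minimal sketch of how random_test might be invoked, assuming the opt object only needs the two fields the function actually reads (pathDirData and checkpoint); the project's real option parser is not shown on this page, so the SimpleNamespace below is purely illustrative:

import os
from types import SimpleNamespace

opt = SimpleNamespace(
    pathDirData="./data/",                    # must contain a Resized/ folder of input images
    checkpoint="./checkpoints/best.pth.tar",  # hypothetical path to a saved checkpoint
)
os.makedirs(opt.pathDirData + "Results/", exist_ok=True)  # random_test writes annotated images here
random_test(opt)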
Example #2
    def testing(self):

        cudnn.benchmark = True

        #-------------------- SETTINGS: LOAD DATA
        test_model = models.resnet50_DANN(self.opt)

        modelCheckpoint = torch.load(self.opt.checkpoint)
        test_model.load_state_dict(modelCheckpoint['state_dict'])
        if self.gpu:
            test_model = test_model.cuda()  # the inputs below are moved to the GPU as well

        #-------------------- SETTINGS: DATASET BUILDERS
        trans_pipeline = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        test_data = utils.dataset_builder(self.opt.pathDirData, 'Test')
        test_set = utils.SeedlingDataset(test_data,
                                         self.opt.pathDirData,
                                         trans_pipeline=trans_pipeline,
                                         normalize=True)
        test_loader = DataLoader(test_set,
                                 batch_size=self.opt.trBatchSize,
                                 shuffle=False)  # no need to shuffle for evaluation

        loss_class = losses.Cal_loss(loss_type='mae')
        vein_loss_class = veinloss.Vein_loss_class(self.opt)
        loss_domain = "bce"

        loss_class = loss_class.cuda()
        vein_loss_class = vein_loss_class.cuda()

        test_model.eval()

        #-------------------- TESTING BEGINS
        print('-' * 50 + 'Start Testing' + '-' * 50)
        print('-' * 113)

        runningLoss = 0
        runningLoss_v = 0
        runningLoss_d = 0
        runningTotalLoss = 0
        loss_logger = []
        names = []
        batch_loss_logger = []
        with torch.no_grad():

            loader = tqdm(test_loader, total=len(test_loader))
            for batchID, (input, target, img_name) in enumerate(loader):
                batch_loss = []
                id = target[:, 0:1]
                org = target[:, 1:2]
                target = target[:, 2:]
                if (self.gpu):
                    input = input.type(
                        torch.FloatTensor).to(device=torch.device('cuda'))
                    target = target.float().to(device=torch.device('cuda'))
                    id = id.float().to(device=torch.device('cuda'))
                    org = org.float().to(device=torch.device('cuda'))
                else:
                    input = input.type(torch.DoubleTensor)  # trailing comma removed; it wrapped the tensor in a tuple
                    target = target.float()
                    id = id.float()
                    org = org.float()

                # ---------- Calculate alpha
                # p = float(batchID + epoch * len(dataLoader)) / self.opt.trMaxEpoch / len(dataLoader)
                # alpha = 2. / (1. + np.exp(-10 * p)) - 1

                output, domain_output = test_model(input, alpha=0)
                loss = loss_class(target, output, domain_output, input,
                                  img_name, id, org, vein_loss_class,
                                  self.opt.loss_weights, loss_domain)

                # Loss Logger
                loss_logger.append(loss_class.loss_logger)
                names.append(loss_class.names)
                batch_loss.append(loss_class.point_loss_value)
                batch_loss.append(loss_class.vein_loss_value)
                batch_loss.append(loss_class.domain_loss)
                batch_loss.append(loss)
                batch_loss_logger.append(batch_loss)

                runningLoss += loss_class.point_loss_value
                runningLoss_v += loss_class.vein_loss_value
                runningLoss_d += loss_class.domain_loss
                runningTotalLoss += loss

            runningLoss = runningLoss / len(test_loader)
            runningLoss_v = runningLoss_v / len(test_loader)
            runningLoss_d = runningLoss_d / len(test_loader)
            runningTotalLoss = runningTotalLoss / len(test_loader)

        # Print the losses
        print('Test_loss      = ' + str(np.array(runningTotalLoss.cpu())))
        print('-' * 20)
        print('Test_loss_point   = ' + str(runningLoss.item()))
        print('-' * 20)
        print('Test_loss_vein   = ' + str(runningLoss_v.item()))
        print('-' * 20)
        print('Test_loss_domain   = ' + str(runningLoss_d.item()))

        np.savez(self.opt.Output_dir + "Loss_logger_test.npz",
                 loss_logger=loss_logger,
                 names=names,
                 batch_loss_logger=batch_loss_logger)

        print('-' * 50 + 'Finished Testing' + '-' * 50)
        print('-' * 113)
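
The same ImageNet normalization pipeline appears in every example on this page. A self-contained sketch of what it does to one image, using the 240x300 input size from Example #1 (the random array is a stand-in for a real image):

import numpy as np
from torchvision import transforms

trans_pipeline = transforms.Compose([
    transforms.ToTensor(),  # HWC uint8 in [0, 255] -> CHW float in [0.0, 1.0]
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # ImageNet mean / std
])

dummy = np.random.randint(0, 256, (240, 300, 3), dtype=np.uint8)
tensor = trans_pipeline(dummy)
print(tensor.shape)  # torch.Size([3, 240, 300]): channels first, as resnet50_DANN expects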
Example #3
    def testing(self, pathFileTest, pathModel, Output_dir, nnArchitecture,
                nnInChanCount, nnClassCount, nnIsTrained, trBatchSize,
                loss_weights, vein_loss, cropped_fldr, bounding_box_folder):

        cudnn.benchmark = True

        #-------------------- SETTINGS: LOAD DATA
        test_model = model.load_model(nnArchitecture, nnIsTrained,
                                      nnInChanCount, nnClassCount)

        modelCheckpoint = torch.load(pathModel)
        test_model.load_state_dict(modelCheckpoint['state_dict'])
        if self.gpu:
            test_model = test_model.cuda()  # the inputs below are moved to the GPU as well

        #-------------------- SETTINGS: DATASET BUILDERS
        trans_pipeline = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        test_data = utils.dataset_builder(pathFileTest, 'Test')
        test_set = utils.SeedlingDataset(test_data,
                                         pathFileTest,
                                         trans_pipeline=trans_pipeline,
                                         normalize=True)
        test_loader = DataLoader(test_set,
                                 batch_size=trBatchSize,
                                 shuffle=False)  # no need to shuffle for evaluation

        loss_class = losses.Cal_loss(loss_type='mae')
        vein_loss_class = veinloss.Vein_loss_class(cropped_fldr,
                                                   bounding_box_folder,
                                                   pathFileTest)
        test_model.eval()

        #-------------------- TESTING BEGINS
        print('-' * 50 + 'Start Testing' + '-' * 50)
        print('-' * 113)

        runningLoss = 0
        runningLoss_v = 0
        runningTotalLoss = 0
        loss_logger = []
        names = []
        batch_loss_logger = []
        with torch.no_grad():

            loader = tqdm(test_loader, total=len(test_loader))
            for batchID, (input, target, img_name) in enumerate(loader):
                batch_loss = []
                id = target[:, 0:2]
                target = target[:, 2:]
                if (self.gpu):
                    input = input.type(
                        torch.FloatTensor).to(device=torch.device('cuda'))
                    target = target.float().to(device=torch.device('cuda'))
                else:
                    input = input.type(torch.DoubleTensor)  # trailing comma removed; it wrapped the tensor in a tuple
                    target = target.float()

                output = test_model(input)
                # loss = func.mse_loss(output, target)
                loss = loss_class(target, output, input, img_name, id,
                                  vein_loss, vein_loss_class,
                                  loss_weights).type(torch.FloatTensor)

                # Loss Logger
                loss_logger.append(loss_class.loss_logger)
                names.append(loss_class.names)
                batch_loss.append(loss_class.point_loss_value)
                batch_loss.append(loss_class.vein_loss_value)
                batch_loss.append(loss)
                batch_loss_logger.append(batch_loss)

                runningLoss += loss_class.point_loss_value
                runningLoss_v += loss_class.vein_loss_value
                runningTotalLoss += loss

            runningLoss = runningLoss / len(test_loader)
            runningLoss_v = runningLoss_v / len(test_loader)
            runningTotalLoss = runningTotalLoss / len(test_loader)

        # Print the losses
        print('Test_loss      = ' + str(np.array(runningTotalLoss.cpu())))
        print('-' * 20)
        if (vein_loss):
            print('Test_loss_point   = ' + str(runningLoss.item()))
            print('-' * 20)
            print('Test_loss_vein   = ' + str(runningLoss_v.item()))

        np.savez(Output_dir + "Loss_logger_test.npz",
                 loss_logger=loss_logger,
                 names=names,
                 batch_loss_logger=batch_loss_logger)

        print('-' * 50 + 'Finished Testing' + '-' * 50)
        print('-' * 113)
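
Both testing methods dump their logs with np.savez. A short read-back sketch: because the per-batch lists are ragged, NumPy stores them as object arrays, so allow_pickle=True is required (the file name matches the one written above):

import numpy as np

logged = np.load("Loss_logger_test.npz", allow_pickle=True)
loss_logger = logged["loss_logger"]
names = logged["names"]
batch_loss_logger = logged["batch_loss_logger"]  # [point, vein, (domain,) total] per batch
print(len(batch_loss_logger), "batches logged")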
Example #4
    def training(self):

        training_model = models.resnet50_DANN(self.opt)
        for p in training_model.parameters():
            p.requires_grad = True

        #-------------------- SETTINGS: DATASET BUILDERS
        trans_pipeline = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

        train_data, valid_data = utils.dataset_builder(self.opt.pathDirData,
                                                       'Train')
        train_set = utils.SeedlingDataset(train_data,
                                          self.opt.pathDirData,
                                          trans_pipeline=trans_pipeline,
                                          normalize=False)
        val_set = utils.SeedlingDataset(valid_data,
                                        self.opt.pathDirData,
                                        trans_pipeline=trans_pipeline,
                                        normalize=False)

        train_loader = DataLoader(train_set,
                                  batch_size=self.opt.trBatchSize,
                                  shuffle=True)
        valid_loader = DataLoader(val_set,
                                  batch_size=self.opt.trBatchSize,
                                  shuffle=True)

        #-------------------- SETTINGS: OPTIMIZER & SCHEDULER
        optimizer = optim.Adam(training_model.parameters(),
                               lr=self.opt.lr,
                               betas=(0.9, 0.999),
                               eps=1e-08,
                               weight_decay=1e-5)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                         mode='min',
                                                         factor=0.1,
                                                         patience=5,
                                                         verbose=True,
                                                         threshold=0.0001,
                                                         threshold_mode='rel',
                                                         cooldown=0,
                                                         min_lr=1e-10,
                                                         eps=1e-08)

        #-------------------- SETTINGS: LOSS
        loss_class = losses.Cal_loss(loss_type='mae')
        vein_loss_class = veinloss.Vein_loss_class(self.opt)

        loss_class = loss_class.cuda()
        vein_loss_class = vein_loss_class.cuda()

        #-------------------- SETTINGS: TENSORBOARD
        tb = SummaryWriter()

        # images, _, _ = next(iter(train_loader))
        # images_val, _, _ = next(iter(valid_loader))
        # grid = torchvision.utils.make_grid(images)
        # grid_val = torchvision.utils.make_grid(images_val)

        # tb.add_image('images', grid)
        # tb.add_image('images_val', grid_val)
        # tb.add_graph(training_model.cpu(), images.reshape((1, trBatchSize, nnInChanCount, 240, 300)))
        # tb.add_graph(training_model.cpu(), images_val.reshape((1, trBatchSize, nnInChanCount, 240, 300)))

        #---- TRAIN THE NETWORK
        lossMIN = float('inf')
        start_epoch = 0
        # Load the checkpoint
        if (self.opt.checkpoint):
            training_model, optimizer, start_epoch, lossMIN = utils.load_checkpoint(
                training_model, optimizer, lossMIN, self.opt.checkpoint)
            training_model = training_model.cuda()

            # now individually transfer the optimizer parts...
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(torch.device('cuda'))

        print('-' * 50 + 'Start Training' + '-' * 50)

        loss_epoch = []
        self.loss_logger = []

        for epochID in range(start_epoch, self.opt.trMaxEpoch):

            totalLossTrain, lossTrain, lossTrain_v = self.epochTrain(
                training_model, train_loader, optimizer, scheduler, loss_class,
                epochID + 1, vein_loss_class)

            tb.add_scalar('Train_loss', totalLossTrain, epochID + 1)
            tb.add_scalar('Train_loss_point', lossTrain, epochID + 1)
            tb.add_scalar('Train_loss_vein', lossTrain_v, epochID + 1)
            loss_epoch.append(totalLossTrain)
            loss_epoch.append(lossTrain)
            loss_epoch.append(lossTrain_v)

            totalLossVal, lossVal, lossVal_v = self.epochVal(
                training_model, valid_loader, optimizer, scheduler, loss_class,
                epochID + 1, vein_loss_class)

            tb.add_scalar('Val_loss', totalLossVal, epochID + 1)
            tb.add_scalar('Val_loss_point', lossVal, epochID + 1)
            tb.add_scalar('Val_loss_vein', lossVal_v, epochID + 1)
            loss_epoch.append(totalLossVal)
            loss_epoch.append(lossVal)
            loss_epoch.append(lossVal_v)

            # tb.add_histogram('conv1.bias', training_model.conv1.bias, epochID + 1)
            # tb.add_histogram('conv1.weight', training_model.conv1.weight, epochID + 1)
            # tb.add_histogram('conv1.weight.grad', training_model.conv1.weight.grad, epochID + 1)

            self.loss_logger.append(loss_epoch)

            scheduler.step(totalLossVal.item())

            # Save the minimum validation point data
            if lossVal < lossMIN:
                lossMIN = lossVal
                path = (f"{self.opt.Output_dir}{epochID + 1}_____{lossTrain}"
                        f"_____{lossVal.item()}_____{lossTrain_v}"
                        f"_____{lossVal_v.item()}.pth.tar")
                torch.save(
                    {
                        'epoch': epochID + 1,
                        'state_dict': training_model.state_dict(),
                        'best_loss': lossMIN,
                        'optimizer': optimizer.state_dict(),
                        'loss_logger': self.loss_logger
                    }, path)
                print('Epoch [' + str(epochID + 1) + '] [save]')
            else:
                print('Epoch [' + str(epochID + 1) + '] [----]')

            # Print the losses
            print('Train_loss = ' + str(totalLossTrain))
            print('Val_loss   = ' + str(np.array(totalLossVal.cpu())))
            print('-' * 20)
            print('Train_loss_point = ' + str(lossTrain))
            print('Val_loss_point   = ' + str(lossVal.item()))
            print('-' * 20)
            print('Train_loss_vein = ' + str(lossTrain_v))
            print('Val_loss_vein   = ' + str(lossVal_v.item()))
            print('-' * 50)

            loss_epoch = []

        tb.close()
        np.savez(self.opt.Output_dir + 'loss_logger.npz',
                 loss_logger=np.array(self.loss_logger))

        print('-' * 50 + 'Finished Training' + '-' * 50)
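
utils.load_checkpoint is not shown on this page. A plausible counterpart, inferred only from the dictionary that torch.save writes above ('epoch', 'state_dict', 'best_loss', 'optimizer'); the project's real helper may differ:

import torch

def load_checkpoint(model, optimizer, lossMIN, path):
    # Hypothetical inverse of the torch.save call in the training loop above.
    checkpoint = torch.load(path)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    start_epoch = checkpoint['epoch']
    lossMIN = checkpoint.get('best_loss', lossMIN)
    return model, optimizer, start_epoch, lossMIN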
Example #5
    def training(self):

        training_model = models.resnet50_DANN(self.opt)
        for p in training_model.parameters():
            p.requires_grad = True

        #-------------------- SETTINGS: DATASET BUILDERS
        trans_pipeline = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

        #---------- Source Data
        train_data, valid_data = utils.dataset_builder(self.opt.pathDirData,
                                                       'Train', self.opt.source)
        train_set = utils.SeedlingDataset(train_data,
                                          self.opt.pathDirData,
                                          trans_pipeline=trans_pipeline,
                                          normalize=False)
        val_set = utils.SeedlingDataset(valid_data,
                                        self.opt.pathDirData,
                                        trans_pipeline=trans_pipeline,
                                        normalize=False)

        train_loader_s = DataLoader(train_set,
                                    batch_size=self.opt.trBatchSize,
                                    shuffle=True)
        valid_loader_s = DataLoader(val_set,
                                    batch_size=self.opt.trBatchSize,
                                    shuffle=True)

        #---------- Target Data
        train_data, valid_data = utils.dataset_builder(self.opt.pathDirData,
                                                       'Train', self.opt.target)
        train_set = utils.SeedlingDataset(train_data,
                                          self.opt.pathDirData,
                                          trans_pipeline=trans_pipeline,
                                          normalize=False)
        val_set = utils.SeedlingDataset(valid_data,
                                        self.opt.pathDirData,
                                        trans_pipeline=trans_pipeline,
                                        normalize=False)

        train_loader_t = DataLoader(train_set,
                                    batch_size=self.opt.trBatchSize,
                                    shuffle=True)
        valid_loader_t = DataLoader(val_set,
                                    batch_size=self.opt.trBatchSize,
                                    shuffle=True)

        #-------------------- SETTINGS: OPTIMIZER & SCHEDULER
        optimizer = optim.Adam(training_model.parameters(),
                               lr=self.opt.lr,
                               betas=(0.9, 0.999),
                               eps=1e-08,
                               weight_decay=1e-5)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                         mode='min',
                                                         factor=0.1,
                                                         patience=5,
                                                         verbose=True,
                                                         threshold=0.0001,
                                                         threshold_mode='rel',
                                                         cooldown=0,
                                                         min_lr=1e-10,
                                                         eps=1e-08)

        #-------------------- SETTINGS: LOSS
        loss_class = losses.Cal_loss(loss_type='mae')
        vein_loss_class = veinloss.Vein_loss_class(self.opt)
        loss_domain = "bce"

        loss_class = loss_class.cuda()
        vein_loss_class = vein_loss_class.cuda()

        #-------------------- SETTINGS: TENSORBOARD
        # #-- Source
        # tb_s = SummaryWriter()
        # tb_val_s = SummaryWriter()
        # #-- Target
        # tb_t = SummaryWriter()
        # tb_val_t = SummaryWriter()

        #---- TRAIN THE NETWORK
        lossMIN = float('inf')
        start_epoch = 0
        # Load the checkpoint
        if (self.opt.checkpoint):
            training_model, optimizer, start_epoch, lossMIN = utils.load_checkpoint(
                training_model, optimizer, lossMIN, self.opt.checkpoint)
            training_model = training_model.cuda()

            # now individually transfer the optimizer parts...
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(torch.device('cuda'))

        print('-' * 50 + 'Start Training' + '-' * 50)

        loss_epoch = []
        self.loss_logger = []

        for epochID in range(start_epoch, self.opt.trMaxEpoch):

            # ------------------------------------------ Train
            print("------ Training ------ Epoch - " + str(epochID + 1) +
                  " ----" * 10)
            totalLossTrain_s, lossTrain_s, lossTrain_v_s, lossTrain_d_s, totalLossTrain, lossTrain, lossTrain_v, lossTrain_d = self.epochTrain(
                training_model, train_loader_s, train_loader_t, optimizer,
                scheduler, loss_class, epochID + 1, vein_loss_class,
                loss_domain)

            # tb_s.add_scalar('Train_loss_s', totalLossTrain_s, epochID + 1)
            # tb_s.add_scalar('Train_loss_point_s', lossTrain_s, epochID + 1)
            # tb_s.add_scalar('Train_loss_vein_s', lossTrain_v_s, epochID + 1)
            # tb_s.add_scalar('Train_loss_domain_s', lossTrain_d_s, epochID + 1)
            loss_epoch.append(totalLossTrain_s)
            loss_epoch.append(lossTrain_s)
            loss_epoch.append(lossTrain_v_s)
            loss_epoch.append(lossTrain_d_s)

            # tb_t.add_scalar('Train_loss_t', totalLossTrain, epochID + 1)
            # tb_t.add_scalar('Train_loss_point_t', lossTrain, epochID + 1)
            # tb_t.add_scalar('Train_loss_vein_t', lossTrain_v, epochID + 1)
            # tb_t.add_scalar('Train_loss_domain_t', lossTrain_d, epochID + 1)
            loss_epoch.append(totalLossTrain)
            loss_epoch.append(lossTrain)
            loss_epoch.append(lossTrain_v)
            loss_epoch.append(lossTrain_d)

            # ------------------------------------------ Validation
            print("------ Validation ------ Epoch - " + str(epochID + 1) +
                  " ----" * 10)
            # ----- Source Validation
            method = "source"
            totalLossVal_s, lossVal_s, lossVal_v_s, lossVal_d_s = self.epochVal(
                training_model, method, valid_loader_s, optimizer, scheduler,
                loss_class, epochID + 1, vein_loss_class, loss_domain)

            # tb_val_s.add_scalar('Val_loss_s', totalLossVal_s, epochID + 1)
            # tb_val_s.add_scalar('Val_loss_point_s', lossVal_s, epochID + 1)
            # tb_val_s.add_scalar('Val_loss_vein_s', lossVal_v_s, epochID + 1)
            # tb_val_s.add_scalar('Val_loss_domain_s', lossVal_d_s, epochID + 1)
            loss_epoch.append(totalLossVal_s)
            loss_epoch.append(lossVal_s)
            loss_epoch.append(lossVal_v_s)
            loss_epoch.append(lossVal_d_s)

            # ----- Target Validation
            print(' -- ' * 10)
            method = "target"
            totalLossVal, lossVal, lossVal_v, lossVal_d = self.epochVal(
                training_model, method, valid_loader_t, optimizer, scheduler,
                loss_class, epochID + 1, vein_loss_class, loss_domain)

            # tb_val_t.add_scalar('Val_loss_t', totalLossVal, epochID + 1)
            # tb_val_t.add_scalar('Val_loss_point_t', lossVal, epochID + 1)
            # tb_val_t.add_scalar('Val_loss_vein_t', lossVal_v, epochID + 1)
            # tb_val_t.add_scalar('Val_loss_domain_t', lossVal_d, epochID + 1)
            loss_epoch.append(totalLossVal)
            loss_epoch.append(lossVal)
            loss_epoch.append(lossVal_v)
            loss_epoch.append(lossVal_d)

            self.loss_logger.append(loss_epoch)

            scheduler.step(totalLossVal_s.item())

            # Save the minimum validation point data
            print('-' * 50)
            print('-' * 50)
            if lossVal_s < lossMIN:
                lossMIN = lossVal_s
                path = (f"{self.opt.Output_dir}{epochID + 1}_____{lossTrain_s}"
                        f"_____{lossVal_s.item()}_____{lossTrain_v_s}"
                        f"_____{lossVal_v_s}_____{lossTrain_d_s}"
                        f"_____{lossVal_d_s.item()}.pth.tar")
                torch.save(
                    {
                        'epoch': epochID + 1,
                        'state_dict': training_model.state_dict(),
                        'best_loss': lossMIN,
                        'optimizer': optimizer.state_dict(),
                        'loss_logger': self.loss_logger
                    }, path)
                print('Epoch [' + str(epochID + 1) + '] [save]')
            else:
                print('Epoch [' + str(epochID + 1) + '] [----]')

            # Print the losses
            print("-------- Source Data Results --------")
            print('Train_loss = ' + str(totalLossTrain_s))
            print('Val_loss   = ' + str(np.array(totalLossVal_s.cpu())))
            print('-' * 20)
            print('Train_loss_point = ' + str(lossTrain_s))
            print('Val_loss_point   = ' + str(lossVal_s.item()))
            # print('-' * 20)
            # print('Train_loss_vein = ' + str(lossTrain_v_s))
            # print('Val_loss_vein   = ' + str(lossVal_v_s.item()))
            print('-' * 20)
            print('Train_loss_domain = ' + str(lossTrain_d_s))
            print('Val_loss_domain   = ' + str(lossVal_d_s.item()))
            print('-' * 20)
            print("-------- Target Data Results --------")
            print('Train_loss = ' + str(totalLossTrain))
            print('Val_loss   = ' + str(np.array(totalLossVal.cpu())))
            print('-' * 20)
            print('Train_loss_point = ' + str(lossTrain))
            print('Val_loss_point   = ' + str(lossVal.item()))
            # print('-' * 20)
            # print('Train_loss_vein = ' + str(lossTrain_v))
            # print('Val_loss_vein   = ' + str(lossVal_v.item()))
            print('-' * 20)
            print('Train_loss_domain = ' + str(lossTrain_d))
            print('Val_loss_domain   = ' + str(lossVal_d.item()))
            print('-' * 50)

            np.savez(self.opt.Output_dir + 'loss_logger.npz',
                     loss_logger=np.array(self.loss_logger))
            loss_epoch = []

        # tb_s.close()
        # tb_val_s.close()
        # tb_t.close()
        # tb_val_t.close()

        print('-' * 50 + 'Finished Training' + '-' * 50)
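
Both testing examples call the model with alpha=0, which turns the gradient-reversal scaling off. During training, the commented-out schedule in Example #2 ramps alpha smoothly from 0 toward 1 over the run; written as a standalone function, that schedule would look like:

import numpy as np

def dann_alpha(batchID, epoch, batches_per_epoch, max_epochs):
    # p runs from 0 to 1 over training; alpha = 2 / (1 + exp(-10p)) - 1
    # starts at 0 and saturates near 1, as in the comment in Example #2.
    p = float(batchID + epoch * batches_per_epoch) / (max_epochs * batches_per_epoch)
    return 2. / (1. + np.exp(-10 * p)) - 1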