Exemple #1
0
# NOTE(review): scraped snippet — truncated by extraction: the final
# get_epoch_batch(...) call is cut off mid-argument-list, and the imports
# (TrainOptions, make_dataset, create_model, Visualizer, time, random,
# get_epoch_batch) are not visible here.
if __name__ == '__main__':
    opt = TrainOptions().parse()  # get training options
    # dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    # dataset_size = len(dataset)    # get the number of images in the dataset.
    # assert(opt.niter + opt.niter_decay == opt.stage3_epoch)
    data_list = make_dataset(opt.dataroot)  # presumably a list of image paths under dataroot — TODO confirm

    dataset_size = len(data_list)

    print('The number of training images = %d' % dataset_size)

    model = create_model(
        opt)  # create a model given opt.model and other options
    model.setup(
        opt)  # regular setup: load and print networks; create schedulers
    visualizer = Visualizer(
        opt)  # create a visualizer that display/save images and plots
    total_iters = 0  # the total number of training iterations

    for epoch in range(
            opt.epoch_count, opt.niter + opt.niter_decay + 1
    ):  # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
        epoch_start_time = time.time()  # timer for entire epoch
        iter_data_time = time.time()  # timer for data loading per iteration
        epoch_iter = 0  # the number of training iterations in current epoch, reset to 0 every epoch

        # a fresh random permutation of all sample indices for this epoch
        idx = random.sample(range(dataset_size), dataset_size)

        for i in range(dataset_size):

            # batch size 1; the rest of this call is lost to the extraction
            data = get_epoch_batch(data_list,
                                   1,
# NOTE(review): scraped test-script snippet — the surrounding imports
# (TestOptions, create_model, CreateDataLoader, Visualizer, os) are not
# visible, and the per-frame loop body is truncated after the `continue`.
opt = TestOptions().parse()
opt.nThreads = 1  # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip
model = create_model(opt)

# Walk the two-level ILSVRC2015 crop directory tree; each leaf folder is
# used as a separate dataroot and processed in sorted order.
pathbase = '/home/lichao/tracking/datasets/ILSVRC2015_crops/Data/VID/train'
level1 = sorted(os.listdir(pathbase))
for folder1 in level1:
    level2 = sorted(os.listdir(os.path.join(pathbase, folder1)))
    for folder2 in level2:
        opt.dataroot = os.path.join(pathbase, folder1, folder2)
        data_loader = CreateDataLoader(opt)
        dataset = data_loader.load_data()
        visualizer = Visualizer(opt)
        # create website
        #web_dir = os.path.join(opt.results_dir, folder1, '%s_%s' % (opt.phase, opt.which_epoch))
        web_dir = os.path.join(opt.results_dir, folder1, folder2)
        #webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
        # test
        for i, data in enumerate(dataset):

            model.set_input(data)
            model.test()
            img_path = model.get_image_paths()
            # mirror the input path into the parallel ..._i results tree
            resimg_path = img_path[0].replace('ILSVRC2015_crops',
                                              'ILSVRC2015_crops_i')
            resimg_path = resimg_path.replace('jpg', 'JPEG')
            # skip frames whose result already exists (makes reruns resumable)
            if os.path.isfile(resimg_path):
                continue
Exemple #3
0
def main():
    """Train a video-prediction model and log progress to TensorBoard.

    Parses train/val options, builds the data loader, model and visualizer,
    then runs the epoch loop: optimizes on every batch, periodically logs
    per-loss scalars and sample grids to TensorBoard, saves the latest
    checkpoint, and validates at each epoch-save boundary.
    """
    opt, val_opt = TrainOptions().parse()
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('# training videos = %d' % dataset_size)

    model = create_model(opt)
    visualizer = Visualizer(opt)
    total_steps = 0  # total # of videos seen across all epochs
    writer = SummaryWriter(log_dir=os.path.join(opt.tensorboard_dir, opt.name))

    for epoch in range(model.start_epoch, opt.nepoch + opt.nepoch_decay + 1):
        epoch_start_time = time.time()
        epoch_iters = 0  # # of videos in this epoch

        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            total_steps += opt.batch_size
            epoch_iters += opt.batch_size
            model.set_inputs(data)
            model.optimize_parameters()

            if total_steps % opt.print_freq == 0:
                errors = model.get_current_errors()
                # per-sample wall time for this iteration
                t = (time.time() - iter_start_time) / opt.batch_size
                writer.add_scalar('iter_time', t, total_steps / opt.batch_size)
                # iterate items() directly instead of keys() + per-key lookup
                for key, value in errors.items():
                    writer.add_scalar('loss/%s' % (key), value,
                                      total_steps / opt.batch_size)
                visualizer.print_current_errors(epoch, epoch_iters, errors, t)

            if total_steps % opt.display_freq == 0:
                visuals = model.get_current_visuals()
                grid = visual_grid(visuals['seq_batch'], visuals['pred'],
                                   opt.K, opt.T)
                writer.add_image('current_batch', grid,
                                 total_steps / opt.batch_size)

            if total_steps % opt.save_latest_freq == 0:
                print('saving the latest model (epoch %d, total_steps %d)' %
                      (epoch, total_steps))
                model.save('latest', epoch)

        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d' %
                  (epoch, total_steps))
            model.save('latest', epoch)
            model.save(epoch, epoch)
            # run validation and log its summary plots/samples
            psnr_plot, ssim_plot, grid = val(val_opt)
            writer.add_image('psnr', psnr_plot, epoch)
            writer.add_image('ssim', ssim_plot, epoch)
            writer.add_image('samples', grid, epoch)

        print('End of epoch %d / %d \t Time Taken: %d sec' %
              (epoch, opt.nepoch + opt.nepoch_decay,
               time.time() - epoch_start_time))
Exemple #4
0
# Training-script preamble: parse options, build the data pipeline and
# model, and recover the (start_epoch, epoch_iter) resume position from
# iter.txt when --continue_train is set.
opt = TrainOptions().parse()

if opt.debug:
    # tiny settings so a debug run finishes quickly
    opt.display_freq = 1
    opt.print_freq = 1
    opt.niter = 1
    opt.niter_decay = 0
    opt.max_dataset_size = 10

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(dataset) * opt.batchSize
print('#training images = %d' % dataset_size)

path = os.path.join(opt.checkpoints_dir, opt.name, 'model.txt')
visualizer = Visualizer(opt)


iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
if opt.continue_train:
    try:
        start_epoch, epoch_iter = np.loadtxt(iter_path, delimiter=',', dtype=int)
    except (OSError, ValueError):
        # iter.txt missing or malformed: fall back to a fresh start.
        # (Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.)
        start_epoch, epoch_iter = 1, 0
    visualizer.print_save('Resuming from epoch %d at iteration %d' % (start_epoch-1, epoch_iter))
else:
    start_epoch, epoch_iter = 1, 0

# opt.which_epoch=start_epoch-1
model = create_model(opt)
# NOTE(review): fd is left open here — this snippet is truncated; make sure
# the full script closes it (or uses a `with` block).
fd = open(path, 'w')
Exemple #5
0
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
from util import html
import torch
import numpy as np
import imageio

# NOTE(review): scraped snippet — the TestOptions/CreateDataLoader/os imports
# above this point and the engine/onnx branch below are cut off.
opt = TestOptions().parse(save=False)
opt.nThreads = 1  # test code only supports nThreads = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
visualizer = Visualizer(opt)
# create website
web_dir = os.path.join(opt.results_dir, opt.name,
                       '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(
    web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
    (opt.name, opt.phase, opt.which_epoch))

# test
# only build the PyTorch model when not running a TensorRT engine or ONNX
if not opt.engine and not opt.onnx:
    model = create_model(opt)

    if opt.verbose:
        print(model)
else:
    from run_engine import run_trt_engine, run_onnx
Exemple #6
0
    # NOTE(review): orphaned line from a truncated `else:` branch — the
    # enclosing if/else (and the imports) are not part of this snippet.
    start_epoch, epoch_iter = 1, 0

if opt.debug:
    # tiny settings so a debug run finishes quickly
    opt.display_freq = 1
    opt.print_freq = 1
    opt.niter = 1
    opt.niter_decay = 0
    opt.max_dataset_size = 10

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)

model = create_model(opt)
visualizer = Visualizer(opt)

# resume the global step counter from the saved position
total_steps = (start_epoch-1) * dataset_size + epoch_iter

# phase offsets so the periodic display/print/save actions stay aligned
# with their frequencies after resuming mid-epoch
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq

for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    if epoch != start_epoch:
        epoch_iter = epoch_iter % dataset_size
    # inner loop truncated here by the extraction
    for i, data in enumerate(dataset, start=epoch_iter):
        iter_start_time = time.time()
        total_steps += opt.batchSize
        epoch_iter += opt.batchSize
Exemple #7
0
import time
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer

# NOTE(review): scraped snippet — truncated: the final
# display_current_results(...) call is cut off mid-argument-list.
opt = TrainOptions().parse()
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)

model = create_model(opt)
visualizer = Visualizer(opt)
total_steps = 0  # total images seen across all epochs

for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    epoch_iter = 0  # images seen in the current epoch

    for i, data in enumerate(dataset):
        iter_start_time = time.time()
        visualizer.reset()
        total_steps += opt.batchSize
        epoch_iter += opt.batchSize
        model.set_input(data)
        model.optimize_parameters()

        if total_steps % opt.display_freq == 0:
            # refresh the saved HTML page only every update_html_freq steps
            save_result = total_steps % opt.update_html_freq == 0
            visualizer.display_current_results(model.get_current_visuals(),
Exemple #8
0
from util.util import calc_psnr as calc_psnr
#from util.util import calc_psnr_np as calc_psnr

# NOTE(review): scraped snippet — truncated inside the inner training loop;
# the TrainOptions/create_dataset/create_model/Visualizer/time imports are
# not visible here.
if __name__ == '__main__':
    opt = TrainOptions().parse()
    dataset_train = create_dataset('div2k', 'train', opt)
    dataset_size_train = len(dataset_train)
    print('The number of training images = %d' % dataset_size_train)
    dataset_val = create_dataset('div2k', 'val', opt)
    dataset_size_val = len(dataset_val)
    print('The number of val images = %d' % dataset_size_val)

    model = create_model(opt)
    model.setup(opt)
    visualizer = Visualizer(opt)
    total_iters = 0  # total images seen across all epochs

    for epoch in range(model.start_epoch + 1, opt.niter + opt.niter_decay + 1):

        # training
        epoch_start_time = time.time()
        epoch_iter = 0
        model.train()
        # the auxiliary depth generator (if present) must also be in train mode
        if hasattr(model, 'depth_gen') and model.depth_gen is not None:
            model.depth_gen.train()

        iter_data_time = iter_start_time = time.time()
        for i, data in enumerate(dataset_train):
            if total_iters % opt.print_freq == 0:
                # data-loading time for this print interval
                t_data = time.time() - iter_data_time
Exemple #9
0
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from util import html

# NOTE(review): scraped snippet — truncated: the final save_images(...) call
# is cut off mid-argument-list, and the `os` import is not visible here.
opt = TestOptions().parse()
opt.nThreads = 1  # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)
# create website
web_dir = os.path.join(opt.results_dir, opt.name,
                       '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(
    web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
    (opt.name, opt.phase, opt.which_epoch))
# test
for i, data in enumerate(dataset):
    # process at most opt.how_many images
    if i >= opt.how_many: break
    model.set_input(data)
    model.test()
    visuals = model.get_current_visuals()
    img_path = model.get_image_paths()
    print('%04d: process image... %s' % (i, img_path))
    visualizer.save_images(webpage,
Exemple #10
0
from options.train_options import TrainOptions
from dataloader.data_loader import dataloader
from model import create_model
from util.visualizer import Visualizer

# NOTE(review): scraped snippet — truncated inside the inner training loop;
# the `time` import is not visible here.
if __name__ == '__main__':
    # get training options
    opt = TrainOptions().parse()
    # create a dataset
    dataset = dataloader(opt)
    dataset_size = len(dataset) * opt.batchSize
    print('training images = %d' % dataset_size)
    # create a model
    model = create_model(opt)
    # create a visualizer
    visualizer = Visualizer(opt)
    # training flag
    keep_training = True
    max_iteration = opt.niter + opt.niter_decay
    epoch = 0
    total_iteration = opt.iter_count

    # training process: open-ended epoch loop driven by keep_training
    # (the stop condition is in the truncated part of the snippet)
    while (keep_training):
        epoch_start_time = time.time()
        epoch += 1
        print('\n Training epoch: %d' % epoch)

        for i, data in enumerate(dataset):
            # tell the dataset which (0-based) epoch it is serving
            dataset.epoch = epoch - 1
            iter_start_time = time.time()
Exemple #11
0
    # NOTE(review): scraped snippet — truncated at both ends: this line
    # continues a cut-off enclosing scope, and the final try: block below
    # is cut off before its body.
    test_opt.epoch = 9

    test_data_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=test_opt.batch_size,
        shuffle=False,
        num_workers=test_opt.num_threads,
        pin_memory=True)

    # len() of a DataLoader counts batches, not individual samples
    test_dataset_size = len(test_data_loader)
    print('#test images = %d' % test_dataset_size)

    model = create_model(test_opt, test_dataset)
    model.eval()
    model.setup(test_opt)
    visualizer = Visualizer(test_opt)
    test_loss_iter = []
    gts = None
    preds = None
    epoch_iter = 0
    model.init_test_eval()
    epoch = 0
    num = 5
    # How many images to save in an image
    if not os.path.exists('vis'):
        os.makedirs('vis')
    # evaluation only: no gradients needed
    with torch.no_grad():
        iterator = iter(test_data_loader)
        i = 0
        while True:
            try:
Exemple #12
0
    def define_networks(self, start_epoch):
        """Build the generator/discriminator networks and their optimizers.

        Creates self.netG (and an optional face-refinement generator
        self.netGf), the frame discriminator self.netD (and optional face
        discriminator self.netDf) when training or finetuning, then the G/D
        optimizers, and finally switches to the temporal model when the
        resume epoch is past the single-frame phase.

        Args:
            start_epoch: epoch the run resumes from; used to decide whether
                to call make_temporal_model() immediately.
        """
        opt = self.opt
        # Generator network
        # label maps as input unless pose mode — presumably; confirm
        # self.pose semantics against the enclosing class
        input_nc = opt.label_nc if (opt.label_nc != 0
                                    and not self.pose) else opt.input_nc
        netG_input_nc = input_nc
        opt.for_face = False
        self.netG = networks.define_G(opt)
        if self.refine_face:
            # the face generator gets its own option copy: one less
            # downsampling layer, generator output as its input, and the
            # face-crop size from the face refiner
            opt_face = copy.deepcopy(opt)
            opt_face.n_downsample_G -= 1
            if opt_face.n_adaptive_layers > 0: opt_face.n_adaptive_layers -= 1
            opt_face.input_nc = opt.output_nc
            opt_face.fineSize = self.faceRefiner.face_size
            opt_face.aspect_ratio = 1
            opt_face.for_face = True
            self.netGf = networks.define_G(opt_face)

        # Discriminator network
        if self.isTrain or opt.finetune:
            # D sees input + output (+ optional foreground mask channel);
            # doubled when the reference frame is concatenated as well
            netD_input_nc = input_nc + opt.output_nc + (
                1 if self.concat_fg_mask_for_D else 0)
            if self.concat_ref_for_D:
                netD_input_nc *= 2
            self.netD = networks.define_D(opt,
                                          netD_input_nc,
                                          opt.ndf,
                                          opt.n_layers_D,
                                          opt.norm_D,
                                          opt.netD_subarch,
                                          opt.num_D,
                                          not opt.no_ganFeat_loss,
                                          gpu_ids=self.gpu_ids)
            if self.add_face_D:
                # face discriminator sees real+fake face crops stacked
                self.netDf = networks.define_D(opt,
                                               opt.output_nc * 2,
                                               opt.ndf,
                                               opt.n_layers_D,
                                               opt.norm_D,
                                               'n_layers',
                                               1,
                                               not opt.no_ganFeat_loss,
                                               gpu_ids=self.gpu_ids)
            else:
                self.netDf = None
        # temporal discriminator is created later by make_temporal_model()
        self.temporal = False
        self.netDT = None

        Visualizer.vis_print(self.opt,
                             '---------- Networks initialized -------------')

        # initialize optimizers
        if self.isTrain:
            # optimizer G (includes the face generator's params if present)
            params = list(self.netG.parameters())
            if self.refine_face: params += list(self.netGf.parameters())
            self.optimizer_G = self.get_optimizer(params,
                                                  for_discriminator=False)

            # optimizer D (includes the face discriminator's params if present)
            params = list(self.netD.parameters())
            if self.add_face_D: params += list(self.netDf.parameters())
            self.optimizer_D = self.get_optimizer(params,
                                                  for_discriminator=True)

        Visualizer.vis_print(
            self.opt, '---------- Optimizers initialized -------------')

        # make model temporal by generating multiple frames
        if (not opt.isTrain
                or start_epoch > opt.niter_single) and opt.n_frames_G > 1:
            self.make_temporal_model()
Exemple #13
0
                                             drop_last=True)
    # NOTE(review): scraped snippet — truncated at both ends: the DataLoader
    # call above is cut off mid-argument-list, and the training-loop body
    # below is cut off after set_input.

    # Load test dataset
    test_dataset = TestImgDataset()
    test_dataset.initialize(opt)

    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=1,
                                                  drop_last=False)

    # Create complete model
    model = SemIAModel(opt)

    visualizer = Visualizer(opt)

    total_steps = 0
    # Main training loop
    for i, data in enumerate(dataloader):
        total_steps += 1
        start_time = time.time()

        # alternate between reconstruction and sampling modes by step index
        if i % opt.zero_rec_freq == 0:
            # Zero reconstruction: using augmented image as input and condition
            mode_g = 'generator_rec'
            model.set_input(data, mode_g)
        else:
            # Sample mode: using input_image as input, tgt_image as condition
            mode_g = 'generator'
            model.set_input(data, mode_g)
Exemple #14
0
def train():
    """Train a vid2vid-style video GAN (generator, multi-scale temporal
    discriminators, and a flow network).

    Parses options, optionally resumes from the saved (epoch, iteration)
    in iter.txt, then runs the epoch loop: each batch is split into chunks
    of frames that fit on the GPUs, forward/backward passes update G, the
    frame discriminator and each temporal-scale discriminator, and results
    are periodically printed, displayed and checkpointed.
    """
    opt = TrainOptions().parse()
    if opt.debug:
        opt.display_freq = 1
        opt.print_freq = 1
        opt.nThreads = 1

    ### initialize dataset
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    if opt.dataset_mode == 'pose':
        print('#training frames = %d' % dataset_size)
    else:
        print('#training videos = %d' % dataset_size)

    ### initialize models
    modelG, modelD, flowNet = create_model(opt)
    visualizer = Visualizer(opt)

    iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
    ### if continue training, recover previous states
    if opt.continue_train:
        try:
            start_epoch, epoch_iter = np.loadtxt(iter_path,
                                                 delimiter=',',
                                                 dtype=int)
        except (OSError, ValueError):
            # iter.txt missing or malformed: fall back to a fresh start.
            # (Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            start_epoch, epoch_iter = 1, 0
        print('Resuming from epoch %d at iteration %d' %
              (start_epoch, epoch_iter))
        # replay the schedule changes the run would have gone through
        if start_epoch > opt.niter:
            modelG.module.update_learning_rate(start_epoch - 1)
            modelD.module.update_learning_rate(start_epoch - 1)
        if (opt.n_scales_spatial > 1) and (opt.niter_fix_global != 0) and (
                start_epoch > opt.niter_fix_global):
            modelG.module.update_fixed_params()
        if start_epoch > opt.niter_step:
            data_loader.dataset.update_training_batch(
                (start_epoch - 1) // opt.niter_step)
            modelG.module.update_training_batch(
                (start_epoch - 1) // opt.niter_step)
    else:
        start_epoch, epoch_iter = 1, 0

    ### set parameters
    n_gpus = opt.n_gpus_gen // opt.batchSize  # number of gpus used for generator for each batch
    tG, tD = opt.n_frames_G, opt.n_frames_D
    tDB = tD * opt.output_nc
    s_scales = opt.n_scales_spatial
    t_scales = opt.n_scales_temporal
    input_nc = 1 if opt.label_nc != 0 else opt.input_nc
    output_nc = opt.output_nc

    # align print_freq with the batch size so the modulo checks below fire
    opt.print_freq = lcm(opt.print_freq, opt.batchSize)
    total_steps = (start_epoch - 1) * dataset_size + epoch_iter
    total_steps = total_steps // opt.print_freq * opt.print_freq

    ### real training starts here
    for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        for idx, data in enumerate(dataset, start=epoch_iter):
            if total_steps % opt.print_freq == 0:
                iter_start_time = time.time()
            total_steps += opt.batchSize
            epoch_iter += opt.batchSize

            # whether to collect output images
            save_fake = total_steps % opt.display_freq == 0

            _, n_frames_total, height, width = data['B'].size(
            )  # n_frames_total = n_frames_load * n_loadings + tG - 1
            n_frames_total = n_frames_total // opt.output_nc
            n_frames_load = opt.max_frames_per_gpu * n_gpus  # number of total frames loaded into GPU at a time for each batch
            n_frames_load = min(n_frames_load, n_frames_total - tG + 1)
            t_len = n_frames_load + tG - 1  # number of loaded frames plus previous frames

            fake_B_last = None  # the last generated frame from previous training batch (which becomes input to the next batch)
            real_B_all, fake_B_all, flow_ref_all, conf_ref_all = None, None, None, None  # all real/generated frames so far
            if opt.sparse_D:
                real_B_all, fake_B_all, flow_ref_all, conf_ref_all = [
                    None
                ] * t_scales, [None] * t_scales, [None] * t_scales, [
                    None
                ] * t_scales
            real_B_skipped, fake_B_skipped = [None] * t_scales, [
                None
            ] * t_scales  # temporally subsampled frames
            flow_ref_skipped, conf_ref_skipped = [None] * t_scales, [
                None
            ] * t_scales  # temporally subsampled flows

            for i in range(0, n_frames_total - t_len + 1, n_frames_load):
                # 5D tensor: batchSize, # of frames, # of channels, height, width
                input_A = Variable(
                    data['A'][:, i * input_nc:(i + t_len) * input_nc,
                              ...]).view(-1, t_len, input_nc, height, width)
                input_B = Variable(
                    data['B'][:, i * output_nc:(i + t_len) * output_nc,
                              ...]).view(-1, t_len, output_nc, height, width)
                inst_A = Variable(data['inst'][:, i:i + t_len, ...]).view(
                    -1, t_len, 1, height,
                    width) if len(data['inst'].size()) > 2 else None

                ###################################### Forward Pass ##########################
                ####### generator
                fake_B, fake_B_raw, flow, weight, real_A, real_Bp, fake_B_last = modelG(
                    input_A, input_B, inst_A, fake_B_last)

                if i == 0:
                    fake_B_first = fake_B[
                        0, 0]  # the first generated image in this sequence
                real_B_prev, real_B = real_Bp[:, :
                                              -1], real_Bp[:,
                                                           1:]  # the collection of previous and current real frames

                ####### discriminator
                ### individual frame discriminator
                flow_ref, conf_ref = flowNet(
                    real_B, real_B_prev)  # reference flows and confidences
                fake_B_prev = real_B_prev[:, 0:
                                          1] if fake_B_last is None else fake_B_last[
                                              0][:, -1:]
                if fake_B.size()[1] > 1:
                    fake_B_prev = torch.cat(
                        [fake_B_prev, fake_B[:, :-1].detach()], dim=1)

                losses = modelD(
                    0,
                    reshape([
                        real_B, fake_B, fake_B_raw, real_A, real_B_prev,
                        fake_B_prev, flow, weight, flow_ref, conf_ref
                    ]))
                losses = [
                    torch.mean(x) if x is not None else 0 for x in losses
                ]
                loss_dict = dict(zip(modelD.module.loss_names, losses))

                ### temporal discriminator
                loss_dict_T = []
                # get skipped frames for each temporal scale
                if t_scales > 0:
                    if opt.sparse_D:
                        real_B_all, real_B_skipped = get_skipped_frames_sparse(
                            real_B_all, real_B, t_scales, tD, n_frames_load, i)
                        fake_B_all, fake_B_skipped = get_skipped_frames_sparse(
                            fake_B_all, fake_B, t_scales, tD, n_frames_load, i)
                        flow_ref_all, flow_ref_skipped = get_skipped_frames_sparse(
                            flow_ref_all,
                            flow_ref,
                            t_scales,
                            tD,
                            n_frames_load,
                            i,
                            is_flow=True)
                        conf_ref_all, conf_ref_skipped = get_skipped_frames_sparse(
                            conf_ref_all,
                            conf_ref,
                            t_scales,
                            tD,
                            n_frames_load,
                            i,
                            is_flow=True)
                    else:
                        real_B_all, real_B_skipped = get_skipped_frames(
                            real_B_all, real_B, t_scales, tD)
                        fake_B_all, fake_B_skipped = get_skipped_frames(
                            fake_B_all, fake_B, t_scales, tD)
                        flow_ref_all, conf_ref_all, flow_ref_skipped, conf_ref_skipped = get_skipped_flows(
                            flowNet, flow_ref_all, conf_ref_all,
                            real_B_skipped, flow_ref, conf_ref, t_scales, tD)

                # run discriminator for each temporal scale
                for s in range(t_scales):
                    if real_B_skipped[s] is not None:
                        losses = modelD(s + 1, [
                            real_B_skipped[s], fake_B_skipped[s],
                            flow_ref_skipped[s], conf_ref_skipped[s]
                        ])
                        losses = [
                            torch.mean(x) if not isinstance(x, int) else x
                            for x in losses
                        ]
                        loss_dict_T.append(
                            dict(zip(modelD.module.loss_names_T, losses)))

                # collect losses
                loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
                loss_G = loss_dict['G_GAN'] + loss_dict[
                    'G_GAN_Feat'] + loss_dict['G_VGG']
                loss_G += loss_dict['G_Warp'] + loss_dict[
                    'F_Flow'] + loss_dict['F_Warp'] + loss_dict['W']
                if opt.add_face_disc:
                    loss_G += loss_dict['G_f_GAN'] + loss_dict['G_f_GAN_Feat']
                    loss_D += (loss_dict['D_f_fake'] +
                               loss_dict['D_f_real']) * 0.5

                # collect temporal losses
                loss_D_T = []
                t_scales_act = min(t_scales, len(loss_dict_T))
                for s in range(t_scales_act):
                    loss_G += loss_dict_T[s]['G_T_GAN'] + loss_dict_T[s][
                        'G_T_GAN_Feat'] + loss_dict_T[s]['G_T_Warp']
                    loss_D_T.append((loss_dict_T[s]['D_T_fake'] +
                                     loss_dict_T[s]['D_T_real']) * 0.5)

                ###################################### Backward Pass #################################
                optimizer_G = modelG.module.optimizer_G
                optimizer_D = modelD.module.optimizer_D
                # update generator weights
                optimizer_G.zero_grad()
                loss_G.backward()
                optimizer_G.step()

                # update discriminator weights
                # individual frame discriminator
                optimizer_D.zero_grad()
                loss_D.backward()
                optimizer_D.step()
                # temporal discriminator
                for s in range(t_scales_act):
                    optimizer_D_T = getattr(modelD.module,
                                            'optimizer_D_T' + str(s))
                    optimizer_D_T.zero_grad()
                    loss_D_T[s].backward()
                    optimizer_D_T.step()

            if opt.debug:
                call([
                    "nvidia-smi", "--format=csv",
                    "--query-gpu=memory.used,memory.free"
                ])

            ############## Display results and errors ##########
            ### print out errors
            if total_steps % opt.print_freq == 0:
                t = (time.time() - iter_start_time) / opt.print_freq
                errors = {
                    k: v.data.item() if not isinstance(v, int) else v
                    for k, v in loss_dict.items()
                }
                for s in range(len(loss_dict_T)):
                    errors.update({
                        k + str(s):
                        v.data.item() if not isinstance(v, int) else v
                        for k, v in loss_dict_T[s].items()
                    })
                visualizer.print_current_errors(epoch, epoch_iter, errors, t)
                visualizer.plot_current_errors(errors, total_steps)

            ### display output images
            if save_fake:
                if opt.label_nc != 0:
                    input_image = util.tensor2label(real_A[0, -1],
                                                    opt.label_nc)
                elif opt.dataset_mode == 'pose':
                    input_image = util.tensor2im(real_A[0, -1, :3])
                    if real_A.size()[2] == 6:
                        input_image2 = util.tensor2im(real_A[0, -1, 3:])
                        input_image[input_image2 != 0] = input_image2[
                            input_image2 != 0]
                else:
                    c = 3 if opt.input_nc == 3 else 1
                    input_image = util.tensor2im(real_A[0, -1, :c],
                                                 normalize=False)
                if opt.use_instance:
                    edges = util.tensor2im(real_A[0, -1, -1:, ...],
                                           normalize=False)
                    input_image += edges[:, :, np.newaxis]

                if opt.add_face_disc:
                    # draw a white box around the detected face region
                    ys, ye, xs, xe = modelD.module.get_face_region(real_A[0,
                                                                          -1:])
                    if ys is not None:
                        input_image[ys, xs:xe, :] = input_image[
                            ye, xs:xe, :] = input_image[
                                ys:ye, xs, :] = input_image[ys:ye, xe, :] = 255

                visual_list = [
                    ('input_image', input_image),
                    ('fake_image', util.tensor2im(fake_B[0, -1])),
                    ('fake_first_image', util.tensor2im(fake_B_first)),
                    ('fake_raw_image', util.tensor2im(fake_B_raw[0, -1])),
                    ('real_image', util.tensor2im(real_B[0, -1])),
                    ('flow_ref', util.tensor2flow(flow_ref[0, -1])),
                    ('conf_ref',
                     util.tensor2im(conf_ref[0, -1], normalize=False))
                ]
                if flow is not None:
                    visual_list += [('flow', util.tensor2flow(flow[0, -1])),
                                    ('weight',
                                     util.tensor2im(weight[0, -1],
                                                    normalize=False))]
                visuals = OrderedDict(visual_list)
                visualizer.display_current_results(visuals, epoch, total_steps)

            ### save latest model
            if total_steps % opt.save_latest_freq == 0:
                visualizer.vis_print(
                    'saving the latest model (epoch %d, total_steps %d)' %
                    (epoch, total_steps))
                modelG.module.save('latest')
                modelD.module.save('latest')
                np.savetxt(iter_path, (epoch, epoch_iter),
                           delimiter=',',
                           fmt='%d')

            if epoch_iter > dataset_size - opt.batchSize:
                epoch_iter = 0
                break

        # end of epoch
        visualizer.vis_print('End of epoch %d / %d \t Time Taken: %d sec' %
                             (epoch, opt.niter + opt.niter_decay,
                              time.time() - epoch_start_time))

        ### save model for this epoch
        if epoch % opt.save_epoch_freq == 0:
            visualizer.vis_print(
                'saving the model at the end of epoch %d, iters %d' %
                (epoch, total_steps))
            modelG.module.save('latest')
            modelD.module.save('latest')
            modelG.module.save(epoch)
            modelD.module.save(epoch)
            np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')

        ### linearly decay learning rate after certain iterations
        if epoch > opt.niter:
            modelG.module.update_learning_rate(epoch)
            modelD.module.update_learning_rate(epoch)

        ### gradually grow training sequence length
        if (epoch % opt.niter_step) == 0:
            data_loader.dataset.update_training_batch(epoch // opt.niter_step)
            modelG.module.update_training_batch(epoch // opt.niter_step)

        ### finetune all scales
        if (opt.n_scales_spatial > 1) and (opt.niter_fix_global != 0) and (
                epoch == opt.niter_fix_global):
            modelG.module.update_fixed_params()
Exemple #15
0
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from util import html
from collections import OrderedDict

# Parse test-time options and pin the loader to deterministic,
# single-image behaviour (no worker threads, no shuffling, no flips).
opt = TestOptions().parse()
opt.nThreads = 1   # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)
# create website: one HTML results page per (phase, epoch) under results_dir
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
# test
# Single-channel models save a post-processed result plus a colourised
# rendering; multi-channel models save the raw fake_B image.
# NOTE(review): save_list and webpage are unused in the visible lines —
# the snippet appears truncated; presumably visuals are saved below.
if opt.output_nc == 1:
    save_list = ['fake_B_postprocessed', 'fake_B_color']
else:
    save_list = ['fake_B']

for i, data in enumerate(dataset):
    if i >= opt.how_many:  # opt.how_many caps the number of test images
        break
    model.set_input(data)
    model.test()  # forward pass without gradient tracking
    visuals = model.get_current_visuals()
Exemple #16
0
def train(opt):
    """Full training loop for the class-conditional translation model.

    Handles resume bookkeeping (``iter.txt``), learning-rate catch-up on
    resume, optional fixed per-class sampling when there is no display,
    alternating D/G updates, periodic loss printing/plotting, image
    display, and latest/per-epoch checkpointing.

    Parameters
    ----------
    opt : parsed TrainOptions namespace (project-specific).
    """
    # (epoch, epoch_iter) of the latest checkpoint are persisted here
    iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')

    if opt.continue_train:
        if opt.which_epoch == 'latest':
            try:
                start_epoch, epoch_iter = np.loadtxt(iter_path,
                                                     delimiter=',',
                                                     dtype=int)
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  np.loadtxt raises OSError when
            # the file is missing and ValueError when it is malformed.
            except (OSError, ValueError):
                start_epoch, epoch_iter = 1, 0
        else:
            start_epoch, epoch_iter = int(opt.which_epoch), 0

        print('Resuming from epoch %d at iteration %d' %
              (start_epoch, epoch_iter))
        # replay the LR decays that would already have happened by start_epoch
        for update_point in opt.decay_epochs:
            if start_epoch < update_point:
                break

            opt.lr *= opt.decay_gamma
    else:
        start_epoch, epoch_iter = 0, 0

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)

    model = create_model(opt)
    visualizer = Visualizer(opt)

    total_steps = (start_epoch) * dataset_size + epoch_iter

    # phase offsets so the `% freq == delta` checks still fire after a
    # mid-epoch resume
    display_delta = total_steps % opt.display_freq
    print_delta = total_steps % opt.print_freq
    save_delta = total_steps % opt.save_latest_freq
    bSize = opt.batchSize

    # in case there's no display, sample one image from each class to
    # render as a matrix image after every save/epoch
    if opt.display_id == 0:
        dataset.dataset.set_sample_mode(True)
        dataset.num_workers = 1
        # collect one fixed batch per class, concatenated along dim 0
        for i, data in enumerate(dataset):
            if i * opt.batchSize >= opt.numClasses:
                break
            if i == 0:
                sample_data = data
            else:
                for key, value in data.items():
                    if torch.is_tensor(data[key]):
                        sample_data[key] = torch.cat(
                            (sample_data[key], data[key]), 0)
                    else:
                        sample_data[key] = sample_data[key] + data[key]
        dataset.num_workers = opt.nThreads
        dataset.dataset.set_sample_mode(False)

    for epoch in range(start_epoch, opt.epochs):
        epoch_start_time = time.time()
        if epoch != start_epoch:
            epoch_iter = 0
        for i, data in enumerate(dataset, start=epoch_iter):
            iter_start_time = time.time()
            total_steps += opt.batchSize
            epoch_iter += opt.batchSize

            # whether to collect output images this iteration
            save_fake = (total_steps % opt.display_freq
                         == display_delta) and (opt.display_id > 0)

            ############## Network Pass ########################
            model.set_inputs(data)
            disc_losses = model.update_D()
            gen_losses, gen_in, gen_out, rec_out, cyc_out = model.update_G(
                infer=save_fake)
            loss_dict = dict(gen_losses, **disc_losses)
            ##################################################

            ############## Display results and errors ##########
            ### print out errors
            if total_steps % opt.print_freq == print_delta:
                # convert loss tensors to plain numbers for print/plot
                errors = {
                    k: v.item()
                    if not (isinstance(v, float) or isinstance(v, int)) else v
                    for k, v in loss_dict.items()
                }
                t = (time.time() - iter_start_time) / opt.batchSize
                visualizer.print_current_errors(epoch + 1, epoch_iter, errors,
                                                t)
                if opt.display_id > 0:
                    visualizer.plot_current_errors(
                        epoch,
                        float(epoch_iter) / dataset_size, opt, errors)

            ### display output images
            if save_fake and opt.display_id > 0:
                # indexing convention: entries [0:bSize] of a batch belong to
                # class A, entries [bSize:] to class B (hence data[0] vs
                # data[bSize] below) — consistent with set_inputs upstream
                class_a_suffix = ' class {}'.format(data['A_class'][0])
                class_b_suffix = ' class {}'.format(data['B_class'][0])
                classes = None

                visuals = OrderedDict()
                visuals_A = OrderedDict([('real image' + class_a_suffix,
                                          util.tensor2im(gen_in.data[0]))])
                visuals_B = OrderedDict([('real image' + class_b_suffix,
                                          util.tensor2im(gen_in.data[bSize]))])

                A_out_vis = OrderedDict([('synthesized image' + class_b_suffix,
                                          util.tensor2im(gen_out.data[0]))])
                B_out_vis = OrderedDict([('synthesized image' + class_a_suffix,
                                          util.tensor2im(gen_out.data[bSize]))
                                         ])
                # reconstruction / cycle views only exist when their loss
                # terms are enabled
                if opt.lambda_rec > 0:
                    A_out_vis.update([('reconstructed image' + class_a_suffix,
                                       util.tensor2im(rec_out.data[0]))])
                    B_out_vis.update([('reconstructed image' + class_b_suffix,
                                       util.tensor2im(rec_out.data[bSize]))])
                if opt.lambda_cyc > 0:
                    A_out_vis.update([('cycled image' + class_a_suffix,
                                       util.tensor2im(cyc_out.data[0]))])
                    B_out_vis.update([('cycled image' + class_b_suffix,
                                       util.tensor2im(cyc_out.data[bSize]))])

                visuals_A.update(A_out_vis)
                visuals_B.update(B_out_vis)
                visuals.update(visuals_A)
                visuals.update(visuals_B)

                ncols = len(visuals_A)
                visualizer.display_current_results(visuals, epoch, classes,
                                                   ncols)

            ### save latest model
            if total_steps % opt.save_latest_freq == save_delta:
                print('saving the latest model (epoch %d, total_steps %d)' %
                      (epoch + 1, total_steps))
                model.save('latest')
                np.savetxt(iter_path, (epoch, epoch_iter),
                           delimiter=',',
                           fmt='%d')
                if opt.display_id == 0:
                    # no visdom display: render the fixed per-class sample
                    # grid to disk instead
                    model.eval()
                    visuals = model.inference(sample_data)
                    visualizer.save_matrix_image(visuals, 'latest')
                    model.train()

        # end of epoch
        iter_end_time = time.time()
        print('End of epoch %d / %d \t Time Taken: %d sec' %
              (epoch + 1, opt.epochs, time.time() - epoch_start_time))

        ### save model for this epoch
        if (epoch + 1) % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d' %
                  (epoch + 1, total_steps))
            model.save('latest')
            model.save(epoch + 1)
            np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')
            if opt.display_id == 0:
                model.eval()
                visuals = model.inference(sample_data)
                visualizer.save_matrix_image(visuals, epoch + 1)
                model.train()

        ### multiply learning rate by opt.decay_gamma after certain iterations
        if (epoch + 1) in opt.decay_epochs:
            model.update_learning_rate()
Exemple #17
0
import os
from options.test_options import TestOptions
from data.data_loader import DataLoader
from models.combogan_model import ComboGANModel
from util.visualizer import Visualizer
from util import html


# Parse test options; ComboGAN evaluation is strictly single-threaded,
# batch-size-1 and unflipped so results are deterministic.
opt = TestOptions().parse()
opt.nThreads = 1   # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.no_flip = True  # no flip

dataset = DataLoader(opt)
model = ComboGANModel(opt)
visualizer = Visualizer(opt)
# create website: results page keyed by phase and (integer) epoch
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%d' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %d' % (opt.name, opt.phase, opt.which_epoch))
# store images for matrix visualization
# NOTE(review): vis_buffer and webpage are not used in the visible lines;
# the snippet appears truncated after the loop below.
vis_buffer = []

# test
for i, data in enumerate(dataset):
    if i >= opt.how_many:  # opt.how_many caps the number of test images
        break
    model.set_input(data)
    model.test()  # inference-only forward pass
    visuals = model.get_current_visuals(testing=True)
    img_path = model.get_image_paths()
    print('process image... %s' % img_path)
# Build the perceptual-distance model in training mode.
# NOTE(review): `dm`, `dl` and `opt` are defined above this snippet (not
# visible here); this looks like the LPIPS 2AFC training setup — confirm.
model = dm.DistModel()
model.initialize(model=opt.model,
                 net=opt.net,
                 use_gpu=opt.use_gpu,
                 is_train=True)

# load data from all training sets
data_loader = dl.CreateDataLoader(opt.datasets,
                                  dataset_mode='2afc',
                                  batch_size=opt.batch_size,
                                  serial_batches=False)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
# NOTE(review): presumably the number of batches per epoch — verify
D = len(dataset)
print('Loading %i instances from' % dataset_size, opt.datasets)
visualizer = Visualizer(opt)

total_steps = 0
# plain-text training log kept next to the checkpoints ('w+' truncates any
# existing log on start)
fid = open(os.path.join(opt.checkpoints_dir, opt.name, 'train_log.txt'), 'w+')
for epoch in range(1, opt.nepoch + opt.nepoch_decay + 1):
    epoch_start_time = time.time()
    for i, data in enumerate(dataset):
        iter_start_time = time.time()
        total_steps += opt.batch_size
        epoch_iter = total_steps - dataset_size * (epoch - 1)

        model.set_input(data)
        model.optimize_parameters()

        if total_steps % opt.display_freq == 0:
            visualizer.display_current_results(model.get_current_visuals(),
Exemple #19
0
# Make print_freq a multiple of batchSize so "total_steps % print_freq"
# checks can actually fire (total_steps advances in batchSize increments).
opt.print_freq = lcm(opt.print_freq, opt.batchSize)    
if opt.debug:
    # tiny smoke-test configuration: report every step, single epoch,
    # at most 10 images
    opt.display_freq = 1
    opt.print_freq = 1
    opt.niter = 1
    opt.niter_decay = 0
    opt.max_dataset_size = 10

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)

model = create_model(opt)
visualizer = Visualizer(opt)
if opt.fp16:    
    # Mixed precision via NVIDIA apex: amp patches the model and both
    # optimizers, then the model is wrapped in DataParallel here.  In the
    # fp32 branch the model is accessed through `.module`, so it is
    # presumably already DataParallel-wrapped by create_model — confirm.
    from apex import amp
    if(opt.niter_fix_global > 0):
        # NOTE(review): optimizer_G_sap presumably updates only part of G
        # while the global generator stays frozen — confirm in model code.
        model, [optimizer_G, optimizer_D] = amp.initialize(model, [model.optimizer_G_sap, model.optimizer_D], opt_level='O1')   
    else:
        model, [optimizer_G, optimizer_D] = amp.initialize(model, [model.optimizer_G, model.optimizer_D], opt_level='O1')           
    model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
else:
    if(opt.niter_fix_global > 0):
        optimizer_G, optimizer_D = model.module.optimizer_G_sap, model.module.optimizer_D
    else:
        optimizer_G, optimizer_D = model.module.optimizer_G, model.module.optimizer_D
    if(opt.race):
        # optional extra optimizer, only present when opt.race is set
        # NOTE(review): semantics defined in the model class — confirm.
        optimizer_R = model.module.optimizer_R
Exemple #20
0
import time
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer

# Standard options -> loader -> model -> visualizer training setup.
# (The training loop that follows is truncated in this view.)
opt = TrainOptions().parse()
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)

model = create_model(opt)
visualizer = Visualizer(opt)
total_steps = 0  # global iteration counter, advanced by batchSize per step

for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    epoch_iter = 0

    for i, data in enumerate(dataset):
        iter_start_time = time.time()
        visualizer.reset()
        total_steps += opt.batchSize
        epoch_iter += opt.batchSize
        model.set_input(data)
        model.optimize_parameters()

        if total_steps % opt.display_freq == 0:
            save_result = total_steps % opt.update_html_freq == 0
            visualizer.display_current_results(model.get_current_visuals(),
Exemple #21
0
# import torch.backends.cudnn as cudnn
import time
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer

opt = TrainOptions().parse()  # set CUDA_VISIBLE_DEVICES before import torch
dataset = create_dataset(opt)
dataset_size = len(dataset)
print('#training data = %d' % dataset_size)
model = create_model(opt)
model.setup(opt)  # load/print networks; create LR schedulers
visualizer = Visualizer(opt)
total_steps = 0  # global iteration counter across all epochs

for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    save_result = True
    iter_data_time = time.time()

    for i, data in enumerate(dataset):
        iter_start_time = time.time()
        total_steps += opt.batch_size
        # derive the in-epoch iteration from the global step counter
        epoch_iter = total_steps - dataset_size * (epoch - opt.epoch_count)
        model.set_input(data)
        # model may reject the current sample; exact condition is defined
        # in the model class — NOTE(review): confirm skip() semantics.
        if model.skip():
            continue
        model.update_G()
        model.update_D()
        model.check_nan_inf()  # guard: detect NaN/Inf after the updates
Exemple #22
0
class TrainModel():
    """Generic training harness for image-regression models.

    Owns checkpoint/log folders, seeding, cudnn configuration, the
    epoch/iteration loop, learning-rate schedules, validation and
    visualisation.  Concrete models subclass this and implement the
    hook methods that are `pass`-bodied here (``train_batch``,
    ``get_checkpoint``, ``get_current_errors``, ``get_current_visuals``,
    ...).
    """

    def name(self):
        """Human-readable name used by the framework."""
        return 'Train Model'

    def initialize(self, opt):
        """Configure paths, criteria, logging, seeds and cudnn from ``opt``."""
        self.opt = opt
        # imageSize may be given as a single value; duplicate it to (H, W)
        self.opt.imageSize = self.opt.imageSize if len(self.opt.imageSize) == 2 else self.opt.imageSize * 2
        self.gpu_ids = ''
        self.batchSize = self.opt.batchSize
        self.checkpoints_path = os.path.join(self.opt.checkpoints,
                                             self.opt.name)
        self.scheduler = None
        self.create_save_folders()

        # criterion to evaluate the val split
        self.criterion_eval = MSEScaledError()
        self.mse_scaled_error = MSEScaledError()

        self.opt.print_freq = self.opt.display_freq

        # visualizer
        self.visualizer = Visualizer(opt)

        if self.opt.resume and self.opt.display_id > 0:
            self.load_plot_data()
        elif opt.train:
            self.start_epoch = 1
            self.best_val_error = 999.9  # sentinel: any real RMSE is lower

        # Logfiles are opened in append mode so resumed runs keep history
        self.logfile = open(os.path.join(self.checkpoints_path,
                                         'logfile.txt'), 'a')
        if opt.validate:
            self.logfile_val = open(os.path.join(self.checkpoints_path,
                                                 'logfile_val.txt'), 'a')

        # Fixed seed so every run is reproducible
        self.random_seed = 123
        random.seed(self.random_seed)
        torch.cuda.manual_seed_all(self.random_seed)
        torch.manual_seed(self.random_seed)
        if opt.cuda:
            self.cuda = torch.device('cuda:0') # set externally. ToDo: set internally
            torch.cuda.manual_seed(self.random_seed)

        # uses the inbuilt cudnn auto-tuner to find the fastest convolution algorithms.
        cudnn.benchmark = self.opt.use_cudnn_benchmark  # using too much memory - use when not in astroboy
        cudnn.enabled = True

        if not opt.train and not opt.test and not opt.resume:
            raise Exception("You have to set --train or --test")

        # BUGFIX: is_available is a function; without the () the attribute
        # was always truthy, so the "No GPU found" check could never fire.
        if torch.cuda.is_available() and not opt.cuda:
            print("WARNING: You have a CUDA device, so you should run WITHOUT --cpu")
        if not torch.cuda.is_available() and opt.cuda:
            raise Exception("No GPU found, run WITH --cpu")

    def set_input(self, input):
        """Stash the current input batch for the next training step."""
        self.input = input

    def create_network(self):
        """Build and return the generator network defined by the options."""
        netG = networks.define_G(input_nc=self.opt.input_nc,
                                 output_nc=self.opt.output_nc, ngf=64,
                                 net_architecture=self.opt.net_architecture,
                                 opt=self.opt,
                                 gpu_ids='')

        if self.opt.cuda:
            netG = netG.cuda()
        return netG

    def get_optimizerG(self, network, lr, weight_decay=0.0):
        """Adam optimizer over the network's trainable parameters only."""
        generator_params = filter(lambda p: p.requires_grad, network.parameters())
        return optim.Adam(generator_params, lr=lr, betas=(self.opt.beta1, 0.999), weight_decay=weight_decay)

    def get_checkpoint(self, epoch):
        """Hook: return the serialisable checkpoint dict (subclass)."""
        pass

    def train_batch(self):
        """Hook: run one optimisation step (each subclass implements it)."""
        pass

    def display_gradients_norms(self):
        """Hook: report gradient norms (placeholder)."""
        return 'nothing yet'

    def get_current_errors_display(self):
        """Hook: errors formatted for the visualizer (subclass)."""
        pass

    def get_regression_criterion(self):
        """Map the regression_loss option to a torch criterion."""
        if self.opt.regression_loss == 'L1':
            return nn.L1Loss()

    def get_variable(self, tensor, requires_grad=False):
        """Wrap a tensor in a Variable, moving it to the GPU when enabled."""
        if self.opt.cuda:
            tensor = tensor.cuda()
        return Variable(tensor, requires_grad=requires_grad)

    def restart_variables(self):
        """Reset per-epoch counters/accumulators."""
        self.it = 0
        self.rmse = 0
        self.n_images = 0

    def train(self, data_loader, val_loader=None):
        """Outer training loop: iterate epochs, train batches, validate,
        checkpoint, and close the logfiles at the end."""
        self.data_loader = data_loader
        self.len_data_loader = len(self.data_loader)
        self.total_iter = 0
        for epoch in range(self.start_epoch, self.opt.nEpochs):
            self.restart_variables()
            self.data_iter = iter(self.data_loader)
            self.pbar = range(self.len_data_loader)
            for self.it in self.pbar:
                # SGD uses an explicit scheduler stepped per iteration
                if self.opt.optim == 'SGD':
                    self.scheduler.step()

                self.total_iter += self.opt.batchSize

                self.netG.train(True)

                iter_start_time = time.time()

                self.train_batch()

                d_time = (time.time() - iter_start_time) / self.opt.batchSize

                # print errors (throttled by print_freq)
                self.print_current_errors(epoch, d_time)

                # display errors/images (throttled by display_freq)
                self.display_current_results(epoch)

                # Validate (throttled by val_freq)
                self.evaluate(val_loader, epoch)

            # save checkpoint (every save_checkpoint_freq epochs)
            self.save_checkpoint(epoch, is_best=0)

        self.logfile.close()

        if self.opt.validate:
            self.logfile_val.close()

    def get_next_batch(self):
        """Pull the next (rgb, depth) pair and load it into self.input."""
        # BUGFIX: iterator.next() is Python-2-only; use the builtin next()
        rgb_cpu, depth_cpu = next(self.data_iter)
        self.input.data.resize_(rgb_cpu.size()).copy_(rgb_cpu)

    def apply_valid_pixels_mask(self, *data, value=0.0):
        """Zero out pixels where data[1] <= value; return masked tensors
        and the count of valid pixels."""
        mask = (data[1].data > value).to(self.cuda, dtype=torch.float32)

        masked_data = []
        for d in data:
            masked_data.append(d * mask)

        return masked_data, mask.sum()

    def update_learning_rate(self, epoch):
        """Linear LR decay for both G and D optimizers (cGAN mode only)."""
        if epoch > self.opt.niter_decay and self.opt.use_cgan:  # but independs if conditional or not
            [self.opt.d_lr, self.optimD] = self._update_learning_rate(self.opt.niter_decay, self.opt.d_lr, self.optimD)
            [self.opt.lr, self.optimG] = self._update_learning_rate(self.opt.niter_decay, self.opt.lr, self.optimG)

    def _update_learning_rate(self, niter_decay, old_lr, optimizer):
        """Step the LR linearly towards zero over niter_decay epochs."""
        # (renamed parameter: `optim` shadowed the torch.optim module)
        lr = old_lr - old_lr / niter_decay
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        return lr, optimizer

    # CONTROL FUNCTIONS OF THE ARCHITECTURE

    def _get_plot_data_filename(self, phase):
        """Pickle filename for a phase's plot data ('train' has no suffix)."""
        return os.path.join(self.checkpoints_path,
                            'plot_data' + ('' if phase == 'train' else '_' + phase) + '.p')

    def save_static_plot_image(self):
        """Placeholder (BUGFIX: was missing `self`, so any call raised)."""
        return None

    def save_interactive_plot_image(self):
        """Placeholder (BUGFIX: was missing `self`, so any call raised)."""
        return None

    def _save_plot_data(self, plot_data, filename):
        """Pickle plot data to disk."""
        pickle.dump(plot_data, open(filename, 'wb'))

    def save_plot_data(self):
        """Persist the visualizer's train (and val) plot histories."""
        self._save_plot_data(self.visualizer.plot_data,
                             self._get_plot_data_filename('train'))
        if self.opt.validate and self.total_iter > self.opt.val_freq:
            self._save_plot_data(self.visualizer.plot_data_val,
                                 self._get_plot_data_filename('val'))

    def _load_plot_data(self, filename):
        """Unpickle plot data; fail loudly when the file is missing."""
        if not os.path.isfile(filename):
            raise Exception('In _load_plot_data file {} doesnt exist.'.format(filename))
        else:
            return pickle.load(open(filename, "rb"))

    def load_plot_data(self):
        """Restore plot histories when resuming with a live display."""
        self.visualizer.plot_data = self._load_plot_data(self._get_plot_data_filename('train'))
        if self.opt.validate:
            self.visualizer.plot_data_val = self._load_plot_data(self._get_plot_data_filename('val'))

    def save_checkpoint(self, epoch, is_best):
        """Write a checkpoint every save_checkpoint_freq epochs, or when
        the model is the best so far."""
        if epoch % self.opt.save_checkpoint_freq == 0 or is_best:
            checkpoint = self.get_checkpoint(epoch)
            checkpoint_filename = '{}/{:04}.pth.tar'.format(self.checkpoints_path, epoch)
            self._save_checkpoint(checkpoint, is_best=is_best, filename=checkpoint_filename)

    def _save_checkpoint(self, state, is_best, filename):
        """Serialise `state`, mirror it to latest.pth.tar (and best.pth.tar)."""
        print("Saving checkpoint...")
        torch.save(state, filename)
        shutil.copyfile(filename, os.path.join(os.path.dirname(filename), 'latest.pth.tar'))

        if is_best:
            shutil.copyfile(filename, os.path.join(self.checkpoints_path, 'best.pth.tar'))

    def create_save_folders(self):
        """Ensure the checkpoints directory exists for training runs."""
        if self.opt.train:
            # portable replacement for os.system('mkdir -p ...')
            os.makedirs(self.checkpoints_path, exist_ok=True)

    def print_save_options(self):
        """Print all options and persist them to options.txt."""
        options_file = open(os.path.join(self.checkpoints_path,
                                         'options.txt'), 'w')
        args = dict((arg, getattr(self.opt, arg)) for arg in dir(self.opt) if not arg.startswith('_'))
        print('---Options---')
        for k, v in sorted(args.items()):
            option = '{}: {}'.format(k, v)
            # print options
            print(option)
            # save options in file
            options_file.write(option + '\n')

        options_file.close()

    def mean_errors(self):
        """Hook: aggregate per-batch errors (subclass)."""
        pass

    def get_current_errors(self):
        """Hook: current error dict for printing (subclass)."""
        pass

    def print_current_errors(self, epoch, d_time):
        """Print (throttled by print_freq) the current training errors."""
        if self.total_iter % self.opt.print_freq == 0:
            self.mean_errors()
            errors = self.get_current_errors()
            message = self.visualizer.print_errors(errors, epoch, self.it,
                                            self.len_data_loader, d_time)

            print(message)

    def get_current_visuals(self):
        """Hook: current images for display (subclass)."""
        pass

    def display_current_results(self, epoch):
        """Push errors/images to the visualizer and mirror to the logfile
        (throttled by display_freq; needs a live display_id)."""
        if self.opt.display_id > 0 and self.total_iter % self.opt.display_freq == 0:

            errors = self.get_current_errors_display()
            self.visualizer.display_errors(errors, epoch,
                                           float(self.it) / self.len_data_loader)

            visuals = self.get_current_visuals()

            self.visualizer.display_images(visuals, epoch)

            # save printed errors to logfile
            self.visualizer.save_errors_file(self.logfile)

    def evaluate(self, data_loader, epoch):
        """Run validation every val_freq iterations; track the best model."""
        if self.opt.validate and self.total_iter % self.opt.val_freq == 0:
            val_error = self.get_eval_error(data_loader, self.netG,
                                            self.criterion_eval, epoch)

            # NOTE(review): self.rmse_epoch is presumably set by the
            # subclass's mean_errors() — confirm before relying on it here.
            errors = OrderedDict([('RMSE', self.rmse_epoch), ('RMSEVal', val_error)])
            self.visualizer.display_errors(errors, epoch, float(self.it) / self.len_data_loader, phase='val')
            message = self.visualizer.print_errors(errors, epoch, self.it, len(data_loader), 0)
            print('[Validation] ' + message)
            self.visualizer.save_errors_file(self.logfile_val)
            self.save_plot_data()
            # save best models
            is_best = self.best_val_error > val_error
            if is_best:
                print("Updating BEST model (epoch {}, iters {})\n".format(epoch, self.total_iter))
                self.best_val_error = val_error
                self.save_checkpoint(epoch, is_best)

    def get_eval_error(self, val_loader, model, criterion, epoch):
        """
        Validate every self.opt.val_freq epochs: masked RMSE averaged
        over the validation loader.  Keeps dropout active (train(False)
        only disables training-mode layers like batch norm updates).
        """
        cumulated_rmse = 0
        batchSize = 1
        input = self.get_variable(torch.FloatTensor(batchSize, 3, self.opt.imageSize[0], self.opt.imageSize[1]), requires_grad=False)
        mask = self.get_variable(torch.FloatTensor(batchSize, 1, self.opt.imageSize[0], self.opt.imageSize[1]), requires_grad=False)
        target = self.get_variable(torch.FloatTensor(batchSize, 1, self.opt.imageSize[0], self.opt.imageSize[1]))
        model.train(False)
        pbar_val = tqdm(val_loader)
        for i, (rgb_cpu, depth_cpu) in enumerate(pbar_val):
            pbar_val.set_description('[Validation]')
            input.data.resize_(rgb_cpu.size()).copy_(rgb_cpu)
            target.data.resize_(depth_cpu.size()).copy_(depth_cpu)

            if self.opt.use_padding:
                from torch.nn import ReflectionPad2d

                self.opt.padding = self.get_padding_image(input)

                input = ReflectionPad2d(self.opt.padding)(input)
                target = ReflectionPad2d(self.opt.padding)(target)

            # get output of the network
            with torch.no_grad():
                outG = model.forward(input)
            # apply mask of valid (positive-depth) pixels
            nomask_outG = outG.data   # for displaying purposes
            mask_ByteTensor = self.get_mask(target.data)
            mask.data.resize_(mask_ByteTensor.size()).copy_(mask_ByteTensor)
            outG = outG * mask
            target = target * mask
            cumulated_rmse += sqrt(criterion(outG, target, mask, no_mask=False))

            if(i == 1):
                self.visualizer.display_images(OrderedDict([('input', input.data),
                                                            ('gt', target.data),
                                                            ('output', nomask_outG)]), epoch='val {}'.format(epoch), phase='val')

        return cumulated_rmse / len(val_loader)

    def get_mask(self, data, value=0.0):
        """Boolean mask of entries in `data` strictly greater than `value`.

        BUGFIX: previously returned `(target.data > 0.0)` — `target` is not
        defined in this scope (NameError) and the `value` parameter was
        ignored.  Callers pass the tensor data directly (see get_eval_error).
        """
        return (data > value)

    def get_padding(self, dim):
        """Padding needed to grow `dim` to the next multiple of 32
        (always pads, even when dim is already a multiple of 32)."""
        final_dim = (dim // 32 + 1) * 32
        return final_dim - dim

    def get_padding_image(self, img):
        """(left, right, top, bottom) padding for an NCHW image tensor."""
        h, w = img.size()[2:]
        w_pad, h_pad = self.get_padding(w), self.get_padding(h)

        pwr = w_pad // 2
        pwl = w_pad - pwr
        phb = h_pad // 2
        phu = h_pad - phb

        # pwl, pwr, phu, phb
        return (pwl, pwr, phu, phb)

    def adjust_learning_rate(self, initial_lr, optimizer, epoch):
        """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
        lr = initial_lr * (0.1 ** (epoch // self.opt.niter_decay))
        if epoch % self.opt.niter_decay == 0:
            print("LEARNING RATE DECAY HERE: lr = {}".format(lr))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
Exemple #23
0
# Make print_freq a multiple of batchSize so "total_steps % print_freq"
# checks can actually fire (total_steps advances in batchSize increments).
opt.print_freq = lcm(opt.print_freq, opt.batchSize)
if opt.debug:
    # tiny smoke-test configuration
    opt.display_freq = 1
    opt.print_freq = 1
    opt.niter = 1
    opt.niter_decay = 0
    opt.max_dataset_size = 10

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)

model = create_model(opt)
visualizer = Visualizer(opt)
if opt.fp16:
    # NVIDIA apex mixed precision: patch model + optimizers, then wrap in
    # DataParallel (the fp32 branch accesses the model through .module, so
    # it is presumably already wrapped by create_model — confirm).
    from apex import amp
    model, [optimizer_G, optimizer_D
            ] = amp.initialize(model, [model.optimizer_G, model.optimizer_D],
                               opt_level='O1')
    model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
else:
    optimizer_G, optimizer_D = model.module.optimizer_G, model.module.optimizer_D

# NOTE(review): start_epoch/epoch_iter are defined above this snippet
# (not visible), presumably restored from iter.txt when resuming.
total_steps = (start_epoch - 1) * dataset_size + epoch_iter

# phase offsets so the "% freq == delta" checks line up after a
# mid-epoch resume
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq
Exemple #24
0
    def initialize(self, opt):
        """Configure paths, criteria, logging, seeds and cudnn from ``opt``.

        Sets up the checkpoints folder, evaluation criteria, visualizer,
        resume state, logfiles, deterministic seeding, and validates the
        CPU/GPU option combination.
        """
        self.opt = opt
        # imageSize may be given as a single value; duplicate it to (H, W)
        self.opt.imageSize = self.opt.imageSize if len(self.opt.imageSize) == 2 else self.opt.imageSize * 2
        self.gpu_ids = ''
        self.batchSize = self.opt.batchSize
        self.checkpoints_path = os.path.join(self.opt.checkpoints,
                                             self.opt.name)
        self.scheduler = None
        self.create_save_folders()

        # criterion to evaluate the val split
        self.criterion_eval = MSEScaledError()
        self.mse_scaled_error = MSEScaledError()

        self.opt.print_freq = self.opt.display_freq

        # visualizer
        self.visualizer = Visualizer(opt)

        if self.opt.resume and self.opt.display_id > 0:
            self.load_plot_data()
        elif opt.train:
            self.start_epoch = 1
            self.best_val_error = 999.9  # sentinel: any real error is lower

        # Logfiles in append mode so resumed runs keep their history
        self.logfile = open(os.path.join(self.checkpoints_path,
                                         'logfile.txt'), 'a')
        if opt.validate:
            self.logfile_val = open(os.path.join(self.checkpoints_path,
                                                 'logfile_val.txt'), 'a')

        # Fixed seed so every run is reproducible
        self.random_seed = 123
        random.seed(self.random_seed)
        torch.cuda.manual_seed_all(self.random_seed)
        torch.manual_seed(self.random_seed)
        if opt.cuda:
            self.cuda = torch.device('cuda:0') # set externally. ToDo: set internally
            torch.cuda.manual_seed(self.random_seed)

        # uses the inbuilt cudnn auto-tuner to find the fastest convolution algorithms.
        cudnn.benchmark = self.opt.use_cudnn_benchmark  # using too much memory - use when not in astroboy
        cudnn.enabled = True

        if not opt.train and not opt.test and not opt.resume:
            raise Exception("You have to set --train or --test")

        # BUGFIX: is_available is a function; without the () the attribute
        # was always truthy, so the "No GPU found" check could never fire.
        if torch.cuda.is_available() and not opt.cuda:
            print("WARNING: You have a CUDA device, so you should run WITHOUT --cpu")
        if not torch.cuda.is_available() and opt.cuda:
            raise Exception("No GPU found, run WITH --cpu")
Exemple #25
0
import util.util as util
from util.visualizer import Visualizer
from util import html
import torch

# Enable the cuDNN autotuner: picks the fastest convolution algorithms
# for the fixed input sizes used at test time.
torch.backends.cudnn.benchmark = True

# Test-time configuration: single-threaded, one image per batch,
# deterministic ordering, no flip augmentation.
opt = TestOptions().parse(save=False)
opt.nThreads = 1  # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
visualizer = Visualizer(opt)

model = create_model(opt)
if opt.verbose:
    print(model)

# Run inference over the test set, optionally capped at max_test_num
# (a value <= 0 means "no cap").
for i, data in enumerate(dataset):
    if opt.max_test_num > 0:
        if i >= opt.max_test_num:
            break

    # NOTE(review): assumes each batch dict carries 'ID_RGB', 'ID' and
    # 'Image' entries — confirm against the dataset implementation.
    generated = model.inference(data['ID_RGB'], data['ID'], data['Image'])

    # batchSize == 1, so element 0 is the whole batch.
    generated_image = util.tensor2im(generated.data[0])

    img_path = data['path'][0]
Exemple #26
0
    # Learning-rate schedule settings: 'lambda' policy with decay every
    # lr_decay_iters iterations.
    lr_policy = 'lambda'
    lr_decay_iters = 50

    train_dataset = TrainDataset()
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=4)

    dataset_size = len(train_dataset)
    print('#training images = %d' % dataset_size)

    model = StyleModel(args)
    print("model [%s] was created" % (model.name))

    visualizer = Visualizer(display_id, display_port, args)
    total_steps = 0  # total training iterations across all epochs

    # niter epochs at the base LR, then niter_decay epochs of decay.
    for epoch in range(epoch_count, niter + niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0

        for i, data in enumerate(train_dataloader):
            print('epoch %d, data %d' % (epoch, i * batch_size))
            iter_start_time = time.time()

            # for 15 steps, print loss, save images, display images, display losses
            # Only measure data-loading time on the iterations we report.
            if total_steps % print_freq == 0:
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
Exemple #27
0
import time
from options.train_options import TrainOptions
# Parse options first: parsing sets CUDA_VISIBLE_DEVICES, which must be in
# place before torch is loaded by the imports below.
opt = TrainOptions().parse()  # set CUDA_VISIBLE_DEVICES before import torch

from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer

# Load data: the loader exposes a paired and an unpaired split.
data_loader = CreateDataLoader(opt)
dataset_paired, paired_dataset_size = data_loader.load_data_pair()
dataset_unpaired, unpaired_dataset_size = data_loader.load_data_unpair()

# Create Model
model = create_model(opt)
visualizer = Visualizer(opt)

# Start Training
print('Start training')

#################################################
# Step1: Autoencoder
#################################################
print('step 1')
pre_epoch_AE = 5  # number of iteration for autoencoder pre-training
total_steps = 0
for epoch in range(1, pre_epoch_AE + 1):
    for i, (images_a, images_b) in enumerate(dataset_paired):
        iter_start_time = time.time()
        total_steps += opt.batchSize
        # NOTE(review): epoch_iter is derived from the UNPAIRED dataset
        # size even though this loop iterates the paired split — confirm
        # this is intentional.
        epoch_iter = total_steps - unpaired_dataset_size * (epoch - 1)
Exemple #28
0
	# Deterministic single-item evaluation settings for the test loader.
	test_opt.batch_size = 1
	test_opt.num_threads = 1
	test_opt.serial_batches = True
	test_opt.no_flip = True

	test_data_loader = torch.utils.data.DataLoader(test_dataset,
        batch_size=test_opt.batch_size, shuffle=False, num_workers=test_opt.num_threads, pin_memory=True)

	# NOTE(review): len() of a DataLoader counts batches, not images, so
	# these "image" counts are only exact when batch_size == 1 — confirm.
	train_dataset_size = len(train_data_loader)
	print('#training images = %d' % train_dataset_size)
	test_dataset_size = len(test_data_loader)
	print('#test images = %d' % test_dataset_size)

	model = create_model(train_opt, train_dataset)
	model.setup(train_opt)
	visualizer = Visualizer(train_opt)
	total_steps = 0
	for epoch in range(train_opt.epoch_count, train_opt.niter + 1):
		model.train()
		epoch_start_time = time.time()
		iter_data_time = time.time()
		epoch_iter = 0
		model.init_eval()
		# Manual iteration so that per-sample failures can be skipped
		# without aborting the epoch.
		iterator = iter(train_data_loader)
		while True:
			try:  # Some images couldn't sample more than defined nP points under Stereo sampling
				next_batch = next(iterator)
			except IndexError:
				print("Catch and Skip!")
				continue
			except StopIteration:
Exemple #29
0
from collections import OrderedDict

import data
from options.test_options import TestOptions
from models.label_pix2pix_model import LabelPix2PixModel
from util.visualizer import Visualizer
from util import html

opt = TestOptions().parse()

dataloader = data.create_dataloader(opt)

# Build the model and switch to eval mode (freezes dropout / batch-norm
# statistics) for inference.
model = LabelPix2PixModel(opt)
model.eval()

visualizer = Visualizer(opt)

# create a webpage that summarizes the all results
web_dir = os.path.join(opt.results_dir, opt.name,
                       '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(
    web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
    (opt.name, opt.phase, opt.which_epoch))

# test: stop once `how_many` source images have been processed
# (i counts batches, so scale by batchSize).
for i, data_i in enumerate(dataloader):
    if i * opt.batchSize >= opt.how_many:
        break

    generated = model(data_i, mode='inference')
Exemple #30
0
import time
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer


if __name__ == '__main__':
    opt = TrainOptions().parse()   # get training options
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    dataset_size = len(dataset)    # get the number of images in the dataset.
    print('The number of training images = %d' % dataset_size)

    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    visualizer = Visualizer(opt)   # create a visualizer that display/save images and plots
    total_iters = 0                # the total number of training iterations

    # Optionally render the network graphs once before training starts.
    if opt.display_networks:
        data=next(iter(dataset))
        for path in model.save_networks_img(data):
            visualizer.display_img(path+'.png')

    for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
        epoch_start_time = time.time()  # timer for entire epoch
        iter_data_time = time.time()    # timer for data loading per iteration
        epoch_iter = 0                  # the number of training iterations in current epoch, reset to 0 every epoch
        visualizer.reset()              # reset the visualizer: make sure it saves the results to HTML at least once every epoch

        for i, data in enumerate(dataset):  # inner loop within one epoch
Exemple #31
0
from options.train_options import TrainOptions
opt = TrainOptions().parse()  # set CUDA_VISIBLE_DEVICES before import torch

# These imports come AFTER option parsing on purpose: parsing sets
# CUDA_VISIBLE_DEVICES, which must be in place before torch is loaded.
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer


# Load data (paired and unpaired splits)
data_loader = CreateDataLoader(opt)
dataset_paired, paired_dataset_size = data_loader.load_data_pair()
dataset_unpaired, unpaired_dataset_size = data_loader.load_data_unpair()

# Create Model
model = create_model(opt)
visualizer = Visualizer(opt)

# Start Training
print('Start training')

#################################################
# Step1: Autoencoder
#################################################
print('step 1')
pre_epoch_AE = 5 # number of iteration for autoencoder pre-training
total_steps = 0
for epoch in range(1, pre_epoch_AE+1):
    for i,(images_a, images_b) in enumerate(dataset_paired):
        iter_start_time = time.time()
        total_steps += opt.batchSize
        # NOTE(review): uses unpaired_dataset_size while iterating the
        # paired split — verify this offset is intended.
        epoch_iter = total_steps - unpaired_dataset_size * (epoch - 1)
Exemple #32
0

# Test-time settings: one sample at a time, deterministic order, no flip.
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip
# Point the loader at the test split and pre-extract video frames
# (avi2pngs presumably writes them under dataroot/split — confirm).
opt.dataroot = opt.testroot
opt.file_list = opt.test_file_list
avi2pngs(opt)
opt.dataroot = opt.dataroot + '/split'
testing_data_loader = CreateDataLoader(opt)
test_dataset = testing_data_loader.load_data()
test_dataset_size = len(testing_data_loader)
print('#testing samples = %d' % test_dataset_size)

model = create_model(opt)
visualizer = Visualizer(opt)
# create website
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
# test

total_steps = 0
# NOTE(review): batchSize is bumped to 32 after being forced to 1 above —
# confirm which value the loop below is supposed to count with.
opt.batchSize = 32
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    epoch_iter = 0
    # NOTE(review): iterates `train_dataset`, which is not defined in this
    # fragment (only `test_dataset` is) — verify against the full file.
    for i, data in enumerate(train_dataset):
        iter_start_time = time.time()
        total_steps += opt.batchSize
        epoch_iter += opt.batchSize
        model.set_input(data)
Exemple #33
0
import time
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer

if __name__ == '__main__':
    opt = TrainOptions().parse()
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)

    # Model creation, network/scheduler setup, and plotting utilities.
    model = create_model(opt)
    model.setup(opt)
    visualizer = Visualizer(opt)
    total_steps = 0  # total training iterations across all epochs

    # niter epochs at the base LR, then niter_decay epochs of LR decay.
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()  # timer for the whole epoch
        iter_data_time = time.time()    # timer for data loading
        epoch_iter = 0                  # iterations within this epoch

        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            # Only measure data-loading time on iterations we report.
            if total_steps % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_steps += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)
Exemple #34
0
import time
import os
from options.test_options import TestOptions
from data.data_loader import DataLoader
from models.combogan_model import ComboGANModel
from util.visualizer import Visualizer
from util import html

opt = TestOptions().parse()
opt.nThreads = 1  # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1

dataset = DataLoader(opt)
model = ComboGANModel(opt)
visualizer = Visualizer(opt)
# create website (an HTML index of the saved result images)
web_dir = os.path.join(opt.results_dir, opt.name,
                       '%s_%d' % (opt.phase, opt.which_epoch))
webpage = html.HTML(
    web_dir, 'Experiment = %s, Phase = %s, Epoch = %d' %
    (opt.name, opt.phase, opt.which_epoch))
# store images for matrix visualization
vis_buffer = []

# test: in non-serial mode, stop after `how_many` examples.
for i, data in enumerate(dataset):
    if not opt.serial_test and i >= opt.how_many:
        break
    model.set_input(data)
    model.test()
    visuals = model.get_current_visuals(testing=True)
Exemple #35
0
import time
from options.train_options import TrainOptions
from data.data_loader import DataLoader
from models.combogan_model import ComboGANModel
from util.visualizer import Visualizer


opt = TrainOptions().parse()

dataset = DataLoader(opt)
print('# training images = %d' % len(dataset))
model = ComboGANModel(opt)
visualizer = Visualizer(opt)
total_steps = 0  # total training iterations across all epochs

# Resume after which_epoch and run until niter + niter_decay.
for epoch in range(opt.which_epoch + 1, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    epoch_iter = 0
    for i, data in enumerate(dataset):
        iter_start_time = time.time()
        total_steps += opt.batchSize
        epoch_iter += opt.batchSize
        model.set_input(data)
        model.optimize_parameters()

        # Periodically show the current generated images...
        if total_steps % opt.display_freq == 0:
            visualizer.display_current_results(model.get_current_visuals(), epoch)

        # ...and collect the current losses for printing/plotting.
        if total_steps % opt.print_freq == 0:
            errors = model.get_current_errors()
            t = (time.time() - iter_start_time) / opt.batchSize
    start_epoch, epoch_iter = 1, 0

# Debug mode: shrink every frequency/epoch setting so a full pass of the
# training loop runs in seconds.
if opt.debug:
    opt.display_freq = 1
    opt.print_freq = 1
    opt.niter = 1
    opt.niter_decay = 0
    opt.max_dataset_size = 10

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)

model = create_model(opt)
visualizer = Visualizer(opt)

# When resuming, continue the step count from where the previous run stopped.
total_steps = (start_epoch-1) * dataset_size + epoch_iter

# Offsets so display/print/save still fire on their usual multiples after
# a resume.
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq

if __name__=='__main__':
    for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        if epoch != start_epoch:
            epoch_iter = epoch_iter % dataset_size

        # Resume mid-epoch: skip the first epoch_iter examples.
        for i, data in enumerate(dataset, start=epoch_iter):
            iter_start_time = time.time()
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from util import html

# Deterministic single-image test configuration.
opt = TestOptions().parse()
opt.nThreads = 1   # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)

# Build the result website: one HTML page indexing the saved images.
run_label = '%s_%s' % (opt.phase, opt.which_epoch)
web_dir = os.path.join(opt.results_dir, opt.name, run_label)
page_title = 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch)
webpage = html.HTML(web_dir, page_title)

# Run inference on the first `how_many` examples, saving each result
# (keyed by its source image path) into the webpage.
for i, data in enumerate(dataset):
    if i >= opt.how_many:
        break
    model.set_input(data)
    model.test()
    visuals = model.get_current_visuals()
    img_path = model.get_image_paths()
    print('process image... %s' % img_path)
    visualizer.save_images(webpage, visuals, img_path)

webpage.save()
    from Dataloader.Test_load_video import Test_VideoFolder
elif opt.test_type == 'audio':
    import Test_Gen_Models.Test_Audio_Model as Gen_Model
    from Dataloader.Test_load_audio import Test_VideoFolder
else:
    raise('test type select error')

opt.nThreads = 1   # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.sequence_length = 1
test_nums = [1, 2, 3, 4]  # choose input identity images

model = Gen_Model.GenModel(opt)
# _, _, start_epoch = util.load_test_checkpoint(opt.test_resume_path, model)
start_epoch = opt.start_epoch
visualizer = Visualizer(opt)
# find the checkpoint's path name without the 'checkpoint.pth.tar'
path_name = ntpath.basename(opt.test_resume_path)[:-19]
web_dir = os.path.join(opt.results_dir, path_name, '%s_%s' % ('test', start_epoch))
for i in test_nums:
    A_path = os.path.join(opt.test_A_path, '/test_sample' + str(i) + '.jpg')
    test_folder = Test_VideoFolder(root=opt.test_root, A_path=A_path, config=opt)
    test_dataloader = DataLoader(test_folder, batch_size=1,
                                shuffle=False, num_workers=1)
    model, _, start_epoch = util.load_test_checkpoint(opt.test_resume_path, model)

    # inference during test

    for i2, data in enumerate(test_dataloader):
        if i2 < 5:
            model.set_test_input(data)