Code example #1
    def make_temporal_model(self):
        opt = self.opt
        self.temporal = True
        self.netG.set_flow_prev()
        self.netG.cuda()

        if opt.isTrain:
            self.lossCollector.tD = min(opt.n_frames_D, opt.n_frames_G)  
            if opt.finetune_all:      
                params = list(self.netG.parameters())
            else:
                train_names = ['flow_network_temp']
                if opt.spade_combine: 
                    train_names += ['img_warp_embedding', 'mlp_gamma3', 'mlp_beta3']
                params, _ = self.get_train_params(self.netG, train_names) 
                    
            if self.refine_face: params += list(self.netGf.parameters())
            self.optimizer_G = self.get_optimizer(params, for_discriminator=False)
            
            # temporal discriminator
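            # (input channels = output_nc * tD, i.e. presumably tD consecutive frames concatenated channel-wise)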
            self.netDT = networks.define_D(opt, opt.output_nc * self.lossCollector.tD, opt.ndf, opt.n_layers_D, opt.norm_D, 'n_layers',
                                           1, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)
            # optimizer D            
            params = list(self.netD.parameters()) + list(self.netDT.parameters())
            if self.add_face_D: params += list(self.netDf.parameters())
            self.optimizer_D = self.get_optimizer(params, for_discriminator=True)           

            print('---------- Now start training multiple frames -------------')
Code example #2
def vis_print(opt, message):
    print(message)
    if opt.isTrain and not opt.debug:
        log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        with open(log_name, "a") as log_file:
            log_file.write('%s\n' % message)
Code example #3
    def initialize(self, opt):
        self.opt = opt
        self.pose_type = opt.pose_type

        root = opt.dataroot
        if opt.isTrain:
            self.img_paths = sorted(
                make_grouped_dataset(path.join(root, 'train_images')))
            self.op_paths = sorted(
                make_grouped_dataset(path.join(root, 'train_openpose')))
            self.dp_paths = sorted(
                make_grouped_dataset(path.join(root, 'train_densepose')))
        else:
            self.img_paths = sorted(make_dataset(opt.seq_path))
            self.op_paths = sorted(
                make_dataset(opt.seq_path.replace('images', 'openpose')))
            self.dp_paths = sorted(
                make_dataset(opt.seq_path.replace('images', 'densepose')))

            self.ref_img_paths = sorted(make_dataset(opt.ref_img_path))
            self.ref_op_paths = sorted(
                make_dataset(opt.ref_img_path.replace('images', 'openpose')))
            self.ref_dp_paths = sorted(
                make_dataset(opt.ref_img_path.replace('images', 'densepose')))

        self.n_of_seqs = len(self.img_paths)  # number of sequences to train
        if opt.isTrain: print('%d sequences' % self.n_of_seqs)
        self.crop_coords = self.ref_face_pts = None
        self.ref_crop_coords = [None] * opt.n_shot
Code example #4
def create_model(opt, epoch=0):
    model = Vid2VidModel()
    model.initialize(opt, epoch)
    print("model [%s] was created" % (model.name()))

    if opt.isTrain:
        if opt.amp != 'O0':
            from apex import amp
            print('using amp optimization')
            model, optimizers = amp.initialize(
                model, [model.optimizer_G, model.optimizer_D],
                opt_level=opt.amp,
                num_losses=2)
        else:
            optimizers = model.optimizer_G, model.optimizer_D

        model = WrapModel(opt, model)
        flowNet = None
        if not opt.no_flow_gt:
            from .flownet import FlowNet
            flowNet = FlowNet()
            flowNet.initialize(opt)
            flowNet = WrapModel(opt, flowNet)
        return model, flowNet, optimizers
    return model
Code example #5
    def initialize(self, opt):
        self.opt = opt
        root = opt.dataroot

        if opt.isTrain:            
            self.L_paths = sorted(make_grouped_dataset(path.join(root, 'train_keypoints'))) 
            self.I_paths = sorted(make_grouped_dataset(path.join(root, 'train_images')))
            check_path_valid(self.L_paths, self.I_paths)
        else:
            self.L_paths = sorted(make_dataset(opt.seq_path.replace('images', 'keypoints')))
            self.I_paths = sorted(make_dataset(opt.seq_path))

            self.ref_L_paths = sorted(make_dataset(opt.ref_img_path.replace('images', 'keypoints')))
            self.ref_I_paths = sorted(make_dataset(opt.ref_img_path))

        self.n_of_seqs = len(self.I_paths)                         # number of sequences to train 
        if opt.isTrain: print('%d sequences' % self.n_of_seqs)        

        # mapping from keypoints to face part 
        self.add_upper_face = not opt.no_upper_face
        self.part_list = [
            [list(range(0, 17)) + ((list(range(68, 83)) + [0]) if self.add_upper_face else [])],    # face
            [range(17, 22)],                                                                        # right eyebrow
            [range(22, 27)],                                                                        # left eyebrow
            [[28, 31], range(31, 36), [35, 28]],                                                    # nose
            [[36, 37, 38, 39], [39, 40, 41, 36]],                                                   # right eye
            [[42, 43, 44, 45], [45, 46, 47, 42]],                                                   # left eye
            [range(48, 55), [54, 55, 56, 57, 58, 59, 48], range(60, 65), [64, 65, 66, 67, 60]],     # mouth and tongue
        ]
        self.ref_dist_x, self.ref_dist_y = [None] * 83, [None] * 83
        self.dist_scale_x, self.dist_scale_y = [None] * 83, [None] * 83        
        self.fix_crop_pos = True
Code example #6
    def __init__(self, opt):
        # self.opt = opt
        self.tf_log = opt.tf_log
        self.use_visdom = opt.use_visdom
        self.use_html = opt.isTrain and not opt.no_html
        self.win_size = opt.display_winsize #* opt.aspect_ratio
        self.name = opt.name
        if self.tf_log:
            import tensorflow as tf
            self.tf = tf
            self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
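            # note: tf.summary.FileWriter is the TF1.x API; TF2 replaced it with tf.summary.create_file_writer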
            self.writer = tf.summary.FileWriter(self.log_dir)

        if self.use_visdom:
            import visdom
            self.vis = visdom.Visdom()
            self.visdom_id = opt.visdom_id

        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print('create web directory %s...' % self.web_dir)
            util.mkdirs([self.web_dir, self.img_dir])
        if opt.isTrain:
            if hasattr(opt, 'model_idx') and opt.model_idx != -1:
                self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log_%03d.txt' % opt.model_idx)
            else:
                self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
            with open(self.log_name, "a") as log_file:
                now = time.strftime("%c")
                log_file.write('================ Training Loss (%s) ================\n' % now)
Code example #7
    def update_training_batch(self, ratio):
        # update the training sequence length to be longer
        seq_len_max = 30
        if self.n_frames_total < seq_len_max:
            self.n_frames_total = min(seq_len_max, self.opt.n_frames_total * (2**ratio))
            print('--- Updating training sequence length to %d ---' % self.n_frames_total)
Code example #8
    def print_current_errors(self, epoch, i, errors, t):
        message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
        for k, v in errors.items():
            if v != 0:
                message += '%s: %.3f ' % (k, v)

        print(message)
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)
Code example #9
    def initialize(self, opt):
        self.opt = opt
        self.pose_type = opt.pose_type

        root = opt.dataroot
        if opt.isTrain:
            self.img_paths = sorted(
                make_grouped_dataset(path.join(root, 'train_images')))
            self.op_paths = sorted(
                make_grouped_dataset(path.join(root, 'train_openpose')))
            self.dp_paths = sorted(
                make_grouped_dataset(path.join(root, 'train_densepose')))
            self.ppl_indices = None
            if path.exists(path.join(root, 'all_subsequences.json')):
                with open(path.join(root, 'all_subsequences.json')) as f:
                    all_subsequences = json.loads(f.read())
                seq_indices = all_subsequences['seq_indices']
                start_frame_indices = all_subsequences['start_frame_indices']
                end_frame_indices = all_subsequences['end_frame_indices']
                img_paths, op_paths, dp_paths = [], [], []
                for i in range(len(seq_indices)):
                    seq_idx = seq_indices[i]
                    start_frame_idx = start_frame_indices[i]
                    end_frame_idx = end_frame_indices[i]
                    img_paths.append(self.img_paths[seq_idx][start_frame_idx:end_frame_idx])
                    op_paths.append(self.op_paths[seq_idx][start_frame_idx:end_frame_idx])
                    dp_paths.append(self.dp_paths[seq_idx][start_frame_idx:end_frame_idx])
                self.img_paths = img_paths
                self.op_paths = op_paths
                self.dp_paths = dp_paths
                self.ppl_indices = all_subsequences['ppl_indices']
        else:
            self.img_paths = sorted(make_dataset(opt.seq_path))
            self.op_paths = sorted(
                make_dataset(opt.seq_path.replace('images', 'openpose')))
            self.dp_paths = sorted(
                make_dataset(opt.seq_path.replace('images', 'densepose')))

            self.ref_img_paths = sorted(make_dataset(opt.ref_img_path))
            self.ref_op_paths = sorted(
                make_dataset(opt.ref_img_path.replace('images', 'openpose')))
            self.ref_dp_paths = sorted(
                make_dataset(opt.ref_img_path.replace('images', 'densepose')))

        self.n_of_seqs = len(self.img_paths)  # number of sequences to train
        if opt.isTrain: print('%d sequences' % self.n_of_seqs)
        self.crop_coords = self.ref_face_pts = None
        self.ref_crop_coords = [None] * opt.n_shot
Code example #10
    def print_options(self, opt):
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)
Code example #11
    def update_learning_rate(self, epoch):
        new_lr = self.opt.lr * (1 - (epoch - self.opt.niter) / (self.opt.niter_decay + 1))
        if self.opt.no_TTUR:
            G_lr, D_lr = new_lr, new_lr
        else:
            G_lr, D_lr = new_lr / 2, new_lr * 2

        for param_group in self.optimizer_D.param_groups:
            param_group['lr'] = D_lr
        for param_group in self.optimizer_G.param_groups:
            param_group['lr'] = G_lr
        print('update learning rate: %f -> %f' % (self.old_lr, new_lr))
        self.old_lr = new_lr
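
The method above decays the learning rate linearly toward zero over the niter_decay epochs that follow the first niter epochs, and (unless no_TTUR is set) applies the two time-scale update rule: half rate for the generator, double for the discriminator. A quick numeric check of the decay formula, with assumed option values lr=0.0002, niter=10, niter_decay=10:

    # hypothetical option values, only to illustrate the decay formula above
    for epoch in [10, 11, 15, 20]:
        new_lr = 0.0002 * (1 - (epoch - 10) / (10 + 1))
        print(epoch, new_lr)  # 0.0002, ~0.000182, ~0.000109, ~0.000018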
Code example #12
    def define_networks(self, start_epoch):
        opt = self.opt        
        # Generator network        
        input_nc = opt.label_nc if (opt.label_nc != 0 and not self.pose) else opt.input_nc
        netG_input_nc = input_nc           
        opt.for_face = False        
        self.netG = networks.define_G(opt)        
        if self.refine_face:            
            opt_face = copy.deepcopy(opt)
            opt_face.n_downsample_G -= 1
            if opt_face.n_adaptive_layers > 0: opt_face.n_adaptive_layers -= 1
            opt_face.input_nc = opt.output_nc
            opt_face.fineSize = self.faceRefiner.face_size
            opt_face.aspect_ratio = 1
            opt_face.for_face = True
            self.netGf = networks.define_G(opt_face)

        # Discriminator network
        if self.isTrain or opt.finetune:            
            netD_input_nc = input_nc + opt.output_nc + (1 if self.concat_fg_mask_for_D else 0)
            if self.concat_ref_for_D:
                netD_input_nc *= 2
            self.netD = networks.define_D(opt, netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm_D, opt.netD_subarch, 
                                          opt.num_D, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)            
            if self.add_face_D:
                self.netDf = networks.define_D(opt, opt.output_nc * 2, opt.ndf, opt.n_layers_D, opt.norm_D, 'n_layers',
                                               1, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)
            else:
                self.netDf = None
        self.temporal = False
        self.netDT = None             
                    
        print('---------- Networks initialized -------------')

        # initialize optimizers
        if self.isTrain:            
            # optimizer G
            params = list(self.netG.parameters())           
            if self.refine_face: params += list(self.netGf.parameters())
            self.optimizer_G = self.get_optimizer(params, for_discriminator=False)

            # optimizer D            
            params = list(self.netD.parameters())
            if self.add_face_D: params += list(self.netDf.parameters())
            self.optimizer_D = self.get_optimizer(params, for_discriminator=True)           

        print('---------- Optimizers initialized -------------')

        # make model temporal by generating multiple frames
        if (not opt.isTrain or start_epoch > opt.niter_single) and opt.n_frames_G > 1:
            self.make_temporal_model() 
Code example #13
File: models.py  Project: wpc2018/few-shot-vid2vid
def create_model(opt, epoch=0):
    model = Vid2VidModel()
    model.initialize(opt, epoch)
    print("model [%s] was created" % (model.name()))

    if opt.isTrain:
        model = WrapModel(opt, model)
        flowNet = None
        if not opt.no_flow_gt:
            from .flownet import FlowNet
            flowNet = FlowNet()
            flowNet.initialize(opt)
            flowNet = WrapModel(opt, flowNet)
        return model, flowNet
    return model
Code example #14
    def load_networks(self):
        opt = self.opt
        if not self.isTrain or opt.continue_train or opt.load_pretrain:
            pretrained_path = '' if not self.isTrain or opt.continue_train else opt.load_pretrain
            self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)
            if opt.transfer_initial:
                print('loading for encoder')
                self.netG.trans_init_G()
            if self.temporal and opt.warp_ref and not self.netG.flow_temp_is_initalized:
                self.netG.set_flow_prev()
            if (self.isTrain and not opt.load_pretrain) or opt.finetune:
                self.load_network(self.netD, 'D', opt.which_epoch, pretrained_path)
                if self.isTrain and self.temporal:
                    self.load_network(self.netDT, 'DT', opt.which_epoch, pretrained_path)
Code example #15
    def get_train_params(self, netG, train_names):
        train_list = set()
        params = []
        params_dict = netG.state_dict()
        for key, value in params_dict.items():
            do_train = False
            for model_name in train_names:
                if model_name in key: do_train = True
            if do_train:
                module = netG
                key_list = key.split('.')
                for k in key_list:
                    module = getattr(module, k)
                params += [module]
                train_list.add('.'.join(key_list[:1]))
        print('training layers: ', train_list)
        return params, train_list
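
get_train_params walks each state_dict key attribute-by-attribute to recover the actual Parameter object. A self-contained toy illustration of that getattr chain (the Sequential module below is made up for the demo, not from the repository):

    import torch.nn as nn

    net = nn.Sequential(nn.Conv2d(3, 8, 3))  # toy network, for illustration only
    key = '0.weight'                         # a key as it appears in net.state_dict()
    module = net
    for k in key.split('.'):
        module = getattr(module, k)          # '0' resolves to the Conv2d, 'weight' to its Parameter
    assert module is net[0].weight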
Code example #16
File: trainer.py  Project: yuzhou164/few-shot-vid2vid
    def __init__(self, opt, data_loader):
        iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
        start_epoch, epoch_iter = 1, 0
        ### if continuing training, recover previous states
        if opt.continue_train:
            if os.path.exists(iter_path):
                start_epoch, epoch_iter = np.loadtxt(iter_path, delimiter=',', dtype=int)
            print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))
                
        print_freq = lcm(opt.print_freq, opt.batchSize)
        total_steps = (start_epoch-1) * len(data_loader) + epoch_iter
        total_steps = total_steps // print_freq * print_freq  

        self.opt = opt
        self.epoch_iter, self.print_freq, self.total_steps, self.iter_path = epoch_iter, print_freq, total_steps, iter_path
        self.start_epoch, self.epoch_iter = start_epoch, epoch_iter
        self.dataset_size = len(data_loader)
        self.visualizer = Visualizer(opt)        
Code example #17
def find_dataset_using_name(dataset_name):
    # Given the option --dataset [datasetname],
    # the file "datasets/datasetname_dataset.py"
    # will be imported.
    dataset_filename = "data." + dataset_name + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)

    # In the file, the class called DatasetNameDataset() will
    # be instantiated. It has to be a subclass of BaseDataset,
    # and it is case-insensitive.
    dataset = None
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    for name, cls in datasetlib.__dict__.items():
        if name.lower() == target_dataset_name.lower() \
           and issubclass(cls, BaseDataset):
            dataset = cls

    if dataset is None:
        print(
            "In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase."
            % (dataset_filename, target_dataset_name))
        exit(0)

    return dataset
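
A hedged usage sketch of the naming convention; the 'pose' module and PoseDataset class below are hypothetical, shown only to illustrate the lookup rule:

    # --dataset_mode pose would require data/pose_dataset.py defining a
    # BaseDataset subclass whose name lower-cases to 'posedataset', e.g. PoseDataset
    dataset_class = find_dataset_using_name('pose')
    dataset = dataset_class()
    dataset.initialize(opt)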
Code example #18
    def __init__(self, root, write_cache=False):
        self.root = os.path.expanduser(root)
        self.env = lmdb.open(root, max_readers=126, readonly=True, lock=False,
                             readahead=False, meminit=False)
        with self.env.begin(write=False) as txn:
            self.length = txn.stat()['entries']
        print('LMDB file at %s opened.' % root)
        cache_file = os.path.join(root, '_cache_')
        if os.path.isfile(cache_file):
            with open(cache_file, "rb") as f:   # context manager so the file handle is closed
                self.keys = pickle.load(f)
        elif write_cache:
            print('generating keys')
            with self.env.begin(write=False) as txn:
                self.keys = [key for key, _ in txn.cursor()]
            with open(cache_file, "wb") as f:
                pickle.dump(self.keys, f)
            print('cache file generated at %s' % cache_file)
        else:
            self.keys = []
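
A brief usage sketch, assuming this __init__ belongs to a reader class (called LmdbReader here purely for illustration) and that values are stored as raw bytes:

    db = LmdbReader('/path/to/data.lmdb', write_cache=True)  # hypothetical class name and path
    with db.env.begin(write=False) as txn:
        value = txn.get(db.keys[0])  # raw bytes stored under the first cached key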
Code example #19
def train():
    opt = TrainOptions().parse()

    if opt.distributed:
        init_dist()
        print('batch size per GPU: %d' % opt.batchSize)
    torch.backends.cudnn.benchmark = True

    ### setup dataset
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    pose = 'pose' in opt.dataset_mode

    ### setup trainer
    trainer = Trainer(opt, data_loader)

    ### setup models
    model, flowNet, [optimizer_G, optimizer_D] = create_model(opt, trainer.start_epoch)
    flow_gt = conf_gt = [None] * 2

    for epoch in range(trainer.start_epoch, opt.niter + opt.niter_decay + 1):
        if opt.distributed:
            dataset.sampler.set_epoch(epoch)
        trainer.start_of_epoch(epoch, model, data_loader)
        n_frames_total, n_frames_load = data_loader.dataset.n_frames_total, opt.n_frames_per_gpu
        for idx, data in enumerate(dataset, start=trainer.epoch_iter):
            data = trainer.start_of_iter(data)

            if not opt.no_flow_gt:
                data_list = [
                    data['tgt_label'], data['ref_label']
                ] if pose else [data['tgt_image'], data['ref_image']]
                flow_gt, conf_gt = flowNet(data_list, epoch)
            data_list = [
                data['tgt_label'], data['tgt_image'], flow_gt, conf_gt
            ]
            data_ref_list = [data['ref_label'], data['ref_image']]
            data_prev = [None, None, None]

            ############## Forward Pass ######################
            for t in range(0, n_frames_total, n_frames_load):
                data_list_t = get_data_t(data_list, n_frames_load,
                                         t) + data_ref_list + data_prev

                d_losses = model(data_list_t, mode='discriminator')
                d_losses = loss_backward(opt, d_losses, optimizer_D, 1)

                g_losses, generated, data_prev = model(
                    data_list_t, save_images=trainer.save, mode='generator')
                g_losses = loss_backward(opt, g_losses, optimizer_G, 0)

            loss_dict = dict(
                zip(model.module.lossCollector.loss_names,
                    g_losses + d_losses))

            if trainer.end_of_iter(loss_dict,
                                   generated + data_list + data_ref_list,
                                   model):
                break
        trainer.end_of_epoch(model)
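
train() relies on a loss_backward helper whose body is not shown in these examples. A plausible sketch, inferred from the calls above and from the apex amp setup in Code example #4 (num_losses=2, loss_id 1 for the discriminator and 0 for the generator); this is an assumption, not the repository's exact implementation:

    def loss_backward(opt, losses, optimizer, loss_id):
        # reduce each loss term to a scalar, then backpropagate the sum
        losses = [loss.mean() if loss is not None else 0 for loss in losses]
        loss = sum(losses)
        optimizer.zero_grad()
        if opt.amp != 'O0':
            from apex import amp
            # loss_id pairs with num_losses=2 passed to amp.initialize in Code example #4
            with amp.scale_loss(loss, optimizer, loss_id=loss_id) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
        return losses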
Code example #20
def create_dataset(opt):
    dataset = find_dataset_using_name(opt.dataset_mode)
    instance = dataset()
    instance.initialize(opt)
    print("dataset [%s] was created" % (instance.name()))
    return instance
Code example #21
def CreateDataLoader(opt):
    from data.custom_dataset_data_loader import CustomDatasetDataLoader
    data_loader = CustomDatasetDataLoader()
    print(data_loader.name())
    data_loader.initialize(opt)
    return data_loader
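
A short usage sketch showing how the loader feeds the training loop (mirroring Code example #19):

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    for data in dataset:
        pass  # each batch is a dict; Code example #19 reads keys such as 'tgt_image' and 'ref_image'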