def main(master_opt, launcher):
    trainers = []
    all_networks = {}
    shared_networks = []
    if launcher != 'none':
        train.init_dist('nccl')
    for i, sub_opt in enumerate(master_opt['trainer_options']):
        sub_opt_parsed = option.parse(sub_opt, is_train=True)
        trainer = train.Trainer()

        #### distributed training settings
        if launcher == 'none':  # disabled distributed training
            sub_opt_parsed['dist'] = False
            trainer.rank = -1
            print('Disabled distributed training.')
        else:
            sub_opt_parsed['dist'] = True
            trainer.world_size = torch.distributed.get_world_size()
            trainer.rank = torch.distributed.get_rank()

        trainer.init(sub_opt_parsed, launcher, all_networks)
        train_gen = trainer.create_training_generator(i)
        model = next(train_gen)
        for k, v in model.networks.items():
            if k in all_networks.keys() and k not in shared_networks:
                shared_networks.append(k)
            all_networks[k] = v.module
        trainers.append(train_gen)
    print("Networks being shared by trainers: ", shared_networks)

    # Now, simply "iterate" through the trainers to accomplish training.
    while True:
        for trainer in trainers:
            next(trainer)

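# Hedged illustration (hypothetical ToyTrainer/ToyModel names, not part of this repo):
# the round-robin loop in main() relies on each trainer's create_training_generator()
# yielding its model once, then performing one training step per subsequent next() call.
# A minimal self-contained sketch of that contract:
class ToyModel:
    def __init__(self):
        self.networks = {}  # name -> network, mirroring model.networks above


class ToyTrainer:
    def create_training_generator(self, index):
        model = ToyModel()
        yield model  # the first next() hands the model back so networks can be shared
        step = 0
        while True:
            step += 1   # stand-in for one real training step
            yield step  # return control to the round-robin loop


# Usage mirroring main(): prime each generator, then rotate through them.
_gens = [ToyTrainer().create_training_generator(i) for i in range(2)]
_models = [next(g) for g in _gens]
for _ in range(3):
    for g in _gens:
        next(g)
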
def main():
    #### options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to option YAML file.',
                        default='../../options/train_prog_mi1_rrdb_6bypass.yml')
    parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = option.parse(args.opt, is_train=True)

    #### distributed training settings
    opt['dist'] = False
    rank = -1

    # convert to NoneDict, which returns None for missing keys
    opt = option.dict_to_nonedict(opt)

    #### random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    util.set_random_seed(seed)

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    #### create train and val dataloader
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))
            total_iters = int(opt['train']['niter'])
            total_epochs = int(math.ceil(total_iters / train_size))
            dataset_opt['n_workers'] = 0  # Force num_workers=0 to make the dataloader run in-process.
            train_loader = create_dataloader(train_set, dataset_opt, opt, None)
            if rank <= 0:
                print('Number of train images: {:,d}, iters: {:,d}'.format(
                    len(train_set), train_size))

    assert train_loader is not None

    '''
    tq_ldr = tqdm(train_set.get_paths())
    for path in tq_ldr:
        try:
            _ = io.imread(path)
            # Do stuff with img
        except Exception as e:
            print("Error with %s" % (path,))
            print(e)
    '''

    tq_ldr = tqdm(train_set)
    for ds in tq_ldr:
        pass

def test_test_real():
    args = parse()
    print(args)
    dataset = SepeDataset(args.poses_train, args.images_train, coor_layer_flag=False)
    dataloader = DataLoader(dataset, batch_size=10, shuffle=False, num_workers=1, drop_last=True)
    dvo_feature_extractor = DVOFeature()
    dvo_regressor = DVORegression()
    dvo_discriminator = Discriminator(500, 500, 2)
    dvo_feature_extractor.load_state_dict(torch.load('feature_ntsd_2_10.pt'))
    dvo_regressor.load_state_dict(torch.load('regressor_seed_ntsd_2_10.pt'))
    test(dvo_feature_extractor, dvo_regressor, dataloader, args)

def test_train_real():
    args = parse()
    print(args)
    dataset = SepeDataset(args.poses_train, args.images_train, coor_layer_flag=False)
    dataloader = DataLoader(dataset, batch_size=3, shuffle=True, num_workers=1, drop_last=True,
                            worker_init_fn=lambda wid: np.random.seed(np.uint32(torch.initial_seed() + wid)))
    dvo_feature_extractor = DVOFeature()
    dvo_regressor = DVORegression()
    dvo_discriminator = Discriminator(500, 500, 2)
    trained_feature, trained_regressor = train(dvo_feature_extractor, dvo_regressor, dataloader, args)
    torch.save(trained_feature.state_dict(), 'feature_' + args.tag + str(args.epoch) + '.pt')
    torch.save(trained_regressor.state_dict(), 'regressor_' + args.tag + str(args.epoch) + '.pt')

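# Note on the worker_init_fn lambda used above (and in test_train/test_adapt below):
# without it, every DataLoader worker forks with the same NumPy random state, so
# np.random-based sampling produces duplicated values across workers. Seeding each
# worker from torch.initial_seed() plus its worker id gives distinct streams. A minimal
# self-contained sketch of the same pattern (illustrative names, not this repo's code):
def _worker_seed_demo():
    import numpy as np
    import torch
    from torch.utils.data import DataLoader, Dataset

    class NoiseDataset(Dataset):
        def __len__(self):
            return 8

        def __getitem__(self, idx):
            return np.random.rand()  # relies on NumPy's per-process random state

    def seed_worker(worker_id):
        np.random.seed(np.uint32(torch.initial_seed() + worker_id))

    loader = DataLoader(NoiseDataset(), batch_size=2, num_workers=2,
                        worker_init_fn=seed_worker)
    for batch in loader:
        print(batch)  # values now differ across workers
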
def test_train():
    args = parse()
    print(args)
    motion_ax_i = [int(i) for i in args.motion_ax.split(' ')]
    dataset = RandomDataset(20000, motion_ax=motion_ax_i)
    dataloader = DataLoader(dataset, batch_size=1000, shuffle=False, num_workers=1, drop_last=True,
                            worker_init_fn=lambda wid: np.random.seed(np.uint32(torch.initial_seed() + wid)))
    dvo_feature_extractor = DVOFeature()
    dvo_regressor = DVORegression()
    dvo_discriminator = Discriminator(500, 500, 2)
    trained_feature, trained_regressor = train(dvo_feature_extractor, dvo_regressor, dataloader, args)
    torch.save(trained_feature.state_dict(), 'feature_seed' + args.motion_ax.replace(' ', '') + str(args.epoch) + '.pt')
    torch.save(trained_regressor.state_dict(), 'regressor_seed' + args.motion_ax.replace(' ', '') + str(args.epoch) + '.pt')

def test_test():
    args = parse()
    print(args)
    motion_ax_i = [int(i) for i in args.motion_ax.split(' ')]
    test_motion_ax_i = [int(i) for i in args.test_motion_ax.split(' ')]
    dataset = RandomDataset(2, motion_ax=test_motion_ax_i)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1, drop_last=True)
    dvo_feature_extractor = DVOFeature()
    dvo_regressor = DVORegression()
    dvo_discriminator = Discriminator(500, 500, 2)
    dvo_feature_extractor.load_state_dict(torch.load('feature' + args.motion_ax.replace(' ', '') + '.pt'))
    dvo_regressor.load_state_dict(torch.load('regressor' + args.motion_ax.replace(' ', '') + '.pt'))
    test(dvo_feature_extractor, dvo_regressor, dataloader, args)

def test_adapt():
    args = parse()
    print(args)
    dataset = SepeDataset(args.poses_train, args.images_train, coor_layer_flag=False)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=1, drop_last=True,
                            worker_init_fn=lambda wid: np.random.seed(np.uint32(torch.initial_seed() + wid)))
    dataset_tgt = SepeDataset(args.poses_target, args.images_target, coor_layer_flag=False)
    dataloader_tgt = DataLoader(dataset_tgt, batch_size=1, shuffle=True, num_workers=1, drop_last=True,
                                worker_init_fn=lambda wid: np.random.seed(np.uint32(torch.initial_seed() + wid)))
    src_extractor = DVOFeature()
    tgt_extractor = DVOFeature()
    src_extractor.load_state_dict(torch.load(args.feature_model))
    tgt_extractor.load_state_dict(torch.load(args.feature_model))
    dvo_discriminator = Discriminator(500, 500, 2)
    adapt(src_extractor, tgt_extractor, dvo_discriminator, dataloader, dataloader_tgt, args)
    torch.save(tgt_extractor.state_dict(), 'tgt_feature_' + args.tag + str(args.epoch) + '.pt')
    torch.save(dvo_discriminator.state_dict(), 'dis_' + args.tag + str(args.epoch) + '.pt')

def test_data():
    args = parse()
    print(args)
    motion_ax_i = [float(i) for i in args.motion_ax.split(' ')]
    dataset = RandomDataset(10, motion_path='dataset/random/motion.txt', motion_ax=motion_ax_i,
                            camera_parameter=[640, 480, 320, 320, 320, 240])
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1, drop_last=True)
    for i_item in range(len(dataset)):
        img_1, img_2, depth, motion = dataset.show_item(i_item)
        print(motion)
        name_pre = 'result/saved_img/' + args.tag + str(np.sum(motion))
        # Images are written scaled by 255 (presumably floats in [0, 1]); depth is divided
        # by 100 for visualization, which assumes depth values up to roughly 100 units.
        cv2.imwrite(name_pre + 'img_1.png', 255 * img_1)
        cv2.imwrite(name_pre + 'img_2.png', 255 * img_2)
        cv2.imwrite(name_pre + 'depth_1.png', 255 * depth / 100)
        cv2.imshow('img_1', img_1)
        cv2.imshow('depth', depth / 100)
        cv2.imshow('img_2', img_2)
        cv2.waitKey()

def __init__(self, cfg):
    self.cfg = cfg
    opt = option.parse(cfg, is_train=False)
    opt = option.dict_to_nonedict(opt)
    utils.util.loaded_options = opt

    util.mkdirs((path for key, path in opt['path'].items()
                 if not key == 'experiments_root'
                 and 'pretrain_model' not in key and 'resume' not in key))
    util.setup_logger('base', opt['path']['log'], 'test_' + opt['name'], level=logging.INFO,
                      screen=True, tofile=True)
    logger = logging.getLogger('base')
    logger.info(option.dict2str(opt))

    #### Create test dataset and dataloader
    dataset_opt = list(opt['datasets'].values())[0]
    # Remove labeling features from the dataset config and wrappers.
    if 'dataset' in dataset_opt.keys():
        if 'labeler' in dataset_opt['dataset'].keys():
            dataset_opt['dataset']['includes_labels'] = False
            del dataset_opt['dataset']['labeler']
        test_set = create_dataset(dataset_opt)
        if hasattr(test_set, 'wrapped_dataset'):
            test_set = test_set.wrapped_dataset
    else:
        test_set = create_dataset(dataset_opt)
    logger.info('Number of test images: {:d}'.format(len(test_set)))
    self.test_loader = create_dataloader(test_set, dataset_opt, opt)

    self.model = ExtensibleTrainer(opt)
    self.gen = self.model.netsG['generator']
    self.dataset_dir = osp.join(opt['path']['results_root'], opt['name'])
    util.mkdir(self.dataset_dir)

# Stopped using this because PIL's animated gif output is total crap.
#images[0].save(output_file, save_all=True, append_images=images[1:], duration=80, loop=0)


if __name__ == "__main__":
    #### options
    torch.backends.cudnn.benchmark = True
    srg_analyze = False
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to options YAML file.',
                        default='../options/train_exd_imgsetext_srflow_bigboi_ganbase.yml')
    #parser.add_argument('-opt', type=str, help='Path to options YAML file.', default='../../options/train_exd_imgsetext_srflow_bigboi_frompsnr.yml')
    opt = option.parse(parser.parse_args().opt, is_train=False)
    opt = option.dict_to_nonedict(opt)
    utils.util.loaded_options = opt

    util.mkdirs((path for key, path in opt['path'].items()
                 if not key == 'experiments_root'
                 and 'pretrain_model' not in key and 'resume' not in key))
    util.setup_logger('base', opt['path']['log'], 'test_' + opt['name'], level=logging.INFO,
                      screen=True, tofile=True)
    logger = logging.getLogger('base')
    logger.info(option.dict2str(opt))

def get_model_for_opt_file(filename):
    opt = option.parse(filename, is_train=True)
    opt = option.dict_to_nonedict(opt)
    model = create_model(opt)
    return model, opt

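# Hedged usage sketch for the helper above: call it with the path to a training YAML and
# use the returned (model, opt) pair for inspection or network surgery. The config path
# below is illustrative, not a file this repo necessarily ships.
#   model, opt = get_model_for_opt_file('../options/some_train_config.yml')
#   print(opt['name'], type(model).__name__)
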
from data import create_dataloader, create_dataset
import math
from tqdm import tqdm
from utils.fdpl_util import dct_2d, extract_patches_2d
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from utils.colors import rgb2ycbcr
import torch.nn.functional as F

input_config = "../../options/train_imgset_pixgan_srg4_fdpl.yml"
output_file = "fdpr_diff_means.pt"
device = 'cuda'
patch_size = 128

if __name__ == '__main__':
    opt = option.parse(input_config, is_train=True)
    opt['dist'] = False

    # Create a dataset to load from (this dataset loads HR/LR images and performs any
    # distortions specified by the YML).
    dataset_opt = opt['datasets']['train']
    train_set = create_dataset(dataset_opt)
    train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))
    total_iters = int(opt['train']['niter'])
    total_epochs = int(math.ceil(total_iters / train_size))
    train_loader = create_dataloader(train_set, dataset_opt, opt, None)
    print('Number of train images: {:,d}, iters: {:,d}'.format(
        len(train_set), train_size))

    # calculate the perceptual weights
    master_diff = np.zeros((patch_size, patch_size))
    num_patches = 0

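    # Hedged note on what master_diff/num_patches set up (assumed logic, not the exact
    # computation this script performs next): a running mean of a per-patch
    # (patch_size x patch_size) statistic accumulated over the training set, roughly:
    #   for batch in tqdm(train_loader):
    #       stat = per_patch_statistic(batch)   # hypothetical helper, shape (N, patch_size, patch_size)
    #       master_diff += stat.sum(axis=0)
    #       num_patches += stat.shape[0]
    #   mean_diff = master_diff / num_patches   # the kind of array later written to output_file
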
        self.do_step(train_data)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to option YAML file.',
                        default='../options/train_tacotron2_lj.yml')
    parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = option.parse(args.opt, is_train=True)
    if args.launcher != 'none':
        # export CUDA_VISIBLE_DEVICES for running in distributed mode.
        if 'gpu_ids' in opt.keys():
            gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
            os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
            print('export CUDA_VISIBLE_DEVICES=' + gpu_list)
    trainer = Trainer()

    #### distributed training settings
    if args.launcher == 'none':  # disabled distributed training
        opt['dist'] = False
        trainer.rank = -1
        if len(opt['gpu_ids']) == 1:
            torch.cuda.set_device(opt['gpu_ids'][0])
        print('Disabled distributed training.')