def __init__(self, cfg):
    """Build a test-time harness from the options file at *cfg*.

    Parses the options, creates output directories and the 'base' logger,
    constructs the (first) test dataset and its dataloader, and pulls the
    generator network out of a freshly built ExtensibleTrainer.
    """
    self.cfg = cfg

    opt = option.dict_to_nonedict(option.parse(cfg, is_train=False))
    utils.util.loaded_options = opt

    # Create every configured output directory, skipping the experiment
    # root and any pretrained-model / resume entries.
    wanted_dirs = (
        p for k, p in opt['path'].items()
        if k != 'experiments_root'
        and 'pretrain_model' not in k
        and 'resume' not in k
    )
    util.mkdirs(wanted_dirs)
    util.setup_logger('base', opt['path']['log'], 'test_' + opt['name'],
                      level=logging.INFO, screen=True, tofile=True)
    logger = logging.getLogger('base')
    logger.info(option.dict2str(opt))

    #### Create test dataset and dataloader
    dataset_opt = list(opt['datasets'].values())[0]
    if 'dataset' in dataset_opt.keys():
        # Remove labeling features from the dataset config and unwrap any
        # wrapper the dataset factory may have applied.
        if 'labeler' in dataset_opt['dataset'].keys():
            dataset_opt['dataset']['includes_labels'] = False
            del dataset_opt['dataset']['labeler']
        test_set = create_dataset(dataset_opt)
        if hasattr(test_set, 'wrapped_dataset'):
            test_set = test_set.wrapped_dataset
    else:
        test_set = create_dataset(dataset_opt)
    logger.info('Number of test images: {:d}'.format(len(test_set)))

    self.test_loader = create_dataloader(test_set, dataset_opt, opt)
    self.model = ExtensibleTrainer(opt)
    self.gen = self.model.netsG['generator']
    self.dataset_dir = osp.join(opt['path']['results_root'], opt['name'])
    util.mkdir(self.dataset_dir)
# ---- Example 2 ----
    # NOTE(review): truncated fragment — 'parser' is defined outside this
    # excerpt; presumably an argparse.ArgumentParser with an '-opt' option
    # (see the commented-out add_argument line below) — TODO confirm.
    #parser.add_argument('-opt', type=str, help='Path to options YAML file.', default='../../options/train_exd_imgsetext_srflow_bigboi_frompsnr.yml')
    opt = option.parse(parser.parse_args().opt, is_train=False)
    # Convert to NoneDict so missing keys read as None instead of raising.
    opt = option.dict_to_nonedict(opt)
    utils.util.loaded_options = opt

    # Create all configured output dirs, skipping the experiment root and
    # any pretrained-model / resume entries.
    util.mkdirs((path for key, path in opt['path'].items()
                 if not key == 'experiments_root'
                 and 'pretrain_model' not in key and 'resume' not in key))
    util.setup_logger('base',
                      opt['path']['log'],
                      'test_' + opt['name'],
                      level=logging.INFO,
                      screen=True,
                      tofile=True)
    logger = logging.getLogger('base')
    logger.info(option.dict2str(opt))

    # Build the trainer and put its generator network into eval mode.
    model = ExtensibleTrainer(opt)
    gen = model.networks['generator']
    gen.eval()

    # Hard-coded script knobs; the Windows paths below are machine-specific.
    mode = "feed_through"  # temperature | restore | latent_transfer | feed_through
    #imgs_to_resample_pattern = "F:\\4k6k\\datasets\\ns_images\\adrianna\\val2\\lr\\*"
    imgs_to_resample_pattern = "F:\\4k6k\\datasets\\ns_images\\adrianna\\analyze\\analyze_xx\\*"
    #imgs_to_resample_pattern = "F:\\4k6k\\datasets\\ns_images\\imagesets\\images-half\\*lanette*"
    scale = 2
    resample_factor = 2  # When != 1, the HR image is upsampled by this factor using a bicubic to get the local latents. E.g. set this to '2' to get 2x upsampling.
    temperature = 1
    output_path = "..\\..\\results\\latent_playground"

    # Data types <- used to perform latent transfer.
# ---- Example 3 ----
    def init(self, opt, launcher, all_networks=None):
        """Initialize training state: resume, loggers, seeds, dataloaders, model.

        Args:
            opt: parsed options dict; converted to a NoneDict in here so
                missing keys read as None.
            launcher: launcher mode token (not used in this method;
                presumably consumed by the caller — TODO confirm).
            all_networks: optional dict of already-built networks handed to
                ExtensibleTrainer as its network cache.
        """
        # Use None as default instead of a shared mutable dict (classic
        # mutable-default-argument pitfall); behavior is unchanged.
        if all_networks is None:
            all_networks = {}
        self._profile = False
        self.val_compute_psnr = opt_get(opt, ['eval', 'compute_psnr'], False)
        self.val_compute_fea = opt_get(opt, ['eval', 'compute_fea'], False)

        #### loading resume state if exists
        if opt['path'].get('resume_state', None):
            # distributed resuming: all load into default GPU
            device_id = torch.cuda.current_device()
            resume_state = torch.load(
                opt['path']['resume_state'],
                map_location=lambda storage, loc: storage.cuda(device_id))
            option.check_resume(opt,
                                resume_state['iter'])  # check resume options
        else:
            resume_state = None

        #### mkdir and loggers
        if self.rank <= 0:  # normal training (self.rank -1) OR distributed training (self.rank 0)
            if resume_state is None:
                util.mkdir_and_rename(
                    opt['path']
                    ['experiments_root'])  # rename experiment folder if exists
                util.mkdirs(
                    (path for key, path in opt['path'].items()
                     if not key == 'experiments_root' and path is not None
                     and 'pretrain_model' not in key and 'resume' not in key))

            # config loggers. Before it, the log will not work
            util.setup_logger('base',
                              opt['path']['log'],
                              'train_' + opt['name'],
                              level=logging.INFO,
                              screen=True,
                              tofile=True)
            self.logger = logging.getLogger('base')
            self.logger.info(option.dict2str(opt))
            # tensorboard logger
            if opt['use_tb_logger'] and 'debug' not in opt['name']:
                self.tb_logger_path = os.path.join(
                    opt['path']['experiments_root'], 'tb_logger')
                # NOTE(review): taking only the first 3 chars of the version
                # string is fragile (e.g. '1.10' parses as 1.1) — works for
                # deciding "1.1 or later" but not for exact comparisons.
                version = float(torch.__version__[0:3])
                if version >= 1.1:  # PyTorch 1.1
                    from torch.utils.tensorboard import SummaryWriter
                else:
                    # Bug fix: was 'self.self.logger.info(...)', which would
                    # raise AttributeError on this fallback path.
                    self.logger.info(
                        'You are using PyTorch {}. Tensorboard will use [tensorboardX]'
                        .format(version))
                    from tensorboardX import SummaryWriter
                self.tb_logger = SummaryWriter(log_dir=self.tb_logger_path)
        else:
            util.setup_logger('base',
                              opt['path']['log'],
                              'train',
                              level=logging.INFO,
                              screen=True)
            self.logger = logging.getLogger('base')

        # convert to NoneDict, which returns None for missing keys
        opt = option.dict_to_nonedict(opt)
        self.opt = opt

        #### wandb init
        if opt['wandb'] and self.rank <= 0:
            import wandb
            os.makedirs(os.path.join(opt['path']['log'], 'wandb'),
                        exist_ok=True)
            wandb.init(project=opt['name'], dir=opt['path']['log'])

        #### random seed
        seed = opt['train']['manual_seed']
        if seed is None:
            seed = random.randint(1, 10000)
        if self.rank <= 0:
            self.logger.info('Random seed: {}'.format(seed))
        seed += self.rank  # Different multiprocessing instances should behave differently.
        util.set_random_seed(seed)

        torch.backends.cudnn.benchmark = True
        # torch.backends.cudnn.deterministic = True
        if opt_get(opt, ['anomaly_detection'], False):
            torch.autograd.set_detect_anomaly(True)

        # Save the compiled opt dict to the global loaded_options variable.
        util.loaded_options = opt

        #### create train and val dataloader
        dataset_ratio = 1  # enlarge the size of each epoch
        # Initialize so the assert at the end raises a clear AssertionError
        # (not AttributeError) when no 'train' dataset is configured.
        self.train_loader = None
        for phase, dataset_opt in opt['datasets'].items():
            if phase == 'train':
                self.train_set, collate_fn = create_dataset(
                    dataset_opt, return_collate=True)
                train_size = int(
                    math.ceil(len(self.train_set) / dataset_opt['batch_size']))
                total_iters = int(opt['train']['niter'])
                self.total_epochs = int(math.ceil(total_iters / train_size))
                if opt['dist']:
                    self.train_sampler = DistIterSampler(
                        self.train_set, self.world_size, self.rank,
                        dataset_ratio)
                    self.total_epochs = int(
                        math.ceil(total_iters / (train_size * dataset_ratio)))
                else:
                    self.train_sampler = None
                self.train_loader = create_dataloader(self.train_set,
                                                      dataset_opt,
                                                      opt,
                                                      self.train_sampler,
                                                      collate_fn=collate_fn)
                if self.rank <= 0:
                    self.logger.info(
                        'Number of train images: {:,d}, iters: {:,d}'.format(
                            len(self.train_set), train_size))
                    self.logger.info(
                        'Total epochs needed: {:d} for iters {:,d}'.format(
                            self.total_epochs, total_iters))
            elif phase == 'val':
                self.val_set, collate_fn = create_dataset(dataset_opt,
                                                          return_collate=True)
                self.val_loader = create_dataloader(self.val_set,
                                                    dataset_opt,
                                                    opt,
                                                    None,
                                                    collate_fn=collate_fn)
                if self.rank <= 0:
                    self.logger.info(
                        'Number of val images in [{:s}]: {:d}'.format(
                            dataset_opt['name'], len(self.val_set)))
            else:
                raise NotImplementedError(
                    'Phase [{:s}] is not recognized.'.format(phase))
        assert self.train_loader is not None

        #### create model
        self.model = ExtensibleTrainer(opt, cached_networks=all_networks)

        ### Evaluators
        self.evaluators = []
        if 'eval' in opt.keys() and 'evaluators' in opt['eval'].keys():
            for ev_key, ev_opt in opt['eval']['evaluators'].items():
                self.evaluators.append(
                    create_evaluator(self.model.networks[ev_opt['for']],
                                     ev_opt, self.model.env))

        #### resume training
        if resume_state:
            self.logger.info(
                'Resuming training from epoch: {}, iter: {}.'.format(
                    resume_state['epoch'], resume_state['iter']))

            self.start_epoch = resume_state['epoch']
            self.current_step = resume_state['iter']
            self.model.resume_training(
                resume_state, 'amp_opt_level'
                in opt.keys())  # handle optimizers and schedulers
        else:
            self.current_step = -1 if 'start_step' not in opt.keys(
            ) else opt['start_step']
            self.start_epoch = 0
        if 'force_start_step' in opt.keys():
            self.current_step = opt['force_start_step']
        opt['current_step'] = self.current_step