Example #1
def main():
    args = parser.parse_args()
    args.timestamp = tools.get_timestamp()

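    # create the work directory and apply the benchmark/deterministic/seed settings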
    tools.mkdir_or_exist(args.workdir)
    tools.setup(args.benchmark, args.deterministic, args.seed)

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

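    # distributed mode is enabled when world_size > 1 or a multiprocessing launch is requested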
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch one main_worker
        # process per GPU on this node
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
Example #2
def main():
    opt = get_option()
    save_root = os.path.join(opt.save_root, opt.dataset)
    utils.mkdir_or_exist(save_root)
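    # "_DC" model variants are loaded from model.dynet; others are imported by module name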
    if "_DC" in opt.model:
        module = importlib.import_module("model.dynet")
    else:
        module = importlib.import_module("model.{}".format(opt.model.lower()))
    solver = Solver(module, opt)

    solver.save_mid_result()
Example #3
    def __init__(self,
                 model,
                 batch_processor,
                 optimizer=None,
                 work_dir=None,
                 log_level=logging.INFO,
                 logger=None):
        assert callable(batch_processor)
        self.model = model
        if optimizer is not None:
            self.optimizer = self.init_optimizer(optimizer)
        else:
            self.optimizer = None
        self.batch_processor = batch_processor

        # create work_dir
        if is_str(work_dir):
            self.work_dir = osp.abspath(work_dir)
            mkdir_or_exist(self.work_dir)
        elif work_dir is None:
            self.work_dir = None
        else:
            raise TypeError('"work_dir" must be a str or None')

        # get model name from the model class
        if hasattr(self.model, 'module'):
            self._model_name = self.model.module.__class__.__name__
        else:
            self._model_name = self.model.__class__.__name__

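        # distributed info and logging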
        self._rank, self._world_size = get_dist_info()
        if logger is None:
            self.logger = self.init_logger(work_dir, log_level)
        else:
            self.logger = logger
        self.log_buffer = LogBuffer()
        # self.tensorboardX_buffer = TensorboardXBuffer()

        self.mode = None
        self._hooks = []
        self._epoch = 0
        self._iter = 0
        self._inner_iter = 0
        self._max_epochs = 0
        self._max_iters = 0
Example #4
def main(opt):
    mkdir_or_exist(opt.save_root)

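    # run on the GPU when available, otherwise fall back to CPU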
    dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    module = importlib.import_module("model.{}".format(opt.model.lower()))
    net = module.Net(opt).to(dev)

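    # load the pretrained weights onto CPU and copy them into the network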
    state_dict = torch.load(opt.pretrain,
                            map_location=lambda storage, loc: storage)
    net.load_state_dict(state_dict)

    paths = sorted(glob.glob(os.path.join(opt.dataset_root, "*.png")))
    for path in tqdm(paths):
        name = os.path.basename(path)

        LR = color.gray2rgb(io.imread(path))
        LR = im2tensor(LR).unsqueeze(0).to(dev)
        LR = F.interpolate(LR, scale_factor=opt.scale, mode="nearest")

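        # convert the network output to a uint8 HWC array before saving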
        SR = net(LR).detach()
        SR = SR[0].clamp(0, 255).round().cpu().byte().permute(1, 2, 0).numpy()

        save_path = os.path.join(opt.save_root, name)
        io.imsave(save_path, SR)
Example #5
def main():
    opt = get_option()
    if opt.save_result:
        save_root = os.path.join(opt.save_root, opt.dataset)
        utils.mkdir_or_exist(save_root)
    utils.mkdir_or_exist(opt.ckpt_root)
    utils.mkdir_or_exist(opt.save_root)
    logger = utils.create_logger(opt)
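    # fix the random seed for reproducibility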
    torch.manual_seed(opt.seed)
    if "_DC" in opt.model:
        module = importlib.import_module("model.dynet")
    else:
        module = importlib.import_module("model.{}".format(opt.model.lower()))

    if not opt.test_only:
        logger.info(json.dumps(vars(opt), indent=4))

    solver = Solver(module, opt)
    if opt.test_only:
        logger.info("Evaluate {} (loaded from {})".format(opt.model, opt.pretrain))
        psnr = solver.evaluate()
        logger.info("{:.2f}".format(psnr))
    else:
        solver.fit()
Example #6
def main():
    args = parser.parse_args()
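    # merge the JSON config with the command-line arguments (CLI values take precedence)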
    with open(args.config, 'r') as f:
        args_old = json.load(f)
    args_old.update(args.__dict__)
    args.__dict__ = args_old
    args.timestamp = tools.get_timestamp()

    tools.mkdir_or_exist(args.workdir)
    tools.setup(True, False, None)  # benchmark=True, deterministic=False, seed=None

    logger = Logger(args, 'test_report.txt', mode='w')
    logger.init_info(save_config=False)
    logger.info("Configuration:\n{}".format(json.dumps(args.__dict__,
                                                       indent=4)))

    # create dataset
    test_augmentation = transforms.Compose([
        transforms.Resize(args.img_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=args.rgb_mean, std=args.rgb_std)
    ])
    test_dataset = dataset_classes[args.dataset](
        args.data,
        subset=args.subset,
        mode='test',
        image_transform=test_augmentation,
        scale_size=args.scale_list,
        task=args.task)

    # create model
    logger.info("=> creating model ..")
    model = DCR(backbone=args.backbone,
                layers=args.layers,
                out_channels=args.out_channels,
                mid_channels=args.mid_channels,
                num_scales=test_dataset.num_scales,
                num_classes=test_dataset.num_classes,
                normalized_embeddings=args.normalized_embeddings)
    torch.cuda.set_device(args.gpu)
    model = model.cuda(args.gpu)

    if args.load:
        if os.path.isfile(args.load):
            logger.info("=> loading checkpoint '{}'".format(args.load))
            # Map model to be loaded to specified single gpu.
            loc = 'cuda:{}'.format(args.gpu)
            checkpoint = torch.load(args.load, map_location=loc)
            tools.load_model(model, checkpoint['state_dict'])
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.load))
    else:
        logger.warning(
            "args.load is not specified. Model will be evaluated with "
            "random initialization."
        )

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=True,
                                              sampler=None,
                                              drop_last=False)

    # switch to evaluation mode
    model.eval()
    all_embeds = list()
    all_labels = list()
    for i, batch_data in enumerate(tqdm(test_loader)):
        images = batch_data['data']
        labels = batch_data['label'].long()
        all_labels.append(labels)

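        # wrap single-scale tensors in a dict so all inputs share the multi-scale format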
        if isinstance(images, torch.Tensor):
            images = {'0': images}
        # compute output
        with torch.no_grad():
            embeddings = {}
            for scale, imgs in images.items():
                if args.gpu is not None:
                    imgs = imgs.cuda(args.gpu, non_blocking=True)
                if args.stage == 'trunk':
                    outputs = model(imgs, trunk_only=True)
                    embeddings[scale] = outputs['trunk_embeddings'].detach().cpu()
                elif args.stage == 'branch':
                    outputs = model(imgs, idx=int(scale.split('_')[-1]))
                    embeddings[scale] = outputs['branch_embeddings'].detach().cpu()
        all_embeds.append(embeddings)
    # get evaluation metric
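    # concatenate the per-scale embeddings and labels accumulated over all batches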
    all_embeds = {
        k: torch.cat(tuple(map(lambda r: r[k], all_embeds)), dim=0)
        for k in all_embeds[0].keys()
    }
    all_labels = torch.cat(all_labels, dim=0)
    metrics = test_loader.dataset.evaluate(all_embeds, all_labels)
    logger.info('Evaluation metrics:\n{}'.format(json.dumps(metrics, indent=4)))