예제 #1
0
def run(args):
    """Shard the raw VOC12 image set and process the shards in parallel workers."""
    # Raw images only: no normalization and no tensor conversion here; the
    # spawned workers handle any per-image processing themselves.
    image_set = voc12.dataloader.VOC12ImageDataset(
        args.train_list,
        voc12_root=args.voc12_root,
        img_normal=None,
        to_torch=False)
    shards = torchutils.split_dataset(image_set, args.num_workers)

    print('[ ', end='')
    multiprocessing.spawn(_work,
                          nprocs=args.num_workers,
                          args=(shards, args),
                          join=True)
    print(']')
예제 #2
0
File: make_cam.py Project: jnyborg/irn
def run(args):
    """Load the trained CAM network, build the multi-scale dataset for the
    selected dataset flavour, and run CAM inference on every visible GPU."""
    gpu_count = torch.cuda.device_count()

    if args.dataset == 'l8biome':
        net = getattr(importlib.import_module(args.cam_network), 'CAM')(
            n_classes=2, in_channels=10, pretrained=False)
        data = l8biome.dataloader.L8BiomeDatasetMSF(args.data_root,
                                                    'train',
                                                    scales=args.cam_scales)
        # Only compute for cloudy images, clear should have empty mask
        data.images = [entry for entry in data.images if 'cloudy' in entry[2]]
    else:
        net = getattr(importlib.import_module(args.cam_network), 'CAM')(
            n_classes=20)
        data = voc12.dataloader.VOC12ClassificationDatasetMSF(
            args.train_list, voc12_root=args.data_root, scales=args.cam_scales)

    net.load_state_dict(torch.load(args.cam_weights_name + '.pth'), strict=True)
    net.eval()

    shards = torchutils.split_dataset(data, gpu_count)

    print('[ ', end='')
    multiprocessing.spawn(_work,
                          nprocs=gpu_count,
                          args=(net, shards, args),
                          join=True)
    print(']')

    torch.cuda.empty_cache()
예제 #3
0
def run(args):
    """Build the raw-image dataset matching ``args.dataset``, shard it, and
    fan the shards out to ``args.num_workers`` spawned worker processes.

    Raises:
        KeyError: if ``args.dataset`` names an unsupported dataset.
    """
    if args.dataset == 'voc12':
        data = voc12.dataloader.VOC12ImageDataset(args.train_list,
                                                  dev_root=args.dev_root,
                                                  norm_mode=None,
                                                  to_torch=False)
    elif args.dataset in ('adp_morph', 'adp_func'):
        # htt_type is the suffix after the underscore ('morph' or 'func').
        data = adp.dataloader.ADPImageDataset(
            args.train_list,
            dev_root=args.dev_root,
            htt_type=args.dataset.split('_')[-1],
            is_eval=args.split == 'evaluation',
            norm_mode=None,
            to_torch=False)
    elif args.dataset in ('deepglobe', 'deepglobe_balanced'):
        data = deepglobe.dataloader.DeepGlobeImageDataset(
            args.train_list,
            dev_root=args.dev_root,
            is_balanced=args.dataset == 'deepglobe_balanced',
            norm_mode=None,
            to_torch=False)
    else:
        raise KeyError('Dataset %s not yet implemented' % args.dataset)
    shards = torchutils.split_dataset(data, args.num_workers)

    # print('[ ', end='')
    multiprocessing.spawn(_work,
                          nprocs=args.num_workers,
                          args=(shards, args),
                          join=True)
예제 #4
0
def run(args):
    """Run IRN edge/displacement inference over the inference list on all GPUs."""
    net = getattr(importlib.import_module(args.irn_network), 'EdgeDisplacement')()
    # strict=False tolerates checkpoint keys that this head does not define.
    net.load_state_dict(torch.load(args.irn_weights_name), strict=False)
    net.eval()

    gpu_count = torch.cuda.device_count()

    # Single scale (1.0) only — no multi-scale fusion for this pass.
    data = voc12.dataloader.VOC12ClassificationDatasetMSF(
        args.infer_list, voc12_root=args.voc12_root, scales=(1.0,))
    shards = torchutils.split_dataset(data, gpu_count)

    print("[ ", end='')
    multiprocessing.spawn(_work,
                          nprocs=gpu_count,
                          args=(net, shards, args),
                          join=True)
    print("]")
예제 #5
0
File: make_cam.py Project: johnnylu305/irn
def run(args):
    """Restore the trained CAM network and compute multi-scale CAMs for the
    training list, one spawned worker per visible GPU."""
    net = getattr(importlib.import_module(args.cam_network), 'CAM')()
    print(args.cam_weights_name+'.pth')
    net.load_state_dict(torch.load(args.cam_weights_name + '.pth'), strict=True)
    net.eval()

    gpu_count = torch.cuda.device_count()

    data = voc12.dataloader.VOC12ClassificationDatasetMSF(
        args.train_list,
        voc12_root=args.voc12_root,
        scales=args.cam_scales)
    shards = torchutils.split_dataset(data, gpu_count)

    print('[ ', end='')
    multiprocessing.spawn(_work,
                          nprocs=gpu_count,
                          args=(net, shards, args),
                          join=True)
    print(']')

    torch.cuda.empty_cache()
예제 #6
0
def run(args):
    """Instantiate the CAM network, load weights for the resnet50 variant,
    build the matching multi-scale evaluation dataset, and run inference
    across all GPUs.

    Raises:
        KeyError: if ``args.dataset`` names an unsupported dataset.
    """
    model = getattr(importlib.import_module(args.cam_network),
                    'CAM')(args.model_dir, args.dataset, args.tag,
                           args.num_classes, args.use_cls)
    # Only the plain resnet50 variant restores a separate checkpoint here.
    if args.model_id == 'resnet50':
        model.load_state_dict(torch.load(args.cam_weights_name + '.pth'),
                              strict=True)
    model.eval()

    gpu_count = torch.cuda.device_count()

    # Keyword arguments common to every dataset flavour below.
    shared_kwargs = dict(norm_mode=args.norm_mode,
                         outsize=args.outsize,
                         dev_root=args.dev_root,
                         scales=args.cam_scales)
    if args.dataset == 'voc12':
        data = voc12.dataloader.VOC12ClassificationDatasetMSF(
            args.val_list, **shared_kwargs)
    elif args.dataset in ('adp_morph', 'adp_func'):
        data = adp.dataloader.ADPClassificationDatasetMSF(
            args.val_list,
            htt_type=args.dataset.split('_')[-1],
            is_eval=args.split == 'evaluation',
            **shared_kwargs)
    elif args.dataset in ('deepglobe', 'deepglobe_balanced'):
        data = deepglobe.dataloader.DeepGlobeClassificationDatasetMSF(
            args.val_list,
            is_balanced=args.dataset == 'deepglobe_balanced',
            **shared_kwargs)
    else:
        raise KeyError('Dataset %s not yet implemented' % args.dataset)
    shards = torchutils.split_dataset(data, gpu_count)

    multiprocessing.spawn(_work,
                          nprocs=gpu_count,
                          args=(model, shards, args),
                          join=True)

    torch.cuda.empty_cache()
            ]

            highres_cam = torch.sum(torch.stack(highres_cam, 0),
                                    0)[:, 0, :size[0], :size[1]]
            # Max-normalize each CAM channel into [0, 1]; the +1e-5 guards
            # against division by zero for all-zero activation maps.
            strided_cam /= F.adaptive_max_pool2d(strided_cam, (1, 1)) + 1e-5
            highres_cam /= F.adaptive_max_pool2d(highres_cam, (1, 1)) + 1e-5

            # Persist both resolutions keyed by the valid category ids.
            # NOTE(review): "cam" stays a torch tensor while "high_res" is
            # converted to numpy — presumably downstream expects this mix.
            np.save(
                os.path.join(args.cam_out_dir, img_name + '.npy'), {
                    "keys": valid_cat,
                    "cam": strided_cam.cpu(),
                    "high_res": highres_cam.cpu().numpy()
                })


if __name__ == '__main__':
    # Build the CAM network and restore its trained weights.
    cam_model = getattr(importlib.import_module(args.cam_network), 'CAM')()
    cam_model.load_state_dict(torch.load(args.cam_weights_name + '.pth'),
                              strict=True)
    cam_model.eval()

    gpu_count = torch.cuda.device_count()
    print(gpu_count)
    msf_dataset = voc12.dataloader.VOC12ClassificationDatasetMSF(
        args.train_list, voc12_root=args.voc12_root, scales=args.cam_scales)
    msf_dataset = torchutils.split_dataset(msf_dataset, gpu_count)
    # _work(0, model, dataset, args)  -- single-process debugging path
    multiprocessing.spawn(_work,
                          nprocs=gpu_count,
                          args=(cam_model, msf_dataset, args),
                          join=True)
예제 #8
0
 pyutils.make_directory(cam_out_dir)
 
 # Setting Environment
 if args.use_cuda and torch.cuda.is_available():
     torch.cuda.set_device(args.gpu_id)
     device = torch.device('cuda')
 else:
     device = torch.device('cpu')
 
 n_gpus = torch.cuda.device_count()
 
 # Dataset
 train_dataset = voc12.dataloader.VOC12ClassificationDatasetMSF(args.train_list, voc12_root=args.voc12_root, \
                                                                 scales=args.cam_scales)
 
 # NOTE(review): 'train_datset' looks like a typo for 'train_dataset'; the
 # split result is never used — work() below receives the UNSPLIT dataset.
 # Confirm whether splitting was intended before renaming.
 train_datset = torchutils.split_dataset(train_dataset, n_gpus)
 
 
 
 # Build model
 model = getattr(importlib.import_module(args.cam_network), 'CAM')()
 print(os.path.join(checkpoints_dir, args.cam_weights_name))
 model.load_state_dict(torch.load(os.path.join(checkpoints_dir, args.cam_weights_name)), strict=True)
 model.eval()
 
 # Single-process CAM generation (no multiprocessing.spawn in this variant).
 logger.info('[')
 work(model, train_dataset, args, cam_out_dir)
 logger.info(']')
 
 torch.cuda.empty_cache()
 
예제 #9
0
        # Refine the background-confidence CAM with CRF; `keys` maps the CRF
        # label indices back to dataset class ids.
        pred = imutils.crf_inference_label(img,
                                           bg_conf_cam,
                                           n_labels=keys.shape[0])
        bg_conf = keys[pred]  ## mask

        # 2. combine confident fg & bg
        conf = fg_conf.copy()
        conf[fg_conf == 0] = 255  # background after the fg CRF pass is also treated as class 255 (ignore)
        conf[bg_conf + fg_conf == 0] = 0  # a pixel is background only if BOTH bg and fg passes call it background

        imageio.imwrite(os.path.join(ir_label_out_dir, img_name + '.png'),
                        conf.astype(np.uint8))

        # Crude progress meter: only the last worker prints ~20 ticks across
        # its shard.
        if process_id == num_workers - 1 and iter % (len(databin) // 20) == 0:
            print("%d " % ((5 * iter + 1) // (len(databin) // 20)), end='')


if __name__ == '__main__':
    # Plain (un-normalized, non-tensor) VOC12 images for the workers.
    image_set = voc12.dataloader.VOC12ImageDataset(val_list,
                                                   voc12_root=voc12_root,
                                                   img_normal=None,
                                                   to_torch=False)
    shards = torchutils.split_dataset(image_set, num_workers)

    print('[ ', end='')
    multiprocessing.spawn(_work,
                          nprocs=num_workers,
                          args=(shards, ),
                          join=True)
    print(']')