Example #1
0
def main():
    """
    Main function for evaluation.

    Parses args, sets up logging, builds the network and test loader,
    then runs inference over the whole split and dumps per-image results
    plus final overall statistics.
    """
    # Parse args and set up logging
    infer_args()

    if args.single_scale:
        scales = [1.0]
    else:
        scales = [float(x) for x in args.scales.split(',')]

    output_dir = os.path.join(args.ckpt_path, args.exp_name, args.split)
    os.makedirs(output_dir, exist_ok=True)
    save_log('eval', output_dir, date_str)
    logging.info("Network Arch: %s", args.arch)
    logging.info("CV split: %d", args.cv_split)
    logging.info("Exp_name: %s", args.exp_name)
    logging.info("Ckpt path: %s", args.ckpt_path)
    logging.info("Scales : %s", ' '.join(str(e) for e in scales))
    logging.info("Inference mode: %s", args.inference_mode)

    # Set up network, loader, inference mode.  Metrics need ground truth,
    # which a raw video folder does not provide.
    metrics = args.dataset != 'video_folder'
    test_loader = setup_loader()

    runner = RunEval(output_dir,
                     metrics,
                     write_image=args.dump_images,
                     dataset_cls=args.dataset_cls,
                     inference_mode=args.inference_mode)
    net = get_net()

    # Fix the ASPP pool size to 105, which is the tensor size if you train with crop
    # size of 840x840
    if args.fixed_aspp_pool:
        net.module.aspp.img_pooling = torch.nn.AvgPool2d(105)

    if args.inference_mode == 'sliding':
        inference = inference_sliding
    elif args.inference_mode == 'pooling':
        inference = inference_pool
    else:
        # BUG FIX: raising a plain string is a TypeError in Python 3 —
        # raise a real exception type instead.
        raise ValueError(
            'Not a valid inference mode: {}'.format(args.inference_mode))

    # Run Inference!
    pbar = tqdm(test_loader, desc='eval {}'.format(args.split), smoothing=1.0)
    for iteration, data in enumerate(pbar):
        # BUG FIX: (pos_h, pos_w) is only unpacked when pos_rfactor > 0,
        # but the original passed pos=(pos_h, pos_w) unconditionally,
        # which raised NameError otherwise.  Default to None instead.
        # NOTE(review): assumes runner.inf accepts pos=None — confirm.
        pos = None
        if args.dataset == 'video_folder':
            imgs, img_names = data
            gt = None
            # BUG FIX: base_img was unbound in this branch but is passed
            # to runner.inf below.
            base_img = None
        else:
            if args.inference_mode == 'pooling':
                if args.pos_rfactor > 0:
                    base_img, gt_with_imgs, img_names, (pos_h, pos_w) = data
                    pos = (pos_h, pos_w)
                else:
                    base_img, gt_with_imgs, img_names = data
                base_img = base_img[0]
                imgs = gt_with_imgs[0]
                gt = gt_with_imgs[1]
            else:
                base_img = None
                if args.pos_rfactor > 0:
                    imgs, gt, img_names, _, (pos_h, pos_w) = data
                    pos = (pos_h, pos_w)
                else:
                    imgs, gt, img_names = data

        runner.inf(imgs,
                   img_names,
                   gt,
                   inference,
                   net,
                   scales,
                   pbar,
                   base_img,
                   pos=pos)
        # test_mode is a smoke test: stop after a handful of batches.
        if iteration > 5 and args.test_mode:
            break

    # Calculate final overall statistics
    runner.final_dump()
Example #2
0
def main():
    """
    Main function for evaluation.

    Parses args, sets up logging, builds the network and test loader,
    then runs inference over the whole split and dumps per-image results
    plus final overall statistics.
    """
    # Parse args and set up logging
    infer_args()

    if args.single_scale:
        scales = [1.0]
    else:
        scales = [float(x) for x in args.scales.split(',')]

    output_dir = os.path.join(args.ckpt_path, args.exp_name, args.split)
    os.makedirs(output_dir, exist_ok=True)
    save_log('eval', output_dir, date_str)
    logging.info("Network Arch: %s", args.arch)
    logging.info("CV split: %d", args.cv_split)
    logging.info("Exp_name: %s", args.exp_name)
    logging.info("Ckpt path: %s", args.ckpt_path)
    logging.info("Scales : %s", ' '.join(str(e) for e in scales))
    logging.info("Inference mode: %s", args.inference_mode)

    # Set up network, loader, inference mode.  When metrics is False,
    # mIoU is not computed (no ground truth available).
    metrics = args.dataset != 'video_folder'
    if args.dataset == 'kitti' and args.split == 'test':
        metrics = False
    test_loader = setup_loader()

    runner = RunEval(output_dir,
                     metrics,
                     write_image=args.dump_images,
                     dataset_cls=args.dataset_cls,
                     inference_mode=args.inference_mode)
    net = get_net()

    # Fix the ASPP pool size to 105, which is the tensor size if you train with crop
    # size of 840x840
    if args.fixed_aspp_pool:
        net.module.aspp.img_pooling = torch.nn.AvgPool2d(105)

    if args.inference_mode == 'sliding':  # default mode
        inference = inference_sliding
    elif args.inference_mode == 'pooling':
        inference = inference_pool
    elif args.inference_mode == 'whole':
        inference = inference_whole
    else:
        # BUG FIX: raising a plain string is a TypeError in Python 3 —
        # raise a real exception type instead.
        raise ValueError(
            'Not a valid inference mode: {}'.format(args.inference_mode))

    # Run Inference!
    pbar = tqdm(test_loader, desc='eval {}'.format(args.split), smoothing=1.0)
    for iteration, data in enumerate(pbar):

        if args.dataset == 'video_folder':
            imgs, img_names = data
            gt = None
            # BUG FIX: base_img was unbound in this branch but is passed
            # to runner.inf below.
            base_img = None
        else:
            if args.inference_mode == 'pooling':
                base_img, gt_with_imgs, img_names = data
                base_img = base_img[0]
                imgs = gt_with_imgs[0]
                gt = gt_with_imgs[1]
            else:
                base_img = None
                imgs, gt, img_names = data

        # Run the model to obtain the segmentation result
        runner.inf(imgs, img_names, gt, inference, net, scales, pbar,
                   base_img)
        # test_mode is a smoke test: stop after a handful of batches.
        if iteration > 5 and args.test_mode:
            break

    # Calculate final overall statistics
    runner.final_dump()
Example #3
0
                    default='network.sfnet_resnet.DeepR18_SF_deeply_dsn',
                    help='network architecture used for inference')
# Output directory for saved results.
parser.add_argument('--save_dir',
                    type=str,
                    default='./save',
                    help='path to save your results')
args = parser.parse_args()
# Validate parsed args and fill in derived config fields for inference.
assert_and_infer_cfg(args, train_mode=False)
# NOTE(review): benchmark disabled — presumably because input sizes vary
# between images, making cudnn autotuning counterproductive; confirm.
cudnn.benchmark = False
torch.cuda.empty_cache()

# setup logger: timestamped log file under <save_dir>/log
date_str = str(datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))
log_dir = os.path.join(args.save_dir, "log")
os.makedirs(log_dir, exist_ok=True)
save_log('log', log_dir, date_str, rank=0)

# get net: build the architecture, wrap for multi-GPU, restore weights
args.dataset_cls = cityscapes
net = get_net(args, criterion=None)
net = torch.nn.DataParallel(net).cuda()
logging.info('Net built.')
net, _ = restore_snapshot(net,
                          optimizer=None,
                          snapshot=args.snapshot,
                          restore_optimizer_bool=False)
net.eval()  # inference only: fix batch-norm / dropout to eval behavior
logging.info('Net restored.')

# get data
data_dir = args.demo_folder