Example #1

from argparse import ArgumentParser
import os

from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
from mmseg.core.evaluation import get_palette

# Placeholder defaults; the original script defined these paths elsewhere.
img_path = ''  # single image to segment, or '' to process img_dir instead
config = 'config.py'  # model config file
ckpt = 'checkpoint.pth'  # model checkpoint file
img_dir = 'images/'  # folder scanned when --img is empty


def get_list_file_in_folder(folder):
    # Helper assumed by the snippet: list image files in a folder.
    return [
        f for f in os.listdir(folder)
        if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp'))
    ]


def main():
    parser = ArgumentParser()
    parser.add_argument('--img', help='Image file', default=img_path)
    parser.add_argument('--config', help='Config file', default=config)
    parser.add_argument('--checkpoint', help='Checkpoint file', default=ckpt)
    parser.add_argument(
        '--device', default='cuda:1', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default=None,
        help='Color palette used for segmentation map')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)

    # get_palette expects a dataset name and fails on None, so only resolve
    # a palette when one was given; show_result_pyplot accepts palette=None.
    palette = get_palette(args.palette) if args.palette else None

    if args.img == '':
        list_img = sorted(get_list_file_in_folder(img_dir))
        for img_name in list_img:
            img = os.path.join(img_dir, img_name)
            print(img)
            result = inference_segmentor(model, img)
            show_result_pyplot(model, img, result, palette)
    else:
        result = inference_segmentor(model, args.img)
        show_result_pyplot(model, args.img, result, palette)
Example #2

def main():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='cityscapes',
        help='Color palette used for segmentation map')
    parser.add_argument(
        '--opacity',
        type=float,
        default=0.5,
        help='Opacity of painted segmentation map. In (0, 1] range.')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_segmentor(model, args.img)
    # show the results
    show_result_pyplot(
        model,
        args.img,
        result,
        get_palette(args.palette),
        opacity=args.opacity)
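Stripped of the argparse scaffolding, the inference API these demos wrap comes down to three calls. A minimal sketch, assuming the MMSegmentation 0.x API; the config, checkpoint and image paths are placeholders:

from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
from mmseg.core.evaluation import get_palette

# Placeholder paths; substitute your own config, checkpoint and image.
model = init_segmentor('config.py', 'checkpoint.pth', device='cuda:0')
result = inference_segmentor(model, 'demo.png')  # list with one H x W label map
show_result_pyplot(model, 'demo.png', result, get_palette('cityscapes'),
                   opacity=0.5)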
Example #3

def test_palette():
    assert CityscapesDataset.PALETTE == get_palette('cityscapes')
    assert PascalVOCDataset.PALETTE == get_palette('voc') == get_palette(
        'pascal_voc')
    assert ADE20KDataset.PALETTE == get_palette('ade') == get_palette('ade20k')

    with pytest.raises(ValueError):
        get_palette('unsupported')
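get_palette returns a plain list of [R, G, B] rows, one per class, which is why the test can compare it directly against a dataset's PALETTE attribute. A quick check using the standard Cityscapes palette (19 classes; class 0 is 'road'):

from mmseg.core.evaluation import get_palette

palette = get_palette('cityscapes')
print(len(palette))  # 19 classes
print(palette[0])    # [128, 64, 128], the color for 'road'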
Example #4

def main():
    parser = ArgumentParser()
    # parser.add_argument('--img', default="Image_20200925100338349.bmp", help='Image file')
    parser.add_argument('--img', default="star.png", help='Image file')
    # parser.add_argument('--img', default="demo.png", help='Image file')
    parser.add_argument(
        '--config',
        # default="../configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes_custom_binary.py",
        default="../configs/danet/danet_r50-d8_512x1024_40k_cityscapes_custom.py",
        # default="../configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes.py",
        help='Config file')
    parser.add_argument(
        '--checkpoint',
        # default="../tools/work_dirs/deeplabv3_r50-d8_512x1024_40k_cityscapes_custom_binary/iter_200.pth",
        default="../tools/work_dirs/danet_r50-d8_512x1024_40k_cityscapes_custom/iter_4000.pth",
        # default="../checkpoints/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-cf9ce186.pth",
        help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='cityscapes_custom',
        # default='cityscapes',
        help='Color palette used for segmentation map')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_segmentor(model, args.img)

    # io.imsave("result.png", result[0])
    # io.imshow(result[0])
    # io.show()
    # show the results
    show_result_pyplot(model, args.img, result, get_palette(args.palette))
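The commented-out io.imsave lines sketch an alternative to plotting: writing the raw prediction to disk. A minimal version of that route as a hypothetical helper, assuming fewer than 256 classes so the label map fits in uint8:

import numpy as np
from skimage import io

def save_prediction(result, path='result.png'):
    # result[0] is an H x W array of integer class IDs.
    io.imsave(path, result[0].astype(np.uint8))

It would be called as save_prediction(result) right after inference_segmentor.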
    """
Example #5

def test_palette():
    assert CityscapesDataset.PALETTE == get_palette('cityscapes')
    assert PascalVOCDataset.PALETTE == get_palette('voc') == get_palette(
        'pascal_voc')
    assert ADE20KDataset.PALETTE == get_palette('ade') == get_palette('ade20k')
    assert LoveDADataset.PALETTE == get_palette('loveda')
    assert PotsdamDataset.PALETTE == get_palette('potsdam')
    assert COCOStuffDataset.PALETTE == get_palette('cocostuff')
    assert iSAIDDataset.PALETTE == get_palette('isaid')

    with pytest.raises(ValueError):
        get_palette('unsupported')
Example #6

def main():
    parser = ArgumentParser()
    parser.add_argument('video', help='Video file or webcam id')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--palette',
                        default='cityscapes',
                        help='Color palette used for segmentation map')
    parser.add_argument('--show',
                        action='store_true',
                        help='Whether to show the drawn result')
    parser.add_argument('--show-wait-time',
                        default=1,
                        type=int,
                        help='Wait time after imshow')
    parser.add_argument('--output-file',
                        default=None,
                        type=str,
                        help='Output video file path')
    parser.add_argument('--output-fourcc',
                        default='MJPG',
                        type=str,
                        help='Fourcc of the output video')
    parser.add_argument('--output-fps',
                        default=-1,
                        type=int,
                        help='FPS of the output video')
    parser.add_argument('--output-height',
                        default=-1,
                        type=int,
                        help='Frame height of the output video')
    parser.add_argument('--output-width',
                        default=-1,
                        type=int,
                        help='Frame width of the output video')
    parser.add_argument(
        '--opacity',
        type=float,
        default=0.5,
        help='Opacity of painted segmentation map. In (0, 1] range.')
    args = parser.parse_args()

    assert args.show or args.output_file, \
        'At least one output should be enabled.'

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)

    # build input video
    cap = cv2.VideoCapture(args.video)
    assert cap.isOpened(), 'Failed to open the input video.'
    input_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    input_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    input_fps = cap.get(cv2.CAP_PROP_FPS)

    # init output video
    writer = None
    output_height = None
    output_width = None
    if args.output_file is not None:
        fourcc = cv2.VideoWriter_fourcc(*args.output_fourcc)
        output_fps = args.output_fps if args.output_fps > 0 else input_fps
        output_height = args.output_height if args.output_height > 0 else int(
            input_height)
        output_width = args.output_width if args.output_width > 0 else int(
            input_width)
        writer = cv2.VideoWriter(args.output_file, fourcc, output_fps,
                                 (output_width, output_height), True)

    # start looping
    try:
        while True:
            flag, frame = cap.read()
            if not flag:
                break

            # test a single image
            result = inference_segmentor(model, frame)

            # blend raw image and prediction
            draw_img = model.show_result(frame,
                                         result,
                                         palette=get_palette(args.palette),
                                         show=False,
                                         opacity=args.opacity)

            if args.show:
                cv2.imshow('video_demo', draw_img)
                cv2.waitKey(args.show_wait_time)
            if writer:
                if (draw_img.shape[0] != output_height
                        or draw_img.shape[1] != output_width):
                    draw_img = cv2.resize(draw_img,
                                          (output_width, output_height))
                writer.write(draw_img)
    finally:
        if writer:
            writer.release()
        cap.release()
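One caveat with the 'Video file or webcam id' argument: cv2.VideoCapture opens a webcam only when given an int, so a numeric id arriving as a command-line string is treated as a filename. A small helper (hypothetical, not part of the original demo) that handles both cases before the capture loop:

import cv2

def open_capture(video):
    # Convert a numeric id to int so cv2.VideoCapture opens the webcam.
    source = int(video) if video.isdigit() else video
    cap = cv2.VideoCapture(source)
    assert cap.isOpened(), f'Failed to open {video}'
    return cap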