Example 1
def main():
    # Usage: python visualize.py <config_path> <checkpoint_path>
    parser = argparse.ArgumentParser()
    parser.add_argument("config_path",
                        type=str,
                        help="Path to the configuration file")
    parser.add_argument("checkpoint_path",
                        type=str,
                        help="Path to the model checkpoint file")
    args = parser.parse_args()

    cfg = torchie.Config.fromfile(args.config_path)
    cfg.data.val.test_mode = True

    # build the dataloader
    dataset = build_dataset(cfg.data.val)
    data_loader = build_dataloader(
        dataset,
        batch_size=1,
        workers_per_gpu=1,
        dist=False,
        shuffle=False,
    )

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint = load_checkpoint(model,
                                 args.checkpoint_path,
                                 map_location="cpu")
    model = MegDataParallel(model, device_ids=[0])

    device = torch.device("cuda")
    visualize(model, data_loader, device)
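Example 1 (and the two examples that follow) does not show its import block. As a minimal sketch, assuming a Det3D-style project layout, a working script would need roughly the following; every module path below is an assumption inferred from the identifiers used in the snippets, not something the snippets themselves confirm.

# Assumed imports for the helpers used across the three examples; adjust the
# paths to the actual package layout of the codebase these snippets come from.
import argparse
import os

import torch
from torch.utils.data import DataLoader

from det3d import torchie
from det3d.datasets import build_dataloader, build_dataset
from det3d.models import build_detector
from det3d.torchie.parallel import MegDataParallel, collate_kitti
from det3d.torchie.trainer import get_dist_info, load_checkpoint

# visualize, test, test_v2, init_dist, kitti_evaluate, parse_args and the kitti
# module are project-local helpers whose definitions are not shown here.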
Example 2
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, (
        "Please specify at least one operation (save or show the results) "
        'with the argument "--out" or "--show" or "--json_out"'
    )

    if args.out is not None and not args.out.endswith((".pkl", ".pickle")):
        raise ValueError("The output file must be a .pkl or .pickle file.")

    if args.json_out is not None and args.json_out.endswith(".json"):
        args.json_out = args.json_out[:-5]

    cfg = torchie.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get("cudnn_benchmark", False):
        torch.backends.cudnn.benchmark = True

    # cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # cfg.data.val.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == "none":
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # dataset = build_dataset(cfg.data.val)
    data_loader = build_dataloader(
        dataset,
        batch_size=cfg.data.samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False,
    )

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)

    checkpoint = load_checkpoint(model, args.checkpoint, map_location="cpu")
    # Older versions did not save class info in checkpoints; this workaround
    # keeps backward compatibility.
    if "CLASSES" in checkpoint["meta"]:
        model.CLASSES = checkpoint["meta"]["CLASSES"]
    else:
        model.CLASSES = dataset.CLASSES

    model = MegDataParallel(model, device_ids=[0])
    result_dict, detections = test(
        data_loader, model, save_dir=None, distributed=distributed
    )

    for k, v in result_dict["results"].items():
        print(f"Evaluation {k}: {v}")

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print("\nwriting results to {}".format(args.out))
        torchie.dump(detections, args.out)

    if args.txt_result:
        res_dir = os.path.join(os.getcwd(), "predictions")
        os.makedirs(res_dir, exist_ok=True)  # the predictions directory may not exist yet
        for dt in detections:
            with open(
                os.path.join(res_dir, "%06d.txt" % int(dt["metadata"]["token"])), "w"
            ) as fout:
                lines = kitti.annos_to_kitti_label(dt)
                for line in lines:
                    fout.write(line + "\n")

        ap_result_str, ap_dict = kitti_evaluate(
            "/data/Datasets/KITTI/Kitti/object/training/label_2",
            res_dir,
            label_split_file="/data/Datasets/KITTI/Kitti/ImageSets/val.txt",
            current_class=0,
        )

        print(ap_result_str)
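Examples 2 and 3 rely on a parse_args() helper that is not shown. The sketch below is a hedged reconstruction based only on the attributes the scripts read; the exact argument names, defaults, help strings, and launcher choices are assumptions.

def parse_args():
    # Hypothetical parser covering everything the scripts access:
    # args.config, args.checkpoint, args.out, args.json_out, args.show,
    # args.txt_result, args.launcher, and (Example 3) args.eval_id / args.vis_id.
    parser = argparse.ArgumentParser(description="test a trained detector")
    parser.add_argument("config", help="path to the config file")
    parser.add_argument("checkpoint",
                        help="checkpoint file (Example 3 joins it with cfg.work_dir)")
    parser.add_argument("--out", default=None,
                        help="output file for detections (.pkl / .pickle)")
    parser.add_argument("--json_out", default=None,
                        help="output filename prefix (a trailing .json is stripped)")
    parser.add_argument("--show", action="store_true",
                        help="show results instead of, or in addition to, saving them")
    parser.add_argument("--txt_result", action="store_true",
                        help="also dump KITTI-format txt predictions")
    parser.add_argument("--launcher", default="none",
                        choices=["none", "pytorch", "slurm", "mpi"],
                        help="distributed launcher; 'none' disables distributed testing")
    parser.add_argument("--eval_id", type=int, nargs="+", default=None,
                        help="sample ids to evaluate with test_v2 (parsed as a list)")
    parser.add_argument("--vis_id", type=int, default=None,
                        help="sample id to visualize")
    return parser.parse_args()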
Example 3
def main():
    args = parse_args()
    print(args)
    assert args.out or args.show or args.json_out, (
        "Please specify at least one operation (save or show the results) "
        'with the argument "--out" or "--show" or "--json_out"'
    )

    if args.out is not None and not args.out.endswith((".pkl", ".pickle")):
        raise ValueError("The output file must be a .pkl or .pickle file.")

    if args.json_out is not None and args.json_out.endswith(".json"):
        args.json_out = args.json_out[:-5]

    cfg = torchie.Config.fromfile(args.config)
    if cfg.get("cudnn_benchmark", False):  # False
        torch.backends.cudnn.benchmark = True

    # cfg.model.pretrained = None
    # cfg.data.test.test_mode = True
    cfg.data.val.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == "none":
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.val)
    batch_size = cfg.data.samples_per_gpu
    num_workers = cfg.data.workers_per_gpu
    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=None,
        num_workers=num_workers,
        collate_fn=collate_kitti,
        shuffle=False,
    )

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint_path = os.path.join(cfg.work_dir, args.checkpoint)
    checkpoint = load_checkpoint(model, checkpoint_path, map_location="cpu")

    # Older versions did not save class info in checkpoints; this workaround
    # keeps backward compatibility.
    if "CLASSES" in checkpoint["meta"]:
        model.CLASSES = checkpoint["meta"]["CLASSES"]
    else:
        model.CLASSES = dataset.CLASSES

    model = MegDataParallel(model, device_ids=[0])
    if args.eval_id is None:
        result_dict, detections = test(data_loader,
                                       model,
                                       save_dir=None,
                                       distributed=distributed)

        for k, v in result_dict["results"].items():
            print(f"Evaluation {k}: {v}")
            # f.write(f"\nEvaluation {k}: {v}\n")

        for k, v in result_dict["results_2"].items():
            print(f"Evaluation {k}: {v}")
            # f.write(f"\nEvaluation {k}: {v}\n")

        # save mAP results to out.pkl file.
        # rank, _ = get_dist_info()
        # if args.out and rank == 0:
        #     print("\nwriting results to {}".format(args.out))
        #     torchie.dump(detections, os.path.join(cfg.work_dir, args.out))

        # if args.txt_result:  # True
        #     res_dir = os.path.join(cfg.work_dir, "predictions")
        #     os.makedirs(res_dir, exist_ok=True)
        #     for dt in detections:
        #         with open(os.path.join(res_dir, "%06d.txt" % int(dt["metadata"]["token"])), "w") as fout:
        #             lines = kitti.annos_to_kitti_label(dt)
        #             for line in lines:
        #                 fout.write(line + "\n")

        #     gt_labels_dir = data_root + "/KITTI/object/training/label_2"
        #     label_split_file = data_root + "/KITTI/ImageSets/val.txt"
        #     # todo: this evaluation is different from previous one
        #     ap_result_str, ap_dict = kitti_evaluate(gt_labels_dir, res_dir, label_split_file=label_split_file, current_class=0,)
        #     print(ap_result_str)

    else:
        assert isinstance(args.eval_id, list)
        test_v2(data_loader,
                model,
                distributed=distributed,
                eval_id=args.eval_id,
                vis_id=args.vis_id)
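The final branch requires args.eval_id to be a list before calling test_v2. With an nargs="+" definition like the sketch after Example 2, argparse already produces a list of ints, so the assertion holds. The invocation below is purely illustrative; the script and config names are made up.

# Hypothetical invocation:
#   python test.py configs/kitti_car.py latest.pth --eval_id 3 7 11 --vis_id 3
# argparse turns "--eval_id 3 7 11" into [3, 7, 11], which is a list, so the
# assertion before test_v2 passes and the ids are forwarded unchanged.
args = parse_args()
assert isinstance(args.eval_id, list)  # e.g. [3, 7, 11]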