Example #1
    def _test_model(self, config_path, device="cpu"):
        # requires extra dependencies
        from detectron2.export import (
            Caffe2Model,
            add_export_config,
            export_caffe2_model,
        )

        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(config_path))
        cfg = add_export_config(cfg)
        cfg.MODEL.DEVICE = device

        model = build_model(cfg)
        DetectionCheckpointer(model).load(
            model_zoo.get_checkpoint_url(config_path))

        inputs = [{"image": self._get_test_image()}]
        c2_model = export_caffe2_model(cfg, model, copy.deepcopy(inputs))

        with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d:
            c2_model.save_protobuf(d)
            c2_model.save_graph(os.path.join(d, "test.svg"),
                                inputs=copy.deepcopy(inputs))
            c2_model = Caffe2Model.load_protobuf(d)
        # run the exported model once as a smoke test
        c2_model(inputs)[0]["instances"]
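The method above is an excerpt from a test class. A minimal sketch of the module-level imports and scaffolding it relies on is shown below; the class name and the body of _get_test_image are assumptions, not the original code:

import copy
import os
import tempfile
import unittest

import torch

from detectron2 import model_zoo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.modeling import build_model


class TestCaffe2Export(unittest.TestCase):  # hypothetical class name
    def _get_test_image(self):
        # hypothetical helper: any (C, H, W) uint8 tensor serves as a dummy input image
        return torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)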
Example #2
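This excerpt starts from an already-constructed cfg and a registered dataset. A minimal sketch of the assumed setup follows; the base config file and dataset name are placeholders, not taken from the original:

import os

from detectron2 import model_zoo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader
from detectron2.export import export_caffe2_model, add_export_config
from detectron2.modeling import build_model

cfg = get_cfg()
# placeholder base config; the original presumably used the config the weights were trained with
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg = add_export_config(cfg)  # required before Caffe2 export (see Example #1)
cfg.DATASETS.TEST = ("balloon_val",)  # placeholder dataset name; must be registered beforehand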
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3

# cfg.MODEL.WEIGHTS = "/codehub/tmp/model_final_balloon.pth"
# cfg.MODEL.WEIGHTS = "/codehub/apps/detectron2/temp/model_final_balloon.pth"
cfg.MODEL.WEIGHTS = "/codehub/apps/detectron2/release/model_final.pth"
# cfg.MODEL.DEVICE = "cpu"

# cfg.SOLVER.IMS_PER_BATCH = 2
# cfg.SOLVER.BASE_LR = 0.00025
# cfg.SOLVER.MAX_ITER = 350000    # a few hundred iterations may suffice for this toy dataset, but you can certainly train longer
# cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this toy dataset
# # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512   # faster, and good enough for this toy dataset
# cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3

cfg.freeze()

out_path = "./output"

torch_model = build_model(cfg)
DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)

# get a sample batch of data
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
first_batch = next(iter(data_loader))

# convert and save caffe2 model
caffe2_model = export_caffe2_model(cfg, torch_model, first_batch)
caffe2_model.save_protobuf(out_path)
# draw the caffe2 graph
caffe2_model.save_graph(os.path.join(out_path, "model.svg"),
                        inputs=first_batch)
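
For completeness, the exported protobuf can be loaded back and run exactly as in Example #1; a minimal sketch reusing out_path and first_batch from above:

from detectron2.export import Caffe2Model

# reload the model from the protobuf files written by save_protobuf()
caffe2_model = Caffe2Model.load_protobuf(out_path)

# run inference on the sample batch; outputs follow detectron2's standard format
outputs = caffe2_model(first_batch)
print(outputs[0]["instances"])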