Exemplo n.º 1
0
 def _get_kwargs(self):
     """Return the kwargs that ``build_detection_train_loader`` would receive."""
     loader_cfg = model_zoo.get_config("common/data/coco.py").dataloader.train
     loader_cfg.dataset.names = "coco_2017_val_100"
     # Drop the callable target so only its plain arguments remain.
     loader_cfg.pop("_target_")
     kwargs = {}
     for name, node in loader_cfg.items():
         kwargs[name] = instantiate(node)
     return kwargs
Exemplo n.º 2
0
def get_model_no_weights(config_path):
    """
    Like model_zoo.get, but do not load any weights (even pretrained)
    """
    config = model_zoo.get_config(config_path)
    if not torch.cuda.is_available():
        # No GPU available: build the model on CPU instead.
        config.MODEL.DEVICE = "cpu"
    return build_model(config)
Exemplo n.º 3
0
 def testOmegaConf(self):
     """A model must be buildable from an OmegaConf copy of a yacs config."""
     yacs_cfg = model_zoo.get_config(
         "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
     omega_cfg = OmegaConf.create(yacs_cfg.dump())
     if not torch.cuda.is_available():
         omega_cfg.MODEL.DEVICE = "cpu"
     # test that a model can be built with omegaconf config as well
     build_model(omega_cfg)
Exemplo n.º 4
0
    def test_setup_config(self):
        """``default_setup`` must accept both yacs and omegaconf configs."""
        with tempfile.TemporaryDirectory(prefix="detectron2_test") as tmp_dir:
            # yacs-style config
            yacs_cfg = get_cfg()
            yacs_cfg.OUTPUT_DIR = os.path.join(tmp_dir, "yacs")
            default_setup(yacs_cfg, {})

            # omegaconf (lazy) config
            lazy_cfg = model_zoo.get_config(
                "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py")
            lazy_cfg.train.output_dir = os.path.join(tmp_dir, "omegaconf")
            default_setup(lazy_cfg, {})
Exemplo n.º 5
0
    def test_fpn_scriptability(self):
        """Scripted FPN backbone must match eager execution on a random input."""
        cfg = model_zoo.get_config(
            "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml")
        backbone = build_resnet_fpn_backbone(cfg, ShapeSpec(channels=3))
        scripted = torch.jit.script(backbone)

        dummy = torch.rand(2, 3, 128, 128)
        eager_p5 = backbone(dummy)["p5"]
        scripted_p5 = scripted(dummy)["p5"]
        self.assertTrue(torch.allclose(eager_p5, scripted_p5))
Exemplo n.º 6
0
def get_model_no_weights(config_path):
    """
    Like model_zoo.get, but do not load any weights (even pretrained)
    """
    cfg = model_zoo.get_config(config_path)
    # Lazy (omegaconf) configs carry the model spec under ``cfg.model``.
    if not isinstance(cfg, CfgNode):
        return instantiate(cfg.model)
    if not torch.cuda.is_available():
        # No GPU available: build the model on CPU instead.
        cfg.MODEL.DEVICE = "cpu"
    return build_model(cfg)
Exemplo n.º 7
0
    def test_PointRend_mask_head_tracing(self):
        """Trace a PointRend mask head with torch.jit.trace and check that the
        traced module agrees with eager execution on inputs of a different size.
        """
        cfg = model_zoo.get_config(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
        point_rend.add_pointrend_config(cfg)
        # Restrict the head to two FPN levels and route masks through PointRend.
        cfg.MODEL.ROI_HEADS.IN_FEATURES = ["p2", "p3"]
        cfg.MODEL.ROI_MASK_HEAD.NAME = "PointRendMaskHead"
        cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE = ""
        cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = True
        chan = 256
        head = point_rend.PointRendMaskHead(
            cfg,
            {
                "p2": ShapeSpec(channels=chan, stride=4),
                "p3": ShapeSpec(channels=chan, stride=8),
            },
        )

        def gen_inputs(h, w, N):
            # Random feature maps for p2/p3 (p3 at half resolution) plus N boxes.
            p2 = torch.rand(1, chan, h, w)
            p3 = torch.rand(1, chan, h // 2, w // 2)
            boxes = random_boxes(N, max_coord=h)
            return p2, p3, boxes

        class Wrap(nn.ModuleDict):
            # Adapter giving the head a tensor-only signature so it is traceable.
            def forward(self, p2, p3, boxes):
                features = {
                    "p2": p2,
                    "p3": p3,
                }
                # Image size reconstructed from the p2 stride of 4.
                inst = Instances((p2.shape[2] * 4, p2.shape[3] * 4))
                inst.pred_boxes = Boxes(boxes)
                inst.pred_classes = torch.zeros(inst.__len__(),
                                                dtype=torch.long)
                out = self.head(features, [inst])[0]
                return out.pred_masks

        model = Wrap({"head": head})
        model.eval()
        with torch.no_grad(), patch_builtin_len():
            # Trace at one input size, then verify at another to catch any
            # shapes that were baked in as constants during tracing.
            traced = torch.jit.trace(model, gen_inputs(302, 208, 20))
            inputs = gen_inputs(100, 120, 30)
            out_eager = model(*inputs)
            out_trace = traced(*inputs)
            self.assertTrue(torch.allclose(out_eager, out_trace))
Exemplo n.º 8
0
def init_context(context):
    """Serverless init handler: build a RetinaNet predictor and attach it to
    ``context.user_data.model_handler``.

    Args:
        context: handler context providing ``logger`` and ``user_data``.
    """
    context.logger.info("Init context...  0%")

    cfg = get_config('COCO-Detection/retinanet_R_101_FPN_3x.yaml')
    # Work on a local copy: the original code called CONFIG_OPTS.extend(...),
    # which mutates the module-level list and accumulates duplicate
    # MODEL.DEVICE entries if init_context ever runs more than once.
    opts = list(CONFIG_OPTS)
    if torch.cuda.is_available():
        opts.extend(['MODEL.DEVICE', 'cuda'])
    else:
        opts.extend(['MODEL.DEVICE', 'cpu'])

    cfg.merge_from_list(opts)
    # Apply the confidence threshold to all head types the config may use.
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = CONFIDENCE_THRESHOLD
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = CONFIDENCE_THRESHOLD
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = CONFIDENCE_THRESHOLD
    cfg.freeze()
    predictor = DefaultPredictor(cfg)

    context.user_data.model_handler = predictor

    context.logger.info("Init context...100%")
Exemplo n.º 9
0
    def _test_model(self, config_path, device="cpu"):
        """Export a model-zoo model to Caffe2 and round-trip it through protobuf."""
        # requires extra dependencies
        from detectron2.export import Caffe2Model, add_export_config, Caffe2Tracer

        cfg = model_zoo.get_config(config_path)
        add_export_config(cfg)
        cfg.MODEL.DEVICE = device
        model = model_zoo.get(config_path, trained=True, device=device)

        sample_inputs = [{"image": get_sample_coco_image()}]
        tracer = Caffe2Tracer(cfg, model, copy.deepcopy(sample_inputs))
        exported = tracer.export_caffe2()

        with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as tmp_dir:
            exported.save_protobuf(tmp_dir)
            exported.save_graph(os.path.join(tmp_dir, "test.svg"),
                                inputs=copy.deepcopy(sample_inputs))
            # Reload from protobuf and run inference to verify the round trip.
            reloaded = Caffe2Model.load_protobuf(tmp_dir)
        reloaded(sample_inputs)[0]["instances"]
Exemplo n.º 10
0
    def _test_model(self, config_path, device="cpu"):
        """Trace a model-zoo model and exercise the Caffe2 and TorchScript exports."""
        cfg = model_zoo.get_config(config_path)
        cfg.MODEL.DEVICE = device
        model = model_zoo.get(config_path, trained=True, device=device)

        sample_inputs = [{"image": get_sample_coco_image()}]
        tracer = Caffe2Tracer(cfg, model, copy.deepcopy(sample_inputs))

        with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as tmp_dir:
            if not os.environ.get("CI"):
                # This requires onnx, which is not yet available on public CI
                exported = tracer.export_caffe2()
                exported.save_protobuf(tmp_dir)
                exported.save_graph(os.path.join(tmp_dir, "test.svg"),
                                    inputs=copy.deepcopy(sample_inputs))

                reloaded = Caffe2Model.load_protobuf(tmp_dir)
                reloaded(sample_inputs)[0]["instances"]

            scripted = tracer.export_torchscript()
            scripted.save(os.path.join(tmp_dir, "model.ts"))
Exemplo n.º 11
0
from detectron2.model_zoo import get_config

# Lazy-config training recipe: start from the common RetinaNet model
# definition and customize it.
model = get_config("common/models/retinanet.py").model
model.backbone.bottom_up.freeze_at = 2  # freeze early ResNet stages
model.head.norm = "SyncBN"  # synchronized BatchNorm in the RetinaNet head

# Reuse the standard COCO dataloader, 3x schedule, SGD optimizer and
# training loop settings from the common configs.
dataloader = get_config("common/data/coco.py").dataloader
lr_multiplier = get_config("common/coco_schedule.py").lr_multiplier_3x
optimizer = get_config("common/optim.py").SGD
train = get_config("common/train.py").train

optimizer.lr = 0.01

# Initialize the backbone from ImageNet-pretrained R-50 weights.
train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
train.max_iter = 270000  # 3x for batchsize = 16
                           std=(0.229, 0.224, 0.225)),
        ]),
    ),
    batch_size=256 // 8,
    num_workers=4,
    training=False,
)

# Evaluate with classification accuracy.
dataloader.evaluator = L(ClassificationAcc)()

# NOTE(review): ResNet here is called directly rather than wrapped in L(...),
# so it is instantiated eagerly when the config is loaded — presumably a
# ResNet-50 (Bottleneck, [3, 4, 6, 3]); confirm this is intentional given the
# lazy style used elsewhere in this config.
model = L(ClassificationNet)(model=(
    ResNet)(block=Bottleneck, layers=[3, 4, 6, 3], zero_init_residual=True))

optimizer = L(torch.optim.SGD)(
    params=L(get_default_optimizer_params)(),
    lr=0.1,
    momentum=0.9,
    weight_decay=1e-4,
)

# Multi-step decay at epochs 30/60/90 with a warmup over the first 1/100
# of training (warmup_length is a fraction of total training, not epochs).
lr_multiplier = L(WarmupParamScheduler)(
    scheduler=L(MultiStepParamScheduler)(values=[1.0, 0.1, 0.01, 0.001],
                                         milestones=[30, 60, 90, 100]),
    warmup_length=1 / 100,
    warmup_factor=0.1,
)

train = get_config("common/train.py").train
train.init_checkpoint = None  # train from scratch, no pretrained weights
# 100 epochs over 1,281,167 ImageNet images at total batch size 256.
train.max_iter = 100 * 1281167 // 256
Exemplo n.º 13
0
# Command-line options for evaluation data and schedule.
# NOTE(review): `parser` (and the --annotations/--imagedir/--outdir options
# used below) are defined earlier in the file, outside this excerpt.
parser.add_argument("--test_annotations", type=str, default='/home/dimitar/test_annotations.json', help="Path to COCO-style annotations file for model evaluation.")
parser.add_argument("--test_imagedir", type=str, default='/home/mengmi/Projects/Proj_context2/Matlab/Stimulus/keyframe_expH', help="Path to images folder w.r.t. which filenames are specified in the annotations for model evaluation.")
parser.add_argument("--test_frequency", type=int, default=50000, help="Evaluate model on test data every __ iterations.")

parser.add_argument("--iters", type=int, default=500000, help="Number of iterations to train.")
parser.add_argument("--save_frequency", type=int, default=50000, help="Save model checkpoint every __ iterations.")
args = parser.parse_args()


# Register datasets
register_coco_instances("train", {}, args.annotations, args.imagedir)
register_coco_instances("test", {}, args.test_annotations, args.test_imagedir)

# Load and configure model
# trained=True points cfg.MODEL.WEIGHTS at the COCO-trained checkpoint,
# so training starts from (fine-tunes) those weights.
cfg = model_zoo.get_config("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml", trained=True)

cfg.DATASETS.TRAIN = ("train",)
cfg.DATASETS.TEST =  ("test",)

cfg.OUTPUT_DIR = args.outdir

cfg.DATALOADER.NUM_WORKERS = 4
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025
cfg.SOLVER.CHECKPOINT_PERIOD = args.save_frequency
cfg.SOLVER.MAX_ITER = args.iters # Note that when traininig is resumed the iteration count will resume as well, so increase the number of iterations to train further. 
cfg.TEST.EVAL_PERIOD = args.test_frequency
# Dataset-specific head: 55 object classes instead of COCO's 80.
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 55

# save config