Example #1
def build_resnet_backbone(cfg, input_shape):
    """
    Create a ResNet instance from config.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETS.NORM
    stem = BasicStem(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        norm=norm,
    )
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT

    if freeze_at >= 1:
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)

    # fmt: off
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    depth = cfg.MODEL.RESNETS.DEPTH
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = num_groups * width_per_group
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
    # fmt: on
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)

    num_blocks_per_stage = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3]
    }[depth]

    stages = []

    # Avoid creating variables without gradients
    # It consumes extra memory and may cause allreduce to fail
    out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
    max_stage_idx = max(out_stage_idx)
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5
                                         and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "first_stride": first_stride,
            "in_channels": in_channels,
            "bottleneck_channels": bottleneck_channels,
            "out_channels": out_channels,
            "num_groups": num_groups,
            "norm": norm,
            "stride_in_1x1": stride_in_1x1,
            "dilation": dilation,
        }
        if deform_on_per_stage[idx]:
            stage_kargs["block_class"] = DeformBottleneckBlock
            stage_kargs["deform_modulated"] = deform_modulated
            stage_kargs["deform_num_groups"] = deform_num_groups
        else:
            stage_kargs["block_class"] = InvertedBlock
        blocks = make_stage(**stage_kargs)
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2

        if freeze_at >= stage_idx:
            for block in blocks:
                block.freeze()
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features)
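A minimal usage sketch for the builder above; it assumes detectron2's default config schema (MODEL.RESNETS.*, MODEL.BACKBONE.FREEZE_AT) and a 3-channel input, and that the builder behaves like the stock build_resnet_backbone:

# Hypothetical usage sketch, assuming detectron2's default config keys.
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec

cfg = get_cfg()                   # defaults already define MODEL.RESNETS.*
cfg.MODEL.RESNETS.DEPTH = 50      # must be one of the keys in num_blocks_per_stage
cfg.MODEL.BACKBONE.FREEZE_AT = 2  # freeze the stem and res2

backbone = build_resnet_backbone(cfg, ShapeSpec(channels=3))
print(backbone.output_shape())    # with the defaults, a dict keyed by "res4"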
Example #2
    def freeze(self):
        for p in self.parameters():
            p.requires_grad = False
        FrozenBatchNorm2d.convert_frozen_batchnorm(self)
        return self
def conv_1x1_bn(inp, oup):
    return nn.Sequential(Conv2d(inp, oup, 1, 1, 0, bias=False),
                         FrozenBatchNorm2d(oup), nn.ReLU6(inplace=True))
def build_trident_resnet_backbone(cfg, input_shape):
    """
    Create a ResNet instance from config for TridentNet.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETS.NORM
    stem = BasicStem(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        norm=norm,
    )
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT

    if freeze_at >= 1:
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)

    # fmt: off
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    depth = cfg.MODEL.RESNETS.DEPTH
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = num_groups * width_per_group
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
    num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
    branch_dilations = cfg.MODEL.TRIDENT.BRANCH_DILATIONS
    trident_stage = cfg.MODEL.TRIDENT.TRIDENT_STAGE
    test_branch_idx = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX
    # fmt: on
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)

    num_blocks_per_stage = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3]
    }[depth]

    stages = []

    res_stage_idx = {"res2": 2, "res3": 3, "res4": 4, "res5": 5}
    out_stage_idx = [res_stage_idx[f] for f in out_features]
    trident_stage_idx = res_stage_idx[trident_stage]
    max_stage_idx = max(out_stage_idx)
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5
                                         and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "first_stride": first_stride,
            "in_channels": in_channels,
            "bottleneck_channels": bottleneck_channels,
            "out_channels": out_channels,
            "num_groups": num_groups,
            "norm": norm,
            "stride_in_1x1": stride_in_1x1,
            "dilation": dilation,
        }
        if stage_idx == trident_stage_idx:
            assert not deform_on_per_stage[idx], \
                "Deformable conv is not supported in Trident blocks yet."
            stage_kargs["block_class"] = TridentBottleneckBlock
            stage_kargs["num_branch"] = num_branch
            stage_kargs["dilations"] = branch_dilations
            stage_kargs["test_branch_idx"] = test_branch_idx
            stage_kargs.pop("dilation")
        elif deform_on_per_stage[idx]:
            stage_kargs["block_class"] = DeformBottleneckBlock
            stage_kargs["deform_modulated"] = deform_modulated
            stage_kargs["deform_num_groups"] = deform_num_groups
        else:
            stage_kargs["block_class"] = BottleneckBlock
        blocks = (make_trident_stage(**stage_kargs)
                  if stage_idx == trident_stage_idx
                  else make_stage(**stage_kargs))
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2

        if freeze_at >= stage_idx:
            for block in blocks:
                block.freeze()
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features)
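A sketch of how such a builder is usually wired into detectron2, assuming the standard BACKBONE_REGISTRY mechanism; the wrapper name below is hypothetical:

# Hypothetical registration sketch using detectron2's backbone registry.
from detectron2.modeling import BACKBONE_REGISTRY, build_backbone


@BACKBONE_REGISTRY.register()
def build_trident_resnet_backbone_registered(cfg, input_shape):
    # Delegate to the builder defined above.
    return build_trident_resnet_backbone(cfg, input_shape)

# Then select it from the config:
#   cfg.MODEL.BACKBONE.NAME = "build_trident_resnet_backbone_registered"
#   backbone = build_backbone(cfg)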
    def __init__(self,
                 cfg,
                 stages_repeats,
                 stages_out_channels,
                 num_classes=1000,
                 inverted_residual=InvertedResidual,
                 groups_in_1x1=1,
                 use_flgc=True):
        super(ShuffleNetV2, self).__init__()

        if len(stages_repeats) != 3:
            raise ValueError(
                'expected stages_repeats as list of 3 positive ints')
        if len(stages_out_channels) != 5:
            raise ValueError(
                'expected stages_out_channels as list of 5 positive ints')
        self._stage_out_channels = stages_out_channels

        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.features = nn.ModuleList([
            nn.Sequential(
                Conv2d(input_channels, output_channels, 3, 2, 0, bias=False),
                FrozenBatchNorm2d(output_channels),
                nn.ReLU(inplace=True),
            )
        ])
        input_channels = output_channels
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)

        stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(
                stage_names, stages_repeats, self._stage_out_channels[1:]):
            seq = [
                inverted_residual(input_channels,
                                  output_channels,
                                  stride=2,
                                  groups_in_1x1=groups_in_1x1,
                                  use_flgc=use_flgc)
            ]
            for i in range(repeats - 1):
                seq.append(
                    inverted_residual(output_channels,
                                      output_channels,
                                      stride=1,
                                      groups_in_1x1=groups_in_1x1,
                                      use_flgc=use_flgc))
            # setattr(self, name, nn.Sequential(*seq))
            self.features.append(nn.Sequential(*seq))
            input_channels = output_channels

        output_channels = self._stage_out_channels[-1]
        self.features.append(
            nn.Sequential(
                Conv2d(input_channels,
                       output_channels,
                       kernel_size=1,
                       stride=2,
                       padding=0,
                       bias=False),
                FrozenBatchNorm2d(output_channels),
                nn.ReLU(inplace=True),
            ))
        self._initialize_weights()
        self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_AT)
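For reference, a hedged instantiation sketch using the stage layout of the standard shufflenet_v2_x1_0 variant; cfg is assumed to be a CfgNode that provides MODEL.BACKBONE.FREEZE_AT:

# Hypothetical instantiation (shufflenet_v2_x1_0-style stage layout).
model = ShuffleNetV2(
    cfg,
    stages_repeats=[4, 8, 4],                       # 3 positive ints, as validated above
    stages_out_channels=[24, 116, 232, 464, 1024],  # 5 positive ints, as validated above
)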
def conv_bn(inp, oup, stride):
    return nn.Sequential(Conv2d(inp, oup, 3, stride, 1, bias=False),
                         FrozenBatchNorm2d(oup), nn.ReLU6(inplace=True))
Example #7
def build_custom_backbone(cfg, input_shape, num_classes=None):
    """
    Create a ResNet instance from config.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # need registration of new blocks/stems?
    norm = cfg.MODEL.CUSTOM.NORM
    stem = PSRBasicStem(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        norm=norm,
        c7x7=cfg.MODEL.CUSTOM.RESNETS.STEM.CONVF_7x7,
        convf_name=cfg.MODEL.CUSTOM.RESNETS.STEM.CONVF_NAME,
        rot_1x1_out=cfg.MODEL.CUSTOM.RESNETS.ROT_1x1,
        noise_var=cfg.MODEL.CUSTOM.RESNETS.NOISE_VAR,
        stride_psr=cfg.MODEL.CUSTOM.RESNETS.STEM.STRIDE_PSR
    )
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT

    if freeze_at >= 1:
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)

    # fmt: off
    out_features         = cfg.MODEL.RESNETS.OUT_FEATURES
    depth                = cfg.MODEL.RESNETS.DEPTH
    width_per_group      = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels  = width_per_group
    in_channels          = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels         = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    conv_name            = cfg.MODEL.CUSTOM.RESNETS.BLOCK.CONV_NAME
    conv_1x1_rot         = cfg.MODEL.CUSTOM.RESNETS.BLOCK.CONV_1x1_ROT
    rot_1x1_out          = cfg.MODEL.CUSTOM.RESNETS.ROT_1x1
    noise_var            = cfg.MODEL.CUSTOM.RESNETS.NOISE_VAR

    # fmt: on
    num_blocks_per_stage = {10: [1, 1, 1, 1], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]

    stages = []

    res_stage_idx = {"res2": 2, "res3": 3, "res4": 4, "res5": 5}
    out_stage_idx = [res_stage_idx[f] for f in out_features]
    max_stage_idx = max(out_stage_idx)
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        first_stride = 1 if idx == 0 else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "first_stride": first_stride,
            "in_channels": in_channels,
            "bottleneck_channels": bottleneck_channels,
            "out_channels": out_channels,
            "conv_name": conv_name,
            "conv_1x1_rot": conv_1x1_rot,
            "rot_1x1_out": rot_1x1_out,
            "noise_var": noise_var,
            "norm": norm
        }
        stage_kargs["block_class"] = PSRBottleneckBlock
        blocks = make_custom_stage(**stage_kargs)
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features, num_classes=num_classes)
Example #8
    def __init__(self, cfg):
        super().__init__()

        self.device = torch.device(cfg.MODEL.DEVICE)
        self.backbone = build_backbone(cfg)
        self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
        self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())
        self.vis_period = cfg.VIS_PERIOD
        self.input_format = cfg.INPUT.FORMAT
        self.gt_test_proposals = cfg.MODEL.GT_TEST_PROPOSALS
        self.eval_method = _EvalMethod[cfg.TEST.EVAL_METHOD.upper()]

        assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
        num_channels = len(cfg.MODEL.PIXEL_MEAN)
        pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(num_channels, 1, 1)
        pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(num_channels, 1, 1)
        self.normalizer = lambda x: (x - pixel_mean) / pixel_std

        from detectron2.layers import FrozenBatchNorm2d
        if cfg.MODEL.BACKBONE.FREEZE:
            for p in self.backbone.parameters():
                p.requires_grad = False
            # Also freeze batchnorm, not done in original code
            # TODO(): Is this even correct, or does it just return a new module?
            FrozenBatchNorm2d.convert_frozen_batchnorm(self.backbone)
            logger.info('froze backbone parameters')
            print('froze backbone parameters')

        if cfg.MODEL.PROPOSAL_GENERATOR.FREEZE:
            for p in self.proposal_generator.parameters():
                p.requires_grad = False
            logger.info('froze proposal generator parameters')
            print('froze proposal generator parameters')
            FrozenBatchNorm2d.convert_frozen_batchnorm(self.proposal_generator)

        if cfg.MODEL.ROI_HEADS.FREEZE_BOX_HEAD:
            for p in self.roi_heads.box_head.parameters():
                p.requires_grad = False
            FrozenBatchNorm2d.convert_frozen_batchnorm(self.roi_heads.box_head)
            logger.info('froze roi_box_head parameters')
            print('froze roi_box_head parameters')

        if cfg.MODEL.ROI_MASK_HEAD.FREEZE:
            for p in self.roi_heads.mask_head.parameters():
                p.requires_grad = False
            FrozenBatchNorm2d.convert_frozen_batchnorm(self.roi_heads.mask_head)
            logger.info('froze roi mask head parameters')

        if cfg.MODEL.ROI_MASK_HEAD.FREEZE_WITHOUT_PREDICTOR \
                and cfg.MODEL.ROI_MASK_HEAD.FREEZE:
            # Freezing both doesn't make sense and likely indicates a forgotten
            # config change, so fail early.
            assert False, "FREEZE and FREEZE_WITHOUT_PREDICTOR are mutually exclusive"

        if cfg.MODEL.ROI_MASK_HEAD.FREEZE_WITHOUT_PREDICTOR:
            frozen_names = []
            for n, p in self.roi_heads.mask_head.named_parameters():
                if 'predictor' not in n:
                    p.requires_grad = False
                    frozen_names.append(n)

            logger.info('froze roi mask head parameters without predictor')
            logger.info(f'Names of frozen layers: {frozen_names}')

        self.to(self.device)
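On the TODO above: in detectron2, FrozenBatchNorm2d.convert_frozen_batchnorm replaces child BatchNorm layers in place but returns a new module when the argument itself is a BatchNorm layer, so reassigning the return value is the safer pattern. A minimal sketch:

# Minimal sketch: reassign the return value of convert_frozen_batchnorm.
import torch.nn as nn
from detectron2.layers import FrozenBatchNorm2d

m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
m = FrozenBatchNorm2d.convert_frozen_batchnorm(m)  # children converted, same root returned
assert isinstance(m[1], FrozenBatchNorm2d)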
def build_resnet_backbone(cfg, input_shape):
    """
    Create a ResNet instance from config.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """

    depth = cfg.MODEL.RESNETS.DEPTH
    stem_width = {50: 32, 101: 64, 152: 64, 200: 64, 269: 64}[depth] 
    radix = cfg.MODEL.RESNETS.RADIX 
    deep_stem = cfg.MODEL.RESNETS.DEEP_STEM or (radix > 1)

    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETS.NORM
    stem = BasicStem(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        norm=norm,
        deep_stem=deep_stem,
        stem_width=stem_width,
    )
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT

    if freeze_at >= 1:
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)

    # fmt: off
    out_features        = cfg.MODEL.RESNETS.OUT_FEATURES
    num_groups          = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group     = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = num_groups * width_per_group
    in_channels         = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels        = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1       = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation       = cfg.MODEL.RESNETS.RES5_DILATION
    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
    deform_modulated    = cfg.MODEL.RESNETS.DEFORM_MODULATED
    deform_num_groups   = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
    avd                 = cfg.MODEL.RESNETS.AVD or (radix > 1)
    avg_down            = cfg.MODEL.RESNETS.AVG_DOWN or (radix > 1)
    bottleneck_width    = cfg.MODEL.RESNETS.BOTTLENECK_WIDTH
    # fmt: on
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)

    num_blocks_per_stage = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }[depth]

    if depth in [18, 34]:
        assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34"
        assert not any(
            deform_on_per_stage
        ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34"
        assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34"
        assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34"

    stages = []

    # Avoid creating variables without gradients
    # It consumes extra memory and may cause allreduce to fail
    out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
    max_stage_idx = max(out_stage_idx)
    in_channels = 2*stem_width if deep_stem else in_channels
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "first_stride": first_stride,
            "in_channels": in_channels,
            "out_channels": out_channels,
            "norm": norm,
            "avd": avd,
            "avg_down": avg_down,
            "radix": radix,
            "bottleneck_width": bottleneck_width,
        }
        # Use BasicBlock for R18 and R34.
        if depth in [18, 34]:
            stage_kargs["block_class"] = BasicBlock
        else:
            stage_kargs["bottleneck_channels"] = bottleneck_channels
            stage_kargs["stride_in_1x1"] = stride_in_1x1
            stage_kargs["dilation"] = dilation
            stage_kargs["num_groups"] = num_groups
            if deform_on_per_stage[idx]:
                stage_kargs["block_class"] = DeformBottleneckBlock
                stage_kargs["deform_modulated"] = deform_modulated
                stage_kargs["deform_num_groups"] = deform_num_groups
            else:
                stage_kargs["block_class"] = BottleneckBlock
        blocks = make_stage(**stage_kargs)
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2

        if freeze_at >= stage_idx:
            for block in blocks:
                block.freeze()
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features)
Example #10
def build_resnetlike_backbone(cfg, input_shape: ShapeSpec):
    """
    Create a ResNet instance from config.
    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # fmt: off
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    num_classes = cfg.MODEL.BACKBONE.NUM_CLASSES if "logits" in out_features else None
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = num_groups * width_per_group
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    num_blocks_per_stage = cfg.MODEL.RESNETS.BLOCKS_PER_STAGE
    dilation_per_stage = cfg.MODEL.RESNETS.DILATION_PAR_STAGE
    norm = cfg.MODEL.RESNETS.NORM
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
    # fmt: on

    # need registration of new blocks/stems?
    stem = StemBlock(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        kernel_size=cfg.MODEL.RESNETS.STEM_KERNEL,
        stride=cfg.MODEL.RESNETS.STEM_STRIDE,
        pooling=nn.MaxPool2d if cfg.MODEL.RESNETS.STEM_POOLING_ON else None,
        norm=norm,
    )

    if freeze_at >= 1:
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)

    out_stage_idx = [
        {"res2": 2, "res3": 3, "res4": 4, "res5": 5, "logits": 5}[f] for f in out_features
    ]
    max_stage_idx = max(out_stage_idx)

    stages = []
    for idx in range(2, max_stage_idx + 1):
        block = {
            'count': num_blocks_per_stage[idx],
            'in_channels': in_channels,
            'bottleneck_channels': bottleneck_channels,
            'out_channels': out_channels,
            'dilation': dilation_per_stage[idx]
        }
        in_channels = out_channels
        bottleneck_channels *= 2
        out_channels *= 2
        stages.append(block)

    return ResNetLike(stem,
                      stages,
                      num_classes=num_classes,
                      out_features=out_features,
                      freeze_at=0)
Example #11
def build_custom_resnet50_fpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode

    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    """
    in detectron2 by default:
    1. resnet base and layer1(res2) requires_grad = False
    2. all the bn in resnet converted to Frozen bn
    """
    # bottom_up = build_resnet_backbone(cfg, input_shape)
    # in_channels_p6p7 = bottom_up.output_shape()["res5"].channels

    assert "WEIGHTS" in cfg.MODEL.keys(
    ), "Define where to find the robust network path"

    return_layers = {
        "layer1": "res2",
        "layer2": "res3",
        "layer3": "res4",
        "layer4": "res5"
    }

    model, _ = model_utils.make_and_restore_model(
        arch="resnet50",
        dataset=datasets.ImageNet(""),
        resume_path=cfg.MODEL.WEIGHTS,
        pytorch_pretrained=False)

    cfg.MODEL.WEIGHTS = ""

    resnet = model.model

    freeze_level = cfg.MODEL.BACKBONE.FREEZE_AT

    to_freeze_layers = [
        resnet.conv1, resnet.layer1, resnet.layer2, resnet.layer3,
        resnet.layer4
    ]

    # frozen_range = [resnet.conv1, resnet.layer1]
    # always freeze conv1
    for module in to_freeze_layers[:freeze_level]:
        for param in module.parameters():
            param.requires_grad = False
    resnet = FrozenBatchNorm2d.convert_frozen_batchnorm(resnet)

    bottom_up = IntermediateLayerGetter(resnet, return_layers)

    in_features = cfg.MODEL.FPN.IN_FEATURES
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS

    backbone = CustomResnet50FPN(
        bottom_up=bottom_up,
        in_features=in_features,
        out_channels=out_channels,
        return_layers=return_layers,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelMaxPool(),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
    backbone.__bottom_up = bottom_up
    return backbone
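A short sanity-check sketch for the resulting backbone, assuming it follows the standard detectron2 Backbone interface (output_shape() plus a forward that returns a dict of named feature maps):

# Hypothetical sanity check; assumes cfg is a detectron2 CfgNode with
# MODEL.WEIGHTS pointing at the robust checkpoint. Feature names depend
# on CustomResnet50FPN (typically p2..p6 for an FPN).
import torch
from detectron2.layers import ShapeSpec

backbone = build_custom_resnet50_fpn_backbone(cfg, ShapeSpec(channels=3))
with torch.no_grad():
    features = backbone(torch.randn(1, 3, 224, 224))
for name, spec in backbone.output_shape().items():
    print(name, spec.channels, spec.stride, tuple(features[name].shape))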
Example #12
def build_efficientnetv2_backbone(cfg, input_shape):
    """
    Create a GenEfficientNet instance from config.

    Returns:
        GenEfficientNet: a :class:`GenEfficientNet` instance.
    """

    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
    norm = cfg.MODEL.EFFICIENTNETS.NORM
    out_features = cfg.MODEL.EFFICIENTNETS.OUT_FEATURES
    model_name = cfg.MODEL.EFFICIENTNETS.NAME
    pretrained = cfg.MODEL.EFFICIENTNETS.PRETRAINED
    set_exportable(cfg.MODEL.EFFICIENTNETS.EXPORTABLE)
    set_scriptable(cfg.MODEL.EFFICIENTNETS.SCRIPTABLE)

    # GET MODEL BY NAME
    model = timm.create_model(model_name,
                              pretrained=pretrained,
                              features_only=True,
                              out_indices=out_features)

    # LOAD MODEL AND CONVERT NORM
    # NOTE: why I use if/else: see the strange function _load_from_state_dict in FrozenBatchNorm2d
    assert norm in ["FrozenBN", "SyncBN", "BN"]
    if norm == "FrozenBN":
        model = FrozenBatchNorm2d.convert_frozen_batchnorm(model)
    elif pretrained:
        model = convert_norm_to_detectron2_format(model, norm)
    else:
        model = convert_norm_to_detectron2_format_and_init_default(model, norm)

    # USE TENSORFLOW EPS, MOMENTUM defaults if model is tf pretrained
    if "tf" in model_name:
        model = convert_norm_eps_momentum_to_tf_defaults(model)

    # PRUNE REDUNDANT BLOCKS
    # FIXME PRUNE
    # prune(model, is_mobilenetv3)

    # Parse the block index of the second feature level from its name (e.g. "blocks.1");
    # the stem and all blocks up to that index are frozen below.
    max_block_number = int(model.feature_info[1]['name'][7:8])
    for p in model.conv_stem.parameters():
        p.requires_grad = False
    model.bn1 = FrozenBatchNorm2d.convert_frozen_batchnorm(model.bn1)
    for block_number in range(0, max_block_number + 1):
        for p in model.blocks[block_number].parameters():
            p.requires_grad = False
        model.blocks[block_number] = FrozenBatchNorm2d.convert_frozen_batchnorm(
            model.blocks[block_number])

    # def decorate_forward(cls):
    #     old_forward = cls.forward
    #
    #     def new_forward(self, x):
    #         x = old_forward(self, x)
    #
    #         return x + _dummy
    #
    #     cls.forward = new_forward
    #     return cls
    #
    # model = decorate_forward(model)

    return model
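A small sketch of what a features_only timm model returns (a list of feature maps, one per requested out index), which is what the freezing code above iterates over; the model name and input size are arbitrary:

# Hypothetical check of timm's features_only output (list of feature maps).
import torch
import timm

m = timm.create_model("spnasnet_100", pretrained=False,
                      features_only=True, out_indices=(2, 3, 4))
with torch.no_grad():
    feats = m(torch.randn(1, 3, 256, 256))
for f in feats:
    print(tuple(f.shape))  # one tensor per out index, at strides 8 / 16 / 32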
    def __init__(self, cfg, **kwargs):
        super(HRNet, self).__init__()

        blocks_dict = {
            'BasicBlockWithFixedBatchNorm': BasicBlock,
            'BottleneckWithFixedBatchNorm': Bottleneck
        }

        self.blocks_dict = blocks_dict
        self.inplanes = 64

        # stem net
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)
        self.bn1 = FrozenBatchNorm2d(64)
        self.conv2 = nn.Conv2d(64,
                               64,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)
        self.bn2 = FrozenBatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)

        self.stage1_cfg = cfg.MODEL.HRNET.STAGE1
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        # stage1_out_channel = block.expansion*num_channels
        # self.layer1 = self._make_layer(Bottleneck, self.inplanes, 64, 4)

        self.stage2_cfg = cfg.MODEL.HRNET.STAGE2
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition1 = self._make_transition_layer([256], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        self.stage3_cfg = cfg.MODEL.HRNET.STAGE3
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        self.stage4_cfg = cfg.MODEL.HRNET.STAGE4
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg,
            num_channels,
            multi_scale_output=self.stage4_cfg.MULTI_OUTPUT
        )  ##_C.MODEL.HRNET.STAGE4.MULTI_OUTPUT = True