Example #1
def add_ResNet_roi_conv5_head_for_masks(
        model, blob_in, dim_in, spatial_scale, preprefix='_[mask]_',
        dilation=1, shared=False):
    """Add a ResNet "conv5" / "stage5" head for predicting masks."""
    assert not shared, \
        'Using shared ResNet stage not supported (temporarily)'
    model.RoIFeatureTransform(
        blob_in,
        blob_out=preprefix + 'pool5',
        blob_rois='mask_rois',
        method=cfg.MRCNN.ROI_XFORM_METHOD,
        resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION,
        sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale)

    stride_init = int(cfg.MRCNN.ROI_XFORM_RESOLUTION / 7)  # by default: 2
    if not shared:
        s, dim_in = ResNet.add_stage(
            model, preprefix + 'res5', preprefix + 'pool5',
            3, dim_in, 2048, 512, dilation, stride_init=stride_init)
    else:
        s, dim_in = ResNet.add_stage_shared(
            model, preprefix, 'res5', preprefix + 'pool5',
            3, dim_in, 2048, 512, dilation, stride_init=stride_init)

    return s, 2048, spatial_scale
Example #2
def add_ResNet_roi_conv5_head_for_keypoints(model, blob_in, dim_in,
                                            spatial_scale):
    """Add a ResNet "conv5" / "stage5" head for Mask R-CNN keypoint prediction.
    """
    model.RoIFeatureTransform(
        blob_in,
        '_[pose]_pool5',
        blob_rois='keypoint_rois',
        method=cfg.KRCNN.ROI_XFORM_METHOD,
        resolution=cfg.KRCNN.ROI_XFORM_RESOLUTION,
        sampling_ratio=cfg.KRCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale)
    # Using the prefix '_[pose]_' to 'res5' enables initializing the head's
    # parameters using pretrained 'res5' parameters if given (see
    # utils.net.initialize_gpu_0_from_weights_file)
    s, dim_in = ResNet.add_stage(model,
                                 '_[pose]_res5',
                                 '_[pose]_pool5',
                                 3,
                                 dim_in,
                                 2048,
                                 512,
                                 cfg.KRCNN.DILATION,
                                 stride_init=int(
                                     cfg.KRCNN.ROI_XFORM_RESOLUTION / 7))
    return s, 2048
Example #3
def ResNet_roi_conv5_head_for_masks(dim_in):
    """ResNet "conv5" / "stage5" head for predicting masks."""
    dilation = cfg.MRCNN.DILATION
    stride_init = cfg.MRCNN.ROI_XFORM_RESOLUTION // 7  # by default: 2
    module, dim_out = ResNet.add_stage(dim_in, 2048, 512, 3, dilation,
                                       stride_init)
    return module, dim_out
Example #4
def add_ResNet_roi_conv5_head_for_masks(model, blob_in, dim_in, spatial_scale):
    """Add a ResNet "conv5" / "stage5" head for predicting masks."""
    model.RoIFeatureTransform(
        blob_in,
        blob_out='_[mask]_pool5',
        blob_rois='mask_rois',
        method=cfg.MRCNN.ROI_XFORM_METHOD,
        resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION,
        sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale)

    dilation = cfg.MRCNN.DILATION
    stride_init = int(cfg.MRCNN.ROI_XFORM_RESOLUTION / 7)  # by default: 2

    s, dim_in = ResNet.add_stage(model,
                                 '_[mask]_res5',
                                 '_[mask]_pool5',
                                 3,
                                 dim_in,
                                 2048,
                                 512,
                                 dilation,
                                 stride_init=stride_init)

    return s, 2048
Example #5
def add_ResNet_roi_conv5_head_for_keypoints(
    model, blob_in, dim_in, spatial_scale
):
    """Add a ResNet "conv5" / "stage5" head for Mask R-CNN keypoint prediction.
    """
    model.RoIFeatureTransform(
        blob_in,
        '_[pose]_pool5',
        blob_rois='keypoint_rois',
        method=cfg.KRCNN.ROI_XFORM_METHOD,
        resolution=cfg.KRCNN.ROI_XFORM_RESOLUTION,
        sampling_ratio=cfg.KRCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale
    )
    # Using the prefix '_[pose]_' to 'res5' enables initializing the head's
    # parameters using pretrained 'res5' parameters if given (see
    # utils.net.initialize_gpu_0_from_weights_file)
    s, dim_in = ResNet.add_stage(
        model,
        '_[pose]_res5',
        '_[pose]_pool5',
        3,
        dim_in,
        2048,
        512,
        cfg.KRCNN.DILATION,
        stride_init=int(cfg.KRCNN.ROI_XFORM_RESOLUTION / 7)
    )
    return s, 2048
Example #6
def add_ResNet_roi_conv5_head_for_masks(model, blob_in, dim_in, spatial_scale):
    """Add a ResNet "conv5" / "stage5" head for predicting masks."""
    model.RoIFeatureTransform(
        blob_in,
        blob_out='_[mask]_pool5',
        blob_rois='mask_rois',
        method=cfg.MRCNN.ROI_XFORM_METHOD,
        resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION,
        sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale
    )

    dilation = cfg.MRCNN.DILATION
    stride_init = int(cfg.MRCNN.ROI_XFORM_RESOLUTION / 7)  # by default: 2

    s, dim_in = ResNet.add_stage(
        model,
        '_[mask]_res5',
        '_[mask]_pool5',
        3,
        dim_in,
        2048,
        512,
        dilation,
        stride_init=stride_init
    )

    return s, 2048
Example #7
    def detectron_weight_mapping(self):
        detectron_weight_mapping, orphan_in_detectron = \
            ResNet.residual_stage_detectron_mapping(self.res5, 'res5', 3, 5)
        detectron_weight_mapping.update({
            'upconv5.weight': 'conv5_mask_w',
            'upconv5.bias': 'conv5_mask_b'
        })
        return detectron_weight_mapping, orphan_in_detectron
Example #8
    def detectron_weight_mapping(self):
        detectron_weight_mapping, orphan_in_detectron = \
          ResNet.residual_stage_detectron_mapping(self.res5, 'res5', 3, 5)
        # Assign None for res5 modules, do not load from or save to checkpoint
        for k in detectron_weight_mapping:
            detectron_weight_mapping[k] = None

        detectron_weight_mapping.update({
            'upconv5.weight': 'conv5_mask_w',
            'upconv5.bias': 'conv5_mask_b'
        })
        return detectron_weight_mapping, orphan_in_detectron
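A note on the mapping above: each key is a PyTorch parameter name and each value the corresponding Detectron blob name, with None marking parameters that should be skipped. The sketch below is only an illustration of how such a mapping might be consumed; load_detectron_blobs and the blobs dict are assumptions, not part of the example.

import torch

def load_detectron_blobs(model, blobs, mapping):
    # blobs: dict of numpy arrays keyed by Detectron blob names
    # mapping: dict returned by detectron_weight_mapping()
    state = model.state_dict()
    for param_name, blob_name in mapping.items():
        if blob_name is None:
            # entries mapped to None are neither loaded nor saved
            continue
        state[param_name].copy_(torch.from_numpy(blobs[blob_name]))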
Example #9
    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.dim_in = dim_in
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        self.dim_out = cfg.MRCNN.DIM_REDUCED

        self.res5, dim_out = ResNet_roi_conv5_head_for_masks(dim_in)
        self.upconv5 = nn.ConvTranspose2d(dim_out, self.dim_out, 2, 2, 0)

        # Freeze all bn (affine) layers in resnet!!!
        self.res5.apply(lambda m: ResNet.freeze_params(m)
                        if isinstance(m, mynn.AffineChannel2d) else None)
        self._init_weights()
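The apply call above freezes every mynn.AffineChannel2d layer inside res5. The snippet only references ResNet.freeze_params; a minimal sketch of what such a helper typically does (an assumption, not code taken from the example) is:

def freeze_params(m):
    # Stop gradient updates for every parameter of module m
    for p in m.parameters():
        p.requires_grad = False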
Example #10
def add_ResNet_roi_conv5_head_for_keypoints(
        model, blob_in, dim_in, spatial_scale):
    model.RoIFeatureTransform(
        blob_in, '_[pose]_pool5',
        blob_rois='keypoint_rois',
        method=cfg.KRCNN.ROI_XFORM_METHOD,
        resolution=cfg.KRCNN.ROI_XFORM_RESOLUTION,
        sampling_ratio=cfg.KRCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale)
    s, dim_in = ResNet.add_stage(
        model, '_[pose]_res5', '_[pose]_pool5',
        3, dim_in, 2048, 512, cfg.KRCNN.DILATION,
        stride_init=int(cfg.KRCNN.ROI_XFORM_RESOLUTION / 7))
    return s, 2048, spatial_scale
Example #11
    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.dim_in = dim_in
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        self.dim_out = cfg.MRCNN.DIM_REDUCED

        self.res5, dim_out = ResNet_roi_conv5_head_for_masks(dim_in)
        self.upconv5 = nn.ConvTranspose2d(dim_out, self.dim_out, 2, 2, 0)

        # Freeze all bn (affine) layers in resnet!!!
        self.res5.apply(
            lambda m: ResNet.freeze_params(m)
            if isinstance(m, mynn.AffineChannel2d) else None)
        self._init_weights()
Example #12
def add_ResNet_roi_conv5_head_for_keypoints(model, blob_in, dim_in,
                                            spatial_scale):
    model.RoIFeatureTransform(
        blob_in,
        '_[pose]_pool5',
        blob_rois='keypoint_rois',
        method=cfg.KRCNN.ROI_XFORM_METHOD,
        resolution=cfg.KRCNN.ROI_XFORM_RESOLUTION,
        sampling_ratio=cfg.KRCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale)
    s, dim_in = ResNet.add_stage(model,
                                 '_[pose]_res5',
                                 '_[pose]_pool5',
                                 3,
                                 dim_in,
                                 2048,
                                 512,
                                 cfg.KRCNN.DILATION,
                                 stride_init=int(
                                     cfg.KRCNN.ROI_XFORM_RESOLUTION / 7))
    return s, 2048, spatial_scale
Example #13
def ResNet_roi_conv5_head_for_masks(dim_in):
    """ResNet "conv5" / "stage5" head for predicting masks."""
    dilation = cfg.MRCNN.DILATION
    stride_init = cfg.MRCNN.ROI_XFORM_RESOLUTION // 7  # by default: 2
    module, dim_out = ResNet.add_stage(dim_in, 2048, 512, 3, dilation, stride_init)
    return module, dim_out
Example #14
x = Variable(torch.Tensor(x)).to(device)  # Variable is a deprecated no-op wrapper in PyTorch >= 0.4


''' Load Model '''
model_save_type = ["all", "state_dict", "ckp"]  # saving a "ckp" checkpoint is recommended so training can be resumed // otherwise use state_dict
model_name = "./modeling/ResNet"
option = 2

if option == 0:
    # The model class must already be defined somewhere
    model_path = model_name + '_all.pt'
    model = torch.load(model_path)

elif option == 1:
    model_path = model_name + '_state_dict.pt'
    model = ResNet().to(device)
    model.load_state_dict(torch.load(model_path))

elif option == 2:
    # Initialize the model and optimizer
    model = ResNet(ResidualBlock, [2, 2, 2]).to(device)
    #optimizer = optim.Adam(model.parameters(), lr=lr)

    model_path = model_name + '_ckp.tar'
    checkpoint = torch.load(model_path, map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    #optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    last_epoch = checkpoint['last_epoch']
    loss = checkpoint['loss']

''' Audio Test '''
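The option == 2 branch above expects a checkpoint dict containing 'model_state_dict', 'optimizer_state_dict', 'last_epoch' and 'loss'. A minimal sketch of how such a checkpoint could be written during training is shown below; save_checkpoint is an illustrative helper, not part of the original script, and it reuses model_name from above.

def save_checkpoint(model, optimizer, epoch, loss, path=model_name + '_ckp.tar'):
    # Bundle everything needed to resume training into a single .tar file
    torch.save({
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'last_epoch': epoch,
        'loss': loss,
    }, path)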