Example #1
    def _bbox_subnet(self, conv_feat, conv_channel, num_base_anchor, num_class):
        p = self.p

        # regression subnet
        bbox_conv1 = X.conv(
            data=conv_feat,
            kernel=3,
            filter=conv_channel,
            weight=self.bbox_conv1_weight,
            bias=self.bbox_conv1_bias,
            no_bias=False,
            name="bbox_conv1"
        )
        bbox_conv1_relu = X.relu(bbox_conv1)
        bbox_conv2 = X.conv(
            data=bbox_conv1_relu,
            kernel=3,
            filter=conv_channel,
            weight=self.bbox_conv2_weight,
            bias=self.bbox_conv2_bias,
            no_bias=False,
            name="bbox_conv2"
        )
        bbox_conv2_relu = X.relu(bbox_conv2)
        bbox_conv3 = X.conv(
            data=bbox_conv2_relu,
            kernel=3,
            filter=conv_channel,
            weight=self.bbox_conv3_weight,
            bias=self.bbox_conv3_bias,
            no_bias=False,
            name="bbox_conv3"
        )
        bbox_conv3_relu = X.relu(bbox_conv3)
        bbox_conv4 = X.conv(
            data=bbox_conv3_relu,
            kernel=3,
            filter=conv_channel,
            weight=self.bbox_conv4_weight,
            bias=self.bbox_conv4_bias,
            no_bias=False,
            name="bbox_conv4"
        )
        bbox_conv4_relu = X.relu(bbox_conv4)

        if p.fp16:
            bbox_conv4_relu = X.to_fp32(bbox_conv4_relu, name="bbox_conv4_fp32")

        output_channel = num_base_anchor * 4
        output = X.conv(
            data=bbox_conv4_relu,
            kernel=3,
            filter=output_channel,
            weight=self.bbox_pred_weight,
            bias=self.bbox_pred_bias,
            no_bias=False,
            name="bbox_pred"
        )

        return output
Example #2
def se_v2_resnet_v1b_unit(input, name, filter, stride, dilate, proj, norm,
                          **kwargs):
    """
    Differs from v1: the SE module is moved onto the 3x3 conv.
    """
    conv1 = conv(input, name=name + "_conv1", filter=filter // 4)
    bn1 = norm(conv1, name=name + "_bn1")
    relu1 = relu(bn1, name=name + "_relu1")

    conv2 = conv(relu1,
                 name=name + "_conv2",
                 stride=stride,
                 filter=filter // 4,
                 kernel=3)
    bn2 = norm(conv2, name=name + "_bn2")
    relu2 = relu(bn2, name=name + "_relu2")
    relu2 = se(relu2,
               prefix=name + "_se2",
               f_down=filter // 16,
               f_up=filter // 4)

    conv3 = conv(relu2, name=name + "_conv3", filter=filter)
    bn3 = norm(conv3, name=name + "_bn3")

    if proj:
        shortcut = conv(input, name=name + "_sc", filter=filter, stride=stride)
        shortcut = norm(shortcut, name=name + "_sc_bn")
    else:
        shortcut = input

    eltwise = add(bn3, shortcut, name=name + "_plus")

    return relu(eltwise, name=name + "_relu")
Example #3
    def _cls_subnet(self, conv_feat, conv_channel, num_base_anchor, num_class):
        p = self.p

        # classification subnet
        cls_conv1 = X.conv(
            data=conv_feat,
            kernel=3,
            filter=conv_channel,
            weight=self.cls_conv1_weight,
            bias=self.cls_conv1_bias,
            no_bias=False,
            name="cls_conv1"
        )
        cls_conv1_relu = X.relu(cls_conv1)
        cls_conv2 = X.conv(
            data=cls_conv1_relu,
            kernel=3,
            filter=conv_channel,
            weight=self.cls_conv2_weight,
            bias=self.cls_conv2_bias,
            no_bias=False,
            name="cls_conv2"
        )
        cls_conv2_relu = X.relu(cls_conv2)
        cls_conv3 = X.conv(
            data=cls_conv2_relu,
            kernel=3,
            filter=conv_channel,
            weight=self.cls_conv3_weight,
            bias=self.cls_conv3_bias,
            no_bias=False,
            name="cls_conv3"
        )
        cls_conv3_relu = X.relu(cls_conv3)
        cls_conv4 = X.conv(
            data=cls_conv3_relu,
            kernel=3,
            filter=conv_channel,
            weight=self.cls_conv4_weight,
            bias=self.cls_conv4_bias,
            no_bias=False,
            name="cls_conv4"
        )
        cls_conv4_relu = X.relu(cls_conv4)

        if p.fp16:
            cls_conv4_relu = X.to_fp32(cls_conv4_relu, name="cls_conv4_fp32")

        output_channel = num_base_anchor * (num_class - 1)
        output = X.conv(
            data=cls_conv4_relu,
            kernel=3,
            filter=output_channel,
            weight=self.cls_pred_weight,
            bias=self.cls_pred_bias,
            no_bias=False,
            name="cls_pred"
        )

        return output
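
For orientation, the prediction channel counts in these two subnets follow the usual RetinaNet layout. A small sketch of the arithmetic, assuming a typical configuration of 3 scales x 3 ratios and 80 foreground classes (num_class counts background too):

num_base_anchor = 3 * 3                           # 9 anchors per location
num_class = 81                                    # 80 foreground classes + background
bbox_channels = num_base_anchor * 4               # 36, as in _bbox_subnet
cls_channels = num_base_anchor * (num_class - 1)  # 720, as in _cls_subnet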
Example #4
    def get_output(self, conv_feat):
        if self._cls_logit is not None and self._bbox_delta is not None:
            return self._cls_logit, self._bbox_delta

        p = self.p
        num_base_anchor = len(p.anchor_generate.ratio) * len(
            p.anchor_generate.scale)
        conv_channel = p.head.conv_channel

        conv = X.convrelu(conv_feat,
                          kernel=3,
                          filter=conv_channel,
                          name="rpn_conv_3x3",
                          no_bias=False,
                          init=X.gauss(0.01))

        if p.fp16:
            conv = X.to_fp32(conv, name="rpn_conv_3x3_fp32")

        cls_logit = X.conv(conv,
                           filter=2 * num_base_anchor,
                           name="rpn_cls_logit",
                           no_bias=False,
                           init=X.gauss(0.01))

        bbox_delta = X.conv(conv,
                            filter=4 * num_base_anchor,
                            name="rpn_bbox_delta",
                            no_bias=False,
                            init=X.gauss(0.01))

        self._cls_logit = cls_logit
        self._bbox_delta = bbox_delta

        return self._cls_logit, self._bbox_delta
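
Unlike the RetinaNet-style head above, this RPN head scores each anchor with a 2-way background/foreground logit, so the classification branch emits 2 * num_base_anchor channels. A quick check, assuming e.g. 3 ratios x 3 scales:

num_base_anchor = 3 * 3              # 9 anchors per location
cls_channels = 2 * num_base_anchor   # 18: (bg, fg) softmax per anchor
bbox_channels = 4 * num_base_anchor  # 36: one box delta set per anchor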
Example #5
    def conv_shared(data, name, kernel, num_filter, branch_ids=None, no_bias=True, share_weight=True,
                    pad=(0, 0), stride=(1, 1), dilate=(1, 1)):
        if branch_ids is None:
            branch_ids = range(len(data))

        weight = X.var(name + '_weight')
        if no_bias:
            bias = None
        else:
            bias = X.var(name + '_bias')

        conv_layers = []
        for i in range(len(data)):
            data_i = data[i]
            stride_i = stride[i] if isinstance(stride, list) else stride
            dilate_i = dilate[i] if isinstance(dilate, list) else dilate
            pad_i = pad[i] if isinstance(pad, list) else pad
            branch_i = branch_ids[i]
            if share_weight:
                conv_i = X.conv(data=data_i, kernel=kernel, filter=num_filter, stride=stride_i, dilate=dilate_i, pad=pad_i,
                                name=name + '_shared%d' % branch_i, no_bias=no_bias, weight=weight, bias=bias)
            else:
                conv_i = X.conv(data=data_i, kernel=kernel, filter=num_filter, stride=stride_i, dilate=dilate_i, pad=pad_i,
                                name=name + '_branch%d' % branch_i, no_bias=no_bias)
            conv_layers.append(conv_i)

        return conv_layers
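
The sharing above comes entirely from passing the same weight and bias symbols into every X.conv call. A minimal self-contained sketch of that mechanism in plain mx.sym (assuming the X helpers wrap these primitives; the names here are illustrative):

import mxnet as mx

weight = mx.sym.Variable("branch_conv_weight")  # one symbol, tied across branches
branches = [mx.sym.Variable("data_b%d" % i) for i in range(3)]
outs = [
    mx.sym.Convolution(data=d, weight=weight, num_filter=64,
                       kernel=(3, 3), pad=(1, 1), no_bias=True,
                       name="branch_conv_shared%d" % i)
    for i, d in enumerate(branches)
]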
Example #6
def mbconv(input, prefix, kernel, f_in, f_out, stride, proj, bottleneck_ratio,
           norm, **kwargs):
    with mx.name.Prefix(prefix + "_"):
        if bottleneck_ratio != 1:
            conv1 = conv(input,
                         name="conv1",
                         filter=f_in * bottleneck_ratio,
                         no_bias=False)
            bn1 = norm(conv1, name="bn1")
            relu1 = relu6(bn1, name="relu1")
        else:
            relu1 = input

        conv2 = dwconv(relu1,
                       name="conv2",
                       filter=f_in * bottleneck_ratio,
                       kernel=kernel,
                       stride=stride,
                       no_bias=False)
        bn2 = norm(conv2, name="bn2")
        relu2 = relu6(bn2, name="relu2")
        relu2 = se(relu2,
                   prefix=prefix + "_se2",
                   f_down=f_in // 4,
                   f_up=f_in * bottleneck_ratio)

        conv3 = conv(relu2, name="conv3", filter=f_out, no_bias=False)
        bn3 = norm(conv3, name="bn3")

        if proj:
            return bn3
        else:
            return bn3 + input
Example #7
def dcn_resnet_unit(input, name, filter, stride, dilate, proj, norm, **kwargs):
    conv1 = conv(input, name=name + "_conv1", filter=filter // 4)
    bn1 = norm(conv1, name=name + "_bn1")
    relu1 = relu(bn1, name=name + "_relu1")

    # conv2: 3x3 deformable convolution; the offset branch predicts its sampling offsets
    conv2_offset = conv(relu1, name=name + "_conv2_offset", filter=72, kernel=3, stride=stride, dilate=dilate)
    conv2 = mx.sym.contrib.DeformableConvolution(relu1, conv2_offset, kernel=(3, 3),
        stride=(stride, stride), dilate=(dilate, dilate), pad=(1, 1), num_filter=filter // 4,
        num_deformable_group=4, no_bias=True, name=name + "_conv2")
    bn2 = norm(conv2, name=name + "_bn2")
    relu2 = relu(bn2, name=name + "_relu2")

    conv3 = conv(relu2, name=name + "_conv3", filter=filter)
    bn3 = norm(conv3, name=name + "_bn3")

    if proj:
        shortcut = conv(input, name=name + "_sc", filter=filter, stride=stride)
        shortcut = norm(shortcut, name=name + "_sc_bn")
    else:
        shortcut = input

    eltwise = add(bn3, shortcut, name=name + "_plus")

    return relu(eltwise, name=name + "_relu")
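
The filter=72 of conv2_offset is not arbitrary: DeformableConvolution expects one (x, y) offset per kernel tap and per deformable group, which the arithmetic below spells out:

# offset channels = num_deformable_group * 2 * kernel_h * kernel_w
offset_channels = 4 * 2 * 3 * 3
assert offset_channels == 72  # matches filter=72 above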
Example #8
def trident_resnet_v1b_unit(input, name, id, filter, stride, dilate, proj, **kwargs):
    """
    Compared with v1, v1b moves stride=2 to the 3x3 conv instead of the 1x1 conv and uses std in pre-processing.
    This is also known as the Facebook re-implementation of ResNet (a.k.a. the torch ResNet).
    """
    p = kwargs["params"]
    share_bn = p.branch_bn_shared
    share_conv = p.branch_conv_shared
    norm = p.normalizer

    ######################### prepare names #########################
    if id is not None:
        conv_postfix = ("_shared%s" if share_conv else "_branch%s") % id
        bn_postfix = ("_shared%s" if share_bn else "_branch%s") % id
        other_postfix = "_branch%s" % id
    else:
        conv_postfix = ""
        bn_postfix = ""
        other_postfix = ""

    ######################### prepare parameters #########################
    conv_params = lambda x: dict(
        weight=X.shared_var(name + "_%s_weight" % x) if share_conv else None,
        name=name + "_%s" % x + conv_postfix
    )

    def bn_params(x):
        ret = dict(
            gamma=X.shared_var(name + "_%s_gamma" % x) if share_bn else None,
            beta=X.shared_var(name + "_%s_beta" % x) if share_bn else None,
            moving_mean=X.shared_var(name + "_%s_moving_mean" % x) if share_bn else None,
            moving_var=X.shared_var(name + "_%s_moving_var" % x) if share_bn else None,
            name=name + "_%s" % x + bn_postfix
        )
        if norm.__name__ == "gn":
            del ret["moving_mean"], ret["moving_var"]
        return ret

    ######################### construct graph #########################
    conv1 = conv(input, filter=filter // 4, **conv_params("conv1"))
    bn1 = norm(conv1, **bn_params("bn1"))
    relu1 = relu(bn1, name=name + "_relu1" + other_postfix)

    conv2 = conv(relu1, filter=filter // 4, kernel=3, stride=stride, dilate=dilate, **conv_params("conv2"))
    bn2 = norm(conv2, **bn_params("bn2"))
    relu2 = relu(bn2, name=name + "_relu2" + other_postfix)

    conv3 = conv(relu2, filter=filter, **conv_params("conv3"))
    bn3 = norm(conv3, **bn_params("bn3"))

    if proj:
        shortcut = conv(input, filter=filter, stride=stride, **conv_params("sc"))
        shortcut = norm(shortcut, **bn_params("sc_bn"))
    else:
        shortcut = input

    eltwise = add(bn3, shortcut, name=name + "_plus" + other_postfix)

    return relu(eltwise, name=name + "_relu" + other_postfix)
Example #9
    def get_output(self, conv_fpn_feat):
        if self.cls_logit_dict is not None and self.bbox_delta_dict is not None:
            return self.cls_logit_dict, self.bbox_delta_dict

        p = self.p
        num_base_anchor = len(p.anchor_generate.ratio) * len(p.anchor_generate.scale)
        conv_channel = p.head.conv_channel

        # FPN RPN share weight
        rpn_conv_weight = X.var('rpn_conv_weight', init=X.gauss(0.01))
        rpn_conv_bias = X.var('rpn_conv_bias', init=X.zero_init())
        rpn_conv_cls_weight = X.var('rpn_conv_cls_weight', init=X.gauss(0.01))
        rpn_conv_cls_bias = X.var('rpn_conv_cls_bias', init=X.zero_init())
        rpn_conv_bbox_weight = X.var('rpn_conv_bbox_weight', init=X.gauss(0.01))
        rpn_conv_bbox_bias = X.var('rpn_conv_bbox_bias', init=X.zero_init())

        cls_logit_dict = {}
        bbox_delta_dict = {}

        for stride in p.anchor_generate.stride:
            rpn_conv = X.conv(
                conv_fpn_feat['stride%s' % stride],
                kernel=3,
                filter=conv_channel,
                name="rpn_conv_3x3_%s" % stride,
                no_bias=False,
                weight=rpn_conv_weight,
                bias=rpn_conv_bias
            )
            rpn_relu = X.relu(rpn_conv, name='rpn_relu_%s' % stride)
            if p.fp16:
                rpn_relu = X.to_fp32(rpn_relu, name="rpn_relu_%s_fp32" % stride)

            cls_logit = X.conv(
                rpn_relu,
                filter=2 * num_base_anchor,
                name="rpn_cls_score_stride%s" % stride,
                no_bias=False,
                weight=rpn_conv_cls_weight,
                bias=rpn_conv_cls_bias
            )

            bbox_delta = X.conv(
                rpn_relu,
                filter=4 * num_base_anchor,
                name="rpn_bbox_pred_stride%s" % stride,
                no_bias=False,
                weight=rpn_conv_bbox_weight,
                bias=rpn_conv_bbox_bias
            )

            cls_logit_dict[stride] = cls_logit
            bbox_delta_dict[stride] = bbox_delta

        self.cls_logit_dict = cls_logit_dict
        self.bbox_delta_dict = bbox_delta_dict

        return self.cls_logit_dict, self.bbox_delta_dict
Example #10
def sepc_conv(x,
              name,
              out_channels,
              kernel_size,
              i,
              stride=1,
              padding=0,
              dilation=1,
              groups=1,
              deformable_groups=1,
              part_deform=False,
              start_level=1,
              weight=None,
              bias=None,
              weight_offset=None,
              bias_offset=None):
    assert weight is not None and bias is not None
    if part_deform:
        assert weight_offset is not None and bias_offset is not None
    if i < start_level or not part_deform:
        return conv(x,
                    name,
                    filter=out_channels,
                    kernel=kernel_size,
                    stride=stride,
                    pad=kernel_size // 2,
                    dilate=dilation,
                    num_group=groups,
                    no_bias=False,
                    weight=weight,
                    bias=bias)
    offset = conv(x,
                  name + 'offset',
                  filter=deformable_groups * 2 * kernel_size * kernel_size,
                  kernel=kernel_size,
                  stride=stride,
                  pad=kernel_size // 2,
                  dilate=dilation,
                  num_group=groups,
                  no_bias=False,
                  weight=weight_offset,
                  bias=bias_offset)
    return DeformConv(x,
                      offset,
                      name,
                      out_channels,
                      kernel_size,
                      stride,
                      padding=padding,
                      dilation=dilation,
                      groups=groups,
                      deformable_groups=deformable_groups,
                      no_bias=False,
                      weight=weight,
                      bias=bias)
Example #11
def se(input, prefix, f_down, f_up):
    with mx.name.Prefix(prefix + "_"):
        gap = mx.sym.mean(input, axis=-1, keepdims=True)
        gap = mx.sym.mean(gap, axis=-2, keepdims=True)
        fc1 = conv(gap, name="fc1", filter=f_down)
        fc1 = relu6(fc1, name="fc1_relu")
        fc2 = conv(fc1, name="fc2", filter=f_up)
        att = sigmoid(fc2, name="sigmoid")
        input = mx.sym.broadcast_mul(input, att, name="mul")

    return input
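
For readers without the surrounding helpers, this is a standard squeeze-and-excitation gate. A self-contained sketch in plain mx.sym (relu6 written out as a clip, which is what it computes; se_plain is an illustrative name):

import mxnet as mx

def se_plain(x, prefix, f_down, f_up):
    # squeeze: global average pool over H and W
    gap = mx.sym.mean(x, axis=(2, 3), keepdims=True)
    # excitation: 1x1 bottleneck convs
    fc1 = mx.sym.Convolution(gap, num_filter=f_down, kernel=(1, 1), name=prefix + "_fc1")
    fc1 = mx.sym.clip(fc1, 0, 6, name=prefix + "_fc1_relu6")
    fc2 = mx.sym.Convolution(fc1, num_filter=f_up, kernel=(1, 1), name=prefix + "_fc2")
    att = mx.sym.sigmoid(fc2, name=prefix + "_sigmoid")
    # scale: channel-wise gating of the input
    return mx.sym.broadcast_mul(x, att, name=prefix + "_mul")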
Example #12
    def get_output(self, conv_feat):
        if self._cls_logit is not None and self._bbox_delta is not None:
            return self._cls_logit, self._bbox_delta

        p = self.p
        num_base_anchor = len(p.anchor_generate.ratio) * len(p.anchor_generate.scale)
        conv_channel = p.head.conv_channel

        if p.normalizer.__name__ == "fix_bn":
            conv = X.convrelu(
                conv_feat,
                kernel=3,
                filter=conv_channel,
                name="rpn_conv_3x3",
                no_bias=False,
                init=X.gauss(0.01)
            )
        elif p.normalizer.__name__ in ["sync_bn", "gn"]:
            conv = X.convnormrelu(
                p.normalizer,
                conv_feat,
                kernel=3,
                filter=conv_channel,
                name="rpn_conv_3x3",
                no_bias=False,
                init=X.gauss(0.01)
            )
        else:
            raise NotImplementedError("Unsupported normalizer: {}".format(p.normalizer.__name__))

        if p.fp16:
            conv = X.to_fp32(conv, name="rpn_conv_3x3_fp32")

        cls_logit = X.conv(
            conv,
            filter=2 * num_base_anchor,
            name="rpn_cls_logit",
            no_bias=False,
            init=X.gauss(0.01)
        )

        bbox_delta = X.conv(
            conv,
            filter=4 * num_base_anchor,
            name="rpn_bbox_delta",
            no_bias=False,
            init=X.gauss(0.01)
        )

        self._cls_logit = cls_logit
        self._bbox_delta = bbox_delta

        return self._cls_logit, self._bbox_delta
Example #13
def trident_resnet_v1_unit(input, name, id, filter, stride, dilate, proj, **kwargs):
    p = kwargs["params"]
    share_bn = p.branch_bn_shared
    share_conv = p.branch_conv_shared
    norm = p.normalizer

    ######################### prepare names #########################
    if id is not None:
        conv_postfix = ("_shared%s" if share_conv else "_branch%s") % id
        bn_postfix = ("_shared%s" if share_bn else "_branch%s") % id
        other_postfix = "_branch%s" % id
    else:
        conv_postfix = ""
        bn_postfix = ""
        other_postfix = ""

    ######################### prepare parameters #########################
    conv_params = lambda x: dict(
        weight=X.shared_var(name + "_%s_weight" % x) if share_conv else None,
        name=name + "_%s" % x + conv_postfix
    )

    bn_params = lambda x: dict(
        gamma=X.shared_var(name + "_%s_gamma" % x) if share_bn else None,
        beta=X.shared_var(name + "_%s_beta" % x) if share_bn else None,
        moving_mean=X.shared_var(name + "_%s_moving_mean" % x) if share_bn else None,
        moving_var=X.shared_var(name + "_%s_moving_var" % x) if share_bn else None,
        name=name + "_%s" % x + bn_postfix
    )

    ######################### construct graph #########################
    conv1 = conv(input, filter=filter // 4, stride=stride, **conv_params("conv1"))
    bn1 = norm(conv1, **bn_params("bn1"))
    relu1 = relu(bn1, name=name + "_relu1" + other_postfix)

    conv2 = conv(relu1, filter=filter // 4, kernel=3, dilate=dilate, **conv_params("conv2"))
    bn2 = norm(conv2, **bn_params("bn2"))
    relu2 = relu(bn2, name=name + "_relu2" + other_postfix)

    conv3 = conv(relu2, filter=filter, **conv_params("conv3"))
    bn3 = norm(conv3, **bn_params("bn3"))

    if proj:
        shortcut = conv(input, filter=filter, stride=stride, **conv_params("sc"))
        shortcut = norm(shortcut, **bn_params("sc_bn"))
    else:
        shortcut = input

    eltwise = add(bn3, shortcut, name=name + "_plus" + other_postfix)

    return relu(eltwise, name=name + "_relu" + other_postfix)
Example #14
    def _refine_pts(self, cls_feat, reg_feat, dcn_offset, pts_init_out):
        p = self.p
        point_conv_channel = p.head.point_conv_channel
        num_class = p.num_class
        output_channel = num_class - 1
        pts_output_channel = p.point_generate.num_points * 2

        cls_conv = mx.symbol.contrib.DeformableConvolution(
            data=cls_feat,
            offset=dcn_offset,
            kernel=(self.dcn_kernel, self.dcn_kernel),
            pad=(self.dcn_pad, self.dcn_pad),
            stride=(1, 1),
            dilate=(1, 1),
            num_filter=point_conv_channel,
            weight=self.cls_conv_weight,
            bias=self.cls_conv_bias,
            no_bias=False,
            name="cls_conv")
        cls_conv_relu = X.relu(cls_conv)
        cls_out = X.conv(data=cls_conv_relu,
                         kernel=1,
                         filter=output_channel,
                         weight=self.cls_out_weight,
                         bias=self.cls_out_bias,
                         no_bias=False,
                         name="cls_out")

        pts_refine_conv = mx.symbol.contrib.DeformableConvolution(
            data=reg_feat,
            offset=dcn_offset,
            kernel=(self.dcn_kernel, self.dcn_kernel),
            pad=(self.dcn_pad, self.dcn_pad),
            stride=(1, 1),
            dilate=(1, 1),
            num_filter=point_conv_channel,
            weight=self.pts_refine_conv_weight,
            bias=self.pts_refine_conv_bias,
            no_bias=False,
            name="pts_refine_conv")
        pts_refine_conv_relu = X.relu(pts_refine_conv)
        pts_refine_out = X.conv(data=pts_refine_conv_relu,
                                kernel=1,
                                filter=pts_output_channel,
                                weight=self.pts_refine_out_weight,
                                bias=self.pts_refine_out_bias,
                                no_bias=False,
                                name="pts_refine_out")
        pts_refine_out = pts_refine_out + X.block_grad(pts_init_out)
        return pts_refine_out, cls_out
Example #15
    def _get_bbox_head_logit(self, conv_feat):
        if self._head_feat is not None:
            return self._head_feat

        xavier_init = mx.init.Xavier(factor_type="in", rnd_type="uniform", magnitude=3)

        flatten = X.reshape(conv_feat, shape=(0, -1, 1, 1), name="bbox_feat_reshape")
        fc1 = X.conv(flatten, filter=1024, name="bbox_fc1", init=xavier_init)
        fc1 = self.add_norm(fc1)
        fc1 = X.relu(fc1)
        fc2 = X.conv(fc1, filter=1024, name="bbox_fc2", init=xavier_init)
        fc2 = self.add_norm(fc2)
        fc2 = X.relu(fc2)

        self._head_feat = fc2

        return self._head_feat
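
The reshape-plus-conv pair here is the usual trick for expressing a fully connected layer as a 1x1 convolution: (N, C, H, W) is folded to (N, C*H*W, 1, 1), over which a 1x1 conv is exactly an FC layer. A quick shape check in plain mx.sym:

import mxnet as mx

x = mx.sym.Variable("feat")
flat = mx.sym.reshape(x, shape=(0, -1, 1, 1))  # keep N, fold C*H*W
fc = mx.sym.Convolution(flat, num_filter=1024, kernel=(1, 1), name="fc_as_conv")
print(fc.infer_shape(feat=(2, 256, 7, 7))[1])  # [(2, 1024, 1, 1)]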
Example #16
    def _get_output(self, mask_pred_logits, conv_feat):
        num_class = self.pBbox.num_class

        msra_init = mx.init.Xavier(rnd_type="gaussian", factor_type="out", magnitude=2)
        normal_init = mx.init.Normal(0.01)
        kaiming_uniform = mx.init.Xavier(rnd_type='uniform', factor_type='in', magnitude=3)

        mask_pred_logits = mx.sym.expand_dims(mask_pred_logits, axis=1)

        iou_head_maxpool_1 = X.pool(
            mask_pred_logits,
            name='iou_head_maxpool_1',
            kernel=2,
            stride=2,
            pad=0,
        )
        iou_head_input = X.concat([conv_feat, iou_head_maxpool_1], axis=1, name='iou_head_input')
        hi = iou_head_input
        for ii in range(3):
            hi = X.conv(
                hi,
                filter=256,
                kernel=3,
                stride=1,
                name='iou_head_conv_%d' % ii,
                no_bias=False,
                init=msra_init,
            )
            hi = X.relu(hi)
        hi = X.conv(
            hi,
            filter=256,
            kernel=3,
            stride=2,
            name='iou_head_conv_3',
            no_bias=False,
            init=msra_init
        )
        hi = X.relu(hi)
        hi = X.flatten(data=hi)
        fc1 = X.relu(X.fc(hi, filter=1024, name='iou_head_FC1', init=kaiming_uniform))
        fc2 = X.relu(X.fc(fc1, filter=1024, name='iou_head_FC2', init=kaiming_uniform))
        iou_pred_logits = X.fc(fc2, filter=num_class, name='iou_head_pred', init=normal_init)
        return iou_pred_logits
Example #17
def get_P0_features(c_features, p_names, dim_reduced, init, norm):
    p_features = {}
    for c_feature, p_name in zip(c_features, p_names):
        p = X.conv(data=c_feature,
                   filter=dim_reduced,
                   no_bias=False,
                   weight=X.var(name=p_name + "_weight", init=init),
                   bias=X.var(name=p_name + "_bias", init=X.zero_init()),
                   name=p_name)
        p = norm(p, name=p_name + '_bn')
        p_features[p_name] = p
    return p_features
Example #18
def convnormrelu(input, prefix, kernel, f_in, f_out, stride, proj, norm,
                 **kwargs):
    with mx.name.Prefix(prefix + "_"):
        conv1 = conv(input,
                     name="conv1",
                     filter=f_out,
                     kernel=kernel,
                     stride=stride,
                     no_bias=False)
        bn1 = norm(conv1, name="bn1")
        relu1 = relu6(bn1, name="relu1")
    return relu1
Example #19
    def _init_pts(self, reg_feat):
        p = self.p
        point_conv_channel = p.head.point_conv_channel
        pts_output_channel = p.point_generate.num_points * 2

        pts_init_conv = X.conv(data=reg_feat,
                               kernel=3,
                               filter=point_conv_channel,
                               weight=self.pts_init_conv_weight,
                               bias=self.pts_init_conv_bias,
                               no_bias=False,
                               name="pts_init_conv")
        pts_init_conv_relu = X.relu(pts_init_conv)
        pts_init_out = X.conv(data=pts_init_conv_relu,
                              kernel=1,
                              filter=pts_output_channel,
                              weight=self.pts_init_out_weight,
                              bias=self.pts_init_out_bias,
                              no_bias=False,
                              name="pts_init_out")

        return pts_init_out
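
With the default RepPoints setting of 9 points, pts_output_channel is 18, deliberately matching the offset channels a 3x3 deformable convolution with one deformable group consumes; the refine stage (Example #14) feeds this prediction in as dcn_offset:

pts_output_channel = 9 * 2       # 18 predicted offsets
dcn_offset_channels = 2 * 3 * 3  # expected by a 3x3 DeformableConvolution
assert pts_output_channel == dcn_offset_channels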
Example #20
    def _reg_head(self, conv_feat):
        num_block = self.p.num_block or 4

        for i in range(num_block):
            conv_feat = X.conv(conv_feat,
                               kernel=3,
                               filter=256,
                               init=X.gauss(0.01),
                               name="bbox_reg_block%s" % (i + 1))
            conv_feat = self.add_norm(conv_feat)
            conv_feat = X.relu(conv_feat)

        return conv_feat
Example #21
    def _convs_and_fcs(self, x, num_convs, num_fcs, name, conv_init, fc_init):
        '''
        Args:
            x: [N, C, H, W] feature maps
            num_convs: int
            num_fcs: int
            conv_init: mx initializer
        Returns:
            x: [N, C, H, W] or [N, C, 1, 1]
        '''
        if num_convs == 0 and num_fcs == 0:
            return x

        out_channels = self.p.TSD.conv_out_channels
        out_fc_channels = self.p.TSD.fc_out_channels

        if num_convs > 0:
            for i in range(num_convs):
                x = X.relu(
                    X.conv(x,
                           kernel=3,
                           filter=out_channels,
                           no_bias=False,
                           name=name + '_conv%s' % i,
                           init=conv_init))

        if num_fcs > 0:
            x = X.reshape(x,
                          shape=(0, -1, 1, 1),
                          name=name + '_conv_fc_flatten')
            for i in range(num_fcs):
                x = X.relu(
                    X.conv(x,
                           kernel=1,
                           filter=out_fc_channels,
                           no_bias=False,
                           name=name + '_fc%s' % i,
                           init=fc_init))
        return x
Example #22
    def _cls_subnet(self, conv_feat, stride):
        p = self.p
        norm = p.normalizer
        conv_channel = p.head.conv_channel

        # classification subnet
        cls_conv1 = X.conv(data=conv_feat,
                           kernel=3,
                           filter=conv_channel,
                           weight=self.cls_conv1_weight,
                           bias=self.cls_conv1_bias,
                           no_bias=False,
                           name="cls_conv1")
        cls_conv1 = norm(cls_conv1, name="cls_conv1_bn_s{}".format(stride))
        cls_conv1_relu = X.relu(cls_conv1)
        cls_conv2 = X.conv(data=cls_conv1_relu,
                           kernel=3,
                           filter=conv_channel,
                           weight=self.cls_conv2_weight,
                           bias=self.cls_conv2_bias,
                           no_bias=False,
                           name="cls_conv2")
        cls_conv2 = norm(cls_conv2, name="cls_conv2_bn_s{}".format(stride))
        cls_conv2_relu = X.relu(cls_conv2)
        cls_conv3 = X.conv(data=cls_conv2_relu,
                           kernel=3,
                           filter=conv_channel,
                           weight=self.cls_conv3_weight,
                           bias=self.cls_conv3_bias,
                           no_bias=False,
                           name="cls_conv3")
        cls_conv3 = norm(cls_conv3, name="cls_conv3_bn_s{}".format(stride))
        cls_conv3_relu = X.relu(cls_conv3)

        if p.fp16:
            cls_conv3_relu = X.to_fp32(cls_conv3_relu, name="cls_conv3_fp32")

        return cls_conv3_relu
Example #23
    def _reg_subnet(self, conv_feat, stride):
        p = self.p
        norm = p.normalizer
        conv_channel = p.head.conv_channel

        # regression subnet
        reg_conv1 = X.conv(data=conv_feat,
                           kernel=3,
                           filter=conv_channel,
                           weight=self.reg_conv1_weight,
                           bias=self.reg_conv1_bias,
                           no_bias=False,
                           name="reg_conv1")
        reg_conv1 = norm(reg_conv1, name="reg_conv1_bn_s{}".format(stride))
        reg_conv1_relu = X.relu(reg_conv1)
        reg_conv2 = X.conv(data=reg_conv1_relu,
                           kernel=3,
                           filter=conv_channel,
                           weight=self.reg_conv2_weight,
                           bias=self.reg_conv2_bias,
                           no_bias=False,
                           name="reg_conv2")
        reg_conv2 = norm(reg_conv2, name="reg_conv2_bn_s{}".format(stride))
        reg_conv2_relu = X.relu(reg_conv2)
        reg_conv3 = X.conv(data=reg_conv2_relu,
                           kernel=3,
                           filter=conv_channel,
                           weight=self.reg_conv3_weight,
                           bias=self.reg_conv3_bias,
                           no_bias=False,
                           name="reg_conv3")
        reg_conv3 = norm(reg_conv3, name="reg_conv3_bn_s{}".format(stride))
        reg_conv3_relu = X.relu(reg_conv3)

        if p.fp16:
            reg_conv3_relu = X.to_fp32(reg_conv3_relu, name="reg_conv3_fp32")

        return reg_conv3_relu
Example #24
    def _get_bbox_head_logit(self, conv_feat):
        #if self._head_feat is not None:
        #    return self._head_feat

        stage = self.stage

        flatten = X.flatten(conv_feat, name="bbox_feat_flatten_" + stage)
        reshape = X.reshape(flatten, (0, 0, 1, 1),
                            name="bbox_feat_reshape_" + stage)
        fc1 = X.conv(reshape,
                     filter=1024,
                     weight=self.fc1_weight,
                     name="bbox_fc1_" + stage)
        fc1_relu = X.relu(fc1, name="bbox_fc1_relu_" + stage)
        fc2 = X.conv(fc1_relu,
                     filter=1024,
                     weight=self.fc2_weight,
                     name="bbox_fc2_" + stage)
        fc2_relu = X.relu(fc2, name="bbox_fc2_relu_" + stage)

        self._head_feat = fc2_relu

        return self._head_feat
Example #25
def get_P0_features(c_features,
                    p_names,
                    dim_reduced,
                    init,
                    norm,
                    kernel=1):
    p_features = []
    for c_feature, p_name in zip(c_features, p_names):
        p = X.conv(data=c_feature,
                   filter=dim_reduced,
                   kernel=kernel,
                   no_bias=False,
                   weight=X.var(name=p_name + "_weight", init=init),
                   bias=X.var(name=p_name + "_bias", init=X.zero_init()),
                   name=p_name)
        p_features.append(p)
    return p_features
Example #26
    def _get_mask_head_logit(self, conv_feat):
        if self._head_feat is not None:
            return self._head_feat

        up_stride = int(self.pMask.resolution // self.pMaskRoi.out_size)
        dim_reduced = self.pMask.dim_reduced

        msra_init = mx.init.Xavier(rnd_type="gaussian", factor_type="out", magnitude=2)

        current = conv_feat
        for i in range(4):
            current = X.conv(
                current,
                name="mask_fcn_conv{}".format(i + 1),
                filter=dim_reduced,
                kernel=3,
                no_bias=False,
                init=msra_init
            )
            current = self.add_norm(current)
            current = X.relu(current)

        mask_up = current
        for i in range(up_stride // 2):
            weight = X.var(
                name="mask_up{}_weight".format(i),
                init=msra_init,
                lr_mult=1,
                wd_mult=1)
            mask_up = mx.sym.Deconvolution(
                mask_up,
                kernel=(2, 2),
                stride=(2, 2),
                num_filter=dim_reduced,
                no_bias=False,
                weight=weight,
                name="mask_up{}".format(i)
                )
            mask_up = X.relu(
                mask_up,
                name="mask_up{}_relu".format(i))

        mask_up = X.to_fp32(mask_up, name='mask_up_to_fp32')
        self._head_feat = mask_up

        return self._head_feat
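
On the deconvolution count: each 2x2, stride-2 Deconvolution doubles the spatial size, and in the common Mask R-CNN configuration (28x28 masks decoded from a 14x14 RoI feature) up_stride is 2, so exactly one such layer is built:

up_stride = 28 // 14        # typical resolution // out_size
assert up_stride // 2 == 1  # a single 2x upsampling deconvolution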
Example #27
    def get_output(self, conv_feat):
        pBbox = self.pBbox
        num_class = pBbox.num_class

        head_feat = self._get_mask_head_logit(conv_feat)

        msra_init = mx.init.Xavier(rnd_type="gaussian", factor_type="out", magnitude=2)

        if self.pMask.fp16:
            head_feat = X.to_fp32(head_feat, name="mask_head_to_fp32")

        mask_fcn_logit = X.conv(
            head_feat,
            filter=num_class,
            name="mask_fcn_logit",
            no_bias=False,
            init=msra_init
        )

        return mask_fcn_logit
Example #28
    def _bbox_subnet(self,
                     conv_feat,
                     conv_channel,
                     num_base_anchor,
                     num_class,
                     stride,
                     nb_conv=0):
        p = self.p
        if nb_conv <= 0:
            bbox_conv4_relu = conv_feat
            if p.fp16:
                bbox_conv4_relu = X.to_fp32(bbox_conv4_relu,
                                            name="bbox_conv4_fp32")
            output_channel = num_base_anchor * 4
            output = X.conv(data=bbox_conv4_relu,
                            kernel=3,
                            filter=output_channel,
                            weight=self.bbox_pred_weight,
                            bias=self.bbox_pred_bias,
                            no_bias=False,
                            name="bbox_pred")
            return output
        return super()._bbox_subnet(conv_feat, conv_channel, num_base_anchor,
                                    num_class, stride)
Example #29
    def fpn_neck(self, data):
        if self.fpn_feat is not None:
            return self.fpn_feat

        c2, c3, c4, c5 = data

        xavier_init = mx.init.Xavier(factor_type="in",
                                     rnd_type="uniform",
                                     magnitude=3)

        # P5
        p5 = X.conv(data=c5,
                    filter=256,
                    no_bias=False,
                    weight=X.var(name="P5_lateral_weight", init=xavier_init),
                    bias=X.var(name="P5_lateral_bias", init=X.zero_init()),
                    name="P5_lateral")
        p5 = self.add_norm(p5)
        p5_conv = X.conv(data=p5,
                         kernel=3,
                         filter=256,
                         no_bias=False,
                         weight=X.var(name="P5_conv_weight", init=xavier_init),
                         bias=X.var(name="P5_conv_bias", init=X.zero_init()),
                         name="P5_conv")
        p5_conv = self.add_norm(p5_conv)

        # P4
        p5_up = mx.sym.UpSampling(p5,
                                  scale=2,
                                  sample_type="nearest",
                                  name="P5_upsampling",
                                  num_args=1)
        p4_la = X.conv(data=c4,
                       filter=256,
                       no_bias=False,
                       weight=X.var(name="P4_lateral_weight",
                                    init=xavier_init),
                       bias=X.var(name="P4_lateral_bias", init=X.zero_init()),
                       name="P4_lateral")
        p4_la = self.add_norm(p4_la)
        p5_clip = mx.sym.slice_like(p5_up, p4_la, name="P4_clip")
        p4 = mx.sym.add_n(p5_clip, p4_la, name="P4_sum")

        p4_conv = X.conv(data=p4,
                         kernel=3,
                         filter=256,
                         no_bias=False,
                         weight=X.var(name="P4_conv_weight", init=xavier_init),
                         bias=X.var(name="P4_conv_bias", init=X.zero_init()),
                         name="P4_conv")
        p4_conv = self.add_norm(p4_conv)

        # P3
        p4_up = mx.sym.UpSampling(p4,
                                  scale=2,
                                  sample_type="nearest",
                                  name="P4_upsampling",
                                  num_args=1)
        p3_la = X.conv(data=c3,
                       filter=256,
                       no_bias=False,
                       weight=X.var(name="P3_lateral_weight",
                                    init=xavier_init),
                       bias=X.var(name="P3_lateral_bias", init=X.zero_init()),
                       name="P3_lateral")
        p3_la = self.add_norm(p3_la)
        p4_clip = mx.sym.slice_like(p4_up, p3_la, name="P3_clip")
        p3 = mx.sym.add_n(p4_clip, p3_la, name="P3_sum")

        p3_conv = X.conv(data=p3,
                         kernel=3,
                         filter=256,
                         no_bias=False,
                         weight=X.var(name="P3_conv_weight", init=xavier_init),
                         bias=X.var(name="P3_conv_bias", init=X.zero_init()),
                         name="P3_conv")
        p3_conv = self.add_norm(p3_conv)

        # P2
        p3_up = mx.sym.UpSampling(p3,
                                  scale=2,
                                  sample_type="nearest",
                                  name="P3_upsampling",
                                  num_args=1)
        p2_la = X.conv(data=c2,
                       filter=256,
                       no_bias=False,
                       weight=X.var(name="P2_lateral_weight",
                                    init=xavier_init),
                       bias=X.var(name="P2_lateral_bias", init=X.zero_init()),
                       name="P2_lateral")
        p2_la = self.add_norm(p2_la)
        p3_clip = mx.sym.slice_like(p3_up, p2_la, name="P2_clip")
        p2 = mx.sym.add_n(p3_clip, p2_la, name="P2_sum")

        p2_conv = X.conv(data=p2,
                         kernel=3,
                         filter=256,
                         no_bias=False,
                         weight=X.var(name="P2_conv_weight", init=xavier_init),
                         bias=X.var(name="P2_conv_bias", init=X.zero_init()),
                         name="P2_conv")
        p2_conv = self.add_norm(p2_conv)

        # P6
        p6 = X.max_pool(
            p5_conv,
            name="P6_subsampling",
            kernel=1,
            stride=2,
        )

        conv_fpn_feat = dict(stride64=p6,
                             stride32=p5_conv,
                             stride16=p4_conv,
                             stride8=p3_conv,
                             stride4=p2_conv)

        self.fpn_feat = conv_fpn_feat
        return self.fpn_feat
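
All four top-down merges repeat the same pattern; factored out, one step looks like the sketch below (plain mx.sym, mirroring the P4 branch above; topdown_merge is an illustrative name):

import mxnet as mx

def topdown_merge(p_coarse, lateral, level):
    # upsample the coarser level, crop to the lateral's size, then sum
    up = mx.sym.UpSampling(p_coarse, scale=2, sample_type="nearest",
                           name=level + "_upsampling", num_args=1)
    up = mx.sym.slice_like(up, lateral, name=level + "_clip")
    return mx.sym.add_n(up, lateral, name=level + "_sum")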
Example #30
    def get_retinanet_neck(self, data):
        if self.neck is not None:
            return self.neck

        c2, c3, c4, c5 = data

        import mxnet as mx
        xavier_init = mx.init.Xavier(factor_type="in",
                                     rnd_type="uniform",
                                     magnitude=3)
        # P5
        p5 = X.conv(data=c5,
                    filter=256,
                    no_bias=False,
                    weight=X.var(name="P5_lateral_weight", init=xavier_init),
                    bias=X.var(name="P5_lateral_bias", init=X.zero_init()),
                    name="P5_lateral")
        p5_conv = X.conv(data=p5,
                         kernel=3,
                         filter=256,
                         no_bias=False,
                         weight=X.var(name="P5_conv_weight", init=xavier_init),
                         bias=X.var(name="P5_conv_bias", init=X.zero_init()),
                         name="P5_conv")

        # P4
        p5_up = mx.sym.UpSampling(p5,
                                  scale=2,
                                  sample_type="nearest",
                                  name="P5_upsampling",
                                  num_args=1)
        p4_la = X.conv(data=c4,
                       filter=256,
                       no_bias=False,
                       weight=X.var(name="P4_lateral_weight",
                                    init=xavier_init),
                       bias=X.var(name="P4_lateral_bias", init=X.zero_init()),
                       name="P4_lateral")
        p5_clip = mx.sym.slice_like(p5_up, p4_la, name="P4_clip")
        p4 = mx.sym.add_n(p5_clip, p4_la, name="P4_sum")

        p4_conv = X.conv(data=p4,
                         kernel=3,
                         filter=256,
                         no_bias=False,
                         weight=X.var(name="P4_conv_weight", init=xavier_init),
                         bias=X.var(name="P4_conv_bias", init=X.zero_init()),
                         name="P4_conv")

        # P3
        p4_up = mx.sym.UpSampling(p4,
                                  scale=2,
                                  sample_type="nearest",
                                  name="P4_upsampling",
                                  num_args=1)
        p3_la = X.conv(data=c3,
                       filter=256,
                       no_bias=False,
                       weight=X.var(name="P3_lateral_weight",
                                    init=xavier_init),
                       bias=X.var(name="P3_lateral_bias", init=X.zero_init()),
                       name="P3_lateral")
        p4_clip = mx.sym.slice_like(p4_up, p3_la, name="P3_clip")
        p3 = mx.sym.add_n(p4_clip, p3_la, name="P3_sum")

        p3_conv = X.conv(data=p3,
                         kernel=3,
                         filter=256,
                         no_bias=False,
                         weight=X.var(name="P3_conv_weight", init=xavier_init),
                         bias=X.var(name="P3_conv_bias", init=X.zero_init()),
                         name="P3_conv")

        # P6
        p6 = X.conv(data=c5,
                    kernel=3,
                    stride=2,
                    filter=256,
                    no_bias=False,
                    weight=X.var(name="P6_conv_weight", init=xavier_init),
                    bias=X.var(name="P6_conv_bias", init=X.zero_init()),
                    name="P6_conv")

        # P7
        p6_relu = X.relu(data=p6, name="P6_relu")
        p7 = X.conv(data=p6_relu,
                    kernel=3,
                    stride=2,
                    filter=256,
                    no_bias=False,
                    weight=X.var(name="P7_conv_weight", init=xavier_init),
                    bias=X.var(name="P7_conv_bias", init=X.zero_init()),
                    name="P7_conv")

        self.neck = dict(stride8=p3_conv,
                         stride16=p4_conv,
                         stride32=p5_conv,
                         stride64=p6,
                         stride128=p7)

        return self.neck
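
Unlike the FPN neck in Example #29, this RetinaNet neck drops P2 and extends the pyramid upward with strided convolutions rather than pooling: P6 is a stride-2 3x3 conv on C5, and P7 a stride-2 3x3 conv on relu(P6), yielding the stride set consumed by the head:

strides = dict(P3=8, P4=16, P5=32, P6=64, P7=128)  # matches the keys above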