Example #1
 def __init__(self, out_channels, cfg, *args, **kwargs):
     super().__init__(cfg=cfg, *args, **kwargs)
     self.normalizer_fn, self.norm_params = odt.get_norm(
         self.cfg.MODEL.FPN.NORM, self.is_training)
     self.activation_fn = odt.get_activation_fn(
         self.cfg.MODEL.FPN.ACTIVATION_FN)
     self.out_channels = out_channels
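
Every snippet in this listing shares one idiom: odt.get_norm (or odtk.get_norm) turns a norm name from the config into a slim-style (normalizer_fn, norm_params) pair, and get_activation_fn turns an activation name into a callable, both later fed to slim layers. The helpers below are a minimal sketch of that contract, assuming TF1.x slim; they are stand-ins, not the repo's actual odt implementation.

    import tensorflow as tf
    import tensorflow.contrib.slim as slim

    def get_norm(name, is_training):
        # Stand-in for odt.get_norm: map a config string to a slim
        # normalizer_fn plus its params dict; (None, None) disables norm.
        if name == "BN":
            return slim.batch_norm, {"decay": 0.997, "epsilon": 1e-4,
                                     "is_training": is_training}
        if name in (None, "", "NA"):
            return None, None
        raise ValueError(f"unknown norm: {name}")

    def get_activation_fn(name):
        # Stand-in for odt.get_activation_fn.
        return {"relu": tf.nn.relu, "relu6": tf.nn.relu6,
                "swish": tf.nn.swish, "NA": None}[name]
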
Example #2
 def __init__(self, cfg, *args, **kwargs):
     super().__init__(cfg, *args, **kwargs)
     self.out_channels = self.cfg.MODEL.SHUFFLENETS.OUT_CHANNELS
     self.normalizer_fn, self.norm_params = odt.get_norm(
         self.cfg.MODEL.SHUFFLENETS.NORM, self.is_training)
     self.activation_fn = odt.get_activation_fn(
         self.cfg.MODEL.SHUFFLENETS.ACTIVATION_FN)
Example #3
    def forward(self, features, batched_inputs):
        normalizer_fn, normalizer_params = odt.get_norm(
            self.cfg.NORM, self.is_training)
        activation_fn = odt.get_activation_fn(self.cfg.ACTIVATION_FN)
        with tf.variable_scope("FusionBackboneHookV2"):
            del batched_inputs
            end_points = list(features.items())
            k0, v0 = end_points[0]
            mfeatures = []
            shape0 = wmlt.combined_static_and_dynamic_shape(v0)
            for k, v in end_points[1:]:
                net = tf.image.resize_bilinear(v, shape0[1:3])
                mfeatures.append(net)
            net = tf.add_n(mfeatures) / float(len(mfeatures))
            # Compared with v2, fuse with an element-wise sum instead of a concat.
            net = v0 + net
            level0 = int(k0[1:])
            net = slim.conv2d(net,
                              v0.get_shape().as_list()[-1], [3, 3],
                              activation_fn=activation_fn,
                              normalizer_fn=normalizer_fn,
                              normalizer_params=normalizer_params,
                              scope=f"smooth{level0}")
            res = features
            res[f'F{level0}'] = net

            return res
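
The hook above upsamples every coarser level to the resolution of the first (finest) feature map with bilinear resizing, averages them, sums the result onto that map, and smooths with a 3x3 conv. The same fusion on dummy TF1 tensors, with wmlt.combined_static_and_dynamic_shape replaced by plain tf.shape (a sketch; shapes and channel counts are illustrative):

    import tensorflow as tf
    import tensorflow.contrib.slim as slim

    features = {"C3": tf.ones([1, 64, 64, 256]),
                "C4": tf.ones([1, 32, 32, 256]),
                "C5": tf.ones([1, 16, 16, 256])}
    items = list(features.items())
    k0, v0 = items[0]
    size0 = tf.shape(v0)[1:3]
    # Bring every coarser map to the finest resolution, then average.
    upsampled = [tf.image.resize_bilinear(v, size0) for _, v in items[1:]]
    fused = v0 + tf.add_n(upsampled) / float(len(upsampled))
    # A 3x3 conv smooths the aliasing introduced by upsampling.
    out = slim.conv2d(fused, v0.get_shape().as_list()[-1], [3, 3],
                      scope=f"smooth{int(k0[1:])}")
    features[f"F{int(k0[1:])}"] = out
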
Example #4
    def __init__(self, cfg, parent, *args, **kwargs):
        '''
        :param cfg: only the child part of the config
        :param parent:
        :param args:
        :param kwargs:
        '''
        super().__init__(cfg, *args, parent=parent, **kwargs)
        # Detectron2 does not use a normalizer by default, but on our test dataset
        # the network fails to converge without one.
        self.normalizer_fn, self.norm_params = odtk.get_norm(
            self.cfg.NORM, is_training=self.is_training)
        self.activation_fn = odtk.get_activation_fn(self.cfg.ACTIVATION_FN)
        self.norm_scope_name = odtk.get_norm_scope_name(self.cfg.NORM)
        '''self.left_pool = tfop.left_pool
        self.right_pool = tfop.right_pool
        self.bottom_pool = tfop.bottom_pool
        self.top_pool = tfop.top_pool'''
        '''self.left_pool = partial(wnnl.cnn_self_hattenation,scope="left_pool")
        self.right_pool = partial(wnnl.cnn_self_hattenation,scope="right_pool")
        self.bottom_pool = partial(wnnl.cnn_self_vattenation,scope="bottom_pool")
        self.top_pool = partial(wnnl.cnn_self_vattenation,scope="top_pool")'''
        self.left_pool = left_pool
        self.right_pool = right_pool
        self.bottom_pool = bottom_pool
        self.top_pool = top_pool
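
The four directional pools are CornerNet-style corner pooling ops (custom tfop kernels in the repo, with attention-based alternatives left commented out above). If the custom kernel is unavailable, left_pool can be expressed with stock TF1 ops as a running right-to-left maximum over the width axis; a sketch, not the repo's kernel:

    import tensorflow as tf

    def left_pool(x):
        # out[n, i, j, c] = max over k >= j of x[n, i, k, c]  (NHWC layout).
        rev = tf.reverse(x, axis=[2])            # scan right-to-left
        cols = tf.transpose(rev, [2, 0, 1, 3])   # width-major for tf.scan
        cummax = tf.scan(tf.maximum, cols)       # running element-wise max
        return tf.reverse(tf.transpose(cummax, [1, 2, 0, 3]), axis=[2])
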
Example #5
 def __init__(self, cfg, is_mini=False, **kwargs):
     super().__init__(cfg, **kwargs)
     self.normalizer_fn, self.norm_params = odt.get_norm(
         self.cfg.MODEL.HRNET.NORM, self.is_training)
     self.activation_fn = odt.get_activation_fn(
         self.cfg.MODEL.HRNET.ACTIVATION_FN)
     self.is_mini = is_mini
Example #6
    def forward(self, x):
        res = collections.OrderedDict()
        batch_norm_decay = self.cfg.MODEL.MOBILENETS.batch_norm_decay  #0.999
        activation_fn = get_activation_fn(
            self.cfg.MODEL.MOBILENETS.ACTIVATION_FN)
        if not self.is_training:
            train_bn = False
        elif self.cfg.MODEL.MOBILENETS.FROZEN_BN:
            print("Frozen bn.")
            train_bn = False
        else:
            train_bn = True
        MINOR_VERSION = self.cfg.MODEL.MOBILENETS.MINOR_VERSION
        # Select the conv definitions and end-point keys for the requested variant.
        if MINOR_VERSION == "LARGE":
            conv_defs = mobilenet_v3.V3_LARGE
            keys = ["layer_2", "layer_4", "layer_7", "layer_13", "layer_17"]
        elif MINOR_VERSION == "LARGE_DETECTION":
            conv_defs = mobilenet_v3.V3_LARGE_DETECTION
            keys = ["layer_2", "layer_4", "layer_7", "layer_13", "layer_17"]
        elif MINOR_VERSION == "SMALL":
            conv_defs = mobilenet_v3.get_V3_SMALL(activation_fn)
            keys = ["layer_1", "layer_2", "layer_4", "layer_9", "layer_13"]
        elif MINOR_VERSION == "SMALL_DETECTION":
            conv_defs = mobilenet_v3.get_V3_SMALL_DETECTION(activation_fn)
            keys = ["layer_1", "layer_2", "layer_4", "layer_9", "layer_13"]
        else:
            raise ValueError(f"Unknown MobileNet minor version {MINOR_VERSION}")
        # Patch a deep copy so the module-level conv_defs shared by other models
        # stays untouched; fall back to the originals when there is no override.
        defs = conv_defs
        if activation_fn is not None:
            defs = copy.deepcopy(conv_defs)
            for d in defs['spec']:
                if 'activation_fn' in d.params:
                    d.params.update({'activation_fn': activation_fn})

        with slim.arg_scope(
                mobilenet_v3.training_scope(bn_decay=batch_norm_decay,
                                            is_training=train_bn)):
            _, end_points = mobilenet_v3.mobilenet(x['image'],
                                                   output_stride=None,
                                                   base_only=True,
                                                   num_classes=None,
                                                   conv_defs=defs)

        self.end_points = end_points
        res.update(end_points)

        for i in range(1, 6):
            res[f"C{i}"] = end_points[keys[i - 1]]

        return res
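
The deepcopy-and-patch step matters because V3_LARGE and friends are module-level definitions shared by every model in the process; mutating them in place would leak the activation override. The pattern in isolation, on a simplified stand-in for the conv_defs structure (the real entries are slim op tuples with a params dict):

    import copy

    conv_defs = {"spec": [
        {"params": {"activation_fn": "relu6", "kernel": 3}},
        {"params": {"kernel": 1}},   # declares no activation; left untouched
    ]}
    defs = copy.deepcopy(conv_defs)
    for d in defs["spec"]:
        if "activation_fn" in d["params"]:
            d["params"]["activation_fn"] = "swish"
    # The shared definitions are unchanged.
    assert conv_defs["spec"][0]["params"]["activation_fn"] == "relu6"
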
Example #7
 def __init__(self, cfg, *args, **kwargs):
     if cfg.MODEL.PREPROCESS != "subimagenetmean":
         print("--------------------WARNING--------------------")
         print(f"Preprocess for resnet should be subimagenetmean, not {cfg.MODEL.PREPROCESS}.")
         print("------------------END WARNING------------------")
     super().__init__(cfg, *args, **kwargs)
     self.normalizer_fn, self.norm_params = odt.get_norm(
         self.cfg.MODEL.RESNETS.NORM, self.is_training)
     self.activation_fn = odt.get_activation_fn(
         self.cfg.MODEL.RESNETS.ACTIVATION_FN)
     self.out_channels = cfg.MODEL.RESNETS.OUT_CHANNELS
     self.scope_name = "50"
Example #8
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     self.rcnn_anchor_boxes = tf.reshape(
         tf.convert_to_tensor(self.cfg.MODEL.ANCHOR_GENERATOR.SIZES,
                              dtype=tf.float32), [1, -1])
     self.normalizer_fn, self.norm_params = odt.get_norm(
         self.cfg.MODEL.ROI_BOX_HEAD.NORM, self.is_training)
     self.activation_fn = odt.get_activation_fn(
         self.cfg.MODEL.ROI_BOX_HEAD.ACTIVATION_FN)
     self.norm_scope_name = odt.get_norm_scope_name(
         self.cfg.MODEL.ROI_BOX_HEAD.NORM)
Example #9
 def __init__(self, cfg, **kwargs):
     """
     The following attributes are parsed from config:
         num_conv: the number of conv layers
         conv_dim: the dimension of the conv layers
         norm: normalization for the conv layers
     """
     super(HighResolutionMaskHead, self).__init__(cfg, **kwargs)
     self.normalizer_fn, self.norm_params = odt.get_norm(
         self.cfg.MODEL.ROI_MASK_HEAD.NORM, self.is_training)
     self.activation_fn = odt.get_activation_fn(
         self.cfg.MODEL.ROI_MASK_HEAD.ACTIVATION_FN)
Example #10
    def __init__(self, cfg, parent, *args, **kwargs):
        '''
        :param cfg: only the child part of the config
        :param parent:
        :param args:
        :param kwargs:
        '''
        super().__init__(cfg, parent=parent, *args, **kwargs)
        self.normalizer_fn, self.norm_params = odtk.get_norm(
            self.cfg.NORM, is_training=self.is_training)
        self.activation_fn = odtk.get_activation_fn(self.cfg.ACTIVATION_FN)
Example #11
    def __init__(self,
                 cfg,
                 bottom_up,
                 in_features,
                 out_channels,
                 fuse_type="sum",
                 parent=None,
                 *args,
                 **kwargs):
        """
        Args:
            bottom_up (Backbone): module representing the bottom up subnetwork.
                Must be a subclass of :class:`Backbone`. The multi-scale feature
                maps generated by the bottom up network, and listed in `in_features`,
                are used to generate WeightedFPN levels.
            in_features (list[str]): names of the input feature maps coming
                from the backbone to which WeightedFPN is attached. For example, if the
                backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
                of these may be used; order must be from high to low resolution.
            out_channels (int): number of channels in the output feature maps.
            norm (str): the normalization to use.
            fuse_type (str): types for fusing the top down features and the lateral
                ones. It can be "sum" (default), which sums up element-wise; or "avg",
                which takes the element-wise mean of the two.
        """
        stage = int(in_features[-1][1:])
        super(WeightedFPN, self).__init__(cfg, parent=parent, *args, **kwargs)
        assert isinstance(bottom_up, Backbone)

        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        self.in_features = in_features
        self.bottom_up = bottom_up
        self.out_channels = out_channels
        self._fuse_type = fuse_type
        self.scope = "WeightedFPN"
        self.use_depthwise = False
        self.interpolate_op = tf.image.resize_nearest_neighbor
        self.stage = stage
        # Detectron2 does not use a normalizer by default, but on our test dataset
        # the network fails to converge without one.
        self.normalizer_fn, self.normalizer_params = odt.get_norm(
            self.cfg.MODEL.TWOWAYFPN.NORM, self.is_training)
        self.hook0_before, self.hook0_after = build_backbone_hook_by_name(
            cfg.MODEL.TWOWAYFPN.BACKBONE_HOOK, cfg, parent=self)
        if len(cfg.MODEL.TWOWAYFPN.BACKBONE_HOOK) >= 4:
            self.hook1_before, self.hook1_after = build_backbone_hook_by_name(
                cfg.MODEL.TWOWAYFPN.BACKBONE_HOOK[2:], cfg, parent=self)
        else:
            self.hook1_before, self.hook1_after = build_backbone_hook_by_name(
                ["", ""], cfg, parent=self)
        self.activation_fn = odt.get_activation_fn(
            self.cfg.MODEL.TWOWAYFPN.ACTIVATION_FN)
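
Per the docstring, fuse_type only switches how a top-down map is combined with a lateral one. A minimal sketch of the two modes (function name assumed, not taken from the repo's forward code):

    import tensorflow as tf

    def fuse(top_down, lateral, fuse_type="sum"):
        # "sum": element-wise addition; "avg": element-wise mean of the two.
        out = top_down + lateral
        return out / 2.0 if fuse_type == "avg" else out
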
Example #12
 def __init__(self, cfg, parent, *args, **kwargs):
     '''
     :param cfg: only the child part of the config
     :param parent:
     :param args:
     :param kwargs:
     '''
     super().__init__(cfg, *args, parent=parent, **kwargs)
     self.normalizer_fn, self.norm_params = odtk.get_norm(
         self.cfg.NORM, is_training=self.is_training)
     self.activation_fn = odtk.get_activation_fn(self.cfg.ACTIVATION_FN)
     self.norm_scope_name = odtk.get_norm_scope_name(self.cfg.NORM)
     self.head_conv_dim = self.cfg.HEAD_CONV_DIM
Example #13
 def __init__(self, cfg, **kwargs):
     """
     The following attributes are parsed from config:
         num_conv: the number of conv layers
         conv_dim: the dimension of the conv layers
         norm: normalization for the conv layers
     """
     super(MaskRCNNConvUpsampleHead, self).__init__(cfg, **kwargs)
     # Detectron2 does not use a normalizer by default; on our test data,
     # whether one is used makes no noticeable difference.
     self.normalizer_fn, self.norm_params = odt.get_norm(
         self.cfg.MODEL.ROI_MASK_HEAD.NORM, self.is_training)
     self.activation_fn = odt.get_activation_fn(
         self.cfg.MODEL.ROI_MASK_HEAD.ACTIVATION_FN)
Example #14
 def __init__(self, cfg, bottom_up, in_features, out_channels,
              top_block=None, fuse_type="sum", parent=None, *args, **kwargs):
     """
     Args:
         bottom_up (Backbone): module representing the bottom up subnetwork.
             Must be a subclass of :class:`Backbone`. The multi-scale feature
             maps generated by the bottom up network, and listed in `in_features`,
             are used to generate FPN levels.
         in_features (list[str]): names of the input feature maps coming
             from the backbone to which FPN is attached. For example, if the
             backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
             of these may be used; order must be from high to low resolution.
         out_channels (int): number of channels in the output feature maps.
         norm (str): the normalization to use.
         top_block (nn.Module or None): if provided, an extra operation will
             be performed on the output of the last (smallest resolution)
             FPN output, and the result will extend the result list. The top_block
             further downsamples the feature map. It must have an attribute
             "num_levels", meaning the number of extra FPN levels added by
             this block, and "in_feature", which is a string representing
             its input feature (e.g., p5).
         fuse_type (str): types for fusing the top down features and the lateral
             ones. It can be "sum" (default), which sums up element-wise; or "avg",
             which takes the element-wise mean of the two.
     """
     stage = int(in_features[-1][-1:])
      super(FPNV2, self).__init__(cfg, parent=parent, *args, **kwargs)
     assert isinstance(bottom_up, Backbone)
     def get_feature_name(x):
         p = x.find(":")
          if p < 0:
             return x
         else:
             return x[:p]
     # Place convs into top-down order (from low to high resolution)
     # to make the top-down computation in forward clearer.
     self.top_block = top_block
     self.in_features = [get_feature_name(x) for x in in_features]
     self.bottom_up = bottom_up
     self.out_channels = out_channels
     self._fuse_type = fuse_type
     self.scope = "FPN"
     self.use_depthwise = self.cfg.MODEL.FPN.USE_DEPTHWISE
      self.interpolate_op = tf.image.resize_nearest_neighbor
      self.stage = stage
      # Detectron2 does not use a normalizer by default, but on our test dataset
      # the network fails to converge without one.
      self.normalizer_fn, self.norm_params = odt.get_norm(
          self.cfg.MODEL.FPN.NORM, self.is_training)
      self.hook_before, self.hook_after = build_backbone_hook(cfg.MODEL.FPN,
                                                              parent=self)
      self.activation_fn = odt.get_activation_fn(
          self.cfg.MODEL.FPN.ACTIVATION_FN)
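
get_feature_name means an in_features entry may carry an optional colon-separated suffix that is stripped before the backbone lookup, so e.g. "res4" and "res4:tag" both resolve to "res4". For illustration (the ":tag" spelling is assumed, not taken from the repo's configs):

    def get_feature_name(x):
        p = x.find(":")
        return x if p < 0 else x[:p]

    assert get_feature_name("res4") == "res4"
    assert get_feature_name("res4:tag") == "res4"
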
Example #15
 def __init__(self, num_keypoints, cfg, parent, *args, **kwargs):
     '''
     :param cfg: only the child part of the config
     :param parent:
     :param args:
     :param kwargs:
     '''
     super().__init__(cfg, *args, parent=parent, **kwargs)
     self.num_keypoints = num_keypoints
     self.normalizer_fn, self.norm_params = odtk.get_norm(
         self.cfg.NORM, is_training=self.is_training)
     self.activation_fn = odtk.get_activation_fn(self.cfg.ACTIVATION_FN)
     self.norm_scope_name = odtk.get_norm_scope_name(self.cfg.NORM)
     self.pred_paf_maps_outputs = []
     self.pred_conf_maps_outputs = []
Example #16
 def __init__(self, cfg, bottom_up, in_features, parent=None, *args, **kwargs):
     """
     Args:
         bottom_up (Backbone): module representing the bottom up subnetwork.
             Must be a subclass of :class:`Backbone`. The multi-scale feature
             maps generated by the bottom up network, and listed in `in_features`,
             are used to generate DLA levels.
         in_features (list[str]): names of the input feature maps coming
             from the backbone to which DLA is attached. For example, if the
             backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
             of these may be used; order must be from high to low resolution.
         norm (str): the normalization to use.
     """
     stage = int(in_features[-1][-1:])
      super(DLA, self).__init__(cfg, parent=parent, *args, **kwargs)
     assert isinstance(bottom_up, Backbone)
     def get_feature_name(x):
         p = x.find(":")
          if p < 0:
             return x
         else:
             return x[:p]
     # Place convs into top-down order (from low to high resolution)
     # to make the top-down computation in forward clearer.
     self.in_features = [get_feature_name(x) for x in in_features]
     self.bottom_up = bottom_up
     self.scope = "DLA"
      self.interpolate_op = tf.image.resize_nearest_neighbor
      self.stage = stage
      # Detectron2 does not use a normalizer by default, but on our test dataset
      # the network fails to converge without one.
      self.normalizer_fn, self.norm_params = odt.get_norm(
          self.cfg.MODEL.DLA.NORM, self.is_training)
      self.hook_before, self.hook_after = build_backbone_hook(cfg.MODEL.DLA,
                                                              parent=self)
      self.activation_fn = odt.get_activation_fn(
          self.cfg.MODEL.DLA.ACTIVATION_FN)
      self.out_channels = [64, 64, 128, 256]
     self.conv_op = functools.partial(slim.conv2d,normalizer_fn=self.normalizer_fn,
                                      activation_fn=self.activation_fn,
                                      normalizer_params=self.norm_params)
     self.upsample_op = functools.partial(slim.conv2d_transpose,
                                          kernel_size=4,
                                          stride=2,
                                          activation_fn=self.activation_fn,
                                          normalizer_fn=self.normalizer_fn,
                                          normalizer_params=self.norm_params)
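
Pre-binding the normalizer, activation, and transpose-conv geometry with functools.partial, as DLA does above, keeps the forward pass free of repeated keyword plumbing. A self-contained sketch under TF1.x slim (channel counts are illustrative):

    import functools
    import tensorflow as tf
    import tensorflow.contrib.slim as slim

    norm_params = {"decay": 0.997, "is_training": True}
    conv_op = functools.partial(slim.conv2d,
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=norm_params,
                                activation_fn=tf.nn.relu)
    upsample_op = functools.partial(slim.conv2d_transpose,
                                    kernel_size=4, stride=2,
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=norm_params,
                                    activation_fn=tf.nn.relu)
    x = tf.ones([1, 16, 16, 64])
    y = conv_op(x, 128, [3, 3])   # -> [1, 16, 16, 128]
    z = upsample_op(y, 64)        # -> [1, 32, 32, 64], 2x spatial upsample
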
Example #17
 def __init__(self, cfg, parent, *args, **kwargs):
     super().__init__(cfg, parent, *args, **kwargs)
     self.anchor_generator = build_anchor_generator(cfg,
                                                    parent=self,
                                                    *args,
                                                    **kwargs)
     num_cell_anchors = self.anchor_generator.num_cell_anchors
      assert len(set(num_cell_anchors)) == 1, \
          "All levels must have the same number of cell anchors."
     self.num_cell_anchors = num_cell_anchors[0]
     self.box_dim = self.anchor_generator.box_dim
     self.normalizer_fn, self.norm_params = odtk.get_norm(
         self.cfg.MODEL.RPN.NORM, is_training=self.is_training)
     self.activation_fn = odtk.get_activation_fn(
         self.cfg.MODEL.RPN.ACTIVATION_FN)
     self.hook = build_hook_by_name(self.cfg.MODEL.RPN.HOOK,
                                    self.cfg,
                                    parent=self)
Example #18
    def __init__(self, num_anchors, cfg, parent, *args, **kwargs):
        '''
        :param num_anchors:
        :param cfg: only the child part of the config
        :param parent:
        :param args:
        :param kwargs:
        '''
        super().__init__(cfg, *args, parent=parent, **kwargs)
        assert (
            len(set(num_anchors)) == 1
        ), "Using different number of anchors between levels is not currently supported!"
        self.num_anchors = num_anchors[0]
        self.normalizer_fn, self.norm_params = odtk.get_norm(
            self.cfg.NORM, is_training=self.is_training)
        self.activation_fn = odtk.get_activation_fn(self.cfg.ACTIVATION_FN)
        self.norm_scope_name = odtk.get_norm_scope_name(self.cfg.NORM)
        self.logits_pre_outputs = []
        self.bbox_reg_pre_outputs = []
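
Both this head and the RPN in Example #17 insist on a single anchor count across levels because one weight-shared tower predicts num_anchors * box_dim regression outputs (and num_anchors logits) per location on every level. An illustration of the check, with made-up anchor counts:

    # e.g. 3 scales x 3 aspect ratios on every level -> 9 anchors per cell
    num_anchors = [9, 9, 9]
    assert len(set(num_anchors)) == 1, (
        "Using different number of anchors between levels "
        "is not currently supported!")
    outputs_per_location = num_anchors[0] * 4   # box_dim == 4 for xyxy boxes
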
Example #19
    def __init__(self,
                 cfg,
                 bottom_up,
                 in_features,
                 out_channels,
                 parent=None,
                 *args,
                 **kwargs):
        """
        Args:
            bottom_up (Backbone): module representing the bottom up subnetwork.
                Must be a subclass of :class:`Backbone`. The multi-scale feature
                maps generated by the bottom up network, and listed in `in_features`,
                are used to generate BIFPN levels.
            in_features (list[str]): names of the input feature maps coming
                from the backbone to which BIFPN is attached. For example, if the
                backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
                of these may be used; order must be from high to low resolution.
            out_channels (int): number of channels in the output feature maps.
        """
        stage = int(in_features[-1][1:])
        super(BIFPN, self).__init__(cfg, parent=parent, *args, **kwargs)
        assert isinstance(bottom_up, Backbone)

        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        self.in_features = in_features
        self.bottom_up = bottom_up
        self.out_channels = out_channels
        self.scope = "BIFPN"
        self.use_depthwise = False
        self.interpolate_op = tf.image.resize_nearest_neighbor
        self.stage = stage
        self.normalizer_fn, self.norm_params = odt.get_norm(
            self.cfg.MODEL.BIFPN.NORM, self.is_training)
        self.hook_before, self.hook_after = build_backbone_hook(
            cfg.MODEL.BIFPN, parent=self)
        self.activation_fn = odt.get_activation_fn(
            self.cfg.MODEL.BIFPN.ACTIVATION_FN)
Example #20
 def __init__(self, cfg, parent, *args, **kwargs):
     super().__init__(cfg, parent, *args, **kwargs)
     self.normalizer_fn, self.norm_params = odt.get_norm(
         self.cfg.MODEL.ROI_BOX_HEAD.NORM, self.is_training)
     self.activation_fn = odt.get_activation_fn(
         self.cfg.MODEL.ROI_BOX_HEAD.ACTIVATION_FN)