Example #1
    def __init__(
        self,
        num_input_features=4,
        use_norm=True,
        num_filters=[32, 128],
        with_distance=False,
        voxel_size=(0.2, 0.2, 4),
        name="VoxelFeatureExtractor",
    ):
        super(VoxelFeatureExtractor, self).__init__()
        self.name = name
        if use_norm:
            BatchNorm1d = change_default_args(eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            Linear = change_default_args(bias=False)(nn.Linear)
        else:
            BatchNorm1d = Empty
            Linear = change_default_args(bias=True)(nn.Linear)
        assert len(num_filters) == 2
        num_input_features += 3  # add mean features
        if with_distance:
            num_input_features += 1
        self._with_distance = with_distance
        self.vfe1 = VFELayer(num_input_features, num_filters[0], use_norm)
        self.vfe2 = VFELayer(num_filters[0], num_filters[1], use_norm)
        self.linear = Linear(num_filters[1], num_filters[1])
        # var_torch_init(self.linear.weight)
        # var_torch_init(self.linear.bias)
        self.norm = BatchNorm1d(num_filters[1])
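
All four examples lean on two helpers that are not shown here: change_default_args and Empty. A minimal sketch of how they can behave, assuming they match the torchplus-style utilities of the same name (injected defaults are only used when the call site does not pass the keyword itself):

import torch.nn as nn

def change_default_args(**new_defaults):
    # Rebind a layer class so that new_defaults become its default kwargs;
    # keywords given explicitly at the call site still take precedence.
    def wrapper(layer_class):
        class DefaultArgLayer(layer_class):
            def __init__(self, *args, **kwargs):
                merged = {**new_defaults, **kwargs}
                super().__init__(*args, **merged)
        return DefaultArgLayer
    return wrapper

class Empty(nn.Module):
    # Identity stand-in used when normalization is disabled.
    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, x):
        return x

With these in place, Linear = change_default_args(bias=False)(nn.Linear) yields a class whose instances default to bias=False, and self.norm = Empty(...) simply passes activations through.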
Example #2
    def __init__(
        self,
        num_input_features=4,
        use_norm=True,
        num_filters=[32, 128],
        with_distance=False,
        voxel_size=(0.2, 0.2, 0.3),
        name="VoxelFeatureExtractor",
    ):
        super(VoxelFeatureExtractorV2, self).__init__()
        self.name = name
        if use_norm:
            BatchNorm1d = change_default_args(eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            Linear = change_default_args(bias=False)(nn.Linear)
        else:
            BatchNorm1d = Empty
            Linear = change_default_args(bias=True)(nn.Linear)
        assert len(num_filters) > 0
        num_input_features += 3  # add mean features
        if with_distance:
            num_input_features += 1
        self._with_distance = with_distance

        num_filters = [num_input_features] + num_filters
        filters_pairs = [
            [num_filters[i], num_filters[i + 1]] for i in range(len(num_filters) - 1)
        ]
        self.vfe_layers = nn.ModuleList(
            [VFELayer(i, o, use_norm) for i, o in filters_pairs]
        )
        self.linear = Linear(num_filters[-1], num_filters[-1])
        # var_torch_init(self.linear.weight)
        # var_torch_init(self.linear.bias)
        self.norm = BatchNorm1d(num_filters[-1])
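
Compared to Example #1's fixed two-layer stack, the V2 constructor chains an arbitrary filter list into consecutive (in, out) pairs. A quick illustration of that expansion with the default arguments (and with_distance=False):

num_input_features = 4 + 3  # raw point features plus the appended mean features
num_filters = [num_input_features] + [32, 128]  # [7, 32, 128]
filters_pairs = [
    [num_filters[i], num_filters[i + 1]] for i in range(len(num_filters) - 1)
]
print(filters_pairs)  # [[7, 32], [32, 128]] -> VFELayer(7, 32), VFELayer(32, 128)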
Example #3
    def __init__(self, in_channels, out_channels, use_norm=True, name="vfe"):
        super(VFELayer, self).__init__()
        self.name = name
        # half of the output channels come from the pointwise features,
        # the other half from the voxel-wise aggregate concatenated in forward()
        self.units = out_channels // 2
        if use_norm:
            BatchNorm1d = change_default_args(eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            Linear = change_default_args(bias=False)(nn.Linear)
        else:
            BatchNorm1d = Empty
            Linear = change_default_args(bias=True)(nn.Linear)
        self.linear = Linear(in_channels, self.units)
        self.norm = BatchNorm1d(self.units)
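
Only the constructor of VFELayer is shown. In the VoxelNet-style VFE design this layer follows, the forward pass applies the pointwise linear + norm + ReLU, max-pools over the points of each voxel, and concatenates that aggregate back onto every point, which is why units is half of out_channels. A sketch of that forward, assuming inputs are laid out as (num_voxels, points_per_voxel, in_channels):

import torch
import torch.nn.functional as F

def forward(self, inputs):
    voxel_count = inputs.shape[1]
    x = self.linear(inputs)
    # BatchNorm1d normalizes dim 1, so move channels there and back
    x = self.norm(x.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
    pointwise = F.relu(x)  # (voxels, points, units)
    # voxel-wise aggregate: max over the points in each voxel
    aggregated = torch.max(pointwise, dim=1, keepdim=True)[0]
    # broadcast the aggregate back onto every point and concatenate
    repeated = aggregated.repeat(1, voxel_count, 1)
    return torch.cat([pointwise, repeated], dim=2)  # channels: 2 * units == out_channels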
Example #4
    def __init__(self,
                 use_norm=True,
                 num_class=2,
                 layer_nums=[3, 3, 3, 3],
                 layer_strides=[2, 2, 2, 2],
                 num_filters=[64, 128, 256, 512],
                 upsample_strides=[1, 2, 4, 4],
                 num_upsample_filters=[64, 128, 256, 256, 448],
                 num_input_filters=128,
                 encode_background_as_zeros=True,
                 use_groupnorm=False,
                 num_groups=32,
                 box_code_size=7,
                 name='det_net'):
        super(det_net, self).__init__()
        assert len(layer_nums) == 4
        assert len(layer_strides) == len(layer_nums)
        assert len(num_filters) == len(layer_nums)
        assert len(upsample_strides) == len(layer_nums)
        # print('use norm or not')
        # print(use_norm)
        if use_norm:
            if use_groupnorm:
                BatchNorm2d = change_default_args(num_groups=num_groups,
                                                  eps=1e-3)(GroupNorm)
            else:
                BatchNorm2d = change_default_args(eps=1e-3, momentum=0.01)(
                    nn.BatchNorm2d)
            Conv2d = change_default_args(bias=False)(nn.Conv2d)
            ConvTranspose2d = change_default_args(bias=False)(
                nn.ConvTranspose2d)
        else:
            BatchNorm2d = Empty
            Conv2d = change_default_args(bias=True)(nn.Conv2d)
            ConvTranspose2d = change_default_args(bias=True)(
                nn.ConvTranspose2d)

        dimension_feature_map = num_filters  # per-stage channel widths, e.g. [64, 128, 256, 512]
        dimension_concate = num_upsample_filters  # the last entry is the final output width

        # ===============================================================
        # block0
        # ==============================================================
        flag = 0
        middle_layers = []
        for i in range(layer_nums[flag]):
            middle_layers.append(
                Conv2d(dimension_feature_map[flag],
                       dimension_feature_map[flag],
                       3,
                       padding=1))
            middle_layers.append(BatchNorm2d(dimension_feature_map[flag]))
            middle_layers.append(nn.ReLU())
        self.block0 = Sequential(*middle_layers)
        middle_layers = []
        self.downsample0 = Sequential(
            Conv2d(dimension_feature_map[flag],
                   dimension_concate[flag],
                   3,
                   stride=2),
            BatchNorm2d(dimension_concate[flag]),
            nn.ReLU(),
        )

        # ===============================================================
        # block1
        # ==============================================================
        flag = 1
        middle_layers = []
        for i in range(layer_nums[flag]):
            middle_layers.append(
                Conv2d(dimension_feature_map[flag],
                       dimension_feature_map[flag],
                       3,
                       padding=1))
            middle_layers.append(BatchNorm2d(dimension_feature_map[flag]))
            middle_layers.append(nn.ReLU())
        self.block1 = Sequential(*middle_layers)

        # ===============================================================
        # block2
        # ==============================================================
        flag = 2
        middle_layers = []
        for i in range(layer_nums[flag]):
            middle_layers.append(
                Conv2d(dimension_feature_map[flag],
                       dimension_feature_map[flag],
                       3,
                       padding=1))
            middle_layers.append(BatchNorm2d(dimension_feature_map[flag]))
            middle_layers.append(nn.ReLU())
        self.block2 = Sequential(*middle_layers)

        # ===============================================================
        # block3
        # ==============================================================
        flag = 3
        middle_layers = []
        for i in range(layer_nums[flag]):
            middle_layers.append(
                Conv2d(dimension_feature_map[flag],
                       dimension_feature_map[flag],
                       3,
                       padding=1))
            middle_layers.append(BatchNorm2d(dimension_feature_map[flag]))
            middle_layers.append(nn.ReLU())
        self.block3 = Sequential(*middle_layers)
        self.upsample3 = Sequential(
            ConvTranspose2d(dimension_feature_map[flag],
                            dimension_concate[flag],
                            3,
                            stride=2),
            BatchNorm2d(dimension_concate[flag]),
            nn.ReLU(),
        )

        # ==============================================================
        # convolution after concatenating block3 and block2
        # ==============================================================
        middle_layers = []
        middle_layers.append(
            Conv2d((dimension_concate[3] + dimension_feature_map[2]),
                   dimension_concate[2],
                   3,
                   padding=1))
        middle_layers.append(BatchNorm2d(dimension_concate[2]))
        middle_layers.append(nn.ReLU())
        middle_layers.append(
            Conv2d(dimension_concate[2], dimension_concate[2], 3, padding=1))
        middle_layers.append(BatchNorm2d(dimension_concate[2]))
        middle_layers.append(nn.ReLU())
        # upsampling
        middle_layers.append(
            ConvTranspose2d(dimension_concate[2],
                            dimension_concate[2],
                            3,
                            stride=2))
        middle_layers.append(BatchNorm2d(dimension_concate[2]))
        middle_layers.append(nn.ReLU())

        self.upsample2_after_concate_fuse32 = Sequential(*middle_layers)

        # ==============================================================
        # convolution after concatenating block2, block1 and block0
        # ==============================================================
        middle_layers = []
        middle_layers.append(
            Conv2d((dimension_concate[0] + dimension_feature_map[1] +
                    dimension_concate[2]),
                   dimension_concate[4],
                   3,
                   padding=1))
        middle_layers.append(BatchNorm2d(dimension_concate[4]))
        middle_layers.append(nn.ReLU())
        middle_layers.append(
            Conv2d(dimension_concate[4], dimension_concate[4], 3, padding=1))
        middle_layers.append(BatchNorm2d(dimension_concate[4]))
        middle_layers.append(nn.ReLU())
        self.output_after_concate_fuse210 = Sequential(*middle_layers)
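
With the default arguments, the channel bookkeeping of the two fusion stages above works out as follows; this is just the arithmetic already encoded in the Conv2d input widths:

num_filters = [64, 128, 256, 512]                # dimension_feature_map
num_upsample_filters = [64, 128, 256, 256, 448]  # dimension_concate; last entry is the final output width

# fuse32: upsample3 output concatenated with block2 output, reduced to dimension_concate[2]
in_fuse32 = num_upsample_filters[3] + num_filters[2]  # 256 + 256 = 512

# fuse210: downsample0 output + block1 output + fuse32 output, reduced to dimension_concate[4]
in_fuse210 = num_upsample_filters[0] + num_filters[1] + num_upsample_filters[2]  # 64 + 128 + 256 = 448

print(in_fuse32, in_fuse210)  # 512 448

Note that the constructor only registers the per-stage modules; the transition layers between stages (block0 keeps 64 channels while block1 expects 128) and the forward wiring are defined elsewhere in the network.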