Example #1
    def forward(self, rcnn_dict):
        """
        :param rcnn_dict: input dict with RoIs and RPN features
        :return: ret_dict with RCNN classification/regression predictions
        """
        rois = rcnn_dict['rois']
        batch_size = rois.shape[0]
        if self.training:
            targets_dict = self.assign_targets(batch_size, rcnn_dict)
            rois = targets_dict['rois']  # (B, N, 7)
            rcnn_dict['roi_raw_scores'] = targets_dict['roi_raw_scores']
            rcnn_dict['roi_labels'] = targets_dict['roi_labels']

        # RoI aware pooling
        pooled_part_features, pooled_rpn_features = self.roiaware_pool(rois, rcnn_dict)
        batch_size_rcnn = pooled_part_features.shape[0]  # (B * N, out_x, out_y, out_z, 4)

        # transform to sparse tensors
        sparse_shape = np.array(pooled_part_features.shape[1:4], dtype=np.int32)
        sparse_idx = pooled_part_features.sum(dim=-1).nonzero()  # (non_empty_num, 4) ==> [bs_idx, x_idx, y_idx, z_idx]
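        # spconv needs a minimum number of active sites; if almost everything
        # is empty, pad with fake indices and invalidate those samples below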
        if sparse_idx.shape[0] < 3:
            sparse_idx = self.fake_sparse_idx(sparse_idx, batch_size_rcnn)
            if self.training:
                # these are invalid samples
                targets_dict['rcnn_cls_labels'].fill_(-1)
                targets_dict['reg_valid_mask'].fill_(-1)

        part_features = pooled_part_features[sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]]
        rpn_features = pooled_rpn_features[sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]]
        coords = sparse_idx.int()
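        # wrap the pooled features back into sparse tensors on the RoI-local grid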
        part_features = spconv.SparseConvTensor(part_features, coords, sparse_shape, batch_size_rcnn)
        rpn_features = spconv.SparseConvTensor(rpn_features, coords, sparse_shape, batch_size_rcnn)

        # forward rcnn network
        x_part = self.conv_part(part_features)
        x_rpn = self.conv_rpn(rpn_features)

        merged_feature = torch.cat((x_rpn.features, x_part.features), dim=1)  # (N, C)
        shared_feature = spconv.SparseConvTensor(merged_feature, coords, sparse_shape, batch_size_rcnn)

        x = self.conv_down(shared_feature)

        # densify and flatten each RoI volume to (B*N, C*D*H*W, 1) for the FC head
        shared_feature = x.dense().view(batch_size_rcnn, -1, 1)
        shared_feature = self.shared_fc_layer(shared_feature)
        rcnn_cls = self.cls_layer(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1)  # (B * N, 1 or 2)
        rcnn_reg = self.reg_layer(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1)  # (B * N, C)

        ret_dict = {
            'rcnn_cls': rcnn_cls,
            'rcnn_reg': rcnn_reg,
            'rois': rois,
            'roi_raw_scores': rcnn_dict['roi_raw_scores'],
            'roi_labels': rcnn_dict['roi_labels']
        }

        if self.training:
            ret_dict.update(targets_dict)

        self.forward_ret_dict = ret_dict
        return ret_dict
Example #2
    def forward(self, voxel_features, coors, batch_size):
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.middle_conv(ret)

        # three heads over the shared sparse features, densified and
        # concatenated along the channel dimension
        ret1 = self.out1(ret).dense()
        ret2 = self.out2(ret).dense()
        ret3 = self.out3(ret).dense()
        ret = torch.cat([ret1, ret2, ret3], dim=1)
        ret = self.ac(ret)

        # fold the depth axis into channels: (N, C, D, H, W) -> (N, C*D, H, W)
        N, C, D, H, W = ret.shape
        ret = ret.view(N, C * D, H, W)
        return ret
Example #3
    def forward_rpn(self, voxels, num_points, coordinates, batch_size,
                    voxel_centers, **kwargs):
        voxel_features = self.vfe(features=voxels,
                                  num_voxels=num_points,
                                  coords=coordinates)

        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=coordinates,
            spatial_shape=self.sparse_shape,
            batch_size=batch_size)

        backbone_ret_dict = self.rpn_net(input_sp_tensor,
                                         voxel_centers=voxel_centers)

        rpn_preds_dict = self.rpn_head(
            backbone_ret_dict['spatial_features'],
            gt_boxes=kwargs.get('gt_boxes', None))
        rpn_preds_dict.update(backbone_ret_dict)

        rpn_ret_dict = {
            'rpn_cls_preds': rpn_preds_dict['cls_preds'],
            'rpn_box_preds': rpn_preds_dict['box_preds'],
            'rpn_dir_cls_preds': rpn_preds_dict.get('dir_cls_preds', None),
            'anchors': rpn_preds_dict['anchors']
        }
        return rpn_ret_dict
Example #4
 def forward(self, x):
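     # batch size inferred from the last row (assumes rows are grouped by
     # batch, with the batch index in the last coordinate column)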
     batch_size = x[0][-1, -1] + 1
     if self.use_z_model:
         z = self.z_model(x)
         x = spconv.SparseConvTensor(x[1], x[0][:, self.permute_tensor],
                                     self.spatial_size, batch_size)
         x = self.model(x)
         x = torch.cat((x, z), dim=1)
     else:
         x = spconv.SparseConvTensor(x[1], x[0][:, self.permute_tensor],
                                     self.spatial_size, batch_size)
         x = self.model(x)
     return x
Example #5
def fuse_rgb_to_voxel(sp_tensor, voxel_size, offset, rgb_features, calib, downsample_factor=1, pixel_refinement=None,
                      feature_concat=True):
    """
    fetch the RGB features corresponding to each voxel and fuse them into the voxel
    features; the original voxel indices are kept and only the features are modified

    Args:
        sp_tensor : sparse tensor ***[spconv.SparseConvTensor]***
        voxel_size : size of each voxel, in (Z, Y, X) ***[python built-in list]***
        offset : lower boundary of the 0th voxel, in (Z, Y, X) ***[python built-in list]***
        rgb_features : rgb features, in (B, C, H, W) ***[torch.Tensor]***
        calib : calibration for projection from LiDAR to RGB, includes 'rect', 'Trv2c', and 'P2' ***[python built-in dict]***
        downsample_factor : downsample factor of the feature maps relative to the original resolution; needed to recover the correct projection
        pixel_refinement : in case of using cropped and scaled rgb images ***None or [python built-in list]***
        feature_concat : concat voxel and rgb features if True, otherwise conduct element-wise addition
    """

    assert isinstance(sp_tensor, spconv.SparseConvTensor), "sp_tensor must be spconv.SparseConvTensor"
    assert isinstance(voxel_size, list) and len(voxel_size) == 3, "invalid voxel_size"
    assert isinstance(offset, list) and len(offset) == 3, "invalid offset"
    assert isinstance(rgb_features, torch.Tensor) and rgb_features.dim() == 4, "invalid rgb_features"
    assert feature_concat or sp_tensor.features.shape[1] == rgb_features.shape[1], \
        "voxel and rgb feature channels must match when feature_concat == False"

    # modify rgb feature orders
    rgb_features = rgb_features.permute(0, 3, 2, 1)  # (B, C, H, W) -> (B, W, H, C)

    # voxel infos
    indices = sp_tensor.indices
    features = sp_tensor.features
    batch_size = sp_tensor.batch_size
    spatial_shape = sp_tensor.spatial_shape
    indice_dict = sp_tensor.indice_dict

    # project to RGB plane
    scaled_indices = indices * torch.tensor([1] + [downsample_factor] * 3, dtype=indices.dtype,
                                            device=indices.device)  # -> original size for correct projection
    coors = transform.indices_to_coors(scaled_indices, voxel_size, offset)
    pixels = transform.coors_to_pixels(coors, calib, pixel_refinement, 'none')
    pixels = pixels / torch.tensor([1] + [downsample_factor] * 2, dtype=pixels.dtype, device=pixels.device)
    pixels = torch.round(pixels).int()

    # filter outsiders
    index_keep = (pixels[:, 1] >= 0) & (pixels[:, 1] < rgb_features.shape[1]) & \
                 (pixels[:, 2] >= 0) & (pixels[:, 2] < rgb_features.shape[2])

    # get rgb features
    voxel_rgb_features = torch.zeros(features.shape[0], rgb_features.shape[-1], dtype=features.dtype,
                                     device=features.device)
    voxel_rgb_features[index_keep] = rgb_features[tuple(pixels[index_keep].long().transpose(0, 1))]

    # generate output features
    if feature_concat:
        features = torch.cat((features, voxel_rgb_features), dim=1)
    else:
        features = features + voxel_rgb_features

    res = spconv.SparseConvTensor(features, indices, spatial_shape, batch_size)
    res.indice_dict = indice_dict

    return res
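
The gather step above uses PyTorch advanced indexing to pick one RGB feature
vector per voxel from the (B, W, H, C) map. A self-contained sketch of just
that pattern, with made-up shapes:

    import torch

    # (B, W, H, C) feature map and per-voxel [batch_idx, w_idx, h_idx] pixels
    rgb = torch.arange(2 * 4 * 3 * 5, dtype=torch.float32).view(2, 4, 3, 5)
    pixels = torch.tensor([[0, 1, 2], [1, 3, 0]])
    keep = torch.tensor([True, True])  # in-bounds mask (all in-bounds here)

    out = torch.zeros(pixels.shape[0], rgb.shape[-1])
    out[keep] = rgb[tuple(pixels[keep].long().t())]  # one (C,) vector per voxel
    print(out.shape)  # torch.Size([2, 5])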
Example #6
    def forward(self, input):
        identity = spconv.SparseConvTensor(input.features, input.indices, input.spatial_shape, input.batch_size)

        output = self.conv_branch(input)
        output.features += self.i_branch(identity).features

        return output
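
Mutating .features in place, as above, matches spconv 1.x. In spconv 2.x the
feature tensor is swapped out via replace_feature, which returns a new tensor;
a sketch of the same residual block under that API (assuming spconv >= 2.0):

    def forward(self, input):
        identity = spconv.SparseConvTensor(input.features, input.indices,
                                           input.spatial_shape, input.batch_size)
        output = self.conv_branch(input)
        # replace_feature returns a new SparseConvTensor carrying the summed features
        output = output.replace_feature(output.features +
                                        self.i_branch(identity).features)
        return output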
Example #7
    def test_model_fn(batch, model, epoch):
        coords = batch['locs'].cuda()              # (N, 1 + 3), long, cuda, dimension 0 for batch_idx
        voxel_coords = batch['voxel_locs'].cuda()  # (M, 1 + 3), long, cuda
        p2v_map = batch['p2v_map'].cuda()          # (N), int, cuda
        v2p_map = batch['v2p_map'].cuda()          # (M, 1 + maxActive), int, cuda

        coords_float = batch['locs_float'].cuda()  # (N, 3), float32, cuda
        feats = batch['feats'].cuda()              # (N, C), float32, cuda

        batch_offsets = batch['offsets'].cuda()    # (B + 1), int, cuda

        spatial_shape = batch['spatial_shape']

        if cfg.use_coords:
            feats = torch.cat((feats, coords_float), 1)
        voxel_feats = pointgroup_ops.voxelization(feats, v2p_map, cfg.mode)  # (M, C), float, cuda

        input_ = spconv.SparseConvTensor(voxel_feats, voxel_coords.int(), spatial_shape, cfg.batch_size)

        ret = model(input_, p2v_map, coords_float, coords[:, 0].int(), batch_offsets, epoch)
        semantic_scores = ret['semantic_scores']  # (N, nClass) float32, cuda
        pt_offsets = ret['pt_offsets']            # (N, 3), float32, cuda
        if epoch > cfg.prepare_epochs:
            scores, proposals_idx, proposals_offset = ret['proposal_scores']

        ##### preds
        with torch.no_grad():
            preds = {}
            preds['semantic'] = semantic_scores
            preds['pt_offsets'] = pt_offsets
            if epoch > cfg.prepare_epochs:
                preds['score'] = scores
                preds['proposals'] = (proposals_idx, proposals_offset)

        return preds
Example #8
def _simple_concat_helper(spt1, spt2, feature_concat=False):
    """Concatenate (or add) two sparse tensors that share exactly the same
    active indices; their indice_dicts are merged.
    """
    assert torch.equal(
        spt1.indices, spt2.indices
    ), "indices of the input sp tensors should be exactly the same"
    assert feature_concat or spt1.features.shape[1] == spt2.features.shape[1], \
        "feature channels must match when feature_concat == False"
    assert all(s1 == s2 for s1, s2 in zip(spt1.spatial_shape, spt2.spatial_shape)), \
        "spatial shape of tensors must match"
    assert spt1.batch_size == spt2.batch_size, "batch size of tensors must match"

    indices = spt1.indices

    if feature_concat:
        features = torch.cat((spt1.features, spt2.features), dim=1)
    else:
        features = spt1.features + spt2.features

    spatial_shape = spt1.spatial_shape
    batch_size = spt1.batch_size

    res = spconv.SparseConvTensor(features, indices, spatial_shape, batch_size)
    res.indice_dict = spt1.indice_dict
    res.indice_dict.update(spt2.indice_dict)

    return res
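
A hypothetical call with toy shapes, showing both modes (all names and shapes
here are illustrative only):

    import torch
    import spconv

    idx = torch.tensor([[0, 0, 1, 2], [0, 3, 2, 1]], dtype=torch.int32)  # [b, z, y, x]
    a = spconv.SparseConvTensor(torch.randn(2, 8), idx, [4, 4, 4], 1)
    b = spconv.SparseConvTensor(torch.randn(2, 8), idx, [4, 4, 4], 1)

    merged = _simple_concat_helper(a, b, feature_concat=True)   # features: (2, 16)
    summed = _simple_concat_helper(a, b, feature_concat=False)  # features: (2, 8)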
Example #9
 def forward(self, input):
     # assumes all inputs share the same active indices, so their features
     # can simply be stacked along the channel dimension
     output = spconv.SparseConvTensor(
             torch.cat([i.features for i in input], 1), input[1].indices,
             input[1].spatial_shape, input[0].batch_size)
     output.indice_dict = input[1].indice_dict
     output.grid = input[1].grid
     return output
Example #10
    def forward(self, voxel_features, coors, batch_size):
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.downCntx(ret)

        # encoder: each resBlock returns (downsampled, skip) features
        down1c, down1b = self.resBlock2(ret)
        down2c, down2b = self.resBlock3(down1c)
        down3c, down3b = self.resBlock4(down2c)
        down4c, down4b = self.resBlock5(down3c)

        # decoder: upsample and fuse with the matching skip connection
        up4e = self.upBlock0(down4c, down4b)
        up3e = self.upBlock1(up4e, down3b)
        up2e = self.upBlock2(up3e, down2b)
        up1e = self.upBlock3(up2e, down1b)
        up0e = self.ReconNet(up1e)

        up0e.features = torch.cat((up0e.features, up1e.features), 1)
        logits = self.logits(up0e)  # (num_active_sites, 4)
        y = logits.dense()  # e.g. (4, 4, 480, 360, 32)
        return y
Example #11
    def forward(self, voxel_features, coors, batch_size):
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.downCntx(ret)

        # encoder: each resBlock returns (downsampled, skip) features
        down1c, down1b = self.resBlock2(ret)
        down2c, down2b = self.resBlock3(down1c)
        down3c, down3b = self.resBlock4(down2c)
        down4c, down4b = self.resBlock5(down3c)

        # decoder: upsample and fuse with the matching skip connection
        up4e = self.upBlock0(down4c, down4b)
        up3e = self.upBlock1(up4e, down3b)
        up2e = self.upBlock2(up3e, down2b)
        up1e = self.upBlock3(up2e, down1b)

        up0e = self.ReconNet(up1e)
        up0e.features = torch.cat((up0e.features, up1e.features), 1)

        return up0e, up0e
Example #12
    def forward(self, voxel_features, coors, batch_size, input_shape):

        # the generic form would be np.array(input_shape[::-1]) + [1, 0, 0]
        # (e.g. [41, 1600, 1408]); this model hard-codes the sparse shape instead
        sparse_shape = [41, 1440, 1440]

        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, sparse_shape,
                                      batch_size)

        x = self.conv_input(ret)

        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)

        ret = self.extra_conv(x_conv4)

        ret = ret.dense()

        N, C, D, H, W = ret.shape
        ret = ret.view(N, C * D, H, W)

        multi_scale_voxel_features = {
            'conv1': x_conv1,
            'conv2': x_conv2,
            'conv3': x_conv3,
            'conv4': x_conv4,
        }

        return ret, multi_scale_voxel_features
Example #13
    def forward(self, voxel_features, coors, batch_size):
        coors = coors.int()
        sx = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                     batch_size)
        b1 = self.block1(sx)
        b2 = self.block2(b1)
        b3 = self.block3(b2)
        up1 = self.deconv1(b1)
        up2 = self.deconv2(b2)
        up3 = self.deconv3(b3)
        x = torch.cat([up1, up2, up3], dim=1)
        x = self.post(x)
        box_preds = self.conv_box(x)
        cls_preds = self.conv_cls(x)
        box_preds = box_preds.permute(0, 2, 3, 1).contiguous()
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        ret_dict = {
            "box_preds": box_preds,
            "cls_preds": cls_preds,
        }
        if self._use_direction_classifier:
            dir_cls_preds = self.conv_dir_cls(x)
            dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
            ret_dict["dir_cls_preds"] = dir_cls_preds

        return ret_dict
Example #14
    def forward(self, input):
        assert isinstance(input, spconv.SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        if not self.subm:
            out_spatial_shape = ops.get_conv_output_size(
                spatial_shape, self.kernel_size, self.stride, self.padding,
                self.dilation)
        else:
            out_spatial_shape = spatial_shape
        outids, indice_pairs, indice_pairs_num = ops.get_indice_pairs(
            indices, batch_size, spatial_shape, self.kernel_size, self.stride,
            self.padding, self.dilation, 0, self.subm)

        out_features = Fsp.indice_maxpool(features, indice_pairs.to(device),
                                          indice_pairs_num.to(device),
                                          outids.shape[0])
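        # indice_pairs maps each output site to its contributing input sites;
        # indice_maxpool reduces over those pairs to produce out_features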
        out_tensor = spconv.SparseConvTensor(out_features, outids,
                                             out_spatial_shape, batch_size)
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
Example #15
 def forward(self, features, indices):
     indices = indices.int()
     x = spconv.SparseConvTensor(features, indices, [16, 64, 64], 2)
     x = self.net(x)
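     # self.net is assumed to end with a dense conversion (e.g. spconv.ToDense),
     # so x is an ordinary dense tensor by this point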
     x = x.view(2, -1)
     x = self.linear(x)
     return x
Example #16
    def forward(self, voxel_features, coors, batch_size, input_shape):
        # resulting sparse shape is e.g. [41, 1600, 1408]
        sparse_shape = np.array(input_shape[::-1]) + [1, 0, 0]
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, sparse_shape,
                                      batch_size)

        # run the four blocks sequentially and export a dense BEV map per scale;
        # feature_map0/1/2 reduce the depth to D = 5, 3, 2, and the final
        # scale is densified directly
        blocks = [self.block0, self.block1, self.block2, self.block3]
        feature_maps = [self.feature_map0, self.feature_map1,
                        self.feature_map2, None]

        output = {}
        for k in range(4):
            ret = blocks[k](ret)
            temp = (feature_maps[k](ret).dense()
                    if feature_maps[k] is not None else ret.dense())

            N, C, D, H, W = temp.shape
            output[k] = temp.view(N, C * D, H, W)

        return output
Example #17
 def get_dense_matrix(self,
                      data: torch.Tensor,
                      c: torch.Tensor,
                      to_numpy=True):
     batch_size = c[-1, -1] + 1
     if data.dim() == 1:
         data = spconv.SparseConvTensor(data.unsqueeze(1),
                                        c[:, self.permute_tensor],
                                        self.spatial_size, batch_size)
     else:
         data = spconv.SparseConvTensor(data, c[:, self.permute_tensor],
                                        self.spatial_size, batch_size)
     data = data.dense()
     if to_numpy:
         data = data.detach().cpu().numpy()
     return data
Example #18
    def rpn_forward(self, batch_data):
        voxels = batch_data["voxels"]
        num_points = batch_data["num_points"]
        coordinates = batch_data["coordinates"]
        batch_size = batch_data["batch_size"]

        with torch.set_grad_enabled(self.training):
            voxels_mean = self.vfe(voxels, num_points)
            batch_data["voxels_mean"] = voxels_mean
            spconv_tensor = spconv.SparseConvTensor(
                features=voxels_mean,
                indices=coordinates,
                spatial_shape=self.spatial_shapes,
                batch_size=batch_size)

            batch_data = self.conv_3d(spconv_tensor, batch_data)
            batch_data = self.conv_2d(batch_data)
            batch_data = self.detect_head(batch_data)
            if self.using_iou_head:
                batch_data = self.iou_head(batch_data)
            return batch_data
Example #19
 def forward(self, x):
     batch_size = x[0][-1, -1] + 1
     x = spconv.SparseConvTensor(x[1], x[0][:, self.permute_tensor],
                                 self.spatial_size, batch_size)
     x = self.model(x)
     x = x.view(-1, self.n_linear)
     x = self.linear(x)
     return x
Example #20
 def get_dense_matrix(self, data: torch.Tensor, c: torch.Tensor):
     batch_size = c[-1, -1] + 1
     data = spconv.SparseConvTensor(data.unsqueeze(1),
                                    c[:, self.permute_tensor],
                                    self.spatial_size, batch_size)
     data = data.dense()
     data = data.detach().cpu().numpy()
     return data
Example #21
 def _format_target_and_prediction(self, pred, coords, target, batch_size):
     if self.e_factor != 1.:
         target[:, 0] *= self.e_factor
     target_tensor = spconv.SparseConvTensor(
         target, coords[:, self.model.permute_tensor],
         self.model.spatial_size, batch_size)
     target_tensor = target_tensor.dense()
     # set output to 0 if there was no value for input
     return torch.where(target_tensor == 0, target_tensor, pred), target_tensor
Example #22
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict[
            'voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size)

        self.timer.start()
        x = self.conv_input(input_sp_tensor)
        self.timer.record("conv_input")

        x_conv1 = self.conv1(x)
        self.timer.record("conv1")
        x_conv2 = self.conv2(x_conv1)
        self.timer.record("conv2")
        x_conv3 = self.conv3(x_conv2)
        self.timer.record("conv3")
        x_conv4 = self.conv4(x_conv3)
        self.timer.record("conv4")

        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)

        self.timer.record("conv_out")
        self.timer.end()

        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })

        return batch_dict
Example #23
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        voxel_features, voxel_coords = batch_dict[
            'voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size)
        x = self.conv_input(input_sp_tensor)

        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)

        if self.conv_out is not None:
            # for detection head
            # [200, 176, 5] -> [200, 176, 2]
            out = self.conv_out(x_conv4)
            batch_dict['encoded_spconv_tensor'] = out
            batch_dict['encoded_spconv_tensor_stride'] = 8

        # for segmentation head
        # [400, 352, 11] <- [200, 176, 5]
        x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4,
                                      self.conv_up_m4, self.inv_conv4)
        # [800, 704, 21] <- [400, 352, 11]
        x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3,
                                      self.conv_up_m3, self.inv_conv3)
        # [1600, 1408, 41] <- [800, 704, 21]
        x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2,
                                      self.conv_up_m2, self.inv_conv2)
        # [1600, 1408, 41] <- [1600, 1408, 41]
        x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1,
                                      self.conv_up_m1, self.conv5)

        batch_dict['point_features'] = x_up1.features
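        # recover metric voxel-center coordinates and prepend the batch index
        # so point_coords is (N, 1 + 3)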
        point_coords = common_utils.get_voxel_centers(
            x_up1.indices[:, 1:],
            downsample_times=1,
            voxel_size=self.voxel_size,
            point_cloud_range=self.point_cloud_range)
        batch_dict['point_coords'] = torch.cat(
            (x_up1.indices[:, 0:1].float(), point_coords), dim=1)
        return batch_dict
Example #24
    def forward(self, voxel_features, coors, batch_size):
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.middle_conv(ret)
        ret = ret.dense()

        N, C, D, H, W = ret.shape
        ret = ret.view(N, C * D, H, W)
        return ret
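
The final reshape above recurs throughout these backbones: after the last
sparse stage the tensor is densified and the depth axis is folded into the
channel axis to obtain a 2D BEV feature map. A tiny self-contained
illustration (the shapes are made up):

    import torch

    x = torch.randn(2, 64, 2, 200, 176)   # (N, C, D, H, W), e.g. after .dense()
    N, C, D, H, W = x.shape
    bev = x.view(N, C * D, H, W)          # fold depth into channels
    print(bev.shape)                      # torch.Size([2, 128, 200, 176])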
Example #25
 def forward(self, features, coordinates, batch_size):
     x0 = spconv.SparseConvTensor(
         features, coordinates.int(), self.grid_shape, batch_size,
     )
     x1 = self.blocks[0](x0)
     x2 = self.blocks[1](x1)
     x3 = self.blocks[2](x2)
     x4 = self.blocks[3](x3)
     x = [self.to_global(i, x) for i, x in enumerate([x1, x2, x3, x4])]
     return x
Example #26
 def forward(self, features, coordinates, batch_size):
     x0 = spconv.SparseConvTensor(features, coordinates.int(),
                                  self.grid_shape, batch_size)
     x1 = self.block1(x0)
     x2 = self.block2(x1)
     x3 = self.block3(x2)
     x4 = self.block4(x3)
     args = zip(self.cfg.STRIDES, (x0, x1, x2, x3))
     x = list(itertools.starmap(self.to_global, args))
     return x, x4
Example #27
 def forward(self, voxel_features, coors, batch_size):
     coors = coors.int()
     x0 = spconv.SparseConvTensor(voxel_features, coors, self.grid_shape,
                                  batch_size)
     x1 = self.block1(x0)
     x2 = self.block2(x1)
     x3 = self.block3(x2)
     x4 = self.block4(x3)
     args = zip(self.cfg.strides, (x0, x1, x2, x3))
     x = list(itertools.starmap(self.to_global, args))
     return x, x4
Example #28
 def forward(self, x):
     in0 = self.input0(x)
     # reorder SparseConvNet locations to [batch, z, y, x] for spconv
     coors = in0.get_spatial_locations().int()[:, [3, 2, 1, 0]].cuda()
     # dense_shape and data are assumed to be defined in the enclosing scope
     ret = spconv.SparseConvTensor(in0.features, coors, dense_shape,
                                   data.batch_size)
     x = self.sparseModel(ret)
     # hand the features back to SparseConvNet, reusing the input metadata
     temp0 = scn.SparseConvNetTensor(x.features, in0.metadata)
     x = self.out0(temp0)
     x = self.linear(x)
     return x
Example #29
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict[
            'voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size)

        x = self.conv_input(input_sp_tensor)

        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)

        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)

        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })

        return batch_dict
Example #30
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict[
            'voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        img_idx_set = batch_dict['frame_id']
        calib_path = '/media/ddd/data2/3d_MOTS_Ex./Code/OpenPCDet/data/kitti/training/calib/'
        img_path = '/media/ddd/data2/3d_MOTS_Ex./Code/OpenPCDet/data/kitti/training/image_2/'
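        # "paint" image semantics onto the point features; the helper runs in
        # NumPy, hence the conversion back to CUDA tensors below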
        voxel_features, voxel_coords = painted_point_cloud(
            calib_path, img_path, voxel_features, voxel_coords, img_idx_set)
        voxel_features = torch.from_numpy(voxel_features).float().cuda()
        voxel_coords = torch.from_numpy(voxel_coords).float().cuda()
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size)

        x = self.conv_input(input_sp_tensor)

        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)

        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)

        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })

        return batch_dict