Example #1
    def forward(self, pointcloud: torch.cuda.FloatTensor, return_all=False):
        pointcloud = pointcloud.transpose(1, 2).contiguous()
        li_xyz, li_features = self._break_up_pc(pointcloud)

        # features are (B, C, N) after the transpose above
        l_xyz, l_features = [], []
        for i in range(len(self.SA_modules)):
            # PointNet++ SA module, then a post-SA MLP
            li_xyz, li_features = self.SA_modules[i](li_xyz, li_features)
            li_features_post = self.postSA_mlp[i](li_features)
            l_xyz.append(li_xyz)
            l_features.append(li_features_post)

        # Max-pool each SA level's features over the point dimension and
        # concatenate into a single global code of shape (B, sum of level
        # channels); the initial None features were never appended to
        # l_features, so every entry here is valid
        global_code = torch.cat(
            [torch.max(l_feat, dim=-1)[0] for l_feat in l_features], dim=1)

        l_features.append(global_code)
        l_xyz.append(None)
        if return_all:
            return l_features, l_xyz
        else:
            return global_code
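The global code is simply each SA level's features max-pooled over the point dimension and concatenated. A minimal sketch with dummy per-level feature maps (shapes are illustrative):

import torch

# Two dummy SA levels, each (B, C_i, N_i)
l_features = [torch.randn(2, 64, 1024), torch.randn(2, 128, 256)]

# Max over points, then concatenate channels: (B, 64 + 128)
global_code = torch.cat(
    [torch.max(f, dim=-1)[0] for f in l_features], dim=1)
assert global_code.shape == (2, 192)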
Example #2
def copy_torch2glumpy(dst: gloo.VertexBuffer, src: torch.cuda.FloatTensor):
    # Wrap the torch tensor's device memory as a pycuda GPUArray (zero-copy)
    src = gp.GPUArray(src.shape, np.float32, gpudata=src.data_ptr())

    # Copy from pycuda into the glumpy vertex buffer
    copy_pycuda2glumpy(dst, src)
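This helper assumes `gp` is `pycuda.gpuarray` and that a CUDA context usable by both torch and pycuda is active. A minimal round-trip sketch of the zero-copy wrap (context sharing between the two libraries is an assumption, not a guarantee):

import numpy as np
import torch
import pycuda.autoinit  # noqa: F401 -- creates a CUDA context
import pycuda.gpuarray as gp

t = torch.arange(6, dtype=torch.float32, device='cuda')
# Wrap the tensor's device pointer without copying any data
arr = gp.GPUArray(tuple(t.shape), np.float32, gpudata=t.data_ptr())
assert np.allclose(arr.get(), t.cpu().numpy())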
Example #3
def acc_segmentation(input: torch.cuda.FloatTensor,
                     targs: torch.cuda.IntTensor,
                     rm_bknd=True):
    """The intent of this metric is to have a metric where it's easy to understand the implications of the value it outputs since the output value of mAP IoU is more difficult to understand intuitively."""
    _n = targs.shape[0]
    input = input.argmax(dim=1).view(_n, -1)
    targs = targs.view(_n, -1)
    return (input == targs).float().mean()
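A usage sketch with dummy logits and labels (CPU tensors behave the same):

import torch

logits = torch.randn(4, 3, 8, 8)            # (B, num_classes, H, W)
labels = torch.randint(0, 3, (4, 8, 8))     # (B, H, W)
acc = acc_segmentation(logits, labels)      # scalar tensor in [0, 1]
print(float(acc))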
Example #4
def copy_torch2RegisteredBuffer(dst: pycuda.gl.RegisteredBuffer,
                                src: torch.cuda.FloatTensor):
    # Wrap the torch tensor's device memory as a pycuda GPUArray (zero-copy)
    src = gp.GPUArray(src.shape, np.float32, gpudata=src.data_ptr())

    # Copy from pycuda into the registered OpenGL buffer
    copy_pycuda2RegisteredBuffer(dst, src)
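`copy_pycuda2RegisteredBuffer` itself is not shown in this example. A plausible sketch using pycuda's map/unmap API (the size clamp and missing error handling are simplifications):

import pycuda.driver as cuda

def copy_pycuda2RegisteredBuffer(dst, src):
    # Sketch only; the original helper is not part of this example
    # Map the registered OpenGL buffer to obtain a raw device pointer
    mapping = dst.map()
    ptr, size = mapping.device_ptr_and_size()
    # Device-to-device copy of the GPUArray contents into the GL buffer
    cuda.memcpy_dtod(ptr, src.gpudata, min(size, src.nbytes))
    mapping.unmap()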
Example #5
    def forward(self, pointcloud: torch.cuda.FloatTensor):
        r"""
            Forward pass of the network
            Parameters
            ----------
            pointcloud: Variable(torch.cuda.FloatTensor)
                (B, N, 3 + input_channels) tensor
                Point cloud to run predictions on.
                Each point in the point cloud MUST
                be formatted as (x, y, z, features...)
        """

        pointcloud = pointcloud.transpose(1, 2)
        xyz, features = self._break_up_pc(pointcloud)
        cls = torch.zeros(pointcloud.size(0), 16).cuda()

        l_xyz, l_features = [xyz], [features]
        for i in range(len(self.SA_modules)):
            if i < 5:
                li_xyz, li_features = self.SA_modules[i](l_xyz[i],
                                                         l_features[i])
                if li_xyz is not None:
                    # Randomly permute points, keeping features aligned
                    random_index = np.arange(li_xyz.size()[1])
                    np.random.shuffle(random_index)
                    li_xyz = li_xyz[:, random_index, :]
                    li_features = li_features[:, :, random_index]
                l_xyz.append(li_xyz)
                l_features.append(li_features)

        _, global_out2_feat = self.SA_modules[5](l_xyz[3], l_features[3])

        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 2] = self.FP_modules[i](l_xyz[i - 2], l_xyz[i - 1],
                                                   l_features[i - 2],
                                                   l_features[i - 1])

        cls = cls.view(-1, 16, 1).repeat(
            1, 1, l_features[0].size()[2])  # object class one-hot-vector
        l_features[0] = torch.cat(
            (l_features[0], l_features[-1].repeat(1, 1,
                                                  l_features[0].size()[2]),
             global_out2_feat.repeat(1, 1, l_features[0].size()[2]), cls), 1)
        return self.FC_layer(l_features[0]).transpose(1, 2).contiguous()
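The shuffle above builds the permutation in NumPy and indexes CUDA tensors with it; an equivalent that stays on-device uses torch.randperm (a sketch, not the author's code):

import torch

li_xyz = torch.randn(2, 512, 3)         # (B, N, 3)
li_features = torch.randn(2, 128, 512)  # (B, C, N)
perm = torch.randperm(li_xyz.size(1), device=li_xyz.device)
li_xyz = li_xyz[:, perm, :]
li_features = li_features[:, :, perm]   # same permutation keeps them aligned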
Example #6
    def forward(self, pc: torch.cuda.FloatTensor,
                normal: torch.cuda.FloatTensor):
        """
        pc shape: (B, N, 3 + input_channels)
        formatted as (x, y, z, features...)
        """
        xyz = pc[..., 0:3].contiguous()
        if pc.size(-1) > 3:
            features = pc[..., 3:].transpose(1, 2).contiguous()
        else:
            features = None
        for module in self.SA_modules:
            xyz, normal, features = module(xyz, normal, features)
        return self.FC_layer(features.squeeze(-1))
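The split at the top of this forward is the usual coordinates/features breakup; a standalone demonstration with a dummy point cloud:

import torch

pc = torch.randn(2, 1024, 3 + 6)    # (B, N, 3 + input_channels)
xyz = pc[..., 0:3].contiguous()     # (B, N, 3) coordinates
features = (pc[..., 3:].transpose(1, 2).contiguous()
            if pc.size(-1) > 3 else None)  # (B, C, N) features
assert xyz.shape == (2, 1024, 3) and features.shape == (2, 6, 1024)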
Example #7
    def forward(self, pointcloud: torch.cuda.FloatTensor):
        r"""
            Forward pass of the network

            Parameters
            ----------
            pointcloud: Variable(torch.cuda.FloatTensor)
                (B, N, 3 + input_channels) tensor
                Point cloud to run predictions on.
                Each point in the point cloud MUST
                be formatted as (x, y, z, features...)
        """
        pointcloud = pointcloud.transpose(1, 2)
        xyz, features = self._break_up_pc(pointcloud)
        for module in self.SA_modules:
            xyz, features = module(xyz, features)
        logits = self.FC_layer(features.squeeze(-1))
        return logits, None
Example #8
    def forward(self, pointcloud: torch.cuda.FloatTensor):
        r"""
            Forward pass of the network
            Parameters
            ----------
            pointcloud: Variable(torch.cuda.FloatTensor)
                (B, N, 3 + input_channels) tensor
                Point cloud to run predictions on.
                Each point in the point cloud MUST
                be formatted as (x, y, z, features...)
        """
        batch_size = pointcloud.size(0)
        xyz, features = self._break_up_pc(pointcloud)
        l_xyz, l_features = [xyz], [features]
        combined = [xyz]
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
            combined.append(li_xyz)
        # Concatenate the per-level xyz outputs along the point dimension
        combined = torch.cat(combined, dim=1)
        combined = self.conv(combined)
        c1 = F.adaptive_max_pool1d(combined, 1).view(batch_size, -1)
        c2 = F.adaptive_avg_pool1d(combined, 1).view(batch_size, -1)
        combined = torch.cat((c1, c2), 1)

        combined = self.dp1(
            F.leaky_relu(self.bn1(self.linear1(combined)), negative_slope=0.2))
        combined = self.dp2(
            F.leaky_relu(self.bn2(self.linear2(combined)), negative_slope=0.2))
        combined = self.linear3(combined)
        return combined
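The head combines global max- and average-pooled descriptors before the linear layers; that step in isolation:

import torch
import torch.nn.functional as F

combined = torch.randn(4, 256, 100)                  # (B, C, L)
c1 = F.adaptive_max_pool1d(combined, 1).view(4, -1)  # (B, C)
c2 = F.adaptive_avg_pool1d(combined, 1).view(4, -1)  # (B, C)
combined = torch.cat((c1, c2), 1)                    # (B, 2C)
assert combined.shape == (4, 512)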
Example #9
    def forward(self,
                pointcloud: torch.cuda.FloatTensor,
                center_points: torch.cuda.FloatTensor,
                cue_points: torch.cuda.FloatTensor,
                matching: torch.cuda.FloatTensor,
                matching_sem: torch.cuda.FloatTensor,
                floor_height: torch.cuda.FloatTensor,
                end_points=None,
                mode=''):
        r"""
            Forward pass of the network

            Parameters
            ----------
            pointcloud: Variable(torch.cuda.FloatTensor)
                (B, N, 3 + input_feature_dim) tensor
                Point cloud to run predictions on.
                Each point in the point cloud MUST
                be formatted as (x, y, z, features...)

            Returns
            ----------
            end_points: {XXX_xyz, XXX_features, XXX_inds}
                XXX_xyz: float32 Tensor of shape (B,K,3)
                XXX_features: float32 Tensor of shape (B,K,D)
                XXX_inds: int64 Tensor of shape (B,K) with values in [0,N-1]
        """
        if not end_points: end_points = {}
        batch_size = pointcloud.shape[0]

        xyz, features = self._break_up_pc(pointcloud)

        end_points['sa0_xyz' + mode] = xyz
        end_points['sa0_features' + mode] = features

        #center_points = end_points['center_points']
        #cue_points = end_points['cue_points']#.view(batch_size, -1, 3).float()

        obj_points = torch.cat((center_points, cue_points), dim=1)
        #center_matching = torch.max(matching.view(batch_size, 18, 256), dim=1)[0]
        center_matching = end_points['match_center']

        ### Need to change to config sem later
        center_sem = torch.cuda.FloatTensor(batch_size, 256, 18).zero_()
        # scatter_ requires an int64 index, hence the .long() cast
        center_sem.scatter_(2, matching_sem[:, :256].long().unsqueeze(-1),
                            1)  # src == 1, so this is *one-hot* (B,K,num_size_cluster)
        cue_sem = torch.cuda.FloatTensor(batch_size, 256 * 18, 18).zero_()
        cue_sem.scatter_(2, matching_sem[:, 256:].long().unsqueeze(-1),
                         1)  # src == 1, so this is *one-hot* (B,K,num_size_cluster)

        center_feature = torch.cat(
            ((center_points[:, :, 2] -
              floor_height.unsqueeze(-1)).unsqueeze(1),
             center_matching.unsqueeze(1), center_sem.transpose(
                 2, 1).contiguous()),
            dim=1)  ### Need to make the floor height an option
        cue_feature = torch.cat(
            ((cue_points[:, :, 2] - floor_height.unsqueeze(-1)).unsqueeze(1),
             matching.unsqueeze(1), cue_sem.transpose(2, 1).contiguous()),
            dim=1)
        other_features = torch.cat(
            (features,
             torch.cuda.FloatTensor(batch_size, 19,
                                    features.shape[-1]).zero_()),
            dim=1)

        features = torch.cat((center_feature, cue_feature, other_features),
                             dim=2)
        #features = torch.cat((cue_feature, other_features), dim=2)

        # --------- 4 SET ABSTRACTION LAYERS ---------
        #xyz, features, fps_inds = self.sa1(obj_points, xyz, features, inds=end_points['sa1_inds'])
        xyz, features, fps_inds = self.sa1(obj_points, xyz, features)
        end_points['sa1_inds' + mode] = fps_inds
        end_points['sa1_xyz' + mode] = xyz
        end_points['sa1_features' + mode] = features

        #xyz, features, fps_inds = self.sa2(xyz[:,:256*18,:].contiguous(), xyz[:,256*18:,:].contiguous(), features) # this fps_inds is just 0,1,...,1023
        xyz, features, fps_inds = self.sa2(
            xyz[:, :256 * 19, :].contiguous(),
            xyz[:, 256 * 19:, :].contiguous(),
            features,
            inds=end_points['sa2_inds'])  # this fps_inds is just 0,1,...,1023
        end_points['sa2_inds' + mode] = fps_inds
        end_points['sa2_xyz' + mode] = xyz
        end_points['sa2_features' + mode] = features

        ### Append the surface and line info here
        '''
        center_ind = torch.cuda.FloatTensor(batch_size, 4, 256).zero_()
        center_ind[:,0,:] = 1.0
        surfacez_ind = torch.cuda.FloatTensor(batch_size, 4, 256*2).zero_()
        surfacez_ind[:,1,:] = 1.0
        surfacexy_ind = torch.cuda.FloatTensor(batch_size, 4, 256*4).zero_()
        surfacexy_ind[:,2,:] = 1.0
        line_ind = torch.cuda.FloatTensor(batch_size, 4, 256*12).zero_()
        line_ind[:,3,:] = 1.0
        cue_ind = torch.cat((torch.cuda.FloatTensor(batch_size, 1, 1024).zero_(), end_points["pred_z_ind"].unsqueeze(1), end_points["pred_xy_ind"].unsqueeze(1), end_points["pred_line_ind"].unsqueeze(1)), dim=1)
        ind_feature = torch.cat((center_ind, surfacez_ind, surfacexy_ind, line_ind, cue_ind), dim=2)
        features = torch.cat((features, ind_feature), dim=1)
        '''
        #xyz, features, fps_inds = self.sa3(xyz[:,:256*18,:].contiguous(), xyz[:,256*18:,:].contiguous(), features) # this fps_inds is just 0,1,...,1023
        xyz, features, fps_inds = self.sa3(
            xyz[:, :256 * 19, :].contiguous(),
            xyz[:, 256 * 19:, :].contiguous(),
            features,
            inds=end_points['sa3_inds'])  # this fps_inds is just 0,1,...,1023
        end_points['sa3_inds' + mode] = fps_inds
        end_points['sa3_xyz' + mode] = xyz
        end_points['sa3_features' + mode] = features

        #xyz, features, fps_inds = self.sa4(xyz[:,:256*18,:].contiguous(), xyz[:,256*18:,:].contiguous(), features) # this fps_inds is just 0,1,...,1023
        xyz, features, fps_inds = self.sa4(
            xyz[:, :256 * 19, :].contiguous(),
            xyz[:, 256 * 19:, :].contiguous(),
            features,
            inds=end_points['sa4_inds'])  # this fps_inds is just 0,1,...,1023
        end_points['sa4_inds' + mode] = fps_inds
        end_points['sa4_xyz' + mode] = xyz
        end_points['sa4_features' + mode] = features

        # --------- 2 FEATURE UPSAMPLING LAYERS --------
        #features = self.fp1(end_points['sa3_xyz'+mode], end_points['sa4_xyz'+mode], end_points['sa3_features'+mode], end_points['sa4_features'+mode])
        #features = self.fp2(end_points['sa2_xyz'+mode], end_points['sa3_xyz'+mode], end_points['sa2_features'+mode], features)
        features = self.fp1(
            end_points['sa3_xyz' + mode],
            end_points['sa4_xyz' + mode][:, 256 * 19:, :].contiguous(),
            end_points['sa3_features' + mode],
            end_points['sa4_features' + mode][:, :, 256 * 19:].contiguous())
        features = self.fp2(
            end_points['sa2_xyz' + mode][:, :256 * 19, :].contiguous(),
            end_points['sa3_xyz' + mode][:, 256 * 19:, :].contiguous(),
            end_points['sa2_features' + mode][:, :, :256 * 19].contiguous(),
            features[:, :, 256 * 19:].contiguous())
        end_points['fp2_features' + mode] = features
        end_points['fp2_xyz' + mode] = end_points[
            'sa2_xyz' + mode][:, :256 * 19, :].contiguous()
        num_seed = end_points['fp2_xyz' + mode].shape[1]
        # indices among the entire input point cloud
        end_points['fp2_inds' + mode] = end_points['sa1_inds' + mode][:, 0:num_seed]
        return end_points
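The scatter_-based one-hot encoding used for center_sem and cue_sem, shown in isolation (note that scatter_ needs an int64 index):

import torch

B, K, C = 2, 5, 18
sem = torch.randint(0, C, (B, K))                 # class id per proposal
one_hot = torch.zeros(B, K, C)
one_hot.scatter_(2, sem.long().unsqueeze(-1), 1)  # exactly one 1 per (b, k)
assert one_hot.sum().item() == B * K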
Example #10
def mAP_IoU(preds: torch.cuda.FloatTensor,
            targs: torch.cuda.IntTensor,
            thresholds: list,
            num_tot_classes: int,
            bknd_idx: int = None,
            max_workers: int = 1,
            rm_bknd=True,
            fast_hist2d: bool = False,
            sparsify: bool = False):
    """Segmentation: calculates the IoU-based precision at a specified IoU threshold (t)
  { TP(t) / ( TP(t) + FP(t) + FN(t) ) } for a single image."""

    #----------------------------------------------------------------------------#

    _fmin = 1e-9

    if rm_bknd and not isinstance(bknd_idx, int):
        raise Exception(
            'If removing the background is desired, please specify the index '
            'of the background in preds.')

    # The following catches whether there will be division by 0 further below:
    if not (min(thresholds) > _fmin and max(thresholds) < 1 - _fmin):
        raise Exception(
            'Minimum specified threshold must be > 1e-9 and maximum specified threshold must be < (1 - 1e-9).'
        )

    if bknd_idx != 0:
        warnings.warn(
            'Warning: To maintain consistency across segmentation models, '
            'please make `bknd_idx` equal to index 0.')
        first_idx = False
    else:
        first_idx = True

    #----------------------------------------------------------------------------#

    # TODO: Implement fast np.bincount/np.unique/etc. to get num classes for targs and preds
    if fast_hist2d:
        raise NotImplementedError('fast_hist2d is not implemented yet.')
    num_classes_IU = (num_tot_classes, num_tot_classes)

    #----------------------------------------------------------------------------#

    _preds_flat_np = preds.view(
        -1,
        preds.size()[-2] * preds.size()[-1]).cpu().numpy().astype(np.int32)
    _targs_flat_np = targs.view(
        -1,
        targs.size()[-2] * targs.size()[-1]).cpu().numpy().astype(np.int32)

    #----------------------------------------------------------------------------#

    # TODO: Implement CUDA (GPU Parallel Computing) version of histogram2d
    histogram2d_async = partial( np.histogram2d, bins = num_classes_IU, \
      range = np.array( [[-0.5, num_classes_IU[0] - .5], [-0.5, num_classes_IU[1] - .5]] ) )
    intersections = parallel_map_func_re(histogram2d_async,
                                         np.stack((
                                             _targs_flat_np,
                                             _preds_flat_np,
                                         ),
                                                  axis=0),
                                         max_workers=max_workers)

    intersections = torch.stack( tuple( \
      torch.from_numpy( intersection[0] ).to( device = DEFAULT_DEVICE, dtype = torch.float64 ) for \
      intersection in intersections ) )

    #----------------------------------------------------------------------------#

    # TODO: Implement CUDA (GPU Parallel Computing) version of histogram
    # Calculate Union. First calculate areas (needed for finding the union between all objects).
    # Shape : (true_objects, pred_objects)
    bincount1d_preds_async = partial(np.bincount, minlength=num_classes_IU[0])
    area_preds = parallel_map_func_re(bincount1d_preds_async, [_preds_flat_np],
                                      max_workers=max_workers)
    area_preds = torch.stack( tuple( \
      torch.from_numpy( area_pred ).to( device = DEFAULT_DEVICE, dtype = torch.float64 ) for \
      area_pred in area_preds ) )

    bincount1d_trues_async = partial(np.bincount, minlength=num_classes_IU[0])
    area_trues = parallel_map_func_re(bincount1d_trues_async, [_targs_flat_np],
                                      max_workers=max_workers)
    area_trues = torch.stack( tuple( \
      torch.from_numpy( area_true ).to( device = DEFAULT_DEVICE, dtype = torch.float64 ) for \
      area_true in area_trues ) )

    area_preds.unsqueeze_(-1)
    area_trues.unsqueeze_(-2)

    unions = area_trues + area_preds - intersections  # subtract intersection to remove double-countings

    #----------------------------------------------------------------------------#

    ## Exclude background from the analysis if rm_bknd == True
    if rm_bknd:
        if first_idx:
            intersections = intersections[:, 1:, 1:]
            unions = unions[:, 1:, 1:]
        else:
            intersections = torch.cat((
                intersections[:, :, :bknd_idx],
                intersections[:, :, bknd_idx + 1:],
            ), dim=2)
            intersections = torch.cat((
                intersections[:, :bknd_idx, :],
                intersections[:, bknd_idx + 1:, :],
            ), dim=1)
            unions = torch.cat((
                unions[:, :, :bknd_idx],
                unions[:, :, bknd_idx + 1:],
            ), dim=2)
            unions = torch.cat((
                unions[:, :bknd_idx, :],
                unions[:, bknd_idx + 1:, :],
            ), dim=1)
        unions[unions == 0] = _fmin

    # Calculate the Intersection over Union (IoU) for all preds labels-targs labels pairs
    iou = torch.div(intersections, unions)
    if sparsify:
        # torch.sparse.FloatTensor cannot be built directly from a dense
        # tensor; convert via to_sparse() instead
        iou = iou.to_sparse()

    # Calculate the inverse Identity Matrix (needed to compute FP and FN). Shape : (num_tot_classes, num_tot_classes)
    _inv_eye = torch.abs( \
      torch.eye( iou.shape[-2], iou.shape[-1], dtype = torch.int8 ) - 1 ) \
      .to( device = DEFAULT_DEVICE, dtype = torch.uint8 )
    _inv_eye.unsqueeze_(0)

    # Loop over IoU thresholds
    prec = torch.empty(targs.shape[0],
                       len(thresholds),
                       dtype=torch.float64,
                       device=DEFAULT_DEVICE)
    for n, t in enumerate(thresholds):

        tp, fp, fn = precision_at(iou, t, _inv_eye)

        _prec_n = tp / (tp + fp + fn)
        _prec_n[_prec_n != _prec_n] = 0.  # NaN (0/0) means no instances: count as 0
        prec[:, n] = _prec_n

    return prec.mean()
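The intersection/union bookkeeping rests on a 2D histogram of (target, pred) label pairs plus per-class bincounts; a toy NumPy version of that core computation:

import numpy as np

targs = np.array([0, 0, 1, 1, 2, 2])
preds = np.array([0, 1, 1, 1, 2, 0])
n = 3
# intersections[i, j] = pixels labeled i in targs and j in preds
intersections, _, _ = np.histogram2d(
    targs, preds, bins=(n, n),
    range=[[-0.5, n - 0.5], [-0.5, n - 0.5]])
area_t = np.bincount(targs, minlength=n)[:, None]  # rows: target classes
area_p = np.bincount(preds, minlength=n)[None, :]  # cols: predicted classes
unions = area_t + area_p - intersections           # subtract to avoid double-counting
iou = intersections / np.maximum(unions, 1e-9)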
Example #11
def accuracy(input: torch.cuda.FloatTensor, targs: torch.cuda.LongTensor):
    _n = targs.shape[0]
    input = input.argmax(dim=1).view(_n, -1)
    targs = targs.view(_n, -1)
    return (input == targs).float().mean()
Example #12
    def forward(self, pc: torch.cuda.FloatTensor,
                normal: torch.cuda.FloatTensor, cls):
        """
        pc shape: (B, N, 3 + input_channels)
        formatted as (x, y, z, features...)
        """
        xyz = pc[..., 0:3].contiguous()
        if pc.size(-1) > 3:
            features = pc[..., 3:].transpose(1, 2).contiguous()
        else:
            features = None
        l_xyz, l_features = [xyz], [features]
        for i in range(len(self.SA_modules)):
            if i < 5:
                if i < 4:
                    li_xyz, normal, li_features = self.SA_modules[i](
                        l_xyz[i], normal, l_features[i])
                else:
                    li_xyz, normal, li_features = self.SA_modules[i](
                        l_xyz[i], None, l_features[i])
                if li_xyz is not None:
                    random_index = np.arange(li_xyz.size()[1])
                    np.random.shuffle(random_index)
                    li_xyz = li_xyz[:, random_index, :]
                    li_features = li_features[:, :, random_index]
                l_xyz.append(li_xyz)
                l_features.append(li_features)

        # filter out the added relative pose (the first 3 channels of each scale)

        for i in range(4):
            li_features = l_features[i]
            if li_features is None:
                continue
            l = li_features.shape[1] // self.scale_num
            fts = []
            for j in range(self.scale_num):
                fts.append(li_features[:, 3 + j * l:l * (j + 1), :])
            l_features[i] = torch.cat(fts, dim=1)

        _, _, global_out2_feat = self.SA_modules[5](l_xyz[3], None,
                                                    l_features[3])

        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 2] = self.FP_modules[i](l_xyz[i - 2], l_xyz[i - 1],
                                                   l_features[i - 2],
                                                   l_features[i - 1])

        cls = cls.view(-1, 16, 1).repeat(
            1, 1, l_features[0].size()[2])  # object class one-hot-vector
        l_features[0] = torch.cat(
            (l_features[0], l_features[-1].repeat(1, 1,
                                                  l_features[0].size()[2]),
             global_out2_feat.repeat(1, 1, l_features[0].size()[2]), cls), 1)
        return self.FC_layer(l_features[0]).transpose(1, 2).contiguous()
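The per-scale filtering loop above drops the first 3 channels (the appended relative pose) from each of scale_num equal chunks; a small demonstration of that slicing:

import torch

scale_num = 3
feat = torch.randn(2, 201, 64)   # 201 channels = 3 scales x 67
l = feat.shape[1] // scale_num   # 67 channels per scale
chunks = [feat[:, 3 + j * l:(j + 1) * l, :] for j in range(scale_num)]
out = torch.cat(chunks, dim=1)   # drops 3 channels per scale: 201 -> 192
assert out.shape == (2, 192, 64)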