Example #1
    def _infer_flow(self, flow_f_1, flow_f_2):

        # Normalize the features
        if self.normalize_feature:
            flow_f_1 = ME.SparseTensor(
                flow_f_1.F / torch.norm(flow_f_1.F, p=2, dim=1, keepdim=True),
                coordinate_map_key=flow_f_1.coordinate_map_key,
                coordinate_manager=flow_f_1.coordinate_manager)

            flow_f_2 = ME.SparseTensor(
                flow_f_2.F / torch.norm(flow_f_2.F, p=2, dim=1, keepdim=True),
                coordinate_map_key=flow_f_2.coordinate_map_key,
                coordinate_manager=flow_f_2.coordinate_manager)

        # Extract the coarse flow based on the feature correspondences
        coarse_flow = []

        # Iterate over the examples in the batch
        for b_idx in range(len(flow_f_1.decomposed_coordinates)):
            feat_s = flow_f_1.F[flow_f_1.C[:, 0] == b_idx]
            feat_t = flow_f_2.F[flow_f_2.C[:, 0] == b_idx]

            coor_s = flow_f_1.C[flow_f_1.C[:, 0] == b_idx, 1:].to(
                self.device) * self.voxel_size
            coor_t = flow_f_2.C[flow_f_2.C[:, 0] == b_idx, 1:].to(
                self.device) * self.voxel_size

            # Add a batch dimension for the pairwise squared L2 distances
            coor_s, coor_t = coor_s.unsqueeze(0), coor_t.unsqueeze(0)
            feat_s, feat_t = feat_s.unsqueeze(0), feat_t.unsqueeze(0)

            # Force transport to be zero for points further than 10 m apart
            support = (pairwise_distance(coor_s, coor_t, normalized=False) <
                       10**2).float()

            # Transport cost matrix
            C = pairwise_distance(feat_s, feat_t)

            # Entropy-regularized transport kernel, masked by the spatial support
            K = torch.exp(
                -C / (torch.exp(self.epsilon) + self.tau_offset)) * support

            row_sum = K.sum(-1, keepdim=True)

            # Estimate the flow as the barycentric projection of the transport plan
            corr_flow = (K @ coor_t) / (row_sum + 1e-8) - coor_s

            coarse_flow.append(corr_flow.squeeze(0))

        coarse_flow = torch.cat(coarse_flow, dim=0)

        st_cf = ME.SparseTensor(features=coarse_flow,
                                coordinate_manager=flow_f_1.coordinate_manager,
                                coordinate_map_key=flow_f_1.coordinate_map_key)

        self.inferred_values['coarse_flow'] = st_cf.F

        # Refine the flow with the second network
        refined_flow = self.flow_refiner(st_cf)

        self.inferred_values['refined_flow'] = refined_flow.F
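The `pairwise_distance` helper used above is defined elsewhere in the project. A minimal sketch consistent with how it is called (batched `(1, N, D)` tensors, a `normalized` flag that shortcuts the squared norms for unit-length rows) could look like this; it is an assumption, not the original implementation:

import torch

def pairwise_distance(src, dst, normalized=True):
    # Squared L2 distance between all row pairs: src (B, N, D), dst (B, M, D).
    inner = torch.bmm(src, dst.transpose(1, 2))        # (B, N, M)
    if normalized:
        # For unit-length rows, ||a - b||^2 = 2 - 2 a.b
        return 2.0 - 2.0 * inner
    sq_src = src.pow(2).sum(dim=-1, keepdim=True)      # (B, N, 1)
    sq_dst = dst.pow(2).sum(dim=-1).unsqueeze(1)       # (B, 1, M)
    return sq_src + sq_dst - 2.0 * inner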
Example #2
def pose_estimation(model,
                    device,
                    xyz0,
                    xyz1,
                    coord0,
                    coord1,
                    feats0,
                    feats1,
                    return_corr=False):
    sinput0 = ME.SparseTensor(feats0.to(device), coordinates=coord0.to(device))
    F0 = model(sinput0).F

    sinput1 = ME.SparseTensor(feats1.to(device), coordinates=coord1.to(device))
    F1 = model(sinput1).F

    corr = F0.mm(F1.t())
    weight, inds = corr.max(dim=1)
    weight = weight.unsqueeze(1).cpu()
    xyz1_corr = xyz1[inds, :]

    trans = est_quad_linear_robust(xyz0, xyz1_corr, weight)

    if return_corr:
        return trans, weight, corr
    else:
        return trans, weight
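`est_quad_linear_robust` comes from the surrounding codebase and is not shown. One plausible core for such a weighted, correspondence-based solver is the closed-form weighted Kabsch/Procrustes alignment; a minimal sketch under that assumption (`xyz0`, `xyz1` as `(N, 3)` tensors, `weight` as `(N, 1)`):

import torch

def weighted_procrustes(xyz0, xyz1, weight):
    # Closed-form weighted rigid alignment (Kabsch); returns a 4x4 transform.
    w = weight / weight.sum()
    mu0 = (w * xyz0).sum(dim=0, keepdim=True)      # weighted centroids
    mu1 = (w * xyz1).sum(dim=0, keepdim=True)
    cov = (w * (xyz0 - mu0)).t() @ (xyz1 - mu1)    # 3x3 weighted covariance
    U, _, Vt = torch.linalg.svd(cov)
    d = torch.sign(torch.det(Vt.t() @ U.t()))      # avoid reflections
    S = torch.diag(torch.stack([torch.ones_like(d), torch.ones_like(d), d]))
    R = Vt.t() @ S @ U.t()                         # rotation mapping xyz0 -> xyz1
    t = mu1.squeeze(0) - R @ mu0.squeeze(0)
    T = torch.eye(4, dtype=xyz0.dtype)
    T[:3, :3], T[:3, 3] = R, t
    return T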
Example #3
    def empty_return(self):
        device = self.efi_cls.kernel.device
        dtype = self.efi_cls.kernel.dtype
        gfi = ME.SparseTensor(
            coordinates=torch.empty((0, self.spatial_dims + 1), device=device),
            features=torch.empty((0, self.efi.out_connection_type[1]),
                                 device=device,
                                 dtype=dtype),
            tensor_stride=[2**(self.config.n_conv_layers)] * self.spatial_dims)

        lfi = ME.SparseTensor(
            coordinates=torch.empty((0, self.spatial_dims + 1), device=device),
            features=torch.empty((0, self.gfi.out_connection_type[1]),
                                 device=device,
                                 dtype=dtype),
            tensor_stride=[2**(self.config.feature_layer + 1)] *
            self.spatial_dims)

        recon_x = ME.SparseTensor(
            coordinates=torch.empty((0, self.spatial_dims + 1), device=device),
            features=torch.empty((0, self.lfi.out_connection_type[1]),
                                 device=device,
                                 dtype=dtype),
            tensor_stride=[1] * self.spatial_dims)

        empty_outputs = {"gfi": gfi, "lfi": lfi, "recon_x": recon_x}
        return empty_outputs
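A guard like this is typically hit from the model's forward pass when an input yields no active coordinates. A hypothetical call site, reusing the module's own `efi`/`gfi`/`lfi` attributes (`self.decoder` is an assumption, not from the source):

    def forward(self, x):
        if len(x) == 0:  # nothing survived quantization / masking
            return self.empty_return()
        efi = self.efi(x)
        gfi = self.gfi(efi)
        lfi = self.lfi(gfi)
        return {"gfi": gfi, "lfi": lfi, "recon_x": self.decoder(lfi)}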
Example #4
    def _compute_loss_metrics(self, input_dict, phase='train'):

        '''
        Computes the losses and evaluation metrics.

        Args:
            input_dict (dict): data dictionary

        Returns:
            losses (dict): selected loss values
            metrics (dict): selected evaluation metrics
        '''

        # Run the feature and context encoder
        sinput1 = ME.SparseTensor(features=input_dict['sinput_s_F'].to(self.device),
                                  coordinates=input_dict['sinput_s_C'].to(self.device))

        sinput2 = ME.SparseTensor(features=input_dict['sinput_t_F'].to(self.device),
                                  coordinates=input_dict['sinput_t_C'].to(self.device))

        if phase == 'train':
            inferred_values = self.model(sinput1, sinput2,
                                         input_dict['pcd_s'], input_dict['pcd_t'],
                                         input_dict['fg_labels_s'], input_dict['fg_labels_t'])
        else:
            inferred_values = self.model(sinput1, sinput2,
                                         input_dict['pcd_eval_s'], input_dict['pcd_eval_t'],
                                         input_dict['fg_labels_s'], input_dict['fg_labels_t'])

        losses = self.compute_losses(inferred_values, input_dict)
        
        metrics = self.compute_metrics(inferred_values, input_dict, phase)

        return losses, metrics
Example #5
    def forward(self, x):
        xf = self.relu(
            self.bn1(self.conv1(x.F.unsqueeze(-1))[..., 0]).unsqueeze(-1))
        xf = self.relu(self.bn2(self.conv2(xf)[..., 0]).unsqueeze(-1))
        xf = self.relu(self.bn3(self.conv3(xf)[..., 0]).unsqueeze(-1))
        xf = ME.SparseTensor(xf[..., 0],
                             coords_key=x.coords_key,
                             coords_manager=x.coords_man)
        xfc = self.pool(xf)

        xf = F.relu(self.bn4(self.fc1(self.pool(xfc).F)))
        xf = F.relu(self.bn5(self.fc2(xf)))
        xf = self.fc3(xf)
        # Bias the regressed 3x3 transform toward the identity matrix
        xf += torch.tensor([[1, 0, 0, 0, 1, 0, 0, 0, 1]],
                           dtype=x.dtype,
                           device=x.device).repeat(xf.shape[0], 1)
        xf = ME.SparseTensor(xf,
                             coords_key=xfc.coords_key,
                             coords_manager=xfc.coords_man)
        xfc = ME.SparseTensor(torch.zeros(x.shape[0],
                                          9,
                                          dtype=x.dtype,
                                          device=x.device),
                              coords_key=x.coords_key,
                              coords_manager=x.coords_man)
        return self.broadcast(xfc, xf)
Example #6
def benchmark(config):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = ResUNetBN2C(1, 16, normalize_feature=True, conv1_kernel_size=3, D=3)
    model.eval()
    model = model.to(device)

    num_conv_layers = defaultdict(int)
    for layer in model.modules():
        if isinstance(layer, (ME.MinkowskiConvolution,
                              ME.MinkowskiConvolutionTranspose)):
            num_conv_layers[layer.kernel_generator.kernel_size[0]] += 1
    print(num_conv_layers)

    pcd = o3d.io.read_point_cloud(config.input)

    if ME.__version__.split(".")[1] == "5":
        for batch_size in [1, 2, 4, 8, 16, 32, 64]:
            vox_coords = torch.from_numpy(np.array(pcd.points)) / config.voxel_size
            coords = ME.utils.batched_coordinates(
                [vox_coords for i in range(batch_size)]
            )
            feats = torch.from_numpy(np.ones((len(coords), 1))).float()

            with torch.no_grad():
                t = Timer()
                for i in range(10):
                    # initialization time includes copy to GPU
                    t.tic()
                    sinput = ME.SparseTensor(
                        feats,
                        coords,
                        minkowski_algorithm=ME.MinkowskiAlgorithm.SPEED_OPTIMIZED,
                        device=device,
                    )
                    model(sinput)
                    t.toc()
                print(f"{batch_size}\t{len(sinput)}\t{t.min_time}")

    elif ME.__version__.split(".")[1] == "4":
        for batch_size in [1, 2, 4, 8, 16, 32, 64]:
            vox_coords = torch.from_numpy(np.array(pcd.points)) / config.voxel_size
            coords = ME.utils.batched_coordinates(
                [vox_coords for i in range(batch_size)]
            )
            feats = torch.from_numpy(np.ones((len(coords), 1))).float()

            with torch.no_grad():
                t = Timer()
                for i in range(10):
                    # initialization time includes copy to GPU
                    t.tic()
                    sinput = ME.SparseTensor(feats, coords).to(device)
                    model(sinput)
                    t.toc()
                print(f"{batch_size}\t{len(sinput)}\t{t.min_time}")

    else:
        raise NotImplementedError(
            f"Unsupported MinkowskiEngine version: {ME.__version__}")
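The `Timer` class is a small utility from the benchmark's codebase. A stand-in with the same `tic`/`toc`/`min_time` surface (an assumption, not the original) could be:

import time

class Timer:
    # Minimal tic/toc timer that remembers the fastest iteration.
    def __init__(self):
        self.min_time = float("inf")
        self._start = None

    def tic(self):
        self._start = time.time()

    def toc(self):
        elapsed = time.time() - self._start
        self.min_time = min(self.min_time, elapsed)
        return elapsed

For GPU benchmarking, a torch.cuda.synchronize() before each toc() would make the wall-clock numbers meaningful.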
Example #7
def corr_and_add(feature_A,
                 feature_B,
                 k=10,
                 coords_A=None,
                 coords_B=None,
                 Npts=None):

    # compute sparse correlation from A to B
    scorr = sparse_corr(feature_A,
                        feature_B,
                        k=k,
                        ratio=False,
                        sparse_type='raw')

    # compute sparse correlation from B to A
    scorr2 = sparse_corr(feature_B,
                         feature_A,
                         k=k,
                         ratio=False,
                         reverse=True,
                         sparse_type='raw')

    scorr = ME.SparseTensor(scorr[0], scorr[1].cuda())
    scorr2 = ME.SparseTensor(scorr2[0],
                             scorr2[1].cuda(),
                             coordinate_manager=scorr.coordinate_manager)

    scorr = ME.MinkowskiUnion()(scorr, scorr2)

    return scorr
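`ME.MinkowskiUnion` unions the coordinates of the two correlation tensors and sums the features at coordinates they share. A toy 2D demonstration (first coordinate column is the batch index):

import torch
import MinkowskiEngine as ME

coords_a = torch.IntTensor([[0, 0, 0], [0, 0, 1]])
coords_b = torch.IntTensor([[0, 0, 1], [0, 1, 1]])
A = ME.SparseTensor(torch.ones(2, 1), coordinates=coords_a)
B = ME.SparseTensor(torch.ones(2, 1), coordinates=coords_b,
                    coordinate_manager=A.coordinate_manager)
U = ME.MinkowskiUnion()(A, B)
# U has three coordinates; the feature at (0, 0, 1) is 2 (the summed overlap).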
Example #8
    def generate_inlier_input(self, xyz0, xyz1, iC0, iC1, iF0, iF1, len_batch,
                              pos_pairs):
        # pairs consist of (xyz1 index, xyz0 index)
        stime = time.time()
        sinput0 = ME.SparseTensor(iF0, coords=iC0).to(self.device)
        oF0 = self.feat_model(sinput0).F

        sinput1 = ME.SparseTensor(iF1, coords=iC1).to(self.device)
        oF1 = self.feat_model(sinput1).F
        feat_time = time.time() - stime

        stime = time.time()
        pred_pairs = self.find_pairs(oF0, oF1, len_batch)
        nn_time = time.time() - stime

        is_correct = find_correct_correspondence(pos_pairs,
                                                 pred_pairs,
                                                 len_batch=len_batch)

        cat_pred_pairs = []
        start_inds = torch.zeros((1, 2)).long()
        for lens, pred_pair in zip(len_batch, pred_pairs):
            cat_pred_pairs.append(pred_pair + start_inds)
            start_inds += torch.LongTensor(lens)

        cat_pred_pairs = torch.cat(cat_pred_pairs, 0)
        pred_pair_inds0, pred_pair_inds1 = cat_pred_pairs.t()
        reg_coords = torch.cat(
            (iC0[pred_pair_inds0], iC1[pred_pair_inds1, 1:]), 1)
        reg_feats = self.generate_inlier_features(xyz0, xyz1, iC0, iC1, oF0,
                                                  oF1, pred_pair_inds0,
                                                  pred_pair_inds1).float()

        return reg_coords, reg_feats, pred_pairs, is_correct, feat_time, nn_time
Example #9
    def forward(self, x):
        # First, align coordinates to be centered around zero.
        coords = x.coords.to(x.device)[:, 1:]
        coords = ME.SparseTensor(coords.float(),
                                 coords_key=x.coords_key,
                                 coords_manager=x.coords_man)
        mean_coords = self.broadcast(coords, self.pool(coords))
        norm_coords = coords - mean_coords
        # Second, apply spatial transformer to the coordinates.
        trans = self.stn(norm_coords)
        coords_feat_stn = torch.squeeze(
            torch.bmm(norm_coords.F.view(-1, 1, 3), trans.F.view(-1, 3, 3)))
        xf = torch.cat((coords_feat_stn, x.F), 1).unsqueeze(-1)
        xf = self.relu(self.bn1(self.conv1(xf)[..., 0]).unsqueeze(-1))

        pointfeat = xf
        xf = self.bn2(self.conv2(xf)[..., 0]).unsqueeze(-1)
        xfc = ME.SparseTensor(xf[..., 0],
                              coords_key=x.coords_key,
                              coords_manager=x.coords_man)
        xf_avg = ME.SparseTensor(torch.zeros(x.shape[0],
                                             xfc.F.shape[1],
                                             dtype=x.dtype,
                                             device=x.device),
                                 coords_key=x.coords_key,
                                 coords_manager=x.coords_man)
        xf_avg = self.broadcast(xf_avg, self.pool(xfc))
        return torch.cat((pointfeat[..., 0], xf_avg.F), 1)
Example #10
def operation_mode():
    # Share one global coordinate manager across all sparse tensors by default
    ME.set_sparse_tensor_operation_mode(
        ME.SparseTensorOperationMode.SHARE_COORDS_MANAGER)
    print(ME.sparse_tensor_operation_mode())

    coords0, feats0 = to_sparse_coo(data_batch_0)
    coords0, feats0 = ME.utils.sparse_collate(coords=[coords0], feats=[feats0])

    coords1, feats1 = to_sparse_coo(data_batch_1)
    coords1, feats1 = ME.utils.sparse_collate(coords=[coords1], feats=[feats1])

    for _ in range(2):
        # sparse tensors
        A = ME.SparseTensor(coords=coords0, feats=feats0)
        B = ME.SparseTensor(
            coords=coords1,
            feats=feats1,
            # coords_manager=A.coords_man,  No need to feed the coords_man
            force_creation=True)

        C = A + B

        # When done with the forward/backward passes, you must clean up the global coordinate manager
        ME.clear_global_coords_man()
Example #11
    def forward(self, x):
        out_s1 = self.conv1(x)
        out_s1 = self.norm1(out_s1)
        out_s1 = self.block1(out_s1)
        out = MEF.relu(out_s1)

        out_s2 = self.conv2(out)
        out_s2 = self.norm2(out_s2)
        out_s2 = self.block2(out_s2)
        out = MEF.relu(out_s2)

        out_s4 = self.conv3(out)
        out_s4 = self.norm3(out_s4)
        out_s4 = self.block3(out_s4)
        out = MEF.relu(out_s4)

        out_s8 = self.conv4(out)
        out_s8 = self.norm4(out_s8)
        out_s8 = self.block4(out_s8)
        out = MEF.relu(out_s8)

        out = self.conv4_tr(out)
        out = self.norm4_tr(out)
        out = self.block4_tr(out)
        out_s4_tr = MEF.relu(out)

        out = ME.cat(out_s4_tr, out_s4)

        out = self.conv3_tr(out)
        out = self.norm3_tr(out)
        out = self.block3_tr(out)
        out_s2_tr = MEF.relu(out)

        out = ME.cat(out_s2_tr, out_s2)

        out = self.conv2_tr(out)
        out = self.norm2_tr(out)
        out = self.block2_tr(out)
        out_s1_tr = MEF.relu(out)

        out = ME.cat(out_s1_tr, out_s1)
        out_feat = self.conv1_tr(out)
        out_feat = MEF.relu(out_feat)
        out_feat = self.final(out_feat)

        out_att = self.conv1_tr_att(out)
        out_att = MEF.relu(out_att)
        out_att = self.final_att(out_att)
        out_att = ME.SparseTensor(self.final_att_act(out_att.F),
                                  coords_key=out_att.coords_key,
                                  coords_manager=out_att.coords_man)

        if self.normalize_feature:
            out_feat = ME.SparseTensor(
                out_feat.F / torch.norm(out_feat.F, p=2, dim=1, keepdim=True),
                coords_key=out_feat.coords_key,
                coords_manager=out_feat.coords_man)

        return dict(features=out_feat, attention=out_att)
Example #12
  def forward(self, x):
    out_s1 = self.conv1(x)
    out_s1 = self.norm1(out_s1)
    out_s1 = self.block1(out_s1)
    out = MEF.relu(out_s1)

    out_s2 = self.conv2(out)
    out_s2 = self.norm2(out_s2)
    out_s2 = self.block2(out_s2)
    out = MEF.relu(out_s2)

    out_s4 = self.conv3(out)
    out_s4 = self.norm3(out_s4)
    out_s4 = self.block3(out_s4)
    out = MEF.relu(out_s4)

    out_s8 = self.conv4(out)
    out_s8 = self.norm4(out_s8)
    out_s8 = self.block4(out_s8)
    out = MEF.relu(out_s8)

    out = self.conv4_tr(out)
    out = self.norm4_tr(out)
    out = self.block4_tr(out)
    out_s4_tr = MEF.relu(out)

    out = ME.cat(out_s4_tr, out_s4)

    out = self.conv3_tr(out)
    out = self.norm3_tr(out)
    out = self.block3_tr(out)
    out_s2_tr = MEF.relu(out)

    out = ME.cat(out_s2_tr, out_s2)

    out = self.conv2_tr(out)
    out = self.norm2_tr(out)
    out = self.block2_tr(out)
    out_s1_tr = MEF.relu(out)

    out = ME.cat(out_s1_tr, out_s1)
    out = self.conv1_tr(out)
    out = MEF.relu(out)
    out = self.final(out)

    if self.normalize_feature:
      if ME.__version__.split(".")[1] == "5":
        return ME.SparseTensor(
            out.F / torch.norm(out.F, p=2, dim=1, keepdim=True),
            coordinate_map_key=out.coordinate_map_key,
            coordinate_manager=out.coordinate_manager)
      elif ME.__version__.split(".")[1] == "4":
        return ME.SparseTensor(
            out.F / torch.norm(out.F, p=2, dim=1, keepdim=True),
            coords_key=out.coords_key,
            coords_manager=out.coords_man)
    else:
      return out
Example #13
    def compute_descriptors(self, input_dict):
        '''
        If the descriptors are not precomputed, infers them and returns the established
        correspondences together with the ground-truth transformation parameters and
        inlier labels.

        Args:
            input_dict (dict): input data
        '''

        if not self.precomputed_desc:

            xyz_down = input_dict['sinput0_C']

            sinput0 = ME.SparseTensor(
                input_dict['sinput0_F'], coords=input_dict['sinput0_C']).to(self.device)

            F0 = self.descriptor_module(sinput0).F

            # If the FCGF descriptor is trained with the FCGF loss, the corresponding target descriptors are also needed
            if self.train_descriptor:
                sinput1 = ME.SparseTensor(
                    input_dict['sinput1_F'], coords=input_dict['sinput1_C']).to(self.device)

                F1 = self.descriptor_module(sinput1).F 
            else:
                F1 = torch.empty(F0.shape[0], 0).to(self.device)


            # Sample the points
            xyz_batch, f_batch = self.sampler(xyz_down, F0, input_dict['pts_list'])

            # Build point cloud pairs for the inference
            xyz_s, xyz_t, f_s, f_t = extract_overlaping_pairs(xyz_batch, f_batch, self.connectivity_info)

            # Compute nearest neighbors in feature space
            nn_C_s_t = self.feature_matching(f_s, f_t, xyz_t) # NNs of the source points in the target point cloud
            nn_C_t_s = self.feature_matching(f_t, f_s, xyz_s) # NNs of the target points in the source point cloud
            

            if self.mutuals_flag:
                mutuals = extract_mutuals(xyz_s, xyz_t, nn_C_s_t, nn_C_t_s)
            else:
                mutuals = None

            # Prepare the input for the filtering block
            filtering_input = construct_filtering_input_data(xyz_s, xyz_t, input_dict, mutuals)

        else:
            filtering_input = input_dict
            F0 = None
            F1 = None

        return filtering_input, F0, F1
Example #14
    def _train_iter(self, data_loader_iter, timers):
        self.model.train()
        data_meter, data_timer, total_timer = timers

        self.optimizer.zero_grad()
        batch_pos_loss, batch_neg_loss, batch_loss = 0, 0, 0
        data_time = 0
        total_timer.tic()
        data_timer.tic()
        input_dict = next(data_loader_iter)
        data_time += data_timer.toc(average=False)

        sinput0 = ME.SparseTensor(input_dict['sinput0_F'],
                                  coords=input_dict['sinput0_C']).to(
                                      self.cur_device)
        F0 = self.model(sinput0).F

        sinput1 = ME.SparseTensor(input_dict['sinput1_F'],
                                  coords=input_dict['sinput1_C']).to(
                                      self.cur_device)

        F1 = self.model(sinput1).F

        pos_pairs = input_dict['correspondences']
        pos_loss, neg_loss = self.contrastive_hardest_negative_loss(
            F0,
            F1,
            pos_pairs,
            num_pos=self.config.trainer.num_pos_per_batch * self.batch_size,
            num_hn_samples=self.config.trainer.num_hn_samples_per_batch *
            self.batch_size)

        loss = pos_loss + neg_loss

        loss.backward()

        result = {"loss": loss, "pos_loss": pos_loss, "neg_loss": neg_loss}
        if self.config.misc.num_gpus > 1:
            result = du.scaled_all_reduce_dict(result,
                                               self.config.misc.num_gpus)
        batch_loss += result["loss"].item()
        batch_pos_loss += result["pos_loss"].item()
        batch_neg_loss += result["neg_loss"].item()

        self.optimizer.step()

        torch.cuda.empty_cache()

        total_timer.toc()
        data_meter.update(data_time)

        return batch_loss, batch_pos_loss, batch_neg_loss
Example #15
def subsample_aux(x0, x1, aux, kernel_size=2):

    aux = ME.SparseTensor(coordinates=aux.C, features=(aux.F + 1))

    n_ks = kernel_size**3

    coords = x1.C
    batch_id = coords[:, 0]
    batch_id = batch_id.unsqueeze(-1).repeat(1, n_ks).reshape(-1, 1)

    pooled_C = coords[:, 1:]
    kernel_C = pooled_C.unsqueeze(1).repeat(1, n_ks, 1)

    # Voxel offsets covering the 2x2x2 pooling window (assumes kernel_size == 2)
    diffs = torch.tensor([
        [0, 0, 0],
        [0, 0, 1],
        [0, 1, 0],
        [0, 1, 1],
        [1, 0, 0],
        [1, 0, 1],
        [1, 1, 0],
        [1, 1, 1],
    ],
                         device='cuda')

    kernel_C = kernel_C + diffs

    query_C = torch.cat([batch_id, kernel_C.reshape(-1, 3)], dim=1).float()
    query_F = aux.features_at_coordinates(query_C).reshape(-1, 8)  # [N, 8]

    # Find the most frequent label in each window; torch.mode would usually
    # return the empty-voxel value 0, so count label frequencies explicitly.
    freqs = torch.stack([(query_F == i).sum(dim=1) for i in range(1, 21)])
    out = freqs.max(dim=0)[1]  # shape [N]

    out = out - 1  # shift back to the original label range [-1~20]

    # debug dict for plot
    # d = {}
    # d['origin_pc'] = x0.C
    # d['origin_pred'] = aux.F
    # d['new_pc'] = x1.C
    # d['new_pred'] = out
    # torch.save(d, './aux.pth')

    out = ME.SparseTensor(coordinates=x1.C,
                          features=out.float().reshape([-1, 1]).cuda())

    return out
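A hypothetical call site for `subsample_aux`: `x1` is a stride-2 pooled version of `x0`, and `aux` carries one integer label per point of `x0` on the same coordinates; the labels are transferred to `x1` by majority vote over each 2x2x2 window. A sketch with random data (CUDA is assumed, as in the function itself):

import torch
import MinkowskiEngine as ME

coords = ME.utils.batched_coordinates([torch.randint(0, 64, (2048, 3))])
x0 = ME.SparseTensor(torch.rand(coords.shape[0], 8), coordinates=coords,
                     device='cuda')
aux = ME.SparseTensor(
    torch.randint(0, 20, (len(x0), 1), device='cuda').float(),
    coordinate_map_key=x0.coordinate_map_key,
    coordinate_manager=x0.coordinate_manager)
x1 = ME.MinkowskiSumPooling(kernel_size=2, stride=2, dimension=3)(x0)
labels_s2 = subsample_aux(x0, x1, aux, kernel_size=2)  # labels on x1's coords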
Example #16
def torch_to_me(sten):
    '''Convert a torch sparse COO tensor to a ME.SparseTensor.

    The first row of the COO indices is interpreted as the batch index.
    '''
    sten = sten.coalesce()
    indices = sten.indices().t().contiguous().int()
    features = sten.values()
    if len(features.shape) == 1:
        features = features.unsqueeze(1)
    return ME.SparseTensor(features, indices)
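A round-trip usage sketch (hypothetical data; a 2D tensor whose indices are already in ME's batch-first layout):

import torch
import MinkowskiEngine as ME

idx = torch.tensor([[0, 0, 0], [0, 1, 2], [0, 2, 1]]).t()  # (batch, x, y)
val = torch.tensor([1.0, 2.0, 3.0])
sten = torch.sparse_coo_tensor(idx, val, size=(1, 4, 4))
me_tensor = torch_to_me(sten)  # 1-channel features of shape (3, 1)
print(me_tensor.C, me_tensor.F)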
Example #17
def benchmark(config):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = ResUNetBN2C(1,
                        16,
                        normalize_feature=True,
                        conv1_kernel_size=3,
                        D=3)
    model.eval()
    model = model.to(device)

    pcd = o3d.io.read_point_cloud(config.input)
    coords = ME.utils.batched_coordinates(
        [torch.from_numpy(np.array(pcd.points)) / config.voxel_size])
    feats = torch.from_numpy(np.ones((len(coords), 1))).float()

    with torch.no_grad():
        t = MinTimer()
        for i in range(100):
            # initialization time includes copy to GPU
            t.tic()
            sinput = ME.SparseTensor(
                feats,
                coords,
                minkowski_algorithm=ME.MinkowskiAlgorithm.SPEED_OPTIMIZED,
                # minkowski_algorithm=ME.MinkowskiAlgorithm.MEMORY_EFFICIENT,
                device=device)
            model(sinput)
            t.toc()
        print(t.min)
Example #18
def reform_input(coords, labels, device):
    labels = np.int32(labels)
    feats = coords

    coords, feats, labels = coords[0], feats[0], labels[0]
    if config['data']['quantize']:
        # quantization_size = 0.005 * 10**((config['num_point'] / 131072) - 1):
        # 0.005 for num_point == 131072, 0.05 for num_point == 262144
        quantization_size = 0.005
        coords, feats, labels = ME.utils.sparse_quantize(
            coords=coords,
            feats=feats,
            labels=labels,
            quantization_size=quantization_size)

    if config['data']['batch_coords']:
        coords_pt = ME.utils.batched_coordinates([coords * 1e6])
    else:
        coords = np.concatenate((coords * 1e6, np.zeros((coords.shape[0], 1))),
                                axis=1)
        coords_pt = torch.from_numpy(coords).int()
    feats_pt = torch.from_numpy(feats).float()

    labels_pt = torch.from_numpy(labels).long().to(device)

    input_pt = ME.SparseTensor(
        feats_pt,
        coords=coords_pt,
        quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE
    ).to(device)

    print(labels[labels == 1].shape)

    return input_pt, labels_pt, coords, feats, labels
Example #19
def val(net, device, config, phase="val"):
    is_minknet = isinstance(net, ME.MinkowskiNetwork)
    data_loader = make_data_loader_custom(
        "val",
        config=config,
    )

    net.eval()
    labels_val, preds_val = [], []
    with torch.no_grad():
        for batch in data_loader:
            coords, feats, labels = batch
            sinput = ME.SparseTensor(feats.float(), coords, device=device)
            logit = net(sinput)
            pred = torch.argmax(logit, 1)

            #print("val_labels", labels)
            #print("val_logit", logit)
            #print("val_pred", pred)
            #labels.append(labels.numpy())
            labels_val.append(labels.cpu().numpy())
            preds_val.append(pred.cpu().numpy())
            torch.cuda.empty_cache()
    return metrics.accuracy_score(np.concatenate(labels_val),
                                  np.concatenate(preds_val))
Example #20
    def forward(self, batch):
        # Coords must be on CPU, features can be on GPU - see MinkowskiEngine documentation
        x = ME.SparseTensor(features=batch['features'], coordinates=batch['coords'])
        x = self.backbone(x)

        # x is (num_points, n_features) tensor
        assert x.shape[1] == self.feature_size, 'Backbone output tensor has: {} channels. Expected: {}'.format(x.shape[1], self.feature_size)

        x = self.pooling(x)
        if x.dim() == 3 and x.shape[2] == 1:
            # Reshape (batch_size, pooled_feature_size, 1) to (batch_size, pooled_feature_size)
            x = x.flatten(1)

        assert x.dim() == 2, 'Expected 2-dimensional tensor (batch_size,output_dim). Got {} dimensions.'.format(x.dim())
        assert x.shape[1] == self.pooled_feature_size, 'Backbone output tensor has: {} channels. Expected: {}'.format(x.shape[1], self.pooled_feature_size)

        if self.dropout is not None:
            x = self.dropout(x)

        if self.linear is not None:
            x = self.linear(x)

        assert x.shape[1] == self.output_dim, 'Output tensor has: {} channels. Expected: {}'.format(x.shape[1], self.output_dim)
        # x is (batch_size, output_dim) tensor
        return {'embedding': x}
Example #21
    def training_step(self, batch, batch_idx):
        stensor = ME.SparseTensor(coordinates=batch["coordinates"],
                                  features=batch["features"])
        # Must clear the CUDA cache at regular intervals
        if self.global_step % 10 == 0:
            torch.cuda.empty_cache()
        return self.criterion(self(stensor).F, batch["labels"].long())
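A minimal LightningModule skeleton this step could belong to; the backbone and loss are assumptions, not from the source:

import MinkowskiEngine as ME
import pytorch_lightning as pl
import torch
import torch.nn as nn

class MinkowskiSegmentationModule(pl.LightningModule):  # hypothetical wrapper
    def __init__(self, backbone, lr=1e-1):
        super().__init__()
        self.backbone = backbone                # any Minkowski network
        self.criterion = nn.CrossEntropyLoss()  # assumed loss
        self.lr = lr

    def forward(self, stensor):
        return self.backbone(stensor)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=self.lr)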
Example #22
  def fcgf_feature_extraction(self, feats, coords):
    '''
    Step 1: extract fast and accurate FCGF feature per point
    '''
    sinput = ME.SparseTensor(feats, coords=coords).to(self.device)

    return self.fcgf_model(sinput).F
Example #23
 def forward(self, input):
     coords = input[:, 0:self.D + 1].cpu().int()
     features = input[:, self.D + 1:].float()
     x = ME.SparseTensor(features, coords=coords)
     x = self.input_layer(x)
     out = self.acnn(x)
     return out
Example #24
def extract_feats(pcd, feature_type, voxel_size, model=None):
    xyz = np.asarray(pcd.points)
    _, sel = ME.utils.sparse_quantize(xyz,
                                      return_index=True,
                                      quantization_size=voxel_size)
    xyz = xyz[sel]
    pcd = make_o3d_pointcloud(xyz)

    if feature_type == 'FPFH':
        radius_normal = voxel_size * 2
        pcd.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal,
                                                 max_nn=30))
        radius_feat = voxel_size * 5
        feat = o3d.pipelines.registration.compute_fpfh_feature(
            pcd,
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feat,
                                                 max_nn=100))
        # (N, 33)
        return pcd, feat.data.T

    elif feature_type == 'FCGF':
        DEVICE = torch.device('cuda')
        coords = ME.utils.batched_coordinates(
            [torch.floor(torch.from_numpy(xyz) / voxel_size).int()]).to(DEVICE)

        feats = torch.ones(coords.size(0), 1).to(DEVICE)
        sinput = ME.SparseTensor(feats, coordinates=coords)  # .to(DEVICE)

        # (N, 32)
        return pcd, model(sinput).F.detach().cpu().numpy()

    else:
        raise NotImplementedError(
            'Unimplemented feature type {}'.format(feature_type))
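Hypothetical usage pairing the two feature types (the input path is a placeholder, and a trained FCGF-style `model` is assumed to be constructed elsewhere; checkpoint loading omitted):

import open3d as o3d

pcd = o3d.io.read_point_cloud('scan.ply')  # placeholder path

# FPFH needs no network
pcd_down, fpfh = extract_feats(pcd, 'FPFH', voxel_size=0.05)

# FCGF needs a trained model on the GPU
pcd_down, fcgf = extract_feats(pcd, 'FCGF', voxel_size=0.05,
                               model=model.cuda().eval())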
Example #25
    def forward(self, x: ME.SparseTensor):
        # This implicitly applies ReLU on x (clamps negative values)
        temp = ME.SparseTensor(x.F.clamp(min=self.eps).pow(self.p),
                               coordinates=x.C)
        temp = self.f(temp)  # apply ME.MinkowskiGlobalAvgPooling
        # Return a dense (batch_size, n_features) tensor
        return temp.F.pow(1. / self.p)
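This forward pass is generalized-mean (GeM) pooling on sparse features; the surrounding module evidently defines `self.p`, `self.eps`, and `self.f`. One plausible completion of the module, as a sketch:

import torch
import torch.nn as nn
import MinkowskiEngine as ME

class MinkGeM(nn.Module):
    # GeM pooling: avg-pool of clamped features raised to p, then the 1/p root.
    def __init__(self, p=3.0, eps=1e-6):
        super().__init__()
        self.p = nn.Parameter(torch.ones(1) * p)  # learnable exponent
        self.eps = eps
        self.f = ME.MinkowskiGlobalAvgPooling()

    def forward(self, x: ME.SparseTensor):
        temp = ME.SparseTensor(x.F.clamp(min=self.eps).pow(self.p),
                               coordinates=x.C)
        temp = self.f(temp)
        return temp.F.pow(1. / self.p)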
Example #26
def decomposition():
    coords0, feats0 = to_sparse_coo(data_batch_0)
    coords1, feats1 = to_sparse_coo(data_batch_1)
    coords, feats = ME.utils.sparse_collate(coords=[coords0, coords1],
                                            feats=[feats0, feats1])

    # sparse tensors
    A = ME.SparseTensor(coords=coords, feats=feats)
    conv = ME.MinkowskiConvolution(in_channels=1,
                                   out_channels=2,
                                   kernel_size=3,
                                   stride=2,
                                   dimension=2)
    B = conv(A)

    # Extract features and coordinates per batch index
    list_of_coords = B.decomposed_coordinates
    list_of_feats = B.decomposed_features
    list_of_coords, list_of_feats = B.decomposed_coordinates_and_features

    # To specify a batch index
    batch_index = 1
    coords = B.coordinates_at(batch_index)
    feats = B.features_at(batch_index)

    # Empty list if given an invalid batch index
    batch_index = 3
    print(B.coordinates_at(batch_index))
Example #27
def sparse_tensor_arithmetics():
    coords0, feats0 = to_sparse_coo(data_batch_0)
    coords0, feats0 = ME.utils.sparse_collate(coords=[coords0], feats=[feats0])

    coords1, feats1 = to_sparse_coo(data_batch_1)
    coords1, feats1 = ME.utils.sparse_collate(coords=[coords1], feats=[feats1])

    # sparse tensors
    A = ME.SparseTensor(coordinates=coords0, features=feats0)
    B = ME.SparseTensor(coordinates=coords1, features=feats1)

    # The following fails
    try:
        C = A + B
    except AssertionError:
        pass

    B = ME.SparseTensor(
        coordinates=coords1,
        features=feats1,
        coordinate_manager=A.coordinate_manager  # must share the same coordinate manager
    )

    C = A + B
    C = A - B
    C = A * B
    C = A / B

    # In-place operations additionally require the same coordinate map key,
    # so there is no need to feed coordinates.
    D = ME.SparseTensor(
        features=feats0,
        coordinate_manager=A.coordinate_manager,  # must share the same coordinate manager
        coordinate_map_key=A.coordinate_map_key   # must share the same map key for in-place ops
    )

    A += D
    A -= D
    A *= D
    A /= D

    # If two or more sparse tensors share the same coordinate map key, you can concatenate their features
    E = ME.cat(A, D)
Example #28
def generate_input_sparse_tensor(file_name, voxel_size=0.05):
    # Create a batch, this process is done in a data loader during training in parallel.
    batch = [load_file(file_name, voxel_size)]
    coordinates_, features_, pcds = list(zip(*batch))
    coordinates, features = ME.utils.sparse_collate(coordinates_, features_)

    # Normalize features and create a sparse tensor
    return ME.SparseTensor(features - 0.5, coords=coordinates).to(device)
Example #29
  def inlier_prediction(self, inlier_feats, coords):
    '''
    Step 4: predict inlier likelihood
    '''
    sinput = ME.SparseTensor(inlier_feats, coords=coords).to(self.device)
    soutput = self.inlier_model(sinput)

    return soutput.F
Example #30
    def test_sum(self):
        coords, colors, pcd = load_file("1.ply")
        device = "cuda"

        D = 3
        batch_size = 16
        voxel_size = 0.02
        channels = [3, 64, 128]
        dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
        bcoords = batched_coordinates([dcoords for i in range(batch_size)])
        in_feats = torch.rand(len(bcoords), 3).to(device)

        layer = MinkowskiStackSum(
            ME.MinkowskiConvolution(
                channels[0],
                channels[1],
                kernel_size=3,
                stride=1,
                dimension=3,
            ),
            nn.Sequential(
                ME.MinkowskiConvolution(
                    channels[0],
                    channels[1],
                    kernel_size=3,
                    stride=2,
                    dimension=3,
                ),
                ME.MinkowskiStackSum(
                    nn.Identity(),
                    nn.Sequential(
                        ME.MinkowskiConvolution(
                            channels[1],
                            channels[2],
                            kernel_size=3,
                            stride=2,
                            dimension=3,
                        ),
                        ME.MinkowskiConvolutionTranspose(
                            channels[2],
                            channels[1],
                            kernel_size=3,
                            stride=1,
                            dimension=3,
                        ),
                        ME.MinkowskiPoolingTranspose(
                            kernel_size=2, stride=2, dimension=D
                        ),
                    ),
                ),
                ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D),
            ),
        ).cuda()

        for i in range(1000):
            torch.cuda.empty_cache()
            sinput = ME.SparseTensor(in_feats, coordinates=bcoords, device=device)
            layer(sinput)