Example 1
 def __call__(self, pc: PointCloud):
     """ Randomly rotates a given PointCloud by performing an Euler rotation. The three angles are chosen randomly
         from the given angle_range. If the PointCloud is a HybridCloud then the nodes get rotated as well. Operates
         in-place for the given Pointcloud. If apply_flip is true, randomly flips spatial axes around origin
         independently.
     """
     pc.rotate_randomly(self.angle_range, random_flip=self.apply_flip)
Example 2
def test_get_coverage():
    pc = PointCloud(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]]),
                    labels=np.array([1, 2, 3, 4]),
                    predictions={
                        0: [2, 0, 0, 0, 1],
                        2: [7, 7, 7, 6, 6, 6, 6]
                    })
    coverage = pc.get_coverage()
    assert (1 - coverage[0] / coverage[1]) == 0.5

    pc = PointCloud(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]]),
                    labels=np.array([1, 2, 3, 4]),
                    predictions={
                        0: [2, 0, 0, 0, 1],
                        1: [1],
                        2: [7, 7, 7, 6, 6, 6, 6],
                        3: [1]
                    })
    coverage = pc.get_coverage()
    assert (1 - coverage[0] / coverage[1]) == 1

    pc = PointCloud(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]]),
                    labels=np.array([1, 2, 3, 4]))
    coverage = pc.get_coverage()
    assert (1 - coverage[0] / coverage[1]) == 0
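A note on the convention these assertions pin down: get_coverage() evidently returns a pair (uncovered, total), where a vertex counts as covered once a prediction list exists for its index. A minimal standalone sketch of that bookkeeping (an inference from the test, not the library source):

# predictions maps vertex index -> list of predicted labels, as above
predictions = {0: [2, 0, 0, 0, 1], 2: [7, 7, 7, 6, 6, 6, 6]}
total = 4
uncovered = total - len(predictions)
print(1 - uncovered / total)  # 0.5, matching the first assertion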
Example 3
 def __call__(self, pc: PointCloud):
     if np.random.random() < self.prob:
         if self.randomize:
             sigma = np.random.rand() * (self.sigma[1] -
                                         self.sigma[0]) + self.sigma[0]
             alpha = np.random.rand() * (self.alpha[1] -
                                         self.alpha[0]) + self.alpha[0]
         else:
             sigma = self.sigma
             alpha = self.alpha
         pc.elastic(self.res, sigma, alpha)
Example 4
    def load_val(self, name: str, seed: int):
        """ Method for viewing validation or training examples. These examples must be saved as a list in the pickle
            file and should alternate between target and prediction. E.g. [targetcloud1, predictedcloud1, targetcloud2
            predictedcloud2, ...]


        Args:
            name: Name of file
        """
        reverse = False
        req_files = [file for file in self.files1 if name in file]
        idx = 0

        while idx < len(req_files):
            file = req_files[idx]
            slashs = [pos for pos, char in enumerate(file) if char == '/']
            filename = file[slashs[-1]:-4]
            print("Viewing: " + filename)

            with open(file, 'rb') as f:
                results = pickle.load(f)
            if reverse:
                i = len(results) - 2
            else:
                i = 0
            while i < len(results):
                orig = results[i]
                pred = results[i + 1]
                try:
                    pred = PointCloud(pred.vertices,
                                      pred.labels,
                                      encoding=pred.encoding)
                except Exception:
                    # labels may be one-hot encoded (N, C); reduce with argmax
                    pred = PointCloud(pred.vertices,
                                      np.argmax(pred.labels, axis=1),
                                      encoding=pred.encoding)
                res = self.core_next(orig,
                                     pred,
                                     filename + '_i{}'.format(i),
                                     seed=seed)
                if res is None:
                    return
                i += 2 * res
                if res < 0:
                    reverse = True
                    if i < 0:
                        break
                else:
                    reverse = False
            if reverse:
                idx -= 1
            else:
                idx += 1
Example 5
def test_generate_pred_labels():
    pc = PointCloud(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]),
                    labels=np.array([1, 2, 3]),
                    predictions={
                        0: [2, 0, 0, 0, 1],
                        1: [5, 4, 5, 5, 4, 3],
                        2: [7, 7, 7, 6, 6, 6, 6]
                    })
    expected = np.array([0, 5, 6]).reshape(-1, 1)
    assert np.all(pc.pred_labels == expected)

    expected = np.array([2, 5, 7]).reshape(-1, 1)
    assert np.all(pc.generate_pred_labels(False) == expected)
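The expected arrays can be reproduced by hand: in the default mode each vertex gets the most frequent entry of its prediction list (majority vote), while generate_pred_labels(False) appears to take the first entry instead. A standalone sketch of both readings (inferred from the test, not taken from the library code):

from collections import Counter

predictions = {0: [2, 0, 0, 0, 1], 1: [5, 4, 5, 5, 4, 3], 2: [7, 7, 7, 6, 6, 6, 6]}
majority = [Counter(p).most_common(1)[0][0] for p in predictions.values()]
first = [p[0] for p in predictions.values()]
print(majority)  # [0, 5, 6]
print(first)     # [2, 5, 7]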
Example 6
 def __call__(self, pc: PointCloud):
     """ Centers the given PointCloud only with respect to vertices. If the PointCloud is an HybridCloud, the nodes
         get centered as well but are not taken into account for centroid calculation. Operates in-place for the
         given PointCloud
     """
     centroid = np.mean(pc.vertices, axis=0)
     if self.distr == 'const':
         offset = self.center_loc
     elif self.distr == 'uniform':
         offset = ((np.random.random(min(1, len(self.center_loc))) - 0.5) *
                   2 * self.center_loc).squeeze()
     else:
         raise ValueError(f'Given distr value {self.distr} is not implemented.')
     pc.move(-centroid + offset)
Example 7
def test_ensemble2pointcloud():
    pc1 = PointCloud(np.array([[i, i, i] for i in range(10)]),
                     np.array([[i] for i in range(10)]),
                     obj_bounds={
                         'obj1': np.array([0, 5]),
                         'obj2': np.array([5, 10])
                     },
                     encoding={
                         'e1': 1,
                         'e2': 2
                     })
    pc2 = PointCloud(np.array([[i, i, i] for i in range(10)]),
                     np.array([[i] for i in range(10)]))
    hc = HybridCloud(np.array([[1, 1, 1], [2, 2, 2]]),
                     np.array([[0, 1]]),
                     vertices=np.array([[i, i, i] for i in range(10)]),
                     encoding={
                         'e1': 1,
                         'e3': 3
                     },
                     obj_bounds={
                         'obj3': np.array([0, 2]),
                         'obj4': np.array([2, 10])
                     })
    ce = CloudEnsemble({'pc1': pc1, 'pc2': pc2}, hybrid=hc)
    result = ensembles.ensemble2pointcloud(ce)

    obj_bounds = {
        'obj3': np.array([0, 2]),
        'obj4': np.array([2, 10]),
        'obj1': np.array([10, 15]),
        'obj2': np.array([15, 20]),
        'pc2': np.array([20, 30])
    }

    encoding = {'e1': 1, 'e2': 2, 'e3': 3}

    assert np.all(result.nodes == hc.nodes)
    assert np.all(result.edges == hc.edges)
    assert np.all(result.vertices == np.concatenate(
        (hc.vertices, pc1.vertices, pc2.vertices), axis=0))
    assert np.all(result.labels == np.concatenate(
        (np.ones((10, 1)) * -1, pc1.labels, pc2.labels), axis=0))
    assert len(result.obj_bounds) == len(obj_bounds)
    for key in obj_bounds:
        assert np.all(obj_bounds[key] == result.obj_bounds[key])
    assert len(result.encoding) == len(encoding)
    for key in result.encoding:
        assert result.encoding[key] == encoding[key]
Example 8
def test_sanity():
    pc = PointCloud()
    assert len(pc.vertices) == 0
    assert len(pc.labels) == 0
    assert len(pc.pred_labels) == 0
    assert len(pc.types) == 0
    assert len(pc.features) == 0

    pc = PointCloud(vertices=np.array([[1, 1, 1], [2, 2, 2]]),
                    labels=np.array([1, 2]))
    assert len(pc.vertices) == 2
    assert len(pc.labels) == 2
    assert len(pc.pred_labels) == len(pc.vertices)
    assert len(pc.types) == 0
    assert len(pc.features) == 0
Example 9
def sample_mesh_poisson_disk(hm: HybridMesh, sample_num: int) -> PointCloud:
    """ Uses poisson disk sampling and maps existing labels using a KDTree. It can not be guaranteed
        that the requested number of sample points can be generated. It can differ from the requested
        number by a small amount (around +-2%).

    Args:
        hm: The MeshCloud from which the samples should be generated
        sample_num: Requested number (approximate!) of sample points.

    Returns:
        PointCloud consisting of sampled points.
    """
    vertices = hm.vertices.astype(float)
    s_vertices, s_normals = pcu.sample_mesh_poisson_disk(vertices, hm.faces, np.array([]), ceil(sample_num))

    # map labels from input cloud to sample
    labels = None
    types = None

    tree = cKDTree(hm.vertices)
    dist, ind = tree.query(s_vertices, k=1)
    if len(hm.labels) > 0:
        labels = hm.labels[ind]
    if len(hm.types) > 0:
        types = hm.types[ind]

    result = PointCloud(vertices=s_vertices.reshape(-1, 3), labels=labels, types=types)
    return result
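The label mapping step can be shown standalone with SciPy only (no point_cloud_utils required): each sampled point simply inherits the label of its nearest original vertex.

import numpy as np
from scipy.spatial import cKDTree

orig = np.array([[0., 0., 0.], [1., 1., 1.]])
orig_labels = np.array([0, 1])
samples = np.array([[0.1, 0., 0.], [0.9, 1., 1.1]])
_, ind = cKDTree(orig).query(samples, k=1)
print(orig_labels[ind])  # [0 1]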
Example 10
def load_obj(
        data_type: str, file: str
) -> Union[HybridMesh, HybridCloud, PointCloud, CloudEnsemble]:
    if data_type == 'obj':
        return basics.load_pkl(file)
    if data_type == 'ce':
        return ensembles.ensemble_from_pkl(file)
    if data_type == 'hc':
        hc = HybridCloud()
        return hc.load_from_pkl(file)
    if data_type == 'hm':
        hm = HybridMesh()
        return hm.load_from_pkl(file)
    else:
        # any other data_type falls back to a plain PointCloud
        pc = PointCloud()
        return pc.load_from_pkl(file)
Example 11
def test_composition():
    pc = PointCloud(np.array([[10, 10, 10], [20, 20, 20]]))
    hc = HybridCloud(np.array([[10, 10, 10], [20, 20, 20]]),
                     np.array([[0, 1]]),
                     vertices=np.array([[10, 10, 10], [20, 20, 20]]))

    transform = clouds.Compose([
        clouds.Normalization(10),
        clouds.RandomRotate((60, 60)),
        clouds.Center()
    ])
    transform(pc)
    transform(hc)

    assert np.all(
        np.round(np.mean(pc.vertices, axis=0)) == np.array([0, 0, 0]))
    assert np.all(
        np.round(np.mean(hc.vertices, axis=0)) == np.array([0, 0, 0]))

    dummy = np.array([[10, 10, 10], [20, 20, 20]]) / 10
    angle_range = (60, 60)
    angles = np.random.uniform(angle_range[0], angle_range[1], (1, 3))[0]
    rot = Rot.from_euler('xyz', angles, degrees=True)
    dummy = rot.apply(dummy)
    centroid = np.mean(dummy, axis=0)
    dummy = dummy - centroid

    assert np.all(pc.vertices == dummy)
    assert np.all(hc.vertices == dummy)
Example 12
def filter_labels(cloud: PointCloud, labels: list) -> PointCloud:
    """ Returns a pointcloud which contains only those vertices which labels occuring in 'labels'. If 'cloud'
        is a HybridCloud, the skeleton is taken as it is and should later be filtered with the 'filter_traverser'
        method.

    Args:
        cloud: PointCloud which should be filtered.
        labels: List of labels for which the corresponding vertices should be extracted.

    Returns:
        PointCloud object which contains only vertices with the filtered labels. Skeletons in case of HybridClouds are
        the same.
    """
    mask = np.zeros(len(cloud.labels), dtype=bool)
    for label in labels:
        mask = np.logical_or(mask, cloud.labels == label)

    if isinstance(cloud, HybridCloud):
        f_cloud = HybridCloud(cloud.nodes,
                              cloud.edges,
                              vertices=cloud.vertices[mask],
                              labels=cloud.labels[mask])
    else:
        f_cloud = PointCloud(cloud.vertices[mask], labels=cloud.labels[mask])
    return f_cloud
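A short usage sketch, with PointCloud constructed as in the tests on this page:

pc = PointCloud(np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]]),
                labels=np.array([1, 2, 1]))
filtered = filter_labels(pc, [1])
# filtered is expected to hold the two vertices labelled 1: [0, 0, 0] and [2, 2, 2]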
Example 13
def prepare_bfs(hc: HybridCloud, bfs: np.ndarray) -> PointCloud:
    """ Enriches the BFS result with small point cubes for better visualization.

    Args:
        hc: The HybridCloud on whose skeleton the BFS was performed
        bfs: The result of the BFS in form of an array of node indices.

    Returns:
        PointCloud with enriched BFS result.
    """

    nodes = hc.nodes
    bfs = bfs.astype(int)
    bfs_skel = nodes[bfs]
    # create small point cubes around BFS points for better visualization
    sphere_size = 1000
    size = len(bfs_skel)
    a_bfs_skel = np.zeros((size * sphere_size, 3))
    for i in range(sphere_size):
        a_bfs_skel[i * size:i * size + size] = bfs_skel
    a_bfs_skel += (np.random.random((len(a_bfs_skel), 3)) - 0.5) * 500

    labels = np.ones(len(a_bfs_skel))
    labels[:] = 9
    return PointCloud(a_bfs_skel, labels=labels)
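The cube construction in miniature: every BFS node is replicated sphere_size times and displaced by uniform noise in [-250, 250) per axis, which renders as a small point cube around the node. Illustrative values only:

import numpy as np

bfs_skel = np.array([[0., 0., 0.], [1000., 0., 0.]])
rep = np.tile(bfs_skel, (5, 1))  # 5 copies per node instead of 1000
rep += (np.random.random(rep.shape) - 0.5) * 500
print(rep.shape)  # (10, 3)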
Example 14
def test_prediction_smoothing():
    pc = PointCloud(np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1],
                              [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2],
                              [3, 3, 3], [3, 3, 3], [3, 3, 3]]),
                    labels=np.array([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3]),
                    predictions={
                        0: [2, 0, 0, 1],
                        1: [1],
                        2: [1],
                        4: [7, 7, 6],
                        5: [7],
                        6: [7, 7],
                        8: [8]
                    })

    expected = np.array([1, 1, 1, -1, 7, 7, 7, -1, 7, -1, -1]).reshape(-1, 1)
    assert np.all(pc.prediction_smoothing(3) == expected)
Example 15
def test_random_variation():
    np.random.seed(0)
    pc = PointCloud(np.array([[10, 10, 10], [20, 20, 20]]))
    transform = clouds.RandomVariation((-10000, 10000))
    transform(pc)

    expected = np.array([[986., 4314., 2065.], [918., -1507., 2938.]])
    assert np.all(np.round(pc.vertices) == expected)
Example 16
def test_center():
    pc = PointCloud(np.array([[10, 10, 10], [20, 20, 20]]))
    hc = HybridCloud(np.array([[10, 10, 10], [20, 20, 20]]),
                     np.array([[0, 1]]),
                     vertices=np.array([[10, 10, 10], [20, 20, 20]]))
    relation = hc.vertices[0] - hc.nodes[1]

    pc.move(np.array([1, 1, 1]))
    hc.move(np.array([1, 1, 1]))

    assert np.all(pc.vertices == np.array([[11, 11, 11], [21, 21, 21]]))
    assert np.all(hc.vertices == np.array([[11, 11, 11], [21, 21, 21]]))
    assert np.all(hc.nodes == np.array([[11, 11, 11], [21, 21, 21]]))
    assert np.all(hc.vertices[0] - hc.nodes[1] == relation)

    pc = PointCloud(np.array([[10, 10, 10], [20, 20, 20]]))
    hc = HybridCloud(np.array([[10, 10, 10], [20, 20, 20]]),
                     np.array([[0, 1]]),
                     vertices=np.array([[10, 10, 10], [20, 20, 20]]))
    relation = hc.vertices[0] - hc.nodes[1]

    # test transformation class from processing.clouds
    transform = clouds.Center()
    transform(pc)
    transform(hc)

    assert np.all(np.mean(pc.vertices, axis=0) == np.array([0, 0, 0]))
    assert np.all(np.mean(hc.vertices, axis=0) == np.array([0, 0, 0]))
    assert np.all(hc.vertices[0] - hc.nodes[1] == relation)
Example 17
def test_normalization():
    pc = PointCloud(np.array([[10, 10, 10], [20, 20, 20]]))
    hc = HybridCloud(np.array([[10, 10, 10], [20, 20, 20]]),
                     np.array([[0, 1]]),
                     vertices=np.array([[10, 10, 10], [20, 20, 20]]))

    pc.scale(-10)
    hc.scale(-10)

    assert np.all(pc.vertices == np.array([[1, 1, 1], [2, 2, 2]]))
    assert np.all(hc.vertices == np.array([[1, 1, 1], [2, 2, 2]]))
    assert np.all(hc.nodes == np.array([[1, 1, 1], [2, 2, 2]]))

    # test transformation class from processing.clouds
    pc = PointCloud(np.array([[10, 10, 10], [20, 20, 20]]))
    hc = HybridCloud(np.array([[10, 10, 10], [20, 20, 20]]),
                     np.array([[0, 1]]),
                     vertices=np.array([[10, 10, 10], [20, 20, 20]]))

    transform = clouds.Normalization(10)
    transform(pc)
    transform(hc)

    assert np.all(pc.vertices == np.array([[1, 1, 1], [2, 2, 2]]))
    assert np.all(hc.vertices == np.array([[1, 1, 1], [2, 2, 2]]))
    assert np.all(hc.nodes == np.array([[1, 1, 1], [2, 2, 2]]))
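The assertions above imply a sign convention for scale(): a negative factor divides instead of multiplies, so scale(-10) maps 10 to 1, matching Normalization(10). A sketch of that convention (an inference from the test, not the library source):

import numpy as np

def scale_convention(vertices: np.ndarray, factor: float) -> np.ndarray:
    return vertices * factor if factor > 0 else vertices / -factor

print(scale_convention(np.array([[10., 10., 10.]]), -10))  # [[1. 1. 1.]]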
Example 18
def extract_subset(ensemble: CloudEnsemble,
                   nodes: np.ndarray) -> Tuple[PointCloud, np.ndarray]:
    """ Extracts all vertices which are associated with the given nodes by the mapping dict verts2node of
        the ensemble.

    Args:
        ensemble: CloudEnsemble from which the vertices should be extracted.
        nodes: node index array which was generated by a local BFS.

    Returns:
        PointCloud with the respective vertices.
    """
    idcs = []
    for i in nodes:
        idcs.extend(ensemble.verts2node[i])
    obj_bounds = {}
    offset = 0
    idcs = np.array(idcs)
    if ensemble.flattened.obj_bounds is not None:
        for key in ensemble.flattened.obj_bounds:
            bounds = ensemble.flattened.obj_bounds[key]
            num = len(idcs[np.logical_and(idcs >= bounds[0],
                                          idcs < bounds[1])])
            if num != 0:
                obj_bounds[key] = np.array([offset, offset + num])
                offset += num
    else:
        obj_bounds = None
    if len(idcs) == 0:
        return PointCloud(), idcs
    if len(ensemble.flattened.features) == 0:
        features = None
    else:
        features = ensemble.flattened.features[idcs]
    return PointCloud(vertices=ensemble.flattened.vertices[idcs],
                      labels=ensemble.flattened.labels[idcs],
                      features=features,
                      obj_bounds=obj_bounds,
                      no_pred=ensemble.flattened.no_pred,
                      encoding=ensemble.flattened.encoding), idcs
Example 19
def sample_objectwise(pc: PointCloud,
                      vertex_number: int,
                      random_seed=None) -> Tuple[PointCloud, np.ndarray]:
    """ Creates a (pseudo)random sample point cloud with a specific number of points from the given subset of mesh
        vertices. If different objects are present within the PointCloud (indicated by the obj_bounds attribute),
        the number of sample points for each object is calculated by (number of object vertices)/(total number of
        vertices) * vertex_number. For each object, the method sample_cloud is used. If obj_bounds of the PointCloud
        is None, this method is identical to sample_cloud.

    Args:
        pc: PointCloud which should be sampled.
        vertex_number: The number of points which should be sampled.
        random_seed: Random seed for making the sampling deterministic.

    Returns:
        PointCloud with sampled points (and labels) and indices of the original vertices where samples are from.
    """
    if pc.obj_bounds is None:
        return sample_cloud(pc, vertex_number, random_seed)
    curr_num = 0
    samples = []
    names = []
    ixs = np.zeros(vertex_number)
    for key in pc.obj_bounds:
        bounds = pc.obj_bounds[key]
        if bounds[1] - bounds[0] != 0:
            sample_num = (bounds[1] - bounds[0]) / len(
                pc.vertices) * vertex_number
            if curr_num + ceil(sample_num) <= vertex_number:
                sample_num = ceil(sample_num)
            else:
                sample_num = vertex_number - curr_num
            curr_cloud = PointCloud(pc.vertices[bounds[0]:bounds[1]],
                                    labels=pc.labels[bounds[0]:bounds[1]],
                                    features=pc.features[bounds[0]:bounds[1]])
            sample, sample_ixs = sample_cloud(curr_cloud, sample_num,
                                              random_seed)
            samples.append(sample)
            names.append(key)
            ixs[curr_num:curr_num + len(sample_ixs)] = sample_ixs
            curr_num += sample_num

    # use merge method for correct object boundary information
    result_sample = merge_clouds(samples, names)
    result_sample.add_no_pred(pc.no_pred)
    result_sample.set_encoding(pc.encoding)
    result_sample.remove_obj_bounds()
    return result_sample, ixs
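The proportional allocation in the loop above, worked by hand: each object requests ceil(obj_size / total * vertex_number) points, and later objects are capped so the overall total never exceeds vertex_number.

from math import ceil

total, vertex_number = 100, 60
for obj_size in (25, 75):
    print(ceil(obj_size / total * vertex_number))  # 15, then 45 (15 + 45 = 60)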
Example 20
def ensemble_from_pkl(path):
    """ Loads an ensemble from an existing pickle file.

    Args:
        path: File path of pickle file.
    """
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        raise FileNotFoundError(f"File with name {path} was not found at this location.")
    with open(path, 'rb') as f:
        obj = pickle.load(f)
    if not isinstance(obj, dict):
        raise ValueError(f"Object at {path} is no valid ensemble file.")
    try:
        h = HybridCloud(**obj['hybrid'])
    except TypeError:
        h = HybridMesh(**obj['hybrid'])
    cloudlist = {}
    for key in obj['clouds']:
        try:
            cloudlist[key] = PointCloud(**obj['clouds'][key])
        except TypeError:
            try:
                cloudlist[key] = HybridCloud(**obj['clouds'][key])
            except TypeError:
                cloudlist[key] = HybridMesh(**obj['clouds'][key])
    # check for empty clouds
    empty = []
    for key in cloudlist:
        if cloudlist[key].vertices is None:
            empty.append(key)
    for key in empty:
        cloudlist.pop(key, None)
    try:
        predictions = obj['predictions']
    except KeyError:
        predictions = None
    try:
        verts2node = obj['verts2node']
    except KeyError:
        verts2node = None
    return CloudEnsemble(cloudlist,
                         h,
                         obj['no_pred'],
                         predictions=predictions,
                         verts2node=verts2node)
Example 21
def map_labels(cloud: PointCloud, labels: list, target) -> PointCloud:
    """ Returns a PointCloud where all labels given in the labels list got mapped to the target label. E.g. if the
        label array was [1,1,2,3] and the label 1 and 2 were mapped onto the target 3, the label array now is [3,3,3,3].
        This method works for PointClouds and HybridClouds, not for more specific classes (HybridMesh is returned as
        HybridCloud).

    Args:
        cloud: The PointCloud whose labels should get merged.
        labels: A list of keys of the encoding dict of the PointCloud, or a list of actual labels which should get
            mapped onto the target.
        target: A key of the encoding dict of the PointCloud, or an actual label on which the labels should be mapped.

    Returns:
        A PointCloud where the labels were replaced by the target.
    """
    mask = np.zeros(cloud.labels.shape, dtype=bool)
    for label in labels:
        # translate encoding keys to their numeric labels first
        if cloud.encoding is not None and label in cloud.encoding.keys():
            label = cloud.encoding[label]
        mask = np.logical_or(mask, cloud.labels == label)

    if cloud.encoding is not None and target in cloud.encoding.keys():
        target = cloud.encoding[target]

    new_labels = cloud.labels.copy()
    new_labels[mask] = target

    if cloud.encoding is not None:
        new_encoding = cloud.encoding.copy()
        for label in labels:
            new_encoding.pop(label, None)
    else:
        new_encoding = None

    if isinstance(cloud, HybridCloud):
        new_cloud = HybridCloud(cloud.nodes,
                                cloud.edges,
                                vertices=cloud.vertices,
                                labels=new_labels,
                                encoding=new_encoding)
    else:
        new_cloud = PointCloud(cloud.vertices,
                               labels=new_labels,
                               encoding=new_encoding)
    return new_cloud
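A usage sketch matching the docstring example (PointCloud as used in the tests on this page):

pc = PointCloud(np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3]]),
                labels=np.array([1, 1, 2, 3]))
mapped = map_labels(pc, [1, 2], 3)
# mapped.labels is expected to be all 3s: [3, 3, 3, 3]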
Example 22
def voxel_down(ce: CloudEnsemble,
               voxel_size: Union[dict, int] = 500) -> CloudEnsemble:
    if type(voxel_size) is not dict:
        voxel_size = dict(hc=voxel_size)
        for k in ce.clouds:
            voxel_size[k] = voxel_size['hc']
    hc = ce.hc
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(hc.vertices)
    pcd, idcs = pcd.voxel_down_sample_and_trace(voxel_size['hc'],
                                                pcd.get_min_bound(),
                                                pcd.get_max_bound())
    idcs = np.max(idcs, axis=1)
    new_labels = None
    new_types = None
    new_features = None
    if len(hc.labels) != 0:
        new_labels = hc.labels[idcs]
    if len(hc.types) != 0:
        new_types = hc.types[idcs]
    if len(hc.features) != 0:
        new_features = hc.features[idcs]
    new_hc = HybridCloud(hc.nodes,
                         hc.edges,
                         vertices=np.asarray(pcd.points),
                         labels=new_labels,
                         types=new_types,
                         features=new_features,
                         encoding=hc.encoding,
                         node_labels=hc.node_labels,
                         no_pred=hc.no_pred)
    new_clouds = {}
    for key in ce.clouds:
        pc = ce.clouds[key]
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(ce.clouds[key].vertices)
        pcd, idcs = pcd.voxel_down_sample_and_trace(voxel_size[key],
                                                    pcd.get_min_bound(),
                                                    pcd.get_max_bound())
        idcs = np.max(idcs, axis=1)
        new_pc = PointCloud(np.asarray(pcd.points),
                            labels=pc.labels[idcs],
                            encoding=pc.encoding,
                            no_pred=pc.no_pred)
        new_clouds[key] = new_pc

    return CloudEnsemble(new_clouds, new_hc, no_pred=ce.no_pred)
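Why np.max(idcs, axis=1) works above: Open3D's voxel_down_sample_and_trace reports, per output point, the original indices that fell into its voxel, padding unused slots with -1, so the row-wise max picks one valid original index per voxel. Illustrative values only:

import numpy as np

trace = np.array([[3, -1, -1],
                  [0,  4, -1]])
print(np.max(trace, axis=1))  # [3 4]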
Example 23
    def load_cmp(self, idx: int, seed: int):
        """ Method for comparing ground truth with processed data. Ground truth must be given in first file list,
            processed files in the second file list.
        """

        while idx < len(self.files1):
            gt_file = self.files1[idx]
            pred_file = self.files2[idx]

            slashs = [pos for pos, char in enumerate(gt_file) if char == '/']
            filename = gt_file[slashs[-1] + 1:-4]

            gt = ensembles.ensemble_from_pkl(gt_file)
            pred = PointCloud().load_from_pkl(pred_file)

            res = self.core_next(gt, pred, filename, seed=seed)
            if res is None:
                return
            else:
                idx += res
Example 24
def filter_objects(cloud: PointCloud, objects: list) -> PointCloud:
    """ Creates a PointCloud which contains only the objects given in objects. There must exist an obj_bounds dict in
     order to use this method. The dict gets updated with the new object boundaries.

    Args:
        cloud: The initial Pointcloud from which objects should be filtered.
        objects: List of objects where each entry is also a key in the obj_bounds dict of the cloud.

    Returns:
        A PointCloud containing only the desired objects.
    """
    if cloud.obj_bounds is None:
        raise ValueError(
            "Objects cannot be filtered because obj_bounds dict doesn't exist (is None)."
        )
    size = 0
    for obj in objects:
        bounds = cloud.obj_bounds[obj]
        size += bounds[1] - bounds[0]

    new_vertices = np.zeros((size, 3))
    new_labels = None
    if cloud.labels is not None:
        new_labels = np.zeros((size, 1))
    new_obj_bounds = {}

    offset = 0
    for obj in objects:
        bounds = cloud.obj_bounds[obj]
        obj_size = bounds[1] - bounds[0]
        new_vertices[offset:offset + obj_size] = cloud.vertices[bounds[0]:bounds[1]]
        if cloud.labels is not None:
            new_labels[offset:offset + obj_size] = cloud.labels[bounds[0]:bounds[1]]
        new_obj_bounds[obj] = np.array([offset, offset + obj_size])
        # advance the write offset, otherwise every object would overwrite the first block
        offset += obj_size

    return PointCloud(new_vertices,
                      labels=new_labels,
                      encoding=cloud.encoding,
                      obj_bounds=new_obj_bounds)
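A short usage sketch, with obj_bounds as in the tests on this page ('obj1' and 'obj2' are hypothetical keys):

pc = PointCloud(np.array([[i, i, i] for i in range(10)]),
                labels=np.array([[i] for i in range(10)]),
                obj_bounds={'obj1': np.array([0, 5]),
                            'obj2': np.array([5, 10])})
only_obj2 = filter_objects(pc, ['obj2'])
# only_obj2.obj_bounds is expected to be {'obj2': array([0, 5])}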
Example 25
def filter_preds(cloud: PointCloud) -> PointCloud:
    """ Returns a PointCloud with only those vertices and labels for which predictions exist. The predictions of
        these points get transfered to the returned PointCloud, all other attributes of the original cloud (encoding,
        obj_bounds, ...) are lost.

    Args:
        cloud: The PointCloud from which vertices with existing predictions should be filtered.

     Returns:
        PointCloud containing only vertices and labels with existing predictions.
    """
    idcs = []
    new_predictions = {}
    counter = 0
    for key in cloud.predictions:
        if len(cloud.predictions[key]) != 0:
            idcs.append(key)
            new_predictions[counter] = cloud.predictions[key]
            counter += 1
    return PointCloud(cloud.vertices[idcs],
                      cloud.labels[idcs],
                      predictions=new_predictions)
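A short usage sketch: vertices 0 and 2 carry predictions, vertex 1 does not, so only those two survive and the prediction keys are re-indexed from 0.

pc = PointCloud(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]),
                labels=np.array([1, 2, 3]),
                predictions={0: [2], 2: [7, 7]})
f_pc = filter_preds(pc)
# f_pc.predictions is expected to be {0: [2], 1: [7, 7]}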
Example 26
def visualize_bfs(hc: HybridCloud,
                  bfs: np.ndarray,
                  capture: bool = False,
                  path="",
                  random_seed: int = 4):
    """ Uses open3d to visualize the result of a breadth first search on the skeleton of the given HybridCloud. The
        image can be saved without showing.

    Args:
        hc: The HybridCloud on whose skeleton the BFS was performed
        bfs: The result of the BFS in form of an array of node indices.
        capture: Flag to only save screenshots without showing the cloud.
        path: File path where the screenshot should be saved.
        random_seed: Seed for reproducible colors.
    """
    nodes = hc.nodes
    bfs = bfs.astype(int)
    pure_skel = np.delete(nodes, bfs, axis=0)
    pure_skel = PointCloud(pure_skel, labels=np.zeros(len(pure_skel)))

    bfs_skel = prepare_bfs(hc, bfs)

    pcd = build_pcd([pure_skel, bfs_skel], random_seed=random_seed)
    core_visualizer(pcd, capture=capture, path=path)
Example 27
def test_rotate_randomly():
    angle_range = (60, 60)

    angles = np.random.uniform(angle_range[0], angle_range[1], (1, 3))[0]
    rot = Rot.from_euler('xyz', angles, degrees=True)

    pc = PointCloud(np.array([[10, 10, 10], [20, 20, 20]]))
    hc = HybridCloud(np.array([[10, 10, 10], [20, 20, 20]]),
                     np.array([[0, 1]]),
                     vertices=np.array([[10, 10, 10], [20, 20, 20]]))

    pc.rotate_randomly(angle_range)
    hc.rotate_randomly(angle_range)

    assert np.all(
        pc.vertices == rot.apply(np.array([[10, 10, 10], [20, 20, 20]])))
    assert np.all(
        hc.vertices == rot.apply(np.array([[10, 10, 10], [20, 20, 20]])))
    assert np.all(
        hc.nodes == rot.apply(np.array([[10, 10, 10], [20, 20, 20]])))

    pc = PointCloud(np.array([[10, 10, 10], [20, 20, 20]]))
    hc = HybridCloud(np.array([[10, 10, 10], [20, 20, 20]]),
                     np.array([[0, 1]]),
                     vertices=np.array([[10, 10, 10], [20, 20, 20]]))

    # test transformation class from processing.clouds
    transform = clouds.RandomRotate(angle_range, apply_flip=False)
    transform(pc)
    transform(hc)

    assert np.all(
        pc.vertices == rot.apply(np.array([[10, 10, 10], [20, 20, 20]])))
    assert np.all(
        hc.vertices == rot.apply(np.array([[10, 10, 10], [20, 20, 20]])))
    assert np.all(
        hc.nodes == rot.apply(np.array([[10, 10, 10], [20, 20, 20]])))
Example 28
def test_transform_sanity():
    vertices = np.random.random((10, 3))
    pc = PointCloud(vertices=vertices)
    pc.shear()
    assert np.all(pc.vertices != vertices)

    pc = PointCloud(vertices=vertices)
    pc.scale(2)
    assert np.all(pc.vertices != vertices)

    pc = PointCloud(vertices=vertices)
    pc.rotate_randomly()
    assert np.all(pc.vertices != vertices)

    pc = PointCloud(vertices=vertices)
    pc.add_noise()
    assert np.all(pc.vertices != vertices)

    pc = PointCloud(vertices=vertices)
    pc.mult_noise()
    assert np.all(pc.vertices != vertices)

    pc = PointCloud(vertices=vertices)
    pc.move(np.array([1, 2, 3]))
    assert np.all(pc.vertices != vertices)
Example 29
        # ads.hc.save2pkl(base_path + f'/merged/ads_{sso_id}.pkl')
        # abt.hc.save2pkl(base_path + f'/merged/abt_{sso_id}.pkl')
        # dnh.hc.save2pkl(base_path + f'/merged/dnh_{sso_id}.pkl')

        preds = merge(
            ads.hc.pred_labels, {
                1: (abt.hc.pred_labels, [(1, 3), (2, 4), (0, 1)]),
                0: (dnh.hc.pred_labels, [(1, 5), (2, 6)])
            })
        obj = objects.load_obj('ce', abt_preds[0])
        obj.remove_nodes([-2])
        hc = obj.hc
        hc.set_pred_labels(preds)

        verts, labels = get_pred_verts(hc.vertices, preds)
        pc = PointCloud(vertices=verts, labels=labels)
        pc.save2pkl(base_path + f'/merged/merged_{sso_id}.pkl')

        # --- remove points without prediction ---
        mask = np.logical_and(hc.labels != -1, hc.pred_labels != -1)
        vertex_labels, vertex_predictions = hc.labels[mask], hc.pred_labels[mask]
        mask = np.logical_and(hc.node_labels != -1, hc.pred_node_labels != -1)
        node_labels, node_predictions = hc.node_labels[mask], hc.pred_node_labels[mask]

        sso_report = {}
        mode = 'mv'

        coverage = hc.get_coverage()
        sso_report['cov'] = coverage
Example 30
    def __getitem__(self, item: Union[int, Tuple[str, int]]):
        # Get new sample from base dataloader, skip samples without any points
        ixs = np.empty(0)
        sample = None
        while sample is None:
            if self._specific:
                sample, ixs = self._ch[item]
                # if ixs is None, the requested chunk doesn't exist
                if ixs is None:
                    sample = PointCloud(vertices=np.zeros((self._sample_num, 3)),
                                        labels=np.zeros(self._sample_num),
                                        features=np.zeros((self._sample_num, self._feat_dim)))
                    ixs = np.zeros(self._sample_num)
                    break
            else:
                sample = self._ch[item]
                if sample is not None:
                    if len(sample.vertices) == 0:
                        sample = None
                item += 1

        if sample.labels is not None:
            labels = sample.labels.reshape(len(sample.labels))
        else:
            labels = np.array([])

        pts = torch.from_numpy(sample.vertices).float()
        lbs = torch.from_numpy(labels).long()
        features = torch.from_numpy(sample.features).float()

        if self._specific:
            ixs = torch.from_numpy(ixs)
        # --- create masks marking which vertices should not get predictions. This is done by mapping
        # the no_pred encodings (set during dataset generation, see SyConn => jklimesch => ground_truth)
        # to their labels (mapping only works if encodings are set right, e.g. {'sy': 23, ...})
        no_pred_labels = []
        for name in sample.no_pred:
            if name in sample.encoding.keys():
                no_pred_labels.append(sample.encoding[name])
        if self._extend_no_pred is not None:
            for label in self._extend_no_pred:
                no_pred_labels.append(label)
        # build mask for all indices which should not be used for loss calculation
        idcs = np.isin(sample.labels, no_pred_labels).reshape(-1)
        if self._padding is not None:
            # Mark points which were added as padding for later removal / for ignoring them during loss calculation
            idcs = np.logical_or(
                idcs, np.all(sample.vertices == self._padding, axis=1))
        idcs = torch.from_numpy(idcs)
        o_mask = torch.ones(len(sample.vertices),
                            self._nclasses,
                            dtype=torch.bool)
        l_mask = torch.ones(len(sample.vertices), dtype=torch.bool)
        o_mask[idcs] = False
        l_mask[idcs] = False

        if self._specific:
            return {
                'pts': pts,
                'features': features,
                'target': lbs,
                'o_mask': o_mask,
                'l_mask': l_mask,
                'map': ixs
            }
        else:
            return {
                'pts': pts,
                'features': features,
                'target': lbs,
                'o_mask': o_mask,
                'l_mask': l_mask
            }