Example #1
    def undistort_points(self,
                         pts_Local_batches: list,
                         timestamps: np.ndarray,
                         reference_ts: int = -1,
                         to_world: bool = False,
                         dtype=np.float64):
        """Transform 3D points that have not been sampled simultaneously to their 'correct' place
        referential.

        Args:
            pts_Local_batches: a list of arrays of [N, 3] points to be transformed
            timestamps: the N timestamps (common for all point batches)
            to_world:   If 'True', leave undistorted points in 'world' referential, otherwise
                        project them back to local referential
            reference_ts:   only used if to_world == False, let the use chose at what time
                            undistorted points are projected back to the local referential
                            (useful to compare points from different sensors in a common local referential)
            dtype: the output numpy data type

        Returns:
            The transformed points
        """

        warn_if_less_than_64bit(dtype)

        provider = self.datasource.sensor.platform.egomotion_provider
        tf_Ego_from_Local = self.compute_transform(provider.referential_name,
                                                   False,
                                                   dtype=dtype)
        traj_EgoZero_from_Ego = provider.compute_trajectory(
            timestamps, provider.tf_Global_from_EgoZero, dtype=dtype)

        for pts_Local in pts_Local_batches:

            pts_Ego = linalg.map_points(tf_Ego_from_Local, pts_Local)

            pts_EgoZero = provider.apply_trajectory(traj_EgoZero_from_Ego,
                                                    pts_Ego)

            if to_world:
                pts_Local[:] = pts_EgoZero
            else:
                if reference_ts < 0:
                    reference_ts = self.timestamp

                tf_Global_from_Ego = provider.get_Global_from_Ego_at(
                    reference_ts, dtype=dtype)
                tf_Local_from_EgoZero = linalg.tf_inv(
                    tf_Ego_from_Local) @ linalg.tf_inv(
                        tf_Global_from_Ego) @ provider.tf_Global_from_EgoZero
                pts_Local[:] = linalg.map_points(tf_Local_from_EgoZero,
                                                 pts_EgoZero)
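A minimal usage sketch for the method above; the sample object and its point_cloud() / timestamps
accessors are illustrative assumptions, only the undistort_points signature comes from the code:

# hypothetical echo sample exposing local points and their per-point timestamps
pts_local = sample.point_cloud()      # assumed [N, 3] array in the sensor's local referential
ts = sample.timestamps                # assumed N per-point timestamps

batches = [pts_local]                 # the method mutates each batch in place
sample.undistort_points(batches, ts, reference_ts=sample.timestamp, to_world=False)
undistorted_local = batches[0]        # points re-expressed in the local referential at reference_ts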
Example #2
def merge_bvhs(bvhs, matrices=None):
    """Merge several BVHs into a single BVH, optionally transforming each input's vertices
    with the corresponding 4x4 matrix in 'matrices'."""

    vertices_list = []
    triangles_list = []

    # cumulative vertex counts, used to re-index each BVH's triangles into the merged vertex array
    vertex_offsets = [0]
    for i, bvh in enumerate(bvhs):
        if bvh is None:
            # keep empty placeholders so triangles_list and vertices_list stay aligned with 'bvhs'
            # (note: no entry is appended to vertex_offsets for skipped BVHs)
            triangles_list.append(np.empty((0, 3), 'u4'))
            vertices_list.append(np.empty((0, 3), 'f4'))
            continue

        if matrices is not None and matrices[i] is not None:
            vertices_list.append(linalg.map_points(matrices[i], bvh.vertices))
        else:
            vertices_list.append(bvh.vertices)
        offset = vertex_offsets[-1]
        triangles_list.append(bvh.triangles + offset)
        offset += bvh.vertices.shape[0]
        vertex_offsets.append(offset)

    vertices = np.vstack(vertices_list)
    triangles = np.vstack(triangles_list)

    # for each merged triangle, remember which input BVH it came from
    triangles_mapping = np.empty((triangles.shape[0]), 'u4')

    triangle_offsets = [0]
    offset_from = 0
    for i, t in enumerate(triangles_list):
        t_size = 0 if t is None else t.shape[0]
        triangles_mapping[offset_from:offset_from + t_size] = i
        offset_from += t_size
        triangle_offsets.append(offset_from)

    return BVH(triangles,
               vertices), triangles_mapping, triangle_offsets, vertex_offsets
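A small usage sketch, assuming BVH(triangles, vertices) keeps its arguments as the .triangles and
.vertices attributes that merge_bvhs reads; the arrays and the 4x4 matrix are illustrative:

# two single-triangle meshes; the second slot is empty (None), the last mesh is shifted by +1 m on x
bvh_a = BVH(np.array([[0, 1, 2]], 'u4'),
            np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]], 'f4'))
bvh_b = BVH(np.array([[0, 1, 2]], 'u4'),
            np.array([[0., 0., 1.], [1., 0., 1.], [0., 1., 1.]], 'f4'))
tf_b = np.eye(4, dtype='f4')
tf_b[0, 3] = 1.0

merged, tri_map, tri_offsets, vtx_offsets = merge_bvhs([bvh_a, None, bvh_b],
                                                       matrices=[None, None, tf_b])
# tri_map[k] is the index of the input BVH that produced merged triangle k;
# tri_offsets / vtx_offsets give the slice boundaries of each input inside the merged arrays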
Example #3
    def apply_trajectory(self, trajectory: np.ndarray, points: np.ndarray) -> np.ndarray:
        """Apply one rigid transform per trajectory entry to the corresponding block of
        'self.subsampling' consecutive points."""
        corrected = np.empty_like(points)
        n_points = points.shape[0]
        for i in range(trajectory.shape[0]):
            s, e = i * self.subsampling, min((i + 1) * self.subsampling, n_points)
            corrected[s:e] = linalg.map_points(trajectory[i], points[s:e])
        return corrected
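A minimal shape sketch for apply_trajectory; the provider instance (e.g. the egomotion provider of
Example #1) and its subsampling value of 100 are assumptions for illustration:

import numpy as np

# with subsampling = 100, ceil(250 / 100) = 3 transforms cover 250 points
trajectory = np.tile(np.eye(4), (3, 1, 1))      # shape (3, 4, 4), one rigid transform per block
points = np.random.rand(250, 3)
corrected = provider.apply_trajectory(trajectory, points)   # same shape as 'points'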
Example #4
    def __getitem__(self, key: Any):

        #TODO: if multiple point cloud datasources in dependencies, we could merge them.

        # oldest sample to merge: go back 'memory' frames from the requested key
        min_key = key - self.memory

        if not self._is_live:
            min_key = max(0, min_key)
        else:
            # in live mode, don't reach further back than the number of buffered samples
            min_key = max(
                min_key,
                -len(self.datasources[self.original_point_cloud_datasource]))

        samples = self.datasources[
            self.original_point_cloud_datasource][min_key:key + 1]

        # pc_map columns: [x, y, z, amplitude, timestamp, source frame index] (+ [r, g, b] when has_rgb)
        nb_features = 1 if not self.has_rgb else 4
        pc_map = np.empty((0, 5 + nb_features))

        cached_indices = []
        if self.local_cache is not None:
            pc_map = self.local_cache

            if not self._is_live:
                keep = np.where(
                    (pc_map[:, 5] >= min_key) &
                    (pc_map[:, 5] <= key) &
                    (pc_map[:, 5] % self.skip == 0)
                )
            else:
                keep = np.where(
                    (pc_map[:, 5] >= samples[0].raw['absolute_index']) &
                    (pc_map[:, 5] <= samples[-1].raw['absolute_index']) &
                    (pc_map[:, 5] % self.skip == 0)
                )
            pc_map = pc_map[keep]
            cached_indices = np.unique(pc_map[:, 5])

        for sample in samples:

            if not self._is_live:
                index = sample.index
                if index % self.skip and index != key:
                    continue
            else:
                index = sample.raw['absolute_index']
                if index % self.skip and index != samples[-1].raw[
                        'absolute_index']:
                    continue

            if index in cached_indices:
                continue  #don't re-add what is already cached

            pc = np.empty((sample.amplitudes.size, 5 + nb_features))
            pc[:, [0, 1, 2]] = sample.point_cloud(referential='world',
                                                  undistort=False)
            pc[:, 3] = sample.amplitudes
            pc[:, 4] = sample.timestamp
            pc[:, 5] = index

            if self.has_rgb:
                pc[:, 6] = sample.raw['r']
                pc[:, 7] = sample.raw['g']
                pc[:, 8] = sample.raw['b']

            pc_map = self.stack_point_cloud(pc_map, pc)

        self.local_cache = copy.deepcopy(pc_map)

        if self.voxel_size > 0 and OPEN3D_AVAILABLE:
            pc_map = self.voxelize(pc_map)

        to_world = samples[-1].compute_transform('world')
        to_sensor = tf_inv(to_world)
        pc_map[:, [0, 1, 2]] = map_points(to_sensor, pc_map[:, [0, 1, 2]])

        #package in das format
        dtype = datasource_xyzit() if not self.has_rgb else sample.raw.dtype
        raw = np.empty((pc_map.shape[0]), dtype=dtype)
        raw['x'] = pc_map[:, 0]
        raw['y'] = pc_map[:, 1]
        raw['z'] = pc_map[:, 2]
        raw['i'] = pc_map[:, 3]
        raw['t'] = pc_map[:, 4]

        if self.has_rgb:
            raw['r'] = pc_map[:, 6]
            raw['g'] = pc_map[:, 7]
            raw['b'] = pc_map[:, 8]

        sample_object = self.sensor.factories['xyzit'][0]

        return sample_object(index=key,
                             datasource=self,
                             virtual_raw=raw,
                             virtual_ts=samples[-1].timestamp)
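A usage sketch for the accumulation datasource above; the instance name and the attribute used to
read back the merged structured array are assumptions, only the indexing behaviour comes from the code:

# 'accumulated_ds' is assumed to be an instance of the datasource class defined above
merged_sample = accumulated_ds[42]       # __getitem__ merges up to 'memory' previous frames
raw = merged_sample.virtual_raw          # assumed accessor for the structured array built above
xyz = np.stack([raw['x'], raw['y'], raw['z']], axis=1)   # points in the latest frame's sensor referential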
Example #5
def transform_pts(matrix4x4, ptsNx3):
    """Apply a 4x4 homogeneous transform to an [N, 3] array of points."""
    return linalg.map_points(matrix4x4, ptsNx3)
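For reference, a plain-NumPy sketch of what such a 4x4 point transform amounts to; this is an
equivalent formulation for affine matrices, not necessarily how linalg.map_points is implemented:

import numpy as np

def map_points_reference(matrix4x4: np.ndarray, pts: np.ndarray) -> np.ndarray:
    # append a homogeneous coordinate, apply the matrix, then drop the extra coordinate
    homogeneous = np.hstack([pts, np.ones((pts.shape[0], 1), dtype=pts.dtype)])
    return (homogeneous @ matrix4x4.T)[:, :3]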
Example #6
def load_collada(filename,
                 scale=1,
                 matrix=np.eye(4, dtype='f4'),
                 name="collada",
                 bake_matrix=True,
                 merge_actors=True,
                 ignore_non_textured=False,
                 invert_normals=False,
                 type_id=-1,
                 instance_id=-1):

    actors = Actors.Actors(shared_transform=ensure_Transform(
        np.eye(4, dtype='f4')) if bake_matrix else ensure_Transform(matrix),
                           name=name,
                           type_id=type_id,
                           instance_id=instance_id)

    mesh = collada.Collada(filename)

    np_matrix = utils.to_numpy(matrix)

    textures_cache = {}

    actors_cache = {}

    bbox = np.full((2, 3), np.finfo('f4').max)
    bbox[1, :] = np.finfo('f4').min

    actors.all_vertices = []
    actors.scale = scale

    for coll_geom in tqdm.tqdm(mesh.scene.objects('geometry')):
        for coll_prim in coll_geom.primitives():

            #FIXME: stop ignoring the collada's transforms

            if isinstance(coll_prim, collada.triangleset.BoundTriangleSet):
                triangles = coll_prim
            elif isinstance(coll_prim, collada.polylist.BoundPolylist):
                triangles = coll_prim.triangleset()
            else:
                LoggingManager.instance().warning(
                    f"{type(coll_prim)} not implementend")
                continue

            textures = {}
            effect_signature = []  #for merging actors
            uniforms = {}
            for effect_name in triangles.material.effect.supported:
                value = getattr(triangles.material.effect, effect_name)
                if isinstance(value, collada.material.Map):

                    texture_image = value.sampler.surface.image
                    effect_signature.append((effect_name, texture_image.id))
                    if texture_image.id in textures_cache:
                        textures[effect_name] = textures_cache[
                            texture_image.id]
                    else:
                        array = textures[effect_name] = textures_cache[
                            texture_image.id] = Array.Array(
                                ndarray=utils.load_texture(
                                    texture_image.pilimage))
                elif isinstance(value, tuple):
                    uniforms[effect_name] = QColor.fromRgbF(*value)
                    effect_signature.append((effect_name, value))
                elif isinstance(value, float):
                    uniforms[effect_name] = value
                    effect_signature.append((effect_name, value))
                elif value is not None:
                    LoggingManager.instance().warning(
                        f"Unsupported type {effect_name}: {type(value)}")

            if not textures and ignore_non_textured:
                continue

            effect_signature = frozenset(effect_signature)

            triangles.generateNormals()

            vertices = triangles.vertex.astype('f4') * scale

            normals = triangles.normal.astype('f4')

            if invert_normals:
                normals = normals * -1

            if bake_matrix:
                vertices = linalg.map_points(np_matrix, vertices)
                normals = linalg.map_vectors(np_matrix, normals)

            indices = triangles.vertex_index.flatten().astype('u4')
            attributes_ndarrays = {"vertices": vertices, "normals": normals}
            indexed_vertices = vertices[triangles.vertex_index.flatten()]

            for i in range(3):
                bbox[0, i] = min(bbox[0, i], indexed_vertices[:, i].min())
                bbox[1, i] = max(bbox[1, i], indexed_vertices[:, i].max())

            if textures:
                if len(triangles.texcoordset) > 1:
                    LoggingManager.instance().warning(
                        f"warning, {type(coll_prim)} not implementend")
                orig_tc0 = triangles.texcoordset[0].astype('f4')
                tc0_idx = triangles.texcoord_indexset[0].flatten()
                if not np.all(tc0_idx == indices):
                    assert tc0_idx.shape == indices.shape, "texcoord indices must be the same shape as vertex indices"
                    #this will duplicate shared vertices so that we can have a separate texcoords for each triangle sharing vertices
                    attributes_ndarrays['vertices'] = indexed_vertices
                    attributes_ndarrays['normals'] = normals[
                        triangles.normal_index.flatten()]
                    indices = np.arange(indices.shape[0], dtype=indices.dtype)
                    uv = orig_tc0[tc0_idx]
                else:
                    uv = np.empty((vertices.shape[0], 2), 'f4')
                    uv[indices] = orig_tc0[tc0_idx]

                attributes_ndarrays['texcoords0'] = uv

                attribs = CustomAttribs.TexcoordsAttribs(
                    vertices=Array.Array(
                        ndarray=attributes_ndarrays['vertices']),
                    normals=Array.Array(
                        ndarray=attributes_ndarrays['normals']),
                    texcoords0=Array.Array(
                        ndarray=attributes_ndarrays['texcoords0']))
                #FIXME: bind collada uniforms if present
                effect = CustomEffects.textured_material(textures)
            else:
                attribs = Geometry.Attribs(
                    vertices=Array.Array(
                        ndarray=attributes_ndarrays['vertices']),
                    normals=Array.Array(
                        ndarray=attributes_ndarrays['normals']))
                #FIXME: bind other uniforms if present
                effect = CustomEffects.material(color=uniforms['diffuse'],
                                                back_color=uniforms['diffuse'])

            if invert_normals:
                indices = indices.reshape((indices.shape[0] // 3),
                                          3)[:, [0, 2, 1]].flatten()

            if merge_actors and effect_signature in actors_cache:

                actor = actors_cache[effect_signature]

                actor_attributes = actor.geometry.attribs.get_attributes()

                n_vertices_before = actor_attributes['vertices'].shape[0]

                for attr_name, value in actor_attributes.items():
                    value.set_ndarray(
                        np.vstack(
                            (value.ndarray, attributes_ndarrays[attr_name])))

                actor.geometry.indices.set_ndarray(
                    np.hstack((actor.geometry.indices.ndarray,
                               indices + n_vertices_before)))

            else:
                geometry = Geometry.Geometry(
                    indices=Array.Array(ndarray=indices), attribs=attribs)

                actor = actors.addActor(
                    Actors.Actor(geometry=geometry,
                                 effect=effect,
                                 transform=actors.shared_transform,
                                 name=f"{name}_{coll_geom.original.id}",
                                 type_id=type_id,
                                 instance_id=instance_id))

                actors_cache[effect_signature] = actor

                actors.all_vertices.append(
                    actor.geometry.attribs.vertices
                )  #if in merge actor mode, vertices are already there

    actors.bbox = bbox

    return actors
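A usage sketch for the loader; the file name is a placeholder and the rotation matrix is illustrative:

import numpy as np

# bake a 90-degree rotation about z directly into the vertices of the loaded mesh
rot_z = np.array([[0., -1., 0., 0.],
                  [1.,  0., 0., 0.],
                  [0.,  0., 1., 0.],
                  [0.,  0., 0., 1.]], dtype='f4')
scene_actors = load_collada("some_model.dae", scale=1.0, matrix=rot_z, bake_matrix=True)
print(scene_actors.bbox)   # axis-aligned bounding box accumulated while loading the primitives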