Example no. 1
    def get_reciprocal_slice(self,
                             plane_normal: Tuple[int, int, int],
                             distance: float = 0) -> ReciprocalSlice:
        """
        Get a reciprocal slice through the Brillouin zone.

        The slice is defined by the intersection of a plane with the reciprocal lattice.

        Args:
            plane_normal: (3, ) int array of the plane normal in fractional indices.
            distance: The distance from the center of the Brillouin zone (Γ-point).

        Returns:
            The reciprocal slice.
        """
        from trimesh import transform_points
        from trimesh.geometry import plane_transform
        from trimesh.intersections import plane_lines

        cart_normal = np.dot(plane_normal, self.reciprocal_lattice)
        cart_center = cart_normal * distance

        # get the intersections with the faces
        intersections, _ = plane_lines(cart_center, cart_normal,
                                       self.lines.transpose(1, 0, 2))

        if len(intersections) == 0:
            raise ValueError("Plane does not intersect reciprocal cell")

        # transform the intersections from 3D space to 2D coordinates
        transformation = plane_transform(origin=cart_center,
                                         normal=cart_normal)
        points = transform_points(intersections, transformation)[:, :2]

        return ReciprocalSlice(self, points, transformation)
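
The plane_transform / transform_points idiom above can be exercised on its own; a minimal runnable sketch (synthetic on-plane points, no Brillouin-zone machinery) of flattening 3D points into the plane's 2D frame:

import numpy as np
import trimesh
from trimesh.geometry import plane_transform

# build the 4x4 transform that moves the plane z = 1 onto the XY plane
origin = np.array([0.0, 0.0, 1.0])
normal = np.array([0.0, 0.0, 1.0])
matrix = plane_transform(origin=origin, normal=normal)

# points lying on the plane end up with a ~0 third coordinate,
# so the first two columns are valid 2D coordinates
points_3d = np.array([[0.0, 0.0, 1.0],
                      [1.0, 0.0, 1.0],
                      [0.0, 2.0, 1.0]])
points_2d = trimesh.transform_points(points_3d, matrix)[:, :2]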
Example no. 2
def user_selects_target(full_mesh_path, R):
    #  Find Oriented Bounding Box for surface
    global approved

    # load the mesh with trimesh, using the path passed in rather than a
    # hard-coded one
    mesh = trimesh.load(full_mesh_path)
    R_aug = np.eye(4)
    R_aug[:3, :3] = R
    mesh.vertices = trimesh.transform_points(mesh.vertices, R_aug)

    # Crop the mesh using face color
    f_colors = np.asarray(mesh.visual.face_colors)
    f_colors = f_colors / 255
    # add opacity
    f_colors[:, 3] = 1.0

    pvm = pv.PolyData(full_mesh_path)

    p.add_mesh(pvm, scalars=f_colors, rgb=True, name="env_mesh", culling=False)  # f_colors[:,:3])

    p.enable_cell_picking(mesh=pvm, style='surface', color='r', through=False,
                          show_message="")
    p.add_text(text="Press R to toggle selection tool\n"
                    "Press Y to approve region\n"
                    "Press Q to confirm selection\n",
               color='k', font_size=18)

    p.add_key_event(key='y', callback=pyvista_approve_region)
    p.show(auto_close=True, interactive=True)
    p.deep_clean()
    p.clear()
    p.close()

    return approved
Example no. 3
    def get_symmetry_points(
            fermi_slice: FermiSlice) -> Tuple[np.ndarray, List[str]]:
        """
        Get the high symmetry k-points and labels for the Fermi slice.

        Args:
            fermi_slice: A fermi slice.

        Returns:
            The high symmetry k-points and labels for points that lie on the slice.
        """
        hskp = HighSymmKpath(fermi_slice.structure)
        labels, kpoints = list(zip(*hskp.kpath["kpoints"].items()))

        if isinstance(fermi_slice.reciprocal_slice.reciprocal_space,
                      ReciprocalCell):
            kpoints = kpoints_to_first_bz(np.array(kpoints))

        kpoints = np.dot(
            kpoints,
            fermi_slice.reciprocal_slice.reciprocal_space.reciprocal_lattice)
        kpoints = transform_points(kpoints,
                                   fermi_slice.reciprocal_slice.transformation)

        # filter points that do not lie very close to the plane
        on_plane = np.where(np.abs(kpoints[:, 2]) < 1e-4)[0]
        kpoints = kpoints[on_plane]
        labels = [labels[i] for i in on_plane]

        return kpoints[:, :2], labels
Example no. 4
    def evaluate_interface(self, positive, negative):
        config = Configuration.config

        plane_samples = self.grid_sample_polygon()

        if len(plane_samples) == 0:
            return False

        mesh_samples = trimesh.transform_points(
            np.column_stack((plane_samples, np.zeros(plane_samples.shape[0]))),
            self.xform)
        pos_dists = positive.nearest.signed_distance(
            mesh_samples + (1 + self.connector_diameter) * self.normal)
        neg_dists = negative.nearest.signed_distance(
            mesh_samples + (1 + self.connector_diameter) * -1 * self.normal)
        # 1.5 overestimates sqrt(2), so the radius exceeds half the diagonal of a square connector
        pos_valid_mask = pos_dists > 1.5 * self.connector_diameter / 2
        neg_valid_mask = neg_dists > 1.5 * self.connector_diameter / 2
        ch_area_mask = np.logical_or(pos_valid_mask, neg_valid_mask)

        if ch_area_mask.sum() == 0:
            return False

        convex_hull_area = sg.MultiPoint(plane_samples[ch_area_mask]).buffer(
            self.connector_diameter / 2).convex_hull.area
        self.objective = max(
            self.area / convex_hull_area - config.connector_objective_th, 0)
        self.positive_sites = mesh_samples[pos_valid_mask]
        self.negative_sites = mesh_samples[neg_valid_mask]
        self.all_sites = np.concatenate(
            (self.positive_sites, self.negative_sites), axis=0)
        return True
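
The validity masks above depend on trimesh's signed-distance sign convention (positive inside the mesh); a small standalone check using a unit sphere as a stand-in mesh:

import numpy as np
import trimesh

sphere = trimesh.primitives.Sphere(radius=1.0)
probe = np.array([[0.0, 0.0, 0.0],   # center of the sphere
                  [0.0, 0.0, 2.0]])  # one radius outside the surface
d = sphere.nearest.signed_distance(probe)
# d is approximately [1.0, -1.0]: positive inside, negative outside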
Example no. 5
    def Apuntar_Sol(self, Sun_vector, Cara_principal):

        direcion_principal = self.mesh.facets_normal[self.Caras_Despegables[0]]
        plano0 = np.cross(Sun_vector, self.actitud.eje_de_spin)
        plano0 = plano0 / np.linalg.norm(plano0)

        plano1 = np.cross(direcion_principal, self.actitud.eje_de_spin)

        plano1 = plano1 / np.linalg.norm(plano1)

        # plano0 and plano1 are unit vectors, so their dot product is
        # already the cosine of the angle between the two planes
        angulo_giro = np.arccos(np.absolute(np.dot(plano0, plano1)))
        posicion_eje = np.array(np.where(self.eje_de_spin == 1)).max()
        if plano0[posicion_eje] == 0:
            angulo_giro = 0.0
        elif np.isnan(angulo_giro):
            angulo_giro = 0.0

        if angulo_giro == 0:
            pass
        else:
            prim = trimesh.transform_points(
                plano1.reshape(1, 3),
                trimesh.transformations.rotation_matrix(
                    angulo_giro, self.actitud.eje_de_spin, [0, 0, 0]))
            if not np.allclose(prim, plano0):
                angulo_giro = -angulo_giro
            self.mesh = self.mesh.apply_transform(
                trimesh.transformations.rotation_matrix(
                    angulo_giro, self.actitud.eje_de_spin, [0, 0, 0]))
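
The sign disambiguation above, reduced to a self-contained sketch: np.arccos only gives the unsigned angle, so the candidate rotation is applied and the angle is negated if the rotated vector misses the target:

import numpy as np
import trimesh

axis = np.array([0.0, 0.0, 1.0])
a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])

# unsigned angle between the two unit vectors
angle = np.arccos(np.clip(np.dot(a, b), -1.0, 1.0))

# rotate a by +angle about the axis; if it does not land on b,
# the signed angle must be negative
rotated = trimesh.transform_points(
    a.reshape(1, 3),
    trimesh.transformations.rotation_matrix(angle, axis, [0, 0, 0]))
if not np.allclose(rotated[0], b):
    angle = -angle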
Example no. 6
    def __init__(self, polygon, xform, normal, origin):
        config = Configuration.config
        self.valid = False
        self.polygon = polygon
        self.normal = normal
        self.origin = origin
        self.xform = xform
        self.area = self.polygon.area
        self.positive = None
        self.negative = None
        self.connector_diameter = None
        self.objective = None
        self.positive_sites = None
        self.pos_index = None
        self.negative_sites = None
        self.neg_index = None
        self.all_sites = None
        self.all_index = None

        self.connector_diameter = config.connector_diameter
        self.connector_spacing = config.connector_spacing
        self.connector_wall_distance = config.connector_wall_distance

        if self.area < (self.connector_diameter / 2)**2:
            return

        verts, faces = creation.triangulate_polygon(polygon, triangle_args='p')
        verts = np.column_stack((verts, np.zeros(len(verts))))
        verts = trimesh.transform_points(verts, xform)
        faces = np.fliplr(faces)
        self.mesh = trimesh.Trimesh(verts, faces)
        self.valid = True
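
A runnable reduction of the triangulate-lift-transform pattern in the __init__ above, using a hypothetical rectangle (triangulate_polygon with triangle_args needs the optional triangle package):

import numpy as np
import trimesh
from shapely.geometry import Polygon

polygon = Polygon([(0, 0), (2, 0), (2, 1), (0, 1)])
# triangulate in 2D, lift to z = 0, then move into place with a 4x4
verts_2d, faces = trimesh.creation.triangulate_polygon(polygon, triangle_args='p')
verts = np.column_stack((verts_2d, np.zeros(len(verts_2d))))
xform = trimesh.transformations.translation_matrix([0.0, 0.0, 5.0])
mesh = trimesh.Trimesh(trimesh.transform_points(verts, xform), faces)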
Example no. 7
    def check_forward(self, points, transform):
        points1 = np.asarray([
            trimesh.transform_points(cuda.to_cpu(self.points), T)
            for T in cuda.to_cpu(self.transform)
        ])
        points2 = transform_points(self.points, self.transform).array
        testing.assert_allclose(points1, points2, atol=1e-5, rtol=1e-4)
Example no. 8
def test_different_from():
    """verify that `BSPNode.different_from` has the expected behavior

    Get a list of planes. Split the object using the first plane, then for each of the other planes, split the object,
    check if the plane is far enough away given the config, then assert that `BSPNode.different_from` returns the
    correct value. This skips any splits that fail.
    """
    config = Configuration.config
    print()
    mesh = trimesh.primitives.Sphere(radius=50)

    tree = bsp_tree.BSPTree(mesh)
    root = tree.nodes[0]
    normal = trimesh.unitize(np.random.rand(3))
    planes = bsp_tree.get_planes(mesh, normal)
    base_node = copy.deepcopy(root)
    base_node = bsp_node.split(base_node, planes[0])

    for plane in planes[1:]:
        # smaller origin offset, should not be different
        test_node = copy.deepcopy(root)
        test_node = bsp_node.split(test_node, plane)
        if abs((plane[0] - planes[0][0])
               @ planes[0][1]) > config.different_origin_th:
            assert base_node.different_from(test_node)
        else:
            assert not base_node.different_from(test_node)

    # smaller angle difference, should not be different
    test_node = copy.deepcopy(root)
    random_vector = trimesh.unitize(np.random.rand(3))
    axis = np.cross(random_vector, planes[0][1])
    rotation = trimesh.transformations.rotation_matrix(np.pi / 11, axis)
    normal = trimesh.transform_points(planes[0][1][None, :], rotation)[0]
    test_plane = (planes[0][0], normal)
    test_node = bsp_node.split(test_node, test_plane)
    assert not base_node.different_from(test_node)

    # larger angle difference, should be different
    test_node = copy.deepcopy(root)
    random_vector = trimesh.unitize(np.random.rand(3))
    axis = np.cross(random_vector, planes[0][1])
    rotation = trimesh.transformations.rotation_matrix(np.pi / 9, axis)
    normal = trimesh.transform_points(planes[0][1][None, :], rotation)[0]
    test_plane = (planes[0][0], normal)
    test_node = bsp_node.split(test_node, test_plane)
    assert base_node.different_from(test_node)
Example no. 9
def camera_to_rays(camera):
    """
    Convert a trimesh.scene.Camera object to ray origins
    and direction vectors.

    Parameters
    --------------
    camera : trimesh.scene.Camera
      Camera with transform defined

    Returns
    --------------
    origins : (n, 3) float
      Ray origins in space
    vectors : (n, 3) float
      Ray direction unit vectors
    angles : (n, 2) float
      Ray spherical coordinate angles in radians
    """
    # radians of half the field of view
    half = np.radians(camera.fov / 2.0)
    # scale it down by two pixels to keep image under resolution
    half *= (camera.resolution - 2) / camera.resolution
    # get FOV angular bounds in radians

    # create an evenly spaced list of angles
    angles = trimesh.util.grid_linspace(bounds=[-half, half],
                                        count=camera.resolution)

    # turn the angles into unit vectors
    vectors = trimesh.unitize(
        np.column_stack((np.sin(angles), np.ones(len(angles)))))

    # flip the camera transform to change sign of Z
    transform = np.dot(camera.transform,
                       trimesh.geometry.align_vectors([1, 0, 0], [-1, 0, 0]))

    # apply the rotation to the direction vectors
    vectors = trimesh.transform_points(vectors, transform, translate=False)

    # camera origin is single point, extract from transform
    origin = trimesh.transformations.translation_from_matrix(transform)
    # tile it into a corresponding list of ray vectors
    origins = np.ones_like(vectors) * origin

    return origins, vectors, angles
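
The two helpers doing the real work above can be exercised in isolation; a short sketch with made-up bounds and resolution showing grid_linspace producing one angle pair per pixel and unitize normalizing the pinhole directions:

import numpy as np
import trimesh

# a 4x3 "image": one (x, y) angle pair per pixel
angles = trimesh.util.grid_linspace(bounds=[[-0.5, -0.4], [0.5, 0.4]],
                                    count=[4, 3])

# pinhole model: (sin(x), sin(y), 1) normalized to unit length
vectors = trimesh.unitize(
    np.column_stack((np.sin(angles), np.ones(len(angles)))))
assert np.allclose(np.linalg.norm(vectors, axis=1), 1.0)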
Example no. 10
    def _get_grid_full(self, examples, pitch, origin):
        dims = (self._voxel_dim, ) * 3
        grid_full = np.zeros(dims, dtype=np.int32)
        for i, example in enumerate(examples):
            T = tf.quaternion_matrix(example["quaternion_true"])
            T = geometry_module.compose_transform(
                R=T[:3, :3], t=example["translation_true"])
            vox = self._models.get_solid_voxel_grid(example["class_id"])
            points = trimesh.transform_points(vox.points, T)
            indices = trimesh.voxel.ops.points_to_indices(points,
                                                          pitch=pitch,
                                                          origin=origin)
            I, J, K = indices[:, 0], indices[:, 1], indices[:, 2]
            keep = ((0 <= I)
                    & (I < dims[0])
                    & (0 <= J)
                    & (J < dims[1])
                    & (0 <= K)
                    & (K < dims[2]))
            I, J, K = I[keep], J[keep], K[keep]
            grid_full[I, J, K] = i + 1  # starts from 1
        return grid_full
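
points_to_indices, as used above, snaps points onto an integer voxel grid given a pitch and origin; a minimal sketch with hypothetical values:

import numpy as np
import trimesh

points = np.array([[0.04, 0.0, 0.0],
                   [0.26, 0.11, 0.0]])
# snapped onto integer voxel coordinates relative to the origin
indices = trimesh.voxel.ops.points_to_indices(points,
                                              pitch=0.1,
                                              origin=np.zeros(3))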
Example no. 11
    def _get_support_polygons(self,
                              min_area=0.01,
                              gravity=np.array([0, 0, -1.0]),
                              erosion_distance=0.02):
        """Extract support facets by comparing normals with gravity vector and checking area.

        Args:
            min_area (float, optional): Minimum area of support facets [m^2]. Defaults to 0.01.
            gravity ([np.ndarray], optional): Gravity vector in scene coordinates. Defaults to np.array([0, 0, -1.0]).
            erosion_distance (float, optional): Clearance from support surface edges. Defaults to 0.02.

        Returns:
            list[trimesh.path.polygons.Polygon]: list of support polygons.
            list[np.ndarray]: list of homogeneous 4x4 matrices describing the polygon poses in scene coordinates.
        """
        assert np.isclose(np.linalg.norm(gravity), 1.0)

        support_polygons = []
        support_polygons_T = []

        # Add support plane if it is set (although not infinite)
        support_meshes = self._support_objects

        for obj_mesh in support_meshes:
            # get all facets that are aligned with -gravity and bigger than min_area
            support_facet_indices = np.argsort(obj_mesh.facets_area)
            support_facet_indices = [
                idx for idx in support_facet_indices if np.isclose(
                    obj_mesh.facets_normal[idx].dot(-gravity), 1.0, atol=0.5)
                and obj_mesh.facets_area[idx] > min_area
            ]

            for index in support_facet_indices:
                normal = obj_mesh.facets_normal[index]
                origin = obj_mesh.facets_origin[index]

                T = trimesh.geometry.plane_transform(origin, normal)
                vertices = trimesh.transform_points(obj_mesh.vertices,
                                                    T)[:, :2]

                # find boundary edges for the facet
                edges = obj_mesh.edges_sorted.reshape(
                    (-1, 6))[obj_mesh.facets[index]].reshape((-1, 2))
                group = trimesh.grouping.group_rows(edges, require_count=1)

                # run the polygon conversion
                polygon = trimesh.path.polygons.edges_to_polygons(
                    edges=edges[group], vertices=vertices)

                assert len(polygon) == 1

                # erode to avoid object on edges
                polygon[0] = polygon[0].buffer(-erosion_distance)

                if not polygon[0].is_empty and polygon[0].area > min_area:
                    support_polygons.append(polygon[0])
                    support_polygons_T.append(
                        trimesh.transformations.inverse_matrix(T))

        return support_polygons, support_polygons_T
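
The boundary extraction above (edges referenced by exactly one face form the open boundary) can be tried on a trivially flat mesh; a self-contained sketch:

import numpy as np
import trimesh

# a unit square in the XY plane, made of two triangles
mesh = trimesh.Trimesh(vertices=[[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]],
                       faces=[[0, 1, 2], [0, 2, 3]])

# edges referenced by exactly one face form the boundary
boundary = trimesh.grouping.group_rows(mesh.edges_sorted, require_count=1)
polygons = trimesh.path.polygons.edges_to_polygons(
    edges=mesh.edges_sorted[boundary], vertices=mesh.vertices[:, :2])
# polygons[0].area == 1.0 for the unit square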
Example no. 12
def optimize(seg_env_prototype=None,
             target_prototype=None,
             cluster_env_path=None,
             full_env_path=None,
             N_its=1,
             enable_user_confirmation=True,
             preloaded_vars=None,
             visualize_with_vedo=False):

    if preloaded_vars is None:
        env, camera_list, optimization_options, pso_options = initialize(
            seg_env_prototype,
            target_prototype,
            cluster_env_path,
            full_env_path,
            load_full_env=True)

        if enable_user_confirmation:
            # surface_confirmation.confirm_surfaces(environment=env, N_max=10)
            surface_confirmation_demo.confirm_surfaces(environment=env,
                                                       N_max=10)
        env.post_process_environment()

        preloaded_vars = {
            'env': copy.deepcopy(env),
            'camera_list': copy.deepcopy(camera_list),
            'optimization_options': copy.deepcopy(optimization_options),
            'pso_options': copy.deepcopy(pso_options)
        }

        # SAVE THIS!
        pickle_env = open("../test/preloaded_environment_apartment.p", 'wb')
        pickle.dump(preloaded_vars, pickle_env)
        pickle_env.close()

    else:
        # workaround for the seg fault issue: load everything in advance, then just pass it in
        env = preloaded_vars['env']
        camera_list = initialize_camera_list()
        optimization_options = initialize_opt_options()
        pso_options = initialize_pso_options()

        env.vedo_mesh = vedo.mesh.Mesh(env.obs_mesh)
        env.opt_options = optimization_options
        env.correct_normals()
        env.n_points = optimization_options.n_points
        env.generate_integration_points()
        env.perch_regions = []
        env.perch_area = 0
        env.set_surface_as_perchable()
        optimization_options.log_performance = False

    # env.plot_environment()

    # PSO:
    # base dimension for the simple 2D problem is 2; it scales up depending on the selected options
    camera_particle_dimension = optimization_options.get_particle_size()
    n_cams = len(camera_list)
    N_iterations = pso_options["N_iterations"]
    N_particles = pso_options["N_particles"]

    if pso_options["greedy_search"]:
        particle_dimension = camera_particle_dimension
        num_optimizations = n_cams
    else:
        particle_dimension = n_cams * camera_particle_dimension
        num_optimizations = 1

    # for logging
    pso_keypoints = np.zeros([N_its, num_optimizations, N_iterations, 3])
    optimization_options.search_time = np.zeros(
        [N_its, num_optimizations, N_iterations + 1])
    optimization_options.best_fitness = np.zeros(
        [N_its, num_optimizations, N_iterations + 1])
    optimization_options.pts_searched = np.zeros(
        [N_its, num_optimizations, N_iterations + 1])

    # particle boundaries
    bounds = (np.zeros(particle_dimension), np.ones(particle_dimension))

    # for the velocity update:
    # 'w' is velocity decay, aka inertia,
    # 'c1' is the "cognitive parameter", i.e. attraction to the particle's own best,
    # 'c2' is the "social parameter", i.e. attraction to the local/global best,
    # 'k' is the number of neighbors to consider,
    # 'p' is the Minkowski p-norm: 1 for absolute-value distance, 2 for Euclidean distance
    options = {
        'c1': pso_options["pso_c1"],
        'c2': pso_options["pso_c2"],
        'w': pso_options["pso_w"],
        'k': pso_options["pso_k"],
        'p': pso_options["pso_p"]
    }

    optimal_cameras = PlacedCameras()

    fig_num = 1

    if pso_options['individual_surface_opt']:
        num_surface_loops = len(env.perch_regions)
        surface_number = range(num_surface_loops)
        N_particles = env.assign_particles_to_surfaces(
            N_particles,
            pso_options["pso_k"],
            neighborhood_search=pso_options["local_search"])
    else:
        num_surface_loops = 1
        surface_number = [-1]
        N_particles = [N_particles]

    # STORE FOR ANALYSIS LATER
    if optimization_options.log_performance:
        pso_best_fitnesses = np.zeros(num_optimizations)
        pso_points_searched = np.zeros(num_optimizations)
        pso_search_time = np.zeros(num_optimizations)
        pso_keypoints = []
        for i in range(num_optimizations):
            pso_keypoints.append([[], [], [], []])

    # store, for each optimization, time, num particles searched, fitness, standard deviation
    bh = pso_options["boundary_handling"]

    # for i in range(num_optimizations):
    i = 0
    while i < num_optimizations:
        if optimization_options.log_performance:
            optimization_options.data_index = 0
        start_time = datetime.now()

        if pso_options["greedy_search"]:
            search_cameras_list = [copy.deepcopy(camera_list[i])]
        else:
            search_cameras_list = camera_list

        best_cost = np.finfo(float).max
        best_pos_surf = -1
        best_pos = np.zeros(particle_dimension)

        for j in range(num_surface_loops):
            # Future work: investigate velocity clamping...
            optimization_options.surface_number = j
            # if pso_options["local_search"]:
            #     optimizer = ps.single.LocalBestPSO(n_particles=N_particles[j], dimensions=particle_dimension,
            #                                        options=options, bounds=bounds, bh_strategy=bh)
            # else:
            #     optimizer = ps.single.GlobalBestPSO(n_particles=N_particles[j], dimensions=particle_dimension,
            #                                         options=options, bounds=bounds, bh_strategy=bh)

            optimization_options.surface_number = surface_number[j]
            # this flag gets reset if the current search has too little variance
            optimization_options.continue_searching = True
            optimization_options.stagnant_loops = 0

            if visualize_with_vedo:
                plt1 = vedo.Plotter(title='Confirm Perch Location',
                                    pos=[0, 0],
                                    interactive=False,
                                    sharecam=False)
                plt1.clear()

                # draw wireframe lineset of camera frustum
                # env_mesh = trimesh.load(env.full_env_path)

                env_mesh = trimesh.load(
                    '/home/simon/catkin_ws/src/mesh_partition/datasets/' +
                    env.name + '_1m_pt1.ply')

                R = np.zeros([4, 4])
                R[:3, :3] = env.R
                env_mesh.vertices = trimesh.transform_points(
                    env_mesh.vertices, R)
                env_mesh_vedo = vedo.mesh.Mesh(env_mesh)
                target_mesh_pymesh = env.generate_target_mesh(shape='box')
                target_mesh = trimesh.Trimesh(target_mesh_pymesh.vertices,
                                              target_mesh_pymesh.faces)
                target_mesh_vedo = vedo.mesh.Mesh(target_mesh)
                target_colors = 0.5 * np.ones([len(target_mesh.faces), 4])
                target_colors[:, 0] *= 0
                target_colors[:, 2] *= 0
                target_mesh_vedo.alpha(0.6)
                target_mesh_vedo.cellIndividualColors(target_colors,
                                                      alphaPerCell=True)
                env_mesh.visual.face_colors[:, -1] = 255
                env_mesh_vedo.cellIndividualColors(
                    env_mesh.visual.face_colors / 255, alphaPerCell=True)

                geom_list = [env_mesh_vedo, target_mesh_vedo]

                if env.name == 'office3' or env.name == 'apartment':
                    env_mesh2 = trimesh.load(
                        '/home/simon/catkin_ws/src/mesh_partition/datasets/' +
                        env.name + '_1m_pt2.ply')
                    env_mesh_vedo2 = vedo.mesh.Mesh(env_mesh2)

                    env_mesh2.visual.face_colors[:, -1] = 150
                    env_mesh_vedo2.cellIndividualColors(
                        env_mesh2.visual.face_colors / 255, alphaPerCell=True)
                    geom_list.append(env_mesh_vedo2)

                for s in env.perch_regions:
                    surf_mesh = trimesh.Trimesh(vertices=s.points,
                                                faces=s.faces)
                    vedo_surf_mesh = vedo.mesh.Mesh(surf_mesh)
                    vedo_surf_mesh.color('g')
                    vedo_surf_mesh.opacity(0.7)
                    geom_list.append(vedo_surf_mesh)

                for i_ in range(len(optimal_cameras.cameras)):
                    quad_mesh = trimesh.load(
                        "/home/simon/catkin_ws/src/perch_placement/src/ui/models/white-red-black_quad2.ply"
                    )
                    R = rot3d_from_x_vec(
                        optimal_cameras.cameras[i_].wall_normal)
                    R2 = rot3d_from_rtp(np.array([0, -90, 0]))
                    R_aug = np.zeros([4, 4])
                    R_aug[:3, :3] = R.dot(R2)
                    R_aug[:3, -1] = optimal_cameras.cameras[i_].pose[:3]
                    quad_mesh.vertices = trimesh.transform_points(
                        quad_mesh.vertices, R_aug)
                    quad_mesh_vedo = vedo.mesh.Mesh(quad_mesh)
                    quad_mesh_vedo.cellIndividualColors(
                        quad_mesh.visual.face_colors / 255, alphaPerCell=True)

                    pymesh_frustum = optimal_cameras.cameras[
                        i_].generate_discrete_camera_mesh(degrees_per_step=20,
                                                          environment=env)
                    pymesh_verts = pymesh_frustum.vertices.copy()
                    pymesh_verts.flags.writeable = True
                    pymesh_faces = pymesh_frustum.faces.copy()
                    pymesh_faces.flags.writeable = True

                    frustum = trimesh.Trimesh(
                        vertices=pymesh_frustum.vertices.copy(),
                        faces=pymesh_frustum.faces.copy())
                    vedo_frustum = vedo.mesh.Mesh(frustum)
                    vedo_frustum.alpha(0.3)
                    vedo_frustum.color("b")
                    quad_mesh_vedo.color('o')
                    geom_list.append(quad_mesh_vedo)
                    geom_list.append(vedo_frustum)

                for actor in geom_list:
                    plt1.add(actor)
            else:
                plt1 = None

            # if pso_options["multi_threading"]:
            #     # noinspection PyTypeChecker
            #     surf_best_cost, surf_best_pos = optimizer.optimize(evaluate_swarm, iters=N_iterations, environment=env,
            #                                                        cameras=search_cameras_list,
            #                                                        placed_cameras=optimal_cameras,
            #                                                        opt_options=optimization_options,
            #                                                        n_processes=multiprocessing.cpu_count(),
            #                                                        vedo_plt=plt1)
            # else:
            #     surf_best_cost, surf_best_pos = optimizer.optimize(evaluate_swarm, iters=N_iterations, environment=env,
            #                                                        cameras=search_cameras_list,
            #                                                        placed_cameras=optimal_cameras,
            #                                                        opt_options=optimization_options,
            #                                                        vedo_plt=plt1)

            pso_params = PSO_Hyperparameters(w=pso_options["pso_w"],
                                             c1=pso_options["pso_c1"],
                                             c2=pso_options["pso_c2"],
                                             lr=1,
                                             k=pso_options["pso_k"],
                                             p=pso_options["pso_p"],
                                             N_particles=N_particles[j],
                                             N_iterations=N_iterations)

            surf_best_cost, surf_best_pos = run_pso(
                fitness_function=evaluate_swarm,
                pso_hyper_parameters=pso_params,
                environment=env,
                cameras=search_cameras_list,
                placed_cameras=optimal_cameras,
                opt_options=optimization_options,
                local_pso=True)

            if surf_best_cost < best_cost:
                best_cost = copy.deepcopy(surf_best_cost)
                best_pos = copy.deepcopy(surf_best_pos)
                if pso_options["individual_surface_opt"]:
                    best_pos_surf = j
                # print("Surface " + str(j) + " has lowest cost so far.. ")
                # print("Particle: " + str(best_pos))

        if optimization_options.log_performance:
            pso_search_time[i] = (datetime.now() - start_time).total_seconds()
            pso_best_fitnesses[i] = best_cost

        if pso_options["greedy_search"]:
            search_cameras_list_copy = copy.deepcopy(search_cameras_list)
            optimization_options.surface_number = best_pos_surf
            best_cam = convert_particle_to_state(
                environment=env,
                particle=best_pos,
                cameras=search_cameras_list_copy,
                opt_options=optimization_options)[0]
            optimal_cameras.cameras.append(copy.deepcopy(best_cam))

            if enable_user_confirmation:
                if confirm_perch_placement(
                        environment=env,
                        placed_cameras=optimal_cameras.cameras,
                        focus_id=i):
                    best_cam_covariances = evaluate_camera_covariance(
                        environment=env, cameras=[best_cam])
                    optimal_cameras.append_covariances(best_cam_covariances)
                    i += 1
                else:
                    optimal_cameras.cameras.pop()
                    env.remove_rejected_from_perch_space(camera=best_cam,
                                                         r=0.3)
            else:
                best_cam_covariances = evaluate_camera_covariance(
                    environment=env, cameras=[best_cam])
                optimal_cameras.append_covariances(best_cam_covariances)
                i += 1

    evaluate_discrete_coverage(env.n_points, optimal_cameras, plot=True)

    return optimal_cameras.cameras
Example no. 13
K2 = view2["meta"]["intrinsic_matrix"]
color2 = view2["color"]
depth2 = view2["depth"]
# plt.imshow(color2)
# plt.show()

# project pcd2 (view2/cam2 frame -> world frame)
pcd2_cam2 = morefusion.geometry.pointcloud_from_depth(
    depth2, fx=K2[0, 0], fy=K2[1, 1], cx=K2[0, 2], cy=K2[1, 2],
)
isnan = np.isnan(pcd2_cam2).any(axis=2)
pcd2_cam2 = pcd2_cam2[~isnan]
T2_world_to_cam = view2["meta"]["rotation_translation_matrix"]
T2_world_to_cam = np.r_[T2_world_to_cam, [[0, 0, 0, 1]]]
T2_cam_to_world = np.linalg.inv(T2_world_to_cam)
pcd2_world = trimesh.transform_points(pcd2_cam2, T2_cam_to_world)

# project pcd2 (world frame -> view1/cam1 frame)
T1_world_to_cam = view1["meta"]["rotation_translation_matrix"]
T1_world_to_cam = np.r_[T1_world_to_cam, [[0, 0, 0, 1]]]
pcd2_cam1 = trimesh.transform_points(pcd2_world, T1_world_to_cam)

# project to camera (view1/cam1 frame)
r, c = morefusion.geometry.project_to_camera(
    pcd2_cam1,
    fx=K1[0, 0],
    fy=K1[1, 1],
    cx=K1[0, 2],
    cy=K1[1, 2],
    image_shape=color1.shape,
)
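
The frame-change pattern above in a self-contained form: pad a (3, 4) [R|t] extrinsic to a 4x4 with np.r_, invert it, and round-trip a synthetic point cloud:

import numpy as np
import trimesh

# hypothetical world -> camera extrinsic as a (3, 4) [R|t] matrix
Rt = np.hstack([np.eye(3), np.array([[0.1], [0.2], [0.3]])])
T_world_to_cam = np.r_[Rt, [[0, 0, 0, 1]]]
T_cam_to_world = np.linalg.inv(T_world_to_cam)

pcd_cam = np.random.rand(100, 3)
pcd_world = trimesh.transform_points(pcd_cam, T_cam_to_world)
# mapping back with the forward extrinsic recovers the original cloud
back = trimesh.transform_points(pcd_world, T_world_to_cam)
assert np.allclose(pcd_cam, back)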
Example no. 14
def evaluate_particle(particle, environment, cameras, placed_cameras=PlacedCameras(),
                      opt_options=CameraPlacementOptions(), debug=False, vedo_plt=None, maximization=False):
    """
    This function evaluates a single particle using the heuristic function defined in evaluate_arrangement()

    :param particle: individual particle as np.array
    :param environment: Environment class instance
    :param cameras: a list of Camera objects corresponding to the cameras which have already been placed in an
     environment
    :param placed_cameras: PlacedCameras class instance
    :param opt_options: CameraPlacementOptions instance containing the optimization options
    :param debug: flag, if true print and draw relevant information.
    :param vedo_plt: optional vedo Plotter used to visualize the camera arrangement
    :param maximization: flag, if true treat the heuristic as a maximization objective
    :return: score of particle
    """
    # convert particle into camera pose; evaluate edge normal at each camera position
    cameras = convert_particle_to_state(environment=environment, particle=particle, cameras=cameras,
                                        opt_options=opt_options, debug=debug)

    # plot particles to debug...
    # if debug:
    #     if environment.dimension == 2:
    #         plot_particle_2d(particle=np.squeeze(particle), environment=environment, cameras=cameras,
    #                          placed_cameras=placed_cameras, view_time=0.0001, figure_number=figure_number)
    #     else:
    # # if len(placed_cameras.cameras) > 0:
    # plot_particle_3d(particle=particle, environment=environment, cameras=cameras, placed_cameras=placed_cameras,
    #                  view_time=0.0001, figure_number=figure_number)

    score = evaluate_arrangement_covariance(environment=environment, cameras=cameras, placed_cameras=placed_cameras,
                                            debug=debug, maximization=maximization)

    if vedo_plt is not None:
        if len(placed_cameras.cameras) >= 10:
            geom_list = []
            for i in range(len(cameras)):
                pymesh_frustum = cameras[i].generate_discrete_camera_mesh(degrees_per_step=5, environment=environment)
                if len(pymesh_frustum.faces) > 0 and not np.isnan(pymesh_frustum.vertices).any():
                    pymesh_verts = pymesh_frustum.vertices.copy()
                    pymesh_verts.flags.writeable = True
                    pymesh_faces = pymesh_frustum.faces.copy()
                    pymesh_faces.flags.writeable = True
                    frustum = trimesh.Trimesh(vertices=pymesh_frustum.vertices.copy(),
                                              faces=pymesh_frustum.faces.copy())
                    vedo_frustum = vedo.mesh.Mesh(frustum)
                    vedo_frustum.alpha(0.2)
                    vedo_frustum.color("c")
                    quad_mesh = trimesh.load(
                        "/home/simon/catkin_ws/src/perch_placement/src/ui/models/white-red-black_quad2.ply")
                    R = rot3d_from_x_vec(cameras[i].wall_normal)
                    R2 = rot3d_from_rtp(np.array([0, -90, 0]))
                    R_aug = np.zeros([4, 4])
                    R_aug[:3, :3] = R.dot(R2)
                    R_aug[:3, -1] = cameras[i].pose[:3]
                    quad_mesh.vertices = trimesh.transform_points(quad_mesh.vertices, R_aug)
                    quad_mesh_vedo = vedo.mesh.Mesh(quad_mesh)
                    quad_mesh_vedo.cellIndividualColors(quad_mesh.visual.face_colors / 255, alphaPerCell=True)
                    geom_list.append(quad_mesh_vedo)
                    geom_list.append(vedo_frustum)

                    for actor in geom_list:
                        vedo_plt.add(actor)

                    # p_.camera_position = [
                    #     (R * np.cos(t), R * np.sin(t), z),
                    #     (c[0], c[1], c[2]),  # (-0.026929191045848594, 0.5783514020506139, 0.8268966663940324),
                    #     (0, 0, 1),
                    # ]
                    vedo_plt.camera.SetPosition(7*np.cos(-145*np.pi/180.0), 7*np.sin(-145*np.pi/180.0), 6.25)
                    vedo_plt.camera.SetFocalPoint(-0.026929191045848594, 0.5783514020506139, 0.9268966663940324)
                    vedo_plt.camera.SetViewUp(np.array([0, 0, 1]))
                    vedo_plt.camera.SetDistance(7.8)
                    vedo_plt.camera.SetClippingRange([0.25, 30])
                    vedo_plt.show(interactive=False, rate=30, resetcam=False, fullscreen=True)
                    time.sleep(0.5)
                    actors = vedo_plt.actors
                    for i in range(len(cameras)):
                        vedo_plt.remove(actors.pop())
                        vedo_plt.remove(actors.pop())

        # plot_particle_3d(particle=particle, environment=environment, cameras=cameras, placed_cameras=placed_cameras,
        #                  view_time=0.0001, figure_number=0)
    # print("Particle score: " + str(score) + "; Pose: " + str(cameras[0].pose))

    if debug:
        print(score)

    return score
Example no. 15
def user_approves_surface(perch_region, full_mesh, full_mesh_path, R, g=np.array([0, 0, -1]), env_path=None):
    #  Find Oriented Bounding Box for surface
    global approved

    # load the mesh with trimesh, using the path passed in rather than a
    # hard-coded one
    mesh = trimesh.load(full_mesh_path)
    R_aug = np.eye(4)
    R_aug[:3, :3] = R
    mesh.vertices = trimesh.transform_points(mesh.vertices, R_aug)

    # Crop the mesh using face color
    f_colors = np.asarray(mesh.visual.face_colors)
    f_colors = f_colors / 255.0
    # add opacity
    f_colors[:, 3] = 1.0

    # vedo_mesh = vedo.mesh.Mesh(mesh)
    # vedo_mesh.cellIndividualColors(f_colors, alphaPerCell=True)
    # vedo_mesh.frontFaceCulling()
    # plt1.clear()
    # plt1.add(vedo_mesh)
    # plt1.add(vedo.Text2D("Press 'y' to approve surface. Press 'n' to reject. "
    #                      "\nPress 'f' for front culling, 'b' for back culling, 'c' to disable culling "
    #                      "\nPress 'q' when done",
    #                      pos='bottom-right', c='dg', bg='g', font='Godsway'))

    pvm = pv.PolyData(full_mesh_path)
    n = perch_region.mesh_normal
    pm = pymesh.form_mesh(perch_region.points + n*0.1, perch_region.faces)
    pms, _ = pymesh.split_long_edges(pm, 0.1)
    pm2 = pymesh.form_mesh(perch_region.points - n*0.1, perch_region.faces)
    pms2, _ = pymesh.split_long_edges(pm2, 0.1)
    #
    # surf_colors = np.zeros([len(pms.faces), 4])
    # surf_colors[:, 1] = 0.5  # g
    # surf_colors[:, 3] = 1.0  # alpha
    surf_mesh = pv.PolyData(pms.vertices, np.hstack([np.ones([len(pms.faces), 1])*3, pms.faces]).astype(int))
    surf_mesh2 = pv.PolyData(pms2.vertices, np.hstack([np.ones([len(pms2.faces), 1])*3, pms2.faces]).astype(int))

    # p.add_mesh(surf_mesh, scalars=surf_colors, rgb=True, name="surf_mesh")
    p.add_mesh(surf_mesh, color='g', opacity=.50, name="surf_mesh", culling=False)
    p.add_mesh(surf_mesh2, color='g', opacity=.50, name="surf_mesh2", culling=False)

    p.add_mesh(pvm, scalars=f_colors, rgb=True, name="env_mesh", culling=False)  # f_colors[:,:3])

    p.enable_cell_picking(mesh=surf_mesh, style='wireframe', color='r', through=True,
                          show_message="")
    p.add_text(text="Press R to toggle selection tool\n"
                                       "Press D to remove selected region\n"
                                       "Press A to keep only the selected region\n"
                                       "Press Y to approve region\n"
                                       "Press N to reject region\n"
                                       "Press Q to confirm selection\n",
               color='k', font_size=18)

    p.add_key_event(key='d', callback=pyvista_remove_region)
    p.add_key_event(key='a', callback=pyvista_select_region)
    p.add_key_event(key='y', callback=pyvista_approve_region)
    p.add_key_event(key='n', callback=pyvista_reject_region)

    # pvm.plot(scalars=f_colors, rgb=True)

    p.show(auto_close=True, interactive=True, full_screen=True)

    # plt1.show()
    p.deep_clean()
    p.clear()
    p.close()

    return approved, selection
Example no. 16
        fx=K[0, 0],
        fy=K[1, 1],
        cx=K[0, 2],
        cy=K[1, 2],
    )
    mask = ~np.isnan(pcd).any(axis=2) & mask

    points = pcd[mask]
    values = rgb[mask]

    T_world2camera = np.r_[example["meta"]["rotation_translation_matrix"],
                           [[0, 0, 0, 1]], ]
    T_camera2world = tf.inverse_matrix(T_world2camera)

    # camera frame -> world frame
    points = trimesh.transform_points(points, T_camera2world)

    if mapping is None:
        centroid = points.mean(axis=0)
        origin = centroid - pitch * voxel_dim / 2
        mapping = morefusion.geometry.VoxelMapping(origin=origin,
                                                   pitch=pitch,
                                                   voxel_dim=voxel_dim,
                                                   nchannel=3)

    mapping.add(points, values)

print("pitch:", pitch)
print("voxel_dim:", voxel_dim)
print("class_id:", class_id)
Example no. 17
  def is_valid_position(self, levelscript: LevelScriptParser, obj: Object3D, position, rules: list, is_pre_position: bool = False):
    """ Validate if this position is valid for the given object, position and for the rules that are given to this method.
    
    Arguments:
        levelscript {LevelScriptParser} -- Levelscript for the Level that contains this object3d
        obj {Object3D} -- Target object that is going to be randomized
        position {list} -- Position that is requested
        rules {list} -- List of rules this randomization must obey
    """

    if not self.check_walls(obj.area_id, levelscript, position, rules):
      self.log_reason_for_reject("is_valid_position", "object failed wall check")
      return False

    if self.inside_forbidden_boundary(obj.area_id, levelscript, position):
      self.log_reason_for_reject("is_valid_position", "object found in forbidden boundary")
      return False

    floor_properties = self.check_floor(obj.area_id, levelscript, position, rules)

    # check for floor if the rule is set and True
    if "no_floor_required" not in rules or rules["no_floor_required"] != True:
      if floor_properties is False:
        self.log_reason_for_reject("is_valid_position", "object floor required but none found")
        return False

    # check the floor type if the rule is set and the floor exists
    if "no_floor_required" not in rules and "floor_types_allowed" in rules and floor_properties is not False:
      if rules["floor_types_allowed"] == "all":
        if floor_properties["collision_type"] not in self.rom.config.constants["collision_types"].values():
          self.log_reason_for_reject("is_valid_position", f'object floor type is "all" but "{hex(floor_properties["collision_type"])}" is unknown')
          return False
      elif rules["floor_types_allowed"] not in self.rom.config.collision_groups:
        self.log_reason_for_reject("is_valid_position", f'unknown {rules["floor_types_allowed"]} floor type')
        return False
      elif floor_properties["collision_type"] not in self.rom.config.collision_groups[rules["floor_types_allowed"]].values():
        self.log_reason_for_reject("is_valid_position", 'object floor type was not allowed')
        return False

    if "disable_planes" in obj.level.properties:
      for entry in obj.level.properties["disable_planes"]:
        plane_type = list(entry.keys())[0]
        (start, end) = entry[plane_type]
        lower = min(start, end)
        upper = max(start, end)

        if plane_type == "y_range":
          if position[1] > lower and position[1] < upper:
            #print(position, " is between ", (lower, upper))
            self.log_reason_for_reject("is_valid_position", "in level disable plane")
            return False

    if "min_y" in rules:
      if position[1] < rules["min_y"]:
        #print("min_y", position, rules["min_y"])
        self.log_reason_for_reject("is_valid_position", "object position below min_y")
        return False

    if "max_y" in rules:
      if position[1] > rules["max_y"]:
        #print("max_y", position, rules["max_y"])
        self.log_reason_for_reject("is_valid_position", "object position above max_y")
        return False

    if "distance" in rules:
      for distance_rules in rules["distance"]:
        origin = distance_rules["origin"]

        distance = math.sqrt(
          (position[0] - origin[0]) ** 2 +
          (position[1] - origin[1]) ** 2 +
          (position[2] - origin[2]) ** 2
        )

        if "max_distance" in distance_rules:
          if distance > distance_rules["max_distance"]:
            self.log_reason_for_reject("is_valid_position", "object too far away from origin")
            return False

        if "min_distance" in distance_rules:
          if distance > distance_rules["min_distance"]:
            self.log_reason_for_reject("is_valid_position", "object too close to origin")
            return False
      
    if "max_floor_steepness" in rules and floor_properties is not False:
      floor_slope = floor_properties["triangle_normal"][1]

      # 1 = Floor. 0 = Wall.
      slope_allowed = abs(float(rules["max_floor_steepness"]) - 1.0)
      # validate steepness: the floor normal's y component must be at least this large
      if floor_slope < slope_allowed:
        self.log_reason_for_reject("is_valid_position", "floor too steep")
        return False
    
    if "underwater" in rules:
      underwater_status = rules["underwater"]

      if underwater_status == "only":
        if not self.is_in_water_box(obj.area_id, levelscript.water_boxes, position):
          self.log_reason_for_reject("is_valid_position", "can only be in water but was not in waterbox")
          return False
      elif underwater_status == "never":
        if self.is_in_water_box(obj.area_id, levelscript.water_boxes, position):
          self.log_reason_for_reject("is_valid_position", "can never be in water but was in waterbox")
          return False
      elif underwater_status == "allowed" or underwater_status == True:
        pass

    if not is_pre_position and "bounding_cylinder" in rules:
      cylinder_def = rules["bounding_cylinder"]
      radius = cylinder_def[0]
      height = cylinder_def[1] if len(cylinder_def) > 1 else 100
      orig_x = cylinder_def[2] if len(cylinder_def) > 2 else 0
      orig_y = cylinder_def[3] if len(cylinder_def) > 3 else 0
      orig_z = cylinder_def[4] if len(cylinder_def) > 4 else 0

      target_position = [
        position[0] + orig_x,
        position[1] + orig_y,
        position[2] + orig_z
      ]

      # positions here are in weird hand (y is up/down)
      for face_index, (start, end) in enumerate(levelscript.level_geometry.area_face_aabbs[obj.area_id]):
        # check height: keep faces whose AABB overlaps the cylinder's y range
        if start[1] <= target_position[1] + height and end[1] >= target_position[1]:
          # at least one vert may be within the cylinder
          #print(face_index, len(levelscript.level_geometry.area_faces[obj.area_id]))
          tri_verts = levelscript.level_geometry.area_faces[obj.area_id][face_index]
          for vert in list(map(lambda x: levelscript.level_geometry.area_vertices[obj.area_id][x], tri_verts)):
            # y is ignored to calc without height differences, as they were previously checked
            distance = math.sqrt(
              (target_position[0] - vert[0]) ** 2 +
              (target_position[2] - vert[2]) ** 2
            )

            if distance < radius:
              #print(distance, radius)
              self.log_reason_for_reject("is_valid_position", "bounding cylinder intersection encountered")
              return False

    if not is_pre_position and "bounding_box" in rules:
      extents = [ # x, z, y
        -rules["bounding_box"][0], # x neg
        rules["bounding_box"][2], # y and z swap
        rules["bounding_box"][1]
      ]

      y_rot = (obj.rotation[1] * math.pi / 180)

      # rotate points
      vertices = [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1,
                  1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1]
      vertices = np.array(vertices, order='C', dtype=np.float64).reshape((-1, 3))
      
      vertices -= 0.5
      vertices *= extents
      
      translation_matrix = trimesh.transformations.translation_matrix(position)
      rotation_matrix = trimesh.transformations.rotation_matrix(y_rot, [0, 1, 0])

      concat_matrix = trimesh.transformations.concatenate_matrices(translation_matrix, rotation_matrix)

      translated_points = trimesh.transform_points(vertices, concat_matrix)
      
      #t = trimesh.transformations.translation_matrix(position)
      #r = trimesh.transformations.rotation_matrix(y_rot, [0, 1, 0])
      
      #bounding_pos = trimesh.transformations.concatenate_matrices(t, r)
      
      #bounding_box = trimesh.creation.box(extents=extents, transform=bounding_pos)

      # this will overwrite existing bounding meshes for the same obj
      #levelscript.level_geometry.add_object_bounding_mesh(obj, obj.area_id, bounding_box)

      #levelscript.level_geometry.area_collision_managers[obj.area_id].in_collision(f'{obj.name} bounding box', bounding_box, bounding_pos)

      # intersection check with world
      for (start, end) in levelscript.level_geometry.area_face_aabbs[obj.area_id]:
        for point in translated_points:
          if (point[0] > start[0] and point[0] < end[0]) and (point[1] > start[1] and point[1] < end[1]) and (point[2] > start[2] and point[2] < end[2]):
            self.log_reason_for_reject("is_valid_position", "bounding box intersection encountered")

            if 'SM64R' in os.environ and 'PLOT' in os.environ['SM64R']:
              t = trimesh.transformations.translation_matrix(position)
              r = trimesh.transformations.rotation_matrix(y_rot, [0, 1, 0])
              bounding_pos = trimesh.transformations.concatenate_matrices(t, r)
              bounding_box = trimesh.creation.box(extents=extents, transform=bounding_pos)
              #print(bounding_box.vertices)
              #print(translated_points)

              tri_extents = [
                abs(start[0] - end[0]),
                abs(start[1] - end[1]),
                abs(start[2] - end[2])
              ]
              tri_position = [
                (start[0] if start[0] > end[0] else end[0]) - (tri_extents[0]/2),
                (start[1] if start[1] > end[1] else end[1]) - (tri_extents[1]/2),
                (start[2] if start[2] > end[2] else end[2]) - (tri_extents[2]/2),
              ]

              tri_box = trimesh.creation.box(extents=tri_extents, transform=trimesh.transformations.translation_matrix(tri_position))

              #self.debug_placement(levelscript, obj, bounding_box, tri_box)
            
            return False

      '''# check if intersects with WORLD
      intersections = levelscript.level_geometry.area_geometries[obj.area_id].intersection(
        bounding_box
      )

      if not intersections.is_empty:
        self.log_reason_for_reject("is_valid_position", "bounding box intersection encountered")
        self.debug_placement(levelscript, obj, bounding_box)
        return False
      '''
      

      #return bounding_box
      #pass

    return True
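
The bounding-box construction above (unit-cube corners scaled to the extents, rotated about +Y, then translated) reduced to a short sketch with made-up extents and pose:

import math
import numpy as np
import trimesh

# eight corners of the unit cube, centered at the origin
corners = trimesh.bounds.corners([[0, 0, 0], [1, 1, 1]]) - 0.5
corners = corners * np.array([2.0, 1.0, 2.0])  # scale to the extents

# rotate about +Y, then translate to the object position
matrix = trimesh.transformations.concatenate_matrices(
    trimesh.transformations.translation_matrix([10.0, 0.0, -3.0]),
    trimesh.transformations.rotation_matrix(math.radians(45), [0, 1, 0]))
moved = trimesh.transform_points(corners, matrix)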
Example no. 18
def confirm_perch_placement(environment, placed_cameras, focus_id):
    global approved
    global user_responded

    plt1.keyPressFunction = perch_keyfunc
    plt1.clear()
    approved = False

    plt2.clear()
    dense_env = vedo.load(
        '/home/simon/catkin_ws/src/mesh_partition/datasets/' +
        environment.name + '_1m.ply')
    pv_dense_env = pv.read(
        '/home/simon/catkin_ws/src/mesh_partition/datasets/' +
        environment.name + '_1m.ply')

    plt2.add(dense_env)
    plt2.show()

    plt3 = pv.Plotter(notebook=False)
    plt3.add_mesh(pv_dense_env)
    # plt3.show(interactive=False)

    # draw wireframe lineset of camera frustum
    env_mesh = trimesh.load(
        '/home/simon/catkin_ws/src/mesh_partition/datasets/' +
        environment.name + '_1m.ply')
    # env_mesh = trimesh.load(environment.full_env_path)
    R = np.zeros([4, 4])
    R[:3, :3] = environment.R
    env_mesh.vertices = trimesh.transform_points(env_mesh.vertices, R)
    env_mesh_vedo = vedo.mesh.Mesh(env_mesh)

    target_mesh_pymesh = environment.generate_target_mesh(shape='box')
    target_mesh = trimesh.Trimesh(target_mesh_pymesh.vertices,
                                  target_mesh_pymesh.faces)
    target_mesh_vedo = vedo.mesh.Mesh(target_mesh)
    target_colors = 0.5 * np.ones([len(target_mesh.faces), 4])
    target_colors[:, 0] *= 0.0
    target_colors[:, 2] *= 0.0
    target_mesh_vedo.alpha(0.6)
    target_mesh_vedo.cellIndividualColors(target_colors, alphaPerCell=True)
    plt2.add(target_mesh_vedo)

    env_mesh.visual.face_colors[:, -1] = 255.0
    env_mesh_vedo.cellIndividualColors(env_mesh.visual.face_colors / 255.0,
                                       alphaPerCell=True)
    geom_list = [env_mesh_vedo, target_mesh_vedo]

    for s in environment.perch_regions:
        surf_mesh = trimesh.Trimesh(vertices=s.points, faces=s.faces)
        vedo_surf_mesh = vedo.mesh.Mesh(surf_mesh)
        vedo_surf_mesh.color('g')
        vedo_surf_mesh.opacity(0.5)
        geom_list.append(vedo_surf_mesh)

    for i in range(len(placed_cameras)):
        quad_mesh = trimesh.load(
            "/home/simon/catkin_ws/src/perch_placement/src/ui/models/white-red-black_quad2.ply"
        )

        # offset mesh coords to match camera pose
        # eul = placed_cameras[i].pose[3:]  # USE WALL NORMAL, NOT CAMERA POSE
        # R = rot3d_from_rtp(np.array([eul[2], -eul[0], -eul[1]]))
        R = rot3d_from_x_vec(placed_cameras[i].wall_normal)
        R2 = rot3d_from_rtp(np.array([0, -90, 0]))
        R_aug = np.zeros([4, 4])
        R_aug[:3, :3] = R.dot(R2)
        R_aug[:3, -1] = placed_cameras[i].pose[:3]
        quad_mesh.vertices = trimesh.transform_points(quad_mesh.vertices,
                                                      R_aug)
        quad_mesh_vedo = vedo.mesh.Mesh(quad_mesh)
        quad_mesh_vedo.cellIndividualColors(quad_mesh.visual.face_colors / 255,
                                            alphaPerCell=True)
        quad_mesh_pv = pv.read(
            "/home/simon/catkin_ws/src/perch_placement/src/ui/models/white-red-black_quad2.ply"
        )

        pymesh_frustum = placed_cameras[i].generate_discrete_camera_mesh(
            degrees_per_step=10, environment=environment)
        pymesh_verts = pymesh_frustum.vertices.copy()
        pymesh_verts.flags.writeable = True
        pymesh_faces = pymesh_frustum.faces.copy()
        pymesh_faces.flags.writeable = True

        if i == focus_id:
            frustum = trimesh.Trimesh(vertices=pymesh_frustum.vertices.copy(),
                                      faces=pymesh_frustum.faces.copy())
            vedo_frustum = vedo.mesh.Mesh(frustum)
            vedo_frustum.alpha(0.3)
            vedo_frustum.color("c")
            # geom_list.append(frustum_lines)
            geom_list.append(quad_mesh_vedo)
            geom_list.append(vedo_frustum)

            print("cam pose: " + str(placed_cameras[i].pose))

            pose = placed_cameras[i].pose
            plt2.camera.SetPosition(pose[0], pose[1], pose[2])
            R = rot3d_from_rtp(np.array([pose[-1], -pose[-3], -pose[-2]]))
            print("R: " + str(R))
            focus = pose[:3] + R[:, 0]
            print("focus: " + str(focus))
            plt2.camera.SetFocalPoint(focus[0], focus[1], focus[2])
            plt2.camera.SetViewUp(R[:, 2])
            plt2.camera.SetDistance(5)
            plt2.camera.SetClippingRange([0.2, 10])
            plt2.camera.SetViewAngle(placed_cameras[i].fov[-1] * 1.1)
            plt2.show(resetcam=False)

            plt3.set_position(pose[:3])
            plt3.set_viewup(R[:, 2])
            plt3.set_focus(focus)
            plt3.show(auto_close=False, interactive=False)

        else:
            # vedo_frustum.alpha(0.1)
            # vedo_frustum.color("p")
            quad_mesh_vedo.color('o')
            # geom_list.append(frustum_lines)
            geom_list.append(quad_mesh_vedo)
            # geom_list.append(vedo_frustum)
            plt2.add(quad_mesh_vedo)
            plt3.add_mesh(quad_mesh_pv)

    # testing:
    test = (-plt3.get_image_depth(fill_value=0) / placed_cameras[0].range[1])
    test[test > 1] = 1.0
    test[test < 0] = 0.0
    test = np.round(test * np.iinfo(np.uint16).max)
    test = test.astype(np.uint16)

    # test_cv = cv.normalize(-test / placed_cameras[0].range[1] * 255, 0, 255, cv.NORM_MINMAX)
    cv.imshow('test', test)
    cv.waitKey()

    for actor in geom_list:
        plt1.add(actor)

    plt1.add(
        vedo.Text2D(
            "Press 'y' to approve placement. Press 'n' to reject. "
            "\nPress 'f' for front culling, 'b' for back culling, 'c' to disable culling "
            "\nPress 'q' when done",
            pos='bottom-right',
            c='dg',
            bg='g',
            font='Godsway'))
    plt1.camera.SetPosition(7.8 * np.cos(-145 * np.pi / 180.0),
                            7.8 * np.sin(-145 * np.pi / 180.0), 3.)
    plt1.camera.SetFocalPoint(-0.026929191045848594, 0.5783514020506139,
                              0.8268966663940324)
    plt1.camera.SetViewUp(np.array([0, 0, 1]))
    plt1.camera.SetDistance(7.8)
    plt1.camera.SetClippingRange([0.25, 10])
    plt1.show(resetcam=False)

    return approved
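
For reference, a minimal self-contained sketch of the pose-to-mesh pattern used above (rot3d_from_x_vec and rot3d_from_rtp come from the original codebase; here a plain rotation matrix stands in for them, and the mesh is a hypothetical box):

import numpy as np
import trimesh

# hypothetical pose: 90-degree yaw about z, translated to (1, 2, 0.5)
R = trimesh.transformations.rotation_matrix(np.pi / 2, [0, 0, 1])[:3, :3]
T = np.eye(4)
T[:3, :3] = R
T[:3, 3] = [1.0, 2.0, 0.5]

# apply the homogeneous transform to every vertex
mesh = trimesh.creation.box(extents=[0.2, 0.2, 0.05])
mesh.vertices = trimesh.transform_points(mesh.vertices, T)
# equivalently: mesh.apply_transform(T)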
Esempio n. 19
0
import numpy as np
import trimesh

mesh_file = "mesh.ply"
pts_file = "pts.csv"
out_file = "msh_fixed.ply"


def scaler(pts, scale_factor=1.1):
    """Return a 4x4 transform mapping the bounding box of pts into a
    unit cube, with scale_factor of padding on the longest axis."""
    pt_min, pt_max = pts.min(axis=0), pts.max(axis=0)
    center = (pt_min + pt_max) / 2.0
    scale = np.max(pt_max - pt_min) * scale_factor
    # shift from the box center to the corner of the padded cube
    center = center - scale / 2
    scale_tform = np.eye(4) / scale
    scale_tform[3, 3] = 1
    translate_tform = np.eye(4)
    translate_tform[:-1, 3] = -center
    # translate to the origin first, then scale down
    return scale_tform @ translate_tform


msh = trimesh.load_mesh(mesh_file)
with open(pts_file) as f:
    pts = np.asarray([[float(e) for e in row.strip().split(",")]
                      for row in f.readlines()[1:]])[:, :3]

msh.vertices = trimesh.transform_points(msh.vertices,
                                        np.linalg.inv(scaler(pts)))
msh.export(out_file)
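
A quick sanity check for the script above (a sketch: pts here is a hypothetical (N, 3) array standing in for pts.csv, and scaler is the function defined above): the forward transform should land every point inside the unit cube, and composing with its inverse should round-trip the coordinates.

import numpy as np
import trimesh

pts = np.random.rand(100, 3) * 5.0  # hypothetical stand-in for pts.csv
tform = scaler(pts)

# forward: all points fall inside the padded unit cube
normalized = trimesh.transform_points(pts, tform)
assert normalized.min() >= 0.0 and normalized.max() <= 1.0

# inverse: undoing the normalization recovers the original coordinates
restored = trimesh.transform_points(normalized, np.linalg.inv(tform))
assert np.allclose(restored, pts)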
Esempio n. 20
0
    def power_panel_con_actitud(self, Sun_vector, WSun):
        """
        power_panel_con_actitud 
        Obtiene la potencia producida por el satelite con actitud apuntando al sol

        Args:
            Sun_vector (array(,3)): Vector sol en LVLH
            WSun (float): Potencia irradiada por el sol 

        Returns:
            W (array(,n)) : Potencia generada
            area_potencia (array(,n)) : Areas que generan potencia
            ang (array(,n)) : Angulo de incidencia del vector sol con las caras
            angulo_giro (array(,n)) : Angulo de giro del satelite 
        n : numero de caras
        """
        # If the panels are fixed to the satellite
        if not self.Despegables_orientables:
            if self.actitud.apuntado_sol:

                # here the magic begins:
                # the intent was to form one plane spanned by the spin axis
                # and the sun vector, and another spanned by the spin axis and
                # a principal direction of the solar panels, so the rotation
                # needed between the two planes can be computed

                direcion_principal = self.mesh.facets_normal[
                    self.Caras_Despegables[0]]

                plano0 = np.cross(Sun_vector, self.actitud.eje_de_spin)
                plano0 = plano0 / np.linalg.norm(plano0)

                plano1 = np.cross(direcion_principal, self.actitud.eje_de_spin)
                plano1 = plano1 / np.linalg.norm(plano1)

                # both plane normals are unit vectors, so the angle between
                # the planes is the arccosine of their dot product
                angulo_giro = np.arccos(np.absolute(np.dot(plano0, plano1)))

                if np.isnan(angulo_giro):
                    angulo_giro = 0.0

                if angulo_giro != 0:
                    # Check whether rotating plano1 by +angulo_giro about the
                    # spin axis lands on plano0; if not, the rotation goes the
                    # other way, so flip the sign
                    prim = trimesh.transform_points(
                        plano1.reshape(1, 3),
                        trimesh.transformations.rotation_matrix(
                            angulo_giro, self.actitud.eje_de_spin, [0, 0, 0]))
                    if not np.allclose(prim, plano0):
                        angulo_giro = -angulo_giro
                    self.mesh = self.mesh.apply_transform(
                        trimesh.transformations.rotation_matrix(
                            angulo_giro, self.actitud.eje_de_spin, [0, 0, 0]))
            else:
                angulo_giro = 0.0

            index_tri = self.celdas_activas(Sun_vector)
            W, area_potencia, ang = self.power_panel_solar(
                index_tri, Sun_vector, WSun)

            return W, area_potencia, ang, angulo_giro

        else:

            if self.actitud.apuntado_sol:

                # more magic here, now projecting the sun vector onto the
                # plane normal to the spin axis so the rotation angle can be
                # used; this works fairly well, the problem arises when
                # crossing the equator

                direcion_principal = self.mesh.facets_normal[
                    self.Caras_Despegables[0]]
                direcion_principal = np.round(
                    direcion_principal / np.linalg.norm(direcion_principal), 5)

                # project the sun vector onto the plane normal to the spin axis
                matrix_projection = trimesh.transformations.projection_matrix(
                    [0, 0, 0], self.actitud.eje_de_spin)[0:3, 0:3]
                proyeccion = np.dot(matrix_projection, Sun_vector)
                proyeccion = proyeccion / np.linalg.norm(proyeccion)
                ver = np.arccos(np.dot(proyeccion, direcion_principal))

                if np.isnan(ver):
                    ver = 0.0
                if ver < 0.1e-4:
                    angulo_giro = 0.0
                else:

                    # print("proyeccion",proyeccion)
                    # print("direprinci",direcion_principal)
                    #angulo_giro=np.arccos(np.absolute(np.dot(direcion_principal, proyeccion)))/(np.linalg.norm(direcion_principal)*np.linalg.norm(proyeccion))
                    # rotation that aligns the panel direction with the
                    # projected sun vector
                    transforma = trimesh.geometry.align_vectors(
                        direcion_principal, proyeccion)
                    # posicion_eje=np.array(np.where(np.array(self.actitud.eje_de_spin)==1)).flatten().max()

                    angulo_giro = trimesh.transformations.rotation_from_matrix(
                        transforma)[0]
                    dir = trimesh.transform_points(
                        direcion_principal.reshape(1, 3), transforma)

                    if np.absolute(angulo_giro) > 0.05:

                        transforma2 = np.round(
                            trimesh.geometry.align_vectors(
                                direcion_principal, -proyeccion), 5)
                        angulo_giro2 = trimesh.transformations.rotation_from_matrix(
                            transforma2)[0]
                        dir = trimesh.transform_points(
                            direcion_principal.reshape(1, 3), transforma2)

                        if np.absolute(angulo_giro2) < np.absolute(
                                angulo_giro):
                            transforma = transforma2
                            angulo_giro = angulo_giro2

                    # if plano1[posicion_eje]==0:
                    #   angulo_giro=0.0
                    if np.isnan(angulo_giro):
                        angulo_giro = 0.0
                    else:
                        self.mesh.apply_transform(transforma)
            else:
                angulo_giro = 0.0

            ang = list(map(Sun_vector.dot, self.mesh.facets_normal))
            area_potencia = []
            W = []
            angulo_giro = [angulo_giro]

            for i in range(len(self.mesh.facets)):
                # facet area, presumably converted from mm^2 to m^2
                area = self.mesh.facets_area[i] / (1000**2)
                area_potencia.append(area)
                if i in self.Caras_Despegables:
                    # deployable faces track the sun: record their rotation
                    # and assume full incidence
                    angulo_giro.append(np.arccos(ang[i]))
                    ang[i] = 1
                # only faces within 75 degrees of the sun generate power
                if ang[i] >= 0 and ang[i] > np.cos((np.pi / 180) * 75):
                    W.append(
                        area *
                        self.caracteristicas_panel_solar[i].psolar_rendimiento
                        * WSun * ang[i])
                else:
                    W.append(0.)
            return W, area_potencia, ang, angulo_giro
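
The fixed-panel branch above reduces to a reusable pattern: measure the angle between two planes that share the spin axis, then pick the rotation sign by testing which direction maps one plane normal onto the other. A minimal sketch with assumed inputs (a unit spin axis and hypothetical sun and panel vectors; no class state is needed):

import numpy as np
import trimesh

spin_axis = np.array([0.0, 0.0, 1.0])     # assumed unit spin axis
sun = np.array([1.0, 1.0, 0.2])           # hypothetical sun vector
panel_normal = np.array([0.0, 1.0, 0.0])  # hypothetical panel direction

# unit normals of the planes spanned by (sun, spin) and (panel, spin)
n0 = np.cross(sun, spin_axis)
n0 = n0 / np.linalg.norm(n0)
n1 = np.cross(panel_normal, spin_axis)
n1 = n1 / np.linalg.norm(n1)

# unsigned angle between the planes
angle = np.arccos(np.abs(np.dot(n0, n1)))

# disambiguate the sign: rotate n1 and check whether it lands on n0
rot = trimesh.transformations.rotation_matrix(angle, spin_axis, [0, 0, 0])
if not np.allclose(trimesh.transform_points(n1.reshape(1, 3), rot)[0], n0):
    angle = -angle  # the rotation goes the other way around the spin axis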