Example #1
def grid(scale=1):
    path = []
    N = 10
    for i in range(2):
        for x in range(-N, N + 1):
            if i == 0:
                path.append(trimesh.load_path([[x, -N], [x, N]]))
            else:
                path.append(trimesh.load_path([[-N, x], [N, x]]))
    path = functools.reduce(lambda x, y: x + y, path)
    path.apply_scale(scale)
    return path
def get_mesh_scene(mesh, boundary=None, editable_vertices=None):
    scene = [mesh]
    if boundary:
        scene.append(trimesh.load_path(mesh.vertices[boundary +
                                                     [boundary[0]]]))
    if editable_vertices:
        scene.append(
            trimesh.points.PointCloud(mesh.vertices[editable_vertices +
                                                    boundary]))

    return trimesh.Scene(scene)
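A minimal usage sketch for the two helpers above (not from the indexed project): it assumes trimesh and functools are importable and uses an arbitrary box mesh and vertex loop purely for illustration.

import functools
import trimesh

# ground grid lifted into 3D next to a small box
mesh = trimesh.creation.box(extents=[2, 2, 2])
floor = grid(scale=0.5).to_3D()
trimesh.Scene([mesh, floor]).show()

# highlight an (arbitrary) closed vertex loop on the mesh itself
get_mesh_scene(mesh, boundary=[0, 1, 3, 2]).show()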
Example #3
def contract():
    vertices, edges = surface_mesh.contract()
    print('vertices\n', vertices)
    print('edges\n', edges)

    #XXX: need to shape this right on exit
    vertices = np.array(vertices).reshape((-1, 3))

    print('number_of_vertices: ', len(vertices))
    print('number_of_edges: ', len(edges))

    # display the skeleton shifted over, so one can see it
    path_visual = trimesh.load_path(vertices[edges, :] + [2, 0, 0])
    scene = trimesh.Scene([path_visual, mesh])
    scene.show()
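The vertices[edges, :] indexing above is the key trick: indexing an (n, 3) vertex array with an (m, 2) array of edge indices yields an (m, 2, 3) array of line segments, which trimesh.load_path renders as m independent segments. A standalone sketch with made-up data:

import numpy as np
import trimesh

vertices = np.random.random((10, 3))          # (n, 3) skeleton points
edges = np.array([[0, 1], [2, 3], [4, 5]])    # (m, 2) index pairs
segments = vertices[edges]                    # (m, 2, 3) line segments
path_visual = trimesh.load_path(segments + [2, 0, 0])  # shifted, as above
path_visual.show()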
Example #4
def visualize(trainer, sampler, tri_mesh_env, tri_mesh_obj, caption):
    tri_cloud_ibs = trimesh.points.PointCloud(sampler.np_cloud_ibs,
                                              color=[255, 0, 0, 100])
    pv_origin = trimesh.points.PointCloud(trainer.pv_points,
                                          color=[0, 0, 255, 250])

    tri_mesh_env.visual.face_colors = [100, 100, 100, 100]
    tri_mesh_obj.visual.face_colors = [0, 255, 0, 100]

    pv_3d_path = np.hstack(
        (trainer.pv_points,
         trainer.pv_points + trainer.pv_vectors)).reshape(-1, 2, 3)

    provenance_vectors = trimesh.load_path(pv_3d_path)

    scene = trimesh.Scene([
        provenance_vectors, pv_origin, tri_cloud_ibs, tri_mesh_env,
        tri_mesh_obj
    ])
    scene.show(callback=get_camera, caption=caption)
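The np.hstack(...).reshape(-1, 2, 3) idiom above recurs throughout these examples: stacking start and end points column-wise and reshaping gives one (start, end) pair per row, i.e. an (n, 2, 3) segment array that load_path turns into a Path3D. A small sketch with assumed data:

import numpy as np
import trimesh

origins = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
vectors = np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]])

# hstack -> (n, 6); reshape -> (n, 2, 3): row i is [origin_i, origin_i + vector_i]
segments = np.hstack((origins, origins + vectors)).reshape(-1, 2, 3)
provenance_vectors = trimesh.load_path(segments)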
Example #5
    def visualize_mesh(self,
                       faces_to_colour,
                       vector_origins=[],
                       vector_normals=[],
                       scale=2.0):
        """ Debugging plot for visualizing intersecting faces and vectors
        
        Parameters
        ----------
        faces_to_colour : (n,1) array
            array of face indexes that need to be coloured differently
        vector_origins : (n,3) np.array
            set of vector origins to plot
        vector_normals : (n,3) np.array
            List of normal vectors corresponding to vector_origins
        scale: float, optional
            Amount to scale the vector normal plot by
        
        """

        mesh = self.mesh.copy()
        # unmerge so viewer doesn't smooth
        mesh.unmerge_vertices()
        # make the base mesh grey-ish and semi-transparent
        mesh.visual.face_colors = [105, 105, 105, 105]
        mesh.visual.face_colors[faces_to_colour] = [255, 0, 0, 255]

        if len(vector_origins) > 0 and len(vector_normals) > 0:
            # stack rays into line segments for visualization as Path3D
            ray_visualize = trimesh.load_path(
                np.hstack((vector_origins, vector_origins +
                           vector_normals * scale)).reshape(-1, 2, 3))
            ray_visualize.merge_vertices()
            scene = trimesh.Scene([mesh, ray_visualize])
        else:
            scene = trimesh.Scene([mesh])
        scene.show()
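The unmerge_vertices() call is what keeps the highlighted faces crisp: duplicating shared vertices stops the viewer from smooth-shading across face boundaries, so per-face colors stay flat. A minimal standalone sketch of the same idea (arbitrary mesh and face indices):

import trimesh

mesh = trimesh.creation.icosphere()
mesh.unmerge_vertices()                          # flat shading per face
mesh.visual.face_colors = [105, 105, 105, 255]   # base color for every face
mesh.visual.face_colors[:20] = [255, 0, 0, 255]  # highlight the first 20 faces
mesh.show()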
Example #6
    pv_3d_path = np.hstack(
        (trainer_weighted.pv_points, trainer_weighted.pv_points +
         trainer_weighted.pv_vectors)).reshape(-1, 2, 3)

    # pv_normalized_vectors = preprocessing.normalize(trainer_weighted.pv_vectors)
    pv_max_vectors = preprocessing.normalize(
        trainer_weighted.pv_vectors) * max_d.max_distances.reshape(-1, 1)
    calculated_max_intersections = trainer_weighted.pv_points + pv_max_vectors

    pv_max_path = np.hstack((trainer_weighted.pv_points,
                             calculated_max_intersections)).reshape(-1, 2, 3)

    pv_intersections = trimesh.points.PointCloud(calculated_max_intersections,
                                                 color=[0, 0, 255, 250])

    provenance_vectors = trimesh.load_path(pv_3d_path)
    provenance_vectors_max_path = trimesh.load_path(pv_max_path)

    scene = trimesh.Scene([
        provenance_vectors, pv_origin, tri_mesh_ibs_segmented, tri_mesh_obj,
        tri_mesh_env
    ])

    scene.show(flags={'cull': False, 'wireframe': False, 'axis': False})

    scene = trimesh.Scene([
        provenance_vectors_max_path, pv_origin, pv_intersections,
        max_d.sphere_of_influence, tri_mesh_obj
    ])
    scene.show(flags={'cull': False})
Example #7
    def on_mouse_double_click(self, x, y):
        res = self._scene.camera.resolution
        fov_y = np.radians(self._scene.camera.fov[1] / 2.0)
        fov_x = fov_y * (res[0] / float(res[1]))
        half_fov = np.stack([fov_x, fov_y])

        right_top = np.tan(half_fov)
        right_top *= 1 - (1.0 / res)
        left_bottom = -right_top

        right, top = right_top
        left, bottom = left_bottom

        xy_vec = tu.grid_linspace(bounds=[[left, top], [right, bottom]], count=res).astype(np.float64)
        pixels = tu.grid_linspace(bounds=[[0, 0], [res[0] - 1, res[1] - 1]], count=res).astype(np.int64)
        assert xy_vec.shape == pixels.shape

        transform = self._scene.camera_transform
        vectors = tu.unitize(np.column_stack((xy_vec, -np.ones_like(xy_vec[:, :1]))))
        vectors = tf.transform_points(vectors, transform, translate=False)
        origins = (np.ones_like(vectors) * tf.translation_from_matrix(transform))

        indices = np.where(np.all(pixels == np.array([x, y]), axis=1))
        if len(indices) > 0 and len(indices[0]) > 0:
            pixel_id = indices[0][0]
            ray_origin = np.expand_dims(origins[pixel_id], 0)
            ray_direction = np.expand_dims(vectors[pixel_id], 0)
            # print(x, y, pixel_id, ray_origin, ray_direction)

            mesh = self._scene.geometry['geometry_0']

            locations, index_ray, index_tri = mesh.ray.intersects_location(
                ray_origins=ray_origin,
                ray_directions=ray_direction)

            if locations.size == 0:
                return

            ray_origins = np.tile(ray_origin, [locations.shape[0], 1])
            distances = np.linalg.norm(locations - ray_origins, axis=1)
            idx = np.argsort(distances)  # sort by distance

            # color closest hit
            tri_color = mesh.visual.face_colors[index_tri[idx[0]]]
            if not np.all(tri_color == [255, 0, 0, 255]):
                tri_color = [255, 0, 0, 255]
            else:
                # unselect triangle
                tri_color = [200, 200, 200, 255]

            mesh.visual.face_colors[index_tri[idx[0]]] = tri_color

            # collect clicked triangle ids
            tri_ids = np.where(np.all(mesh.visual.face_colors == [255, 0, 0, 255], axis=-1))[0]

            if len(tri_ids) >= self._settings_loader.min_triangles:
                # get center of triangles
                barycentric = mesh.triangles_center[tri_ids]
                joint_x = np.mean(barycentric[:, 0])
                joint_y = np.mean(barycentric[:, 1])
                joint_z = np.mean(barycentric[:, 2])
                joint = np.stack([joint_x, joint_y, joint_z])

                if 'joint_0' in self._scene.geometry:
                    self._scene.delete_geometry('joint_0')

                joint = np.expand_dims(joint, 0)
                joint = PointCloud(joint, process=False)
                self._scene.add_geometry(joint, geom_name='joint_0')

            if self.view['rays']:
                from trimesh import load_path
                ray_visualize = load_path(np.hstack((ray_origin, ray_origin + ray_direction)).reshape(-1, 2, 3))
                self._scene.add_geometry(ray_visualize, geom_name='cam_rays')

                # draw path where camera ray hits with mesh (only take 2 closest hits)
                path = np.hstack(locations[:2]).reshape(-1, 2, 3)
                ray_visualize = load_path(path)
                self._scene.add_geometry(ray_visualize, geom_name='cam_rays_hits')
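Much of the pixel-to-ray math above can be delegated: recent trimesh versions expose Scene.camera_rays(), which returns per-pixel ray origins, directions, and pixel coordinates in one call. A hedged sketch of looking up the clicked pixel that way (assuming scene is a trimesh.Scene and (x, y) is the click):

import numpy as np

origins, vectors, pixels = scene.camera_rays()
hit = np.all(pixels == [x, y], axis=1)
if hit.any():
    pixel_id = np.nonzero(hit)[0][0]
    ray_origin = origins[pixel_id:pixel_id + 1]
    ray_direction = vectors[pixel_id:pixel_id + 1]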
Example #8
def dual_cycloidal(eccentricity=.07,
                   count_pin=24,
                   radius_pin=.125,
                   radius_pattern=2.25,
                   input_count=3,
                   input_radius=.5625,
                   input_pattern=1.3125):
    '''
    Generate the profiles, pins, and holes for a regular dual-disc
    cycloidal drive. This design has two discs operating 180 degrees
    out of phase to minimize vibration.

    Parameters
    ------------
    eccentricity:   float, magnitude of eccentricity
    count_pin: int, number of fixed pins
    radius_pin:     float, radius of a fixed pin
    radius_pattern: float, radius of pin pattern
    input_count:    int, number of holes in the cycloidal disc
    input_radius:   float, radius of the holes in the cycloidal disc
    input_pattern:  float, radius of the hole pattern in the disc

    Returns
    -------------
    drive: Path2D object, with two disc layers and a pin layer
    '''

    # half a tooth spacing for transforming disc to pin pattern
    spacing = .5 * np.pi / (count_pin - 1)

    # get a disc profile, with a very dense sampling
    a = trimesh.load_path(
        cycloidal_profile(count_pin=count_pin,
                          count_cam=count_pin - 1,
                          eccentricity=eccentricity,
                          radius_pin=radius_pin,
                          radius_pattern=radius_pattern,
                          resolution=32))

    # replace the polyline entity with a bajillion points with a
    # tightly fit B-Spline
    a = a.simplify_spline(smooth=1e-6)

    # the second disc has the same profile, with a different transform
    b = a.copy()
    # for the first disc, apply the transform to line up with the pins
    a.apply_transform(
        trimesh.transformations.planar_matrix(offset=[-eccentricity, 0.0],
                                              theta=spacing))
    a.apply_layer('cam_disc_A')
    # do the same for the second disc
    b.apply_transform(
        trimesh.transformations.planar_matrix(offset=[eccentricity, 0.0],
                                              theta=-spacing))
    b.apply_layer('cam_disc_B')

    # generate the fixed pins
    pins = trimesh.path.creation.circle_pattern(pattern_radius=radius_pattern,
                                                circle_radius=radius_pin,
                                                count=count_pin)
    pins.apply_layer('pins')

    # add the input holes for each cycloidal disc
    holes_A = trimesh.path.creation.circle_pattern(
        pattern_radius=input_pattern,
        circle_radius=input_radius,
        center=[-eccentricity, 0.0],
        count=input_count)
    holes_A.apply_layer('cam_disc_A')

    holes_B = trimesh.path.creation.circle_pattern(
        pattern_radius=input_pattern,
        circle_radius=input_radius,
        center=[eccentricity, 0.0],
        count=input_count)
    holes_B.apply_layer('cam_disc_B')

    # concatenate all of the paths into a single drawing
    drive = a + b + pins + holes_A + holes_B

    return drive
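A short usage sketch (assuming cycloidal_profile and trimesh are importable): the returned Path2D can be previewed directly or exported, e.g. to DXF.

drive = dual_cycloidal(eccentricity=0.07, count_pin=24)
drive.show()                        # preview the 2D drawing
drive.export('dual_cycloidal.dxf')  # Path2D export for downstream CAM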
Example #9
File: shortest.py  Project: mikedh/trimesh
    # you can also create the graph with from_edgelist and
    # a list comprehension, which is like 1.5x faster
    ga = nx.from_edgelist([(e[0], e[1], {'length': L})
                           for e, L in zip(edges, length)])

    # arbitrary indices of mesh.vertices to test with
    start = 0
    end = int(len(mesh.vertices) / 2.0)

    # run the shortest path query using length for edge weight
    path = nx.shortest_path(g,
                            source=start,
                            target=end,
                            weight='length')

    # VISUALIZE RESULT
    # make the sphere transparent-ish
    mesh.visual.face_colors = [100, 100, 100, 100]
    # Path3D with the path between the points
    path_visual = trimesh.load_path(mesh.vertices[path])
    # visualize the two endpoints
    points_visual = trimesh.points.PointCloud(mesh.vertices[[start, end]])

    # create a scene with the mesh, path, and points
    scene = trimesh.Scene([
        points_visual,
        path_visual,
        mesh])

    scene.show(smooth=False)
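This snippet references mesh, edges, length, and a graph g created earlier in trimesh's shortest.py example; a sketch of that setup using attributes trimesh provides:

import networkx as nx
import trimesh

mesh = trimesh.creation.icosphere()    # any watertight mesh works
edges = mesh.edges_unique              # (n, 2) vertex index pairs
length = mesh.edges_unique_length      # Euclidean length of each unique edge

g = nx.Graph()
for edge, L in zip(edges, length):
    g.add_edge(*edge, length=L)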
Example #10
                    size_ibs_sample, random_num_generated, execution_time
                ]

                # #####################################################################################################
                # Saving information
                tri_mesh_env.visual.face_colors = [100, 100, 100, 255]
                tri_mesh_obj.visual.face_colors = [0, 255, 0, 100]
                tri_cloud_ibs = trimesh.points.PointCloud(
                    sampler_poissondisc_weighted.np_cloud_ibs,
                    color=[0, 0, 255, 150])
                pv_origin = trimesh.points.PointCloud(
                    trainer_weighted.pv_points, color=[0, 0, 255, 250])
                pv_3d_path = np.hstack(
                    (trainer_weighted.pv_points, trainer_weighted.pv_points +
                     trainer_weighted.pv_vectors)).reshape(-1, 2, 3)
                provenance_vectors = trimesh.load_path(pv_3d_path)
                scene = trimesh.Scene([
                    provenance_vectors, pv_origin, tri_cloud_ibs, tri_mesh_env,
                    tri_mesh_obj
                ])

                output_dir = './output/pv_selection_weighted/' + env + '_' + obj + '/'
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)

                for cam in range(len(camera_transformations)):
                    scene.camera_transform = camera_transformations[cam]

                    png = scene.save_image()
                    with open(
                            output_dir + "weighted_" + env + "_" + obj +
Example #11
	def get_close_cam_pos_and_quats(self, mesh):
		if not type(mesh) == trimesh.scene.scene.Scene:
			mesh = trimesh.Scene(mesh)
		if not len(mesh.geometry) == 1:
			return [-1], [-1]
		all_mesh_vertices = list()
		all_mesh_faces = list()
		for _, m in mesh.geometry.items():
			all_mesh_vertices.append(m.vertices)
			all_mesh_faces.append(m.faces)
		
		all_mesh_vertices = np.concatenate(all_mesh_vertices, axis=0)
		all_mesh_faces = np.concatenate(all_mesh_faces, axis=0)

		uniform_pts = self.sample_faces(all_mesh_vertices, all_mesh_faces, n_samples=5000)

		# compute the normals
		pcd = o3d.geometry.PointCloud()
		pcd.points = o3d.utility.Vector3dVector(uniform_pts)
		# compute the normal at each point
		pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(
			radius=0.1, max_nn=30))
		normals = np.asarray(pcd.normals)

		# move in the normal directions a little bit
		normal_dir_moved_points = uniform_pts + 0.09 * normals
		pcd = trimesh.PointCloud(normal_dir_moved_points)
		pcd.show()

		# NOTE: the moved points end up both inside and outside the mesh, since the normals are not consistently oriented outwards
		# NOTE: open3d currently has no built-in way to orient them consistently; a good opportunity for a code contribution

		# check that none of the points intersect with the mesh
		# if the points lie on the mesh then the distance to the closest point would be zero
		# also I know I will be working with one mesh only
		mesh_elem = list(mesh.geometry.keys())
		mesh_elem = mesh.geometry[mesh_elem[0]]
		mesh_elem.show()

		filter_idxs = list()
		for _, m in mesh.geometry.items():
			closest_points_bbox, distances, triangle_id = m.nearest.on_surface(normal_dir_moved_points)
			if (distances - 0.0 <= self.EPS).any():
				filter_idxs.append(np.where((distances + self.EPS) >= 0)[0])
			assert not np.allclose(distances, 0.0), "some point lies very close to the mesh not good"
		
		if len(filter_idxs) > 0:
			filter_idxs = np.asarray(filter_idxs).flatten()
			uniform_pts = uniform_pts[filter_idxs]
			normal_dir_moved_points = normal_dir_moved_points[filter_idxs]
		
		pcd_new = trimesh.PointCloud(uniform_pts)
		pcd_new.show()
		
		directions = normal_dir_moved_points - uniform_pts
		directions = self.mynormalize(directions)

		cam_locs = uniform_pts + 0.08*directions
		pcd_newer = trimesh.PointCloud(cam_locs)
		pcd_newer.show()

		# subsample
		idxs = np.random.permutation(len(cam_locs))
		idxs = idxs[:990]

		cam_locs = cam_locs[idxs]
		directions = directions[idxs]

		# filter out the ones which intersect with the mesh
		for _, m in mesh.geometry.items():
			closest_pts, distances, triangle_id = m.nearest.on_surface(cam_locs)
			if (distances - 0.0 <= self.EPS).any():
				# means one of the camera_location is close to the object, filter it out
				idxs = np.where((distances - 0.0) <= self.EPS)
				print('these are the indexes where distance is equal to zero')
				from IPython import embed; embed()
				mask = np.ones(len(cam_locs))
				mask[idxs] = 0
				cam_locs = cam_locs[mask.astype(bool)]
				directions = directions[mask.astype(bool)]

		# rotate the points and directions now as required by the mujoco
		r_matrix = transformations.euler_matrix(*np.deg2rad(self.object_angle))
		cam_locs = np.c_[cam_locs, np.ones(len(cam_locs))]
		directions = np.c_[directions, np.ones(len(directions))]

		r_cam_locs = np.dot(r_matrix, cam_locs.T).T[:, :3]
		r_directions = np.dot(r_matrix, directions.T).T[:, :3]

		ray_visualize = trimesh.load_path(np.hstack((cam_locs[:, :3], cam_locs[:, :3] + 0.05*directions[:, :3])).reshape(-1, 2, 3))
		mesh.add_geometry(ray_visualize)
		mesh.show()

		print(f'final number of cameras returned: {len(cam_locs)}')

		return r_cam_locs, r_directions
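For reference, trimesh can also do the surface sampling and normal lookup directly, without the open3d round trip used above; a minimal standalone sketch (arbitrary mesh, same 0.09 offset):

import trimesh

mesh = trimesh.creation.icosphere()
points, face_idx = trimesh.sample.sample_surface(mesh, 5000)  # uniform surface samples
normals = mesh.face_normals[face_idx]   # normal of the face each sample came from
moved = points + 0.09 * normals         # step outwards along the normals
trimesh.PointCloud(moved).show()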
Example #12
File: ibs_cloud.py  Project: dougbel/iTpy
    print(end - start, " seconds on IBS calculation (2,000 points)")  # timing execution

    # ### VISUALIZATION

    tri_mesh_obj = trimesh.load_mesh("./data/interactions/table_bowl/bowl.ply")
    radio, np_pivot = util.influence_sphere(tri_mesh_obj, radio_ratio=influence_radio_ratio)

    [idx_extracted, np_ibs_vertices_extracted] = util.extract_cloud_by_sphere(ibs_calculator.vertices, np_pivot, radio)

    edges_from, edges_to = util.get_edges(ibs_calculator.vertices, ibs_calculator.ridge_vertices, idx_extracted)

    tri_cloud_obj = trimesh.points.PointCloud(np.asarray(od3_cloud_obj_poisson.points), colors=[0, 255, 0, 255])
    tri_cloud_env = trimesh.points.PointCloud(np.asarray(od3_cloud_env_poisson.points), colors=[100, 100, 100, 255])

    tri_cloud_ibs_vertices_extracted = trimesh.points.PointCloud(np_ibs_vertices_extracted, colors=[0, 0, 255, 255])
    tri_path_ibs_edges_extracted = trimesh.load_path(np.hstack((edges_from, edges_to)).reshape(-1, 2, 3))

    visualizer = trimesh.Scene([tri_cloud_ibs_vertices_extracted, tri_cloud_obj,
                                tri_cloud_env, tri_path_ibs_edges_extracted])

    # display the environment with callback
    visualizer.show()

    ################################################################################################
    # ##   2.  EXECUTION WITH 10,000 POINTS IN OBJECT AND ENVIRONMENT RESPECTIVELY
    ################################################################################################

    od3_cloud_env_poisson = o3d.io.read_point_cloud('./data/ibs/cloud_env_10000_points.pcd')
    od3_cloud_obj_poisson = o3d.io.read_point_cloud('./data/ibs/cloud_obj_10000_points.pcd')

    np_cloud_env_poisson = np.asarray(od3_cloud_env_poisson.points)
Example #13
        cloud_boundary.vertices_color = cloud_colors
        scene_list.append(cloud_boundary)
    scene = trimesh.Scene(scene_list)
    scene.show()

    # here is how to get the vertices that define the boundary of
    # a texture on a mesh
    mesh = sio.load_mesh('data/example_mesh.gii')
    tex_parcel = sio.load_texture('data/example_texture_parcel.gii')
    col_map = trimesh.visual.color.interpolate(tex_parcel.darray)
    mesh.visual.vertex_colors = col_map
    boundary = stop.texture_boundary(mesh, tex_parcel.darray, 0)
    print(boundary)
    scene_list = [mesh]
    for bound in boundary:
        path_visual = trimesh.load_path(mesh.vertices[bound])
        path_visual.vertices_color = trimesh.visual.random_color()
        scene_list.append(path_visual)
        # points = mesh.vertices[bound]
        # cloud_boundary = trimesh.points.PointCloud(points)
        # cloud_colors = np.array([trimesh.visual.random_color()
        #                          for i in points])
        # cloud_boundary.vertices_color = cloud_colors
        # scene_list.append(cloud_boundary)
    # boundary_vertices = stop.texture_boundary_vertices(tex_parcel.darray, 0,
    # mesh.vertex_neighbors)
    # print(boundary_vertices)
    # path_visual = trimesh.load_path(mesh.vertices[boundary[3]])
    # create a scene with the mesh, path, and points
    # scene = trimesh.Scene([path_visual, mesh ])
Example #14
        reference_ori = np.array([[0, 0, 0]])
        reference_fin = np.array([[0.5, 0, 0]])

        R = z_rotation(angle * ori)  # accumulated rotation in each iteration
        Z = np.ones(3)  # zooms
        T = [0, 0, 0]  # translation
        A = compose(T, R, Z)

        tri_mesh_obj.apply_transform(A)
        tri_mesh_env.apply_transform(A)
        reference_ori = np.dot(reference_ori,
                               R.T)  # np.mat(reference_ori)*np.mat(R)
        reference_fin = np.dot(reference_fin,
                               R.T)  # np.mat(reference_ori)*np.mat(R)

        # VISUALIZATION
        tri_pv = trimesh.load_path(
            np.hstack((pv_origin, pv_final)).reshape(-1, 2, 3))
        tri_pv_origin = trimesh.points.PointCloud(pv_origin,
                                                  color=[0, 0, 255, 250])

        reference = trimesh.load_path(
            np.hstack((reference_ori, reference_fin)).reshape(-1, 2, 3))

        scene = trimesh.Scene(
            [tri_mesh_env, tri_pv, tri_pv_origin, tri_mesh_obj, reference])
        scene.show()

        tri_mesh_obj.apply_transform(linalg.inv(A))
        tri_mesh_env.apply_transform(linalg.inv(A))
Example #15
File: ray.py  Project: mikedh/trimesh

    Returns
    ---------
    locations: (n) sequence of (m,3) intersection points
    index_ray: (n,) int, list of ray index
    index_tri: (n,) int, list of triangle (face) indexes
    """

    # run the mesh-ray test
    locations, index_ray, index_tri = mesh.ray.intersects_location(
        ray_origins=ray_origins,
        ray_directions=ray_directions)

    # stack rays into line segments for visualization as Path3D
    ray_visualize = trimesh.load_path(np.hstack((
        ray_origins,
        ray_origins + ray_directions)).reshape(-1, 2, 3))

    # make mesh transparent-ish
    mesh.visual.face_colors = [100, 100, 100, 100]

    # create a visualization scene with rays, hits, and mesh
    scene = trimesh.Scene([
        mesh,
        ray_visualize,
        trimesh.points.PointCloud(locations)])

    # display the scene
    scene.show()
Example #16
import numpy as np

import trimesh

from it.testing.deglomerator import Deglomerator

if __name__ == '__main__':
    tests = Deglomerator(
        "./data/it/IBSMesh_400_4_OnGivenPointCloudWeightedSampler_5_500/hang",
        "hang", "umbrella")
    print(tests.pv_points)

    # VISUALIZATION
    provenance_vectors = trimesh.load_path(
        np.hstack((tests.pv_points,
                   tests.pv_points + tests.pv_vectors)).reshape(-1, 2, 3))

    pv_origin = trimesh.points.PointCloud(tests.pv_points,
                                          color=[0, 0, 255, 250])

    scene = trimesh.Scene([provenance_vectors, pv_origin])
    scene.show()
Example #17
    def test_path(self):
        a = np.array(Point([0, 0]).buffer(1.0).exterior.coords)
        b = trimesh.load_path(a)

        assert trimesh.util.is_shape(b.vertices, (-1, 2))
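As the assertion suggests, load_path keeps the dimensionality of its input: an (n, 2) vertex array yields a Path2D and an (n, 3) array a Path3D. A tiny sketch:

import numpy as np
import trimesh

p2 = trimesh.load_path(np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]))  # Path2D
p3 = trimesh.load_path(np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 1.0]]))    # Path3D
print(type(p2).__name__, type(p3).__name__)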
Example #18
	def dont_care_strategy(self, mesh):
		if not type(mesh) == trimesh.scene.scene.Scene:
			scene = trimesh.Scene(mesh)
		else:
			scene = mesh
		
		if not len(scene.geometry) == 1:
			return [-1], [-1]
		
		all_mesh_vertices = list()
		all_mesh_faces = list()
		for _, m in scene.geometry.items():
			all_mesh_vertices.append(m.vertices)
			all_mesh_faces.append(m.faces)
		
		all_mesh_vertices = np.concatenate(all_mesh_vertices, axis=0)
		all_mesh_faces = np.concatenate(all_mesh_faces, axis=0)

		bbox_extent = mesh.bounding_box.bounds
		print(mesh.bounding_box.extents)
		# scale the bounding box
		new_bbox = np.copy(bbox_extent)
		new_bbox *= 1.5
		new_bbox_extent = new_bbox[1, :] - new_bbox[0, :]
		print(new_bbox_extent)
		# new_bbox_extent[0, 1] -= 0.05
		# new_extent = new_bbox_extent[1, :] - new_bbox_extent[0, :]
		# points_on_bbox = trimesh.sample.volume_rectangular(new_extent, 5000)

		longest_side = np.argmax(new_bbox_extent)
		lside_max, lside_min = new_bbox[1, longest_side], new_bbox[0, longest_side]

		# sample points in between these a lot of them
		sampled_points = np.random.uniform(lside_min, lside_max, size=30000)
		sampled_points = sampled_points.reshape(-1, 3)

		points_on_bbox = sampled_points
		pcd = trimesh.PointCloud(points_on_bbox)
		new_scene = trimesh.Scene([pcd, mesh])
		new_scene.show()

		location, dists, triangle_ids = mesh.nearest.on_surface(points_on_bbox)
		good_idxs = np.where(dists >= 0.07)

		selected_points = points_on_bbox[good_idxs]
		pcd = trimesh.PointCloud(selected_points)
		pcd.show()

		closest_locations, dist, triangle_id = mesh.nearest.on_surface(selected_points)
		directions = selected_points - closest_locations
		directions /= np.linalg.norm(directions, axis=1).reshape(-1, 1)
		print(directions)
		assert np.allclose(np.linalg.norm(directions, axis=1),1.0), "this should be 1"

		cam_locs = closest_locations + 0.07 * directions

		# now see here that all points are 7 cm away from the closest point on mesh
		# subsample them and return
		idxs = np.random.permutation(len(cam_locs))
		cam_locs = cam_locs[idxs[:2000]]
		directions = directions[idxs[:2000]]

		ray_visualize = trimesh.load_path(np.hstack((cam_locs, cam_locs + 0.03*directions)).reshape(-1, 2, 3))
		scene = trimesh.Scene([mesh, ray_visualize])
		scene.show()

		r_matrix = transformations.euler_matrix(*np.deg2rad(self.object_angle))
		cam_locs = np.c_[cam_locs, np.ones(len(cam_locs))]
		directions = np.c_[directions, np.ones(len(directions))]

		r_cam_locs = np.dot(r_matrix, cam_locs.T).T[:, :3]
		r_directions = np.dot(r_matrix, directions.T).T[:, :3]

		return r_cam_locs, r_directions
Example #19
        for k, v in calvis.human_model.mean_template_shape.joint_names.items())

    shoulder_xyz = calvis.joints_location[inverted_joint_names["R_Shoulder"]]
    ray_origins = np.array([shoulder_xyz])
    ray_direction_outside_mesh = [
        shoulder_xyz[0] +
        calvis.trimesh.bounding_box.bounds[0][0],  # shoulder_xyz[1] +
        calvis.trimesh.bounding_box.bounds[0][1],  # shoulder_xyz[2] +
        ((  # calvis.trimesh.bounding_box.bounds[1][2]
            calvis.trimesh.bounding_box.bounds[0][2])),
    ]

    ray_directions = np.array([ray_direction_outside_mesh])

    ray_visualize = trimesh.load_path(
        np.hstack(([calvis.axilla_center],
                   ray_origins + ray_directions)).reshape(-1, 2, 3))

    # axis
    geom = trimesh.creation.axis(0.02)

    scene = trimesh.Scene([
        geom,
        calvis.trimesh,
        ray_visualize,
        # calvis.trimesh.bounding_box,
        # calvis.trimesh.bounding_box_oriented,
        # *slices,
        cc,
        wc,
        pc,
Example #20
    print('area: ', polyhedron.area())
    print('\n===> 1/ Contract geometry <====\n')
    skeleton_vertices, skeleton_edges, correspondence = contract(polyhedron)
    print('\n------Skeleton vertices and edges (atrocyde-skeleton.txt)------')
    print('skeleton_vertices.shape', skeleton_vertices.shape)
    print('skeleton_edges.shape', skeleton_edges.shape)

    print('''\n------(atrocyde-correspondence.txt) ------\n
    A mapping file between the skeleton points (id) and the surface
                          faces (id) generated by the contraction process which gives
                          information about the faces of the surface that were collapsed
                          into a skeleton point
    ''')
    print('len(correspondence.keys()):', len(correspondence.keys()))

    print('\n\n===> 2/ Segment geometry <====\n')
    sdf_property_map, segment_property_map = segmentation(polyhedron)
    print('------ sdf values (atrocyde-sdf.txt) ------')
    print('(sdf_property_map).shape {}'.format(sdf_property_map.shape))
    print('(sdf_property_map).dtype {}'.format(sdf_property_map.dtype))
    print('\n------Segmentation of a surface mesh given an'
          ' SDF value per facet atrocyde-sdf.txt------')
    print('(segment_property_map).shape {}'.format(segment_property_map.shape))
    print(segment_property_map)

    # display the skeleton shifted over, so one can see it
    path_visual = trimesh.load_path(skeleton_vertices[skeleton_edges, :] +
                                    [2, 2, 2])
    scene = trimesh.Scene([path_visual, mesh])
    scene.show()
Example #21
File: ray.py  Project: neoglez/vtkplotter
settings.useDepthPeeling = True

# test on a sphere mesh
mesh = trimesh.creation.icosphere()

# create some rays
ray_origins = np.array([[0, 0, -3], [1, 2, -3]])
ray_directions = np.array([[0, 0, 1], [0, -1, 1]])

# run the mesh-ray query
locations, index_ray, index_tri = mesh.ray.intersects_location(
    ray_origins=ray_origins, ray_directions=ray_directions)
locs = trimesh.points.PointCloud(locations)

# stack rays into line segments for visualization as Path3D
ray_visualize = trimesh.load_path(
    np.hstack((ray_origins, ray_origins + ray_directions)).reshape(-1, 2, 3))

print("The rays hit the mesh at coordinates:\n", locations)
print("The rays with index: {} hit triangles stored at mesh.faces[{}]".format(
    index_ray, index_tri))

# stack rays into line segments for visualization as Path3D
ray_visualize = trimesh.load_path(
    np.hstack(
        (ray_origins, ray_origins + ray_directions * 5.0)).reshape(-1, 2, 3))

# make mesh white-ish
mesh.visual.face_colors = [200, 200, 250, 100]
mesh.visual.face_colors[index_tri] = [255, 0, 0, 255]

show(mesh, ray_visualize, locs)
Example #22
    extension, middle_point = util.influence_sphere(tri_mesh_obj, radio_ratio=influence_radio_ratio)

    tri_mesh_env_segmented = util.slide_mesh_by_bounding_box(tri_mesh_env, middle_point, extension)

    start = time.time()  # timing execution
    ibs_calculator = IBSMesh(600, 2)
    ibs_calculator.execute(tri_mesh_env_segmented, tri_mesh_obj)
    end = time.time()  # timing execution
    print(end - start, " seconds on IBS calculation (600 original points)")  # timing execution

    ####################################################################################################################
    # 1. IBS VISUALIZATION
    edges_from, edges_to = util.get_edges(ibs_calculator.vertices, ibs_calculator.ridge_vertices)

    visualizer = trimesh.Scene([
        trimesh.load_path(np.hstack((edges_from, edges_to)).reshape(-1, 2, 3)),
        # trimesh.points.PointCloud( np_cloud_obj_poisson , colors=[0,0,255,255] ),
        tri_mesh_obj,
    ])

    visualizer.show()

    ####################################################################################################################
    # 2. CROPPED VISUALIZATION MESH AND POINT CLOUD (IBS)

    # extracting point no farther than the principal sphere

    radio, np_pivot = util.influence_sphere(tri_mesh_obj, radio_ratio=influence_radio_ratio)

    [idx_extracted, np_ibs_vertices_extracted] = util.extract_cloud_by_sphere(ibs_calculator.vertices, np_pivot, radio)
Example #23
    # print the vertex index
    for i in range(mesh.vertices.shape[0]):
        vert_distmap = area_geodist[i].toarray()[0]
        area_geodist_v = np.where(vert_distmap > 0)[0]
        print(area_geodist_v)
    #

    # arbitrary indices of mesh.vertices to test with
    start = 0
    end = int(len(mesh.vertices) / 2.0)
    path = sgeo.shortest_path(mesh, start, end)
    print(path)

    # VISUALIZE RESULT
    # make the sphere transparent-ish
    mesh.visual.face_colors = [100, 100, 100, 100]
    # Path3D with the path between the points
    path_visual = trimesh.load_path(mesh.vertices[path])
    print(path_visual)
    # visualize the two endpoints
    points_visual = trimesh.points.PointCloud(mesh.vertices[[start, end]])

    # create a scene with the mesh, path, and points
    scene = trimesh.Scene([
        points_visual,
        path_visual,
        mesh])

    scene.show(smooth=False)
Example #24
def dual_cycloidal(eccentricity=.07,
                   count_pin=24,
                   radius_pin=.125,
                   radius_pattern=2.25,
                   input_count=3,
                   input_radius=.5625,
                   input_pattern=1.3125):
    '''
    Generate the profiles, pins, and holes for a regular dual-disc
    cycloidal drive. This design has two discs operating 180 degrees
    out of phase to minimize vibration.

    Parameters
    ------------
    eccentricity:   float, magnitude of eccentricity
    count_pin: int, number of fixed pins
    radius_pin:     float, radius of a fixed pin
    radius_pattern: float, radius of pin pattern
    input_count:    int, number of holes in the cycloidal disc
    input_radius:   float, radius of the holes in the cycloidal disc
    input_pattern:  float, radius of the hole pattern in the disc

    Returns
    -------------
    drive: Path2D object, with two disc layers and a pin layer
    '''

    # half a tooth spacing for transforming disc to pin pattern
    spacing = .5 * np.pi / (count_pin - 1)

    # get a disc profile, with a very dense sampling
    a = trimesh.load_path(cycloidal_profile(count_pin=count_pin,
                                            count_cam=count_pin - 1,
                                            eccentricity=eccentricity,
                                            radius_pin=radius_pin,
                                            radius_pattern=radius_pattern,
                                            resolution=32))

    # replace the polyline entity with a bajillion points with a
    # tightly fit B-Spline
    a = a.simplify_spline(smooth=1e-6)

    # the second disc has the same profile, with a different transform
    b = a.copy()
    # for the first disc, apply the transform to line up with the pins
    a.apply_transform(trimesh.transformations.planar_matrix(
        offset=[-eccentricity, 0.0],
        theta=spacing))
    a.apply_layer('cam_disc_A')
    # do the same for the second disc
    b.apply_transform(trimesh.transformations.planar_matrix(
        offset=[eccentricity, 0.0],
        theta=-spacing))
    b.apply_layer('cam_disc_B')

    # generate the fixed pins
    pins = trimesh.path.creation.circle_pattern(pattern_radius=radius_pattern,
                                                circle_radius=radius_pin,
                                                count=count_pin)
    pins.apply_layer('pins')

    # add the input holes for each cycloidal disc
    holes_A = trimesh.path.creation.circle_pattern(pattern_radius=input_pattern,
                                                   circle_radius=input_radius,
                                                   center=[-eccentricity, 0.0],
                                                   count=input_count)
    holes_A.apply_layer('cam_disc_A')

    holes_B = trimesh.path.creation.circle_pattern(pattern_radius=input_pattern,
                                                   circle_radius=input_radius,
                                                   center=[eccentricity, 0.0],
                                                   count=input_count)
    holes_B.apply_layer('cam_disc_B')

    # concatenate all of the paths into a single drawing
    drive = a + b + pins + holes_A + holes_B

    return drive
Example #25
    pyactp.makerough(file_name)

    curves = []
    for path in range(pyactp.getnumpaths()):
        npoints = pyactp.getnumpoints(path)
        nbreaks = pyactp.getnumbreaks(path)
        nlinkpaths = pyactp.getnumlinkpths(path)

        z = pyactp.getz(path)
        start_pos = 0
        first_z_done = False

        curves.append([])
        for brk in range(0, nbreaks):
            brkpos = pyactp.getbreak(path, brk)
            for point in range(start_pos, brkpos):
                x, y = pyactp.getpoint(path, point)

                curves[-1].append([x, y, z])
            start_pos = brkpos
            nlinkpoints = pyactp.getnumlinkpoints(path, brk)
            for linkpoint in range(0, nlinkpoints):
                x, y, z = pyactp.getlinkpoint(path, brk, linkpoint)
                curves[-1].append([x, y, z])

    # visualize the toolpaths and mesh in a pyglet window
    mesh = trimesh.load(file_name)
    viz = [trimesh.load_path(np.array(c)) for c in curves if len(c) > 0]
    viz.append(mesh)
    trimesh.Scene(viz).show()
Example #26
	def convex_sampling_strategy(self, mesh):
		if not type(mesh) == trimesh.scene.scene.Scene:
			mesh = trimesh.Scene(mesh)
		
		if not len(mesh.geometry) == 1:
			return [-1], [-1]
		
		all_mesh_vertices = list()
		all_mesh_faces = list()
		for _, m in mesh.geometry.items():
			all_mesh_vertices.append(m.vertices)
			all_mesh_faces.append(m.faces)
		
		all_mesh_vertices = np.concatenate(all_mesh_vertices, axis=0)
		all_mesh_faces = np.concatenate(all_mesh_faces, axis=0)

		# now fit the bounding box to the mesh and sample points
		points_on_bbox = mesh.bounding_box_oriented.sample_volume(count=8000)

		convex_hull_of_mesh = mesh.convex_hull
		# compute signed distance of all points on bounding box from convex_hull
		sdists = convex_hull_of_mesh.nearest.signed_distance(points_on_bbox)
		# get indexes which are greater than zero
		gidxs = np.where(sdists > 0.0)[0]
		filtered_points_on_bbox = points_on_bbox[gidxs]

		pcd = trimesh.PointCloud(filtered_points_on_bbox)
		pcd.show()

		# now where does each of the point intersect with the mesh
		closest_points_bbox, distances, triangle_id = convex_hull_of_mesh.nearest.on_surface(filtered_points_on_bbox)
		pcd_new = trimesh.PointCloud(closest_points_bbox)
		pcd_new.show()

		directions = closest_points_bbox - filtered_points_on_bbox
		directions /= np.linalg.norm(directions, axis=1).reshape(-1, 1)
		assert np.allclose(np.linalg.norm(directions, axis=1), 1.0), "directions are not normalized"

		cam_locs = closest_points_bbox + 0.05 * directions
		
		# finally check that all the points are outside the convex hull
		assert (convex_hull_of_mesh.nearest.signed_distance(cam_locs) < 0.0).all(), "some point is inside the convex hull of the object"

		# subsample and return
		idxs = np.random.permutation(len(cam_locs))
		idxs = idxs[:3000]
		cam_locs = cam_locs[idxs]
		directions = directions[idxs]

		ray_visualize = trimesh.load_path(np.hstack((cam_locs, cam_locs + 0.03*directions)).reshape(-1, 2, 3))
		scene = trimesh.Scene([convex_hull_of_mesh, ray_visualize])
		scene.show()

		# rotate the points and directions now as required by the mujoco
		r_matrix = transformations.euler_matrix(*np.deg2rad(self.object_angle))
		cam_locs = np.c_[cam_locs, np.ones(len(cam_locs))]
		directions = np.c_[directions, np.ones(len(directions))]

		r_cam_locs = np.dot(r_matrix, cam_locs.T).T[:, :3]
		r_directions = np.dot(r_matrix, directions.T).T[:, :3]
			
		return r_cam_locs, r_directions
Example #27
aabb_max_eye = (1, 1, 1)
distance = np.full((n_keypoints, ), 1, dtype=float)
elevation = np.random.uniform(30, 90, (n_keypoints, ))
azimuth = np.random.uniform(0, 360, (n_keypoints, ))
eyes = morefusion.geometry.points_from_angles(distance, elevation, azimuth)
indices = np.linspace(0, 127, num=len(eyes))
indices = indices.round().astype(int)
eyes = morefusion.geometry.trajectory.sort_by(eyes, key=targets[indices])
eyes = morefusion.geometry.trajectory.interpolate(eyes, n_points=128)

# -----------------------------------------------------------------------------

scene = trimesh.Scene()

box = trimesh.path.creation.box_outline((2, 2, 2))
scene.add_geometry(box)

axis = trimesh.creation.axis(0.01)
point = trimesh.creation.icosphere(radius=0.01, color=(1.0, 0, 0))
for eye, target in zip(eyes, targets):
    transform = tf.translation_matrix(eye)
    scene.add_geometry(axis, transform=transform)

    transform = tf.translation_matrix(target)
    scene.add_geometry(point, transform=transform)

    ray = trimesh.load_path([eye, target])
    scene.add_geometry(ray)

morefusion.extra.trimesh.display_scenes({"scene": scene})
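Note that load_path([eye, target]) above receives just two 3D points, which produces a Path3D containing a single line segment; that is what draws one viewing ray per camera pose. A one-line check:

import trimesh

segment = trimesh.load_path([[0.0, 0.0, 1.0], [0.0, 0.0, 0.0]])
print(len(segment.entities))  # 1 line entity connecting the two points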
Example #28
    print("score: " + str(score) + ", missing " + str(missing))

    affordance_name = tester.affordances[0][0]
    affordance_object = tester.affordances[0][1]
    tri_mesh_object_file = tester.objs_filenames[0]
    influence_radius = tester.objs_influence_radios[0]

    # visualizing
    tri_mesh_obj = trimesh.load_mesh(tri_mesh_object_file, process=False)

    idx_from = orientation * tester.num_pv
    idx_to = idx_from + tester.num_pv
    pv_begin = tester.compiled_pv_begin[idx_from:idx_to]
    pv_direction = tester.compiled_pv_direction[idx_from:idx_to]
    provenance_vectors = trimesh.load_path(
        np.hstack((pv_begin, pv_begin + pv_direction)).reshape(-1, 2, 3))

    pv_intersections = analyzer.calculated_pvs_intersection(0, orientation)

    R = z_rotation(angle)  # rotation matrix
    Z = np.ones(3)  # zooms
    T = testing_point
    A = compose(T, R, Z)
    tri_mesh_obj.apply_transform(A)

    tri_mesh_env.visual.face_colors = [100, 100, 100, 100]
    tri_mesh_obj.visual.face_colors = [0, 255, 0, 100]
    intersections = trimesh.points.PointCloud(pv_intersections,
                                              color=[0, 255, 255, 255])

    sphere = trimesh.primitives.Sphere(radius=influence_radius,