Example #1
def draw_quad_3d(camera, ax, frame_color="k", prop_color=None, opt_options=None, fill_alpha=0.5):
    if opt_options is not None:
        r_prop = camera.prop_rad
        frame_size = camera.frame_rad*2
    else:
        r_prop = 0.1
        frame_size = 0.4

    if prop_color is None:
        prop_color = frame_color
    pos = camera.pose
    norm = camera.wall_normal

    pitch = np.arccos(norm[2])
    if np.abs(np.sin(pitch)) <= 1e-5:  # normal is (anti)parallel to z, so yaw is undefined
        warnings.warn("Normal Vector: " + str(norm) + " results in an invalid conversion to euler angles")
        yaw = 0  # purely cosmetic, but could be nice to deal with this...
    else:
        yaw = np.arctan2(norm[1], norm[0])  # arctan2 recovers the sign of yaw, which arccos cannot
    roll = 0

    R = rot3d_from_rtp(np.array([roll, pitch, yaw]), unit="radians")

    # draw frame
    frame_verts = [
        frame_size / 2 * np.array([[np.sqrt(2) / 2, np.sqrt(2) / 2, 0], [-np.sqrt(2) / 2, -np.sqrt(2) / 2, 0]]),
        frame_size / 2 * np.array([[np.sqrt(2) / 2, -np.sqrt(2) / 2, 0], [-np.sqrt(2) / 2, np.sqrt(2) / 2, 0]])]
    for i in range(2):
        for j in range(2):
            frame_verts[i][j, :] = np.dot(R, frame_verts[i][j, :]) + pos[:3]

    ax.plot(frame_verts[0][:, 0], frame_verts[0][:, 1], frame_verts[0][:, 2], frame_color)
    ax.plot(frame_verts[1][:, 0], frame_verts[1][:, 1], frame_verts[1][:, 2], frame_color)

    # draw props: a circle of radius r_prop
    u = np.linspace(0, 2 * np.pi, 8)
    x = r_prop * np.cos(u)
    y = r_prop * np.sin(u)
    z = np.zeros(x.size)
    prop_coords = np.array([x, y, z]).transpose()
    prop_coords = np.vstack((prop_coords, prop_coords[0, :]))  # close the loop

    # rotate the prop circle into the frame's plane
    for p in range(len(prop_coords)):
        prop_coords[p, :] = np.dot(R, prop_coords[p, :])

    # shift and plot
    for i in range(2):
        for j in range(2):
            pc = prop_coords + frame_verts[i][j, :]

            # fill polygon
            collection = convert_points_to_Poly3D(pc, line_width=0.0, alpha=fill_alpha)
            collection.set_edgecolor(prop_color)
            collection.set_facecolor(prop_color)
            ax.add_collection3d(collection)
            ax.plot(pc[:, 0], pc[:, 1], pc[:, 2], "black")  # prop outline
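
# The Euler-angle recovery above inverts norm = (sin(pitch)cos(yaw), sin(pitch)sin(yaw), cos(pitch)).
# A self-contained round-trip check of that identity (numpy only; rot3d_from_rtp itself is not
# reproduced here, and normal_to_pitch_yaw is an illustrative helper, not part of the project):
import numpy as np

def normal_to_pitch_yaw(norm):
    """Recover pitch/yaw (radians) from a unit normal; yaw is undefined when sin(pitch) ~ 0."""
    pitch = np.arccos(np.clip(norm[2], -1.0, 1.0))
    yaw = 0.0 if abs(np.sin(pitch)) < 1e-5 else np.arctan2(norm[1], norm[0])
    return pitch, yaw

pitch, yaw = np.deg2rad(40.0), np.deg2rad(-110.0)
n = np.array([np.sin(pitch) * np.cos(yaw), np.sin(pitch) * np.sin(yaw), np.cos(pitch)])
assert np.allclose(normal_to_pitch_yaw(n), (pitch, yaw))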
Example #2
def map_line_to_edge(environment, x, poly_order="CCW"):
    """
    DEPRECATED
    Maps a particle weight to a position in 2D space by interpreting it as a distance along the perimeter edge

    :param environment: Environment class instance
    :param x: particle weight
    :param poly_order: polygon point order, clockwise ("CW") or counter-clockwise ("CCW"); determines the sign of the
     normal direction offset
    :return: point position, p, and normal direction euler angles, n
    """
    p = np.array([-np.inf, -np.inf])
    cumulative_dist = [0]

    perimeter = environment.perch_perimeter
    poly_bounds = environment.perch_regions
    x = x % perimeter
    n = -np.inf  # initialize normal direction as -inf as a debugging precaution

    # x is already wrapped to [0, perimeter), so a single pass over the edges is sufficient
    for pb in poly_bounds:
        d = dist(pb[0, :], pb[1, :])
        cumulative_dist.append(cumulative_dist[-1] + d)

        if cumulative_dist[-1] >= x:
            # find intersection point
            dist_on_edge = x - cumulative_dist[-2]
            norm_dist = dist_on_edge / d
            p[0] = norm_dist * (pb[0, 0] - pb[1, 0]) + pb[1, 0]
            p[1] = norm_dist * (pb[0, 1] - pb[1, 1]) + pb[1, 1]

            # get normal direction as an angle WRT the +x direction
            edge_dir = np.arctan2(pb[1, 1] - pb[0, 1],
                                  pb[1, 0] - pb[0, 0]) * 180 / np.pi
            if poly_order == "CCW":
                n = edge_dir - 90  # normal direction to the edge
            else:
                n = edge_dir + 90

            break

    if n == -np.inf:
        print("Warning: invalid edge normal (x=" + str(x) + ", cumulative distances=" + str(cumulative_dist) + ")")

    R = rot3d_from_rtp(np.array([0, 0, n]))
    n = R[:, 0]

    return p, n
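
# A self-contained sketch of the perimeter mapping above for a unit square, assuming each entry of
# poly_bounds is a 2x2 array of edge endpoints stored start-to-end (the interpolation direction in
# map_line_to_edge depends on how the environment stores its edges):
import numpy as np

edges = [np.array([[0., 0.], [1., 0.]]),   # CCW unit square
         np.array([[1., 0.], [1., 1.]]),
         np.array([[1., 1.], [0., 1.]]),
         np.array([[0., 1.], [0., 0.]])]
perimeter = sum(np.linalg.norm(e[1] - e[0]) for e in edges)

x = 2.5 % perimeter                        # walk 2.5 units along the boundary
walked = 0.0
for e in edges:
    d = np.linalg.norm(e[1] - e[0])
    if walked + d >= x:
        t = (x - walked) / d
        p = e[0] + t * (e[1] - e[0])       # -> (0.5, 1.0)
        break
    walked += d
print(p)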
Example #3
def draw_camera_3d(camera, ax, color='c', tgt_steps=4, fov_steps=7, use_score_range=False, environment=None):
    """
    Draws a camera's FOV in 3D

    :param camera: Camera class instance containing camera parameters and pose
    :param ax: figure ax handle
    :param color: color code for the camera
    :param tgt_steps: number of range steps over which to approximate the opacity change (corresponding to
     information density) of the camera
    :param fov_steps: number of steps in both the horizontal and vertical FOV directions, used to approximate the
     camera view being limited by obstacles. Larger values are noticeably more computationally expensive.
    :param use_score_range: (defaults to False) whether the camera's score range should be used instead of the
     default opacity values (which make the visualization a bit cleaner)
    :param environment: class instance containing environment information. If left empty, the naive visualization
     mode is used (fov_steps=1, range left unlimited)
    :return:
    """

    d_to_floor = camera.pose[2]  # assumes the target is on the ground
    d_to_ceil = 2.7 - camera.pose[2]  # assumes a 2.7 m ceiling unless an environment is provided

    if environment is None:
        fov_steps = 1
        obstacles = None
        mesh_env = False

    else:
        obstacles = environment.obstacles
        mesh_env = environment.opt_options.mesh_env
        if environment.floor is not None:
            d_to_floor = np.dot(camera.pose[:3] - environment.floor.centroid, -environment.gravity_direction)
        if environment.ceil is not None:
            d_to_ceil = np.dot(environment.ceil.centroid - camera.pose[:3], -environment.gravity_direction)

    r_steps = np.linspace(camera.range[0], camera.range[1], tgt_steps+1)
    if use_score_range:
        a_steps = np.linspace(camera.score_range[0], camera.score_range[1], tgt_steps)
    else:
        a_steps = np.linspace(0.6, 0.2, tgt_steps)

    for h in range(fov_steps):
        for v in range(fov_steps):
            # see if index is on one of the pyramid edges. if not, only draw end cap
            draw_walls = np.array([False, False, False, False, True, True])

            if h == 0:  # left side
                draw_walls[0] = True
            if h == fov_steps-1:  # right side
                draw_walls[2] = True
            if v == 0:  # bottom side
                draw_walls[3] = True
            if v == fov_steps-1:  # top side
                draw_walls[1] = True

            # check intersection of center of sub-pyramid
            sub_fov = camera.fov/(fov_steps)  # TODO: make sure things are oriented appropriately. in plot pyramid, things seem to be rotated 90

            # RECALL: CAMERA FWD IS Z, RIGHT IS X, DOWN IS Y; ROLL IS Z, PITCH IS X, YAW IS Y
            # TODO: VERIFY
            # reorient camera euler angles to match drawing function's orientation
            # (from X right, Y down, Z fwd -> X fwd, Y left, Z up)
            cam_pose = np.array([camera.pose[-1], -camera.pose[-3], -camera.pose[-2]])  # TODO VERIFY SIGN CHANGE
            R_sub = rot3d_from_rtp(cam_pose +  # start with initial camera pose
                                   np.array([0, (v+.5)*sub_fov[1], (h+.5)*sub_fov[0]]) -  # add on the sub_pose
                                   # deviation (plus a half step to get to the center of the step position)
                                   np.array([0, camera.fov[1]/2, camera.fov[0]/2]))  # and subtract off half the fov

            # find intersection...
            vec_dir = R_sub[:, 0]
            if vec_dir[-1] > 0:  # pointing upwards. look at ceiling intersection
                d = np.abs(d_to_ceil / vec_dir[-1])  # length of vector to ground in direction defined by R_sub
            else:  # pointing downwards, look at floor intersection
                d = np.abs(d_to_floor / vec_dir[-1])  # length of vector to ground in direction defined by R_sub

            d = min([camera.range[-1], d])  # take min of the ground intersection distance and the camera range
            e = d*vec_dir + camera.pose[:3]  # point representing the end of the field of view of the current sub-view

            if mesh_env:
                obstructed, obstruction_point, _ = camera.get_obstruction_mesh_obb_tree(points=np.array([e]),
                                                                                        environment=environment,
                                                                                        find_closest_intersection=True)
                if obstructed[0]:
                    d = dist(obstruction_point[0], camera.pose[:3])
            elif obstacles is not None:
                obstructed, obstruction_point = camera.get_obstruction(e, obstacles)
                if obstructed:
                    d = dist(obstruction_point, camera.pose[:3])

            for i in range(tgt_steps):
                d_range = np.copy(r_steps[i:i+2])

                # Reset front and back
                draw_walls[4] = False
                draw_walls[5] = False
                if i == 0:  # draw the front surface on the starting point
                    draw_walls[4] = True

                if d_range[0] >= d:
                    break
                elif d_range[1] > d:
                    d_range[-1] = d
                    # if obstructed, or if it's the last step, draw the back wall
                    draw_walls[5] = True
                elif i == tgt_steps-1:
                    draw_walls[5] = True
                if any(draw_walls):
                    draw_pyramid_3d(position=camera.pose[:3], R=R_sub, fov=sub_fov, height_range=d_range, ax=ax,
                                    color=color, alpha=a_steps[i], draw_walls=draw_walls)
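
# The floor/ceiling capping above reduces to a ray-plane distance along the gravity axis. A minimal,
# self-contained sketch of that step (names here are illustrative, not from the project); unlike the
# code above, it also guards against a perfectly horizontal ray:
import numpy as np

def ray_distance_to_horizontal_plane(vec_dir, d_to_floor, d_to_ceil, max_range):
    """Distance along a unit ray before it hits the floor or ceiling, capped at max_range."""
    if abs(vec_dir[2]) < 1e-9:        # horizontal ray: only the range cap applies
        return max_range
    d_plane = d_to_ceil if vec_dir[2] > 0 else d_to_floor
    return min(max_range, abs(d_plane / vec_dir[2]))

# a ray pitched 30 degrees below horizontal, camera 1.5 m above the floor
v = np.array([np.cos(np.deg2rad(-30)), 0.0, np.sin(np.deg2rad(-30))])
print(ray_distance_to_horizontal_plane(v, d_to_floor=1.5, d_to_ceil=1.2, max_range=10.0))  # ~3.0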
Example #4
def optimize(seg_env_prototype=None,
             target_prototype=None,
             cluster_env_path=None,
             full_env_path=None,
             N_its=1,
             enable_user_confirmation=True,
             preloaded_vars=None,
             visualize_with_vedo=False):

    if preloaded_vars is None:
        env, camera_list, optimization_options, pso_options = initialize(
            seg_env_prototype,
            target_prototype,
            cluster_env_path,
            full_env_path,
            load_full_env=True)

        if enable_user_confirmation:
            surface_confirmation_demo.confirm_surfaces(environment=env,
                                                       N_max=10)
        env.post_process_environment()

        preloaded_vars = {
            'env': copy.deepcopy(env),
            'camera_list': copy.deepcopy(camera_list),
            'optimization_options': copy.deepcopy(optimization_options),
            'pso_options': copy.deepcopy(pso_options)
        }

        # save the preloaded variables so later runs can skip initialization
        with open("../test/preloaded_environment_apartment.p", 'wb') as pickle_env:
            pickle.dump(preloaded_vars, pickle_env)

    else:
        # workaround for the seg fault issue: load everything in advance, then just pass it in
        env = preloaded_vars['env']
        camera_list = initialize_camera_list()
        optimization_options = initialize_opt_options()
        pso_options = initialize_pso_options()

        env.vedo_mesh = vedo.mesh.Mesh(env.obs_mesh)
        env.opt_options = optimization_options
        env.correct_normals()
        env.n_points = optimization_options.n_points
        env.generate_integration_points()
        env.perch_regions = []
        env.perch_area = 0
        env.set_surface_as_perchable()
        optimization_options.log_performance = False

    # env.plot_environment()

    # PSO:
    # base dimension for simple 2d problem is 2. scales up depending on selected opt
    camera_particle_dimension = optimization_options.get_particle_size()
    n_cams = len(camera_list)
    N_iterations = pso_options["N_iterations"]
    N_particles = pso_options["N_particles"]

    if pso_options["greedy_search"]:
        particle_dimension = camera_particle_dimension
        num_optimizations = n_cams
    else:
        particle_dimension = n_cams * camera_particle_dimension
        num_optimizations = 1

    # for logging
    pso_keypoints = np.zeros([N_its, num_optimizations, N_iterations, 3])
    optimization_options.search_time = np.zeros(
        [N_its, num_optimizations, N_iterations + 1])
    optimization_options.best_fitness = np.zeros(
        [N_its, num_optimizations, N_iterations + 1])
    optimization_options.pts_searched = np.zeros(
        [N_its, num_optimizations, N_iterations + 1])

    bounds = (np.zeros(particle_dimension), np.ones(particle_dimension))  # particle boundaries

    # for velocity update:
    # 'w' is velocity decay aka inertia,
    # 'c1' is "cognitive parameter", e.g. attraction to particle best,
    # 'c2' is "social parameter", e.g. attraction to local/global best
    # 'k' = number of neighbors to consider
    # 'p' = the Minkowski p-norm. 1 for absolute val dist, 2 for norm dist
    options = {
        'c1': pso_options["pso_c1"],
        'c2': pso_options["pso_c2"],
        'w': pso_options["pso_w"],
        'k': pso_options["pso_k"],
        'p': pso_options["pso_p"]
    }

    optimal_cameras = PlacedCameras()

    fig_num = 1

    if pso_options['individual_surface_opt']:
        num_surface_loops = len(env.perch_regions)
        surface_number = range(num_surface_loops)
        N_particles = env.assign_particles_to_surfaces(
            N_particles,
            pso_options["pso_k"],
            neighborhood_search=pso_options["local_search"])
    else:
        num_surface_loops = 1
        surface_number = [-1]
        N_particles = [N_particles]

    # STORE FOR ANALYSIS LATER
    if optimization_options.log_performance:
        pso_best_fitnesses = np.zeros(num_optimizations)
        pso_points_searched = np.zeros(num_optimizations)
        pso_search_time = np.zeros(num_optimizations)
        pso_keypoints = []
        for i in range(num_optimizations):
            pso_keypoints.append([[], [], [], []])

    # store, for each optimization, time, num particles searched, fitness, standard deviation
    bh = pso_options["boundary_handling"]

    i = 0
    while i < num_optimizations:
        if optimization_options.log_performance:
            optimization_options.data_index = 0
        start_time = datetime.now()

        if pso_options["greedy_search"]:
            search_cameras_list = [copy.deepcopy(camera_list[i])]
        else:
            search_cameras_list = camera_list

        best_cost = np.finfo(float).max
        best_pos_surf = -1
        best_pos = np.zeros(particle_dimension)

        for j in range(num_surface_loops):
            # Future work: investigate velocity clamping...
            optimization_options.surface_number = surface_number[j]
            # this flag gets reset if the current search has too little variance
            optimization_options.continue_searching = True
            optimization_options.stagnant_loops = 0

            if visualize_with_vedo:
                plt1 = vedo.Plotter(title='Confirm Perch Location',
                                    pos=[0, 0],
                                    interactive=False,
                                    sharecam=False)
                plt1.clear()

                # load the environment mesh for display
                # env_mesh = trimesh.load(env.full_env_path)

                env_mesh = trimesh.load(
                    '/home/simon/catkin_ws/src/mesh_partition/datasets/' +
                    env.name + '_1m_pt1.ply')

                R = np.zeros([4, 4])
                R[:3, :3] = env.R
                env_mesh.vertices = trimesh.transform_points(
                    env_mesh.vertices, R)
                env_mesh_vedo = vedo.mesh.Mesh(env_mesh)
                target_mesh_pymesh = env.generate_target_mesh(shape='box')
                target_mesh = trimesh.Trimesh(target_mesh_pymesh.vertices,
                                              target_mesh_pymesh.faces)
                target_mesh_vedo = vedo.mesh.Mesh(target_mesh)
                target_colors = 0.5 * np.ones([len(target_mesh.faces), 4])
                target_colors[:, 0] *= 0
                target_colors[:, 2] *= 0
                target_mesh_vedo.alpha(0.6)
                target_mesh_vedo.cellIndividualColors(target_colors,
                                                      alphaPerCell=True)
                env_mesh.visual.face_colors[:, -1] = 255
                env_mesh_vedo.cellIndividualColors(
                    env_mesh.visual.face_colors / 255, alphaPerCell=True)

                geom_list = [env_mesh_vedo, target_mesh_vedo]

                if env.name in ('office3', 'apartment'):
                    env_mesh2 = trimesh.load(
                        '/home/simon/catkin_ws/src/mesh_partition/datasets/' +
                        env.name + '_1m_pt2.ply')
                    env_mesh_vedo2 = vedo.mesh.Mesh(env_mesh2)

                    env_mesh2.visual.face_colors[:, -1] = 150
                    env_mesh_vedo2.cellIndividualColors(
                        env_mesh2.visual.face_colors / 255, alphaPerCell=True)
                    geom_list.append(env_mesh_vedo2)

                for s in env.perch_regions:
                    surf_mesh = trimesh.Trimesh(vertices=s.points,
                                                faces=s.faces)
                    vedo_surf_mesh = vedo.mesh.Mesh(surf_mesh)
                    vedo_surf_mesh.color('g')
                    vedo_surf_mesh.opacity(0.7)
                    geom_list.append(vedo_surf_mesh)

                for i_ in range(len(optimal_cameras.cameras)):
                    quad_mesh = trimesh.load(
                        "/home/simon/catkin_ws/src/perch_placement/src/ui/models/white-red-black_quad2.ply"
                    )
                    R = rot3d_from_x_vec(
                        optimal_cameras.cameras[i_].wall_normal)
                    R2 = rot3d_from_rtp(np.array([0, -90, 0]))
                    R_aug = np.zeros([4, 4])
                    R_aug[:3, :3] = R.dot(R2)
                    R_aug[:3, -1] = optimal_cameras.cameras[i_].pose[:3]
                    quad_mesh.vertices = trimesh.transform_points(
                        quad_mesh.vertices, R_aug)
                    quad_mesh_vedo = vedo.mesh.Mesh(quad_mesh)
                    quad_mesh_vedo.cellIndividualColors(
                        quad_mesh.visual.face_colors / 255, alphaPerCell=True)

                    pymesh_frustum = optimal_cameras.cameras[i_].generate_discrete_camera_mesh(
                        degrees_per_step=20, environment=env)
                    pymesh_verts = pymesh_frustum.vertices.copy()
                    pymesh_verts.flags.writeable = True
                    pymesh_faces = pymesh_frustum.faces.copy()
                    pymesh_faces.flags.writeable = True

                    frustum = trimesh.Trimesh(
                        vertices=pymesh_frustum.vertices.copy(),
                        faces=pymesh_frustum.faces.copy())
                    vedo_frustum = vedo.mesh.Mesh(frustum)
                    vedo_frustum.alpha(0.3)
                    vedo_frustum.color("b")
                    quad_mesh_vedo.color('o')
                    geom_list.append(quad_mesh_vedo)
                    geom_list.append(vedo_frustum)

                for actor in geom_list:
                    plt1.add(actor)
            else:
                plt1 = None

            # if pso_options["multi_threading"]:
            #     # noinspection PyTypeChecker
            #     surf_best_cost, surf_best_pos = optimizer.optimize(evaluate_swarm, iters=N_iterations, environment=env,
            #                                                        cameras=search_cameras_list,
            #                                                        placed_cameras=optimal_cameras,
            #                                                        opt_options=optimization_options,
            #                                                        n_processes=multiprocessing.cpu_count(),
            #                                                        vedo_plt=plt1)
            # else:
            #     surf_best_cost, surf_best_pos = optimizer.optimize(evaluate_swarm, iters=N_iterations, environment=env,
            #                                                        cameras=search_cameras_list,
            #                                                        placed_cameras=optimal_cameras,
            #                                                        opt_options=optimization_options,
            #                                                        vedo_plt=plt1)

            pso_params = PSO_Hyperparameters(w=pso_options["pso_w"],
                                             c1=pso_options["pso_c1"],
                                             c2=pso_options["pso_c2"],
                                             lr=1,
                                             k=pso_options["pso_k"],
                                             p=pso_options["pso_p"],
                                             N_particles=N_particles[j],
                                             N_iterations=N_iterations)

            surf_best_cost, surf_best_pos = run_pso(
                fitness_function=evaluate_swarm,
                pso_hyper_parameters=pso_params,
                environment=env,
                cameras=search_cameras_list,
                placed_cameras=optimal_cameras,
                opt_options=optimization_options,
                local_pso=True)

            if surf_best_cost < best_cost:
                best_cost = copy.deepcopy(surf_best_cost)
                best_pos = copy.deepcopy(surf_best_pos)
                if pso_options["individual_surface_opt"]:
                    best_pos_surf = j
                # print("Surface " + str(j) + " has lowest cost so far.. ")
                # print("Particle: " + str(best_pos))

        if optimization_options.log_performance:
            pso_search_time[i] = (datetime.now() - start_time).total_seconds()
            pso_best_fitnesses[i] = best_cost

        if pso_options["greedy_search"]:
            search_cameras_list_copy = copy.deepcopy(search_cameras_list)
            optimization_options.surface_number = best_pos_surf
            best_cam = convert_particle_to_state(
                environment=env,
                particle=best_pos,
                cameras=search_cameras_list_copy,
                opt_options=optimization_options)[0]
            optimal_cameras.cameras.append(copy.deepcopy(best_cam))

            if enable_user_confirmation:
                if confirm_perch_placement(
                        environment=env,
                        placed_cameras=optimal_cameras.cameras,
                        focus_id=i):
                    best_cam_covariances = evaluate_camera_covariance(
                        environment=env, cameras=[best_cam])
                    optimal_cameras.append_covariances(best_cam_covariances)
                    i += 1
                else:
                    optimal_cameras.cameras.pop()
                    env.remove_rejected_from_perch_space(camera=best_cam,
                                                         r=0.3)
            else:
                best_cam_covariances = evaluate_camera_covariance(
                    environment=env, cameras=[best_cam])
                optimal_cameras.append_covariances(best_cam_covariances)
                i += 1

    evaluate_discrete_coverage(env.n_points, optimal_cameras, plot=True)

    return optimal_cameras.cameras
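
# The w/c1/c2/k/p options above parameterize the standard PSO velocity update,
# v <- w*v + c1*r1*(p_best - x) + c2*r2*(g_best - x). A minimal, self-contained sketch of a
# global-best PSO on a toy fitness (illustrative only; not the project's run_pso):
import numpy as np

rng = np.random.default_rng(0)
N, D = 8, 2                                   # particles, dimensions
x = rng.uniform(0, 1, (N, D))                 # positions in the unit box, as in `bounds`
v = np.zeros((N, D))

def fitness(x):
    return np.sum((x - 0.3) ** 2, axis=1)     # toy cost to minimize

p_best, p_cost = x.copy(), fitness(x)
g_best = p_best[np.argmin(p_cost)]

w, c1, c2 = 0.7, 1.5, 1.5
for _ in range(50):
    r1, r2 = rng.uniform(size=(2, N, D))
    v = w * v + c1 * r1 * (p_best - x) + c2 * r2 * (g_best - x)
    x = np.clip(x + v, 0, 1)                  # crude boundary handling
    cost = fitness(x)
    improved = cost < p_cost
    p_best[improved], p_cost[improved] = x[improved], cost[improved]
    g_best = p_best[np.argmin(p_cost)]
print(g_best)                                 # converges toward [0.3, 0.3]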
Example #5
def confirm_perch_placement(environment, placed_cameras, focus_id):
    global approved
    global user_responded

    plt1.keyPressFunction = perch_keyfunc
    plt1.clear()
    approved = False

    plt2.clear()
    dense_env = vedo.load(
        '/home/simon/catkin_ws/src/mesh_partition/datasets/' +
        environment.name + '_1m.ply')
    pv_dense_env = pv.read(
        '/home/simon/catkin_ws/src/mesh_partition/datasets/' +
        environment.name + '_1m.ply')

    plt2.add(dense_env)
    plt2.show()

    plt3 = pv.Plotter(notebook=False)
    plt3.add_mesh(pv_dense_env)
    # plt3.show(interactive=False)

    # load the environment mesh for display
    env_mesh = trimesh.load(
        '/home/simon/catkin_ws/src/mesh_partition/datasets/' +
        environment.name + '_1m.ply')
    # env_mesh = trimesh.load(environment.full_env_path)
    R = np.zeros([4, 4])
    R[:3, :3] = environment.R
    env_mesh.vertices = trimesh.transform_points(env_mesh.vertices, R)
    env_mesh_vedo = vedo.mesh.Mesh(env_mesh)

    target_mesh_pymesh = environment.generate_target_mesh(shape='box')
    target_mesh = trimesh.Trimesh(target_mesh_pymesh.vertices,
                                  target_mesh_pymesh.faces)
    target_mesh_vedo = vedo.mesh.Mesh(target_mesh)
    target_colors = 0.5 * np.ones([len(target_mesh.faces), 4])
    target_colors[:, 0] *= 0.0
    target_colors[:, 2] *= 0.0
    target_mesh_vedo.alpha(0.6)
    target_mesh_vedo.cellIndividualColors(target_colors, alphaPerCell=True)
    plt2.add(target_mesh_vedo)

    env_mesh.visual.face_colors[:, -1] = 255.0
    env_mesh_vedo.cellIndividualColors(env_mesh.visual.face_colors / 255.0,
                                       alphaPerCell=True)
    geom_list = [env_mesh_vedo, target_mesh_vedo]

    for s in environment.perch_regions:
        surf_mesh = trimesh.Trimesh(vertices=s.points, faces=s.faces)
        vedo_surf_mesh = vedo.mesh.Mesh(surf_mesh)
        vedo_surf_mesh.color('g')
        vedo_surf_mesh.opacity(0.5)
        geom_list.append(vedo_surf_mesh)

    for i in range(len(placed_cameras)):
        quad_mesh = trimesh.load(
            "/home/simon/catkin_ws/src/perch_placement/src/ui/models/white-red-black_quad2.ply"
        )

        # offset mesh coords to match camera pose
        # eul = placed_cameras[i].pose[3:]  # USE WALL NORMAL, NOT CAMERA POSE
        # R = rot3d_from_rtp(np.array([eul[2], -eul[0], -eul[1]]))
        R = rot3d_from_x_vec(placed_cameras[i].wall_normal)
        R2 = rot3d_from_rtp(np.array([0, -90, 0]))
        R_aug = np.zeros([4, 4])
        R_aug[:3, :3] = R.dot(R2)
        R_aug[:3, -1] = placed_cameras[i].pose[:3]
        quad_mesh.vertices = trimesh.transform_points(quad_mesh.vertices,
                                                      R_aug)
        quad_mesh_vedo = vedo.mesh.Mesh(quad_mesh)
        quad_mesh_vedo.cellIndividualColors(quad_mesh.visual.face_colors / 255,
                                            alphaPerCell=True)
        quad_mesh_pv = pv.read(
            "/home/simon/catkin_ws/src/perch_placement/src/ui/models/white-red-black_quad2.ply"
        )

        pymesh_frustum = placed_cameras[i].generate_discrete_camera_mesh(
            degrees_per_step=10, environment=environment)
        pymesh_verts = pymesh_frustum.vertices.copy()
        pymesh_verts.flags.writeable = True
        pymesh_faces = pymesh_frustum.faces.copy()
        pymesh_faces.flags.writeable = True

        if i == focus_id:
            frustum = trimesh.Trimesh(vertices=pymesh_frustum.vertices.copy(),
                                      faces=pymesh_frustum.faces.copy())
            vedo_frustum = vedo.mesh.Mesh(frustum)
            vedo_frustum.alpha(0.3)
            vedo_frustum.color("c")
            # geom_list.append(frustum_lines)
            geom_list.append(quad_mesh_vedo)
            geom_list.append(vedo_frustum)

            print("cam pose: " + str(placed_cameras[i].pose))

            pose = placed_cameras[i].pose
            plt2.camera.SetPosition(pose[0], pose[1], pose[2])
            R = rot3d_from_rtp(np.array([pose[-1], -pose[-3], -pose[-2]]))
            print("R: " + str(R))
            focus = pose[:3] + R[:, 0]
            print("focus: " + str(focus))
            plt2.camera.SetFocalPoint(focus[0], focus[1], focus[2])
            plt2.camera.SetViewUp(R[:, 2])
            plt2.camera.SetDistance(5)
            plt2.camera.SetClippingRange([0.2, 10])
            plt2.camera.SetViewAngle(placed_cameras[i].fov[-1] * 1.1)
            plt2.show(resetcam=False)

            plt3.set_position(pose[:3])
            plt3.set_viewup(R[:, 2])
            plt3.set_focus(focus)
            plt3.show(auto_close=False, interactive=False)

        else:
            # vedo_frustum.alpha(0.1)
            # vedo_frustum.color("p")
            quad_mesh_vedo.color('o')
            # geom_list.append(frustum_lines)
            geom_list.append(quad_mesh_vedo)
            # geom_list.append(vedo_frustum)
            plt2.add(quad_mesh_vedo)
            plt3.add_mesh(quad_mesh_pv)

    # render a normalized depth image of the current view for inspection
    depth = -plt3.get_image_depth(fill_value=0) / placed_cameras[0].range[1]
    depth = np.clip(depth, 0.0, 1.0)
    depth = np.round(depth * np.iinfo(np.uint16).max).astype(np.uint16)

    cv.imshow('depth', depth)
    cv.waitKey()

    for actor in geom_list:
        plt1.add(actor)

    plt1.add(
        vedo.Text2D(
            "Press 'y' to approve placement. Press 'n' to reject. "
            "\nPress 'f' for front culling, 'b' for back culling, 'c' to disable culling "
            "\nPress 'q' when done",
            pos='bottom-right',
            c='dg',
            bg='g',
            font='Godsway'))
    plt1.camera.SetPosition(7.8 * np.cos(-145 * np.pi / 180.0),
                            7.8 * np.sin(-145 * np.pi / 180.0), 3.)
    plt1.camera.SetFocalPoint(-0.026929191045848594, 0.5783514020506139,
                              0.8268966663940324)
    plt1.camera.SetViewUp(np.array([0, 0, 1]))
    plt1.camera.SetDistance(7.8)
    plt1.camera.SetClippingRange([0.25, 10])
    plt1.show(resetcam=False)

    return approved
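
# A minimal sketch of the approval flow assumed above: perch_keyfunc is not shown in this file,
# but a handler of roughly this shape would set the globals that confirm_perch_placement() reads
# (the exact vedo keypress-callback signature may differ between versions):
approved = False
user_responded = False

def perch_keyfunc(key):
    global approved, user_responded
    if key == 'y':
        approved = True
        user_responded = True
    elif key == 'n':
        approved = False
        user_responded = True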
Example #6
def evaluate_particle(particle, environment, cameras, placed_cameras=PlacedCameras(),
                      opt_options=CameraPlacementOptions(), debug=False, vedo_plt=None, maximization=False):
    """
    This function evaluates a single particle using the heuristic function defined in
    evaluate_arrangement_covariance()

    :param particle: individual particle as np.array
    :param environment: Environment class instance
    :param cameras: a list of Camera objects whose poses are set from the particle
    :param placed_cameras: PlacedCameras class instance containing the cameras already placed in the environment
    :param opt_options: CameraPlacementOptions class instance containing the optimization options
    :param debug: flag; if true, print and draw relevant information
    :param vedo_plt: optional vedo Plotter used to visualize the arrangement while it is evaluated
    :param maximization: flag; if true, the fitness is treated as a score to maximize rather than a cost
    :return: score of particle
    """
    # convert particle into camera pose; evaluate edge normal at each camera position
    cameras = convert_particle_to_state(environment=environment, particle=particle, cameras=cameras,
                                        opt_options=opt_options, debug=debug)

    score = evaluate_arrangement_covariance(environment=environment, cameras=cameras, placed_cameras=placed_cameras,
                                            debug=debug, maximization=maximization)

    if vedo_plt is not None:
        if len(placed_cameras.cameras) >= 10:
            geom_list = []
            for i in range(len(cameras)):
                pymesh_frustum = cameras[i].generate_discrete_camera_mesh(degrees_per_step=5, environment=environment)
                if len(pymesh_frustum.faces) > 0 and not np.isnan(pymesh_frustum.vertices).any():
                    pymesh_verts = pymesh_frustum.vertices.copy()
                    pymesh_verts.flags.writeable = True
                    pymesh_faces = pymesh_frustum.faces.copy()
                    pymesh_faces.flags.writeable = True
                    frustum = trimesh.Trimesh(vertices=pymesh_frustum.vertices.copy(),
                                              faces=pymesh_frustum.faces.copy())
                    vedo_frustum = vedo.mesh.Mesh(frustum)
                    vedo_frustum.alpha(0.2)
                    vedo_frustum.color("c")
                    quad_mesh = trimesh.load(
                        "/home/simon/catkin_ws/src/perch_placement/src/ui/models/white-red-black_quad2.ply")
                    R = rot3d_from_x_vec(cameras[i].wall_normal)
                    R2 = rot3d_from_rtp(np.array([0, -90, 0]))
                    R_aug = np.zeros([4, 4])
                    R_aug[:3, :3] = R.dot(R2)
                    R_aug[:3, -1] = cameras[i].pose[:3]
                    quad_mesh.vertices = trimesh.transform_points(quad_mesh.vertices, R_aug)
                    quad_mesh_vedo = vedo.mesh.Mesh(quad_mesh)
                    quad_mesh_vedo.cellIndividualColors(quad_mesh.visual.face_colors / 255, alphaPerCell=True)
                    geom_list.append(quad_mesh_vedo)
                    geom_list.append(vedo_frustum)

                    for actor in geom_list:
                        vedo_plt.add(actor)

                    vedo_plt.camera.SetPosition(7*np.cos(-145*np.pi/180.0), 7*np.sin(-145*np.pi/180.0), 6.25)
                    vedo_plt.camera.SetFocalPoint(-0.026929191045848594, 0.5783514020506139, 0.9268966663940324)
                    vedo_plt.camera.SetViewUp(np.array([0, 0, 1]))
                    vedo_plt.camera.SetDistance(7.8)
                    vedo_plt.camera.SetClippingRange([0.25, 30])
                    vedo_plt.show(interactive=False, rate=30, resetcam=False, fullscreen=True)
                    time.sleep(0.5)
                    actors = vedo_plt.actors
                    for i in range(len(cameras)):
                        vedo_plt.remove(actors.pop())
                        vedo_plt.remove(actors.pop())

    if debug:
        print(score)

    return score
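
# A minimal sketch (not the project's evaluate_swarm) of how a per-particle fitness like
# evaluate_particle() is typically applied across a PSO swarm, assuming the swarm is an
# (N_particles, particle_dimension) array and lower scores are better:
import numpy as np

def evaluate_swarm_sketch(swarm, fitness, **kwargs):
    """Return an (N_particles,) array of costs, one per particle."""
    return np.array([fitness(p, **kwargs) for p in swarm])

# usage: costs = evaluate_swarm_sketch(swarm, evaluate_particle, environment=env,
#                                      cameras=cameras, placed_cameras=placed, opt_options=opts)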