Example #1
def construct_single(parent_id, X_PB):
    # `mut` is the test's alias for the module under test; `width` and
    # `height` come from the enclosing test scope.
    depth_camera = DepthRenderCamera(
        RenderCameraCore("renderer",
                         mut.CameraInfo(width, height, np.pi / 6),
                         ClippingRange(0.1, 6.0), RigidTransform()),
        DepthRange(0.1, 5.5))
    return mut.RgbdSensor(parent_id=parent_id,
                          X_PB=X_PB,
                          depth_camera=depth_camera)
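A minimal usage sketch for reading images off such a sensor (assumes the sensor has been added to a diagram and connected to a SceneGraph; `diagram` and `sensor` are placeholder names):

context = diagram.CreateDefaultContext()
sensor_context = sensor.GetMyContextFromRoot(context)
color = sensor.color_image_output_port().Eval(sensor_context)
depth = sensor.depth_image_32F_output_port().Eval(sensor_context)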
Example #2
def construct(parent_id, X_PB):
    color_camera = ColorRenderCamera(
        RenderCameraCore("renderer",
                         mut.CameraInfo(width, height, np.pi / 6),
                         ClippingRange(0.1, 6.0), RigidTransform()),
        False)  # show_window=False
    depth_camera = DepthRenderCamera(color_camera.core(),
                                     DepthRange(0.1, 5.5))
    return mut.RgbdSensor(parent_id=parent_id,
                          X_PB=X_PB,
                          color_camera=color_camera,
                          depth_camera=depth_camera)
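Reusing `color_camera.core()` keeps the color and depth cameras' intrinsics and clipping ranges in sync. An equivalent, more explicit construction (a sketch under the same assumptions as the example above):

core = RenderCameraCore("renderer",
                        mut.CameraInfo(width, height, np.pi / 6),
                        ClippingRange(0.1, 6.0), RigidTransform())
color_camera = ColorRenderCamera(core, False)  # one shared core
depth_camera = DepthRenderCamera(core, DepthRange(0.1, 5.5))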
Example #3
def test_rgbd_sensor_registration(self):
    X_PC = RigidTransform(p=[1, 2, 3])
    station = ManipulationStation(time_step=0.001)
    station.SetupManipulationClassStation()
    plant = station.get_multibody_plant()
    color_camera = ColorRenderCamera(
        RenderCameraCore("renderer", CameraInfo(10, 10, np.pi / 4),
                         ClippingRange(0.1, 10.0), RigidTransform()),
        False)
    depth_camera = DepthRenderCamera(color_camera.core(),
                                     DepthRange(0.1, 9.5))
    station.RegisterRgbdSensor("single_sensor", plant.world_frame(), X_PC,
                               depth_camera)
    station.RegisterRgbdSensor("dual_sensor", plant.world_frame(), X_PC,
                               color_camera, depth_camera)
    station.Finalize()
    camera_poses = station.GetStaticCameraPosesInWorld()
    # The three default cameras plus the two just added.
    self.assertEqual(len(camera_poses), 5)
    self.assertIn("single_sensor", camera_poses)
    self.assertIn("dual_sensor", camera_poses)
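GetStaticCameraPosesInWorld() returns a name-to-RigidTransform map, so the registered cameras can also be inspected directly (a small sketch):

for name, X_WC in camera_poses.items():
    print(name, X_WC.translation())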
Example #4
    def add_rgbd_sensor(self, camera_name, sensor_config):
        """
        Adds an RGB-D camera to the diagram.
        """
        builder = self._builder
        if self._renderer_name is None:
            self._renderer_name = "vtk_renderer"
            self._sg.AddRenderer(self._renderer_name,
                                 MakeRenderEngineVtk(RenderEngineVtkParams()))

        width = sensor_config['width']
        height = sensor_config['height']
        fov_y = sensor_config['fov_y']
        z_near = sensor_config['z_near']
        z_far = sensor_config['z_far']

        # The camera pose is expressed in the right-down-forward convention.
        X_W_camera = transform_from_dict(sensor_config)
        color_camera = ColorRenderCamera(
            RenderCameraCore(self._renderer_name,
                             CameraInfo(width, height, fov_y),
                             ClippingRange(z_near, z_far), RigidTransform()),
            False)
        depth_camera = DepthRenderCamera(color_camera.core(),
                                         DepthRange(z_near, z_far))

        # Add the camera system and wire it to the scene graph's query port.
        camera = builder.AddSystem(
            RgbdSensor(parent_id=self._sg.world_frame_id(),
                       X_PB=X_W_camera,
                       color_camera=color_camera,
                       depth_camera=depth_camera))
        builder.Connect(self._sg.get_query_output_port(),
                        camera.query_object_input_port())

        self._rgbd_sensors[camera_name] = camera
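A hypothetical sensor_config for this helper (the pose keys consumed by transform_from_dict are not shown in the source, so they are elided here):

config = {
    'width': 640,
    'height': 480,
    'fov_y': np.pi / 4,  # vertical field of view, in radians
    'z_near': 0.1,
    'z_far': 10.0,
    # ...plus the pose entries expected by transform_from_dict
}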
Example #5
                 plant.GetFrameByName("iiwa_link_0", iiwa_2),
                 X_AB=xyz_rpy_deg([0, 1, 0], [0, 0, 0]))

# Add a VTK render engine to the scene graph.
renderer_name = "renderer"
scene_graph.AddRenderer(renderer_name,
                        MakeRenderEngineVtk(RenderEngineVtkParams()))

# Add a camera with color and depth (values chosen arbitrarily); it renders
# through the VTK engine registered with the scene graph above.
depth_camera = DepthRenderCamera(
    RenderCameraCore(renderer_name,
                     CameraInfo(width=640, height=480, fov_y=np.pi / 4),
                     ClippingRange(0.01, 10.0), RigidTransform()),
    DepthRange(0.01, 10.0))

world_id = plant.GetBodyFrameIdOrThrow(plant.world_body().index())
X_WB = xyz_rpy_deg([4, 0, 0], [-90, 0, 90])
sensor = RgbdSensor(world_id, X_PB=X_WB, depth_camera=depth_camera)

builder.AddSystem(sensor)
builder.Connect(scene_graph.get_query_output_port(),
                sensor.query_object_input_port())

# Add the Drake Visualizer publisher.
# IMPORTANT: the drake-visualizer application (from the Drake build) must be
# running for anything to show up.
DrakeVisualizer.AddToBuilder(builder, scene_graph)

# To use meshcat instead, connect a meshcat visualizer here: it can spawn a
# new meshcat-server or connect to an already-running instance (see the
# sketch below).
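A sketch of that meshcat hookup using the older pydrake helper this comment appears to refer to (the exact API depends on the Drake version, so treat the import and signature as assumptions):

from pydrake.systems.meshcat_visualizer import ConnectMeshcatVisualizer
# zmq_url="new" spawns a fresh meshcat-server; pass an existing server's
# URL (e.g. "tcp://127.0.0.1:6000") to connect to one instead.
meshcat_vis = ConnectMeshcatVisualizer(builder, scene_graph, zmq_url="new")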
Example #6
def perform_iou_testing(model_file, test_specific_temp_directory,
                        pose_directory):

    random_poses = {}
    # Read the camera translation calculated and applied in Gazebo.
    # The random-poses file is read because it contains everything:
    with open(
            test_specific_temp_directory + "/pics/" + pose_directory +
            "/poses.txt", "r") as datafile:
        for line in datafile:
            if line.startswith("Translation:"):
                line_split = line.split(" ")
                # The translation sign is flipped because Gazebo moved the
                # robot, whereas in Drake we move the camera.
                trans_x = float(line_split[1])
                trans_y = float(line_split[2])
                trans_z = float(line_split[3])
            elif line.startswith("Scaling:"):
                line_split = line.split(" ")
                scaling = float(line_split[1])
            else:
                line_split = line.split(" ")
                # strip() drops the trailing newline before the comparison.
                if line_split[1].strip() == "nan":
                    random_poses[line_split[0][:-1]] = 0
                else:
                    random_poses[line_split[0][:-1]] = float(line_split[1])

    builder = DiagramBuilder()
    plant, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0)

    model = make_parser(plant).AddModelFromFile(model_file)

    model_bodies = me.get_bodies(plant, {model})
    frame_W = plant.world_frame()
    frame_B = model_bodies[0].body_frame()
    if len(plant.GetBodiesWeldedTo(plant.world_body())) < 2:
        plant.WeldFrames(frame_W,
                         frame_B,
                         X_PC=plant.GetDefaultFreeBodyPose(frame_B.body()))

    # Creating cameras:
    renderer_name = "renderer"
    scene_graph.AddRenderer(renderer_name,
                            MakeRenderEngineVtk(RenderEngineVtkParams()))

    # N.B. These properties are chosen arbitrarily.
    depth_camera = DepthRenderCamera(
        RenderCameraCore(
            renderer_name,
            CameraInfo(
                width=960,
                height=540,
                focal_x=831.382036787,
                focal_y=831.382036787,
                center_x=480,
                center_y=270,
            ),
            ClippingRange(0.01, 10.0),
            RigidTransform(),
        ),
        DepthRange(0.01, 10.0),
    )

    world_id = plant.GetBodyFrameIdOrThrow(plant.world_body().index())

    # Creating perspective cam
    X_WB = xyz_rpy_deg(
        [
            1.6 / scaling + trans_x, -1.6 / scaling + trans_y,
            1.2 / scaling + trans_z
        ],
        [-120, 0, 45],
    )
    sensor_perspective = create_camera(builder, world_id, X_WB, depth_camera,
                                       scene_graph)
    # Creating top cam
    X_WB = xyz_rpy_deg([0 + trans_x, 0 + trans_y, 2.2 / scaling + trans_z],
                       [-180, 0, -90])
    sensor_top = create_camera(builder, world_id, X_WB, depth_camera,
                               scene_graph)
    # Creating front cam
    X_WB = xyz_rpy_deg([2.2 / scaling + trans_x, 0 + trans_y, 0 + trans_z],
                       [-90, 0, 90])
    sensor_front = create_camera(builder, world_id, X_WB, depth_camera,
                                 scene_graph)
    # Creating side cam
    X_WB = xyz_rpy_deg([0 + trans_x, 2.2 / scaling + trans_y, 0 + trans_z],
                       [-90, 0, 180])
    sensor_side = create_camera(builder, world_id, X_WB, depth_camera,
                                scene_graph)
    # Creating back cam
    X_WB = xyz_rpy_deg([-2.2 / scaling + trans_x, 0 + trans_y, 0 + trans_z],
                       [-90, 0, -90])
    sensor_back = create_camera(builder, world_id, X_WB, depth_camera,
                                scene_graph)

    DrakeVisualizer.AddToBuilder(builder, scene_graph)

    # Remove gravity to avoid extra movement of the model during simulation.
    plant.gravity_field().set_gravity_vector(
        np.array([0, 0, 0], dtype=np.float64))

    # Switch off collisions to avoid problems with random positions
    collision_filter_manager = scene_graph.collision_filter_manager()
    model_inspector = scene_graph.model_inspector()
    geometry_ids = GeometrySet(model_inspector.GetAllGeometryIds())
    collision_filter_manager.Apply(
        CollisionFilterDeclaration().ExcludeWithin(geometry_ids))

    plant.Finalize()
    diagram = builder.Build()

    simulator = Simulator(diagram)
    simulator.Initialize()

    dofs = plant.num_actuated_dofs()
    if dofs != plant.num_positions():
        raise ValueError(
            "Error in converted model: the number of positions does not "
            "equal the number of actuated DOFs.")

    if pose_directory == "random_pose":
        joint_positions = [0] * dofs
        for joint_name, pose in random_poses.items():
            # NaN is the only value that compares unequal to itself.
            if pose != pose:
                pose = 0
            # Drake appends '_joint' to the joint name when there is a
            # name collision.
            if plant.HasJointNamed(joint_name):
                joint = plant.GetJointByName(joint_name)
            else:
                joint = plant.GetJointByName(joint_name + "_joint")
            joint_positions[joint.position_start()] = pose
        sim_plant_context = plant.GetMyContextFromRoot(
            simulator.get_mutable_context())
        plant.get_actuation_input_port(model).FixValue(sim_plant_context,
                                                       np.zeros((dofs, 1)))
        plant.SetPositions(sim_plant_context, model, joint_positions)

        simulator.AdvanceTo(1)

    generate_images_and_iou(simulator, sensor_perspective,
                            test_specific_temp_directory, pose_directory, 1)
    generate_images_and_iou(simulator, sensor_top,
                            test_specific_temp_directory, pose_directory, 2)
    generate_images_and_iou(simulator, sensor_front,
                            test_specific_temp_directory, pose_directory, 3)
    generate_images_and_iou(simulator, sensor_side,
                            test_specific_temp_directory, pose_directory, 4)
    generate_images_and_iou(simulator, sensor_back,
                            test_specific_temp_directory, pose_directory, 5)
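Example #6 relies on a create_camera helper that is not shown. A minimal sketch consistent with how it is called above (the name and signature come from the call sites; the body is an assumption):

def create_camera(builder, parent_id, X_PB, depth_camera, scene_graph):
    # Add an RgbdSensor at pose X_PB and wire it to the scene graph.
    sensor = builder.AddSystem(
        RgbdSensor(parent_id=parent_id, X_PB=X_PB,
                   depth_camera=depth_camera))
    builder.Connect(scene_graph.get_query_output_port(),
                    sensor.query_object_input_port())
    return sensor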