Example #1
    def communicate(self, commands: Union[dict, List[dict]]) -> List[bytes]:
        resp = super().communicate(commands)

        # Save images per frame.
        images = get_data(resp=resp, d_type=Images)
        if images is not None:
            TDWUtils.save_images(images=images,
                                 filename=TDWUtils.zero_padding(
                                     self.frame_count, width=4),
                                 output_directory=self.output_dir)
            self.frame_count += 1
        return resp
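Every example on this page uses a get_data helper to pull one output data type out of the build's response. Its implementation isn't shown here; below is a minimal sketch, assuming each output data class maps to a four-character type ID. Only "imag" for Images is confirmed (by the loop in Example #9); the _TYPE_IDS mapping and the other IDs are assumptions.

    from typing import Dict, List, Optional, Type, TypeVar
    from tdw.output_data import OutputData, Images, Transforms

    T = TypeVar("T", bound=OutputData)

    # Hypothetical mapping from output data class to its four-character ID.
    # Only "imag" is confirmed by Example #9; the rest are assumptions.
    _TYPE_IDS: Dict[type, str] = {Images: "imag", Transforms: "tran"}

    def get_data(resp: List[bytes], d_type: Type[T]) -> Optional[T]:
        """Return the first chunk of `d_type` in `resp`, or None if absent."""
        r_id = _TYPE_IDS[d_type]
        # The last element of resp is the frame marker, so skip it
        # (Example #9 iterates the response the same way).
        for i in range(len(resp) - 1):
            if OutputData.get_data_type_id(resp[i]) == r_id:
                return d_type(resp[i])
        return None
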
Example #2
    def __init__(self, resp: List[bytes]):
        """
        :param resp: The response from the build.
        """

        env = get_data(resp=resp, d_type=Envs)

        # Get the overall size of the scene.
        self.x_min = 1000
        self.x_max = 0
        self.z_min = 1000
        self.z_max = 0
        self.envs: List[Environment] = list()
        for i in range(env.get_num()):
            e = Environment(env=env, i=i)
            if e.x_0 < self.x_min:
                self.x_min = e.x_0
            if e.z_0 < self.z_min:
                self.z_min = e.z_0
            if e.x_1 > self.x_max:
                self.x_max = e.x_1
            if e.z_1 > self.z_max:
                self.z_max = e.z_1
            self.envs.append(e)
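This constructor only needs a frame that contains environments output data. A minimal usage sketch, assuming the class is the Environments wrapper constructed the same way in Examples #6 and #7, and that `c` is a connected Controller:

    # Request environments data and build the scene-bounds wrapper from it.
    resp = c.communicate([TDWUtils.create_empty_room(12, 12),
                          {"$type": "send_environments"}])
    env_data = Environments(resp=resp)
    print(env_data.x_min, env_data.x_max, env_data.z_min, env_data.z_max)
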
Example #3
    args = parser.parse_args()

    c = Controller()
    c.start()
    resp = c.communicate([
        TDWUtils.create_empty_room(12, 12),
        c.get_add_object(model_name=args.model, object_id=0), {
            "$type": "send_composite_objects"
        }, {
            "$type": "send_segmentation_colors"
        }
    ])
    c.communicate({"$type": "terminate"})

    # Get the name of each object.
    colors = get_data(resp=resp, d_type=SegmentationColors)
    names = dict()
    for i in range(colors.get_num()):
        names[colors.get_object_id(i)] = colors.get_object_name(i)

    # Parse the composite object data.
    co = get_data(resp=resp, d_type=CompositeObjects)
    sub_objects = dict()
    for i in range(co.get_num()):
        object_id = co.get_object_id(i)
        for j in range(co.get_num_sub_objects(i)):
            machine = co.get_sub_object_machine_type(i, j)
            sub_object_id = co.get_sub_object_id(i, j)
            sub_objects[names[sub_object_id]] = {
                "amp": 0,
                "mass": 0,
Example #4
                            "x": x,
                            "y": 0,
                            "z": z
                        },
                        "scale": 0.3,
                        "color": color
                    })
            # Create an overhead camera and capture an image.
            commands.extend(TDWUtils.create_avatar(
                position={"x": 0, "y": 31, "z": 0},
                look_at=TDWUtils.VECTOR3_ZERO))
            commands.extend([{
                "$type": "set_pass_masks",
                "pass_masks": ["_img"]
            }, {
                "$type": "send_images"
            }])
            resp = c.communicate(commands)
            # Save the image.
            images = get_data(resp=resp, d_type=Images)
            TDWUtils.save_images(images=images,
                                 filename=f"{scene}_{layout}",
                                 output_directory=output_dir,
                                 append_pass=False)
            print(scene, layout)
    c.communicate({"$type": "terminate"})
Example #5
    c = StickyMittenAvatarController()
    for scene in ["1a", "2a", "4a", "5a"]:
        for layout in [0, 1, 2]:
            print(scene, layout)
            c.init_scene(scene=scene, layout=layout)
            # Get the initial positions of each target object and container.
            positions = dict()
            for object_id in c.frame.object_transforms:
                if c.static_object_info[object_id].container or c.static_object_info[object_id].target_object:
                    positions[object_id] = c.frame.object_transforms[object_id].position

            # Advance the simulation.
            for i in range(100):
                c.communicate([])

            # Get the new position of the objects.
            resp = c.communicate({"$type": "send_transforms"})
            tr = get_data(resp=resp, d_type=Transforms)
            too_far = False
            for i in range(tr.get_num()):
                object_id = tr.get_id(i)
                if object_id in positions:
                    distance = np.linalg.norm(positions[object_id] - np.array(tr.get_position(i)))
                    if distance > 0.1:
                        print(object_id, distance)
                        too_far = True
            if not too_far:
                print("Good!\n")
    c.end()
Example #6
            commands.extend([{
                "$type": "set_floorplan_roof",
                "show": False
            }, {
                "$type": "remove_position_markers"
            }, {
                "$type": "send_environments"
            }, {
                "$type": "send_version"
            }, {
                "$type": "send_segmentation_colors"
            }])
            # Send the commands.
            resp = c.communicate(commands)
            env = Environments(resp=resp)
            is_standalone = get_data(resp=resp,
                                     d_type=Version).get_standalone()
            # Cache the names of all objects and get all surface models.
            segmentation_colors = get_data(resp=resp,
                                           d_type=SegmentationColors)

            object_names: Dict[int, str] = dict()
            surface_ids: List[int] = list()
            for i in range(segmentation_colors.get_num()):
                object_name = segmentation_colors.get_object_name(i).lower()
                object_id = segmentation_colors.get_object_id(i)
                object_names[object_id] = object_name
                # Check if this is a surface.
                # The record might be None if this is a composite object.
                if object_name in surface_object_categories:
                    surface_ids.append(object_id)
Example #7
    spawn_positions = dict()
    # Iterate through each floorplan scene.
    for scene in [1, 2, 4, 5]:
        print(scene)
        spawn_positions[scene] = dict()

        # Get the scene bounds (use this to get the actual (x, z) coordinates).
        scene_bounds = sbd[str(scene)]

        # Load the scene and request Environments data.
        resp = c.communicate([
            c.get_add_scene(scene_name=f"floorplan_{scene}a"), {
                "$type": "send_environments"
            }
        ])
        envs = get_data(resp=resp, d_type=Environments)

        # Get the center of each room.
        centers = []
        for i in range(envs.get_num()):
            centers.append(np.array(envs.get_center(i)))

        # Get the spawn positions per layout.
        for layout in [0, 1, 2]:
            spawn_positions[scene][layout] = list()
            for center in centers:
                # Load the occupancy map.
                occ = np.load(
                    str(
                        OCCUPANCY_MAP_DIRECTORY.joinpath(
                            f"{scene}_{layout}.npy").resolve()))
Example #8
                          target_object="a",
                          images="cam")

    # Grasp and pick up the container.
    c.grasp_object(object_id=c.container_id, arm=Arm.right)
    c.reach_for_target(target={"x": 0.2, "y": 0.2, "z": 0.3}, arm=Arm.right)

    # Use low-level commands to rotate the head and save an image.
    # Don't use these in an actual simulation!
    # To rotate the camera, see: `StickyMittenAvatar.rotate_camera_by()`
    # To save an image, see: `FrameData`
    resp = c.communicate([{
        "$type": "rotate_head_by",
        "axis": "pitch",
        "angle": 40
    }, {
        "$type": "rotate_head_by",
        "axis": "yaw",
        "angle": 15
    }, {
        "$type": "send_images",
        "frequency": "once",
        "avatar_id": "c"
    }])
    # Save the image.
    TDWUtils.save_images(images=get_data(resp=resp, d_type=Images),
                         output_directory=str(Path("..").resolve()),
                         filename="social",
                         append_pass=False)
    c.end()
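In a real simulation, the comments above say to use the high-level API instead of raw commands. A hedged sketch of that path; the exact rotate_camera_by signature is an assumption based on the comment:

    # Rotate the camera via the controller API rather than rotate_head_by.
    c.rotate_camera_by(pitch=40, yaw=15)
    # Per the comments, images should then be read from FrameData
    # (see Example #9) instead of a raw send_images command.
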
Example #9
    def __init__(self, resp: List[bytes], objects: Dict[int, StaticObjectInfo],
                 avatar: Avatar):
        """
        :param resp: The response from the build.
        :param objects: Static object info per object. Key = the ID of the object in the scene.
        :param avatar: The avatar in the scene.
        """

        self._frame_count = Controller.get_frame(resp[-1])

        self.audio: List[Tuple[Base64Sound, int]] = list()
        collisions, env_collisions, rigidbodies = FrameData._P.get_collisions(
            resp=resp)

        # Record avatar collisions.
        if avatar is not None:
            self.avatar_object_collisions = avatar.collisions
            self.avatar_env_collisions = avatar.env_collisions
            self.held_objects = {
                Arm.left: avatar.frame.get_held_left(),
                Arm.right: avatar.frame.get_held_right()
            }
        else:
            self.avatar_object_collisions = None
            self.avatar_env_collisions = None
            self.held_objects = None

        # Get the object transform data.
        self.object_transforms: Dict[int, Transform] = dict()
        tr = get_data(resp=resp, d_type=Transforms)
        for i in range(tr.get_num()):
            o_id = tr.get_id(i)
            self.object_transforms[o_id] = Transform(
                position=np.array(tr.get_position(i)),
                rotation=np.array(tr.get_rotation(i)),
                forward=np.array(tr.get_forward(i)))

        # Get camera matrix data.
        matrices = get_data(resp=resp, d_type=CameraMatrices)
        self.projection_matrix = matrices.get_projection_matrix()
        self.camera_matrix = matrices.get_camera_matrix()

        # Get the transform data of the avatar.
        self.avatar_transform = Transform(
            position=np.array(avatar.frame.get_position()),
            rotation=np.array(avatar.frame.get_rotation()),
            forward=np.array(avatar.frame.get_forward()))
        self.avatar_body_part_transforms: Dict[int, Transform] = dict()
        for i in range(avatar.frame.get_num_body_parts()):
            body_part_id = avatar.frame.get_body_part_id(i)
            self.avatar_body_part_transforms[body_part_id] = Transform(
                position=np.array(avatar.frame.get_body_part_position(i)),
                rotation=np.array(avatar.frame.get_body_part_rotation(i)),
                forward=np.array(avatar.frame.get_body_part_forward(i)))

        # Get the audio of each collision.
        for coll in collisions:
            if not FrameData._P.is_valid_collision(coll):
                continue

            collider_id = coll.get_collider_id()
            collidee_id = coll.get_collidee_id()

            collider_info: Optional[ObjectInfo] = None
            collidee_info: Optional[ObjectInfo] = None

            if collider_id in objects:
                collider_info = objects[collider_id].audio
            # Check if the object is a body part.
            elif collider_id in avatar.body_parts_static:
                collider_info = avatar.body_parts_static[collider_id].audio
            if collidee_id in objects:
                collidee_info = objects[collidee_id].audio
            # Check if the object is a body part.
            elif collidee_id in avatar.body_parts_static:
                collidee_info = avatar.body_parts_static[collidee_id].audio

            # If either object isn't a cached object, don't try to add audio.
            if collider_info is None or collidee_info is None:
                continue

            if collider_info.mass < collidee_info.mass:
                target_id = collider_id
                target_amp = collider_info.amp
                target_mat = collider_info.material.name
                other_id = collidee_id
                other_amp = collidee_info.amp
                other_mat = collidee_info.material.name
            else:
                target_id = collidee_id
                target_amp = collidee_info.amp
                target_mat = collidee_info.material.name
                other_id = collider_id
                other_amp = collider_info.amp
                other_mat = collider_info.material.name
            rel_amp = other_amp / target_amp
            audio = FrameData._P.get_sound(coll, rigidbodies, other_id,
                                           other_mat, target_id, target_mat,
                                           rel_amp)
            self.audio.append((audio, target_id))
        # Get the audio of each environment collision.
        for coll in env_collisions:
            collider_id = coll.get_object_id()
            if collider_id not in objects:
                continue
            v = FrameData._get_velocity(rigidbodies, collider_id)
            if (v is not None) and (v > 0):
                collider_info = objects[collider_id].audio
                audio = FrameData._P.get_sound(
                    coll, rigidbodies, 1, FrameData._SURFACE_MATERIAL.name,
                    collider_id, collider_info.material.name, 0.01)
                self.audio.append((audio, collider_id))
        # Get the image data.
        self.id_pass: Optional[np.array] = None
        self.depth_pass: Optional[np.array] = None
        self.image_pass: Optional[np.array] = None
        for i in range(0, len(resp) - 1):
            if OutputData.get_data_type_id(resp[i]) == "imag":
                images = Images(resp[i])
                for j in range(images.get_num_passes()):
                    if images.get_pass_mask(j) == "_id":
                        self.id_pass = images.get_image(j)
                    elif images.get_pass_mask(j) == "_depth_simple":
                        self.depth_pass = images.get_image(j)
                    elif images.get_pass_mask(j) == "_img":
                        self.image_pass = images.get_image(j)
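
The passes stored above are still encoded image bytes, not arrays. A minimal sketch of decoding the _img pass with Pillow; `frame` is a hypothetical FrameData instance, and PNG/JPEG encoding is assumed:

    from io import BytesIO

    import numpy as np
    from PIL import Image

    if frame.image_pass is not None:
        # Decode the encoded bytes into an (H, W, 3) RGB array.
        img = np.asarray(Image.open(BytesIO(frame.image_pass)))
        print(img.shape)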