Example #1
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.output_data import AvatarSimpleBody


class AvatarDrag(Controller):  # Illustrative class name; the example assumes a Controller subclass.
    def run(self, drag_on_land, angular_drag_on_land):
        self.start()
        self.communicate(TDWUtils.create_empty_room(40, 40))

        # Create the avatar.
        self.communicate(TDWUtils.create_avatar(avatar_type="A_Simple_Body",
                                                position={"x": 0, "y": 1, "z": 0}))

        # When the y value of the avatar's position is this value, the avatar is on the ground.
        ground_y = 1
        # When the y value of the avatar's position exceeds ground_y by this much, it is in the air.
        in_air_threshold = 0.05

        # If true, the avatar is in the air.
        is_in_air = False

        # Set high default drag values.
        # Set a low mass.
        # Apply a force.
        # Request avatar data.
        resp = self.communicate([{"$type": "set_avatar_drag",
                                  "avatar_id": "a",
                                  "drag": 30,
                                  "angular_drag": 80},
                                 {"$type": "send_avatars",
                                  "frequency": "always"},
                                 {"$type": "apply_force_to_avatar",
                                  "avatar_id": "a",
                                  "direction": {"x": 0.3, "y": 0.7, "z": 0},
                                  "magnitude": 900}])

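        # Step through the simulation for 500 frames, adjusting drag whenever the avatar takes off or lands.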
        for i in range(500):
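            # Parse the avatar's current position from the avatar output data.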
            x, y, z = AvatarSimpleBody(resp[0]).get_position()
            # If the avatar just launched into the air, set its drag values to 0.
            if y > ground_y + in_air_threshold and not is_in_air:
                is_in_air = True
                resp = self.communicate({"$type": "set_avatar_drag",
                                         "avatar_id": "a",
                                         "drag": 0,
                                         "angular_drag": 0})
                print("The avatar is in the air on frame: " + str(Controller.get_frame(resp[-1])))
            # If the avatar just landed on the ground, set drag values.
            elif y <= ground_y + in_air_threshold and is_in_air:
                resp = self.communicate({"$type": "set_avatar_drag",
                                         "avatar_id": "a",
                                         "drag": drag_on_land,
                                         "angular_drag": angular_drag_on_land})
                is_in_air = False
                print("The avatar is on the ground on frame: " + str(Controller.get_frame(resp[-1])))
            # Wait.
            else:
                resp = self.communicate({"$type": "do_nothing"})

Example #2

# Imports assumed for this listing (older tdw API). The remaining names (StaticObjectInfo, Avatar, Arm,
# Transform, get_data, FrameData._P, FrameData._SURFACE_MATERIAL, FrameData._get_velocity) are assumed
# to be defined elsewhere in the surrounding package.
from typing import List, Dict, Tuple, Optional
import numpy as np
from tdw.controller import Controller
from tdw.output_data import OutputData, Transforms, CameraMatrices, Images
from tdw.py_impact import Base64Sound, ObjectInfo


class FrameData:
    def __init__(self, resp: List[bytes], objects: Dict[int, StaticObjectInfo],
                 avatar: Avatar):
        """
        :param resp: The response from the build.
        :param objects: Static object info per object. Key = the ID of the object in the scene.
        :param avatar: The avatar in the scene.
        """

        self._frame_count = Controller.get_frame(resp[-1])

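        # Audio generated on this frame, stored as (sound, object ID) tuples.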
        self.audio: List[Tuple[Base64Sound, int]] = list()
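        # Parse collision, environment collision, and rigidbody output data (FrameData._P is assumed to be a shared PyImpact instance).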
        collisions, env_collisions, rigidbodies = FrameData._P.get_collisions(
            resp=resp)

        # Record avatar collisions.
        if avatar is not None:
            self.avatar_object_collisions = avatar.collisions
            self.avatar_env_collisions = avatar.env_collisions
            self.held_objects = {
                Arm.left: avatar.frame.get_held_left(),
                Arm.right: avatar.frame.get_held_right()
            }
        else:
            self.avatar_object_collisions = None
            self.avatar_env_collisions = None
            self.held_objects = None

        # Get the object transform data.
        self.object_transforms: Dict[int, Transform] = dict()
        tr = get_data(resp=resp, d_type=Transforms)
        for i in range(tr.get_num()):
            o_id = tr.get_id(i)
            self.object_transforms[o_id] = Transform(
                position=np.array(tr.get_position(i)),
                rotation=np.array(tr.get_rotation(i)),
                forward=np.array(tr.get_forward(i)))

        # Get camera matrix data.
        matrices = get_data(resp=resp, d_type=CameraMatrices)
        self.projection_matrix = matrices.get_projection_matrix()
        self.camera_matrix = matrices.get_camera_matrix()

        # Get the transform data of the avatar.
        self.avatar_transform = Transform(
            position=np.array(avatar.frame.get_position()),
            rotation=np.array(avatar.frame.get_rotation()),
            forward=np.array(avatar.frame.get_forward()))
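        # Get the transform data of each of the avatar's body parts.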
        self.avatar_body_part_transforms: Dict[int, Transform] = dict()
        for i in range(avatar.frame.get_num_body_parts()):
            body_part_id = avatar.frame.get_body_part_id(i)
            self.avatar_body_part_transforms[body_part_id] = Transform(
                position=np.array(avatar.frame.get_body_part_position(i)),
                rotation=np.array(avatar.frame.get_body_part_rotation(i)),
                forward=np.array(avatar.frame.get_body_part_forward(i)))

        # Get the audio of each collision.
        for coll in collisions:
            if not FrameData._P.is_valid_collision(coll):
                continue

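            # Get the IDs of the collider and the collidee.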
            collider_id = coll.get_collider_id()
            collidee_id = coll.get_collidee_id()

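            # Look up cached audio info (mass, amplitude, material) for each object.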
            collider_info: Optional[ObjectInfo] = None
            collidee_info: Optional[ObjectInfo] = None

            if collider_id in objects:
                collider_info = objects[collider_id].audio
            # Check if the object is a body part.
            elif collider_id in avatar.body_parts_static:
                collider_info = avatar.body_parts_static[collider_id].audio
            if collidee_id in objects:
                collidee_info = objects[collidee_id].audio
            # Check if the object is a body part.
            elif collidee_id in avatar.body_parts_static:
                collidee_info = avatar.body_parts_static[collidee_id].audio

            # If either object isn't a cached object, don't try to add audio.
            if collider_info is None or collidee_info is None:
                continue

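            # The lighter object is treated as the "target" that produces the impact sound; the heavier object is the "other".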
            if collider_info.mass < collidee_info.mass:
                target_id = collider_id
                target_amp = collider_info.amp
                target_mat = collider_info.material.name
                other_id = collidee_id
                other_amp = collidee_info.amp
                other_mat = collidee_info.material.name
            else:
                target_id = collidee_id
                target_amp = collidee_info.amp
                target_mat = collidee_info.material.name
                other_id = collider_id
                other_amp = collider_info.amp
                other_mat = collider_info.material.name
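            # The amplitude of the "other" object relative to the target.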
            rel_amp = other_amp / target_amp
            audio = FrameData._P.get_sound(coll, rigidbodies, other_id,
                                           other_mat, target_id, target_mat,
                                           rel_amp)
            self.audio.append((audio, target_id))
        # Get the audio of each environment collision.
        for coll in env_collisions:
            collider_id = coll.get_object_id()
            if collider_id not in objects:
                continue
            v = FrameData._get_velocity(rigidbodies, collider_id)
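            # Only generate audio if the object is actually moving.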
            if (v is not None) and (v > 0):
                collider_info = objects[collider_id].audio
                audio = FrameData._P.get_sound(
                    coll, rigidbodies, 1, FrameData._SURFACE_MATERIAL.name,
                    collider_id, collider_info.material.name, 0.01)
                self.audio.append((audio, collider_id))
        # Get the image data.
        self.id_pass: Optional[np.ndarray] = None
        self.depth_pass: Optional[np.ndarray] = None
        self.image_pass: Optional[np.ndarray] = None
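        # Iterate through the output data, skipping the last element (the frame number).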
        for i in range(0, len(resp) - 1):
            if OutputData.get_data_type_id(resp[i]) == "imag":
                images = Images(resp[i])
                for j in range(images.get_num_passes()):
                    if images.get_pass_mask(j) == "_id":
                        self.id_pass = images.get_image(j)
                    elif images.get_pass_mask(j) == "_depth_simple":
                        self.depth_pass = images.get_image(j)
                    elif images.get_pass_mask(j) == "_img":
                        self.image_pass = images.get_image(j)