Example #1
    def _set_segmentation_colors(self, resp: List[bytes]) -> None:

        self.object_segmentation_colors = None
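        # Look for SegmentationColors output data in the response.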
        for r in resp:
            if OutputData.get_data_type_id(r) == 'segm':
                seg = SegmentationColors(r)
                colors = {}
                for i in range(seg.get_num()):
                    colors[seg.get_object_id(i)] = seg.get_object_color(i)

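                # Stack the colors in the same order as self.object_ids.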
                self.object_segmentation_colors = []
                for o_id in self.object_ids:
                    if o_id in colors:
                        self.object_segmentation_colors.append(
                            np.array(colors[o_id], dtype=np.uint8).reshape(1, 3))

                self.object_segmentation_colors = np.concatenate(
                    self.object_segmentation_colors, 0)
Example #2
    def __init__(self, index: int, rigidbodies: Rigidbodies,
                 segmentation_colors: SegmentationColors, audio: ObjectInfo):
        """
        :param index: The index of the object in `segmentation_colors`.
        :param rigidbodies: Rigidbodies output data.
        :param segmentation_colors: Segmentation colors output data.
        :param audio: Audio data (an `ObjectInfo` record) for this object.
        """

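        # Get the object ID, model name, and segmentation color from the output data.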
        self.object_id = segmentation_colors.get_object_id(index)
        self.model_name = segmentation_colors.get_object_name(index)
        self.segmentation_color = np.array(
            segmentation_colors.get_object_color(index))
        self.audio = audio

        # Get the mass.
        self.mass: float = -1
        for i in range(rigidbodies.get_num()):
            if rigidbodies.get_id(i) == self.object_id:
                self.mass = rigidbodies.get_mass(i)
                break
        assert self.mass >= 0, f"Mass not found: {self.object_id}"
Example #3
    def run(self):
        self.start()
        self.communicate(TDWUtils.create_empty_room(12, 12))

        # Find the local asset bundle for this platform.
        url = "file:///" + str(
            Path("composite_objects/" + platform.system() +
                 "/test_composite_object").resolve())

        # Add the local object.
        o_id = self.get_unique_id()
        self.communicate([{
            "$type": "add_object",
            "name": "test_composite_object",
            "url": url,
            "scale_factor": 1,
            "id": o_id
        }, {
            "$type": "set_mass",
            "id": o_id,
            "mass": 100
        }])

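        # Create an avatar to observe the objects.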
        self.communicate(
            TDWUtils.create_avatar(position={
                "x": 0,
                "y": 1.49,
                "z": -2.77
            }))

        # Test that each sub-object has a unique segmentation color.
        resp = self.communicate({
            "$type": "send_segmentation_colors",
            "ids": []
        })
        colors = SegmentationColors(resp[0])
        segmentation_colors = []
        # There are 4 objects: The parent object, the motor, the "base" of the motor, and the light.
        assert colors.get_num() == 4, colors.get_num()
        print("Segmentation colors:")
        for i in range(colors.get_num()):
            print(colors.get_object_id(i), colors.get_object_color(i))

            # Cache the color for the next test.
            segmentation_colors.append(colors.get_object_color(i))

        # Test that each sub-object is visible to the avatar.
        resp = self.communicate({
            "$type": "send_id_pass_segmentation_colors",
            "frequency": "once"
        })
        colors = IdPassSegmentationColors(resp[0])
        # There are three visible objects (the light won't be visible in the _id pass).
        assert colors.get_num_segmentation_colors() == 3

        print("\nObserved colors:")

        # Test that the colors observed by the avatar are in the cache.
        for i in range(colors.get_num_segmentation_colors()):
            observed_color = colors.get_segmentation_color(i)
            assert observed_color in segmentation_colors
            print(observed_color)

        # Get composite objects data.
        resp = self.communicate({"$type": "send_composite_objects"})
        assert len(resp) > 1
        assert OutputData.get_data_type_id(resp[0]) == "comp"

        o = CompositeObjects(resp[0])
        assert o.get_num() == 1
        # There are 3 sub-objects: The motor, the "base" of the motor, and the light.
        assert o.get_num_sub_objects(0) == 3

        print("\nCompositeObjects: ")

        commands = []
        lights = []

        # Iterate through each sub-object.
        for s in range(o.get_num_sub_objects(0)):
            sub_object_id = o.get_sub_object_id(0, s)
            s_type = o.get_sub_object_machine_type(0, s)
            print(sub_object_id, s_type)

            # Add commands depending on the type of sub-object.
            if s_type == "motor":
                # Start the motor.
                commands.append({
                    "$type": "set_motor",
                    "target_velocity": 500,
                    "force": 500,
                    "id": sub_object_id
                })
            elif s_type == "light":
                commands.append({
                    "$type": "set_sub_object_light",
                    "is_on": True,
                    "id": sub_object_id
                })
                lights.append(sub_object_id)

        self.communicate(commands)

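        # Run the simulation, blinking the lights on and off while the motor spins.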
        is_on = True
        for i in range(1000):
            commands = []
            # Every 50 frames, blink the lights on and off.
            if i % 50 == 0:
                is_on = not is_on
                for light in lights:
                    commands.append({
                        "$type": "set_sub_object_light",
                        "is_on": is_on,
                        "id": light
                    })
            else:
                commands.append({"$type": "do_nothing"})
            self.communicate(commands)
Example #4
    def run(self):
        self.start()
        self.communicate(TDWUtils.create_empty_room(12, 12))

        # Create the objects.
        statue_id = self.add_object("satiro_sculpture",
                                    position={
                                        "x": 2,
                                        "y": 0,
                                        "z": 1
                                    })
        # Request segmentation colors.
        resp = self.communicate({"$type": "send_segmentation_colors"})
        segmentation_colors = SegmentationColors(resp[0])

        # Get the segmentation color of the sculpture.
        statue_color = None
        for i in range(segmentation_colors.get_num()):
            if segmentation_colors.get_object_id(i) == statue_id:
                statue_color = segmentation_colors.get_object_color(i)
                break

        # Create the VR Rig.
        self.communicate({"$type": "create_vr_rig"})
        # Attach an avatar to the VR rig.
        self.communicate({"$type": "attach_avatar_to_vr_rig", "id": "a"})
        # Request the colors of objects currently observed by the avatar per frame.
        # Request VR rig data per frame.
        # Reduce render quality in order to improve framerate.
        self.communicate([{
            "$type": "send_id_pass_segmentation_colors",
            "frequency": "always"
        }, {
            "$type": "send_vr_rig",
            "frequency": "always"
        }, {
            "$type": "set_post_process",
            "value": False
        }, {
            "$type": "set_render_quality",
            "render_quality": 0
        }])

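        # Per frame: get the head rotation and check whether the statue is visible.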
        while True:
            resp = self.communicate({"$type": "do_nothing"})
            head_rotation = None
            can_see_statue = False
            for r in resp[:-1]:
                r_id = OutputData.get_data_type_id(r)
                # Get the head rotation.
                if r_id == "vrri":
                    head_rotation = VRRig(r).get_head_rotation()
                # Check if we can see the statue.
                elif r_id == "ipsc":
                    observed_objects = IdPassSegmentationColors(r)
                    for i in range(
                            observed_objects.get_num_segmentation_colors()):
                        if observed_objects.get_segmentation_color(
                                i) == statue_color:
                            can_see_statue = True
            if can_see_statue:
                print("You can see the object!\nHead rotation: " +
                      str(head_rotation) + "\n")
Example #5
    def run(self):
        self.start()
        init_setup_commands = [{
            "$type": "set_screen_size",
            "width": 600,
            "height": 480
        }, {
            "$type": "set_render_quality",
            "render_quality": 5
        }]
        self.communicate(init_setup_commands)

        self.communicate(self.get_add_hdri_skybox("industrial_sunset_4k"))

        # Create an empty room.
        self.communicate(TDWUtils.create_empty_room(8, 8))
        # Disable physics.
        self.communicate({"$type": "set_gravity", "value": False})

        # Add the avatar.
        self.communicate(
            TDWUtils.create_avatar(position={
                "x": 1,
                "y": 1.5,
                "z": 0.3
            },
                                   look_at=TDWUtils.array_to_vector3(
                                       [2.5, 0.5, 0.5]),
                                   avatar_id="avatar"))

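        # Load a concrete material and apply it to the floor.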
        lib = MaterialLibrarian(library="materials_med.json")
        record = lib.get_record("concrete_049")
        self.communicate({
            "$type": "add_material",
            "name": "concrete_049",
            "url": record.get_url()
        })
        self.communicate({
            "$type": "set_proc_gen_floor_material",
            "name": "concrete_049"
        })
        # self.communicate({"$type": "set_proc_gen_walls_material", "name": "concrete"})

        # self.communicate({"$type": "set_field_of_view",
        #                   "field_of_view": 68.0,
        #                   "avatar_id": "avatar"})

        bench = self.add_object(model_name="cgaxis_models_51_19_01",
                                position={
                                    "x": 2.5,
                                    "y": 0,
                                    "z": 0.5
                                },
                                rotation={
                                    "x": 0,
                                    "y": 90,
                                    "z": 0
                                },
                                library="models_full.json")

        bench_bounds = self.get_bounds_data(bench)
        top = bench_bounds.get_top(0)

        self.add_object(model_name="b05_cat_model_3dmax2012",
                        position={
                            "x": 2.5,
                            "y": top[1],
                            "z": 0.2
                        },
                        rotation={
                            "x": 0,
                            "y": 0,
                            "z": 0
                        },
                        library="models_full.json")

        self.add_object(model_name="azor",
                        position={
                            "x": 3,
                            "y": 0,
                            "z": 0.5
                        },
                        rotation={
                            "x": 0,
                            "y": 90,
                            "z": 0
                        },
                        library="models_full.json")
        """ Post-scene construction """

        # Enable image capture
        self.communicate({
            "$type": "set_pass_masks",
            "avatar_id": "avatar",
            "pass_masks": ["_id"]
        })

        scene_data = self.communicate({
            "$type": "look_at_position",
            "avatar_id": "avatar",
            "position": TDWUtils.array_to_vector3([3, 0.5, 0.5])
        })

        # images = Images(scene_data[0])
        im = cv2.imread(
            "/Users/leonard/Desktop/TDWBase-1.5.0/Python/Leonard/"
            "compare_COCO_TDW/replicated_images/exterior/id_bench_bike.png")
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

        # WRITE THIS TO JSON!
        segmentation_data = []
        COCO_to_TDW = pd.read_csv(
            '/Users/leonard/Desktop/coco/COCO_to_TDW.csv',
            header=None,
            index_col=0,
            squeeze=True).to_dict()

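        # Request the segmentation colors of all objects in the scene.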
        segmentation_colors = self.communicate({
            "$type": "send_segmentation_colors",
            "frequency": "once"
        })
        for r in segmentation_colors[:-1]:
            r_id = OutputData.get_data_type_id(r)
            if r_id == "segm":
                s = SegmentationColors(r)
                for i in range(s.get_num()):
                    name = s.get_object_name(i)
                    category = get_COCO(name, COCO_to_TDW)

                    color = s.get_object_color(i)
                    indices = np.where(im != 0)  # Segmentation pixels
                    print(color)
                    print(indices)
                    # A sample RGB value; it must match one of the 3 object colors.
                    print(im[indices[0][0]])
                    segmentation_data.append([indices, category])

        # TDWUtils.save_images(images, "bench_bike",
        #                      output_directory="/Users/leonard/Desktop/TDWBase-1.5.0/Python/Leonard/compare_COCO_TDW/replicated_images/exterior")

        # Consider using zip() to clean up coordinates

        return segmentation_data
Example #6
    def run(self, c: Controller):
        """
        Run the trial and save the output.

        :param c: The controller.
        """

        print(f"Images will be saved to: {self.output_dir}")

        # Initialize the scene.
        resp = c.communicate(self.init_commands)
        # Get a map of the segmentation colors.
        segm = SegmentationColors(resp[0])
        for i in range(segm.get_num()):
            for obj in self.moving_objects:
                if obj.object_id == segm.get_object_id(i):
                    obj.possibility.segmentation_color = segm.get_object_color(
                        i)

        # Request scene data and images per frame.
        frame_data: List[dict] = []
        resp = c.communicate([{
            "$type": "send_images",
            "frequency": "always"
        }, {
            "$type": "send_transforms",
            "ids": self.m_ids,
            "frequency": "always"
        }, {
            "$type": "send_rigidbodies",
            "ids": self.m_ids,
            "frequency": "always"
        }])

        # Run the trial.
        for frame in range(self.num_frames):
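            # Per-frame maps of observed segmentation colors and object positions, keyed by object ID.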
            colors: Dict[int, Tuple[int, int, int]] = {}
            transforms: Dict[int, Tuple[float, float, float]] = {}

            transform_data = None
            rigidbody_data = None

            # Parse the output data.
            for r in resp[:-1]:
                r_id = OutputData.get_data_type_id(r)
                # Record all Transforms data.
                if r_id == "tran":
                    transform_data = Transforms(r)
                    for i in range(transform_data.get_num()):
                        transforms.update({
                            transform_data.get_id(i):
                            transform_data.get_position(i)
                        })
                # Record all Rigidbodies data.
                elif r_id == "rigi":
                    rigidbody_data = Rigidbodies(r)
                # Save the images.
                elif r_id == "imag":
                    images = Images(r)
                    for p in range(images.get_num_passes()):
                        if images.get_pass_mask(p) == "_id":
                            image_colors = TDWUtils.get_pil_image(
                                images, p).getcolors()
                            for ic in image_colors:
                                color = ic[1]
                                for obj in self.moving_objects:
                                    if obj.possibility.segmentation_color == color:
                                        colors.update({obj.object_id: color})

                    TDWUtils.save_images(Images(r),
                                         TDWUtils.zero_padding(frame),
                                         output_directory=self.output_dir)

            # Append frame data.
            frame_data.append(
                Trial._get_frame_state(transform_data, rigidbody_data, frame))

            # Build the frame state.
            state = State(colors, transforms, frame)

            # Apply object actions.
            commands = []
            for o in self.occluders:
                commands.extend(o.get_frame_commands(state))
            for mo in self.moving_objects:
                commands.extend(mo.get_frame_commands(state))
            if len(commands) == 0:
                commands = [{"$type": "do_nothing"}]

            # Send the commands and update the state.
            resp = c.communicate(commands)

        # Cleanup.
        c.communicate([{
            "$type": "destroy_all_objects"
        }, {
            "$type": "unload_asset_bundles"
        }, {
            "$type": "send_images",
            "frequency": "never"
        }, {
            "$type": "send_transforms",
            "ids": self.m_ids,
            "frequency": "never"
        }, {
            "$type": "send_rigidbodies",
            "ids": self.m_ids,
            "frequency": "never"
        }])

        print("\tGenerated images.")
        # Output the scene metadata.
        Path(self.output_dir).joinpath("state.json").write_text(
            json.dumps({"frames": frame_data}), encoding="utf-8")
        print("\tWrote state file.")

        # Get _id passes with randomized colors.
        self._randomize_segmentation_colors()
        print("\tCreated random segmentation colors.")

        # Organize the images.
        self._organize_output()
        print("\tOrganized files")
Example #7
    def __init__(self,
                 object_id: int,
                 rigidbodies: Rigidbodies,
                 segmentation_colors: SegmentationColors,
                 bounds: Bounds,
                 audio: ObjectInfo,
                 target_object: bool = False):
        """
        :param object_id: The unique ID of the object.
        :param rigidbodies: Rigidbodies output data.
        :param segmentation_colors: Segmentation colors output data.
        :param bounds: Bounds output data.
        :param audio: Audio data (an `ObjectInfo` record) for this object.
        :param target_object: If True, this is a target object.
        """

        self.object_id = object_id
        self.model_name = audio.name
        self.container = self.model_name in StaticObjectInfo.CONTAINERS
        self.kinematic = self.model_name in StaticObjectInfo._KINEMATIC
        self.target_object = target_object

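        # Get the semantic category of the object.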
        self.category = ""
        # This is a sub-object of a composite object.
        if audio.library == "":
            # Get the record of the composite object.
            for k in StaticObjectInfo._COMPOSITE_OBJECTS:
                for v in StaticObjectInfo._COMPOSITE_OBJECTS[k]:
                    if v == audio.name:
                        record = TransformInitData.LIBRARIES[
                            "models_core.json"].get_record(k)
                        # Get the semantic category.
                        self.category = record.wcategory
                        break
        else:
            # Get the model record from the audio data.
            record = TransformInitData.LIBRARIES[audio.library].get_record(
                audio.name)
            # Get the semantic category.
            self.category = record.wcategory

        # Get the segmentation color.
        self.segmentation_color: Optional[np.ndarray] = None
        for i in range(segmentation_colors.get_num()):
            if segmentation_colors.get_object_id(i) == self.object_id:
                self.segmentation_color = np.array(
                    segmentation_colors.get_object_color(i))
                break
        assert self.segmentation_color is not None, f"Segmentation color not found: {self.object_id}"

        # Get the size of the object.
        self.size = np.array([0, 0, 0])
        for i in range(bounds.get_num()):
            if bounds.get_id(i) == self.object_id:
                self.size = np.array([
                    float(
                        np.abs(bounds.get_right(i)[0] -
                               bounds.get_left(i)[0])),
                    float(
                        np.abs(bounds.get_top(i)[1] -
                               bounds.get_bottom(i)[1])),
                    float(
                        np.abs(bounds.get_front(i)[2] - bounds.get_back(i)[2]))
                ])
                break
        assert np.linalg.norm(
            self.size) > 0, f"Bounds data not found for: {self.object_id}"

        # Get the mass.
        self.mass: float = -1
        for i in range(rigidbodies.get_num()):
            if rigidbodies.get_id(i) == self.object_id:
                self.mass = rigidbodies.get_mass(i)
                break
        assert self.mass >= 0, f"Mass not found: {self.object_id}"