def test_get_camera_locations(self):
    """Verify that camera.get_current_cameras_locations returns the expected
    location dict for the default 'Camera' object."""
    expected = {'Camera': np.array([0, 0, 20])}
    actual = camera.get_current_cameras_locations(['Camera'])
    # same number of entries, same camera names, same coordinates
    self.assertEqual(len(expected), len(actual), 'Wrong number of Cameras locations')
    self.assertEqual(expected.keys(), actual.keys(), 'Wrong camera names')
    npt.assert_almost_equal(expected['Camera'], actual['Camera'],
                            err_msg='Wrong camera locations')
Example #2
    def generate_dataset(self):
        """Generate a multiview dataset according to the configuration that
        was passed in the constructor.

        Returns:
            bool: False if ``config.dataset.image_count`` is not positive,
            True once all requested scenes/views have been rendered.

        Raises:
            ValueError: if ``self.render_mode`` is neither 'default' nor
            'multiview'.
        """
        # The number of images in the dataset is controlled differently in case of default (singleview) vs multiview
        # rendering mode.
        # In default mode
        #   dataset.image_count controls the number of images
        #
        # In multiview mode
        #   dataset.image_count = dataset.scene_count * dataset.view_count
        #
        # In addition the [multiview] config section defines specific configuration such as
        #
        #   [multiview_setup]
        #   mode(str): how to generate camera locations for multiview. E.g., viewsphere, bezier, random
        #   mode_cfg(dict-like/config): additional mode specific configs

        # filename setup: nothing to do for an empty dataset
        if self.config.dataset.image_count <= 0:
            return False
        # zero-pad width for the scene index in output filenames
        scn_format_width = int(ceil(log(self.config.dataset.scene_count, 10)))

        camera_names = [
            self.get_camera_name(cam_str)
            for cam_str in self.config.scene_setup.cameras
        ]
        if self.render_mode == 'default':
            # single view per camera: reshape each location to (1, 3) so the
            # per-location loop below works uniformly for both render modes
            cameras_locations = camera_utils.get_current_cameras_locations(
                camera_names)
            for cam_name, cam_location in cameras_locations.items():
                cameras_locations[cam_name] = np.reshape(cam_location, (1, 3))

        elif self.render_mode == 'multiview':
            # generate view_count locations per camera according to the
            # [multiview_setup] config (mode, mode_config, offset)
            cameras_locations, _ = camera_utils.generate_multiview_cameras_locations(
                num_locations=self.config.dataset.view_count,
                mode=self.config.multiview_setup.mode,
                camera_names=camera_names,
                config=self.config.multiview_setup.mode_config,
                offset=self.config.multiview_setup.offset)

        else:
            raise ValueError(
                f'Selected render mode {self.render_mode} not currently supported'
            )

        # some debug options
        # NOTE: at this point the object of interest have been loaded in the blender
        # file but their positions have not yet been randomized..so they should all be located
        # at the origin
        if self.config.debug.enabled:
            # simple plot of generated camera locations
            if self.config.debug.plot:
                from amira_blender_rendering.math.curves import plot_points

                for cam_name in camera_names:
                    plot_points(np.array(cameras_locations[cam_name]),
                                bpy.context.scene.objects[cam_name],
                                plot_axis=self.config.debug.plot_axis,
                                scatter=self.config.debug.scatter)

            # save all generated camera locations to .blend for later debug
            if self.config.debug.save_to_blend:
                for i_cam, cam_name in enumerate(camera_names):
                    self.save_to_blend(
                        self.dirinfos[i_cam],
                        camera_name=cam_name,
                        camera_locations=cameras_locations[cam_name],
                        basefilename='robottable_camera_locations')

        # control loop for the number of static scenes to render
        scn_counter = 0
        while scn_counter < self.config.dataset.scene_count:

            # randomize scene: move objects at random locations, and forward simulate physics
            self.randomize_environment_texture()
            self.randomize_textured_objects_textures()
            self.randomize_object_transforms(self.objs + self.distractors)
            self.forward_simulate()

            # check visibility
            repeat_frame = False
            if not self.config.render_setup.allow_occlusions:
                # BUGFIX: accumulate across cameras instead of overwriting.
                # Previously `repeat_frame = not self.test_visibility(...)`
                # let the last camera's result clobber an earlier failure, so
                # an occluded view could slip through when a later camera
                # passed. Any single failing camera must re-randomize the scene.
                for cam_name, cam_locations in cameras_locations.items():
                    if not self.test_visibility(cam_name, cam_locations):
                        repeat_frame = True
                        break

            # if we need to repeat (change static scene) we skip one iteration
            # without increasing the counter
            if repeat_frame:
                # logger.warning: `warn` is a deprecated alias
                self.logger.warning(
                    f'Something wrong. '
                    f'Re-randomizing scene {scn_counter + 1}/{self.config.dataset.scene_count}'
                )
                continue

            # loop over cameras
            for i_cam, cam_str in enumerate(self.config.scene_setup.cameras):
                # get bpy object camera name
                cam_name = self.get_camera_name(cam_str)

                # check whether we broke the for-loop responsible for image generation for
                # multiple camera views and repeat the frame by re-generating the static scene
                if repeat_frame:
                    break

                # extract camera locations
                cam_locations = cameras_locations[cam_name]

                # zero-pad width for the view index in output filenames
                view_format_width = int(ceil(log(len(cam_locations), 10)))

                # activate camera
                self.activate_camera(cam_name)

                # loop over locations
                for view_counter, cam_loc in enumerate(cam_locations):

                    self.logger.info(
                        f"Generating image for camera {cam_str}: "
                        f"scene {scn_counter + 1}/{self.config.dataset.scene_count}, "
                        f"view {view_counter + 1}/{self.config.dataset.view_count}"
                    )

                    # filename, e.g. s003_v012
                    base_filename = f"s{scn_counter:0{scn_format_width}}_v{view_counter:0{view_format_width}}"

                    # set camera location
                    self.set_camera_location(cam_name, cam_loc)

                    # at this point all the locations have already been tested for visibility
                    # according to allow_occlusions config.
                    # Here, we re-run visibility to set object visibility level as well as to update
                    # the depsgraph needed to update translation and rotation info
                    all_visible = self.test_visibility(cam_name, cam_loc)

                    if not all_visible:
                        # if debug is enabled save to blender for debugging
                        if self.config.debug.enabled and self.config.debug.save_to_blend:
                            self.save_to_blend(
                                self.dirinfos[i_cam],
                                scene_index=scn_counter,
                                view_index=view_counter,
                                basefilename='robottable_visibility')

                    # update path information in compositor
                    self.renderman.setup_pathspec(self.dirinfos[i_cam],
                                                  base_filename, self.objs)

                    # finally, render
                    self.renderman.render()

                    # postprocess. this will take care of creating additional
                    # information, as well as fix filenames
                    try:
                        self.renderman.postprocess(
                            self.dirinfos[i_cam],
                            base_filename,
                            bpy.context.scene.camera,
                            self.objs,
                            self.config.camera_info.zeroing,
                            postprocess_config=self.config.postprocess)

                        if self.config.debug.enabled and self.config.debug.save_to_blend:
                            # reset frame to 0 and save
                            bpy.context.scene.frame_set(0)
                            self.save_to_blend(self.dirinfos[i_cam],
                                               scene_index=scn_counter,
                                               view_index=view_counter,
                                               basefilename='robottable')

                    except ValueError:
                        self.logger.error(
                            f"\033[1;31mValueError during post-processing. "
                            f"Re-generating image {scn_counter + 1}/{self.config.dataset.scene_count}\033[0;37m"
                        )
                        # flag the whole static scene for regeneration; the
                        # outer camera loop checks this and breaks as well
                        repeat_frame = True

                        # if requested save to blend files for debugging
                        if self.config.debug.enabled and self.config.debug.save_to_blend:
                            self.logger.error(
                                'There might be a discrepancy between generated mask and '
                                'object visibility data. Saving debug info to .blend'
                            )
                            self.save_to_blend(self.dirinfos[i_cam],
                                               scene_index=scn_counter,
                                               view_index=view_counter,
                                               on_error=True,
                                               basefilename='robottable')

                        break

            # update scene counter only for successfully rendered scenes
            if not repeat_frame:
                scn_counter = scn_counter + 1

        return True