Code example #1
File: video_camera.py Project: mudrole1/morse
    def default_action(self):
        """ Update the texture image. """

        # Grab an image from the texture
        if self.bge_object['capturing'] and self._n != 0:

            # Call the action of the parent class
            super().default_action()

            # NOTE: Blender returns the image as a binary string
            #  encoded as RGBA
            #image_data = morse.core.blenderapi.cameras()[self.name()].source
            image_data = blenderapi.texture().imageToArray(
                blenderapi.cameras()[self.name()].source, 'RGB')

            self.robot_pose = copy.copy(self.robot_parent.position_3d)

            # Fill in the exportable data
            self.local_data['image'] = image_data
            self.capturing = True

            if self._n > 0:
                self._n -= 1
                if self._n == 0:
                    self.completed(status.SUCCESS)
        else:
            self.capturing = False
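
The countdown in `_n` above is what lets a caller request a fixed number of frames. In MORSE this counter is armed by an asynchronous `capture` service on the same sensor; the following is a minimal sketch of such a companion service (the decorator and import path follow MORSE conventions, but treat the exact signature as an assumption):

    from morse.core.services import async_service

    @async_service
    def capture(self, n):
        """ Capture n images, then complete the service.

        Setting self._n arms the countdown consumed by default_action();
        the service completes with status.SUCCESS once it reaches zero.
        """
        self._n = n
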
Code example #2
File: camera.py Project: ekyah411/morse
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.bge_object.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Assuming the screen consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("The video camera could not be properly initialized. "
                         "The child objects could not be found. "
                         "The best solution is to re-link the camera.")
            return False

        # Get the reference to the scene
        scene_map = blenderapi.get_scene_map()
        logger.info("Scene %s from %s" %
                    (self.scene_name, repr(scene_map.keys())))
        self._scene = scene_map[self.scene_name]
        self._morse_scene = scene_map['S.MORSE_LOGIC']

        # Link the objects using bge.texture
        if not blenderapi.hascameras():
            blenderapi.initcameras()

        mat_id = blenderapi.texture().materialID(screen, material_name)
        vt_camera = blenderapi.texture().Texture(screen, mat_id)
        vt_camera.source = blenderapi.texture().ImageRender(
            self._scene, camera)

        # Set the focal length of the camera using the Game Logic Property
        camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)

        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" %
                    camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" %
                    camera.far)

        # Set the background to be used for the render
        vt_camera.source.background = self.bg_color
        # Define an image size. It must be a power of two. Default 512 * 512
        vt_camera.source.capsize = [self.image_width, self.image_height]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" %
                    (self.name(), vt_camera.source.capsize))

        # Reverse the image (boolean game-property)
        vt_camera.source.flip = self.vertical_flip

        try:
            # Use the Z-Buffer as an image texture for the camera
            if 'retrieve_zbuffer' in self.bge_object:
                vt_camera.source.zbuff = self.bge_object['retrieve_zbuffer']
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)

        try:
            # Use the Z-Buffer as input with an array of depths
            if 'retrieve_depth' in self.bge_object:
                vt_camera.source.depth = self.bge_object['retrieve_depth']
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)

        blenderapi.cameras()[self.name()] = vt_camera
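
Registering the Texture under blenderapi.cameras() matters because bge.texture sources only render on demand: some per-frame code must call refresh() on the texture. A minimal sketch of that per-frame step, assuming the registry built above:

    def default_action(self):
        """ Update the texture image. """
        # Exit if the cameras could not be prepared
        if not blenderapi.hascameras():
            return
        # refresh(True) renders the ImageRender source and uploads the
        # result, so a fresh frame is available through .source
        blenderapi.cameras()[self.name()].refresh(True)
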
Code example #3
File: camera.py Project: zyh1994/morse
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.bge_object.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Assuming the screen consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("The video camera could not be properly initialized. "
                         "The child objects could not be found. "
                         "The best solution is to re-link the camera.")
            return False

        if not self._offscreen_create:
            self._compute_syncable_objects()
            img_renderer = blenderapi.texture().ImageRender(
                self._scene, camera)
        else:
            fbo = self._offscreen_create(
                self.image_width, self.image_height,
                blenderapi.render().RAS_OFS_RENDER_TEXTURE)
            img_renderer = blenderapi.texture().ImageRender(
                blenderapi.scene(), camera, fbo)

        mat_id = blenderapi.texture().materialID(screen, material_name)
        self._camera_image = blenderapi.texture().Texture(screen, mat_id)
        self._camera_image.source = img_renderer

        # Set the focal length of the camera using the Game Logic Property.
        # Either the focal length (lens) or the fov parameter can be used:
        # setting one computes the other accordingly. Fov takes precedence.
        if self.image_fov is not None:
            camera.fov = self.image_fov
            self.image_focal = camera.lens
        else:
            camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)
        logger.info("\tFOV of the camera is: %s" % camera.fov)

        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" %
                    camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" %
                    camera.far)

        # Set the background to be used for the render
        self._camera_image.source.background = self.bg_color
        # Define an image size. It must be a power of two. Default 512 * 512
        self._camera_image.source.capsize = [
            self.image_width, self.image_height
        ]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" %
                    (self.name(), self._camera_image.source.capsize))

        # Workaround capsize limit to window size
        self.image_width, self.image_height = self._camera_image.source.capsize

        # Reverse the image (boolean game-property)
        self._camera_image.source.flip = self.vertical_flip

        try:
            # Use the Z-Buffer as an image texture for the camera
            if self.retrieve_zbuffer:
                self._camera_image.source.zbuff = True
            # Use the Z-Buffer as input with an array of depths
            if self.retrieve_depth:
                self._camera_image.source.depth = True
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)
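
This variant branches on `self._offscreen_create`, which distinguishes Blender builds that can render to an offscreen FBO (bge.render.offScreenCreate, added around Blender 2.76) from older ones that render via a second scene. How that attribute might be resolved is sketched below; only the BGE attribute name is real, the probing pattern is an assumption:

    # Probe once at construction time; None on older Blender builds,
    # which sends _setup_video_texture down the second-scene path.
    self._offscreen_create = getattr(blenderapi.render(),
                                     'offScreenCreate', None)
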
Code example #4
File: camera.py Project: DAInamite/morse
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.bge_object.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Assuming the screen consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("The video camera could not be properly initialized. "
                         "The child objects could not be found. "
                         "The best solution is to re-link the camera.")
            return False

        # Get the reference to the scene
        scene_map = blenderapi.get_scene_map()
        logger.info("Scene %s from %s"% (self.scene_name, repr(scene_map.keys()) ) )
        self._scene = scene_map[self.scene_name]
        self._morse_scene = scene_map['S.MORSE_LOGIC']

        """
        Compute the relation between objects in the current scene and
        objects in the main logic scene.

        The logic is a bit complex, as in the case of group, we can have
        objects with the same name (but different ids). So, in this
        case, we follow the hierarchy on both scene to find
        correspondance (assuming no recursive group)

        known_ids is used to track objects alreay referenced and not
        include it twice (and possibly missing the fact that the same
        name can reference multiples different objects)

        I'm definitively not sure it is correct at all, it is a really
        really dark corner of Blender :). But it seems to do the job!
        """
        self._scene_syncable_objects = []
        known_ids = set()
        for obj in self._scene.objects:
            if obj.name != '__default__cam__' and id(obj) not in known_ids:
                if blenderapi.version() < (2, 63, 0):
                    members = None
                elif blenderapi.version() < (2, 64, 0):
                    members = obj.group
                elif blenderapi.version() < (2, 65, 0):
                    members = obj.group_parent
                else:
                    members = obj.groupMembers
                if not members:
                    self._scene_syncable_objects.append(
                            (obj, self._morse_scene.objects[obj.name]))
                    known_ids.add(id(obj))
                else:
                    if blenderapi.version() < (2, 64, 0):
                        main_members = self._morse_scene.objects[obj.name].group
                    elif blenderapi.version() < (2, 65, 0):
                        main_members = self._morse_scene.objects[obj.name].group_parent
                    else:
                        main_members = self._morse_scene.objects[obj.name].groupMembers
                    for i in range(len(main_members)):
                        self._scene_syncable_objects.append(
                                (members[i], main_members[i]))
                        known_ids.add(id(members[i]))
                        children = members[i].childrenRecursive
                        main_children = main_members[i].childrenRecursive
                        for child in children:
                            self._scene_syncable_objects.append(
                                    (child, main_children[child.name]))
                            known_ids.add(id(child))


        # Link the objects using bge.texture
        if not blenderapi.hascameras():
            blenderapi.initcameras()

        mat_id = blenderapi.texture().materialID(screen, material_name)
        vt_camera = blenderapi.texture().Texture(screen, mat_id)
        vt_camera.source = blenderapi.texture().ImageRender(self._scene, camera)

        # Set the focal length of the camera using the Game Logic Property
        camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)

        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" %
                       camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" %
                       camera.far)

        # Set the background to be used for the render
        vt_camera.source.background = self.bg_color
        # Define an image size. It must be a power of two. Default 512 * 512
        vt_camera.source.capsize = [self.image_width, self.image_height]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" %
                (self.name(), vt_camera.source.capsize))

        # Reverse the image (boolean game-property)
        vt_camera.source.flip = self.vertical_flip

        try:
            # Use the Z-Buffer as an image texture for the camera
            if self.retrieve_zbuffer:
                vt_camera.source.zbuff = True
            # Use the Z-Buffer as input with an array of depths
            if self.retrieve_depth:
                vt_camera.source.depth = True
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)

        blenderapi.cameras()[self.name()] = vt_camera
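
The pairing logic in the long comment above reduces to: match objects across the two scenes by name, but deduplicate by id() so that group instances sharing a name are paired positionally rather than collapsed. Stripped of the Blender version checks and group handling, the core idea looks like this (a simplified illustration, not MORSE code):

    def pair_by_name(scene_objects, logic_objects):
        """ Pair each scene object with its same-named logic object. """
        pairs = []
        known_ids = set()  # track objects already paired
        for obj in scene_objects:
            if obj.name != '__default__cam__' and id(obj) not in known_ids:
                pairs.append((obj, logic_objects[obj.name]))
                known_ids.add(id(obj))
        return pairs
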
Code example #5
File: camera.py Project: flixr/morse
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.bge_object.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Assuming the screen consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("The video camera could not be properly initialized. "
                         "The child objects could not be found. "
                         "The best solution is to re-link the camera.")
            return False

        if not self._offscreen_create:
            self._compute_syncable_objects()
            img_renderer = blenderapi.texture().ImageRender(self._scene, camera)
        else:
            fbo = self._offscreen_create(self.image_width, self.image_height,
                                         blenderapi.render().RAS_OFS_RENDER_TEXTURE)
            img_renderer = blenderapi.texture().ImageRender(blenderapi.scene(), camera, fbo)

        mat_id = blenderapi.texture().materialID(screen, material_name)
        self._camera_image = blenderapi.texture().Texture(screen, mat_id)
        self._camera_image.source = img_renderer

        # Set the focal length of the camera using the Game Logic Property
        camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)

        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" %
                       camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" %
                       camera.far)

        # Set the background to be used for the render
        self._camera_image.source.background = self.bg_color
        # Define an image size. It must be a power of two. Default 512 * 512
        self._camera_image.source.capsize = [self.image_width, self.image_height]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" %
                (self.name(), self._camera_image.source.capsize))

        # Workaround capsize limit to window size
        self.image_width, self.image_height = self._camera_image.source.capsize

        # Reverse the image (boolean game-property)
        self._camera_image.source.flip = self.vertical_flip

        try:
            # Use the Z-Buffer as an image texture for the camera
            if self.retrieve_zbuffer:
                self._camera_image.source.zbuff = True
            # Use the Z-Buffer as input with an array of depths
            if self.retrieve_depth:
                self._camera_image.source.depth = True
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)
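
The capsize read-back near the end exists because, without an FBO, ImageRender clamps capsize to the current window size, so the requested resolution may silently shrink. A small check along these lines would make the clamping visible (a sketch; the log message is hypothetical):

    requested = [self.image_width, self.image_height]
    actual = list(self._camera_image.source.capsize)
    if actual != requested:
        logger.warn("capsize clamped from %s to %s by the window size" %
                    (requested, actual))
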
Code example #6
File: camera.py Project: imclab/morse
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.bge_object.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Assuming the screen consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("The video camera could not be properly initialized. "
                         "The child objects could not be found. "
                         "The best solution is to re-link the camera.")
            return False

        # Get the reference to the scene
        scene_map = blenderapi.get_scene_map()
        logger.info("Scene %s from %s"% (self.scene_name, repr(scene_map.keys()) ) )
        self._scene = scene_map[self.scene_name]
        self._morse_scene = scene_map['S.MORSE_LOGIC']

        # Link the objects using bge.texture
        if not blenderapi.hascameras():
            blenderapi.initcameras()

        mat_id = blenderapi.texture().materialID(screen, material_name)
        vt_camera = blenderapi.texture().Texture(screen, mat_id)
        vt_camera.source = blenderapi.texture().ImageRender(self._scene, camera)

        # Set the focal length of the camera using the Game Logic Property
        camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)
        
        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" %
                       camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" %
                       camera.far)

        # Set the background to be used for the render
        vt_camera.source.background = self.bg_color
        # Define an image size. It must be a power of two. Default 512 * 512
        vt_camera.source.capsize = [self.image_width, self.image_height]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" %
                (self.name(), vt_camera.source.capsize))

        # Reverse the image (boolean game-property)
        vt_camera.source.flip = self.vertical_flip

        try:
            # Use the Z-Buffer as an image texture for the camera
            if 'retrieve_zbuffer' in self.bge_object:
                vt_camera.source.zbuff = self.bge_object['retrieve_zbuffer']
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)

        try:
            # Use the Z-Buffer as input with an array of depths
            if 'retrieve_depth' in self.bge_object:
                vt_camera.source.depth = self.bge_object['retrieve_depth']
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)

        blenderapi.cameras()[self.name()] = vt_camera
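
Here the zbuff/depth flags come from optional Game Logic properties: a bge object exposes its properties through dict-style access, so `'retrieve_zbuffer' in self.bge_object` tests for the property before reading it. The pattern generalizes to a small helper (hypothetical name, sketch only):

    def get_game_property(obj, name, default=False):
        """ Read an optional Game Logic property with a fallback. """
        return obj[name] if name in obj else default

    # equivalent to the guarded reads above
    zbuff = get_game_property(self.bge_object, 'retrieve_zbuffer')
    depth = get_game_property(self.bge_object, 'retrieve_depth')
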
Code example #7
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.bge_object.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Assuming the screen consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("The video camera could not be properly initialized. "
                         "The child objects could not be found. "
                         "The best solution is to re-link the camera.")
            return False

        # Get the reference to the scene
        scene_map = blenderapi.get_scene_map()
        logger.info("Scene %s from %s" %
                    (self.scene_name, repr(scene_map.keys())))
        self._scene = scene_map[self.scene_name]
        self._morse_scene = scene_map['S.MORSE_LOGIC']
        """
        Compute the relation between objects in the current scene and
        objects in the main logic scene.

        The logic is a bit complex, as in the case of group, we can have
        objects with the same name (but different ids). So, in this
        case, we follow the hierarchy on both scene to find
        correspondance (assuming no recursive group)

        known_ids is used to track objects alreay referenced and not
        include it twice (and possibly missing the fact that the same
        name can reference multiples different objects)

        I'm definitively not sure it is correct at all, it is a really
        really dark corner of Blender :). But it seems to do the job!
        """
        self._scene_syncable_objects = []
        known_ids = set()
        for obj in self._scene.objects:
            if obj.name != '__default__cam__' and id(obj) not in known_ids:
                if blenderapi.version() < (2, 63, 0):
                    members = None
                elif blenderapi.version() < (2, 64, 0):
                    members = obj.group
                elif blenderapi.version() < (2, 65, 0):
                    members = obj.group_parent
                else:
                    members = obj.groupMembers
                if not members:
                    self._scene_syncable_objects.append(
                        (obj, self._morse_scene.objects[obj.name]))
                    known_ids.add(id(obj))
                else:
                    if blenderapi.version() < (2, 64, 0):
                        main_members = self._morse_scene.objects[
                            obj.name].group
                    elif blenderapi.version() < (2, 65, 0):
                        main_members = self._morse_scene.objects[
                            obj.name].group_parent
                    else:
                        main_members = self._morse_scene.objects[
                            obj.name].groupMembers
                    for i in range(len(main_members)):
                        self._scene_syncable_objects.append(
                            (members[i], main_members[i]))
                        known_ids.add(id(members[i]))
                        children = members[i].childrenRecursive
                        main_children = main_members[i].childrenRecursive
                        for child in children:
                            self._scene_syncable_objects.append(
                                (child, main_children[child.name]))
                            known_ids.add(id(child))

        # Link the objects using bge.texture
        if not blenderapi.hascameras():
            blenderapi.initcameras()

        mat_id = blenderapi.texture().materialID(screen, material_name)
        vt_camera = blenderapi.texture().Texture(screen, mat_id)
        vt_camera.source = blenderapi.texture().ImageRender(
            self._scene, camera)

        # Set the focal length of the camera using the Game Logic Property
        camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)

        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" %
                    camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" %
                    camera.far)

        # Set the background to be used for the render
        vt_camera.source.background = self.bg_color
        # Define an image size. It must be a power of two. Default 512 * 512
        vt_camera.source.capsize = [self.image_width, self.image_height]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" %
                    (self.name(), vt_camera.source.capsize))

        # Reverse the image (boolean game-property)
        vt_camera.source.flip = self.vertical_flip

        try:
            # Use the Z-Buffer as an image texture for the camera
            if self.retrieve_zbuffer:
                vt_camera.source.zbuff = True
            # Use the Z-Buffer as input with an array of depths
            if self.retrieve_depth:
                vt_camera.source.depth = True
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)

        blenderapi.cameras()[self.name()] = vt_camera
Code example #8
File: camera.py Project: Arkapravo/morse-0.6
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.blender_obj.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Assuming the screen consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("""
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    ERROR: The video camera could not be properly initialized.
    The child objects could not be found.
    The best solution is to re-link the camera.
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    """)
            return False

        # Get the reference to the scene
        scene = blenderapi.scene()

        # Link the objects using bge.texture
        if not blenderapi.hascameras():
            blenderapi.initcameras()

        mat_id = blenderapi.texture().materialID(screen, material_name)
        vt_camera = blenderapi.texture().Texture(screen, mat_id)
        vt_camera.source = blenderapi.texture().ImageRender(scene, camera)

        # Set the focal length of the camera using the Game Logic Property
        camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)
        
        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" % camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" % camera.far)

        # Set the background to be used for the render
        vt_camera.source.background = self.bg_color
        # Define an image size. It must be a power of two. Default 512 * 512
        vt_camera.source.capsize = [self.image_width, self.image_height]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" % \
                (self.name(), vt_camera.source.capsize))

        # Reverse the image (boolean game-property)
        # cf. bge.logic.video.source.flip (bge.texture.ImageRender)
        # http://wiki.blender.org/index.php/Dev:Source/GameEngine/2.49/VideoTexture#Setup_the_source
        vt_camera.source.flip = self.vertical_flip

        try:
            # Use the z buffer as an image texture for the camera
            if 'Zbuffer' in self.blender_obj:
                vt_camera.source.zbuff = self.blender_obj['Zbuffer']
        except AttributeError as detail:
            logger.warn("%s\nBlender does not support z buffer in images. You need to add a patch" % detail)

        try:
            # Use the z buffer as input with an array of depths
            if 'Depth' in self.blender_obj:
                vt_camera.source.depth = self.blender_obj['Depth']
        except AttributeError as detail:
            logger.warn("%s\nBlender does not support z buffer in images. You need to add a patch" % detail)

        blenderapi.cameras()[self.name()] = vt_camera
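
Once registered, the texture is retrieved from blenderapi.cameras() by other code; code example #1 shows the read side, where the rendered frame is pulled out of the source as a pixel array. Condensed to its essentials (mode strings follow bge.texture.imageToArray):

    # Fetch the latest rendered frame for this camera as raw pixels
    source = blenderapi.cameras()[self.name()].source
    image_data = blenderapi.texture().imageToArray(source, 'RGB')
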