Example no. 1
0
    def __init__ (self, obj, parent=None):

        AbstractObject.__init__(self)

        # Fill in the data sent as parameters
        self.bge_object = obj
        self.robot_parent = parent

        self.level = self.bge_object.get("abstraction_level", "default")

        # Variable to indicate the activation status of the component
        self._active = True

        self.check_level()

        # Define the position of sensors with respect
        #  to their robot parent
        # TODO: implement this using morse.helpers.transformation
        if parent:
            self.relative_position = obj.getVectTo(parent.bge_object)

        # Create an instance of the 3d transformation class
        self.position_3d = morse.helpers.transformation.Transformation3d(obj)

        self.initialize_local_data()
        self.update_properties()

        # The actual frequency at which the action is called
        # The 'frequency' setting of the game sensor gives the number of
        # logic ticks skipped between two executions of the action,
        # e.g. game sensor frequency = 0 -> sensor runs at full logic rate
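        # Worked example (illustrative numbers, not taken from any scene):
        # with a logic rate of 60 Hz and skippedTicks = 5, the action runs
        # at 60 / (5 + 1) = 10 Hz.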
        sensors = blenderapi.getalwayssensors(obj)
        self._frequency = blenderapi.getfrequency()
        # New MORSE_LOGIC sensor, see AbstractComponent.morseable()
        morselogic = [s for s in sensors if s.name.startswith('MORSE_LOGIC')]
        if len(morselogic) == 1:
            if blenderapi.version() >= (2, 74, 5):
                self._frequency /= morselogic[0].skippedTicks + 1
            else:
                self._frequency /= morselogic[0].frequency + 1
        # Backward compatible (some actuators got special logic)
        elif len(sensors) == 1:
            if blenderapi.version() >= (2, 74, 5):
                self._frequency /= sensors[0].skippedTicks + 1
            else:
                self._frequency /= sensors[0].frequency + 1
        elif len(sensors) == 0:
            logger.warning("Can't get frequency for " + self.name() + \
                           " as the Game Logic sensor calling the action can't be found.")
        else:
            logger.warning(self.name() + " has too many Game Logic sensors to get " + \
                    "an unambiguous frequency for the action.")
Example no. 2
0
    def __init__(self, obj, parent=None):

        AbstractObject.__init__(self)

        # Fill in the data sent as parameters
        self.bge_object = obj
        self.robot_parent = parent

        self.level = self.bge_object.get("abstraction_level", "default")

        # Variable to indicate the activation status of the component
        self._active = True

        self.check_level()

        # Define the position of sensors with respect
        #  to their robot parent
        # TODO: implement this using morse.helpers.transformation
        if parent:
            self.relative_position = obj.getVectTo(parent.bge_object)

        # Create an instance of the 3d transformation class
        self.position_3d = morse.helpers.transformation.Transformation3d(obj)

        self.initialize_local_data()
        self.update_properties()

        # The actual frequency at which the action is called
        # The 'frequency' setting of the game sensor gives the number of
        # logic ticks skipped between two executions of the action,
        # e.g. game sensor frequency = 0 -> sensor runs at full logic rate
        sensors = blenderapi.getalwayssensors(obj)
        self._frequency = blenderapi.getfrequency()
        # New MORSE_LOGIC sensor, see AbstractComponent.morseable()
        morselogic = [s for s in sensors if s.name.startswith('MORSE_LOGIC')]
        if len(morselogic) == 1:
            if blenderapi.version() >= (2, 74, 5):
                self._frequency /= morselogic[0].skippedTicks + 1
            else:
                self._frequency /= morselogic[0].frequency + 1
        # Backward compatible (some actuators got special logic)
        elif len(sensors) == 1:
            if blenderapi.version() >= (2, 74, 5):
                self._frequency /= sensors[0].skippedTicks + 1
            else:
                self._frequency /= sensors[0].frequency + 1
        elif len(sensors) == 0:
            logger.warning("Can't get frequency for " + self.name() + \
                           " as the Game Logic sensor calling the action can't be found.")
        else:
            logger.warning(self.name() + " has too many Game Logic sensors to get " + \
                    "an unambiguous frequency for the action.")
Example no. 3
0
    def update_Y_forward(self, obj):
        """
        Update the transformation3D to reflect the transformation
        between obj (a blender object) and the blender world origin.
        In this case, the robot moves forward along the Y axis.

        Change the values of yaw, pitch, roll for Blender vehicles.
        Robots that use the Blender vehicle constraints move in the
        direction of the Y axis, contrary to most MORSE components,
        which move along the X axis.
        """
        rot_matrix = obj.orientation
        self.matrix = mathutils.Matrix((rot_matrix[0], \
                                        rot_matrix[1], \
                                        rot_matrix[2]))
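        # The correction matrix re-expresses the vehicle's Y-forward
        # orientation in MORSE's X-forward convention (see the docstring above)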
        self.matrix = self.matrix * self.correction_matrix
        self.matrix.resize_4x4()

        pos = obj.worldPosition
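        # Before Blender 2.62, mathutils matrices were indexed column-first,
        # so the translation goes into matrix[3][i]; from 2.62 on, access is
        # row-first and the translation goes into matrix[i][3]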
        for i in range(0, 3):
            if blenderapi.version() < (2, 62, 0):
                self.matrix[3][i] = pos[i]
            else:
                self.matrix[i][3] = pos[i]
        self.matrix[3][3] = 1

        self.euler = self.matrix.to_euler()
    def update_Y_forward(self, obj):
        """
        Update the transformation3D to reflect the transformation
        between obj (a blender object) and the blender world origin.
        In this case, the robot moves forward along the Y axis.

        Change the values of yaw, pitch, roll for Blender vehicles.
        Robots that use the Blender vehicle constraints move in the
        direction of the Y axis, contrary to most MORSE components,
        which move along the X axis.
        """
        rot_matrix = obj.orientation
        self.matrix = mathutils.Matrix((rot_matrix[0], \
                                        rot_matrix[1], \
                                        rot_matrix[2]))
        self.matrix = self.matrix * self.correction_matrix
        self.matrix.resize_4x4()

        pos = obj.worldPosition
        for i in range(0, 3):
            if blenderapi.version() < (2, 62, 0):
                self.matrix[3][i] = pos[i]
            else:
                self.matrix[i][3] = pos[i]
        self.matrix[3][3] = 1

        self.euler = self.matrix.to_euler()
Example no. 5
0
 def z(self):
     """
     Return the translation along the z-axis
     """
     if blenderapi.version() < (2, 62, 0):
         return self.matrix[3][2]
     else:
         return self.matrix[2][3]
 def z(self):
     """
     Return the translation along the z-axis
     """
     if blenderapi.version() < (2, 62, 0):
         return self.matrix[3][2]
     else:
         return self.matrix[2][3]
Example no. 7
0
 def z(self):
     """
     Return the translation along the z-axis
     """
     if blenderapi.version() < (2, 62, 0):
         return self.matrix[3][2]
     else:
         return self.matrix[2][3]
Example no. 8
0
 def z(self):
     """
     Return the translation along the z-axis
     """
     if blenderapi.version() < (2, 62, 0):
         return self.matrix[3][2]
     else:
         return self.matrix[2][3]
Example no. 9
0
 def _restore_ik_targets(self):
     for c in self._ik_targets.values():
         #Bug in Blender! cf http://developer.blender.org/T37892
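         # (on affected versions the BGE reports the constraint's 'active'
         # flag inverted, hence the reversed test below)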
         if version() < (2, 70, 0):
             if c.active:
                 c.active = True
                 logger.info("Tracking IK target <%s>" % c.target.name)
         else:
             if not c.active:
                 c.active = True
                 logger.info("Tracking IK target <%s>" % c.target.name)
Example no. 10
0
 def _suspend_ik_targets(self):
     for c in self._ik_targets.values():
         #Bug in Blender! cf http://developer.blender.org/T37892
         if blenderapi.version() < (2, 70, 0):
             if not c.active:
                 logger.info("Stop tracking IK target <%s>" % c.target.name)
                 c.active = False
         else:
             if c.active:
                 logger.info("Stop tracking IK target <%s>" % c.target.name)
                 c.active = False
Example no. 11
0
 def _suspend_ik_targets(self):
     for c in self._ik_targets.values():
         #Bug in Blender! cf http://developer.blender.org/T37892
         if version() < (2, 70, 0):
             if not c.active:
                 logger.info("Stop tracking IK target <%s>" % c.target.name)
                 c.active = False
         else:
             if c.active:
                 logger.info("Stop tracking IK target <%s>" % c.target.name)
                 c.active = False
Example no. 12
0
 def _restore_ik_targets(self):
     for c in self._ik_targets.values():
         #Bug in Blender! cf http://developer.blender.org/T37892
         if blenderapi.version() < (2, 70, 0):
             if c.active:
                 c.active = True
                 logger.info("Tracking IK target <%s>" % c.target.name)
         else:
             if not c.active:
                 c.active = True
                 logger.info("Tracking IK target <%s>" % c.target.name)
Example no. 13
0
    def update(self, obj):
        """
        Update the transformation3D to reflect the transformation
        between obj (a blender object) and the blender world origin
        """
        self.matrix = obj.worldOrientation.to_4x4()

        pos = obj.worldPosition
        for i in range(0, 3):
            if blenderapi.version() < (2, 62, 0):
                self.matrix[3][i] = pos[i]
            else:
                self.matrix[i][3] = pos[i]
        self.matrix[3][3] = 1

        self.euler = self.matrix.to_euler()
Example no. 14
0
    def update(self, obj):
        """
        Update the transformation3D to reflect the transformation
        between obj (a blender object) and the blender world origin
        """
        self.matrix = obj.worldOrientation.to_4x4()

        pos = obj.worldPosition
        for i in range(0, 3):
            if blenderapi.version() < (2, 62, 0):
                self.matrix[3][i] = pos[i]
            else:
                self.matrix[i][3] = pos[i]
        self.matrix[3][3] = 1

        self.euler = self.matrix.to_euler()
Example no. 15
0
    def change_arc(self):
        # Change the shape of the arc to show what the sensor detects
        # Display only for 1 layer scanner
        if (2, 65, 0) < blenderapi.version() <= (2, 66, 3):
            # see http://projects.blender.org/tracker/?func=detail&aid=34550
            return  # not supported in 2.66 due to BGE bug #34550
        # TODO rework the LDMRS (3 layers) display [code in 1.0-beta2]
        if self.visible_arc:
            for mesh in self._ray_arc.meshes:
                for m_index in range(len(mesh.materials)):
                    index = 0
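                    # 'index' tracks the position in local_data['point_list']
                    # as the triangle-fan vertices of each layer are visited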
                    for v_index in range(mesh.getVertexArrayLength(m_index)):
                        # Switch to a new layer after a set number of vertices
                        if index % self._vertex_per_layer == 0:
                            index += 1

                        # Skip the first vertex of a triangle. It will always
                        #  be at the origin, and should not be changed
                        if v_index % 3 == 0:
                            continue

                        # Place the next vertex in the triangle
                        if v_index % 3 == 2:
                            point = self.local_data['point_list'][index]
                            if point == [0.0, 0.0, 0.0]:
                                # If there was no intersection, move the vertex
                                # to the laser range
                                point = self._ray_list[index] * self.laser_range
                            vertex = mesh.getVertex(m_index, v_index)
                            vertex.setXYZ(point)
                            index += 1

                        # Set the final vertex, in the correct order to have
                        #  the normals facing upwards.
                        if v_index % 3 == 1:
                            point = self.local_data['point_list'][index - 1]
                            if point == [0.0, 0.0, 0.0]:
                                # If there was no intersection, move the vertex
                                # to the laser range
                                point = self._ray_list[index -
                                                       1] * self.laser_range
                            vertex = mesh.getVertex(m_index, v_index)
                            vertex.setXYZ(point)
Example no. 16
0
 def change_arc(self):
     # Change the shape of the arc to show what the sensor detects
     # Display only for 1 layer scanner
     if (2, 65, 0) < blenderapi.version() <= (2, 66, 3):
         # see http://projects.blender.org/tracker/?func=detail&aid=34550
         return # not supported in 2.66 due to BGE bug #34550
     # TODO rework the LDMRS (3 layers) display [code in 1.0-beta2]
     if self.visible_arc and self._layers == 1:
         for mesh in self._ray_arc.meshes:
             for m_index in range(len(mesh.materials)):
                 # Skip the first vertex (located at the center of the sensor)
                 for v_index in range(1, mesh.getVertexArrayLength(m_index)):
                     vertex = mesh.getVertex(m_index, v_index)
                     point = self.local_data['point_list'][v_index-1]
                     if point == [0.0, 0.0, 0.0]:
                         # If there was no intersection, move the vertex
                         # to the laser range
                         point = self._ray_list[v_index-1] * self.laser_range
                     vertex.setXYZ(point[:3])
Example no. 17
0
    def default_action(self):
        """ Switch on/off the light. """
        # if no changes, return
        if self._last == self.local_data['emit']:
            return

        if self.local_data['emit']:
            self.light.energy = self._energy
        else:
            self.light.energy = 0.0

        # Workaround: Blender < 2.66 did not share light energy between scenes
        if blenderapi.version() < (2, 66, 0):
            # for each camera's scene: update the light
            for scene in blenderapi.get_scene_list():
                if scene.name not in ['S.MORSE_ENV', 'S.MORSE_LOGIC'] and \
                        self.light.name in scene.objects:
                    scene.objects[self.light.name].energy = self.light.energy

        self._last = self.local_data['emit']
Example no. 18
0
    def update(self, obj):
        """
        Update the transformation3D to reflect the transformation
        between obj (a blender object) and the blender world origin
        """
        rot_matrix = obj.orientation
        self.matrix = mathutils.Matrix((rot_matrix[0], \
                                        rot_matrix[1], \
                                        rot_matrix[2]))
        self.matrix.resize_4x4()

        pos = obj.worldPosition
        for i in range(0, 3):
            if blenderapi.version() < (2, 62, 0):
                self.matrix[3][i] = pos[i]
            else:
                self.matrix[i][3] = pos[i]
        self.matrix[3][3] = 1

        self.euler = self.matrix.to_euler()
    def update(self, obj):
        """
        Update the transformation3D to reflect the transformation
        between obj (a blender object) and the blender world origin
        """
        rot_matrix = obj.orientation
        self.matrix = mathutils.Matrix((rot_matrix[0], \
                                        rot_matrix[1], \
                                        rot_matrix[2]))
        self.matrix.resize_4x4()

        pos = obj.worldPosition
        for i in range(0, 3):
            if blenderapi.version() < (2, 62, 0):
                self.matrix[3][i] = pos[i]
            else:
                self.matrix[i][3] = pos[i]
        self.matrix[3][3] = 1

        self.euler = self.matrix.to_euler()
Example no. 20
0
 def change_arc(self):
     # Change the shape of the arc to show what the sensor detects
     # Display only for 1 layer scanner
     if (2, 65, 0) < blenderapi.version() <= (2, 66, 3):
         # see http://projects.blender.org/tracker/?func=detail&aid=34550
         return  # not supported in 2.66 due to BGE bug #34550
     # TODO rework the LDMRS (3 layers) display [code in 1.0-beta2]
     if self.visible_arc and self._layers == 1:
         for mesh in self._ray_arc.meshes:
             for m_index in range(len(mesh.materials)):
                 # Skip the first vertex (located at the center of the sensor)
                 for v_index in range(1,
                                      mesh.getVertexArrayLength(m_index)):
                     vertex = mesh.getVertex(m_index, v_index)
                     point = self.local_data['point_list'][v_index - 1]
                     if point == [0.0, 0.0, 0.0]:
                         # If there was no intersection, move the vertex
                         # to the laser range
                         point = self._ray_list[v_index -
                                                1] * self.laser_range
                     vertex.setXYZ(point[:3])
Example no. 21
0
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.bge_object.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Assuming it consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("The video camera could not be properly initialized."
                         "The children object could not be found."
                         "Best solution is to re-link the camera.")
            return False

        # Get the reference to the scene
        scene_map = blenderapi.get_scene_map()
        logger.info("Scene %s from %s" %
                    (self.scene_name, repr(scene_map.keys())))
        self._scene = scene_map[self.scene_name]
        self._morse_scene = scene_map['S.MORSE_LOGIC']
        """
        Compute the relation between objects in the current scene and
        objects in the main logic scene.

        The logic is a bit involved: with groups, we can have several
        objects with the same name (but different ids). In that case, we
        follow the hierarchy in both scenes to find the correspondence
        (assuming no recursive groups).

        known_ids is used to track objects already referenced, so that we
        do not include them twice (and thereby miss the fact that the same
        name can reference multiple different objects).

        I'm definitely not sure it is correct at all, it is a really,
        really dark corner of Blender :). But it seems to do the job!
        """
        self._scene_syncable_objects = []
        known_ids = set()
        for obj in self._scene.objects:
            if obj.name != '__default__cam__' and id(obj) not in known_ids:
                if blenderapi.version() < (2, 63, 0):
                    members = None
                elif blenderapi.version() < (2, 64, 0):
                    members = obj.group
                elif blenderapi.version() < (2, 65, 0):
                    members = obj.group_parent
                else:
                    members = obj.groupMembers
                if not members:
                    self._scene_syncable_objects.append(
                        (obj, self._morse_scene.objects[obj.name]))
                    known_ids.add(id(obj))
                else:
                    if blenderapi.version() < (2, 64, 0):
                        main_members = self._morse_scene.objects[
                            obj.name].group
                    elif blenderapi.version() < (2, 65, 0):
                        main_members = self._morse_scene.objects[
                            obj.name].group_parent
                    else:
                        main_members = self._morse_scene.objects[
                            obj.name].groupMembers
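                    # Pair each group member in this scene with its
                    # counterpart in the logic scene, then pair their
                    # children by name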
                    for i in range(0, len(main_members)):
                        self._scene_syncable_objects.append(
                            (members[i], main_members[i]))
                        known_ids.add(id(members[i]))
                        childs = members[i].childrenRecursive
                        main_childs = main_members[i].childrenRecursive
                        for child in childs:
                            self._scene_syncable_objects.append(
                                (child, main_childs[child.name]))
                            known_ids.add(id(child))

        # Link the objects using bge.texture
        if not blenderapi.hascameras():
            blenderapi.initcameras()

        mat_id = blenderapi.texture().materialID(screen, material_name)
        vt_camera = blenderapi.texture().Texture(screen, mat_id)
        vt_camera.source = blenderapi.texture().ImageRender(
            self._scene, camera)

        # Set the focal length of the camera using the Game Logic Property
        camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)

        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" %
                    camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" %
                    camera.far)

        # Set the background to be used for the render
        vt_camera.source.background = self.bg_color
        # Define an image size. It must be a power of two. Default: 512 * 512
        vt_camera.source.capsize = [self.image_width, self.image_height]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" %
                    (self.name(), vt_camera.source.capsize))

        # Reverse the image (boolean game-property)
        vt_camera.source.flip = self.vertical_flip

        try:
            # Use the Z-Buffer as an image texture for the camera
            if self.retrieve_zbuffer:
                vt_camera.source.zbuff = True
            # Use the Z-Buffer as input with an array of depths
            if self.retrieve_depth:
                vt_camera.source.depth = True
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)

        blenderapi.cameras()[self.name()] = vt_camera
Example no. 22
0
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.bge_object.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Assuming it consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("The video camera could not be properly initialized."
                         "The children object could not be found."
                         "Best solution is to re-link the camera.")
            return False

        # Get the reference to the scene
        scene_map = blenderapi.get_scene_map()
        logger.info("Scene %s from %s"% (self.scene_name, repr(scene_map.keys()) ) )
        self._scene = scene_map[self.scene_name]
        self._morse_scene = scene_map['S.MORSE_LOGIC']

        """
        Compute the relation between objects in the current scene and
        objects in the main logic scene.

        The logic is a bit involved: with groups, we can have several
        objects with the same name (but different ids). In that case, we
        follow the hierarchy in both scenes to find the correspondence
        (assuming no recursive groups).

        known_ids is used to track objects already referenced, so that we
        do not include them twice (and thereby miss the fact that the same
        name can reference multiple different objects).

        I'm definitely not sure it is correct at all, it is a really,
        really dark corner of Blender :). But it seems to do the job!
        """
        self._scene_syncable_objects = []
        known_ids = set()
        for obj in self._scene.objects:
            if obj.name != '__default__cam__' and id(obj) not in known_ids:
                if blenderapi.version() < (2, 63, 0):
                    members = None
                elif blenderapi.version() < (2, 64, 0):
                    members = obj.group
                elif blenderapi.version() < (2, 65, 0):
                    members = obj.group_parent
                else:
                    members = obj.groupMembers
                if not members:
                    self._scene_syncable_objects.append(
                            (obj, self._morse_scene.objects[obj.name]))
                    known_ids.add(id(obj))
                else:
                    if blenderapi.version() < (2, 64, 0):
                        main_members = self._morse_scene.objects[obj.name].group
                    elif blenderapi.version() < (2, 65, 0):
                        main_members = self._morse_scene.objects[obj.name].group_parent
                    else:
                        main_members = self._morse_scene.objects[obj.name].groupMembers
                    for i in range(0, len(main_members)):
                        self._scene_syncable_objects.append(
                                (members[i], main_members[i]))
                        known_ids.add(id(members[i]))
                        childs = members[i].childrenRecursive
                        main_childs = main_members[i].childrenRecursive
                        for child in childs:
                            self._scene_syncable_objects.append(
                                    (child, main_childs[child.name]))
                            known_ids.add(id(child))


        # Link the objects using bge.texture
        if not blenderapi.hascameras():
            blenderapi.initcameras()

        mat_id = blenderapi.texture().materialID(screen, material_name)
        vt_camera = blenderapi.texture().Texture(screen, mat_id)
        vt_camera.source = blenderapi.texture().ImageRender(self._scene, camera)

        # Set the focal length of the camera using the Game Logic Property
        camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)

        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" %
                       camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" %
                       camera.far)

        # Set the background to be used for the render
        vt_camera.source.background = self.bg_color
        # Define an image size. It must be a power of two. Default: 512 * 512
        vt_camera.source.capsize = [self.image_width, self.image_height]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" %
                (self.name(), vt_camera.source.capsize))

        # Reverse the image (boolean game-property)
        vt_camera.source.flip = self.vertical_flip

        try:
            # Use the Z-Buffer as an image texture for the camera
            if self.retrieve_zbuffer:
                vt_camera.source.zbuff = True
            # Use the Z-Buffer as input with an array of depths
            if self.retrieve_depth:
                vt_camera.source.depth = True
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)

        blenderapi.cameras()[self.name()] = vt_camera
Example no. 23
0
    def default_action(self):
        """
        Do ray tracing from the SICK object using a semicircle

        Generates a list of lists, with the points located.
        Also deforms the geometry of the arc associated to the SICK,
        as a way to display the results obtained.
        """
        #logger.debug("ARC POSITION: [%.4f, %.4f, %.4f]" %
        #                (self.bge_object.position[0],
        #                 self.bge_object.position[1],
        #                 self.bge_object.position[2]))

        # Get the inverse of the transformation matrix
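        # (used below to bring world-frame hit points back into the sensor frame)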
        inverse = self.position_3d.matrix.inverted()

        index = 0
        for ray in self._ray_list:
            # Transform the ray to the current position and rotation
            #  of the sensor
            correct_ray = self.position_3d.matrix * ray

            # Shoot a ray towards the target
            target, point, normal = self.bge_object.rayCast(correct_ray, None,
                                                             self.laser_range)

            #logger.debug("\tTarget, point, normal: %s, %s, %s" %
            #               (target, point, normal))

            # Register when an intersection occurred
            if target:
                distance = self.bge_object.getDistanceTo(point)
                # Return the point to the reference of the sensor
                new_point = inverse * point

                #logger.debug("\t\tGOT INTERSECTION WITH RAY: [%.4f, %.4f, %.4f]" % (correct_ray[0], correct_ray[1], correct_ray[2]))
                #logger.debug("\t\tINTERSECTION AT: [%.4f, %.4f, %.4f] = %s" % (point[0], point[1], point[2], target))
            # If there was no intersection, store the default values
            else:
                distance = self.laser_range
                new_point = [0.0, 0.0, 0.0]

            # Save the information gathered
            self.local_data['point_list'][index] = new_point[:]
            self.local_data['range_list'][index] = distance
            index += 1

        # Change the shape of the arc to show what the sensor detects
        # Display only for 1 layer scanner
        if (2, 65, 0) < blenderapi.version() <= (2, 66, 3):
            # see http://projects.blender.org/tracker/?func=detail&aid=34550
            return # not supported in 2.66 due to BGE bug #34550
        # TODO rework the LDMRS (3 layers) display [code in 1.0-beta2]
        if self.visible_arc and self._layers == 1:
            for mesh in self._ray_arc.meshes:
                for m_index in range(len(mesh.materials)):
                    # Skip the first vertex (located at the center of the sensor)
                    for v_index in range(1, mesh.getVertexArrayLength(m_index)):
                        vertex = mesh.getVertex(m_index, v_index)
                        point = self.local_data['point_list'][v_index-1]
                        if point == [0.0, 0.0, 0.0]:
                            # If there was no intersection, move the vertex
                            # to the laser range
                            point = self._ray_list[v_index-1] * self.laser_range
                        vertex.setXYZ(point)
    def default_action(self):
        """
        Do ray tracing from the SICK object using a semicircle

        Generates a list of lists, with the points located.
        Also deforms the geometry of the arc associated to the SICK,
        as a way to display the results obtained.
        """
        #logger.debug("ARC POSITION: [%.4f, %.4f, %.4f]" %
        #                (self.bge_object.position[0],
        #                 self.bge_object.position[1],
        #                 self.bge_object.position[2]))

        # Get the inverse of the transformation matrix
        inverse = self.position_3d.matrix.inverted()

        index = 0
        for ray in self._ray_list:
            # Transform the ray to the current position and rotation
            #  of the sensor
            correct_ray = self.position_3d.matrix * ray

            # Shoot a ray towards the target
            target, point, normal = self.bge_object.rayCast(
                correct_ray, None, self.laser_range)

            #logger.debug("\tTarget, point, normal: %s, %s, %s" %
            #               (target, point, normal))

            # Register when an intersection occurred
            if target:
                distance = self.bge_object.getDistanceTo(point)
                # Return the point to the reference of the sensor
                new_point = inverse * point

                #logger.debug("\t\tGOT INTERSECTION WITH RAY: [%.4f, %.4f, %.4f]" % (correct_ray[0], correct_ray[1], correct_ray[2]))
                #logger.debug("\t\tINTERSECTION AT: [%.4f, %.4f, %.4f] = %s" % (point[0], point[1], point[2], target))
            # If there was no intersection, store the default values
            else:
                distance = self.laser_range
                new_point = [0.0, 0.0, 0.0]

            # Save the information gathered
            self.local_data['point_list'][index] = new_point[:]
            self.local_data['range_list'][index] = distance
            index += 1

        # Change the shape of the arc to show what the sensor detects
        # Display only for 1 layer scanner
        if (2, 65, 0) < blenderapi.version() <= (2, 66, 3):
            # see http://projects.blender.org/tracker/?func=detail&aid=34550
            return  # not supported in 2.66 due to BGE bug #34550
        # TODO rework the LDMRS (3 layers) display [code in 1.0-beta2]
        if self.visible_arc and self._layers == 1:
            for mesh in self._ray_arc.meshes:
                for m_index in range(len(mesh.materials)):
                    # Skip the first vertex (located at the center of the sensor)
                    for v_index in range(1,
                                         mesh.getVertexArrayLength(m_index)):
                        vertex = mesh.getVertex(m_index, v_index)
                        point = self.local_data['point_list'][v_index - 1]
                        if point == [0.0, 0.0, 0.0]:
                            # If there was no intersection, move the vertex
                            # to the laser range
                            point = self._ray_list[v_index -
                                                   1] * self.laser_range
                        vertex.setXYZ(point)