Example #1
    def __init__(self, obj, parent=None):
        """
        Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        logger.info('%s initialization' % obj.name)
        # Call the constructor of the parent class
        Sensor.__init__(self, obj, parent)

        arc_prefix = 'Arc_'

        # Look for a child arc to use for the scans
        for child in obj.children:
            if arc_prefix in child.name:
                self._ray_arc = child
                logger.info("Sick: Using arc object: '%s'" % self._ray_arc)
                break

        # Set its visibility, according to the settings
        self._ray_arc.setVisible(self.visible_arc)
        self._ray_list = []

        # Create an empty list to store the intersection points
        self.local_data['point_list'] = []
        self.local_data['range_list'] = []

        # Get the datablock of the arc, to extract its vertices
        ray_object = blenderapi.objectdata(self._ray_arc.name)
        for vertex in ray_object.data.vertices:
            logger.debug ("Vertex %d = %s" % (vertex.index, vertex.co))

            # Skip the first vertex.
            # It is the one located at the center of the sensor
            if vertex.index == 0:
                continue

            # Store the position of the vertex in a list
            # The position is already given as a mathutils.Vector
            self._ray_list.append(vertex.co)

            # Insert empty points into the data list
            self.local_data['point_list'].append([0.0, 0.0, 0.0])
            # Insert zeros into the range list
            self.local_data['range_list'].append(0.0)

            logger.debug("RAY %d = [%.4f, %.4f, %.4f]" %
                         (vertex.index, self._ray_list[vertex.index-1][0],
                                        self._ray_list[vertex.index-1][1],
                                        self._ray_list[vertex.index-1][2]))

        # Get some information to be able to deform the arcs
        if self.visible_arc:
            self._layers = 1
            if 'layers' in self.bge_object:
                self._layers = self.bge_object['layers']
            self._vertex_per_layer = len(self._ray_list) // self._layers

        logger.info('Component initialized, runs at %.2f Hz', self.frequency)
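The layer bookkeeping at the end of this constructor is plain integer arithmetic: once the center vertex is skipped, the remaining rays are divided evenly among the scan layers. A minimal standalone sketch of that partitioning (the counts and the layer-by-layer ordering are assumptions for illustration, not taken from the MORSE asset):

    # Hypothetical arc mesh: 1 center vertex plus 540 rays over 3 layers.
    ray_count = 541 - 1              # vertex 0 (sensor center) is skipped
    layers = 3                       # would come from self.bge_object['layers']
    vertex_per_layer = ray_count // layers

    # Assuming rays are stored layer by layer, each layer is one slice.
    ray_list = list(range(ray_count))
    layer_slices = [ray_list[i * vertex_per_layer:(i + 1) * vertex_per_layer]
                    for i in range(layers)]
    assert all(len(s) == 180 for s in layer_slices)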
Example #2
    def get_object_global_bbox(self, object_name):
        """ Returns the global bounding box of an object as list encapsulated as
        string: "[[x0, y0, z0 ], ... ,[x7, y7, z7]]".
        
        :param string object_name: The name of the object.
        """
        # Test whether the object exists in the scene  
        b_obj = get_obj_by_name(object_name)
    
        # Get bounding box of object
        bb = blenderapi.objectdata(object_name).bound_box

        # Group x,y,z-coordinates as lists 
        bbox_local = [[bb_corner[i] for i in range(3)] for bb_corner in bb]
    
        world_pos = b_obj.worldPosition
        world_ori = b_obj.worldOrientation.to_3x3()

        bbox_global = []
        for corner in bbox_local:
            vec = world_ori * mathutils.Vector(corner) + \
                mathutils.Vector(world_pos) 
            bbox_global.append([vec.x, vec.y, vec.z])
        
        return json.dumps(bbox_global)
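Because the corner list is serialized with json.dumps, callers get a string back and must decode it. A minimal usage sketch, assuming a hypothetical `supervision` object that exposes this service and a scene object named 'Chair_1':

    import json

    bbox_str = supervision.get_object_global_bbox('Chair_1')
    corners = json.loads(bbox_str)        # eight [x, y, z] corner lists
    assert len(corners) == 8
    xs = [c[0] for c in corners]
    print('x extent: %.3f .. %.3f' % (min(xs), max(xs)))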
Example #3
    def __init__(self, obj, parent=None):
        """
        Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        logger.info('%s initialization' % obj.name)
        # Call the constructor of the parent class
        Sensor.__init__(self, obj, parent)

        arc_prefix = 'Arc_'

        # Look for a child arc to use for the scans
        for child in obj.children:
            if arc_prefix in child.name:
                self._ray_arc = child
                logger.info("Sick: Using arc object: '%s'" % self._ray_arc)
                break

        # Set its visibility, according to the settings
        self._ray_arc.setVisible(self.visible_arc)
        self._ray_list = []

        # Create an empty list to store the intersection points
        self.local_data['point_list'] = []
        self.local_data['range_list'] = []

        # Get the datablock of the arc, to extract its vertices
        ray_object = blenderapi.objectdata(self._ray_arc.name)
        for vertex in ray_object.data.vertices:
            logger.debug("Vertex %d = %s" % (vertex.index, vertex.co))

            # Skip the first vertex.
            # It is the one located at the center of the sensor
            if vertex.index == 0:
                continue

            # Store the position of the vertex in a list
            # The position is already given as a mathutils.Vector
            self._ray_list.append(vertex.co)

            # Insert empty points into the data list
            self.local_data['point_list'].append([0.0, 0.0, 0.0])
            # Insert zeros into the range list
            self.local_data['range_list'].append(0.0)

            logger.debug("RAY %d = [%.4f, %.4f, %.4f]" %
                         (vertex.index, self._ray_list[vertex.index - 1][0],
                          self._ray_list[vertex.index - 1][1],
                          self._ray_list[vertex.index - 1][2]))

        # Get some information to be able to deform the arcs
        if self.visible_arc:
            self._layers = 1
            if 'layers' in self.bge_object:
                self._layers = self.bge_object['layers']
            self._vertex_per_layer = len(self._ray_list) // self._layers

        logger.info('Component initialized, runs at %.2f Hz', self.frequency)
Example #4
    def get_object_global_bbox(self, object_name):
        """ Returns the global bounding box of an object as list encapsulated as
        string: "[[x0, y0, z0 ], ... ,[x7, y7, z7]]".
        
        :param string object_name: The name of the object.
        """
        # Test whether the object exists in the scene
        b_obj = get_obj_by_name(object_name)

        # Get bounding box of object
        bb = blenderapi.objectdata(object_name).bound_box

        # Group x,y,z-coordinates as lists
        bbox_local = [[bb_corner[i] for i in range(3)] for bb_corner in bb]

        world_pos = b_obj.worldPosition
        world_ori = b_obj.worldOrientation.to_3x3()

        bbox_global = []
        for corner in bbox_local:
            vec = world_ori * mathutils.Vector(corner) + \
                mathutils.Vector(world_pos)
            bbox_global.append([vec.x, vec.y, vec.z])

        return json.dumps(bbox_global)
Example #5
    def __init__(self, obj, parent=None):
        LaserScanner.__init__(self, obj, parent)
        self.local_data['remission_list'] = []
        ray_object = blenderapi.objectdata(self._ray_arc.name)
        for vertex in ray_object.data.vertices:
            # Skip the first vertex.
            # It is the one located at the center of the sensor
            if vertex.index == 0:
                continue

            # Insert zeros in the remission list
            self.local_data['remission_list'].append(0.0)
Example #6
    def __init__(self, obj, parent=None):
        LaserScanner.__init__(self, obj, parent)
        self.local_data['remission_list'] = []
        ray_object = blenderapi.objectdata(self._ray_arc.name)
        for vertex in ray_object.data.vertices:
            # Skip the first vertex.
            # It is the one located at the center of the sensor
            if vertex.index == 0:
                continue

            # Insert zeros in the remission list
            self.local_data['remission_list'].append(0.0) 
Example #7
    def get_object_bbox(self, object_name):
        """ Returns the local bounding box of an object as list encapsulated as
        string: "[[x0, y0, z0 ], ... ,[x7, y7, z7]]".
        
        :param string object_name: The name of the object.
        """
        # Test whether the object exists in the scene
        get_obj_by_name(object_name)

        # Get bounding box of object
        bb = blenderapi.objectdata(object_name).bound_box

        # Group x,y,z-coordinates as lists
        bbox_local = [[bb_corner[i] for i in range(3)] for bb_corner in bb]

        return json.dumps(bbox_local)
Example #8
    def get_object_bbox(self, object_name):
        """ Returns the local bounding box of an object as list encapsulated as
        string: "[[x0, y0, z0 ], ... ,[x7, y7, z7]]".
        
        :param string object_name: The name of the object.
        """
        # Test whether the object exists in the scene  
        get_obj_by_name(object_name)
    
        # Get bounding box of object
        bb = blenderapi.objectdata(object_name).bound_box

        # Group x,y,z-coordinates as lists 
        bbox_local = [[bb_corner[i] for i in range(3)] for bb_corner in bb]

        return json.dumps(bbox_local)
Example #9
    def __init__(self, obj, parent=None):
        """ Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        logger.info('%s initialization' % obj.name)
        # Call the constructor of the parent class
        morse.sensors.camera.Camera.__init__(self, obj, parent)

        # Locate the Blender camera object associated with this sensor
        main_obj = self.bge_object
        self.blender_cam = None  # defensive: stays None if no camera child is found
        for obj in main_obj.children:
            if hasattr(obj, 'lens'):
                self.blender_cam = obj
                logger.info("Camera object: {0}".format(self.blender_cam))
                break
        if not self.blender_cam:
            logger.error("no camera object associated to the semantic camera. \
                         The semantic camera requires a standard Blender  \
                         camera in its children.")

        # TrackedObject is a dictionary containing the list of tracked objects
        # (->meshes with a class property set up) as keys
        #  and the bounding boxes of these objects as value.
        if 'trackedObjects' not in blenderapi.persistantstorage():
            logger.info('Initialization of tracked objects:')
            blenderapi.persistantstorage().trackedObjects = \
                            dict.fromkeys(passive_objects.active_objects())

            # Store the bounding box of the marked objects
            for obj in blenderapi.persistantstorage().trackedObjects.keys():

                # bound_box returns the bounding box in local space
                #  instead of world space.
                blenderapi.persistantstorage().trackedObjects[obj] = \
                                    blenderapi.objectdata(obj.name).bound_box

                details = passive_objects.details(obj)
                logger.info('    - {%s} (type:%s)' %
                            (details['label'], details['type']))

        if self.noocclusion:
            logger.info(
                "Semantic camera running in 'no occlusion' mode (fast mode).")
        logger.info("Component initialized, runs at %.2f Hz ", self.frequency)
Example #10
    def __init__(self, obj, parent=None):
        """ Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        
        logger.info('%s initialization' % obj.name)
        # Call the constructor of the parent class
        morse.sensors.camera.Camera.__init__(self, obj, parent)

        # Locate the Blender camera object associated with this sensor
        main_obj = self.bge_object
        self.blender_cam = None  # defensive: stays None if no camera child is found
        for obj in main_obj.children:
            if hasattr(obj, 'lens'):
                self.blender_cam = obj
                logger.info("Camera object: {0}".format(self.blender_cam))
                break
        if not self.blender_cam:
            logger.error("no camera object associated to the semantic camera. \
                         The semantic camera requires a standard Blender  \
                         camera in its children.")

        # TrackedObject is a dictionary containing the list of tracked objects
        # (->meshes with a class property set up) as keys
        #  and the bounding boxes of these objects as value.
        if 'trackedObjects' not in blenderapi.persistantstorage():
            logger.info('Initialization of tracked objects:')
            blenderapi.persistantstorage().trackedObjects = \
                            dict.fromkeys(passive_objects.active_objects())

            # Store the bounding box of the marked objects
            for obj in blenderapi.persistantstorage().trackedObjects.keys():

                # bound_box returns the bounding box in local space
                #  instead of world space.
                blenderapi.persistantstorage().trackedObjects[obj] = \
                                    blenderapi.objectdata(obj.name).bound_box

                details = passive_objects.details(obj)
                logger.info('    - {%s} (type:%s)' %
                            (details['label'], details['type']))

        if self.noocclusion:
            logger.info("Semantic camera running in 'no occlusion' mode (fast mode).")
        logger.info("Component initialized, runs at %.2f Hz ", self.frequency)
Example #11
    def __init__(self, obj, parent=None):
        """ Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        logger.info("%s initialization" % obj.name)
        # Call the constructor of the parent class
        super(self.__class__, self).__init__(obj, parent)

        # Locate the Blender camera object associated with this sensor
        main_obj = self.bge_object
        self.blender_cam = None  # defensive: stays None if no camera child is found
        for obj in main_obj.children:
            if hasattr(obj, "lens"):
                self.blender_cam = obj
                logger.info("Camera object: {0}".format(self.blender_cam))
                break
        if not self.blender_cam:
            logger.error(
                "no camera object associated to the semantic camera. \
                         The semantic camera requires a standard Blender  \
                         camera in its children."
            )

        # TrackedObject is a dictionary containing the list of tracked objects
        # (->meshes with a class property set up) as keys
        #  and the bounding boxes of these objects as value.
        if not "trackedObjects" in blenderapi.persistantstorage():
            logger.info("Initialization of tracked objects:")
            blenderapi.persistantstorage().trackedObjects = dict.fromkeys(passive_objects.active_objects())

            # Store the bounding box of the marked objects
            for obj in blenderapi.persistantstorage().trackedObjects.keys():

                # bound_box returns the bounding box in local space
                #  instead of world space.
                blenderapi.persistantstorage().trackedObjects[obj] = blenderapi.objectdata(obj.name).bound_box

                details = passive_objects.details(obj)
                logger.info("    - {%s} (type:%s)" % (details["label"], details["type"]))

        logger.info("Component initialized")
Example #12
    def __init__(self, obj, parent=None):
        """ Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        logger.info('%s initialization' % obj.name)
        # Call the constructor of the parent class
        morse.sensors.camera.Camera.__init__(self, obj, parent)

        # Locate the Blender camera object associated with this sensor
        main_obj = self.bge_object
        self.blender_cam = None  # defensive: stays None if no camera child is found
        for obj in main_obj.children:
            if hasattr(obj, 'lens'):
                self.blender_cam = obj
                logger.info("Camera object: {0}".format(self.blender_cam))
                break
        if not self.blender_cam:
            logger.error("no camera object associated to the semantic camera. \
                         The semantic camera requires a standard Blender  \
                         camera in its children.")

        # TrackedObject is a dictionary containing the list of tracked objects
        # (->meshes with a class property set up) as keys
        #  and the bounding boxes of these objects as value.
        self.trackedObjects = {}
        for o in blenderapi.scene().objects:
            tagged = (('Type' in o and o['Type'] == self.tag)
                      or (self.tag in o and bool(o[self.tag])))

            if tagged:
                self.trackedObjects[o] = blenderapi.objectdata(
                    o.name).bound_box
                logger.warning('    - %s' % o.name)

        if self.noocclusion:
            logger.info(
                "Semantic camera running in 'no occlusion' mode (fast mode).")
        logger.info("Component initialized, runs at %.2f Hz ", self.frequency)
Example #13
    def __init__(self, obj, parent=None):
        """ Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        logger.info('%s initialization' % obj.name)
        # Call the constructor of the parent class
        morse.sensors.camera.Camera.__init__(self, obj, parent)

        # Locate the Blender camera object associated with this sensor
        main_obj = self.bge_object
        self.blender_cam = None  # defensive: stays None if no camera child is found
        for obj in main_obj.children:
            if hasattr(obj, 'lens'):
                self.blender_cam = obj
                logger.info("Camera object: {0}".format(self.blender_cam))
                break
        if not self.blender_cam:
            logger.error("no camera object associated to the semantic camera. \
                         The semantic camera requires a standard Blender  \
                         camera in its children.")

        # TrackedObject is a dictionary containing the list of tracked objects
        # (->meshes with a class property set up) as keys
        #  and the bounding boxes of these objects as value.
        self.trackedObjects = {}
        for o in blenderapi.scene().objects:
            tagged = (('Type' in o and o['Type'] == self.tag)
                      or (self.tag in o and bool(o[self.tag])))
            if tagged:
                self.trackedObjects[o] = blenderapi.objectdata(o.name).bound_box
                logger.warning('    - tracking %s' % o.name)

        if self.noocclusion:
            logger.info("Semantic camera running in 'no occlusion' mode (fast mode).")
        logger.info("Component initialized, runs at %.2f Hz ", self.frequency)
Example #14
 def GetWheelRadius(self, wheelName):
     dims = blenderapi.objectdata(wheelName).dimensions
     # average the x and y dimension to get diameter - divide by 2 for radius
     return (dims[0] + dims[1]) / 4
Example #15
 def get_wheel_radius(self, wheel_name):
     dims = blenderapi.objectdata(wheel_name).dimensions
     # average the x and y dimension to get diameter - divide by 2 for radius
     radius = (dims[0] + dims[1]) / 4
     return radius
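The comment's arithmetic, spelled out: the wheel's x and y extents are two estimates of its diameter, so their average is (dims[0] + dims[1]) / 2, and halving that for the radius gives (dims[0] + dims[1]) / 4. A worked example with made-up dimensions:

    dims = (0.50, 0.52, 0.10)    # hypothetical wheel extents in meters (x, y, z)
    diameter = (dims[0] + dims[1]) / 2
    radius = diameter / 2
    assert radius == (dims[0] + dims[1]) / 4
    print('radius = %.3f m' % radius)    # radius = 0.255 m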
Example #16
 def GetWheelRadius(self, wheelName):
     dims = blenderapi.objectdata(wheelName).dimensions
     # average the x and y dimension to get diameter - divide by 2 for radius
     return (dims[0] + dims[1]) / 4
Example #17
def interact(cont):
    """
    Script for opening doors, drawers and grabbing objects.

    Press the left mouse button to open, close or grab;
    press the right mouse button to drop the currently selected object.
    """

    ow = cont.owner

    # get the suffix of the human to reference the right objects
    suffix = ow.name[-4:] if ow.name[-4] == "." else ""

    right_hand = objects['IK_Target_Empty.R' + suffix]
    look = objects['Target_Empty' + suffix]
    human = objects[ow.parent.parent.parent.name + suffix]

    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return
    
    lmb = cont.sensors['LMB']
    ray = cont.sensors['Ray']
    cam = ray.owner
    lay_down_ray = cont.sensors['LayDownRay']
    rmb = cont.sensors['RMB']
    space = cont.sensors['SPACEBAR']
    head = objects['Head_Empty' + suffix]
    hand = objects['Hand.R' + suffix]

    
    # Get the focusing object:
    # A ray sensor is attached to the HumanCamera sensor.
    # It returns all colliding objects in a 10 cm range of the hand.
    # We filter the result to keep only objects that have the 'Object'
    # property or that have children with the 'Object' property.
    focus = None
    prox_obj = ray.hitObject                     # focused object
    if prox_obj:
        if 'Object' in prox_obj:
            focus = prox_obj
        elif 'Door' in prox_obj or 'Drawer' in prox_obj or 'Switch' in prox_obj:
            focus = prox_obj
        else:
            for obj in prox_obj.children:
                if 'Object' in obj:
                    focus = obj
    
    # set the overlay scene and change the displayed text
    # and texture
    if human['Manipulate'] and focus:
            
        can_be_manipulated = False

        if focus in passive_objects.graspable_objects():
            can_be_manipulated = True
            if not ow['selected']:
                ow['Status'] = 'Pick up ' + passive_objects.label(focus)
            else:
                ow['Status'] = passive_objects.label(focus)
        elif 'Door' in focus or 'Drawer' in focus:
            can_be_manipulated = True
            
            try:
                if focus['Open']:
                    ow['Status'] = 'Close ' + str(focus['Description'])
                else:
                    ow['Status'] = 'Open ' + str(focus['Description'])
            except KeyError:
                logger.warning('Key missing in focused Object ' + focus.name +
                      ' --- no description given')
        elif 'Switch' in focus:
            can_be_manipulated = True
            if objects[focus['Switch']]['On']:
                ow['Status'] = "Turn off " + focus['Switch']
            else:
                ow['Status'] = "Turn on " + focus['Switch']
        else:
            ow['Status'] = None
    else:
        ow['Status'] = None

    if human['Manipulate']:
        if crosshairs not in scene.post_draw:
            scene.post_draw.append(crosshairs)
    else:
        if crosshairs in scene.post_draw:
            scene.post_draw.remove(crosshairs)


    if ow['Status']:
        if write_interaction_status not in scene.post_draw:
            scene.post_draw.append(write_interaction_status)
        if status_image not in scene.post_draw:
            scene.post_draw.append(status_image)
    else:
        if write_interaction_status in scene.post_draw:
            scene.post_draw.remove(write_interaction_status)
        if status_image in scene.post_draw:
            scene.post_draw.remove(status_image)



    if space.positive:
        # blocks mouse movement if interactable object is focused 
        try:
            if ('Door' in focus or 'Object' in focus or 'Drawer' in focus) and not ow['selected']:

                human['FOCUSED'] = True
                vect = Matrix.OrthoProjection('XY', 3) * human.getVectTo(focus)[1]
                human.alignAxisToVect(vect, 0, 1.0)
                # align the local x axis to point to the focused object
            else:
                human['FOCUSED'] = False
        except TypeError:
            human['FOCUSED'] = False
    else:
        human['FOCUSED'] = False


    try:
        if focus in passive_objects.graspable_objects():
            if lmb.positive and not ow['selected']:
                # set a property - a property-sensor will fire the grab-function
                ow['grabbing'] = focus
        elif 'Door' in focus and lmb.positive:
            open_door(focus)
            # if you decide to use IPOs for the doors,
            # comment the previous line and uncomment the next line
            # the logic can be set with code in morse utils, which is currently
            # commented
            # focus['Open'] = not focus['Open']
        elif 'Drawer' in focus and lmb.positive:
            focus['Open'] = not focus['Open']
        elif 'Switch' in focus and lmb.positive:
            objects[focus['Switch']]['On'] = not objects[focus['Switch']]['On']
    except TypeError:
        pass


    if rmb.positive:  # drop selected Object
        ow['grabbing'] = None
        focused_object = lay_down_ray.hitObject
        actor_focused = False  # defensive: defined even when nothing was hit
        if focused_object is not None:
            actor_focused = blenderapi.objectdata(focused_object.name).game.use_actor
        # accurate placing of objects under certain conditions
        if human['Manipulate'] and lay_down_ray.positive \
           and focused_object != ow['selected'] \
           and actor_focused:
            # check not to lay the object on itself
            if ow['selected']:
                right_hand['LayDown'] = lay_down_ray.hitPosition
                right_hand['LayDownObj'] = focused_object
        # otherwise just drop the object
        else:
            if ow['selected']:
                ow['selected'].removeParent()
                ow['selected'] = None
                right_hand['moveArm'] = True
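The suffix handling at the top of `interact` relies on Blender's convention of appending '.001', '.002', ... to duplicated object names; `ow.name[-4:]` recovers that suffix so the matching empties and armature objects can be addressed. A small sketch of the same test on made-up names:

    def name_suffix(name):
        # Same test as in interact(): duplicated Blender objects end in '.NNN'.
        return name[-4:] if name[-4] == '.' else ''

    assert name_suffix('POS_EMPTY.001') == '.001'
    assert name_suffix('POS_EMPTY') == ''
    assert 'IK_Target_Empty.R' + name_suffix('Human.002') == 'IK_Target_Empty.R.002'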
Example #18
 def get_wheel_radius(self, wheel_name):
     dims = blenderapi.objectdata(wheel_name).dimensions
     # average the x and y dimension to get diameter - divide by 2 for radius
     radius = (dims[0]+dims[1])/4
     return radius
def interact(cont):
    """
    Script for opening doors, drawers and grabbing objects.

    Press the left mouse button to open, close or grab;
    press the right mouse button to drop the currently selected object.
    """

    ow = cont.owner

    # get the suffix of the human to reference the right objects
    suffix = ow.name[-4:] if ow.name[-4] == "." else ""

    right_hand = objects['IK_Target_Empty.R' + suffix]
    look = objects['Target_Empty' + suffix]
    human = objects[ow.parent.parent.parent.name + suffix]

    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    lmb = cont.sensors['LMB']
    ray = cont.sensors['Ray']
    cam = ray.owner
    lay_down_ray = cont.sensors['LayDownRay']
    rmb = cont.sensors['RMB']
    space = cont.sensors['SPACEBAR']
    head = objects['Head_Empty' + suffix]
    hand = objects['Hand.R' + suffix]

    # Get the focusing object:
    # A ray sensor is attached to the HumanCamera sensor.
    # It returns all colliding objects in a 10 cm range of the hand.
    # We filter the result to keep only objects that have the 'Object'
    # property or that have children with the 'Object' property.
    focus = None
    prox_obj = ray.hitObject  # focused object
    if prox_obj:
        if 'Object' in prox_obj:
            focus = prox_obj
        elif 'Door' in prox_obj or 'Drawer' in prox_obj or 'Switch' in prox_obj:
            focus = prox_obj
        else:
            for obj in prox_obj.children:
                if 'Object' in obj:
                    focus = obj

    # set the overlay scene and change the displayed text
    # and texture
    if human['Manipulate'] and focus:

        can_be_manipulated = False

        if focus in passive_objects.graspable_objects():
            can_be_manipulated = True
            if not ow['selected']:
                ow['Status'] = 'Pick up ' + passive_objects.label(focus)
            else:
                ow['Status'] = passive_objects.label(focus)
        elif 'Door' in focus or 'Drawer' in focus:
            can_be_manipulated = True

            try:
                if focus['Open']:
                    ow['Status'] = 'Close ' + str(focus['Description'])
                else:
                    ow['Status'] = 'Open ' + str(focus['Description'])
            except KeyError:
                logger.warning('Key missing in focused Object ' + focus.name +
                               ' --- no description given')
        elif 'Switch' in focus:
            can_be_manipulated = True
            if objects[focus['Switch']]['On']:
                ow['Status'] = "Turn off " + focus['Switch']
            else:
                ow['Status'] = "Turn on " + focus['Switch']
        else:
            ow['Status'] = None
    else:
        ow['Status'] = None

    if human['Manipulate']:
        if crosshairs not in scene.post_draw:
            scene.post_draw.append(crosshairs)
    else:
        if crosshairs in scene.post_draw:
            scene.post_draw.remove(crosshairs)

    if ow['Status']:
        if write_interaction_status not in scene.post_draw:
            scene.post_draw.append(write_interaction_status)
        if status_image not in scene.post_draw:
            scene.post_draw.append(status_image)
    else:
        if write_interaction_status in scene.post_draw:
            scene.post_draw.remove(write_interaction_status)
        if status_image in scene.post_draw:
            scene.post_draw.remove(status_image)

    if space.positive:
        # blocks mouse movement if interactable object is focused
        try:
            if ('Door' in focus or 'Object' in focus
                    or 'Drawer' in focus) and not ow['selected']:

                human['FOCUSED'] = True
                vect = Matrix.OrthoProjection('XY', 3) * human.getVectTo(focus)[1]
                human.alignAxisToVect(vect, 0, 1.0)
                # align the local x axis to point to the focused object
            else:
                human['FOCUSED'] = False
        except TypeError:
            human['FOCUSED'] = False
    else:
        human['FOCUSED'] = False

    try:
        if focus in passive_objects.graspable_objects():
            if lmb.positive and not ow['selected']:
                # set a property - a property-sensor will fire the grab-function
                ow['grabbing'] = focus
        elif 'Door' in focus and lmb.positive:
            open_door(focus)
            # if you decide to use IPOs for the doors,
            # comment the previous line and uncomment the next line
            # the logic can be set with code in morse utils, which is currently
            # commented
            # focus['Open'] = not focus['Open']
        elif 'Drawer' in focus and lmb.positive:
            focus['Open'] = not focus['Open']
        elif 'Switch' in focus and lmb.positive:
            objects[focus['Switch']]['On'] = not objects[focus['Switch']]['On']
    except TypeError:
        pass

    if rmb.positive:  # drop selected Object
        ow['grabbing'] = None
        focused_object = lay_down_ray.hitObject
        actor_focused = False  # defensive: defined even when nothing was hit
        if focused_object is not None:
            actor_focused = blenderapi.objectdata(
                focused_object.name).game.use_actor
        # accurate placing of objects under certain conditions
        if human['Manipulate'] and lay_down_ray.positive \
           and focused_object != ow['selected'] \
           and actor_focused:
            # check not to lay the object on itself
            if ow['selected']:
                right_hand['LayDown'] = lay_down_ray.hitPosition
                right_hand['LayDownObj'] = focused_object
        # otherwise just drop the object
        else:
            if ow['selected']:
                ow['selected'].removeParent()
                ow['selected'] = None
                right_hand['moveArm'] = True