def default_action(self):
    """ Do the actual semantic 'grab'.

    Iterate over all the tracked objects, and check if they are
    visible for the robot. Visible objects must have a bounding box
    and be active for physical simulation (have the 'Actor' checkbox
    selected)
    """
    # Call the action of the parent class
    super(self.__class__, self).default_action()

    visibles = self.visibles

    # check which objects are visible, and log appear/disappear transitions
    # (BUG FIX: the original also built an unused 'obj_dict' here — dead code removed)
    for obj in bge.logic.trackedObjects.keys():
        label = passive_objects.label(obj)
        visible = self._check_visible(obj)

        # Object is visible and not yet in the visible_objects list...
        if visible and label not in visibles:
            self.visibles.append(label)
            # Scale the object to show it is visible
            #obj.localScale = [1.2, 1.2, 1.2]
            # lazy %-args: the message is only formatted when DEBUG is enabled
            logger.debug("Semantic %s: %s just appeared", self.blender_obj.name, label)

        # Object is not visible and was in the visible_objects list...
        if not visible and label in visibles:
            self.visibles.remove(label)
            # Return the object to normal size when it is no longer visible
            #obj.localScale = [1.0, 1.0, 1.0]
            logger.debug("Semantic %s: %s just disappeared", self.blender_obj.name, label)

    # Build one dictionary per visible object containing its name, type,
    # description, position and orientation
    self.local_data['visible_objects'] = []
    for obj in bge.logic.trackedObjects.keys():
        label = passive_objects.label(obj)
        if label in visibles:
            obj_dict = {'name': label,
                        # 'Description' and 'Type' are optional game properties
                        'description': obj.get('Description', ''),
                        'type': obj.get('Type', ''),
                        'position': obj.worldPosition,
                        'orientation': obj.worldOrientation.to_quaternion()}
            self.local_data['visible_objects'].append(obj_dict)

    logger.debug("Visible objects: %s", self.local_data['visible_objects'])
def default_action(self):
    """ Do the actual semantic 'grab'.

    Iterate over all the tracked objects, and check if they are
    visible for the robot. Visible objects must have a bounding box
    and be active for physical simulation (have the 'Actor' checkbox
    selected)
    """
    # Call the action of the parent class
    super(self.__class__, self).default_action()

    visibles = self.visibles

    # check which objects are visible, and log appear/disappear transitions
    # (BUG FIX: the original also built an unused 'obj_dict' here — dead code removed)
    for obj in GameLogic.trackedObjects.keys():
        label = passive_objects.label(obj)
        visible = self._check_visible(obj)

        # Object is visible and not yet in the visible_objects list...
        if visible and label not in visibles:
            self.visibles.append(label)
            # Scale the object to show it is visible
            #obj.localScale = [1.2, 1.2, 1.2]
            logger.info("Semantic: {0} just appeared".format(label))

        # Object is not visible and was in the visible_objects list...
        if not visible and label in visibles:
            self.visibles.remove(label)
            # Return the object to normal size when it is no longer visible
            #obj.localScale = [1.0, 1.0, 1.0]
            logger.info("Semantic: {0} just disappeared".format(label))

    # Build one dictionary per visible object containing its name, type,
    # description, position and orientation
    self.local_data['visible_objects'] = []
    for obj in GameLogic.trackedObjects.keys():
        label = passive_objects.label(obj)
        if label in visibles:
            obj_dict = {'name': label,
                        'description': '',
                        'type': '',
                        'position': obj.worldPosition,
                        'orientation': obj.worldOrientation.to_quaternion()}
            # Set description and type if those game properties exist
            try:
                obj_dict['description'] = obj['Description']
            except KeyError:
                pass
            try:
                obj_dict['type'] = obj['Type']
            except KeyError:
                pass
            self.local_data['visible_objects'].append(obj_dict)

    logger.debug("Visible objects: " + str(self.local_data['visible_objects']))
def default_action(self):
    """ Do the actual semantic 'grab'.

    Iterate over all the tracked objects, and check if they are
    visible for the robot. Visible objects must have a bounding box
    and be active for physical simulation (have the 'Actor' checkbox
    selected)
    """
    # Run the generic sensor behaviour first
    super(self.__class__, self).default_action()

    # Collect one record per visible tracked object: name, type,
    # description, position and orientation
    visible_objects = []
    tracked = blenderapi.persistantstorage().trackedObjects
    for candidate in tracked.keys():
        if not self._check_visible(candidate):
            continue
        record = {}
        record['name'] = passive_objects.label(candidate)
        # 'Description' and 'Type' are optional game properties
        record['description'] = candidate.get('Description', '')
        record['type'] = candidate.get('Type', '')
        record['position'] = candidate.worldPosition
        record['orientation'] = candidate.worldOrientation.to_quaternion()
        visible_objects.append(record)

    self.local_data['visible_objects'] = visible_objects

    logger.debug("Visible objects: %s" % self.local_data['visible_objects'])
def default_action(self):
    """ Do the actual semantic 'grab'.

    Iterate over all the tracked objects, and check if they are
    visible for the robot. Visible objects must have a bounding box
    and be active for physical simulation (have the 'Actor' checkbox
    selected)
    """
    # Run the generic sensor behaviour first
    super(self.__class__, self).default_action()

    def _describe(obj):
        # Bundle name, type, description, position and orientation
        # for a single visible object
        return {'name': passive_objects.label(obj),
                'description': obj.get('Description', ''),
                'type': obj.get('Type', ''),
                'position': obj.worldPosition,
                'orientation': obj.worldOrientation.to_quaternion()}

    storage = blenderapi.persistantstorage()
    self.local_data['visible_objects'] = [
        _describe(obj)
        for obj in storage.trackedObjects.keys()
        if self._check_visible(obj)
    ]

    logger.debug("Visible objects: %s" % self.local_data['visible_objects'])
def show(contr):
    """ Add a text label over all interactable objects in the scene.

    For each active passive object, spawn a "Text_proxy" instance,
    parent it to the object and lift it just above the object's
    highest vertex.
    """
    scene = logic.getCurrentScene()

    for obj in passive_objects.active_objects():
        # create a new instance of a text object
        textObj = scene.addObject("Text_proxy", obj, 0)
        textObj["Text"] = passive_objects.label(obj)
        # Property to identify all added text objects
        textObj["_tooltip"] = True
        textObj.setParent(obj, False, True)

        # There can be more than one mesh
        # see Blender API for more information
        if obj.meshes:
            meshes = obj.meshes
        elif obj.children:
            # It seems our object has no mesh attached. It's probably
            # an empty. In that case, check the children's meshes
            meshes = []
            for child in obj.children:
                meshes += child.meshes
        else:
            # No children? hum... Then give up...
            # BUG FIX: the original wrote  "..." + "..." % obj ;  '%' binds
            # tighter than '+', so it formatted only the second literal
            # (which has no placeholder) and raised TypeError. Pass the
            # argument lazily to the logger instead.
            logger.warning("I was unable to place the %s label, since I "
                           "couldn't find any mesh attached to this object "
                           "or its children!", obj)
            continue

        # iterate over all vertices to get the highest
        z = 0
        for mesh in meshes:
            for mat in range(mesh.numMaterials):
                for vert_id in range(mesh.getVertexArrayLength(mat)):
                    vertex = mesh.getVertex(mat, vert_id)
                    if vertex.z > z:
                        z = vertex.z

        # set the text over the highest vertex
        textObj.applyMovement([0.0, 0.0, z * 1.2])
def interact(cont):
    """ Script for opening doors, drawers and grabbing objects

    press left mousebutton to open, close or grab
    press right mousebutton to drop the currently selected object
    """
    ow = cont.owner

    # get the suffix of the human to reference the right objects
    # (guard the negative index: a name shorter than 4 chars would
    # raise IndexError in the original)
    suffix = ow.name[-4:] if len(ow.name) >= 4 and ow.name[-4] == "." else ""

    right_hand = objects['IK_Target_Empty.R' + suffix]
    look = objects['Target_Empty' + suffix]
    human = objects[ow.parent.parent.parent.name + suffix]

    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    lmb = cont.sensors['LMB']
    ray = cont.sensors['Ray']
    cam = ray.owner
    lay_down_ray = cont.sensors['LayDownRay']
    rmb = cont.sensors['RMB']
    space = cont.sensors['SPACEBAR']
    head = objects['Head_Empty' + suffix]
    hand = objects['Hand.R' + suffix]

    # Get the focusing object:
    # A ray sensor is attached to the HumanCamera sensor.
    # It returns all colliding objects in a 10 cm range of the hand.
    # We filter the result to keep only objects that have the 'Object'
    # property or that have children with the 'Object' property.
    focus = None
    prox_obj = ray.hitObject     # focused object
    if prox_obj:
        if 'Object' in prox_obj:
            focus = prox_obj
        elif 'Door' in prox_obj or 'Drawer' in prox_obj or 'Switch' in prox_obj:
            focus = prox_obj
        else:
            for obj in prox_obj.children:
                if 'Object' in obj:
                    focus = obj

    # set the overlay scene and change the displayed text and texture
    # (the 'can_be_manipulated' flag of the original was written but
    # never read in this version — dead code removed)
    if human['Manipulate'] and focus:
        if focus in passive_objects.graspable_objects():
            if not ow['selected']:
                ow['Status'] = 'Pick up ' + passive_objects.label(focus)
            else:
                ow['Status'] = passive_objects.label(focus)
        elif 'Door' in focus or 'Drawer' in focus:
            try:
                if focus['Open']:
                    ow['Status'] = 'Close ' + str(focus['Description'])
                else:
                    ow['Status'] = 'Open ' + str(focus['Description'])
            except KeyError:
                logger.warning('Key missing in focused Object ' + focus.name +
                               ' --- no description given')
        elif 'Switch' in focus:
            if objects[focus['Switch']]['On']:
                ow['Status'] = "Turn off " + focus['Switch']
            else:
                ow['Status'] = "Turn on " + focus['Switch']
        else:
            ow['Status'] = None
    else:
        ow['Status'] = None

    # draw the crosshairs overlay only while in manipulation mode
    if human['Manipulate']:
        if crosshairs not in scene.post_draw:
            scene.post_draw.append(crosshairs)
    else:
        if crosshairs in scene.post_draw:
            scene.post_draw.remove(crosshairs)

    # draw the status text and image only while something is focused
    if ow['Status']:
        if write_interaction_status not in scene.post_draw:
            scene.post_draw.append(write_interaction_status)
        if status_image not in scene.post_draw:
            scene.post_draw.append(status_image)
    else:
        if write_interaction_status in scene.post_draw:
            scene.post_draw.remove(write_interaction_status)
        if status_image in scene.post_draw:
            scene.post_draw.remove(status_image)

    if space.positive:
        # blocks mouse movement if interactable object is focused
        # ("'X' in focus" raises TypeError when focus is None)
        try:
            if ('Door' in focus or 'Object' in focus or 'Drawer' in focus) \
               and not ow['selected']:
                human['FOCUSED'] = True
                vect = Matrix.OrthoProjection('XY', 3) * human.getVectTo(focus)[1]
                # align the local x axis to point to the focused object
                human.alignAxisToVect(vect, 0, 1.0)
            else:
                human['FOCUSED'] = False
        except TypeError:
            human['FOCUSED'] = False
    else:
        human['FOCUSED'] = False

    try:
        if focus in passive_objects.graspable_objects():
            if lmb.positive and not ow['selected']:
                # set a property - a property-sensor will fire the grab-function
                ow['grabbing'] = focus
        elif 'Door' in focus and lmb.positive:
            open_door(focus)
            # if you decide to use IPOs for the doors,
            # comment the previous line and uncomment the next line
            # the logic can be set with code in morse utils, which is currently
            # commented
            # focus['Open'] = not focus['Open']
        elif 'Drawer' in focus and lmb.positive:
            focus['Open'] = not focus['Open']
        elif 'Switch' in focus and lmb.positive:
            objects[focus['Switch']]['On'] = not objects[focus['Switch']]['On']
    except TypeError:
        # focus is None: nothing to interact with
        pass

    if rmb.positive:    # drop selected Object
        ow['grabbing'] = None
        focused_object = lay_down_ray.hitObject
        if focused_object is not None:    # was '!= None' (PEP 8: compare to None with 'is')
            actor_focused = blenderapi.objectdata(focused_object.name).game.use_actor
            # accurate placing of objects under certain conditions
            if human['Manipulate'] and lay_down_ray.positive \
               and focused_object != ow['selected'] \
               and actor_focused:
                # check not to lay the object on itself
                if ow['selected']:
                    right_hand['LayDown'] = lay_down_ray.hitPosition
                    right_hand['LayDownObj'] = focused_object
            # otherwise just drop the object
            else:
                if ow['selected']:
                    ow['selected'].removeParent()
                    ow['selected'] = None
                    right_hand['moveArm'] = True
def interact(cont):
    """ Script for opening doors, drawers and grabbing objects

    press left mousebutton to open, close or grab
    press right mousebutton to drop the currently selected object
    """
    ow = cont.owner

    # get the suffix of the human to reference the right objects
    # (guard the negative index: a name shorter than 4 chars would
    # raise IndexError in the original)
    suffix = ow.name[-4:] if len(ow.name) >= 4 and ow.name[-4] == "." else ""

    right_hand = objects['IK_Target_Empty.R' + suffix]
    look = objects['Target_Empty' + suffix]
    human = objects['Human' + suffix]

    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    lmb = cont.sensors['LMB']
    ray = cont.sensors['Ray']
    cam = ray.owner
    lay_down_ray = cont.sensors['LayDownRay']
    rmb = cont.sensors['RMB']
    space = cont.sensors['SPACEBAR']
    head = objects['Head_Empty' + suffix]
    hand = objects['Hand.R' + suffix]

    # Get the focusing object:
    # A ray sensor is attached to the HumanCamera sensor.
    # It returns all colliding objects in a 10 cm range of the hand.
    # We filter the result to keep only objects that have the 'Object'
    # property or that have children with the 'Object' property.
    focus = None
    prox_obj = ray.hitObject     # focused object
    if prox_obj:
        if 'Object' in prox_obj:
            focus = prox_obj
        elif 'Door' in prox_obj or 'Drawer' in prox_obj or 'Switch' in prox_obj:
            focus = prox_obj
        else:
            for obj in prox_obj.children:
                if 'Object' in obj:
                    focus = obj

    # set the overlay scene and change the displayed text and texture
    # (the 'can_be_manipulated' flag of the original was written but
    # never read in this version — dead code removed)
    if human['Manipulate'] and focus:
        if focus in passive_objects.graspable_objects():
            if not ow['selected']:
                ow['Status'] = 'Pick up ' + passive_objects.label(focus)
            else:
                ow['Status'] = passive_objects.label(focus)
        elif 'Door' in focus or 'Drawer' in focus:
            try:
                if focus['Open']:
                    ow['Status'] = 'Close ' + str(focus['Description'])
                else:
                    ow['Status'] = 'Open ' + str(focus['Description'])
            except KeyError:
                logger.warning('Key missing in focused Object ' + focus.name +
                               ' --- no description given')
        elif 'Switch' in focus:
            if objects[focus['Switch']]['On']:
                ow['Status'] = "Turn off " + focus['Switch']
            else:
                ow['Status'] = "Turn on " + focus['Switch']
        else:
            ow['Status'] = None
    else:
        ow['Status'] = None

    # draw the crosshairs overlay only while in manipulation mode
    if human['Manipulate']:
        if crosshairs not in scene.post_draw:
            scene.post_draw.append(crosshairs)
    else:
        if crosshairs in scene.post_draw:
            scene.post_draw.remove(crosshairs)

    # draw the status text and image only while something is focused
    if ow['Status']:
        if write_interaction_status not in scene.post_draw:
            scene.post_draw.append(write_interaction_status)
        if status_image not in scene.post_draw:
            scene.post_draw.append(status_image)
    else:
        if write_interaction_status in scene.post_draw:
            scene.post_draw.remove(write_interaction_status)
        if status_image in scene.post_draw:
            scene.post_draw.remove(status_image)

    if space.positive:
        # blocks mouse movement if interactable object is focused
        # ("'X' in focus" raises TypeError when focus is None)
        try:
            if ('Door' in focus or 'Object' in focus or 'Drawer' in focus) \
               and not ow['selected']:
                human['FOCUSED'] = True
                vect = Matrix.OrthoProjection('XY', 3) * human.getVectTo(focus)[1]
                # align the local x axis to point to the focused object
                human.alignAxisToVect(vect, 0, 1.0)
            else:
                human['FOCUSED'] = False
        except TypeError:
            human['FOCUSED'] = False
    else:
        human['FOCUSED'] = False

    try:
        if focus in passive_objects.graspable_objects():
            if lmb.positive and not ow['selected']:
                # set a property - a property-sensor will fire the grab-function
                ow['grabbing'] = focus
        elif 'Door' in focus and lmb.positive:
            open_door(focus)
            # if you decide to use IPOs for the doors,
            # comment the previous line and uncomment the next line
            # the logic can be set with code in morse utils, which is currently
            # commented
            # focus['Open'] = not focus['Open']
        elif 'Drawer' in focus and lmb.positive:
            focus['Open'] = not focus['Open']
        elif 'Switch' in focus and lmb.positive:
            objects[focus['Switch']]['On'] = not objects[focus['Switch']]['On']
    except TypeError:
        # focus is None: nothing to interact with
        pass

    if rmb.positive:    # drop selected Object
        ow['grabbing'] = None
        focused_object = lay_down_ray.hitObject
        if focused_object is not None:    # was '!= None' (PEP 8: compare to None with 'is')
            actor_focused = data.objects[focused_object.name].game.use_actor
            # accurate placing of objects under certain conditions
            if human['Manipulate'] and lay_down_ray.positive \
               and focused_object != ow['selected'] \
               and actor_focused:
                # check not to lay the object on itself
                if ow['selected']:
                    right_hand['LayDown'] = lay_down_ray.hitPosition
                    right_hand['LayDownObj'] = focused_object
            # otherwise just drop the object
            else:
                if ow['selected']:
                    ow['selected'].removeParent()
                    ow['selected'] = None
                    right_hand['moveArm'] = True
def interact():
    """ Script for opening doors, drawers and grabbing objects

    press left mousebutton to open, close or grab
    press right mousebutton to drop the currently selected object
    """
    right_hand = objects["IK_Target_Empty.R"]
    look = objects["Target_Empty"]
    human = objects["Human"]

    co = logic.getCurrentController()
    ow = co.owner
    lmb = co.sensors["LMB"]
    ray = co.sensors["Ray"]
    cam = ray.owner
    lay_down_ray = co.sensors["LayDownRay"]
    rmb = co.sensors["RMB"]
    space = co.sensors["SPACEBAR"]
    head = objects["Head_Empty"]
    hand = objects["Hand.R"]

    # Get the focusing object:
    # A ray sensor is attached to the HumanCamera sensor.
    # It returns all colliding objects in a 10 cm range of the hand.
    # We filter the result to keep only objects that have the 'Object'
    # property or that have children with the 'Object' property.
    focus = None
    prox_obj = ray.hitObject     # focused object
    if prox_obj:
        if "Object" in prox_obj:
            focus = prox_obj
        elif "Door" in prox_obj or "Drawer" in prox_obj or "Switch" in prox_obj:
            focus = prox_obj
        else:
            for obj in prox_obj.children:
                if "Object" in obj:
                    focus = obj

    # set the overlay scene and change the displayed text and texture
    # (the 'can_be_manipulated' flag of the original was written but
    # never read in this version — dead code removed)
    if human["Manipulate"] and focus:
        if focus in passive_objects.graspable_objects():
            if not ow["selected"]:
                ow["Status"] = "Pick up " + passive_objects.label(focus)
            else:
                ow["Status"] = passive_objects.label(focus)
        elif "Door" in focus or "Drawer" in focus:
            try:
                if focus["Open"]:
                    ow["Status"] = "Close " + str(focus["Description"])
                else:
                    ow["Status"] = "Open " + str(focus["Description"])
            except KeyError:
                logger.warning("Key missing in focused Object " + focus.name +
                               " --- no description given")
        elif "Switch" in focus:
            if objects[focus["Switch"]]["On"]:
                ow["Status"] = "Turn off " + focus["Switch"]
            else:
                ow["Status"] = "Turn on " + focus["Switch"]
        else:
            ow["Status"] = None
    else:
        ow["Status"] = None

    # draw the crosshairs overlay only while in manipulation mode
    if human["Manipulate"]:
        if crosshairs not in scene.post_draw:
            scene.post_draw.append(crosshairs)
    else:
        if crosshairs in scene.post_draw:
            scene.post_draw.remove(crosshairs)

    # draw the status text and image only while something is focused
    if ow["Status"]:
        if write_interaction_status not in scene.post_draw:
            scene.post_draw.append(write_interaction_status)
        if status_image not in scene.post_draw:
            scene.post_draw.append(status_image)
    else:
        if write_interaction_status in scene.post_draw:
            scene.post_draw.remove(write_interaction_status)
        if status_image in scene.post_draw:
            scene.post_draw.remove(status_image)

    if space.positive:
        # blocks mouse movement if interactable object is focused
        # ("'X' in focus" raises TypeError when focus is None)
        try:
            if ("Door" in focus or "Object" in focus or "Drawer" in focus) \
               and not ow["selected"]:
                human["FOCUSED"] = True
                vect = Matrix.OrthoProjection("XY", 3) * human.getVectTo(focus)[1]
                # align the local x axis to point to the focused object
                human.alignAxisToVect(vect, 0, 1.0)
            else:
                human["FOCUSED"] = False
        except TypeError:
            human["FOCUSED"] = False
    else:
        human["FOCUSED"] = False

    try:
        if focus in passive_objects.graspable_objects():
            if lmb.positive and not ow["selected"]:
                # set a property - a property-sensor will fire the grab-function
                ow["grabbing"] = focus
        elif "Door" in focus and lmb.positive:
            open_door(focus)
            # if you decide to use IPOs for the doors,
            # comment the previous line and uncomment the next line
            # the logic can be set with code in morse utils, which is currently
            # commented
            # focus['Open'] = not focus['Open']
        elif "Drawer" in focus and lmb.positive:
            focus["Open"] = not focus["Open"]
        elif "Switch" in focus and lmb.positive:
            objects[focus["Switch"]]["On"] = not objects[focus["Switch"]]["On"]
    except TypeError:
        # focus is None: nothing to interact with
        pass

    if rmb.positive:    # drop selected Object
        ow["grabbing"] = None
        focused_object = lay_down_ray.hitObject
        if focused_object is not None:    # was '!= None' (PEP 8: compare to None with 'is')
            actor_focused = data.objects[focused_object.name].game.use_actor
            # accurate placing of objects under certain conditions
            if human["Manipulate"] and lay_down_ray.positive and \
               focused_object != ow["selected"] and actor_focused:
                # check not to lay the object on itself
                if ow["selected"]:
                    right_hand["LayDown"] = lay_down_ray.hitPosition
            # otherwise just drop the object
            else:
                if ow["selected"]:
                    ow["selected"].removeParent()
                    ow["selected"] = None
                    right_hand["moveArm"] = True
def interact():
    """ Script for opening doors, drawers and grabbing objects

    press left mousebutton to open, close or grab
    press right mousebutton to drop the currently selected object
    """
    # list of all objects in current scene
    sobList = logic.getCurrentScene().objects
    right_hand = sobList['IK_Target_Empty.R']
    look = sobList['Target_Empty']
    human = sobList['POS_EMPTY']

    co = logic.getCurrentController()
    ow = co.owner
    lmb = co.sensors['LMB']
    ray = co.sensors['Ray']
    cam = ray.owner
    lay_down_ray = co.sensors['LayDownRay']
    rmb = co.sensors['RMB']
    space = co.sensors['SPACEBAR']

    # actuators driving the overlay scene and the description messages
    AddSc = co.actuators['AddScene']
    RemSc = co.actuators['RemoveScene']
    Description = co.actuators['Descr_message']

    head = sobList['Head_Empty']
    hand = sobList['Hand.R']

    # Get the focusing object:
    # A ray sensor is attached to the HumanCamera sensor.
    # It returns all colliding objects in a 10 cm range of the hand.
    # We filter the result to keep only objects that have the 'Object'
    # property or that have children with the 'Object' property.
    focus = None
    prox_obj = ray.hitObject     # focused object
    if prox_obj:
        if 'Object' in prox_obj:
            focus = prox_obj
        elif 'Door' in prox_obj or 'Drawer' in prox_obj:
            focus = prox_obj
        else:
            for obj in prox_obj.children:
                if 'Object' in obj:
                    focus = obj

    scenes = logic.getSceneList()

    # set the overlay scene and send messages to change the displayed text
    # and texture ("'X' in focus" raises TypeError when focus is None)
    if human['Manipulate']:
        try:
            can_be_manipulated = False
            if focus in passive_objects.graspable_objects():
                can_be_manipulated = True
                Description.body = ('Pick up ' + passive_objects.label(focus))
            elif 'Door' in focus or 'Drawer' in focus:
                can_be_manipulated = True
                try:
                    if focus['Open']:
                        Description.body = ('Close ' + str(focus['Description']))
                    else:
                        Description.body = ('Open ' + str(focus['Description']))
                except KeyError:
                    print('Key missing in focused Object ' + focus.name +
                          ' --- no description given')
                    Description.body = ''

            if can_be_manipulated:
                ow.sendMessage('selected', str(ow['selected']), 'overlay')
                co.activate(Description)
                # add the overlay scene if it is not already loaded
                if len(scenes) == 2:
                    co.activate(AddSc)
            else:
                # remove the overlay scene if it is loaded
                if len(scenes) == 3:
                    co.activate(RemSc)
        except TypeError:
            if len(scenes) == 3:
                co.activate(RemSc)
    else:
        if len(scenes) == 3:
            co.activate(RemSc)

    if space.positive:
        # blocks mouse movement if interactable object is focused
        try:
            if ('Door' in focus or 'Object' in focus or 'Drawer' in focus) \
               and not ow['selected']:
                human['FOCUSED'] = True
                vect = Matrix.OrthoProjection('XY', 3) * human.getVectTo(focus)[1]
                # align the local x axis to point to the focused object
                human.alignAxisToVect(vect, 0, 1.0)
            else:
                human['FOCUSED'] = False
        except TypeError:
            human['FOCUSED'] = False
    else:
        human['FOCUSED'] = False

    try:
        if focus in passive_objects.graspable_objects():
            if lmb.positive and not ow['selected']:
                # set a property - a property-sensor will fire the grab-function
                ow['grabbing'] = focus
        elif 'Door' in focus and lmb.positive:
            open_door(focus)
            # if you decide to use IPOs for the doors,
            # comment the previous line and uncomment the next line
            # the logic can be set with code in morse utils, which is currently
            # commented
            # focus['Open'] = not focus['Open']
        elif 'Drawer' in focus and lmb.positive:
            focus['Open'] = not focus['Open']
    except TypeError:
        # focus is None: nothing to interact with
        pass

    if rmb.positive:    # drop selected Object
        ow['grabbing'] = None
        focused_object = lay_down_ray.hitObject
        if focused_object is not None:    # was '!= None' (PEP 8: compare to None with 'is')
            actor_focused = data.objects[focused_object.name].game.use_actor
            # accurate placing of objects under certain conditions
            if human['Manipulate'] and lay_down_ray.positive \
               and focused_object != ow['selected'] \
               and actor_focused:
                # check not to lay the object on itself
                if ow['selected']:
                    right_hand['LayDown'] = lay_down_ray.hitPosition
            # otherwise just drop the object
            else:
                if ow['selected']:
                    ow['selected'].removeParent()
                    ow['selected'] = None
                    right_hand['moveArm'] = True