def write():
    """ Write the name of all active objects on Screen """
    # OpenGL setup: orthographic projection mapped to window pixels
    bgl.glMatrixMode(bgl.GL_PROJECTION)
    bgl.glLoadIdentity()
    bgl.gluOrtho2D(0, windowWidth, 0, windowHeight)
    bgl.glMatrixMode(bgl.GL_MODELVIEW)
    bgl.glLoadIdentity()

    # Font state is loop-invariant: configure it once instead of once
    # per object (the original reconfigured it on every iteration).
    blf.size(font_id, int(windowWidth * 0.02), 72)
    # draw a black shadow to increase contrast with white parts
    blf.enable(font_id, blf.SHADOW)
    blf.shadow(font_id, 5, 0.0, 0.0, 0.0, 1.0)

    cam = scene.active_camera
    for obj in passive_objects.active_objects():
        # test if the object is in the view frustum
        if cam.pointInsideFrustum(obj.worldPosition):
            # getScreenPosition returns normalized coordinates with Y
            # growing downwards -> flip Y for the GL ortho projection
            pos = cam.getScreenPosition(obj)
            blf.position(font_id,
                         pos[0] * windowWidth,
                         (1 - pos[1]) * windowHeight, 0)
            blf.draw(font_id, obj.name)
def __init__(self, obj, parent=None):
    """ Constructor method.

    Receives the reference to the Blender object.
    The second parameter should be the name of the object's parent.
    """
    logger.info('%s initialization' % obj.name)
    # Call the constructor of the parent class.
    # NOTE: the original used super(self.__class__, self), which recurses
    # infinitely as soon as this class is subclassed; zero-argument
    # super() resolves the MRO correctly.
    super().__init__(obj, parent)

    # Locate the Blender camera object associated with this sensor.
    # Initialize to None first: otherwise the error check below raises
    # AttributeError when no camera child is found.
    self.blender_cam = None
    main_obj = self.blender_obj
    # use a distinct loop variable so the 'obj' parameter is not shadowed
    for child in main_obj.children:
        if hasattr(child, 'lens'):
            self.blender_cam = child
            logger.info("Camera object: {0}".format(self.blender_cam))
            break
    if not self.blender_cam:
        logger.error("no camera object associated to the semantic camera. " +
                     "The semantic camera requires a standard Blender camera in its children.")

    # TrackedObject is a dictionary containing the list of tracked objects
    # (->meshes with a class property set up) as keys
    # and the bounding boxes of these objects as value.
    if not hasattr(bge.logic, 'trackedObjects'):
        logger.info('Initialization of tracked objects:')
        bge.logic.trackedObjects = dict.fromkeys(
            passive_objects.active_objects())

        # Store the bounding box of the marked objects
        for obj in bge.logic.trackedObjects.keys():
            # bound_box returns the bounding box in local space
            # instead of world space.
            bge.logic.trackedObjects[obj] = bpy.data.objects[
                obj.name].bound_box
            details = passive_objects.details(obj)
            logger.info(' - {0} (type:{1})'.format(
                details['label'], details['type']))

    # Prepare the exportable data of this sensor:
    # the list of currently visible objects by each independent robot.
    # Array for labels of visible objects
    self.visibles = []
    self.local_data['visible_objects'] = []
    # Variable to indicate this is a camera
    self.semantic_tag = True

    logger.info('Component initialized')
def __init__(self, obj, parent=None):
    """ Constructor method.

    Receives the reference to the Blender object.
    The second parameter should be the name of the object's parent.
    """
    logger.info('%s initialization' % obj.name)
    # Call the constructor of the parent class.
    # NOTE: the original used super(self.__class__, self), which recurses
    # infinitely as soon as this class is subclassed; zero-argument
    # super() resolves the MRO correctly.
    super().__init__(obj, parent)

    # Locate the Blender camera object associated with this sensor.
    # Initialize to None first: otherwise the error check below raises
    # AttributeError when no camera child is found.
    self.blender_cam = None
    main_obj = self.blender_obj
    # use a distinct loop variable so the 'obj' parameter is not shadowed
    for child in main_obj.children:
        if hasattr(child, 'lens'):
            self.blender_cam = child
            logger.info("Camera object: {0}".format(self.blender_cam))
            break
    if not self.blender_cam:
        logger.error("no camera object associated to the semantic camera. " +
                     "The semantic camera requires a standard Blender camera in its children.")

    # TrackedObject is a dictionary containing the list of tracked objects
    # (->meshes with a class property set up) as keys
    # and the bounding boxes of these objects as value.
    if not hasattr(GameLogic, 'trackedObjects'):
        logger.info('Initialization of tracked objects:')
        GameLogic.trackedObjects = dict.fromkeys(
            passive_objects.active_objects())

        # Store the bounding box of the marked objects
        for obj in GameLogic.trackedObjects.keys():
            # bound_box returns the bounding box in local space
            # instead of world space.
            GameLogic.trackedObjects[obj] = bpy.data.objects[obj.name].bound_box
            details = passive_objects.details(obj)
            logger.info(' - {0} (type:{1})'.format(details['label'],
                                                   details['type']))

    # Prepare the exportable data of this sensor:
    # the list of currently visible objects by each independent robot.
    # Array for labels of visible objects
    self.visibles = []
    self.local_data['visible_objects'] = []
    # Variable to indicate this is a camera
    self.semantic_tag = True

    logger.info('Component initialized')
def show(contr):
    """ Add a text over all interactable Objects in the scene """
    scene = logic.getCurrentScene()

    for obj in passive_objects.active_objects():
        # create a new instance of a text object
        textObj = scene.addObject("Text_proxy", obj, 0)
        textObj["Text"] = passive_objects.label(obj)
        # Property to identify all added text objects
        textObj["_tooltip"] = True
        textObj.setParent(obj, False, True)

        # There can be more than one mesh
        # see Blender API for more information
        if obj.meshes:
            meshes = obj.meshes
        else:
            # It seems our object has no mesh attached. It's
            # probably an empty. In that case, check the children's
            # meshes
            if obj.children:
                meshes = []
                for child in obj.children:
                    meshes += child.meshes
            else:
                # No children? hum... Then give up...
                # BUGFIX: the original applied '%' only to the second of
                # two concatenated literals (which has no placeholder),
                # raising TypeError at runtime. Use the logger's lazy
                # argument substitution instead.
                logger.warning(
                    "I was unable to place the %s label, since I couldn't "
                    "find any mesh attached to this object or its children!",
                    obj)
                continue

        z = 0
        # iterate over all vertices to get the highest
        for mesh in meshes:
            for mat in range(0, mesh.numMaterials):
                for vert_id in range(mesh.getVertexArrayLength(mat)):
                    vertex = mesh.getVertex(mat, vert_id)
                    if vertex.z > z:
                        z = vertex.z

        # set the text over the highest vertex
        textObj.applyMovement([0.0, 0.0, z * 1.2])
def __init__(self, obj, parent=None):
    """ Constructor method.

    Receives the reference to the Blender object.
    The second parameter should be the name of the object's parent.
    """
    logger.info('%s initialization' % obj.name)
    # Call the constructor of the parent class
    morse.sensors.camera.Camera.__init__(self, obj, parent)

    # Locate the Blender camera object associated with this sensor.
    # Initialize to None first: otherwise the error check below raises
    # AttributeError when no camera child is found.
    self.blender_cam = None
    main_obj = self.bge_object
    # use a distinct loop variable so the 'obj' parameter is not shadowed
    for child in main_obj.children:
        if hasattr(child, 'lens'):
            self.blender_cam = child
            logger.info("Camera object: {0}".format(self.blender_cam))
            break
    if not self.blender_cam:
        logger.error("no camera object associated to the semantic camera. \
The semantic camera requires a standard Blender \
camera in its children.")

    # TrackedObject is a dictionary containing the list of tracked objects
    # (->meshes with a class property set up) as keys
    # and the bounding boxes of these objects as value.
    if not 'trackedObjects' in blenderapi.persistantstorage():
        logger.info('Initialization of tracked objects:')
        blenderapi.persistantstorage().trackedObjects = \
            dict.fromkeys(passive_objects.active_objects())

        # Store the bounding box of the marked objects
        for obj in blenderapi.persistantstorage().trackedObjects.keys():
            # bound_box returns the bounding box in local space
            # instead of world space.
            blenderapi.persistantstorage().trackedObjects[obj] = \
                blenderapi.objectdata(obj.name).bound_box
            details = passive_objects.details(obj)
            logger.info(' - {%s} (type:%s)' % (details['label'],
                                               details['type']))

    if self.noocclusion:
        logger.info(
            "Semantic camera running in 'no occlusion' mode (fast mode).")
    logger.info("Component initialized, runs at %.2f Hz ", self.frequency)
def __init__(self, obj, parent=None):
    """ Constructor method.

    Receives the reference to the Blender object.
    The second parameter should be the name of the object's parent.
    """
    # BUGFIX: the original message read '%s initialization of' (dangling
    # 'of'); corrected to match the other components' log format.
    logger.info('%s initialization' % obj.name)
    # Call the constructor of the parent class
    morse.sensors.camera.Camera.__init__(self, obj, parent)

    # Locate the Blender camera object associated with this sensor.
    # Initialize to None first: otherwise the error check below raises
    # AttributeError when no camera child is found.
    self.blender_cam = None
    main_obj = self.bge_object
    # use a distinct loop variable so the 'obj' parameter is not shadowed
    for child in main_obj.children:
        if hasattr(child, 'lens'):
            self.blender_cam = child
            logger.info("Camera object: {0}".format(self.blender_cam))
            break
    if not self.blender_cam:
        logger.error("no camera object associated to the semantic camera. \
The semantic camera requires a standard Blender \
camera in its children.")

    # TrackedObject is a dictionary containing the list of tracked objects
    # (->meshes with a class property set up) as keys
    # and the bounding boxes of these objects as value.
    if not 'trackedObjects' in blenderapi.persistantstorage():
        logger.info('Initialization of tracked objects:')
        blenderapi.persistantstorage().trackedObjects = \
            dict.fromkeys(passive_objects.active_objects())

        # Store the bounding box of the marked objects
        for obj in blenderapi.persistantstorage().trackedObjects.keys():
            # bound_box returns the bounding box in local space
            # instead of world space.
            blenderapi.persistantstorage().trackedObjects[obj] = \
                blenderapi.objectdata(obj.name).bound_box
            details = passive_objects.details(obj)
            logger.info(' - {%s} (type:%s)' % (details['label'],
                                               details['type']))

    if self.noocclusion:
        logger.info(
            "Semantic camera running in 'no occlusion' mode (fast mode).")
    logger.info("Component initialized, runs at %.2f Hz ", self.frequency)
def __init__(self, obj, parent=None):
    """ Constructor method.

    Receives the reference to the Blender object.
    The second parameter should be the name of the object's parent.
    """
    logger.info("%s initialization" % obj.name)
    # Call the constructor of the parent class.
    # NOTE: the original used super(self.__class__, self), which recurses
    # infinitely as soon as this class is subclassed; zero-argument
    # super() resolves the MRO correctly.
    super().__init__(obj, parent)

    # Locate the Blender camera object associated with this sensor.
    # Initialize to None first: otherwise the error check below raises
    # AttributeError when no camera child is found.
    self.blender_cam = None
    main_obj = self.bge_object
    # use a distinct loop variable so the 'obj' parameter is not shadowed
    for child in main_obj.children:
        if hasattr(child, "lens"):
            self.blender_cam = child
            logger.info("Camera object: {0}".format(self.blender_cam))
            break
    if not self.blender_cam:
        logger.error(
            "no camera object associated to the semantic camera. \
The semantic camera requires a standard Blender \
camera in its children."
        )

    # TrackedObject is a dictionary containing the list of tracked objects
    # (->meshes with a class property set up) as keys
    # and the bounding boxes of these objects as value.
    if not "trackedObjects" in blenderapi.persistantstorage():
        logger.info("Initialization of tracked objects:")
        blenderapi.persistantstorage().trackedObjects = \
            dict.fromkeys(passive_objects.active_objects())

        # Store the bounding box of the marked objects
        for obj in blenderapi.persistantstorage().trackedObjects.keys():
            # bound_box returns the bounding box in local space
            # instead of world space.
            blenderapi.persistantstorage().trackedObjects[obj] = \
                blenderapi.objectdata(obj.name).bound_box
            details = passive_objects.details(obj)
            logger.info(" - {%s} (type:%s)" % (details["label"],
                                               details["type"]))

    logger.info("Component initialized")
def write():
    """ Write the name of all active objects on Screen """
    # Configure an orthographic projection that maps directly onto
    # window pixel coordinates.
    bgl.glMatrixMode(bgl.GL_PROJECTION)
    bgl.glLoadIdentity()
    bgl.gluOrtho2D(0, windowWidth, 0, windowHeight)
    bgl.glMatrixMode(bgl.GL_MODELVIEW)
    bgl.glLoadIdentity()

    cam = scene.active_camera

    for obj in passive_objects.active_objects():
        # skip anything the camera cannot currently see
        if not cam.pointInsideFrustum(obj.worldPosition):
            continue

        pos = cam.getScreenPosition(obj)
        blf.size(font_id, int(windowWidth * 0.02), 72)
        # draw a black shadow to increase contrast with white parts
        blf.enable(font_id, blf.SHADOW)
        blf.shadow(font_id, 5, 0.0, 0.0, 0.0, 1.0)
        # screen position is normalized with Y pointing down: scale to
        # pixels and flip the vertical axis
        label_x = pos[0] * windowWidth
        label_y = (1 - pos[1]) * windowHeight
        blf.position(font_id, label_x, label_y, 0)
        blf.draw(font_id, obj.name)