def initialize(self):
    PocolibsDataStreamOutput.initialize(self, VimanObjectPublicArray)

    # Initialise the object
    self.obj = VimanObjectPublicArray()

    self.scene_object_list = []
    # If provided, read the list of ARToolkit tags to fill the list of
    # objects to export.
    self.scene_object_list += self._read_object_list()

    # Complete the list with the objects already tracked by the semantic
    # camera.
    if 'passiveObjectsDict' in blenderapi.persistantstorage():
        self.scene_object_list += [obj['label'] for obj in
            blenderapi.persistantstorage().passiveObjectsDict.values()]

    if not self.scene_object_list:
        logger.error("No VIMAN object to track. Make sure some objects have "
                     "the game property 'Object' defined. "
                     "Disabling poster for now.")
        return None

    # Init the data structures used by this poster
    self.obj.nbObjects = len(self.scene_object_list)
    for i, obj_name in enumerate(self.scene_object_list):
        logger.info("Adding %s to the objects tracked by VIMAN" % obj_name)
        self.obj.objects[i].name = bytes(str(obj_name), 'utf-8')

def default(self, ci):
    first_cam = blenderapi.persistantstorage().componentDict[self.camera_order[0]]
    main_to_origin = first_cam.robot_pose

    pom_robot_position = ViamPos()
    pom_robot_position.x = main_to_origin.x
    pom_robot_position.y = main_to_origin.y
    pom_robot_position.z = main_to_origin.z
    pom_robot_position.yaw = main_to_origin.yaw
    pom_robot_position.pitch = main_to_origin.pitch
    pom_robot_position.roll = main_to_origin.roll

    # Compute the current time
    pom_date, t = Pocolibs.compute_date()

    ors_cameras = []
    ors_images = []

    # Cycle through the cameras on the base.
    # In normal circumstances, there will be two for stereo.
    for camera_name in self.camera_order:
        camera_instance = blenderapi.persistantstorage().componentDict[camera_name]

        main_to_sensor = main_to_origin.transformation3d_with(
            camera_instance.position_3d)

        imX = camera_instance.image_width
        imY = camera_instance.image_height
        try:
            image_data = camera_instance.local_data['image']
        except KeyError as detail:
            logger.warning("Camera image not found to read by VIAM poster.\n"
                           "\tThe 'Class' property in the Camera component "
                           "could be wrongly defined")
            # Fall through to the capture check below instead of leaving
            # image_data unbound
            image_data = None

        # Don't create a poster if the camera is disabled
        if image_data is None or not camera_instance.capturing:
            logger.debug("Camera '%s' not capturing. Exiting viam poster" %
                         camera_instance.bge_object.name)
            return

        # Fill in the structure with the image information
        camera_data = ViamSimuImage()
        camera_data.width = imX
        camera_data.height = imY
        camera_data.pom_tag = pom_date
        camera_data.tacq_sec = t.second
        camera_data.tacq_usec = t.microsecond
        camera_data.x = main_to_sensor.x
        camera_data.y = main_to_sensor.y
        camera_data.z = main_to_sensor.z
        camera_data.yaw = main_to_sensor.yaw
        camera_data.pitch = main_to_sensor.pitch
        camera_data.roll = main_to_sensor.roll
        camera_data.flipped = camera_instance.vertical_flip

        ors_cameras.append(camera_data)
        ors_images.append(image_data)

    self.obj.post(pom_robot_position, ors_cameras, ors_images)

def action(self):
    if self._time_initialized:
        self.node.morse_ambassador.advance_time()
    else:
        self.node.init_time()
        self._time_initialized = True

    if self.time_sync and \
       self.stop_time < self.node.morse_ambassador.logical_time:
        blenderapi.persistantstorage().serviceObjectDict["simulation"].quit()

def _robot_exists(robot):
    # Search the local robots first, then the external ones. Either
    # dictionary may be missing, depending on the scenario.
    try:
        for obj, robot_instance in blenderapi.persistantstorage().robotDict.items():
            if obj.name == robot:
                return robot_instance
    except KeyError:
        pass
    try:
        for obj, robot_instance in blenderapi.persistantstorage().externalRobotDict.items():
            if obj.name == robot:
                return robot_instance
    except KeyError:
        pass
    return None

def switch_cameras(self):
    """ Change between the main camera view in MORSE and the
    first person camera """
    scene = blenderapi.scene()
    index = blenderapi.persistantstorage().current_camera_index
    next_camera = scene.cameras[index]
    scene.active_camera = next_camera
    logger.info("Showing view from camera: '%s'" % next_camera.name)
    # Disable mouse cursor for Human camera
    if next_camera.name == "Human_Camera":
        blenderapi.mousepointer(visible=False)
    else:
        blenderapi.mousepointer(visible=True)
    # Update the index for the next call
    index = (index + 1) % len(scene.cameras)
    blenderapi.persistantstorage().current_camera_index = index

def synchronize(self):
    if self.connected:
        # Get the coordinates of local robots
        robot_dict = blenderapi.persistantstorage().robotDict
        for obj, local_robot_data in robot_dict.items():
            euler_rotation = obj.worldOrientation.to_euler()
            self.out_data[obj.name] = [obj.worldPosition.to_tuple(),
                [euler_rotation.x, euler_rotation.y, euler_rotation.z]]

        # Send the encoded dictionary through a socket
        # and receive a reply with any changes in the other nodes
        in_data = self._exchange_data(self.out_data)
        if in_data is not None:
            scene = blenderapi.scene()
            # Update the positions of the external robots
            for obj_name, robot_data in in_data.items():
                try:
                    obj = scene.objects[obj_name]
                    if obj not in robot_dict:
                        logger.debug("Data received: %s", robot_data)
                        obj.worldPosition = robot_data[0]
                        obj.worldOrientation = mathutils.Euler(
                            robot_data[1]).to_matrix()
                except KeyError as detail:
                    logger.info("Robot %s not found in this simulation "
                                "scenario, but present in another node. "
                                "Ignoring it!" % detail)

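# For reference, a sketch of the dictionary exchanged above. The robot name
# and the numbers are illustrative only: each entry maps a Blender object
# name to its world position tuple and its orientation as XYZ Euler angles.
_example_out_data = {
    'ATRV': [(1.0, 2.0, 0.1),       # obj.worldPosition.to_tuple()
             [0.0, 0.0, 1.57]],     # [euler.x, euler.y, euler.z], radians
}
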
def initialize(self):
    try:
        self.out_robot = self.rtia_.getObjectClassHandle("Robot")
        self.out_position = self.rtia_.getAttributeHandle("position",
            self.out_robot)
        self.out_orientation = self.rtia_.getAttributeHandle("orientation",
            self.out_robot)
    except rti.NameNotFound:
        logger.error("'Robot' (or attributes) not declared in FOM. "
                     "Your '.fed' file may not be up-to-date.")
        return False

    self.rtia_.publishObjectClass(self.out_robot,
        [self.out_position, self.out_orientation])
    robot_dict = blenderapi.persistantstorage().robotDict
    for obj, local_robot_data in robot_dict.items():
        self.objects.append(self.rtia_.registerObjectInstance(
            self.out_robot, obj.name))
        logger.info("Pose of robot %s will be published on the %s federation.",
                    obj.name, self.federation)

    self.in_robot = self.rtia_.getObjectClassHandle("Robot")
    self.in_position = self.rtia_.getAttributeHandle("position", self.in_robot)
    self.in_orientation = self.rtia_.getAttributeHandle("orientation",
        self.in_robot)
    self.rtia_.subscribeObjectClassAttributes(self.in_robot,
        [self.in_position, self.in_orientation])

    # TSO initialization
    if self.tag:
        self.tag = False
        self.lookahead = 1
        self.current_time = self.rtia_.queryFederateTime()
        logger.debug("Initial Federate time is %s", self.current_time)
        self.rtia_.enableTimeConstrained()
        self.rtia_.enableTimeRegulation(self.current_time, self.lookahead)
        while not self.constrained and not self.regulator and not self.tag:
            self.rtia_.tick(0, 1)

    logger.debug("MorseAmbassador initialized")

def _check_visible(self, obj):
    """ Check if an object lies inside of the camera frustum. """
    # TrackedObjects was filled at initialization
    # with the object's bounding boxes
    bb = blenderapi.persistantstorage().trackedObjects[obj]
    pos = obj.position
    bbox = [[bb_corner[i] + pos[i] for i in range(3)] for bb_corner in bb]

    logger.debug("\n--- NEW TEST ---")
    logger.debug("OBJECT '{0}' AT {1}".format(obj, pos))
    logger.debug("CAMERA '{0}' AT {1}".format(
        self.blender_cam, self.blender_cam.position))
    logger.debug("BBOX: >{0}<".format(bbox))
    logger.debug("BBOX: {0}".format(bb))

    # Translate the bounding box to the current object position
    # and check if it is in the frustum
    if self.blender_cam.boxInsideFrustum(bbox) != self.blender_cam.OUTSIDE:
        # Check that there are no other objects between the camera
        # and the selected object
        # NOTE: This is a very simple test. Hiding only the 'center'
        # of an object will make it invisible, even if the rest is
        # still seen from the camera
        closest_obj = self.bge_object.rayCastTo(obj)
        if closest_obj in [obj] + list(obj.children):
            return True
    return False

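# Standalone illustration of the bounding-box translation done in
# _check_visible(): Blender's bound_box is expressed in local space, so each
# corner is offset by the object's world position before the frustum test.
# Runnable outside Blender; the corner values below are made up.
def _translate_bbox(bb, pos):
    return [[bb_corner[i] + pos[i] for i in range(3)] for bb_corner in bb]

# Two corners of a unit cube, moved to (10, 0, 1):
# _translate_bbox([[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]], (10, 0, 1))
# -> [[9.5, -0.5, 0.5], [10.5, 0.5, 1.5]]
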
def default_action(self):
    """ Do the actual semantic 'grab'.

    Iterate over all the tracked objects, and check if they are
    visible for the robot. Visible objects must have a bounding box
    and be active for physical simulation (have the 'Actor' checkbox
    selected)
    """
    # Call the action of the parent class
    super(self.__class__, self).default_action()

    # Create dictionaries
    self.local_data['visible_objects'] = []
    for obj in blenderapi.persistantstorage().trackedObjects.keys():
        if self._check_visible(obj):
            # Create dictionary to contain object name, type,
            # description, position and orientation
            obj_dict = {'name': passive_objects.label(obj),
                        'description': obj.get('Description', ''),
                        'type': obj.get('Type', ''),
                        'position': obj.worldPosition,
                        'orientation': obj.worldOrientation.to_quaternion()}
            self.local_data['visible_objects'].append(obj_dict)

    logger.debug("Visible objects: %s" % self.local_data['visible_objects'])

def synchronize(self):
    if not self.node_stream:
        logger.debug("not self.node_stream")
        return
    if not self.node_stream.connected:
        logger.debug("not self.node_stream.connected")
        return

    # Get the coordinates of local robots
    for obj in blenderapi.persistantstorage().robotDict.keys():
        euler_rotation = obj.worldOrientation.to_euler()
        self.out_data[obj.name] = [obj.worldPosition.to_tuple(),
            [euler_rotation.x, euler_rotation.y, euler_rotation.z]]

    self.out_data['__time'] = [self.simulation_time.time,
                               1.0 / blenderapi.getfrequency(),
                               self.simulation_time.real_time]

    # Send the encoded dictionary through a socket
    # and receive a reply with any changes in the other nodes
    in_data = self._exchange_data(self.out_data)
    logger.debug("Received: %s" % in_data)
    if not in_data:
        return

    try:
        self.update_scene(in_data, blenderapi.scene())
    except Exception as e:
        logger.warning("error while processing incoming data: " + str(e))

def update(self, value=None):
    if value is not None:
        self.value = value

    true_value = self.getAngleRate() if self.mode == 'SPEED' else self.getAngle()
    # Use the stored setpoint, so update() can be called without an argument
    err = self.value - true_value
    time = blenderapi.persistantstorage().time.time
    elapsed_time = time - self.prev_time

    # Trapezoidal integration of the error
    self.int_err += (self.prev_err + err) / 2 * elapsed_time
    torque = self.Kp * err + self.Ki * self.int_err
    if elapsed_time > 0:
        torque += self.Kd * (err - self.prev_err) / elapsed_time
    if self.max_torque is not None:
        torque = min(self.max_torque, max(-self.max_torque, torque))
    self.applyTorque(torque)

    self.prev_err = err
    self.prev_time = time
    return true_value, torque

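# A minimal, self-contained sketch of the discretisation used by update():
# trapezoidal integration for the I term, a backward difference for the D
# term. The gains and time step below are arbitrary example values.
def _pid_step(err, prev_err, int_err, dt, kp=1.0, ki=0.1, kd=0.05):
    int_err += (prev_err + err) / 2 * dt        # trapezoidal rule
    torque = kp * err + ki * int_err
    if dt > 0:
        torque += kd * (err - prev_err) / dt    # finite-difference derivative
    return torque, int_err
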
def _get_armature_actuator(self):
    # Get the reference to the class instance of the armature actuator
    component_dict = blenderapi.persistantstorage().componentDict
    if self.armature and self.armature.name in component_dict:
        self._armature_actuator = component_dict[self.armature.name]
    else:
        logger.error("Could not find armature actuator <%s> from armature "
                     "sensor <%s>!" % (self.armature.name, self.name()))

def initialize(self):
    try:
        out_robot = self.object_handle("Robot")
        self.out_position = self.attribute_handle("position", out_robot)
        self.out_orientation = self.attribute_handle("orientation", out_robot)
    except rti.NameNotFound:
        logger.error("'Robot' (or attributes) not declared in FOM. "
                     "Your '.fed' file may not be up-to-date.")
        return False

    self._rtia.publishObjectClass(out_robot,
        [self.out_position, self.out_orientation])
    robot_dict = blenderapi.persistantstorage().robotDict
    for obj in robot_dict.keys():
        self.register_object(out_robot, obj.name)
        logger.info("Pose of robot %s will be published on the %s federation.",
                    obj.name, self.federation)

    in_robot = self.object_handle("Robot")
    self.in_position = self.attribute_handle("position", in_robot)
    self.in_orientation = self.attribute_handle("orientation", in_robot)
    self.suscribe_attributes(in_robot, [self.in_position, self.in_orientation])

def deactivate(component_name):
    """ Stop the specified component from calling its default_action method """
    try:
        blenderapi.persistantstorage().componentDict[component_name]._active = False
    except KeyError as detail:
        logger.warning("Component %s not found. Can't deactivate" % detail)
        raise MorseRPCTypeError("Component %s not found. Can't deactivate" % detail)

def __init__(self, args, kwargs):
    """ Initialize the socket connections """
    # Call the constructor of the parent class
    DatastreamManager.__init__(self, args, kwargs)

    self._mav = mavlink.MAVLink(None)
    self._conn_manager = MavlinkConnManager(self._mav)
    self._boot_time = blenderapi.persistantstorage().time.time

def list_robots():
    """ Return a list of the robots in the current scenario

    Uses the list generated during the initialisation of the scenario
    """
    return [obj.name for obj in blenderapi.persistantstorage().robotDict.keys()]

def activate(component_name):
    """ Enable the functionality of the component specified """
    try:
        blenderapi.persistantstorage().componentDict[component_name]._active = True
    except KeyError as detail:
        logger.warning("Component %s not found. Can't activate" % detail)
        raise MorseRPCTypeError("Component %s not found. Can't activate" % detail)

def activate(self, component_name):
    """ Enable the functionality of the component specified """
    try:
        blenderapi.persistantstorage().componentDict[component_name]._active = True
    except KeyError as detail:
        logger.warning("Component %s not found. Can't activate" % detail)
        raise MorseRPCTypeError("Component %s not found. Can't activate" % detail)

def __init__(self, obj, parent=None):
    """ Constructor method.

    Receives the reference to the Blender object.
    The second parameter should be the name of the object's parent.
    """
    logger.info('%s initialization' % obj.name)
    # Call the constructor of the parent class
    morse.sensors.camera.Camera.__init__(self, obj, parent)

    # Locate the Blender camera object associated with this sensor
    main_obj = self.bge_object
    # So the check below also works when no camera child is found
    self.blender_cam = None
    for obj in main_obj.children:
        if hasattr(obj, 'lens'):
            self.blender_cam = obj
            logger.info("Camera object: {0}".format(self.blender_cam))
            break
    if not self.blender_cam:
        logger.error("no camera object associated to the semantic camera. "
                     "The semantic camera requires a standard Blender "
                     "camera in its children.")

    # TrackedObject is a dictionary containing the list of tracked objects
    # (->meshes with a class property set up) as keys
    # and the bounding boxes of these objects as value.
    if 'trackedObjects' not in blenderapi.persistantstorage():
        logger.info('Initialization of tracked objects:')
        blenderapi.persistantstorage().trackedObjects = \
            dict.fromkeys(passive_objects.active_objects())

        # Store the bounding box of the marked objects
        for obj in blenderapi.persistantstorage().trackedObjects.keys():
            # bound_box returns the bounding box in local space
            # instead of world space.
            blenderapi.persistantstorage().trackedObjects[obj] = \
                blenderapi.objectdata(obj.name).bound_box
            details = passive_objects.details(obj)
            logger.info(' - {%s} (type:%s)' % (details['label'], details['type']))

    if self.noocclusion:
        logger.info("Semantic camera running in 'no occlusion' mode (fast mode).")
    logger.info("Component initialized, runs at %.2f Hz ", self.frequency)

def deactivate(self, component_name):
    """ Stop the specified component from calling its default_action method """
    try:
        blenderapi.persistantstorage().componentDict[component_name]._active = False
    except KeyError as detail:
        logger.warning("Component %s not found. Can't deactivate" % detail)
        raise MorseRPCTypeError("Component %s not found. Can't deactivate" % detail)

def get_cameras_instance(self):
    if self.video_camera and self.depth_camera:
        return
    # Get the reference to the class instance of the depth and video cameras
    component_dict = blenderapi.persistantstorage().componentDict
    if self.video_camera_name in component_dict:
        self.video_camera = component_dict[self.video_camera_name]
    if self.depth_camera_name in component_dict:
        self.depth_camera = component_dict[self.depth_camera_name]

def abort_request(self, request_id):
    """ This method will interrupt a running asynchronous service,
    uniquely described by its request_id
    """
    component_name, service_name = self._pending_requests[request_id]
    for component in blenderapi.persistantstorage().componentDict.values():
        if component.name() == component_name:
            logger.info("calling interrupt on %s" % str(component))
            component.interrupt()
            return
    # if not found, search in the overlay dictionary
    for overlay in blenderapi.persistantstorage().overlayDict.values():
        if overlay.name() == component_name:
            logger.info("calling interrupt on %s" % str(overlay))
            overlay.interrupt()
            return

def default(self, ci='unused'):
    logger.debug("Posting message to the MOOS database.")
    current_time = blenderapi.persistantstorage().time.time
    # Iterate through all objects of the component_instance and post the data
    for variable, data in self.data.items():
        name = "%s_%s" % (self.component_name, variable)
        logger.debug("name: %s, type: %s, data: %s" %
                     (name, type(data), str(data)))
        self.m.Notify(name, str(data), current_time)

def graspable_objects():
    """ Returns all objects in current scene that have the 'Graspable'
    property set to True, amongst active objects.
    """
    return [obj for (obj, details) in
            blenderapi.persistantstorage().passiveObjectsDict.items()
            if details['graspable']]

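# Usage sketch for graspable_objects(), only meaningful inside a running
# simulation where passiveObjectsDict is populated. label() refers to the
# passive_objects helper used elsewhere in this code.
#
#   for obj in graspable_objects():
#       logger.info("Can grasp: %s", label(obj))
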
def default(self, ci='unused'):
    # post the simulation time so that it can be synced to MOOSTime
    self.notify('MORSE_TIME', blenderapi.persistantstorage().time.time)
    # post the robot position
    self.notify('MORSE_SIM_X', self.data['x'])
    self.notify('MORSE_SIM_Y', self.data['y'])
    self.notify('MORSE_SIM_Z', self.data['z'])
    self.notify('MORSE_SIM_YAW', self.data['yaw'])
    self.notify('MORSE_SIM_ROLL', self.data['roll'])
    self.notify('MORSE_SIM_PITCH', self.data['pitch'])

def __init__(self, name, hvl, radioactivity):
    """Initialisation setting name, half-value layers, and radioactivity.

    The HVL has to be supplied as a dictionary with the source radionuclide
    as key and the corresponding HVL [cm] as value.
    """
    self.name = name
    self.hvl = hvl
    self.radioactivity = radioactivity
    self.t_created = blenderapi.persistantstorage().time.time

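# For reference, the physical meaning of the stored half-value layer: each
# HVL of shielding halves the intensity, so a thickness d of material with
# half-value layer hvl attenuates by a factor 0.5 ** (d / hvl). This helper
# is an illustration, not part of the original class.
def _attenuation_factor(thickness_cm, hvl_cm):
    return 0.5 ** (thickness_cm / hvl_cm)

# e.g. 2 cm of material with a 1 cm HVL passes 0.5 ** 2 = 25% of the dose.
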
def initialize(self):
    AbstractMOOS.initialize(self)

    # post lidar settings to the database only once at startup
    cur_time = blenderapi.persistantstorage().time.time
    self.m.Notify('sScanAngle',
                  self.component_instance.scan_window, cur_time)
    self.m.Notify('sScanResolution',
                  self.component_instance.resolution, cur_time)
    self.m.Notify('sScanRange',
                  self.component_instance.laser_range, cur_time)

def details(obj):
    """ Returns a dictionary containing the different properties for a
    given active object.

    If the object is not active (ie, it has no 'Object' property, or the
    property is set to False), None is returned.

    If no label is available, it defaults to the Blender name.
    The description field may be an empty string.
    If no type is available, it defaults to 'Object'.
    If the graspable flag is not present, it defaults to False.

    :param obj: the Blender object.
    :return: a dictionary {'label':string, 'description':string,
             'type':string, 'graspable':bool}
    """
    if obj not in blenderapi.persistantstorage().passiveObjectsDict:
        return None
    return blenderapi.persistantstorage().passiveObjectsDict[obj]

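# Usage sketch (inside a running simulation); the returned values below are
# illustrative only:
#
#   props = details(obj)
#   if props and props['graspable']:
#       print(props['label'], props['type'])
#
# A typical return value:
#   {'label': 'Cornflakes', 'description': '', 'type': 'Object',
#    'graspable': True}
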
def update_scene(self, in_data, scene):
    # Update the positions of the external robots
    for obj_name, robot_data in in_data.items():
        try:
            obj = scene.objects[obj_name]
        except Exception:
            logger.debug("%s not found in this simulation scenario, but "
                         "present in another node. Ignoring it!" % obj_name)
            continue
        if obj not in blenderapi.persistantstorage().robotDict:
            obj.worldPosition = robot_data[0]
            obj.worldOrientation = mathutils.Euler(robot_data[1]).to_matrix()

def in_zones(self, name=None, type=None):
    """ Determine which zone(s) contain(s) the current object.

    If :param name: is given, only check whether that specific zone
    contains the position.
    If :param type: is given, only search among zones of that type.
    """
    zone_manager = blenderapi.persistantstorage().zone_manager
    return zone_manager.contains(self, name=name, type=type)

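# Usage sketch for in_zones(); the zone name and type are made-up examples.
#
#   zones = self.in_zones()                     # every zone containing us
#   kitchen = self.in_zones(name='kitchen')     # one specific zone
#   rooms = self.in_zones(type='room')          # restrict the search by type
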
def capture(self, n):
    """ The service takes an integer as argument and dispatches the call
    to all its individual cameras. The service ends when each camera has
    terminated its work.

    :param n: the number of calls to each individual camera
    """
    self._expected_answer = self.num_cameras
    for camera in self.camera_list:
        camera_instance = blenderapi.persistantstorage().componentDict[camera]
        camera_instance.capture(partial(self.capture_completion), n)

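# Self-contained sketch of the dispatch/completion pattern used by capture():
# every camera gets the same completion callback, and the service is over
# once all of them have reported back. The class below is illustrative, not
# the actual implementation of capture_completion.
from functools import partial

class _CaptureDemo:
    def __init__(self, num_cameras):
        self._expected_answer = num_cameras

    def capture_completion(self, result=None):
        self._expected_answer -= 1
        if self._expected_answer == 0:
            print("all cameras done")

# demo = _CaptureDemo(2)
# callback = partial(demo.capture_completion)   # as passed to each camera
# callback(); callback()                        # -> "all cameras done"
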
def details(self):
    """Returns a structure containing all possible details
    about the simulation currently running, including
    the list of robots, the list of services and datastreams,
    the list of middleware in use, etc.
    """
    simu = blenderapi.persistantstorage()
    details = {}

    # Retrieve the list of services and associated middlewares
    services = {}
    services_iface = {}
    for n, i in simu.morse_services.request_managers().items():
        services.update(i.services())
        for cmpt in i.services():
            services_iface.setdefault(cmpt, []).append(n)

    def cmptdetails(c):
        c = simu.componentDict[c.name]
        cmpt = {"type": type(c).__name__}
        if c.name() in services:
            cmpt["services"] = services[c.name()]
            cmpt["service_interfaces"] = services_iface[c.name()]
        if c.name() in simu.datastreams:
            stream = simu.datastreams[c.name()]
            cmpt["stream"] = stream[0]
            cmpt["stream_interfaces"] = stream[1]
        return cmpt

    def robotdetails(r):
        robot = {"name": r.name(),
                 "type": type(r).__name__,
                 "components": {c.name: cmptdetails(c) for c in r.components}}
        if r.name() in services:
            robot["services"] = services[r.name()]
            robot["services_interfaces"] = services_iface[r.name()]
        return robot

    details['robots'] = [robotdetails(r) for n, r in simu.robotDict.items()]
    return details

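# For reference, the structure assembled by details() has this shape (all
# names illustrative; actual services and interfaces depend on the scenario):
#
#   {'robots': [{'name': 'ATRV',
#                'type': 'ATRV',
#                'components': {'ATRV.pose': {'type': 'Pose',
#                                             'stream': '...',
#                                             'stream_interfaces': [...]}},
#                'services': [...],
#                'services_interfaces': [...]}]}
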
def default(self, ci='unused'):
    cur_time = pymoos.MOOSCommClient.MOOSTime()
    # post the simulation time so that it can be synced to MOOSTime
    self.m.Notify('actual_time',
                  blenderapi.persistantstorage().current_time, cur_time)
    # post the robot position
    self.m.Notify('simEast', self.data['x'], cur_time)
    self.m.Notify('simNorth', self.data['y'], cur_time)
    self.m.Notify('simHeight', self.data['z'], cur_time)
    self.m.Notify('simYaw', self.data['yaw'], cur_time)
    self.m.Notify('simRoll', self.data['roll'], cur_time)
    self.m.Notify('simPitch', self.data['pitch'], cur_time)

def look_robot(camera):
    """ Put the camera above a robot """
    global robots, current_robot
    if not robots:
        robots = [r for r in blenderapi.persistantstorage().robotDict]
    if not robots:
        # if current node is just a 'watcher'
        logger.warning("no robots on current node")
        return
    robot = robots[current_robot]
    camera.worldTransform = robot.worldTransform * camera_to_robot_transform
    # switch between robots
    current_robot = (current_robot + 1) % len(robots)

def default_action(self):
    """ Adds additional information (date, time and heading) to the
    message of the RawGPS
    """
    # Call the default_action of the parent class
    RawGPS.default_action(self)

    current_time = time.gmtime(blenderapi.persistantstorage().time.time)
    date = time.strftime("%d%m%y", current_time)
    time_h_m_s = time.strftime("%H%M%S", current_time)
    # Convert the ENU track angle into a compass heading
    heading = (2 * math.pi - math.atan2(self.dy, self.dx) + math.pi / 2) \
              % (2 * math.pi)

    self.local_data['date'] = date
    self.local_data['time'] = time_h_m_s
    self.local_data['heading'] = math.degrees(heading)

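# Worked example of the heading conversion above: it maps the trigonometric
# angle atan2(dy, dx) (counter-clockwise from east, ENU frame) to a compass
# heading (clockwise from north). This helper only restates the formula.
import math

def _compass_heading_deg(dx, dy):
    heading = (2 * math.pi - math.atan2(dy, dx) + math.pi / 2) % (2 * math.pi)
    return math.degrees(heading)

# _compass_heading_deg(1, 0) -> 90.0  (due east)
# _compass_heading_deg(0, 1) -> 0.0   (due north)
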
def encode_data(self):
    parent_position = self.component_instance.robot_parent.position_3d
    lines = []
    lines.append('==> Data at X,Y,Z: [%.6f %.6f %.6f] '
                 'yaw,pitch,roll: [%.6f %.6f %.6f] | index %d | time %.2f\n'
                 % (parent_position.x, parent_position.y, parent_position.z,
                    parent_position.yaw, parent_position.pitch,
                    parent_position.roll, self.index,
                    blenderapi.persistantstorage().current_time))
    for variable, data in self.data.items():
        if isinstance(data, float):
            lines.append("\t%s = %.6f\n" % (variable, data))
        else:
            lines.append("\t%s = %s\n" % (variable, repr(data)))
    return ''.join(lines)