def add_control(self):
    """Parse the current message widget as YAML and append it to the selected zone.

    Reads the message from ``self.msgWidget``, keyed by the zone currently
    selected in ``self.zoneListWidget``; creates the zone's list on first use.
    Assumes exactly one zone item is selected (raises IndexError otherwise).
    """
    # safe_load instead of load: load() without an explicit Loader is unsafe
    # and deprecated since PyYAML 5.1, and plain message dicts need no more.
    msg_dict = yaml.safe_load(strify_message(self.msgWidget.msg))
    zone = self.zoneListWidget.selectedItems()[0].text()
    # Removed leftover debug trap: the original wrapped this in a bare
    # 'except:' that dropped into pdb.set_trace() (an injected breakpoint),
    # silently swallowing every error in production.
    self.zone_dict.setdefault(zone, []).append(msg_dict)
def roundtrip(m):
    """Serialize message *m* to YAML text and rebuild a fresh instance from it.

    Returns a new instance of ``type(m)`` populated from the parsed YAML.
    """
    yaml_text = strify_message(m)
    print(yaml_text)
    loaded = yaml.safe_load(yaml_text)
    print("loaded", loaded)
    new_inst = m.__class__()
    # An empty message serializes to an empty YAML document (loaded is None);
    # fill with no arguments in that case.
    args = [] if loaded is None else [loaded]
    fill_message_args(new_inst, args)
    return new_inst
def roundtrip(m):
    """Serialize message *m* to YAML text and rebuild a fresh instance from it.

    Returns a new instance of ``type(m)`` populated from the parsed YAML.
    """
    yaml_text = strify_message(m)
    print(yaml_text)
    # FIX: yaml.load() without an explicit Loader is unsafe on untrusted text
    # and deprecated since PyYAML 5.1; safe_load handles plain message dicts.
    loaded = yaml.safe_load(yaml_text)
    print("loaded", loaded)
    new_inst = m.__class__()
    if loaded is not None:
        fill_message_args(new_inst, [loaded])
    else:
        # Empty message -> empty YAML document -> no fill arguments.
        fill_message_args(new_inst, [])
    return new_inst
def roundtrip(m, escape_strings=False):
    """Serialize message *m* to YAML text and rebuild a fresh instance from it.

    :param m: message instance to round-trip
    :param escape_strings: forwarded to ``strify_message`` to control string
        escaping in the generated YAML
    :returns: a new instance of ``type(m)`` populated from the parsed YAML
    """
    yaml_text = strify_message(m, escape_strings=escape_strings)
    print(yaml_text)
    # FIX: yaml.load() without an explicit Loader is unsafe on untrusted text
    # and deprecated since PyYAML 5.1; safe_load handles plain message dicts.
    loaded = yaml.safe_load(yaml_text)
    print("loaded", loaded)
    new_inst = m.__class__()
    if loaded is not None:
        fill_message_args(new_inst, [loaded])
    else:
        # Empty message -> empty YAML document -> no fill arguments.
        fill_message_args(new_inst, [])
    return new_inst
def test_strify_message(self):
    # This is a bit overtuned, but it will catch regressions.
    # FIX: assertEquals -> assertEqual (deprecated alias, removed in Py3.12).
    from genpy.message import Message, strify_message

    # Empty message stringifies to the empty string.
    class M1(Message):
        __slots__ = []
        _slot_types = []

        def __init__(self):
            pass
    self.assertEqual('', strify_message(M1()))

    # One field of each primitive category.
    class M2(Message):
        __slots__ = ['str', 'int', 'float', 'bool', 'list']
        _slot_types = ['string', 'int32', 'float32', 'bool', 'int32[]']

        def __init__(self, str_, int_, float_, bool_, list_):
            self.str = str_
            self.int = int_
            self.float = float_
            self.bool = bool_
            self.list = list_

    # NOTE(review): expected strings below were reconstructed from a
    # whitespace-mangled source; verify line breaks/indent against
    # strify_message output.
    self.assertEqual("""str: string
int: 123456789101112
float: 5678.0
bool: True
list: [1, 2, 3]""", strify_message(M2('string', 123456789101112, 5678., True, [1, 2, 3])))
    self.assertEqual("""str: ''
int: -1
float: 0.0
bool: False
list: []""", strify_message(M2('', -1, 0., False, [])))

    # Nested message field.
    class M3(Message):
        __slots__ = ['m2']
        _slot_types = ['M1']

        def __init__(self, m2):
            self.m2 = m2
    self.assertEqual("""m2: 
  str: string
  int: -1
  float: 0.0
  bool: False
  list: []""", strify_message(M3(M2('string', -1, 0., False, []))))

    # test array of Messages field
    class M4(Message):
        __slots__ = ['m2s']
        _slot_types = ['M2[]']

        def __init__(self, m2s):
            self.m2s = m2s
    self.assertEqual("""m2s: 
- 
  str: string
  int: 1234
  float: 5678.0
  bool: True
  list: [1, 2, 3]
- 
  str: string
  int: -1
  float: 0.0
  bool: False
  list: []""", strify_message(M4([
        M2('string', 1234, 5678., True, [1, 2, 3]),
        M2('string', -1, 0., False, []),
    ])))

    # test Time and Duration
    from genpy import Time, Duration

    class M5(Message):
        __slots__ = ['t', 'd']
        _slot_types = ['time', 'duration']

        def __init__(self, t, d):
            self.t = t
            self.d = d
    self.assertEqual("""t: 
  secs: 987
  nsecs: 654
d: 
  secs: 123
  nsecs: 456""", strify_message(M5(Time(987, 654), Duration(123, 456))))

    # test final clause of strify -- str() anything that isn't recognized
    if sys.hexversion > 0x03000000:  # Python3
        self.assertEqual("{1}", strify_message(set([1])))
    else:
        self.assertEqual("set([1])", strify_message(set([1])))
def next_message(self, sensors):
    """Consume bag messages until every requested sensor has fresh data.

    :param sensors: tuple of sensor names to wait for; the empty tuple means
        "all sensors", and the special name 'stream' makes the loop stop
        after every single message.

    Side effects: refreshes self.imu / self.pose / self.orientation /
    self.color_img / self.depth_img / self.scan and their ``*_t`` timestamps,
    plus self.topic_name and self.current_timestamp.
    """
    self.pose = {}
    self.imu = {}
    self.imu_t = None
    self.orientation = {}
    self.orientation_t = None
    self.color_img = None
    self.color_img_t = None
    self.depth_img = None
    self.depth_img_t = None
    self.scan = None
    self.scan_t = None

    def _active(name):
        # A sensor is consumed when explicitly requested, when no filter is
        # given (empty tuple), or in streaming mode.
        return name in sensors or sensors == () or 'stream' in sensors

    while True:
        topic, msg, t = next(self.generator)
        t = t.to_sec()
        if topic == '/imu' and _active('accelerometer'):
            self.imu['x'] = float(strify_message(msg.linear_acceleration.x))
            # NOTE(review): 'y' reads linear_acceleration.z -- looks like a
            # deliberate axis remap, but worth confirming.
            self.imu['y'] = float(strify_message(msg.linear_acceleration.z))
            self.imu_t = t
            self.topic_name = 'accelerometer'
        elif topic == '/pose':
            self.pose['x'] = float(strify_message(msg.pose.pose.position.x))
            self.pose['y'] = float(strify_message(msg.pose.pose.position.y))
            if _active('orientation'):
                self.orientation['qz'] = float(strify_message(msg.pose.pose.orientation.z))
                self.orientation['qw'] = float(strify_message(msg.pose.pose.orientation.w))
                # Inject the pre-generated noise sample into the quaternion.
                noise = self.orientation_noise.pop()
                self.orientation['qz'] = self.orientation['qz'] + noise[0]
                self.orientation['qw'] = self.orientation['qw'] + noise[1]
                self.orientation_t = t
                self.topic_name = 'orientation'
        elif topic == '/camera/rgb/image_color' and _active('color_img'):
            self.color_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding="rgb8")
            self.color_img_t = t
            self.topic_name = 'color_img'
        elif topic == '/camera/depth/image' and _active('depth_img'):
            # Depth is kept in its native encoding (RGB8 conversion was
            # abandoned: self.depthToRGB8(gray_img_buff, msg.encoding)).
            gray_img_buff = self.bridge.imgmsg_to_cv2(msg, desired_encoding=msg.encoding)
            self.depth_img = gray_img_buff
            self.depth_img_t = t
            self.topic_name = 'depth_img'
        elif topic == '/scan' and _active('scan'):
            self.scan = np.array(msg.ranges)
            self.scan_t = t
            self.topic_name = 'scan'
        self.current_timestamp = t

        if 'stream' in sensors:
            break
        if sensors == ():
            # FIX: use 'is (not) None' instead of '== None' -- the images are
            # numpy arrays, where '!= None' is an elementwise comparison that
            # raises "truth value is ambiguous" on modern numpy.
            # NOTE(review): the original required scan to STILL be None here;
            # that looks like it may have been meant as 'is not None', but the
            # behavior is preserved as written -- confirm with the author.
            if (len(self.pose) > 0 and len(self.imu) > 0
                    and self.color_img is not None
                    and self.depth_img is not None
                    and self.scan is None):
                break
        else:
            # Every explicitly requested sensor must have a timestamp before
            # we stop (replaces the former k/l/m/n/o flag ladder).
            if all(name not in sensors or stamp is not None
                   for name, stamp in (('accelerometer', self.imu_t),
                                       ('orientation', self.orientation_t),
                                       ('color_img', self.color_img_t),
                                       ('depth_img', self.depth_img_t),
                                       ('scan', self.scan_t))):
                break
def test_strify_message(self):
    # This is a bit overtuned, but it will catch regressions.
    # FIX: assertEquals -> assertEqual (deprecated alias, removed in Py3.12).
    from genpy.message import Message, strify_message

    # Empty message stringifies to the empty string.
    class M1(Message):
        __slots__ = []
        _slot_types = []

        def __init__(self):
            pass

    self.assertEqual("", strify_message(M1()))

    # One field of each primitive category.
    class M2(Message):
        __slots__ = ["str", "int", "float", "bool", "list"]
        _slot_types = ["string", "int32", "float32", "bool", "int32[]"]

        def __init__(self, str_, int_, float_, bool_, list_):
            self.str = str_
            self.int = int_
            self.float = float_
            self.bool = bool_
            self.list = list_

    # NOTE(review): expected strings below were reconstructed from a
    # whitespace-mangled source; verify line breaks/indent against
    # strify_message output.
    self.assertEqual(
        """str: string
int: 123456789101112
float: 5678.0
bool: True
list: [1, 2, 3]""",
        strify_message(M2("string", 123456789101112, 5678.0, True, [1, 2, 3])),
    )
    self.assertEqual(
        """str: ''
int: -1
float: 0.0
bool: False
list: []""",
        strify_message(M2("", -1, 0.0, False, [])),
    )

    # Nested message field.
    class M3(Message):
        __slots__ = ["m2"]
        _slot_types = ["M1"]

        def __init__(self, m2):
            self.m2 = m2

    self.assertEqual(
        """m2: 
  str: string
  int: -1
  float: 0.0
  bool: False
  list: []""",
        strify_message(M3(M2("string", -1, 0.0, False, []))),
    )

    # test array of Messages field
    class M4(Message):
        __slots__ = ["m2s"]
        _slot_types = ["M2[]"]

        def __init__(self, m2s):
            self.m2s = m2s

    self.assertEqual(
        """m2s: 
- 
  str: string
  int: 1234
  float: 5678.0
  bool: True
  list: [1, 2, 3]
- 
  str: string
  int: -1
  float: 0.0
  bool: False
  list: []""",
        strify_message(M4([M2("string", 1234, 5678.0, True, [1, 2, 3]), M2("string", -1, 0.0, False, [])])),
    )

    # test Time and Duration
    from genpy import Time, Duration

    class M5(Message):
        __slots__ = ["t", "d"]
        _slot_types = ["time", "duration"]

        def __init__(self, t, d):
            self.t = t
            self.d = d

    self.assertEqual(
        """t: 
  secs: 987
  nsecs: 654
d: 
  secs: 123
  nsecs: 456""",
        strify_message(M5(Time(987, 654), Duration(123, 456))),
    )

    # test final clause of strify -- str() anything that isn't recognized
    # NOTE(review): "set([1])" is the Python 2 repr; on Python 3 this is
    # "{1}" -- this variant of the test only passes on Python 2.
    self.assertEqual("set([1])", strify_message(set([1])))
def next_message(self, sensors):
    """Consume bag messages until every requested sensor has fresh data.

    :param sensors: tuple of sensor names to wait for; the empty tuple means
        "all sensors", and the special name 'stream' makes the loop stop
        after every single message.

    Side effects: refreshes self.imu / self.pose / self.orientation /
    self.color_img / self.depth_img / self.scan and their ``*_t`` timestamps,
    plus self.topic_name and self.current_timestamp. When the bag generator
    is exhausted, displays trajectory-error statistics on the GUI and returns.
    """
    self.pose = {}
    self.imu = {}
    self.imu_t = None
    self.orientation = {}
    self.orientation_t = None
    self.color_img = None
    self.color_img_t = None
    self.depth_img = None
    self.depth_img_t = None
    self.pose_t = None
    self.scan = None
    self.scan_t = None

    def _active(name):
        # A sensor is consumed when explicitly requested, when no filter is
        # given (empty tuple), or in streaming mode.
        return name in sensors or sensors == () or 'stream' in sensors

    while True:
        try:
            topic, msg, t = next(self.generator)
        except StopIteration:
            # Bag exhausted: show final trajectory-error statistics.
            pred_path = self.pose_obj.get_pred_path()
            actual_path = self.pose_obj.get_actual_path()
            trans_error = self.align(pred_path, actual_path)
            self.myGUI.median.display(np.median(trans_error))
            self.myGUI.mean.display(np.mean(trans_error))
            self.myGUI.Std_dev.display(np.std(trans_error))
            self.myGUI.rmse.display(np.sqrt(np.dot(trans_error, trans_error) / len(trans_error)))
            print("End")
            # BUG FIX: the original fell through to t.to_sec() with no new
            # message and re-entered the loop, hitting StopIteration again
            # forever (infinite stats loop); stop here instead.
            return
        t = t.to_sec()
        if topic == '/imu' and _active('accelerometer'):
            self.imu['x'] = float(strify_message(msg.linear_acceleration.x))
            # NOTE(review): 'y' reads linear_acceleration.z -- looks like a
            # deliberate axis remap, but worth confirming.
            self.imu['y'] = float(strify_message(msg.linear_acceleration.z))
            self.imu_t = t
            self.topic_name = 'accelerometer'
        elif topic == '/pose':
            self.pose['x'] = float(strify_message(msg.pose.pose.position.x))
            self.pose['y'] = float(strify_message(msg.pose.pose.position.y))
            self.pose_t = t
            if _active('orientation'):
                self.orientation['qz'] = float(strify_message(msg.pose.pose.orientation.z))
                self.orientation['qw'] = float(strify_message(msg.pose.pose.orientation.w))
                # Inject the pre-generated noise sample into the quaternion.
                noise = self.orientation_noise.pop()
                self.orientation['qz'] = self.orientation['qz'] + noise[0]
                self.orientation['qw'] = self.orientation['qw'] + noise[1]
                self.orientation_t = t
                self.topic_name = 'orientation'
        elif topic == '/camera/rgb/image_color' and _active('color_img'):
            self.color_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding="rgb8")
            self.color_img_t = t
            self.topic_name = 'color_img'
        elif topic == '/camera/depth/image' and _active('depth_img'):
            # Depth is kept in its native encoding (RGB8 conversion was
            # abandoned: self.depthToRGB8(gray_img_buff, msg.encoding)).
            gray_img_buff = self.bridge.imgmsg_to_cv2(msg, desired_encoding=msg.encoding)
            self.depth_img = gray_img_buff
            self.depth_img_t = t
            self.topic_name = 'depth_img'
        elif topic == '/scan' and _active('scan'):
            self.scan = np.array(msg.ranges)
            self.scan_t = t
            self.topic_name = 'scan'
        self.current_timestamp = t

        if 'stream' in sensors:
            break
        if sensors == ():
            # FIX: use 'is (not) None' instead of '== None' -- the images are
            # numpy arrays, where '!= None' is an elementwise comparison that
            # raises "truth value is ambiguous" on modern numpy.
            # NOTE(review): the original required scan to STILL be None here;
            # that looks like it may have been meant as 'is not None', but the
            # behavior is preserved as written -- confirm with the author.
            if (len(self.pose) > 0 and len(self.imu) > 0
                    and self.color_img is not None
                    and self.depth_img is not None
                    and self.scan is None):
                break
        else:
            # Every explicitly requested sensor must have a timestamp before
            # we stop (replaces the former k/l/m/n/o flag ladder).
            if all(name not in sensors or stamp is not None
                   for name, stamp in (('accelerometer', self.imu_t),
                                       ('orientation', self.orientation_t),
                                       ('color_img', self.color_img_t),
                                       ('depth_img', self.depth_img_t),
                                       ('scan', self.scan_t))):
                break
def next_message(self, sensors):
    """Consume bag messages until every requested sensor has fresh data.

    :param sensors: tuple of sensor names to wait for; the empty tuple means
        "all sensors", and the special name 'stream' makes the loop stop
        after every single message.

    Side effects: refreshes self.imu / self.pose / self.orientation /
    self.color_img / self.depth_img / self.scan and their ``*_t`` timestamps,
    plus self.topic_name and self.current_timestamp. When the bag generator
    is exhausted, displays trajectory-error statistics on the GUI and returns.
    """
    self.pose = {}
    self.imu = {}
    self.imu_t = None
    self.orientation = {}
    self.orientation_t = None
    self.color_img = None
    self.color_img_t = None
    self.depth_img = None
    self.depth_img_t = None
    self.pose_t = None
    self.scan = None
    self.scan_t = None

    def _active(name):
        # A sensor is consumed when explicitly requested, when no filter is
        # given (empty tuple), or in streaming mode.
        return name in sensors or sensors == () or 'stream' in sensors

    while True:
        try:
            topic, msg, t = next(self.generator)
        except StopIteration:
            # Bag exhausted: show final trajectory-error statistics.
            pred_path = self.pose_obj.get_pred_path()
            actual_path = self.pose_obj.get_actual_path()
            trans_error = self.align(pred_path, actual_path)
            self.myGUI.median.display(np.median(trans_error))
            self.myGUI.mean.display(np.mean(trans_error))
            self.myGUI.Std_dev.display(np.std(trans_error))
            self.myGUI.rmse.display(
                np.sqrt(np.dot(trans_error, trans_error) / len(trans_error)))
            print("End")
            # BUG FIX: the original fell through to t.to_sec() with no new
            # message and re-entered the loop, hitting StopIteration again
            # forever (infinite stats loop); stop here instead.
            return
        t = t.to_sec()
        if topic == '/imu' and _active('accelerometer'):
            self.imu['x'] = float(strify_message(msg.linear_acceleration.x))
            # NOTE(review): 'y' reads linear_acceleration.z -- looks like a
            # deliberate axis remap, but worth confirming.
            self.imu['y'] = float(strify_message(msg.linear_acceleration.z))
            self.imu_t = t
            self.topic_name = 'accelerometer'
        elif topic == '/pose':
            self.pose['x'] = float(strify_message(msg.pose.pose.position.x))
            self.pose['y'] = float(strify_message(msg.pose.pose.position.y))
            self.pose_t = t
            if _active('orientation'):
                self.orientation['qz'] = float(
                    strify_message(msg.pose.pose.orientation.z))
                self.orientation['qw'] = float(
                    strify_message(msg.pose.pose.orientation.w))
                # Inject the pre-generated noise sample into the quaternion.
                noise = self.orientation_noise.pop()
                self.orientation['qz'] = self.orientation['qz'] + noise[0]
                self.orientation['qw'] = self.orientation['qw'] + noise[1]
                self.orientation_t = t
                self.topic_name = 'orientation'
        elif topic == '/camera/rgb/image_color' and _active('color_img'):
            self.color_img = self.bridge.imgmsg_to_cv2(
                msg, desired_encoding="rgb8")
            self.color_img_t = t
            self.topic_name = 'color_img'
        elif topic == '/camera/depth/image' and _active('depth_img'):
            # Depth is kept in its native encoding (RGB8 conversion was
            # abandoned: self.depthToRGB8(gray_img_buff, msg.encoding)).
            gray_img_buff = self.bridge.imgmsg_to_cv2(
                msg, desired_encoding=msg.encoding)
            self.depth_img = gray_img_buff
            self.depth_img_t = t
            self.topic_name = 'depth_img'
        elif topic == '/scan' and _active('scan'):
            self.scan = np.array(msg.ranges)
            self.scan_t = t
            self.topic_name = 'scan'
        self.current_timestamp = t

        if 'stream' in sensors:
            break
        if sensors == ():
            # FIX: use 'is (not) None' instead of '== None' -- the images are
            # numpy arrays, where '!= None' is an elementwise comparison that
            # raises "truth value is ambiguous" on modern numpy.
            # NOTE(review): the original required scan to STILL be None here;
            # that looks like it may have been meant as 'is not None', but the
            # behavior is preserved as written -- confirm with the author.
            if (len(self.pose) > 0 and len(self.imu) > 0
                    and self.color_img is not None
                    and self.depth_img is not None
                    and self.scan is None):
                break
        else:
            # Every explicitly requested sensor must have a timestamp before
            # we stop (replaces the former k/l/m/n/o flag ladder).
            if all(name not in sensors or stamp is not None
                   for name, stamp in (('accelerometer', self.imu_t),
                                       ('orientation', self.orientation_t),
                                       ('color_img', self.color_img_t),
                                       ('depth_img', self.depth_img_t),
                                       ('scan', self.scan_t))):
                break