class ControlSystemServer(object): """ here we are using rospy.Timer functionality to measure co2 every second https://roboticsbackend.com/how-to-use-a-ros-timer-in-python-to-publish-data-at-a-fixed-rate/ https://roboticsbackend.com/ros-rate-roscpy-roscpp/ but every global reconfiguration cycle works with time.localtime() why """ def __init__(self): # hardcoded constants self._success_response = "success: " self._error_response = "error: " self._logsep = ";" # start node rospy.init_node('control_server', log_level=rospy.DEBUG) # get roslaunch params # names for self topics self._logname = rospy.get_param('~control_log_name', 'control_system') self._log_node_name = rospy.get_param('~control_log_node_name', 'control_log_node') self._service_name = rospy.get_param('~control_service_name', 'control_system') self._raw_co2_pub_name = rospy.get_param('~control_raw_co2_pub_name', 'raw_co2_pub') self._operator_pub_name = rospy.get_param('~control_operator_pub_name', 'operator_pub') # devices services names # self._relay_service_name = rospy.get_param('~control_relay_service_name', 'relay_device') self._led_service_name = rospy.get_param('~control_led_service_name', 'led_device') self._sba5_service_name = rospy.get_param('~control_sba5_service_name', 'sba5_device') # we are using sba5 by default co2 sensor self._exp_service_name = rospy.get_param('~control_exp_service_name', 'exp_system') # experiment params self._LSM_exp_red = int(rospy.get_param('~control_LSM_exp_red', 250)) # mA old = 142 # here we need 306 mA, but our devices can work only with Ir < 250 mA self._LSM_exp_white = int(rospy.get_param('~control_LSM_exp_white', 114)) # mA old = 76 self._LSM_control_red = int(rospy.get_param('~control_LSM_control_red', 237)) # mA self._LSM_control_white = int(rospy.get_param('~control_LSM_control_white', 122)) # mA self._full_experiment_loop_time = int(rospy.get_param('~control_full_experiment_loop_time', 900.0)) # sec self._isolated_measure_time = int(rospy.get_param('~control_isolated_measure_time', 480.0)) # sec self._n2_calibration_time = int(rospy.get_param('~control_n2_calibration_time', 90.0)) # depends on self._air_valves_open_time = int(rospy.get_param('~control_air_valves_open_time', 15.0)) # sec self._co2_measure_time = int(rospy.get_param('~control_co2_measure_time', 1.0)) # sec self._ventilation_time = self._full_experiment_loop_time - self._isolated_measure_time - \ self._n2_calibration_time - self._air_valves_open_time # flags of current control regime #self._mode = rospy.get_param('~control_start_mode', 'life_support') self._mode = rospy.get_param('~control_start_mode', 'experiment') self._mode_flag = 'co2' # mode can be : # experiment # life_support # test # search_experiment # ... 
#self._sba5_measure_allowed = False # by default # self._at_work = rospy.get_param('~control_start_at_work', True) # start node and loop immediately after launch or # rpi_gpio device stub # TODO add this params to .launch file self._gpio_handler = RelayHandler( n2_valve=5, air_pump_1=6, air_pump_2=12, cooler_1=13, air_valve_1=19, air_valve_2=26, ndir_pump=16, empty=20 ) # params of search experiment self._co2_search_time_start = 0 self._co2_search_time_stop = 0 self._current_search_point_id = 0 self._led_delay_time = 0 # for setting up leds in selected time in vent period # by default: self._current_red = self._LSM_exp_red self._current_white = self._LSM_exp_white self._current_search_is_finished = 1 # by default it is not finished # create log topic publisher self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10) # create data topic publisher self._co2_pub = rospy.Publisher(self._raw_co2_pub_name, Temperature, queue_size=10) # create operator-topic to send important msgs to operator # self._operator_pub = rospy.Publisher(self._operator_pub_name, String, queue_size=10) # logger self._logger = CustomLogger(name=self._logname, logpub=self._log_pub) self._logger.info("control_server init") # create Locks self._co2_measure_allowed_event = Event() # serial errors handling # self._last_serial_error_time = 0 # self._delay_after_serial_error = 300 self._logger.info("control_server service creation") # service self._service = rospy.Service(self._service_name, ControlSystem, self._handle_request) self._logger.info("check if we are in experiment mode") # self._serial_error_counter = 0 # self._serial_error_max = 5 # subscribe to rosout_agg to know if there is fatal error and we need to do smth # self._system_log_sub = rospy.Subscriber( # name='/rosout_agg', data_class=Log, # callback=self._log_callback, # queue_size=5) # reinit sba5 if self._mode == 'full_experiment' or self._mode == 'full_experiment': self._logger.info("update sba5 and allow sba5 measures") self._update_sba5_params() # allow measures of sba5 self._co2_measure_allowed_event.set() # self._default_red = 142 # self._default_white = 76 self._current_red = self._LSM_exp_red self._current_white = self._LSM_exp_white # create timers for async periodic tasks using internal ros mechanics # Create a ROS Timer for reading data rospy.Timer(rospy.Duration(1), self._get_sba5_measure) # 1 Hz # create ros timer for main loop if self._mode == 'life_support_dual': self._dual_led_service_name = rospy.get_param('~control_dual_led_service_name') self._current_red = self._LSM_control_red self._current_white = self._LSM_control_white if self._mode == 'k30_experiment': self._k30_service_name = rospy.get_param('~k30_service_name', 'k30_device') self._k30_calibration_time = int(rospy.get_param('~k30_calibration_time', '25')) self._current_red = self._LSM_control_red self._current_white = self._LSM_control_white rospy.Timer(rospy.Duration(2), self._get_k30_data) # 0.5 Hz self._logger.info("go to loop") self._loop() # ===================== loops for different control modes ====================== def _loop(self): while not rospy.is_shutdown(): # check if mode was changed if self._mode == 'life_support': self._life_support_loop() elif self._mode == 'life_support_dual': self._life_support_dual_loop() elif self._mode == 'test': self._test_loop() elif self._mode == 'full_experiment': self._full_experiment_loop() elif self._mode == 'k30_experiment': self._k30_experiment_loop() else: self._logger.error("current mode is not real mode: 
{}".format(self._mode)) rospy.sleep(1) def _test_loop(self): t = time.localtime() # every 15 minutes by default if t.tm_min % (self._full_experiment_loop_time / 60.0) == 0: self._logger.debug("start test loop again") rospy.sleep(self._air_valves_open_time) self._logger.debug("we have done nothing, really") def _life_support_loop(self): # one loop t = time.localtime() # every 15 minutes by default if t.tm_min % (self._full_experiment_loop_time / 60.0) == 0: self._logger.debug("start life support loop again") # start it again # reset light parameters to default self._logger.info('red = {} white = {} type = {}'.format( self._current_red, self._current_white, type(self._current_red))) self._set_new_light_mode(self._current_red, self._current_white) # set inside ventilation coolers on self._set_new_relay_state('set_vent_coolers', 0) # start ventilation self._start_ventilation() # self._set_new_relay_state('set_ndir_pump', 0) # for test only # wait for self._ventilation_time rospy.sleep(self._ventilation_time) # its not bad because service calls works in parallel threads # then stop it self._stop_ventilation() # self._set_new_relay_state('set_ndir_pump', 1) # for test only # then wait and do nothing def _life_support_dual_loop(self): # one loop t = time.localtime() # every 15 minutes by default if t.tm_min % (self._full_experiment_loop_time / 60.0) == 0: self._logger.debug("start life support dual loop again") # start it again # reset light parameters to default self._logger.info('red = {} white = {} type = {}'.format( self._current_red, self._current_white, type(self._current_red))) self._set_new_light_mode(self._current_red, self._current_white) self._set_new_light_mode(self._current_red, self._current_white, led_service_name= self._dual_led_service_name) # set inside ventilation coolers on self._set_new_relay_state('set_vent_coolers', 0) # start ventilation self._start_ventilation() # self._set_new_relay_state('set_ndir_pump', 0) # for test only # wait for self._ventilation_time rospy.sleep(self._ventilation_time) # its not bad because service calls works in parallel threads # then stop it self._stop_ventilation() # self._set_new_relay_state('set_ndir_pump', 1) # for test only # then wait and do nothing def _full_experiment_loop(self): # one loop # all experiment t = time.localtime() # every 15 minutes by default if t.tm_min % (self._full_experiment_loop_time/60.0) == 0: # start it again self._logger.info("start full experiment loop again") self._logger.info("start full experiment loop again") # self._get_current_point() # set inside ventilation coolers on self._set_new_relay_state('set_vent_coolers', 0) # start ventilation and calibration self._start_ventilation() self._logger.info("ventilation started") # get new led light params from exp_node self._get_current_point() self._logger.info("we got new exp point: mode={} id={} red={} white={}".format( self._mode_flag, self._current_search_point_id, self._current_red, self._current_white )) self._logger.info("we got new exp point: mode={} id={} red={} white={}".format( self._mode_flag, self._current_search_point_id, self._current_red, self._current_white )) # check new mode flag if self._mode_flag == 'co2': # TODO FIX # stop measuring co2 using threading event self._co2_measure_allowed_event.clear() self._logger.info("We have set measure flag to {}".format(self._co2_measure_allowed_event.is_set())) # do calibration of sba-5 self._logger.info("sba5 calibration started") self._perform_sba5_calibration() self._logger.info("sba5 calibration ended") 
# start measuring co2 again self._co2_measure_allowed_event.set() self._logger.info("We have set measure flag to {}".format(self._co2_measure_allowed_event.is_set())) # # self._co2_search_time_start = rospy.Time.now() # # send sign to operator # self._logger.info("co2_search_time started {}".format(self._co2_search_time_start)) # wait for self._ventilation_time rospy.sleep(self._led_delay_time) self._set_new_light_mode(self._current_red, self._current_white) rospy.sleep(self._ventilation_time - self._led_delay_time) # stop ventilation self._stop_ventilation() self._logger.info("stop ventilation") self._co2_search_time_start = rospy.Time.now() # send sign to operator ts = datetime.datetime.fromtimestamp( self._co2_search_time_start.to_sec()).strftime('%Y_%m_%d %H:%M:%S') self._logger.info("co2_search_time started {}".format(ts)) self._logger.info("co2_search_time started {}".format(ts)) # wait self._isolated_measure_time rospy.sleep(self._isolated_measure_time) self._co2_search_time_stop = rospy.Time.now() te = datetime.datetime.fromtimestamp( self._co2_search_time_start.to_sec()).strftime('%Y_%m_%d %H:%M:%S') self._logger.info("co2_search_time stopped {}".format(te)) self._logger.info("co2_search_time stopped {}".format(te)) # send start and stop times of this search point to exp_node self._send_point_data() self._logger.info("data sent to exp_system") def _k30_experiment_loop(self): # one loop # all experiment t = time.localtime() # every 15 minutes by default if t.tm_min % (self._full_experiment_loop_time/60.0) == 0: # start it again self._logger.info("start k30 experiment loop again") self._logger.info("start k30 experiment loop again") # self._get_current_point() # set inside ventilation coolers on self._set_new_relay_state('set_vent_coolers', 0) # start ventilation and calibration self._start_ventilation() self._logger.info("ventilation started") # get new led light params from exp_node self._get_current_point() self._logger.info("we got new exp point: mode={} id={} red={} white={}".format( self._mode_flag, self._current_search_point_id, self._current_red, self._current_white )) self._logger.info("we got new exp point: mode={} id={} red={} white={}".format( self._mode_flag, self._current_search_point_id, self._current_red, self._current_white )) self._set_new_light_mode(self._current_red, self._current_white) # check new mode flag if self._mode_flag == 'co2': # TODO FIX # stop measuring co2 using threading event self._co2_measure_allowed_event.clear() self._logger.info("We have set measure flag to {}".format(self._co2_measure_allowed_event.is_set())) # do calibration of sba-5 self._logger.info("k30 calibration started") self._perform_k30_calibration() self._logger.info("k30 calibration ended") # start measuring co2 again self._co2_measure_allowed_event.set() self._logger.info("We have set measure flag to {}".format(self._co2_measure_allowed_event.is_set())) # wait for self._ventilation_time rospy.sleep(self._ventilation_time) # stop ventilation self._stop_ventilation() self._logger.info("stop ventilation") self._co2_search_time_start = rospy.Time.now() # send sign to operator ts = datetime.datetime.fromtimestamp( self._co2_search_time_start.to_sec()).strftime('%Y_%m_%d %H:%M:%S') self._logger.info("co2_search_time started {}".format(ts)) self._logger.info("co2_search_time started {}".format(ts)) # wait self._isolated_measure_time rospy.sleep(self._isolated_measure_time) self._co2_search_time_stop = rospy.Time.now() te = datetime.datetime.fromtimestamp( 
self._co2_search_time_start.to_sec()).strftime('%Y_%m_%d %H:%M:%S') self._logger.info("co2_search_time stopped {}".format(te)) self._logger.info("co2_search_time stopped {}".format(te)) # send start and stop times of this search point to exp_node self._send_point_data() self._logger.info("data sent to exp_system") # =============================== support methods ============================== def _operator_call(self, msg): # just sends msg to operator topic _time = datetime.datetime.now() msg = str(_time) + self._logsep + str(self._logname) + self._logsep + msg self._operator_pub.publish(msg) # def _log_callback(self, log_msg): # # # hardcoded thing just to handle serial errors # if not time.time() - self._last_serial_error_time >= self._delay_after_serial_error: # if log_msg.name == '/serial_node' and log_msg.level == 8: # self._logger.warning("we got serial error {}, but we will ignore it for a time".format(log_msg)) # else: # if log_msg.name == '/serial_node' and log_msg.level == 8: # self._logger.error("we got serial error {}".format(log_msg)) # self._serial_error_counter += 1 # if self._serial_error_counter >= self._serial_error_max: # self._logger.error("max number of serial error counted: {}".format(self._serial_error_counter)) # self._restart_serial_node() # # self._serial_error_counter = 0 # self._last_serial_error_time = time.time() def _get_k30_data(self, event=None): # if self._sba5_measure_allowed: # event is rospy.TimerEvent if self._mode_flag == 'co2': # TODO FIX self._co2_measure_allowed_event.wait() rospy.wait_for_service(self._sba5_service_name) try: # sba_device = rospy.ServiceProxy(self._sba5_service_name, SBA5Device) # raw_resp = sba_device("measure_co2") k30_device = rospy.ServiceProxy(self._k30_service_name, K30Device) raw_resp = k30_device("get_data") # self._logger.debug("We got raw response from sba5 : {}".format(raw_resp.response)) # self._logger.debug("Type of raw response : {}".format(type(raw_resp.response))) # self._logger.debug("Size of raw response : {}".format(len(raw_resp.response))) # lets get co2 measure from that string # pattern = re.compile(r'\w+: (\d+.\d+)') # for answers like "success: 55.21" # co2_data = float(pattern.findall(raw_resp.response)[0]) co2_data = float(raw_resp) self._logger.debug("k30 measure_co2: We find co2 : {}".format(co2_data)) # add to self array # self._add_new_data_to_array(co2_data) # then publish it self._publish_sba5_measure(co2_data) # self._logger.debug("We published it") return float(co2_data) except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.warning("Service call failed: {}".format(err_list)) def _perform_k30_calibration(self): self._logger.debug("start k30 recalibration") # stop NDIRGA external air pump self._set_new_relay_state('set_ndir_pump', 1) # open n2 valve self._set_new_relay_state('set_n2_valve', 0) # send Z to sba-5 # rospy.wait_for_service(self._sba5_service_name) try: # wait until nitrogen fills the volume rospy.sleep(self._k30_calibration_time-5) # TODO fix it k30_device = rospy.ServiceProxy(self._k30_service_name, K30Device) raw_resp = k30_device('recalibrate') self._logger.debug("We got raw response from k30 {}".format(raw_resp)) except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list)) # raise ControlSystemException(e) # close n2 valve self._set_new_relay_state('set_n2_valve', 1) # start NDIRGA external air pump 
self._set_new_relay_state('set_ndir_pump', 0) # def _restart_serial_node(self): # # we just need to kill serial node # # master must rebirth it # # # self._logger.warning("trying to kill relay node") # # # os.system("rosnode kill /relay_device") # # time.sleep(1.5) # # self._logger.warning("trying to kill serial node") # # os.system("rosnode kill /serial_node") # # self._logger.warning("waiting for serial node respawn") # # time.sleep(1.5) # # # then check if it was created again # serial_node_found = False # # relay_device_found = False # # while not serial_node_found and relay_device_found: # while not serial_node_found: # nodes = os.popen("rosnode list").readlines() # for i in range(len(nodes)): # nodes[i] = nodes[i].replace("\n", "") # if nodes[i] == "/serial_node": # serial_node_found = True # self._logger.warning("found serial node in rosnode list") # # # if nodes[i] == "/relay_device": # # relay_device_found = True # # self._logger.warning("found relay_device in rosnode list") # time.sleep(0.5) # time.sleep(5) # # then send respawn signal to relay device to restore relay state on mcu # self._logger.warning("send respawn signal to relay") # rospy.wait_for_service(self._relay_service_name) # try: # relay_wrapper = rospy.ServiceProxy(self._relay_service_name, RelayDevice) # resp = relay_wrapper("respawn", 1) # self._logger.debug(resp) # return resp # # except rospy.ServiceException, e: # exc_info = sys.exc_info() # err_list = traceback.format_exception(*exc_info) # self._logger.error("Service call failed: {}".format(err_list)) def _send_point_data(self): self._logger.info("try to send data about current search point") rospy.wait_for_service(self._exp_service_name) try: exp_device = rospy.ServiceProxy(self._exp_service_name, ExpSystem) resp = exp_device(command="set_point_data", point_id=self._current_search_point_id, start_time=self._co2_search_time_start, end_time=self._co2_search_time_stop ) self._logger.debug(resp) return resp except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list)) def _get_current_point(self): self._logger.info("try to get new search point") rospy.wait_for_service(self._exp_service_name, 1) # NOTE check this place try: exp_device = rospy.ServiceProxy(self._exp_service_name, ExpSystem) resp = exp_device(command="get_current_point") self._mode_flag = resp.response self._current_search_point_id = resp.point_id self._current_red = resp.red self._current_white = resp.white self._led_delay_time = resp.led_delay self._logger.info(resp) return resp except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list)) # def _update_control_params(self): # # get new params and set them to corresponding self.xxxx values # # NOTE: method is DEPRECATED # self._set_new_light_mode(self._current_red, self._current_white) # for a time def _start_ventilation(self): # open drain valves 'set_air_valves' self._set_new_relay_state('set_air_valves', 0) # wait time to open them ~15 sec rospy.sleep(self._air_valves_open_time) # start drain pumps 'set_air_pumps' self._set_new_relay_state('set_air_pumps', 0) pass def _stop_ventilation(self): # stop drain pumps self._set_new_relay_state('set_air_pumps', 1) # wait 1 sec rospy.sleep(1.0) # close drain valves self._set_new_relay_state('set_air_valves', 1) pass def _perform_sba5_calibration(self): self._logger.debug("start sba recalibration") # stop 
NDIRGA external air pump self._set_new_relay_state('set_ndir_pump', 1) # open n2 valve self._set_new_relay_state('set_n2_valve', 0) # send Z to sba-5 rospy.wait_for_service(self._sba5_service_name) try: sba_device = rospy.ServiceProxy(self._sba5_service_name, SBA5Device) raw_resp = sba_device('recalibrate') self._logger.debug("We got raw response from sba5 {}".format(raw_resp)) # wait for calibration time 21/40/90 sec rospy.sleep(self._n2_calibration_time) except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list)) # raise ControlSystemException(e) # close n2 valve self._set_new_relay_state('set_n2_valve', 1) # start NDIRGA external air pump self._set_new_relay_state('set_ndir_pump', 0) pass def _publish_sba5_measure(self, data): self._logger.debug("publish_sba5_measure: We got data: {}".format(data)) co2_msg = Temperature() co2_msg.header.stamp = rospy.Time.now() co2_msg.temperature = data self._logger.debug("Message: {}".format(co2_msg)) self._co2_pub.publish(co2_msg) def _set_new_relay_state(self, command, state): # command - str, state - 0 or 1 self._logger.info("start setting new relay mode '{}' '{}' ".format(command, state)) # rospy.wait_for_service(self._relay_service_name) try: # relay_wrapper = rospy.ServiceProxy(self._relay_service_name, RelayDevice) # resp = relay_wrapper(command, state) # self._logger.debug(resp) # return resp self._gpio_handler.parse_old_command(command=command, arg=state) except Exception, e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list))
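# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original node): the ControlSystemServer
# docstring asks why co2 is sampled with rospy.Timer while the mode loops poll
# time.localtime().  The pattern used above is a fixed-rate Timer callback
# running in its own thread, gated by a threading.Event that is cleared while
# the N2 calibration is in progress.  A minimal standalone version of that
# pattern (names here are hypothetical):

def _timer_gating_sketch():
    import rospy
    from threading import Event

    measure_allowed = Event()
    measure_allowed.set()  # sampling enabled by default

    def timer_cb(event=None):
        # 'event' is a rospy.TimerEvent; wait() blocks while calibration runs
        measure_allowed.wait()
        rospy.loginfo("sampling co2 sensor")

    rospy.init_node("timer_gating_sketch")
    rospy.Timer(rospy.Duration(1), timer_cb)  # ~1 Hz, fired from a worker thread
    rospy.spin()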
class SI7021DeviceServer(object):
    """
    ros wrapper for the si7021 humidity/temperature sensor:
    reads it every measure_interval seconds and publishes temperature and
    humidity, or a lost-data marker when the read fails
    """

    def __init__(self):
        # start node
        rospy.init_node('si7021_device_server', log_level=rospy.DEBUG)

        # get roslaunch params and reinit part of params
        self._logname = rospy.get_param('~si7021_log_name', 'si7021')
        self._lost_data_marker = rospy.get_param('~si7021_lost_data_marker', -65536)
        self._log_node_name = rospy.get_param('~si7021_log_node_name', 'si7021_log_node')
        self._hum_pub_name = rospy.get_param('~si7021_hum_pub_name', 'si7021_1_hum_pub')
        self._temp_pub_name = rospy.get_param('~si7021_temp_pub_name', 'si7021_1_temp_pub')
        self._timeout = rospy.get_param('~si7021_timeout', 1)
        # no default here: a missing '~si7021_measure_interval' param stops the node on start
        self._measure_interval = rospy.get_param('~si7021_measure_interval')

        # create log topic publisher
        self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
        self._hum_pub = rospy.Publisher(self._hum_pub_name, Temperature, queue_size=10)
        self._temp_pub = rospy.Publisher(self._temp_pub_name, Temperature, queue_size=10)

        # logger
        self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
        self._logger.debug("si7021_device_server init")
        # print("we here init")

        # and go to infinite loop
        # self._service = rospy.Service(self._service_name, SBA5Device, self.handle_request)
        self._loop()

    def _loop(self):
        while not rospy.is_shutdown():
            rospy.sleep(self._measure_interval)
            try:
                temp, hum = get_si7021_data()

                msg = Temperature()
                msg.temperature = temp
                msg.header.stamp = rospy.Time.now()
                self._temp_pub.publish(msg)

                msg = Temperature()
                msg.temperature = hum
                msg.header.stamp = rospy.Time.now()
                self._hum_pub.publish(msg)
            except Exception as e:
                # sensor read failed: publish the lost-data marker to both topics
                msg = Temperature()
                msg.temperature = self._lost_data_marker
                msg.header.stamp = rospy.Time.now()
                self._temp_pub.publish(msg)
                self._hum_pub.publish(msg)
class K30DeviceServer:
    """
    ros wrapper for the K30 co2 sensor: exposes get_info, get_data and
    recalibrate commands through the K30Device service
    """

    def __init__(self):
        # hardcoded constants
        self._success_response = "success: "
        self._error_response = "error: "

        # start node
        rospy.init_node('k30_device_server', log_level=rospy.DEBUG)

        self._logname = rospy.get_param('~k30_log_name', 'k30')
        self._log_node_name = rospy.get_param('~k30_log_node_name', 'k30_log_node')
        self._port = rospy.get_param(
            '~k30_port', '/dev/serial/by-id/usb-Silicon_Labs_CP2102_USB_to_'
                         'UART_Bridge_Controller_0003-if00-port0')
        self._baudrate = rospy.get_param('~k30_baudrate', 9600)
        self._timeout = rospy.get_param('~k30_timeout', 1)
        self._service_name = rospy.get_param('~k30_service_name', 'k30_device')
        self._calibration_time = rospy.get_param('~k30_calibration_time', 25)
        self._lost_data_marker = rospy.get_param('~k30_lost_data_marker', -65536)

        # create log topic publisher
        self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)

        # logger
        self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
        self._logger.debug("k30_device_server init")

        # device
        self._device = K30(devname=self._port, baudrate=self._baudrate, timeout=self._timeout)

        # service
        self._service = rospy.Service(self._service_name, K30Device, self.handle_request)
        self._loop()

    def _loop(self):
        rospy.spin()

    def handle_request(self, req):
        self._logger.debug("we got request: {}".format(req))
        if not req.command:
            # if we got empty string
            resp = self._error_response + 'empty_command'
            return resp
        elif req.command == 'get_info':
            try:
                ans = self._device.get_info()
                resp = self._success_response + ans
                return resp
            except Exception as e:
                resp = self._error_response + e.args[0]
                return resp
        elif req.command == 'get_data':
            # returns the bare co2 value as a string, without the success prefix
            try:
                co2, ans = self._device.get_co2()
                # resp = self._success_response + ans
                return str(co2)
            except Exception as e:
                # resp = self._error_response + e.args[0]
                co2 = self._lost_data_marker
                return str(co2)
        elif req.command == 'recalibrate':
            try:
                # note: uses a hardcoded 5 s here, not self._calibration_time
                ans = self._device.recalibrate(ctime=5)
                resp = self._success_response + ans
                return resp
            except Exception as e:
                resp = self._error_response + e.args[0]
                return resp
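# Illustrative client-side sketch (not part of the server): how another node,
# such as the control system above, queries this service.  The srv package
# path and the response field name are assumptions; the 'k30_device' service
# name and the 'get_data' command come from this file.

def _k30_client_sketch():
    import rospy
    from ros_farmer_pc.srv import K30Device  # srv package path is an assumption

    rospy.wait_for_service('k30_device')
    k30 = rospy.ServiceProxy('k30_device', K30Device)
    resp = k30('get_data')
    # handle_request returns str(co2) on success, or str(lost_data_marker)
    # (i.e. "-65536") when the sensor read failed
    co2 = float(resp.response)  # field name assumed by analogy with SBA5Device
    rospy.loginfo("k30 co2: %s ppm", co2)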
class MYSQLHandler(object):

    def __init__(self):
        print("========== start init")
        # start node
        rospy.init_node('mysql_data_plotter_server', log_level=rospy.DEBUG, anonymous=True)

        # self logging
        self._logname = rospy.get_param('~mysql_data_plotter_log_name', 'mysql_data_plotter')
        self._log_node_name = rospy.get_param(
            '~mysql_data_plotter_log_node_name', 'mysql_data_plotter_log_node')

        # create log topic publisher
        self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)

        # logger
        self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
        self._logger.debug("mysql_data_plotter_server init")

        # by default we use the current ip of the tun0 interface
        # if there is no tun0 at all, that is a critical error
        self.ipv4 = os.popen('ip addr show tun0').read().split(
            "inet ")[1].split("/")[0]
        self.web_addr = self.ipv4
        self.web_port = 8090  # on every rpi
        self.list_of_hdf = list()
        self.metastring = "experiment metadata: \n"
        # TODO load description from experiments table and add to metastring
        # self.list_of_datasets =
        print("========== start get list of datasets")
        # TODO: fix - everything below is for test only
        self.list_of_tables = rospy.get_param('~mysql_data_plotter_raw_topics')
        # self.list_of_tables = [  # TODO must be loaded from .launch
        #     {'name': '/bmp180_1_temp_pub', 'id': 1, 'type': 'temperature', 'dtype': 'float64', 'units': 'C', 'status': 'raw_data'},
        #     {'name': '/bmp180_1_pressure_pub', 'id': 2, 'type': 'pressure', 'dtype': 'float64', 'units': 'kPa', 'status': 'raw_data'},
        #     {'name': '/raw_co2_pub', 'type': 'co2', 'id': 3, 'dtype': 'float64', 'units': 'ppmv', 'status': 'raw_data'},
        #     {'name': '/si7021_1_hum_pub', 'id': 4, 'type': 'humidity', 'dtype': 'float64', 'units': 'percents', 'status': 'raw_data'},
        #     {'name': '/si7021_1_temp_pub', 'id': 5, 'type': 'temperature', 'dtype': 'float64', 'units': 'C', 'status': 'raw_data'}
        # ]
        print("========== end of init")

    def get_dataset(self, name):
        print("========== start get dataset {}".format(name))
        info_dict = None
        # find the dict whose name matches the one the user wants to see
        # note: if the name is not found, info_dict stays None and the lookup below fails
        for i in self.list_of_tables:
            if i['name'] == name:
                info_dict = i
        # dirty
        meta = info_dict
        # the table where the data is located must be placed in the "status" field
        if info_dict['status'] == 'raw_data':
            # if raw, it must be in the raw_data table, selected by sensor_id
            sensor_id = info_dict['id']
            con = pymysql.connect(
                host='localhost',
                user='******',
                password='******',
                # db='experiment',
                charset='utf8mb4',
                cursorclass=pymysql.cursors.DictCursor)
            cur = con.cursor()
            cur.execute("use experiment")
            comm_str = 'select data, time from raw_data where sensor_id = {}'.format(
                sensor_id)
            self._logger.debug("comm_str: {}".format(comm_str))
            cur.execute(comm_str)
            res_list = cur.fetchall()
            # it must be an array of dicts, where each dict is one row from the db
            con.close()
            print("========== end of get dataset")
            return res_list, meta
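# Illustrative usage sketch (not part of the class): fetch one raw-data series
# and its metadata by topic name.  '/raw_co2_pub' is taken from the commented
# table list above; real names come from the '~mysql_data_plotter_raw_topics'
# launch parameter.

def _mysql_plotter_sketch():
    handler = MYSQLHandler()  # also calls rospy.init_node internally
    rows, meta = handler.get_dataset('/raw_co2_pub')
    # rows is a list of dicts with 'data' and 'time' keys, one per raw_data row
    print(len(rows), meta['units'])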
class RegressionTestScript(object): _CREDENTIAL = './conf/credential.json' _REGRESSION_SHEET_NAME = 'GRP__RegressionTesting' _CFS_SHEET_NAME = 'GRP__Component-Feature-Service' _SINGLE_REGRESSION = "\n" _TEST_COUNT = 0 _FAILED_COUNT = 0 def __init__(self, product, service, zuul_url, zuul_route): self._PRODUCT = product self._SERVICE = service self._BASE_PATH = os.path.expanduser('~') + "/Documents/GRP" now = datetime.now() log_dir = "/var/log/grp/regression-test-automation/%s/%s/%s" % \ (self._PRODUCT, self._SERVICE, '{0:%Y-%m-%d__%H.%M.%S}'.format(now)) self.log = CustomLogger(log_dir).get_logger() self.log.info("Script starts executes at : {0:%Y-%m-%d %H:%M:%S}\n".format(now)) pcfs = service.split('/') if len(pcfs) != 4: sys.exit("Invalid service : %s\nPlease enter a valid service." % service) self._COMPONENT = pcfs[0] self._FEATURE = pcfs[1] self._VERSION = pcfs[2] self._SERVICE_NAME = pcfs[3] self._ZUUL_URL = zuul_url self._ZUUL_ROUTE = zuul_route def set_up(self): self.log.info("Setup environment variables.") self.g = pygsheets.authorize(service_file=self._CREDENTIAL) self.log.debug("\n_PRODUCT = %s\n_SERVICE = %s\n_CREDENTIAL = %s\n_REGRESSION_SHEET_NAME = %s\n" "_CFS_SHEET_NAME = %s\ng = %s\n" % (self._PRODUCT, self._SERVICE, self._CREDENTIAL, self._REGRESSION_SHEET_NAME, self._CFS_SHEET_NAME, str(self.g))) def load_regression_sheet(self): self.log.debug("Start loading data from regression sheet.") worksheet = self.g.open(self._REGRESSION_SHEET_NAME) worksheet = worksheet.worksheet('title', self._PRODUCT) worksheet = worksheet.get_all_values() self.log.debug("Worksheet data : " + str(worksheet)) data = list(filter(lambda x: self._SERVICE.strip() == x[2].strip(), worksheet)) if len(data) == 0: self.log.error("Service not found. Product : %s, Service : %s" % (self._PRODUCT, self._SERVICE)) sys.exit("Service not found") self.regression_data = data print(data) self.log.debug("Regression test data : " + str(self.regression_data)) self.log.info("Loading complete.\n") def create_base_directory(self): self.log.debug("Start creating base directory for test script.") product_dir = self._BASE_PATH + "/" + self._PRODUCT + "-service" + "/tests/regression" if not os.path.exists(product_dir): self.log.error("directory not found : %s", product_dir) sys.exit(-1) self.product_dir = product_dir service = self._SERVICE.split("/") self.directory = self._BASE_PATH + "/" + self._PRODUCT + "-service/tests/regression/" + service[0] + \ "-service/python/" + service[0] + "-" + service[1] + "-" + service[2] + "/" + service[3] + "/" self.log.info("Test script directory : %s" % self.directory) try: if not os.path.exists(self.directory): os.makedirs(self.directory) if not os.path.exists(self.directory + "data/"): os.makedirs(self.directory + "data/") if not os.path.exists(self.directory + "script/"): os.makedirs(self.directory + "script/") except Exception as e: self.log.error("Error when creating directory %s : %s", self.directory, str(e)) sys.exit(-1) self.log.debug("Directory created : \n1 : %s\n2 : %sdata\n3 : %sscript\n" % ( self.directory, self.directory, self.directory)) def get_service_request_response(self): self.log.info("Start loading request and response json.") rows = self.g.open(self._CFS_SHEET_NAME).worksheet('title', self._PRODUCT).get_all_values() component_col_no = rows[3].index("component") feature_col_no = rows[3].index("feature") version_col_no = rows[3].index("version") service_col_no = rows[3].index("service") data = list(filter(lambda x: self._COMPONENT == x[component_col_no] and 
self._FEATURE == x[feature_col_no] and self._VERSION == x[version_col_no] and self._SERVICE_NAME == x[service_col_no], rows)) if len(data) == 0: print("Service not defined in CFS sheet : %s" % self._SERVICE) sys.exit(-1) data = data[0] try: request_json = json.loads(data[rows[3].index("test-request-json")]) response_json = json.loads(data[rows[3].index("test-response-json")]) request_json['header']['requestId'] = None request_json['header']['requestTime'] = None request_json['header']['requestType'] = None request_json['header']['requestSource'] = "python-regression" request_json['header']['requestSourceService'] = "regression-script" self.log.debug("Request json : \n%s", json.dumps(request_json, indent=4)) self.log.debug("Response json : \n%s", json.dumps(response_json, indent=4)) with open(self.directory + "data/request.json", "w") as f: f.write(json.dumps(request_json, sort_keys=False, indent=4)) with open(self.directory + "data/response.json", "w") as f: f.write(json.dumps(response_json, sort_keys=False, indent=4)) except Exception as e: self.log.error("Error when loading request and response json : %s", str(e)) sys.exit(-1) self.log.info("Loading complete.\n") def generate_basetest(self): with open("./templates/basetest.txt", "r") as r: with open(self.directory + "script/basetest.py", "w") as w: w.write(r.read()) self.log.info("File created : basetest.py ") def generate_run_regression(self): with open("./templates/run_regression.txt", "r") as r: with open(self.directory + "run_regression.sh", "w") as w: w.write(r.read().replace("<ZUUL_URL>", self._ZUUL_URL).replace("<ZUUL_ROUTE>", self._ZUUL_ROUTE) .replace("<SERVICE>", self._SERVICE).replace("<PRODUCT>", self._PRODUCT).replace("<COMPONENT>", self._COMPONENT)) self.log.info("File created : run_regression.sh ") def generate_run_regression_single(self): with open("./templates/run_regression_single.txt", "r") as r: with open(self.directory + "run_regression_single.sh", "w") as w: w.write(r.read().replace("<ZUUL_URL>", self._ZUUL_URL).replace("<ZUUL_ROUTE>", self._ZUUL_ROUTE) .replace("<SERVICE>", self._SERVICE)) self.log.info("File created : run_regression_single.sh\n") def create_environment(self): self.log.debug("Start creating environment.") # with open("./conf/port.json", "r") as r: # ports = json.loads(r.read()) # self.port = ports[self._PRODUCT] # if not self.port: # self.log.error("Port not found for service : %s", self._SERVICE) # sys.exit(-1) # self.log.info("Service Port : " + self.port) self.create_base_directory() self.get_service_request_response() self.generate_basetest() self.generate_run_regression() self.generate_run_regression_single() self.log.debug("Environment created.\n") def register_script(self, file, attribute, type): self._SINGLE_REGRESSION += "#./%s ${URL} ${SERVICE} ${TOKEN}\n" % file def create_request_null_test_script(self, attribute): self._TEST_COUNT += 1 file = "service_test_request_null_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/request_null.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute)) self.register_script(file, attribute, "null") self.log.info("File created : %s", file) def create_request_empty_test_script(self, attribute): self._TEST_COUNT += 1 file = "service_test_request_empty_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/request_empty.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", 
attribute)) self.register_script(file, attribute, "empty") self.log.info("File created : %s", file) def create_request_wrongtype_test_script(self, attribute, type): if type == 'str': type = "\"\"" elif type == 'double': type = "1.00" elif type == '[]' or type == 'array': type = "[]" elif type == '{}' or type == 'object': type = "{}" elif type == 'boolean': type = "True" elif type == 'int': type = "1" else: print("Type not found :", type) self._TEST_COUNT += 1 file = "service_test_request_wrongtype_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/request_wrongtype.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute).replace("<type>", type)) self.register_script(file, attribute + " data type", "wrong") self.log.info("File created : %s", file) def request_wronglength_formater(self, attribute, length): len_dic = {"condition": "<length_condition>", "min": "1", "max": "20"} if not length: self.log.error("Pattern not define of wronglength for request attribute : %s", attribute) self._FAILED_COUNT += 1 return len_dic length = length.strip() if re.match(r'^[0-9]+$', length): min_max = [int(length) - 8, int(length) + 8] len_dic["condition"] = "len(suffix) != " + length elif re.match(r'^:[0-9]+$', length): length = length.replace(":", "") min_max = [1, int(length) + 16] len_dic["condition"] = "len(suffix) > " + length elif re.match(r'^[0-9]+:$', length): length = length.replace(":", "") min_max = [int(length) - 8, int(length) + 8] len_dic["condition"] = "len(suffix) < " + length elif re.match(r'^[0-9]+:[0-9]+$', length): v_range = length.split(":") min_max = [int(v_range[0]) - 8, int(v_range[1]) + 8] len_dic["condition"] = "len(suffix) < " + v_range[0] + " or len(suffix) > " + v_range[1] elif re.match(r'^[0-9,]+$', length): v_nums = list(map(lambda n: int(n), length.split(","))) min_max = [min(v_nums) - 8, max(v_nums) + 8] len_dic["condition"] = "len(suffix) not in " + str(v_nums) else: self.log.error("Pattern not match of wronglength for request attribute : %s", attribute) self._FAILED_COUNT += 1 return len_dic min_max[0] = min_max[0] if min_max[0] > 1 else 1 len_dic["min"] = str(min_max[0]) len_dic["max"] = str(min_max[1]) return len_dic def create_request_wronglength_test_script(self, attribute, length): len_dic = self.request_wronglength_formater(attribute, length) self._TEST_COUNT += 1 file = "service_test_request_wronglength_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/request_wronglength.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute).replace("<min>", len_dic["min"]) .replace("<max>", len_dic["max"]).replace("<length_condition>", len_dic["condition"])) self.register_script(file, attribute + " length", "wrong") self.log.info("File created : %s", file) def create_request_malformed_test_script(self, attribute, pattern): length_value = "range(1, 30)" length = pattern.get("wronglength") if length: length = length.strip() if re.match(r'^[0-9]+$', length): length_value = "[%s]" % length elif re.match(r'^:[0-9]+$', length): length_value = "range(1, %d)" % (int(length.replace(":", "")) + 1) elif re.match(r'^[0-9]+:$', length): l = int(length.replace(":", "")) length_value = "range(%d, %d)" % (l, l + 16) elif re.match(r'^[0-9]+:[0-9]+$', length): l = length.split(":") length_value = "range(%s, %d)" % (l[0], int(l[1]) + 1) elif re.match(r'^[0-9,]+$', length): length_value = "[ %s ]" % length 
self._TEST_COUNT += 1 file = "service_test_request_malformed_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/request_malformed.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: if pattern.get("malformed") is None: self._FAILED_COUNT += 1 pattern["malformed"] = "<malformed_pattern>" self.log.error("Pattern not define of malformed for request attribute : %s", attribute) w.write(r.read().replace("<attribute>", attribute) .replace("<malformed_pattern>", pattern.get("malformed")) .replace("<length_value>", length_value)) self.register_script(file, attribute, "malformed") self.log.info("File created : %s", file) def create_request_invalid_test_script(self, attribute, pattern, data_type): self._TEST_COUNT += 1 file = "service_test_request_invalid_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() if isinstance(pattern, type([])): with open("./templates/request_invalid_str.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: if pattern is None: self._FAILED_COUNT += 1 pattern = "<values>" w.write(r.read().replace("<attribute>", attribute).replace("<values>", str(pattern))) elif pattern is not None and re.match(r'^[0-9:]+$', pattern): value = 0 if re.match(r'^[0-9]+$', pattern): value = float(pattern) - (random.random() * 10 + 1) elif re.match(r'^:[0-9]+$', pattern): value = float(pattern.replace(":", "")) + (random.random() * 10 + 1) elif re.match(r'^[0-9]+:$', pattern): value = float(pattern.replace(":", "")) - (random.random() * 10 + 1) elif re.match(r'^[0-9]+:[0-9]+$', pattern): v = pattern.split(":") value = float(v[0]) - (random.random() * 10 + 1) if data_type == 'int': value = str(round(value)) elif data_type == 'double': value = str(round(value, 2)) else: value = '"' + str(round(value)) + '"' with open("./templates/request_invalid_number.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute).replace("<value>", value)) else: self._FAILED_COUNT += 1 self.log.error("Pattern not define of invalid for request attribute : %s", attribute) with open("./templates/request_invalid.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute)) self.register_script(file, attribute, "invalid") self.log.info("File created : %s", file) def create_request_unknown_test_script(self): self._TEST_COUNT += 1 file = "service_test_request_unknown_attribute.py" with open("./templates/request_unknown.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read()) self.register_script(file, "attribute", "unknown") self.log.info("File created : %s", file) self._SINGLE_REGRESSION += "\n" def create_response_null_test_script(self, attribute): self._TEST_COUNT += 1 file = "service_test_response_null_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/response_null.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute)) self.register_script(file, attribute, "null") self.log.info("File created : %s", file) def create_response_empty_test_script(self, attribute, is_mandatory): self._TEST_COUNT += 1 file = "service_test_response_empty_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() null_conditon = "" if not is_mandatory: null_conditon = "self._ATTR not in self.response_data['body'] or " with open("./templates/response_empty.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: 
w.write(r.read().replace("<attribute>", attribute).replace("<null_checker>", null_conditon)) self.register_script(file, attribute, "empty") self.log.info("File created : %s", file) def create_response_mismatched_test_script(self, attribute): self._TEST_COUNT += 1 file = "service_test_response_mismatched_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/response_mismatched.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute)) self.register_script(file, attribute, "mismatched") self.log.info("File created : %s", file) def create_response_wrongtype_test_script(self, attribute, type, is_mandatory): if type == 'str': type = "[type(\"\")]" elif type == 'double': type = "[type(1.00), type(1)]" elif type == '[]' or type == 'array': type = "[type([])]" elif type == '{}' or type == 'object': type = "[type({})]" elif type == 'boolean': type = "[type(True)]" elif type == 'int': type = "[type(1)]" else: print("Type not found :", type) type = "<type>" self._TEST_COUNT += 1 file = "service_test_response_wrongtype_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() null_conditon = "" if not is_mandatory: null_conditon = "self._ATTR not in self.response_data['body'] or " with open("./templates/response_wrongtype.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute).replace("<type>", type) .replace("<null_checker>", null_conditon)) self.register_script(file, attribute + " data type", "wrong") self.log.info("File created : %s", file) def response_wronglength_formater(self, attribute, length): value = "len(self.response_data['body'][self._ATTR])" if not length: self.log.error("Pattern not define of wronglength for response attribute : %s", attribute) return "<length_condition>" length = length.strip() if re.match(r'^[0-9]+$', length): return value + " == " + length elif re.match(r'^:[0-9]+$', length): length = length.replace(":", "") return value + " <= " + length elif re.match(r'^[0-9]+:$', length): length = length.replace(":", "") return value + " >= " + length elif re.match(r'^[0-9]+:[0-9]+$', length): v_range = length.split(":") return value + " >= " + v_range[0] + " and " + value + " <= " + v_range[1] elif re.match(r'^[0-9,]+$', length): v_nums = list(map(lambda n: int(n), length.split(","))) return value + " in " + str(v_nums) else: self.log.error("Pattern not match of wronglength for response attribute : %s", attribute) return "<length_condition>" def create_response_wronglength_test_script(self, attribute, length, is_mandatory): length_condition = self.response_wronglength_formater(attribute, length) self._TEST_COUNT += 1 file = "service_test_response_wronglength_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() if not is_mandatory: length_condition = "self._ATTR not in self.response_data['body'] or %s" % length_condition with open("./templates/response_wronglength.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute) .replace("<length_condition>", length_condition)) self.register_script(file, attribute + " length", "wrong") self.log.info("File created : %s", file) def create_response_malformed_test_script(self, attribute, pattern, is_mandatory): self._TEST_COUNT += 1 file = "service_test_response_malformed_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() null_conditon = "" with open("./templates/response_malformed.txt", "r") as r: with 
open(self.directory + "script/" + file, "w") as w: if pattern is None: self._FAILED_COUNT += 1 pattern = "<malformed_pattern>" self.log.error("Pattern not define of malformed for response attribute : %s", attribute) if not is_mandatory: null_conditon = "self._ATTR not in self.response_data['body'] or " w.write(r.read().replace("<attribute>", attribute).replace("<malformed_pattern>", pattern) .replace("<null_checker>", null_conditon)) self.register_script(file, attribute, "malformed") self.log.info("File created : %s", file) def create_response_invalid_test_script(self, attribute, pattern, is_mandatory): self._TEST_COUNT += 1 file = "service_test_response_invalid_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() null_conditon = "" if not is_mandatory: null_conditon = "self._ATTR not in self.response_data['body'] or " if isinstance(pattern, type([])): with open("./templates/response_invalid_str.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute).replace("<values>", str(pattern)) .replace("<null_condition>", null_conditon)) elif pattern is not None and re.match(r'^[0-9:]+$', pattern): value = 0 condition = "float(self.response_data[\"body\"][\"%s\"])" % attribute error_message = "" if re.match(r'^[0-9]+$', pattern): condition = condition + " != " + pattern error_message = pattern elif re.match(r'^:[0-9]+$', pattern): value = pattern.replace(":", "") condition = condition + " > " + value error_message = "greater than " + value elif re.match(r'^[0-9]+:$', pattern): value = pattern.replace(":", "") condition = condition + " < " + value error_message = "less than " + value elif re.match(r'^[0-9]+:[0-9]+$', pattern): value = pattern.split(":") condition = condition + " < " + value[0] + " or " + condition + " > " + value[1] error_message = "less than " + value[0] + " or greater than " + value[1] with open("./templates/response_invalid_number.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute) .replace("<condition>", condition) .replace("<error_message>", error_message) .replace("<null_condition>", null_conditon)) else: self.log.error("Pattern not define of invalid for response attribute : %s", attribute) self._FAILED_COUNT += 1 with open("./templates/response_invalid.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute)) self.register_script(file, attribute, "invalid") self.log.info("File created : %s", file) def create_response_unknown_test_script(self, mandatory_attrs, optional_attrs): self._TEST_COUNT += 1 file = "service_test_response_unknown_attribute.py" with open("./templates/response_unknown.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<mandatory_attribute_list>", mandatory_attrs) .replace("<optional_attribute_list>", optional_attrs)) self.register_script(file, "attribute", "unknown") self.log.info("File created : %s", file) def create_response_missing_test_script(self, mandatory_attrs): self._TEST_COUNT += 1 file = "service_test_response_missing_attribute.py" with open("./templates/response_missing.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<mandatory_attribute_list>", mandatory_attrs)) self.register_script(file, "attribute", "missing") self.log.info("File created : %s", file) def generate_request_test_script(self, data): self.log.debug("Attribute name : %s", 
data[5]) attr_dic = {"attribute": data[5], "mandatory": False} pattern = {} if len(data) >= 17 and re.match(r"^\{.*\}$", data[16]): pattern = json.loads(data[16]) self.log.debug("Pattern : \n%s", json.dumps(pattern)) if "null" in data[9]: attr_dic['mandatory'] = True self.create_request_null_test_script(data[5]) if "empty" in data[8]: self.create_request_empty_test_script(data[5]) if "wrongtype" in data[7]: self.create_request_wrongtype_test_script(data[5], data[6]) if "wronglength" in data[10]: self.create_request_wronglength_test_script(data[5], pattern.get('wronglength')) if "malformed" in data[11]: self.create_request_malformed_test_script(data[5], pattern) if "invalid" in data[13]: self.create_request_invalid_test_script(data[5], pattern.get('invalid'), data[6]) self._SINGLE_REGRESSION += "\n" return attr_dic def generate_response_test_script(self, data): self.log.debug("Attribute name : %s", data[5]) attr_dic = {"attribute": data[5], "mandatory": False} pattern = {} if len(data) >= 17 and re.match(r"^\{.*\}$", data[16]): pattern = json.loads(data[16]) if "null" in data[9]: attr_dic['mandatory'] = True self.create_response_null_test_script(data[5]) if "empty" in data[8]: self.create_response_empty_test_script(data[5], attr_dic['mandatory']) if "wrongtype" in data[7]: self.create_response_wrongtype_test_script(data[5], data[6], attr_dic['mandatory']) if "wronglength" in data[10]: self.create_response_wronglength_test_script(data[5], pattern.get('wronglength'), attr_dic['mandatory']) if "malformed" in data[11]: self.create_response_malformed_test_script(data[5], pattern.get('malformed'), attr_dic['mandatory']) if "mismatch" in data[12]: self.create_response_mismatched_test_script(data[5]) if "invalid" in data[13]: self.create_response_invalid_test_script(data[5], pattern.get('invalid'), attr_dic['mandatory']) self._SINGLE_REGRESSION += "\n" return attr_dic def create_test_script(self): self.log.info("Start creating request test script") (seq(self.regression_data).filter(lambda x: "request" in x[3]) .map(lambda d: self.generate_request_test_script(d)).to_list()) self.create_request_unknown_test_script() self.log.info("\nStart creating response test script") response_attr_list = seq(self.regression_data).filter(lambda x: "response" in x[3]) \ .map(lambda d: self.generate_response_test_script(d)).to_list() mandatory_res_attrs = str(seq(response_attr_list).filter(lambda d: d['mandatory']) .map(lambda d: d['attribute']).to_list()) optional_res_attrs = str(seq(response_attr_list).filter(lambda d: not d['mandatory']) .map(lambda d: d['attribute']).to_list()) self.create_response_missing_test_script(mandatory_res_attrs) self.create_response_unknown_test_script(mandatory_res_attrs, optional_res_attrs) with open(self.directory + "run_regression_single.sh", "a") as a: a.write(self._SINGLE_REGRESSION) self.log.debug("Update run_regression_single.sh file") self.log.info("Successfully created : (%d/%d)", self._TEST_COUNT - self._FAILED_COUNT, self._TEST_COUNT) self.log.info("Only Template create : (%d/%d)", self._FAILED_COUNT, self._TEST_COUNT) def change_script_permission(self): self.log.debug("Change file mode to 775") os.system('chmod 775 ' + self.directory + 'run_regression*') os.system('chmod 775 ' + self.directory + 'script/*') def run(self): start_time = int(round(time.time() * 1000)) self.set_up() self.load_regression_sheet() self.create_environment() self.create_test_script() self.change_script_permission() self.log.info("") self.log.info("Script execution end at : {0:%Y-%m-%d 
%H:%M:%S}".format(datetime.now())) self.log.info('Execution Time : %d ms', int(round(time.time() * 1000)) - start_time)
class ExpSystemServer(object): """ ros wrapper for handling exp points """ def __init__(self): # hardcoded constants self._success_response = "success: " self._error_response = "error: " # start node rospy.init_node('exp_server', log_level=rospy.DEBUG) # get roslaunch params # names for self topics self._logname = rospy.get_param('~exp_log_name', 'exp_system') self._log_node_name = rospy.get_param('~exp_log_node_name', 'exp_log_node') self._service_name = rospy.get_param('~exp_service_name', 'exp_system') self._exp_pub_name = rospy.get_param('~exp_pub_name', 'exp_pub') # mode of work # no matter which method is chosen we will parse params for all of them from .launch file self._mode = rospy.get_param('~exp_mode_name', 'table') # self._stand_type = rospy.get_param('~exp_lamp_type', 'exp') # self._height_of_lamps = rospy.get_param('~exp_lamp_h', 25) self._search_table = rospy.get_param('exp_search_table') self._exp_description = rospy.get_param('mysql_data_saver_experiment_description') self._exp_id = self._exp_description["experiment_number"] self._db_params=rospy.get_param('mysql_db_params') self._db_name = "experiment" + str(self._exp_id) # self._exp_config_path = rospy.get_param('~exp_config_path', 'test.xml') # todo add here parsing params of gradient search and other smart methods # create log topic publisher self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10) # logger self._logger = CustomLogger(name=self._logname, logpub=self._log_pub) self._logger.info("exp_server init") # create exp_db if it is not exists now self._create_exp_db() # create search method handler object # it must not contain any ros api, because i hate ros if self._mode == 'table': self._search_handler = TableSearchHandler( self._search_table, self._db_params, self._exp_description, self._logger) elif self._mode == 'ordered_table': self._search_handler = OrderedTableSearchHandler( self._search_table, self._db_params, self._exp_description, self._logger) # todo add other search modes # now we have to calculate current search point # self._search_handler will calculate and store it # until we get reqv from control system self._search_handler.calculate_next_point() # service self._service = rospy.Service(self._service_name, ExpSystem, self._handle_request) self._loop() def _loop(self): rospy.spin() def _handle_request(self, req): # self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++") # self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++") self._logger.debug("we got request: {}".format(req)) if not req.command: # if we got empty string resp = self._error_response + 'empty_command' return resp elif req.command == 'set_point_data': # TODO fix try: self._set_point_data(req.point_id, req.start_time, req.end_time) resp = ExpSystemResponse() resp.response = self._success_response self._logger.info("we got set_point_data reqv. 
data from control:") start_time_ = datetime.datetime.fromtimestamp( req.start_time.to_sec()).strftime('%Y_%m_%d %H:%M:%S') end_time_ = datetime.datetime.fromtimestamp( req.end_time.to_sec()).strftime('%Y_%m_%d %H:%M:%S') self._logger.info("t_start={} t_stop={}".format( start_time_, end_time_ )) return resp except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list)) resp = ExpSystemResponse() resp.response = self._error_response + e.args[0] return resp elif req.command == 'get_current_point': # request from control_node to get current point_id try: mode, p_id, red, white, led_delay = self._get_current_point() resp = ExpSystemResponse() resp.response = mode resp.point_id = p_id resp.red = red resp.white = white resp.led_delay = led_delay self._logger.info("we got get_current_point reqv from control:mode={} p_id={} red={} white={} delay={}".format( mode, p_id, red, white, led_delay )) return resp except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list)) resp = ExpSystemResponse() resp.response = self._error_response + e.args[0] # resp = self._error_response + e.args[0] return resp else: resp = ExpSystemResponse() resp.response = self._error_response + 'unknown command' return resp # def _get_last_search_point_from_db(self): # """ # sends request to mariadb to get last row in exp_data table , corresponds to exp_id # WE NEED ALL SEARCH POINTS FROM THIS EXP because they were used randomly # and we dont know how exactly # """ # pass # def _load_done_points_from_db(self): # # search method must do it by itself, because it depends very much # # anyway use : to find points calculated today # # SELECT * FROM raw_data where dayofmonth(time)=dayofmonth(now()); # # # # pass # def _get_point_from_db_by_id(self, point_id): # pass def _set_point_data(self, p_id, start_time, stop_time): # this command handles incoming msg with data about experiment # we want to get from this msg # uint32 point_id # time start_time # time end_time # and put that to correspond row in mysql db # # NOTE we have to use update statement # https://oracleplsql.ru/update-mariadb.html # update raw_data set data_id=5754714, exp_id=200, time='2020-10-19 23:23:06', # data=500 where data_id=5754714; # self._logger.debug("type(start_time): {}".format(type(start_time))) self._search_handler.update_point_data(p_id, start_time, stop_time) def _get_current_point(self): return self._search_handler.calculate_next_point() # return (random.randint(0,100), 120, 55) def _create_exp_db(self): con = pymysql.connect(host=self._db_params["host"], user=self._db_params["user"], password=self._db_params["password"], # db='experiment', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) cur = con.cursor() cur.execute("use {}".format(self._db_name)) cur.execute('create table if not exists exp_data' '(point_id bigint unsigned primary key not null auto_increment,' 'step_id int unsigned,' # id of search step whom this point belongs 'exp_id SMALLINT unsigned,' 'red int,' 'white int,' 'start_time timestamp,' # start time of dataset 'end_time timestamp,' # end time of dataset 'number_of_data_points int unsigned,' # num of points in set 'point_of_calc double,' # point X where we calculate value of derivative 'f_val double,' # value of derivative dCO2/dt(X) on this dataset 'q_val double,' 'is_finished SMALLINT unsigned ' # flag to mark if point was finished 
correctly and how
                    ')'
                    )
        # is_finished: 0 - finished correctly, 1 - not finished
        # cur.execute('describe exp_data')
        self._logger.debug(cur.fetchall())
        cur.execute('commit')
        self._logger.debug(cur.fetchall())
        # close the connection once the table is ready
        con.close()
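# A minimal sketch of the update that _set_point_data delegates to self._search_handler,
# written directly with pymysql for reference. The function name is hypothetical; the
# exp_data columns come from _create_exp_db above, and start_time / stop_time are assumed
# to be rospy Time values (as in the service request), hence the .to_sec() calls.
import datetime
import pymysql


def update_point_times(db_params, db_name, p_id, start_time, stop_time):
    start_ = datetime.datetime.fromtimestamp(start_time.to_sec())
    stop_ = datetime.datetime.fromtimestamp(stop_time.to_sec())
    con = pymysql.connect(host=db_params["host"],
                          user=db_params["user"],
                          password=db_params["password"],
                          charset='utf8mb4',
                          cursorclass=pymysql.cursors.DictCursor)
    try:
        with con.cursor() as cur:
            cur.execute("use {}".format(db_name))
            # parameterized update instead of building the statement with str.format
            cur.execute(
                "update exp_data set start_time=%s, end_time=%s where point_id=%s",
                (start_, stop_, p_id)
            )
        con.commit()
    finally:
        con.close()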
class WatchdogServer(object):
    def __init__(self):
        # hardcoded constants
        self._success_response = "success: "
        self._error_response = "error: "
        # start node
        rospy.init_node('watchdog_server', anonymous=True, log_level=rospy.DEBUG)
        # get roslaunch params
        # names for self topics
        self._logname = rospy.get_param('~watchdog_log_name', 'watchdog')
        self._log_node_name = rospy.get_param('~watchdog_log_node_name', 'watchdog')
        self._service_name = rospy.get_param('~watchdog_service_name', 'watchdog')
        # create log topic publisher
        self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
        # start logger
        self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
        self._logger.debug("watchdog_server init")
        self.dead_flag = False
        # service
        self._service = rospy.Service(self._service_name, Watchdog, self._handle_request)
        self._logger.debug("end of init, go to loop")
        self._loop()

    def _loop(self):
        try:
            while not rospy.core.is_shutdown() and not self.dead_flag:
                rospy.sleep(1)
        except KeyboardInterrupt:
            self._logger.warning("keyboard interrupt, shutting down")
            rospy.core.signal_shutdown('keyboard interrupt')

    def _handle_request(self, req):
        self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++")
        self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++")
        self._logger.debug("we got request: {}".format(req))
        if not req.command:
            # we got an empty command string
            resp = self._error_response + 'empty_command'
            return resp
        elif req.command == 'shutdown':
            self.dead_flag = True
            resp = self._success_response + "self.dead_flag is {}".format(self.dead_flag)
            return resp
        # elif req.command == 'free_lock':
        #     self._data_write_lock.release()
        #     resp = self._success_response + "the lock has been released"
        #     # lock will be acquired forever if we dont call the free_lock command
        #     return resp
        else:
            resp = self._error_response + 'unknown command'
            return resp
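# A small client-side sketch of how another node could ask this watchdog to shut down.
# The service name matches the ~watchdog_service_name default above; the srv import path
# and the keyword used for the request field are assumptions based on how _handle_request
# reads req.command.
import rospy
from ros_farmer_pc.srv import Watchdog  # assumed package path, adjust to the real srv location


def request_watchdog_shutdown(service_name='watchdog'):
    rospy.wait_for_service(service_name)
    try:
        watchdog = rospy.ServiceProxy(service_name, Watchdog)
        resp = watchdog(command='shutdown')
        rospy.loginfo("watchdog answered: {}".format(resp))
        return resp
    except rospy.ServiceException as e:
        rospy.logerr("watchdog service call failed: {}".format(e))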
class SBA5DeviceServer(object): """ class to receive messages from other ros nodes, handle it send messages to PP-systems SBA-5 IRGA and parse its output back as response """ def __init__(self): # hardcoded constants self._success_response = "success: " self._error_response = "error: " # start node rospy.init_node('sba5_device_server', log_level=rospy.DEBUG) # get roslaunch params and reinit part of params self._calibration_time = int(rospy.get_param('~sba5_calibration_time')) self._logname = rospy.get_param('~sba5_log_name', 'sba5') self._log_node_name = rospy.get_param('~sba5_log_node_name', 'sba5_log_node') self._port = rospy.get_param('~sba5_port', '/dev/serial/by-id/usb-FTDI_FT230X_Basic_UART_DN03WQZS-if00-port0') self._baudrate = int(rospy.get_param('~sba5_baudrate', 19200)) self._timeout = float(rospy.get_param('~sba5_timeout', 0.2)) self._service_name = rospy.get_param('~sba5_service_name', 'sba5_device') # create log topic publisher self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10) # logger self._logger = CustomLogger(name=self._logname, logpub=self._log_pub) self._logger.debug("sba5_device_server init") # self._logger.info("sba5_device_server cal time is {}, type is {}".format( # self._calibration_time, type(self._calibration_time) # )) # service self._service = rospy.Service(self._service_name, SBA5Device, self.handle_request) self._loop() def _loop(self): #self._logger.debug("led_device_server ready to serve") rospy.spin() def handle_request(self, req): self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++") self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++") self._logger.debug("we got request: {}".format(req)) if not req.command: # if we got empty string resp = self._error_response + 'empty_command' return resp elif req.command == 'get_info': try: ans = self.get_info() resp = self._success_response + ans return resp except SBA5DeviceException as e: resp = self._error_response + e.args[0] return resp elif req.command == 'measure_full': try: ans = self.do_measurement() resp = self._success_response + ans return resp except SBA5DeviceException as e: resp = self._error_response + e.args[0] return resp elif req.command == 'measure_co2': try: ans = self.do_measurement() self._logger.debug("raw result from measure {}".format(ans)) pattern = re.compile(r'M \d+ \d+ (\d+.\d+) \d+.\d+ \d+.\d+ \d+.\d+ \d+ \d+\r\n') res = pattern.findall(ans) self._logger.debug("we have found this {}".format(res[0])) resp = self._success_response + str(res[0]) return resp except Exception as e: # todo fix, find root of problem # [ERROR][1598038134.061680]: Error processing request: list index out of range # ['Traceback (most recent call last):\n', # ' File "/opt/ros/melodic/lib/python2.7/dist-packages/rospy/impl/tcpros_service.py", line 632, in _handle_request\n response = convert_return_to_response(self.handler(request), self.response_class)\n', # ' File "/opt/ros/melodic/lib/ros_farmer_pc/sba5_device.py", line 307, in handle_request\n self._logger.debug("we have found this {}".format(res[0]))\n', # 'IndexError: list index out of range\n'] # [ERROR][1598038134.067210]: 1598038134.07; control_system; error #Service call failed: service[ / sba5_device] responded # with an error: error processing request: list index out of range resp = self._error_response + e.args[0] return resp elif req.command == 'recalibrate': try: ans = self.do_calibration() resp = self._success_response + ans return resp except SBA5DeviceException as e: resp = 
self._error_response + e.args[0] return resp elif req.command == 'init': try: ans = self.set_initial_params() resp = self._success_response + ans return resp except SBA5DeviceException as e: resp = self._error_response + e.args[0] return resp else: resp = self._error_response + 'unknown command' return resp pass # =========================== low level commands =================================== def send_command(self, command): """ Command must ends with \r\n ! Its important :param command: :return: """ """ To initiate a command, the USB port or RS232 port sends an ASCII character or string. A single character command is acted on immediately when the character is received. A string command is acted on after the command string terminator <CR> is received. The command can be sent with or without a checksum. If a checksum is sent, a “C” follows the checksum value. For example, Device sends command without checksum: S,11,1<CR> Device sends command with checksum: S,11,1,043C<CR> On successfully receiving a command string, the SBA5+ sends an acknowledgement by echoing back to all the ports the Command String and “OK”, each terminated with a <CR> and<linefeed>. """ # \r\n try: ser = serial.Serial( port=self._port, baudrate=self._baudrate, timeout=self._timeout ) bcom = command.encode('utf-8') ser.write(bcom) except Exception as e: raise SBA5DeviceException("SBAWrapper error while send command: {}".format(e)) # self._logger.error("SBAWrapper error while send command: {}".format(e)) # then try to read answer # it must be two messages, ended with \r\n try: ser = serial.Serial( port=self._port, baudrate=self._baudrate, timeout=self._timeout ) echo = (ser.readline()).decode('utf-8') status = (ser.readline()).decode('utf-8') return echo+status #return status except Exception as e: raise SBA5DeviceException("SBAWrapper error while read answer from command: {}".format(e)) # self._logger.error("SBAWrapper error while read answer from command: {}".format(e)) def set_initial_params(self): # TODO: mb we need protection from calling that method twice? 
# async part of init # we have to send some commands before start regular work of unit self._logger.debug("Second part of CO2Sensor init") # ans = await self.sensor.send_command("?\r\n") # self.logger.info("Command ?, answer: {}".format(ans)) # if not "OK" in ans: # self.logger.info("CO2SensorError: {}".format(ans)) # return "CO2SensorError: {}".format(ans) ans_ = "" # we need to shut down auto measurements ans = self.send_command("!\r\n") ans_ += ans self._logger.debug("Command !, answer: {}".format(ans)[:-1]) # we need to shut down auto zero operations ans += self.send_command("A0\r\n") ans_ += ans self._logger.debug("Command A0, answer: {}".format(ans)[:-1]) # we need to set format of output ans += self.send_command("F252\r\n") ans_ += ans self._logger.debug("Command F252, answer: {}".format(ans)[:-1]) # we need to start pump ans += self.send_command("P1\r\n") ans_ += ans self._logger.debug("Command P1, answer: {}".format(ans)[:-1]) # set time of calibration if self._calibration_time == 90: command = "EL\r\n" elif self._calibration_time == 40: command = "EM\r\n" elif self._calibration_time == 21: command = "ES\r\n" else: self._logger.error("wrong initial calibration time, {}".format(self._calibration_time)) self._logger.error("default time will be 90 sec") command = "EL\r\n" ans += self.send_command(command) ans_ += ans self._logger.debug("Command calibraton, answer: {}".format(ans)[:-1]) return ans_ def get_info(self): # only first line of answer ans = self.send_command("?\r\n") self._logger.debug("Getting info from SBA5") return ans[:-1] def do_calibration(self): ans = self.send_command("Z\r\n") self._logger.debug("Starting calibration of SBA5") return ans def do_measurement(self): ans = self.send_command("M\r\n") self._logger.debug("Do measure SBA5") self._logger.debug(("SBA5 result is {}".format(ans))[:-1]) #its try to remove last \n from here return ans def do_command(self, com): ans = self.send_command(com) self._logger.info("send {} command to SBA5".format(com)[:-1]) return ans
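# A standalone check of the regex that the 'measure_co2' branch uses to pull the CO2
# value out of the F252-formatted measurement line. The pattern below is a slightly
# tightened copy (dots escaped) and the sample line is fabricated to fit it; real SBA-5
# output will differ in the numbers. An empty findall() here is exactly the
# 'list index out of range' case noted in handle_request.
import re

SBA5_CO2_PATTERN = re.compile(r'M \d+ \d+ (\d+\.\d+) \d+\.\d+ \d+\.\d+ \d+\.\d+ \d+ \d+\r\n')

sample = "M 49 102 420.5 55.0 22.1 1.2 0 0\r\n"
found = SBA5_CO2_PATTERN.findall(sample)
if found:
    co2_ppm = float(found[0])
    print("parsed CO2 value: {}".format(co2_ppm))
else:
    print("no CO2 value found in line: {!r}".format(sample))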
class ScalesDeviceServer(object): """ """ def __init__(self): # start node rospy.init_node('scales_device_server', log_level=rospy.DEBUG) # get roslaunch params and reinit part of params self._logname = rospy.get_param('scales_log_name', 'scales') self._lost_data_marker = rospy.get_param('scales_lost_data_marker', -65536) self._log_node_name = rospy.get_param('scales_log_node_name', 'scales_log_node') self._data_pub_name = rospy.get_param('scales_data_pub_name', 'scales_data_pub') self._port = rospy.get_param( 'scales_port', '/dev/serial/by-id/usb-USB_Vir_USB_Virtual_COM-if00') self._baudrate = rospy.get_param('scales_baudrate', 9600) self._timeout = rospy.get_param('scales_timeout', 1) self._measure_interval = rospy.get_param('scales_measure_interval', 5) # create log topic publisher self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10) self._data_pub = rospy.Publisher(self._data_pub_name, Temperature, queue_size=10) # logger self._logger = CustomLogger(name=self._logname, logpub=self._log_pub) self._logger.debug("scales_device_server init") # print("we here init") # and go to infinite loop # self._service = rospy.Service(self._service_name, SBA5Device, self.handle_request) self._loop() def _loop(self): # rospy.sleep(1) while not rospy.is_shutdown(): res = self.get_mean_data(self._measure_interval) msg = Temperature() msg.temperature = res msg.header.stamp.secs = rospy.get_time() self._data_pub.publish(msg) # print("we alive") def get_mean_data(self, dtime): w_datas = [] t_start = time.time() with serial.Serial(self._port, self._baudrate, timeout=self._timeout) as scales: while time.time() - t_start < dtime: try: raw_data = scales.readline() pattern = re.compile( r'\w\w,\w\w,\s*(\d+.\d+)\s*\w' ) # for answers like "ST, GS, 55.210 g" w_data = float(pattern.findall(raw_data)[0]) w_datas.append(w_data) res = numpy.mean(w_datas) except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error( "scales serial error: {}".format(err_list)) res = self._lost_data_marker return res def get_data(self): with serial.Serial(self._port, self._baudrate, timeout=self._timeout) as scales: try: raw_data = scales.readline() pattern = re.compile(r'\w\w, \w\w, (\d+.\d+) \w' ) # for answers like "ST, GS, 55.210 g" w_data = float(pattern.findall(raw_data)[0]) return w_data except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("scales serial error: {}".format(err_list)) return self._lost_data_marker
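# A standalone sketch of the scales line parsing used in get_mean_data, run against a
# reply like the "ST,GS, 55.210 g" example quoted in the comments. readline() returns
# bytes on Python 3, so the sketch decodes first; on the Python 2 / ROS Melodic setup
# above the decode branch is effectively a no-op.
import re

SCALES_PATTERN = re.compile(r'\w\w,\w\w,\s*(\d+\.\d+)\s*\w')


def parse_scales_line(raw_line):
    if isinstance(raw_line, bytes):
        raw_line = raw_line.decode('ascii', errors='ignore')
    found = SCALES_PATTERN.findall(raw_line)
    return float(found[0]) if found else None


print(parse_scales_line(b"ST,GS,    55.210 g\r\n"))  # -> 55.21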
class LedDeviceServer(object): """ class to receive messages from other ros nodes, handle it send messages to led driver and parse its output back as response """ def __init__(self): # hardcoded constants self._success_response = "success" self._error_response = "error: " # start node rospy.init_node('led_device_server', anonymous=True, log_level=rospy.DEBUG) # get roslaunch params and reinit part of params self._logname = rospy.get_param('~led_log_name', 'LED') self._log_node_name = rospy.get_param('~led_log_node_name', 'led_log_node') self._port = rospy.get_param('~led_port', '/dev/serial/by-id/usb-Silicon_Labs_CP2102_USB_to_UART_Bridge_Controller_0001-if00-port0') self._baudrate = rospy.get_param('~led_baudrate', 19200) self._timeout = rospy.get_param('~led_timeout', 10) self._service_name = rospy.get_param('~led_service_name') self._max_red_current = rospy.get_param('~led_max_red_current', 250) self._min_red_current = rospy.get_param('~led_min_red_current', 10) self._max_white_current = rospy.get_param('~led_max_white_current', 250) self._min_white_current = rospy.get_param('~led_min_white_current', 10) # create log topic publisher self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10) # logger self._logger = CustomLogger(name=self._logname, logpub=self._log_pub) self._logger.debug("led_device_server init") # service self._service = rospy.Service(self._service_name, LedDevice, self.handle_request) self._loop() def _loop(self): #self._logger.debug("led_device_server ready to serve") rospy.spin() def handle_request(self, req): # check params from request # at first find what user wants from us: self._logger.info("+++++++++++++++++++++++++++++++++++++++++++++++") self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++") self._logger.info("we got request: {}".format(req)) if not req.command: # if we got empty string resp = self._error_response + 'empty_command' return resp elif req.command == 'start': try: self._start() resp = self._success_response return resp except LedDeviceException as e: resp = self._error_response + e.args[0] return resp elif req.command == 'stop': try: self._stop() resp = self._success_response return resp except LedDeviceException as e: resp = self._error_response + e.args[0] return resp elif req.command == 'start_configure': try: self._start_configure() resp = self._success_response return resp except LedDeviceException as e: resp = self._error_response + e.args[0] return resp elif req.command == 'finish_configure_and_save': try: self._finish_configure_with_saving() resp = self._success_response return resp except LedDeviceException as e: resp = self._error_response + e.args[0] return resp elif req.command == 'set_current': if req.red < self._min_red_current or req.red > self._max_red_current: resp = self._error_response + 'too big red current' return resp elif req.white < self._min_white_current or req.white > self._max_white_current: resp = self._error_response + 'too big white current' return resp else: try: # red self._set_current(0, int(req.red)) # white self._set_current(1, int(req.white)) resp = self._success_response return resp except LedDeviceException as e: resp = self._error_response + e.args[0] return resp elif req.command == 'full_reconfigure': if req.red < self._min_red_current or req.red > self._max_red_current: resp = self._error_response + 'too big red current' return resp elif req.white < self._min_white_current or req.white > self._max_white_current: resp = self._error_response + 'too big white current' return 
resp else: try: self._stop() self._start_configure() # red self._set_current(0, int(req.red)) # white self._set_current(1, int(req.white)) self._finish_configure_with_saving() self._start() resp = self._success_response return resp except LedDeviceException as e: resp = self._error_response + e.args[0] return resp else: resp = self._error_response + 'unknown command' return resp # ==================== low level command wrappers ================== def send_command(self, com, log_comment=None ): ans = None self._logger.debug("-------------------------------") if (log_comment): self._logger.info("Sending {}".format(log_comment)) else: self._logger.debug("We want to send this:") self._logger.debug(self.parse_command(com)) try: ser = serial.Serial(port=self._port, baudrate=self._baudrate, timeout=self._timeout) ser.write(com) except Exception as e: self._logger.error("Error happened while write: {}".format(e)) self._logger.debug("-------------------------------") raise LedDeviceException("Error happened while write: {}".format(e)) try: ans = ser.read(len(com)) # returns str in python2, oooooff self._logger.debug("We have read {} bytes".format(len(ans))) except Exception as e: self._logger.error("Error happened while read: {}".format(e)) self._logger.debug("-------------------------------") raise LedDeviceException("Error happened while read: {}".format(e)) if (not ans or (len(ans) != len(com))): self._logger.error("Broken answer from GIC: {}".format(ans)) self._logger.debug("-------------------------------") raise LedDeviceException("Broken answer from GIC: {}".format(ans)) else: self._logger.info("Succesfully got answer from GIC:") # lets try to decode to int byte_ans = bytearray() for b_ in ans: b_decoded = ord(b_) # important when encode ser.read() output back to int self._logger.debug("Decoded answer byte: {}".format(hex(b_decoded))) byte_ans.extend([b_decoded]) self._logger.debug(self.parse_command(byte_ans)) return byte_ans def crc16_ccitt(self, data_, crc=0xffff): """Calculate the crc16 ccitt checksum of some data A starting crc value may be specified if desired. The input data is expected to be a sequence of bytes (string) and the output is an integer in the range (0, 0xFFFF). No packing is done to the resultant crc value. To check the value a checksum, just pass in the data byes and checksum value. If the data matches the checksum, then the resultant checksum from this function should be 0. 
""" tab = CRC16_CCITT_TAB # minor optimization (now in locals) # for byte in six.iterbytes(data_): for byte in data_: self._logger.debug("current byte is {}".format(hex(byte))) crc = (((crc << 8) & 0xff00) ^ tab[((crc >> 8) & 0xff) ^ byte]) self._logger.debug("current crc is {}".format(hex(byte))) self._logger.debug("final crc is {}".format(hex(crc & 0xffff))) return crc & 0xffff def parse_command(self, com): # parse content of command data_length = com[2] length = len(com) parsed_output = "" self._logger.debug("-------------------------------") self._logger.debug("Parsed command ") for b_ in com: pass # self._logger.debug("{} - type of raw byte".format(type(b_))) # self._logger.debug("{} - type of raw byte, and hex value of it {} ".format(type(b_), hex(b_))) # self._logger.debug("{} - raw byte ".format(hex(b_))) self._logger.debug("------------------") self._logger.debug("{} - header byte ".format(hex(com[0]))) self._logger.debug("{} - destination byte".format(hex(com[1]))) self._logger.debug("{} - length of command".format(hex(com[2]))) self._logger.debug("{} - type of command".format(hex(com[3]))) if data_length > 1: # parse content of command for i in range(4, 4 + data_length - 1): self._logger.debug("{} - data byte".format(hex(com[i]))) else: pass self._logger.debug("{} - last byte of CRC16 ccitt control sum".format(hex(com[length - 2]))) self._logger.debug("{} - first byte of CRC16 ccitt control sum".format(hex(com[length - 1]))) self._logger.debug("-------------------------------") return parsed_output def simple_command(self, ACK=0x00, NACK=0x80, ctype=0x00, data=None, name=None ): # data is list of ints or None # there is a simple command template command = self.create_command(ctype=ctype, data=data) ans = self.send_command(command, log_comment=name) if ans: answer = bytearray(ans) if ACK in answer: self._logger.debug("There is ACK flag {} in answer ".format(hex(ACK))) self._logger.debug("-------------------------------") return answer if NACK in answer: self._logger.debug("There is NACK flag {} in answer ".format(hex(NACK))) self._logger.debug("Something went wrong in GIC") self._logger.debug("-------------------------------") raise LedDeviceException("There is NACK flag {} in answer ".format(hex(NACK))) else: self._logger.debug("Something went wrong, we got no answer") self._logger.debug("-------------------------------") raise LedDeviceException("Something went wrong") def create_command(self, preamble=0x55, direction=0xCC, length=None, ctype=0x01, data=None ): command = bytearray() # print("preamble ", preamble) command.extend([preamble]) # print("direction ", direction) command.extend([direction]) if (not length): # length of command is length of data + 1 byte of command_type if (data): length = len(bytearray(data)) + 1 # split and add to length # length = int.to_bytes(lenn, 1, byteorder = 'big') command.extend([length]) else: length = 0x01 command.extend([length]) else: command.extend([length]) # print("length ", length) # print("ctype ", ctype) command.extend([ctype]) if (data): # data must be list or none command.extend(data) # print("data ", data) # crc should be calculated only for LENGTH | TYPE | DATA fields payload = bytearray() payload.extend([length]) payload.extend([ctype]) if (data): payload.extend(data) crc_raw = self.crc16_ccitt(payload) # returns int self._logger.debug("{} - crc raw ".format(hex(crc_raw))) # crc_bytes = crc_raw.to_bytes(2, byteorder='little') # byteorder='little' first_byte = ((crc_raw & 0xff00) >> 8) last_byte = (crc_raw & 0x00ff) 
self._logger.debug("{} - first crc byte".format(hex(first_byte))) self._logger.debug("{} - last crc byte".format(hex(last_byte))) # then reorder crc_bytes = bytearray([last_byte, first_byte]) # its important # print("crc_bytes ", crc_bytes) command.extend(crc_bytes) return command # ======================= real commands =========================== def _start(self): # START = bytearray(b'\x55\xCC\x01\x02\x7C\x0E') return self.simple_command( ACK=0x02, NACK=0x82, ctype=0x02, data=None, name="START" ) def _stop(self): # STOP = bytearray(b'\x55\xCC\x01\x03\x5D\x1E') return self.simple_command( ACK=0x03, NACK=0x83, ctype=0x03, data=None, name="STOP" ) def _start_configure(self): # START_CONFIGURE = bytearray(b'\x55\xCC\x01\x05\x9B\x7E') return self.simple_command( ACK=0x05, NACK=0x85, ctype=0x05, data=None, name="START_CONFIGURE" ) def _set_current(self, channel=0, value=100): # SET_CURRENT = bytearray(b'\x55\xCC\x04\x0B\x01\xE8\x03\x00\x00') # SET_CURRENT_200_1 = bytearray(b'\x55\xCC\x04\x0B\x01\xC8\x00\xD8\x2E') # SET_CURRENT_200_0 = bytearray(b'\x55\xCC\x04\x0B\x00\xC8\x00\xE8\x19') # SET_CURRENT_50_1 = bytearray(b'\x55\xCC\x04\x0B\x01\x32\x00\xD2\xD2') data = bytearray() data.extend([channel]) # split value bytes and add in little-endian to data # # first_byte = ((crc_raw & 0xff00) >> 8) # last_byte = (crc_raw & 0x00ff) # self._logger.debug("current byte is{}".format(hex(byte))) self._logger.debug("value raw {}".format(hex(value))) # crc_bytes = crc_raw.to_bytes(2, byteorder='little') # byteorder='little' value_first_byte = ((value & 0xff00) >> 8) self._logger.debug("value first byte {}".format(hex(value_first_byte))) # , value_first_byte) value_last_byte = (value & 0x00ff) self._logger.debug("value last byte {}".format(hex(value_last_byte))) # , value_last_byte) # data.extend(int.to_bytes(channel, 1, byteorder='big')) # data.extend(int.to_bytes(value, 2, byteorder='little')) data.extend([value_last_byte, value_first_byte]) self._logger.debug("final data array {}".format(data)) return self.simple_command( ACK=0x0B, NACK=0x8B, ctype=0x0B, data=data, name="SET_CURRENT_{}_{}".format(channel, value) ) def _finish_configure_with_saving(self): # FINISH_CONFIGURE_WITH_SAVING = bytearray(b'\x55\xCC\x01\x07\xD9\x5E') return self.simple_command( ACK=0x07, NACK=0x87, ctype=0x07, data=None, name="FINISH_CONFIGURE_WITH_SAVING" ) def _exit_without_saving(self): # EXIT_WITHOUT_SAVING = bytearray(b'"\x55\xCC\x01\x06\xF8\x4E') return self.simple_command( ACK=0x06, NACK=0x86, ctype=0x06, data=None, name="EXIT_WITHOUT_SAVING" )
class MYSQLDataSaver(object): def generate_experiment_id(self, date_=None, type_=None, number_=None): """ date - str type - str number - str """ if not date_: date_ = datetime.datetime.now().strftime('%Y_%m_%d') if not type_: type_ = "TEST" if not number_: number_ = "0000" # experiment id is string like that YYYY_MM_DD_TYPE_NUMB # where YYYY_MM_DD is data of start # TYPE is short type of experiment (like TEST, TABLE, SEARCH etc) # NUMB is uniq number of experiment id_str = date_ + type_ + number_ return id_str def __init__(self): # hardcoded constants self._success_response = "success: " self._error_response = "error: " # start node rospy.init_node('mysql_data_saver_server', log_level=rospy.INFO) # get roslaunch params self._description = rospy.get_param('mysql_data_saver_experiment_description') # do not add ~ !! self._db_name = "experiment" + str(self._description["experiment_number"]) # names for self topics self._logname = rospy.get_param('~mysql_data_saver_log_name', 'mysql_data_saver') self._lost_data_marker = rospy.get_param('~mysql_data_saver_lost_data_marker', -65536) self._log_node_name = rospy.get_param('~mysql_data_saver_log_node_name', 'mysql_data_saver_log') # self._service_name = rospy.get_param('~mysql_data_saver_service_name', 'mysql_data_saver') self._db_params = rospy.get_param('mysql_db_params') self._system_log_sub = rospy.Subscriber( name='/rosout_agg', data_class=Log, callback=self._log_callback, queue_size=50) # create log topic publisher self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10) # start logger self._logger = CustomLogger(name=self._logname, logpub=self._log_pub) self._logger.info("mysql_data_saver_server init") default_id = self.generate_experiment_id() # self._experiment_id = rospy.get_param('~mysql_data_saver_experiment_id', default_id) # experiment id is string like that YYYY_MM_DD_TYPE_NUMB # where YYYY_MM_DD is data of start # TYPE is short type of experiment (like TEST, TABLE, SEARCH etc) # NUMB is uniq number of experiment # TODO get params of sql database # TODO really get self.con = pymysql.connect(host=self._db_params["host"], user=self._db_params["user"], password=self._db_params["password"], # db='experiment', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) self.con.close() # check if db available self._raw_topics = rospy.get_param('mysql_data_saver_raw_topics') # self._exp_topics = rospy.get_param('~mysql_data_saver_exp_topics') # list to keep subscribers (for what?) self._subscribers_list = list() # check if tables in db exists, or update it self._create_database() # TODO insert into sensors table all sensors from .launch if not exists # TODO and insert into experiment table current experiment if not exists # sub to all topics from parsed string self._logger.debug("creating raw subs") for topic in self._raw_topics: # for now we think that all raw_data_topics have type "sensor_msgs/Temperature" # it must be in architecture of all system ! 
# so here we think that it is "sensor_msgs/Temperature" as default s = rospy.Subscriber(name=topic['name'], data_class=Temperature, callback=self._raw_data_callback, callback_args=topic, queue_size=2) print(s) self._subscribers_list.append(s) self._logger.debug("end of init, go to loop") self._loop() def _loop(self): rospy.spin() def _log_callback(self, log_msg): exp_id_ = self._description["experiment_number"] time_ = datetime.datetime.fromtimestamp( log_msg.header.stamp.to_sec()).strftime('%Y_%m_%d %H:%M:%S') level_ = log_msg.level # # Pseudo-constants # DEBUG = 1 # INFO = 2 # WARN = 4 # ERROR = 8 # FATAL = 16 node_ = log_msg.name msg_ = log_msg.msg if level_ == 1: log_table_name = "debug_logs" if level_ == 2: log_table_name = "info_logs" if level_ == 4: log_table_name = "warn_logs" if level_ == 8: log_table_name = "error_logs" if level_ == 16: log_table_name = "fatal_logs" con = pymysql.connect(host=self._db_params["host"], user=self._db_params["user"], password=self._db_params["password"], # db='experiment', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) cur = con.cursor() cur.execute("use {}".format(self._db_name)) comm_str = 'insert into {}' \ '(exp_id, time, level, node, msg)' \ 'values("{}", "{}","{}", "{}", "{}")'.format(log_table_name, exp_id_, time_, level_, node_, msg_) # print("comm_str: {}".format(comm_str)) # TODO: remove after debug try: cur.execute(comm_str) except Exception as e: print("Error while saving logs from :") print(e) print(log_msg.msg) cur.execute('commit') con.close() def _raw_data_callback(self, data_message, topic_info): # topic_info must be dict and contain 'name', 'id', 'type', 'dtype', 'units', 'status': # example: # { name: '/bmp180_1_temp_pub', id: '2', type: 'temperature', dtype: 'float64', units: 'C', status:'raw }, # example how to convert rospy time to datetime # (datetime.datetime.fromtimestamp(rospy.Time.now().to_sec())).strftime('%Y_%m_%d,%H:%M:%S') data_ = data_message.temperature self._logger.debug("data: {}".format(data_)) if data_ != self._lost_data_marker: time_ = datetime.datetime.fromtimestamp( data_message.header.stamp.to_sec()).strftime('%Y_%m_%d %H:%M:%S') self._logger.debug("time: {}".format(time_)) sensor_ = topic_info["id"] self._logger.debug("sensor: {}".format(sensor_)) exp_id_ = self._description["experiment_number"] self._logger.debug("exp_id: {}".format(exp_id_)) con = pymysql.connect(host=self._db_params["host"], user=self._db_params["user"], password=self._db_params["password"], # db='experiment', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) cur = con.cursor() cur.execute("use {}".format(self._db_name)) comm_str = 'insert into raw_data'\ '(exp_id, time, sensor_id, data)'\ 'values("{}", "{}","{}", "{}" )'.format( exp_id_, time_, sensor_, data_) self._logger.debug("comm_str: {}".format(comm_str)) cur.execute(comm_str) cur.execute('commit') con.close() else: self._logger.debug("we got lost data marker from " "node and we wont save it to db") def _create_database(self): self._logger.debug("create database") con = pymysql.connect(host=self._db_params["host"], user=self._db_params["user"], password=self._db_params["password"], # db='experiment', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) cur = con.cursor() cur.execute("CREATE DATABASE IF NOT EXISTS {}".format(self._db_name)) cur.execute("use {}".format(self._db_name)) # TODO load params from .launch and put to the database # create all log tables for different types of error status for log_table_name in ["fatal_logs", "error_logs", 
"warn_logs", "info_logs", "debug_logs"]: self._logger.info("create tables for all logs") cur.execute('create table if not exists {}' ' ( log_id bigint unsigned primary key not null auto_increment,' ' exp_id SMALLINT unsigned,' ' time timestamp,' ' level TINYINT,' ' node varchar(100),' ' msg varchar(2000) )'.format(log_table_name) ) cur.execute('describe {}'.format(log_table_name)) print(cur.fetchall()) self._logger.info("create table raw_data") cur.execute('create table if not exists raw_data' ' (data_id bigint unsigned primary key not null auto_increment,' ' exp_id SMALLINT unsigned,' ' time timestamp,' ' sensor_id SMALLINT unsigned,' ' data double)' ) cur.execute('describe raw_data') print(cur.fetchall()) self._logger.info("create table sensors") cur.execute('create table if not exists sensors' '(sensor_id SMALLINT unsigned primary key not null,' 'name varchar(100),' 'type varchar(100),' 'units varchar(100),' 'prec double,' 'description varchar(1000),' 'status varchar(100)' ')') # put default data from launch file to sensors table self._logger.info("creating sensors table") for topic in self._raw_topics: # TODO add data from dict to table pass cur.execute('describe sensors') print(cur.fetchall()) self._logger.info("create table experiments") cur.execute('create table if not exists experiments' '( exp_id SMALLINT unsigned primary key not null,' 'start_date timestamp,' 'end_date timestamp,' 'params varchar(1000)' ')' ) cur.execute('describe experiments') print(cur.fetchall()) cur.execute('commit') con.close()