def __init__(self):
    """Start the exp_server ROS node.

    Reads launch params, builds the search handler for the configured
    mode, pre-calculates the first search point, registers the ExpSystem
    service and enters the main loop.
    """
    # hardcoded constants used as response prefixes for the service protocol
    self._success_response = "success: "
    self._error_response = "error: "
    # start node
    rospy.init_node('exp_server', log_level=rospy.DEBUG)
    # get roslaunch params
    # names for self topics
    self._logname = rospy.get_param('~exp_log_name', 'exp_system')
    self._log_node_name = rospy.get_param('~exp_log_node_name', 'exp_log_node')
    self._service_name = rospy.get_param('~exp_service_name', 'exp_system')
    self._exp_pub_name = rospy.get_param('~exp_pub_name', 'exp_pub')
    # mode of work
    # no matter which method is chosen we will parse params for all of them from .launch file
    self._mode = rospy.get_param('~exp_mode_name', 'table')
    # global (no '~') params, shared with the mysql_data_saver node
    self._search_table = rospy.get_param('exp_search_table')
    self._exp_description = rospy.get_param('mysql_data_saver_experiment_description')
    self._exp_id = self._exp_description["experiment_number"]
    self._db_params = rospy.get_param('mysql_db_params')
    self._db_name = "experiment" + str(self._exp_id)
    # todo add here parsing params of gradient search and other smart methods
    # create log topic publisher
    self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
    # logger
    self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
    self._logger.info("exp_server init")
    # create exp_db if it is not exists now
    self._create_exp_db()
    # create search method handler object
    # it must not contain any ros api
    if self._mode == 'table':
        self._search_handler = TableSearchHandler(
            self._search_table, self._db_params, self._exp_description, self._logger)
    elif self._mode == 'ordered_table':
        self._search_handler = OrderedTableSearchHandler(
            self._search_table, self._db_params, self._exp_description, self._logger)
    # todo add other search modes
    else:
        # FIX: an unrecognized mode previously left self._search_handler
        # unset, so the calculate_next_point() call below crashed with an
        # opaque AttributeError; fail fast with a clear message instead
        err = "unknown exp_mode_name: {}".format(self._mode)
        self._logger.error(err)
        raise ValueError(err)
    # self._search_handler will calculate and store the current search point
    # until we get reqv from control system
    self._search_handler.calculate_next_point()
    # service
    self._service = rospy.Service(self._service_name, ExpSystem, self._handle_request)
    self._loop()
def __init__(self):
    """Bring up the sba5_device_server node and serve SBA5Device requests."""
    # response prefixes used by the service protocol
    self._success_response = "success: "
    self._error_response = "error: "
    rospy.init_node('sba5_device_server', log_level=rospy.DEBUG)
    # naming params from roslaunch
    self._logname = rospy.get_param('~sba5_log_name', 'sba5')
    self._log_node_name = rospy.get_param('~sba5_log_node_name', 'sba5_log_node')
    self._service_name = rospy.get_param('~sba5_service_name', 'sba5_device')
    # serial-link and sensor configuration, coerced to proper numeric types
    self._calibration_time = int(rospy.get_param('~sba5_calibration_time'))
    self._port = rospy.get_param(
        '~sba5_port',
        '/dev/serial/by-id/usb-FTDI_FT230X_Basic_UART_DN03WQZS-if00-port0')
    self._baudrate = int(rospy.get_param('~sba5_baudrate', 19200))
    self._timeout = float(rospy.get_param('~sba5_timeout', 0.2))
    # publisher for our own log topic, then the logger that feeds it
    self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
    self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
    self._logger.debug("sba5_device_server init")
    # register the service handler and block in the node loop
    self._service = rospy.Service(self._service_name, SBA5Device, self.handle_request)
    self._loop()
def __init__(self):
    """Bring up the led_device_server node and serve LedDevice requests."""
    # response markers for the service protocol
    self._success_response = "success"
    self._error_response = "error: "
    rospy.init_node('led_device_server', anonymous=True, log_level=rospy.DEBUG)
    # naming params from roslaunch (the service name has no default)
    self._logname = rospy.get_param('~led_log_name', 'LED')
    self._log_node_name = rospy.get_param('~led_log_node_name', 'led_log_node')
    self._service_name = rospy.get_param('~led_service_name')
    # serial-link configuration
    self._port = rospy.get_param(
        '~led_port',
        '/dev/serial/by-id/usb-Silicon_Labs_CP2102_USB_to_UART_Bridge_Controller_0001-if00-port0')
    self._baudrate = rospy.get_param('~led_baudrate', 19200)
    self._timeout = rospy.get_param('~led_timeout', 10)
    # allowed drive-current bounds for both channels
    self._max_red_current = rospy.get_param('~led_max_red_current', 250)
    self._min_red_current = rospy.get_param('~led_min_red_current', 10)
    self._max_white_current = rospy.get_param('~led_max_white_current', 250)
    self._min_white_current = rospy.get_param('~led_min_white_current', 10)
    # own log topic publisher + logger
    self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
    self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
    self._logger.debug("led_device_server init")
    # register the service and enter the node loop
    self._service = rospy.Service(self._service_name, LedDevice, self.handle_request)
    self._loop()
def __init__(self):
    """ds18b20 temperature-sensor node: read launch params, create the
    data publisher and logger, then block forever in self._loop()."""
    # start node
    rospy.init_node('ds18b20_device_server', log_level=rospy.DEBUG, anonymous=True)
    # get roslaunch params and reinit part of params
    self._device_name = rospy.get_param('~ds18b20_device_name')
    # log name derives from the device name; the [3:] slice presumably
    # strips a fixed 3-char prefix from the 1-wire id -- TODO confirm
    # against the launch file
    self._logname = 'ds18b20_' + self._device_name[
                                 3:] #rospy.get_param('~ds18b20_log_name', 'ds18b20')
    # sentinel published instead of a reading when the sensor fails
    self._lost_data_marker = rospy.get_param('~ds18b20_lost_data_marker', -65536)
    # self._log_node_name = rospy.get_param('~ds18b20_log_node_name', 'ds18b20_log_node')
    # self._temp_pub_name = rospy.get_param('~ds18b20_temp_pub_name', 'ds18b20_1_temp_pub')
    self._temp_pub_name = 'ds18b20_' + self._device_name[3:] + '_data_pub'
    self._timeout = rospy.get_param('~ds18b20_timeout', 1)
    # no default: the launch file must provide the measure interval
    self._measure_interval = rospy.get_param('~ds18b20_measure_interval')
    self._temp_pub = rospy.Publisher(self._temp_pub_name, Temperature, queue_size=10)
    # logger (note: no log topic publisher here, unlike other device servers)
    self._logger = CustomLogger(name=self._logname)
    self._logger.info("ds18b20_device_server init : {}".format(
        self._device_name))
    # print("we here init")
    # and go to infinite loop
    self._loop()
class DS18B20DeviceServer(object):
    """ROS node wrapping a single DS18B20 1-wire temperature sensor.

    Every `_measure_interval` seconds it reads the sensor and publishes a
    Temperature message; if the read raises, it publishes the configured
    lost-data marker instead so the gap is visible downstream.
    """
    def __init__(self):
        # start node
        rospy.init_node('ds18b20_device_server', log_level=rospy.DEBUG, anonymous=True)
        # get roslaunch params and reinit part of params
        self._device_name = rospy.get_param('~ds18b20_device_name')
        # log name derives from the device name; the [3:] slice presumably
        # strips a fixed 3-char prefix from the 1-wire id -- TODO confirm
        self._logname = 'ds18b20_' + self._device_name[
                                     3:] #rospy.get_param('~ds18b20_log_name', 'ds18b20')
        # sentinel published instead of a reading when the sensor fails
        self._lost_data_marker = rospy.get_param('~ds18b20_lost_data_marker', -65536)
        # self._log_node_name = rospy.get_param('~ds18b20_log_node_name', 'ds18b20_log_node')
        # self._temp_pub_name = rospy.get_param('~ds18b20_temp_pub_name', 'ds18b20_1_temp_pub')
        self._temp_pub_name = 'ds18b20_' + self._device_name[3:] + '_data_pub'
        self._timeout = rospy.get_param('~ds18b20_timeout', 1)
        # no default: the launch file must provide the measure interval
        self._measure_interval = rospy.get_param('~ds18b20_measure_interval')
        self._temp_pub = rospy.Publisher(self._temp_pub_name, Temperature, queue_size=10)
        # logger (no log topic publisher here, unlike the other device servers)
        self._logger = CustomLogger(name=self._logname)
        self._logger.info("ds18b20_device_server init : {}".format(
            self._device_name))
        # print("we here init")
        # and go to infinite loop
        self._loop()

    def _loop(self):
        """Publish one Temperature message per measure interval, forever."""
        while not rospy.is_shutdown():
            rospy.sleep(self._measure_interval)
            try:
                temp = ds18b20_read_temp(self._device_name)
                msg = Temperature()
                msg.temperature = temp
                # NOTE(review): rospy.get_time() returns a float; assigning
                # it to header.stamp.secs (an int field) looks like it drops
                # the fractional part -- confirm rospy.Time.now() was not
                # intended here
                msg.header.stamp.secs = rospy.get_time()
                self._temp_pub.publish(msg)
            except Exception as e:
                # sensor read failed: publish the lost-data marker so the
                # outage is visible in the data stream
                msg = Temperature()
                msg.temperature = self._lost_data_marker
                self._logger.warning(
                    "ds18b20_device_server {} error : {}".format(
                        self._device_name, e))
                msg.header.stamp.secs = rospy.get_time()
                self._temp_pub.publish(msg)
def __init__(self): print("========== start init") # start node rospy.init_node('mysql_data_plotter_server', log_level=rospy.DEBUG, anonymous=True) # self logging self._logname = rospy.get_param('~mysql_data_plotter_log_name', 'mysql_data_plotter') self._log_node_name = rospy.get_param( '~mysql_data_plotter_log_node_name', 'mysql_data_plotter_log_node') # create log topic publisher self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10) # logger self._logger = CustomLogger(name=self._logname, logpub=self._log_pub) self._logger.debug("mysql_data_plotter_server init") # for default we will use current ip in tun0 protocol # if there is no any tun0 - it must be a critical error self.ipv4 = os.popen('ip addr show tun0').read().split( "inet ")[1].split("/")[0] self.web_addr = self.ipv4 self.web_port = 8090 # on every rpi self.list_of_hdf = list() self.metastring = "experiment metadata: \n" # TODO load description from experiments table and add to metastring # self.list_of_datasets = print("========== start get list of datasets") # TODO; fix -all below only for test |||| # vvvv self.list_of_tables = rospy.get_param('~mysql_data_plotter_raw_topics') # self.list_of_tables = [ # TODO must be loaded from .launch # {'name': '/bmp180_1_temp_pub', 'id': 1, 'type': 'temperature', 'dtype': 'float64', 'units': 'C', 'status': 'raw_data'}, # {'name': '/bmp180_1_pressure_pub', 'id': 2, 'type': 'pressure', 'dtype': 'float64', 'units': 'kPa', 'status': 'raw_data'}, # {'name': '/raw_co2_pub', 'type': 'co2', 'id': 3, 'dtype': 'float64', 'units': 'ppmv', 'status': 'raw_data'}, # {'name': '/si7021_1_hum_pub', 'id': 4, 'type': 'humidity', 'dtype': 'float64', 'units': 'percents', 'status': 'raw_data'}, # {'name': '/si7021_1_temp_pub', 'id': 5, 'type': 'temperature', 'dtype': 'float64', 'units': 'C', 'status': 'raw_data'} # ] print("========== end of init")
def __init__(self, product, service, zuul_url, zuul_route):
    """Set up the regression-test automation context.

    `service` must have the shape "<component>/<feature>/<version>/<name>";
    any other shape terminates the script via sys.exit().
    """
    self._PRODUCT = product
    self._SERVICE = service
    self._BASE_PATH = os.path.expanduser('~') + "/Documents/GRP"
    started_at = datetime.now()
    # one timestamped log directory per run
    log_dir = "/var/log/grp/regression-test-automation/{}/{}/{}".format(
        self._PRODUCT, self._SERVICE,
        '{0:%Y-%m-%d__%H.%M.%S}'.format(started_at))
    self.log = CustomLogger(log_dir).get_logger()
    self.log.info("Script starts executes at : {0:%Y-%m-%d %H:%M:%S}\n".format(started_at))
    # the service path must split into exactly four components
    parts = service.split('/')
    if len(parts) != 4:
        sys.exit("Invalid service : %s\nPlease enter a valid service." % service)
    self._COMPONENT, self._FEATURE, self._VERSION, self._SERVICE_NAME = parts
    self._ZUUL_URL = zuul_url
    self._ZUUL_ROUTE = zuul_route
def performance():
    """Benchmark DatabaseConnector.run() across several batch sizes and
    print a small DataFrame of (batch_size, wall-clock time)."""
    logger = CustomLogger('main_db_logging.yaml').logger
    config_path = 'main_db_settings.ini'
    results = pd.DataFrame(columns=['batch_size', 'time'])
    for row, size in enumerate([5000, 10000, 20000, 50000]):
        connector = DatabaseConnector(logger=logger,
                                      batch_size=size,
                                      config_file=config_path)
        elapsed = connector.run()
        results.loc[row, 'batch_size'] = connector.batch_size
        # keep only the whole-seconds part of the duration string
        results.loc[row, 'time'] = str(elapsed).split('.')[0]
    print(results)
def __init__(self):
    """si7021 sensor node: publishes humidity and temperature readings."""
    rospy.init_node('si7021_device_server', log_level=rospy.DEBUG)
    # naming and behaviour params from roslaunch
    self._logname = rospy.get_param('~si7021_log_name', 'si7021')
    self._lost_data_marker = rospy.get_param('~si7021_lost_data_marker', -65536)
    self._log_node_name = rospy.get_param('~si7021_log_node_name', 'si7021_log_node')
    self._hum_pub_name = rospy.get_param('~si7021_hum_pub_name', 'si7021_1_hum_pub')
    self._temp_pub_name = rospy.get_param('~si7021_temp_pub_name', 'si7021_1_temp_pub')
    self._timeout = rospy.get_param('~si7021_timeout', 1)
    # no default: measure interval must come from the launch file
    self._measure_interval = rospy.get_param('~si7021_measure_interval')
    # publishers: own log topic plus one Temperature-typed topic per channel
    # (the humidity channel reuses the Temperature message type)
    self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
    self._hum_pub = rospy.Publisher(self._hum_pub_name, Temperature, queue_size=10)
    self._temp_pub = rospy.Publisher(self._temp_pub_name, Temperature, queue_size=10)
    self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
    self._logger.debug("si7021_device_server init")
    # this node only publishes (no service); block in the loop
    self._loop()
def __init__(self):
    """k30 CO2 sensor node: owns the serial device and serves K30Device requests."""
    # response prefixes for the service protocol
    self._success_response = "success: "
    self._error_response = "error: "
    rospy.init_node('k30_device_server', log_level=rospy.DEBUG)
    # naming params
    self._logname = rospy.get_param('~k30_log_name', 'k30')
    self._log_node_name = rospy.get_param('~k30_log_node_name', 'k30_log_node')
    self._service_name = rospy.get_param('~k30_service_name', 'k30_device')
    # serial-link and sensor params
    self._port = rospy.get_param(
        '~k30_port', '/dev/serial/by-id/usb-Silicon_Labs_CP2102_USB_to_'
        'UART_Bridge_Controller_0003-if00-port0')
    self._baudrate = rospy.get_param('~k30_baudrate', 9600)
    self._timeout = rospy.get_param('~k30_timeout', 1)
    self._calibration_time = rospy.get_param('~k30_calibration_time', 25)
    self._lost_data_marker = rospy.get_param('~k30_lost_data_marker', -65536)
    # own log topic publisher + logger
    self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
    self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
    self._logger.debug("k30_device_server init")
    # the serial device wrapper itself
    self._device = K30(devname=self._port,
                       baudrate=self._baudrate,
                       timeout=self._timeout)
    # register the service and enter the node loop
    self._service = rospy.Service(self._service_name, K30Device, self.handle_request)
    self._loop()
def main():
    """Run one DatabaseConnector pass; for tiny imports (<= 10 rows)
    dump diagnostic queries to stdout."""
    # performance()
    logger = CustomLogger('main_db_logging.yaml').logger
    config_path = 'main_db_settings.ini'
    db = DatabaseConnector(logger=logger, config_file=config_path)
    db.run()
    if db.number_of_rows <= 10:
        separator = '-' * 100
        db.open_db()
        print(separator)
        print(db.get_max_db_id())
        print(separator)
        print(db.get_data())
        print(separator)
        db.close_db()
def __init__(self):
    """watchdog node: tracks a dead-flag and answers Watchdog service calls."""
    # response prefixes for the service protocol
    self._success_response = "success: "
    self._error_response = "error: "
    rospy.init_node('watchdog_server', anonymous=True, log_level=rospy.DEBUG)
    # names for self topics, from roslaunch
    self._logname = rospy.get_param('~watchdog_log_name', 'data_saver')
    self._log_node_name = rospy.get_param('~watchdog_log_node_name', 'watchdog')
    self._service_name = rospy.get_param('~watchdog_service_name', 'watchdog')
    # own log topic publisher + logger
    self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
    self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
    self._logger.debug("data_saver server init")
    # state flag toggled through the service interface
    self.dead_flag = False
    # register the service handler
    self._service = rospy.Service(self._service_name, Watchdog, self._handle_request)
    self._logger.debug("end of init, go to loop")
    self._loop()
def __init__(self):
    """scales node: periodically publishes scale readings on a data topic."""
    # start node
    rospy.init_node('scales_device_server', log_level=rospy.DEBUG)
    # get roslaunch params and reinit part of params
    # NOTE(review): these param names lack the '~' private prefix, so they
    # resolve in the global namespace -- every other device server here uses
    # '~'-prefixed params; confirm this is intentional in the launch files
    self._logname = rospy.get_param('scales_log_name', 'scales')
    # sentinel published instead of a reading when the device fails
    self._lost_data_marker = rospy.get_param('scales_lost_data_marker', -65536)
    self._log_node_name = rospy.get_param('scales_log_node_name', 'scales_log_node')
    self._data_pub_name = rospy.get_param('scales_data_pub_name', 'scales_data_pub')
    self._port = rospy.get_param(
        'scales_port', '/dev/serial/by-id/usb-USB_Vir_USB_Virtual_COM-if00')
    self._baudrate = rospy.get_param('scales_baudrate', 9600)
    self._timeout = rospy.get_param('scales_timeout', 1)
    self._measure_interval = rospy.get_param('scales_measure_interval', 5)
    # create log topic publisher
    self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
    # data topic reuses the Temperature message type as a numeric carrier
    self._data_pub = rospy.Publisher(self._data_pub_name, Temperature, queue_size=10)
    # logger
    self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
    self._logger.debug("scales_device_server init")
    # print("we here init")
    # and go to infinite loop
    # self._service = rospy.Service(self._service_name, SBA5Device, self.handle_request)
    self._loop()
class ControlSystemServer(object): """ here we are using rospy.Timer functionality to measure co2 every second https://roboticsbackend.com/how-to-use-a-ros-timer-in-python-to-publish-data-at-a-fixed-rate/ https://roboticsbackend.com/ros-rate-roscpy-roscpp/ but every global reconfiguration cycle works with time.localtime() why """ def __init__(self): # hardcoded constants self._success_response = "success: " self._error_response = "error: " self._logsep = ";" # start node rospy.init_node('control_server', log_level=rospy.DEBUG) # get roslaunch params # names for self topics self._logname = rospy.get_param('~control_log_name', 'control_system') self._log_node_name = rospy.get_param('~control_log_node_name', 'control_log_node') self._service_name = rospy.get_param('~control_service_name', 'control_system') self._raw_co2_pub_name = rospy.get_param('~control_raw_co2_pub_name', 'raw_co2_pub') self._operator_pub_name = rospy.get_param('~control_operator_pub_name', 'operator_pub') # devices services names # self._relay_service_name = rospy.get_param('~control_relay_service_name', 'relay_device') self._led_service_name = rospy.get_param('~control_led_service_name', 'led_device') self._sba5_service_name = rospy.get_param('~control_sba5_service_name', 'sba5_device') # we are using sba5 by default co2 sensor self._exp_service_name = rospy.get_param('~control_exp_service_name', 'exp_system') # experiment params self._LSM_exp_red = int(rospy.get_param('~control_LSM_exp_red', 250)) # mA old = 142 # here we need 306 mA, but our devices can work only with Ir < 250 mA self._LSM_exp_white = int(rospy.get_param('~control_LSM_exp_white', 114)) # mA old = 76 self._LSM_control_red = int(rospy.get_param('~control_LSM_control_red', 237)) # mA self._LSM_control_white = int(rospy.get_param('~control_LSM_control_white', 122)) # mA self._full_experiment_loop_time = int(rospy.get_param('~control_full_experiment_loop_time', 900.0)) # sec self._isolated_measure_time = 
int(rospy.get_param('~control_isolated_measure_time', 480.0)) # sec self._n2_calibration_time = int(rospy.get_param('~control_n2_calibration_time', 90.0)) # depends on self._air_valves_open_time = int(rospy.get_param('~control_air_valves_open_time', 15.0)) # sec self._co2_measure_time = int(rospy.get_param('~control_co2_measure_time', 1.0)) # sec self._ventilation_time = self._full_experiment_loop_time - self._isolated_measure_time - \ self._n2_calibration_time - self._air_valves_open_time # flags of current control regime #self._mode = rospy.get_param('~control_start_mode', 'life_support') self._mode = rospy.get_param('~control_start_mode', 'experiment') self._mode_flag = 'co2' # mode can be : # experiment # life_support # test # search_experiment # ... #self._sba5_measure_allowed = False # by default # self._at_work = rospy.get_param('~control_start_at_work', True) # start node and loop immediately after launch or # rpi_gpio device stub # TODO add this params to .launch file self._gpio_handler = RelayHandler( n2_valve=5, air_pump_1=6, air_pump_2=12, cooler_1=13, air_valve_1=19, air_valve_2=26, ndir_pump=16, empty=20 ) # params of search experiment self._co2_search_time_start = 0 self._co2_search_time_stop = 0 self._current_search_point_id = 0 self._led_delay_time = 0 # for setting up leds in selected time in vent period # by default: self._current_red = self._LSM_exp_red self._current_white = self._LSM_exp_white self._current_search_is_finished = 1 # by default it is not finished # create log topic publisher self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10) # create data topic publisher self._co2_pub = rospy.Publisher(self._raw_co2_pub_name, Temperature, queue_size=10) # create operator-topic to send important msgs to operator # self._operator_pub = rospy.Publisher(self._operator_pub_name, String, queue_size=10) # logger self._logger = CustomLogger(name=self._logname, logpub=self._log_pub) self._logger.info("control_server init") # 
create Locks self._co2_measure_allowed_event = Event() # serial errors handling # self._last_serial_error_time = 0 # self._delay_after_serial_error = 300 self._logger.info("control_server service creation") # service self._service = rospy.Service(self._service_name, ControlSystem, self._handle_request) self._logger.info("check if we are in experiment mode") # self._serial_error_counter = 0 # self._serial_error_max = 5 # subscribe to rosout_agg to know if there is fatal error and we need to do smth # self._system_log_sub = rospy.Subscriber( # name='/rosout_agg', data_class=Log, # callback=self._log_callback, # queue_size=5) # reinit sba5 if self._mode == 'full_experiment' or self._mode == 'full_experiment': self._logger.info("update sba5 and allow sba5 measures") self._update_sba5_params() # allow measures of sba5 self._co2_measure_allowed_event.set() # self._default_red = 142 # self._default_white = 76 self._current_red = self._LSM_exp_red self._current_white = self._LSM_exp_white # create timers for async periodic tasks using internal ros mechanics # Create a ROS Timer for reading data rospy.Timer(rospy.Duration(1), self._get_sba5_measure) # 1 Hz # create ros timer for main loop if self._mode == 'life_support_dual': self._dual_led_service_name = rospy.get_param('~control_dual_led_service_name') self._current_red = self._LSM_control_red self._current_white = self._LSM_control_white if self._mode == 'k30_experiment': self._k30_service_name = rospy.get_param('~k30_service_name', 'k30_device') self._k30_calibration_time = int(rospy.get_param('~k30_calibration_time', '25')) self._current_red = self._LSM_control_red self._current_white = self._LSM_control_white rospy.Timer(rospy.Duration(2), self._get_k30_data) # 0.5 Hz self._logger.info("go to loop") self._loop() # ===================== loops for different control modes ====================== def _loop(self): while not rospy.is_shutdown(): # check if mode was changed if self._mode == 'life_support': 
self._life_support_loop() elif self._mode == 'life_support_dual': self._life_support_dual_loop() elif self._mode == 'test': self._test_loop() elif self._mode == 'full_experiment': self._full_experiment_loop() elif self._mode == 'k30_experiment': self._k30_experiment_loop() else: self._logger.error("current mode is not real mode: {}".format(self._mode)) rospy.sleep(1) def _test_loop(self): t = time.localtime() # every 15 minutes by default if t.tm_min % (self._full_experiment_loop_time / 60.0) == 0: self._logger.debug("start test loop again") rospy.sleep(self._air_valves_open_time) self._logger.debug("we have done nothing, really") def _life_support_loop(self): # one loop t = time.localtime() # every 15 minutes by default if t.tm_min % (self._full_experiment_loop_time / 60.0) == 0: self._logger.debug("start life support loop again") # start it again # reset light parameters to default self._logger.info('red = {} white = {} type = {}'.format( self._current_red, self._current_white, type(self._current_red))) self._set_new_light_mode(self._current_red, self._current_white) # set inside ventilation coolers on self._set_new_relay_state('set_vent_coolers', 0) # start ventilation self._start_ventilation() # self._set_new_relay_state('set_ndir_pump', 0) # for test only # wait for self._ventilation_time rospy.sleep(self._ventilation_time) # its not bad because service calls works in parallel threads # then stop it self._stop_ventilation() # self._set_new_relay_state('set_ndir_pump', 1) # for test only # then wait and do nothing def _life_support_dual_loop(self): # one loop t = time.localtime() # every 15 minutes by default if t.tm_min % (self._full_experiment_loop_time / 60.0) == 0: self._logger.debug("start life support dual loop again") # start it again # reset light parameters to default self._logger.info('red = {} white = {} type = {}'.format( self._current_red, self._current_white, type(self._current_red))) self._set_new_light_mode(self._current_red, 
self._current_white) self._set_new_light_mode(self._current_red, self._current_white, led_service_name= self._dual_led_service_name) # set inside ventilation coolers on self._set_new_relay_state('set_vent_coolers', 0) # start ventilation self._start_ventilation() # self._set_new_relay_state('set_ndir_pump', 0) # for test only # wait for self._ventilation_time rospy.sleep(self._ventilation_time) # its not bad because service calls works in parallel threads # then stop it self._stop_ventilation() # self._set_new_relay_state('set_ndir_pump', 1) # for test only # then wait and do nothing def _full_experiment_loop(self): # one loop # all experiment t = time.localtime() # every 15 minutes by default if t.tm_min % (self._full_experiment_loop_time/60.0) == 0: # start it again self._logger.info("start full experiment loop again") self._logger.info("start full experiment loop again") # self._get_current_point() # set inside ventilation coolers on self._set_new_relay_state('set_vent_coolers', 0) # start ventilation and calibration self._start_ventilation() self._logger.info("ventilation started") # get new led light params from exp_node self._get_current_point() self._logger.info("we got new exp point: mode={} id={} red={} white={}".format( self._mode_flag, self._current_search_point_id, self._current_red, self._current_white )) self._logger.info("we got new exp point: mode={} id={} red={} white={}".format( self._mode_flag, self._current_search_point_id, self._current_red, self._current_white )) # check new mode flag if self._mode_flag == 'co2': # TODO FIX # stop measuring co2 using threading event self._co2_measure_allowed_event.clear() self._logger.info("We have set measure flag to {}".format(self._co2_measure_allowed_event.is_set())) # do calibration of sba-5 self._logger.info("sba5 calibration started") self._perform_sba5_calibration() self._logger.info("sba5 calibration ended") # start measuring co2 again self._co2_measure_allowed_event.set() self._logger.info("We have 
set measure flag to {}".format(self._co2_measure_allowed_event.is_set())) # # self._co2_search_time_start = rospy.Time.now() # # send sign to operator # self._logger.info("co2_search_time started {}".format(self._co2_search_time_start)) # wait for self._ventilation_time rospy.sleep(self._led_delay_time) self._set_new_light_mode(self._current_red, self._current_white) rospy.sleep(self._ventilation_time - self._led_delay_time) # stop ventilation self._stop_ventilation() self._logger.info("stop ventilation") self._co2_search_time_start = rospy.Time.now() # send sign to operator ts = datetime.datetime.fromtimestamp( self._co2_search_time_start.to_sec()).strftime('%Y_%m_%d %H:%M:%S') self._logger.info("co2_search_time started {}".format(ts)) self._logger.info("co2_search_time started {}".format(ts)) # wait self._isolated_measure_time rospy.sleep(self._isolated_measure_time) self._co2_search_time_stop = rospy.Time.now() te = datetime.datetime.fromtimestamp( self._co2_search_time_start.to_sec()).strftime('%Y_%m_%d %H:%M:%S') self._logger.info("co2_search_time stopped {}".format(te)) self._logger.info("co2_search_time stopped {}".format(te)) # send start and stop times of this search point to exp_node self._send_point_data() self._logger.info("data sent to exp_system") def _k30_experiment_loop(self): # one loop # all experiment t = time.localtime() # every 15 minutes by default if t.tm_min % (self._full_experiment_loop_time/60.0) == 0: # start it again self._logger.info("start k30 experiment loop again") self._logger.info("start k30 experiment loop again") # self._get_current_point() # set inside ventilation coolers on self._set_new_relay_state('set_vent_coolers', 0) # start ventilation and calibration self._start_ventilation() self._logger.info("ventilation started") # get new led light params from exp_node self._get_current_point() self._logger.info("we got new exp point: mode={} id={} red={} white={}".format( self._mode_flag, self._current_search_point_id, 
self._current_red, self._current_white )) self._logger.info("we got new exp point: mode={} id={} red={} white={}".format( self._mode_flag, self._current_search_point_id, self._current_red, self._current_white )) self._set_new_light_mode(self._current_red, self._current_white) # check new mode flag if self._mode_flag == 'co2': # TODO FIX # stop measuring co2 using threading event self._co2_measure_allowed_event.clear() self._logger.info("We have set measure flag to {}".format(self._co2_measure_allowed_event.is_set())) # do calibration of sba-5 self._logger.info("k30 calibration started") self._perform_k30_calibration() self._logger.info("k30 calibration ended") # start measuring co2 again self._co2_measure_allowed_event.set() self._logger.info("We have set measure flag to {}".format(self._co2_measure_allowed_event.is_set())) # wait for self._ventilation_time rospy.sleep(self._ventilation_time) # stop ventilation self._stop_ventilation() self._logger.info("stop ventilation") self._co2_search_time_start = rospy.Time.now() # send sign to operator ts = datetime.datetime.fromtimestamp( self._co2_search_time_start.to_sec()).strftime('%Y_%m_%d %H:%M:%S') self._logger.info("co2_search_time started {}".format(ts)) self._logger.info("co2_search_time started {}".format(ts)) # wait self._isolated_measure_time rospy.sleep(self._isolated_measure_time) self._co2_search_time_stop = rospy.Time.now() te = datetime.datetime.fromtimestamp( self._co2_search_time_start.to_sec()).strftime('%Y_%m_%d %H:%M:%S') self._logger.info("co2_search_time stopped {}".format(te)) self._logger.info("co2_search_time stopped {}".format(te)) # send start and stop times of this search point to exp_node self._send_point_data() self._logger.info("data sent to exp_system") # =============================== support methods ============================== def _operator_call(self, msg): # just sends msg to operator topic _time = datetime.datetime.now() msg = str(_time) + self._logsep + str(self._logname) + 
self._logsep + msg self._operator_pub.publish(msg) # def _log_callback(self, log_msg): # # # hardcoded thing just to handle serial errors # if not time.time() - self._last_serial_error_time >= self._delay_after_serial_error: # if log_msg.name == '/serial_node' and log_msg.level == 8: # self._logger.warning("we got serial error {}, but we will ignore it for a time".format(log_msg)) # else: # if log_msg.name == '/serial_node' and log_msg.level == 8: # self._logger.error("we got serial error {}".format(log_msg)) # self._serial_error_counter += 1 # if self._serial_error_counter >= self._serial_error_max: # self._logger.error("max number of serial error counted: {}".format(self._serial_error_counter)) # self._restart_serial_node() # # self._serial_error_counter = 0 # self._last_serial_error_time = time.time() def _get_k30_data(self, event=None): # if self._sba5_measure_allowed: # event is rospy.TimerEvent if self._mode_flag == 'co2': # TODO FIX self._co2_measure_allowed_event.wait() rospy.wait_for_service(self._sba5_service_name) try: # sba_device = rospy.ServiceProxy(self._sba5_service_name, SBA5Device) # raw_resp = sba_device("measure_co2") k30_device = rospy.ServiceProxy(self._k30_service_name, K30Device) raw_resp = k30_device("get_data") # self._logger.debug("We got raw response from sba5 : {}".format(raw_resp.response)) # self._logger.debug("Type of raw response : {}".format(type(raw_resp.response))) # self._logger.debug("Size of raw response : {}".format(len(raw_resp.response))) # lets get co2 measure from that string # pattern = re.compile(r'\w+: (\d+.\d+)') # for answers like "success: 55.21" # co2_data = float(pattern.findall(raw_resp.response)[0]) co2_data = float(raw_resp) self._logger.debug("k30 measure_co2: We find co2 : {}".format(co2_data)) # add to self array # self._add_new_data_to_array(co2_data) # then publish it self._publish_sba5_measure(co2_data) # self._logger.debug("We published it") return float(co2_data) except Exception as e: exc_info = 
sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.warning("Service call failed: {}".format(err_list)) def _perform_k30_calibration(self): self._logger.debug("start k30 recalibration") # stop NDIRGA external air pump self._set_new_relay_state('set_ndir_pump', 1) # open n2 valve self._set_new_relay_state('set_n2_valve', 0) # send Z to sba-5 # rospy.wait_for_service(self._sba5_service_name) try: # wait until nitrogen fills the volume rospy.sleep(self._k30_calibration_time-5) # TODO fix it k30_device = rospy.ServiceProxy(self._k30_service_name, K30Device) raw_resp = k30_device('recalibrate') self._logger.debug("We got raw response from k30 {}".format(raw_resp)) except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list)) # raise ControlSystemException(e) # close n2 valve self._set_new_relay_state('set_n2_valve', 1) # start NDIRGA external air pump self._set_new_relay_state('set_ndir_pump', 0) # def _restart_serial_node(self): # # we just need to kill serial node # # master must rebirth it # # # self._logger.warning("trying to kill relay node") # # # os.system("rosnode kill /relay_device") # # time.sleep(1.5) # # self._logger.warning("trying to kill serial node") # # os.system("rosnode kill /serial_node") # # self._logger.warning("waiting for serial node respawn") # # time.sleep(1.5) # # # then check if it was created again # serial_node_found = False # # relay_device_found = False # # while not serial_node_found and relay_device_found: # while not serial_node_found: # nodes = os.popen("rosnode list").readlines() # for i in range(len(nodes)): # nodes[i] = nodes[i].replace("\n", "") # if nodes[i] == "/serial_node": # serial_node_found = True # self._logger.warning("found serial node in rosnode list") # # # if nodes[i] == "/relay_device": # # relay_device_found = True # # self._logger.warning("found relay_device in rosnode list") # time.sleep(0.5) 
# time.sleep(5) # # then send respawn signal to relay device to restore relay state on mcu # self._logger.warning("send respawn signal to relay") # rospy.wait_for_service(self._relay_service_name) # try: # relay_wrapper = rospy.ServiceProxy(self._relay_service_name, RelayDevice) # resp = relay_wrapper("respawn", 1) # self._logger.debug(resp) # return resp # # except rospy.ServiceException, e: # exc_info = sys.exc_info() # err_list = traceback.format_exception(*exc_info) # self._logger.error("Service call failed: {}".format(err_list)) def _send_point_data(self): self._logger.info("try to send data about current search point") rospy.wait_for_service(self._exp_service_name) try: exp_device = rospy.ServiceProxy(self._exp_service_name, ExpSystem) resp = exp_device(command="set_point_data", point_id=self._current_search_point_id, start_time=self._co2_search_time_start, end_time=self._co2_search_time_stop ) self._logger.debug(resp) return resp except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list)) def _get_current_point(self): self._logger.info("try to get new search point") rospy.wait_for_service(self._exp_service_name, 1) # NOTE check this place try: exp_device = rospy.ServiceProxy(self._exp_service_name, ExpSystem) resp = exp_device(command="get_current_point") self._mode_flag = resp.response self._current_search_point_id = resp.point_id self._current_red = resp.red self._current_white = resp.white self._led_delay_time = resp.led_delay self._logger.info(resp) return resp except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list)) # def _update_control_params(self): # # get new params and set them to corresponding self.xxxx values # # NOTE: method is DEPRECATED # self._set_new_light_mode(self._current_red, self._current_white) # for a time def 
_start_ventilation(self): # open drain valves 'set_air_valves' self._set_new_relay_state('set_air_valves', 0) # wait time to open them ~15 sec rospy.sleep(self._air_valves_open_time) # start drain pumps 'set_air_pumps' self._set_new_relay_state('set_air_pumps', 0) pass def _stop_ventilation(self): # stop drain pumps self._set_new_relay_state('set_air_pumps', 1) # wait 1 sec rospy.sleep(1.0) # close drain valves self._set_new_relay_state('set_air_valves', 1) pass def _perform_sba5_calibration(self): self._logger.debug("start sba recalibration") # stop NDIRGA external air pump self._set_new_relay_state('set_ndir_pump', 1) # open n2 valve self._set_new_relay_state('set_n2_valve', 0) # send Z to sba-5 rospy.wait_for_service(self._sba5_service_name) try: sba_device = rospy.ServiceProxy(self._sba5_service_name, SBA5Device) raw_resp = sba_device('recalibrate') self._logger.debug("We got raw response from sba5 {}".format(raw_resp)) # wait for calibration time 21/40/90 sec rospy.sleep(self._n2_calibration_time) except Exception as e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list)) # raise ControlSystemException(e) # close n2 valve self._set_new_relay_state('set_n2_valve', 1) # start NDIRGA external air pump self._set_new_relay_state('set_ndir_pump', 0) pass def _publish_sba5_measure(self, data): self._logger.debug("publish_sba5_measure: We got data: {}".format(data)) co2_msg = Temperature() co2_msg.header.stamp = rospy.Time.now() co2_msg.temperature = data self._logger.debug("Message: {}".format(co2_msg)) self._co2_pub.publish(co2_msg) def _set_new_relay_state(self, command, state): # command - str, state - 0 or 1 self._logger.info("start setting new relay mode '{}' '{}' ".format(command, state)) # rospy.wait_for_service(self._relay_service_name) try: # relay_wrapper = rospy.ServiceProxy(self._relay_service_name, RelayDevice) # resp = relay_wrapper(command, state) # 
self._logger.debug(resp) # return resp self._gpio_handler.parse_old_command(command=command, arg=state) except Exception, e: exc_info = sys.exc_info() err_list = traceback.format_exception(*exc_info) self._logger.error("Service call failed: {}".format(err_list))
class MYSQLHandler(object):
    """ROS node that serves stored sensor datasets from the local MySQL
    `experiment` database for plotting."""

    def __init__(self):
        print("========== start init")
        # start node
        rospy.init_node('mysql_data_plotter_server', log_level=rospy.DEBUG, anonymous=True)
        # self logging
        self._logname = rospy.get_param('~mysql_data_plotter_log_name', 'mysql_data_plotter')
        self._log_node_name = rospy.get_param(
            '~mysql_data_plotter_log_node_name', 'mysql_data_plotter_log_node')
        # create log topic publisher
        self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
        # logger
        self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
        self._logger.debug("mysql_data_plotter_server init")
        # for default we will use current ip in tun0 protocol
        # if there is no any tun0 - it must be a critical error
        self.ipv4 = os.popen('ip addr show tun0').read().split(
            "inet ")[1].split("/")[0]
        self.web_addr = self.ipv4
        self.web_port = 8090  # on every rpi
        self.list_of_hdf = list()
        self.metastring = "experiment metadata: \n"
        # TODO load description from experiments table and add to metastring
        print("========== start get list of datasets")
        # table descriptions from .launch: dicts with keys
        # 'name', 'id', 'type', 'dtype', 'units', 'status'
        self.list_of_tables = rospy.get_param('~mysql_data_plotter_raw_topics')
        print("========== end of init")

    def get_dataset(self, name):
        """Return (rows, meta) for the topic called `name`.

        rows is a list of {'data': ..., 'time': ...} dicts fetched from the
        raw_data table; meta is the matching entry of self.list_of_tables.
        Only entries whose 'status' is 'raw_data' are fetched; for any other
        status the method returns None (as before).

        Raises ValueError if `name` is not a known table description.
        """
        print("========== start get dataset {}".format(name))
        # lets find dict with same name as user wants to see
        info_dict = None
        for table_info in self.list_of_tables:
            if table_info['name'] == name:
                info_dict = table_info
        # FIX: previously an unknown name fell through with info_dict=None and
        # crashed below with "TypeError: 'NoneType' object is not subscriptable"
        if info_dict is None:
            raise ValueError("unknown dataset name: {}".format(name))
        meta = info_dict  # dirty: meta is the same dict, not a copy
        # table name, where data located must be placed in field "status"
        if info_dict['status'] == 'raw_data':
            # if raw, it must be in raw_data table and must be defined by sensor_id
            sensor_id = info_dict['id']
            con = pymysql.connect(
                host='localhost',
                user='******',
                password='******',
                # db='experiment',
                charset='utf8mb4',
                cursorclass=pymysql.cursors.DictCursor)
            cur = con.cursor()
            cur.execute("use experiment")
            comm_str = 'select data, time from raw_data where sensor_id = {}'.format(
                sensor_id)
            self._logger.debug("comm_str: {}".format(comm_str))
            cur.execute(comm_str)
            res_list = cur.fetchall()
            # it must be array of dicts where each dict is one line from db
            con.close()
            print("========== end of get dataset")
            return res_list, meta
# -*- coding: utf-8 -*- from config import Config from custom_logger import CustomLogger log = CustomLogger(__name__) if Config.get('disable_xbmcaddon_mock_log'): log.set_log_level('ERROR') ADDONS_SETTINGS = { 'plugin.video.catchuptvandmore': Config.get('addon_settings'), 'script.module.codequick': Config.get('codequick_fake_settings'), 'script.module.inputstreamhelper': Config.get('inputstreamhelper_fake_settings'), 'script.module.youtube.dl': Config.get('youtubedl_fake_settings') } ADDONS_LABELS = { 'plugin.video.catchuptvandmore': Config.get('addon_labels'), 'script.module.codequick': Config.get('codequick_labels'), 'script.module.inputstreamhelper': Config.get('inputstreamhelper_labels'), 'script.module.youtube.dl': Config.get('youtubedl_labels') } ADDONS_NAMES = { 'plugin.video.catchuptvandmore': 'Catch-up TV & More', 'script.module.codequick': 'CodeQuick',
class SI7021DeviceServer(object):
    """ROS node that periodically reads the SI7021 sensor and publishes its
    temperature and humidity on two Temperature topics.

    If a read fails, the configured lost-data marker value is published on
    both topics instead, so gaps stay visible downstream.
    """

    def __init__(self):
        # bring the node up first so parameter lookups work
        rospy.init_node('si7021_device_server', log_level=rospy.DEBUG)
        # roslaunch parameters
        self._logname = rospy.get_param('~si7021_log_name', 'si7021')
        self._lost_data_marker = rospy.get_param('~si7021_lost_data_marker', -65536)
        self._log_node_name = rospy.get_param('~si7021_log_node_name', 'si7021_log_node')
        self._hum_pub_name = rospy.get_param('~si7021_hum_pub_name', 'si7021_1_hum_pub')
        self._temp_pub_name = rospy.get_param('~si7021_temp_pub_name', 'si7021_1_temp_pub')
        self._timeout = rospy.get_param('~si7021_timeout', 1)
        self._measure_interval = rospy.get_param('~si7021_measure_interval')
        # publishers: one log topic, one per measured quantity
        self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
        self._hum_pub = rospy.Publisher(self._hum_pub_name, Temperature, queue_size=10)
        self._temp_pub = rospy.Publisher(self._temp_pub_name, Temperature, queue_size=10)
        # logger
        self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
        self._logger.debug("si7021_device_server init")
        # no service interface for this node - it just publishes forever
        self._loop()

    def _loop(self):
        """Publish one (temperature, humidity) pair every measure interval."""
        while not rospy.is_shutdown():
            rospy.sleep(self._measure_interval)
            try:
                temperature, humidity = get_si7021_data()
                # publish temperature first, then humidity, one message each
                for publisher, value in ((self._temp_pub, temperature),
                                         (self._hum_pub, humidity)):
                    reading = Temperature()
                    reading.temperature = value
                    reading.header.stamp.secs = rospy.get_time()
                    publisher.publish(reading)
            except Exception:
                # sensor read failed: send the lost-data marker on both topics
                lost = Temperature()
                lost.temperature = self._lost_data_marker
                lost.header.stamp.secs = rospy.get_time()
                self._temp_pub.publish(lost)
                self._hum_pub.publish(lost)
import urls from decorator import time_it from custom_logger import CustomLogger from config_utils import get_conf_value from cobra.model.pol import Uni as PolUni from cobra.model.aaa import UserEp as AaaUserEp from cobra.model.aaa import AppUser as AaaAppUser from cobra.model.aaa import UserCert as AaaUserCert try: from OpenSSL.crypto import FILETYPE_PEM, load_privatekey, sign except Exception: print("=== could not import openssl crypto ===") logger = CustomLogger.get_logger("/home/app/log/app.log") APIC_IP = get_conf_value('APIC', 'APIC_IP') STATIC_IP = get_conf_value('APIC', 'STATIC_IP') APIC_THREAD_POOL = int(get_conf_value('APIC', 'APIC_THREAD_POOL')) def create_cert_session(): """ Create user certificate and session. """ cert_user = '******' # Name of Application, used for token generation # static generated upon install plugin_key_file = '/home/app/credentials/plugin.key' pol_uni = PolUni('')
def main():
    """Entry point: load config, set up logging, then either run the module
    tests or the exploration loop, printing a summary of errors at the end."""
    # Get config
    Config.init_config()

    # Init custom logger
    CustomLogger.set_global_log_level(Config.get('log_level'))
    log = CustomLogger(__name__)

    # Startup banner
    for banner_line in ('',
                        '#######################################',
                        '# #',
                        '# Catch-up TV & More simulator #',
                        '# #',
                        '#######################################',
                        ''):
        log.info(banner_line)

    # Custumize sys.path to use custom modules
    customize_sys_path()

    # Module-test mode short-circuits everything else
    if Config.get('test_modules'):
        return test_modules(log)

    exploration_loop(log)

    if Config.get('print_all_explored_items'):
        print('\n* All explored items:\n')
        for s in Route.explored_routes_l:
            print(s)

    RuntimeErrorCQ.print_encountered_warnings()
    ret_val = RuntimeErrorCQ.print_encountered_errors()
    return ret_val
from compiled_file_system import CompiledFileSystem from custom_logger import CustomLogger from data_source_registry import CreateDataSource from environment import GetAppVersion from file_system import IsFileSystemThrottledError from future import Future from gcs_file_system_provider import CloudStorageFileSystemProvider from github_file_system_provider import GithubFileSystemProvider from host_file_system_provider import HostFileSystemProvider from object_store_creator import ObjectStoreCreator from refresh_tracker import RefreshTracker from server_instance import ServerInstance from servlet import Servlet, Request, Response from timer import Timer, TimerClosure _log = CustomLogger('refresh') class RefreshServlet(Servlet): '''Servlet which refreshes a single data source. ''' def __init__(self, request, delegate_for_test=None): Servlet.__init__(self, request) self._delegate = delegate_for_test or RefreshServlet.Delegate() class Delegate(object): '''RefreshServlet's runtime dependencies. Override for testing. ''' def CreateBranchUtility(self, object_store_creator): return BranchUtility.Create(object_store_creator)
class SBA5DeviceServer(object):
    """
    class to receive messages from other ros nodes, handle it
    send messages to PP-systems SBA-5 IRGA
    and parse its output back as response
    """

    def __init__(self):
        # hardcoded constants
        self._success_response = "success: "
        self._error_response = "error: "
        # start node
        rospy.init_node('sba5_device_server', log_level=rospy.DEBUG)
        # get roslaunch params and reinit part of params
        self._calibration_time = int(rospy.get_param('~sba5_calibration_time'))
        self._logname = rospy.get_param('~sba5_log_name', 'sba5')
        self._log_node_name = rospy.get_param('~sba5_log_node_name', 'sba5_log_node')
        self._port = rospy.get_param(
            '~sba5_port', '/dev/serial/by-id/usb-FTDI_FT230X_Basic_UART_DN03WQZS-if00-port0')
        self._baudrate = int(rospy.get_param('~sba5_baudrate', 19200))
        self._timeout = float(rospy.get_param('~sba5_timeout', 0.2))
        self._service_name = rospy.get_param('~sba5_service_name', 'sba5_device')
        # create log topic publisher
        self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
        # logger
        self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
        self._logger.debug("sba5_device_server init")
        # service
        self._service = rospy.Service(self._service_name, SBA5Device, self.handle_request)
        self._loop()

    def _loop(self):
        # nothing to do between requests - just serve
        rospy.spin()

    def handle_request(self, req):
        """Dispatch one SBA5Device request.

        Always returns a string prefixed with "success: " or "error: ",
        never raises to the ROS service layer.
        """
        self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++")
        self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++")
        self._logger.debug("we got request: {}".format(req))
        if not req.command:
            # if we got empty string
            resp = self._error_response + 'empty_command'
            return resp
        elif req.command == 'get_info':
            try:
                ans = self.get_info()
                resp = self._success_response + ans
                return resp
            except SBA5DeviceException as e:
                resp = self._error_response + e.args[0]
                return resp
        elif req.command == 'measure_full':
            try:
                ans = self.do_measurement()
                resp = self._success_response + ans
                return resp
            except SBA5DeviceException as e:
                resp = self._error_response + e.args[0]
                return resp
        elif req.command == 'measure_co2':
            try:
                ans = self.do_measurement()
                self._logger.debug("raw result from measure {}".format(ans))
                # F252 output line: "M <n> <n> <co2> <f> <f> <f> <n> <n>\r\n";
                # group 1 is the co2 value.
                # FIX: dots escaped so '.' only matches a literal decimal point
                pattern = re.compile(
                    r'M \d+ \d+ (\d+\.\d+) \d+\.\d+ \d+\.\d+ \d+\.\d+ \d+ \d+\r\n')
                res = pattern.findall(ans)
                # FIX: res[0] used to raise IndexError when the device answer
                # did not match the pattern (see the old traceback note in vcs)
                if not res:
                    return self._error_response + "could not parse co2 from: {}".format(ans)
                self._logger.debug("we have found this {}".format(res[0]))
                resp = self._success_response + str(res[0])
                return resp
            except Exception as e:
                # str(e) instead of e.args[0]: generic exceptions may have no args
                resp = self._error_response + str(e)
                return resp
        elif req.command == 'recalibrate':
            try:
                ans = self.do_calibration()
                resp = self._success_response + ans
                return resp
            except SBA5DeviceException as e:
                resp = self._error_response + e.args[0]
                return resp
        elif req.command == 'init':
            try:
                ans = self.set_initial_params()
                resp = self._success_response + ans
                return resp
            except SBA5DeviceException as e:
                resp = self._error_response + e.args[0]
                return resp
        else:
            resp = self._error_response + 'unknown command'
            return resp

    # =========================== low level commands ===================================

    def send_command(self, command):
        """Send one ASCII command to the SBA-5 over serial; return echo + status.

        Command must end with CR LF ("\\r\\n") - it's important.

        From the SBA-5 manual: a single character command is acted on
        immediately; a string command is acted on after the terminator <CR>
        is received, with or without a checksum (checksum value followed by
        "C", e.g. "S,11,1,043C<CR>").  On success the SBA5+ echoes the
        command string and "OK" to all ports, each terminated with
        <CR><linefeed>.

        Raises SBA5DeviceException on any serial write/read error.
        """
        try:
            ser = serial.Serial(
                port=self._port,
                baudrate=self._baudrate,
                timeout=self._timeout
            )
            bcom = command.encode('utf-8')
            ser.write(bcom)
        except Exception as e:
            raise SBA5DeviceException("SBAWrapper error while send command: {}".format(e))
        # then try to read answer
        # it must be two messages, ended with \r\n
        try:
            ser = serial.Serial(
                port=self._port,
                baudrate=self._baudrate,
                timeout=self._timeout
            )
            echo = (ser.readline()).decode('utf-8')
            status = (ser.readline()).decode('utf-8')
            return echo + status
        except Exception as e:
            raise SBA5DeviceException(
                "SBAWrapper error while read answer from command: {}".format(e))

    def set_initial_params(self):
        """Push the startup configuration to the SBA-5 and return the
        concatenation of all device answers.

        Sequence: "!" (stop auto measurements), "A0" (stop auto zero),
        "F252" (output format), "P1" (start pump), then the auto-zero
        duration EL/EM/ES chosen from self._calibration_time (90/40/21 sec,
        defaulting to EL with an error logged).
        """
        # TODO: mb we need protection from calling that method twice?
        self._logger.debug("Second part of CO2Sensor init")
        answers = ""
        # we need to shut down auto measurements
        # FIX: each step now assigns `ans =` instead of `ans +=`; the old code
        # re-appended every previous answer into each later log line and into
        # the accumulated return value
        ans = self.send_command("!\r\n")
        answers += ans
        self._logger.debug("Command !, answer: {}".format(ans)[:-1])
        # we need to shut down auto zero operations
        ans = self.send_command("A0\r\n")
        answers += ans
        self._logger.debug("Command A0, answer: {}".format(ans)[:-1])
        # we need to set format of output
        ans = self.send_command("F252\r\n")
        answers += ans
        self._logger.debug("Command F252, answer: {}".format(ans)[:-1])
        # we need to start pump
        ans = self.send_command("P1\r\n")
        answers += ans
        self._logger.debug("Command P1, answer: {}".format(ans)[:-1])
        # set time of calibration
        if self._calibration_time == 90:
            command = "EL\r\n"
        elif self._calibration_time == 40:
            command = "EM\r\n"
        elif self._calibration_time == 21:
            command = "ES\r\n"
        else:
            self._logger.error(
                "wrong initial calibration time, {}".format(self._calibration_time))
            self._logger.error("default time will be 90 sec")
            command = "EL\r\n"
        ans = self.send_command(command)
        answers += ans
        self._logger.debug("Command calibraton, answer: {}".format(ans)[:-1])
        return answers

    def get_info(self):
        """Return the device info string ("?" command), without the trailing newline."""
        ans = self.send_command("?\r\n")
        self._logger.debug("Getting info from SBA5")
        return ans[:-1]

    def do_calibration(self):
        """Start a zero calibration ("Z" command); returns the raw device answer."""
        ans = self.send_command("Z\r\n")
        self._logger.debug("Starting calibration of SBA5")
        return ans

    def do_measurement(self):
        """Request one full measurement line ("M" command); returns it raw."""
        ans = self.send_command("M\r\n")
        self._logger.debug("Do measure SBA5")
        # its try to remove last \n from the logged copy only
        self._logger.debug(("SBA5 result is {}".format(ans))[:-1])
        return ans

    def do_command(self, com):
        """Send an arbitrary raw command string and return the device answer."""
        ans = self.send_command(com)
        self._logger.info("send {} command to SBA5".format(com)[:-1])
        return ans
class RegressionTestScript(object): _CREDENTIAL = './conf/credential.json' _REGRESSION_SHEET_NAME = 'GRP__RegressionTesting' _CFS_SHEET_NAME = 'GRP__Component-Feature-Service' _SINGLE_REGRESSION = "\n" _TEST_COUNT = 0 _FAILED_COUNT = 0 def __init__(self, product, service, zuul_url, zuul_route): self._PRODUCT = product self._SERVICE = service self._BASE_PATH = os.path.expanduser('~') + "/Documents/GRP" now = datetime.now() log_dir = "/var/log/grp/regression-test-automation/%s/%s/%s" % \ (self._PRODUCT, self._SERVICE, '{0:%Y-%m-%d__%H.%M.%S}'.format(now)) self.log = CustomLogger(log_dir).get_logger() self.log.info("Script starts executes at : {0:%Y-%m-%d %H:%M:%S}\n".format(now)) pcfs = service.split('/') if len(pcfs) != 4: sys.exit("Invalid service : %s\nPlease enter a valid service." % service) self._COMPONENT = pcfs[0] self._FEATURE = pcfs[1] self._VERSION = pcfs[2] self._SERVICE_NAME = pcfs[3] self._ZUUL_URL = zuul_url self._ZUUL_ROUTE = zuul_route def set_up(self): self.log.info("Setup environment variables.") self.g = pygsheets.authorize(service_file=self._CREDENTIAL) self.log.debug("\n_PRODUCT = %s\n_SERVICE = %s\n_CREDENTIAL = %s\n_REGRESSION_SHEET_NAME = %s\n" "_CFS_SHEET_NAME = %s\ng = %s\n" % (self._PRODUCT, self._SERVICE, self._CREDENTIAL, self._REGRESSION_SHEET_NAME, self._CFS_SHEET_NAME, str(self.g))) def load_regression_sheet(self): self.log.debug("Start loading data from regression sheet.") worksheet = self.g.open(self._REGRESSION_SHEET_NAME) worksheet = worksheet.worksheet('title', self._PRODUCT) worksheet = worksheet.get_all_values() self.log.debug("Worksheet data : " + str(worksheet)) data = list(filter(lambda x: self._SERVICE.strip() == x[2].strip(), worksheet)) if len(data) == 0: self.log.error("Service not found. 
Product : %s, Service : %s" % (self._PRODUCT, self._SERVICE)) sys.exit("Service not found") self.regression_data = data print(data) self.log.debug("Regression test data : " + str(self.regression_data)) self.log.info("Loading complete.\n") def create_base_directory(self): self.log.debug("Start creating base directory for test script.") product_dir = self._BASE_PATH + "/" + self._PRODUCT + "-service" + "/tests/regression" if not os.path.exists(product_dir): self.log.error("directory not found : %s", product_dir) sys.exit(-1) self.product_dir = product_dir service = self._SERVICE.split("/") self.directory = self._BASE_PATH + "/" + self._PRODUCT + "-service/tests/regression/" + service[0] + \ "-service/python/" + service[0] + "-" + service[1] + "-" + service[2] + "/" + service[3] + "/" self.log.info("Test script directory : %s" % self.directory) try: if not os.path.exists(self.directory): os.makedirs(self.directory) if not os.path.exists(self.directory + "data/"): os.makedirs(self.directory + "data/") if not os.path.exists(self.directory + "script/"): os.makedirs(self.directory + "script/") except Exception as e: self.log.error("Error when creating directory %s : %s", self.directory, str(e)) sys.exit(-1) self.log.debug("Directory created : \n1 : %s\n2 : %sdata\n3 : %sscript\n" % ( self.directory, self.directory, self.directory)) def get_service_request_response(self): self.log.info("Start loading request and response json.") rows = self.g.open(self._CFS_SHEET_NAME).worksheet('title', self._PRODUCT).get_all_values() component_col_no = rows[3].index("component") feature_col_no = rows[3].index("feature") version_col_no = rows[3].index("version") service_col_no = rows[3].index("service") data = list(filter(lambda x: self._COMPONENT == x[component_col_no] and self._FEATURE == x[feature_col_no] and self._VERSION == x[version_col_no] and self._SERVICE_NAME == x[service_col_no], rows)) if len(data) == 0: print("Service not defined in CFS sheet : %s" % self._SERVICE) 
sys.exit(-1) data = data[0] try: request_json = json.loads(data[rows[3].index("test-request-json")]) response_json = json.loads(data[rows[3].index("test-response-json")]) request_json['header']['requestId'] = None request_json['header']['requestTime'] = None request_json['header']['requestType'] = None request_json['header']['requestSource'] = "python-regression" request_json['header']['requestSourceService'] = "regression-script" self.log.debug("Request json : \n%s", json.dumps(request_json, indent=4)) self.log.debug("Response json : \n%s", json.dumps(response_json, indent=4)) with open(self.directory + "data/request.json", "w") as f: f.write(json.dumps(request_json, sort_keys=False, indent=4)) with open(self.directory + "data/response.json", "w") as f: f.write(json.dumps(response_json, sort_keys=False, indent=4)) except Exception as e: self.log.error("Error when loading request and response json : %s", str(e)) sys.exit(-1) self.log.info("Loading complete.\n") def generate_basetest(self): with open("./templates/basetest.txt", "r") as r: with open(self.directory + "script/basetest.py", "w") as w: w.write(r.read()) self.log.info("File created : basetest.py ") def generate_run_regression(self): with open("./templates/run_regression.txt", "r") as r: with open(self.directory + "run_regression.sh", "w") as w: w.write(r.read().replace("<ZUUL_URL>", self._ZUUL_URL).replace("<ZUUL_ROUTE>", self._ZUUL_ROUTE) .replace("<SERVICE>", self._SERVICE).replace("<PRODUCT>", self._PRODUCT).replace("<COMPONENT>", self._COMPONENT)) self.log.info("File created : run_regression.sh ") def generate_run_regression_single(self): with open("./templates/run_regression_single.txt", "r") as r: with open(self.directory + "run_regression_single.sh", "w") as w: w.write(r.read().replace("<ZUUL_URL>", self._ZUUL_URL).replace("<ZUUL_ROUTE>", self._ZUUL_ROUTE) .replace("<SERVICE>", self._SERVICE)) self.log.info("File created : run_regression_single.sh\n") def create_environment(self): 
self.log.debug("Start creating environment.") # with open("./conf/port.json", "r") as r: # ports = json.loads(r.read()) # self.port = ports[self._PRODUCT] # if not self.port: # self.log.error("Port not found for service : %s", self._SERVICE) # sys.exit(-1) # self.log.info("Service Port : " + self.port) self.create_base_directory() self.get_service_request_response() self.generate_basetest() self.generate_run_regression() self.generate_run_regression_single() self.log.debug("Environment created.\n") def register_script(self, file, attribute, type): self._SINGLE_REGRESSION += "#./%s ${URL} ${SERVICE} ${TOKEN}\n" % file def create_request_null_test_script(self, attribute): self._TEST_COUNT += 1 file = "service_test_request_null_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/request_null.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute)) self.register_script(file, attribute, "null") self.log.info("File created : %s", file) def create_request_empty_test_script(self, attribute): self._TEST_COUNT += 1 file = "service_test_request_empty_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/request_empty.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute)) self.register_script(file, attribute, "empty") self.log.info("File created : %s", file) def create_request_wrongtype_test_script(self, attribute, type): if type == 'str': type = "\"\"" elif type == 'double': type = "1.00" elif type == '[]' or type == 'array': type = "[]" elif type == '{}' or type == 'object': type = "{}" elif type == 'boolean': type = "True" elif type == 'int': type = "1" else: print("Type not found :", type) self._TEST_COUNT += 1 file = "service_test_request_wrongtype_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/request_wrongtype.txt", "r") as r: with open(self.directory + 
"script/" + file, "w") as w: w.write(r.read().replace("<attribute>", attribute).replace("<type>", type)) self.register_script(file, attribute + " data type", "wrong") self.log.info("File created : %s", file) def request_wronglength_formater(self, attribute, length): len_dic = {"condition": "<length_condition>", "min": "1", "max": "20"} if not length: self.log.error("Pattern not define of wronglength for request attribute : %s", attribute) self._FAILED_COUNT += 1 return len_dic length = length.strip() if re.match(r'^[0-9]+$', length): min_max = [int(length) - 8, int(length) + 8] len_dic["condition"] = "len(suffix) != " + length elif re.match(r'^:[0-9]+$', length): length = length.replace(":", "") min_max = [1, int(length) + 16] len_dic["condition"] = "len(suffix) > " + length elif re.match(r'^[0-9]+:$', length): length = length.replace(":", "") min_max = [int(length) - 8, int(length) + 8] len_dic["condition"] = "len(suffix) < " + length elif re.match(r'^[0-9]+:[0-9]+$', length): v_range = length.split(":") min_max = [int(v_range[0]) - 8, int(v_range[1]) + 8] len_dic["condition"] = "len(suffix) < " + v_range[0] + " or len(suffix) > " + v_range[1] elif re.match(r'^[0-9,]+$', length): v_nums = list(map(lambda n: int(n), length.split(","))) min_max = [min(v_nums) - 8, max(v_nums) + 8] len_dic["condition"] = "len(suffix) not in " + str(v_nums) else: self.log.error("Pattern not match of wronglength for request attribute : %s", attribute) self._FAILED_COUNT += 1 return len_dic min_max[0] = min_max[0] if min_max[0] > 1 else 1 len_dic["min"] = str(min_max[0]) len_dic["max"] = str(min_max[1]) return len_dic def create_request_wronglength_test_script(self, attribute, length): len_dic = self.request_wronglength_formater(attribute, length) self._TEST_COUNT += 1 file = "service_test_request_wronglength_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower() with open("./templates/request_wronglength.txt", "r") as r: with open(self.directory + "script/" + file, "w") as w: 
# NOTE(review): the next four statements are the tail of
# create_request_wronglength_test_script(); its `def` line lies above this chunk.
w.write(r.read().replace("<attribute>", attribute).replace("<min>", len_dic["min"])
        .replace("<max>", len_dic["max"]).replace("<length_condition>", len_dic["condition"]))
self.register_script(file, attribute + " length", "wrong")
self.log.info("File created : %s", file)

def create_request_malformed_test_script(self, attribute, pattern):
    """Generate a 'malformed request' test script for *attribute*.

    *pattern* is a dict that may carry a 'wronglength' spec ("N", ":N",
    "N:", "N:M" or "a,b,c") and a 'malformed' regex-like pattern; both are
    substituted into the request_malformed.txt template.
    """
    # Default length range used when no 'wronglength' spec is given.
    length_value = "range(1, 30)"
    length = pattern.get("wronglength")
    if length:
        length = length.strip()
        if re.match(r'^[0-9]+$', length):
            length_value = "[%s]" % length
        elif re.match(r'^:[0-9]+$', length):
            # ":N" -> lengths 1..N inclusive
            length_value = "range(1, %d)" % (int(length.replace(":", "")) + 1)
        elif re.match(r'^[0-9]+:$', length):
            # "N:" -> open upper bound; 16 values starting at N
            l = int(length.replace(":", ""))
            length_value = "range(%d, %d)" % (l, l + 16)
        elif re.match(r'^[0-9]+:[0-9]+$', length):
            l = length.split(":")
            length_value = "range(%s, %d)" % (l[0], int(l[1]) + 1)
        elif re.match(r'^[0-9,]+$', length):
            # comma-separated explicit list of lengths
            length_value = "[ %s ]" % length
    self._TEST_COUNT += 1
    # CamelCase attribute name -> snake_case file suffix
    file = "service_test_request_malformed_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower()
    with open("./templates/request_malformed.txt", "r") as r:
        with open(self.directory + "script/" + file, "w") as w:
            if pattern.get("malformed") is None:
                # No pattern given: still emit the template, but count as failed.
                self._FAILED_COUNT += 1
                pattern["malformed"] = "<malformed_pattern>"
                self.log.error("Pattern not define of malformed for request attribute : %s", attribute)
            w.write(r.read().replace("<attribute>", attribute)
                    .replace("<malformed_pattern>", pattern.get("malformed"))
                    .replace("<length_value>", length_value))
    self.register_script(file, attribute, "malformed")
    self.log.info("File created : %s", file)

def create_request_invalid_test_script(self, attribute, pattern, data_type):
    """Generate an 'invalid request value' test script for *attribute*.

    A list pattern produces a string-values script; a numeric range pattern
    ("N", ":N", "N:", "N:M") produces a script that sends a value just
    outside the valid range; anything else falls back to a bare template.
    """
    self._TEST_COUNT += 1
    file = "service_test_request_invalid_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower()
    if isinstance(pattern, type([])):
        with open("./templates/request_invalid_str.txt", "r") as r:
            with open(self.directory + "script/" + file, "w") as w:
                if pattern is None:
                    self._FAILED_COUNT += 1
                    pattern = "<values>"
                w.write(r.read().replace("<attribute>", attribute).replace("<values>", str(pattern)))
    elif pattern is not None and re.match(r'^[0-9:]+$', pattern):
        value = 0
        # Pick a random value outside the allowed numeric range.
        if re.match(r'^[0-9]+$', pattern):
            value = float(pattern) - (random.random() * 10 + 1)
        elif re.match(r'^:[0-9]+$', pattern):
            value = float(pattern.replace(":", "")) + (random.random() * 10 + 1)
        elif re.match(r'^[0-9]+:$', pattern):
            value = float(pattern.replace(":", "")) - (random.random() * 10 + 1)
        elif re.match(r'^[0-9]+:[0-9]+$', pattern):
            v = pattern.split(":")
            value = float(v[0]) - (random.random() * 10 + 1)
        # Render the value according to the declared data type.
        if data_type == 'int':
            value = str(round(value))
        elif data_type == 'double':
            value = str(round(value, 2))
        else:
            value = '"' + str(round(value)) + '"'
        with open("./templates/request_invalid_number.txt", "r") as r:
            with open(self.directory + "script/" + file, "w") as w:
                w.write(r.read().replace("<attribute>", attribute).replace("<value>", value))
    else:
        # Unusable pattern: emit bare template, count as failed.
        self._FAILED_COUNT += 1
        self.log.error("Pattern not define of invalid for request attribute : %s", attribute)
        with open("./templates/request_invalid.txt", "r") as r:
            with open(self.directory + "script/" + file, "w") as w:
                w.write(r.read().replace("<attribute>", attribute))
    self.register_script(file, attribute, "invalid")
    self.log.info("File created : %s", file)

def create_request_unknown_test_script(self):
    """Generate the single 'unknown request attribute' test script."""
    self._TEST_COUNT += 1
    file = "service_test_request_unknown_attribute.py"
    with open("./templates/request_unknown.txt", "r") as r:
        with open(self.directory + "script/" + file, "w") as w:
            w.write(r.read())
    self.register_script(file, "attribute", "unknown")
    self.log.info("File created : %s", file)
    # Blank line separates request tests from response tests in the runner.
    self._SINGLE_REGRESSION += "\n"

def create_response_null_test_script(self, attribute):
    """Generate a 'null response value' test script for *attribute*."""
    self._TEST_COUNT += 1
    file = "service_test_response_null_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower()
    with open("./templates/response_null.txt", "r") as r:
        with open(self.directory + "script/" + file, "w") as w:
            w.write(r.read().replace("<attribute>", attribute))
    self.register_script(file, attribute, "null")
    self.log.info("File created : %s", file)

def create_response_empty_test_script(self, attribute, is_mandatory):
    """Generate an 'empty response value' test script for *attribute*.

    For optional attributes the generated check also passes when the
    attribute is absent from the response body.
    """
    self._TEST_COUNT += 1
    file = "service_test_response_empty_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower()
    null_conditon = ""
    if not is_mandatory:
        null_conditon = "self._ATTR not in self.response_data['body'] or "
    with open("./templates/response_empty.txt", "r") as r:
        with open(self.directory + "script/" + file, "w") as w:
            w.write(r.read().replace("<attribute>", attribute).replace("<null_checker>", null_conditon))
    self.register_script(file, attribute, "empty")
    self.log.info("File created : %s", file)

def create_response_mismatched_test_script(self, attribute):
    """Generate a 'mismatched response value' test script for *attribute*."""
    self._TEST_COUNT += 1
    file = "service_test_response_mismatched_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower()
    with open("./templates/response_mismatched.txt", "r") as r:
        with open(self.directory + "script/" + file, "w") as w:
            w.write(r.read().replace("<attribute>", attribute))
    self.register_script(file, attribute, "mismatched")
    self.log.info("File created : %s", file)

def create_response_wrongtype_test_script(self, attribute, type, is_mandatory):
    """Generate a 'wrong response type' test script for *attribute*.

    *type* (note: shadows the builtin) is a schema type name that is
    rewritten into a Python type-list literal for the template.
    """
    if type == 'str':
        type = "[type(\"\")]"
    elif type == 'double':
        # doubles may legitimately arrive as int or float JSON numbers
        type = "[type(1.00), type(1)]"
    elif type == '[]' or type == 'array':
        type = "[type([])]"
    elif type == '{}' or type == 'object':
        type = "[type({})]"
    elif type == 'boolean':
        type = "[type(True)]"
    elif type == 'int':
        type = "[type(1)]"
    else:
        print("Type not found :", type)
        type = "<type>"
    self._TEST_COUNT += 1
    file = "service_test_response_wrongtype_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower()
    null_conditon = ""
    if not is_mandatory:
        null_conditon = "self._ATTR not in self.response_data['body'] or "
    with open("./templates/response_wrongtype.txt", "r") as r:
        with open(self.directory + "script/" + file, "w") as w:
            w.write(r.read().replace("<attribute>", attribute).replace("<type>", type)
                    .replace("<null_checker>", null_conditon))
    self.register_script(file, attribute + " data type", "wrong")
    self.log.info("File created : %s", file)

def response_wronglength_formater(self, attribute, length):
    """Translate a length spec into a Python length-check expression.

    Supports "N", ":N", "N:", "N:M" and "a,b,c"; returns the literal
    placeholder "<length_condition>" when the spec is missing or unparsable.
    """
    value = "len(self.response_data['body'][self._ATTR])"
    if not length:
        self.log.error("Pattern not define of wronglength for response attribute : %s", attribute)
        return "<length_condition>"
    length = length.strip()
    if re.match(r'^[0-9]+$', length):
        return value + " == " + length
    elif re.match(r'^:[0-9]+$', length):
        length = length.replace(":", "")
        return value + " <= " + length
    elif re.match(r'^[0-9]+:$', length):
        length = length.replace(":", "")
        return value + " >= " + length
    elif re.match(r'^[0-9]+:[0-9]+$', length):
        v_range = length.split(":")
        return value + " >= " + v_range[0] + " and " + value + " <= " + v_range[1]
    elif re.match(r'^[0-9,]+$', length):
        v_nums = list(map(lambda n: int(n), length.split(",")))
        return value + " in " + str(v_nums)
    else:
        self.log.error("Pattern not match of wronglength for response attribute : %s", attribute)
        return "<length_condition>"

def create_response_wronglength_test_script(self, attribute, length, is_mandatory):
    """Generate a 'wrong response length' test script for *attribute*."""
    length_condition = self.response_wronglength_formater(attribute, length)
    self._TEST_COUNT += 1
    file = "service_test_response_wronglength_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower()
    if not is_mandatory:
        # Optional attribute: absence from the body is also acceptable.
        length_condition = "self._ATTR not in self.response_data['body'] or %s" % length_condition
    with open("./templates/response_wronglength.txt", "r") as r:
        with open(self.directory + "script/" + file, "w") as w:
            w.write(r.read().replace("<attribute>", attribute)
                    .replace("<length_condition>", length_condition))
    self.register_script(file, attribute + " length", "wrong")
    self.log.info("File created : %s", file)

def create_response_malformed_test_script(self, attribute, pattern, is_mandatory):
    """Generate a 'malformed response value' test script for *attribute*."""
    self._TEST_COUNT += 1
    file = "service_test_response_malformed_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower()
    null_conditon = ""
    with open("./templates/response_malformed.txt", "r") as r:
        with open(self.directory + "script/" + file, "w") as w:
            if pattern is None:
                self._FAILED_COUNT += 1
                pattern = "<malformed_pattern>"
                self.log.error("Pattern not define of malformed for response attribute : %s", attribute)
            if not is_mandatory:
                null_conditon = "self._ATTR not in self.response_data['body'] or "
            w.write(r.read().replace("<attribute>", attribute).replace("<malformed_pattern>", pattern)
                    .replace("<null_checker>", null_conditon))
    self.register_script(file, attribute, "malformed")
    self.log.info("File created : %s", file)

def create_response_invalid_test_script(self, attribute, pattern, is_mandatory):
    """Generate an 'invalid response value' test script for *attribute*.

    A list pattern produces a string-values script; a numeric range pattern
    produces a range-violation condition plus a matching error message;
    otherwise a bare template is emitted and counted as failed.
    """
    self._TEST_COUNT += 1
    file = "service_test_response_invalid_%s.py" % re.sub('([A-Z]+)', r'_\1', attribute).lower()
    null_conditon = ""
    if not is_mandatory:
        null_conditon = "self._ATTR not in self.response_data['body'] or "
    if isinstance(pattern, type([])):
        with open("./templates/response_invalid_str.txt", "r") as r:
            with open(self.directory + "script/" + file, "w") as w:
                w.write(r.read().replace("<attribute>", attribute).replace("<values>", str(pattern))
                        .replace("<null_condition>", null_conditon))
    elif pattern is not None and re.match(r'^[0-9:]+$', pattern):
        value = 0
        condition = "float(self.response_data[\"body\"][\"%s\"])" % attribute
        error_message = ""
        if re.match(r'^[0-9]+$', pattern):
            condition = condition + " != " + pattern
            error_message = pattern
        elif re.match(r'^:[0-9]+$', pattern):
            value = pattern.replace(":", "")
            condition = condition + " > " + value
            error_message = "greater than " + value
        elif re.match(r'^[0-9]+:$', pattern):
            value = pattern.replace(":", "")
            condition = condition + " < " + value
            error_message = "less than " + value
        elif re.match(r'^[0-9]+:[0-9]+$', pattern):
            value = pattern.split(":")
            condition = condition + " < " + value[0] + " or " + condition + " > " + value[1]
            error_message = "less than " + value[0] + " or greater than " + value[1]
        with open("./templates/response_invalid_number.txt", "r") as r:
            with open(self.directory + "script/" + file, "w") as w:
                w.write(r.read().replace("<attribute>", attribute)
                        .replace("<condition>", condition)
                        .replace("<error_message>", error_message)
                        .replace("<null_condition>", null_conditon))
    else:
        self.log.error("Pattern not define of invalid for response attribute : %s", attribute)
        self._FAILED_COUNT += 1
        with open("./templates/response_invalid.txt", "r") as r:
            with open(self.directory + "script/" + file, "w") as w:
                w.write(r.read().replace("<attribute>", attribute))
    self.register_script(file, attribute, "invalid")
    self.log.info("File created : %s", file)

def create_response_unknown_test_script(self, mandatory_attrs, optional_attrs):
    """Generate the 'unknown response attribute' test script.

    *mandatory_attrs*/*optional_attrs* are stringified lists spliced into
    the template.
    """
    self._TEST_COUNT += 1
    file = "service_test_response_unknown_attribute.py"
    with open("./templates/response_unknown.txt", "r") as r:
        with open(self.directory + "script/" + file, "w") as w:
            w.write(r.read().replace("<mandatory_attribute_list>", mandatory_attrs)
                    .replace("<optional_attribute_list>", optional_attrs))
    self.register_script(file, "attribute", "unknown")
    self.log.info("File created : %s", file)

def create_response_missing_test_script(self, mandatory_attrs):
    """Generate the 'missing mandatory response attribute' test script."""
    self._TEST_COUNT += 1
    file = "service_test_response_missing_attribute.py"
    with open("./templates/response_missing.txt", "r") as r:
        with open(self.directory + "script/" + file, "w") as w:
            w.write(r.read().replace("<mandatory_attribute_list>", mandatory_attrs))
    self.register_script(file, "attribute", "missing")
    self.log.info("File created : %s", file)

def generate_request_test_script(self, data):
    """Generate every applicable request test for one regression-sheet row.

    *data* is a row (list) from the regression sheet; columns 5..13 hold
    attribute name, type and per-category flags, column 16 an optional JSON
    pattern. Returns {'attribute': name, 'mandatory': bool}.
    """
    self.log.debug("Attribute name : %s", data[5])
    attr_dic = {"attribute": data[5], "mandatory": False}
    pattern = {}
    if len(data) >= 17 and re.match(r"^\{.*\}$", data[16]):
        pattern = json.loads(data[16])
        self.log.debug("Pattern : \n%s", json.dumps(pattern))
    if "null" in data[9]:
        # A null-check implies the attribute is mandatory.
        attr_dic['mandatory'] = True
        self.create_request_null_test_script(data[5])
    if "empty" in data[8]:
        self.create_request_empty_test_script(data[5])
    if "wrongtype" in data[7]:
        self.create_request_wrongtype_test_script(data[5], data[6])
    if "wronglength" in data[10]:
        self.create_request_wronglength_test_script(data[5], pattern.get('wronglength'))
    if "malformed" in data[11]:
        self.create_request_malformed_test_script(data[5], pattern)
    if "invalid" in data[13]:
        self.create_request_invalid_test_script(data[5], pattern.get('invalid'), data[6])
    self._SINGLE_REGRESSION += "\n"
    return attr_dic

def generate_response_test_script(self, data):
    """Generate every applicable response test for one regression-sheet row.

    Same row layout as generate_request_test_script. Returns
    {'attribute': name, 'mandatory': bool}.
    """
    self.log.debug("Attribute name : %s", data[5])
    attr_dic = {"attribute": data[5], "mandatory": False}
    pattern = {}
    if len(data) >= 17 and re.match(r"^\{.*\}$", data[16]):
        pattern = json.loads(data[16])
    if "null" in data[9]:
        attr_dic['mandatory'] = True
        self.create_response_null_test_script(data[5])
    if "empty" in data[8]:
        self.create_response_empty_test_script(data[5], attr_dic['mandatory'])
    if "wrongtype" in data[7]:
        self.create_response_wrongtype_test_script(data[5], data[6], attr_dic['mandatory'])
    if "wronglength" in data[10]:
        self.create_response_wronglength_test_script(data[5], pattern.get('wronglength'), attr_dic['mandatory'])
    if "malformed" in data[11]:
        self.create_response_malformed_test_script(data[5], pattern.get('malformed'), attr_dic['mandatory'])
    if "mismatch" in data[12]:
        self.create_response_mismatched_test_script(data[5])
    if "invalid" in data[13]:
        self.create_response_invalid_test_script(data[5], pattern.get('invalid'), attr_dic['mandatory'])
    self._SINGLE_REGRESSION += "\n"
    return attr_dic

def create_test_script(self):
    """Drive generation of all request and response test scripts.

    Splits regression rows by direction (column 3), generates per-attribute
    scripts, then the cross-attribute missing/unknown scripts, appends the
    accumulated runner lines to run_regression_single.sh and logs a summary.
    """
    self.log.info("Start creating request test script")
    (seq(self.regression_data).filter(lambda x: "request" in x[3])
     .map(lambda d: self.generate_request_test_script(d)).to_list())
    self.create_request_unknown_test_script()
    self.log.info("\nStart creating response test script")
    response_attr_list = seq(self.regression_data).filter(lambda x: "response" in x[3]) \
        .map(lambda d: self.generate_response_test_script(d)).to_list()
    mandatory_res_attrs = str(seq(response_attr_list).filter(lambda d: d['mandatory'])
                              .map(lambda d: d['attribute']).to_list())
    optional_res_attrs = str(seq(response_attr_list).filter(lambda d: not d['mandatory'])
                             .map(lambda d: d['attribute']).to_list())
    self.create_response_missing_test_script(mandatory_res_attrs)
    self.create_response_unknown_test_script(mandatory_res_attrs, optional_res_attrs)
    with open(self.directory + "run_regression_single.sh", "a") as a:
        a.write(self._SINGLE_REGRESSION)
    self.log.debug("Update run_regression_single.sh file")
    self.log.info("Successfully created : (%d/%d)", self._TEST_COUNT - self._FAILED_COUNT, self._TEST_COUNT)
    self.log.info("Only Template create : (%d/%d)", self._FAILED_COUNT, self._TEST_COUNT)

def change_script_permission(self):
    """Make the generated runner and scripts executable (mode 775)."""
    self.log.debug("Change file mode to 775")
    os.system('chmod 775 ' + self.directory + 'run_regression*')
    os.system('chmod 775 ' + self.directory + 'script/*')

def run(self):
    """Full pipeline: set up, load sheet, create environment and scripts.

    Logs wall-clock execution time in milliseconds.
    """
    start_time = int(round(time.time() * 1000))
    self.set_up()
    self.load_regression_sheet()
    self.create_environment()
    self.create_test_script()
    self.change_script_permission()
    self.log.info("")
    self.log.info("Script execution end at : {0:%Y-%m-%d %H:%M:%S}".format(datetime.now()))
    self.log.info('Execution Time : %d ms', int(round(time.time() * 1000)) - start_time)
class WatchdogServer(object):
    """ROS node exposing a Watchdog service.

    Understands two commands: an empty command (error) and 'shutdown',
    which sets self.dead_flag so _loop exits on its next 1 s tick.
    """

    def __init__(self):
        # Response prefixes used by _handle_request.
        self._success_response = "success: "
        self._error_response = "error: "
        # start node
        rospy.init_node('watchdog_server', anonymous=True, log_level=rospy.DEBUG)
        # get roslaunch params: names for self topics
        self._logname = rospy.get_param('~watchdog_log_name', 'data_saver')
        self._log_node_name = rospy.get_param('~watchdog_log_node_name', 'watchdog')
        self._service_name = rospy.get_param('~watchdog_service_name', 'watchdog')
        # create log topic publisher
        self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
        # start logger
        self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
        self._logger.debug("data_saver server init")
        # Set by the 'shutdown' command; polled by _loop.
        self.dead_flag = False
        # service
        self._service = rospy.Service(self._service_name, Watchdog, self._handle_request)
        self._logger.debug("end of init, go to loop")
        # NOTE: __init__ blocks here until shutdown.
        self._loop()

    def _loop(self):
        """Poll once a second until ROS shutdown or the dead_flag is set."""
        try:
            while not rospy.core.is_shutdown() and not self.dead_flag:
                rospy.sleep(1)
        except KeyboardInterrupt:
            self._logger.warning("keyboard interrupt, shutting down")
            rospy.core.signal_shutdown('keyboard interrupt')

    def _handle_request(self, req):
        """Service callback: dispatch on req.command, return a status string."""
        self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++")
        self._logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++")
        self._logger.debug("we got request: {}".format(req))
        if not req.command:
            # if we got empty string
            resp = self._error_response + 'empty_command'
            return resp
        elif req.command == 'shutdown':
            self.dead_flag = True
            resp = self._success_response + "self.dead_flag is {}".format(
                self.dead_flag)
            return resp
        # elif req.command == 'free_lock':
        #     self._data_write_lock.release()
        #     resp = self._success_response + "the lock has been released"
        #     # lock will be acqured forever if we dont call free_lock command
        #     return resp
        else:
            resp = self._error_response + 'unknown command'
            return resp
def __init__(self):
    """Initialize the control-system ROS node.

    Reads all roslaunch params (topic/service names, LED currents, cycle
    timings), builds the GPIO relay handler, publishers, logger and the
    ControlSystem service, optionally re-inits the SBA5 sensor and K30
    timers depending on the configured mode, then blocks in self._loop().
    """
    # hardcoded constants
    self._success_response = "success: "
    self._error_response = "error: "
    self._logsep = ";"
    # start node
    rospy.init_node('control_server', log_level=rospy.DEBUG)
    # get roslaunch params
    # names for self topics
    self._logname = rospy.get_param('~control_log_name', 'control_system')
    self._log_node_name = rospy.get_param('~control_log_node_name', 'control_log_node')
    self._service_name = rospy.get_param('~control_service_name', 'control_system')
    self._raw_co2_pub_name = rospy.get_param('~control_raw_co2_pub_name', 'raw_co2_pub')
    self._operator_pub_name = rospy.get_param('~control_operator_pub_name', 'operator_pub')
    # devices services names
    # self._relay_service_name = rospy.get_param('~control_relay_service_name', 'relay_device')
    self._led_service_name = rospy.get_param('~control_led_service_name', 'led_device')
    # we are using sba5 by default co2 sensor
    self._sba5_service_name = rospy.get_param('~control_sba5_service_name', 'sba5_device')
    self._exp_service_name = rospy.get_param('~control_exp_service_name', 'exp_system')
    # experiment params (LED currents in mA)
    # here we need 306 mA, but our devices can work only with Ir < 250 mA
    self._LSM_exp_red = int(rospy.get_param('~control_LSM_exp_red', 250))  # mA old = 142
    self._LSM_exp_white = int(rospy.get_param('~control_LSM_exp_white', 114))  # mA old = 76
    self._LSM_control_red = int(rospy.get_param('~control_LSM_control_red', 237))  # mA
    self._LSM_control_white = int(rospy.get_param('~control_LSM_control_white', 122))  # mA
    # cycle timings (seconds)
    self._full_experiment_loop_time = int(rospy.get_param('~control_full_experiment_loop_time', 900.0))  # sec
    self._isolated_measure_time = int(rospy.get_param('~control_isolated_measure_time', 480.0))  # sec
    self._n2_calibration_time = int(rospy.get_param('~control_n2_calibration_time', 90.0))  # depends on
    self._air_valves_open_time = int(rospy.get_param('~control_air_valves_open_time', 15.0))  # sec
    self._co2_measure_time = int(rospy.get_param('~control_co2_measure_time', 1.0))  # sec
    # Ventilation fills whatever is left of the full loop.
    self._ventilation_time = self._full_experiment_loop_time - self._isolated_measure_time - \
        self._n2_calibration_time - self._air_valves_open_time
    # flags of current control regime
    # self._mode = rospy.get_param('~control_start_mode', 'life_support')
    self._mode = rospy.get_param('~control_start_mode', 'experiment')
    self._mode_flag = 'co2'
    # mode can be: experiment / life_support / test / search_experiment / ...
    # self._sba5_measure_allowed = False  # by default
    # self._at_work = rospy.get_param('~control_start_at_work', True)
    # start node and loop immediately after launch or
    # rpi_gpio device stub
    # TODO add this params to .launch file
    self._gpio_handler = RelayHandler(
        n2_valve=5,
        air_pump_1=6,
        air_pump_2=12,
        cooler_1=13,
        air_valve_1=19,
        air_valve_2=26,
        ndir_pump=16,
        empty=20
    )
    # params of search experiment
    self._co2_search_time_start = 0
    self._co2_search_time_stop = 0
    self._current_search_point_id = 0
    self._led_delay_time = 0  # for setting up leds in selected time in vent period
    # by default:
    self._current_red = self._LSM_exp_red
    self._current_white = self._LSM_exp_white
    self._current_search_is_finished = 1  # by default it is not finished
    # create log topic publisher
    self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
    # create data topic publisher
    self._co2_pub = rospy.Publisher(self._raw_co2_pub_name, Temperature, queue_size=10)
    # create operator-topic to send important msgs to operator
    # self._operator_pub = rospy.Publisher(self._operator_pub_name, String, queue_size=10)
    # logger
    self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
    self._logger.info("control_server init")
    # create Locks
    self._co2_measure_allowed_event = Event()
    # serial errors handling
    # self._last_serial_error_time = 0
    # self._delay_after_serial_error = 300
    self._logger.info("control_server service creation")
    # service
    self._service = rospy.Service(self._service_name, ControlSystem, self._handle_request)
    self._logger.info("check if we are in experiment mode")
    # self._serial_error_counter = 0
    # self._serial_error_max = 5
    # subscribe to rosout_agg to know if there is fatal error and we need to do smth
    # self._system_log_sub = rospy.Subscriber(
    #     name='/rosout_agg', data_class=Log,
    #     callback=self._log_callback,
    #     queue_size=5)
    # reinit sba5
    # NOTE(review): the original condition was
    # `self._mode == 'full_experiment' or self._mode == 'full_experiment'` —
    # both operands identical. Collapsed to one comparison (same behavior);
    # the second operand was probably meant to be another mode name
    # (e.g. 'search_experiment') — TODO confirm with the launch files.
    if self._mode == 'full_experiment':
        self._logger.info("update sba5 and allow sba5 measures")
        self._update_sba5_params()
        # allow measures of sba5
        self._co2_measure_allowed_event.set()
        # self._default_red = 142
        # self._default_white = 76
        self._current_red = self._LSM_exp_red
        self._current_white = self._LSM_exp_white
    # create timers for async periodic tasks using internal ros mechanics
    # Create a ROS Timer for reading data
    rospy.Timer(rospy.Duration(1), self._get_sba5_measure)  # 1 Hz
    # create ros timer for main loop
    if self._mode == 'life_support_dual':
        self._dual_led_service_name = rospy.get_param('~control_dual_led_service_name')
        self._current_red = self._LSM_control_red
        self._current_white = self._LSM_control_white
    if self._mode == 'k30_experiment':
        self._k30_service_name = rospy.get_param('~k30_service_name', 'k30_device')
        self._k30_calibration_time = int(rospy.get_param('~k30_calibration_time', '25'))
        self._current_red = self._LSM_control_red
        self._current_white = self._LSM_control_white
        rospy.Timer(rospy.Duration(2), self._get_k30_data)  # 0.5 Hz
    self._logger.info("go to loop")
    # NOTE: __init__ blocks here until node shutdown.
    self._loop()
import logging
import posixpath

from custom_logger import CustomLogger
from extensions_paths import EXAMPLES
from file_system_util import CreateURLsFromPaths
from future import Future
from render_servlet import RenderServlet
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer

# Maps a refresh-target name to (filesystem path, URL prefix).
_SUPPORTED_TARGETS = {
    'examples': (EXAMPLES, 'extensions/examples'),
}

_log = CustomLogger('render_refresher')


class _SingletonRenderServletDelegate(RenderServlet.Delegate):
    """RenderServlet delegate that always hands back one fixed ServerInstance."""

    def __init__(self, server_instance):
        self._server_instance = server_instance

    def CreateServerInstance(self):
        # Always return the instance captured at construction time.
        return self._server_instance


# NOTE(review): the body of _RequestEachItem continues past this chunk;
# its docstring is intentionally left unterminated here.
def _RequestEachItem(title, items, request_callback):
    '''Runs a task |request_callback| named |title| for each item in |items|.
    |request_callback| must take an item and return a servlet response.
    Returns true if every item was successfully run, false if any return a
    non-200 response or raise an exception.
# -*- coding: utf-8 -*- from config import Config from random import randint from custom_logger import CustomLogger log = CustomLogger(__name__) class AutoExploration: # - key: str(path) # - value: list<int> items_to_explore = {} explored_items_cnt = 0 @classmethod def add_items_current_menu(cls, current_path, current_directory): # If current_path alredy in items_to_explore, nothing to do if str(current_path) in cls.items_to_explore: log.info( 'Menu already seen, do not add its items in stack to explore') return # Is max depth reached nothing to do if len(current_path) >= Config.get('max_depth') and \ Config.get('max_depth') != -1: log.info( 'Max depth reached, do not add menu items in stack to explore')
# -*- coding: utf-8 -*- from config import Config from custom_logger import CustomLogger from runtime_error import RuntimeErrorCQ log_ = CustomLogger(__name__) if Config.get("disable_xbmc_mock_log"): log_.set_log_level("ERROR") LOGDEBUG = 0 LOGINFO = 1 LOGNOTICE = 2 LOGWARNING = 3 LOGERROR = 4 LOGSEVERE = 5 LOGFATAL = 6 LOGNONE = 7 levelno_dict = { 0: "debug", 1: "info", 2: "notice", 3: "warning", 4: "error", 5: "severe", 6: "fatal", 7: "none", }
class K30DeviceServer:
    """ROS service wrapper around a K30 CO2 sensor on a serial port.

    Exposes commands: 'get_info', 'get_data' (returns CO2 ppm as a string,
    or the lost-data marker on read failure) and 'recalibrate'.
    """

    def __init__(self):
        # hardcoded constants
        self._success_response = "success: "
        self._error_response = "error: "
        # start node
        rospy.init_node('k30_device_server', log_level=rospy.DEBUG)
        # roslaunch params
        self._logname = rospy.get_param('~k30_log_name', 'k30')
        self._log_node_name = rospy.get_param('~k30_log_node_name', 'k30_log_node')
        self._port = rospy.get_param(
            '~k30_port', '/dev/serial/by-id/usb-Silicon_Labs_CP2102_USB_to_'
                         'UART_Bridge_Controller_0003-if00-port0')
        self._baudrate = rospy.get_param('~k30_baudrate', 9600)
        self._timeout = rospy.get_param('~k30_timeout', 1)
        self._service_name = rospy.get_param('~k30_service_name', 'k30_device')
        self._calibration_time = rospy.get_param('~k30_calibration_time', 25)
        # Sentinel value returned instead of CO2 when a read fails.
        self._lost_data_marker = rospy.get_param('~k30_lost_data_marker', -65536)
        # create log topic publisher
        self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
        # logger
        self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
        self._logger.debug("k30_device_server init")
        # device
        self._device = K30(devname=self._port, baudrate=self._baudrate, timeout=self._timeout)
        # service
        self._service = rospy.Service(self._service_name, K30Device, self.handle_request)
        # NOTE: __init__ blocks here until node shutdown.
        self._loop()

    def _loop(self):
        rospy.spin()

    def handle_request(self, req):
        """Service callback: dispatch on req.command, return a status string."""
        self._logger.debug("we got request: {}".format(req))
        if not req.command:
            # if we got empty string
            resp = self._error_response + 'empty_command'
            return resp
        elif req.command == 'get_info':
            try:
                ans = self._device.get_info()
                resp = self._success_response + ans
                return resp
            except Exception as e:
                # FIX: e.args may be empty — fall back to str(e) instead of
                # raising IndexError inside the error handler.
                resp = self._error_response + (e.args[0] if e.args else str(e))
                return resp
        elif req.command == 'get_data':
            try:
                co2, ans = self._device.get_co2()
                # resp = self._success_response + ans
                return str(co2)
            except Exception as e:
                # Best-effort: report the lost-data marker instead of failing.
                co2 = self._lost_data_marker
                return str(co2)
        elif req.command == 'recalibrate':
            try:
                # FIX: use the configured calibration time; the original
                # hardcoded ctime=5 and never used self._calibration_time.
                ans = self._device.recalibrate(ctime=self._calibration_time)
                resp = self._success_response + ans
                return resp
            except Exception as e:
                resp = self._error_response + (e.args[0] if e.args else str(e))
                return resp
class ExpSystemServer(object):
    """ROS wrapper for handling experiment search points.

    Reads search-table and MySQL params from the parameter server, ensures
    the per-experiment database table exists, instantiates a search handler
    (plain Python, no ROS inside) and serves 'set_point_data' /
    'get_current_point' requests over the ExpSystem service.
    """

    def __init__(self):
        # hardcoded constants
        self._success_response = "success: "
        self._error_response = "error: "
        # start node
        rospy.init_node('exp_server', log_level=rospy.DEBUG)
        # get roslaunch params: names for self topics
        self._logname = rospy.get_param('~exp_log_name', 'exp_system')
        self._log_node_name = rospy.get_param('~exp_log_node_name', 'exp_log_node')
        self._service_name = rospy.get_param('~exp_service_name', 'exp_system')
        self._exp_pub_name = rospy.get_param('~exp_pub_name', 'exp_pub')
        # mode of work
        # no matter which method is chosen we will parse params for all of them from .launch file
        self._mode = rospy.get_param('~exp_mode_name', 'table')
        # self._stand_type = rospy.get_param('~exp_lamp_type', 'exp')
        # self._height_of_lamps = rospy.get_param('~exp_lamp_h', 25)
        self._search_table = rospy.get_param('exp_search_table')
        self._exp_description = rospy.get_param('mysql_data_saver_experiment_description')
        self._exp_id = self._exp_description["experiment_number"]
        self._db_params = rospy.get_param('mysql_db_params')
        self._db_name = "experiment" + str(self._exp_id)
        # self._exp_config_path = rospy.get_param('~exp_config_path', 'test.xml')
        # todo add here parsing params of gradient search and other smart methods
        # create log topic publisher
        self._log_pub = rospy.Publisher(self._log_node_name, String, queue_size=10)
        # logger
        self._logger = CustomLogger(name=self._logname, logpub=self._log_pub)
        self._logger.info("exp_server init")
        # create exp_db if it is not exists now
        self._create_exp_db()
        # create search method handler object
        # it must not contain any ros api
        if self._mode == 'table':
            self._search_handler = TableSearchHandler(
                self._search_table, self._db_params, self._exp_description, self._logger)
        elif self._mode == 'ordered_table':
            self._search_handler = OrderedTableSearchHandler(
                self._search_table, self._db_params, self._exp_description, self._logger)
        # todo add other search modes
        # now we have to calculate current search point
        # self._search_handler will calculate and store it
        # until we get reqv from control system
        self._search_handler.calculate_next_point()
        # service
        self._service = rospy.Service(self._service_name, ExpSystem, self._handle_request)
        # NOTE: __init__ blocks here until node shutdown.
        self._loop()

    def _loop(self):
        rospy.spin()

    def _handle_request(self, req):
        """Service callback for ExpSystem requests.

        Commands: 'set_point_data' stores a finished point's time window;
        'get_current_point' returns the current point's mode/id/led params.
        """
        self._logger.debug("we got request: {}".format(req))
        if not req.command:
            # if we got empty string
            resp = self._error_response + 'empty_command'
            return resp
        elif req.command == 'set_point_data':
            # TODO fix
            try:
                self._set_point_data(req.point_id, req.start_time, req.end_time)
                resp = ExpSystemResponse()
                resp.response = self._success_response
                self._logger.info("we got set_point_data reqv. data from control:")
                start_time_ = datetime.datetime.fromtimestamp(
                    req.start_time.to_sec()).strftime('%Y_%m_%d %H:%M:%S')
                end_time_ = datetime.datetime.fromtimestamp(
                    req.end_time.to_sec()).strftime('%Y_%m_%d %H:%M:%S')
                self._logger.info("t_start={} t_stop={}".format(
                    start_time_, end_time_
                ))
                return resp
            except Exception as e:
                exc_info = sys.exc_info()
                err_list = traceback.format_exception(*exc_info)
                self._logger.error("Service call failed: {}".format(err_list))
                resp = ExpSystemResponse()
                # FIX: e.args may be empty — fall back to str(e) instead of
                # raising IndexError inside the error handler.
                resp.response = self._error_response + (e.args[0] if e.args else str(e))
                return resp
        elif req.command == 'get_current_point':
            # request from control_node to get current point_id
            try:
                mode, p_id, red, white, led_delay = self._get_current_point()
                resp = ExpSystemResponse()
                resp.response = mode
                resp.point_id = p_id
                resp.red = red
                resp.white = white
                resp.led_delay = led_delay
                self._logger.info("we got get_current_point reqv from control:mode={} p_id={} red={} white={} delay={}".format(
                    mode, p_id, red, white, led_delay
                ))
                return resp
            except Exception as e:
                exc_info = sys.exc_info()
                err_list = traceback.format_exception(*exc_info)
                self._logger.error("Service call failed: {}".format(err_list))
                resp = ExpSystemResponse()
                resp.response = self._error_response + (e.args[0] if e.args else str(e))
                return resp
        else:
            resp = ExpSystemResponse()
            resp.response = self._error_response + 'unknown command'
            return resp

    def _set_point_data(self, p_id, start_time, stop_time):
        """Store the measured time window of point *p_id* in the database.

        Incoming msg carries: uint32 point_id, time start_time, time end_time.
        The handler performs the UPDATE on the corresponding row.
        """
        self._search_handler.update_point_data(p_id, start_time, stop_time)

    def _get_current_point(self):
        """Return (mode, point_id, red, white, led_delay) for the next point."""
        return self._search_handler.calculate_next_point()

    def _create_exp_db(self):
        """Create the exp_data table in this experiment's database if absent.

        FIX: the connection is now closed in a finally block — the original
        leaked it on every call (and on any exception).
        """
        con = pymysql.connect(host=self._db_params["host"],
                              user=self._db_params["user"],
                              password=self._db_params["password"],
                              charset='utf8mb4',
                              cursorclass=pymysql.cursors.DictCursor)
        try:
            cur = con.cursor()
            cur.execute("use {}".format(self._db_name))
            cur.execute('create table if not exists exp_data'
                        '(point_id bigint unsigned primary key not null auto_increment,'
                        'step_id int unsigned,'            # id of search step whom this point belongs
                        'exp_id SMALLINT unsigned,'
                        'red int,'
                        'white int,'
                        'start_time timestamp,'            # start time of dataset
                        'end_time timestamp,'              # end time of dataset
                        'number_of_data_points int unsigned,'  # num of points in set
                        'point_of_calc double,'            # point X where we calculate value of derivative
                        'f_val double,'                    # value of derivative dCO2/dt(X) on this dataset
                        'q_val double,'
                        'is_finished SMALLINT unsigned '   # 0 - finished correctly, 1 - not finished
                        ')'
                        )
            self._logger.debug(cur.fetchall())
            cur.execute('commit')
            self._logger.debug(cur.fetchall())
        finally:
            con.close()
from branch_utility import BranchUtility from compiled_file_system import CompiledFileSystem from custom_logger import CustomLogger from data_source_registry import CreateDataSources from environment import GetAppVersion from gcs_file_system_provider import CloudStorageFileSystemProvider from github_file_system_provider import GithubFileSystemProvider from host_file_system_provider import HostFileSystemProvider from object_store_creator import ObjectStoreCreator from refresh_tracker import RefreshTracker from render_refresher import RenderRefresher from server_instance import ServerInstance from servlet import Servlet, Request, Response from timer import Timer _log = CustomLogger('cron') class CronServlet(Servlet): '''Servlet which runs a cron job. ''' def __init__(self, request, delegate_for_test=None): Servlet.__init__(self, request) self._delegate = delegate_for_test or CronServlet.Delegate() class Delegate(object): '''CronServlet's runtime dependencies. Override for testing. ''' def CreateBranchUtility(self, object_store_creator): return BranchUtility.Create(object_store_creator)