def apiMachineLearningRaw():
    """Serve raw machine-learning data for one node.

    Reads GET param 'param' (JSON with at least "node") and returns a JSON
    object with "data"/"info"/"extra", or {"result": RESULT_FAIL} on any error
    or when no node is given.
    """
    try:
        raw = request.args.get('param')
        variables.log2("routes", '/api/machine-learning/raw ' + raw)
        query = json.loads(raw)
        node = query["node"]
        if node is None:
            # no node selected -> nothing to fetch
            return json.dumps({"result": Constants.RESULT_FAIL})
        data, info = variables.machine_learning.get_raw_data(node)
        # node == -1 skips the per-node info (presumably an "all nodes"
        # sentinel -- confirm against the ML module)
        extra = variables.machine_learning.get_info(node) if node != -1 else None
        return json.dumps({"data": data, "info": info, "extra": extra},
                          default=default_json)
    except:
        variables.print_exception("[routes][/api/machine-learning/raw]")
        return json.dumps({"result": Constants.RESULT_FAIL})
def apiDatabaseSensors():
    """Return the latest n flow-sensor rows for a pipe, oldest first.

    Expects GET param 'param' as JSON: {"id": <pipe id>, "n": <row count>}.
    Returns a JSON list of row dicts, or {"result": RESULT_FAIL} when id is 0,
    db access is disabled, or any error occurs.
    """
    try:
        param = request.args.get('param')
        variables.log2("routes", '/api/database/sensors ' + param)
        param = json.loads(param)
        if param['id'] != 0 and variables.app_config["app"]["db_access"]:
            # TOP cannot take a '?' placeholder, so the row count is
            # interpolated into the SQL text -- coerce to int first to close
            # the SQL-injection hole the previous str(param['n']) left open.
            n = int(param['n'])
            cursor = variables.cnxn.cursor()
            cursor.execute(
                "SELECT * FROM (SELECT TOP " + str(n) +
                " * FROM SensorData_Flow WHERE Pipe_ID = ? ORDER BY Timestamp DESC) a"
                " ORDER BY Timestamp ASC",
                param['id'])
            rows = cursor.fetchall()
            columns = [column[0] for column in cursor.description]
            results = [dict(zip(columns, row)) for row in rows]
            return json.dumps(results, default=default_json)
        return json.dumps({"result": Constants.RESULT_FAIL})
    except:
        variables.print_exception("[routes][/api/database/sensors]")
        return json.dumps({"result": Constants.RESULT_FAIL})
def add_new_hil_device(self, ip, port=9001, dev = None):
    """Register a new HIL device and create its test-runner thread.

    :param ip: device IP address
    :param port: device TCP port (default 9001)
    :param dev: optional device descriptor with keys "info" ("id"/"type")
        and "source". Bugfix: previously dev["info"] and dev["source"] were
        dereferenced outside the `dev is not None` guard, so the default
        dev=None always crashed; all dev accesses are now guarded.
    """
    variables.log2("[add_new_hil_device]",
                   str(self.hil_device_index) + ", " + ip + ":" + str(port))
    self.hil = HIL_socket(ip, port)
    new_device_data = copy.deepcopy(Constants.hil_object_data_model)
    new_device_data["ip"] = ip
    new_device_data["port"] = port
    new_device_data["index"] = self.hil_device_index
    if dev is not None and "info" in dev:
        new_device_data["id"] = dev["info"]["id"]
        new_device_data["type"] = dev["info"]["type"]
        new_device_data["info"] = dev["info"]
    variables.device_data.append(new_device_data)
    # source == 0 devices get a socket wrapper and their own runner thread
    if dev is not None and dev["source"] == 0:
        new_hil_object = {
            "function": {
                "socket": self.hil,
            },
            "data": new_device_data
        }
        self.hil_object_array.append(new_hil_object)
        self.tr.append(TestRunner(new_hil_object))
        if self.mode == 'multi':
            self.tr[self.hil_device_index].start()
    self.hil_device_index += 1
def apiMachineLearningClustersRangeFirstStage():
    """First-stage clustering over a range of nodes; returns display data as JSON."""
    route = '/api/machine-learning/clusters/range/first-stage'
    try:
        raw = request.args.get('param')
        variables.log2("routes", route)
        query = json.loads(raw)
        # number of clusters left as None -> ML module default
        cluster_count = None
        if query.get("range") is not None:
            node_range = query["range"]
        else:
            node_range = list(range(0, query["new_node"]))
        print(node_range)
        # get clusters from all nodes
        outcome = variables.machine_learning.run_clustering_on_node_range(
            node_range, cluster_count)
        data, info = variables.machine_learning.get_display_data(
            outcome, global_scale=query["global_scale"])
        return json.dumps(
            {"data": data, "info": info, "extra": None, "params": query},
            default=default_json)
    except:
        variables.print_exception(route)
        return json.dumps({"result": Constants.RESULT_FAIL})
def apiMachineLearningClustersNodeFirstStage():
    """First-stage clustering for a single node; returns display data as JSON."""
    route = '/api/machine-learning/clusters/node/first-stage'
    try:
        raw = request.args.get('param')
        variables.log2("routes", route)
        query = json.loads(raw)
        cluster_count = None  # ML module picks its default
        outcome = variables.machine_learning.run_clustering_on_node_id(
            query["node"], cluster_count)
        data, info = variables.machine_learning.get_display_data(
            outcome, global_scale=query["global_scale"])
        node_info = variables.machine_learning.get_info(query["node"])
        return json.dumps(
            {"data": data, "info": info, "extra": node_info, "params": query},
            default=default_json)
    except:
        variables.print_exception(route)
        return json.dumps({"result": Constants.RESULT_FAIL})
def request_hex(self, service_def):
    """Send a binary service request and return (error_code, raw_bytes).

    :param service_def: key into self.servicesDict selecting the service id
    :return: (rx_data[2], rx_data) on success -- byte 2 carries the error
        code -- or (-1, None) when packing or parsing fails
    """
    if self.debug_log:
        variables.log2(self.__class__.__name__, "get data")
    fmt = "!BB"  # length byte + service-id byte
    payload_len = calcsize(fmt) - 1
    try:
        tx_message = pack(fmt, payload_len, self.servicesDict[service_def])
    except:
        if self.debug_log:
            variables.print_exception(self.__class__.__name__)
        return (-1, None)
    self.send(tx_message)
    rx_message = self.receive(buffSize)
    try:
        rx_data = list(rx_message)
        # get error code and data
        return (rx_data[2], rx_data)
    except:
        if self.debug_log:
            variables.print_exception(self.__class__.__name__)
        return (-1, None)
def add_new_hil_device(self, ip):
    """Register a discovered HIL device, start its runner, load its test spec.

    :param ip: device IP address
    """
    variables.log2("[add_new_hil_device]", ip)
    self.hil = HIL_socket()
    device_entry = {
        "function": {
            "socket": self.hil,
        },
        "data": copy.deepcopy(Constants.hil_object_data_model)
    }
    device_entry["data"]["def"]["ip"] = ip
    device_entry["data"]["def"]["index"] = self.hil_device_index
    self.hil_object_array.append(device_entry)
    # publish the device to the shared lists used by the UI
    variables.results["availableList"].append(ip)
    variables.deviceList.append(device_entry["data"])
    runner = TestRunner(self.hil_object_array[self.hil_device_index],
                        self.hil_object_array)
    self.tr.append(runner)
    if self.mode == 'multi':
        self.tr[self.hil_device_index].start()
    self.load_test_spec(None, self.hil_device_index)
    self.hil_device_index += 1
def run_async(self):
    """One non-blocking polling step for this device.

    Reconnects a dropped socket, then every self.TS seconds requests data
    ("100", or "100,3" for flow-sensor nodes) and forwards the reply to
    get_response_data(). A failed request resets the connection so the
    next call reconnects.
    """
    self.t = time.time()
    self.update_test_timer()
    if not self.hil_socket.is_connected():
        # rebuild the socket object before reconnecting
        self.hil_socket.newSocket()
        self.hil_socket.connect()
    if self.t - self.t0 >= self.TS:
        # cyclic msg, request data from devices
        self.t0 = self.t
        self.msg_out = "100"
        if self.hil_def["data"]["info"] is not None:
            if self.hil_def["data"]["info"]["type"] == Constants.NODE_FLOW_SENSOR:
                # flow measurement node
                self.msg_out = "100,3"
        self.flag_send_data = True
    if self.flag_send_data:
        self.flag_send_data = False
        t_req = time.time()
        # variables.log2(self.__class__.__name__, 'sending to "%s" "%s"' % (self.hil_def["data"]["ip"], self.msg_out))
        res = self.hil_socket.request(self.msg_out)
        t_req = time.time() - t_req  # request round-trip time in seconds
        if self.log:
            variables.log2(self.__class__.__name__, 'recv from "%s" "%s", time ms "%d"' % (self.hil_def["data"]["ip"], res[1], t_req*1000))
        if res[0] == -1:
            # request failed -> drop connection; reconnect on the next step
            self.hil_socket.reset_connection()
        else:
            self.get_response_data(res[1])
def connect(self):
    """Open a TCP connection to (self.ip, self.port).

    :return: 0 on success, 1 on failure; self.isconn mirrors the outcome
    """
    server_address = (self.ip, self.port)
    # if self.debug_log: variables.log2(self.__class__.__name__, 'starting up on %s port %s' % server_address)
    try:
        self.Socket.connect(server_address)
    except:
        if self.debug_log:
            variables.log2(
                self.__class__.__name__,
                'Error starting connection on %s port %s' % server_address)
            variables.print_exception(self.__class__.__name__)
        self.isconn = False
        return 1
    self.isconn = True
    return 0
def apiMachineLearningRunClusteringAssignmentPartialSample():
    """Cluster a partial sample for a node and return display data as JSON."""
    route = '/api/machine-learning/clusters/node/partial-sample'
    try:
        raw = request.args.get('param')
        variables.log2("routes", route)
        query = json.loads(raw)
        outcome = variables.machine_learning.run_clustering_on_partial_sample(
            query["node"], query["sample"])
        data, info = variables.machine_learning.get_display_data(
            outcome, global_scale=query["global_scale"])
        node_info = variables.machine_learning.get_info(query["node"])
        return json.dumps(
            {"data": data, "info": info, "extra": node_info, "params": query},
            default=default_json)
    except:
        variables.print_exception(route)
        return json.dumps({"result": Constants.RESULT_FAIL})
def run(self):
    """Thread body: step run_async() every LOOP_DELAY until a stop is requested."""
    variables.log2("[TestRunner]", "started")
    stopping = False
    while not stopping:
        time.sleep(Constants.LOOP_DELAY)
        self.run_async()
        # stop is checked after each step, same as the original break
        stopping = self.stop_request()
    variables.log2(self.__class__.__name__, "stopped")
def apiNetworkGraph():
    """Return the network graph as JSON.

    NOTE(review): 'data' is double-encoded -- a JSON string nested inside
    the outer JSON object. Presumably the client parses it twice; kept
    byte-for-byte for compatibility, confirm before changing.
    """
    try:
        variables.log2("routes", '/api/network/graph ')
        graph_json = json.dumps(variables.network.get_data_format())
        return json.dumps({"data": graph_json}, default=default_json)
    except:
        variables.print_exception("[routes][/api/network/graph]")
        return json.dumps({"result": Constants.RESULT_FAIL})
def apiMachineLearningInfo():
    """Return machine-learning module info as JSON, or a fail result on error."""
    try:
        variables.log2("routes", '/api/machine-learning/info')
        ml_info = variables.machine_learning.get_info()
        return json.dumps({"info": ml_info}, default=default_json)
    except:
        variables.print_exception("[routes][/api/machine-learning/info]")
        return json.dumps({"result": Constants.RESULT_FAIL})
def apiMachineLearningInit():
    """Re-initialize the machine-learning module.

    NOTE(review): the success path also returns Constants.RESULT_FAIL --
    this looks like a copy-paste of the error return (every other route
    returns data or a distinct result on success). Probably meant to be a
    success code; the correct constant is not visible here, and clients may
    already depend on this value, so confirm before changing.
    """
    try:
        # param = request.args.get('param')
        variables.log2("routes", '/api/machine-learning/init ')
        # param = json.loads(param)
        variables.machine_learning.init()
        return json.dumps({"result": Constants.RESULT_FAIL})
    except:
        variables.print_exception("[routes][/api/machine-learning/init]")
        return json.dumps({"result": Constants.RESULT_FAIL})
def apiMachineLearningClustersNodeSecondStage():
    """Second-stage (dual) clustering over a node range.

    Recalculates clusters-of-clusters and, when "assign" is set, reassigns
    nodes via process_data(). Retries once after re-initializing the ML
    module if the first attempt fails (e.g. different final cluster count).
    """
    route = '/api/machine-learning/clusters/range/second-stage'
    try:
        raw = request.args.get('param')
        variables.log2("routes", route)
        query = json.loads(raw)
        first_stage_clusters = None  # ML module default
        final_clusters = 4
        # resolve the node range: explicit "range" wins, else 0..new_node, else all
        if query.get("range") is not None:
            node_range = query["range"]
        elif query.get("new_node") is not None:
            node_range = list(range(0, query["new_node"]))
        else:
            node_range = None
        print(node_range)
        try:
            outcome = variables.machine_learning.run_dual_clustering_on_node_range(
                node_range, first_stage_clusters, final_clusters)
        except:
            # different number of final clusters
            variables.print_exception(route)
            variables.machine_learning.init()
            outcome = variables.machine_learning.run_dual_clustering_on_node_range(
                node_range, first_stage_clusters, final_clusters)
        if query.get("assign"):
            # recalculate all clusters and reassign nodes
            process_data()
        data, info = variables.machine_learning.get_display_data(
            outcome, global_scale=query["global_scale"])
        return json.dumps(
            {"data": data, "info": info, "extra": None, "params": query},
            default=default_json)
    except:
        variables.print_exception(route)
        return json.dumps({"result": Constants.RESULT_FAIL})
def run(self):
    """
    Manager thread: periodic sensor logging (file and database) and
    stepping of the test runners.

    check for user input
    check status of test runners

    NOTE(review): indentation reconstructed from a collapsed source --
    verify nesting against the original file.
    """
    variables.log2(self.__class__.__name__, "started")
    while True:
        time.sleep(Constants.LOOP_DELAY)
        self.t = time.time()
        try:
            if self.t - self.t0 >= self.TS:
                self.t0 = self.t
                # file logging, when the UI toggled it on
                if variables.app_flags['log'] == True:
                    variables.log_sensor_data()
                if variables.app_config["app"]["db_access"] and variables.app_config["app"]["db_logging"]:
                    # database log
                    # logging is done synchronously with latest data
                    # even if data from sensors are slightly delayed
                    # if a sensor is disconnected, the last value will not be
                    # written in the database (if the timeout expires)
                    if ((self.t - self.t0_db) > self.TS_DB):
                        self.t0_db = self.t
                        variables.log2(self.__class__.__name__, "save to db")
                        for s in variables.sensor_data:
                            # only save new data
                            # do not save the same data multiple times in the database
                            if 'recent' in s:
                                if s['recent']:
                                    s['recent'] = False
                                    if variables.cnxn is not None:
                                        cursor = variables.cnxn.cursor()
                                        tm = datetime.datetime.now()
                                        cursor.execute(
                                            "insert into SensorData_Flow(Timestamp, Value, Pipe_ID) values (?, ?, ?)",
                                            tm, s['value2'], s['id'])
                                        variables.cnxn.commit()
                                    # NOTE(review): redundant -- flag already
                                    # cleared above
                                    s['recent'] = False
            # manage runners
            for (i, hil_def) in enumerate(self.hil_object_array):
                if self.mode == 'async':
                    self.tr[i].run_async()
        except:
            variables.print_exception(self.__class__.__name__)
            continue
def close(self):
    """Shut down both directions and close the socket.

    :return: 0 on success, 1 if shutdown/close raised
    """
    if self.debug_log:
        variables.log2(self.__class__.__name__, "closing socket")
    try:
        self.Socket.shutdown(socket.SHUT_RDWR)
        self.Socket.close()
    except:
        if self.debug_log:
            variables.print_exception(self.__class__.__name__)
        return 1
    return 0
def send(self, message):
    """Transmit all bytes of *message* on the socket.

    :param message: bytes to send
    :return: 0 on success, 1 on error
    """
    try:
        self.Socket.sendall(message)
    except:
        if self.debug_log:
            variables.log2(self.__class__.__name__, 'Error sending "%s"' % message)
            variables.print_exception(self.__class__.__name__)
        return 1
    return 0
def run_test_file_on_dev_id(self, id):
    """
    Start the test (signal for test runner thread) for selected hil device by id

    :param id: the ID of the selected hil device, None for starting all tests
    """
    variables.log2(self.run_test_file_on_dev_id.__name__, '')
    # reset shared result/log state before any runner starts
    variables.initResultList()
    variables.logModule.init()
    if id is None:
        # run on all devices
        for i in range(len(self.hil_object_array)):
            if i == 0:
                # NOTE(review): device 0 is *stopped* here instead of
                # started -- looks like debug leftover or a reserved
                # device; confirm intent before changing.
                self.stop_test_exec_on_dev_id(i)
            else:
                self.tr[i].start_test()
    else:
        self.tr[id].start_test()
    # mark a test operation as running for the status poller
    self.flag_operation_in_progress = 1
def DatabaseManagerProcess(qDatabaseIn,qDatabaseOut,qDebug1,dbfile):
    """Worker loop: execute SQL requests from qDatabaseIn against *dbfile*.

    Each request is a tuple (requestId, sqlstr, params); requestId == -1
    stops the loop. Replies go to qDatabaseOut as (requestId, json_string),
    or (requestId, False) for an empty result set.

    :param qDatabaseIn: queue of incoming (requestId, sql, params) tuples
    :param qDatabaseOut: queue for (requestId, result) replies
    :param qDebug1: unused here (kept for interface compatibility)
    :param dbfile: path to the sqlite database file
    """
    # bugfix: this is a plain function, so the original
    # self.__class__.__name__ raised NameError on first use; log with a
    # fixed tag instead
    tag = "DatabaseManagerProcess"
    stop_flag = 0
    variables.log2(tag, "running")
    while True:
        time.sleep(0.1)
        if stop_flag:
            break
        if not qDatabaseIn.empty():
            try:
                data = qDatabaseIn.get(False)
                requestId = data[0]
                if requestId == -1:
                    stop_flag = 1
                    break
                sqlstr = data[1]
                params = data[2]
                # str() the id too -- it is not guaranteed to be a string
                variables.log2(tag, str(requestId) + " - " + str(sqlstr) + ' - ' + str(params))
                # conn = sqlite3.connect(configuration.database_path, timeout=10.0)
                conn = sqlite3.connect(dbfile, timeout=variables.db_timeout)
                conn.row_factory = sqlite3.Row  # enables column access by name: row['column_name']
                curs = conn.cursor()
                curs.execute(sqlstr, params)
                data = curs.fetchall()
                if len(data) == 0:
                    data = False
                else:
                    data = json.dumps([dict(ix) for ix in data])  # CREATE JSON
                qDatabaseOut.put((requestId, data))
                variables.log2(tag, "commit")
                conn.commit()
                conn.close()
            except:
                variables.log2(tag, traceback.format_exc())
                continue
def run(self):
    """
    Manager thread loop.

    check for user input
    check status of test runners

    NOTE(review): indentation reconstructed from a collapsed source --
    verify nesting against the original file.
    """
    variables.log2(self.__class__.__name__, "started")
    while True:
        time.sleep(variables.LOOP_DELAY)
        try:
            # check user input -- commands are only honored when idle
            if self.flag_operation_in_progress == 0:
                if self.flag_run_ip_scan:
                    # blocking
                    self.flag_run_ip_scan = False
                    variables.log2('', "detected cmd: run ip scan")
                    self.flag_operation_in_progress = 2
                    variables.results[
                        "inProgress"] = self.flag_operation_in_progress
                    self.scan_network_for_devices(self.gateway, self.port)
                    self.flag_operation_in_progress = 0
                if self.flag_run_test_action_manual:
                    self.flag_run_test_action_manual = False
                    variables.log2('', "detected cmd: run test action manual")
                    self.tr[self.test_device_manual].run_test_action(
                        self.test_action_name_manual,
                        exec_flag=True,
                        manual=True,
                        params_def=self.test_params_manual)
            # check task status
            devices_running = 0
            for (i, hil_def) in enumerate(self.hil_object_array):
                if self.mode == 'async':
                    # in async mode this thread also steps each runner
                    self.tr[i].run_async()
                if hil_def["data"]["status"]["runTestActionsInProgress"]:
                    devices_running += 1
            if (devices_running == 0) and not self.flag_run_ip_scan:
                # everything idle -> clear the in-progress state
                self.flag_operation_in_progress = 0
                if self.flag_stop_test_aux:
                    self.flag_stop_test_aux = False
                variables.results[
                    "inProgress"] = self.flag_operation_in_progress
            variables.results["devicesRunning"] = devices_running
        except:
            traceback.print_exc()
            continue
def handle_post_data(jsondata):
    """Dispatch a socketio 'post data' message to the matching app/device flag.

    :param jsondata: dict with 'type' ('dev' or 'app'), 'id' and 'value'
    """
    variables.log2("socketio - post data", json.dumps(jsondata))

    def _as_int(value):
        # mode values may arrive as strings from the UI; fall back to raw
        try:
            return int(value)
        except:
            return value

    kind = jsondata['type']
    if kind == 'dev':
        if jsondata['id'] == 1:
            # set pump cmd
            variables.test_manager.set_pump(jsondata['value'])
    elif kind == 'app':
        msg_id = jsondata['id']
        if msg_id == 1:
            # change flow ref
            variables.app_flags['ref'] = jsondata['value']
        elif msg_id == 10:
            # set log flag
            mode = _as_int(jsondata['value'])
            if mode == 1:
                if not variables.app_flags['log']:
                    variables.log2("socketio - post data", "start log")
                    variables.new_log()
                    variables.app_flags['log'] = True
            else:
                variables.log2("socketio - post data", "stop log")
                variables.app_flags['log'] = False
        elif msg_id == 20:
            # change control mode; leaving auto modes clears the integrator
            mode = _as_int(jsondata['value'])
            variables.app_flags['mode'] = mode
            variables.app_aux_flags['spab_index'] = 0
            if variables.app_flags['mode'] not in [1, 5]:
                variables.app_flags['integral'] = 0
        elif msg_id == 21:
            # change controller model
            variables.app_flags['controller_id'] = jsondata['value']
        elif msg_id == 30:
            # supervisor on/off
            variables.app_flags['supervisor'] = _as_int(jsondata['value'])
def run(self):
    """Control thread: multi-model supervision plus manual/auto/ident modes.

    Modes (variables.app_flags["mode"]):
      1/5 -- auto control (5 additionally steps through ref_step_sequence);
      2   -- identification, slow pump ramp;
      3   -- identification, step sequence;
      4   -- identification, SPAB (pseudo-random binary) sequence.

    NOTE(review): indentation reconstructed from a collapsed source --
    verify nesting (especially the clamp/set_pump placement in the auto
    branch) against the original file.
    """
    variables.log2(self.__class__.__name__, "running")
    t0 = time.time()
    t0_spab = t0
    t0_ramp = t0
    t0_step = t0
    t0_control = t0
    controller_type = 1  # 1 = PI, 2 = PID (selection code commented below)
    # if appVariables.appConfig['controller_class'] == 'pi':
    #     controller_type = 1
    # # elif appVariables.appConfig['controller_class'] == 'pid':
    #     controller_type=2
    Kp = 0
    Ki = 0
    Kd = 0
    Tf = 0
    ctrl_id_prev = -1
    yk = 0
    ek = 0
    while True:
        time.sleep(Constants.LOOP_DELAY)
        t1 = time.time()
        # latest measurement -> process value yk and error ek
        if 'value2' in variables.sensor_data[variables.app_config["params"]["y_index"]]:
            variables.app_flags['yk'] = variables.sensor_data[
                variables.app_config["params"]["y_index"]]['value2']
        variables.app_flags['ek'] = variables.app_flags['ref'] - variables.app_flags['yk']
        yk = variables.app_flags['yk']
        ek = variables.app_flags['ek']
        uk = variables.app_flags["pump"]
        if (t1 - t0_control) >= variables.app_config["params"]["ts_control"]:
            t0_control = t1
            # multi model control evaluation
            best_model_id = 0
            min_err = 100000
            Ts = variables.app_config["params"]["ts_control"]
            # calculate model outputs
            sum_error = 0  # NOTE(review): never used afterwards
            sum_error_norm = 0
            uk_multi = 0
            for i in range(len(variables.app_config['models'])):
                model = variables.app_config['models'][i]
                yk1_m = variables.app_flags["models"][i]['yk']
                # the characteristic has insensitive zone
                # uk_model = uk - model['u_min']
                uk_model = uk
                if uk_model < 0:
                    uk_model = 0
                # first-order model: y[k] = a*y[k-1] + b*u[k]
                yk_m = yk1_m * model['den'][1] + uk_model * model['num'][0]
                variables.app_flags["models"][i]['yk'] = yk_m
                ek_m = yk - yk_m
                variables.app_flags["models"][i]['ek'] = ek_m
                if abs(ek_m) < min_err:
                    min_err = abs(ek_m)
                    best_model_id = i
            if min_err != 0:
                # normalize model errors relative to the best model
                for i in range(len(variables.app_config['models'])):
                    model_ek = variables.app_flags["models"][i]['ek']
                    if model_ek == 0:
                        model_ek = 0.01
                    variables.app_flags["models"][i]['ek_norm'] = 1 / (
                        abs(model_ek) / min_err)
                    sum_error_norm += abs(
                        variables.app_flags["models"][i]['ek_norm'])
            # calculate controller outputs (one PI per model)
            for i in range(len(variables.app_config['controllers'])):
                controller_data = variables.app_config['controllers'][i]  # NOTE(review): unused
                Kp = variables.app_config['controllers'][i]['kp']
                Ki = variables.app_config['controllers'][i]['ki']
                integral = variables.app_flags['controllers'][i]['integral']
                integral += ek * Ts * Ki
                # anti-windup: clamp the integrator to the 8-bit pump range
                if (integral > 255):
                    integral = 255
                if (integral < 0):
                    integral = 0
                variables.app_flags['controllers'][i]['uk'] = ek * Kp + integral
                variables.app_flags['controllers'][i]['integral'] = integral
            if sum_error_norm != 0:
                for i in range(len(variables.app_config['controllers'])):
                    variables.app_flags['controllers'][i]['a'] = variables.app_flags[
                        "models"][i]['ek_norm'] / sum_error_norm
                    # blend controller commands for smooth switching between controllers
                    uk_multi += variables.app_flags['controllers'][i][
                        'a'] * variables.app_flags['controllers'][i]['uk']
        # auto modes
        # check modes
        if variables.app_flags["mode"] == 1 or variables.app_flags["mode"] == 5:
            # auto mode
            variables.app_flags['control_time'] = t1
            if variables.app_flags["mode"] == 5:
                # mode 5: walk through the reference step sequence
                if (t1 - t0_step) >= variables.app_config["params"]["ts_step"]:
                    t0_step = t1
                    if (variables.app_flags["spab_index"] < len(
                            variables.app_config['ref_step_sequence'])):
                        variables.app_flags["ref"] = variables.app_config[
                            'ref_step_sequence'][variables.app_flags["spab_index"]]
                        variables.app_flags["spab_index"] += 1
            ctrl_id = variables.app_flags['controller_id']
            if variables.app_flags['supervisor'] == 1:
                # multi model
                Ts = variables.app_config["params"]["ts_control"]
                # show best model with corresponding controller output
                variables.app_flags['controller_id'] = best_model_id
                uk = uk_multi
                # uk = appVariables.appFlags['controllers'][appVariables.appFlags['controller_id']]['uk']
            else:
                if controller_type == 1:
                    # single PI controller; reload gains on controller switch
                    if ctrl_id != ctrl_id_prev:
                        Kp = variables.app_config['controllers'][ctrl_id]['kp']
                        Ki = variables.app_config['controllers'][ctrl_id]['ki']
                        variables.app_flags['Kp'] = Kp
                        variables.app_flags['Ki'] = Ki
                        variables.app_flags['Kd'] = 0
                        variables.app_flags['Tf'] = 0
                    variables.app_flags['integral'] += ek * Ts * Ki
                    if (variables.app_flags['integral'] > 255):
                        variables.app_flags['integral'] = 255
                    if (variables.app_flags['integral'] < 0):
                        variables.app_flags['integral'] = 0
                    uk = ek * Kp + variables.app_flags['integral']
                elif controller_type == 2:
                    # PID variant
                    if ctrl_id != ctrl_id_prev:
                        Kp = variables.app_config['controllers'][ctrl_id]['kp']
                        Ki = variables.app_config['controllers'][ctrl_id]['ki']
                        Kd = variables.app_config['controllers'][ctrl_id]['kd']
                        Tf = variables.app_config['controllers'][ctrl_id]['tf']
                        Ts = variables.app_config["params"]["ts_control"]
                        variables.app_flags['Kp'] = Kp
                        variables.app_flags['Ki'] = Ki
                        variables.app_flags['Kd'] = Kd
                        variables.app_flags['Tf'] = Tf
                        # K1=Kp+Ki+Kd
                        # K2=-Kp-2*Kd
                        # K3=Kd
                        K1 = Kp + (Ts * Ki) + Kd / Ts  # NOTE(review): K1..K3 unused
                        K2 = -Kp - 2 * Kd / Ts
                        K3 = Kd / Ts
                        ek1 = 0
                        ek2 = 0
                        uk1 = 0
                    variables.app_flags['integral'] += ek * Ts * Ki
                    if (variables.app_flags['integral'] > 255):
                        variables.app_flags['integral'] = 255
                    if (variables.app_flags['integral'] < 0):
                        variables.app_flags['integral'] = 0
                    derivative = (ek - ek1) / Ts
                    # NOTE(review): 'ek * Ki' looks like it should be
                    # 'ek * Kp' (proportional term) -- confirm before fixing
                    uk = ek * Ki + variables.app_flags['integral'] + derivative * Kd
                    ek1 = ek
                # clamp the command to [u_min, 255] and drive the pump
                if (uk > 255):
                    uk = 255
                if (uk < variables.app_config["params"]["u_min"]):
                    uk = variables.app_config["params"]["u_min"]
                variables.app_flags["pump"] = int(uk)
                variables.test_manager.set_pump(int(uk))
            ctrl_id_prev = ctrl_id
        if variables.app_flags["mode"] == 2:
            # ident mode / static
            if (t1 - t0_ramp) >= variables.app_config["params"]["ts_ramp"]:
                t0_ramp = t1
                if variables.app_aux_flags["dir_pump"] == 1:
                    # ramp up until near the top of the range
                    if uk <= 255 - variables.app_config["params"]["du_ramp"]:
                        uk += variables.app_config["params"]["du_ramp"]
                    else:
                        # appVariables.appFlagsAux["dir_pump"] = 0
                        pass
                else:
                    # ramp down until near zero, then switch to ramping up
                    if uk >= variables.app_config["params"]["du_ramp"]:
                        uk -= variables.app_config["params"]["du_ramp"]
                    else:
                        variables.app_aux_flags["dir_pump"] = 1
                variables.app_flags["pump"] = int(uk)
                variables.test_manager.set_pump(int(uk))
        elif variables.app_flags["mode"] == 3:
            # ident mode / step sequence
            if (t1 - t0_step) >= variables.app_config["params"]["ts_step"]:
                t0_step = t1
                if (variables.app_flags["spab_index"] < len(
                        variables.app_config['step_sequence'])):
                    uk = variables.app_config['step_sequence'][
                        variables.app_flags["spab_index"]]
                    variables.app_flags["spab_index"] += 1
                variables.app_flags["pump"] = int(uk)
                variables.test_manager.set_pump(int(uk))
        elif variables.app_flags["mode"] == 4:
            # ident mode / spab
            if (t1 - t0_spab) >= variables.app_config["params"]["ts_spab"]:
                t0_spab = t1
                # pseudo-random excitation around um_spab, amplitude du_spab
                delta = variables.spab_data[variables.app_flags[
                    "spab_index"]] * variables.app_config["params"]["du_spab"]
                variables.app_flags["spab_index"] += 1
                if variables.app_flags["spab_index"] == len(variables.spab_data):
                    variables.app_flags["spab_index"] = 0
                uk = variables.app_config["params"]["um_spab"] + delta
                variables.app_flags["pump"] = int(uk)
                variables.test_manager.set_pump(int(uk))
    variables.log2(self.__class__.__name__, "stopped")
# startup wiring: local TCP control server, optional MQTT bridge, optional
# machine-learning pipeline, then the websocket-capable HTTP server
t = TCPServer("127.0.0.1", 9003)
t.set_function(1)
t.start()
# MQTT import is deferred so the dependency is only needed when enabled
if variables.app_config["app"]["use_mqtt"]:
    from modules.test.mqtt_client import MQTTClient
    m = MQTTClient()
    m.start()
if variables.app_config["app"]["machine_learning"]:
    variables.machine_learning = MachineLearningMain()
    variables.machine_learning.read_data()
    variables.network = FindPath()
    variables.network.load_data_json(Constants.NETWORK_FILE)
    variables.network.format_data()
    variables.log2("main", " server started")
    variables.machine_learning.init()
    # initial dual clustering over all nodes (None = full range), 3 first-stage
    # and 3 final clusters
    variables.machine_learning.run_dual_clustering_on_node_range(None, 3, 3)
    process_data()
    # test
    # variables.machine_learning.set_lib(False)
# gevent WSGI server with websocket support, all interfaces, port 8086
server = pywsgi.WSGIServer(('0.0.0.0', 8086), app, handler_class=WebSocketHandler)
server.serve_forever()