def k_means_clust_dynamic_partial_update_whole(self, data):
    """Fold a complete new sample into the nearest existing centroid.

    Clusters must already exist; the sample is merged into the closest
    centroid as a running (weighted) mean using the centroid's current
    member count. No re-clustering or reassignment is performed.

    :param data: new sample, comparable to a centroid via ``self.euclid_dist``
    :return: the updated ``self.centroids``
    """
    # NOTE(review): this repeats the same update num_iter times with the same
    # sample, pulling the chosen centroid further toward `data` on each pass;
    # preserved as-is — confirm whether a single pass was intended.
    for n in range(self.num_iter):
        # locate the centroid closest to the new sample
        min_dist = float('inf')
        closest_clust = None
        for c_ind, ts2 in enumerate(self.centroids):
            cur_dist = self.euclid_dist(data, ts2)
            if cur_dist < min_dist:
                min_dist = cur_dist
                closest_clust = c_ind
        try:
            # running mean: weight the existing centroid by its member count
            n_assignments = len(self.assignments[closest_clust])
            self.centroids[closest_clust] = (
                self.centroids[closest_clust] * n_assignments + data
            ) / (n_assignments + 1)
        except Exception:
            # FIX: was a bare `except:` printing "exception"; narrowed so
            # KeyboardInterrupt/SystemExit are not swallowed
            variables.print_exception("")
    return self.centroids
def apiDatabaseSensors():
    """Return the latest *n* SensorData_Flow rows for a pipe id, oldest first.

    Query parameter 'param' is a JSON object {"id": <pipe id>, "n": <row count>}.
    Returns a RESULT_FAIL JSON object when id == 0, DB access is disabled,
    or on any error.
    """
    try:
        param = request.args.get('param')
        variables.log2("routes", '/api/database/sensors ' + param)
        param = json.loads(param)
        if param['id'] != 0 and variables.app_config["app"]["db_access"]:
            # FIX: int() sanitizes the row count — TOP cannot be a bound ODBC
            # parameter, so this value is embedded directly in the SQL text and
            # must not be attacker-controlled free text.
            top_n = int(param['n'])
            cursor = variables.cnxn.cursor()
            # inner query takes the newest top_n rows, outer re-sorts ascending
            cursor.execute(
                "SELECT * FROM (SELECT TOP " + str(top_n) +
                " * FROM SensorData_Flow WHERE Pipe_ID = ? ORDER BY Timestamp DESC) a"
                " ORDER BY Timestamp ASC",
                param['id'])
            rows = cursor.fetchall()
            columns = [column[0] for column in cursor.description]
            results = [dict(zip(columns, row)) for row in rows]
            return json.dumps(results, default=default_json)
        return json.dumps({"result": Constants.RESULT_FAIL})
    except Exception:
        variables.print_exception("[routes][/api/database/sensors]")
        return json.dumps({"result": Constants.RESULT_FAIL})
def apiMachineLearningClustersRangeFirstStage():
    """First-stage clustering over a range of nodes; returns display data as JSON."""
    route = '/api/machine-learning/clusters/range/first-stage'
    try:
        raw = request.args.get('param')
        variables.log2("routes", route)
        params = json.loads(raw)
        # explicit range wins; otherwise cluster every node up to "new_node"
        node_range = params.get("range")
        if node_range is None:
            node_range = list(range(0, params["new_node"]))
        print(node_range)
        # run clustering across all selected nodes, cluster count auto-chosen
        clustering = variables.machine_learning.run_clustering_on_node_range(
            node_range, None)
        (data, info) = variables.machine_learning.get_display_data(
            clustering, global_scale=params["global_scale"])
        payload = {"data": data, "info": info, "extra": None, "params": params}
        return json.dumps(payload, default=default_json)
    except:
        variables.print_exception(route)
        return json.dumps({"result": Constants.RESULT_FAIL})
def apiMachineLearningClustersNodeFirstStage():
    """First-stage clustering for a single node; returns display data as JSON."""
    route = '/api/machine-learning/clusters/node/first-stage'
    try:
        raw = request.args.get('param')
        variables.log2("routes", route)
        params = json.loads(raw)
        # cluster count auto-chosen (None)
        clustering = variables.machine_learning.run_clustering_on_node_id(
            params["node"], None)
        (data, info) = variables.machine_learning.get_display_data(
            clustering, global_scale=params["global_scale"])
        node_info = variables.machine_learning.get_info(params["node"])
        payload = {"data": data, "info": info, "extra": node_info,
                   "params": params}
        return json.dumps(payload, default=default_json)
    except:
        variables.print_exception(route)
        return json.dumps({"result": Constants.RESULT_FAIL})
def request_hex(self, service_def):
    """Send a binary service request and return the device reply.

    Message layout is one length byte followed by the service id byte
    (network byte order).

    :param service_def: key into ``self.servicesDict`` selecting the service id
    :return: ``(rx_data[2], rx_data)`` on success — byte 2 of the reply carries
             the device error code — or ``(-1, None)`` on pack/parse failure
    """
    if self.debug_log:
        variables.log2(self.__class__.__name__, "get data")
    # FIX: renamed `format` -> `fmt`; the original shadowed the builtin
    fmt = "!BB"
    tx_length = calcsize(fmt) - 1
    try:
        tx_message = pack(fmt, tx_length, self.servicesDict[service_def])
    except Exception:
        # FIX: narrowed from bare `except:`
        if self.debug_log:
            variables.print_exception(self.__class__.__name__)
        return (-1, None)
    self.send(tx_message)
    rx_message = self.receive(buffSize)
    try:
        rx_data = [rm for rm in rx_message]
        return (rx_data[2], rx_data)
    except Exception:
        if self.debug_log:
            variables.print_exception(self.__class__.__name__)
        return (-1, None)
def write_data_json(self, filename="data/network.json"):
    """Serialize the graph ``self.G`` to *filename* in node-link JSON format."""
    try:
        with open(filename, 'w') as out:
            graph_dict = json_graph.node_link_data(self.G)
            out.write(json.dumps(graph_dict, indent=2))
    except:
        variables.print_exception("write data json")
def request(self, tx_message):
    """Send *tx_message* (newline-terminated) and parse the CSV integer reply.

    Handles both Python 2 and Python 3 byte/str conversions.
    :return: ``(0, [int, ...])`` on success, ``(-1, None)`` on parse failure
    """
    payload = tx_message + "\n"
    if variables.python_version == 2:
        self.send(payload.encode())
    elif variables.python_version == 3:
        self.send(bytes(payload, "utf-8"))
    rx_message = self.receive(buffSize)
    try:
        if variables.python_version == 2:
            rx_message = rx_message.decode()
        elif variables.python_version == 3:
            rx_message = str(rx_message, "utf-8")
        # reply is a comma-separated list of integers
        return (0, [int(tok) for tok in rx_message.split(",")])
    except:
        if self.debug_log:
            variables.print_exception(self.__class__.__name__)
        return (-1, None)
def connect(self):
    """
    Open a TCP connection to ``(self.ip, self.port)``.

    Sets ``self.isconn`` to mirror the outcome.
    :return: 0 on success, 1 on failure
    """
    server_address = (self.ip, self.port)
    # if self.debug_log:
    variables.log2(self.__class__.__name__,
                   'starting up on %s port %s' % server_address)
    try:
        self.Socket.connect(server_address)
        self.isconn = True
        return 0
    except:
        if self.debug_log:
            variables.log2(
                self.__class__.__name__,
                'Error starting connection on %s port %s' % server_address)
            variables.print_exception(self.__class__.__name__)
        self.isconn = False
        return 1
def apiMachineLearningRunClusteringAssignmentPartialSample():
    """Cluster a partial sample on one node and return display data as JSON."""
    route = '/api/machine-learning/clusters/node/partial-sample'
    try:
        raw = request.args.get('param')
        variables.log2("routes", route)
        params = json.loads(raw)
        clustering = variables.machine_learning.run_clustering_on_partial_sample(
            params["node"], params["sample"])
        (data, info) = variables.machine_learning.get_display_data(
            clustering, global_scale=params["global_scale"])
        node_info = variables.machine_learning.get_info(params["node"])
        payload = {"data": data, "info": info, "extra": node_info,
                   "params": params}
        return json.dumps(payload, default=default_json)
    except:
        variables.print_exception(route)
        return json.dumps({"result": Constants.RESULT_FAIL})
def apiMachineLearningRaw():
    """Return raw ML data for a node; RESULT_FAIL when no node is given."""
    try:
        raw = request.args.get('param')
        variables.log2("routes", '/api/machine-learning/raw ' + raw)
        params = json.loads(raw)
        # guard clause: a node id is required
        if params["node"] is None:
            return json.dumps({"result": Constants.RESULT_FAIL})
        (data, info) = variables.machine_learning.get_raw_data(params["node"])
        # -1 is the "all nodes" sentinel and carries no per-node info
        node_info = (variables.machine_learning.get_info(params["node"])
                     if params["node"] != -1 else None)
        return json.dumps({"data": data, "info": info, "extra": node_info},
                          default=default_json)
    except:
        variables.print_exception("[routes][/api/machine-learning/raw]")
        return json.dumps({"result": Constants.RESULT_FAIL})
def apiMachineLearningInfo():
    """Return the ML module's info structure as JSON; RESULT_FAIL on error."""
    try:
        variables.log2("routes", '/api/machine-learning/info')
        return json.dumps({"info": variables.machine_learning.get_info()},
                          default=default_json)
    except:
        variables.print_exception("[routes][/api/machine-learning/info]")
        return json.dumps({"result": Constants.RESULT_FAIL})
def apiNetworkGraph():
    """Route '/api/network/graph': return the network graph structure as JSON."""
    try:
        variables.log2("routes", '/api/network/graph ')
        # NOTE(review): the graph dict is JSON-encoded twice — `data` ends up
        # as a JSON *string* inside the outer object, so the client presumably
        # re-parses it; confirm against the frontend before changing.
        data = json.dumps(variables.network.get_data_format())
        return json.dumps({"data": data}, default=default_json)
    except:
        variables.print_exception("[routes][/api/network/graph]")
        result = Constants.RESULT_FAIL
        return json.dumps({"result": result})
def apiMachineLearningInit():
    """Route '/api/machine-learning/init': (re)initialize the ML module."""
    try:
        # param = request.args.get('param')
        variables.log2("routes", '/api/machine-learning/init ')
        # param = json.loads(param)
        variables.machine_learning.init()
        # NOTE(review): the success path returns RESULT_FAIL, identical to the
        # error path, so callers cannot distinguish success from failure.
        # Looks like a bug — no success constant is visible here to confirm
        # the intended value.
        return json.dumps({"result": Constants.RESULT_FAIL})
    except:
        variables.print_exception("[routes][/api/machine-learning/init]")
        return json.dumps({"result": Constants.RESULT_FAIL})
def load_data_json(self, filename="data/network.json"):
    """Load a node-link JSON graph from *filename* into ``self.G``/``self.pos``."""
    try:
        with open(filename, 'r') as fh:
            parsed = json.loads(fh.read())
        self.get_graph_data(parsed)
        self.G = json_graph.node_link_graph(parsed)
        # spring layout for node positions (circular layout was abandoned)
        self.pos = nx.spring_layout(self.G)
    except:
        variables.print_exception("load data json")
def apiMachineLearningClustersNodeSecondStage():
    """Route for second-stage (dual) clustering over a node range.

    Re-clusters the first-stage clusters into ``n_clusters_final`` groups and
    optionally triggers reassignment via ``process_data()``.
    """
    # NOTE(review): the route string says "range" while the function name says
    # "Node" — the URL is what clients see.
    route = '/api/machine-learning/clusters/range/second-stage'
    try:
        param = request.args.get('param')
        variables.log2("routes", route)
        param = json.loads(param)
        res = None
        # n_clusters = 3
        n_clusters = None
        # number of second-stage (final) clusters
        n_clusters_final = 4
        # get clusters from clusters from all nodes
        # this will recalculate all clusters and reassign nodes
        if "range" in param and param["range"] is not None:
            r = param["range"]
        else:
            if "new_node" in param and param["new_node"] is not None:
                r = list(range(0, param["new_node"]))
            else:
                r = None
        print(r)
        try:
            res = variables.machine_learning.run_dual_clustering_on_node_range(
                r, n_clusters, n_clusters_final)
        except:
            # different number of final clusters
            # retry once from a clean ML state after re-init
            variables.print_exception(route)
            variables.machine_learning.init()
            res = variables.machine_learning.run_dual_clustering_on_node_range(
                r, n_clusters, n_clusters_final)
        # get clusters from clusters from all nodes
        # this will recalculate all clusters and reassign nodes
        if "assign" in param and param["assign"]:
            process_data()
        (data, info) = variables.machine_learning.get_display_data(
            res, global_scale=param["global_scale"])
        info2 = None
        return json.dumps(
            {
                "data": data,
                "info": info,
                "extra": info2,
                "params": param
            },
            default=default_json)
    except:
        variables.print_exception(route)
        result = Constants.RESULT_FAIL
        return json.dumps({"result": result})
def run(self):
    """
    Periodic control loop: logs sensor data to file and database on a fixed
    sampling period, and drives the HIL test runners.

    check for user input
    check status of test runners
    """
    variables.log2(self.__class__.__name__, "started")
    while True:
        time.sleep(Constants.LOOP_DELAY)
        self.t = time.time()
        try:
            # main sampling period self.TS
            if self.t - self.t0 >= self.TS:
                self.t0 = self.t
                if variables.app_flags['log'] == True:
                    variables.log_sensor_data()
                if variables.app_config["app"]["db_access"] and variables.app_config["app"]["db_logging"]:
                    # database log
                    # logging is done synchronously with latest data
                    # even if data from sensors are slightly delayed
                    # if a sensor is disconnected, the last value will not be
                    # written in the database (if the timeout expires)
                    if ((self.t - self.t0_db) > self.TS_DB):
                        self.t0_db = self.t
                        variables.log2(self.__class__.__name__, "save to db")
                        for s in variables.sensor_data:
                            # only save new data
                            # do not save the same data multiple times in the database
                            if 'recent' in s:
                                if s['recent']:
                                    # clear the flag before the DB write so a
                                    # failing insert does not retry forever
                                    s['recent'] = False
                                    if variables.cnxn is not None:
                                        cursor = variables.cnxn.cursor()
                                        tm = datetime.datetime.now()
                                        cursor.execute(
                                            "insert into SensorData_Flow(Timestamp, Value, Pipe_ID) values (?, ?, ?)",
                                            tm, s['value2'], s['id'])
                                        variables.cnxn.commit()
                                        # NOTE(review): redundant — already
                                        # cleared above
                                        s['recent'] = False
            # manage runners
            for (i, hil_def) in enumerate(self.hil_object_array):
                if self.mode == 'async':
                    self.tr[i].run_async()
        except:
            variables.print_exception(self.__class__.__name__)
            continue
def request(self, tx_message):
    """Send a newline-terminated command and parse the comma-separated
    integer reply.

    :return: ``(0, [int, ...])`` on success, ``(-1, None)`` on parse failure
    """
    self.send((tx_message + "\n").encode())
    reply = self.receive(buffSize)
    try:
        tokens = reply.decode().split(",")
        return (0, [int(tok) for tok in tokens])
    except:
        if self.debug_log:
            variables.print_exception(self.__class__.__name__)
        return (-1, None)
def close(self):
    """Shut down both directions and close the socket.

    :return: 0 on success, 1 on failure
    """
    if self.debug_log:
        variables.log2(self.__class__.__name__, "closing socket")
    try:
        self.Socket.shutdown(socket.SHUT_RDWR)
        self.Socket.close()
        return 0
    except:
        if self.debug_log:
            variables.print_exception(self.__class__.__name__)
        return 1
def receive(self, bufsize):
    """Read up to *bufsize* bytes from the socket.

    :param bufsize: maximum number of bytes to read
    :return: received bytes, or None when the read fails
    """
    received = None
    try:
        received = self.Socket.recv(bufsize)
        if self.debug_log:
            # verbose per-read logging intentionally disabled
            pass
    except:
        if self.debug_log:
            variables.print_exception(self.__class__.__name__)
    return received
def set_color_for_nodes(self):
    """Color and scale every graph node by its 'priority' value.

    The palette is interpolated between 0 and the maximum priority found;
    a priority of -1 is treated as 0.
    """
    low = 0
    high = 0
    # first pass: find the maximum priority for color scaling
    for node in self.graph_data["nodes"]:
        if "priority" in node and node["priority"] > high:
            high = node["priority"]
    # second pass: assign color and scaling value per node
    for node in self.graph_data["nodes"]:
        try:
            priority = node["priority"]
            if priority == -1:
                priority = 0
            rgba = Colors.get_rgba(
                Colors.convert_to_rgb_custom(low, high, priority, colors))
            node["color"] = {"border": rgba, "background": rgba}
            # the "value" property drives node scaling in the renderer
            node["value"] = priority
        except:
            variables.print_exception("")
def send(self, message):
    """Send *message* on the socket with sendall.

    :param message: bytes to transmit
    :return: 0 on success, 1 on error
    """
    if self.debug_log:
        # verbose per-send logging intentionally disabled
        pass
    try:
        self.Socket.sendall(message)
        return 0
    except:
        if self.debug_log:
            variables.log2(self.__class__.__name__,
                           'Error sending "%s"' % message)
            variables.print_exception(self.__class__.__name__)
        return 1
def k_means_clust_dynamic_partial_update(self, nsamp, data=None):
    """Assign a partial time series to its nearest centroid and update it.

    Compares only the first *nsamp* samples of the series against the first
    *nsamp* samples of each centroid, then folds the series into the closest
    centroid as a running (weighted) mean over that prefix.

    :param nsamp: number of leading samples to compare and update
    :param data: time series (numpy array); falls back to ``self.data``
    :return: the updated ``self.centroids``
    """
    ts1 = self.data if data is None else data
    ts1_partial = ts1[0:nsamp]

    # find the centroid whose nsamp-prefix is closest to the new series
    min_dist = float('inf')
    closest_clust = None
    for c_ind, ts2 in enumerate(self.centroids):
        cur_dist = self.euclid_dist(ts1_partial, ts2[0:nsamp])
        if cur_dist < min_dist:
            min_dist = cur_dist
            closest_clust = c_ind

    # accumulate the series into a fresh buffer of length nsamp
    # NOTE(review): assumes len(ts1) <= nsamp, as in the original code
    centroid = [0] * nsamp
    for di, d in enumerate(ts1.tolist()):
        centroid[di] += d

    try:
        n_assignments = len(self.assignments[closest_clust])
        # FIX: the original referenced an undefined name `len1` here, so this
        # update always raised NameError, which the bare `except:` silently
        # swallowed — the centroid was never updated. The prefix length is
        # nsamp (the length of `centroid`).
        self.centroids[closest_clust][0:nsamp] = (
            self.centroids[closest_clust][0:nsamp] * n_assignments + centroid
        ) / (n_assignments + 1)
    except Exception:
        variables.print_exception("")
    return self.centroids
def set_class_for_consumers(self, node_data=None):
    """
    Copy class/demand/priority from *node_data* onto consumer nodes and color
    them by priority.

    this method should be run before calculating the priorities for inner nodes
    :param node_data: mapping from consumer id to a dict with "class",
        "demand" and "priority" keys — assumed from usage, confirm with caller
    """
    print("network: set cluster for consumers")
    minp = 0
    maxp = 0
    # first pass: find the maximum priority for color scaling
    for node in self.graph_data["nodes"]:
        if "priority" in node and node["priority"] > maxp:
            maxp = node["priority"]
    # print("maxp: ", maxp)
    for (i, node) in enumerate(self.graph_data["nodes"]):
        try:
            # node["shape"] = "circle"
            color = "rgba(0,0,0,0.5)"
            # node["scaling"] = {"min": 30, "max": 100}
            node["value"] = 0
            if "id_consumer" in node:
                node_data1 = node_data[node["id_consumer"]]
                node["class"] = node_data1["class"]
                node["demand"] = node_data1["demand"]
                node["priority"] = node_data1["priority"]
                # value property is used for scaling
                node["value"] = node["priority"]
                # print(node["id"], node["id_consumer"], node["class"])
                if node_data is not None and maxp != 0:
                    try:
                        color = Colors.convert_to_rgb_custom(
                            minp, maxp, node["priority"], colors)
                        color = Colors.get_rgba(color)
                    except:
                        variables.print_exception("")
            node["color"] = {"border": color, "background": color}
        except:
            # NOTE(review): `break` aborts the whole loop on the first bad
            # node, leaving the remaining nodes untouched — confirm intended.
            variables.print_exception("set_cluster_for_consumers")
            break
def run(self):
    """Data-bucket loop: polls sensor clients, dispatches pump commands,
    ingests replies, and periodically logs sensor data to file/database."""
    t0 = time.time()
    Ts = 0.1
    # configured sampling period overrides the default above
    Ts = variables.app_config['ts']
    Tlog = variables.app_config['t_log']
    msg = "[DataBucketThread] " + "running"
    if not variables.qDebug1.full():
        variables.qDebug1.put(msg)
    # this thread gathers data from other modules and dispatches to control threads
    tstart = time.time()
    t0_slow = tstart
    t0_data = tstart
    db_logging = variables.app_config["app"]["db_logging"]
    # appVariables.clientListFcn[i]['t0_polling'] = tstart
    while True:
        try:
            time.sleep(0.01)
            t1 = time.time()
            # request data from generic nodes, discover nodes
            # (slow path, every 0.5 s)
            if (t1 - t0_slow) >= 0.5:
                t0_slow = t1
                for i in range(len(variables.clientList)):
                    if not variables.clientListFcn[i]['q_out'].full():
                        if variables.clientList[i]['type'] == 11:
                            variables.clientListFcn[i]['q_out'].put("1")
                        elif variables.clientList[i]['type'] == -1:
                            # type -1: client not yet identified
                            msg = "[DataBucketThread] " + 'discover client ' + variables.clientList[
                                i]['ip']
                            if not variables.qDebug1.full():
                                variables.qDebug1.put(msg)
                            variables.clientListFcn[i]['q_out'].put("1")
            # request data from nodes (polling), each with own sampling rate
            for i in range(len(variables.clientList)):
                # print appVariables.clientList[i]['ip']
                if (t1 - variables.clientListFcn[i]['t0_polling']) >= Ts:
                    variables.clientListFcn[i]['t0_polling'] = t1
                    if not variables.clientListFcn[i]['q_out'].full():
                        # print appVariables.clientList[i]['type']
                        # print appVariables.clientList[i]['type']
                        if variables.clientList[i]['type'] == 1:
                            # request data type
                            # 1: total flow
                            # 2: flow rate
                            # 3: flow rate filtered
                            variables.clientListFcn[i]['q_out'].put("3")
            # basic i/o client operations
            # write data
            if variables.app_aux_flags['set_pump']:
                variables.app_aux_flags['set_pump'] = False
                variables.app_flags['pump_cmd_time'] = time.time()
                for i in range(len(variables.clientList)):
                    if not variables.clientListFcn[i]['q_out'].full():
                        if variables.clientList[i]['type'] == 11:
                            # "2,<value>" = pump command
                            variables.clientListFcn[i]['q_out'].put(
                                "2," + str(variables.app_flags['pump']))
            # read data
            for i in range(len(variables.clientList)):
                # retrieve and process data from clients of type flow sensor!!
                if variables.clientList[i]['type'] == 1:
                    if not variables.clientListFcn[i]['q_in'].empty():
                        cdata = variables.clientListFcn[i]['q_in'].get(
                            block=False)
                        # print(cdata['data'])
                        variables.getBoardData(cdata['data'], array=True)
                        # measured sensor sampling interval in ms, plus an
                        # exponentially-weighted average of it
                        variables.app_flags['ts_sensor'] = int(
                            1000 * (t1 - t0_data))
                        variables.app_flags['ts_sensor_avg'] = int(
                            variables.app_flags['ts_sensor_avg'] * 0.9 +
                            variables.app_flags['ts_sensor'] * 0.1)
                        t0_data = t1
                        if variables.app_flags['log'] == True:
                            variables.log_sensor_data()
            # database log
            # logging is done synchronously with latest data
            # even if data from sensors are slightly delayed
            # if a sensor is disconnected, the last value will not be
            # written in the database (if the timeout expires)
            if ((t1 - t0) > Tlog) and db_logging:
                t0 = t1
                msg = "[DataBucketThread] " + "log"
                if not variables.qDebug1.full():
                    variables.qDebug1.put(msg)
                for s in variables.sensor_data:
                    # only save new data
                    # do not save the same data multiple times in the database
                    if 'recent' in s:
                        if s['recent']:
                            if variables.cnxn is not None:
                                cursor = variables.cnxn.cursor()
                                tm = datetime.datetime.now()
                                cursor.execute(
                                    "insert into SensorData_Flow(Timestamp, Value, Pipe_ID) values (?, ?, ?)",
                                    tm, s['value2'], s['id'])
                                variables.cnxn.commit()
                            s['recent'] = False
        except:
            variables.print_exception("[DataBucketThread] ")
def handle(self):
    """Per-connection handler: registers the client, then loops forwarding
    queued outbound commands and parsing inbound CSV messages until the
    client times out (no data for 5 s) or an error occurs."""
    try:
        # print "Connection from: %s" % str(self.client_address)
        # request_msg = self.rfile.readline(1024)
        # msg = "[simple_tcp_server] "+str(request_msg)
        # if not appVariables.qDebug1.full():
        #     appVariables.qDebug1.put(msg)
        # self.wfile.write('211,\n')
        # self.wfile.flush()
        # non-blocking socket: reads raise when no data is available
        self.request.setblocking(0)
        t0 = time.time()
        self.data = ''
        self.index = 0
        msg = "[TCPRequestHandler] new connection at " + str(
            self.client_address[0])
        if not variables.qDebug1.full():
            variables.qDebug1.put(msg)
        # update client list with connected client
        clientInList = False
        for i in range(len(variables.clientList)):
            if variables.clientList[i]['ip'] == self.client_address[0]:
                msg = "[TCPRequestHandler] already in list"
                if not variables.qDebug1.full():
                    variables.qDebug1.put(msg)
                clientInList = True
                # reset identity; the client re-announces id/type on reconnect
                variables.clientList[i]['id'] = -1
                variables.clientList[i]['type'] = -1
                variables.clientListFcn[i]['t0'] = t0
                variables.clientListFcn[i]['t0_polling'] = t0
                variables.clientListFcn[i]['t0_log'] = t0
                break
        if not clientInList:
            msg = "[TCPRequestHandler] add client "
            if not variables.qDebug1.full():
                variables.qDebug1.put(msg)
            newClientFcn = copy.deepcopy(variables.clientModelFcn)
            newClientFcn['q_in'] = Queue(maxsize=20)
            newClientFcn['q_out'] = Queue(maxsize=20)
            newClientFcn['t0'] = t0
            newClientFcn['t0_polling'] = t0
            newClientFcn['t0_log'] = t0
            variables.clientListFcn.append(newClientFcn)
            newClient = copy.deepcopy(variables.clientModel)
            newClient['ip'] = self.client_address[0]
            newClient['counter_rx'] = 0
            newClient['counter_tx'] = 0
            variables.clientList.append(newClient)
        else:
            msg = "[TCPRequestHandler] update client "
            if not variables.qDebug1.full():
                variables.qDebug1.put(msg)
        while 1:
            time.sleep(0.0001)
            t1 = time.time()
            # self.data = self.request.recv(1024)
            # self.c = self.request.recv(1)
            # handle sending data
            for i in range(len(variables.clientList)):
                if variables.clientList[i]['ip'] == self.client_address[0]:
                    # handle external data
                    if not variables.clientListFcn[i]['q_out'].empty():
                        new_data = variables.clientListFcn[i]['q_out'].get(
                            block=False)
                        variables.clientList[i][
                            'counter_tx'] = variables.clientList[i][
                                'counter_tx'] + 1
                        new_data = variables.add_checksum(new_data)
                        new_data += '\n'
                        # print 'send to ' + self.client_address[0] + ': ' + new_data
                        self.request.send(new_data)
                        try:
                            # 211 appears to be a keep-alive/handshake code
                            # that is not echoed to the debug queue — confirm
                            if (not variables.qTCPOut.full()) and (
                                    variables.clientList[i]['data'][0] != 211):
                                variables.qTCPOut.put(
                                    '[' + str(variables.clientList[i]['id']) +
                                    '] ' + new_data)
                        except:
                            pass
            # handle receiving data
            while 1:
                time.sleep(0.00001)
                try:
                    # raises on a non-blocking socket when no data is pending,
                    # which exits this inner read loop
                    self.c = self.rfile.read(1)
                except:
                    break
                if self.c != '\n':
                    self.data += self.c
                    self.index += 1
                # try:
                #     self.data = self.rfile.readline()
                # except:
                #     break
                if ((self.c == '\n') or (self.index >= 1024)):
                    # if True:
                    # a full message (newline or 1024-char overflow)
                    self.index = 0
                    clientNumStr = self.data.split(",")
                    # print clientNumStr
                    clientData = [0] * len(clientNumStr)
                    i = 0
                    for i in range(len(clientNumStr)):
                        try:
                            clientData[i] = int(clientNumStr[i])
                        except:
                            # non-numeric tokens become 0
                            clientData[i] = 0
                    # update client list
                    # print 'received from ' + self.client_address[0] + ': '+ self.data
                    for i in range(len(variables.clientList)):
                        if variables.clientList[i][
                                'ip'] == self.client_address[0]:
                            variables.clientList[i]['in'] = self.data
                            # message layout: [code, id, type, ...]
                            if len(clientData) >= 3:
                                variables.clientList[i]['id'] = clientData[
                                    1]
                                variables.clientList[i][
                                    'type'] = clientData[2]
                            variables.clientList[i]['data'] = clientData
                            if not variables.clientListFcn[i]['q_in'].full(
                            ):
                                variables.clientListFcn[i]['q_in'].put({
                                    'str': self.data,
                                    'data': clientData
                                })
                            # refresh the liveness timestamp
                            variables.clientListFcn[i]['t0'] = t1
                            variables.clientList[i][
                                'counter_rx'] = variables.clientList[i][
                                    'counter_rx'] + 1
                            self.data = ''
                            break
            # end while
            # handle timeouts (received data)
            expired = False
            for i in range(len(variables.clientList)):
                if variables.clientList[i]['ip'] == self.client_address[0]:
                    if (t1 - variables.clientListFcn[i]['t0']) > 5:
                        variables.clientListFcn[i]['t0'] = t1
                        msg = "[TCPRequestHandler] " + ' no data ' + ' at ' + str(
                            self.client_address[0]) + '. socket closed'
                        if not variables.qDebug1.full():
                            variables.qDebug1.put(msg)
                        # drop the stale client entry entirely
                        del variables.clientListFcn[i]
                        del variables.clientList[i]
                        expired = True
                        break
            if expired:
                break
    except:
        variables.print_exception(
            "[TCPRequestHandler] exception. closing socket at " +
            self.client_address[0])
# --- server bootstrap (module top level) ---
# queue for outbound TCP writes, bounded so a stalled consumer cannot grow it
q_write_tcp = Queue(maxsize=10)
time.sleep(1)
print(variables.app_config["app"]["db_selection"])
# pick the connection settings of the configured database
db_info = variables.app_config["db_info"][variables.app_config["app"]
                                          ["db_selection"]]
if variables.app_config["app"]["db_access"]:
    # pyodbc is only imported when DB access is enabled in the config
    import pyodbc
    try:
        variables.cnxn = pyodbc.connect('DRIVER=' + db_info["driver"] +
                                        ';SERVER=' + db_info["server"] +
                                        ';DATABASE=' + db_info["database"] +
                                        ';UID=' + db_info["username"] +
                                        ';PWD=' + db_info["password"])
    except:
        variables.print_exception("server")
# start the background workers: test runners, logger, control loop
variables.test_manager = TestRunnerManager()
variables.test_manager.start()
tlog = Log()
tlog.start()
thread8 = ControlSystemsThread()
thread8.start()
# test server
from modules.test.TCPServer import TCPServer
t = TCPServer("127.0.0.1", 9001)
t.set_function(0)