def set_vars(observed_nodes_list, ctrl_list, value_list):
    """
    Set new values for nodes.

    :param observed_nodes_list: list of nodes the client subscribed to
    :param ctrl_list: list of nodes to update
    :param value_list: list of values to assign
    """
    i = 0
    for ctrl in ctrl_list:
        for var in observed_nodes_list:
            if var.nodeid == ctrl.nodeid:
                try:
                    variant_type = var.get_data_value().Value.VariantType
                    var.set_value(value_list[i], variant_type)
                    break
                except Exception as ex:
                    if isinstance(ex, TimeoutError):
                        print(DateHelper.get_local_datetime(),
                              'TimeoutError ignored while setting var in OPCClient')
                    else:
                        print(DateHelper.get_local_datetime(), ex)
                        raise
        i += 1
def start(self):
    while not self._terminated:
        try:
            # start opc client
            self._init_OPCUA()

            # start AutoVarUpdater
            self.prepare_auto_updater()
            self.start_auto_updater()
            print(DateHelper.get_local_datetime(), self.__class__.__name__,
                  " finished Start-Routine")

            # start server status request loop
            while not self._terminated:
                try:
                    browse_name = self.opc_client.client.get_server_node(
                    ).get_browse_name()
                    time.sleep(1)
                except Exception as ex:
                    print(DateHelper.get_local_datetime(),
                          self.__class__.__name__,
                          'lost connection to server:')
                    print(ex)
                    break
        except Exception as ex:
            print(ex)
        finally:
            if not self._terminated:
                print(DateHelper.get_local_datetime(), 'Restart ',
                      self.__class__.__name__)
                time.sleep(1)

    self._finalize()
def add_objects_subfolder(self, parent, dir_name):
    # check if an old dir with dir_name exists; if so, delete this dir first
    try:
        obj = self.root.get_child(
            ["0:Objects", ("{}:" + dir_name).format(self.idx)])
        self.server.delete_nodes([obj], True)
    except BadNoMatch:
        print(DateHelper.get_local_datetime(),
              "There is no old folder with the name: " + dir_name)

    folder = self.obj.add_folder(self.idx, dir_name)
    print(DateHelper.get_local_datetime(), "Add subfolder: " + dir_name)
def run2(self):
    count = 0
    t1 = 0
    t2 = 0
    while not self.ticker.wait(self.PERIOD / 1000) and not self._terminated:
        now = DateHelper.create_local_utc_datetime()
        values = []
        for var in self.vars:
            # add "noise" to dt in the range [0, TIMESTAMP_PRECISION]
            delta = random.random() * self.TIMESTAMP_PRECISION * 1000
            now_noised = now + datetime.timedelta(0, 0, delta)

            dv = DataValue()
            dv.SourceTimestamp = now
            # dv.SourceTimestamp = now_noised

            if self.ANORMAL_NODE_NAME in var.get_browse_name().Name:
                dv.Value = ua.Variant(2 * sin(100 * pi * (t1 + t2)))
            else:
                if self.SLACK_NODE_NAME in var.get_browse_name().Name:
                    dv.Value = ua.Variant(
                        (len(self.vars) - 1) * 2 * sin(100 * pi * t1))
                else:
                    dv.Value = ua.Variant(2 * sin(100 * pi * t1))
            values.append(dv)

            if self.DEBUG_MODE_PRINT:
                print(self.__class__.__name__, dv.Value)

        self.opc_client.set_vars(self.vars, values)

        t1 = count / 1000
        # make deviation after 60 time steps
        if count > self.TIME_STEPS_NO_ERROR:
            t2 = count * 0.05 / 1000
        count += 1

        # reset after 120 time steps
        if count > self.TIME_STEPS_NO_ERROR + self.TIME_STEPS_ERROR:
            print(DateHelper.get_local_datetime(), self.__class__.__name__,
                  " reset Var_Updater loop")
            count = 0
            t1 = 0
            t2 = 0

    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " stopped")
def set_status_flags(self, path, list_of_nodes_to_reset, dir_name):
    """Get status nodes from the topology file specified by *path* and map
    them to the nodes in *dir_name* on the opc server.
    In a next step, make a subscription for the status nodes.
    """
    # get vars registered at the server, allocated as CustomVar
    server_vars = self.get_customized_server_vars(dir_name)

    # get new topology
    self._clear_topo_status_flags()
    self.topo_data = TopologyData(path)

    # cluster topology nodes into lists (here searching only for misc)
    # and assign the appropriate CustomVar
    for topo_opctag, topo_browsename in self.topo_data.get_itempairs():
        for var in server_vars:
            if var.opctag == topo_opctag:
                if "RES" not in topo_opctag and "CTRL" not in topo_opctag:
                    self.misc_nodes_list.append(var)
                break

    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful updated status flags from file:" + path)

    # update OPC-client: delete old subscription and start new subscription
    self._update_subscription_opc_client(self, self.misc_nodes_list, True, 500)

    self.reset_flags(list_of_nodes_to_reset, dir_name)
def __init__(self,
             auth_name=None,
             auth_password=None,
             server_endpoint="opc.tcp://0.0.0.0:4840/OPCUA/python_server/",
             client_request_timeout=4):
    # super
    self.NAMESPACE = os.environ.get("NAMESPACE")
    self.ENABLE_CERTIFICATE = bool(
        strtobool(os.environ.get("ENABLE_CERTIFICATE")))
    self.CERTIFICATE_PATH_CLIENT_CERT = os.path.dirname(
        os.getcwd()) + os.environ.get("CERTIFICATE_PATH_CLIENT_CERT",
                                      "not_specified")
    self.CERTIFICATE_PATH_CLIENT_PRIVATE_KEY = os.path.dirname(
        os.getcwd()) + os.environ.get(
            "CERTIFICATE_PATH_CLIENT_PRIVATE_KEY", "not_specified")
    self.DEBUG_MODE_PRINT = bool(
        strtobool(os.environ.get("DEBUG_MODE_PRINT", "False")))
    super().__init__(server_endpoint, self.NAMESPACE,
                     self.ENABLE_CERTIFICATE,
                     self.CERTIFICATE_PATH_CLIENT_CERT,
                     self.CERTIFICATE_PATH_CLIENT_PRIVATE_KEY, auth_name,
                     auth_password, self.DEBUG_MODE_PRINT)

    self.full_node_list = []
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful init")
def update_data(self, node, datetime_source, val):
    # pause the DiffCore thread so the DataFrame is not read/written while it
    # is being updated
    with self.__lock:
        self.check_for_unused_meas_data()
        if self.clear_meas_data_flag:
            self.clear_meas_data()

        start = time.time_ns()
        # otherwise update data used for DiffCore
        for var in (self.Iph1_nodes_list + self.Iph2_nodes_list +
                    self.Iph3_nodes_list):
            if var.nodeid == node.nodeid:
                # TODO necessary for real meas devices with fixed timestamp?
                ts = DateHelper.round_time(datetime_source,
                                           self.TIMESTAMP_PRECISION)

                if (self.slack_ph1 is not None and var.opctag == self.slack_ph1.opctag) or \
                        (self.slack_ph2 is not None and var.opctag == self.slack_ph2.opctag) or \
                        (self.slack_ph3 is not None and var.opctag == self.slack_ph3.opctag):
                    val = -val  # IMPORTANT: slack counts in negative manner

                if var.phase == 1:
                    self.df_ph1.loc[ts, var.opctag] = val
                    self.print_dataframe(self.df_ph1)
                elif var.phase == 2:
                    self.df_ph2.loc[ts, var.opctag] = val
                    self.print_dataframe(self.df_ph2)
                elif var.phase == 3:
                    self.df_ph3.loc[ts, var.opctag] = val
                    self.print_dataframe(self.df_ph3)

                end = time.time_ns()
                self.print_process_time(start, end)
                break
def __init__(
        self,
        meas_device_tag="RES",
        auth_name=None,
        auth_password=None,
        start_threshold=5000,
        server_endpoint="opc.tcp://0.0.0.0:4840/OPCUA/python_server/"):
    self.SERVER_ENDPOINT = os.environ.get("SERVER_ENDPOINT", server_endpoint)
    self.NAMESPACE = os.environ.get("NAMESPACE")
    self.ENABLE_CERTIFICATE = bool(
        strtobool(os.environ.get("ENABLE_CERTIFICATE")))
    self.CERTIFICATE_PATH_CLIENT_CERT = os.path.dirname(
        os.getcwd()) + os.environ.get("CERTIFICATE_PATH_CLIENT_CERT")
    self.CERTIFICATE_PATH_CLIENT_PRIVATE_KEY = os.path.dirname(
        os.getcwd()) + os.environ.get("CERTIFICATE_PATH_CLIENT_PRIVATE_KEY")
    self.DEBUG_MODE_PRINT = bool(
        strtobool(os.environ.get("DEBUG_MODE_PRINT", "False")))
    self.START_THRESHOLD = int(
        os.environ.get("AUTO_VAR_UPDATER_START_THRESHOLD",
                       start_threshold)) * 1000 * 1000  # conversion into ns
    self.OPCUA_DIR_NAME = os.environ.get("OPCUA_SERVER_DIR_NAME")

    self.meas_device_tag = meas_device_tag
    self.opc_client = None
    self.vup = None
    self._terminated = False
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful init")
def start(self):
    try:
        self.client.connect()
    except ConnectionError as er:
        print(DateHelper.get_local_datetime(), er)
        raise
        # sys.exit(1)
    except Exception as ex:
        print(DateHelper.get_local_datetime(), ex)
        raise
        # sys.exit(1)

    # now get the root node and the namespace index using its browse path
    self.root = self.client.get_root_node()
    uri = self.NAMESPACE
    self.idx = self.client.get_namespace_index(uri)
def __init__(self,
             host='localhost',
             port=8086,
             update_period=500,
             db_name="demonstrator_grid_protection"):
    self.UPDATE_PERIOD = int(
        os.environ.get("DATABASE_UPDATE_PERIOD", update_period))
    self.DEBUG_MODE_PRINT = bool(
        strtobool(os.environ.get("DEBUG_MODE_PRINT", "False")))
    self.INFLUXDB_HOST = os.environ.get("INFLUXDB_HOST", host)
    self.INFLUXDB_PORT = os.environ.get("INFLUXDB_PORT", port)
    self.INFLUXDB_NAME = os.environ.get("INFLUXDB_NAME", db_name)

    # TimeLoop
    # https://medium.com/greedygame-engineering/an-elegant-way-to-run-periodic-tasks-in-python-61b7c477b679
    self.ticker = threading.Event()

    # DB related stuff
    self._db_user = '******'
    self._db_password = '******'
    self._db_protocol = 'line'
    self.db_client = None

    # OPC Client
    self.opc_client = None

    self._terminated = False
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful init")
def run_online_grid_protection(self, parent, setpoint, parent_node=""):
    try:
        obj = self.root.get_child(
            ["0:Objects", ("{}:" + parent_node).format(self.idx)])
    except BadNoMatch:
        print(DateHelper.get_local_datetime(),
              "run_online_grid_protection(): Change in On/Off failed.")
        raise

    for mvar in obj.get_variables():
        if "RUN_ONLINE_GRID_PROTECTION" in mvar.get_browse_name().Name:
            variant_type = mvar.get_data_value().Value.VariantType
            mvar.set_value(clamp(setpoint, 0, 1), variant_type)
            print(DateHelper.get_local_datetime(),
                  "Change status of online grid protection to " +
                  str(clamp(setpoint, 0, 1)))
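run_online_grid_protection() and set_pv_active_power_setpoint() below bound the incoming setpoint with a clamp() helper that is not part of this section. A minimal sketch of such a helper, assuming it simply restricts a value to a closed interval:

def clamp(value, lower, upper):
    # hypothetical helper: bound *value* to the closed interval [lower, upper]
    return max(lower, min(value, upper))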
def run(self):
    start_time = time.time_ns()
    while not self._terminated or len(self.vars) >= 1:
        if time.time_ns() > (start_time + self.threshold):
            print(DateHelper.get_local_datetime(), self.__class__.__name__,
                  " started")
            self.run2()
            break
def set_meas_topology(self, path, list_of_nodes_to_reset, dir_name):
    """Match the topology specified in *path*, used for grid protection, with
    all available nodes in *dir_name* on the opc server.
    This defines the slack nodes for the subgrids and maps each node to the
    phase it represents. After all nodes are sorted into local lists, the opc
    client is requested to make a subscription for each node of interest at
    the opc server.
    """
    # get vars registered at the server, allocated as CustomVar
    server_vars = self.get_customized_server_vars(dir_name)

    # get new topology
    self._clear_topo_meas()
    self.topo_data = TopologyData(path)

    # cluster topology nodes into lists (current, ctrl and misc) and assign
    # the appropriate CustomVar
    for topo_opctag, topo_browsename in self.topo_data.get_itempairs():
        for var in server_vars:
            if var.opctag == topo_opctag:
                if "_I_" in topo_opctag and "RES" in topo_opctag:
                    # separate nodes into groups for each phase and find the
                    # slack node of each phase
                    if "PH1" in topo_opctag:
                        self.Iph1_nodes_list.append(var)
                        if "slack" in topo_browsename.lower():
                            self.slack_ph1 = var
                    elif "PH2" in topo_opctag:
                        self.Iph2_nodes_list.append(var)
                        if "slack" in topo_browsename.lower():
                            self.slack_ph2 = var
                    elif "PH3" in topo_opctag:
                        self.Iph3_nodes_list.append(var)
                        if "slack" in topo_browsename.lower():
                            self.slack_ph3 = var
                elif "_I_" not in topo_opctag and "RES" in topo_opctag:
                    self.other_meas_nodes_list.append(var)
                elif "CTRL" in topo_opctag:
                    self.ctrl_nodes_list.append(var)
                break

    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful updated meas topology from file:" + path)

    # update topology of DataHandler
    self.DataHandler.set_topology(self.slack_ph1, self.slack_ph2,
                                  self.slack_ph3, self.Iph1_nodes_list,
                                  self.Iph2_nodes_list, self.Iph3_nodes_list,
                                  self.ctrl_nodes_list, self.misc_nodes_list)

    # update OPC-client: delete old subscription and start new subscription
    self._update_subscription_opc_client(
        self.DataHandler,
        self.Iph1_nodes_list + self.Iph2_nodes_list + self.Iph3_nodes_list +
        self.other_meas_nodes_list + self.ctrl_nodes_list, False)

    # reset flags
    self.reset_flags(list_of_nodes_to_reset, dir_name)
def set_pv_active_power_setpoint(self, parent, setpoint, parent_node=""):
    try:
        obj = self.root.get_child(
            ["0:Objects", ("{}:" + parent_node).format(self.idx)])
    except BadNoMatch:
        print(DateHelper.get_local_datetime(),
              "set_pv_active_power_setpoint(): assign new value to node failed.")
        raise

    for mvar in obj.get_variables():
        name = mvar.get_browse_name().Name
        if "PV" in name and "CTRL" in name:
            variant_type = mvar.get_data_value().Value.VariantType
            mvar.set_value(clamp(setpoint, 0, 100), variant_type)
            print(DateHelper.get_local_datetime(),
                  "Set Value of node " + name + " to " +
                  str(clamp(setpoint, 0, 100)))
def register_devices(self, dir_name, device_config_path):
    """Register the device representation specified in *device_config_path*
    as nodes in *dir_name* on the opc server.
    """
    self.opc_client.create_dir_on_server(dir_name)
    self.opc_client.register_variables_to_server(dir_name,
                                                 device_config_path)
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful register devices from file:" + device_config_path)
def register_opc_tag(self,
                     parent,
                     opctag,
                     variant_type="Float",
                     parent_node=""):
    # Object "parent_node":
    try:
        obj = self.root.get_child(
            ["0:Objects", ("{}:" + parent_node).format(self.idx)])
    except BadNoMatch:
        print(DateHelper.get_local_datetime(),
              "register_opc_tag(): OPCUA_server_dir the variables should be "
              "assigned to doesn't exist.")
        raise

    var = ua.Variant(0, strings_to_vartyps(variant_type))
    mvar = obj.add_variable(self.idx, opctag.strip(), var)
    mvar.set_writable()
    print(DateHelper.get_local_datetime(),
          "Add variable: " + opctag + " of type " + variant_type +
          " @node " + parent_node)
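register_opc_tag() converts the textual type from the device config into a ua.VariantType via strings_to_vartyps(), which is not shown in this section. A minimal sketch, assuming the config only uses a handful of basic type names and that Float is an acceptable fallback:

def strings_to_vartyps(type_name):
    # hypothetical mapping from config strings to python-opcua variant types
    mapping = {
        "Float": ua.VariantType.Float,
        "Double": ua.VariantType.Double,
        "Int64": ua.VariantType.Int64,
        "Boolean": ua.VariantType.Boolean,
        "String": ua.VariantType.String,
    }
    return mapping.get(type_name, ua.VariantType.Float)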
def set_status_online_grid_protection(self, status_value):
    """Set a value for the node RUN_ONLINE_GRID_PROTECTION.
    """
    nodes = []
    values = []
    for node in self.misc_nodes_list:
        if "RUN_ONLINE_GRID_PROTECTION" in node.opctag:
            nodes.append(node)
            values.append(status_value)
    self.opc_client.set_vars(nodes, values)
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          "updated status of Online Grid Protection.")
def set_start_value_for_misc_node(self, start_value, key_phrase):
    """Set a *start_value* for a node specified as misc by *key_phrase*.
    """
    nodes = []
    values = []
    for node in self.misc_nodes_list:
        if key_phrase in node.opctag:
            nodes.append(node)
            values.append(start_value)
    self.opc_client.set_vars(nodes, values)
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful set startValues for misc node")
def start(self):
    while not self._terminated:
        try:
            self.db_client = DataFrameClient(self.INFLUXDB_HOST,
                                             self.INFLUXDB_PORT,
                                             self._db_user,
                                             self._db_password,
                                             self.INFLUXDB_NAME)
            self.db_client.create_database(self.INFLUXDB_NAME)
            self._init_OPCUA()

            while not self._terminated and not self.ticker.wait(
                    self.UPDATE_PERIOD / 1000):
                try:
                    # check if the server node's browse name still exists/is valid
                    browse_name = self.opc_client.client.get_server_node(
                    ).get_browse_name()
                    df = self.opc_client.get_last_dataframe()
                    self.update_database(df)
                except Exception as ex:
                    print(DateHelper.get_local_datetime(),
                          self.__class__.__name__,
                          " lost client connection")
                    print(ex)
                    break
        except Exception as ex:
            print(ex)
        finally:
            if not self._terminated:
                print(DateHelper.get_local_datetime(), 'Restart ',
                      self.__class__.__name__)
                time.sleep(1)

    self._finalize()
def start(self):
    # start opc client
    super().start()

    # all observed nodes have to be requested before the subscription is made
    # (otherwise a TimeoutError can occur)
    all_observed_nodes = self.get_server_vars(self.OPCUA_DIR_NAME)
    for var in all_observed_nodes:
        opctag = var.get_browse_name().Name
        nodeid_identifier = str(var.nodeid.Identifier)
        self.node_dict.update({nodeid_identifier: opctag})

    # make subscription
    self.make_subscription(self.OPCUA_DIR_NAME,
                           self.get_server_vars(self.OPCUA_DIR_NAME))
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful connected")
def make_subscription(self, dir_name, list_of_nodes_to_subscribe,
                      sub_interval=1):
    """
    Make a subscription for a list of nodes.

    :param dir_name: subfolder which contains the requested nodes
    :param list_of_nodes_to_subscribe: list of nodes/customVars
    :param sub_interval: time interval the subscribed node is checked (in ms)
    """
    sub_handler = SubHandler(self)
    self.subscription, self.subscription_handle, self.subscribed_nodes = \
        self._subscribe(dir_name, sub_handler, self.subscription,
                        self.subscription_handle, list_of_nodes_to_subscribe,
                        self.subscribed_nodes, sub_interval)
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful updates subscription")
def __init__(
        self,
        auth_name=None,
        auth_password=None,
        server_endpoint="opc.tcp://0.0.0.0:4840/OPCUA/python_server/"):
    # super
    self.SERVER_ENDPOINT = os.environ.get("SERVER_ENDPOINT", server_endpoint)
    self.NAMESPACE = os.environ.get("NAMESPACE")
    self.ENABLE_CERTIFICATE = bool(
        strtobool(os.environ.get("ENABLE_CERTIFICATE")))
    self.CERTIFICATE_PATH_CLIENT_CERT = os.path.dirname(
        os.getcwd()) + os.environ.get("CERTIFICATE_PATH_CLIENT_CERT",
                                      "not_specified")
    self.CERTIFICATE_PATH_CLIENT_PRIVATE_KEY = os.path.dirname(
        os.getcwd()) + os.environ.get(
            "CERTIFICATE_PATH_CLIENT_PRIVATE_KEY", "not_specified")
    self.DEBUG_MODE_PRINT = bool(
        strtobool(os.environ.get("DEBUG_MODE_PRINT", "False")))
    super().__init__(self.SERVER_ENDPOINT, self.NAMESPACE,
                     self.ENABLE_CERTIFICATE,
                     self.CERTIFICATE_PATH_CLIENT_CERT,
                     self.CERTIFICATE_PATH_CLIENT_PRIVATE_KEY, auth_name,
                     auth_password, self.DEBUG_MODE_PRINT)

    # custom
    self.OPCUA_DIR_NAME = os.environ.get("OPCUA_SERVER_DIR_NAME", 'default')
    self.node_dict = dict()
    self.subscribed_nodes = []
    self.subscription = None
    self.subscription_handle = None
    self.df = pd.DataFrame()

    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful init")
def make_subscription(self,
                      target_object,
                      dir_name,
                      list_of_nodes_to_subscribe,
                      are_status_nodes=False,
                      sub_interval=1):
    """
    Make a subscription for a list of nodes and set some properties.

    :param target_object: object the datachange_notification of the subscription is sent to
    :param dir_name: subfolder which contains the requested nodes
    :param list_of_nodes_to_subscribe: list of nodes/customVars
    :param are_status_nodes: flag if the requested subscription is only for status flags
    :param sub_interval: time interval the subscribed node is checked (in ms)
    """
    sub_handler = SubHandler(target_object)
    if are_status_nodes is True:
        self.subscription_status_nodes, self.subscription_handle_status_nodes, self.subscribed_status_nodes = \
            self._subscribe(dir_name, sub_handler,
                            self.subscription_status_nodes,
                            self.subscription_handle_status_nodes,
                            list_of_nodes_to_subscribe,
                            self.subscribed_status_nodes, sub_interval)
    else:
        self.subscription_meas_nodes, self.subscription_handle_meas_nodes, self.subscribed_meas_nodes = \
            self._subscribe(dir_name, sub_handler,
                            self.subscription_meas_nodes,
                            self.subscription_handle_meas_nodes,
                            list_of_nodes_to_subscribe,
                            self.subscribed_meas_nodes, sub_interval)
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful updates subscription")
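Both make_subscription() variants hand the node list to a SubHandler that forwards data-change events to the target object. The class itself is not shown in this section; a minimal sketch following the python-opcua subscription-handler interface, assuming the target object exposes update_data(node, datetime_source, val) as the DataHandler above does:

class SubHandler:
    # hypothetical sketch of a python-opcua subscription handler
    def __init__(self, target_object):
        self.target = target_object

    def datachange_notification(self, node, val, data):
        # forward the new value together with its source timestamp to the target
        source_ts = data.monitored_item.Value.SourceTimestamp
        self.target.update_data(node, source_ts, val)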
def set_power_infeed_limit(self, upper_limit):
    # check the actual state of CTRLs
    # self.update_ctrl_states()

    nodes = []
    values = []

    # update the state of CTRLs
    for ctrl in self.ctrl_nodes_list:
        if "LIMIT_CTRL" in ctrl.opctag:
            nodes.append(ctrl)
            values.append(upper_limit)  # limit power infeed to *upper_limit* %

        # snippet when using with PowerFactory
        # if "BUS_LV_BREAKER" in ctrl.opctag:
        #     # add new state
        #     nodes.append(ctrl)
        #     values.append(1)  # only for testing
        #     # values.append(0)  # open breaker 0

    # execute set_vars()
    self.opc_client.set_vars(nodes, values)
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          "All LIMIT_CTRL devices are set to power infeed = " +
          str(upper_limit) + "%.")
def start_auto_updater(self):
    print(self.__class__.__name__, type(self.vup))
    self.vup.start()
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          "started Auto-VarUpdater")
def print_process_time(self, start, end):
    if self.DEBUG_MODE_PRINT:
        print(DateHelper.get_local_datetime(), self.__class__.__name__,
              " successful updated data @ ",
              str((end - start) / (1000 * 1000)) + " ms")
def stop(self):
    super().stop()
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful disconnected")
def stop(self):
    self.server.stop()
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          " successful stopped")
def start(self): """Start routine to setup GridProtectionManager """ while not self._terminated: try: # start opc client self._init_OPCUA() # Init DataHandler self.DataHandler = DataHandler(self.opc_client) # Init DiffCore self.mDiffCore = DiffCore(self.opc_client, self.DataHandler) time.sleep(1) # Registration of vars at server self.register_devices( self.server_dir_name, os.path.dirname(os.getcwd()) + self.DEVICE_PATH) # Set status nodes used for monitoring and topology/device updates self.set_status_flags(self.topo_path, [], self.server_dir_name) # Set topology used for grid protection functionality itself self.set_meas_topology(self.topo_path, [], self.server_dir_name) # Set start value for controllable node self.set_start_value_for_ctrl_node( 100, "LIMIT_CTRL") # set start value of PV_LIMIT_CTRL to 100% # Set start value for misc node self.set_start_value_for_misc_node( 1, "RUN_ONLINE_GRID_PROTECTION") # set start value to ON # start DiffCore time.sleep(1) self.mDiffCore.start() print(DateHelper.get_local_datetime(), self.__class__.__name__, " finished Start-Routine") # start server status request loop while not self._terminated: try: browse_name = self.opc_client.client.get_server_node( ).get_browse_name() time.sleep(1) except Exception as ex: print(DateHelper.get_local_datetime(), self.__class__.__name__, 'lost connection to server:') print(ex) break except Exception as ex: print(ex) finally: if not self._terminated: print(DateHelper.get_local_datetime(), 'Restart ', self.__class__.__name__) time.sleep(1) self._finalize()
def print_work_status(self, work_status):
    print(DateHelper.get_local_datetime(), self.__class__.__name__,
          work_status)