def __configure_pyodbc(self):
    """Apply module-level pyodbc attributes taken from the 'pyodbc' config section.

    Each key/value pair from the configuration is set as an attribute on the
    pyodbc module itself (e.g. pooling flags), then the applied set is logged.
    Does nothing when the section is absent or empty.
    """
    attributes = self.__config.get("pyodbc", {})
    if not attributes:
        return
    for attr_name, attr_value in attributes.items():
        # setattr on a module is equivalent to writing into its __dict__.
        setattr(pyodbc, attr_name, attr_value)
    log.info("[%s] Set pyodbc attributes: %s", self.get_name(), attributes)
def __close(self):
    """Close the cursors and the database connection, then reset all handles.

    The handles are cleared in a ``finally`` block so that ``is_connected()``
    reports False afterwards even if one of the ``close()`` calls raised.
    """
    if not self.is_connected():
        return
    try:
        self.__cursor.close()
        if self.__rpc_cursor is not None:
            self.__rpc_cursor.close()
        self.__connection.close()
    finally:
        # Always drop the references, even on error, to avoid reusing a
        # half-closed connection on the next polling iteration.
        log.info("[%s] Connection to database closed", self.get_name())
        self.__connection = None
        self.__cursor = None
        self.__rpc_cursor = None
def run(self):
    """Main thread loop of the ODBC connector.

    Repeats two phases until stopped:
    1. Initialization — (re)connect to the database honoring the
       ``connection.reconnect``/``reconnectPeriod`` settings, then initialize
       the polling iterator. Either failure exits the loop.
    2. Polling — run one polling pass, then sleep ``polling.period`` seconds.
       pyodbc warnings are logged; pyodbc errors close the connection so the
       next iteration re-enters the initialization phase.
    """
    while not self.__stopped:
        # Initialization phase
        if not self.is_connected():
            # Keep retrying while reconnect is enabled and we are not stopped.
            while not self.__stopped and \
                    not self.__init_connection() and \
                    self.__config["connection"].get("reconnect", self.DEFAULT_RECONNECT_STATE):
                reconnect_period = self.__config["connection"].get(
                    "reconnectPeriod", self.DEFAULT_RECONNECT_PERIOD)
                log.info("[%s] Will reconnect to database in %d second(s)",
                         self.get_name(), reconnect_period)
                sleep(reconnect_period)
            if not self.is_connected():
                log.error(
                    "[%s] Cannot connect to database so exit from main loop",
                    self.get_name())
                break
            if not self.__init_iterator():
                log.error(
                    "[%s] Cannot init database iterator so exit from main loop",
                    self.get_name())
                break
        # Polling phase
        try:
            self.__poll()
            if not self.__stopped:
                polling_period = self.__config["polling"].get(
                    "period", self.DEFAULT_POLL_PERIOD)
                log.debug(
                    "[%s] Next polling iteration will be in %d second(s)",
                    self.get_name(), polling_period)
                sleep(polling_period)
        except pyodbc.Warning as w:
            log.warning("[%s] Warning while polling database: %s",
                        self.get_name(), str(w))
        except pyodbc.Error as e:
            # A hard error invalidates the connection; close it so the next
            # loop iteration goes through the initialization phase again.
            log.error("[%s] Error while polling database: %s",
                      self.get_name(), str(e))
            self.__close()
    self.__close()
    # Reset the stop flag so the connector can be restarted later.
    self.__stopped = False
    log.info("[%s] Stopped", self.get_name())
def __init_connection(self):
    """Open the pyodbc connection and cursor from the 'connection' config.

    Applies optional ``encoding`` and ``decoding`` (char/wchar/metadata)
    settings on the connection. On any pyodbc error the partially opened
    connection is closed.

    Returns:
        bool: ``is_connected()`` after the attempt.
    """
    try:
        log.debug("[%s] Opening connection to database", self.get_name())
        connection_config = self.__config["connection"]
        # 'str' is the raw ODBC connection string; extra pyodbc.connect
        # keyword arguments come from the optional 'attributes' mapping.
        self.__connection = pyodbc.connect(
            connection_config["str"],
            **connection_config.get("attributes", {}))
        if connection_config.get("encoding", ""):
            log.info("[%s] Setting encoding to %s", self.get_name(),
                     connection_config["encoding"])
            self.__connection.setencoding(connection_config["encoding"])
        decoding_config = connection_config.get("decoding")
        if decoding_config is not None:
            if isinstance(decoding_config, dict):
                # Per-SQL-type decodings are optional and independent.
                if decoding_config.get("char", ""):
                    log.info("[%s] Setting SQL_CHAR decoding to %s",
                             self.get_name(), decoding_config["char"])
                    self.__connection.setdecoding(pyodbc.SQL_CHAR,
                                                  decoding_config["char"])
                if decoding_config.get("wchar", ""):
                    log.info("[%s] Setting SQL_WCHAR decoding to %s",
                             self.get_name(), decoding_config["wchar"])
                    self.__connection.setdecoding(pyodbc.SQL_WCHAR,
                                                  decoding_config["wchar"])
                if decoding_config.get("metadata", ""):
                    log.info("[%s] Setting SQL_WMETADATA decoding to %s",
                             self.get_name(), decoding_config["metadata"])
                    self.__connection.setdecoding(
                        pyodbc.SQL_WMETADATA, decoding_config["metadata"])
            else:
                # Non-dict decoding config is ignored (only warned about).
                log.warning(
                    "[%s] Unknown decoding configuration %s. Read data may be misdecoded",
                    self.get_name(), decoding_config)
        self.__cursor = self.__connection.cursor()
        log.info("[%s] Connection to database opened, attributes %s",
                 self.get_name(), connection_config.get("attributes", {}))
    except pyodbc.Error as e:
        log.error("[%s] Failed to connect to database: %s",
                  self.get_name(), str(e))
        self.__close()
    return self.is_connected()
def process_data(self, request):
    """Validate an incoming HTTP request, convert its JSON body, push to storage.

    Responds 415 when there is no JSON body, 405 when the HTTP method is not
    listed in the endpoint's ``HTTPMethods``, "OK"/200 on success and ""/500
    when conversion or storage fails.
    """
    if not request.json:
        abort(415)
    cfg = self.__endpoint['config']
    allowed_methods = {m.upper() for m in cfg['HTTPMethods']}
    if request.method.upper() not in allowed_methods:
        abort(405)
    try:
        log.info("CONVERTER CONFIG: %r", cfg['converter'])
        converter = self.__endpoint['converter'](cfg['converter'])
        payload = converter.convert(config=cfg['converter'],
                                    data=request.get_json())
        self.send_to_storage(self.__name, payload)
        log.info("CONVERTED_DATA: %r", payload)
        return "OK", 200
    except Exception as error:
        log.exception("Error while post to basic handler: %s", error)
        return "", 500
def __init_iterator(self):
    """Initialize the polling iterator (column name + starting value).

    Resolution order:
    1. Saved iterator file, when ``polling.iterator.persistent`` is enabled
       and a saved state loads successfully.
    2. Explicit ``polling.iterator.value`` from the configuration.
    3. Result of ``polling.iterator.query`` executed against the database.

    Returns:
        bool: True when an iterator value was obtained.
    """
    save_iterator = self.DEFAULT_SAVE_ITERATOR
    # Normalize the 'persistent' flag into the config so later code
    # (e.g. __poll) can read it unconditionally.
    if "persistent" not in self.__config["polling"]["iterator"]:
        self.__config["polling"]["iterator"]["persistent"] = save_iterator
    else:
        save_iterator = self.__config["polling"]["iterator"]["persistent"]
    log.info("[%s] Iterator saving %s", self.get_name(),
             "enabled" if save_iterator else "disabled")
    if save_iterator and self.__load_iterator_config():
        log.info(
            "[%s] Init iterator from file '%s': column=%s, start_value=%s",
            self.get_name(), self.__iterator_file_name,
            self.__iterator["name"], self.__iterator["value"])
        return True
    self.__iterator = {
        "name": self.__config["polling"]["iterator"]["column"],
        "total": 0
    }
    if "value" in self.__config["polling"]["iterator"]:
        self.__iterator["value"] = self.__config["polling"]["iterator"][
            "value"]
        log.info(
            "[%s] Init iterator from configuration: column=%s, start_value=%s",
            self.get_name(), self.__iterator["name"],
            self.__iterator["value"])
    elif "query" in self.__config["polling"]["iterator"]:
        try:
            # The query is expected to return a single row whose first
            # column is the starting iterator value.
            self.__iterator["value"] = \
                self.__cursor.execute(self.__config["polling"]["iterator"]["query"]).fetchone()[0]
            log.info(
                "[%s] Init iterator from database: column=%s, start_value=%s",
                self.get_name(), self.__iterator["name"],
                self.__iterator["value"])
        except pyodbc.Warning as w:
            log.warning("[%s] Warning on init iterator from database: %s",
                        self.get_name(), str(w))
        except pyodbc.Error as e:
            log.error("[%s] Failed to init iterator from database: %s",
                      self.get_name(), str(e))
    else:
        log.error(
            "[%s] Failed to init iterator: value/query param is absent",
            self.get_name())
    # Success is defined as having obtained a starting value.
    return "value" in self.__iterator
def __parse_rpc_config(self):
    """Normalize the 'serverSideRpc' configuration section in place.

    Ensures the section exists, fills the ``enableUnknownRpc`` and
    ``overrideRpcConfig`` flags with defaults, and reformats the ``methods``
    list (strings or dicts with a ``name`` key) into a dict keyed by method
    name. Entries of any other type are skipped with a warning.
    """
    if "serverSideRpc" not in self.__config:
        self.__config["serverSideRpc"] = {}
    if "enableUnknownRpc" not in self.__config["serverSideRpc"]:
        self.__config["serverSideRpc"][
            "enableUnknownRpc"] = self.DEFAULT_ENABLE_UNKNOWN_RPC
    log.info(
        "[%s] Processing unknown RPC %s", self.get_name(), "enabled"
        if self.__config["serverSideRpc"]["enableUnknownRpc"] else "disabled")
    if "overrideRpcConfig" not in self.__config["serverSideRpc"]:
        self.__config["serverSideRpc"][
            "overrideRpcConfig"] = self.DEFAULT_OVERRIDE_RPC_PARAMS
    log.info(
        "[%s] Overriding RPC config %s", self.get_name(), "enabled"
        if self.__config["serverSideRpc"]["overrideRpcConfig"] else "disabled")
    # BUGFIX: previously the whole 'serverSideRpc' dict was replaced with
    # {"methods": {}} here, which discarded the 'enableUnknownRpc' and
    # 'overrideRpcConfig' flags normalized just above. Only the 'methods'
    # key is reset now. (The old extra '"serverSideRpc" not in self.__config'
    # check was redundant — the section is guaranteed to exist at this point.)
    if not self.__config["serverSideRpc"].get("methods", []):
        self.__config["serverSideRpc"]["methods"] = {}
        return
    reformatted_config = {}
    for rpc_config in self.__config["serverSideRpc"]["methods"]:
        if isinstance(rpc_config, str):
            reformatted_config[rpc_config] = {}
        elif isinstance(rpc_config, dict):
            reformatted_config[rpc_config["name"]] = rpc_config
        else:
            log.warning(
                "[%s] Wrong RPC config format. Expected str or dict, get %s",
                self.get_name(), type(rpc_config))
    self.__config["serverSideRpc"]["methods"] = reformatted_config
def __poll(self):
    """Run one polling pass: execute the query and process each fetched row."""
    rows = self.__cursor.execute(self.__config["polling"]["query"],
                                 self.__iterator["value"])
    # Cache the result-set column names once, from the cursor description.
    if not self.__column_names:
        self.__column_names.extend(column[0]
                                   for column in self.__cursor.description)
        log.info("[%s] Fetch column names: %s", self.get_name(),
                 self.__column_names)
    # pyodbc.Cursor.rowcount may be 0 for some drivers (e.g. sqlite),
    # so rows are counted manually while iterating.
    processed = 0
    for row in rows:
        processed += 1
        self.__process_row(row)
    self.__iterator["total"] += processed
    log.info(
        "[%s] Polling iteration finished. Processed rows: current %d, total %d",
        self.get_name(), processed, self.__iterator["total"])
    # Persist the iterator only when something was actually processed.
    if self.__config["polling"]["iterator"]["persistent"] and processed > 0:
        self.__save_iterator_config()
def __init__(self, gateway, config, connector_type): super().__init__() # Initialize parents classes self.statistics = {'MessagesReceived': 0, 'MessagesSent': 0} # Dictionary, will save information about count received and sent messages. self.__config = config # Save configuration from the configuration file. self.__gateway = gateway # Save gateway object, we will use some gateway methods for adding devices and saving data from them. self.setName(self.__config.get("name", "Custom %s connector " % self.get_name() + ''.join(choice(ascii_lowercase) for _ in range(5)))) # get from the configuration or create name for logs. log.info("Starting Custom %s connector", self.get_name()) # Send message to logger self.daemon = True # Set self thread as daemon self.stopped = True # Service variable for check state self.__connected = False # Service variable for check connection to device self.__devices = {} # Dictionary with devices, will contain devices configurations, converters for devices and serial port objects self.__load_converters(connector_type) # Call function to load converters and save it into devices dictionary self.__connect_to_devices() # Call function for connect to devices log.info('Custom connector %s initialization success.', self.get_name()) # Message to logger log.info("Devices in configuration file found: %s ", '\n'.join(device for device in self.__devices)) # Message to logger
def poll(self):
    """Send/schedule all configured polling messages.

    'always'-type configs are scheduled for periodic polling on every call;
    other configs are sent exactly once, on the first run. After the first
    run the scheduler thread is started.
    """
    if self.first_run:
        log.info("[%s] Starting poller", self.connector.get_name())
    for polling_config in self.connector.get_polling_messages():
        key = polling_config["key"]
        if polling_config["type"] == "always":
            log.info("[%s] Polling '%s' key every %f sec",
                     self.connector.get_name(), key,
                     polling_config["period"])
            self.__poll_and_schedule(
                bytearray.fromhex(polling_config["dataInHex"]),
                polling_config)
        elif self.first_run:
            log.info("[%s] Polling '%s' key once",
                     self.connector.get_name(), key)
            # raise_exception=True only on the first run, so initial bus
            # problems surface to the caller (see CAN run() start-up poll).
            self.connector.send_data_to_bus(
                bytearray.fromhex(polling_config["dataInHex"]),
                polling_config,
                raise_exception=self.first_run)
    if self.first_run:
        self.first_run = False
        self.start()
def run(self):
    """Main thread loop of the CAN connector.

    Connects to the CAN bus, processes incoming messages until stopped, and
    on failure tears everything down and optionally reconnects according to
    the reconnect configuration.
    """
    need_run = True
    while need_run:
        bus_notifier = None
        poller = None
        try:
            interface = self.__bus_conf["interface"]
            channel = self.__bus_conf["channel"]
            kwargs = self.__bus_conf["backend"]
            self.__bus = ThreadSafeBus(interface=interface,
                                       channel=channel,
                                       **kwargs)
            reader = BufferedReader()
            bus_notifier = Notifier(self.__bus, [reader])
            log.info("[%s] Connected to CAN bus (interface=%s,channel=%s)",
                     self.get_name(), interface, channel)
            if self.__polling_messages:
                poller = Poller(self)
                # Poll once to check if network is not down.
                # It would be better to have some kind of a ping message to check a bus state.
                poller.poll()
            # Initialize the connected flag and reconnect count only after bus creation and sending poll messages.
            # It is expected that after these operations most likely the bus is up.
            self.__connected = True
            self.__reconnect_count = 0
            while not self.__stopped:
                message = reader.get_message()
                if message is not None:
                    # log.debug("[%s] New CAN message received %s", self.get_name(), message)
                    self.__process_message(message)
                self.__check_if_error_happened()
        except Exception as e:
            log.error("[%s] Error on CAN bus: %s", self.get_name(), str(e))
        finally:
            # Shut down poller, notifier and bus regardless of how the
            # try-block exited; errors here are logged but not re-raised.
            try:
                if poller is not None:
                    poller.stop()
                if bus_notifier is not None:
                    bus_notifier.stop()
                if self.__bus is not None:
                    log.debug(
                        "[%s] Shutting down connection to CAN bus (state=%s)",
                        self.get_name(), self.__bus.state)
                    self.__bus.shutdown()
            except Exception as e:
                log.error(
                    "[%s] Error on shutdown connection to CAN bus: %s",
                    self.get_name(), str(e))
            self.__connected = False
            if not self.__stopped:
                # Unexpected failure: decide whether to retry or give up.
                if self.__is_reconnect_enabled():
                    retry_period = self.__reconnect_conf["period"]
                    log.info(
                        "[%s] Next attempt to connect will be in %f seconds (%s attempt left)",
                        self.get_name(), retry_period,
                        "infinite" if self.__reconnect_conf["maxCount"] is None
                        else self.__reconnect_conf["maxCount"] - self.__reconnect_count + 1)
                    time.sleep(retry_period)
                else:
                    need_run = False
                    log.info(
                        "[%s] Last attempt to connect has failed. Exiting...",
                        self.get_name())
            else:
                need_run = False
    log.info("[%s] Stopped", self.get_name())
def close(self):
    """Stop the connector: set the stop flag and disconnect the client if connected."""
    self.__stopped = True
    if self.__connected:
        self.client.disconnect()
        self.__connected = False
    log.info('%s has been stopped.', self.get_name())
def run(self):
    """Main thread loop of the OPC-UA connector.

    Phase 1: retry connecting to the OPC-UA server every 10 seconds until
    connected. Phase 2: until stopped, monitor the connection, reconnect
    when needed, periodically rescan configured nodes when subscriptions
    are disabled, and flush collected data to the gateway storage.
    """
    while not self.__connected:
        try:
            self.client.connect()
            try:
                # Best-effort: custom type definitions may be unavailable.
                self.client.load_type_definitions()
            except Exception as e:
                log.debug(e)
                log.debug("Error on loading type definitions.")
            log.debug(self.client.get_namespace_array()[-1])
            log.debug(
                self.client.get_namespace_index(
                    self.client.get_namespace_array()[-1]))
        except ConnectionRefusedError:
            log.error(
                "Connection refused on connection to OPC-UA server with url %s",
                self.__server_conf.get("url"))
            time.sleep(10)
        except OSError:
            log.error(
                "Connection refused on connection to OPC-UA server with url %s",
                self.__server_conf.get("url"))
            time.sleep(10)
        except Exception as e:
            log.debug("error on connection to OPC-UA server.")
            log.error(e)
            time.sleep(10)
        else:
            self.__connected = True
            log.info("OPC-UA connector %s connected to server %s",
                     self.get_name(), self.__server_conf.get("url"))
    self.__initialize_client()
    while not self.__stopped:
        try:
            time.sleep(.1)
            self.__check_connection()
            if not self.__connected and not self.__stopped:
                self.client.connect()
                self.__initialize_client()
                log.info("Reconnected to the OPC-UA server - %s",
                         self.__server_conf.get("url"))
            elif not self.__stopped:
                # With subscriptions disabled, rescan configured nodes every
                # 'scanPeriodInMillis' (timestamps are kept in milliseconds).
                if self.__server_conf.get(
                        "disableSubscriptions", False
                ) and time.time(
                ) * 1000 - self.__previous_scan_time > self.__server_conf.get(
                        "scanPeriodInMillis", 60000):
                    self.scan_nodes_from_config()
                    self.__previous_scan_time = time.time() * 1000
                # giusguerrini, 2020-09-24: Fix: flush event set and send all data to platform,
                # so data_to_send doesn't grow indefinitely in case of more than one value change
                # per cycle, and platform doesn't lose events.
                # NOTE: possible performance improvement: use a map to store only one event per
                # variable to reduce frequency of messages to platform.
                while self.data_to_send:
                    self.__gateway.send_to_storage(self.get_name(),
                                                   self.data_to_send.pop())
            if self.__stopped:
                self.close()
                break
        except (KeyboardInterrupt, SystemExit):
            self.close()
            raise
        except FuturesTimeoutError:
            self.__check_connection()
        except Exception as e:
            # On unexpected errors recreate the client and internal caches,
            # then retry after a delay.
            log.error(
                "Connection failed on connection to OPC-UA server with url %s",
                self.__server_conf.get("url"))
            log.exception(e)
            self.client = Client(
                self.__opcua_url,
                timeout=self.__server_conf.get("timeoutInMillis", 4000) /
                1000)
            self._subscribed = {}
            self.__available_object_resources = {}
            time.sleep(10)
def open(self):
    """Clear the stop flag and start the connector thread."""
    self.__stopped = False
    self.start()
    log.info("Starting OPC-UA Connector")
def open(self):
    """Clear the stop flag and start the connector thread."""
    log.info("[%s] Starting...", self.get_name())
    self.__stopped = False
    self.start()
def run(self):
    """Run the scheduler until its queue is exhausted, then log the stop."""
    self.scheduler.run()
    log.info("[%s] Poller stopped", self.connector.get_name())
def __parse_config(self, config):
    """Parse the CAN connector configuration into internal lookup structures.

    Fills reconnect and bus settings, then for every device: RPC call
    configs, shared-attribute update configs, and telemetry/attribute
    message configs grouped by arbitration id and command id. Devices with
    at least one valid section are registered with the gateway.
    """
    self.__reconnect_count = 0
    self.__reconnect_conf = {
        "enabled": config.get("reconnect", self.DEFAULT_RECONNECT_STATE),
        "period": config.get("reconnectPeriod",
                             self.DEFAULT_RECONNECT_PERIOD),
        "maxCount": config.get("reconnectCount", None)
    }
    self.__bus_conf = {
        "interface": config.get("interface", "socketcan"),
        "channel": config.get("channel", "vcan0"),
        "backend": config.get("backend", {})
    }
    for device_config in config.get("devices"):
        # A device is registered only if at least one of serverSideRpc,
        # attributeUpdates, timeseries or attributes is configured.
        is_device_config_valid = False
        device_name = device_config["name"]
        device_type = device_config.get("type", self.__connector_type)
        strict_eval = device_config.get("strictEval",
                                        self.DEFAULT_STRICT_EVAL_FLAG)
        self.__devices[device_name] = {}
        self.__devices[device_name][
            "enableUnknownRpc"] = device_config.get(
                "enableUnknownRpc", self.DEFAULT_ENABLE_UNKNOWN_RPC)
        # Unknown RPC processing implies overriding the RPC config.
        self.__devices[device_name]["overrideRpcConfig"] = True if self.__devices[device_name]["enableUnknownRpc"] \
            else device_config.get("overrideRpcConfig", self.DEFAULT_OVERRIDE_RPC_PARAMS)
        self.__converters[device_name] = {}
        if not strict_eval:
            log.info(
                "[%s] Data converters for '%s' device will use non-strict eval",
                self.get_name(), device_name)
        if "serverSideRpc" in device_config and device_config[
                "serverSideRpc"]:
            is_device_config_valid = True
            self.__rpc_calls[device_name] = {}
            self.__converters[device_name][
                "downlink"] = self.__get_converter(
                    device_config.get("converters"), False)
            for rpc_config in device_config["serverSideRpc"]:
                rpc_config["strictEval"] = strict_eval
                self.__rpc_calls[device_name][
                    rpc_config["method"]] = rpc_config
        if "attributeUpdates" in device_config and device_config[
                "attributeUpdates"]:
            is_device_config_valid = True
            self.__shared_attributes[device_name] = {}
            if "downlink" not in self.__converters[device_name]:
                self.__converters[device_name][
                    "downlink"] = self.__get_converter(
                        device_config.get("converters"), False)
            for attribute_config in device_config["attributeUpdates"]:
                attribute_config["strictEval"] = strict_eval
                # Prefer the ThingsBoard-side attribute name when given.
                attribute_name = attribute_config.get(
                    "attributeOnThingsBoard") or attribute_config.get(
                        "attribute")
                self.__shared_attributes[device_name][
                    attribute_name] = attribute_config
        for config_key in ["timeseries", "attributes"]:
            if config_key not in device_config or not device_config[
                    config_key]:
                continue
            is_device_config_valid = True
            is_ts = (config_key[0] == "t")
            tb_item = "telemetry" if is_ts else "attributes"
            self.__devices[device_name][tb_item] = {}
            if "uplink" not in self.__converters[device_name]:
                self.__converters[device_name][
                    "uplink"] = self.__get_converter(
                        device_config.get("converters"), True)
            for msg_config in device_config[config_key]:
                tb_key = msg_config["key"]
                msg_config["strictEval"] = strict_eval
                msg_config["is_ts"] = is_ts
                node_id = msg_config.get("nodeId",
                                         self.UNKNOWN_ARBITRATION_ID)
                if node_id == self.UNKNOWN_ARBITRATION_ID:
                    log.warning(
                        "[%s] Ignore '%s' %s configuration: no arbitration id",
                        self.get_name(), tb_key, config_key)
                    continue
                value_config = self.__parse_value_config(
                    msg_config.get("value"))
                if value_config is not None:
                    msg_config.update(value_config)
                else:
                    log.warning(
                        "[%s] Ignore '%s' %s configuration: no value configuration",
                        self.get_name(),
                        tb_key,
                        config_key,
                    )
                    continue
                # Only the first valid command config per arbitration id
                # is stored; later configs for the same id reuse it.
                if msg_config.get("command",
                                  "") and node_id not in self.__commands:
                    cmd_config = self.__parse_command_config(
                        msg_config["command"])
                    if cmd_config is None:
                        log.warning(
                            "[%s] Ignore '%s' %s configuration: wrong command configuration",
                            self.get_name(),
                            tb_key,
                            config_key,
                        )
                        continue
                    cmd_id = cmd_config["value"]
                    self.__commands[node_id] = cmd_config
                else:
                    cmd_id = self.NO_CMD_ID
                    self.__commands[node_id] = None
                if node_id not in self.__nodes:
                    self.__nodes[node_id] = {}
                if cmd_id not in self.__nodes[node_id]:
                    self.__nodes[node_id][cmd_id] = {
                        "deviceName": device_name,
                        "deviceType": device_type,
                        "sendOnChange": device_config.get(
                            "sendDataOnlyOnChange",
                            self.DEFAULT_SEND_IF_CHANGED),
                        "configs": []
                    }
                self.__nodes[node_id][cmd_id]["configs"].append(msg_config)
                self.__devices[device_name][tb_item][tb_key] = None
                if "polling" in msg_config:
                    try:
                        polling_config = msg_config.get("polling")
                        polling_config["key"] = tb_key  # Just for logging
                        polling_config["type"] = polling_config.get(
                            "type", "always")
                        polling_config["period"] = polling_config.get(
                            "period", self.DEFAULT_POLL_PERIOD)
                        polling_config["nodeId"] = node_id
                        polling_config["isExtendedId"] = msg_config.get(
                            "isExtendedId", self.DEFAULT_EXTENDED_ID_FLAG)
                        polling_config["isFd"] = msg_config.get(
                            "isFd", self.DEFAULT_FD_FLAG)
                        polling_config["bitrateSwitch"] = msg_config.get(
                            "bitrateSwitch",
                            self.DEFAULT_BITRATE_SWITCH_FLAG)
                        # Create CAN message object to validate its data
                        can_msg = Message(
                            arbitration_id=polling_config["nodeId"],
                            is_extended_id=polling_config["isExtendedId"],
                            is_fd=polling_config["isFd"],
                            bitrate_switch=polling_config["bitrateSwitch"],
                            data=bytearray.fromhex(
                                polling_config["dataInHex"]),
                            check=True)
                        self.__polling_messages.append(polling_config)
                    except (ValueError, TypeError) as e:
                        log.warning(
                            "[%s] Ignore '%s' %s polling configuration, wrong CAN data: %s",
                            self.get_name(), tb_key, config_key, str(e))
                        continue
        if is_device_config_valid:
            log.debug("[%s] Done parsing of '%s' device configuration",
                      self.get_name(), device_name)
            self.__gateway.add_device(device_name, {"connector": self})
        else:
            log.warning(
                "[%s] Ignore '%s' device configuration, because it doesn't have attributes,"
                "attributeUpdates,timeseries or serverSideRpc",
                self.get_name(), device_name)
def open(self):
    """Clear the stop flag and start the connector thread."""
    self.__stopped = False
    self.start()
    log.info("Starting Modbus connector")
def __get_services_and_chars(self):
    """Connect to every discovered BLE device, map its GATT services and
    characteristics, and process the characteristics of interest.

    For each device: (re)connect the peripheral, enumerate services and
    characteristics (optionally including descriptors when 'buildDevicesMap'
    is enabled), run first-time processing for new devices, then read/process
    the configured characteristics and push converted data to the gateway.
    Disconnects are retried via __check_and_reconnect; a device that drops
    mid-processing is skipped until the next cycle.
    """
    for device in self.__devices_around:
        try:
            if self.__devices_around.get(
                    device) is not None and self.__devices_around[
                        device].get('scanned_device') is not None:
                log.debug('Connecting to device: %s', device)
                # Lazily create the Peripheral once and cache it.
                if self.__devices_around[device].get('peripheral') is None:
                    address_type = self.__devices_around[device][
                        'device_config'].get('addrType', "public")
                    peripheral = Peripheral(
                        self.__devices_around[device]['scanned_device'],
                        address_type)
                    self.__devices_around[device][
                        'peripheral'] = peripheral
                else:
                    peripheral = self.__devices_around[device][
                        'peripheral']
                # getState() raises when disconnected; reconnect then.
                try:
                    log.info(peripheral.getState())
                except BTLEInternalError:
                    peripheral.connect(
                        self.__devices_around[device]['scanned_device'])
                try:
                    services = peripheral.getServices()
                except BTLEDisconnectError:
                    self.__check_and_reconnect(device)
                    services = peripheral.getServices()
                for service in services:
                    if self.__devices_around[device].get(
                            'services') is None:
                        log.debug(
                            'Building device %s map, it may take a time, please wait...',
                            device)
                        self.__devices_around[device]['services'] = {}
                    service_uuid = str(service.uuid).upper()
                    if self.__devices_around[device]['services'].get(
                            service_uuid) is None:
                        self.__devices_around[device]['services'][
                            service_uuid] = {}
                    try:
                        characteristics = service.getCharacteristics()
                    except BTLEDisconnectError:
                        self.__check_and_reconnect(device)
                        characteristics = service.getCharacteristics()
                    if self.__config.get('buildDevicesMap', False):
                        # Full map: store characteristics with descriptors.
                        for characteristic in characteristics:
                            descriptors = []
                            self.__check_and_reconnect(device)
                            try:
                                descriptors = characteristic.getDescriptors(
                                )
                            except BTLEDisconnectError:
                                self.__check_and_reconnect(device)
                                descriptors = characteristic.getDescriptors(
                                )
                            except BTLEGattError as e:
                                log.debug(e)
                            except Exception as e:
                                log.exception(e)
                            characteristic_uuid = str(
                                characteristic.uuid).upper()
                            if self.__devices_around[device][
                                    'services'][service_uuid].get(
                                        characteristic_uuid) is None:
                                self.__check_and_reconnect(device)
                                self.__devices_around[device][
                                    'services'][service_uuid][
                                        characteristic_uuid] = {
                                            'characteristic':
                                            characteristic,
                                            'handle':
                                            characteristic.handle,
                                            'descriptors': {}
                                        }
                            for descriptor in descriptors:
                                log.debug(descriptor.handle)
                                log.debug(str(descriptor.uuid))
                                log.debug(str(descriptor))
                                self.__devices_around[device][
                                    'services'][service_uuid][
                                        characteristic_uuid][
                                            'descriptors'][
                                                descriptor.
                                                handle] = descriptor
                    else:
                        # Light map: characteristics only, no descriptors.
                        for characteristic in characteristics:
                            characteristic_uuid = str(
                                characteristic.uuid).upper()
                            self.__devices_around[device]['services'][
                                service_uuid][characteristic_uuid] = {
                                    'characteristic': characteristic,
                                    'handle': characteristic.handle
                                }
                if self.__devices_around[device]['is_new_device']:
                    log.debug('New device %s - processing.', device)
                    self.__devices_around[device]['is_new_device'] = False
                    self.__new_device_processing(device)
                for interest_char in self.__devices_around[device][
                        'interest_uuid']:
                    # Deduplicate the read configs by method name (upper-cased),
                    # so each method is executed once per characteristic.
                    characteristics_configs_for_processing_by_methods = {}
                    for configuration_section in self.__devices_around[
                            device]['interest_uuid'][interest_char]:
                        characteristic_uuid_from_config = configuration_section[
                            'section_config'].get("characteristicUUID")
                        if characteristic_uuid_from_config is None:
                            log.error(
                                'Characteristic not found in config: %s',
                                pformat(configuration_section))
                            continue
                        method = configuration_section[
                            'section_config'].get('method')
                        if method is None:
                            log.error('Method not found in config: %s',
                                      pformat(configuration_section))
                            continue
                        characteristics_configs_for_processing_by_methods[
                            method.upper()] = {
                                "method": method,
                                "characteristicUUID":
                                characteristic_uuid_from_config
                            }
                    for method in characteristics_configs_for_processing_by_methods:
                        data = self.__service_processing(
                            device,
                            characteristics_configs_for_processing_by_methods[
                                method])
                        for section in self.__devices_around[device][
                                'interest_uuid'][interest_char]:
                            converter = section['converter']
                            converted_data = converter.convert(
                                section, data)
                            self.statistics[
                                'MessagesReceived'] = self.statistics[
                                    'MessagesReceived'] + 1
                            log.debug(data)
                            log.debug(converted_data)
                            self.__gateway.send_to_storage(
                                self.get_name(), converted_data)
                            self.statistics[
                                'MessagesSent'] = self.statistics[
                                    'MessagesSent'] + 1
        except BTLEDisconnectError:
            log.debug('Connection lost. Device %s', device)
            continue
        except Exception as e:
            log.exception(e)
def __rpc_cancel_processing(self, iocb):
    """Log that the RPC request carried by the given IOCB was cancelled."""
    log.info("RPC with iocb %r - cancelled.", iocb)
def close(self):
    """Stop the connector: set the stop flag and close all master connections."""
    self.__stopped = True
    self.__stop_connections_to_masters()
    log.info('%s has been stopped.', self.get_name())