def stopkill(self):
    """Called to stop all scheduled RepeatedFunctions"""
    try:
        self.stop_event.set()  # no more scheduling of new events
        while len(self.running_rep_functions[:]) != 0:
            for repfunc_thread in self.running_rep_functions[:]:
                repfunc_thread.stop_event.set()  # stop this RepeatedFunction
            for repfunc_thread in self.running_rep_functions[:]:
                if not repfunc_thread.is_alive():
                    self.running_rep_functions.remove(repfunc_thread)
                    logger.debug("removed {}".format(repfunc_thread))
        # now self.running_rep_functions is empty
        self.stop_event.clear()
    except Exception as e:
        logger.error("Unexpected error, sending to cloud {}".format(e))
        formatted_publish_message(
            topic=TOPIC_STATE,
            payload="Error stopping old timing events, consider restarting device: {}".format(e),
            c_queue=pub_queue)

def start_publish(self):
    logger.debug("Start the MQTT CLIENT")
    self.run_publish = True
    while self.run_publish:
        self.start_new_connection()
        self.publish_data()
    logger.debug("Ended the MQTT CLIENT")

def run(self): """runs a function every interval seconds. """ self.sleep_until = time.time() while not self.stop_event.is_set( ): #until calls [object].stop_event.set() self.function(*self.args, **self.kwargs) if self.interval == 0: logger.debug("Repeating function only once: {} {} ".format( self.args, self.kwargs)) self.stop_event.set( ) #self terminate, as with interval = 0 it is wished that is not repeated self.sleep_until += self.interval self.stop_event.wait(timeout=max( 0, self.sleep_until - time.time())) #wait until timeout or until stop_event.is_set() return #end
def run(self):
    self.alive = True
    while True:  # endless loop of reconfigure, connect_serial and read_modbus_event
        logger.debug("RESTART the Modbus reader")
        try:
            self.reconfigure()
            while self.alive:  # do until stopkill is called
                self.connect_serial()  # serial connected
                self.read_modbus_event()
            while not self.alive:  # do until startup is called
                logger.debug("Modbus reader halted")
                time.sleep(2)
        except Exception as ex:
            logger.error("Unexpected run modbus error {}".format(ex))
            time.sleep(1)

def connect_serial(self):
    """Connect the serial port and the Modbus RTU master over the serial port"""
    logger.debug("calling connect serial")
    try:
        if self.serial_port != "undefined":  # if already initialised
            self.serial_port.close()
            logger.debug("serial port closed successfully")
    except Exception:
        pass
    try:
        self.serial_connected = False
        self.master_status = self.max_master_attemps
        self.serial_port = serial.Serial(
            port=self.port_config["port"],
            baudrate=self.port_config["baudrate"],
            bytesize=self.port_config["databits"],
            parity=self.port_config["parity"],
            stopbits=self.port_config["stopbits"],
            xonxoff=0)
        logger.debug("connected serial")
        self.master = modbus_rtu.RtuMaster(self.serial_port)
        self.master.set_timeout(self.port_config["timeout_connection"])  # max wait time for Modbus answers
        self.master.set_verbose(False)  # if True, log additional Modbus info
        # successful, set flags
        logger.debug("connected rtu master")
        self.serial_connected = True
        self.master_status = 0
        with self.timing_queue.mutex:  # reset timing events queue
            self.timing_queue.queue.clear()
    except Exception as e:
        time.sleep(3)
        logger.error("ERROR Serial port connection not successful {}".format(e))
        self.serial_connected = False

def read_modbus_event(self):
    """Take read/write operations from the timing_queue and execute them with the Modbus RTU master."""
    logger.debug("calling read_modbus_event")
    while self.serial_connected and self.alive and self.master_status < self.max_master_attemps - 1:
        request = self.timing_queue.get()  # blocking call until a new request is received
        if not self.alive:  # discontinue if the reader should be stopped
            self.timing_queue.task_done()
            break
        slave_id = request["slave_id"]
        startadress = request["startadress"]
        function_code = request["function_code"]
        display_name = request["display_name"]
        result = (99999, "modbus_request_not_possible")  # default
        try:
            if "quantity_of_x" in request:
                result = self.master.execute(
                    slave=slave_id,
                    function_code=function_code,
                    starting_address=startadress,
                    quantity_of_x=request["quantity_of_x"])  # read
            elif "output_value" in request:
                result = self.master.execute(
                    slave=slave_id,
                    function_code=function_code,
                    starting_address=startadress,
                    output_value=request["output_value"])  # write
            logger.debug("slave no {}, starting_adress {} with name {} and {}".format(
                slave_id, startadress, display_name, str(result)))
            self.master_status = 0  # successful read, reset to 0
        except ModbusError as exc:
            self.master_status = self.master_status + 1  # unsuccessful, increase the status
            logger.warning("ModbusError in read_modbus_event {}".format(exc))
        except Exception as ex:  # other error, e.g. serial port
            logger.warning("Unexpected error in read_modbus_event {}".format(ex))
            self.master_status = self.master_status + 5  # unsuccessful, increase the status
            self.serial_connected = False
        payload = {
            "na": display_name,
            "res": result,
            "sl": slave_id,
            "time": time.time()
        }
        formatted_publish_message(topic=TOPIC_EVENT,
                                  payload=payload,
                                  c_queue=self.publishing_queue)
        self.timing_queue.task_done()
    logger.warning("Disconnected the Modbus, restarting")
    try:
        if self.serial_port != "undefined":  # if not just initialised
            self.serial_port.close()
    except Exception as ex:
        logger.error("error closing serial port: {}".format(ex))

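# Shape of the items consumed from timing_queue, as produced by startup() below: a read
# request carries "quantity_of_x", a write request carries "output_value". The concrete
# values shown here are only an example:
#
#     {"slave_id": 1, "startadress": 0, "function_code": 3,
#      "display_name": "voltage_l1", "quantity_of_x": 2}    # read
#     {"slave_id": 1, "startadress": 10, "function_code": 6,
#      "display_name": "relay_1", "output_value": 1}        # write
#
# Each result is published on TOPIC_EVENT as
# {"na": display_name, "res": result, "sl": slave_id, "time": ...}.
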
def startup(self):
    """Start or restart the scheduling events"""
    # Step 1: Figure out what the current setup_modbus.json is and its slaveconfig
    unused_port_config, slaveconfig = read_setup(self.publishing_queue)  # wait to get the newest setup
    try:
        while not self.stop_event.is_set():
            # Step 2: For every operation in the slaveconfig: find out the details
            for slave_name in slaveconfig:  # loop through all existing slaves
                operations = slaveconfig[slave_name]["operations"]
                slave_id = slaveconfig[slave_name]["slave_id"]
                for operation in operations:  # loop over all operations of a specific slave
                    # [Start get information of operation]
                    op = operations[operation]
                    interval = op["sampling_interval"]  # interval for scheduling events
                    process_request = {
                        "slave_id": slave_id,
                        "startadress": op["startadress"],
                        "function_code": op["function_code"],
                        "display_name": op["display_name"],
                    }
                    if "quantity_of_x" in op:
                        process_request["quantity_of_x"] = op["quantity_of_x"]
                    elif "output_value" in op:
                        process_request["output_value"] = op["output_value"]
                    else:
                        logger.error("FATAL ERROR, no output_value or quantity_of_x {}".format(process_request))
                    # [End get information of operation]
                    self.stop_event.wait(self.timeout_between_functions)  # timeout, so different requests are executed at different times
                    if self.stop_event.is_set():
                        logger.warning("Error: unwanted break in scheduling new RepeatedFunctions")
                        return
                    # Step 3: For a specific operation, schedule a RepeatedFunction every interval seconds
                    # [Start schedule new RepeatedFunction]
                    rf = RepeatedFunction(interval, self.query_task, (self, process_request))
                    rf.daemon = True
                    rf.start()
                    self.running_rep_functions.append(rf)
                    logger.debug("scheduled {} \t at interval {}".format(
                        process_request,
                        (str(interval) + "s") if interval != 0 else "once occurring"))
                    # [End schedule new RepeatedFunction]
            logger.debug("ending this scheduler thread {}".format(self.running_rep_functions))
            return
    except Exception as e:  # should never occur
        logger.error("Error scheduling new timing events, consider restarting device: {}".format(e))
        formatted_publish_message(
            topic=TOPIC_STATE,
            payload="Error scheduling new timing events, consider restarting device: {}".format(e),
            c_queue=pub_queue)

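# Expected shape of the slaveconfig read from setup_modbus.json, with the field names
# taken from the parsing in startup() above; the concrete names and values are only
# an illustrative example:
#
#     {
#         "energy_meter_1": {
#             "slave_id": 1,
#             "operations": {
#                 "read_voltage": {
#                     "sampling_interval": 10,
#                     "startadress": 0,
#                     "function_code": 3,
#                     "display_name": "voltage_l1",
#                     "quantity_of_x": 2
#                 }
#             }
#         }
#     }
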
def run(self): """Start of the Thread""" logger.debug("Start the Scheduler ") self.startup()
def publish_data(self):
    """Publish data from the publishing queue via MQTT.

    start_new_connection and initial_start_client must be run first.
    Does nothing unless self.connection_working and self.run_publish
    are set to True beforehand.
    """
    try:
        while self.connection_working and self.run_publish:
            # [Start get message from queue]
            while True:
                try:
                    # [Start do exponential backoff and reconnect]
                    if self.should_backoff and (datetime.datetime.utcnow() - self.last_client_restart).seconds > 5:
                        logger.info("Backoff detected, reconnect")
                        self.do_exponential_backoff()
                        self.start_new_connection()
                    # [End do exponential backoff and reconnect]
                    publish_request = self.publishing_queue.get(timeout=1)  # wait for the newest message queued to be published
                    break  # an element was in the queue
                except queue.Empty:
                    pass
            sub_topic = publish_request["sub_topic"]
            payload = publish_request["payload"]
            qos = int(publish_request["qos"])
            mqtt_topic = "/devices/{}/{}".format(DEVICE_ID, sub_topic)  # where the message is published
            # [End get message from queue]
            # [START jwt_refresh]
            seconds_since_issue = (datetime.datetime.utcnow() - self.jwt_iat).seconds
            if seconds_since_issue + 60 >= int(60 * JWT_EXPIRES_MINUTES):
                logger.info("Refreshing JWT token and connection after {}s".format(seconds_since_issue))
                self.start_new_connection()
            # [END jwt_refresh]
            # [START precaution before publishing on a recently opened connection]
            elapsed_seconds_since_restart = (datetime.datetime.utcnow() - self.last_client_restart).seconds
            if elapsed_seconds_since_restart < 20:  # if paho mqtt is not ready yet
                if elapsed_seconds_since_restart < 5:
                    logger.debug("MQTT just after connection restart, pausing message send for 5 seconds")
                    time.sleep(max(0, 5 - elapsed_seconds_since_restart))  # up to 5 seconds sleep
                if qos == 0:
                    logger.debug("Set message from qos 0 to qos 1")
                    qos = 1  # use qos 1 until the connection is stable and fully set up
            # [END precaution before publishing on a recently opened connection]
            # [Start state message delay]
            if sub_topic == TOPIC_STATE:
                # TOPIC_STATE can be refreshed no more than once per second and 6000 times per minute
                # per project in the Google Cloud; optimally once every 5 or 10 seconds at most.
                time_difference_state = (datetime.datetime.utcnow() - self.last_state_message_queued).seconds
                if 0 <= time_difference_state < 5:
                    logger.info("State message, sleeping for {}".format(5 - time_difference_state))
                    time.sleep(max(0, 5 - time_difference_state))
                self.last_state_message_queued = datetime.datetime.utcnow()
            # [End state message delay]
            self.client.publish(mqtt_topic, payload, qos=qos, retain=True)  # publish payload to the MQTT topic
            self.publishing_queue.task_done()
    except Exception as e:
        logger.error('An error occurred during publishing data {}'.format(e))
        self.connection_working = False

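# Shape of the items consumed from publishing_queue, with the keys taken from
# publish_data() above; the concrete values are illustrative only:
#
#     {"sub_topic": TOPIC_STATE, "payload": "device ok", "qos": 1}
#
# Each such message is published on the MQTT topic /devices/<DEVICE_ID>/<sub_topic>.
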
def on_log(self, client, userdata, level, buf):
    """Paho callback when a client has logging information"""
    logger.debug("Paho Log: {}".format(buf))