def __init__(self, internal, topic_params, config, emptyValue=None, id=None,
             section=None, prepare_topic_qos=True, sub_pub=False,
             connect_check_flag=False):
    """Generic data receiver; connects via the configured channel (MQTT or ZMQ).

    :param internal: True when the topic is an internal (framework) topic.
    :param topic_params: dict of topic/broker parameters.
    :param config: parsed configuration object (configparser-like).
    :param emptyValue: template dict used to initialise ``self.data``
        (defaults to an empty dict).
    :param id: framework instance id, used for logging and Redis keys.
    :param section: config section for connection params; defaults to "IO".
    """
    super(DataReceiver, self).__init__()
    self.logger = MessageLogger.get_logger(__name__, id)
    self.stop_request = False
    self.internal = internal
    self.topic_params = topic_params
    self.prepare_topic_qos = prepare_topic_qos
    # Fix: the original signature used a mutable default (emptyValue={}),
    # which is shared across every instance of the class; use a None
    # sentinel and create a fresh dict per instance instead.
    self.emptyValue = {} if emptyValue is None else emptyValue
    self.connect_check_flag = connect_check_flag
    self.data = self.emptyValue.copy()
    self.data_update = False
    self.config = config
    self.channel = "MQTT"
    self.topics = None
    self.port = None
    self.host_params = {}
    self.first_time = 0
    self.last_time = 0
    self.id = id
    self.section = section
    self.redisDB = RedisDB()
    self.sub_pub = sub_pub
    if self.section is None:
        self.section = "IO"
    # setup() is expected to populate channel/topics/port/host_params
    self.setup()
    if self.channel == "MQTT":
        self.init_mqtt(self.topics)
    elif self.channel == "ZMQ":
        self.init_zmq(self.topics)
def __init__(self, internal, topic_params, config, generic_name, id, buffer, dT, base_value_flag):
    """Buffered receiver for one generic input; wires up the base receiver.

    :param generic_name: logical name of the input this receiver handles.
    :param buffer: total number of samples to keep.
    :param dT: sampling period in seconds.
    """
    redisDB = RedisDB()
    self.logger = MessageLogger.get_logger(__name__, id)
    self.generic_name = generic_name
    self.buffer = buffer
    self.dT = dT
    self.base_value_flag = base_value_flag
    # Topic flags: "detachable" values are consumed once; "reuseable"
    # values persist across restarts (see BaseDataReceiver variant).
    if "detachable" in topic_params.keys():
        self.detachable = topic_params["detachable"]
    else:
        self.detachable = False
    if "reuseable" in topic_params.keys():
        self.reuseable = topic_params["reuseable"]
    else:
        self.reuseable = False
    self.start_of_day = datetime.datetime.now().replace(
        hour=0, minute=0, second=0, microsecond=0).timestamp()
    self.total_steps_in_day = floor(24 * 60 * 60 / self.dT)
    self.current_day_index = 0
    self.number_of_bucket_days = int(buffer / self.total_steps_in_day)
    self.bucket_index = False
    self.length = 1
    try:
        super().__init__(internal, topic_params, config, id=id)
    except Exception as e:
        # Fix: use the local `id` -- `self.id` is only assigned inside the
        # base __init__, so it may not exist when that very call fails
        # (which would raise AttributeError inside this handler).
        redisDB.set("Error mqtt" + id, True)
        self.logger.error(e)
def __init__(self, topic_params, config, buffer, save_path, topic_name, id,
             load_file_data, max_file_size_mins, influxDB):
    """Raw-data receiver: aggregates per-minute values and persists them.

    Starts a background thread that saves buffered data on a fixed cadence
    (``raw.data.file.save.frequency.sec`` from the IO section, default 3600s).
    """
    self.file_path = save_path
    # Fix: assign self.id before the try block; the original referenced
    # self.id inside the except handler before it was ever set, which
    # would raise AttributeError instead of recording the MQTT error.
    self.id = id
    redisDB = RedisDB()
    try:
        super().__init__(False, topic_params, config, [], id)
    except Exception as e:
        redisDB.set("Error mqtt" + id, True)
        logger.error(e)
    self.influxDB = influxDB
    self.buffer_data = []
    self.buffer = buffer
    self.current_minute = None
    self.sum = 0
    self.count = 0
    self.minute_data = []
    self.topic_name = topic_name
    self.max_file_size_mins = max_file_size_mins
    self.save_cron_freq = config.getint("IO", "raw.data.file.save.frequency.sec", fallback=3600)
    self.copy_file_data_to_influx()
    if load_file_data:
        self.load_data()
    self.file_save_thread = threading.Thread(target=self.save_to_file_cron,
                                             args=(self.save_cron_freq,))
    self.file_save_thread.start()
def __init__(self, config, id, topic_name, dT_in_seconds, control_frequency,
             horizon_in_steps, prediction_data_file_container,
             raw_data_file_container, topic_params, error_result_file_path,
             output_config, influxDB):
    """Error-calculation worker: stores run parameters and connects the base class.

    The base class is connected as external (internal=False) when the topic
    params can be resolved, otherwise as internal.
    """
    self.logger = MessageLogger.get_logger(__name__, id)
    self.id = id
    self.topic_name = topic_name
    self.topic_params = topic_params
    self.dT_in_seconds = dT_in_seconds
    self.control_frequency = control_frequency
    self.horizon_in_steps = horizon_in_steps
    self.raw_data_file_container = raw_data_file_container
    self.prediction_data_file_container = prediction_data_file_container
    self.error_result_file_path = error_result_file_path
    self.output_config = output_config
    self.influxDB = influxDB
    self.raw_data = RawDataReader()
    self.stopRequest = threading.Event()
    redis_db = RedisDB()
    try:
        internal = not self.update_topic_params()
        super().__init__(internal, self.topic_params, config, control_frequency, id)
    except Exception as exc:
        redis_db.set("Error mqtt" + self.id, True)
        self.logger.error(exc)
def exit_gracefully(self, signum, frame):
    """Signal handler: flag shutdown in Redis, then mark the loop to stop."""
    print("\nReceived {} signal".format(self.signals[signum]))
    print("Cleaning up resources. End of the program")
    from IO.redisDB import RedisDB
    db = RedisDB()
    db.set("End ofw", "True")
    # give other components a moment to observe the flag before exiting
    time.sleep(6)
    self.kill_now = True
def clear_redis(logger):
    """Best-effort removal of the Redis training lock key."""
    logger.info("reset redis training key locks")
    key = "training_lock"
    from IO.redisDB import RedisDB
    db = RedisDB()
    try:
        db.remove(key)
    except Exception:
        # the key simply was not there; nothing to clean up
        logger.debug("training_lock key does not exist")
def __init__(self, id, solver_name, model_path, control_frequency, repetition,
             output_config, input_config_parser, config, horizon_in_steps,
             dT_in_seconds, optimization_type):
    """Optimization controller: prepares the pyomo temp dir, solver limits and IO.

    Solver iteration/timeout limits come from the SolverSection of `config`.
    The input/output controllers and the monitor publisher are only built
    when no MQTT error was flagged in Redis for this instance id.
    """
    super(ControllerBase, self).__init__()
    self.logger = MessageLogger.get_logger(__name__, id)
    self.logger.info("Initializing optimization controller " + id)
    self.pyomo_path = os.path.abspath("/usr/src/app/logs/pyomo/")
    self.logger.debug("pyomo_path " + str(self.pyomo_path))
    if not os.path.exists(self.pyomo_path):
        try:
            os.makedirs(self.pyomo_path, mode=0o777, exist_ok=False)
            # Fix: the original called os.chmod twice in a row; once is enough.
            os.chmod(self.pyomo_path, 0o777)
        except Exception as e:
            self.logger.error(e)
    TempfileManager.tempdir = self.pyomo_path
    self.id = id
    self.results = ""
    self.model_path = os.path.abspath(model_path)
    self.solver_name = solver_name
    self.control_frequency = control_frequency
    self.repetition = repetition
    self.horizon_in_steps = horizon_in_steps
    self.dT_in_seconds = dT_in_seconds
    self.output_config = output_config
    self.input_config_parser = input_config_parser
    self.stopRequest = None  # threading.Event()
    self.redisDB = RedisDB()
    self.lock_key = "id_lock"
    self.optimization_type = optimization_type
    self.stop_signal_key = "opt_stop_" + self.id
    self.finish_status_key = "finish_status_" + self.id
    self.redisDB.set(self.stop_signal_key, False)
    self.redisDB.set(self.finish_status_key, False)
    self.repetition_completed = False
    self.preprocess = False
    self.input = None
    self.output = None
    self.solver_ipopt_max_iteration = config.getint("SolverSection", "solver.ipopt.max.iteration", fallback=1000)
    self.solver_ipopt_timeout = config.getint("SolverSection", "solver.ipopt.timeout", fallback=120)
    self.solver_gurobi_max_iteration = config.getint("SolverSection", "solver.gurobi.max.iteration", fallback=1000)
    self.solver_gurobi_timeout = config.getint("SolverSection", "solver.gurobi.timeout", fallback=3)
    # NOTE: the Redis value is checked as a substring ("False" in value),
    # matching how the flag is stored elsewhere in this codebase.
    if "False" in self.redisDB.get("Error mqtt" + self.id):
        self.output = OutputController(self.id, self.output_config)
    if "False" in self.redisDB.get("Error mqtt" + self.id):
        self.input = InputController(self.id, self.input_config_parser, config,
                                     self.control_frequency,
                                     self.horizon_in_steps, self.dT_in_seconds)
    self.monitor = MonitorPub(config, id)
class PVForecastPublisher(DataPublisher):
    """Publishes PV forecast data pulled from a queue as SenML JSON."""

    def __init__(self, internal_topic_params, config, id, control_frequency,
                 horizon_in_steps, dT_in_seconds, q):
        """Store parameters and connect the base publisher; flag MQTT errors in Redis."""
        self.logger = MessageLogger.get_logger(__name__, id)
        self.pv_data = {}
        self.q = q
        self.control_frequency = control_frequency
        self.horizon_in_steps = horizon_in_steps
        self.dT_in_seconds = dT_in_seconds
        self.topic = "P_PV"
        self.redisDB = RedisDB()
        self.id = id
        try:
            super().__init__(True, internal_topic_params, config, control_frequency, id)
        except Exception as e:
            self.redisDB.set("Error mqtt" + self.id, True)
            self.logger.error(e)

    def get_data(self):
        """Return the latest PV forecast as a SenML JSON string, or None."""
        # check if new data is available
        if not self.redisDB.get_bool(Constants.get_data_flow_key(self.id)):
            return None
        self.logger.debug("Getting PV data from Queue")
        if not self.q.empty():
            try:
                new_data = self.q.get_nowait()
                self.logger.debug("new data " + str(new_data))
                self.q.task_done()
                self.pv_data = new_data
                self.logger.debug("extract pv data")
                data = self.convert_to_senml()
                return data
            except Exception as e:
                # Fix: the original logged the constant "Queue empty" for any
                # failure here, hiding real conversion errors; log the exception.
                self.logger.error(e)
        else:
            self.logger.debug("PV Queue empty")
        return None

    def convert_to_senml(self):
        """Convert self.pv_data rows of (time, value) into a SenML JSON string."""
        meas = []
        if len(self.pv_data) > 0:
            for row in self.pv_data:
                meas.append(self.get_senml_meas(float(row[1]), row[0]))
        doc = senml.SenMLDocument(meas)
        val = doc.to_json()
        return json.dumps(val)

    def get_senml_meas(self, value, time):
        """Build one SenML measurement for the P_PV topic.

        `time` may be a float epoch or a datetime-like with .timestamp().
        """
        if not isinstance(time, float):
            time = float(time.timestamp())
        meas = senml.SenMLMeasurement()
        meas.time = time
        meas.value = value
        meas.name = self.topic
        return meas
def __init__(self, internal, topic_params, config, generic_name, id, event_callback):
    """Event receiver: stores the callback invoked when a message arrives."""
    redisDB = RedisDB()
    self.logger = MessageLogger.get_logger(__name__, id)
    self.generic_name = generic_name
    self.event_callback = event_callback
    try:
        super().__init__(internal, topic_params, config, id=id)
    except Exception as e:
        # Fix: use the local `id` -- `self.id` is set by the base __init__
        # and may be missing when that call is what raised.
        redisDB.set("Error mqtt" + id, True)
        self.logger.error(e)
def __init__(self, id=None, output_config=None):
    """Create the output controller; start MQTT publishers when configured."""
    self.logger = MessageLogger.get_logger(__name__, id)
    self.logger.info("Output Class started")
    self.id = id
    self.output_config = output_config
    self.mqtt = {}
    self.mqtt_params = {}
    self.output_mqtt = {}
    self.redisDB = RedisDB()
    self.config_parser_utils = ConfigParserUtils()
    self.logger.debug("output_config: " + str(self.output_config) + " " + str(type(self.output_config)))
    if self.output_config is None:
        return
    self.extract_mqtt_params()
    self.init_mqtt()
def __init__(self, internal_topic_params, config, id, control_frequency,
             horizon_in_steps, dT_in_seconds, q):
    """PV publisher setup: remember the queue and connect the base publisher."""
    self.logger = MessageLogger.get_logger(__name__, id)
    self.id = id
    self.q = q
    self.pv_data = {}
    self.topic = "P_PV"
    self.control_frequency = control_frequency
    self.horizon_in_steps = horizon_in_steps
    self.dT_in_seconds = dT_in_seconds
    self.redisDB = RedisDB()
    try:
        super().__init__(True, internal_topic_params, config, control_frequency, id)
    except Exception as exc:
        self.redisDB.set("Error mqtt" + self.id, True)
        self.logger.error(exc)
def clear_redis(logger):
    """Reset Redis, record the start time and return the client."""
    logger.info("reset redis")
    from IO.redisDB import RedisDB
    db = RedisDB()
    db.reset()
    db.set("time", time.time())
    db.set("End ofw", "False")
    return db
def __init__(self, model_name, control_frequency, horizon_in_steps,
             dT_in_seconds, repetition, solver, id, optimization_type,
             single_ev):
    """Store the run parameters for one optimization instance."""
    self.logger = MessageLogger.get_logger(__name__, id)
    self.id = id
    self.model_name = model_name
    self.solver = solver
    self.repetition = repetition
    self.control_frequency = control_frequency
    self.horizon_in_steps = horizon_in_steps
    self.dT_in_seconds = dT_in_seconds
    self.optimization_type = optimization_type
    self.single_ev = single_ev
    self.redisDB = RedisDB()
    self.pyro_mip_server = None
def __init__(self, internal, topic_params, config, generic_name, id, buffer, dT, base_value_flag):
    """Base data receiver for one generic input with optional persistence.

    Supports two topic flags: "detachable" (value is consumed once) and
    "reuseable" (received values are persisted to disk and reloaded on
    restart from ``persist.real.data.path``).
    """
    self.id = id
    self.redisDB = RedisDB()
    self.logger = MessageLogger.get_logger(__name__, id)
    self.generic_name = generic_name
    self.buffer = buffer  # total number of samples to keep
    self.dT = dT  # sampling period in seconds
    self.base_value_flag = base_value_flag
    self.set_data_update(False)
    # Where previously received ("real") values are persisted per input name.
    persist_real_data_path = config.get("IO", "persist.real.data.path", fallback="optimization/resources")
    persist_real_data_path = os.path.join("/usr/src/app", persist_real_data_path, id, "real")
    self.persist_real_data_file = os.path.join(persist_real_data_path, generic_name+".txt")
    if "detachable" in topic_params.keys():
        self.detachable = topic_params["detachable"]
    else:
        self.detachable = False
    if self.detachable:
        # tracks whether the one-shot value has already been consumed
        self.value_used_once = False
    if "reuseable" in topic_params.keys():
        self.reuseable = topic_params["reuseable"]
    else:
        self.reuseable = False
    if self.reuseable and not os.path.exists(persist_real_data_path):
        os.makedirs(persist_real_data_path)
    # Bucketing: index received samples relative to midnight of today.
    self.start_of_day = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0).timestamp()
    self.total_steps_in_day = floor(24 * 60 * 60 / self.dT)
    self.current_day_index = 0
    self.number_of_bucket_days = int(buffer / self.total_steps_in_day)
    self.bucket_index = False
    self.length = 1
    try:
        super(BaseDataReceiver, self).__init__(internal, topic_params, config, id=id)
    except Exception as e:
        # record the connection failure so other components can react
        self.redisDB.set("Error mqtt" + id, True)
        self.logger.error(e)
    if self.reuseable:
        # Reload previously persisted values so downstream consumers see
        # data immediately after a restart.
        formated_data = self.read_data()
        if formated_data is not None and len(formated_data) > 0:
            self.length = len(formated_data)
            self.data.update(formated_data)
            self.set_data_update(True)
            self.last_time = time.time()
def __init__(self, id=None, output_config=None):
    """Error-calculation output: extract MQTT params and start publishers."""
    self.logger = MessageLogger.get_logger(__name__, id)
    self.logger.info("Output Class started")
    self.id = id
    self.output_config = output_config
    self.mqtt = {}
    self.mqtt_params = {}
    self.output_mqtt = {}
    self.redisDB = RedisDB()
    self.logger.debug("output_config: " + str(self.output_config) + " " + str(type(self.output_config)))
    if self.output_config is None:
        return
    self.mqtt_params = ConfigParserUtils.extract_mqtt_params_output(
        self.output_config, "error_calculation", False)
    self.logger.debug("params = " + str(self.mqtt_params))
    self.init_mqtt()
def delete_output(id):  # noqa: E501
    """Deletes the output of the framework

     # noqa: E501

    :param id: Name of the registry to be deleted
    :type id: str

    :rtype: None
    """
    db = RedisDB()
    keys = db.get_keys_for_pattern("o:" + id + ":*")
    if keys is not None:
        for key in keys:
            db.remove(key)
    return "success"
def __init__(self, internal_topic_params, config, queue, publish_frequency,
             topic, id, horizon_in_steps, dT_in_seconds):
    """Forecast publisher setup: remember queue/topic, connect the base publisher."""
    self.logger = MessageLogger.get_logger(__name__, id)
    self.id = id
    self.q = queue
    self.topic = topic
    self.flag = True
    self.load_data = {}
    self.horizon_in_steps = horizon_in_steps
    self.dT_in_seconds = dT_in_seconds
    self.file_path = os.path.join("/usr/src/app", "optimization", "loadData.dat")
    self.redisDB = RedisDB()
    try:
        super().__init__(True, internal_topic_params, config, publish_frequency, id)
    except Exception as exc:
        self.redisDB.set("Error mqtt" + self.id, True)
        self.logger.error(exc)
def __init__(self, id, solver_name, model_path, control_frequency, repetition, output_config, input_config_parser, config, horizon_in_steps, dT_in_seconds, optimization_type): super().__init__() pyomo_path = "/usr/src/app/logs/pyomo_" + str(id) if not os.path.exists(pyomo_path): os.makedirs(pyomo_path, mode=0o777, exist_ok=False) os.chmod(pyomo_path, 0o777) TempfileManager.tempdir = pyomo_path self.logger = MessageLogger.get_logger(__name__, id) self.logger.info("Initializing optimization controller " + id) self.id = id self.results = "" self.model_path = model_path self.solver_name = solver_name self.control_frequency = control_frequency self.repetition = repetition self.horizon_in_steps = horizon_in_steps self.dT_in_seconds = dT_in_seconds self.output_config = output_config self.input_config_parser = input_config_parser self.stopRequest = None #threading.Event() self.redisDB = RedisDB() self.lock_key = "id_lock" self.optimization_type = optimization_type self.stop_signal_key = "opt_stop_" + self.id self.finish_status_key = "finish_status_" + self.id self.redisDB.set(self.stop_signal_key, False) self.redisDB.set(self.finish_status_key, False) self.repetition_completed = False self.preprocess = False self.input = None self.output = None if "False" in self.redisDB.get("Error mqtt" + self.id): self.output = OutputController(self.id, self.output_config) if "False" in self.redisDB.get("Error mqtt" + self.id): self.input = InputController(self.id, self.input_config_parser, config, self.control_frequency, self.horizon_in_steps, self.dT_in_seconds) """try:
def __init__(self, topic_params, config, buffer, training_data_size, save_path, topic_name, id):
    """Training-data receiver: buffers per-minute values and persists them to file."""
    self.file_path = save_path
    # Fix: assign self.id before the try; the original except handler read
    # self.id before it was ever assigned, raising AttributeError on failure.
    self.id = id
    redisDB = RedisDB()
    try:
        super().__init__(False, topic_params, config, [], id)
    except Exception as e:
        redisDB.set("Error mqtt" + id, True)
        logger.error(e)
    self.buffer_data = []
    self.buffer = buffer
    self.training_data_size = training_data_size
    self.current_minute = None
    self.sum = 0
    self.count = 0
    self.minute_data = []
    self.topic_name = topic_name
    self.load_data()
    self.file_save_thread = threading.Thread(target=self.save_to_file_cron)
    self.file_save_thread.start()
def framework_stop(id):  # noqa: E501
    """Command for stoping the framework

     # noqa: E501

    :param id: Id of the registry to be stopped
    :type id: str

    :rtype: None
    """
    try:
        redis_db = RedisDB()
        flag = redis_db.get("run:" + id)
        logger.debug("Flag " + str(flag))
        message = ""
        code = 200
        if flag is not None and flag == "running":
            logger.debug("System running and trying to stop")
            redis_db.set("run:" + id, "stop")
            time.sleep(1)
            flag = redis_db.get("run:" + id)
            logger.debug("Flag in stop: " + str(flag))
            # Fix: original condition was `flag is "stopped" or None`, which
            # (a) compared identity against a string literal and (b) had an
            # `or None` arm that is always falsy, so the None case never hit.
            if flag == "stopped" or flag is None:
                logger.debug("System stopped succesfully")
                message = "System stopped succesfully"
            elif "stopping" in flag:
                message = "System stopped succesfully"
                counter = 0
                # poll until the transition away from "stopping" (max ~15s)
                while "stopping" in flag:
                    flag = redis_db.get("run:" + id)
                    counter = counter + 1
                    if counter >= 15:
                        message = "system stopped succesfully"
                        break
                    else:
                        time.sleep(1)
                logger.debug("System stopped succesfully")
            else:
                message = "Problems while stopping the system"
                code = 500
        elif flag is not None and flag == "stopped":
            logger.debug("System already stopped")
            message = "System already stopped"
        elif flag is None:
            logger.debug("System already stopped")
            message = "System already stopped"
    except Exception as e:
        logger.error(e)
        message = "Error stoping the system"
        code = 500
    return message, code
def get_output(id):  # noqa: E501
    """Get ouput of the optimization

     # noqa: E501

    :param id: Name of the registry to be actualized
    :type id: str

    :rtype: OptimizationOutput
    """
    result = {}
    redisDB = RedisDB()
    output_keys = redisDB.get_keys_for_pattern("o:" + id + ":*")
    if output_keys is not None:
        meta = redisDB.get("id_meta:" + id)
        if meta is not None:
            meta = json.loads(meta)
            dT = meta["dT_in_seconds"]
            for key in output_keys:
                # key layout: "o:<id>:<topic>:<index>"
                sub_key = key.split(":")
                topic = sub_key[2]
                index = sub_key[3]
                json_value = json.loads(redisDB.get(key))
                # Fix: the original local was named `time`, shadowing the
                # stdlib `time` module imported at file level.
                ts = None
                value = 0
                # each value is a single-entry {timestamp: value} mapping
                for t, v in json_value.items():
                    ts = t
                    value = v
                    break
                if topic not in result:
                    result[topic] = {}
                if ts is not None:
                    t = float(ts) + int(index) * dT
                    result[topic][t] = float(value)
    logger.debug(result)
    return OptimizationOutput.from_dict(result)
def __init__(self, control_frequency, horizon_in_steps, num_timesteps,
             hidden_size, batch_size, num_epochs, raw_data_file,
             processingData, model_file_container, model_file_container_train,
             topic_name, id, dT_in_seconds, output_size, log):
    """Training thread state for one prediction topic."""
    super().__init__()
    self.logger = log
    self.id = id
    self.topic_name = topic_name
    self.control_frequency = control_frequency
    self.horizon_in_steps = horizon_in_steps
    self.dT_in_seconds = dT_in_seconds
    self.num_timesteps = num_timesteps
    self.hidden_size = hidden_size
    self.batch_size = batch_size
    self.num_epochs = num_epochs  # 10
    self.output_size = output_size
    # at least one input window plus the labels and a small margin
    self.min_training_size = num_timesteps + output_size + 5
    self.model_file_container = model_file_container
    self.model_file_container_train = model_file_container_train
    self.today = datetime.datetime.now().day
    self.processingData = processingData
    self.trained = False
    self.raw_data_file = raw_data_file
    self.stopRequest = threading.Event()
    self.redisDB = RedisDB()
    self.training_lock_key = "training_lock"
class ThreadFactory:
    """Creates, starts and stops the optimization/prediction threads for one id."""

    def __init__(self, model_name, control_frequency, horizon_in_steps,
                 dT_in_seconds, repetition, solver, id, optimization_type,
                 single_ev):
        self.logger = MessageLogger.get_logger(__name__, id)
        self.model_name = model_name
        self.control_frequency = control_frequency
        self.horizon_in_steps = horizon_in_steps
        self.dT_in_seconds = dT_in_seconds
        self.repetition = repetition
        self.solver = solver
        self.id = id
        self.optimization_type = optimization_type
        self.single_ev = single_ev
        self.redisDB = RedisDB()
        self.pyro_mip_server = None

    def getFilePath(self, dir, file_name):
        """Resolve a resource path inside the application root."""
        # Fix: dropped the unused project_dir computation from the original.
        return os.path.join("/usr/src/app", dir, file_name)

    def startOptControllerThread(self):
        """Build config, prediction threads and the optimization controller.

        Returns 0 when the controller thread was started, 1 on failure.
        Raises MissingKeysException when declared inputs have no data source.
        """
        self.logger.info("Creating optimization controller thread")
        self.logger.info("Number of repetitions: " + str(self.repetition))
        self.logger.info("Output with the following control_frequency: " + str(self.control_frequency))
        self.logger.info(
            "Optimization calculated with the following horizon_in_steps: " + str(self.horizon_in_steps))
        self.logger.info(
            "Optimization calculated with the following dT_in_seconds: " + str(self.dT_in_seconds))
        self.logger.info("Optimization calculated with the following model: " + self.model_name)
        self.logger.info(
            "Optimization calculated with the following solver: " + self.solver)
        self.logger.info(
            "Optimization calculated with the following optimization_type: " + self.optimization_type)
        self.redisDB.set("Error mqtt" + self.id, False)
        self.logger.debug("Error mqtt " + str(self.redisDB.get("Error mqtt" + self.id)))
        # Creating an object of the configuration file (standard values)
        try:
            config = configparser.RawConfigParser()
            config.read(
                self.getFilePath("optimization/resources", "ConfigFile.properties"))
        except Exception as e:
            self.logger.error(e)
        # Loads the model name if it was not given through the endpoint command/start/id
        if not self.model_name:
            self.model_name = config.get("SolverSection", "model.name")
        self.logger.debug("This is the model name: " + self.model_name)
        self.model_path = os.path.join(
            config.get("SolverSection", "model.base.path"), self.model_name) + ".py"
        self.logger.debug("This is the path of the model: " + str(self.model_path))
        # Loads the solver name if not specified in command/start/id
        if not self.solver:
            self.solver_name = config.get("SolverSection", "solver.name")
        else:
            self.solver_name = self.solver
        self.logger.debug(
            "Optimization calculated with the following solver: " + self.solver_name)
        ##############################################################################
        output_config = None
        try:
            # Reads the registry/output and stores it into an object
            path = os.path.join(os.getcwd(), "optimization/resources", str(self.id),
                                "Output.registry.mqtt")
            if not os.path.exists(path):
                self.logger.debug(
                    "Output.registry.mqtt not set, only file output available")
            else:
                with open(path, "r") as file:
                    output_config = json.loads(file.read())
        except Exception as e:
            self.logger.error(
                "Output.registry.mqtt not set, only file output available")
        try:
            # Reads the registry/input (file source) and stores it into an object
            path = os.path.join(os.getcwd(), "optimization/resources", str(self.id),
                                "Input.registry.file")
            if not os.path.exists(path):
                input_config_file = {}
                self.logger.debug("Not Input.registry.file present")
            else:
                with open(path, "r") as file:
                    input_config_file = json.loads(file.read())
                self.logger.debug("Input.registry.file found")
        except Exception as e:
            self.logger.error("Input file not found")
            input_config_file = {}
            self.logger.error(e)
        try:
            # Reads the registry/input (mqtt source) and stores it into an object
            path = os.path.join(os.getcwd(), "optimization/resources", str(self.id),
                                "Input.registry.mqtt")
            if not os.path.exists(path):
                input_config_mqtt = {}
                self.logger.debug("Not Input.registry.mqtt present")
            else:
                with open(path, "r") as file:
                    input_config_mqtt = json.loads(file.read())
                self.logger.debug("Input.registry.mqtt found")
        except Exception as e:
            self.logger.error("Input file not found")
            input_config_mqtt = {}
            self.logger.error(e)
        input_config_parser = InputConfigParser(input_config_file,
                                                input_config_mqtt,
                                                self.model_name, self.id,
                                                self.optimization_type)
        missing_keys = input_config_parser.check_keys_for_completeness()
        if len(missing_keys) > 0:
            raise MissingKeysException(
                "Data source for following keys not declared: " + str(missing_keys))
        self.prediction_threads = {}
        self.prediction_names = input_config_parser.get_prediction_names()
        if self.prediction_names is not None and len(self.prediction_names) > 0:
            for prediction_name in self.prediction_names:
                flag = input_config_parser.get_forecast_flag(prediction_name)
                if flag:
                    self.logger.info(
                        "Creating prediction controller thread for topic " + str(prediction_name))
                    topic_param = input_config_parser.get_params(prediction_name)
                    parameters = json.dumps({
                        "control_frequency": self.control_frequency,
                        "horizon_in_steps": self.horizon_in_steps,
                        "topic_param": topic_param,
                        "dT_in_seconds": self.dT_in_seconds
                    })
                    self.redisDB.set("train:" + self.id + ":" + prediction_name,
                                     parameters)
                    self.prediction_threads[prediction_name] = LoadPrediction(
                        config, self.control_frequency, self.horizon_in_steps,
                        prediction_name, topic_param, self.dT_in_seconds,
                        self.id, True)
                    # self.prediction_threads[prediction_name].start()
        self.non_prediction_threads = {}
        self.non_prediction_names = input_config_parser.get_non_prediction_names()
        if self.non_prediction_names is not None and len(self.non_prediction_names) > 0:
            for non_prediction_name in self.non_prediction_names:
                flag = input_config_parser.get_forecast_flag(non_prediction_name)
                if flag:
                    if non_prediction_name == "P_PV":
                        self.non_prediction_threads[non_prediction_name] = PVPrediction(
                            config, input_config_parser, self.id,
                            self.control_frequency, self.horizon_in_steps,
                            self.dT_in_seconds, non_prediction_name)
                        self.non_prediction_threads[non_prediction_name].start()
        # Initializing constructor of the optimization controller thread
        if self.optimization_type == "MPC":
            self.opt = OptControllerMPC(
                self.id, self.solver_name, self.model_path,
                self.control_frequency, self.repetition, output_config,
                input_config_parser, config, self.horizon_in_steps,
                self.dT_in_seconds, self.optimization_type)
        elif self.optimization_type == "discrete":
            self.opt = OptControllerDiscrete(
                self.id, self.solver_name, self.model_path,
                self.control_frequency, self.repetition, output_config,
                input_config_parser, config, self.horizon_in_steps,
                self.dT_in_seconds, self.optimization_type)
        elif self.optimization_type == "stochastic":
            self.opt = OptControllerStochastic(
                self.id, self.solver_name, self.model_path,
                self.control_frequency, self.repetition, output_config,
                input_config_parser, config, self.horizon_in_steps,
                self.dT_in_seconds, self.optimization_type, self.single_ev)
        try:
            # starts the optimization controller thread
            self.logger.debug("Mqtt issue " + str(self.redisDB.get("Error mqtt" + self.id)))
            if "False" in self.redisDB.get("Error mqtt" + self.id):
                self.opt.start()
                self.logger.debug("Optimization object started")
                return 0
            else:
                self.redisDB.set("run:" + self.id, "stopping")
                self.stopOptControllerThread()
                self.redisDB.set("run:" + self.id, "stopped")
                self.logger.error("Optimization object could not be started")
                return 1
        except Exception as e:
            self.logger.error(e)
            return 1

    def stopOptControllerThread(self):
        """Stop the prediction and optimization threads for this id."""
        try:
            # stop as per ID
            for name, obj in self.prediction_threads.items():
                self.redisDB.remove("train:" + self.id + ":" + name)
                obj.Stop()
            for name, obj in self.non_prediction_threads.items():
                obj.Stop()
            self.logger.info("Stopping optimization controller thread")
            self.opt.Stop()
            del self.opt
            self.logger.info("Optimization controller thread stopped")
            # Fix: the original did `del self.logger` here, which made the
            # except handler below fail with AttributeError if anything
            # raised afterwards, and broke subsequent logging.
            return "Optimization controller thread stopped"
        except Exception as e:
            self.logger.error(e)
            return e

    def is_running(self):
        """True while the optimization controller has not finished."""
        return not self.opt.get_finish_status()

    def update_training_params(self, key, parameters):
        """Periodically refresh the training parameters stored under `key`."""
        while True:
            self.redisDB.set(key, parameters)
            # Fix: the original called time.sleep("60") -- sleep requires a
            # number, so this raised TypeError on the first iteration.
            time.sleep(60)
def __init__(self):
    """Track per-id factories, status threads and run flags."""
    self.redisDB = RedisDB()
    self.lock_key = "id_lock"
    self.factory = {}
    self.statusThread = {}
    self.running = {}
class ForecastPublisher(DataPublisher):
    """Publishes load forecast data pulled from a queue as SenML JSON."""

    def __init__(self, internal_topic_params, config, queue, publish_frequency,
                 topic, id, horizon_in_steps, dT_in_seconds):
        """Store parameters and connect the base publisher; flag MQTT errors in Redis."""
        self.logger = MessageLogger.get_logger(__name__, id)
        self.load_data = {}
        self.flag = True
        self.file_path = os.path.join("/usr/src/app", "optimization", "loadData.dat")
        self.q = queue
        self.topic = topic
        self.horizon_in_steps = horizon_in_steps
        self.dT_in_seconds = dT_in_seconds
        self.id = id
        self.redisDB = RedisDB()
        try:
            super().__init__(True, internal_topic_params, config, publish_frequency, id)
        except Exception as e:
            self.redisDB.set("Error mqtt" + self.id, True)
            self.logger.error(e)

    def get_data(self):
        """Return the latest horizon of load data as SenML JSON, or None."""
        try:
            if not self.redisDB.get_bool(Constants.get_data_flow_key(self.id)):
                return None
            # check if new data is available
            if not self.q.empty():
                try:
                    new_data = self.q.get_nowait()
                    self.q.task_done()
                    self.load_data = new_data
                except Exception:
                    self.logger.debug("Queue empty")
            if not self.load_data:
                return None
            self.logger.debug("extract load data")
            data = self.extract_horizon_data()
            self.logger.debug(str(data))
            return data
        except Exception as e:
            self.logger.error(str(e))
            return None

    def extract_horizon_data(self):
        """Build SenML for the last horizon_in_steps entries, clamping negatives to 0."""
        # Fix: the original named this local `list`, shadowing the builtin.
        items = sorted(self.load_data.items())
        items = items[-self.horizon_in_steps:]
        meas = []
        for i in range(self.horizon_in_steps):
            value = items[i][1]
            if value < 0:
                value = 0
            meas.append(self.get_senml_meas(value, items[i][0]))
        doc = senml.SenMLDocument(meas)
        val = doc.to_json()
        return json.dumps(val)

    def get_senml_meas(self, value, time):
        """Build one SenML measurement for this publisher's topic.

        `time` may be a float epoch or a datetime-like with .timestamp().
        """
        if not isinstance(time, float):
            time = float(time.timestamp())
        meas = senml.SenMLMeasurement()
        meas.time = time
        meas.value = value
        meas.name = self.topic
        return meas
def __init__(self, config, horizon_in_steps, topic_name, dT_in_seconds, id, type, opt_values):
    """Machine-learning controller for one prediction topic ("load" or "pv").

    Configures model hyper-parameters per topic type, resolves per-id
    model/raw-data file paths and prepares the processing pipeline.
    """
    super(MachineLearning, self).__init__()
    self.logger = MessageLogger.get_logger(__name__, id)
    self.horizon_in_steps = horizon_in_steps
    self.topic_name = topic_name
    self.dT_in_seconds = dT_in_seconds
    self.id = id
    self.type = type  # "load" or "pv"; selects the model configuration below
    self.redisDB = RedisDB()
    self.influxDB = InfluxDBManager()
    if self.type == "load":
        # presumably minute-resolution model data over a full day (1440 min)
        # -- TODO confirm against the training code
        self.model_data_dT = 60
        self.input_size = 1440
        self.hidden_size = 100
        self.batch_size = 1
        self.num_epochs = 10
        self.output_size = 1440
        self.processingData = ProcessingData(type)
        self.model_file_container_base = os.path.join(
            "/usr/src/app/prediction/model", "model_base.h5")
    elif self.type == "pv":
        self.model_data_dT = 60
        self.input_size = 1
        self.input_size_hist = 24
        self.hidden_size = 100
        self.batch_size = 1
        self.num_epochs = 10
        self.output_size = 1440
        # Fallback location used when the pv metadata lacks City/Country.
        city = "Bonn"
        country = "Germany"
        self.logger.info("opt va " + str(opt_values))
        try:
            if "City" in opt_values.keys(
            ) and "Country" in opt_values.keys():
                # City/Country are mappings; only the first value is used
                for k, v in opt_values["City"].items():
                    city = v
                    break
                for k, v in opt_values["Country"].items():
                    country = v
                    break
            else:
                self.logger.error("City or country not present in pv meta")
        except Exception:
            self.logger.error("City or country not present in pv meta")
        location = {"city": city, "country": country}
        # Historical radiation data feeds the pv processing pipeline.
        radiation = Radiation(config, 1, dT_in_seconds, location, horizon_in_steps)
        hist_data = radiation.get_complete_data()
        self.processingData = ProcessingData(type, hist_data)
        self.model_file_container_base = os.path.join(
            "/usr/src/app/prediction/model", "model_base_pv.h5")
    # Per-id storage for raw data and the model variants (current/temp/train).
    base_path = "/usr/src/app/prediction/resources"
    dir_data = os.path.join(base_path, self.id)
    if not os.path.exists(dir_data):
        os.makedirs(dir_data)
    self.raw_data_file_container = os.path.join(
        base_path, self.id, "raw_data_" + str(topic_name) + ".csv")
    self.model_file_container = os.path.join(
        base_path, self.id, "model_" + str(topic_name) + ".h5")
    self.model_file_container_temp = os.path.join(
        base_path, self.id, "model_temp_" + str(topic_name) + ".h5")
    self.model_file_container_train = os.path.join(
        base_path, self.id, "model_train_" + str(topic_name) + ".h5")
    self.forecast_pub = None
    self.prediction_thread = None
    self.training_thread = None
    self.raw_data = None
    self.models = Models(self.model_file_container,
                         self.model_file_container_temp,
                         self.model_file_container_base)
class ControllerBase(ABC, threading.Thread):
    """Abstract base thread for optimization controllers.

    Owns the pyomo temp directory, the Redis stop/finish flags and the
    input/output controllers for one optimization instance. Subclasses
    implement :meth:`optimize`; :meth:`run` drives it and reports status
    through Redis.
    """

    def __init__(self, id, solver_name, model_path, control_frequency,
                 repetition, output_config, input_config_parser, config,
                 horizon_in_steps, dT_in_seconds, optimization_type):
        super().__init__()
        # Give pyomo a per-id temp dir so concurrent instances do not clash.
        pyomo_path = "/usr/src/app/logs/pyomo_" + str(id)
        if not os.path.exists(pyomo_path):
            os.makedirs(pyomo_path, mode=0o777, exist_ok=False)
            os.chmod(pyomo_path, 0o777)
        TempfileManager.tempdir = pyomo_path
        self.logger = MessageLogger.get_logger(__name__, id)
        self.logger.info("Initializing optimization controller " + id)
        self.id = id
        self.results = ""
        self.model_path = model_path
        self.solver_name = solver_name
        self.control_frequency = control_frequency
        self.repetition = repetition
        self.horizon_in_steps = horizon_in_steps
        self.dT_in_seconds = dT_in_seconds
        self.output_config = output_config
        self.input_config_parser = input_config_parser
        self.stopRequest = None  # threading.Event()
        self.redisDB = RedisDB()
        self.lock_key = "id_lock"
        self.optimization_type = optimization_type
        # Redis keys used to signal/observe this controller from outside.
        self.stop_signal_key = "opt_stop_" + self.id
        self.finish_status_key = "finish_status_" + self.id
        self.redisDB.set(self.stop_signal_key, False)
        self.redisDB.set(self.finish_status_key, False)
        self.repetition_completed = False
        self.preprocess = False
        self.input = None
        self.output = None
        # Only wire up IO when MQTT came up without errors for this id.
        if "False" in self.redisDB.get("Error mqtt" + self.id):
            self.output = OutputController(self.id, self.output_config)
        if "False" in self.redisDB.get("Error mqtt" + self.id):
            self.input = InputController(self.id, self.input_config_parser,
                                         config, self.control_frequency,
                                         self.horizon_in_steps,
                                         self.dT_in_seconds)

    # Importing a class dynamically
    def path_import2(self, absolute_path):
        """Load a Python module from an absolute file path and return it."""
        spec = importlib.util.spec_from_file_location(absolute_path,
                                                      absolute_path)
        module = spec.loader.load_module(spec.name)
        return module

    def join(self, timeout=None):
        #self.stopRequest.set()
        super(ControllerBase, self).join(timeout)

    def Stop(self):
        """Stop input/output controllers, raise the stop flag and join the thread."""
        try:
            if self.input:
                self.input.Stop()
                self.logger.debug("Deleting input instances")
        except Exception as e:
            self.logger.error("error stopping input " + str(e))
        try:
            if self.output:
                self.output.Stop()
                self.logger.debug("Deleting output instances")
        except Exception as e:
            self.logger.error("error stopping output " + str(e))
        self.logger.debug("setting stop_signal_key")
        self.redisDB.set(self.stop_signal_key, True)
        # NOTE: Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # supported spelling.
        if self.is_alive():
            self.join(1)

    def initialize_opt_solver(self):
        """Instantiate the pyomo solver selected by ``self.solver_name``."""
        start_time_total = time.time()
        self.optsolver = SolverFactory(
            self.solver_name
        )  #, tee=False, keepfiles=False, verbose=False, load_solutions=False) # , solver_io="lp")
        self.logger.debug("Solver factory: " + str(self.optsolver))
        # optsolver.options["max_iter"]=5000
        self.logger.info("solver instantiated with " + self.solver_name)

    def initialize_solver_manager(self):
        ###create a solver manager
        self.solver_manager = None
        #self.solver_manager = SolverManagerFactory('pyro', host='localhost')
        self.logger.debug("Starting the solver_manager")
        # optsolver.options.pyro_shutdown = True

    def erase_pyomo_files(self, folder):
        """Delete all regular files pyomo left behind in ``folder``."""
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
                # elif os.path.isdir(file_path): shutil.rmtree(file_path)
            except Exception as e:
                self.logger.error(e)

    # Start the optimization process and gives back a result
    def run(self):
        """Thread entry point: run :meth:`optimize` and report the outcome.

        Sets the Redis kill signal when the run ends without either a stop
        request or normal completion, cleans up the pyomo directory and
        disconnects all output MQTT clients.
        """
        self.logger.info("Starting optimization controller")
        return_msg = "success"
        execution_error = False
        try:
            count = 0
            self.optimize(count, self.solver_name, self.model_path)
        except Exception as e:
            execution_error = True
            self.logger.error("error overall " + str(e))
            e = str(e)
            solver_error = "The SolverFactory was unable to create the solver"
            if solver_error in e:
                # Extract the quoted solver name from pyomo's error message.
                i = e.index(solver_error)
                i_start = e.index("\"", i)
                i_end = e.index("\"", i_start + 1)
                solver = e[i_start + 1:i_end]
                return_msg = "Incorrect solver " + str(solver) + " used"
            else:
                return_msg = e
        finally:
            self.logger.info("repetition completed " +
                             str(self.repetition_completed))
            self.logger.info("stop request " +
                             str(self.redisDB.get_bool(self.stop_signal_key)))
            self.logger.info("execution error " + str(execution_error))
            if not self.redisDB.get_bool(
                    self.stop_signal_key
            ) and not self.repetition_completed and not execution_error:
                # Neither stopped, finished nor failed: the process died.
                self.logger.error("Process interrupted")
                self.redisDB.set("kill_signal", True)
            #erase pyomo folder
            folder = "/usr/src/app/logs/pyomo_" + str(self.id)
            shutil.rmtree(folder, ignore_errors=True)
            # If Stop signal arrives it tries to disconnect all mqtt clients
            if self.output:
                for key, client in self.output.mqtt.items():
                    client.MQTTExit()
                    self.logger.debug("Client " + key +
                                      " is being disconnected")
            self.logger.info(return_msg)
            self.redisDB.set(self.finish_status_key, True)
            return return_msg

    @abstractmethod
    def optimize(self, count, solver_name, model_path):
        """Run the optimization loop; subclasses must override."""
        while not self.redisDB.get_bool(self.stop_signal_key):
            pass

    def get_finish_status(self):
        """Return True once :meth:`run` has completed (flag lives in Redis)."""
        return self.redisDB.get_bool(self.finish_status_key)
class CommandController:
    """Process-wide singleton that starts, stops and tracks optimization threads.

    Keeps a registry of :class:`ThreadFactory` instances per id, mirrors their
    run state into Redis (``run:<id>`` keys) and restores instances after a
    restart from persisted metadata.
    """

    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        # Double-checked locking: concurrent first calls create one instance.
        if CommandController._instance is None:
            with CommandController._lock:
                if CommandController._instance is None:
                    CommandController._instance = super(
                        CommandController, cls).__new__(cls)
        return CommandController._instance

    def __init__(self):
        # __new__ always returns the shared instance, but Python still calls
        # __init__ on every CommandController() call. Guard so the registries
        # are not wiped by a second construction.
        if getattr(self, "_initialized", False):
            return
        self.factory = {}
        self.statusThread = {}
        self.running = {}
        self.redisDB = RedisDB()
        self.lock_key = "id_lock"
        self._initialized = True

    def set(self, id, object):
        """Register the thread factory ``object`` under ``id``."""
        self.factory[id] = object

    def get_length_factory(self):
        """Return the number of registered thread factories."""
        return len(self.factory)

    def get(self, id):
        """Return the thread factory registered under ``id``."""
        return self.factory[id]

    def set_isRunning(self, id, bool):
        self.running[id] = bool

    def isRunningExists(self):
        """Return True if any run state has ever been recorded."""
        logger.debug("IsRunning exists: " + str(len(self.running)))
        if len(self.running):
            return True
        else:
            return False

    def get_isRunning(self, id):
        """Return the recorded run state for ``id`` (False when unknown)."""
        if id in self.running.keys():
            return self.running[id]
        else:
            return False

    def get_running(self):
        return self.running

    def get_statusThread(self, id):
        return self.statusThread[id]

    def start(self, id, json_object, dict_object=None):
        """Create and launch an optimization thread for ``id``.

        Parameters come either from ``json_object`` (API request model) or
        from ``dict_object`` (persisted metadata on restart). Returns 0 on
        success, 1 when the thread could not be started.
        """
        logger.debug(str(json_object))
        if json_object is not None:
            self.model_name = json_object.model_name
            self.control_frequency = json_object.control_frequency
            self.horizon_in_steps = json_object.horizon_in_steps
            self.dT_in_seconds = json_object.d_t_in_seconds
            self.repetition = json_object.repetition
            self.solver = json_object.solver
            self.optimization_type = json_object.optimization_type
            self.single_ev = json_object.single_ev
        elif dict_object is not None:
            self.model_name = dict_object["model"]
            self.control_frequency = dict_object["control_frequency"]
            self.horizon_in_steps = dict_object["horizon_in_steps"]
            self.dT_in_seconds = dict_object["dT_in_seconds"]
            self.repetition = dict_object["repetition"]
            self.solver = dict_object["solver"]
            self.optimization_type = dict_object["optimization_type"]
            self.single_ev = dict_object["single_ev"]
        self.set(
            id,
            ThreadFactory(self.model_name, self.control_frequency,
                          self.horizon_in_steps, self.dT_in_seconds,
                          self.repetition, self.solver, id,
                          self.optimization_type, self.single_ev))
        logger.info("Thread: " + str(self.get(id)))
        self.redisDB.set("run:" + id, "starting")
        msg = self.get(id).startOptControllerThread()
        logger.debug("Answer from Thread factory" + str(msg))
        if msg == 0:
            self.set_isRunning(id, True)
            logger.debug("Flag isRunning set to True")
            # Watchdog thread that mirrors thread liveness into Redis.
            self.statusThread[id] = threading.Thread(target=self.run_status,
                                                     args=(id, ))
            logger.debug("Status of the Thread started")
            self.statusThread[id].start()
            # NOTE: "ztarttime" is an intentional key; get_status() reads it.
            meta_data = {
                "id": id,
                "model": self.model_name,
                "control_frequency": self.control_frequency,
                "horizon_in_steps": self.horizon_in_steps,
                "dT_in_seconds": self.dT_in_seconds,
                "repetition": self.repetition,
                "solver": self.solver,
                "optimization_type": self.optimization_type,
                "single_ev": self.single_ev,
                "ztarttime": time.time()
            }
            self.redisDB.set("run:" + id, "running")
            IDStatusManager.persist_id(id, True, meta_data, self.redisDB)
            logger.info("running status " + str(self.running))
            logger.debug("Command controller start finished")
            return 0
        else:
            self.set_isRunning(id, False)
            logger.debug("Flag isRunning set to False")
            IDStatusManager.persist_id(id, False, None, self.redisDB)
            self.factory[id].stopOptControllerThread()
            self.redisDB.set("run:" + id, "stopped")
            logger.error("Command controller start could not be finished")
            # logger.debug("System stopped succesfully")
            return 1

    def stop(self, id):
        """Stop the optimization thread for ``id`` and drop its registry entries."""
        logger.debug("Stop signal received")
        logger.debug("This is the factory object: " + str(self.get(id)))
        if self.factory[id]:
            IDStatusManager.persist_id(id, False, None, self.redisDB)
            self.factory[id].stopOptControllerThread()
            del self.factory[id]
            del self.statusThread[id]
            #self.stop_pyro_servers()
            #self.stop_name_servers()
            self.set_isRunning(id, False)
            message = "System stopped succesfully"
            self.redisDB.set("run:" + id, "stopped")
            logger.debug(message)
            gc.collect()
        else:
            message = "No threads found"
            logger.debug(message)

    def run_status(self, id):
        """Poll thread liveness and the Redis stop flag once a second."""
        while True:
            status = self.get(id).is_running()
            flag = self.redisDB.get("run:" + id)
            if not status or (flag is not None and flag == "stop"):
                self.redisDB.set("run:" + id, "stopping")
                self.stop(id)
                break
            time.sleep(1)

    def restart_ids(self):
        """Restart previously running instances from persisted metadata."""
        old_ids, stopped_ids = IDStatusManager.instances_to_restart(
            self.redisDB)
        for s in old_ids:
            val = json.loads(s)
            try:
                self.start(val["id"], None, val)
            except (InvalidModelException, MissingKeysException,
                    InvalidMQTTHostException) as e:
                # TODO: should we catch these exceptions here?
                # NOTE(review): this return aborts the loop, so ids after a
                # failing one are never restarted — confirm this is intended.
                logger.error("Error " + str(e))
                self.redisDB.set("run:" + val["id"], "stopped")
                return str(e)
        for s in stopped_ids:
            val = json.loads(s)
            id = val["id"]
            self.redisDB.set("run:" + id, "stopped")
            self.redisDB.set(Constants.id_meta + ":" + id, json.dumps(val))

    def get_status(self):
        """Build a status/config dict per id from the Redis run and meta keys."""
        status = {}
        keys = self.redisDB.get_keys_for_pattern("run:*")
        if keys is not None:
            for key in keys:
                value = self.redisDB.get(key)
                id = key[4:]  # strip "run:" prefix
                status[id] = {}
                if value is None or (value is not None
                                     and value == "stopped"):
                    status[id]["status"] = "stopped"
                elif value == "running":
                    status[id]["status"] = "running"
                elif value == "stop" or value == "stopping":
                    status[id]["status"] = "stopping"
                elif value == "starting":
                    status[id]["status"] = "starting"
        keys = self.redisDB.get_keys_for_pattern(Constants.id_meta + ":*")
        if keys is not None:
            for key in keys:
                value = self.redisDB.get(key)
                # assumes Constants.id_meta + ":" is 8 chars — TODO confirm
                id = key[8:]
                if id not in status.keys():
                    status[id] = {}
                    status[id]["status"] = "stopped"
                status[id]["config"] = {}
                if value is not None:
                    status[id]["config"].update(json.loads(value))
                    if "ztarttime" in status[id]["config"].keys():
                        status[id]["start_time"] = status[id]["config"][
                            "ztarttime"]
                        status[id]["config"].pop("ztarttime")
                    if "model" in status[id]["config"].keys():
                        status[id]["config"]["model_name"] = status[id][
                            "config"]["model"]
                        status[id]["config"].pop("model")
        return status
class OutputController:
    """Publish optimization results over MQTT (SenML) and mirror them in Redis.

    One MQTT client is kept per ``host:port`` in ``self.mqtt``; per-output
    publish parameters (topic, qos, unit, ...) live in ``self.mqtt_params``.
    """

    def __init__(self, id=None, output_config=None):
        self.logger = MessageLogger.get_logger(__name__, id)
        self.logger.info("Output Class started")
        self.output_config = output_config
        self.mqtt = {}  # client per "host:port"
        self.redisDB = RedisDB()
        self.mqtt_params = {}  # publish params per output key
        self.output_mqtt = {}
        self.id = id
        self.stop_request = False
        self.config_parser_utils = ConfigParserUtils()
        self.logger.debug("output_config: " + str(self.output_config) + " " +
                          str(type(self.output_config)))
        if self.output_config is not None:
            self.extract_mqtt_params()
            self.init_mqtt()

    def extract_mqtt_params(self):
        """Flatten the two-level output config into ``self.mqtt_params``."""
        self.logger.debug("Output config = " + str(self.output_config))
        for key, value in self.output_config.items():
            self.logger.debug("key " + str(key) + " value " + str(value))
            for key2, value2 in value.items():
                self.logger.debug("key2 " + str(key2) + " value2 " +
                                  str(value2))
                mqtt = self.config_parser_utils.get_mqtt(value2)
                unit, horizon_values = self.read_extra_values(value2)
                if mqtt is not None:
                    self.mqtt_params[key2] = mqtt.copy()
                    self.mqtt_params[key2]["unit"] = unit
                    self.mqtt_params[key2]["horizon_values"] = horizon_values
        self.logger.debug("params = " + str(self.mqtt_params))

    def read_extra_values(self, value2):
        """Return (unit, horizon_values) from a config entry; defaults (None, False)."""
        unit = None
        horizon_values = False
        if isinstance(value2, dict):
            if "unit" in value2.keys():
                unit = value2["unit"]
            if "horizon_values" in value2.keys():
                horizon_values = value2["horizon_values"]
        return unit, horizon_values

    def init_mqtt(self):
        ###Connection to the mqtt broker
        self.logger.debug("Starting init mqtt")
        self.redisDB.set("Error mqtt" + self.id, False)
        try:
            for key, value in self.mqtt_params.items():
                self.logger.debug("key " + str(key) + " value " + str(value))
                client_id = "client_publish" + str(randrange(100000)) + str(
                    time.time()).replace(".", "")
                host = str(value["host"])
                port = value["mqtt.port"]
                self.logger.debug("client " + client_id)
                self.logger.debug("host " + host)
                self.logger.debug("port " + str(port))
                # One shared client per broker endpoint.
                client_key = host + ":" + str(port)
                if client_key not in self.mqtt.keys():
                    self.mqtt[client_key] = MQTTClient(
                        str(host),
                        port,
                        client_id,
                        username=value["username"],
                        password=value["password"],
                        ca_cert_path=value["ca_cert_path"],
                        set_insecure=value["insecure"],
                        id=self.id)
            self.logger.info("successfully subscribed")
        except Exception as e:
            self.logger.debug("Exception while starting mqtt")
            self.redisDB.set("Error mqtt" + self.id, True)
            self.logger.error(e)

    def publish_data(self, id, data, dT):
        """Publish ``data`` as SenML to the configured topics and save to Redis."""
        self.logger.debug("output data : " + json.dumps(data, indent=4))
        current_time = int(time.time())
        try:
            senml_data = self.senml_message_format(data, current_time, dT)
            for mqtt_key, value in senml_data.items():
                v = json.dumps(value)
                if mqtt_key in self.mqtt_params.keys():
                    value2 = self.mqtt_params[mqtt_key]
                    topic = value2["topic"]
                    host = value2["host"]
                    port = value2["mqtt.port"]
                    qos = value2["qos"]
                    client_key = host + ":" + str(port)
                    self.mqtt[client_key].sendResults(topic, v, qos)
        except Exception as e:
            # BUG FIX: logger.error(msg, e) treated e as a %-format argument
            # with no placeholder; concatenate instead.
            self.logger.error("error in publish data " + str(e))
        self.save_to_redis(id, data, current_time)

    def Stop(self):
        """Disconnect every MQTT client."""
        self.stop_request = True
        try:
            # BUG FIX: self.mqtt is keyed by "host:port", not by the
            # mqtt_params output key, so the old self.mqtt[key] lookup
            # always raised KeyError and never disconnected any client.
            for client_key, client in self.mqtt.items():
                self.logger.debug("disconnecting client " + str(client_key))
                client.MQTTExit()
            self.logger.info("OutputController safe exit")
        except Exception as e:
            self.logger.error(e)

    def senml_message_format(self, data, current_time, dT):
        """Convert result vectors into SenML documents keyed by output name.

        When ``horizon_values`` is False only the first value is emitted;
        otherwise the whole horizon is emitted with timestamps spaced by dT.
        """
        new_data = {}
        for key, value in data.items():
            flag = False
            ts = current_time  # local name avoids shadowing the time module
            u = None
            base = None
            if isinstance(value, dict):
                bn, n, val = self.get_names(value)
            else:
                bn, n, val = None, None, value
            if bn:
                base = senml.SenMLMeasurement()
                base.name = bn
            if key in self.mqtt_params.keys():
                if self.mqtt_params[key]["unit"] is not None:
                    u = self.mqtt_params[key]["unit"]
                flag = self.mqtt_params[key]["horizon_values"]
            meas_list = []
            for v in val:
                meas = senml.SenMLMeasurement()
                meas.name = n
                meas.time = ts
                meas.value = v
                if u:
                    meas.unit = u
                meas_list.append(meas)
                ts += dT
                if not flag:
                    break  # only want the first value
            if len(meas_list) > 0:
                doc = senml.SenMLDocument(meas_list, base=base)
                new_data[key] = doc.to_json()
        return new_data

    def save_to_redis(self, id, data, time):
        """Replace the ``o:<id>:*`` keys in Redis with the latest output values."""
        try:
            part_key = "o:" + id + ":"
            output_keys = self.redisDB.get_keys_for_pattern(part_key + "*")
            if output_keys is not None:
                for key in output_keys:
                    self.redisDB.remove(key)
            for key, value in data.items():
                key = key.replace("~", "/")
                if isinstance(value, dict):
                    bn, n, val = self.get_names(value)
                else:
                    bn, n, val = None, key, value
                if bn:
                    n = bn + "/" + n
                index = 0
                for v in val:
                    k = part_key + n + ":" + str(index)
                    self.redisDB.set(k, json.dumps({str(time): v}))
                    index += 1
        except Exception as e:
            self.logger.error("error adding to redis " + str(e))

    def get_names(self, dict):
        """Return (bn, n, v) from a SenML-like dict; missing entries are None.

        NOTE: the parameter name shadows the builtin ``dict``; kept for
        interface compatibility.
        """
        bn = None
        n = None
        v = None
        if "bn" in dict.keys():
            bn = dict["bn"]
        if "n" in dict.keys():
            n = dict["n"]
        if "v" in dict.keys():
            v = dict["v"]
        return bn, n, v
def framework_start(id, startOFW):  # noqa: E501
    """Command for starting the framework  # noqa: E501

    Validates the request (model, solver, optimizer type, registry id),
    then delegates to ``variable.start``. Returns a (message, HTTP code)
    tuple for the connexion handler.

    :param id: Id of the registry to be started
    :type id: str
    :param startOFW: Start command for the optimization framework repetitions: -1 infinite repetitions
    :type startOFW: dict | bytes

    :rtype: None
    """
    available_solvers = ["ipopt", "glpk", "bonmin", "gurobi", "cbc"]
    available_optimizers = ["discrete", "stochastic", "MPC"]
    response_msg = ""
    response_code = 200
    if connexion.request.is_json:
        logger.info("Starting the system")
        startOFW = Start.from_dict(connexion.request.get_json())
        models = get_models()
        if startOFW.model_name != "" and startOFW.model_name not in models:
            response_msg = "Model not available. Available models are :" + str(
                models)
            response_code = 400
        elif startOFW.solver not in available_solvers:
            response_msg = "Use one of the following solvers :" + str(
                available_solvers)
            response_code = 400
        elif startOFW.optimization_type not in available_optimizers:
            response_msg = "Use one of the following optimizer types : " + str(
                available_optimizers)
            response_code = 400
        else:
            # Renamed from "dir" to avoid shadowing the builtin.
            registry_dir = os.path.join(os.getcwd(),
                                        "optimization/resources", str(id))
            if not os.path.exists(registry_dir):
                response_msg = "Id not existing"
                response_code = 400
            else:
                redis_db = RedisDB()
                flag = redis_db.get("run:" + id)
                if flag is not None and flag == "running":
                    response_msg = "System already running"
                else:
                    try:
                        msg = variable.start(id, startOFW)
                        if msg == 0:
                            response_msg = "System started succesfully"
                        else:
                            response_msg = "System could not start"
                            response_code = 400
                    except (InvalidModelException, MissingKeysException,
                            InvalidMQTTHostException) as e:
                        logger.error("Error " + str(e))
                        redis_db.set("run:" + id, "stopped")
                        response_msg = str(e)
                        response_code = 400
    else:
        response_msg = "Wrong Content-Type"
        response_code = 400
        logger.error("Wrong Content-Type")
    return response_msg, response_code