def framework_stop(id):  # noqa: E501
    """Command for stopping the framework

     # noqa: E501

    :param id: Id of the registry to be stopped
    :type id: str

    :rtype: None
    """
    try:
        redis_db = RedisDB()
        flag = redis_db.get("run:" + id)
        logger.debug("Flag " + str(flag))
        message = ""
        code = 200
        if flag == "running":
            logger.debug("System running and trying to stop")
            # Signal the status thread (see CommandController.run_status)
            # to shut the framework down, then re-read the flag.
            redis_db.set("run:" + id, "stop")
            time.sleep(1)
            flag = redis_db.get("run:" + id)
            logger.debug("Flag in stop: " + str(flag))

            # BUG FIX: the original `if flag is "stopped" or None:` compared
            # identity with a string literal and the `or None` arm was always
            # falsy, so a missing key (None) fell through and crashed on
            # `"stopping" in flag`. A missing key means cleanup already ran.
            if flag is None or flag == "stopped":
                logger.debug("System stopped succesfully")
                message = "System stopped succesfully"
            elif "stopping" in flag:
                message = "System stopped succesfully"
                # Poll for up to ~15 seconds while the system shuts down.
                counter = 0
                while flag is not None and "stopping" in flag:
                    flag = redis_db.get("run:" + id)
                    counter = counter + 1
                    if counter >= 15:
                        message = "system stopped succesfully"
                        break
                    else:
                        time.sleep(1)
                logger.debug("System stopped succesfully")
            else:
                message = "Problems while stopping the system"
                code = 500
        elif flag == "stopped" or flag is None:
            logger.debug("System already stopped")
            message = "System already stopped"
    except Exception as e:
        logger.error(e)
        message = "Error stopping the system"
        code = 500
    return message, code
# Example #2
def get_output(id):  # noqa: E501
    """Get ouput of the optimization

     # noqa: E501

    :param id: Name of the registry to be actualized
    :type id: str

    :rtype: OptimizationOutput
    """
    collected = {}
    redisDB = RedisDB()
    output_keys = redisDB.get_keys_for_pattern("o:" + id + ":*")
    if output_keys is not None:
        raw_meta = redisDB.get("id_meta:" + id)
        if raw_meta is not None:
            # Step length is needed to turn the per-step index into an
            # absolute timestamp offset.
            dT = json.loads(raw_meta)["dT_in_seconds"]
            for key in output_keys:
                parts = key.split(":")
                topic, index = parts[2], parts[3]
                payload = json.loads(redisDB.get(key))
                # Only the first (timestamp, value) pair of each entry is
                # used; `ts` stays None for empty payloads.
                ts, value = None, 0
                for stamp, val in payload.items():
                    ts, value = stamp, val
                    break
                if topic not in collected:
                    collected[topic] = {}
                if ts is not None:
                    collected[topic][float(ts) + int(index) * dT] = float(value)
            logger.debug(collected)
    return OptimizationOutput.from_dict(collected)
class ThreadFactory:
    """Creates and supervises the optimization controller thread plus the
    prediction helper threads for a single registry id."""

    def __init__(self, model_name, control_frequency, horizon_in_steps,
                 dT_in_seconds, repetition, solver, id, optimization_type,
                 single_ev):
        """Store the run configuration for one optimization instance.

        :param model_name: name of the model file (without ``.py``); empty
            falls back to the config file in startOptControllerThread
        :param control_frequency: seconds between control outputs
        :param horizon_in_steps: optimization horizon length in steps
        :param dT_in_seconds: step length in seconds
        :param repetition: number of repetitions (-1 means infinite,
            per the framework_start endpoint documentation)
        :param solver: solver name; empty falls back to the config file
        :param id: registry id this instance belongs to
        :param optimization_type: "MPC", "discrete" or "stochastic"
        :param single_ev: flag forwarded to the stochastic controller
        """
        self.logger = MessageLogger.get_logger(__name__, id)
        self.model_name = model_name
        self.control_frequency = control_frequency
        self.horizon_in_steps = horizon_in_steps
        self.dT_in_seconds = dT_in_seconds
        self.repetition = repetition
        self.solver = solver
        self.id = id
        self.optimization_type = optimization_type
        self.single_ev = single_ev
        self.redisDB = RedisDB()
        self.pyro_mip_server = None

    def getFilePath(self, dir, file_name):
        """Return the absolute path of *file_name* inside *dir* under the
        fixed application root ``/usr/src/app``."""
        data_file = os.path.join("/usr/src/app", dir, file_name)
        return data_file

    def startOptControllerThread(self):
        """Load configuration and registries, create prediction threads and
        start the optimization controller thread.

        :returns: 0 when the controller thread was started, 1 otherwise.
        :raises MissingKeysException: when a required input key has no
            declared data source.
        """
        self.logger.info("Creating optimization controller thread")
        self.logger.info("Number of repetitions: " + str(self.repetition))
        self.logger.info("Output with the following control_frequency: " +
                         str(self.control_frequency))
        self.logger.info(
            "Optimization calculated with the following horizon_in_steps: " +
            str(self.horizon_in_steps))
        self.logger.info(
            "Optimization calculated with the following dT_in_seconds: " +
            str(self.dT_in_seconds))
        self.logger.info("Optimization calculated with the following model: " +
                         self.model_name)
        self.logger.info(
            "Optimization calculated with the following solver: " +
            self.solver)
        self.logger.info(
            "Optimization calculated with the following optimization_type: " +
            self.optimization_type)

        # Reset the mqtt error flag for this id before the run starts; the
        # controllers read it back before connecting.
        self.redisDB.set("Error mqtt" + self.id, False)
        self.logger.debug("Error mqtt " +
                          str(self.redisDB.get("Error mqtt" + self.id)))

        # Creating an object of the configuration file (standard values)
        try:
            config = configparser.RawConfigParser()
            config.read(
                self.getFilePath("optimization/resources",
                                 "ConfigFile.properties"))
        except Exception as e:
            # NOTE(review): if this fails, `config` may be unbound below.
            self.logger.error(e)

        # Loads the model name if it was not given through the endpoint command/start/id
        if not self.model_name:
            self.model_name = config.get("SolverSection", "model.name")
        self.logger.debug("This is the model name: " + self.model_name)
        self.model_path = os.path.join(
            config.get("SolverSection", "model.base.path"),
            self.model_name) + ".py"
        self.logger.debug("This is the path of the model: " +
                          str(self.model_path))

        # Loads the solver name if not specified in command/start/id
        if not self.solver:
            self.solver_name = config.get("SolverSection", "solver.name")
        else:
            self.solver_name = self.solver
        self.logger.debug(
            "Optimization calculated with the following solver: " +
            self.solver_name)

        ##############################################################################################
        output_config = None
        try:
            # Reads the registry/output and stores it into an object
            path = os.path.join(os.getcwd(), "optimization/resources",
                                str(self.id), "Output.registry.mqtt")
            if not os.path.exists(path):
                self.logger.debug(
                    "Output.registry.mqtt not set, only file output available")
            else:
                with open(path, "r") as file:
                    output_config = json.loads(file.read())
        except Exception as e:
            self.logger.error(
                "Output.registry.mqtt not set, only file output available")

        try:
            # Reads the registry/input and stores it into an object
            path = os.path.join(os.getcwd(), "optimization/resources",
                                str(self.id), "Input.registry.file")
            if not os.path.exists(path):
                input_config_file = {}
                self.logger.debug("Not Input.registry.file present")
            else:
                with open(path, "r") as file:
                    input_config_file = json.loads(file.read())
                self.logger.debug("Input.registry.file found")
        except Exception as e:
            self.logger.error("Input file not found")
            input_config_file = {}
            self.logger.error(e)

        try:
            # Reads the registry/input and stores it into an object
            path = os.path.join(os.getcwd(), "optimization/resources",
                                str(self.id), "Input.registry.mqtt")
            if not os.path.exists(path):
                input_config_mqtt = {}
                self.logger.debug("Not Input.registry.mqtt present")
            else:
                with open(path, "r") as file:
                    input_config_mqtt = json.loads(file.read())
                self.logger.debug("Input.registry.mqtt found")
        except Exception as e:
            self.logger.error("Input file not found")
            input_config_mqtt = {}
            self.logger.error(e)

        input_config_parser = InputConfigParser(input_config_file,
                                                input_config_mqtt,
                                                self.model_name, self.id,
                                                self.optimization_type)

        # Abort early if any model input has no data source declared.
        missing_keys = input_config_parser.check_keys_for_completeness()
        if len(missing_keys) > 0:
            raise MissingKeysException(
                "Data source for following keys not declared: " +
                str(missing_keys))

        # One LoadPrediction instance per forecastable input topic; the
        # training parameters are persisted to redis for the trainers.
        self.prediction_threads = {}
        self.prediction_names = input_config_parser.get_prediction_names()
        if self.prediction_names is not None and len(
                self.prediction_names) > 0:
            for prediction_name in self.prediction_names:
                flag = input_config_parser.get_forecast_flag(prediction_name)
                if flag:
                    self.logger.info(
                        "Creating prediction controller thread for topic " +
                        str(prediction_name))
                    topic_param = input_config_parser.get_params(
                        prediction_name)
                    parameters = json.dumps({
                        "control_frequency": self.control_frequency,
                        "horizon_in_steps": self.horizon_in_steps,
                        "topic_param": topic_param,
                        "dT_in_seconds": self.dT_in_seconds
                    })
                    self.redisDB.set(
                        "train:" + self.id + ":" + prediction_name, parameters)
                    self.prediction_threads[prediction_name] = LoadPrediction(
                        config, self.control_frequency, self.horizon_in_steps,
                        prediction_name, topic_param, self.dT_in_seconds,
                        self.id, True)
                    # self.prediction_threads[prediction_name].start()

        # PV inputs get a dedicated PVPrediction thread (started immediately).
        self.non_prediction_threads = {}
        self.non_prediction_names = input_config_parser.get_non_prediction_names()
        if self.non_prediction_names is not None and len(
                self.non_prediction_names) > 0:
            for non_prediction_name in self.non_prediction_names:
                flag = input_config_parser.get_forecast_flag(
                    non_prediction_name)
                if flag:
                    if non_prediction_name == "P_PV":
                        self.non_prediction_threads[
                            non_prediction_name] = PVPrediction(
                                config, input_config_parser, self.id,
                                self.control_frequency, self.horizon_in_steps,
                                self.dT_in_seconds, non_prediction_name)
                        self.non_prediction_threads[non_prediction_name].start()

        # Initializing constructor of the optimization controller thread.
        # NOTE(review): an unknown optimization_type leaves self.opt unset
        # and the access below raises AttributeError -- callers validate
        # the type beforehand (see framework_start).
        if self.optimization_type == "MPC":
            self.opt = OptControllerMPC(
                self.id, self.solver_name, self.model_path,
                self.control_frequency, self.repetition, output_config,
                input_config_parser, config, self.horizon_in_steps,
                self.dT_in_seconds, self.optimization_type)
        elif self.optimization_type == "discrete":
            self.opt = OptControllerDiscrete(
                self.id, self.solver_name, self.model_path,
                self.control_frequency, self.repetition, output_config,
                input_config_parser, config, self.horizon_in_steps,
                self.dT_in_seconds, self.optimization_type)
        elif self.optimization_type == "stochastic":
            self.opt = OptControllerStochastic(
                self.id, self.solver_name, self.model_path,
                self.control_frequency, self.repetition, output_config,
                input_config_parser, config, self.horizon_in_steps,
                self.dT_in_seconds, self.optimization_type, self.single_ev)

        try:
            # Starts the optimization controller thread only when no mqtt
            # error was flagged during controller construction.
            self.logger.debug("Mqtt issue " +
                              str(self.redisDB.get("Error mqtt" + self.id)))
            if "False" in self.redisDB.get("Error mqtt" + self.id):
                self.opt.start()
                self.logger.debug("Optimization object started")
                return 0
            else:
                self.redisDB.set("run:" + self.id, "stopping")
                self.stopOptControllerThread()
                self.redisDB.set("run:" + self.id, "stopped")
                self.logger.error("Optimization object could not be started")
                return 1
        except Exception as e:
            self.logger.error(e)
            return 1

    def stopOptControllerThread(self):
        """Stop the prediction threads and the optimization controller.

        :returns: a status string on success, the caught exception otherwise.
        """
        try:
            # stop as per ID
            for name, obj in self.prediction_threads.items():
                self.redisDB.remove("train:" + self.id + ":" + name)
                obj.Stop()
            for name, obj in self.non_prediction_threads.items():
                obj.Stop()
                del obj
            self.logger.info("Stopping optimization controller thread")
            self.opt.Stop()
            del self.opt
            self.logger.info("Optimization controller thread stopped")
            # NOTE(review): the logger attribute is dropped here, so any
            # later method call on this instance that logs will fail.
            del self.logger
            return "Optimization controller thread stopped"
        except Exception as e:
            self.logger.error(e)
            return e

    def is_running(self):
        """Return True while the optimization controller has not finished."""
        return not self.opt.get_finish_status()

    def update_training_params(self, key, parameters):
        """Re-persist *parameters* under *key* in redis once a minute so the
        training entry stays fresh; loops forever in its own thread."""
        while True:
            self.redisDB.set(key, parameters)
            # BUG FIX: time.sleep() requires a number; the original passed
            # the string "60", which raised TypeError on the first call.
            time.sleep(60)
# Example #4
class ControllerBase(ABC, threading.Thread):
    """Base thread for the optimization controllers (MPC / discrete /
    stochastic). Owns the input/output controllers, the pyomo temp
    directory and the redis stop/finish flags for one registry id."""

    def __init__(self, id, solver_name, model_path, control_frequency,
                 repetition, output_config, input_config_parser, config,
                 horizon_in_steps, dT_in_seconds, optimization_type):
        """Set up per-id pyomo temp dir, redis flags and I/O controllers.

        :param id: registry id of this optimization run
        :param solver_name: pyomo solver to instantiate
        :param model_path: path of the model .py file
        :param control_frequency: seconds between control outputs
        :param repetition: number of repetitions requested
        :param output_config: parsed Output.registry.mqtt or None
        :param input_config_parser: InputConfigParser for this run
        :param config: RawConfigParser with framework defaults
        :param horizon_in_steps: optimization horizon length in steps
        :param dT_in_seconds: step length in seconds
        :param optimization_type: "MPC", "discrete" or "stochastic"
        """
        super().__init__()

        # Give pyomo a per-id scratch directory so parallel runs do not
        # clobber each other's temp files.
        pyomo_path = "/usr/src/app/logs/pyomo_" + str(id)
        if not os.path.exists(pyomo_path):
            os.makedirs(pyomo_path, mode=0o777, exist_ok=False)
            os.chmod(pyomo_path, 0o777)
        TempfileManager.tempdir = pyomo_path

        self.logger = MessageLogger.get_logger(__name__, id)
        self.logger.info("Initializing optimization controller " + id)
        self.id = id
        self.results = ""
        self.model_path = model_path
        self.solver_name = solver_name
        self.control_frequency = control_frequency
        self.repetition = repetition
        self.horizon_in_steps = horizon_in_steps
        self.dT_in_seconds = dT_in_seconds
        self.output_config = output_config
        self.input_config_parser = input_config_parser
        self.stopRequest = None  #threading.Event()
        self.redisDB = RedisDB()
        self.lock_key = "id_lock"
        self.optimization_type = optimization_type
        # Redis keys used to signal stop requests and report completion.
        self.stop_signal_key = "opt_stop_" + self.id
        self.finish_status_key = "finish_status_" + self.id
        self.redisDB.set(self.stop_signal_key, False)
        self.redisDB.set(self.finish_status_key, False)
        self.repetition_completed = False
        self.preprocess = False
        self.input = None
        self.output = None
        # Only attach I/O controllers while no mqtt error is flagged; the
        # flag is re-checked because OutputController may set it.
        if "False" in self.redisDB.get("Error mqtt" + self.id):
            self.output = OutputController(self.id, self.output_config)
        if "False" in self.redisDB.get("Error mqtt" + self.id):
            self.input = InputController(self.id, self.input_config_parser,
                                         config, self.control_frequency,
                                         self.horizon_in_steps,
                                         self.dT_in_seconds)

    # Importing a class dynamically
    def path_import2(self, absolute_path):
        """Import and return the module at *absolute_path*.

        NOTE(review): spec.loader.load_module is deprecated in favour of
        exec_module; kept because load_module also registers the module.
        """
        spec = importlib.util.spec_from_file_location(absolute_path,
                                                      absolute_path)
        module = spec.loader.load_module(spec.name)
        return module

    def join(self, timeout=None):
        """Wait for the controller thread to finish."""
        #self.stopRequest.set()
        super(ControllerBase, self).join(timeout)

    def Stop(self):
        """Stop input/output controllers, raise the redis stop flag and
        wait briefly for the thread to exit."""
        try:
            if self.input:
                self.input.Stop()
                self.logger.debug("Deleting input instances")
        except Exception as e:
            self.logger.error("error stopping input " + str(e))
        try:
            if self.output:
                self.output.Stop()
                self.logger.debug("Deleting output instances")
        except Exception as e:
            self.logger.error("error stopping output " + str(e))

        self.logger.debug("setting stop_signal_key")
        self.redisDB.set(self.stop_signal_key, True)

        # BUG FIX: Thread.isAlive() was removed in Python 3.9; use the
        # modern is_alive() spelling.
        if self.is_alive():
            self.join(1)

    def initialize_opt_solver(self):
        """Instantiate the pyomo solver named by ``self.solver_name``."""
        self.optsolver = SolverFactory(self.solver_name)
        self.logger.debug("Solver factory: " + str(self.optsolver))
        self.logger.info("solver instantiated with " + self.solver_name)

    def initialize_solver_manager(self):
        """Placeholder: the pyro solver manager is currently disabled."""
        self.solver_manager = None
        self.logger.debug("Starting the solver_manager")

    def erase_pyomo_files(self, folder):
        """Delete the plain files inside *folder* (pyomo scratch files),
        logging and continuing on individual failures."""
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                self.logger.error(e)

    # Start the optimization process and gives back a result
    def run(self):
        """Thread entry point: run the optimization loop, then clean up
        temp files, mqtt clients and the redis finish flag.

        :returns: "success" or an error message (also logged; thread
            return values are not observed by callers).
        """
        self.logger.info("Starting optimization controller")

        return_msg = "success"
        execution_error = False
        try:
            count = 0
            self.optimize(count, self.solver_name, self.model_path)
        except Exception as e:
            execution_error = True
            self.logger.error("error overall " + str(e))
            e = str(e)
            # Extract the quoted solver name from pyomo's error message to
            # produce a friendlier response.
            solver_error = "The SolverFactory was unable to create the solver"
            if solver_error in e:
                i = e.index(solver_error)
                i_start = e.index("\"", i)
                i_end = e.index("\"", i_start + 1)
                solver = e[i_start + 1:i_end]
                return_msg = "Incorrect solver " + str(solver) + " used"
            else:
                return_msg = e
        finally:
            self.logger.info("repetition completed " +
                             str(self.repetition_completed))
            self.logger.info("stop request " +
                             str(self.redisDB.get_bool(self.stop_signal_key)))
            self.logger.info("execution error " + str(execution_error))
            # Exiting without a stop request, completion or error means the
            # process was interrupted from outside.
            if not self.redisDB.get_bool(
                    self.stop_signal_key
            ) and not self.repetition_completed and not execution_error:
                self.logger.error("Process interrupted")
                self.redisDB.set("kill_signal", True)

            # erase the per-id pyomo scratch folder
            folder = "/usr/src/app/logs/pyomo_" + str(self.id)
            shutil.rmtree(folder, ignore_errors=True)

            # Disconnect all mqtt clients held by the output controller.
            if self.output:
                for key, mqtt_client in self.output.mqtt.items():
                    mqtt_client.MQTTExit()
                    self.logger.debug("Client " + key +
                                      " is being disconnected")

            self.logger.info(return_msg)
            self.redisDB.set(self.finish_status_key, True)
            # NOTE(review): returning from `finally` swallows exceptions not
            # caught above (e.g. BaseException); kept as-is.
            return return_msg

    @abstractmethod
    def optimize(self, count, solver_name, model_path):
        """Run the optimization loop; subclasses must override. The default
        body merely spins until the redis stop flag is raised."""
        while not self.redisDB.get_bool(self.stop_signal_key):
            pass

    def get_finish_status(self):
        """Return True once the controller has flagged completion in redis."""
        return self.redisDB.get_bool(self.finish_status_key)
class CommandController:
    """Thread-safe singleton that owns one ThreadFactory per registry id
    and tracks its run status in redis (key ``run:<id>``)."""
    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        # Double-checked locking so only one instance is ever created.
        if CommandController._instance is None:
            with CommandController._lock:
                if CommandController._instance is None:
                    CommandController._instance = super(
                        CommandController, cls).__new__(cls)
        return CommandController._instance

    def __init__(self):
        # NOTE(review): __init__ runs on every CommandController() call and
        # resets these dicts even though __new__ returns the singleton --
        # confirm callers construct it only once.
        self.factory = {}        # id -> ThreadFactory
        self.statusThread = {}   # id -> watcher thread (run_status)
        self.running = {}        # id -> bool running flag
        self.redisDB = RedisDB()
        self.lock_key = "id_lock"

    def set(self, id, object):
        """Register the ThreadFactory *object* under *id*."""
        self.factory[id] = object

    def get_length_factory(self):
        """Return how many ThreadFactory instances are registered."""
        return len(self.factory)

    def get(self, id):
        """Return the ThreadFactory registered under *id* (KeyError if absent)."""
        return self.factory[id]

    def set_isRunning(self, id, bool):
        """Record whether the instance *id* is currently running."""
        self.running[id] = bool

    def isRunningExists(self):
        """Return True when at least one running-state entry exists."""
        logger.debug("IsRunning exists: " + str(len(self.running)))
        if len(self.running):
            return True
        else:
            return False

    def get_isRunning(self, id):
        """Return the running flag for *id*, defaulting to False."""
        if id in self.running.keys():
            return self.running[id]
        else:
            return False

    def get_running(self):
        """Return the whole id -> running-flag mapping."""
        return self.running

    def get_statusThread(self, id):
        """Return the status watcher thread for *id* (KeyError if absent)."""
        return self.statusThread[id]

    def start(self, id, json_object, dict_object=None):
        """Create and start a ThreadFactory for *id*.

        Configuration comes either from *json_object* (a Start model from
        the REST endpoint) or from *dict_object* (a restored redis dict,
        see restart_ids).

        :returns: 0 on success, 1 when the factory could not start.
        """
        logger.debug(str(json_object))
        if json_object is not None:
            self.model_name = json_object.model_name
            self.control_frequency = json_object.control_frequency
            self.horizon_in_steps = json_object.horizon_in_steps
            self.dT_in_seconds = json_object.d_t_in_seconds
            self.repetition = json_object.repetition
            self.solver = json_object.solver
            self.optimization_type = json_object.optimization_type
            self.single_ev = json_object.single_ev
        elif dict_object is not None:
            self.model_name = dict_object["model"]
            self.control_frequency = dict_object["control_frequency"]
            self.horizon_in_steps = dict_object["horizon_in_steps"]
            self.dT_in_seconds = dict_object["dT_in_seconds"]
            self.repetition = dict_object["repetition"]
            self.solver = dict_object["solver"]
            self.optimization_type = dict_object["optimization_type"]
            self.single_ev = dict_object["single_ev"]

        self.set(
            id,
            ThreadFactory(self.model_name, self.control_frequency,
                          self.horizon_in_steps, self.dT_in_seconds,
                          self.repetition, self.solver, id,
                          self.optimization_type, self.single_ev))

        logger.info("Thread: " + str(self.get(id)))
        self.redisDB.set("run:" + id, "starting")
        msg = self.get(id).startOptControllerThread()
        logger.debug("Answer from Thread factory" + str(msg))
        if msg == 0:
            self.set_isRunning(id, True)
            logger.debug("Flag isRunning set to True")
            # Watcher thread that polls the controller and the redis flag
            # and triggers stop() when either asks for it.
            self.statusThread[id] = threading.Thread(target=self.run_status,
                                                     args=(id, ))
            logger.debug("Status of the Thread started")
            self.statusThread[id].start()
            # "ztarttime" is the persisted key name used elsewhere
            # (see get_status) -- do not "fix" the spelling.
            meta_data = {
                "id": id,
                "model": self.model_name,
                "control_frequency": self.control_frequency,
                "horizon_in_steps": self.horizon_in_steps,
                "dT_in_seconds": self.dT_in_seconds,
                "repetition": self.repetition,
                "solver": self.solver,
                "optimization_type": self.optimization_type,
                "single_ev": self.single_ev,
                "ztarttime": time.time()
            }
            self.redisDB.set("run:" + id, "running")
            IDStatusManager.persist_id(id, True, meta_data, self.redisDB)
            logger.info("running status " + str(self.running))
            logger.debug("Command controller start finished")
            return 0
        else:
            self.set_isRunning(id, False)
            logger.debug("Flag isRunning set to False")
            IDStatusManager.persist_id(id, False, None, self.redisDB)
            self.factory[id].stopOptControllerThread()
            self.redisDB.set("run:" + id, "stopped")
            logger.error("Command controller start could not be finished")
            # logger.debug("System stopped succesfully")
            return 1

    def stop(self, id):
        """Stop the ThreadFactory for *id*, drop its bookkeeping entries
        and mark the id stopped in redis."""
        logger.debug("Stop signal received")
        logger.debug("This is the factory object: " + str(self.get(id)))
        if self.factory[id]:
            IDStatusManager.persist_id(id, False, None, self.redisDB)
            self.factory[id].stopOptControllerThread()
            del self.factory[id]
            del self.statusThread[id]
            #self.stop_pyro_servers()
            #self.stop_name_servers()
            self.set_isRunning(id, False)
            message = "System stopped succesfully"
            self.redisDB.set("run:" + id, "stopped")
            logger.debug(message)
            gc.collect()
        else:
            message = "No threads found"
            logger.debug(message)

    def run_status(self, id):
        """Watcher loop: poll once a second and stop the instance when the
        controller finishes or the redis flag is set to "stop"."""
        while True:
            status = self.get(id).is_running()
            flag = self.redisDB.get("run:" + id)
            if not status or (flag is not None and flag == "stop"):
                self.redisDB.set("run:" + id, "stopping")
                self.stop(id)
                break
            time.sleep(1)

    def restart_ids(self):
        """Re-start instances persisted as running and re-mark the rest as
        stopped (called after a framework restart)."""
        old_ids, stopped_ids = IDStatusManager.instances_to_restart(
            self.redisDB)
        for s in old_ids:
            val = json.loads(s)
            try:
                self.start(val["id"], None, val)
            except (InvalidModelException, MissingKeysException,
                    InvalidMQTTHostException) as e:
                # TODO: should we catch these exceptions here?
                # NOTE(review): returning here skips the remaining old_ids
                # and the stopped_ids loop -- confirm intended.
                logger.error("Error " + str(e))
                self.redisDB.set("run:" + val["id"], "stopped")
                return str(e)
        for s in stopped_ids:
            val = json.loads(s)
            id = val["id"]
            self.redisDB.set("run:" + id, "stopped")
            self.redisDB.set(Constants.id_meta + ":" + id, json.dumps(val))

    def get_status(self):
        """Build an id -> {status, config, start_time} report from the
        ``run:*`` and id-meta keys in redis."""
        status = {}
        keys = self.redisDB.get_keys_for_pattern("run:*")
        if keys is not None:
            for key in keys:
                value = self.redisDB.get(key)
                # strip the "run:" prefix to recover the id
                id = key[4:]
                status[id] = {}
                if value is None or (value is not None and value == "stopped"):
                    status[id]["status"] = "stopped"
                elif value == "running":
                    status[id]["status"] = "running"
                elif value == "stop" or value == "stopping":
                    status[id]["status"] = "stopping"
                elif value == "starting":
                    status[id]["status"] = "starting"
        keys = self.redisDB.get_keys_for_pattern(Constants.id_meta + ":*")
        if keys is not None:
            for key in keys:
                value = self.redisDB.get(key)
                # strip the meta prefix (presumably "id_meta:", 8 chars --
                # verify against Constants.id_meta)
                id = key[8:]
                if id not in status.keys():
                    status[id] = {}
                    status[id]["status"] = "stopped"
                status[id]["config"] = {}
                if value is not None:
                    status[id]["config"].update(json.loads(value))
                    # logger.debug("status id config "+str(status))
                    # expose the persisted "ztarttime" key as start_time
                    if "ztarttime" in status[id]["config"].keys():
                        status[id]["start_time"] = status[id]["config"][
                            "ztarttime"]
                        status[id]["config"].pop("ztarttime")
                    if "model" in status[id]["config"].keys():
                        status[id]["config"]["model_name"] = status[id][
                            "config"]["model"]
                        status[id]["config"].pop("model")
        return status
def framework_start(id, startOFW):  # noqa: E501
    """Command for starting the framework

     # noqa: E501

    :param id: Id of the registry to be started
    :type id: str
    :param startOFW: Start command for the optimization framework   repetitions: -1 infinite repetitions
    :type startOFW: dict | bytes

    :rtype: None
    """
    # Reject non-JSON requests outright.
    if not connexion.request.is_json:
        logger.error("Wrong Content-Type")
        return "Wrong Content-Type", 400

    logger.info("Starting the system")
    startOFW = Start.from_dict(connexion.request.get_json())

    available_solvers = ["ipopt", "glpk", "bonmin", "gurobi", "cbc"]
    available_optimizers = ["discrete", "stochastic", "MPC"]
    models = get_models()

    # Validate the requested model, solver and optimizer type.
    if startOFW.model_name != "" and startOFW.model_name not in models:
        return "Model not available. Available models are :" + str(
            models), 400
    if startOFW.solver not in available_solvers:
        return "Use one of the following solvers :" + str(
            available_solvers), 400
    if startOFW.optimization_type not in available_optimizers:
        return "Use one of the following optimizer types : " + str(
            available_optimizers), 400

    # The registry folder for this id must already exist.
    registry_dir = os.path.join(os.getcwd(), "optimization/resources",
                                str(id))
    if not os.path.exists(registry_dir):
        return "Id not existing", 400

    redis_db = RedisDB()
    if redis_db.get("run:" + id) == "running":
        return "System already running", 200

    try:
        if variable.start(id, startOFW) == 0:
            return "System started succesfully", 200
        return "System could not start", 400
    except (InvalidModelException, MissingKeysException,
            InvalidMQTTHostException) as e:
        logger.error("Error " + str(e))
        redis_db.set("run:" + id, "stopped")
        return str(e), 400
class BaseDataReceiver(DataReceiver, ABC):
    """Data receiver that aligns incoming SenML time-series into fixed-width
    time buckets of ``dT`` seconds, organised into cyclic day slots.

    Values are stored under keys of the form ``"<day>_<bucket>"``.  Subclasses
    implement :meth:`preprocess_data` to transform each raw value before it
    is stored.
    """

    def __init__(self, internal, topic_params, config, generic_name, id, buffer, dT, base_value_flag):
        self.id = id
        self.redisDB = RedisDB()
        self.logger = MessageLogger.get_logger(__name__, id)
        self.generic_name = generic_name
        self.buffer = buffer
        self.dT = dT  # bucket width in seconds
        self.base_value_flag = base_value_flag
        self.set_data_update(False)

        # file used to persist "reuseable" real data between runs
        persist_real_data_path = config.get("IO", "persist.real.data.path",
                                            fallback="optimization/resources")
        persist_real_data_path = os.path.join("/usr/src/app", persist_real_data_path, id, "real")
        self.persist_real_data_file = os.path.join(persist_real_data_path, generic_name + ".txt")

        # detachable topics deliver a value that is consumed only once
        self.detachable = topic_params.get("detachable", False)
        if self.detachable:
            self.value_used_once = False
        # reuseable topics persist their last data set to disk
        self.reuseable = topic_params.get("reuseable", False)
        if self.reuseable and not os.path.exists(persist_real_data_path):
            os.makedirs(persist_real_data_path)

        self.start_of_day = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0).timestamp()
        self.total_steps_in_day = floor(24 * 60 * 60 / self.dT)
        self.current_day_index = 0
        self.number_of_bucket_days = int(buffer / self.total_steps_in_day)
        self.bucket_index = False
        self.length = 1

        try:
            super(BaseDataReceiver, self).__init__(internal, topic_params, config, id=id)
        except Exception as e:
            # flag the MQTT problem in redis so the framework can react
            self.redisDB.set("Error mqtt" + id, True)
            self.logger.error(e)

        if self.reuseable:
            # preload previously persisted data, if available
            formated_data = self.read_data()
            if formated_data is not None and len(formated_data) > 0:
                self.length = len(formated_data)
                self.data.update(formated_data)
                self.set_data_update(True)
                self.last_time = time.time()

    def on_msg_received(self, payload):
        """Parse an incoming SenML payload, bucket-align it and store it."""
        try:
            # the day boundary may have passed since construction
            self.start_of_day = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0).timestamp()
            if "chargers" in payload:
                self.logger.debug("data received for charger = " + str(payload))
            senml_data = json.loads(payload)
            formated_data = self.add_formated_data(senml_data)
            if self.reuseable:
                self.save_data(formated_data)
            self.data.update(formated_data)
            self.set_data_update(True)
            self.last_time = time.time()
        except Exception as e:
            self.logger.error(e)

    def save_data(self, formated_data):
        """Persist the received values, one per line, ordered by (day, bucket).

        Fix: the original called ``sorted(keys)`` and discarded the result,
        so values were written in insertion order; keys are now sorted
        numerically on their "<day>_<bucket>" components, matching the
        sequential order that read_data() assumes.
        """
        keys = sorted(formated_data.keys(),
                      key=lambda k: tuple(int(part) for part in k.split("_")))
        values = [formated_data[key] for key in keys]
        with open(self.persist_real_data_file, "w") as f:
            # write one joined string (writelines on a str wrote it
            # character by character)
            f.write("\n".join(map(str, values)))
            self.logger.debug("saved real reuseable data to file " + self.persist_real_data_file)

    def read_data(self):
        """Load persisted values and re-key them sequentially into
        "<day>_<bucket>" slots starting from the current day index.

        :return: dict of bucket-keyed floats, or None when no file exists.
        """
        if not os.path.exists(self.persist_real_data_file):
            return None
        with open(self.persist_real_data_file, "r") as f:
            formated_data = {}
            bucket = 0
            for row in f.readlines():
                bucket_key = str(self.current_day_index) + "_" + str(bucket)
                formated_data[bucket_key] = float(row)
                bucket += 1
                if bucket >= self.total_steps_in_day:
                    # wrap around to the next day slot (cyclic buffer)
                    bucket = 0
                    self.current_day_index += 1
                    if self.current_day_index >= self.number_of_bucket_days:
                        self.current_day_index = 0
            return formated_data

    def add_formated_data(self, json_data):
        """Convert a SenML document (or a single measurement) into a dict
        keyed by "<day>_<bucket>", with each value run through
        preprocess_data and resampled onto the dT grid.

        :return: bucket-keyed dict; {} when the payload cannot be parsed.
        """
        doc = None
        try:
            doc = senml.SenMLDocument.from_json(json_data)
        except Exception:
            pass
        if not doc:
            # the payload may be a single measurement rather than a document
            try:
                meas = senml.SenMLMeasurement.from_json(json_data)
                doc = senml.SenMLDocument([meas])
            except Exception:
                pass
        if not doc:
            return {}

        base_data = doc.base
        bn, bu = None, None
        if base_data:
            bn = base_data.name
            bu = base_data.unit
        data = {}
        raw_data = []
        doc.measurements = sorted(doc.measurements, key=lambda x: x.time)
        if len(doc.measurements) > 0:
            for meas in doc.measurements:
                n = meas.name
                u = meas.unit
                v = meas.value
                t = self.time_conversion(meas.time)
                if not u:
                    u = bu
                # dont check bn
                if not n:
                    n = self.generic_name
                try:
                    processed_value = self.preprocess_data(bn, n, v, u)
                    # fix: original used "is not {}", an identity check
                    # against a fresh literal that is always True; compare
                    # by value so empty results are dropped as intended
                    if processed_value is not None and processed_value != {}:
                        raw_data.append([t, processed_value])
                except Exception as e:
                    self.logger.error("error " + str(e) + "  n = " + str(n))
            raw_data = TimeSeries.expand_and_resample(raw_data, self.dT, True)
            if len(raw_data) > 0:
                self.length = len(raw_data)
                bucket = self.time_to_bucket(raw_data[0][0])
                for row in raw_data:
                    bucket_key = str(self.current_day_index) + "_" + str(bucket)
                    bucket += 1
                    if bucket >= self.total_steps_in_day:
                        # wrap to the next day slot (cyclic buffer)
                        bucket = 0
                        self.current_day_index += 1
                        if self.current_day_index >= self.number_of_bucket_days:
                            self.current_day_index = 0
                    data[bucket_key] = row[1]
        return data

    @abstractmethod
    def preprocess_data(self, base, name, value, unit):
        """Transform a raw SenML value before it is stored.

        :param base: base name (bn) of the SenML document, may be None.
        :param name: measurement name (falls back to generic_name).
        :param value: raw measurement value.
        :param unit: measurement unit (falls back to the base unit).
        :return: the processed value; None or {} to discard it.
        """
        return value

    def iterative_init(self, d, v):
        """Recursively wrap d in nested single-key dicts, one level per
        element of v, e.g. ({}, ["a", "b"]) -> {"a": {"b": {}}}."""
        if len(v) <= 0:
            return d
        return self.iterative_init({v[-1]: d}, v[:-1])

    def get_bucket_aligned_data(self, bucket, steps, wait_for_data=True, check_bucket_change=True):
        """Return ``steps`` consecutive values starting at ``bucket``.

        :param bucket: bucket index to start reading from.
        :param steps: number of consecutive values requested (capped at the
            length of the last received series).
        :param wait_for_data: block until fresh data arrives (applies to
            plain topics only; detachable/reuseable topics never block).
        :param check_bucket_change: re-align once if the wall-clock bucket
            advanced while waiting for data.
        :return: tuple (final_data, bucket_available, last_time).
        """
        bucket_requested = bucket
        self.logger.info("Get "+str(self.generic_name)+" data for bucket = "+str(bucket_requested))
        bucket_available = True
        if self.base_value_flag:
            final_data = self.iterative_init({}, self.generic_name.split("/"))
        else:
            final_data = {self.generic_name: {}}

        #TODO: figure out every use case
        if self.detachable and self.reuseable:
            data = self.get_data(require_updated=2)
        elif self.detachable and not self.value_used_once:
            data = self.get_data(require_updated=2)
        elif self.detachable:
            data = self.get_data(require_updated=2, clearData=True)
        elif self.reuseable:
            data = self.get_data(require_updated=1)
        elif wait_for_data:
            data = self.get_data(require_updated=0)
        else:
            data = self.get_data(require_updated=1)

        if not self.redisDB.get("End ofw") == "True":
            self.logger.debug(str(self.generic_name) + " data from mqtt is : "+ json.dumps(data, indent=4))
            self.logger.debug(str(self.generic_name) + " steps: "+str(steps) + " length: "+str(self.length))
            if steps > self.length:
                steps = self.length
            day = None
            self.logger.debug(str(self.generic_name) + " steps: " + str(steps))
            if len(data) >= steps:
                # find the most recent day slot that contains this bucket
                for i in reversed(range(self.number_of_bucket_days+1)):
                    key = str(i) + "_" + str(bucket)
                    self.logger.debug("key in data: "+str(key)+" for "+str(self.generic_name))
                    if key in data.keys():
                        day = str(i)
                        break
                if day is None and self.detachable and not self.value_used_once and self.last_time > 0:
                    self.logger.debug("Day set to 0 for detachable for " + str(self.generic_name))
                    day = "0"
                if day is None and self.detachable:
                    self.logger.debug("Ignoring day for detachable for " + str(self.generic_name))
                    pass
                elif day is None:
                    bucket_available = False
                    self.logger.debug("Setting bucket available to False. Day is None for " + str(self.generic_name))
                else:
                    # collect `steps` consecutive values, wrapping day/bucket
                    new_data = {}
                    index = 0
                    while len(new_data) < steps:
                        bucket_key = day + "_" + str(bucket)
                        if bucket_key in data.keys():
                            new_data[index] = data[bucket_key]
                            index += 1
                        bucket += 1
                        if bucket >= self.total_steps_in_day:
                            bucket = 0
                            day_i = int(day) + 1
                            if day_i >= self.number_of_bucket_days:
                                day_i = 0
                            day = str(day_i)
                    self.logger.debug("base_value_flag "+str(self.base_value_flag)+" for "+str(self.generic_name))
                    if self.base_value_flag:
                        for k, v in new_data.items():
                            if isinstance(v, dict):
                                final_data.update(v)
                    else:
                        final_data = {self.generic_name: new_data}
            if check_bucket_change:
                self.logger.debug("check_bucket_change flag: "+str(check_bucket_change)+ " for "+str(self.generic_name))
                new_bucket = self.time_to_bucket(datetime.datetime.now().timestamp())
                if new_bucket > bucket_requested:
                    self.logger.debug("bucket changed from " + str(bucket_requested) +
                                      " to " + str(new_bucket) + " due to wait time for " + str(self.generic_name))
                    final_data, bucket_available, _ = self.get_bucket_aligned_data(new_bucket, steps, wait_for_data=False, check_bucket_change=False)
        else:
            self.logger.debug("End ofw in redis is True")

        if self.detachable and bucket_available:
            self.value_used_once = True
        return (final_data, bucket_available, self.last_time)

    def time_conversion(self, time):
        """Normalise a timestamp to seconds.

        SenML timestamps may arrive in milli/micro/nanoseconds; any value
        with more than 10 integer digits is scaled down to seconds.
        """
        t = str(time)
        int_digits = len(t)
        if "." in t:
            int_digits = t.find(".")
        if int_digits > 10:
            return time / (10 ** (int_digits - 10))
        return time

    def time_to_bucket(self, time):
        """Map a timestamp (in seconds) to a bucket index within today."""
        bucket = floor((time - self.start_of_day) / self.dT)
        if bucket > self.total_steps_in_day:
            bucket = self.total_steps_in_day
        elif bucket < 0:
            # timestamp predates today's start: wrap into the day cycle
            bucket = bucket % self.total_steps_in_day
            self.logger.warning("Received data is of older timestamp = "+str(time)+
                                " than start of today = "+str(self.start_of_day)+" for "+str(self.generic_name)+
                                ". set to bucket "+str(bucket)+" with total buckets "+str(self.total_steps_in_day))
        return bucket

    def get_current_bucket_data(self, steps, wait_for_data=True, check_bucket_change=True):
        """Return `steps` values aligned to the bucket of the current time."""
        bucket = self.time_to_bucket(datetime.datetime.now().timestamp())
        self.logger.debug("current b = "+str(bucket))
        return self.get_bucket_aligned_data(bucket, steps, wait_for_data, check_bucket_change)
class DataReceiver(ABC):

    def __init__(self, internal, topic_params, config, emptyValue=None, id=None, section=None, prepare_topic_qos=True,
                 sub_pub=False, connect_check_flag=False):
        """Resolve channel/topic/host settings and connect the client.

        :param internal: True to use the internal channel settings.
        :param topic_params: topic configuration dict, or a list of
            (topic, qos) pairs when prepare_topic_qos is False.
        :param config: ConfigParser-like configuration object.
        :param emptyValue: template for the empty data container; a fresh
            dict is used when None.  (Fix: the original used a mutable
            default ``{}`` shared across all instances.)
        :param id: framework instance id, appended to internal topic names.
        :param section: config section for the external host lookup;
            defaults to "IO".
        :param prepare_topic_qos: build (topic/id, qos) pairs from topic_params.
        :param sub_pub: look up the publisher host key instead of the
            subscriber one.
        :param connect_check_flag: forwarded to the MQTT client.
        """
        super(DataReceiver, self).__init__()
        self.logger = MessageLogger.get_logger(__name__, id)
        self.stop_request = False
        self.internal = internal
        self.topic_params = topic_params
        self.prepare_topic_qos = prepare_topic_qos
        # fresh container per instance instead of a shared mutable default
        self.emptyValue = {} if emptyValue is None else emptyValue
        self.connect_check_flag = connect_check_flag
        self.data = self.emptyValue.copy()
        self.data_update = False
        self.config = config
        self.channel = "MQTT"
        self.topics = None
        self.port = None
        self.host_params = {}
        self.first_time = 0
        self.last_time = 0
        self.id = id
        self.section = section
        self.redisDB = RedisDB()
        self.sub_pub = sub_pub
        if self.section is None:
            self.section = "IO"
        self.setup()
        if self.channel == "MQTT":
            self.init_mqtt(self.topics)
        elif self.channel == "ZMQ":
            self.init_zmq(self.topics)

    def setup(self):
        if self.internal:
            self.channel = self.config.get("IO", "channel")
            self.topics, self.host_params = self.get_internal_channel_params()
        else:
            self.topics, self.host, self.host_params = self.get_external_channel_params()

    def get_external_channel_params(self):
        """Read the external broker host/topic settings from configuration.

        :return: tuple (topic_qos list, host, host_params dict); also sets
            self.port as a side effect of extract_host_params.
        """
        host_key = "pub.mqtt.host" if self.sub_pub else "sub.mqtt.host"
        section_items = dict(self.config.items(self.section))
        if host_key in section_items:
            host = self.config.get(self.section, host_key)
        else:
            host = self.config.get("IO", "mqtt.host")

        host, host_params, qos, topic, self.port = ConfigParserUtils.extract_host_params(
            host, self.port, self.topic_params, self.config, self.section)
        topic_qos = [(topic, qos)] if topic else []
        return (topic_qos, host, host_params)

    def get_internal_channel_params(self):
        if self.channel == "MQTT":
            sub_mqtt = "sub.mqtt.host"
            if self.sub_pub:
                sub_mqtt = "pub.mqtt.host"
            topic_qos = []
            host_params = {}
            if self.prepare_topic_qos:
                for k, v in self.topic_params.items():
                    if k == "topic":
                        topic_qos.append((v + "/" + self.id,1))
                    elif k == "mqtt.port":
                        self.port = v
            elif isinstance(self.topic_params, list):
                topic_qos = self.topic_params
                self.port = self.config.get("IO", "mqtt.port")
            if sub_mqtt in dict(self.config.items("IO")):
                self.host = self.config.get("IO", sub_mqtt)
            if "mqtt.host" in dict(self.config.items("IO")):
                self.host = self.config.get("IO", "mqtt.host")
            host_params["username"] = self.config.get("IO", "mqtt.username", fallback=None)
            host_params["password"] = self.config.get("IO", "mqtt.password", fallback=None)
            host_params["ca_cert_path"] = self.config.get("IO", "mqtt.ca.cert.path", fallback=None)
            host_params["insecure_flag"] = bool(self.config.get("IO", "mqtt.insecure.flag", fallback=False))
            return (topic_qos, host_params)
        elif self.channel == "ZMQ":
            topics = []
            for k, v in self.topic_params.items():
                if k == "topic":
                    topics.append(v + "/" + self.id)
            self.port = self.config.get("IO", "zmq.sub.port")
            self.host = self.config.get("IO", "zmq.host")
            return (topics, None)

    def init_mqtt(self, topic_qos):
        """Create the MQTT client and subscribe it to the given topics.

        Raises whatever the client raises; the caller handles MQTT errors.
        """
        self.logger.info("Initializing mqtt subscription client")
        # if we set it to false here again then it may overwrite previous true value
        # self.redisDB.set("Error mqtt"+self.id, False)
        try:
            if not self.port:
                # fall back to the default MQTT port
                self.port = 1883
            unique_suffix = str(randrange(100000)) + str(time.time()).replace(".", "")
            self.client_id = "client_receive" + unique_suffix
            self.mqtt = MQTTClient(str(self.host), self.port, self.client_id,
                                   username=self.host_params["username"],
                                   password=self.host_params["password"],
                                   ca_cert_path=self.host_params["ca_cert_path"],
                                   set_insecure=self.host_params["insecure_flag"],
                                   id=self.id,
                                   connect_check_flag=self.connect_check_flag)
            self.mqtt.subscribe_to_topics(topic_qos, self.on_msg_received)
            self.logger.info("successfully subscribed")
        except Exception as e:
            self.logger.error(e)
            # error for mqtt will be caught by parent
            raise e

    def init_zmq(self, topics):
        """Create the ZMQ subscriber client for the given topics."""
        self.logger.info("Initializing zmq subscription client")
        client = ZMQClient(self.host, None, self.port)
        client.init_subscriber(topics, self.id)
        self.zmq = client

    @abstractmethod
    def on_msg_received(self, payload):
        """Handle one raw message delivered by the channel client.

        :param payload: raw message payload as received from MQTT/ZMQ.
        """
        pass

    def get_data_update(self):
        """Return True when new data arrived since the last read."""
        return self.data_update

    def set_data_update(self, data_update):
        """Set the new-data-available flag."""
        self.data_update = data_update

    def get_mqtt_data(self, require_updated, clearData):
        if require_updated == 1 and not self.data:
            require_updated = 0
        ctr = 0
        while require_updated == 0 and not self.get_data_update() and not self.stop_request and not self.redisDB.get("End ofw") == "True":
            if ctr >= 19:
                ctr = 0
                self.logger.debug("wait for data "+str(self.topics))
            ctr += 1
            time.sleep(0.5)
        return self.get_and_update_data(clearData)

    def exit(self):
        self.stop_request = True
        try:
            if self.channel == "MQTT":
                self.mqtt.MQTTExit()
            elif self.channel == "ZMQ":
                self.zmq.stop()
            self.logger.info("dataReceiver safe exit")
        except Exception as e:
            self.logger.warning(str(e))

    def get_zmq_msg(self, clearData):
        while True and not self.stop_request:
            self.logger.debug("get zmq msg")
            flag, topic, message = self.zmq.receive_message()
            self.logger.debug("zmq subscription msg received for topic "+str(topic)+" for id "+str(self.id))
            if flag:
                self.on_msg_received(message)
                break
            time.sleep(1)
        return self.get_and_update_data(clearData)

    def get_and_update_data(self, clearData):
        new_data = self.data.copy()
        self.set_data_update(False)
        if clearData:
            self.clear_data()
        self.logger.debug("new_data "+str(new_data))
        return new_data

    def clear_data(self):
        """Reset stored data to a fresh copy of the configured empty value."""
        self.data = self.emptyValue.copy()

    def get_data(self, require_updated=0, clearData=False):
        """

        :param require_updated: 0 -> wait for new data
                                1 -> wait for new data if no prev data
                                2 -> return prev data, even if empty
        :return:
        """
        data = {}
        if self.channel == "MQTT":
            data = self.get_mqtt_data(require_updated, clearData)
        elif self.channel == "ZMQ":
            data = self.get_zmq_msg(clearData)
        return data