Example 1
class JNTTServer(JNTTBase):
    """Server base test
    """

    server_class = None
    server_conf = ""
    server_section = None
    hadd_ctrl = None
    hadds = None

    def setUp(self):
        JNTTBase.setUp(self)
        self.mqttc = None
        self.message = None
        self.message_received = False
        self.hearbeat_mqttc = None
        self.heartbeat_message = None
        self.heartbeat_waiting = None
        self.heartbeat_waitings = None
        self.heartbeat_received = False
        self.server = None
        self.running_server = None
        if self.hadd_ctrl is None and self.hadds is not None and len(self.hadds) > 0:
            self.hadd_ctrl = self.hadds[0]

    def tearDown(self):
        self.stopClient()
        self.stopServer()
        self.mqttc = None
        self.message = None
        self.message_received = False
        self.hearbeat_mqttc = None
        self.heartbeat_message = None
        self.heartbeat_waiting = None
        self.heartbeat_waitings = None
        self.heartbeat_received = False
        self.server = None
        self.running_server = None
        JNTTBase.tearDown(self)

    def startClient(self, conf=None):
        if conf is None:
            conf = self.server.options.data
        if self.mqttc is None:
            self.mqttc = MQTTClient(options=conf)
            self.mqttc.connect()
            self.mqttc.start()
        if self.hearbeat_mqttc is None:
            self.hearbeat_mqttc = MQTTClient(options=conf)
            self.hearbeat_mqttc.connect()
            self.hearbeat_mqttc.start()
            self.hearbeat_mqttc.subscribe(topic=TOPIC_HEARTBEAT, callback=self.mqtt_on_heartbeat_message)

    def stopClient(self):
        if self.mqttc is not None:
            self.mqttc.stop()
        if self.hearbeat_mqttc is not None:
            self.hearbeat_mqttc.unsubscribe(topic=TOPIC_HEARTBEAT)
            self.hearbeat_mqttc.stop()
        if self.mqttc is not None:
            if self.mqttc.is_alive():
                self.mqttc.join()
            self.mqttc = None
        if self.hearbeat_mqttc is not None:
            if self.hearbeat_mqttc.is_alive():
                self.hearbeat_mqttc.join()
            self.hearbeat_mqttc = None

    def startServer(self):
        if self.server is None:
            with mock.patch(
                "sys.argv", ["%s" % self.server_class, "start", "--conf_file=%s" % self.getDataFile(self.server_conf)]
            ):
                options = vars(jnt_parse_args())
                self.server = self.server_class(options)
            self.server.start()
            self.running_server = threading.Timer(0.01, self.server.run)
            self.running_server.start()
            time.sleep(1.5)

    def stopServer(self):
        if self.server is not None:
            self.server.stop()
            time.sleep(5)
            self.server = None
        if self.running_server is not None:
            self.running_server.cancel()
            time.sleep(5)
            self.running_server = None
        self.message = None

    def start(self):
        self.startServer()
        self.startClient()

    def stop(self):
        self.stopClient()
        self.stopServer()

    def mqtt_on_heartbeat_message(self, client, userdata, message):
        """On generic message
        """
        self.heartbeat_message = message
        hb = HeartbeatMessage(self.heartbeat_message)
        hbadd_ctrl, hbadd_node, state = hb.get_heartbeat()
        if hbadd_ctrl is not None and hbadd_node is not None:
            if self.heartbeat_waiting is None:
                if self.heartbeat_waitings is None:
                    self.heartbeat_received = True
                elif HADD % (hbadd_ctrl, hbadd_node) in self.heartbeat_waitings:
                    self.heartbeat_waitings.remove(HADD % (hbadd_ctrl, hbadd_node))
                    if len(self.heartbeat_waitings) == 0:
                        self.heartbeat_received = True
            elif self.heartbeat_waiting == HADD % (hbadd_ctrl, hbadd_node):
                self.heartbeat_received = True
        print "HADD : %s/%s = %s" % (hbadd_ctrl, hbadd_node, state)

    def assertInLogfile(self, expr="^ERROR "):
        """Assert an expression is in logifle
        Must be called at the end of process, when the server has closed the logfile.
        """
        self.assertTrue(self.server_conf is not None)
        options = JNTOptions(options={"conf_file": self.getDataFile(self.server_conf)})
        log_file_from_config = options.get_option("handler_file", "args", None)
        self.assertTrue(log_file_from_config is not None)
        # I know, it's bad
        log_args = eval(log_file_from_config)
        log_file_from_config = log_args[0]
        self.assertFile(log_file_from_config)
        found = False
        with open(log_file_from_config, "r") as hand:
            for line in hand:
                print line
                if re.search(expr, line):
                    found = True
        self.assertTrue(found)
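
    # A sketch (an assumption based on the standard logging fileConfig format
    # read above) of the conf file section that assertInLogfile and
    # assertNotInLogfile expect; the path is hypothetical:
    #
    #   [handler_file]
    #   class = FileHandler
    #   args = ('/tmp/janitoo_test/log/jnt_myserver.log', 'w')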

    def assertNotInLogfile(self, expr="^ERROR "):
        """Assert an expression is not in logifle.
        Must be called at the end of process, when the server has closed the logfile.
        """
        self.assertTrue(self.server_conf is not None)
        options = JNTOptions(options={"conf_file": self.getDataFile(self.server_conf)})
        log_file_from_config = options.get_option("handler_file", "args", None)
        self.assertTrue(log_file_from_config is not None)
        # I know, it's bad
        log_args = eval(log_file_from_config)
        log_file_from_config = log_args[0]
        self.assertFile(log_file_from_config)
        found = False
        with open(log_file_from_config, "r") as hand:
            for line in hand:
                print line
                if re.search(expr, line):
                    found = True
        self.assertFalse(found)

    def assertHeartbeatNode(self, hadd=None, timeout=60):
        print "Waiting for %s" % (hadd)
        self.heartbeat_waiting = hadd
        self.heartbeat_waitings = None
        self.heartbeat_message = None
        self.heartbeat_received = False
        i = 0
        while i < timeout * 10000 and not self.heartbeat_received:
            time.sleep(0.0001)
            i += 1
        self.assertTrue(self.heartbeat_received)
        time.sleep(0.5)

    def assertHeartbeatNodes(self, hadds=None, timeout=60):
        if hadds is None:
            hadds = self.hadds
        print "Waiting for %s" % (hadds)
        self.heartbeat_waiting = None
        self.heartbeat_waitings = list(hadds)
        self.heartbeat_message = None
        self.heartbeat_received = False
        i = 0
        while i < timeout * 10000 and not self.heartbeat_received:
            time.sleep(0.0001)
            i += 1
        print "Unreceived heartbeats %s" % self.heartbeat_waitings
        self.assertTrue(self.heartbeat_received)
        time.sleep(0.5)

    def waitHeartbeatNodes(self, hadds=None, timeout=60):
        if hadds is None:
            hadds = self.hadds
        print "Waiting for %s" % (hadds)
        self.heartbeat_waiting = None
        self.heartbeat_waitings = list(hadds)
        self.heartbeat_message = None
        self.heartbeat_received = False
        i = 0
        while i < timeout * 10000 and not self.heartbeat_received:
            time.sleep(0.0001)
            i += 1
        print "Unreceived heartbeats %s" % self.heartbeat_waitings
        time.sleep(0.5)

    def assertNodeRequest(
        self,
        cmd_class=0,
        genre=0x04,
        uuid="request_info_nodes",
        node_hadd=None,
        client_hadd=None,
        data=None,
        is_writeonly=False,
        is_readonly=False,
        timeout=5,
    ):
        self.message_received = False
        print "Waiting for %s : %s" % (node_hadd, uuid)

        def mqtt_on_message(client, userdata, message):
            """On generic message
            """
            self.message = message
            self.message_received = True

        self.mqttc.subscribe(topic=TOPIC_NODES_REPLY % client_hadd, callback=mqtt_on_message)
        time.sleep(0.5)
        msg = {
            "cmd_class": cmd_class,
            "genre": genre,
            "uuid": uuid,
            "reply_hadd": client_hadd,
            "data": data,
            "hadd": node_hadd,
            "is_writeonly": is_writeonly,
            "is_readonly": is_readonly,
        }
        self.mqttc.publish("/nodes/%s/request" % (node_hadd), json_dumps(msg))
        i = 0
        while i < timeout * 10000 and not self.message_received:
            time.sleep(0.0001)
            i += 1
        self.assertTrue(self.message_received)
        self.mqttc.unsubscribe(topic=TOPIC_NODES_REPLY % client_hadd)
        time.sleep(0.5)

    def assertBroadcastRequest(
        self,
        cmd_class=0,
        genre=0x04,
        uuid="request_info_nodes",
        node_hadd=None,
        client_hadd=None,
        data=None,
        is_writeonly=False,
        is_readonly=False,
        timeout=5,
    ):
        self.message_received = False

        def mqtt_on_message(client, userdata, message):
            """On generic message
            """
            self.message = message
            self.message_received = True

        self.mqttc.subscribe(topic=TOPIC_BROADCAST_REPLY % client_hadd, callback=mqtt_on_message)
        time.sleep(0.5)
        msg = {
            "cmd_class": cmd_class,
            "genre": genre,
            "uuid": uuid,
            "reply_hadd": client_hadd,
            "data": data,
            "is_writeonly": is_writeonly,
            "is_readonly": is_readonly,
        }
        self.mqttc.publish(TOPIC_BROADCAST_REQUEST, json_dumps(msg))
        i = 0
        while i < timeout * 10000 and not self.message_received:
            time.sleep(0.0001)
            i += 1
        self.assertTrue(self.message_received)
        self.mqttc.unsubscribe(topic=TOPIC_BROADCAST_REPLY % client_hadd)
        time.sleep(0.5)

    def assertUpdateValue(
        self,
        type="user",
        data=None,
        cmd_class=0,
        genre=0x04,
        uuid="request_info_nodes",
        node_hadd=None,
        client_hadd=None,
        is_writeonly=False,
        is_readonly=False,
        timeout=5,
    ):
        self.message_received = False
        self.message = None
        print "Waiting for %s : %s" % (node_hadd, uuid)

        def mqtt_on_message(client, userdata, message):
            """On generic message
            """
            msg = json_loads(message.payload)
            print "Received message %s" % msg
            if msg["uuid"] == uuid and msg["hadd"] == node_hadd:
                self.message = message
                self.message_received = True

        self.mqttc.subscribe(topic="/values/%s/%s/#" % (type, node_hadd), callback=mqtt_on_message)
        print "Subscribe to /values/%s/%s/#" % (type, node_hadd)
        time.sleep(0.5)
        msg = {
            "cmd_class": cmd_class,
            "genre": genre,
            "uuid": uuid,
            "reply_hadd": client_hadd,
            "data": data,
            "hadd": node_hadd,
            "is_writeonly": is_writeonly,
            "is_readonly": is_readonly,
        }
        self.mqttc.publish("/nodes/%s/request" % (node_hadd), json_dumps(msg))
        i = 0
        while i < timeout * 10000 and not self.message_received:
            time.sleep(0.0001)
            i += 1
        self.assertTrue(self.message_received)
        self.assertTrue(self.message is not None)
        self.assertTrue(self.message.payload is not None)
        if data is not None:
            msg = json_loads(self.message.payload)
            self.assertEqual(msg["data"], data)
        self.mqttc.unsubscribe(topic="/values/%s/%s/#" % (type, node_hadd))
        time.sleep(0.5)

    def assertNotUpdateValue(
        self,
        type="user",
        data=None,
        cmd_class=0,
        genre=0x04,
        uuid="request_info_nodes",
        node_hadd=None,
        client_hadd=None,
        is_writeonly=False,
        is_readonly=False,
        timeout=5,
    ):
        self.message_received = False
        self.message = None
        print "Waiting for %s : %s" % (node_hadd, uuid)

        def mqtt_on_message(client, userdata, message):
            """On generic message
            """
            msg = json_loads(message.payload)
            print "Received message %s" % msg
            if msg["uuid"] == uuid and msg["hadd"] == node_hadd:
                self.message = message
                self.message_received = True

        self.mqttc.subscribe(topic="/values/%s/%s/#" % (type, node_hadd), callback=mqtt_on_message)
        print "Subscribe to /values/%s/%s/#" % (type, node_hadd)
        time.sleep(0.5)
        msg = {
            "cmd_class": cmd_class,
            "genre": genre,
            "uuid": uuid,
            "reply_hadd": client_hadd,
            "data": data,
            "hadd": node_hadd,
            "is_writeonly": is_writeonly,
            "is_readonly": is_readonly,
        }
        self.mqttc.publish("/nodes/%s/request" % (node_hadd), json_dumps(msg))
        i = 0
        while i < timeout * 10000 and not self.message_received:
            time.sleep(0.0001)
            i += 1
        self.assertTrue(self.message is None)
        self.assertFalse(self.message_received)
        self.mqttc.unsubscribe(topic="/values/%s/%s/#" % (type, node_hadd))
        time.sleep(0.5)

    def assertWaitValue(
        self,
        type="user",
        data=None,
        cmd_class=0,
        genre=0x04,
        uuid="request_info_nodes",
        node_hadd=None,
        client_hadd=None,
        is_writeonly=False,
        is_readonly=False,
        timeout=5,
    ):
        self.message_received = False
        self.message = None
        print "Waiting for %s : %s" % (node_hadd, uuid)

        def mqtt_on_message(client, userdata, message):
            """On generic message
            """
            msg = json_loads(message.payload)
            print "Received message %s" % msg
            if msg["uuid"] == uuid and msg["hadd"] == node_hadd:
                self.message = message
                self.message_received = True

        self.mqttc.subscribe(topic="/values/%s/%s/#" % (type, node_hadd), callback=mqtt_on_message)
        print "Subscribe to /values/%s/%s/#" % (type, node_hadd)
        time.sleep(0.5)
        i = 0
        while i < timeout * 10000 and not self.message_received:
            time.sleep(0.0001)
            i += 1
        self.assertTrue(self.message_received)
        self.assertTrue(self.message is not None)
        self.assertTrue(self.message.payload is not None)
        if data is not None:
            msg = json_loads(self.message.payload)
            self.assertEqual(msg["data"], data)
        self.mqttc.unsubscribe(topic="/values/%s/%s/#" % (type, node_hadd))
        time.sleep(0.5)

    def assertNotWaitValue(
        self,
        type="user",
        data=None,
        cmd_class=0,
        genre=0x04,
        uuid="request_info_nodes",
        node_hadd=None,
        client_hadd=None,
        is_writeonly=False,
        is_readonly=False,
        timeout=5,
    ):
        self.message_received = False
        self.message = None
        print "Waiting for %s : %s" % (node_hadd, uuid)

        def mqtt_on_message(client, userdata, message):
            """On generic message
            """
            msg = json_loads(message.payload)
            print "Received message %s" % msg
            if msg["uuid"] == uuid and msg["hadd"] == node_hadd:
                self.message = message
                self.message_received = True

        self.mqttc.subscribe(topic="/values/%s/%s/#" % (type, node_hadd), callback=mqtt_on_message)
        print "Subscribe to /values/%s/%s/#" % (type, node_hadd)
        time.sleep(0.5)
        i = 0
        while i < timeout * 10000 and not self.message_received:
            time.sleep(0.0001)
            i += 1
        self.assertTrue(self.message is None)
        self.assertFalse(self.message_received)
        self.mqttc.unsubscribe(topic="/values/%s/%s/#" % (type, node_hadd))
        time.sleep(0.5)

    def assertFsmBoot(self, state="booting"):
        """Assert Finish State Machine can boot
        """
        thread = self.server.find_thread(self.server_section)
        self.assertNotEqual(thread, None)
        JNTTBase.assertFsmBoot(self, bus=thread.bus, state=state)
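
A minimal usage sketch, not part of the original code: it shows how a concrete test case might build on JNTTServer, using only methods defined above. MyServer, the configuration file, the section name, the HADD strings and the client HADD are hypothetical.

class TestMyServer(JNTTServer):
    """Hypothetical test case for a concrete server."""

    server_class = MyServer                      # hypothetical server class
    server_conf = "tests/data/my_server.conf"    # hypothetical configuration file
    server_section = "myserver"                  # hypothetical section name
    hadds = ["0010/0000", "0010/0001"]           # hypothetical HADD strings

    def test_010_boot_and_heartbeat(self):
        self.start()
        try:
            # Wait for a heartbeat from every declared HADD.
            self.assertHeartbeatNodes(hadds=self.hadds)
            # Query the controller node and expect a reply on the client HADD.
            self.assertNodeRequest(node_hadd=self.hadd_ctrl, client_hadd="9999/0000")
        finally:
            self.stop()
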
Example 2
class RemoteNodeComponent(JNTComponent):
    """ A resource ie /rrd """

    def __init__(self, bus=None, addr=None, **kwargs):
        """
        """
        oid = kwargs.pop('oid', 'remote.node')
        name = kwargs.pop('name', "Remote node")
        product_name = kwargs.pop('product_name', "Remote node")
        product_type = kwargs.pop('product_type', "Software")
        JNTComponent.__init__(self, oid=oid, bus=bus, addr=addr, name=name,
                product_name=product_name, product_type=product_type, **kwargs)
        logger.debug("[%s] - __init__ node uuid:%s", self.__class__.__name__, self.uuid)
        self.mqttc_heartbeat = None
        self.mqttc_values = None
        self.state = 'OFFLINE'
        self.remote_hadd = (None,None)
        uuid="remote_hadd"
        self.values[uuid] = self.value_factory['config_string'](options=self.options, uuid=uuid,
            node_uuid=self.uuid,
            help='HADD of the remote node that we will listen to',
            label='rhadd',
            default=None,
        )
        uuid="user_read"
        self.values[uuid] = self.value_factory['rread_value'](options=self.options, uuid=uuid,
            node_uuid=self.uuid,
            help='The user values to listen to : value_uuid:index',
            label='ruser',
            default=None,
        )
        uuid="user_write"
        self.values[uuid] = self.value_factory['rwrite_value'](options=self.options, uuid=uuid,
            node_uuid=self.uuid,
            help='The user values to listen to : value_uuid:index',
            label='wuser',
            default=None,
        )
        uuid="basic_read"
        self.values[uuid] = self.value_factory['rread_value'](options=self.options, uuid=uuid,
            node_uuid=self.uuid,
            help='The basic values to listen to : value_uuid:index',
            label='rbasic',
            default=None,
        )
        uuid="basic_write"
        self.values[uuid] = self.value_factory['rwrite_value'](options=self.options, uuid=uuid,
            node_uuid=self.uuid,
            help='The basic values to listen to : value_uuid:index',
            label='wbasic',
            default=None,
        )

    def start(self, mqttc):
        """Start the component.
        """
        self.state = 'BOOT'
        JNTComponent.start(self, mqttc)
        hadd = self.values['remote_hadd'].data
        logger.debug("[%s] - Found remote HADD %s for node %s", self.__class__.__name__, hadd, self.node.uuid)
        if hadd is None:
            logger.debug("[%s] - No remote HADD. Exit ...", self.__class__.__name__)
            return False
        self.remote_hadd = hadd_split(hadd)
        if self.remote_hadd[0] is None or self.remote_hadd[1] is None:
            logger.warning("[%s] - Bad remote HADD %s", self.__class__.__name__, hadd)
            return False
        try:
            self.mqttc_heartbeat = MQTTClient(options=self.options.data)
            self.mqttc_heartbeat.connect()
            self.mqttc_heartbeat.subscribe(topic=TOPIC_HEARTBEAT_NODE%(hadd), callback=self.on_heartbeat)
            self.mqttc_heartbeat.start()
        except Exception:
            logger.exception("[%s] - start", self.__class__.__name__)
        values_read = self.get_read_values()
        values_write = self.get_write_values()
        logger.debug("[%s] - found %s values_read", self.__class__.__name__, len(values_read))
        logger.debug("[%s] - found %s values_write", self.__class__.__name__, len(values_write))
        topics = []
        for value in values_read:
            if value[0] == 'user':
                topic=TOPIC_VALUES_USER
            else:
                topic=TOPIC_VALUES_BASIC
            topic = topic%(hadd+'/'+value[1])
            topics.append((topic, 0))
            logger.debug("[%s] - subscribe to %s", self.__class__.__name__, topic)
        if len(topics)>0:
            try:
                self.mqttc_values = MQTTClient(options=self.options.data)
                self.mqttc_values.connect()
                self.mqttc_values.subscribe(topics=topics, callback=self.on_remote_value)
                self.mqttc_values.start()
            except Exception:
                logger.exception("[%s] - start", self.__class__.__name__)

        #~ print max_index
        #~ for index in range(max_index):
            #~ print index
        return True

    def get_read_values(self):
        """Return all the read values.
        """
        values = []
        for which in ['user_read', 'basic_read']:
            nb_instances = self.values[which].get_length()
            for i in range(nb_instances):
                vuuid, vindex = self.values[which].get_value_config(index=i)
                values.append((which.replace('_read',''), vuuid, int(vindex)))
        return values

    def get_write_values(self):
        """Return all the read values.
        """
        values = []
        for which in ['user_write', 'basic_write']:
            nb_instances = self.values[which].get_length()
            for i in range(nb_instances):
                vuuid, vindex, cmdcls, ston, stoff = self.values[which].get_value_config(index=i)
                values.append((which.replace('_write',''), vuuid, int(vindex), cmdcls, ston, stoff))
        return values

    def stop(self):
        """Stop the component.
        """
        if self.mqttc_values is not None:
            try:
                hadd = HADD%(self.remote_hadd[0], self.remote_hadd[1])
                values_read = self.get_read_values()
                topics = []
                for value in values_read:
                    if value[0] == 'user':
                        topic=TOPIC_VALUES_USER
                    else:
                        topic=TOPIC_VALUES_BASIC
                    topic = topic%(HADD%(self.remote_hadd)+'/'+value[1])
                    topics.append(topic)
                logger.debug("[%s] - Unsubscribe to %s", self.__class__.__name__, topics)
                self.mqttc_values.unsubscribe(topics)
                self.mqttc_values.stop()
                if self.mqttc_values.is_alive():
                    self.mqttc_values.join()
                self.mqttc_values = None
            except Exception:
                logger.exception("[%s] - stop", self.__class__.__name__)
        if self.mqttc_heartbeat is not None:
            try:
                hadd = HADD%(self.remote_hadd[0], self.remote_hadd[1])
                logger.debug("[%s] - Unsubscribe to %s", self.__class__.__name__, TOPIC_HEARTBEAT_NODE%(hadd))
                self.mqttc_heartbeat.unsubscribe(topic=TOPIC_HEARTBEAT_NODE%(hadd))
                self.mqttc_heartbeat.stop()
                if self.mqttc_heartbeat.is_alive():
                    self.mqttc_heartbeat.join()
                self.mqttc_heartbeat = None
            except Exception:
                logger.exception("[%s] - stop", self.__class__.__name__)
        JNTComponent.stop(self)
        return True

    def check_heartbeat(self):
        """Check that the component is 'available'

        """
        #~ print "it's me %s : %s" % (self.values['upsname'].data, self._ups_stats_last)
        if self.mqttc_heartbeat is not None:
            return self.state
        return False

    def on_heartbeat(self, client, userdata, message):
        """On request

        :param client: the Client instance that is calling the callback.
        :type client: paho.mqtt.client.Client
        :param userdata: user data of any type and can be set when creating a new client instance or with user_data_set(userdata).
        :type userdata: all
        :param message: The message variable is a MQTTMessage that describes all of the message parameters.
        :type message: paho.mqtt.client.MQTTMessage
        """
        hb = HeartbeatMessage(message)
        add_ctrl, add_node, state = hb.get_heartbeat()
        if add_ctrl is None or add_node is None:
            return
        if (add_ctrl == self.remote_hadd[0]) and \
           (add_node == self.remote_hadd[1] or add_node == -1):
            self.state = state

    def on_remote_value(self, client, userdata, message):
        """On request

        :param client: the Client instance that is calling the callback.
        :type client: paho.mqtt.client.Client
        :param userdata: user data of any type and can be set when creating a new client instance or with user_data_set(userdata).
        :type userdata: all
        :param message: The message variable is a MQTTMessage that describes all of the message parameters.
        :type message: paho.mqtt.client.MQTTMessage
        """
        pass
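
The on_remote_value callback above is left as a stub. A minimal sketch, not part of the original component, of what an override could look like: it reuses the payload keys (uuid, hadd, data) seen in the test helpers of the first example and assumes json_loads is imported in this module as it is in the others. The remote_data attribute is hypothetical.

class MyRemoteNodeComponent(RemoteNodeComponent):
    """Hypothetical subclass that mirrors the values of the remote node locally."""

    def on_remote_value(self, client, userdata, message):
        """Store the data of a matching remote value in a local dict."""
        try:
            msg = json_loads(message.payload)
        except Exception:
            logger.exception("[%s] - Can't decode payload in on_remote_value", self.__class__.__name__)
            return
        # Keep only messages coming from the remote node we listen to.
        if msg.get('hadd') != HADD % self.remote_hadd:
            return
        # Cache the last received data per value uuid (hypothetical attribute).
        if not hasattr(self, 'remote_data'):
            self.remote_data = {}
        self.remote_data[msg.get('uuid')] = msg.get('data')
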
Example 3
class RrdStoreThread(BaseThread):
    """The Rdd cache thread

    Implement a cache.

    """
    def __init__(self, section, options={}):
        """Initialise the cache thread

        Manage a cache for the rrd.

        A timer in a separated thread will pickle the cache to disk every 30 seconds.

        An other thread will update the rrd every hours

        :param options: The options used to start the worker.
        :type clientid: str
        """
        self.section = section
        BaseThread.__init__(self, options=options)
        self.config_timeout_delay = 1
        self.loop_sleep = 0.01
        self._lock = threading.Lock()
        #~ self._cache_rrd_ttl = 60*60
        self._cache_rrd_ttl = 60*60
        self._cache_pickle_ttl = 240
        self._cache_pickle_timer = None
        self._cache_dead_ttl = 60*60*24
        self._cache_dead_timer = None
        self._thread_delay = 0.01
        self._rrd_rotate_next_run = None
        self._rrd_rotate_ttl = 1
        self._dirmask = None
        self._filemask = None
        self._cache = {}
        """
        whe have a datasource with a unique_name haddnode but at component start, we don't have it.
        whe receive data from mqtt with params : haddvalue, value_uuid, value_index at etime

        whe should update the rrd with : rtime:val1:val2:val3 ...

        Whe should be abble to flush a rrd file for graphing

        cache { 'rrd_0001' : { 'step':300, 'values' : {'epoch_time' : { 0 : 'U', ...}}, 'labels':{0:...}}

        When updating a data, we can find the latest serie values filtering on epoch_time

        We should look at the cache to find rrd_files that need a new serie of values (now - last_epoch) > step.
        In this case, we add a new serie of with 'U' as values.

                hadd
                    value_uuid
                        value_index
        """
        self.epoch = datetime.datetime(1970,1,1)
        self._mqttc = None
        """
        """
        self.params = {}

    def config_thread(self, cache_rrd_ttl=None, cache_pickle_ttl=None, cache_dead_ttl=None,
                    dirmask=RRD_DIR, filemask='%(rrd_file)s.rrd'):
        """
        """
        if dirmask is not None:
            self._dirmask = dirmask
        if filemask is not None:
            self._filemask = filemask
        if cache_rrd_ttl is not None:
            self._cache_rrd_ttl = cache_rrd_ttl
        if cache_dead_ttl is not None:
            self._cache_dead_ttl = cache_dead_ttl
        if cache_pickle_ttl is not None:
            self._cache_pickle_ttl = cache_pickle_ttl

    def pre_loop(self):
        """Launch before entering the run loop. The node manager is available.
        """
        self._mqttc = MQTTClient(options=self.options.data)
        self._mqttc.connect()
        self._mqttc.subscribe(topic=TOPIC_VALUES, callback=self.on_value)
        self._mqttc.start()
        self.restore()
        self.start_timer_pickle()
        self._rrd_rotate_next_run = datetime.datetime.now()
        self.start_timer_dead()

    def post_loop(self):
        """Launch after finishing the run loop. The node manager is still available.
        """
        logger.debug("[%s] - stop timers in postloop", self.__class__.__name__)
        self.stop_timer_pickle()
        self.stop_timer_dead()
        self._mqttc.unsubscribe(topic=TOPIC_VALUES)
        self._mqttc.stop()
        #~ self.flush_all()
        self.dump()
        if self._mqttc.is_alive():
            self._mqttc.join()
        self._mqttc = None

    def loop(self):
        """Launch after finishing the run loop. The node manager is still available.
        """
        if self._rrd_rotate_next_run < datetime.datetime.now():
            now = datetime.datetime.now()
            #Check for data that need a rotation
            etime = (datetime.datetime.now() - self.epoch).total_seconds()
            try:
                for key in self._cache.keys():
                    try:
                        epochs = sorted(self._cache[key]['values'].keys())
                        #~ print "epochs : ", epochs
                        #~ print "condition : ", epochs[0] + self._cache_rrd_ttl, etime
                        if len(epochs) == 0 or etime > epochs[-1] + self._cache[key]['step']:
                            #We should rotate the values
                            self.timer_rrd_rotate(key, etime)
                        elif len(epochs) > 1 and epochs[0] + self._cache_rrd_ttl < etime :
                            #We should flush to the rrd
                            self.flush(key)
                    except Exception:
                        logger.exception("[%s] - Exception when rotating %s in cache", self.__class__.__name__, key)
            except Exception:
                logger.exception("[%s] - Exception when rotating in cache", self.__class__.__name__)
            self._rrd_rotate_next_run = datetime.datetime.now() + datetime.timedelta(seconds=self._rrd_rotate_ttl)
        self._reloadevent.wait(self.loop_sleep)

    def on_value(self, client, userdata, message):
        """On value

        Do not lock as it doesn't add new values to the dict. Should be ok using keys instead of an iterator.

        :param client: the Client instance that is calling the callback.
        :type client: paho.mqtt.client.Client
        :param userdata: user data of any type and can be set when creating a new client instance or with user_data_set(userdata).
        :type userdata: all
        :param message: The message variable is a MQTTMessage that describes all of the message parameters.
        :type message: paho.mqtt.client.MQTTMessage
        """
        #logger.debug("[%s] - on_value %s", self.__class__.__name__, message.payload)
        try:
            data = json_loads(message.payload)
            if 'genre' in data:
                data = {0:{0:data}}
            elif 'genre' in data[data.keys()[0]]:
                data = {0:data}
            store_index = self.create_store_index()
            for nval in data:
                for kval in data[nval]:
                    if data[nval][kval]['genre'] in [0x02, 0x01]:
                        hadd = data[nval][kval]['hadd']
                        uuid = data[nval][kval]['uuid']
                        index = 0
                        if 'index' in data[nval][kval]:
                            index = data[nval][kval]['index']
                        index = str(index)
                        #~ logger.debug("[%s] - update_last %s,%s,%s : %s", self.__class__.__name__, hadd, value_uuid, value_index)
                        if (hadd, uuid, index) in store_index:
                            self.update_last(hadd, uuid, index, data[nval][kval]['data'])
        except Exception:
            logger.exception("[%s] - Exception in on_value", self.__class__.__name__)

    def create_store_index(self):
        """ Create an in dex of keys
            :ret: a list of tuple () of values in cache
        """
        ret = []
        rrds = self._cache.keys()
        for rrd in rrds:
            try:
                indexes = self._cache[rrd]['indexes'].keys()
                for index in indexes:
                    try:
                        ret.append( (self._cache[rrd]['hadds'][index], \
                                     self._cache[rrd]['uuids'][index], \
                                     self._cache[rrd]['indexes'][index]) )
                    except Exception:
                        logger.exception('[%s] - Exception in create_store_index : rrd= %s, index = %s', self.__class__.__name__, rrd, index)
            except Exception:
                logger.exception('[%s] - Exception in create_store_index : rrd = %s', self.__class__.__name__, rrd)
        #~ logger.debug("[%s] - create_store_index %s", self.__class__.__name__, ret)
        return ret

    def update_last(self, hadd, value_uuid, value_index, data):
        """ An helper to find
            :ret:
        """
        #~ logger.debug("[%s] - update_last %s,%s,%s : %s", self.__class__.__name__, hadd, value_uuid, value_index, data)
        ret = []
        rrds = self._cache.keys()
        for rrd in rrds:
            indexes = self._cache[rrd]['indexes'].keys()
            for index in indexes:
                if self._cache[rrd]['hadds'][index]==hadd and \
                        self._cache[rrd]['uuids'][index]==value_uuid and \
                        self._cache[rrd]['indexes'][index]==value_index:
                    epochs = sorted(self._cache[rrd]['values'].keys())
                    if len(epochs) == 0:
                        logger.warning("[%s] - Can't update value. No epoch found for %s", self.__class__.__name__, rrd)
                    else:
                        if data is None:
                            data = 'U'
                        self._cache[rrd]['values'][epochs[-1]][index] = data
                        self._cache[rrd]['last_update'] = datetime.datetime.now()
                        logger.debug("[%s] - Value updated in store %s,%s,%s : %s", self.__class__.__name__, hadd, value_uuid, value_index, data)

    def timer_rrd_rotate(self, rrd_file, etime=None):
        """Rotate via a separate thread in a timer
        """
        if rrd_file is None or rrd_file not in self._cache:
            return False
        if etime is None:
            etime = (datetime.datetime.now() - self.epoch).total_seconds()
        th = threading.Timer(self._thread_delay, self.rrd_rotate, args=(rrd_file, etime))
        th.start()

    def rrd_rotate(self, rrd_file, etime=None):
        """Rotate
        """
        if etime is None:
            etime = (datetime.datetime.now() - self.epoch).total_seconds()
        logger.debug("[%s] - Rotate the rrd data in cache", self.__class__.__name__)
        self._lock.acquire()
        try:
            if rrd_file is None or rrd_file not in self._cache:
                return False
            self._cache[rrd_file]['values'][etime] = {}
            for key in self._cache[rrd_file]['hadds'].keys():
                self._cache[rrd_file]['values'][etime][key]='U'
        except Exception:
            logger.exception("[%s] - Exception when rotating %s in cache", self.__class__.__name__, rrd_file)
        finally:
            self._lock.release()

    def run(self):
        """Run the loop
        """
        self._stopevent.clear()
        #~ self.boot()
        self.trigger_reload()
        logger.debug("[%s] - Wait for the thread reload event for initial startup", self.__class__.__name__)
        while not self._reloadevent.isSet() and not self._stopevent.isSet():
            self._reloadevent.wait(0.50)
        logger.debug("[%s] - Entering the thread loop", self.__class__.__name__)
        while not self._stopevent.isSet():
            self._reloadevent.clear()
            try:
                self.pre_loop()
            except Exception:
                logger.exception('[%s] - Exception in pre_loop', self.__class__.__name__)
                self._stopevent.set()
            while not self._reloadevent.isSet() and not self._stopevent.isSet():
                self.loop()
            try:
                self.post_loop()
            except Exception:
                logger.exception('[%s] - Exception in post_loop', self.__class__.__name__)

    def get_rrd_directory(self, params):
        """Get and create the direcotry if needed
        """
        dirname='.'
        if 'home_dir' in self.options.data and self.options.data['home_dir'] is not None:
            dirname = self.options.data['home_dir']
        directory = os.path.join(dirname, self._dirmask %(params))
        if not os.path.exists(directory):
            os.makedirs(directory)
        return directory

    def get_rrd_public_directory(self, params):
        """Get and create the direcotry if needed
        """
        dirname='.'
        if 'home_dir' in self.options.data and self.options.data['home_dir'] is not None:
            dirname = self.options.data['home_dir']
        dirname = os.path.join(dirname, "public")
        directory = os.path.join(dirname, 'rrd', 'rrds')
        if not os.path.exists(directory):
            os.makedirs(directory)
        return directory

    def get_rrd_file(self, params):
        """
        """
        return self._filemask %(params)

    def get_pickle_filename(self):
        """Restore data from disk using pickle
        """
        params={"rrd_file":''}
        dirname = self.get_rrd_directory(params)
        return os.path.join(dirname, 'rrd_cache.pickle')

    def get_list_filename(self):
        """Restore data from disk using pickle
        """
        params={"rrd_file":''}
        directory = self.get_rrd_public_directory(params)
        return os.path.join(directory, "index.txt")

    def get_rrd_filename(self, rrd_file):
        """
        """
        if rrd_file is None:
            logger.debug("Can't retrieve rrd_file %s", rrd_file)
            return None
        params={"rrd_file":rrd_file}
        directory = self.get_rrd_public_directory(params)
        filename = self.get_rrd_file(params)
        return os.path.join(directory, filename)

    def get_rrd_httpname(self, rrd_file):
        """
        """
        if rrd_file is None:
            logger.debug("Can't retrieve rrd_file %s", rrd_file)
            return None
        params={"rrd_file":rrd_file}
        directory = self._dirmask %(params)
        filename = self.get_rrd_file(params)
        return os.path.join(directory, 'rrds', filename)

    def get_rrd_label(self, index, rrd_file, config):
        """
        """
        if config is None:
            if rrd_file in self._cache and index in self._cache[rrd_file]["labels"]:
                return self._cache[rrd_file]["labels"][index]
            return None
        try:
            hadd, value_uuid, value_index, rrd_type, rrd_label = config.split('|')
            self._cache[rrd_file]["labels"][index] = rrd_label
            return rrd_label
        except Exception:
            logger.exception("[%s] - Can't retrieve label from config %s", self.__class__.__name__, config)
            return None

    def dump(self):
        """Dump data to disk using pickle
        """
        self._lock.acquire()
        logger.debug("[%s] - Dump cache to disk")
        try:
            filename = self.get_pickle_filename()
            pickle.dump( self._cache, open( filename, "wb" ) )
        except Exception:
            logger.exception("[%s] - Exception when dumping data to file", self.__class__.__name__)
        finally:
            self._lock.release()

    def restore(self):
        """Restore data from disk using pickle
        """
        self._lock.acquire()
        logger.debug("[%s] - Restore cache from disk")
        try:
            filename = self.get_pickle_filename()
            if os.path.exists(filename):
                self._cache = pickle.load( open( filename, "rb" ) )
        except Exception:
            self._cache = {}
            logger.exception("[%s] - Exception when restoring data from dump", self.__class__.__name__)
        finally:
            self._lock.release()

    def start_timer_dead(self):
        """
        """
        if self._cache_dead_timer is None:
            self._cache_dead_timer = threading.Timer(self._cache_dead_ttl / 3, self.timer_dead)
            self._cache_dead_timer.start()

    def timer_dead(self):
        """Remove dead entries from cache
        """
        self.stop_timer_dead()
        logger.debug("[%s] - Remove dead entries in cache", self.__class__.__name__)
        try:
            now = datetime.datetime.now()
            dead_time = now - datetime.timedelta(seconds=self._cache_dead_ttl)
            for key in self._cache.keys():
                self._lock.acquire()
                if 'last_update' not in self._cache[key]:
                    self._cache[key]['last_update'] = now
                try:
                    if key in self._cache and self._cache[key]['last_update']  < dead_time:
                        logger.debug("[%s] - Remove dead entries in cache : %s", self.__class__.__name__, key)
                        self.remove_rrd_from_list(key)
                        del self._cache[key]
                except Exception:
                    logger.exception("[%s] - Exception when removing dead entry %s in cache", self.__class__.__name__, key)
                finally:
                    self._lock.release()
        except Exception:
            logger.exception("[%s] - Exception when removing dead entries", self.__class__.__name__)
        self.start_timer_dead()

    def stop_timer_dead(self):
        """
        """
        if self._cache_dead_timer is not None:
            self._cache_dead_timer.cancel()
            self._cache_dead_timer = None

    def start_timer_pickle(self):
        """
        """
        if self._cache_pickle_timer is None:
            self._cache_pickle_timer = threading.Timer(self._cache_pickle_ttl, self.timer_pickle)
            self._cache_pickle_timer.start()

    def timer_pickle(self):
        """Dump cache to file using pickle
        """
        self.stop_timer_pickle()
        self.dump()
        self.start_timer_pickle()

    def stop_timer_pickle(self):
        """
        """
        if self._cache_pickle_timer is not None:
            self._cache_pickle_timer.cancel()
            self._cache_pickle_timer = None

    def timer_flush_all(self):
        """Flush all data via a separate thread in a timer
        """
        th = threading.Timer(self._thread_delay, self.flush_all)
        th.start()

    def flush_all(self):
        """Flush all data to rrd files and remove them from cache
        """
        rrds = self._cache.keys()
        for rrd in rrds:
            try:
                self.flush(rrd)
            except Exception:
                logger.exception("[%s] - Exception in flush_all : rrd = %s", self.__class__.__name__, rrd)

    def timer_flush(self, rrd_file):
        """Flush data in cache for a rrd file via a separate thread in a timer
        """
        if rrd_file is None:
            return False
        th = threading.Timer(self._thread_delay, self.flush, args=(rrd_file,))
        th.start()

    def flush(self, rrd_file):
        """Flush data from a value to rrd file and remove them from cache
        """
        if rrd_file is None or rrd_file not in self._cache:
            return False
        self._lock.acquire()
        logger.info("[%s] - Flush rrd_file %s", self.__class__.__name__, rrd_file)
        try:
            rrd_dict = self._cache[rrd_file]
            epochs = sorted(rrd_dict['values'].keys())
            rrd_data = [self.get_rrd_filename(rrd_file)]
            last_epoch = epochs[-1]
            for epoch in epochs:
                try:
                    rrd_line = ""
                    if epoch != last_epoch:
                        #We must let the last serie in cache
                        #Otherwise we could raise :
                        # error: /tmp/janitoo/home/public/datarrd_store/rrd/open_files.rrd: illegal attempt to update using time 1443837167 when last
                        # update time is 1443837240 (minimum one second step)
                        rrd_line = '%s' %(epoch)
                        for key_idx in rrd_dict['values'][epoch]:
                            try:
                                val = 'U'
                                if rrd_dict['values'][epoch][key_idx] is not None:
                                    val = rrd_dict['values'][epoch][key_idx]
                                rrd_line = '%s:%s' %(rrd_line, val)
                            except Exception:
                                rrd_line = '%s:%s' %(rrd_line, 'U')
                                logger.exception("[%s] - Exception when flushing cache for %s epoch %s:%s", self.__class__.__name__, rrd_file, epoch, key_idx)
                        del self._cache[rrd_file]['values'][epoch]
                    if rrd_line != "":
                        rrd_data.append(rrd_line)
                except Exception:
                    logger.exception("[%s] - Exception when flushing cache for %s epoch %s", self.__class__.__name__, rrd_file, epoch)
            if len(rrd_data) > 1:
                rrdtool.update(rrd_data)
        except Exception:
            logger.exception("[%s] - Exception when flushing cache for %s", self.__class__.__name__, rrd_file)
        finally:
            self._lock.release()

    def get_count_values(self):
        """Retrieve the number of values cached
        """
        return len(self._cache)

    def get_count_series(self):
        """Retrieve the number of series of values cached
        """
        numb=0
        for rrd_file in self._cache.keys():
            numb += len(self._cache[rrd_file]['values'])
        return numb

    def get_values_to_dump(self):
        """Return a list of tuples (hadd, value_uuid, value_index) of values in timeout. They must be flush to disk
        """
        return 0

    def remove_config(self, rrd_file):
        """Remove an entry in cache and its rrd file
        """
        if rrd_file not in self._cache:
            logger.warning("[%s] - Remove a non existent entry [%s] from cache ", self.__class__.__name__, rrd_file)
        elif len(self._cache[rrd_file]['values']) > 0:
            logger.warning("[%s] - Remove a non empty entry [%s] from cache : %s ", self.__class__.__name__, rrd_file, self._cache[rrd_file])
        self._lock.acquire()
        try:
            filename = self.get_rrd_filename(rrd_file)
            if os.path.exists(filename):
                os.remove(filename)
            if rrd_file is not None and rrd_file in self._cache:
                del self._cache[rrd_file]
        except Exception:
            logger.exception("[%s] - Exception when removing config", self.__class__.__name__)
        finally:
            self._lock.release()

    def timer_add_config(self, rrd_file, step, config):
        """Add a config via a separate thread in a timer
        """
        if rrd_file is None or step is None or config is None:
            return False
        th = threading.Timer(self._thread_delay*5, self.add_config, args=(rrd_file, step, config))
        th.start()

    def add_config(self, rrd_file, step, config):
        """
        ret = rrdtool.create("example.rrd", "--step", "1800", "--start", '0',
                                 "DS:metric1:GAUGE:2000:U:U",
                                 "DS:metric2:GAUGE:2000:U:U",
                                 "RRA:AVERAGE:0.5:1:600",
                                 "RRA:AVERAGE:0.5:6:700",
                                 "RRA:AVERAGE:0.5:24:775",
                                 "RRA:AVERAGE:0.5:288:797",
                                 "RRA:MAX:0.5:1:600",
                                 "RRA:MAX:0.5:6:700",
                                 "RRA:MAX:0.5:24:775",
                                 "RRA:MAX:0.5:444:797")

        Let's look at these lines in detail. The first line gives the name of the RRD database
        ("example.rrd"; any path can be used here), the step between parameter checks (30 minutes
        in this case) and the start point (0 or N means 'now').
        'DS' stands for Data Source; these two lines describe our two metrics.
        '2000' means that RRD can wait up to 2000 seconds for new values before it considers them
        unknown (2000 is used because it is 200 seconds more than our 30 minutes interval).
        The last two parameters, 'U:U', stand for the min and max values of each metric
        ('unknown' in our case).
        The RRA lines describe which consolidated values RRD should store in its database
        (average and max values) and how many of them it should keep. These values were chosen to
        be compatible with MRTG; since we use 1800 second periods instead of 5 minutes, you may
        need to adjust them.

        GAUGE, COUNTER, DERIVE or ABSOLUTE
        """
        #print "add_config", rrd_file, config
        self._lock.acquire()
        rrd_sources = []
        try:
            if rrd_file is None:
                logger.warning("[%s] - Can't add %s in cache", self.__class__.__name__, rrd_file)
                return False
            if rrd_file not in self._cache:
                self._cache[rrd_file] = {'step':step, 'last_update':datetime.datetime.now(), 'labels' : {}, 'types':{}, 'hadds':{}, 'uuids':{}, 'indexes':{}, 'values':{}}
            for key in sorted(config.keys()):
                hadd, value_uuid, value_index, rrd_type, rrd_label = config[key].split('|')
                self._cache[rrd_file]["labels"][key] = rrd_label
                self._cache[rrd_file]["types"][key] = rrd_type
                self._cache[rrd_file]["hadds"][key] = hadd
                self._cache[rrd_file]["uuids"][key] = value_uuid
                self._cache[rrd_file]["indexes"][key] = value_index
                rrd_sources.append("DS:%s:%s:%s:U:U" %(rrd_label, rrd_type, step*2))
        except Exception:
            logger.exception("[%s] - Exception when adding config in cache", self.__class__.__name__)
        finally:
            self._lock.release()
        #print "rrd_sources :", rrd_sources
        try:
            filename = self.get_rrd_filename(rrd_file)
            if not os.path.exists(filename):
                rrdtool.create(filename, "--step", str(step), "--start", '0',
                                     rrd_sources,
                                     "RRA:AVERAGE:0.5:1:1440",
                                     "RRA:AVERAGE:0.5:12:1440",
                                     "RRA:AVERAGE:0.5:144:1440",
                                     "RRA:AVERAGE:0.5:288:1440",
                                     "RRA:MAX:0.5:1:1440",
                                     "RRA:MAX:0.5:12:1440",
                                     "RRA:MAX:0.5:144:1440",
                                     "RRA:MAX:0.5:288:1440",
                                     "RRA:MIN:0.5:1:1440",
                                     "RRA:MIN:0.5:12:1440",
                                     "RRA:MIN:0.5:144:1440",
                                     "RRA:MIN:0.5:288:1440")
            self.add_rrd_to_list(rrd_file)
        except Exception:
            logger.exception("[%s] - Exception when creating rrd file %s", self.__class__.__name__, rrd_file)

    def add_rrd_to_list(self, rrd_file):
        """Add the rrd_file to index.txt
        """
        filename = self.get_list_filename()
        rrd_list = []
        if os.path.exists(filename):
            with open(filename) as file:    # Use file to refer to the file object
                data = file.read()
                rrd_list = data.split("|")
        if rrd_file in rrd_list:
            return
        rrd_list.append(rrd_file)
        line = '|'.join(rrd_list)
        with open(filename, "w") as file:
            file.write(line)

    def remove_rrd_from_list(self, rrd_file):
        """Remove the rrd from index.txt
        """
        filename = self.get_list_filename()
        rrd_list = []
        if os.path.exists(filename):
            with open(filename) as file:    # Use file to refer to the file object
                data = file.read()
                rrd_list = data.split("|")
        if rrd_file not in rrd_list:
            return
        rrd_list.remove(rrd_file)
        line = '|'.join(rrd_list)
        with open(filename, "w") as file:
            file.write(line)
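
A minimal usage sketch, not part of the original code: it feeds a store by hand, without starting the thread or MQTT. It assumes BaseThread wraps the options dict so that self.options.data exposes home_dir, as the directory getters above expect; the section name, the home_dir path and the HADD strings are hypothetical. The pipe-separated config format ('hadd|value_uuid|value_index|rrd_type|rrd_label') is the one parsed by add_config above.

store = RrdStoreThread('rrd', options={'home_dir': '/tmp/janitoo_test'})
store.config_thread(cache_rrd_ttl=60*60, cache_pickle_ttl=240, cache_dead_ttl=60*60*24)

# One datasource per index.
config = {
    0: '0010/0001|temperature|0|GAUGE|temp_in',
    1: '0010/0001|humidity|0|GAUGE|hum_in',
}
store.add_config('climate', 300, config)          # creates climate.rrd and the cache entry

store.rrd_rotate('climate')                       # open a first series filled with 'U'
store.update_last('0010/0001', 'temperature', '0', 21.5)
store.rrd_rotate('climate')                       # open the next series
store.flush('climate')                            # write every closed series to the rrd file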