def _start_ruuvitag(self):
    logger.debug('enter')

    if not self._run:
        return False

    l_ruuvitag = self._cfgh.get_cfg(section=_def.KEY_RUUVITAG)
    if l_ruuvitag:
        l_outqueue = self._find_queue(
            l_ruuvitag.get('ruuviname', _def.RUUVITAG_RUUVINAME))
        try:
            l_name = l_ruuvitag.get('name', _def.RUUVITAG_NAME)
            l_proc = _tag(
                loop=self._loop,
                scheduler=self._scheduler,
                collector=l_ruuvitag.get('collector', _def.RUUVITAG_COLLECTOR),
                outqueue=l_outqueue,
                # fbqueue = self._fbqueue,
                whtlist=l_ruuvitag.get('WHTLIST', None),
                blklist=l_ruuvitag.get('BLKLIST', None),
                adjustment=l_ruuvitag.get('ADJUSTMENT', None),
                tags=l_ruuvitag.get('TAGS', None),
                sample_interval=l_ruuvitag.get(
                    'sample_interval', _def.RUUVITAG_SAMPLE_INTERVAL),
                calc=l_ruuvitag.get('calc', _def.RUUVITAG_CALC),
                calc_in_datas=l_ruuvitag.get(
                    'calc_in_datas', _def.RUUVITAG_CALC_IN_DATAS),
                debug=l_ruuvitag.get('debug', _def.RUUVITAG_DEBUG),
                device_timeout=l_ruuvitag.get(
                    'device_timeout', _def.RUUVITAG_DEVICE_TIMEOUT),
                device_reset=l_ruuvitag.get(
                    'device_reset', _def.RUUVITAG_DEVICE_RESET),
                whtlist_from_tags=l_ruuvitag.get(
                    'whtlist_from_tags', _def.RUUVITAG_WHTLIST_FROM_TAGS),
                minmax=l_ruuvitag.get('MINMAX', _def.RUUVITAG_MINMAX),
                device=l_ruuvitag.get('device', _def.RUUVITAG_DEVICE))
            # start ruuvitag task and register it under its configured name
            l_task = self._loop.create_task(l_proc.run())
            self._procs.add(l_name, procItem(proc=l_proc, queue=None, task=l_task))
            logger.info(f'[{_def.KEY_RUUVITAG}] task:{l_name} created')
            logger.debug(f'[{_def.KEY_RUUVITAG}] proc:{l_proc} task:{l_task}')
            return True
        except ValueError:
            logger.critical(f'*** [{_def.KEY_RUUVITAG}] start failed ValueError')
        except Exception:
            logger.exception(f'*** [{_def.KEY_RUUVITAG}] start failed')
    else:
        logger.error(f'*** [{_def.KEY_RUUVITAG}] configuration missing')

    return False
def _start_ruuvi(self):
    logger.debug('enter')

    if not self._run:
        return False

    l_common = self._cfgh.get_cfg(section=_def.KEY_COMMON)
    l_ruuvi = self._cfgh.get_cfg(section=_def.KEY_RUUVI)
    if l_ruuvi:
        # collect the output queue of each configured measurement
        l_outqueues = {}
        for l_measur in l_ruuvi.get('MEASUREMENTS', []):
            l_outqueue = self._find_queue(
                l_measur.get('OUTPUT', _def.RUUVI_OUTPUT))
            if l_outqueue:
                l_outqueues = {**l_outqueues, **l_outqueue}
        if l_outqueues:
            logger.debug(f'outqueues:{l_outqueues}')
            try:
                l_name = l_ruuvi.get('name', _def.RUUVI_NAME)
                l_inqueue = asyncio.Queue(
                    maxsize=l_ruuvi.get('queue_size', _def.RUUVI_QUEUE_SIZE))
                l_proc = _ruuvi(
                    cfg=l_ruuvi,
                    hostname=l_common.get('hostname', _def.COMMON_HOSTNAME),
                    outqueues=l_outqueues,
                    inqueue=l_inqueue,
                    # fbqueue = self._fbqueue,
                    loop=self._loop,
                    scheduler=self._scheduler)
                l_task = self._loop.create_task(l_proc.run())
                self._procs.add(
                    l_name, procItem(proc=l_proc, queue=l_inqueue, task=l_task))
                logger.info(f'[{_def.KEY_RUUVI}] task:{l_name} created')
                logger.debug(f'[{_def.KEY_RUUVI}] proc:{l_proc} task:{l_task}')
                return True
            except Exception:
                logger.exception(f'[{_def.KEY_RUUVI}] failed to add task:{l_name}')
        else:
            logger.error(
                f'''[{_def.KEY_RUUVI}] queue(s) not found:{l_ruuvi.get('OUTPUT', _def.RUUVI_OUTPUT)}''')
            logger.debug(f'[{_def.KEY_RUUVI}] procs:{self._procs}')
    else:
        logger.error(f'*** [{_def.KEY_RUUVI}] configuration missing')

    return False
def _start_influx(self):
    logger.debug('enter')

    if not self._run:
        return False

    l_status = False
    l_common = self._cfgh.get_cfg(section=_def.KEY_COMMON)
    l_influxs = self._cfgh.get_cfg(section=_def.KEY_INFLUX)
    if l_influxs:
        for l_influx in l_influxs:
            l_name = l_influx.get('name', _def.INFLUX_NAME)
            if l_influx.get('enable', _def.INFLUX_ENABLE):
                try:
                    l_inqueue = asyncio.Queue(
                        maxsize=l_influx.get('queue_size', _def.INFLUX_QUEUE_SIZE))
                    l_proc = _influx(
                        cfg=l_influx,
                        hostname=l_common.get('hostname', _def.COMMON_HOSTNAME),
                        inqueue=l_inqueue,
                        # fbqueue = self._fbqueue,
                        loop=self._loop,
                        scheduler=self._scheduler,
                        nameservers=l_common.get('nameservers', _def.COMMON_NAMESERVERS))
                    l_task = self._loop.create_task(l_proc.run())
                    self._procs.add(
                        l_name, procItem(proc=l_proc, queue=l_inqueue, task=l_task))
                    logger.info(f'[{_def.KEY_INFLUX}] task:{l_name} created')
                    logger.debug(f'[{_def.KEY_INFLUX}] proc:{l_proc} task:{l_task}')
                    l_status = True
                except Exception:
                    logger.exception(f'[{_def.KEY_INFLUX}] failed to add task:{l_name}')
            else:
                logger.warning(f'[{_def.KEY_INFLUX}]:{l_name} disabled')

    return l_status
def _start_kafka(self):
    logger.debug('enter')

    if not self._run:
        return False

    l_status = False
    l_common = self._cfgh.get_cfg(section=_def.KEY_COMMON)
    l_kafkas = self._cfgh.get_cfg(section=_def.KEY_KAFKA_PRODUCER)
    if l_kafkas:
        for l_kafka in l_kafkas:
            l_name = l_kafka.get('name', _def.KAFKA_NAME)
            if l_kafka.get('enable', _def.KAFKA_ENABLE):
                try:
                    l_inqueue = asyncio.Queue(
                        maxsize=l_kafka.get('queue_size', _def.KAFKA_QUEUE_SIZE))
                    l_proc = _kafka(
                        loop=self._loop,
                        cfg=l_kafka,
                        inqueue=l_inqueue,
                        scheduler=self._scheduler,
                        nameservers=l_common.get('nameservers', _def.COMMON_NAMESERVERS))
                    l_task = self._loop.create_task(l_proc.run())
                    self._procs.add(
                        l_name, procItem(proc=l_proc, queue=l_inqueue, task=l_task))
                    logger.info(f'[{_def.KEY_KAFKA_PRODUCER}] task:{l_name} created')
                    logger.debug(f'[{_def.KEY_KAFKA_PRODUCER}] proc:{l_proc} task:{l_task}')
                    l_status = True
                except Exception:
                    logger.exception(f'[{_def.KEY_KAFKA_PRODUCER}] failed to add task:{l_name}')
            else:
                logger.warning(f'[{_def.KEY_KAFKA_PRODUCER}]:{l_name} disabled')

    return l_status
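# ---------------------------------------------------------------------------
# Illustrative sketch only, not part of the original module: the _start_*
# methods above assume a procItem record with proc/queue/task fields and a
# self._procs registry exposing .add(name, item). The names below are
# hypothetical reconstructions of those helpers, shown for orientation:
#
#     from collections import namedtuple
#
#     procItem = namedtuple('procItem', ['proc', 'queue', 'task'])
#
#     class ProcRegistry:
#         """Keeps started tasks addressable by name for later shutdown."""
#         def __init__(self):
#             self._items = {}
#
#         def add(self, name, item):
#             self._items[name] = item
#
#         def __iter__(self):
#             return iter(self._items.items())
# ---------------------------------------------------------------------------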