def get_device_names():
    '''
    Gather the sensor device names declared in the i2c and dist service
    configuration files.

    :return dict(list(string)): device name lists keyed by service id
    '''
    i2c_cfg = json2dict('i2c_service/config.json', __file__)
    dist_cfg = json2dict('dist_service/config.json', __file__)
    return {
        'i2c': list(i2c_cfg['i2c']['address'].keys()),
        'dist': list(dist_cfg['dist']['pinout'].keys()),
    }
# Exemple #2
# 0
def init_bus(pipeline, deviceNames):
	'''
	Initialize and run the BUS module: load the 'bus' config section,
	create the SQLite storage (one table per device) and start one
	collector thread per input queue, logging each worker's outcome.

	:param dict pipeline: queue hierarchy for pushing data
	:param dict(list(string)) deviceNames: sensor device names per service
	'''

	logging.info('Starting BUS service...')

	# Load the service configuration into the module-level state shared
	# with the worker threads.
	global _isDebug, _startLoop, _intervalMeasureTime, _debugIterAmount, _serviceName, _serviceDB
	config = helpers.json2dict('config.json', __file__)
	_intervalMeasureTime = config['bus']['intervaltime']
	_startLoop = config['bus']['loop']
	_isDebug = config['bus']['debug']
	_debugIterAmount = config['bus']['iter']
	_serviceName = config['bus']['threadname']

	if _isDebug:
		logging.warning(f'({_serviceName}) - internal debug is enable...')

	# Initialize the SQLite database, creating a table for each device.
	logging.info(f'({_serviceName}) - starting Database...')

	names = []
	for value in deviceNames.values():
		names.extend(value)
	_serviceDB = database.SQLITE(tableName = names)
	deviceNames = names
	logging.info(_serviceDB.get_table_name())

	# Start one collector worker per input queue.
	# BUGFIX: the second queue is pipeline['dist']['bus'], so its collector
	# is labeled 'dist' (it was mislabeled 'bus' in the logs).
	with concurrent.futures.ThreadPoolExecutor(max_workers=3, thread_name_prefix = _serviceName) as executor:
		futureException = {
			executor.submit(push_data, _queue):
			f'{name}-collector' for name, _queue in zip(['i2c', 'dist'], [pipeline['i2c']['bus'], pipeline['dist']['bus']])
		}

		for futureErrors in concurrent.futures.as_completed(futureException):
			threadName = futureException[futureErrors]
			try:
				data = futureErrors.result()
			except Exception as exc:
				logging.error(f'({_serviceName}) - \'{threadName}\' generated an exception: {exc}')
			else:
				logging.info(f'({_serviceName}) - \'{threadName}\' finish without error and return \'{data}\'')

	if _isDebug:
		# BUGFIX: the message previously interpolated the stale threadName of
		# whichever worker completed last (NameError if none completed) and
		# misspelled 'retrieving'.
		for name in deviceNames:
			data = _serviceDB.get_values(name)
			logging.info(f'({_serviceName}) - retrieving data from {name} and have {data}')
# Exemple #3
# 0
def init_bit(pipeline):
	'''
	Initialize and run the BIT module.

	Loads the 'bit' section of config.json, builds one BitController per
	configured pin, then supervises every BIT worker thread and logs how
	each one finishes.

	:param queue pipeline: queue class for pushing data
	'''
	logging.info('Starting BIT Service...')

	global _isDebug, _startLoop, _intervalMeasureTime, _debugIterAmount, _serviceName

	# Without the GPIO library there is nothing to drive; bail out early.
	if GPIO is None:
		return

	config = helpers.json2dict('config.json', __file__)
	bit_cfg = config['bit']
	_intervalMeasureTime = bit_cfg['intervaltime']
	_startLoop = bit_cfg['loop']
	_isDebug = bit_cfg['debug']
	_debugIterAmount = bit_cfg['iter']
	_serviceName = bit_cfg['threadname']

	if _isDebug:
		logging.warning(f'({_serviceName}) - internal debug is enable...')

	# One controller per configured pin, seeded with its initial state.
	bitControllerDict = {}
	for key, pin in bit_cfg['pinout'].items():
		bitControllerDict[key] = BitController(pin, bit_cfg['state'][key])

	with concurrent.futures.ThreadPoolExecutor(max_workers=10, thread_name_prefix = _serviceName) as executor:

		# One state supervisor per controller...
		futureException = {}
		for key, bitController in bitControllerDict.items():
			future = executor.submit(bit_state_supervisor, bitController)
			futureException[future] = key + '_bit_state_supervisor'
		# ...plus the shared monitor, notifier and buzzer workers.
		futureException[executor.submit(update_state_monitor, pipeline, bitControllerDict)] = 'update_state_monitor'
		futureException[executor.submit(notify_bit_state, pipeline, bitControllerDict)] = 'notify_bit_state'
		futureException[executor.submit(bit_buzzer_supervisor, bitControllerDict['alarm'])] = 'alarm_buzzer_state'

		# Report how each worker finished.
		for finished in concurrent.futures.as_completed(futureException):
			threadName = futureException[finished]
			try:
				data = finished.result()
			except Exception as exc:
				logging.error(f'({_serviceName}) - \'{threadName}\' generated an exception: {exc}')
			else:
				logging.info(f'({_serviceName}) - \'{threadName}\' finish without error and return \'{data}\'')
# Exemple #4
# 0
def init_web(pipeline):
    '''
    Initialize, run and coordinate all the threads related to web server
    interaction.

    :param dict(dict(RotateQueue)) pipeline: communication pipeline hierarchy
    '''

    logging.info('Starting WEB service...')

    global _isDebug, _startLoop, _intervalMeasureTime, _debugIterAmount, _serviceName

    # Pull this service's settings into module-level state.
    config = helpers.json2dict('config.json', __file__)
    web_cfg = config['web']
    _serviceName = web_cfg['threadname']
    _isDebug = web_cfg['debug']
    _debugIterAmount = web_cfg['iter']
    _intervalMeasureTime = web_cfg['intervaltime']

    # Queues used only between the web workers themselves.
    internalPipeline = {'websocket': Queue(), 'monitor': Queue()}

    # (callable, args, log label) for every worker thread to launch.
    workers = [
        (web_server, (pipeline,), 'flask_service'),
        (system_information, (internalPipeline,), 'system_information'),
        (system_monitor, (internalPipeline, pipeline, 'monitor'), 'system_monitor'),
        (web_emit_system_info, (internalPipeline, 'websocket'), 'web_emit_system_info'),
        (web_emit_system_state, (pipeline,), 'web_emit_system_state'),
    ]

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=5, thread_name_prefix=_serviceName) as executor:

        futureException = {
            executor.submit(fn, *args): label for fn, args, label in workers
        }

        # Report how each worker finished.
        for finished in concurrent.futures.as_completed(futureException):
            threadName = futureException[finished]
            try:
                data = finished.result()
            except Exception as exc:
                logging.error(
                    f'({_serviceName}) - \'{threadName}\' generated an exception: {exc}'
                )
            else:
                logging.info(
                    f'({_serviceName}) - \'{threadName}\' finish without error and return \'{data}\''
                )
def init_dist(pipeline):
    '''
    Initialize and run the DIST module and its distance sensors.

    :param queue pipeline: queue class for pushing data
    '''
    logging.info('Starting DIST Service...')

    global _isDebug, _startLoop, _intervalMeasureTime, _debugIterAmount, _serviceName

    # Pull this service's settings into module-level state.
    config = helpers.json2dict('config.json', __file__)
    dist_cfg = config['dist']
    _intervalMeasureTime = dist_cfg['intervaltime']
    _startLoop = dist_cfg['loop']
    _isDebug = dist_cfg['debug']
    _debugIterAmount = dist_cfg['iter']
    _serviceName = dist_cfg['threadname']

    dist_params = dist_cfg['pinout']

    if _isDebug:
        logging.warning(f'({_serviceName}) - internal debug is enable...')

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=3, thread_name_prefix=_serviceName) as executor:

        # One hcsr04 polling worker per configured distance sensor.
        futureException = {}
        for name, params in dist_params.items():
            future = executor.submit(hcsr04_pool, name, params['echo'],
                                     params['trigger'], params['equation'],
                                     pipeline)
            futureException[future] = name

        # Report how each worker finished.
        for finished in concurrent.futures.as_completed(futureException):
            threadName = futureException[finished]
            try:
                data = finished.result()
            except Exception as exc:
                logging.error(
                    f'({_serviceName}) - \'{threadName}\' generated an exception: {exc}'
                )
            else:
                logging.info(
                    f'({_serviceName}) - \'{threadName}\' finish without error and return \'{data}\''
                )
def init_i2c(pipeline):
    '''
    Initialize, run and coordinate all the threads that handle the sensors
    communicating with the system over the I2C channel.

    :param dict(dict(RotateQueue)) pipeline: communication pipeline hierarchy
    '''
    logging.info('Starting I2C Service...')

    # Pull this service's settings into module-level state.
    global _isDebug, _startLoop, _intervalMeasureTime, _debugIterAmount, _serviceName
    config = helpers.json2dict('config.json', __file__)
    i2c_cfg = config['i2c']
    _intervalMeasureTime = i2c_cfg['intervaltime']
    _startLoop = i2c_cfg['loop']
    _isDebug = i2c_cfg['debug']
    _debugIterAmount = i2c_cfg['iter']
    _serviceName = i2c_cfg['threadname']

    if _isDebug:
        logging.warning(f'({_serviceName}) - internal debug is enable...')

    # Parameters for every I2C sensor that is not an ADC.
    temp_params = []
    for name, addr in i2c_cfg['address'].items():
        if 'adc' in name:
            continue
        temp_params.append({
            'name': name,
            'addr': addr,
            'pipeline': pipeline,
            'channel': i2c_cfg['channel'],
        })

    # Parameters for the single ADC sensor worker.
    adc_params = {
        'name': 'adc-service',
        'addr': i2c_cfg['address']['adc'],
        'pipeline': pipeline,
        'adc': config['adc'],
        'channel': i2c_cfg['channel'],
    }

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=4, thread_name_prefix=_serviceName) as executor:

        # One worker per temperature sensor plus one for the ADC.
        futureException = {}
        for params in temp_params:
            futureException[executor.submit(mlx90614_loop, params)] = params['name']
        futureException[executor.submit(adc1115_loop, adc_params)] = adc_params['name']

        # Report how each worker finished.
        for finished in concurrent.futures.as_completed(futureException):
            threadName = futureException[finished]
            try:
                data = finished.result()
            except Exception as exc:
                logging.error(
                    f'({_serviceName}) - \'{threadName}\' generated an exception: {exc}'
                )
            else:
                logging.info(
                    f'({_serviceName}) - \'{threadName}\' finish without error and return \'{data}\''
                )