def taskSampling():
    """Take one sample (if any are owed) and publish it.

    Reads the module-level ``debt`` counter (number of samples owed);
    returns immediately when nothing is owed.  On success the sample dict
    is queued on ``outqueue``, serialized via ``send()`` into the message
    format base stations expect (so the base station's log2mysql.py can be
    reused), and optionally published to RabbitMQ.

    Side effects: toggles status LEDs on non-RPi hardware, appends to
    ``outqueue``, may (re)initialize the module-level ``connection`` /
    ``channel`` pair, and calls ``payback()`` to decrement the debt.
    """
    global channel, connection
    try:
        if debt <= 0:
            # nothing owed — no work to do
            return
        if not is_rpi():
            # assumes both LEDs exist only on non-RPi boards — TODO confirm
            red_on()
            usr0_on()
        d = sampling_core()
        if d is None or len(d) <= 0:
            # consistent with the async variant, which logs instead of print()ing
            logger.info('sampling_core() returns nothing')
            return
        print('= = = = = = = = = =')
        pretty_print(d)
        outqueue.append(d)
        # This turns the "local" sample (a dict) into a message that base
        # stations expect.  This way the base station's log2mysql.py can
        # be reused.
        # In the future these bbb nodes should ALL double as base stations,
        # listening and parsing all messages in the air.
        m = send(None, d, src=SENDER_ID)
        if RABBITMQ_ENABLED:
            if connection is None or channel is None:
                # lazy (re)connect — also how we recover after ConnectionClosed
                connection, channel = init_rabbit(nodeid, cred['rabbitmq'])
            channel.basic_publish(exchange=exchange,
                                  routing_key=routing_key,
                                  body=m,
                                  properties=pika.BasicProperties(
                                      delivery_mode=2,      # persistent
                                      content_type='text/plain',
                                      expiration=str(72 * 3600 * 1000)))  # 72 h TTL (ms)
        if not is_rpi():
            red_off()
            usr0_off()
        payback()
        if debt > 0:
            logger.debug('debt={}'.format(debt))
        else:
            logger.debug('all debts are paid')
    except pika.exceptions.ConnectionClosed:
        # drop the pair so the next call reconnects lazily
        connection, channel = None, None
        logger.error(
            'connection closed')  # connection to the local exchange closed? wut?
    except KeyboardInterrupt:
        # never swallow Ctrl-C (matches the async variant of this task)
        raise
    except Exception:
        logger.exception('')
def taskHeartbeat():
    """Publish a short-lived heartbeat message to RabbitMQ.

    The heartbeat carries a timestamp, system uptime, disk usage of '/',
    the node's IP addresses, and — when the hardware exists — the battery
    voltage read from the watchdog board.  Messages are transient
    (delivery_mode=1) and expire after 5 minutes.

    Side effects: may (re)initialize the module-level ``connection`` /
    ``channel`` pair; resets them to None on ConnectionClosed so the next
    call reconnects.
    """
    global channel, connection
    try:
        if connection is None or channel is None:
            # NOTE(review): the other tasks in this file call init_rabbit();
            # the original 'rabbit_init' here would raise NameError if one
            # of the two names doesn't exist — unified on init_rabbit.
            connection, channel = init_rabbit(nodeid, cred['rabbitmq'])
        # close the fd deterministically instead of leaking it on error
        with open('/proc/uptime') as f:
            uptime_second = float(f.readline().split()[0])
        usage = shutil.disk_usage('/')
        d = {
            'ts': time.time(),
            'uptime_second': uptime_second,
            'usedMB': int(usage.used / 1e6),
            'freeMB': int(usage.free / 1e6),
            'IPs': getIP(),
        }
        try:
            from node.drivers.watchdog import Watchdog
            w = Watchdog(bus=1)
            d['VbattV'] = w.read_vbatt()
        except Exception:
            # best-effort: some devices don't have WDT
            pass
        m = send(None, d).strip()
        logger.debug(m)
        channel.basic_publish(exchange=exchange,
                              routing_key=routing_key,
                              body=m,
                              properties=pika.BasicProperties(
                                  delivery_mode=1,      # transient
                                  content_type='text/plain',
                                  expiration=str(5 * 60 * 1000)))  # 5 min TTL (ms)
    except pika.exceptions.ConnectionClosed:
        connection, channel = None, None
        logger.error(
            'connection closed')  # connection to the local exchange closed
# Log format is CSV-ish: logger name, level, origin, message.
formatter = logging.Formatter(
    '%(name)s,%(levelname)s,%(module)s.%(funcName)s,%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

from sampling_core import sampling_core

# hardware ports
if XBEE_PORT is None:
    logger.warning('XBEE_PORT not defined, no XBee telemetry!')
    ser = None

    def xbeesend(m):
        """No radio attached: XBee telemetry is a silent no-op."""
        return None
else:
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be more robust, but kept as-is to preserve the failure mode.
    assert exists(XBEE_PORT)
    ser = serial.Serial(XBEE_PORT, XBEE_BAUD, timeout=1)

    def xbeesend(m):
        """Send message ``m`` over the XBee serial link."""
        return send(ser, m)

# announce ourselves on the air once at startup
xbeesend({
    'status': 'online',
    'INTERVAL': INTERVAL,
    'NGROUP': NGROUP,
    'Timestamp': time.time()
})

if XBEE_LOG_DIR is None:
    XBEE_LOG_FILE = '/dev/null'
else:
    XBEE_LOG_FILE = join(XBEE_LOG_DIR, 'tsraw.txt')
# logging all incoming XBee traffic... just because.
# buffering=1 → line-buffered so the raw log survives crashes.
rawf = open(XBEE_LOG_FILE, 'a+', 1)
async def taskSampling():
    """Async sampling loop: while ``should_continue``, take owed samples
    and publish them, then sleep.

    ``debt`` is the number of samples currently owed (decremented via
    ``payback()`` — presumably maintained elsewhere; TODO confirm).  The
    loop sleeps a short interval (INTERVAL/NGROUP/10) while debt remains,
    so owed samples are worked off quickly, and a longer idle interval
    (INTERVAL/NGROUP/2) otherwise.
    """
    global should_continue, debt
    global connection, channel
    while should_continue:
        assert debt >= 0
        try:
            if debt > 0:
                logger.debug(f"owe {debt} sample(s)")
                if not is_rpi():
                    # status LEDs only on non-RPi boards — TODO confirm
                    red_on()
                    usr0_on()
                d = sampling_core()
                if d is not None and len(d) > 0:
                    logger.debug(d)
                    print('= = = = = = = = = =')
                    pretty_print(d)
                    outqueue.append(d)
                    # serialize the sample into the base-station message format
                    m = send(None, d, src=SENDER_ID)
                    if RABBITMQ_ENABLED:
                        if connection is None or channel is None:
                            # lazy (re)connect; also the recovery path after
                            # ConnectionClosed resets the pair to None below
                            connection, channel = init_rabbit(
                                nodeid, cred['rabbitmq'])
                        channel.basic_publish(exchange=exchange,
                                              routing_key=routing_key,
                                              body=m,
                                              properties=pika.BasicProperties(
                                                  delivery_mode=2,
                                                  content_type='text/plain',
                                                  expiration=str(7 * 24 * 3600 * 1000)))
                    if not is_rpi():
                        red_off()
                        usr0_off()
                    # one sample delivered — decrement the debt
                    payback()
                else:
                    logger.info('sampling_core() returns nothing')
            else:
                logger.debug("Doesn't owe a sample.")
        except pika.exceptions.ConnectionClosed:
            connection, channel = None, None
            logger.error('connection closed'
                         )  # connection to the local exchange closed? wut?
        except KeyboardInterrupt:
            # never swallow Ctrl-C
            raise
        except:
            # last-resort: log anything else and keep the loop alive
            logger.exception('')
            #raise
        if debt > 0:
            # still owe samples: hurry up
            await asyncio.sleep(INTERVAL / NGROUP / 10)
        else:
            await asyncio.sleep(INTERVAL / NGROUP / 2)