def publish_data(entry, topic, local_metadata=None):
  """Publish this node's status (entry definitions, events and last_seen map) on the given topic.

  When entry.config['compress'] is set, the bulky part of the payload is sent
  b64-compressed under the '+' key.
  """
  last_seen = {e.id: e.last_seen for e in system.entries().values()}
  bulk = {
    'entries': system.entries_definition_exportable(),
    'events': system.events_export(),
    'last_seen': last_seen,
  }
  header = {
    'from_node': system.default_node_name,
    'time': system.time(),
  }
  if entry.config['compress']:
    entry.publish(topic, {**header, '+': utils.b64_compress_data(bulk)})
  else:
    entry.publish(topic, {**header, **bulk})
def on_subscribe_all_messages(entry, subscribed_message):
  """ Monitor every mqtt message """
  # BUG FIX: an orphan, unterminated triple-quote delimiter trailed this function
  # (residue of a commented-out block); it opened an unclosed string literal and
  # has been removed.
  message = subscribed_message.message
  if (message.retain):
    # Retained messages should be skipped
    return
  firstpm = subscribed_message.message.firstPublishedMessage()
  # Health-relevant events explicitly emitted by this message
  listened_events = [e for e in subscribed_message.message.events() if e['name'] == 'connected' or e['name'] == 'alive' or e['name'] == 'failure']
  # A generic message from a local entry (with no explicit health events) can still
  # mark the entry alive / re-arm its dead checker, if the entry is configured for it
  if firstpm and firstpm.entry.is_local and len(listened_events) == 0:
    if not hasattr(firstpm.entry, 'health_config_alive_on_message'):
      if firstpm.entry.id != entry.id:
        logging.error("HEALTH> entry {id} has NO health_config_alive_on_message".format(id = firstpm.entry.id))
    elif firstpm.entry.health_config_alive_on_message:
      event_connected(entry, firstpm.entry, 'connected', { 'params': { 'value': True } }, '', None, from_generic_message = True)
    if not hasattr(firstpm.entry, 'health_config_dead_message_timeout'):
      if firstpm.entry.id != entry.id:
        logging.error("HEALTH> entry {id} has NO health_config_dead_message_timeout".format(id = firstpm.entry.id))
    elif firstpm.entry.health_config_dead_message_timeout:
      entry.health_dead_checker[firstpm.entry.id] = (system.time() + firstpm.entry.health_config_dead_message_timeout, firstpm.entry.health_config_dead_message_timeout, 'silent for too long')

  # Look for entries subscribed to this topic. If a "response" is defined, we will wait for the response to come (or not)
  for sm in message.subscribedMessages():
    if 'response' in sm.definition:
      system.subscribe_response(sm.entry, subscribed_message.message, callback = on_response_to_subscribed_message, no_response_callback = on_no_response_to_subscribed_message)

  # Update publish checker
  for pm in subscribed_message.message.publishedMessages():
    if pm.topic_rule in entry.health_publish_checker and pm.entry.id in entry.health_publish_checker[pm.topic_rule]:
      entry.health_publish_checker[pm.topic_rule][pm.entry.id]['last_published'] = system.time()
      if pm.entry and pm.topic_rule in pm.entry.health_publish:
        # The topic has been published again: clear the pending "not published" failure
        del pm.entry.health_publish[pm.topic_rule]
        check_health_status(pm.entry)
def on_subscribed_message(installer_entry, subscribed_message):
  """Handle an owrtwifi2mqtt message for a tracked mac address.

  Publishes '@/detected' (momentary devices, flood-limited), '@/connected' or
  '@/disconnected' on the mapped entry, and refreshes its last-seen time.
  """
  payload = subscribed_message.payload
  matches = subscribed_message.matches
  mac_address = matches[1].upper().replace("-", ":")
  if mac_address not in installer_entry.data['owrtwifi2mqtt_mac_addresses']:
    return
  # record = [entry_id, momentary, connected, last_seen] — mutated in place
  record = installer_entry.data['owrtwifi2mqtt_mac_addresses'][mac_address]
  entry = system.entry_get(record[0])
  if not entry:
    return
  momentary = record[1]
  connected = record[2]
  last_seen = record[3]
  record[3] = system.time()
  seen_event = matches[2] == 'lastseen/iso8601' or matches[2] == 'lastseen/epoch' or (matches[2] == 'event' and payload == 'new')
  if seen_event:
    if momentary:
      # Momentary devices only emit "detected", rate-limited by momentary_flood_time
      if system.time() - last_seen < utils.read_duration(installer_entry.config['momentary_flood_time']):
        return
      entry.publish('@/detected')
    elif not connected:
      record[2] = True
      entry.publish('@/connected')
  elif (matches[2] == 'event' and payload == 'del') and installer_entry.config['disconnect_on_event_del']:
    record[2] = False
    entry.publish('@/disconnected')
def sniff_callback(installer_entry, mac_address):
  """Handle a sniffed mac address: publish '@/detected' (momentary) or '@/connected'."""
  mac_address = mac_address.upper()
  if mac_address not in installer_entry.net_sniffer_mac_addresses:
    return
  # record = [entry_id, momentary, connected, last_seen] — mutated in place
  record = installer_entry.net_sniffer_mac_addresses[mac_address]
  entry = system.entry_get(record[0])
  if not entry:
    return
  momentary = record[1]
  connected = record[2]
  last_seen = record[3]
  record[3] = system.time()
  if momentary:
    # Rate-limit repeated detections of momentary devices
    if system.time() - last_seen < utils.read_duration(installer_entry.config['momentary_flood_time']):
      return
    entry.publish('@/detected')
  elif not connected:
    record[2] = True
    entry.publish('@/connected')
def storeData(entry, blocking=True):
  """
  Persist entry.data to storage; also write a periodic backup copy.

  Returns True when the store path completed, False when there is no data,
  the lock could not be acquired (blocking=False), or an error occurred.
  """
  if not entry.data:
    return False
  # Serialize concurrent stores; with blocking=False give up immediately if busy
  if not entry.data_lock.acquire(blocking):
    return False
  try:
    _s = system._stats_start()
    # Cheap change-detection: compare repr() against the last stored snapshot
    cmpdata = repr(entry.data)
    data = None
    if entry.store_data_saved != cmpdata:
      data = utils.json_export(entry.data)
      _storeDataTo(entry, entry.node_name + '_data_' + entry.id_local + '.json', data)
      entry.store_data_saved = cmpdata
    # Backup copy at most once every STORAGE_BACKUP_TIME seconds
    if system.time() - entry.store_backup_time > STORAGE_BACKUP_TIME:
      if not data:
        data = utils.json_export(entry.data)
      _storeDataTo(
          entry, entry.node_name + '_data_' + entry.id_local + '.backup.json',
          data)
      entry.store_backup_time = system.time()
    return True
  except:
    logging.exception("#{id}> failed storing data".format(id=entry.id))
    return False
  finally:
    entry.data_lock.release()
    system._stats_end('storage.store_data', _s)
def _health_checker_timer(entry):
  """Background loop: report entries that stay silent (dead checker) or stop
  publishing expected topics (publish checker). Suspended while system load is high."""
  # if system load_level is high i disable health_publish_checker and health_dead_checker. When the load returns low, i'll reset health checker data (to avoid fake health problems)
  health_disable_load_level = 0
  while not threading.currentThread()._destroyed:
    now = system.time()
    if node.load_level() > 0:
      health_disable_load_level = now
    elif health_disable_load_level > 0 and node.load_level() == 0 and now - health_disable_load_level > 60:
      health_disable_load_level = 0

    # if a moment ago the system was too load, and now is ok, i must consider health_dead_checker and health_publish_checker data as invalid and reset them (or i'll report a lot of fake health problems)
    if health_disable_load_level > 0:
      # health_dead_checker: push every deadline forward by its own timeout
      # (tuple layout: [0]=deadline, [1]=timeout, [2]=reason string)
      for entry_id in entry.health_dead_checker:
        source_entry = system.entry_get(entry_id)
        if source_entry:
          entry.health_dead_checker[entry_id] = (system.time() + entry.health_dead_checker[entry_id][1], entry.health_dead_checker[entry_id][1], entry.health_dead_checker[entry_id][2])
      # health_publish_checker: refresh last_published so nothing appears overdue
      for t in entry.health_publish_checker:
        for e in entry.health_publish_checker[t]:
          if entry.health_publish_checker[t][e]['last_published'] > 0:
            entry.health_publish_checker[t][e]['last_published'] = system.time()
    else:
      # health_dead_checker: flag entries whose deadline has passed
      timeouts = [ entry_id for entry_id in entry.health_dead_checker if now > entry.health_dead_checker[entry_id][0] ]
      if timeouts:
        for entry_id in timeouts:
          source_entry = system.entry_get(entry_id)
          if source_entry:
            source_entry.health_dead = entry.health_dead_checker[entry_id][2]
            check_health_status(source_entry)
        # Drop the timed-out entries from the checker
        entry.health_dead_checker = { entry_id: entry.health_dead_checker[entry_id] for entry_id in entry.health_dead_checker if entry_id not in timeouts }
      # health_publish_checker: tolerate broker queue delay (x2) before flagging
      delay = system.broker().queueDelay() * 2 if not system.test_mode else 0
      for t in entry.health_publish_checker:
        for e in entry.health_publish_checker[t]:
          if entry.health_publish_checker[t][e]['last_published'] > 0 and now - entry.health_publish_checker[t][e]['last_published'] > entry.health_publish_checker[t][e]['interval'] + delay:
            target_entry = system.entry_get(e)
            if target_entry and t not in target_entry.health_publish:
              target_entry.health_publish[t] = [now, entry.health_publish_checker[t][e]['last_published'], entry.health_publish_checker[t][e]['interval'], delay]
              check_health_status(target_entry)
    system.sleep(entry.config['health-checker-secs'])
def publish_external_ip(entry, topic_rule, topic_definition):
  """
  Publish the node's external ip address, as reported by the configured HTTP service.

  BUG FIX: the original requests.get() had no timeout, so a stalled external
  service would block this handler indefinitely; a 30s timeout is now enforced
  (requests.exceptions.Timeout propagates to the caller).
  """
  entry.publish(
      './external-ip', {
          'external-ip':
              requests.get(
                  entry.config['external-ip-http-service'], timeout=30).text,
          'time': system.time()
      })
def publish(entry, topic, definition):
  """
  Run a speedtest and store the results in entry.data, then publish them via
  on_speedtest_get.

  On failure every result field is reset to -1/"" and 'last_error' holds the
  exception type and message.
  """
  entry.data['last_time'] = system.time()
  try:
    logging.debug("#{id}> Starting speedtest...".format(id=entry.id))
    spdtest = speedtest.Speedtest()
    # If a specific server id is configured, restrict the test to that server
    if entry.config['server'] > 0:
      servers = []
      servers.append(entry.config['server'])
      spdtest.get_servers(servers)
    spdtest.get_best_server()
    entry.data['last_download'] = spdtest.download()
    entry.data['last_upload'] = spdtest.upload()
    entry.data['last_ping'] = spdtest.results.ping
    entry.data['last_server_id'] = spdtest.results.server['id']
    entry.data['last_server_name'] = spdtest.results.server[
        'name'] + ' (' + spdtest.results.server['sponsor'] + ')'
    entry.data['last_error'] = False
    logging.debug("#{id}> Speedtest done.".format(id=entry.id))
  except:
    # BUG FIX: logging.exception() requires a msg argument; the original bare
    # call raised TypeError inside the handler and masked the real failure.
    logging.exception("#{id}> Speedtest failed".format(id=entry.id))
    entry.data['last_download'] = -1
    entry.data['last_upload'] = -1
    entry.data['last_ping'] = -1
    entry.data['last_server_id'] = -1
    entry.data['last_server_name'] = ""
    entry.data['last_error'] = str(sys.exc_info()[0]) + " - " + str(
        sys.exc_info()[1])
  on_speedtest_get(entry, topic, None, None)
def entry_health_status(entry):
  """
  Compute the health status of a local entry.

  Returns {'value': 'idle'|'alive'|'dead'|'failure', 'reason': str}, or None if
  the entry is not local or has no health support. 'dead' takes precedence;
  otherwise any accumulated reason yields 'failure', and a recently-seen entry
  with no problems is reported 'alive' instead of 'idle'.
  """
  if not entry.is_local or not hasattr(entry, 'health_entry'):
    return None
  res = {'value': 'idle', 'reason': ''}
  if entry.health_dead:
    res['value'] = 'dead'
    res['reason'] = entry.health_dead
  else:
    res['reason'] = entry.health_response
    # Required entries in 'dead'/'failure' state contribute a failure reason
    if entry.health_required:
      for e in entry.health_required:
        if entry.health_required[e] == 'dead' or entry.health_required[e] == 'failure':
          res['reason'] = res['reason'] + (', ' if res['reason'] else '') + _('{entry} is in state: ' + entry.health_required[e]).format(entry = e)
    # Topics not published within the expected interval contribute a failure reason
    # (health_publish[t] layout: [0]=check time, [1]=last published, [2]=interval, [3]=delay)
    if entry.health_publish:
      for t in entry.health_publish:
        res['reason'] = res['reason'] + (', ' if res['reason'] else '') + _('{topic} not published as expected (last published: {last}, check: {now}, diff: {diff}, interval: {interval}, delay: {delay})').format(topic = t, now = entry.health_publish[t][0], last = entry.health_publish[t][1], diff = entry.health_publish[t][0] - entry.health_publish[t][1], interval = entry.health_publish[t][2], delay = entry.health_publish[t][3])
    if res['reason']:
      res['value'] = 'failure'
    # Seen within health-idle-time and no problems: 'alive' instead of 'idle'
    if res['value'] == 'idle' and system.time() - entry.health_time < utils.read_duration(entry.health_entry.config['health-idle-time']):
      res['value'] = 'alive'
  return res
def presence_check_sessions(entry):
  """
  Expire presence methods whose session timed out or whose scheduled deletion
  time ('delete_at') has passed; occupants left with no methods are removed and
  './presence/out' is published for them.

  Returns True if at least one occupant was removed.
  """
  now = system.time()
  d = 0
  for name, p in list(entry.data['presence'].items()):
    for method, m in list(entry.data['presence'][name]['methods'].items()):
      # BUG FIX: the original used two independent if/del statements on the
      # same key; when both expiry conditions matched, the second del raised
      # KeyError. The conditions are now merged into a single delete.
      session_expired = m['session_length'] > 0 and now - m['lastseen'] > m['session_length']
      delete_scheduled = 'delete_at' in m and now > m['delete_at']
      if session_expired or delete_scheduled:
        del entry.data['presence'][name]['methods'][method]
    if not entry.data['presence'][name]['methods']:
      del entry.data['presence'][name]
      entry.publish(
          './presence/out', {
              'name': name,
              'after_someone_inside': True if entry.data['presence'] else False,
              'method': 'CHECK',
              'time': now
          })
      logging.debug("{id}> {name} gone away (CHECK)".format(id=entry.id, name=name))
      d = d + 1
  if d > 0:
    exports(entry)
  return d > 0
def job_load(entry, job):
  """
  Normalize and register a scheduler job definition in entry.data['jobs'].

  Only jobs with 'run_interval' or 'run_cron' are loaded. Returns the job id
  (explicit 'id', or a generated group/entry-prefixed sha1 hash), or None if
  the job was not schedulable. Invalid intervals/cron rules are rejected.
  """
  jid = None
  if 'run_interval' in job or 'run_cron' in job:
    if 'id' in job:
      jid = job['id']
      del job['id']
    # No explicit id, or id collision: generate a unique one from the job content
    if not jid or jid in entry.data['jobs']:
      #jid = ((job['group'] + '.') if 'group' in job and job['group'] else ((job['entry_id'] + '.') if 'entry_id' in job and job['entry_id'] else '')) + hashlib.sha1((str(i) + ':' + str(job)).encode('UTF-8')).hexdigest()[:16]
      i = 0
      while True:
        jid = ((job['group'] + '.') if 'group' in job and job['group'] else ((job['entry_id'] + '.') if 'entry_id' in job and job['entry_id'] else '')) + hashlib.sha1(
            (str(job)).encode('UTF-8')).hexdigest()[:16] + (
                ('_' + str(i)) if i else '')
        if not (jid in entry.data['jobs']):
          break
        i = i + 1
    # Merge over a previous version of the same job, keeping its runtime state
    if jid in entry.scheduler_oldjobs:
      job = {**entry.scheduler_oldjobs[jid], **job}
    if 'do' in job and isinstance(job['do'], str):
      job['do'] = [job['do']]
    # Defaults: cron jobs tolerate up to 60s of delay, interval jobs none
    if 'enabled' not in job:
      job['enabled'] = True
    if 'max_delay' not in job:
      job['max_delay'] = 60 if 'run_cron' in job else 0
    job['max_delay'] = utils.read_duration(job['max_delay'])
    if 'timer_to' not in job:
      job['timer_to'] = 0
    if 'last_run' not in job:
      job['last_run'] = 0
    if 'run_interval' in job:
      job['run_interval'] = utils.read_duration(job['run_interval'])
      if job['run_interval'] <= 0:
        job = False
    if job and 'run_cron' in job and not croniter.is_valid(
        job['run_cron']):
      logging.error(
          '#{id}> invalid cron rule: {cron} in job: {job}'.format(
              id=entry.id, cron=job['run_cron'], job=job))
      job = False
    if job:
      # (Re)compute next_run if missing or already past its max_delay window
      if 'next_run' not in job or (
          job['max_delay'] > 0 and system.time() >= job['next_run'] + job['max_delay']):
        job_set_next_run(job)
      entry.data['jobs'][jid] = job
      # Auto-create the job's group, restoring old group state when available
      if 'group' in job and job[
          'group'] and not job['group'] in entry.data['groups']:
        entry.data['groups'][job['group']] = {
            'enabled': True,
            'timer_to': 0
        } if job[
            'group'] not in entry.scheduler_oldgroups else entry.scheduler_oldgroups[
                job['group']]
  return jid
def script_context(context={}):
  """
  Return the shared js2py scripting context, (re)initialized when needed and
  loaded with the given variable context.

  The EvalJs instance is a module-level singleton, rebuilt when the exported
  names change. Variables from a previous context that are absent from the new
  one are reset to None before the new values are injected.
  NOTE(review): the mutable default argument is safe here because 'context' is
  only reassigned, never mutated.
  """
  global script_context_instance, script_context_instance_context_keys, script_context_instance_exports_keys
  if not script_context_instance or script_context_instance_exports_keys != list(
      exports.keys()):
    logging.debug("scripting> Inizializing new script context")
    script_context_instance = js2py.EvalJs({
        'now': system.time(),
        'd': utils.read_duration,
        't': _parse_datetime,
        'strftime': _strftime,
        'array_sum': utils.array_sum,
        'array_avg': utils.array_avg,
        'array_min': utils.array_min,
        'array_max': utils.array_max,
        'round': round,
        'is_dict': _is_dict,
        'is_array': _is_array,
        'print': _print,
        'str': str,
        'camel_to_snake_case': _camel_to_snake_case,
        'payload_transfer': _payload_transfer,
        '_': _translate,
        **exports,
    })
    script_context_instance.__context = None
    script_context_instance_context_keys = None
    script_context_instance_exports_keys = list(exports.keys())
  # Accept a previous EvalJs context or a JsObjectWrapper as the input context
  if isinstance(context, js2py.evaljs.EvalJs):
    context = context.__context
  if isinstance(context, js2py.base.JsObjectWrapper):
    context = context.to_dict()
  # Clear stale variables left over from the previous context
  if script_context_instance_context_keys:
    for k in script_context_instance_context_keys:
      if k not in context:
        script_context_instance._context['var'][k] = None
  for k in context:
    script_context_instance._context['var'][k] = context[k]
  script_context_instance.__context = context
  script_context_instance_context_keys = list(context.keys())
  # 'now' is refreshed on every call
  script_context_instance._context['var']['now'] = system.time()
  return script_context_instance
def presence_method_detected(entry, name, method, session_length=0):
  """
  Record that 'name' has been detected by 'method'.

  Creates or refreshes the occupant/method records; a new occupant triggers a
  './presence/in' publish plus exports/status updates. A shorter positive
  session_length replaces the stored one, and any pending 'delete_at' mark is
  cleared. Returns True when the occupant is new.
  """
  now = system.time()
  session_length = utils.read_duration(session_length)
  someone_inside = True if entry.data['presence'] else False
  isnew = False
  if not name in entry.data['presence']:
    entry.data['presence'][name] = {
        'firstseen': now,
        'lastseen': now,
        'methods': {}
    }
    isnew = True
  else:
    entry.data['presence'][name]['lastseen'] = now
  if not method in entry.data['presence'][name]['methods']:
    entry.data['presence'][name]['methods'][method] = {
        'firstseen': now,
        'lastseen': now,
        'session_length': session_length
    }
  else:
    entry.data['presence'][name]['methods'][method]['lastseen'] = now
    # Tighten the stored session length when a shorter positive one is given
    if session_length > 0 and (
        entry.data['presence'][name]['methods'][method]
        ['session_length'] == 0 or session_length < entry.data['presence']
        [name]['methods'][method]['session_length']):
      entry.data['presence'][name]['methods'][method][
          'session_length'] = session_length
    # A fresh detection cancels any scheduled deletion of this method
    if 'delete_at' in entry.data['presence'][name]['methods'][method]:
      del entry.data['presence'][name]['methods'][method]['delete_at']
  if isnew:
    entry.publish(
        './presence/in', {
            'name': name,
            'before_someone_inside': someone_inside,
            'method': method,
            'time': now
        })
    exports(entry)
    publish_status(entry)
    logging.debug(
        "{id}> {name} presence detected ({method} => {methods})".format(
            id=entry.id,
            name=name,
            method=method,
            methods=list(entry.data['presence'][name]['methods'])))
  else:
    logging.debug(
        "{id}> {name} presence confirmed ({method} => {methods})".format(
            id=entry.id,
            name=name,
            method=method,
            methods=list(entry.data['presence'][name]['methods'])))
  return isnew
def run_publish(entry, topic_rule, topic_definition):
  """Publish the scheduler state (enabled flag, timer, groups and jobs) on '@/status'."""
  status = {
    'enabled': entry.data['enabled'],
    'time': system.time(),
    'timer_to': entry.data['timer_to'],
    'groups': entry.data['groups'],
    'jobs': entry.data['jobs'],
  }
  entry.publish('@/status', status)
def event_connected(self_entry, source_entry, eventname, eventdata, caller, published_message, from_generic_message = False):
  """React to a connected/disconnected event: clear the entry's dead state on
  connect, or arm the dead checker on disconnect."""
  if eventdata['params']['value']:
    # A connection resets dead state and any previous response failure
    source_entry.health_dead = ''
    source_entry.health_response = ''
    if source_entry.id in self_entry.health_dead_checker:
      del self_entry.health_dead_checker[source_entry.id]
    check_health_status(source_entry)
  else:
    # Disconnected: mark the entry dead if it stays disconnected past its timeout
    timeout = source_entry.health_config_dead_disconnected_timeout
    self_entry.health_dead_checker[source_entry.id] = (system.time() + timeout, timeout, 'disconnected for too long')
def rx_callback(self, gpio):
  """RX callback for GPIO event detection. Handle basic signal detection.

  Measures the gap since the previous edge; a long gap (>5ms) marks a sync
  pause between signal repeats. After two matching repeats the recorded edge
  timings are checked against the known PROTOCOLS, and a decoded code is
  forwarded to the external callback (subject to protocol and per-code time
  filters).
  """
  # Microsecond monotonic timestamp for pulse-width measurement; wall-clock for filters
  timestamp = int(time.perf_counter() * 1000000)
  unix_timestamp = system.time()
  duration = timestamp - self._rx_last_timestamp
  found = False
  if duration > 5000:
    # Long gap = sync pause: compare with the previous sync gap to count repeats
    if abs(duration - self._rx_timings[0]) < 200:
      self._rx_repeat_count += 1
      self._rx_change_count -= 1
      if self._rx_repeat_count == 2:
        # Two consistent repeats recorded: try to decode against each protocol
        for pnum in range(1, len(PROTOCOLS)):
          if self._rx_waveform(pnum, self._rx_change_count, timestamp):
            #logging.debug("#RFDevice> detected RX code " + str(self.rx_code))
            found = True
            break
        self._rx_repeat_count = 0
    self._rx_change_count = 0
  # Guard against timing-buffer overflow
  if self._rx_change_count >= MAX_CHANGES:
    self._rx_change_count = 0
    self._rx_repeat_count = 0
  self._rx_timings[self._rx_change_count] = duration
  self._rx_change_count += 1
  self._rx_last_timestamp = timestamp
  if found and not self.rx_proto in self.rx_extcallback_filterprotocols:
    current = {
        'rx_code': self.rx_code,
        'rx_code_timestamp': self.rx_code_timestamp,
        'rx_pulselength': self.rx_pulselength,
        'rx_proto': self.rx_proto
    }
    logging.debug("#RFDevice> detected code: " + str(current['rx_code']) +
                  " [pulselength " + str(current['rx_pulselength']) +
                  ", protocol " + str(current['rx_proto']) + "], time: " +
                  ("-" if current['rx_code'] not in self.rx_extcallback_codes
                   else datetime.datetime.fromtimestamp(self.rx_extcallback_codes[
                       current['rx_code']]).strftime('%H:%M:%S')))
    # Suppress the duplicate deliveries of the same decoded burst
    if self.rx_extcallback and current[
        'rx_code_timestamp'] != self.rx_extcallback_timestamp:  # and ((current['rx_code'] not in self.rx_extcallback_codes) or (unix_timestamp - self.rx_extcallback_codes[current['rx_code']]) >= self.rx_extcallback_filtertime)
      self.rx_extcallback_timestamp = current['rx_code_timestamp']
      # Per-code rate limit: at most one delivery every rx_extcallback_filtertime seconds
      if (current['rx_code'] not in self.rx_extcallback_codes
          ) or (unix_timestamp - self.rx_extcallback_codes[
              current['rx_code']]) >= self.rx_extcallback_filtertime:
        logging.debug(
            "#RFDevice> sending detected code to callback...")
        self.rx_extcallback(current)
        self.rx_extcallback_codes[
            current['rx_code']] = unix_timestamp
def publish_health_status(entry, force = False):
  """Publish the entry's health status on the 'health' topic when it changed
  since the last publish, or unconditionally when force=True."""
  status = entry_health_status(entry)
  if not status:
    return
  previous = entry.health_published_status
  changed = status['value'] != previous['value'] or status['reason'] != previous['reason']
  if changed:
    entry.health_changed = entry.health_time
  if force or changed:
    entry.health_published_status = status
    status['changed'] = entry.health_changed
    status['schanged'] = utils.strftime(status['changed']) if status['changed'] > 0 else '-'
    status['time'] = system.time()
    entry.publish('health', status)
def publish_status(entry, subscribed_message=None):
  """Publish the current occupants (first/last seen, detection methods) on './presence'."""
  if hasattr(entry.request, 'skip_publish_status') and entry.request.skip_publish_status:
    return
  occupants = {
    name: {
      'firstseen': p['firstseen'],
      'lastseen': p['lastseen'],
      'methods': list(p['methods']),
    }
    for name, p in list(entry.data['presence'].items())
  }
  entry.publish('./presence', {"occupants": occupants, "time": system.time()})
def on_metadata(entry, subscribed_message):
  """
  Handle a metadata message from another node: record when it was seen, merge
  newer node records, load that node's entry definitions (unless running in
  local mode) and re-publish our own metadata when something changed.
  """
  payload = subscribed_message.payload
  # Ignore our own messages and messages older than the configured dead_time
  if payload and 'from_node' in payload and payload[
      'from_node'] != system.default_node_name and 'time' in payload and system.time(
      ) - payload['time'] < utils.read_duration(
          entry.config['dead_time']):
    entry.data['seen'][payload['from_node']] = {
        'my_time': system.time(),
        'his_time': payload['time']
    }
    # Collect nodes whose record is new or fresher than what we have
    todo = []
    for node in payload['nodes']:
      if node not in entry.data['nodes'] or entry.data['nodes'][node][
          'time'] < payload['nodes'][node]['time']:
        entry.data['nodes'][node] = payload['nodes'][node]
        todo.append(node)
    if not entry.config['local']:
      for node in todo:
        #entry.data[node] = payload['nodes'][node]
        # Entries travel compressed; keep only the ones belonging to this node
        payload_entries = utils.b64_decompress_data(payload['entries'])
        node_entries = {}
        for entry_id in payload_entries:
          if entry_id.endswith('@' + node):
            node_entries[entry_id] = payload_entries[entry_id]
        #system.entry_load_definitions(node_entries, node_name = node, unload_other_from_node = True, id_from_definition = False)
        system.entry_load(node_entries,
                          node_name=node,
                          unload_other_from_node=True,
                          id_from_definition=False)
    if todo:
      # Something changed: broadcast our updated metadata
      publish_metadata(entry, entry.topic('./metadata'))
      logging.debug(
          '#{id}> Loaded new metadata by: {todo}, current entries: {entries}'
          .format(id=entry.id,
                  todo=todo,
                  entries=", ".join(system.entries().keys())))
def status_check(installer_entry):
  """Publish '@/disconnected' for tracked devices not seen for longer than connection_time."""
  config_connection_time = utils.read_duration(installer_entry.config['connection_time'])
  for mac_address in installer_entry.net_sniffer_mac_addresses:
    # record = [entry_id, momentary, connected, last_seen]
    record = installer_entry.net_sniffer_mac_addresses[mac_address]
    if record[2] and system.time() - record[3] > config_connection_time:
      entry = system.entry_get(record[0])
      if entry:
        record[2] = False
        entry.publish('@/disconnected')
def job_set_next_run(job):
  """Advance job['next_run'] to the next scheduled slot (cron or interval),
  skipping past any slot that has already exceeded its max_delay window."""
  now = system.time()
  t = job['next_run'] if 'next_run' in job and job['next_run'] else now
  while True:
    if 'run_cron' in job:
      itr = croniter(job['run_cron'], datetime.datetime.fromtimestamp(t).astimezone())
      job['next_run'] = itr.get_next()
    elif 'run_interval' in job:
      job['next_run'] = t + job['run_interval']
    t = job['next_run']
    # Stop as soon as the computed slot is not already stale
    if not (job['max_delay'] > 0 and now >= job['next_run'] + job['max_delay']):
      break
def publish_metadata(entry, topic, local_metadata=None):
  """Publish this node's metadata (known nodes + exportable entry definitions),
  compressing the entries when entry.config['compress'] is set."""
  entries_def = system.entries_definition_exportable()
  if entry.config['compress']:
    entries_def = utils.b64_compress_data(entries_def)
  entry.publish(topic, {
    'from_node': system.default_node_name,
    'time': system.time(),
    'nodes': entry.data['nodes'],
    'entries': entries_def,
  })
def start(entry):
  """Initialize this node's metadata records and publish an initial './metadata' message."""
  description = system.config['description'] if 'description' in system.config else ''
  entry.data['nodes'] = {
    system.default_node_name: {
      'description': description,
      'time': system.time(),
    }
  }
  entry.data['seen'] = {}
  publish_metadata(entry, entry.topic('./metadata'))
def on_set(entry, subscribed_message):
  """
  Handle an enable/disable command for the scheduler, a job, a group or an
  entry's jobs ('@entry_id' target); '' or '*' targets the whole scheduler.
  Publishes the affected targets on '@/result' ({} if nothing changed).
  """
  payload = subscribed_message.payload
  done = []
  if "enabled" in payload:
    target = payload['target'] if 'target' in payload else ''
    # timer_to: relative duration (converted to an absolute time) or absolute timestamp
    timer_to = utils.read_duration(
        payload['timer_to']) if 'timer_to' in payload else 0
    if timer_to > 0:
      if timer_to < 1000000000:
        timer_to = system.time() + timer_to
    else:
      timer_to = 0
    if not target or target == '*':
      # Whole scheduler
      if entry.data['enabled'] != payload['enabled']:
        entry.data['enabled'] = payload['enabled']
        entry.data['timer_to'] = timer_to
        done = ['*']
    elif target.startswith('@'):
      # All jobs belonging to the given entry id
      for jid, job in entry.data['jobs'].items():
        if 'entry_id' in job and (
            job['entry_id'] == target[1:]
            or job['entry_id'].startswith(target[1:] + '@')):
          if entry.data['jobs'][jid]['enabled'] != payload['enabled']:
            entry.data['jobs'][jid]['enabled'] = payload['enabled']
            entry.data['jobs'][jid]['timer_to'] = timer_to
            done.append(jid)
    elif target in entry.data['jobs']:
      # A single job
      if entry.data['jobs'][target]['enabled'] != payload['enabled']:
        entry.data['jobs'][target]['enabled'] = payload['enabled']
        entry.data['jobs'][target]['timer_to'] = timer_to
        done = [target]
    elif target in entry.data['groups']:
      # A job group
      if entry.data['groups'][target]['enabled'] != payload['enabled']:
        entry.data['groups'][target]['enabled'] = payload['enabled']
        entry.data['groups'][target]['timer_to'] = timer_to
        done = [target]
  entry.publish(
      '@/result', {
          'enabled': payload['enabled'],
          'target': ','.join(done),
          'timer_to': timer_to
      } if done else {})
def notification_receive(entry, topic, string, notify_level):
  """
  Dispatch a notification to matching subscriptions, optionally re-publish it
  on 'notify/<level>/<topic>', and append it to the history (with probabilistic
  garbage collection of old records).
  """
  for s in entry.notification_subscriptions:
    if topic_matches(topic, notify_level, s["pattern"], entry):
      for target_entry in node.entries_implements('notifications_send'):
        node.entry_invoke(target_entry, 'notifications_send', s["driver"], s["data"], s["pattern"], topic, string, notify_level)
  if entry.config['notify_send'] and notifications_levels[notify_level] >= notifications_levels[entry.config['notify_send_level']]:
    entry.publish("notify/" + notify_level + "/" + topic, string)
  # Save to history (skip levels below the configured history_filter)
  if notify_level in notifications_levels and notifications_levels[notify_level] < notifications_levels[entry.config["history_filter"]]:
    return
  t = system.time()
  entry.data['history'].append({'time': t, 'level': notify_level, 'topic': topic, 'message': str(string)})
  # With probability history_gc_prob%, prune records older than history_length
  if random.randrange(100) < entry.config["history_gc_prob"]:
    while len(entry.data['history']) > 0 and (t - entry.data['history'][0]['time'] > entry.config["history_length"]):
      entry.data['history'].pop(0)
def status_check(installer_entry):
  """Detect tracked devices silent for longer than connection_time and publish
  '@/disconnected' on their entry (logging the transition)."""
  connection_time = utils.read_duration(installer_entry.config['connection_time'])
  for mac_address in installer_entry.net_sniffer_mac_addresses:
    # record = [entry_id, momentary, connected, last_seen]
    record = installer_entry.net_sniffer_mac_addresses[mac_address]
    if not record[2]:
      continue
    if system.time() - record[3] <= connection_time:
      continue
    entry = system.entry_get(record[0])
    if entry:
      record[2] = False
      entry.publish('@/disconnected')
      logging.debug(
          "#{id}> {entry}: status_check, res: disconnected".format(
              id=installer_entry.id, entry=entry.id))
def run(entry):
  """
  Scheduler tick: flip enabled flags whose timers expired (scheduler, groups,
  jobs), then execute every due job, and publish the status if anything changed.
  """
  _s = system._stats_start()
  now = system.time()
  changed = False
  # Expired timer on the whole scheduler toggles its enabled flag
  if entry.data['timer_to'] > 0 and now >= entry.data['timer_to']:
    entry.data['enabled'] = not entry.data['enabled']
    entry.data['timer_to'] = 0
    changed = True
  # Same for group and per-job timers
  for groupname, group in entry.data['groups'].items():
    if group['timer_to'] > 0 and now > group['timer_to']:
      group['enabled'] = not group['enabled']
      group['timer_to'] = 0
      changed = True
  for jid, job in entry.data['jobs'].items():
    if job['timer_to'] > 0 and now > job['timer_to']:
      job['enabled'] = not job['enabled']
      job['timer_to'] = 0
      changed = True
  if entry.data['enabled']:
    for jid, job in entry.data['jobs'].items():
      # A job runs only if it is enabled and its group (if any) is enabled
      if job['enabled'] and (
          'group' not in job or job['group'] not in entry.data['groups']
          or entry.data['groups'][job['group']]['enabled']):
        # Too late to run this slot: skip it and reschedule
        # NOTE(review): logging.warn is a deprecated alias of logging.warning
        if job['max_delay'] > 0 and now >= job['next_run'] + job[
            'max_delay']:
          logging.warn(
              '#{id}> max_delay passed, run skipped for job {job}'.
              format(id=entry.id, job=job))
          job_set_next_run(job)
        if now >= job['next_run']:
          run_job(entry, job)
          job['last_run'] = now
          job_set_next_run(job)
          changed = True
  if changed:
    run_publish(entry, '', {})
  system._stats_end('scheduler', _s)
def presence_method_gone_away(entry, name, method, delete_after=0):
  """
  Record that 'method' no longer detects 'name'.

  With delete_after=0 the method is removed immediately; when the occupant has
  no remaining methods it is removed too and './presence/out' is published.
  With delete_after>0 the method is only marked for later deletion
  ('delete_at'). Returns True when an occupant was removed.
  """
  now = system.time()
  delete_after = utils.read_duration(delete_after)
  d = 0
  for pname, p in list(entry.data['presence'].items()):
    if pname == name:
      if method in entry.data['presence'][name]['methods']:
        if not delete_after:
          del entry.data['presence'][name]['methods'][method]
          if not entry.data['presence'][name]['methods']:
            del entry.data['presence'][name]
            entry.publish(
                './presence/out', {
                    'name': name,
                    'after_someone_inside': True if entry.data['presence'] else False,
                    'method': method,
                    'time': now
                })
            logging.debug(
                "{id}> {name} gone away ({method})".format(
                    id=entry.id, name=name, method=method))
            d = d + 1
          else:
            entry.data['presence'][name]['lastseen'] = now
        else:
          # Deferred removal: mark and let presence_check_sessions delete it later
          entry.data['presence'][name]['methods'][method][
              'delete_at'] = now + delete_after
  if d > 0:
    exports(entry)
  else:
    # NOTE(review): this log reads entry.data['presence'][name]; it appears to
    # assume 'name' is still present (d == 0 path) — a call with an unknown
    # name would raise KeyError here. Confirm against callers.
    logging.debug(
        "{id}> {name} removed method ({method} => {methods})".format(
            id=entry.id,
            name=name,
            method=method,
            methods=list(entry.data['presence'][name]['methods'])))
  return d > 0
def run(entry):
  """
  Clock tick: refresh sun-related exports (dawn/sunrise/noon/sunset/dusk, both
  as timestamps and as hours) once per day, and recompute is_day/is_night on
  every run.
  """
  #logging.debug('#{id}> clock {topic}: {value}'.format(id = entry.id, topic = topic, value = system.time()))
  #entry.publish('', system.time())
  if entry.location:
    now = system.time()
    h = utils.hour(datetime.datetime.now())
    # Recompute sun data only when the day has changed since the last update
    if datetime.date.today() != datetime.date.fromtimestamp(
        entry.sun_updated):
      sun = entry.location.sun(local=True)
      entry.exports['ts'] = now
      entry.exports['ts_dawn'] = int(sun['dawn'].timestamp())
      entry.exports['ts_sunrise'] = int(sun['sunrise'].timestamp())
      entry.exports['ts_noon'] = int(sun['noon'].timestamp())
      entry.exports['ts_sunset'] = int(sun['sunset'].timestamp())
      entry.exports['ts_dusk'] = int(sun['dusk'].timestamp())
      entry.exports['tsd'] = utils.read_duration
      entry.exports['h'] = h
      entry.exports['h_dawn'] = utils.hour(sun['dawn'])
      entry.exports['h_sunrise'] = utils.hour(sun['sunrise'])
      entry.exports['h_noon'] = utils.hour(sun['noon'])
      entry.exports['h_sunset'] = utils.hour(sun['sunset'])
      entry.exports['h_dusk'] = utils.hour(sun['dusk'])
      entry.exports['hour'] = utils.hour
      entry.exports['hd'] = utils.read_duration_hour
      entry.sun_updated = now
      logging.debug(
          '#{id}> sun info for today: dawn={dawn}, sunrise={sunrise}, noon={noon}, sunset={sunset}, dusk={dusk}'
          .format(id=entry.id,
                  dawn=sun['dawn'],
                  sunrise=sun['sunrise'],
                  noon=sun['noon'],
                  sunset=sun['sunset'],
                  dusk=sun['dusk']))
    # Day is [sunrise, sunset) in hours-of-day
    entry.exports['is_day'] = h >= entry.exports[
        'h_sunrise'] and h < entry.exports['h_sunset']
    entry.exports['is_night'] = not entry.exports['is_day']
def scripting_globals(entry, _globals):
  """
  Build the globals dict used to execute entry scripts: the caller's globals,
  the entry's exports/methods, helper libraries and methods, plus a restricted
  set of Python builtins (sandboxing the script environment).
  """
  return {
      **_globals,
      **entry.exports,
      **entry.methods,

      # LIBS
      'utils': utils,
      'logging': logging,
      'test': test,

      # ENTITIES
      'entry': entry,
      'config': entry.config,
      'broker': system.broker(),
      'now': system.time(),
      'timems': system.timems,
      #'hour': int(time.strftime("%H")),

      # METHODS
      'd': utils.read_duration,  # (duration)
      'array_sum': utils.array_sum,
      'array_avg': utils.array_avg,
      'array_min': utils.array_min,
      'array_max': utils.array_max,
      'publish': entry.publish,  # (topic, payload = None, qos = None, retain = None, response_callback = None, no_response_callback = None, response_id = None)
      'entry_invoke': node.entry_invoke,  # (entry, method, *args, **kwargs)
      #'entries_invoke': node.entries_invoke, # (method, *args, **kwargs)
      'run_publish': entry.run_publish,  # (topic_rule)
      'do': system.do_action,  # (actionref, params, if_event_not_match = False, if_event_not_match_keys = False, if_event_not_match_timeout = None)
      'entry_do': system.entry_do_action,  # (entry_or_id, action, params = {}, init = None, if_event_not_match = False, if_event_not_match_keys = False, if_event_not_match_timeout = None)
      'self_do': entry.do,  # (action, params = {}, init = None, if_event_not_match = False, if_event_not_match_keys = False, if_event_not_match_timeout = None)
      'event_get': system.event_get,  # (eventref, timeout = None, keys = None, topic = None)
      'event_get_time': system.event_get_time,  # (eventref, timeout = None, topic = None)
      'entry_event_get': system.entry_event_get,  # (entry_or_id, eventname, condition = None, keys = None, timeout = None, topic = None)
      'call_method_delayed': call_method_delayed_lambda(entry),  # (methodname, delay, *args, **kwargs)
      'cancel_call_method_delayed': cancel_call_method_delayed_lambda(entry),  # ()

      # Restricted builtins whitelist exposed to scripts
      '__builtins__': {
          'locals': locals, 're': re, 'logging': logging, 'GeneratorExit': GeneratorExit,
          'abs': abs, 'dict': dict, 'help': help, 'min': min, 'setattr': setattr, 'all': all, 'dir': dir, 'hex': hex, 'next': next, 'slice': slice,
          'any': any, 'divmod': divmod, 'id': id, 'object': object, 'sorted': sorted, 'ascii': ascii, 'enumerate': enumerate, 'input': input, 'oct': oct,
          'bin': bin, 'int': int, 'str': str, 'bool': bool, 'isinstance': isinstance, 'ord': ord, 'sum': sum, 'bytearray': bytearray, 'filter': filter,
          'pow': pow, 'super': super, 'bytes': bytes, 'float': float, 'iter': iter, 'print': print, 'tuple': tuple, 'format': format, 'len': len,
          'property': property, 'type': type, 'chr': chr, 'list': list, 'range': range, 'getattr': getattr, 'zip': zip, 'map': map, 'reversed': reversed,
          'complex': complex, 'hasattr': hasattr, 'max': max, 'round': round, 'delattr': delattr, 'hash': hash, 'set': set
      }
  }