def on_entry_event_location(self_entry, entry, eventname, eventdata, caller, published_message):
  """Handle a 'location' event from another entry and turn it into presence detection.

  Two detection paths, both requiring the source entry to declare 'presence_detect':
  - region match: any reported region is listed in this entry's 'presence_home_regions';
  - coordinate match: reported lat/lon is within the configured home radius
    (plus the report's own accuracy radius, when provided).
  """
  params = eventdata['params']
  if "presence_detect" in entry.definition:
    # Path 1: region-based detection.
    if 'regions' in params and 'presence_home_regions' in self_entry.config:
      # Non-empty intersection between reported regions and configured home regions.
      if [ v for v in params['regions'] if v in self_entry.config['presence_home_regions'] ]:
        presence_method_detected(
          self_entry, entry.definition["presence_detect"],
          # Method name carries the reporting source, when present, e.g. "location_region/gps".
          'location_region' + (('/' + params['source']) if 'source' in params else ''),
          utils.read_duration(self_entry.config["presence_location_session_duration"]))
    # Path 2: coordinate-based detection.
    # NOTE(review): the "> 0" tests treat zero/negative coordinates as "unset"; this also
    # excludes valid southern/western-hemisphere coordinates — confirm intended.
    if 'latitude' in params and params['latitude'] > 0 and 'longitude' in params and params['longitude'] > 0 and "presence_home_location" in self_entry.config:
      distance = locations_distance(
        (params['latitude'], params['longitude']),
        (self_entry.config["presence_home_location"]["latitude"],
         self_entry.config["presence_home_location"]["longitude"]))
      # Inside home when within home radius + report accuracy radius (if any).
      if distance < self_entry.config["presence_home_location"]["radius"] + (params['radius'] if 'radius' in params else 0):
        presence_method_detected(
          self_entry, entry.definition["presence_detect"],
          'location' + (('/' + params['source']) if 'source' in params else ''),
          utils.read_duration(self_entry.config["presence_location_session_duration"]))
def job_load(entry, job):
  """Normalize a job definition and register it in entry.data['jobs'].

  Only jobs with 'run_interval' or 'run_cron' are loaded. Returns the assigned
  job id (jid), or None when the job was not loadable.
  """
  jid = None
  if 'run_interval' in job or 'run_cron' in job:
    # Use the explicit id when provided; the key is removed from the job body.
    if 'id' in job:
      jid = job['id']
      del job['id']
    # No id, or id already taken: derive a unique one from group/entry prefix + content hash.
    if not jid or jid in entry.data['jobs']:
      #jid = ((job['group'] + '.') if 'group' in job and job['group'] else ((job['entry_id'] + '.') if 'entry_id' in job and job['entry_id'] else '')) + hashlib.sha1((str(i) + ':' + str(job)).encode('UTF-8')).hexdigest()[:16]
      i = 0
      while True:
        # Suffix "_<i>" disambiguates hash collisions between identical job bodies.
        jid = ((job['group'] + '.') if 'group' in job and job['group'] else ((job['entry_id'] + '.') if 'entry_id' in job and job['entry_id'] else '')) + hashlib.sha1(
          (str(job)).encode('UTF-8')).hexdigest()[:16] + (('_' + str(i)) if i else '')
        if not (jid in entry.data['jobs']):
          break
        i = i + 1
    # Merge persisted state from a previous scheduler run (new definition wins on conflicts).
    if jid in entry.scheduler_oldjobs:
      job = {**entry.scheduler_oldjobs[jid], **job}
    # Normalize 'do' to a list of actions.
    if 'do' in job and isinstance(job['do'], str):
      job['do'] = [job['do']]
    if 'enabled' not in job:
      job['enabled'] = True
    # Default max_delay: cron jobs tolerate up to 60s of lateness, interval jobs none.
    if 'max_delay' not in job:
      job['max_delay'] = 60 if 'run_cron' in job else 0
    job['max_delay'] = utils.read_duration(job['max_delay'])
    if 'timer_to' not in job:
      job['timer_to'] = 0
    if 'last_run' not in job:
      job['last_run'] = 0
    # A non-positive run_interval invalidates the job.
    if 'run_interval' in job:
      job['run_interval'] = utils.read_duration(job['run_interval'])
      if job['run_interval'] <= 0:
        job = False
    # An invalid cron rule invalidates the job.
    if job and 'run_cron' in job and not croniter.is_valid(job['run_cron']):
      logging.error('#{id}> invalid cron rule: {cron} in job: {job}'.format(id=entry.id, cron=job['run_cron'], job=job))
      job = False
    if job:
      # (Re)compute next_run when missing or when the stored one is too stale to honor.
      if 'next_run' not in job or (job['max_delay'] > 0 and system.time() >= job['next_run'] + job['max_delay']):
        job_set_next_run(job)
      entry.data['jobs'][jid] = job
      # Ensure the job's group exists, restoring previous group state when available.
      if 'group' in job and job['group'] and not job['group'] in entry.data['groups']:
        entry.data['groups'][job['group']] = {
          'enabled': True,
          'timer_to': 0
        } if job['group'] not in entry.scheduler_oldgroups else entry.scheduler_oldgroups[job['group']]
  return jid
def entry_health_status(entry):
  """Compute the health status of a local entry.

  Returns a dict {'value': ..., 'reason': ...} where value is one of
  'dead', 'failure', 'alive' or 'idle'; returns None for non-local entries
  or entries never initialized by the health module (no health_entry attr).
  """
  if not entry.is_local or not hasattr(entry, 'health_entry'):
    return None
  res = {'value': 'idle', 'reason': ''}
  if entry.health_dead:
    # Dead wins over everything; health_dead carries the reason text.
    res['value'] = 'dead'
    res['reason'] = entry.health_dead
  else:
    res['reason'] = entry.health_response
    # Append one reason per required entry currently dead/failed.
    if entry.health_required:
      for e in entry.health_required:
        if entry.health_required[e] == 'dead' or entry.health_required[e] == 'failure':
          res['reason'] = res['reason'] + (', ' if res['reason'] else '') + _('{entry} is in state: ' + entry.health_required[e]).format(entry = e)
    # Append one reason per topic that missed its expected publish interval.
    # NOTE(review): health_publish[t] looks like [check_time, last_published, interval, delay] — confirm against writer.
    if entry.health_publish:
      for t in entry.health_publish:
        res['reason'] = res['reason'] + (', ' if res['reason'] else '') + _('{topic} not published as expected (last published: {last}, check: {now}, diff: {diff}, interval: {interval}, delay: {delay})').format(topic = t, now = entry.health_publish[t][0], last = entry.health_publish[t][1], diff = entry.health_publish[t][0] - entry.health_publish[t][1], interval = entry.health_publish[t][2], delay = entry.health_publish[t][3])
    # Any accumulated reason (not dead) means 'failure'.
    if res['reason']:
      res['value'] = 'failure'
    # Recently touched and no failure: report 'alive' instead of 'idle'.
    if res['value'] == 'idle' and system.time() - entry.health_time < utils.read_duration(entry.health_entry.config['health-idle-time']):
      res['value'] = 'alive'
  return res
def sniff_callback(installer_entry, mac_address):
  """React to a sniffed MAC address.

  Updates the last-seen timestamp of the tracked record and publishes
  '@/detected' (momentary devices, flood-protected) or '@/connected'
  (persistent devices that were not connected yet) on the owning entry.
  """
  mac_address = mac_address.upper()
  if mac_address not in installer_entry.net_sniffer_mac_addresses:
    return
  # Record layout: [entry_id, momentary, connected, last_seen].
  record = installer_entry.net_sniffer_mac_addresses[mac_address]
  entry = system.entry_get(record[0])
  if not entry:
    return
  momentary, connected, last_seen = record[1], record[2], record[3]
  record[3] = system.time()
  if momentary:
    # Suppress repeated detections within the flood window.
    flood_window = utils.read_duration(installer_entry.config['momentary_flood_time'])
    if system.time() - last_seen >= flood_window:
      entry.publish('@/detected')
  elif not connected:
    record[2] = True
    entry.publish('@/connected')
def on_subscribed_message(installer_entry, subscribed_message):
  """Handle an owrtwifi2mqtt message and translate it into connection events.

  matches[1] is the MAC address (dash-separated) and matches[2] the message
  subtopic ('lastseen/iso8601', 'lastseen/epoch' or 'event').
  """
  payload = subscribed_message.payload
  matches = subscribed_message.matches
  # Normalize MAC to the uppercase, colon-separated form used as map key.
  mac_address = matches[1].upper().replace("-", ":")
  if mac_address in installer_entry.data['owrtwifi2mqtt_mac_addresses']:
    entry = system.entry_get(installer_entry.data['owrtwifi2mqtt_mac_addresses'][mac_address][0])
    if entry:
      # Record layout: [entry_id, momentary, connected, last_seen].
      momentary = installer_entry.data['owrtwifi2mqtt_mac_addresses'][mac_address][1]
      connected = installer_entry.data['owrtwifi2mqtt_mac_addresses'][mac_address][2]
      last_seen = installer_entry.data['owrtwifi2mqtt_mac_addresses'][mac_address][3]
      installer_entry.data['owrtwifi2mqtt_mac_addresses'][mac_address][3] = system.time()
      # Any "seen" signal: lastseen updates, or a 'new' association event.
      if matches[2] == 'lastseen/iso8601' or matches[2] == 'lastseen/epoch' or (matches[2] == 'event' and payload == 'new'):
        if momentary:
          # Flood protection: ignore repeats inside the configured window.
          if system.time() - last_seen < utils.read_duration(installer_entry.config['momentary_flood_time']):
            return
          else:
            entry.publish('@/detected')
        elif not connected:
          installer_entry.data['owrtwifi2mqtt_mac_addresses'][mac_address][2] = True
          entry.publish('@/connected')
      # 'del' events may optionally mark the device disconnected immediately.
      elif (matches[2] == 'event' and payload == 'del') and installer_entry.config['disconnect_on_event_del']:
        installer_entry.data['owrtwifi2mqtt_mac_addresses'][mac_address][2] = False
        entry.publish('@/disconnected')
def presence_method_detected(entry, name, method, session_length=0):
  """Register that 'name' was detected by 'method'; returns True when this is a new presence.

  A presence record stores firstseen/lastseen per person and per detection
  method; session_length (duration) bounds how long a method detection lasts.
  Publishes './presence/in' and refreshes exports/status only for new presences.
  """
  now = system.time()
  session_length = utils.read_duration(session_length)
  # Whether anybody was already present BEFORE this detection (reported in the event).
  someone_inside = True if entry.data['presence'] else False
  isnew = False
  if not name in entry.data['presence']:
    entry.data['presence'][name] = {
      'firstseen': now,
      'lastseen': now,
      'methods': {}
    }
    isnew = True
  else:
    entry.data['presence'][name]['lastseen'] = now
  if not method in entry.data['presence'][name]['methods']:
    entry.data['presence'][name]['methods'][method] = {
      'firstseen': now,
      'lastseen': now,
      'session_length': session_length
    }
  else:
    entry.data['presence'][name]['methods'][method]['lastseen'] = now
    # Keep the shortest non-zero session_length seen so far (0 = unbounded).
    if session_length > 0 and (entry.data['presence'][name]['methods'][method]['session_length'] == 0 or session_length < entry.data['presence'][name]['methods'][method]['session_length']):
      entry.data['presence'][name]['methods'][method]['session_length'] = session_length
  # A fresh detection cancels any scheduled deletion of this method.
  if 'delete_at' in entry.data['presence'][name]['methods'][method]:
    del entry.data['presence'][name]['methods'][method]['delete_at']
  if isnew:
    entry.publish(
      './presence/in', {
        'name': name,
        'before_someone_inside': someone_inside,
        'method': method,
        'time': now
      })
    exports(entry)
    publish_status(entry)
    logging.debug(
      "{id}> {name} presence detected ({method} => {methods})".format(
        id=entry.id, name=name, method=method,
        methods=list(entry.data['presence'][name]['methods'])))
  else:
    logging.debug(
      "{id}> {name} presence confirmed ({method} => {methods})".format(
        id=entry.id, name=name, method=method,
        methods=list(entry.data['presence'][name]['methods'])))
  return isnew
def entry_init(self_entry, entry):
  """Register publish-interval health checkers for every topic of a local entry.

  For each published topic declaring 'run_interval' or 'check_interval', the
  expected publishing interval (scaled by the configured multiplier) is stored
  in self_entry.health_publish_checker[topic][entry.id].
  """
  if not entry.is_local:
    return
  # TODO run_cron support
  for topic, topic_def in entry.definition['publish'].items():
    if 'run_interval' not in topic_def and 'check_interval' not in topic_def:
      continue
    # 'check_interval' overrides 'run_interval' as the expected cadence.
    base = topic_def['check_interval'] if 'check_interval' in topic_def else topic_def['run_interval']
    interval = utils.read_duration(base) * self_entry.config['health-check_interval-multiplier']
    checkers = self_entry.health_publish_checker.setdefault(topic, {})
    checkers[entry.id] = {'interval': interval, 'last_published': 0}
def entry_load(self_entry, entry):
  """Initialize health tracking state on a local entry and declare its '@/health' topic.

  Attaches health_* bookkeeping attributes, reads per-entry (falling back to
  module-level) health timeouts, registers the health publish definition with
  notification rules and derived events, and hooks 'connected' / required-entry
  listeners.
  """
  if entry.is_local:
    entry.health_entry = self_entry
    # Last health payload actually published (used to detect changes).
    entry.health_published_status = { 'value': '', 'reason': '' }
    entry.health_dead = '' # Reason the entry should be considered dead
    entry.health_response = '' # A failure in response to subscribed topic
    entry.health_required = {} # The status of required entries (only for entries in status "dead" or "failure")
    entry.health_publish = {} # Failure in run_interval/check_interval (not published as often as expected)
    entry.health_time = 0 # Last access to an health_* variable
    entry.health_changed = 0 # Last change to an health_* variable
    # Timeouts: per-entry config wins, then installer config, then a disabled default (0/False).
    entry.health_config_dead_disconnected_timeout = utils.read_duration(entry.config['health-dead-disconnected-timeout'] if 'health-dead-disconnected-timeout' in entry.config else (self_entry.config['health-dead-disconnected-timeout'] if 'health-dead-disconnected-timeout' in self_entry.config else 0))
    entry.health_config_alive_on_message = utils.read_duration(entry.config['health-alive-on-message'] if 'health-alive-on-message' in entry.config else (self_entry.config['health-alive-on-message'] if 'health-alive-on-message' in self_entry.config else False))
    entry.health_config_dead_message_timeout = utils.read_duration(entry.config['health-dead-message-timeout'] if 'health-dead-message-timeout' in entry.config else (self_entry.config['health-dead-message-timeout'] if 'health-dead-message-timeout' in self_entry.config else 0))
    # Declare the retained '@/health' topic, with warn-level notifications on failure/dead
    # and JS-derived 'alive'/'failure'/'clock' events.
    system.entry_definition_add_default(entry, {
      'publish': {
        'health': {
          'description': _('Health of the entry'),
          'topic': '@/health',
          'type': 'object',
          'retain': 1,
          'notify': _("{caption} health is changed to: {payload[value]}"),
          'notify_if': {
            'js:payload["value"] == "failure" || payload["value"] == "dead"': {
              'notify_level': 'warn',
              'notify': _("{caption} health is changed to: {payload[value]} ({payload[reason]})"),
              'notify_next_level': 'warn'},
          },
          'events': {
            'alive': "js:('value' in payload && payload['value'] in {'alive':0, 'dead':0, 'idle':0} ? { value: payload['value'] != 'dead'} : null)",
            'failure': "js:('value' in payload && payload['value'] in {'alive':0, 'failure':0, 'idle':0} ? { value: payload['value'] == 'failure', reason: payload['value'] == 'failure' && 'reason' in payload ? payload['reason'] : ''} : null)",
            'clock': "js:({ value: payload['time'] })"
          },
        },
      },
      "events_listen": [".connected", ".alive", ".failure"]
    });
    # Track connection events when the entry supports them.
    if system.entry_support_event(entry, 'connected'):
      entry.on('connected', lambda source_entry, eventname, eventdata, caller, published_message: event_connected(self_entry, source_entry, eventname, eventdata, caller, published_message), None, self_entry)
    # Monitor the health of entries this entry declares as required.
    if 'required' in entry.definition and entry.definition['required']:
      for req_entry_id in entry.definition['required']:
        rentry = system.entry_get(req_entry_id)
        if rentry:
          _system_loaded_add_required_listeners(self_entry, rentry, entry)
def status_check(installer_entry):
  """Publish '@/disconnected' for tracked MACs unseen for longer than 'connection_time'."""
  timeout = utils.read_duration(installer_entry.config['connection_time'])
  for mac, record in installer_entry.net_sniffer_mac_addresses.items():
    # Record layout: [entry_id, momentary, connected, last_seen].
    connected, last_seen = record[2], record[3]
    if not (connected and system.time() - last_seen > timeout):
      continue
    entry = system.entry_get(record[0])
    if entry:
      record[2] = False
      entry.publish('@/disconnected')
def on_set(entry, subscribed_message):
  """Enable/disable scheduler jobs or groups from an incoming 'set' message.

  payload['target'] selects what to toggle: '' or '*' = the whole scheduler,
  '@<entry_id>' = every job of that entry, a job id, or a group name.
  payload['timer_to'] optionally schedules an automatic revert (relative
  duration or absolute timestamp). Publishes the outcome on '@/result'.
  """
  payload = subscribed_message.payload
  done = []
  if "enabled" in payload:
    target = payload['target'] if 'target' in payload else ''
    timer_to = utils.read_duration(payload['timer_to']) if 'timer_to' in payload else 0
    if timer_to > 0:
      # Values below 1e9 are relative durations; larger ones are absolute epoch timestamps.
      if timer_to < 1000000000:
        timer_to = system.time() + timer_to
    else:
      timer_to = 0
    if not target or target == '*':
      # Global scheduler toggle.
      if entry.data['enabled'] != payload['enabled']:
        entry.data['enabled'] = payload['enabled']
        entry.data['timer_to'] = timer_to
        done = ['*']
    elif target.startswith('@'):
      # Toggle all jobs belonging to a given entry (exact id or node-qualified "id@node").
      for jid, job in entry.data['jobs'].items():
        if 'entry_id' in job and (job['entry_id'] == target[1:] or job['entry_id'].startswith(target[1:] + '@')):
          if entry.data['jobs'][jid]['enabled'] != payload['enabled']:
            entry.data['jobs'][jid]['enabled'] = payload['enabled']
            entry.data['jobs'][jid]['timer_to'] = timer_to
            done.append(jid)
    elif target in entry.data['jobs']:
      # Toggle a single job.
      if entry.data['jobs'][target]['enabled'] != payload['enabled']:
        entry.data['jobs'][target]['enabled'] = payload['enabled']
        entry.data['jobs'][target]['timer_to'] = timer_to
        done = [target]
    elif target in entry.data['groups']:
      # Toggle a job group.
      if entry.data['groups'][target]['enabled'] != payload['enabled']:
        entry.data['groups'][target]['enabled'] = payload['enabled']
        entry.data['groups'][target]['timer_to'] = timer_to
        done = [target]
    # Empty result payload means nothing changed.
    entry.publish(
      '@/result', {
        'enabled': payload['enabled'],
        'target': ','.join(done),
        'timer_to': timer_to
      } if done else {})
def publish(entry, topic, definition):
  """Fetch the configured URL and publish the value extracted by the definition's expression.

  The URL comes from the topic definition, falling back to the entry config;
  fetch errors are logged and produce no publication.
  """
  logging.debug("Publishing topic %s" % (topic))
  url = entry.config['url'] if 'url' not in definition else definition['url']
  content = False
  if url:
    try:
      timeout = utils.read_duration(entry.config['request_timeout'])
      response = requests.get(url, timeout=timeout)
      content = response.content.decode(entry.config['charset'])
    except:
      logging.exception("Failed fetching %s" % (url))
  if not content or 'publish' not in definition:
    return
  decoded = decode_http_fetch_expression(entry, content, definition['publish'])
  if decoded:
    entry.publish('', decoded)
  else:
    logging.warning('HTTP fetch expression return empty value')
def arpscan_check(entry):
  """Run the configured arp-scan command and flag presence for matching occupants.

  Returns True when at least one configured occupant's MAC address appears in
  the scan output, False otherwise (including on any execution error).
  NOTE: the command is executed with shell=True from configuration — the config
  source must be trusted.
  """
  try:
    scan_output = subprocess.check_output(entry.config['arpscan_command'], shell=True).decode("utf-8").lower()
    detected = 0
    for occupant in entry.config["occupants"]:
      if 'mac_address' not in occupant:
        continue
      if occupant['mac_address'].lower() not in scan_output:
        continue
      presence_detected(
        entry, occupant['name'], 'arpscan',
        utils.read_duration(entry.config['arpscan_session_length']))
      detected += 1
    return detected > 0
  except:
    logging.exception("arpscan_check exception")
    return False
def status_check(installer_entry):
  """Publish '@/disconnected' (with a debug trace) for tracked MACs unseen past 'connection_time'."""
  timeout = utils.read_duration(installer_entry.config['connection_time'])
  for mac, record in installer_entry.net_sniffer_mac_addresses.items():
    # Record layout: [entry_id, momentary, connected, last_seen].
    connected, last_seen = record[2], record[3]
    if not (connected and system.time() - last_seen > timeout):
      continue
    entry = system.entry_get(record[0])
    if entry:
      record[2] = False
      entry.publish('@/disconnected')
      logging.debug(
        "#{id}> {entry}: status_check, res: disconnected".format(
          id=installer_entry.id, entry=entry.id))
def presence_method_gone_away(entry, name, method, delete_after=0):
  """Remove (or schedule removal of) a detection method for 'name'.

  With delete_after == 0 the method is removed immediately; when it was the
  last method, the whole presence is dropped and './presence/out' is published.
  With delete_after > 0 the method is only marked with a future 'delete_at'.
  Returns True when at least one presence was fully removed.
  """
  now = system.time()
  delete_after = utils.read_duration(delete_after)
  d = 0
  # Iterate over a copy: the loop may delete entries from 'presence'.
  for pname, p in list(entry.data['presence'].items()):
    if pname == name:
      if method in entry.data['presence'][name]['methods']:
        if not delete_after:
          del entry.data['presence'][name]['methods'][method]
          if not entry.data['presence'][name]['methods']:
            # Last method gone: the person left.
            del entry.data['presence'][name]
            entry.publish(
              './presence/out', {
                'name': name,
                'after_someone_inside': True if entry.data['presence'] else False,
                'method': method,
                'time': now
              })
            logging.debug(
              "{id}> {name} gone away ({method})".format(
                id=entry.id, name=name, method=method))
            d = d + 1
          else:
            entry.data['presence'][name]['lastseen'] = now
        else:
          # Deferred removal: mark the method for later cleanup.
          entry.data['presence'][name]['methods'][method]['delete_at'] = now + delete_after
  if d > 0:
    exports(entry)
  else:
    # NOTE(review): this log reads entry.data['presence'][name]; if 'name' was never
    # present this raises KeyError — presumably callers guarantee it exists. Verify.
    logging.debug(
      "{id}> {name} removed method ({method} => {methods})".format(
        id=entry.id, name=name, method=method,
        methods=list(entry.data['presence'][name]['methods'])))
  return d > 0
def run(entry):
  """Detect dead nodes and clean them up, then re-publish this node's metadata.

  A node is dead when its last 'his_time' is older than the configured
  'dead_time'. For each dead node a './dead-node' event is published, its
  entries are unloaded and its seen/nodes records are dropped. A 60s grace
  period after this entry's creation avoids false positives at startup.
  """
  t = system.time()
  if 'seen' in entry.data and t - entry.created > 60:
    # Hoisted out of the comprehension: the duration is invariant across nodes
    # (the original re-parsed entry.config['dead_time'] once per node).
    dead_time = utils.read_duration(entry.config['dead_time'])
    dead = [
      node for node in entry.data['seen']
      if node != system.default_node_name
      and t - entry.data['seen'][node]['his_time'] > dead_time
    ]
    for node in dead:
      entry.publish(
        './dead-node', {
          'name': node,
          'last_seen': entry.data['seen'][node]['his_time'],
          'time': t
        })
      system.entry_unload_node_entries(node)
      del entry.data['seen'][node]
      del entry.data['nodes'][node]
  publish_metadata(entry, entry.topic('./metadata'))
def on_metadata(entry, subscribed_message):
  """Process a metadata broadcast from another node.

  Accepts only fresh payloads (within 'dead_time') from other nodes; records
  when the node was seen, merges newer per-node metadata, loads the remote
  node's entries (unless running 'local'), and re-broadcasts our own metadata
  when anything changed.
  """
  payload = subscribed_message.payload
  # Ignore our own broadcasts and stale payloads.
  if payload and 'from_node' in payload and payload['from_node'] != system.default_node_name and 'time' in payload and system.time() - payload['time'] < utils.read_duration(entry.config['dead_time']):
    entry.data['seen'][payload['from_node']] = {
      'my_time': system.time(),
      'his_time': payload['time']
    }
    # Collect nodes whose advertised metadata is newer than what we hold.
    todo = []
    for node in payload['nodes']:
      if node not in entry.data['nodes'] or entry.data['nodes'][node]['time'] < payload['nodes'][node]['time']:
        entry.data['nodes'][node] = payload['nodes'][node]
        todo.append(node)
    if not entry.config['local']:
      for node in todo:
        #entry.data[node] = payload['nodes'][node]
        # Entries travel base64-compressed; keep only those belonging to this node.
        payload_entries = utils.b64_decompress_data(payload['entries'])
        node_entries = {}
        for entry_id in payload_entries:
          if entry_id.endswith('@' + node):
            node_entries[entry_id] = payload_entries[entry_id]
        #system.entry_load_definitions(node_entries, node_name = node, unload_other_from_node = True, id_from_definition = False)
        system.entry_load(node_entries, node_name=node, unload_other_from_node=True, id_from_definition=False)
    if todo:
      publish_metadata(entry, entry.topic('./metadata'))
      logging.debug(
        '#{id}> Loaded new metadata by: {todo}, current entries: {entries}'
        .format(id=entry.id, todo=todo, entries=", ".join(system.entries().keys())))
def publish_bandwidth(entry, topic_rule, topic_definition):
  """Measure and publish network bandwidth over a configured sampling window.

  Takes two /proc/net/dev samples separated by 'bandwidth_check_time' seconds
  and publishes the download/upload rates in bps and Mbps. On any sampling
  problem an {'error': ...} payload is published instead.
  """
  if not entry.config['bandwidth-enabled']:
    return
  data1 = _bandwidth_proc_net_dev_data(entry.config)
  if not data1:
    entry.publish('', {'error': 'error getting bandwidth'})
    return
  seconds = utils.read_duration(entry.config['bandwidth_check_time'])
  # FIX: guard against a zero/negative sampling window (original divided by zero).
  if seconds <= 0:
    entry.publish('', {'error': 'error getting bandwidth'})
    return
  system.sleep(seconds)
  data2 = _bandwidth_proc_net_dev_data(entry.config)
  # FIX: the original only validated the first sample; a failed second sample
  # raised TypeError on subscription below.
  if not data2:
    entry.publish('', {'error': 'error getting bandwidth'})
    return
  # Counters are bytes; convert the delta to bits per second.
  down_bps = (data2['down'] - data1['down']) * 8 / seconds
  up_bps = (data2['up'] - data1['up']) * 8 / seconds
  entry.publish(
    '', {
      'download_bps': round(down_bps),
      'download_mbps': round(down_bps / (1024 * 1024), 1),
      'upload_bps': round(up_bps),
      'upload_mbps': round(up_bps / (1024 * 1024), 1),
      'time': system.time(),
    })
def run_step():
  """One scheduler pass over all local entries.

  For every local entry this validates/initializes run_interval and run_cron
  (both at entry level and per published topic), then invokes the entry's
  'run' handler and per-topic publish handlers when their next_run is due,
  subject to the overload throttle policy. Stats and entry data are persisted
  along the way.
  """
  _s = system._stats_start()
  now = system.time()
  clone_entry_names = list(
    system.entries().keys()
  )  # I make a clone of entry names, because some handler could change "entries"
  for entry_id in clone_entry_names:
    entry = system.entry_get(entry_id)
    if entry and entry.is_local:
      # Initialization / check configuration validity
      # A non-positive run_interval is invalid and gets dropped.
      if 'run_interval' in entry.definition and utils.read_duration(entry.definition['run_interval']) <= 0:
        logging.error(
          '#{id}> invalid run_interval: {run_interval}'.format(
            id=entry_id, run_interval=entry.definition['run_interval']))
        del entry.definition['run_interval']
      # (Re)initialize cron state when the rule changed or next_run is missing.
      if 'run_cron' in entry.definition and entry_implements(entry_id, 'run') and not ('cron' in entry.data and entry.data['cron'] == entry.definition['run_cron'] and 'next_run' in entry.data):
        if not croniter.is_valid(entry.definition['run_cron']):
          logging.error('#{id}> invalid cron rule: {cron}'.format(
            id=entry_id, cron=entry.definition['run_cron']))
          del entry.definition['run_cron']
        else:
          entry.data['cron'] = entry.definition['run_cron']
          #itr = croniter(entry.data['cron'], datetime.datetime.now().astimezone())
          itr = croniter(
            entry.data['cron'],
            datetime.datetime.fromtimestamp(now).astimezone())
          entry.data['next_run'] = itr.get_next()
      if 'last_run' not in entry.data:
        entry.data['last_run'] = 0
      if 'next_run' not in entry.data:
        entry.data['next_run'] = now
      # Entry-level 'run' invocation.
      if entry_implements(entry_id, 'run') and ('run_interval' in entry.definition or 'run_cron' in entry.definition):
        throttle_policy = _run_step_throttle_policy(entry, entry.definition, None)
        if now >= entry.data['next_run']:
          # Throttle: run when forced/skipped, or when enough time passed since last_run.
          if throttle_policy == 'force' or throttle_policy == 'skip' or (
              isinstance(throttle_policy, int) and now - entry.data['last_run'] > throttle_policy):
            entry.data['last_run'] = now
            if 'run_interval' in entry.definition:
              entry.data['next_run'] = now + utils.read_duration(entry.definition['run_interval'])
            else:
              #itr = croniter(entry.data['cron'], datetime.datetime.now().astimezone())
              itr = croniter(
                entry.data['cron'],
                datetime.datetime.fromtimestamp(now).astimezone())
              entry.data['next_run'] = itr.get_next()
            # 'skip' advances the schedule but does NOT invoke the handler.
            if throttle_policy != 'skip':
              entry_invoke_threaded(entry_id, 'run')
            else:
              logging.debug(
                "#{entry}> system overload ({load}), skipped invokation of {method}."
                .format(entry=entry.id, load=load_level(), method='run'))
          else:
            logging.debug(
              "#{entry}> system overload ({load}), postponed invokation of {method}."
              .format(entry=entry.id, load=load_level(), method='run'))
      # Per-topic publish scheduling (same logic as above, keyed by topic_rule).
      if 'publish' in entry.definition:
        for topic_rule in entry.definition['publish']:
          # Initialization / check configuration validity
          if 'run_interval' in entry.definition['publish'][topic_rule] and utils.read_duration(entry.definition['publish'][topic_rule]['run_interval']) <= 0:
            logging.error(
              '#{id}> invalid run_interval for topic rule {topic_rule}: {run_interval}'
              .format(id=entry_id, topic_rule=topic_rule,
                      run_interval=entry.definition['publish'][topic_rule]['run_interval']))
            del entry.definition['publish'][topic_rule]['run_interval']
          if 'run_cron' in entry.definition['publish'][topic_rule] and not ('cron_' + topic_rule in entry.data and entry.data['cron_' + topic_rule] == entry.definition['publish'][topic_rule]['run_cron'] and 'next_run_' + topic_rule in entry.data):
            if not croniter.is_valid(entry.definition['publish'][topic_rule]['run_cron']):
              logging.error(
                '#{id}> invalid cron rule for publishing topic rule {topic_rule}: {cron}'
                .format(id=entry_id, topic_rule=topic_rule,
                        cron=entry.definition['publish'][topic_rule]['run_cron']))
              del entry.definition['publish'][topic_rule]['run_cron']
            else:
              entry.data['cron_' + topic_rule] = entry.definition['publish'][topic_rule]['run_cron']
              #itr = croniter(entry.data['cron_' + topic_rule], datetime.datetime.now().astimezone())
              itr = croniter(
                entry.data['cron_' + topic_rule],
                datetime.datetime.fromtimestamp(now).astimezone())
              entry.data['next_run_' + topic_rule] = itr.get_next()
          if 'last_run_' + topic_rule not in entry.data:
            entry.data['last_run_' + topic_rule] = 0
          if 'next_run_' + topic_rule not in entry.data:
            entry.data['next_run_' + topic_rule] = now
          if 'run_interval' in entry.definition['publish'][topic_rule] or 'run_cron' in entry.definition['publish'][topic_rule]:
            throttle_policy = _run_step_throttle_policy(
              entry, entry.definition['publish'][topic_rule], topic_rule)
            if now >= entry.data['next_run_' + topic_rule]:
              if throttle_policy == 'force' or throttle_policy == 'skip' or (
                  isinstance(throttle_policy, int) and now - entry.data['last_run_' + topic_rule] > throttle_policy):
                entry.data['last_run_' + topic_rule] = now
                if 'run_interval' in entry.definition['publish'][topic_rule]:
                  entry.data['next_run_' + topic_rule] = now + utils.read_duration(
                    entry.definition['publish'][topic_rule]['run_interval'])
                else:
                  #itr = croniter(entry.data['cron_' + topic_rule], datetime.datetime.now().astimezone())
                  itr = croniter(
                    entry.data['cron_' + topic_rule],
                    datetime.datetime.fromtimestamp(now).astimezone())
                  entry.data['next_run_' + topic_rule] = itr.get_next()
                if throttle_policy != 'skip':
                  entry_invoke_publish(
                    entry, topic_rule, entry.definition['publish'][topic_rule])
                else:
                  logging.debug(
                    "#{entry}> system overload ({load}), skipped invokation of publish {method}."
                    .format(entry=entry.id, load=load_level(), method=topic_rule))
              else:
                logging.debug(
                  "#{entry}> system overload ({load}), postponed invokation of publish {method}."
                  .format(entry=entry.id, load=load_level(), method=topic_rule))
      # Persist entry data mutated above; timed separately for stats.
      _s1 = system._stats_start()
      entry.store_data(False)
      system._stats_end('node.run.store_data', _s1)
  system._stats_end('node.run', _s)
def _thread_checker(installer_entry):
  """Background loop: poll status_check until the thread is flagged as destroyed.

  The poll period is a tenth of the configured 'connection_time' window.
  '_destroyed' is a project-set flag on the thread object, not a stdlib attribute.
  """
  # FIX: threading.currentThread() is a deprecated alias (DeprecationWarning since
  # Python 3.10) — current_thread() returns the same object.
  while not threading.current_thread()._destroyed:
    status_check(installer_entry)
    system.sleep(
      utils.read_duration(installer_entry.config['connection_time']) / 10)
def _sem_filter_allows(config_filter, section, key):
  """True when 'key' in 'section' passes the configured data filter (no/empty filter = allow all)."""
  if not config_filter or section not in config_filter or not config_filter[section]:
    return True
  return key in config_filter[section]

def _sem_scaled_data(values, config_filter, section):
  """Build a publish payload from raw register values.

  Numeric values are multiplied by their matching '*_scale' register (looked up
  first by the last underscore-separated token, then by the full key);
  '*_scale' registers themselves are never emitted. 'c_serialnumber' is always
  included when present.
  """
  data = {}
  if "c_serialnumber" in values:
    data["c_serialnumber"] = values["c_serialnumber"]
  for k, v in values.items():
    if not _sem_filter_allows(config_filter, section, k):
      continue
    if (isinstance(v, int) or isinstance(v, float)) and "_scale" not in k:
      k_split = k.split("_")
      scale = 0
      if f"{k_split[len(k_split) - 1]}_scale" in values:
        scale = values[f"{k_split[len(k_split) - 1]}_scale"]
      elif f"{k}_scale" in values:
        scale = values[f"{k}_scale"]
      data[k] = float(v * (10**scale))
    elif "_scale" not in k:
      data[k] = v
  return data

def publish(entry, topic, definition):
  """Read a SolarEdge inverter over Modbus/TCP and publish inverter/meter/battery data.

  Each section is skipped when its sanity-check registers are missing or all
  zero (a symptom of a bad Modbus read). Any exception is logged and swallowed.
  """
  try:
    inverter = solaredge_modbus.Inverter(
      host=entry.config['solaredge_modbus_tcp_host'],
      port=entry.config['solaredge_modbus_tcp_port'],
      timeout=utils.read_duration(entry.config['solaredge_modbus_tcp_timeout']),
      unit=entry.config['solaredge_modbus_tcp_unit'])
    data_filter = entry.config['solaredge_modbus_tcp_data_filter']

    # Inverter registers.
    values = inverter.read_all()
    filtered = "energy_total" not in values or "temperature" not in values or (
      _float_is_zero(values["energy_total"]) and _float_is_zero(values["temperature"]))
    if not filtered:
      inverter_data = _sem_scaled_data(values, data_filter, "inverter")
      if inverter_data:
        entry.publish('./inverter', inverter_data)

    # Meter registers.
    for meter, params in inverter.meters().items():
      meter = meter.lower()
      values = params.read_all()
      filtered = "export_energy_active" not in values or "import_energy_active" not in values or "frequency" not in values or (
        _float_is_zero(values["export_energy_active"]) and
        _float_is_zero(values["import_energy_active"]) and
        _float_is_zero(values["frequency"]))
      if not filtered:
        meter_data = _sem_scaled_data(values, data_filter, "meter")
        if meter_data:
          entry.publish('./meter/' + meter, meter_data)

    # Battery registers.
    for battery, params in inverter.batteries().items():
      battery = battery.lower()
      values = params.read_all()
      # FIX: the original tested "lifetime_export_energy_counter" twice (copy-paste);
      # the second occurrence was clearly meant to be the import counter.
      filtered = "lifetime_export_energy_counter" not in values or "lifetime_import_energy_counter" not in values or "instantaneous_voltage" not in values or (
        _float_is_zero(values["lifetime_export_energy_counter"]) and
        _float_is_zero(values["lifetime_import_energy_counter"]) and
        _float_is_zero(values["instantaneous_voltage"]))
      if not filtered:
        battery_data = _sem_scaled_data(values, data_filter, "battery")
        if battery_data:
          entry.publish('./battery/' + battery, battery_data)
  except:
    logging.exception(
      "{id}> Exception during inverter data collection...".format(id=entry.id))
def mac_address_detected(installer_entry, env, mac_address, disconnected=False, ip_address=None):
  """Process a MAC sighting (or explicit disconnection) for a tracked device.

  Publishes '@/detected' (momentary devices, flood-protected), '@/connected'
  or '@/disconnected' on the owning entry; 'env' carries a per-scan cache
  (e.g. the ARP table) shared between calls in the same sweep.
  """
  if mac_address in installer_entry.net_sniffer_mac_addresses:
    logging.debug(
      "#{id}> mac_address_detected: {mac_address}, connected: {connected}, ip_address: {ip_address}"
      .format(id=installer_entry.id, mac_address=mac_address,
              connected=not disconnected, ip_address=ip_address))
    entry = system.entry_get(installer_entry.net_sniffer_mac_addresses[mac_address][0])
    if entry:
      # Record layout: [entry_id, momentary, connected, last_seen].
      momentary = installer_entry.net_sniffer_mac_addresses[mac_address][1]
      was_connected = installer_entry.net_sniffer_mac_addresses[mac_address][2]
      last_seen = installer_entry.net_sniffer_mac_addresses[mac_address][3]
      installer_entry.net_sniffer_mac_addresses[mac_address][3] = system.time()
      publish = None
      if not disconnected and momentary:
        # Flood protection: ignore repeats inside the configured window.
        if system.time() - last_seen < utils.read_duration(installer_entry.config['momentary_flood_time']):
          return
        else:
          publish = '@/detected'
      elif not disconnected and not was_connected:
        installer_entry.net_sniffer_mac_addresses[mac_address][2] = True
        publish = '@/connected'
      elif disconnected and was_connected:
        installer_entry.net_sniffer_mac_addresses[mac_address][2] = False
        publish = '@/disconnected'
      logging.debug(
        "#{id}> {entry}: mac_address_detected, res: {publish}, mac: {mac_address}, connected: {connected}, ip_address: {ip_address}, momentary: {momentary}, was_connected: {was_connected}, last_seen: {last_seen}"
        .format(id=installer_entry.id, entry=entry.id, publish=publish,
                mac_address=mac_address, connected=not disconnected,
                ip_address=ip_address, momentary=momentary,
                was_connected=was_connected, last_seen=last_seen))
      if publish:
        data = {'mac_address': mac_address}
        # Resolve the IP from the (cached) ARP table when not supplied by the caller.
        if not disconnected and not ip_address and installer_entry.config['use_arp']:
          if 'arp_list' not in env:
            env['arp_list'] = _arp_list(installer_entry)
          if mac_address in env['arp_list']:
            ip_address = env['arp_list'][mac_address]
        if ip_address:
          data['ip_address'] = ip_address
        entry.publish(publish, data)
def notification_build(published_message):
  """Build the notification metadata for a published message.

  Evaluates the topic definition's 'notify_if' JS conditions to collect
  overrides, renders the notification string (via a JS handler or a format
  template), escalates the level on recent changes ('notify_change_level' /
  'notify_change_duration'), and applies/records a pending next-level for the
  topic. Returns a dict with notification_slevel / notification_level /
  notification_string.
  """
  global notifications_last_topic_payloads, notifications_topic_next_level
  entry = published_message.entry
  ldef = published_message.definition
  topic = published_message.topic
  payload = published_message.payload
  matches = published_message.matches
  set_notifications_topic_next_level = False
  defaults = {}
  # Collect overrides from every matching 'notify_if' condition.
  if 'notify_if' in ldef:
    for expr in ldef['notify_if']:
      v = scripting_js.script_eval(expr, {
        "topic": topic,
        "payload": payload,
        "matches": matches
      }, cache=True)
      #v = entry.script_eval(expr, payload = getPayloadItem(payload, ldef), matches = matches, caption = entry.caption)
      if v:
        for k in ldef['notify_if'][expr]:
          defaults[k] = ldef['notify_if'][expr][k]
        if 'notify_next_level' in ldef['notify_if'][expr]:
          set_notifications_topic_next_level = ldef['notify_if'][expr]['notify_next_level']
  # Fall back to top-level definition values for anything not overridden.
  for k in [
      'notify', 'notify_level', 'notify_handler', 'notify_change_level',
      'notify_change_duration', 'payload'
  ]:
    if k in ldef and k not in defaults:
      defaults[k] = ldef[k]
  string = None
  if not string and 'notify_handler' in defaults:
    #handler = node.get_handler(entry, defaults['notify_handler'])
    #if handler:
    #  string = handler(entry, topic, getPayloadItem(payload, defaults))
    # TODO notify_handler must be redone via JS: it must also receive matches, and its users must be updated accordingly
    string = scripting_js.script_eval(defaults['notify_handler'], {
      "topic": topic,
      "payload": payload,
      "matches": matches
    }, cache=True)
  elif not string and 'notify' in defaults and defaults['notify'] and isinstance(defaults['notify'], str):
    # '_' exposes a scalar payload as {_[payload]} in the template.
    string = defaults['notify'].format(
      payload=getPayloadItem(payload, defaults),
      _=None if isinstance(payload, dict) else getPayloadItem({'payload': payload}, defaults),
      matches=matches,
      caption=entry.caption)
  if string:
    # Escalate the level when the rendered string changed recently enough.
    changed = topic in notifications_last_topic_payloads and notifications_last_topic_payloads[topic][0] != string
    if changed and 'notify_change_level' in defaults and (
        'notify_change_duration' not in defaults or system.time() -
        notifications_last_topic_payloads[topic][1] > utils.read_duration(defaults['notify_change_duration'])):
      defaults['notify_level'] = defaults['notify_change_level']
      notifications_last_topic_payloads[topic] = [string, system.time()]
    else:
      # Keep the previous change timestamp (or 0 for a brand new topic).
      notifications_last_topic_payloads[topic] = [
        string,
        notifications_last_topic_payloads[topic][1] if topic in notifications_last_topic_payloads else 0
      ]
  # A pending next-level (set by a previous message) overrides the computed level once.
  if topic in notifications_topic_next_level and notifications_topic_next_level[topic]:
    notify_level = notifications_topic_next_level[topic]
  else:
    notify_level = defaults['notify_level'] if 'notify_level' in defaults else 'info'
  notifications_topic_next_level[topic] = set_notifications_topic_next_level
  return {
    'notification_slevel': notify_level,
    'notification_level': notifications_levels[notify_level],
    'notification_string': string if string else None
  }