def _health_checker_timer(entry):
  """Background loop that drives the health checkers of all monitored entries.

  While system load is high, health_publish_checker and health_dead_checker are
  effectively disabled (their deadlines are pushed forward) to avoid reporting
  fake health problems. Once the load has been back to 0 for more than 60s, the
  checkers resume normal operation. Sleeps 'health-checker-secs' between rounds.
  """
  # Timestamp of the last moment the system was overloaded (0 = not disabled).
  health_disable_load_level = 0
  # NOTE(review): threading.currentThread() is a deprecated alias of current_thread();
  # `_destroyed` is presumably a custom flag set on this thread at shutdown — confirm.
  while not threading.currentThread()._destroyed:
    now = system.time()
    if node.load_level() > 0:
      health_disable_load_level = now
    elif health_disable_load_level > 0 and node.load_level() == 0 and now - health_disable_load_level > 60:
      # Load has been back to normal for >60s: re-enable the checkers.
      health_disable_load_level = 0
    # If a moment ago the system was overloaded, the checker data must be considered
    # invalid and reset (otherwise a lot of fake health problems would be reported).
    if health_disable_load_level > 0:
      # health_dead_checker: push every dead-deadline forward by its own interval.
      # Tuple layout (grounded by the writes below): (deadline, interval, reason).
      for entry_id in entry.health_dead_checker:
        source_entry = system.entry_get(entry_id)
        if source_entry:
          entry.health_dead_checker[entry_id] = (system.time() + entry.health_dead_checker[entry_id][1], entry.health_dead_checker[entry_id][1], entry.health_dead_checker[entry_id][2])
      # health_publish_checker: refresh 'last_published' so no publish appears late.
      for t in entry.health_publish_checker:
        for e in entry.health_publish_checker[t]:
          if entry.health_publish_checker[t][e]['last_published'] > 0:
            entry.health_publish_checker[t][e]['last_published'] = system.time()
    else:
      # health_dead_checker: collect entries whose dead-deadline has passed.
      timeouts = [entry_id for entry_id in entry.health_dead_checker if now > entry.health_dead_checker[entry_id][0]]
      if timeouts:
        for entry_id in timeouts:
          source_entry = system.entry_get(entry_id)
          if source_entry:
            source_entry.health_dead = entry.health_dead_checker[entry_id][2]
            check_health_status(source_entry)
        # Drop the expired watchers in one pass.
        entry.health_dead_checker = {entry_id: entry.health_dead_checker[entry_id] for entry_id in entry.health_dead_checker if entry_id not in timeouts}
      # health_publish_checker: tolerate broker queue delay (doubled) outside test mode.
      delay = system.broker().queueDelay() * 2 if not system.test_mode else 0
      for t in entry.health_publish_checker:
        for e in entry.health_publish_checker[t]:
          if entry.health_publish_checker[t][e]['last_published'] > 0 and now - entry.health_publish_checker[t][e]['last_published'] > entry.health_publish_checker[t][e]['interval'] + delay:
            target_entry = system.entry_get(e)
            # Only record the failure once per topic (t not already flagged).
            if target_entry and t not in target_entry.health_publish:
              target_entry.health_publish[t] = [now, entry.health_publish_checker[t][e]['last_published'], entry.health_publish_checker[t][e]['interval'], delay]
              check_health_status(target_entry)
    system.sleep(entry.config['health-checker-secs'])
def sniff_callback(installer_entry, mac_address):
  """Handle a sniffed MAC address: publish detection/connection state on the tracked entry.

  Record layout: [entry_id, momentary, connected, last_seen].
  """
  mac_address = mac_address.upper()
  if mac_address not in installer_entry.net_sniffer_mac_addresses:
    return
  record = installer_entry.net_sniffer_mac_addresses[mac_address]
  entry = system.entry_get(record[0])
  if not entry:
    return
  momentary, connected, last_seen = record[1], record[2], record[3]
  record[3] = system.time()
  if momentary:
    # Momentary devices: suppress repeated detections inside the flood window.
    if system.time() - last_seen >= utils.read_duration(installer_entry.config['momentary_flood_time']):
      entry.publish('@/detected')
  elif not connected:
    record[2] = True
    entry.publish('@/connected')
def entry_invoke_delayed(entry, timer_key, delay, method, *args, _pass_entry=True, **kwargs):
  """Schedule an entry handler to run after `delay` seconds via a threading.Timer.

  `entry` may be an entry object or an entry id string; returns False when the
  id cannot be resolved. A previously armed timer under the same `timer_key` is
  cancelled first. When `_pass_entry` is true the entry object is prepended to
  the handler arguments.
  """
  if isinstance(entry, str):
    entry_name = entry
    entry = system.entry_get(entry_name)
    if not entry:
      logging.error(
          "#NODE_SYSTEM> skipped invocation of {method} (delayed): entry {entry} not found!"
          .format(entry=entry_name, method=method))
      return False
  func = get_handler(entry, method)
  if not func:
    return
  cancel_entry_invoke_delayed(entry, timer_key)
  # The wrapper receives (func, method, entry_id, [entry,] *args).
  call_args = [func, method, entry.id] + ([entry] if _pass_entry else []) + list(args)
  timer = threading.Timer(delay, entry_invoke_delayed_wrapper, args=call_args, kwargs=kwargs)
  entry.timers[timer_key] = timer
  timer.start()
def entry_invoke_handler_threaded(entry, handler_param, handler_name, *args, **kwargs):
  """Invoke an entry handler in a background thread, keyed to avoid concurrent runs.

  `entry` may be an entry object or an entry id string. `handler_param` selects
  the handler via get_handler(); `handler_name` names the thread key and logs.
  Special kwargs: '_thread_key' extends the thread key, '_thread_multiple'
  appends a random suffix so multiple invocations may run concurrently.
  Returns False when the entry or its handler cannot be resolved.
  """
  if isinstance(entry, str):
    entry = system.entry_get(entry)
  handler = get_handler(entry, handler_param) if entry else None
  if not handler:
    # BUGFIX: previously fell through after logging, which crashed below on a
    # None entry (entry.id) or launched a thread with a None handler.
    logging.error("#{entry}> handler not found: {handler}".format(
        entry=entry.id if entry else entry, handler=handler_param))
    return False
  thread_key = entry.id + '.' + handler_name
  if '_thread_key' in kwargs:
    thread_key = thread_key + '.' + kwargs['_thread_key']
    del kwargs['_thread_key']
  if '_thread_multiple' in kwargs:
    thread_key = thread_key + '.' + str(random.randrange(1000000))
    del kwargs['_thread_multiple']
  if thread_key not in threads or threads[thread_key] is None or not threads[thread_key].is_alive():
    logging.debug("#{entry}> invoking handler {method} (threaded) ...".format(
        entry=entry.id, method=handler_name))
    args = [handler, handler_name, entry.id, entry] + list(args)
    # daemon=True allows the main application to exit (and ctrl+c to work) even
    # though the thread is still running. https://docs.python.org/3/library/threading.html
    threads[thread_key] = threading.Thread(target=entry_invoke_threaded_wrapper,
                                           args=args, kwargs=kwargs, daemon=True)
    threads[thread_key].start()
  else:
    # logging.warn is a deprecated alias of logging.warning
    logging.warning("#{entry}> skipped invocation of {method}: already running!".format(
        entry=entry.id, method=handler_name))
def on_subscribed_message(installer_entry, subscribed_message):
  """React to owrtwifi2mqtt messages: publish detected/connected/disconnected for tracked MACs.

  Record layout: [entry_id, momentary, connected, last_seen].
  """
  payload = subscribed_message.payload
  matches = subscribed_message.matches
  mac_address = matches[1].upper().replace("-", ":")
  tracked = installer_entry.data['owrtwifi2mqtt_mac_addresses']
  if mac_address not in tracked:
    return
  record = tracked[mac_address]
  entry = system.entry_get(record[0])
  if not entry:
    return
  momentary, connected, last_seen = record[1], record[2], record[3]
  record[3] = system.time()
  seen_now = matches[2] in ('lastseen/iso8601', 'lastseen/epoch') or (matches[2] == 'event' and payload == 'new')
  if seen_now:
    if momentary:
      # Momentary devices: suppress repeated detections inside the flood window.
      if system.time() - last_seen >= utils.read_duration(installer_entry.config['momentary_flood_time']):
        entry.publish('@/detected')
    elif not connected:
      record[2] = True
      entry.publish('@/connected')
  elif matches[2] == 'event' and payload == 'del' and installer_entry.config['disconnect_on_event_del']:
    record[2] = False
    entry.publish('@/disconnected')
def publish_all_entries_status(entry, topic_rule, topic_definition):
  """Publish a health-status summary of every local entry (except the publisher itself)."""
  status = {}
  for entry_id in system.entries():
    oentry = system.entry_get(entry_id)
    if not (oentry.is_local and entry_id != entry.id):
      continue
    entry_status = entry_health_status(oentry)
    status[entry_id] = entry_status
    if entry_status:
      entry_status['changed'] = oentry.health_changed
      entry_status['schanged'] = utils.strftime(entry_status['changed']) if entry_status['changed'] > 0 else '-'
  entry.publish('', status)
def status_check(installer_entry):
  """Mark tracked MAC addresses as disconnected when silent longer than 'connection_time'.

  Record layout: [entry_id, momentary, connected, last_seen].
  """
  timeout = utils.read_duration(installer_entry.config['connection_time'])
  for mac_address, record in installer_entry.net_sniffer_mac_addresses.items():
    if record[2] and system.time() - record[3] > timeout:
      entry = system.entry_get(record[0])
      if entry:
        record[2] = False
        entry.publish('@/disconnected')
def destroy():
  """Shut down the node: wait for worker threads, persist local entry data, tear everything down.

  Joins every live worker thread, calls store_data() on each local entry
  (iterating a snapshot of entry names, since a store handler could change
  "entries"), then destroys storage and the system and clears the init flag.
  """
  global threads, _system_initialized
  # BUGFIX: iterate over a snapshot of the keys — a still-running thread can
  # register a new thread while we join, and mutating `threads` during
  # iteration would raise "dictionary changed size during iteration".
  for thread_key in list(threads):
    thread = threads.get(thread_key)
    if thread and thread.is_alive():
      thread.join()
  clone_entry_names = list(
      system.entries().keys()
  )  # I make a clone of entry names, because some handler could change "entries"
  for entry_id in clone_entry_names:
    entry = system.entry_get(entry_id)
    if entry and entry.is_local:
      entry.store_data()
  storage.destroy()
  system.destroy()
  _system_initialized = False
def status_check(installer_entry):
  """Disconnect tracked MAC addresses that have been silent longer than 'connection_time'.

  Record layout: [entry_id, momentary, connected, last_seen].
  """
  timeout = utils.read_duration(installer_entry.config['connection_time'])
  for mac_address, record in installer_entry.net_sniffer_mac_addresses.items():
    if not record[2]:
      continue
    if system.time() - record[3] <= timeout:
      continue
    entry = system.entry_get(record[0])
    if entry:
      record[2] = False
      entry.publish('@/disconnected')
      logging.debug(
          "#{id}> {entry}: status_check, res: disconnected".format(
              id=installer_entry.id, entry=entry.id))
def rf_rx_callback(self_entry, rfdevice):
  """Handle a received RF code: publish '@/detected' on the entry registered for that code.

  `rfdevice` is a dict with at least 'rx_code', 'rx_pulselength' and 'rx_proto'.
  self_entry.rf_codes maps code -> (entry_id, port).
  """
  for rf_code in self_entry.rf_codes:
    if str(rfdevice['rx_code']) == str(rf_code):
      entry_id, port = self_entry.rf_codes[rf_code]
      logging.debug(
          "#{id}> found matching code: {rx_code} for {entry_id}/{port} [pulselength {rx_pulselength}, protocol {rx_proto}]"
          .format(id=self_entry.id,
                  entry_id=entry_id,
                  port=port if port != '' else '-',
                  rx_code=str(rfdevice['rx_code']),
                  rx_pulselength=str(rfdevice['rx_pulselength']),
                  rx_proto=str(rfdevice['rx_proto'])))
      entry = system.entry_get(entry_id)
      if entry:
        entry.publish('@/detected', port)
      else:
        # BUGFIX: previously formatted id=id, printing the builtin `id` function
        # instead of this entry's identifier.
        logging.error(
            '#{id}> entry {entry_id} not found for rf_code {rf_code}'.format(
                id=self_entry.id, entry_id=entry_id, rf_code=rf_code))
def entry_load(self_entry, entry):
  """Initialize health-monitoring state and default publish definitions on a loaded entry.

  Non-local entries are ignored. Sets up the entry.health_* bookkeeping
  attributes, reads health timeouts from entry config (falling back to
  self_entry config), registers the default '@/health' publish definition and
  attaches 'connected' / required-entry listeners.
  """
  if entry.is_local:
    entry.health_entry = self_entry
    # Presumably the last health value/reason published on '@/health' — TODO confirm
    entry.health_published_status = {'value': '', 'reason': ''}
    entry.health_dead = ''  # Reason the entry should be considered dead
    entry.health_response = ''  # A failure in response to subscribed topic
    entry.health_required = {}  # The status of required entries (only for entries in status "dead" or "failure")
    entry.health_publish = {}  # Failure in run_interval/check_interval (not published as often as expected)
    entry.health_time = 0  # Last access to an health_* variable
    entry.health_changed = 0  # Last change to an health_* variable
    # Config lookups: entry-level setting wins over self_entry (module-level) one.
    entry.health_config_dead_disconnected_timeout = utils.read_duration(entry.config['health-dead-disconnected-timeout'] if 'health-dead-disconnected-timeout' in entry.config else (self_entry.config['health-dead-disconnected-timeout'] if 'health-dead-disconnected-timeout' in self_entry.config else 0))
    entry.health_config_alive_on_message = utils.read_duration(entry.config['health-alive-on-message'] if 'health-alive-on-message' in entry.config else (self_entry.config['health-alive-on-message'] if 'health-alive-on-message' in self_entry.config else False))
    entry.health_config_dead_message_timeout = utils.read_duration(entry.config['health-dead-message-timeout'] if 'health-dead-message-timeout' in entry.config else (self_entry.config['health-dead-message-timeout'] if 'health-dead-message-timeout' in self_entry.config else 0))
    # Default '@/health' publish definition (notifications + derived events).
    system.entry_definition_add_default(entry, {
      'publish': {
        'health': {
          'description': _('Health of the entry'),
          'topic': '@/health',
          'type': 'object',
          'retain': 1,
          'notify': _("{caption} health is changed to: {payload[value]}"),
          'notify_if': {
            'js:payload["value"] == "failure" || payload["value"] == "dead"': {'notify_level': 'warn', 'notify': _("{caption} health is changed to: {payload[value]} ({payload[reason]})"), 'notify_next_level': 'warn'},
          },
          'events': {
            'alive': "js:('value' in payload && payload['value'] in {'alive':0, 'dead':0, 'idle':0} ? { value: payload['value'] != 'dead'} : null)",
            'failure': "js:('value' in payload && payload['value'] in {'alive':0, 'failure':0, 'idle':0} ? { value: payload['value'] == 'failure', reason: payload['value'] == 'failure' && 'reason' in payload ? payload['reason'] : ''} : null)",
            'clock': "js:({ value: payload['time'] })"
          },
        },
      },
      "events_listen": [".connected", ".alive", ".failure"]
    });
    if system.entry_support_event(entry, 'connected'):
      entry.on('connected', lambda source_entry, eventname, eventdata, caller, published_message: event_connected(self_entry, source_entry, eventname, eventdata, caller, published_message), None, self_entry)
    # Watch the health of entries this one declares as required.
    if 'required' in entry.definition and entry.definition['required']:
      for req_entry_id in entry.definition['required']:
        rentry = system.entry_get(req_entry_id)
        if rentry:
          _system_loaded_add_required_listeners(self_entry, rentry, entry)
def entry_invoke(entry, method, *args, **kwargs):
  """Invoke an entry handler synchronously and return its result.

  `entry` may be an entry object or an entry id string. Returns False when the
  id cannot be resolved, the handler's return value on success, and None when
  the handler is missing or raises (the exception is logged).
  """
  if isinstance(entry, str):
    entry_str = entry
    entry = system.entry_get(entry_str)
    if not entry:
      logging.error(
          "#NODE_SYSTEM> skipped invocation of {method}: entry {entry} not found!"
          .format(entry=entry_str, method=method))
      return False
  func = get_handler(entry, method)
  if func:
    logging.debug("#{entry}> invoking {method} ...".format(entry=entry.id, method=method))
    ret = None
    try:
      ret = func(entry, *args, **kwargs)
    except Exception:
      # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
      # KeyboardInterrupt; catch Exception so process-control signals propagate.
      logging.exception(
          "#{id}> exception in entry_invoke of method {method}".format(
              id=entry.id, method=method))
    return ret
def entry_invoke_threaded(entry, method, *args, **kwargs):
  """Invoke an entry handler in a background thread, keyed to avoid concurrent runs.

  `entry` may be an entry object or an entry id string; returns False when the
  id cannot be resolved. Special kwargs: '_thread_key' extends the thread key,
  '_thread_multiple' appends a random suffix so several invocations may run
  concurrently. If a thread with the same key is still alive, the call is skipped.
  """
  global threads
  if isinstance(entry, str):
    entry_str = entry
    entry = system.entry_get(entry_str)
    if not entry:
      logging.error(
          "#NODE_SYSTEM> skipped invocation of {method}: entry {entry} not found!"
          .format(entry=entry_str, method=method))
      return False
  func = get_handler(entry, method)
  if func:
    thread_key = entry.id + '.' + method
    if '_thread_key' in kwargs:
      thread_key = thread_key + '.' + kwargs['_thread_key']
      del kwargs['_thread_key']
    if '_thread_multiple' in kwargs:
      thread_key = thread_key + '.' + str(random.randrange(1000000))
      del kwargs['_thread_multiple']
    if thread_key not in threads or threads[thread_key] is None or not threads[thread_key].is_alive():
      logging.debug("#{entry}> invoking {method} (threaded) ...".format(
          entry=entry.id, method=method))
      args = [func, method, entry.id, entry] + list(args)
      # daemon=True allows the main application to exit (and ctrl+c to work)
      # even though the thread is running. https://docs.python.org/3/library/threading.html
      threads[thread_key] = threading.Thread(target=entry_invoke_threaded_wrapper,
                                             args=args, kwargs=kwargs, daemon=True)
      threads[thread_key].start()
    else:
      # BUGFIX: logging.warn is a deprecated alias — use logging.warning.
      logging.warning(
          "#{entry}> skipped invocation of {method}: already running!".
          format(entry=entry.id, method=method))
def run_step():
  """One scheduler pass over all local entries.

  For each local entry: validates run_interval/run_cron settings (removing
  invalid ones), initializes last_run/next_run bookkeeping, and fires the 'run'
  handler and any timed publish definitions that are due — subject to the
  overload throttle policy ('force' runs it, 'skip' reschedules without
  running, an int postpones until last_run is old enough). Stats are recorded
  via system._stats_start/_stats_end.
  """
  _s = system._stats_start()
  now = system.time()
  clone_entry_names = list(
      system.entries().keys()
  )  # I make a clone of entry names, because some handler could change "entries"
  for entry_id in clone_entry_names:
    entry = system.entry_get(entry_id)
    if entry and entry.is_local:
      # Initialization / check configuration validity
      if 'run_interval' in entry.definition and utils.read_duration(entry.definition['run_interval']) <= 0:
        logging.error('#{id}> invalid run_interval: {run_interval}'.format(id=entry_id, run_interval=entry.definition['run_interval']))
        del entry.definition['run_interval']
      # (Re)compile the cron rule only when it changed or next_run is missing.
      if 'run_cron' in entry.definition and entry_implements(entry_id, 'run') and not ('cron' in entry.data and entry.data['cron'] == entry.definition['run_cron'] and 'next_run' in entry.data):
        if not croniter.is_valid(entry.definition['run_cron']):
          logging.error('#{id}> invalid cron rule: {cron}'.format(id=entry_id, cron=entry.definition['run_cron']))
          del entry.definition['run_cron']
        else:
          entry.data['cron'] = entry.definition['run_cron']
          #itr = croniter(entry.data['cron'], datetime.datetime.now().astimezone())
          itr = croniter(entry.data['cron'], datetime.datetime.fromtimestamp(now).astimezone())
          entry.data['next_run'] = itr.get_next()
      if 'last_run' not in entry.data:
        entry.data['last_run'] = 0
      if 'next_run' not in entry.data:
        entry.data['next_run'] = now
      # Entry-level 'run' handler scheduling.
      if entry_implements(entry_id, 'run') and ('run_interval' in entry.definition or 'run_cron' in entry.definition):
        throttle_policy = _run_step_throttle_policy(entry, entry.definition, None)
        if now >= entry.data['next_run']:
          if throttle_policy == 'force' or throttle_policy == 'skip' or (isinstance(throttle_policy, int) and now - entry.data['last_run'] > throttle_policy):
            entry.data['last_run'] = now
            if 'run_interval' in entry.definition:
              entry.data['next_run'] = now + utils.read_duration(entry.definition['run_interval'])
            else:
              #itr = croniter(entry.data['cron'], datetime.datetime.now().astimezone())
              itr = croniter(entry.data['cron'], datetime.datetime.fromtimestamp(now).astimezone())
              entry.data['next_run'] = itr.get_next()
            if throttle_policy != 'skip':
              entry_invoke_threaded(entry_id, 'run')
            else:
              logging.debug(
                  "#{entry}> system overload ({load}), skipped invokation of {method}."
                  .format(entry=entry.id, load=load_level(), method='run'))
          else:
            logging.debug(
                "#{entry}> system overload ({load}), postponed invokation of {method}."
                .format(entry=entry.id, load=load_level(), method='run'))
      # Per-topic publish scheduling (same logic, keyed by topic_rule suffix).
      if 'publish' in entry.definition:
        for topic_rule in entry.definition['publish']:
          # Initialization / check configuration validity
          if 'run_interval' in entry.definition['publish'][topic_rule] and utils.read_duration(entry.definition['publish'][topic_rule]['run_interval']) <= 0:
            logging.error('#{id}> invalid run_interval for topic rule {topic_rule}: {run_interval}'.format(id=entry_id, topic_rule=topic_rule, run_interval=entry.definition['publish'][topic_rule]['run_interval']))
            del entry.definition['publish'][topic_rule]['run_interval']
          if 'run_cron' in entry.definition['publish'][topic_rule] and not ('cron_' + topic_rule in entry.data and entry.data['cron_' + topic_rule] == entry.definition['publish'][topic_rule]['run_cron'] and 'next_run_' + topic_rule in entry.data):
            if not croniter.is_valid(entry.definition['publish'][topic_rule]['run_cron']):
              logging.error('#{id}> invalid cron rule for publishing topic rule {topic_rule}: {cron}'.format(id=entry_id, topic_rule=topic_rule, cron=entry.definition['publish'][topic_rule]['run_cron']))
              del entry.definition['publish'][topic_rule]['run_cron']
            else:
              entry.data['cron_' + topic_rule] = entry.definition['publish'][topic_rule]['run_cron']
              #itr = croniter(entry.data['cron_' + topic_rule], datetime.datetime.now().astimezone())
              itr = croniter(entry.data['cron_' + topic_rule], datetime.datetime.fromtimestamp(now).astimezone())
              entry.data['next_run_' + topic_rule] = itr.get_next()
          if 'last_run_' + topic_rule not in entry.data:
            entry.data['last_run_' + topic_rule] = 0
          if 'next_run_' + topic_rule not in entry.data:
            entry.data['next_run_' + topic_rule] = now
          if 'run_interval' in entry.definition['publish'][topic_rule] or 'run_cron' in entry.definition['publish'][topic_rule]:
            throttle_policy = _run_step_throttle_policy(entry, entry.definition['publish'][topic_rule], topic_rule)
            if now >= entry.data['next_run_' + topic_rule]:
              if throttle_policy == 'force' or throttle_policy == 'skip' or (isinstance(throttle_policy, int) and now - entry.data['last_run_' + topic_rule] > throttle_policy):
                entry.data['last_run_' + topic_rule] = now
                if 'run_interval' in entry.definition['publish'][topic_rule]:
                  entry.data['next_run_' + topic_rule] = now + utils.read_duration(entry.definition['publish'][topic_rule]['run_interval'])
                else:
                  #itr = croniter(entry.data['cron_' + topic_rule], datetime.datetime.now().astimezone())
                  itr = croniter(entry.data['cron_' + topic_rule], datetime.datetime.fromtimestamp(now).astimezone())
                  entry.data['next_run_' + topic_rule] = itr.get_next()
                if throttle_policy != 'skip':
                  entry_invoke_publish(entry, topic_rule, entry.definition['publish'][topic_rule])
                else:
                  logging.debug(
                      "#{entry}> system overload ({load}), skipped invokation of publish {method}."
                      .format(entry=entry.id, load=load_level(), method=topic_rule))
              else:
                logging.debug(
                    "#{entry}> system overload ({load}), postponed invokation of publish {method}."
                    .format(entry=entry.id, load=load_level(), method=topic_rule))
      # Persist per-entry data changed above (timed separately).
      _s1 = system._stats_start()
      entry.store_data(False)
      system._stats_end('node.run.store_data', _s1)
  system._stats_end('node.run', _s)
def mac_address_detected(installer_entry, env, mac_address, disconnected=False, ip_address=None):
  """Process a MAC address sighting (or loss) and publish the resulting state change.

  Record layout: [entry_id, momentary, connected, last_seen]. `env` is a
  per-scan scratch dict used to cache the ARP table lookup.
  """
  if mac_address not in installer_entry.net_sniffer_mac_addresses:
    return
  logging.debug(
      "#{id}> mac_address_detected: {mac_address}, connected: {connected}, ip_address: {ip_address}"
      .format(id=installer_entry.id, mac_address=mac_address,
              connected=not disconnected, ip_address=ip_address))
  record = installer_entry.net_sniffer_mac_addresses[mac_address]
  entry = system.entry_get(record[0])
  if not entry:
    return
  momentary, was_connected, last_seen = record[1], record[2], record[3]
  record[3] = system.time()
  publish = None
  if not disconnected and momentary:
    # Momentary devices: suppress repeated detections inside the flood window.
    if system.time() - last_seen < utils.read_duration(installer_entry.config['momentary_flood_time']):
      return
    publish = '@/detected'
  elif not disconnected and not was_connected:
    record[2] = True
    publish = '@/connected'
  elif disconnected and was_connected:
    record[2] = False
    publish = '@/disconnected'
  logging.debug(
      "#{id}> {entry}: mac_address_detected, res: {publish}, mac: {mac_address}, connected: {connected}, ip_address: {ip_address}, momentary: {momentary}, was_connected: {was_connected}, last_seen: {last_seen}"
      .format(id=installer_entry.id, entry=entry.id, publish=publish,
              mac_address=mac_address, connected=not disconnected,
              ip_address=ip_address, momentary=momentary,
              was_connected=was_connected, last_seen=last_seen))
  if not publish:
    return
  data = {'mac_address': mac_address}
  # Resolve the IP from the (cached) ARP table only for positive sightings.
  if not disconnected and not ip_address and installer_entry.config['use_arp']:
    if 'arp_list' not in env:
      env['arp_list'] = _arp_list(installer_entry)
    if mac_address in env['arp_list']:
      ip_address = env['arp_list'][mac_address]
  if ip_address:
    data['ip_address'] = ip_address
  entry.publish(publish, data)
def assertAction(name, entryname, action, params, init=None, if_event_not_match=False, if_event_not_match_keys=False, if_event_not_match_timeout=None, assertSubscribe=None, assertSubscribeSomePayload=None, assertEventsTopic=None, assertEventsData=False, assertEvents=None, assertSomeEvents=None, assertNotification=None, assertExports=None, assertChild=None, assertNotChild=None, timeoutms=2000, wait=True):
  """
  Executes an action and check results (published topics).

  Arms the assertx() expectations, invokes `action` on the entry named
  `entryname` (with `params` and the optional if_event_not_match* guards), and
  optionally waits for the asserts to settle. If the entry cannot be resolved,
  a failed 'entry_reference' assertion is recorded in assertsDone instead.
  """
  global assertsDone
  entry = system.entry_get(entryname)
  if not entry:
    # NOTE(review): the result is keyed by the module-level `unitname` (not the
    # `name` argument) — verify this is intentional.
    assertsDone[unitname] = {
        'unit': current_unit_name,
        'name': name,
        'count': 0,
        'data': {
            'entry_reference': [
                False,
                # BUGFIX: message previously rendered "...assertAction <unit>not found"
                # (missing separator before "not found").
                'entry "' + entryname + '" referenced in assertAction ' + unitname + ' not found'
            ]
        }
    }
  else:
    assertx(name,
            assertSubscribe=assertSubscribe,
            assertSubscribeSomePayload=assertSubscribeSomePayload,
            assertEventsTopic=assertEventsTopic,
            assertEventsData=assertEventsData,
            assertEvents=assertEvents,
            assertSomeEvents=assertSomeEvents,
            assertNotification=assertNotification,
            assertExports=assertExports,
            assertChild=assertChild,
            assertNotChild=assertNotChild,
            timeoutms=timeoutms,
            wait=False)
    entry.do(action,
             params,
             init=init,
             if_event_not_match=if_event_not_match,
             if_event_not_match_keys=if_event_not_match_keys,
             if_event_not_match_timeout=if_event_not_match_timeout)
    if wait:
      waitRunning()
def test_run(entries):
  """Integration scenario: exercises events/on/payload_transform, actions, the
  event_get cache with event_keys, multiple/grouped events and topic_matches.

  When test_history is enabled, the history module is run between sections to
  flush/verify the recorded history.
  """
  if test_history:
    entries['history@TEST'].module.run(entries['history@TEST'])

  if (True):
    # Test "events", "on" declarations and "payload_transform"
    test.assertPublish(
        's1', 'entry_b/pub1', 'test',
        assertChild=['entry_a_on_test_event1'],
        assertNotChild=['entry_a_on_test_event2', 'entry_a_on_test_event_implicit'],
        assertEvents={'test_event': {'port': 'test1'}},
        assertNotification=['info', 'Entry B published pub1=test'])
    test.assertPublish(
        's2', 'entry_b/pub1', '',
        assertChild=['entry_a_on_test_event1', 'entry_a_on_test_event2'],
        assertEvents={'test_event': {'port': '1'}},
        assertNotification=['info', 'Entry B published pub1='])
    test.assertPublish(
        's3', 'entry_b/pub2', '{"x": 1}',
        assertEvents={'test_event': {'port': '1transformed1'}},
        assertNotification=['info', 'Entry B published pub2=1transformed'])
    test.assertPublish(
        's4', 'entry_b/pub3', 'test',
        assertNotChild=['entry_a_on_test_event1', 'entry_a_on_test_event2', 'entry_a_on_test_event_implicit'],
        assertEvents={})

    # Test .on called programmatically on a specific entry
    system.entry_get('entry_a').on('test_event', on_test_on_event3, 'js:params["port"] == "entry_a"')
    entries['entry_a@TEST'].on('test_event', on_test_on_event2, 'js:params["port"] == "fake"')
    test.assertPublish(
        's5', 'item/entry_a/event', '',
        assertChild=['entry_a_prog_on', 'entry_a_on_test_event_implicit'],
        assertNotChild=['entry_a_on_test_event2'],
        assertEvents={'test_event': {"port": "entry_a"}})
    #system.entry_reload('entry_a@TEST');

    # Test "actions" and "handler" declarations
    test.assertAction(
        's6', 'entry_b', 'test_action', {'value': '1'}, init='js:params["val2"] = "0"',
        assertSubscribe={'subs/entry_b': 'test10', 'subs/entry_b/response': 'ok'},
        assertChild=['entry_b_on_subscribed_message', 'entry_b_publish', 'entry_b_on_events_passthrough'],
        assertEventsTopic='subs/entry_b/response',
        assertEvents={'test_action_response': {}})
    test.assertAction(
        's7', 'entry_b', 'test_action2', {'value': '1'},
        init='js:params["val2"] = params["val2"] + "X"',
        assertSubscribe={'subs/entry_b/TEST0X': 'test10X', 'subs/entry_b/response': 'ok'},
        assertChild=['entry_b_on_subscribed_message2', 'entry_b_publish'],
        assertEventsTopic='subs/entry_b/response',
        assertEvents={'test_action_response': {}})

    # Test "on"."do"
    test.assertPublish('s8', 'subs/entry_b/test_do', 2, assertSubscribe={'entry_b/test_do_result': '3'})

    # Test changed_params
    test.assertPublish(
        's9', 'entry_b/pub1', '', assertEventsData=True,
        assertEvents=[{'name': 'test_event', 'params': {'port': '1'}, 'changed_params': {}, 'keys': {'port': '1'}, 'time': ('*', )}])
    test.assertPublish(
        's10', 'entry_b/pub1', '1', assertEventsData=True,
        assertEvents=[{'name': 'test_event', 'params': {'port': '11'}, 'changed_params': {}, 'keys': {'port': '11'}, 'time': ('*', )}])

  if test_history:
    entries['history@TEST'].module.run(entries['history@TEST'])

  ####################################################################################
  ### TEST event_get cache and event_keys
  if (True):
    test.assertPublish('e1', 'item/entry_c/event', {'port': '1', 'value': 5},
                       assertEvents={'data': {'port': '1', 'value': 5}})
    system.time_offset(10)
    test.assertx('e2', assertEq=[
        (system.event_get("entry_c.data"), {'port': '1', 'value': 5}),
        (system.event_get("entry_c.data", timeout=5), None),
        (system.event_get("entry_c.data(js:params['port'] == '1')"), {'port': '1', 'value': 5}),
        (system.event_get("entry_c.data(js:params['port'] == '1')", timeout=5), None),
        (system.event_get("entry_c.data(js:params['port'] == '2')"), None),
    ])
    test.assertPublish('e3', 'item/entry_c/event', {'port': '2', 'value': 6},
                       assertEvents={'data': {'port': '2', 'value': 6}})
    system.time_offset(10)
    test.assertx('e4', assertEq=[
        (system.event_get("entry_c.data"), {'port': '2', 'value': 6}),
        (system.event_get("entry_c.data", timeout=5), None),
        (system.event_get("entry_c.data(js:params['port'] == '1')"), {'port': '1', 'value': 5}),
        (system.event_get("entry_c.data(js:params['port'] == '1')", timeout=5), None),
        (system.event_get("entry_c.data(js:params['port'] == '2')"), {'port': '2', 'value': 6}),
        (system.event_get("entry_c.data(js:params['port'] == '2')", timeout=5), None),
    ])
    system.do_action("entry_c.data-set(js:params['port'] = '2')", {'value': 7})  # invalidates port=2 cache
    test.assertx('e5', assertEq=[
        (system.event_get("entry_c.data"), {'port': '1', 'value': 5}),
        (system.event_get("entry_c.data", timeout=5), None),
        (system.event_get("entry_c.data(js:params['port'] == '1')"), {'port': '1', 'value': 5}),
        (system.event_get("entry_c.data(js:params['port'] == '1')", timeout=5), None),
        (system.event_get("entry_c.data(js:params['port'] == '2')"), None),
        (system.event_get("entry_c.data(js:params['port'] == '2')", timeout=5), None),
    ])
    test.assertPublish('e6', 'item/entry_c/event', {'port': '2', 'value': 7},
                       assertEvents={'data': {'port': '2', 'value': 7}})
    test.assertPublish('e7', 'item/entry_c/event', {'port': '1', 'value': 8},
                       assertEvents={'data': {'port': '1', 'value': 8}})
    system.do_action("entry_c.data-set(js:params['port'] = '2')", {'value': 8},
                     if_event_not_match="entry_c.data(js:params['port'] == 1)")  # NOT invalidates cache, action is NOT done (event matched)
    test.assertx('e7', assertEq=[
        (system.event_get("entry_c.data"), {'port': '1', 'value': 8}),
        (system.event_get("entry_c.data", timeout=5), {'port': '1', 'value': 8}),
        (system.event_get("entry_c.data(js:params['port'] == '1')"), {'port': '1', 'value': 8}),
        (system.event_get("entry_c.data(js:params['port'] == '1')", timeout=5), {'port': '1', 'value': 8}),
        (system.event_get("entry_c.data(js:params['port'] == '2')"), {'port': '2', 'value': 7}),
        (system.event_get("entry_c.data(js:params['port'] == '2')", timeout=5), {'port': '2', 'value': 7}),
    ])
    system.do_action("entry_c.data-set(js:params['port'] = '2')", {'value': 5},
                     if_event_not_match="entry_c.data(js:params['port'] == 1)")  # invalidate port=1 cache
    test.assertx('e8', assertEq=[
        (system.event_get("entry_c.data"), {'port': '2', 'value': 7}),
        (system.event_get("entry_c.data(js:params['port'] == '1')"), None),
        (system.event_get("entry_c.data(js:params['port'] == '2')"), {'port': '2', 'value': 7}),
    ])
    # test event_keys definition (different cache for 'myport' param and NOT for 'port')
    # also test for cumulative data of events and temporary
    test.assertPublish('e9', 'item/entry_d/event', {'myport': '1', 'port': 5, 'cumulated': 9},
                       assertEvents={'data': {'myport': '1', 'port': 5, 'cumulated': 9}})
    test.assertx('e10', assertEq=[
        (system.event_get("entry_d.data"), {'myport': '1', 'port': 5, 'cumulated': 9}),
        (system.event_get("entry_d.data(js:params['myport'] == '1')"), {'myport': '1', 'port': 5, 'cumulated': 9}),
        (system.event_get("entry_d.data(js:params['myport'] == '2')"), None),
        (system.event_get("entry_d.data(js:params['port'] == 5)"), {'myport': '1', 'port': 5, 'cumulated': 9}),
    ])
    test.assertPublish('e11', 'item/entry_d/event', {'myport': '2', 'port': 6},
                       assertEvents={'data': {'myport': '2', 'port': 6}})
    test.assertPublish('e12', 'item/entry_d/event', {'myport': '1', 'port': 6},
                       assertEvents={'data': {'myport': '1', 'port': 6, 'cumulated': 9}})
    test.assertx('e13', assertEq=[
        (system.event_get("entry_d.data"), {'myport': '1', 'port': 6, 'cumulated': 9}),
        (system.event_get("entry_d.data(js:params['myport'] == '1')"), {'myport': '1', 'port': 6, 'cumulated': 9}),
        (system.event_get("entry_d.data(js:params['myport'] == '1')", temporary=True), None),
        (system.event_get("entry_d.data(js:params['myport'] == '2')"), {'myport': '2', 'port': 6}),
        (system.event_get("entry_d.data(js:params['port'] == 5)"), None),
    ])
    test.assertPublish('e14', 'item/entry_d/event', {'myport': '1', 'port': 7, 'temporary': True},
                       assertEvents={'data': {'myport': '1', 'port': 7, 'cumulated': 9, 'temporary': True}})
    test.assertx('e15', assertEq=[
        (system.event_get("entry_d.data"), {'myport': '1', 'port': 6, 'cumulated': 9}),
        (system.event_get("entry_d.data(js:params['myport'] == '1')"), {'myport': '1', 'port': 6, 'cumulated': 9}),
        (system.event_get("entry_d.data(js:params['myport'] == '1')", temporary=True), {'myport': '1', 'port': 7, 'cumulated': 9, 'temporary': True}),
        (system.event_get("entry_d.data(js:params['myport'] == '2')"), {'myport': '2', 'port': 6}),
    ])

  if test_history:
    entries['history@TEST'].module.run(entries['history@TEST'])

  ####################################################################################
  ### TEST multiple events / event:init / event groups
  if (True):
    # this one publish an event {'port': '2', 'x': 'y'} and an event {'port': 1, 'value': 1}
    test.assertPublish('e16', 'item/entry_e/event', {'x': 'y', 'port': '2'}, assertEventsData=True, assertEvents=[
        {'name': 'data', 'params': {'port': '2', 'x': 'y', 'unit': 'B', 'all': 1}, 'changed_params': {'x': 'y'}, 'keys': {'port': '2'}, 'time': ()},
        {'name': 'data', 'params': {'port': '1', 'value': 1, 'unit': 'A', 'all': 1}, 'changed_params': {'value': 1}, 'keys': {'port': '1'}, 'time': ()}])
    # this one publish an event {'z': 3} [with no keys => impact other events with keys] and an event {'port': 1, 'value': 1}
    test.assertPublish('e17', 'item/entry_e/event', {'z': 3}, assertEventsData=True, assertEvents=[
        {'name': 'data', 'params': {'all': 1, 'z': 3}, 'changed_params': {'z': 3}, 'keys': {}, 'time': ()},
        {'name': 'data', 'params': {'port': '1', 'value': 1, 'unit': 'A', 'all': 1, 'z': 3}, 'changed_params': {}, 'keys': {'port': '1'}, 'time': ()}])
    # this one publish an event {'all': 2 } [with no keys => impact other events with keys] and an event {'port': 1, 'value': 1}
    test.assertPublish('e18', 'item/entry_e/event', {'all': 2}, assertEventsData=True, assertEvents=[
        {'name': 'data', 'params': {'all': 2, 'z': 3}, 'changed_params': {'all': 2}, 'keys': {}, 'time': ()},
        {'name': 'data', 'params': {'port': '1', 'value': 1, 'unit': 'A', 'all': 2, 'z': 3}, 'changed_params': {}, 'keys': {'port': '1'}, 'time': ()}])

  if test_history:
    entries['history@TEST'].module.run(entries['history@TEST'])

  # Event groups: two publishes inside the group window must not fire the group
  # handler; after the time offset the grouped event is delivered.
  test.assertPublish('e20', 'item/entry_e/group', {'v1': 1}, assertNotChild=['entry_e.on_group_event'], timeoutms=1000, wait=False)
  test.assertPublish('e21', 'item/entry_e/group', {'v2': 2}, assertNotChild=['entry_e.on_group_event'], timeoutms=1000, wait=False)
  test.waitRunning()
  system.time_offset(60)
  test.assertx('e22', assertChild=['entry_e.on_group_event'])

  ####################################################################################
  ### TEST topic_matches
  if (True):
    test.assertx('topic_matches', assertEq=[
        (system.topic_matches('a/b', 'a/b', ''), {'matched': True, 'topic_matches': [True], 'use_payload': False, 'used': ('*', )}),
        (system.topic_matches('a/+/c', 'a/b/c', ''), {'matched': True, 'topic_matches': [True], 'use_payload': False, 'used': ('*', )}),
        (system.topic_matches('a/#', 'a/b/c', ''), {'matched': True, 'topic_matches': [True], 'use_payload': False, 'used': ('*', )}),
        (system.topic_matches('a/#', 'b/c', ''), {'matched': False, 'topic_matches': [], 'use_payload': None, 'used': ('*', )}),
        (system.topic_matches('/^a(.*)$/', 'a/b/c', ''), {'matched': True, 'topic_matches': ['a/b/c', '/b/c'], 'use_payload': False, 'used': ('*', )}),
        (system.topic_matches('/^a(.*)$/[1/|2/]', 'a/b/c', ''), {'matched': False, 'topic_matches': ['a/b/c', '/b/c'], 'use_payload': True, 'used': ('*', )}),
        (system.topic_matches('/^a(.*)$/[1|2/]', 'a/b/c', '2/'), {'matched': True, 'topic_matches': ['a/b/c', '/b/c'], 'use_payload': True, 'used': ('*', )}),
        (system.topic_matches('a/#[/^z.*z$/]', 'a/b/c', 'x'), {'matched': False, 'topic_matches': [True], 'use_payload': True, 'used': ('*', )}),
        (system.topic_matches('a/#[/^z.*z$/]', 'a/b/c', 'zxz'), {'matched': True, 'topic_matches': [True], 'use_payload': True, 'used': ('*', )}),
        (system.topic_matches('a/+[js: payload=="x"]', 'a/b', ''), {'matched': False, 'topic_matches': [True], 'use_payload': True, 'used': ('*', )}),
        (system.topic_matches('a/+[js: payload=="x"]', 'a/b', 'x'), {'matched': True, 'topic_matches': [True], 'use_payload': True, 'used': ('*', )}),
        (system.topic_matches('/^a(.*)$/[js: matches[1]=="/b/c"]', 'a/b/c', ''), {'matched': True, 'topic_matches': ['a/b/c', '/b/c'], 'use_payload': False, 'used': ('*', )}),
        (system.topic_matches('/^a(.*)$/[js: matches[1]=="/b/c"]', 'a/d/e', ''), {'matched': False, 'topic_matches': ['a/d/e', '/d/e'], 'use_payload': False, 'used': ('*', )}),
        (system.topic_matches('/^a(.*)$/[js: matches[1]=="/b/c" && payload=="x"]', 'a/b/c', ''), {'matched': False, 'topic_matches': ['a/b/c', '/b/c'], 'use_payload': True, 'used': ('*', )}),
        (system.topic_matches('/^a(.*)$/[js: matches[1]=="/b/c" && payload=="x"]', 'a/b/c', 'x'), {'matched': True, 'topic_matches': ['a/b/c', '/b/c'], 'use_payload': True, 'used': ('*', )}),
    ])

  if test_history:
    entries['history@TEST'].module.run(entries['history@TEST'])
def entry_implements(entry, method):
  """Return True if *entry* provides a handler for *method*.

  Args:
    entry: an entry object, or an entry id string (resolved via
      system.entry_get — may resolve to None for an unknown id, in which
      case get_handler is called with None; behavior then depends on
      get_handler, same as the original code).
    method: handler/method name to look up.

  Returns:
    bool: whether get_handler(entry, method) found a handler.
  """
  if isinstance(entry, str):
    entry = system.entry_get(entry)
  # bool(...) is the idiomatic form of "True if x else False"
  return bool(get_handler(entry, method))