def _on_system_entry_unload(entry):
    """Tear down a local entry that is being unloaded.

    Persists its data, removes its storage, fires the global 'entry_unload'
    hooks, lets installer entries uninstall themselves from it, runs the
    entry's own entry_unload/entry_uninstall hooks against all other entries,
    and finally invokes its 'destroy' handler. No-op for non-local entries.

    :param entry: the entry being unloaded.
    """
    if entry.is_local:
        entry.store_data()
        # NOTE(review): passing the module itself as first argument looks odd —
        # confirm storage.entry_uninstall's expected signature.
        storage.entry_uninstall(storage, entry)
        entries_invoke('entry_unload', entry)

        # Let every other local entry with "install_on" rules uninstall itself
        # from the entry being unloaded.
        for installer_entry_id, installer_entry in system.entries().items():
            if installer_entry.is_local and 'install_on' in installer_entry.definition:
                conf = _entry_install_on_conf(
                    installer_entry, installer_entry.definition['install_on'],
                    entry)
                if conf:
                    entry_invoke(installer_entry, 'entry_uninstall', entry,
                                 conf)

        # if this entry define an "entry_unload" hook, all previous entries should be passed to it
        if entry.type == 'module' and hasattr(entry.module, 'entry_unload'):
            for eid, eentry in system.entries().items():
                entry.module.entry_unload(entry, eentry)
        # if this entry define an "entry_uninstall" hook, all previous entries matching install rules should be passed to it
        # BUGFIX: guard on 'install_on' presence — the original read
        # entry.definition['install_on'] unconditionally and raised KeyError
        # for modules defining entry_uninstall without any install_on rule
        # (every sibling code path checks membership first).
        if entry.type == 'module' and hasattr(
                entry.module,
                'entry_uninstall') and 'install_on' in entry.definition:
            for eid, eentry in system.entries().items():
                conf = _entry_install_on_conf(entry,
                                              entry.definition['install_on'],
                                              eentry)
                if conf:
                    entry.module.entry_uninstall(entry, eentry, conf)

        entry_invoke(entry, 'destroy')
# Example #2
# 0
def publish_data(entry, topic, local_metadata=None):
    """Publish this node's exportable state on the given topic.

    The message always carries the sending node's name and the current time;
    entry definitions, exported events and per-entry last_seen timestamps are
    embedded either inline or, when entry.config['compress'] is set, as a
    base64-compressed blob under the '+' key.

    :param entry: the publishing entry (provides config and the MQTT-ish API).
    :param topic: destination topic.
    :param local_metadata: unused here; kept for caller compatibility.
    """
    last_seen = {e.id: e.last_seen for e in system.entries().values()}

    message = {
        'from_node': system.default_node_name,
        'time': system.time(),
    }
    exported = {
        'entries': system.entries_definition_exportable(),
        'events': system.events_export(),
        'last_seen': last_seen,
    }
    if entry.config['compress']:
        # Compact form: the whole export travels compressed under '+'.
        message['+'] = utils.b64_compress_data(exported)
    else:
        message.update(exported)
    entry.publish(topic, message)
def entries_invoke_threaded(method, *args, **kwargs):
    """Invoke `method` on every entry exposing a handler for it, each call in
    its own daemon thread.

    A new thread for a given (entry, method) pair is started only if no
    previous thread for that pair is still alive; otherwise the invocation is
    skipped with a warning. Threads are tracked in the module-level `threads`
    dict, keyed by "<entry_id>.<method>".

    :param method: handler name to invoke.
    :param args: positional arguments forwarded to the handler.
    :param kwargs: keyword arguments forwarded to the handler.
    """
    global threads
    for entry_id, entry in system.entries().items():
        func = get_handler(entry, method)
        if not func:
            continue
        # Compute the registry key once (the original rebuilt
        # "entry.id + '.' + method" four times per iteration). The original
        # also created an unused "base_args" list — removed.
        thread_key = entry.id + '.' + method
        previous = threads.get(thread_key)
        if previous is None or not previous.is_alive():
            logging.debug(
                "#{entry}> invoking {method} (threaded) ...".format(
                    entry=entry.id, method=method))
            # https://docs.python.org/3/library/threading.html
            cargs = [func, method, entry_id, entry] + list(args)
            # daemon=True allows the main application to exit even though the
            # thread is running; it also (therefore) makes it possible to use
            # ctrl+c to terminate the application.
            threads[thread_key] = threading.Thread(
                target=entry_invoke_threaded_wrapper,
                args=cargs,
                kwargs=kwargs,
                daemon=True)
            threads[thread_key].start()
        else:
            # BUGFIX: logging.warn() is deprecated — use warning().
            logging.warning(
                "#{entry}> skipped invocation of {method}: already running!"
                .format(entry=entry.id, method=method))
# Example #4
# 0
def publish_all_entries_status(entry, topic_rule, topic_definition):
  """Publish the health status of every local entry except the caller.

  For each local entry (other than `entry` itself) the health status is
  collected; when present, it is enriched with the raw 'changed' timestamp
  and its formatted 'schanged' counterpart ('-' when never changed).
  The resulting map is published on the entry's default topic.
  """
  status = {}
  for other_id in system.entries():
    other = system.entry_get(other_id)
    if not other.is_local or other_id == entry.id:
      continue
    health = entry_health_status(other)
    status[other_id] = health
    if health:
      changed = other.health_changed
      health['changed'] = changed
      health['schanged'] = utils.strftime(changed) if changed > 0 else '-'
  entry.publish('', status)
def _on_system_entry_init_batch(entries):
    """Run 'entry_init' hooks for a batch of freshly loaded entries.

    For each entry in the batch: every other entry's entry_init handler is
    invoked on it, and — when the entry is a local module defining its own
    entry_init hook — that hook is called once for each other known entry.

    :param entries: dict of entry_id -> entry for the batch.
    """
    for entry in entries.values():
        # calls *.entry_init(entry)
        entries_invoke('entry_init', entry, _skip_entry_id=entry.id)

        # calls entry.entry_init(*): if this entry defines an "entry_init"
        # hook, all previous entries should be passed to it
        local_module = entry.is_local and entry.type == 'module'
        if local_module and hasattr(entry.module, 'entry_init'):
            for other_id, other in system.entries().items():
                if other_id != entry.id:
                    entry.module.entry_init(entry, other)
def entries_implements(method):
    """Return the entries implementing `method`, cached per method name.

    Entries are ordered by their module-level SYSTEM_HANDLER_ORDER_<method>
    attribute (0 when absent or for non-module entries); ties keep the
    original entry iteration order.
    """
    global entries_implementations
    if method in entries_implementations:
        return entries_implementations[method]

    ranked = []
    for _eid, entry in system.entries().items():
        if not get_handler(entry, method):
            continue
        order = 0
        if entry.type == 'module':
            order = getattr(entry.module, 'SYSTEM_HANDLER_ORDER_' + method, 0)
        ranked.append((order, entry))
    # Stable sort on the explicit order key preserves insertion order on ties.
    ranked.sort(key=lambda pair: pair[0])
    entries_implementations[method] = [entry for _order, entry in ranked]
    return entries_implementations[method]
def entries_invoke(method, *args, _skip_entry_id=False, **kwargs):
    """Invoke `method` synchronously on every entry exposing a handler for it.

    Exceptions raised by a handler are logged and the iteration continues
    with the remaining entries.

    :param method: handler name to invoke.
    :param _skip_entry_id: if set, the entry with this id is skipped (e.g. to
        avoid calling an entry's own hook on itself).
    :return: the return value of the LAST handler invoked, or None if none
        ran — intermediate return values are discarded.
    """
    ret = None
    for entry_id, entry in system.entries().items():
        if not _skip_entry_id or entry_id != _skip_entry_id:
            func = get_handler(entry, method)
            if func:
                logging.debug("#{entry}> invoking {method} ...".format(
                    entry=entry_id, method=method))
                try:
                    ret = func(entry, *args, **kwargs)
                except Exception:
                    # BUGFIX: the original bare "except:" also swallowed
                    # SystemExit and KeyboardInterrupt, preventing clean
                    # shutdown; catch only real errors.
                    logging.exception(
                        "#{id}> exception in entries_invoke of method {method}"
                        .format(id=entry.id, method=method))
    return ret
def destroy():
    """Shut the node down: join worker threads, persist local entries' data,
    then destroy storage and system state and clear the initialized flag."""
    global threads, _system_initialized

    for _key, worker in threads.items():
        if worker and worker.is_alive():
            worker.join()

    # Iterate over a snapshot of the ids: a store_data handler could mutate
    # the live "entries" mapping while we walk it.
    for entry_id in list(system.entries().keys()):
        entry = system.entry_get(entry_id)
        if entry and entry.is_local:
            entry.store_data()

    storage.destroy()
    system.destroy()
    _system_initialized = False
# Example #9
# 0
def on_metadata(entry, subscribed_message):
    """Handle a metadata message published by another node.

    The payload is processed only if it comes from a different node and is
    recent (younger than entry.config['dead_time']). The sender is recorded in
    entry.data['seen'], per-node metadata newer than ours is merged into
    entry.data['nodes'], and — unless running in 'local' mode — the entry
    definitions of every updated node are (re)loaded. When anything changed,
    our own metadata is re-published.

    :param entry: the receiving entry (config, data and topic helpers).
    :param subscribed_message: message wrapper; .payload is the decoded dict.
    """
    payload = subscribed_message.payload
    # Accept only fresh messages from other nodes ('dead_time' is the max age).
    if payload and 'from_node' in payload and payload[
            'from_node'] != system.default_node_name and 'time' in payload and system.time(
            ) - payload['time'] < utils.read_duration(
                entry.config['dead_time']):
        # Record the sender both on our clock and on its own clock.
        entry.data['seen'][payload['from_node']] = {
            'my_time': system.time(),
            'his_time': payload['time']
        }

        # Collect the nodes whose advertised metadata is newer than what we
        # already hold (or that we have never seen).
        todo = []
        for node in payload['nodes']:
            if node not in entry.data['nodes'] or entry.data['nodes'][node][
                    'time'] < payload['nodes'][node]['time']:
                entry.data['nodes'][node] = payload['nodes'][node]
                todo.append(node)
        if not entry.config['local']:
            for node in todo:
                #entry.data[node] = payload['nodes'][node]
                # payload['entries'] is compressed (see the publishing side) —
                # presumably b64+zlib; decompressed to an {entry_id: def} map.
                payload_entries = utils.b64_decompress_data(payload['entries'])
                node_entries = {}
                # Keep only the definitions belonging to this node
                # (ids are suffixed with "@<node>").
                for entry_id in payload_entries:
                    if entry_id.endswith('@' + node):
                        node_entries[entry_id] = payload_entries[entry_id]
                #system.entry_load_definitions(node_entries, node_name = node, unload_other_from_node = True, id_from_definition = False)
                system.entry_load(node_entries,
                                  node_name=node,
                                  unload_other_from_node=True,
                                  id_from_definition=False)

        if todo:
            # Something changed: advertise our (possibly updated) metadata.
            publish_metadata(entry, entry.topic('./metadata'))
            logging.debug(
                '#{id}> Loaded new metadata by: {todo}, current entries: {entries}'
                .format(id=entry.id,
                        todo=todo,
                        entries=", ".join(system.entries().keys())))
# Example #10
# 0
def run():
    """Execute the current test unit against all known entries, then mark the
    module-level `finished` flag so the caller knows the run completed."""
    global finished
    all_entries = system.entries()
    current_unit.test_run(all_entries)
    finished = True
# Example #11
# 0
def run_step():
    """Single scheduler tick for all local entries.

    For every local entry it validates and (re)compiles the 'run' schedule
    (run_interval / run_cron) and each per-topic 'publish' schedule, then
    fires the ones that are due, honoring the overload throttle policy
    returned by _run_step_throttle_policy ('force', 'skip', or an int grace
    period in seconds — presumably; confirm against that helper).

    Side effects: mutates scheduling bookkeeping in entry.data ('last_run',
    'next_run', 'cron' and their per-topic '<key>_<topic_rule>' variants) and
    deletes invalid 'run_interval'/'run_cron' keys from entry.definition.
    """
    _s = system._stats_start()
    now = system.time()
    clone_entry_names = list(
        system.entries().keys()
    )  # I make a clone of entry names, because some handler could change "entries"
    for entry_id in clone_entry_names:
        entry = system.entry_get(entry_id)
        if entry and entry.is_local:
            # Initialization / check configuration validity
            if 'run_interval' in entry.definition and utils.read_duration(
                    entry.definition['run_interval']) <= 0:
                logging.error(
                    '#{id}> invalid run_interval: {run_interval}'.format(
                        id=entry_id,
                        run_interval=entry.definition['run_interval']))
                del entry.definition['run_interval']
            # (Re)compile the cron schedule only when it changed or was never
            # compiled (entry.data['cron'] caches the rule last compiled).
            if 'run_cron' in entry.definition and entry_implements(
                    entry_id,
                    'run') and not ('cron' in entry.data and entry.data['cron']
                                    == entry.definition['run_cron']
                                    and 'next_run' in entry.data):
                if not croniter.is_valid(entry.definition['run_cron']):
                    logging.error('#{id}> invalid cron rule: {cron}'.format(
                        id=entry_id, cron=entry.definition['run_cron']))
                    del entry.definition['run_cron']
                else:
                    entry.data['cron'] = entry.definition['run_cron']
                    #itr = croniter(entry.data['cron'], datetime.datetime.now().astimezone())
                    # Base the cron iterator on the tick's "now" (not wall
                    # clock) so all entries see the same reference time.
                    itr = croniter(
                        entry.data['cron'],
                        datetime.datetime.fromtimestamp(now).astimezone())
                    entry.data['next_run'] = itr.get_next()
            # First-time defaults: next_run = now means "due immediately".
            if 'last_run' not in entry.data:
                entry.data['last_run'] = 0
            if 'next_run' not in entry.data:
                entry.data['next_run'] = now

            if entry_implements(entry_id,
                                'run') and ('run_interval' in entry.definition
                                            or 'run_cron' in entry.definition):
                throttle_policy = _run_step_throttle_policy(
                    entry, entry.definition, None)

                if now >= entry.data['next_run']:
                    # 'force'/'skip' always advance the schedule; an int
                    # policy postpones unless last_run is old enough.
                    if throttle_policy == 'force' or throttle_policy == 'skip' or (
                            isinstance(throttle_policy, int) and
                            now - entry.data['last_run'] > throttle_policy):
                        entry.data['last_run'] = now
                        if 'run_interval' in entry.definition:
                            entry.data['next_run'] = now + utils.read_duration(
                                entry.definition['run_interval'])
                        else:
                            #itr = croniter(entry.data['cron'], datetime.datetime.now().astimezone())
                            itr = croniter(
                                entry.data['cron'],
                                datetime.datetime.fromtimestamp(
                                    now).astimezone())
                            entry.data['next_run'] = itr.get_next()

                        if throttle_policy != 'skip':
                            entry_invoke_threaded(entry_id, 'run')
                        else:
                            # 'skip': schedule advanced but invocation dropped.
                            logging.debug(
                                "#{entry}> system overload ({load}), skipped invokation of {method}."
                                .format(entry=entry.id,
                                        load=load_level(),
                                        method='run'))
                    else:
                        # Postponed: schedule NOT advanced, retried next tick.
                        logging.debug(
                            "#{entry}> system overload ({load}), postponed invokation of {method}."
                            .format(entry=entry.id,
                                    load=load_level(),
                                    method='run'))

            # Same scheduling logic as above, repeated per publish topic rule
            # with per-topic bookkeeping keys suffixed by the topic rule.
            if 'publish' in entry.definition:
                for topic_rule in entry.definition['publish']:
                    # Initialization / check configuration validity
                    if 'run_interval' in entry.definition['publish'][
                            topic_rule] and utils.read_duration(
                                entry.definition['publish'][topic_rule]
                                ['run_interval']) <= 0:
                        logging.error(
                            '#{id}> invalid run_interval for topic rule {topic_rule}: {run_interval}'
                            .format(id=entry_id,
                                    topic_rule=topic_rule,
                                    run_interval=entry.definition['publish']
                                    [topic_rule]['run_interval']))
                        del entry.definition['publish'][topic_rule][
                            'run_interval']
                    if 'run_cron' in entry.definition['publish'][
                            topic_rule] and not (
                                'cron_' + topic_rule in entry.data
                                and entry.data['cron_' + topic_rule] == entry.
                                definition['publish'][topic_rule]['run_cron']
                                and 'next_run_' + topic_rule in entry.data):
                        if not croniter.is_valid(entry.definition['publish']
                                                 [topic_rule]['run_cron']):
                            logging.error(
                                '#{id}> invalid cron rule for publishing topic rule {topic_rule}: {cron}'
                                .format(id=entry_id,
                                        topic_rule=topic_rule,
                                        cron=entry.definition['publish']
                                        [topic_rule]['run_cron']))
                            del entry.definition['publish'][topic_rule][
                                'run_cron']
                        else:
                            entry.data['cron_' +
                                       topic_rule] = entry.definition[
                                           'publish'][topic_rule]['run_cron']
                            #itr = croniter(entry.data['cron_' + topic_rule], datetime.datetime.now().astimezone())
                            itr = croniter(
                                entry.data['cron_' + topic_rule],
                                datetime.datetime.fromtimestamp(
                                    now).astimezone())
                            entry.data['next_run_' +
                                       topic_rule] = itr.get_next()
                    if 'last_run_' + topic_rule not in entry.data:
                        entry.data['last_run_' + topic_rule] = 0
                    if 'next_run_' + topic_rule not in entry.data:
                        entry.data['next_run_' + topic_rule] = now

                    if 'run_interval' in entry.definition['publish'][
                            topic_rule] or 'run_cron' in entry.definition[
                                'publish'][topic_rule]:
                        throttle_policy = _run_step_throttle_policy(
                            entry, entry.definition['publish'][topic_rule],
                            topic_rule)

                        if now >= entry.data['next_run_' + topic_rule]:
                            if throttle_policy == 'force' or throttle_policy == 'skip' or (
                                    isinstance(throttle_policy, int) and
                                    now - entry.data['last_run_' + topic_rule]
                                    > throttle_policy):
                                entry.data['last_run_' + topic_rule] = now
                                if 'run_interval' in entry.definition[
                                        'publish'][topic_rule]:
                                    entry.data[
                                        'next_run_' +
                                        topic_rule] = now + utils.read_duration(
                                            entry.definition['publish']
                                            [topic_rule]['run_interval'])
                                else:
                                    #itr = croniter(entry.data['cron_' + topic_rule], datetime.datetime.now().astimezone())
                                    itr = croniter(
                                        entry.data['cron_' + topic_rule],
                                        datetime.datetime.fromtimestamp(
                                            now).astimezone())
                                    entry.data['next_run_' +
                                               topic_rule] = itr.get_next()

                                if throttle_policy != 'skip':
                                    entry_invoke_publish(
                                        entry, topic_rule,
                                        entry.definition['publish']
                                        [topic_rule])
                                else:
                                    logging.debug(
                                        "#{entry}> system overload ({load}), skipped invokation of publish {method}."
                                        .format(entry=entry.id,
                                                load=load_level(),
                                                method=topic_rule))
                            else:
                                logging.debug(
                                    "#{entry}> system overload ({load}), postponed invokation of publish {method}."
                                    .format(entry=entry.id,
                                            load=load_level(),
                                            method=topic_rule))

            # Persist per-entry data every tick (False: presumably a
            # "no force/flush" flag — confirm against Entry.store_data).
            _s1 = system._stats_start()
            entry.store_data(False)
            system._stats_end('node.run.store_data', _s1)
    system._stats_end('node.run', _s)
# Example #12
# 0
def _on_system_entry_load_batch(loading_defs):
    """Dispatch entry_load / entry_install hooks for a batch of entries being
    loaded, and compute which previously loaded entries must be reloaded.

    :param loading_defs: dict of entry_id -> entry for the batch being loaded.
    :return: list of entry ids, loaded in the past, that should be reloaded.
    """
    # @see system.on_entry_load_batch for docs

    # reloading check
    previously_loaded_entries_to_reload = []
    for entry in loading_defs.values():
        # if this entry define an "entry_load" hook, all running entries should be reloaded
        if entry.type == 'module' and has_handler(entry, 'entry_load'):
            for other_entry in system.entries().values():
                if other_entry.loaded and other_entry.id not in previously_loaded_entries_to_reload:
                    previously_loaded_entries_to_reload.append(other_entry.id)
        else:
            # if this entry define an "entry_install" hook, all running entries matching install rules should be reloaded
            if entry.type == 'module' and 'install_on' in entry.definition:
                for other_entry in system.entries().values():
                    if other_entry.loaded and other_entry.id not in previously_loaded_entries_to_reload:
                        conf = _entry_install_on_conf(
                            entry, entry.definition['install_on'], other_entry)
                        if conf:
                            previously_loaded_entries_to_reload.append(
                                other_entry.id)

    for entry in loading_defs.values():
        # entry_load: calls previously_loaded.entry_load(entry). I process loaded in the past and initialized entries, and loading now entries, but only already called by this callback
        for previously_loaded_entry in system.entries().values():
            if previously_loaded_entry.is_local and previously_loaded_entry.id not in loading_defs and previously_loaded_entry.id not in previously_loaded_entries_to_reload:
                entry_invoke(previously_loaded_entry, 'entry_load', entry)

        # entry_load on currently loading entries and initialized ones: calls entry.entry_load(other_entry). I process all entries loading now, even if already processed by this callback. I consider loaded and initialized entries only if NOT in previously_loaded_entries_to_reload (they should be reloaded, it's useless to call methods on them).
        if entry.type == 'module' and has_handler(entry, 'entry_load'):
            for other_entry in system.entries().values():
                if other_entry.id != entry.id and (
                        not other_entry.loaded or other_entry.id
                        not in previously_loaded_entries_to_reload):
                    # WARN: Usually ALL previously loaded and initialized entries should be in previously_loaded_entries_to_reload (for the rule above, if i'm loading and entry with entry_load method, this invalidates all previously loaded entries), so the condition below should skip all of these entries. The only exception happens if the presence of "entry_load" method changes between the code above and the code below (for example, the "entry_load" method is defined by the scripting module via scripting.entry_load)
                    # In this situation, entry.entry_load(other_entry) is called, even if it's already called before (by a previous "entry" version). We print a warning about this. (It's not convenient to invalidate the entry now)
                    if other_entry.loaded and other_entry.id not in previously_loaded_entries_to_reload:
                        # NOTE(review): logging.warn is deprecated — prefer
                        # logging.warning.
                        logging.warn(
                            "NODE_SYSTEM> Calling {eid}.entry_load on {eid2}, but {eid2} has not been reloaded (during the reloading check {eid}.entry_load was not present). So it's possibile {eid}.entry_load has been called before on {eid2} (by a previous {eid} version)."
                            .format(eid=entry.id, eid2=other_entry.id))
                    entry_invoke(entry, 'entry_load', other_entry)

    for entry in loading_defs.values():
        # entry_install: calls previously_loaded.entry_install(entry). I process loaded in the past and initialized entries, and loading now entries, but only already called by this callback
        for previously_loaded_entry in system.entries().values():
            if previously_loaded_entry.is_local and previously_loaded_entry.id not in loading_defs and previously_loaded_entry.id not in previously_loaded_entries_to_reload:
                if 'install_on' in previously_loaded_entry.definition:
                    conf = _entry_install_on_conf(
                        previously_loaded_entry,
                        previously_loaded_entry.definition['install_on'],
                        entry)
                    if conf:
                        entry_invoke(previously_loaded_entry, 'entry_install',
                                     entry, conf)

        # entry_install on currently loading entries and initialized ones: calls entry.entry_install(other_entry). I process all entries loading now, even if already processed by this callback. I consider loaded and initialized entries only if NOT in previously_loaded_entries_to_reload (they should be reloaded, it's useless to call methods on them).
        if entry.type == 'module' and 'install_on' in entry.definition:
            for other_entry in system.entries().values():
                if other_entry.id != entry.id and (
                        not other_entry.loaded or other_entry.id
                        not in previously_loaded_entries_to_reload):
                    conf = _entry_install_on_conf(
                        entry, entry.definition['install_on'], other_entry)
                    if conf:
                        # WARN: Usually ALL previously loaded and initialized entries (matching the install_on rule) should be in previously_loaded_entries_to_reload (for the rule above), so the condition below should skip all of these entries. The only exception happens if the presence of "entry_install" method, or the install_rule, changes between the code above and the code below (for example, the "entry_install" method is defined by the scripting module via scripting.entry_load)
                        # In this situation, entry.entry_install(other_entry) is called, even if it's already called before (by a previous "entry" version). We print a warning about this. (It's not convenient to invalidate the entry now)
                        if other_entry.loaded and other_entry.id not in previously_loaded_entries_to_reload:
                            logging.warn(
                                "NODE_SYSTEM> Calling {eid}.entry_install on {eid2}, but {eid2} has not been reloaded (during the reloading check {eid}.entry_install was not present or the install rule was different). So it's possibile {eid}.entry_install has been called before on {eid2} (by a previous {eid} version)."
                                .format(eid=entry.id, eid2=other_entry.id))
                        entry_invoke(entry, 'entry_install', other_entry, conf)

    return previously_loaded_entries_to_reload