Example #1
def storeData(entry, blocking=True):
    if not entry.data:
        return False
    if not entry.data_lock.acquire(blocking):
        return False
    try:
        _s = system._stats_start()

        cmpdata = repr(entry.data)
        data = None
        if entry.store_data_saved != cmpdata:
            data = utils.json_export(entry.data)
            _storeDataTo(entry,
                         entry.node_name + '_data_' + entry.id_local + '.json',
                         data)
            entry.store_data_saved = cmpdata
        if system.time() - entry.store_backup_time > STORAGE_BACKUP_TIME:
            if not data:
                data = utils.json_export(entry.data)
            _storeDataTo(
                entry,
                entry.node_name + '_data_' + entry.id_local + '.backup.json',
                data)
            entry.store_backup_time = system.time()

        return True
    except:
        logging.exception("#{id}> failed storing data".format(id=entry.id))
        return False
    finally:
        entry.data_lock.release()
        system._stats_end('storage.store_data', _s)
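A minimal, self-contained sketch of the same idea used in storeData() above, with hypothetical names (_Entry, store_if_changed, write): data is rewritten only when its repr() snapshot differs from the last save, and a backup copy is refreshed once it is older than STORAGE_BACKUP_TIME.

import json
import time

STORAGE_BACKUP_TIME = 3600  # assumed threshold in seconds; the real value is defined elsewhere

class _Entry:
    """Hypothetical stand-in for the real entry object."""
    def __init__(self, data):
        self.data = data
        self.store_data_saved = None  # repr() snapshot of the last stored data
        self.store_backup_time = 0    # timestamp of the last backup write

def store_if_changed(entry, write):
    """Write only when the repr() snapshot changed; refresh the backup periodically."""
    snapshot = repr(entry.data)
    if entry.store_data_saved != snapshot:
        write('data.json', json.dumps(entry.data))
        entry.store_data_saved = snapshot
    if time.time() - entry.store_backup_time > STORAGE_BACKUP_TIME:
        write('data.backup.json', json.dumps(entry.data))
        entry.store_backup_time = time.time()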
Example #2
def entry_invoke_delayed_wrapper(func, method, entry_id, *args, **kwargs):
    _s = system._stats_start()
    try:
        func(*args, **kwargs)
    except:
        logging.exception(
            "#{id}> exception in running method {method} (delayed)".format(
                id=entry_id, method=method))
    system._stats_end('entry_invoke_delayed:' + entry_id + '.' + str(method),
                      _s)
Example #3
def entry_invoke_threaded_wrapper(func, method, entry_id, *args, **kwargs):
    _s = system._stats_start()
    try:
        if '_thread_init' in kwargs:
            f = kwargs['_thread_init']
            del kwargs['_thread_init']
            f(*args, **kwargs)
        func(*args, **kwargs)
    except:
        logging.exception(
            "#{id}> exception in running method {method} (threaded)".format(
                id=entry_id, method=method))
    system._stats_end('entry_invoke:' + entry_id + '.' + str(method), _s)
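A hedged usage sketch of the _thread_init hook handled above. The threading.Thread wiring is an assumption (the real entry_invoke_threaded helper is not shown), and it presumes the surrounding module's system and logging objects are available: the wrapper pops '_thread_init' from kwargs and calls it with the remaining arguments before invoking the wrapped method.

import threading

def my_thread_init(entry):
    # hypothetical per-thread setup, executed before the real method
    print('init for', entry)

def my_run(entry):
    print('run for', entry)

t = threading.Thread(
    target=entry_invoke_threaded_wrapper,
    args=(my_run, 'run', 'lamp1', 'lamp1_entry'),
    kwargs={'_thread_init': my_thread_init})
t.start()
t.join()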
Example #4
def script_eval(code, context={}, to_dict=False, cache=False):
    #global script_eval_quick_count
    _s = system._stats_start()
    """
  ret = _script_eval_quick(code, context)
  if ret and 'return' in ret:
    script_eval_quick_count = script_eval_quick_count + 1
    ret = ret['return']
  else:
  """
    ret = _script_eval_int(code, context, cache)
    if ret and to_dict and isinstance(ret, js2py.base.JsObjectWrapper):
        ret = ret.to_dict()

    system._stats_end('scripting_js.script_eval', _s)
    return ret
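The to_dict branch above relies on js2py's wrapper type; a small standalone illustration of that conversion (the JS snippet is only an example):

import js2py

# js2py wraps JS objects in JsObjectWrapper; to_dict() turns them into a plain
# Python dict, which is what the to_dict flag above relies on.
result = js2py.eval_js('({a: 1, b: "x"})')
if isinstance(result, js2py.base.JsObjectWrapper):
    result = result.to_dict()
print(result)  # a plain dict, e.g. {'a': 1, 'b': 'x'}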
Example #5
def _on_system_message(message):
    for sm in message.subscribedMessages():
        if sm.entry.is_local:
            if sm.topic_rule in sm.entry.subscription_handlers:
                for record in sm.entry.subscription_handlers[sm.topic_rule]:
                    system.entry_publish_current_default_topic(message.topic)
                    # TODO 2020CHANGE: current_* should no longer be needed
                    #record[1].request.current_action = 'subscribe'
                    #record[1].request.current_message = message
                    _s = system._stats_start()
                    record[0](record[1], sm.copy())
                    #20201012 Disabled: too time consuming (and no store_data() is done for many other conditions, like event listeners). store_data is performed every second by run() > run_step()
                    #record[1].store_data()
                    system._stats_end(
                        'subscribe_handler(' + sm.topic_rule + '@' +
                        sm.entry.id + ')', _s)
Example #6
def run(entry):
    _s = system._stats_start()
    now = system.time()
    changed = False

    if entry.data['timer_to'] > 0 and now >= entry.data['timer_to']:
        entry.data['enabled'] = not entry.data['enabled']
        entry.data['timer_to'] = 0
        changed = True

    for groupname, group in entry.data['groups'].items():
        if group['timer_to'] > 0 and now > group['timer_to']:
            group['enabled'] = not group['enabled']
            group['timer_to'] = 0
            changed = True

    for jid, job in entry.data['jobs'].items():
        if job['timer_to'] > 0 and now > job['timer_to']:
            job['enabled'] = not job['enabled']
            job['timer_to'] = 0
            changed = True

    if entry.data['enabled']:
        for jid, job in entry.data['jobs'].items():
            if job['enabled'] and (
                    'group' not in job
                    or job['group'] not in entry.data['groups']
                    or entry.data['groups'][job['group']]['enabled']):
                if job['max_delay'] > 0 and now >= job['next_run'] + job[
                        'max_delay']:
                    logging.warning(
                        '#{id}> max_delay passed, run skipped for job {job}'.
                        format(id=entry.id, job=job))
                    job_set_next_run(job)
                if now >= job['next_run']:
                    run_job(entry, job)
                    job['last_run'] = now
                    job_set_next_run(job)
                    changed = True

    if changed:
        run_publish(entry, '', {})

    system._stats_end('scheduler', _s)
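The one-shot timer handling repeated three times above (entry, groups, jobs) follows the same shape; a minimal sketch of that step in isolation (apply_timer is a hypothetical name):

import time

def apply_timer(obj, now=None):
    """If the one-shot timer expired, toggle 'enabled', clear the timer and
    report that the object changed (mirrors the checks in run())."""
    now = time.time() if now is None else now
    if obj['timer_to'] > 0 and now >= obj['timer_to']:
        obj['enabled'] = not obj['enabled']
        obj['timer_to'] = 0
        return True
    return False

# usage: schedule a toggle 60 seconds from now
group = {'enabled': True, 'timer_to': time.time() + 60}
apply_timer(group)  # False until the deadline passes, then flips 'enabled'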
Example #7
def _script_exec_js(code,
                    context={},
                    do_eval=True,
                    do_eval_function=False,
                    return_context=False):
    """
  @param return_context Used to access modified context variables (context passed could be NOT modified by script). Use True to return all context variables, ['name', ...] to return only variables referenced, False to return no variables
  @return { 'error': boolean, 'return': evalued expression if do_eval = True, 'context': modificed context variables if return_context set }
  """
    # @see https://github.com/PiotrDabkowski/Js2Py/blob/b16d7ce90ac9c03358010c1599c3e87698c9993f/js2py/evaljs.py#L174 (execute method)
    global script_js_compiled, script_js_compiled_hits, script_js_compiled_miss, script_js_compiled_lock, script_context_instance_lock

    _s = system._stats_start()
    try:
        keyhash = utils.md5_hexdigest(code)
        if keyhash in script_js_compiled:
            script_js_compiled_hits += 1
        else:
            script_js_compiled_miss += 1

            if len(script_js_compiled) > SCRIPT_JS_COMPILED_MAXSIZE:
                with script_js_compiled_lock:
                    t = SCRIPT_JS_COMPILED_PURGETIME
                    while len(script_js_compiled
                              ) > SCRIPT_JS_COMPILED_MAXSIZE * 0.9:
                        script_js_compiled = {
                            x: script_js_compiled[x]
                            for x in script_js_compiled
                            if script_js_compiled[x]['used'] > system.time() -
                            t
                        }
                        t = t / 2 if t > 1 else -1

            if do_eval:
                #code = 'PyJsEvalResult = eval(%s)' % json.dumps(code) # Original method used by js2py, very slow
                if not do_eval_function:
                    code = 'PyJsEvalResult = ' + code
                else:
                    code = 'PyJsEvalResult = function() {' + code + '}()'

            code = js2py.translators.translate_js(
                code, '', use_compilation_plan=js2py_use_compilation_plan)

            script_js_compiled[keyhash] = {
                'compiled': compile(code, '<EvalJS snippet>', 'exec')
            }

        script_js_compiled[keyhash]['used'] = system.time()

        ret = {'error': False, 'return': None, 'context': {}}
        with script_context_instance_lock:
            contextjs = script_context(context)
            exec(script_js_compiled[keyhash]['compiled'], contextjs._context)
            if do_eval:
                ret['return'] = _var_to_python(contextjs['PyJsEvalResult'])
            if return_context:
                # Pick the variable names to copy back from the JS context:
                # either the explicit list passed in, or every key of the
                # original context.
                if isinstance(return_context, list):
                    names = return_context
                else:
                    source = context.__context if isinstance(
                        context, js2py.evaljs.EvalJs) else context
                    if isinstance(source, js2py.base.JsObjectWrapper):
                        source = source.to_dict()
                    names = source.keys()
                for k in names:
                    ret['context'][k] = contextjs[k]

        return ret
    except:
        """
    cdebug = {}
    for k in contextjs.__context:
      cdebug[k] = contextjs[k]
    logging.exception('scripting_js> error executing js script: {code}\ncontext: {context}\ncontextjs: {contextjs}\n'.format(code = code, context = str(context if not isinstance(context, js2py.evaljs.EvalJs) else (str(context.__context) + ' (WARN! this is the source context, but changes could have been made before this call, because a result of another call has been passed!)')), contextjs = cdebug))
    """
        logging.exception(
            'scripting_js> error executing js script: {code}\ncontext: {context}\n'
            .format(
                code=code,
                context=str(context if not isinstance(
                    context, js2py.evaljs.EvalJs
                ) else (
                    str(context.__context) +
                    ' (WARN! this is the source context, but changes could have been made before this call, because a result of another call has been passed!)'
                ))))
        return {'error': True, 'return': None, 'context': {}}
    finally:
        system._stats_end(
            'scripting_js.script_' + ('eval' if do_eval else 'exec') +
            '(js2py)', _s)
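A hedged usage sketch based on the docstring above (it assumes the module-level caches, locks and the script_context helper are initialized; the concrete values are only illustrative):

res = _script_exec_js('x + 1', context={'x': 41}, do_eval=True,
                      return_context=['x'])
if not res['error']:
    print(res['return'])   # the evaluated expression (42 here), since do_eval=True
    print(res['context'])  # only the variables listed in return_context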
Example #8
def run_step():
    _s = system._stats_start()
    now = system.time()
    clone_entry_names = list(
        system.entries().keys()
    )  # Clone the entry names, because a handler could modify "entries" during iteration
    for entry_id in clone_entry_names:
        entry = system.entry_get(entry_id)
        if entry and entry.is_local:
            # Initialization / check configuration validity
            if 'run_interval' in entry.definition and utils.read_duration(
                    entry.definition['run_interval']) <= 0:
                logging.error(
                    '#{id}> invalid run_interval: {run_interval}'.format(
                        id=entry_id,
                        run_interval=entry.definition['run_interval']))
                del entry.definition['run_interval']
            if 'run_cron' in entry.definition and entry_implements(
                    entry_id,
                    'run') and not ('cron' in entry.data and entry.data['cron']
                                    == entry.definition['run_cron']
                                    and 'next_run' in entry.data):
                if not croniter.is_valid(entry.definition['run_cron']):
                    logging.error('#{id}> invalid cron rule: {cron}'.format(
                        id=entry_id, cron=entry.definition['run_cron']))
                    del entry.definition['run_cron']
                else:
                    entry.data['cron'] = entry.definition['run_cron']
                    #itr = croniter(entry.data['cron'], datetime.datetime.now().astimezone())
                    itr = croniter(
                        entry.data['cron'],
                        datetime.datetime.fromtimestamp(now).astimezone())
                    entry.data['next_run'] = itr.get_next()
            if 'last_run' not in entry.data:
                entry.data['last_run'] = 0
            if 'next_run' not in entry.data:
                entry.data['next_run'] = now

            if entry_implements(entry_id,
                                'run') and ('run_interval' in entry.definition
                                            or 'run_cron' in entry.definition):
                throttle_policy = _run_step_throttle_policy(
                    entry, entry.definition, None)

                if now >= entry.data['next_run']:
                    if throttle_policy == 'force' or throttle_policy == 'skip' or (
                            isinstance(throttle_policy, int) and
                            now - entry.data['last_run'] > throttle_policy):
                        entry.data['last_run'] = now
                        if 'run_interval' in entry.definition:
                            entry.data['next_run'] = now + utils.read_duration(
                                entry.definition['run_interval'])
                        else:
                            #itr = croniter(entry.data['cron'], datetime.datetime.now().astimezone())
                            itr = croniter(
                                entry.data['cron'],
                                datetime.datetime.fromtimestamp(
                                    now).astimezone())
                            entry.data['next_run'] = itr.get_next()

                        if throttle_policy != 'skip':
                            entry_invoke_threaded(entry_id, 'run')
                        else:
                            logging.debug(
                                "#{entry}> system overload ({load}), skipped invokation of {method}."
                                .format(entry=entry.id,
                                        load=load_level(),
                                        method='run'))
                    else:
                        logging.debug(
                            "#{entry}> system overload ({load}), postponed invokation of {method}."
                            .format(entry=entry.id,
                                    load=load_level(),
                                    method='run'))

            if 'publish' in entry.definition:
                for topic_rule in entry.definition['publish']:
                    # Initialization / check configuration validity
                    if 'run_interval' in entry.definition['publish'][
                            topic_rule] and utils.read_duration(
                                entry.definition['publish'][topic_rule]
                                ['run_interval']) <= 0:
                        logging.error(
                            '#{id}> invalid run_interval for topic rule {topic_rule}: {run_interval}'
                            .format(id=entry_id,
                                    topic_rule=topic_rule,
                                    run_interval=entry.definition['publish']
                                    [topic_rule]['run_interval']))
                        del entry.definition['publish'][topic_rule][
                            'run_interval']
                    if 'run_cron' in entry.definition['publish'][
                            topic_rule] and not (
                                'cron_' + topic_rule in entry.data
                                and entry.data['cron_' + topic_rule] == entry.
                                definition['publish'][topic_rule]['run_cron']
                                and 'next_run_' + topic_rule in entry.data):
                        if not croniter.is_valid(entry.definition['publish']
                                                 [topic_rule]['run_cron']):
                            logging.error(
                                '#{id}> invalid cron rule for publishing topic rule {topic_rule}: {cron}'
                                .format(id=entry_id,
                                        topic_rule=topic_rule,
                                        cron=entry.definition['publish']
                                        [topic_rule]['run_cron']))
                            del entry.definition['publish'][topic_rule][
                                'run_cron']
                        else:
                            entry.data['cron_' +
                                       topic_rule] = entry.definition[
                                           'publish'][topic_rule]['run_cron']
                            #itr = croniter(entry.data['cron_' + topic_rule], datetime.datetime.now().astimezone())
                            itr = croniter(
                                entry.data['cron_' + topic_rule],
                                datetime.datetime.fromtimestamp(
                                    now).astimezone())
                            entry.data['next_run_' +
                                       topic_rule] = itr.get_next()
                    if 'last_run_' + topic_rule not in entry.data:
                        entry.data['last_run_' + topic_rule] = 0
                    if 'next_run_' + topic_rule not in entry.data:
                        entry.data['next_run_' + topic_rule] = now

                    if 'run_interval' in entry.definition['publish'][
                            topic_rule] or 'run_cron' in entry.definition[
                                'publish'][topic_rule]:
                        throttle_policy = _run_step_throttle_policy(
                            entry, entry.definition['publish'][topic_rule],
                            topic_rule)

                        if now >= entry.data['next_run_' + topic_rule]:
                            if throttle_policy == 'force' or throttle_policy == 'skip' or (
                                    isinstance(throttle_policy, int) and
                                    now - entry.data['last_run_' + topic_rule]
                                    > throttle_policy):
                                entry.data['last_run_' + topic_rule] = now
                                if 'run_interval' in entry.definition[
                                        'publish'][topic_rule]:
                                    entry.data[
                                        'next_run_' +
                                        topic_rule] = now + utils.read_duration(
                                            entry.definition['publish']
                                            [topic_rule]['run_interval'])
                                else:
                                    #itr = croniter(entry.data['cron_' + topic_rule], datetime.datetime.now().astimezone())
                                    itr = croniter(
                                        entry.data['cron_' + topic_rule],
                                        datetime.datetime.fromtimestamp(
                                            now).astimezone())
                                    entry.data['next_run_' +
                                               topic_rule] = itr.get_next()

                                if throttle_policy != 'skip':
                                    entry_invoke_publish(
                                        entry, topic_rule,
                                        entry.definition['publish']
                                        [topic_rule])
                                else:
                                    logging.debug(
                                        "#{entry}> system overload ({load}), skipped invokation of publish {method}."
                                        .format(entry=entry.id,
                                                load=load_level(),
                                                method=topic_rule))
                            else:
                                logging.debug(
                                    "#{entry}> system overload ({load}), postponed invokation of publish {method}."
                                    .format(entry=entry.id,
                                            load=load_level(),
                                            method=topic_rule))

            _s1 = system._stats_start()
            entry.store_data(False)
            system._stats_end('node.run.store_data', _s1)
    system._stats_end('node.run', _s)
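The cron scheduling step that appears several times in run_step() reduces to the same croniter pattern; shown here in isolation (the cron rule and timestamp are hypothetical examples):

import datetime
from croniter import croniter

now = 1700000000           # epoch seconds; run_step() uses system.time()
cron_rule = '*/5 * * * *'  # hypothetical run_cron value: every 5 minutes

if croniter.is_valid(cron_rule):
    itr = croniter(cron_rule,
                   datetime.datetime.fromtimestamp(now).astimezone())
    next_run = itr.get_next()  # epoch seconds of the next scheduled run
    print(next_run)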