def _decorate(obj=None, check_steps_end=False):
    # check that decorated function is not a staticmethod or classmethod
    if not obj:
        raise MolerStatusException(
            "Decorator for 'staticmethod' or 'classmethod' not implemented yet.",
            [MolerException()])

    if hasattr(obj, "__dict__"):
        if obj.__dict__.items():
            for attributeName in dir(obj):
                if attributeName == "_already_decorated":
                    break

                attribute = getattr(obj, attributeName)

                if not attributeName.startswith("_"):
                    if isinstance(attribute, (FunctionType, MethodType)):
                        setattr(obj, attributeName,
                                MolerTest._wrapper(attribute, check_steps_end))
        else:
            obj = MolerTest._wrapper(obj, True)
    else:
        raise MolerStatusException("No '__dict__' in decorated object.",
                                   [MolerException()])

    return obj
def submit(self, connection_observer):
    """
    Submit connection observer to background execution.

    Returns Future that could be used to await for connection_observer done.
    """
    self.logger.debug("go background: {!r}".format(connection_observer))
    # TODO: check dependency - connection_observer.connection

    feed_started = threading.Event()
    stop_feeding = threading.Event()
    feed_done = threading.Event()
    connection_observer_future = self.executor.submit(
        self.feed, connection_observer, feed_started, stop_feeding, feed_done)

    # await feed thread to be really started
    start_timeout = 0.5
    if not feed_started.wait(timeout=start_timeout):
        err_msg = "Failed to start observer feeding thread within {} sec".format(start_timeout)
        self.logger.error(err_msg)
        exc = MolerException(err_msg)
        connection_observer.set_exception(exc)  # mark observer as failed-to-start
        return None

    c_future = CancellableFuture(connection_observer_future, feed_started,
                                 stop_feeding, feed_done)
    return c_future
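
# --- illustrative addition, not part of the source ---
# A minimal sketch of the start-handshake pattern submit() relies on: the submitting side waits on a
# threading.Event that the worker sets as soon as it is really running, and a timeout on wait() turns a
# silent hang into an explicit error (the MolerException path above). Pure stdlib, no Moler API assumed.
import concurrent.futures
import threading


def feed(feed_started):
    feed_started.set()  # signal "I'm really running" before doing the actual background work
    # ... background feeding work would go here ...


feed_started = threading.Event()
with concurrent.futures.ThreadPoolExecutor() as executor:
    future = executor.submit(feed, feed_started)
    if not feed_started.wait(timeout=0.5):
        raise RuntimeError("worker failed to start within 0.5 sec")
    future.result()  # afterwards, wait for the background work itself to finish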
def add_event_occurred_callback(self, callback, callback_params):
    if not self.callback:
        callback = functools.partial(callback, **callback_params)
        self.callback = callback
    else:
        raise MolerException("Cannot assign already assigned 'self.callback'.")
def _error(msg, raise_exception=False, dump=None):
    MolerTest._was_error = True
    msg = MolerTest._get_string_message(msg, dump)
    MolerTest._logger.error(msg, extra={'moler_error': True})

    if raise_exception:
        raise MolerException(msg)
def secure_data_received(data, timestamp):
    try:
        if connection_observer.done() or self._in_shutdown:
            return  # even not unsubscribed secure_data_received() won't pass data to a done observer
        with observer_lock:
            connection_observer.data_received(data, timestamp)
            connection_observer.life_status.last_feed_time = time.time()

    except Exception as exc:  # TODO: handling stacktrace
        # observers should not raise exceptions during data parsing
        # but if they do so - we fix it
        with observer_lock:
            self.logger.warning("Unhandled exception from '{}' caught by runner. '{}' : '{}'.".format(
                connection_observer, exc, repr(exc)))
            ex_msg = "Unexpected exception from {} caught by runner when processing data >>{}<< at '{}':" \
                     " >>>{}<<< -> repr: >>>{}<<<".format(connection_observer, data, timestamp, exc, repr(exc))
            if connection_observer.is_command():
                ex = CommandFailure(command=connection_observer, message=ex_msg)
            else:
                ex = MolerException(ex_msg)
            connection_observer.set_exception(ex)
    finally:
        if connection_observer.done() and not connection_observer.cancelled():
            if connection_observer._exception:
                self.logger.debug("{} raised: {!r}".format(connection_observer,
                                                           connection_observer._exception))
            else:
                self.logger.debug("{} returned: {}".format(connection_observer,
                                                           connection_observer._result))
def check_system_resources_limit(connection_observer, observer_lock, logger):
    # The number of file descriptors currently opened by this process
    curr_fds_open, curr_threads_nb = system_resources_usage()
    if curr_fds_open > max_open_files_limit_soft - 10:
        err_cause = "Can't run new asyncio loop - ALMOST REACHED MAX OPEN FILES LIMIT"
        msg = "{} ({}). Now {} FDs open, {} threads active.".format(
            err_cause, max_open_files_limit_soft, curr_fds_open, curr_threads_nb)
        logger.warning(msg)
        limit_exception = MolerException(msg)
        # make future done and observer done-with-exception
        with observer_lock:
            connection_observer.set_exception(limit_exception)
        # We need to return a future informing "it's impossible to create new event loop".
        # However, it can't be asyncio.Future() since that requires an event loop ;-)
        # We would get something like:
        #
        #   impossible_future = asyncio.Future()
        #     File "/opt/ute/python3/lib/python3.6/asyncio/events.py", line 676, in get_event_loop
        #       return get_event_loop_policy().get_event_loop()
        #     File "/opt/ute/python3/lib/python3.6/asyncio/events.py", line 584, in get_event_loop
        #       % threading.current_thread().name)
        #   RuntimeError: There is no current event loop in thread 'Thread-5090'.
        #
        # So, we use concurrent.futures.Future - it has almost the same API (duck typing for runner.wait_for() below).
        impossible_future = concurrent.futures.Future()
        impossible_future.set_result(None)
        return impossible_future
    return None
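
# --- illustrative addition, not part of the source ---
# A tiny demonstration of the choice made above: concurrent.futures.Future can be created and resolved
# in any thread without an event loop, whereas asyncio.Future() raises when no loop is available.
import concurrent.futures

impossible_future = concurrent.futures.Future()
impossible_future.set_result(None)
assert impossible_future.done() and impossible_future.result() is None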
def sshshell_thd_conn(host=None, port=None, username=None, login=None, password=None,
                      name=None, reuse_ssh_of_shell=None, **kwargs):
    mlr_conn = mlr_conn_utf8_with_clean_vt100(moler_conn_class, name=name)
    if reuse_ssh_of_shell:
        if not ((host is None) and (port is None) and (username is None)
                and (login is None) and (password is None)):
            incorrect_params = "host/port/username/login/password"
            when = "building sshshell reusing ssh of other sshshell"
            err_msg = "Don't use {} when {}".format(incorrect_params, when)
            raise MolerException(err_msg)
        io_conn = ThreadedSshShell.from_sshshell(moler_connection=mlr_conn,  # TODO: add name
                                                 sshshell=reuse_ssh_of_shell,
                                                 **kwargs)  # logger_name
    else:
        if port is None:
            port = 22
        io_conn = ThreadedSshShell(moler_connection=mlr_conn,  # TODO: add name
                                   host=host, port=port,
                                   username=username, login=login, password=password,
                                   **kwargs)  # receive_buffer_size, logger_name, other login credentials
    return io_conn
def _error(cls, msg, raise_exception=False, dump=None):
    caller_msg = cls._caller_info()
    cls._was_error = True
    msg = cls._get_string_message(msg, dump, caller_msg)
    cls._logger.error(msg, extra={'moler_error': True})

    if raise_exception:
        raise MolerException(msg)
def add_event_occurred_callback(self, callback, callback_params=None):
    if not self.callback:
        if callback_params is None:
            callback_params = dict()
        partial_callback = functools.partial(callback, **callback_params)
        self.callback = partial_callback
    else:
        raise MolerException("Cannot assign a callback '{}' to event '{}' when another callback '{}' is already"
                             " assigned".format(callback, self, self.callback))
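
# --- illustrative addition, not part of the source ---
# The mechanism used above is plain functools.partial: the callback is stored pre-bound with its keyword
# parameters, so firing it later needs no extra arguments. Names below are placeholders, not Moler API.
import functools


def log_occurrence(device_name):
    print("event occurred on {}".format(device_name))


bound_callback = functools.partial(log_occurrence, device_name="my_unix")
bound_callback()  # prints: event occurred on my_unix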
def _stop(self):
    self._stop_running.set()  # force threaded-function to exit
    if not self._is_done.wait(timeout=self._stop_timeout):
        err_msg = "Failed to stop thread-running function within {} sec".format(self._stop_timeout)
        # TODO: should we break the current thread or just set this exception inside the connection-observer
        #       (is it symmetric to failed-start?)
        # may cause leaking resources - no call to moler_conn.unsubscribe()
        raise MolerException(err_msg)
def register_builtin_connections(connection_factory, moler_conn_class):
    _register_builtin_connections(connection_factory, moler_conn_class)
    supported_systems = ['Linux', "FreeBSD", "Darwin", "SunOS"]
    if platform.system() in supported_systems:
        _register_builtin_unix_connections(connection_factory, moler_conn_class)
    else:
        err_msg = "Unsupported system {} detected! Supported systems: {}".format(
            platform.system(), supported_systems)
        raise MolerException(err_msg)
def _perform_device_tests(device, tested, states_to_test, max_time):
    device.set_all_prompts_on_line(True)
    start_time = time.time()
    while 0 < states_to_test.qsize():
        source_state, target_state = states_to_test.get(0)
        if (source_state, target_state) in tested:
            continue
        try:
            state_before_test = device.current_state
            device.goto_state(source_state, keep_state=False, rerun=0)
            tested.add((state_before_test, source_state))

            device.goto_state(target_state, keep_state=False, rerun=0)
            tested.add((source_state, target_state))

            if device.last_wrong_wait4_occurrence is not None:
                msg = "More than 1 prompt match the same line!: '{}'".format(
                    device.last_wrong_wait4_occurrence)
                raise MolerException(msg)
        except Exception as exc:
            raise MolerException("Cannot trigger change state: '{}' -> '{}'\n{}".format(
                source_state, target_state, exc))
        if max_time is not None and time.time() - start_time > max_time:
            return
def register_builtin_connections(connection_factory, moler_conn_class):
    _register_builtin_connections(connection_factory, moler_conn_class)  # unix & windows connections
    if _running_python_3_5_or_above():
        _register_python3_builtin_connections(connection_factory, moler_conn_class)
    if _running_on_supported_unix():
        _register_builtin_unix_connections(connection_factory, moler_conn_class)  # unix-only connections
        if _running_python_3_5_or_above():
            _register_builtin_py3_unix_connections(connection_factory, moler_conn_class)
    elif _running_on_supported_windows():
        pass  # placeholder for windows-only connections
    else:
        err_msg = "Unsupported system {} detected! Supported systems: {}".format(
            platform.system(), supported_systems)
        raise MolerException(err_msg)
def _handle_unexpected_error_from_observer(self, exception, data, timestamp):
    self.logger.warning("Unhandled exception from '{}' caught by ObserverThreadWrapperForConnectionObserver"
                        " (Runner normally). '{}' : '{}'.".format(self._observer_self, exception, repr(exception)))
    ex_msg = "Unexpected exception from {} caught by runner when processing data >>{}<< at '{}':" \
             " >>>{}<<< -> repr: >>>{}<<<".format(self._observer_self, data, timestamp, exception, repr(exception))
    if self._observer_self.is_command():
        ex = CommandFailure(command=self._observer_self, message=ex_msg)
    else:
        ex = MolerException(ex_msg)
    self._observer_self.set_exception(exception=ex)
def start(self):
    """
    We want this method not to return before it ensures that the thread and its enclosed loop are really running.
    """
    super(AsyncioLoopThread, self).start()
    # await loop thread to be really started
    start_timeout = 0.5
    if not self.ev_loop_started.wait(timeout=start_timeout):
        err_msg = "Failed to start asyncio loop thread within {} sec".format(start_timeout)
        self.ev_loop_done.set()
        raise MolerException(err_msg)
    self.logger.info("started new asyncio-loop-thrd ...")
def read_yaml_configfile(path):
    """
    Read and convert YAML into dictionary.

    :param path: location of yaml file
    :return: configuration as a python dictionary
    """
    if os.path.isabs(path):
        with read_configfile(path) as content:
            return yaml.load(content, Loader=yaml.FullLoader)
    else:
        error = "Loading configuration requires absolute path and not '{}'".format(path)
        raise MolerException(error)
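
# --- illustrative addition, not part of the source ---
# Usage sketch: read_yaml_configfile() insists on an absolute path, so a relative one is typically
# normalized first. The file name 'moler_config.yml' is a placeholder.
import os.path

cfg_path = os.path.abspath("moler_config.yml")  # make the path absolute before loading
cfg_dict = read_yaml_configfile(cfg_path)       # a relative path would raise MolerException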
def iterate_over_device_states(device):
    source_states = _get_all_states_from_device(device=device)
    target_states = copy_list(source_states)
    random.shuffle(source_states)
    random.shuffle(target_states)

    for source_state in source_states:
        for target_state in target_states:
            try:
                device.goto_state(source_state)
                device.goto_state(target_state)
            except Exception as exc:
                raise MolerException("Cannot trigger change state: '{}' -> '{}'\n{}".format(
                    source_state, target_state, exc))
def load_config(config=None, from_env_var=None, config_type='yaml'):
    """
    Load Moler's configuration from config file.

    :param config: either dict or config filename directly provided (overwrites 'from_env_var' if both given)
    :param from_env_var: name of environment variable storing config filename
    :param config_type: 'dict' ('config' param is dict) or 'yaml' ('config' is filename of file with YAML content)
    :return: None
    """
    global loaded_config
    if loaded_config == "NOT_LOADED_YET":
        loaded_config = config
    elif configs_are_same(loaded_config, config):
        return
    else:
        why = "Reloading configuration during Moler execution is not supported!"
        error = "Trial to load '{}' config while '{}' config already loaded.\n{}".format(
            config, loaded_config, why)
        raise MolerException(error)

    assert (config_type == 'dict') or (config_type == 'yaml')  # no other format supported yet
    if not config:
        if not from_env_var:
            raise AssertionError("Provide either 'config' or 'from_env_var' parameter (none given)")
        if from_env_var not in os.environ:
            raise KeyError("Environment variable '{}' is not set".format(from_env_var))
        path = os.environ[from_env_var]
        config = read_yaml_configfile(path)
    elif config_type == 'yaml':
        assert isinstance(config, six.string_types)
        path = config
        config = read_yaml_configfile(path)
    elif config_type == 'dict':
        assert isinstance(config, dict)
    # TODO: check schema
    load_logger_from_config(config)
    load_connection_from_config(config)
    load_device_from_config(config)
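
# --- illustrative addition, not part of the source ---
# Usage sketch of the three accepted inputs; 'my_devices.yml', 'MOLER_CONFIG' and the dict content
# are placeholders, not a documented configuration schema.
load_config(config='my_devices.yml')                        # YAML file path (config_type defaults to 'yaml')
# load_config(from_env_var='MOLER_CONFIG')                  # or: path taken from an environment variable
# load_config(config={'DEVICES': {}}, config_type='dict')   # or: an already-built dict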
def iterate_over_device_states(device):
    states = device.states
    states.remove("NOT_CONNECTED")
    source_states = copy_list(states)
    target_states = copy_list(states)

    random.shuffle(source_states)
    random.shuffle(target_states)

    for source_state in source_states:
        for target_state in target_states:
            try:
                device.goto_state(source_state)
                device.goto_state(target_state)
            except Exception as exc:
                raise MolerException("Cannot trigger change state: '{}' -> '{}'\n{}".format(
                    source_state, target_state, exc))
def _inject_deferred(self):
    """
    Inject response on connection.
    """
    cmd_data_string = self.input_bytes.decode("utf-8")
    if cmd_data_string:
        if '\n' in cmd_data_string:
            cmd_data_string = cmd_data_string[:-1]  # remove \n from command_string on connection
    else:
        cmd_data_string = self.input_bytes

    try:
        binary_cmd_ret = self.data[self.device.state][cmd_data_string].encode('utf-8')
        self.inject([self.input_bytes + binary_cmd_ret])
    except KeyError as exc:
        raise MolerException("No output for cmd: '{}' in state '{}'!\n"
                             "Please update your device_output dict!\n"
                             "{}".format(cmd_data_string, self.device.state, exc))
def add_event_occurred_callback(self, callback):
    if not self.callback:
        self.callback = callback
    else:
        raise MolerException("Cannot assign already assigned 'self.callback'.")
def submit(self, connection_observer):
    """
    Submit connection observer to background execution.

    Returns Future that could be used to await for connection_observer done.
    """
    self.logger.debug("go background: {!r}".format(connection_observer))
    # TODO: check dependency - connection_observer.connection

    # Our submit consists of two steps:
    # 1. scheduling start_feeder() in the asyncio-dedicated thread via run_async_coroutine()
    # 2. scheduling "background feed" via asyncio.ensure_future()
    #    - internally it calls _start_feeding() which sets the feed_started event
    #
    # Moreover, we await here (before returning from submit()) for "background feed" to be really started.
    # That is realized by a 0.5 sec timeout awaiting the feed_started asyncio.Event.
    # It ensures that the feed() coroutine is already running inside the asyncio loop.
    # Such functionality is possible thanks to using a thread.
    #
    # By using the code of _start_feeding() we ensure that after submit() connection data can reach
    # data_received() of the observer. In other words, no data will be lost-for-observer after runner.submit().
    #
    # The consequence of waiting for "background feed" to be running is that submit() is a blocking call
    # until feed() starts. The generic rule of async code is that methods should be as quick as possible,
    # because async frameworks operate inside a single thread, so a blocking call means "nothing else can
    # happen" - for example "handling data of other connections" or "handling other observers".
    # So, if we put an observer with AsyncioInThreadRunner inside some event loop, that loop will block
    # for the duration of submit(), measured as around 0.01 sec (depends on machine).
    #
    # We are willing to pay that 0.01 sec price since we gain a benefit for it:
    # if anything goes wrong and start_feeder() can't complete within 0.5 sec, we are at least notified
    # by MolerException.

    async def start_feeder():
        feed_started = asyncio.Event()
        self.logger.debug("scheduling feed({})".format(connection_observer))
        conn_observer_future = asyncio.ensure_future(
            self.feed(connection_observer, feed_started, subscribed_data_receiver=None))
        self.logger.debug("scheduled feed() - future: {}".format(conn_observer_future))
        await feed_started.wait()
        self.logger.debug("feed() started - future: {}:{}".format(
            instance_id(conn_observer_future), conn_observer_future))
        return conn_observer_future

    thread4async = get_asyncio_loop_thread()
    start_timeout = 0.5
    try:
        connection_observer_future = thread4async.run_async_coroutine(start_feeder(), timeout=start_timeout)
    except MolerTimeout:
        err_msg = "Failed to start observer feeder within {} sec".format(start_timeout)
        self.logger.error(err_msg)
        exc = MolerException(err_msg)
        connection_observer.set_exception(exception=exc)
        return None
    self.logger.debug("runner submit() returning - future: {}:{}".format(
        instance_id(connection_observer_future), connection_observer_future))
    return connection_observer_future
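
# --- illustrative addition, not part of the source ---
# A minimal, pure-asyncio sketch of the feed-start handshake described in the comments above: the feeding
# coroutine sets an asyncio.Event as its very first action, and the scheduling side awaits that event so
# no connection data can be lost before feed() is really running. No Moler API is assumed here.
import asyncio


async def feed(feed_started):
    feed_started.set()  # "I'm running" - from now on the observer may receive data
    # ... the real feeding loop would go here ...


async def schedule_feed():
    feed_started = asyncio.Event()
    feed_task = asyncio.ensure_future(feed(feed_started))
    await asyncio.wait_for(feed_started.wait(), timeout=0.5)  # block until feed() has really started
    await feed_task


asyncio.run(schedule_feed())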
def error(msg, raise_exception=False):
    MolerTest._was_error = True
    MolerTest._logger.error(msg, extra={'moler_error': True})
    if raise_exception:
        raise MolerException(msg)
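
# --- illustrative addition, not part of the source ---
# Usage sketch, assuming error() is exposed as a static/class method of MolerTest (as its body suggests):
MolerTest.error("Device did not reach expected state")  # log only; remembered via MolerTest._was_error
# MolerTest.error("Device did not reach expected state", raise_exception=True)  # would also raise MolerException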