def on_connection(self):
    # Generator yielding the host connection context; presumably consumed via
    # @contextmanager / `with self.on_connection() as ctx:` -- TODO confirm at call site.
    # Errors raised by the body are classified: RunnerError counts against the
    # fault-tolerance budget, anything else is critical and published globally.
    try:
        with self._lock:
            self.login()
            yield self.content_object
    except RunnerError as e:
        # Non-critical error: remember it and report attempt count vs. allowance.
        self._session_errors.append(e)
        logger.warn(
            "Non critical error {name}; Reason: {error} (Attempt {real} from {allowed})"
            .format(
                name=self.host_alias,
                error=e,
                real=len(self._session_errors),
                allowed=self._fault_tolerance,
            ))
    except Exception as e:
        # Critical error: log and push into the global error registry.
        logger.error(
            "Critical Error {name}; Reason: {error} (Attempt {real} from {allowed})"
            .format(
                name=self.host_alias,
                error=e,
                real=len(self._session_errors),
                allowed=self._fault_tolerance,
            ))
        GlobalErrors().append(e)
    else:
        # Clean pass: clear any errors tolerated so far.
        if len(self._session_errors):
            logger.debug(
                f"Host '{self}': Runtime errors occurred during tolerance period cleared"
            )
            self._session_errors.clear()
    finally:
        # Always release/close the session, success or failure.
        self.exit()
def evaluate_duration(start_ts, expected_end_ts, alias):
    """Warn when an iteration overran its scheduled interval.

    Compares now() against the expected end timestamp and, on overrun, logs a
    warning that recommends raising the interval to the actual duration.

    :param start_ts: datetime when the iteration started
    :param expected_end_ts: datetime when the iteration was expected to end
    :param alias: host alias used to tag the log message
    """
    end_ts = datetime.now()
    if end_ts > expected_end_ts:
        # Hoist the two durations; the original computed the actual duration twice.
        actual = (end_ts - start_ts).total_seconds()
        allowed = (expected_end_ts - start_ts).total_seconds()
        # BUGFIX: message typo "longer then" -> "longer than".
        logger.warn(
            "{}: Execution ({}) took longer than interval ({}); Recommended interval increasing up to {}s"
            .format(alias, actual, allowed, actual))
def resume_plugins(self):
    """Restart every active plugin; log success or the per-plugin failure."""
    for plugin_name, plugin_instance in self._active_plugins.items():
        try:
            plugin_instance.start()
        except Exception as error:
            logger.warn(f"Plugin '{plugin_name}' resume error: {error}")
        else:
            logger.info(f"Plugin '{plugin_name}' resumed", also_console=True)
def __call__(self):
    """Run the stored keyword once; tolerate connections that are not ready yet.

    HandlerExecutionFailed is swallowed with a warning (the caller presumably
    re-invokes this object periodically -- confirm at scheduling site).
    """
    # Removed dead commented-out args-dispatch code; the keyword is always run bare.
    try:
        BuiltIn().run_keyword(self._kw)
    except HandlerExecutionFailed:
        # Pointless f-string prefix removed; message text unchanged.
        logger.warn("Connections still not ready")
def __init__(self, location=DEFAULT_SYSTEM_TRACE_LOG, file_name=DEFAULT_SYSTEM_LOG_FILE, custom_plugins='', **kwargs):
    # Library constructor: builds the dynamic library documentation, initialises
    # both keyword bases, discovers built-in + custom plugin modules and
    # registers them with the PlugIn service.
    #
    # :param location: directory for trace/log output
    # :param file_name: log file name; '.log' suffix appended when missing
    # :param custom_plugins: comma-separated module/file names resolved relative
    #                        to the running suite directory
    # :param kwargs: forwarded to ConnectionKeywords.__init__
    self.__doc__ = """ Remote Monitor CPU (wirth aTop), & Process (with Time) or any other data on linux hosts with custom plugins
    Allow periodical execution of commands set on one or more linux hosts with collecting data within SQL db
    following with some BI activity
    For current phase only data presentation in charts available.

    == Keywords & Usage ==

    {}

    {}

    == BuiltIn plugins ==

    System support following plugins:

    {}

    {}

    {}

    {}

    """.format(ConnectionKeywords.__doc__,
               BIKeywords.__doc__,
               plugins.atop_plugin.__doc__,
               plugins.sshlibrary_plugin.__doc__,
               plugins.time_plugin.__doc__,
               robotframework_portal_addon.__doc__)
    # Normalise the log file name to carry a '.log' suffix.
    file_name = f"{file_name}.log" if not file_name.endswith(
        '.log') else file_name
    ConnectionKeywords.__init__(self, location, file_name, **kwargs)
    BIKeywords.__init__(self, location)
    try:
        # Resolve custom plugin paths relative to the executing suite's folder.
        current_dir = os.path.split(
            BuiltIn().get_variable_value('${SUITE SOURCE}'))[0]
    except RobotNotRunningError:
        # Imported outside a Robot run (e.g. libdoc generation): no suite dir.
        current_dir = ''
    # Load built-in plugins plus any user-supplied ones (comma separated list).
    plugin_modules = load_modules(
        plugins,
        *[pl for pl in re.split(r'\s*,\s*', custom_plugins) if pl != ''],
        base_path=current_dir,
        base_class=SSHLibraryPlugInWrapper)
    db.PlugInService().update(**plugin_modules)
    print_plugins_table(db.PlugInService())
    doc_path = self._get_doc_link()
    # warn level so the LibDoc link is prominent in the Robot log.
    logger.warn(
        f'{self.__class__.__name__} <a href="{doc_path}">LibDoc</a>',
        html=True)
def pause_plugins(self):
    """Stop every active plugin; already-stopped (None) entries are reported only."""
    for plugin_name, plugin_instance in self._active_plugins.items():
        try:
            assert plugin_instance is not None
            plugin_instance.stop()
        except AssertionError:
            logger.info(f"Plugin '{plugin_name}' already stopped")
        except Exception as error:
            logger.warn(f"Plugin '{plugin_name}:{plugin_instance}' pause error: {error}")
        else:
            logger.info(f"Plugin '{plugin_name}' paused", also_console=True)
def _close_ssh_library_connection_from_thread(self):
    """Close the SSH connection from a background thread.

    RuntimeError is swallowed silently; the known SSHLibrary main-thread
    logging error is suppressed with a warning and True is returned; any
    other exception propagates.
    """
    try:
        with self._lock:
            self._ssh.close_connection()
    except RuntimeError:
        pass
    except Exception as error:
        # Guard inverted relative to the original: re-raise anything that is
        # not the known background-logging complaint.
        if 'Logging background messages is only allowed from the main thread' not in str(error):
            raise
        logger.warn(f"Ignore SSHLibrary error: '{error}'")
        return True
def __call__(self, outputs, datetime=None):
    """Parse `time` command output from *outputs* and delegate upward.

    Returns True (skip) while the command has not completed its first
    iteration; otherwise extracts the timestamp from stderr and forwards the
    cleaned outputs to the parent handler.

    :param outputs: dict with at least 'rc' and 'stderr' keys
    :param datetime: overwritten with the timestamp parsed from stderr
    :raises AssertionError: when rc is non-zero for a completed command
    """
    # BUGFIX: stderr may be absent/None; `'...' in None` raised TypeError.
    time_output = outputs.get('stderr') or ''
    rc = outputs.get('rc')
    if 'No such file or directory' in time_output and rc == 1:
        logger.warn("Time command still not completed first iteration")
        return True
    assert rc == 0, f"Error RC occur - {outputs}"
    # stderr layout: "<label>:<timestamp>,<rest of time output>"
    time_stamp, time_output = time_output.split(',', 1)
    _, datetime = time_stamp.split(':', 1)
    outputs.update(**{'stderr': time_output})
    return super().__call__(outputs, datetime=datetime)
def stop(self):
    """Signal the session event and terminate each active plugin in order."""
    try:
        assert self.event
        self.event.set()
        logger.debug(f"Terminating {self.alias}")
        self._configuration.update({'event': None})
        # Snapshot the keys first: plugin_terminate may mutate the registry.
        for plugin_name in list(self._active_plugins.keys()):
            self.plugin_terminate(plugin_name)
    except AssertionError:
        logger.warn(f"Session '{self.alias}' not started yet")
    else:
        logger.info(f"Session '{self.alias}' stopped")
def _init(self):
    """Wire up the data handler DB, log level, file handler and optional DB logging."""
    out_dir = BuiltIn().get_variable_value('${OUTPUT_DIR}')
    db.DataHandlerService().init(os.path.join(out_dir, self.location),
                                 self.file_name, self.cumulative)
    logger.setLevel(BuiltIn().get_variable_value('${LOG LEVEL}'))
    rel_log_file_path = os.path.join(self.location, self.file_name)
    abs_log_file_path = os.path.join(out_dir, self.location, self.file_name)
    logger.set_file_handler(abs_log_file_path)
    if is_truthy(self._log_to_db):
        # Mirror log records into the SQLite store as well.
        db.TableSchemaService().register_table(db.tables.log())
        logger.addHandler(db.services.SQLiteHandler())
    db.DataHandlerService().start()
    logger.warn(f'<a href="{rel_log_file_path}">{self.file_name}</a>',
                html=True)
def generate_chart_data(self, query_results: Iterable[Iterable], extension=None) -> \
        Iterable[Tuple[str, Iterable, Iterable, Iterable[Iterable]]]:
    """Assemble per-process chart tuples from raw query rows.

    Rows are matched by process name contained in column 7; columns 0..5 feed
    the chart. Processes without any matching rows are skipped with a warning.

    :param query_results: iterable of DB rows (indexable, >= 8 columns)
    :param extension: unused here; kept for interface compatibility
    :return: list of (process, x_axes, y_axes, data) tuples
    """
    result = []
    for plugin, processes in ProcessMonitorRegistry().items():
        # Snapshot the keys (the original built a throwaway list comprehension).
        for process in list(processes):
            data = [entry[0:6] for entry in query_results if process in entry[7]]
            if not data:
                logger.warn(f"Process '{process}' doesn't have monitor data")
                continue
            result.append((process, self.x_axes(data), self.y_axes(), data))
    return result
def login(self):
    """Open the SSH connection and retry login until success or timeout.

    Uses certificate auth when one is configured, user/password otherwise.

    :raises paramiko.AuthenticationException: propagated immediately (no retry)
    :raises TimeoutError: when login has not succeeded within parameters.timeout
    """
    host = self.parameters.host
    port = self.parameters.port
    username = self.parameters.username
    password = self.parameters.password
    certificate = self.parameters.certificate
    if len(self._session_errors) == 0:
        logger.info(f"Host '{self.host_alias}': Connecting")
    else:
        logger.warn(
            f"Host '{self.host_alias}': Restoring at {len(self._session_errors)} time"
        )
    self._ssh.open_connection(host, repr(self), port)
    start_ts = datetime.now()
    while True:
        try:
            if certificate:
                logger.debug(
                    f"Host '{self.host_alias}': Login with user/certificate")
                self._ssh.login_with_public_key(username, certificate, '')
            else:
                logger.debug(
                    f"Host '{self.host_alias}': Login with user/password")
                self._ssh.login(username, password)
        except paramiko.AuthenticationException:
            # Credential problem: retrying cannot help.
            raise
        except Exception as e:
            logger.warn(
                f"Host '{self.host_alias}': Connection failed; Reason: {e}")
        else:
            self._is_logged_in = True
            logger.info(f"Host '{self.host_alias}': Connection established")
            break
        finally:
            duration = (datetime.now() - start_ts).total_seconds()
            # BUGFIX: `finally` also runs after the success `break`, so the
            # original raised TimeoutError even when login had just succeeded
            # past the deadline. Only enforce the timeout while not logged in.
            # getattr guards against the attribute not existing before the
            # first successful login -- confirm it is initialised in __init__.
            if not getattr(self, '_is_logged_in', False) \
                    and duration >= self.parameters.timeout:
                raise TimeoutError(
                    f"Cannot connect to '{self.host_alias}' during {self.parameters.timeout}s"
                )
def _run_command(self, context_object, flow: Enum):
    """Execute the command set bound to *flow* against the given context.

    An empty command set is tolerated with a warning; any command failure is
    wrapped into RunnerError.
    """
    collected = []
    try:
        flow_commands = getattr(self, flow.value)
        if len(flow_commands) == 0:
            raise EmptyCommandSet()
        logger.debug(f"Iteration {flow.name} started")
        for cmd in flow_commands:
            run_status = cmd(context_object, **self.parameters)
            collected.append("{} [Result: {}]".format(cmd, run_status))
            sleep(0.05)
    except EmptyCommandSet:
        logger.warn(f"Iteration {flow.name} ignored")
    except Exception as e:
        raise RunnerError(f"{self}", f"Command set '{flow.name}' failed", e)
    else:
        logger.info(f"Iteration {flow.name} completed\n" + '\n'.join(collected))
def start(self, event=None):
    """Create missing tables (fresh DB only) and launch the data-handler thread.

    :param event: synchronization Event shared with the handler; a fresh Event
                  is created when omitted.

    BUGFIX: the original signature was `event=Event()` -- a mutable default
    evaluated once at definition time and silently shared across every call.
    """
    if event is None:
        event = Event()
    if self._db.is_new:
        for name, table in TableSchemaService().tables.items():
            try:
                assert not self._db.table_exist(
                    table.name), f"Table '{name}' already exists"
                self._db.execute(
                    sql_engine.create_table_sql(table.name, table.fields,
                                                table.foreign_keys))
            except AssertionError as e:
                # Table already present: not fatal, keep going.
                logger.warn(f"{e}")
            except Exception as e:
                logger.error(f"Cannot create table '{name}' -> Error: {e}")
                raise
    self._event = event
    dh = Thread(name='DataHandler', target=self._data_handler, daemon=True)
    dh.start()
    self._threads.append(dh)
def _persistent_worker(self):
    # Long-running worker loop: while the session should continue, open a
    # connection context, run Setup once, then run the Command flow on a
    # fixed interval until stop is requested, and finish with Teardown.
    # RunnerError during an iteration is tolerated (counted against the
    # fault-tolerance budget); a clean iteration clears accumulated errors.
    logger.info(f"\nPlugIn '{self}' started")
    while self.is_continue_expected:
        with self.on_connection() as context:
            self._run_command(context, self.flow_type.Setup)
            logger.info(f"Host {self}: Setup completed", also_console=True)
            while self.is_continue_expected:
                try:
                    start_ts = datetime.now()
                    # Interval of None means "run back-to-back" (zero delta).
                    _timedelta = timedelta(seconds=self.parameters.interval) \
                        if self.parameters.interval is not None else timedelta(seconds=0)
                    next_ts = start_ts + _timedelta
                    self._run_command(context, self.flow_type.Command)
                    if self.parameters.interval is not None:
                        # Warn if the iteration overran the configured interval.
                        evaluate_duration(start_ts, next_ts, self.host_alias)
                    # Sleep in 0.5s slices so a stop request is noticed quickly.
                    while datetime.now() < next_ts:
                        if not self.is_continue_expected:
                            break
                        sleep(0.5)
                except RunnerError as e:
                    self._session_errors.append(e)
                    logger.warn(
                        "Error execute on: {name}; Reason: {error} (Attempt {real} from {allowed})"
                        .format(
                            name=str(self),
                            error=e,
                            real=len(self._session_errors),
                            allowed=self._fault_tolerance,
                        ))
                else:
                    # Clean iteration: forgive previously tolerated errors.
                    if len(self._session_errors):
                        logger.debug(
                            f"Host '{self}': Runtime errors occurred during tolerance period cleared"
                        )
                    self._session_errors.clear()
                sleep(2)
            self._run_command(context, self.flow_type.Teardown)
            logger.info(f"Host {self}: Teardown completed", also_console=True)
        sleep(2)
    logger.info(f"PlugIn '{self}' stopped")
def plugin_terminate(self, plugin_name, **options):
    """Stop every plugin matching *plugin_name*.

    Plugins that never produced monitor data are reported with a warning;
    a name that matches nothing is reported via the outer AssertionError path.
    """
    try:
        matched = self.get_plugin(plugin_name, **options)
        assert len(matched) > 0, f"Plugins '{plugin_name}' not matched in list"
        stop_timeout = options.get('timeout', None)
        for matched_plugin in matched:
            try:
                matched_plugin.stop(timeout=stop_timeout)
                assert matched_plugin.iteration_counter > 0
            except AssertionError:
                logger.warn(
                    f"Plugin '{matched_plugin}' didn't got monitor data during execution"
                )
    except (AssertionError, IndexError) as e:
        logger.info(
            f"Plugin '{plugin_name}' raised error: {type(e).__name__}: {e}")
    else:
        logger.info(f"PlugIn '{plugin_name}' gracefully stopped",
                    also_console=True)
def load_modules(*modules, **options):
    """Collect plugin classes from files, directories, modules or classes.

    :param modules: mix of path strings (file or directory, resolved against
                    base_path), module objects, or classes; None entries are skipped
    :param options: base_class -- filter for collected classes;
                    base_path -- root for relative paths (default: cwd)
    :return: dict mapping class name -> class
    """
    base_class = options.get('base_class')
    base_path = options.get('base_path', os.getcwd())
    result_modules = {}
    for module_ in modules:
        if module_ is None:
            # Skip inline instead of pre-filtering through a throwaway list.
            continue
        if isinstance(module_, str):
            # Compute the resolved path once (the original normpath'd it twice).
            candidate = os.path.normpath(os.path.join(base_path, module_))
            if os.path.isfile(candidate):
                result_modules.update(
                    load_classes_from_module_by_name(base_path, module_, base_class))
            else:
                result_modules.update(
                    load_classes_from_module_from_dir(candidate, base_class))
        elif ismodule(module_):
            for name, class_ in get_class_from_module(module_, base_class).items():
                if name in result_modules:
                    logger.warn(f"Module '{result_modules[name]}' overloaded with '{class_}'")
                result_modules[name] = class_
        elif isclass(module_):
            result_modules[module_.__name__] = module_
    return result_modules
def generate_module_statistics(self, period=None, plugin=None, alias=None, **options):
    """
    Generate Chart for present monitor data in visual style

    Arguments:
    - period: time period to chart (passed to the period-mark lookup)
    - plugin: plugin name filter for the module's chart plugins
    - alias:  host connection alias to resolve the module
    - options: forwarded to plugin lookup and chart title composition
    :Return - html link to chart file

    Note: In case report portal used chart files will be uploaded into lunch report
    (See in `Report Portal integration`)
    """
    # Make sure the image output directory exists before rendering.
    if not os.path.exists(self._image_path):
        os.makedirs(self._image_path, exist_ok=True)
    module: HostModule = HostRegistryCache().get_connection(alias)
    chart_plugins = module.get_plugin(plugin, **options)
    chart_title = self._create_chart_title(period, plugin, f"{module}", **options)
    # period marks narrow the SQL query to the requested time window.
    marks = _get_period_marks(period, module.host_id) if period else {}
    body_data = []
    for plugin in chart_plugins:
        for chart in plugin.affiliated_charts():
            try:
                sql_query = chart.compose_sql_query(host_name=plugin.host_alias, **marks)
                logger.debug("{}{}\n{}".format(plugin.type,
                                               f'_{period}' if period is not None else '',
                                               sql_query))
                sql_data = db.DataHandlerService().execute(sql_query)
                for picture_name, file_path in generate_charts(chart, sql_data, self._image_path,
                                                               prefix=chart_title):
                    # Image paths are stored relative to the HTML log location.
                    relative_image_path = os.path.relpath(file_path,
                                                          os.path.normpath(
                                                              os.path.join(self._output_dir,
                                                                           self._log_path)))
                    body_data.append((picture_name, relative_image_path))
                    upload_file_to_portal(picture_name, file_path)
            except Exception as e:
                # One failed chart must not abort the remaining charts.
                logger.error(f"Error: {e}")
    html_link_path = create_html(self._output_dir, self._log_path, chart_title, *body_data)
    html_link_text = f"Chart for <a href=\"{html_link_path}\">'{chart_title}'</a>"
    # warn level so the chart link is prominent in the Robot log.
    logger.warn(html_link_text, html=True)
    return html_link_text
def queue(self):
    """Return the active queue, or a throwaway empty queue of the same class
    once the stop event has been set (new data must not be enqueued)."""
    # BUGFIX/modernisation: Event.isSet() is the deprecated camelCase alias
    # (deprecated since Python 3.10); use is_set(). Pointless f-prefix dropped.
    if self._event.is_set():
        logger.warn("Stop invoked; new data cannot be enqueued")
        return self._queue.__class__()
    return self._queue
def register(self, plugin_id, name):
    """Register process *name* under *plugin_id*, skipping duplicates."""
    already_known = self.get(plugin_id)
    if name in already_known:
        logger.warn(f"Process '{name}' already registered in {plugin_id}")
        return
    self[plugin_id].update({name: {}})
    logger.debug(f"Process '{name}' registered in {plugin_id}")