def _data_handler(self):
        """Background worker: drain queued items and execute their SQL inserts.

        Loops until the stop event is set AND the queue is empty; each item
        supplies (insert_sql, rows) via item.sql_data and gets the execute()
        result stored back on item.result. Errors per item are logged, never
        raised, so one bad item cannot kill the worker thread.
        """
        logger.debug(
            f"{self.__class__.__name__} Started with event {id(self._event)}")
        while True:
            if self.queue.empty():
                if self._event.is_set():  # is_set(): isSet() is a deprecated alias
                    break
                # BUGFIX: wait briefly instead of busy-spinning at 100% CPU
                # while the queue is empty (bare `continue` burned a core).
                self._event.wait(0.1)
                continue

            item = self.queue.get()
            try:
                logger.debug(
                    f"Deque item: '{item}' (Current queue size {self.queue.qsize()})"
                )
                insert_sql_str, rows = item.sql_data
                result = self.execute(
                    insert_sql_str,
                    rows) if rows else self.execute(insert_sql_str)
                item.result = result
                # BUGFIX: `rows or []` — rows may be None/falsy here, and
                # joining over None raised TypeError, mislabeling a handled
                # item as an unexpected error.
                logger.debug("Insert item: {}\n\t{}\n\t{}".format(
                    type(item).__name__, insert_sql_str,
                    '\n\t'.join([str(r) for r in rows or []])))
            except Exception as e:
                f, l = get_error_info()
                logger.error(
                    f"Unexpected error occurred on {type(item).__name__}: {e}; File: {f}:{l}"
                )
            else:
                logger.debug(
                    f"Item {type(item).__name__} successfully handled")
        logger.debug("Background task stopped invoked")
# --- Ejemplo n.º 2 (scraper-site separator; vote count: 0) ---
def unregister_logger_thread(thread_name):
    """Remove `thread_name` from Robot Framework's logging-enabled threads."""
    logger.debug(f'De-register logger thread: {thread_name}')
    librarylogger.LOGGING_THREADS = tuple(
        name for name in librarylogger.LOGGING_THREADS if name != thread_name)
 def get(self) -> Any:
     """Thread-safely dequeue and return the oldest item; Empty() if none queued."""
     with self._lock:
         try:
             head = self[0]
         except IndexError:
             # Nothing buffered — signal with the Empty marker object.
             return Empty()
         logger.debug(f"Item '{id(head)}' dequeued")
         return super().pop(0)
    def on_connection(self):
        """Yield the session's content object inside a login/exit lifecycle.

        Generator used as a connection scope (presumably wrapped by
        contextlib.contextmanager at the decoration site -- TODO confirm):
        logs in under the session lock, yields the working object, and always
        calls self.exit() on the way out.

        RunnerError is treated as non-critical: it is accumulated in
        self._session_errors (up to self._fault_tolerance attempts). Any other
        exception is logged as critical and appended to GlobalErrors.
        """
        try:
            with self._lock:
                self.login()

                yield self.content_object
        except RunnerError as e:
            # Non-critical: record the error and keep the session alive.
            self._session_errors.append(e)
            logger.warn(
                "Non critical error {name}; Reason: {error} (Attempt {real} from {allowed})"
                .format(
                    name=self.host_alias,
                    error=e,
                    real=len(self._session_errors),
                    allowed=self._fault_tolerance,
                ))
        except Exception as e:
            # Critical: record globally; _session_errors is left untouched.
            logger.error(
                "Critical Error {name}; Reason: {error} (Attempt {real} from {allowed})"
                .format(
                    name=self.host_alias,
                    error=e,
                    real=len(self._session_errors),
                    allowed=self._fault_tolerance,
                ))
            GlobalErrors().append(e)
        else:
            # Clean pass: clear errors accumulated during the tolerance period.
            if len(self._session_errors):
                logger.debug(
                    f"Host '{self}': Runtime errors occurred during tolerance period cleared"
                )
            self._session_errors.clear()
        finally:
            self.exit()
 def add_data_unit(self, item: DataUnit):
     """Enqueue a DataUnit, first stamping PlugInTable items with a cached TL_ID."""
     if isinstance(item.table, PlugInTable):
         # Plugin-table rows must carry the timeline id for their timestamp.
         item(TL_ID=cache_timestamp(item.timestamp))
         logger.debug(f"Item updated: {item.sql_data}")
     self.queue.put(item)
     logger.debug(
         f"Item enqueued: '{item}' (Current queue size {self.queue.qsize()})"
     )
 def stop(self, timeout=5):
     """Signal shutdown and join every worker thread.

     Arguments:
     - timeout: seconds granted to each thread's join() call.
     """
     if self._event:
         self._event.set()
     while len(self._threads) > 0:
         th = self._threads.pop(0)
         try:
             th.join(timeout)
             # BUGFIX: Thread.join() returns silently when the timeout
             # expires, so verify the thread actually terminated before
             # claiming a graceful stop.
             if th.is_alive():
                 logger.error(
                     f"Thread '{th.name}' gracefully stop failed; Error raised: "
                     f"still alive after {timeout}s join timeout"
                 )
             else:
                 logger.debug(f"Thread '{th.name}' gracefully stopped")
         except Exception as e:
             logger.error(
                 f"Thread '{th.name}' gracefully stop failed; Error raised: {e}"
             )
# --- Ejemplo n.º 7 (scraper-site separator; vote count: 0) ---
    def _get_os_name(self, ssh_client: SSHLibrary):
        """Resolve the remote host's OS family from /etc/os-release ID_LIKE."""
        stdout, _, exit_code = ssh_client.execute_command(
            "cat /etc/os-release|grep -E '^ID_LIKE='|awk -F'=' '{print$2}'",
            return_rc=True,
            return_stderr=True)
        assert exit_code == 0, "Cannot occur OS name"
        os_name = stdout.replace('"', '')

        # Collapse the raw ID_LIKE value to the first known key it mentions;
        # fall back to the raw value when nothing matches.
        os_name = next(
            (known for known in self.OS_DATE_FORMAT if known in os_name),
            os_name)

        logger.debug(f"OS resolved: {os_name}")
        return os_name
 def stop(self):
     """Stop the session: fire the stop event and terminate all active plugins."""
     try:
         assert self.event
         self.event.set()
         logger.debug(f"Terminating {self.alias}")
         self._configuration.update({'event': None})
         # Snapshot the keys first: plugin_terminate mutates _active_plugins.
         for plugin in list(self._active_plugins.keys()):
             self.plugin_terminate(plugin)
         # self._control_th.join()
     except AssertionError:
         logger.warn(f"Session '{self.alias}' not started yet")
     else:
         logger.info(f"Session '{self.alias}' stopped")
# --- Ejemplo n.º 9 (scraper-site separator; vote count: 0) ---
    def upgrade_plugin(self, *args, **kwargs):
        """
        Upgrade aTop plugin: add processes for monitor during execution

        Arguments:
        - args:     process names

        Future features:
        - kwargs:   process names following boolean flag (Default: False; If True error will raise if process disappearing)
        """
        # Positional names get the default False control flag.
        kwargs.update(dict.fromkeys(args, False))
        registry = ProcessMonitorRegistry()
        for process, control in kwargs.items():
            registry.activate(self.id, process, control)
            logger.debug(f"Process '{process}' activated")
        summary = ', '.join(f'{k}={v}' for k, v in kwargs.items())
        logger.info(f"Start monitor following processes: {summary}")
# --- Ejemplo n.º 10 (scraper-site separator; vote count: 0) ---
    def login(self):
        """Open the SSH connection and authenticate, retrying until timeout.

        Retries any non-authentication failure until parameters.timeout
        seconds have elapsed since the first attempt.

        Raises:
        - paramiko.AuthenticationException: rejected credentials (no retry).
        - TimeoutError: no attempt succeeded within parameters.timeout.
        """
        host = self.parameters.host
        port = self.parameters.port
        username = self.parameters.username
        password = self.parameters.password
        certificate = self.parameters.certificate

        if len(self._session_errors) == 0:
            logger.info(f"Host '{self.host_alias}': Connecting")
        else:
            logger.warn(
                f"Host '{self.host_alias}': Restoring at {len(self._session_errors)} time"
            )

        self._ssh.open_connection(host, repr(self), port)

        start_ts = datetime.now()
        logged_in = False
        while not logged_in:
            try:
                if certificate:
                    logger.debug(
                        f"Host '{self.host_alias}': Login with user/certificate"
                    )
                    self._ssh.login_with_public_key(username, certificate, '')
                else:
                    logger.debug(
                        f"Host '{self.host_alias}': Login with user/password")
                    self._ssh.login(username, password)
            except paramiko.AuthenticationException:
                # Bad credentials will not improve with retries.
                raise
            except Exception as e:
                logger.warn(
                    f"Host '{self.host_alias}': Connection failed; Reason: {e}"
                )
            else:
                logged_in = True
                self._is_logged_in = True
                logger.info(
                    f"Host '{self.host_alias}': Connection established")
            finally:
                # BUGFIX: only enforce the timeout while still unauthenticated.
                # `finally` runs even on the success path, so previously a
                # slow-but-successful login raised a spurious TimeoutError.
                duration = (datetime.now() - start_ts).total_seconds()
                if not logged_in and duration >= self.parameters.timeout:
                    raise TimeoutError(
                        f"Cannot connect to '{self.host_alias}' during {self.parameters.timeout}s"
                    )
    def upload(self, output, max_workers: int = DEFAULT_MAX_WORKERS):
        """Cache `output` lines and register them in the LinesCacheMap table.

        Arguments:
        - output: data whose lines are cached (exact structure defined by the
          line-cache helpers -- TODO confirm).
        - max_workers: when > 1 the lines are cached concurrently.
        :Return - self.output_ref, the OUTPUT_REF id for the cached lines.
        """
        logger.debug(
            f"Cache invoked {'concurrently' if max_workers > 1 else 'as sequence'}"
        )
        lines_cache = list(
            self.concurrent_lines_cache(output, max_workers)
            if max_workers > 1 else self.sequence_line_cache(output))

        # A None first element appears to mark lines without an existing
        # output mapping; only then is a fresh OUTPUT_REF allocated and the
        # mapping rows inserted. NOTE(review): otherwise the previously stored
        # self.output_ref is returned unchanged -- confirm that is intended.
        if any([_ref[0] is None for _ref in lines_cache]):
            output_data = DataHandlerService().execute(TableSchemaService(
            ).tables.LinesCacheMap.queries.last_output_id.sql)
            # [(None,)] means the table is empty -> numbering starts at 0.
            self.output_ref = output_data[0][0] + 1 if output_data != [(None, )
                                                                       ] else 0
            DataHandlerService().execute(
                insert_sql('LinesCacheMap',
                           ['OUTPUT_REF', 'ORDER_ID', 'LINE_REF']),
                [[self.output_ref] + lr[1:] for lr in lines_cache])
        return self.output_ref
 def _run_command(self, context_object, flow: Enum):
     """Execute the command set registered for `flow` against context_object.

     An empty command set is logged and ignored; any other failure is wrapped
     in RunnerError.
     """
     outputs = []
     try:
         commands = getattr(self, flow.value)
         if len(commands) == 0:
             raise EmptyCommandSet()
         logger.debug(f"Iteration {flow.name} started")
         for cmd in commands:
             run_status = cmd(context_object, **self.parameters)
             outputs.append("{} [Result: {}]".format(cmd, run_status))
             sleep(0.05)
     except EmptyCommandSet:
         logger.warn(f"Iteration {flow.name} ignored")
     except Exception as e:
         raise RunnerError(f"{self}", f"Command set '{flow.name}' failed",
                           e)
     else:
         total_output = '\n'.join(outputs)
         logger.info(f"Iteration {flow.name} completed\n{total_output}")
 def _persistent_worker(self):
     """Main plugin loop: Setup once per connection, poll Command flow on an
     interval, then Teardown.

     Runs until is_continue_expected turns False. Each outer iteration opens a
     connection scope (on_connection) and drives the inner polling loop inside
     it; RunnerError during polling is tolerated and counted in
     self._session_errors.
     """
     logger.info(f"\nPlugIn '{self}' started")
     while self.is_continue_expected:
         with self.on_connection() as context:
             self._run_command(context, self.flow_type.Setup)
             logger.info(f"Host {self}: Setup completed", also_console=True)
             while self.is_continue_expected:
                 try:
                     start_ts = datetime.now()
                     # interval=None means "run back-to-back" (zero delta).
                     _timedelta = timedelta(seconds=self.parameters.interval) \
                         if self.parameters.interval is not None else timedelta(seconds=0)
                     next_ts = start_ts + _timedelta
                     self._run_command(context, self.flow_type.Command)
                     if self.parameters.interval is not None:
                         evaluate_duration(start_ts, next_ts,
                                           self.host_alias)
                     # Sleep in 0.5s slices so a stop request is noticed quickly.
                     while datetime.now() < next_ts:
                         if not self.is_continue_expected:
                             break
                         sleep(0.5)
                 except RunnerError as e:
                     # Tolerated failure: count it and keep polling.
                     self._session_errors.append(e)
                     logger.warn(
                         "Error execute on: {name}; Reason: {error} (Attempt {real} from {allowed})"
                         .format(
                             name=str(self),
                             error=e,
                             real=len(self._session_errors),
                             allowed=self._fault_tolerance,
                         ))
                 else:
                     # A clean pass clears errors accumulated during the
                     # tolerance period.
                     if len(self._session_errors):
                         logger.debug(
                             f"Host '{self}': Runtime errors occurred during tolerance period cleared"
                         )
                         self._session_errors.clear()
             sleep(2)
             self._run_command(context, self.flow_type.Teardown)
             logger.info(f"Host {self}: Teardown completed",
                         also_console=True)
     sleep(2)
     logger.info(f"PlugIn '{self}' stopped")
# --- Ejemplo n.º 14 (scraper-site separator; vote count: 0) ---
 def __call__(self, ssh_client: SSHLibrary, **runtime_options) -> Any:
     """Run the wrapped SSH method, optionally parse its output, and return it."""
     options_repr = ', '.join(f'{k}={v}' for k, v in self._ssh_options.items())
     if self._command is not None:
         # Render the command template with this invocation's options.
         command = self.command_template.format(**runtime_options)
         logger.debug(
             f"Executing: {self._method.__name__}({command}, {options_repr})")
         output = self._method(ssh_client, command, **self._ssh_options)
     else:
         logger.debug(f"Executing: {self._method.__name__}({options_repr})")
         output = self._method(ssh_client, **self._ssh_options)
     # A parser short-circuits: the variable setter is skipped in that case.
     if self.parser:
         return self.parser(dict(self._result_template(output)))
     if self.variable_setter:
         self.variable_setter(output)
     return output
# --- Ejemplo n.º 15 (scraper-site separator; vote count: 0) ---
    def generate_module_statistics(self, period=None, plugin=None, alias=None, **options):
        """
        Generate Chart for present monitor data in visual style

        Arguments:
        - period:   optional period name used to select time marks
        - plugin:   plugin name to chart (None presumably means all -- TODO confirm)
        - alias:    host connection alias to look up in the registry
        - options:  forwarded to plugin lookup and chart title creation
        :Return - html link to chart file

        Note: In case report portal used chart files will be uploaded into lunch report (See in `Report Portal integration`)
        """
        if not os.path.exists(self._image_path):
            os.makedirs(self._image_path, exist_ok=True)

        module: HostModule = HostRegistryCache().get_connection(alias)
        chart_plugins = module.get_plugin(plugin, **options)
        chart_title = self._create_chart_title(period, plugin, f"{module}", **options)
        marks = _get_period_marks(period, module.host_id) if period else {}

        body_data = []
        # BUGFIX: loop variable renamed from `plugin` -- it shadowed (and
        # clobbered) the `plugin` argument used above to build the title.
        for chart_plugin in chart_plugins:
            for chart in chart_plugin.affiliated_charts():
                try:
                    sql_query = chart.compose_sql_query(host_name=chart_plugin.host_alias, **marks)
                    logger.debug("{}{}\n{}".format(chart_plugin.type, f'_{period}' if period is not None else '', sql_query))
                    sql_data = db.DataHandlerService().execute(sql_query)
                    for picture_name, file_path in generate_charts(chart, sql_data, self._image_path, prefix=chart_title):
                        # Links in the HTML report must be relative to the log dir.
                        relative_image_path = os.path.relpath(file_path, os.path.normpath(
                            os.path.join(self._output_dir, self._log_path)))
                        body_data.append((picture_name, relative_image_path))
                        upload_file_to_portal(picture_name, file_path)
                except Exception as e:
                    # A failing chart must not abort the remaining charts.
                    logger.error(f"Error: {e}")

        html_link_path = create_html(self._output_dir, self._log_path, chart_title, *body_data)
        html_link_text = f"Chart for <a href=\"{html_link_path}\">'{chart_title}'</a>"
        logger.warn(html_link_text, html=True)
        return html_link_text
# --- Ejemplo n.º 16 (scraper-site separator; vote count: 0) ---
 def generate_chart_data(self, query_results: Iterable[Iterable]) \
         -> List[Tuple[str, Iterable, Iterable, Iterable[Iterable]]]:
     """Group query rows by section type and shape them into chart tuples
     of (type, x_axes, y_axes, data); errors are logged per type, not raised.
     """
     result = []
     # Types of interest: first column values starting with a known section.
     wanted_types = {
         row[0] for row in query_results
         if any(row[0].startswith(section) for section in self._sections)
     }
     for type_ in wanted_types:
         try:
             rows = [row[1:] for row in query_results if row[0] == type_]
             x_axes = self.x_axes(rows, 1)
             y_axes = self.y_axes(rows)
             # Drop the two leading columns, then clip to the y-axis count.
             trimmed = [row[2:][0:len(y_axes)] for row in rows]
             logger.debug(
                 "Create chart data: {}\n{}\n{}\n{} entries".format(
                     type_, x_axes, y_axes, len(trimmed)))
             result.append((f"{type_}", x_axes, y_axes, trimmed))
         except Exception as e:
             f, l = get_error_info()
             logger.error(f"Chart generation error: {e}; File: {f}:{l}")
     return result
# --- Ejemplo n.º 17 (scraper-site separator; vote count: 0) ---
def register_logger_thread(thread_name):
    """Add `thread_name` to Robot Framework's logging-enabled thread tuple."""
    librarylogger.LOGGING_THREADS = (*librarylogger.LOGGING_THREADS,
                                     thread_name)
    logger.debug(f'Register logger thread: {thread_name}')
# --- Ejemplo n.º 18 (scraper-site separator; vote count: 0) ---
 def register(self, plugin_id, name):
     """Register process `name` under `plugin_id`; no-op when already present."""
     if name in self.get(plugin_id):
         logger.warn(f"Process '{name}' already registered in {plugin_id}")
         return
     self[plugin_id].update({name: {}})
     logger.debug(f"Process '{name}' registered in {plugin_id}")
# --- Ejemplo n.º 19 (scraper-site separator; vote count: 0) ---
def load_classes_from_module_by_name(path, module_name, base_class=None):
    """Import `module_name` from `path` and return its classes matching base_class."""
    module_path = path_join(path, module_name)
    logger.debug(f"[ 'RemoteMonitorLibrary' ] Load Module: {module_path}")
    module = Importer("RemoteMonitorLibrary").import_class_or_module(module_path)
    return get_class_from_module(module, base_class)