Ejemplo n.º 1
0
async def get_porter_outputs(msg_body: dict,
                             message_logger_adapter: logging.LoggerAdapter,
                             config: dict):
    """
    Run a Porter outputs command and return its parsed JSON result.

    Returns a (success, outputs) pair: (False, "") when the Porter
    invocation fails, otherwise (True, outputs_json) where outputs_json
    is {} if stdout could not be decoded as JSON.
    """
    command = await build_porter_command_for_outputs(msg_body)
    returncode, stdout, err = await run_porter(command,
                                               message_logger_adapter, config)

    if returncode != 0:
        # Collapse newlines so the whole error lands on a single log line.
        error_message = "Error context message = " + " ".join(err.split('\n'))
        message_logger_adapter.info(
            f"{get_installation_id(msg_body)}: Failed to get outputs with error = {error_message}"
        )
        return False, ""

    outputs_json = {}
    try:
        outputs_json = json.loads(stdout)
        message_logger_adapter.info(f"Got outputs as json: {outputs_json}")
    except ValueError:
        # Invalid JSON is tolerated: we still report success with {}.
        message_logger_adapter.error(f"Got outputs invalid json: {stdout}")

    return True, outputs_json
Ejemplo n.º 2
0
def authenticate_view(request: HttpRequest) -> HttpResponse:
    """
    Begin an OAuth authorization-code flow.

    Stores fresh state/timestamp/client-ip values in the session (unless
    the stored state is the literal "test" sentinel), then redirects the
    user agent to the provider's authorize endpoint. Renders a 500 page
    when the OAuth client settings are incomplete.
    """
    state_str = uuid4().hex
    client_ip, _ = get_client_ip(request)

    session = request.session
    if session.get("OAC_STATE_STR") != "test":
        session["OAC_STATE_STR"] = state_str
        session["OAC_STATE_TIMESTAMP"] = timezone.now().timestamp()
        session["OAC_CLIENT_IP"] = client_ip or "unknown"

    logger = LoggerAdapter(
        getLogger(__package__),
        get_extra(
            "views.authenticate_view",
            session["OAC_CLIENT_IP"],
            session["OAC_STATE_STR"],
        ),
    )
    logger.info("authentication request")

    try:
        # Accessing oac_settings attributes may raise ConfigurationError.
        authorize_url = (
            f"{oac_settings.AUTHORIZE_URI}"
            f"?scope=openid"
            f"&client_id={oac_settings.CLIENT_ID}"
            f"&redirect_uri={oac_settings.REDIRECT_URI}"
            f"&state={state_str}"
            "&response_type=code"
        )
        ret = redirect(authorize_url)
    except ConfigurationError as err:
        logger.error(str(err))
        ret = render(
            request,
            TEMPLATES_DIR / "500.html",
            {"message": "App config is incomplete, cannot continue."},
            status=500,
        )
    return ret
Ejemplo n.º 3
0
    def build_formatter_callable(self):
        """
        Description:
            Encapsulate some logic needed to create the callable used as the Attribute
            Formatter callable.

        Output:
            The render-method callable (a functools.partial when resolved from a
            name), or None when a named render method cannot be resolved.
        """
        log = LoggerAdapter(
            logger, {'name_ext': 'AttributeModel.build_formatter_callable'})
        log.debug("entering: {}".format(self))

        if self.uses_named_render_method():
            try:
                log.debug(
                    'Getting reference to formatter callable from formatter name'
                )
                #- TODO: instantiate / get references to named arguments
                #- NOTE(review): eval() on a configured name executes arbitrary
                #- code if render_method_name is untrusted; prefer a registry
                #- or whitelist lookup.
                render_method = functools.partial(eval(self.render_method_name),\
                    source=self.source_object)
            except NameError:
                #- Bug fix: .format() was never called here, so the log line
                #- previously contained a literal '{}' instead of the name.
                log.error(
                    "render_method_name: '{}' seems invalid. Skipping render_method"
                    .format(self.render_method_name))
                render_method = None
        else:
            log.debug(
                'Getting reference to formatter callable from explicit callable'
            )
            render_method = self.render_method

        log.debug("exiting")
        return render_method
Ejemplo n.º 4
0
def set_up_config(logger_adapter: logging.LoggerAdapter) -> Optional[dict]:
    """
    Load the application configuration.

    Exits the process with status 1 (after logging) when a required
    environment variable is missing; otherwise returns the config dict.
    """
    try:
        return get_config(logger_adapter)
    except KeyError as e:
        logger_adapter.error(
            f"Environment variable {e} is not set correctly...Exiting")
        sys.exit(1)
Ejemplo n.º 5
0
    def _lookup(self, namespace_id, follow_symrefs=True):
        """
        Description:
            Get an object in the namespace by its namespace id
        Input:
            namespace_id: id of the object to retrieve
            follow_symrefs: whether or not to try to perform a deep lookup
                deep lookups can contain other NSIDs which will be looked up in turn until
                a final value is found and returned. For this feature to be enabled, this
                node's ._nsroot attribute must also be a valid NamespaceNode-like object
                that supports a lookup() method that will be passed an NSID.

        Output:
            item if found, else NamespaceLookupError raised
        """
        log = LoggerAdapter(logger, {'name_ext': 'NamespaceNode._lookup'})
        log.debug('[{}] lookup([{}])'.format(self._nsid, namespace_id))
        obj = None

        #- the requested id may itself be a symbolic reference: resolve it
        #- directly instead of walking attribute by attribute
        if follow_symrefs and is_nsid_ref(namespace_id):
            value = self._lookup_symbolic_ref(namespace_id)
            return value

        else:
            #- split the NSID by path seperator (normally a dot)
            path = self._name_to_path(namespace_id)
            #- fully qualified NSID or not?
            if self._nsroot and path[0] == self._nsroot._nsid:
                #- lookup fully qualified NSIDs using the root node
                #- (strip the root component and delegate the remainder)
                next_nsid = '.'.join(path[1:])
                return self._nsroot.lookup(next_nsid,
                                           follow_symrefs=follow_symrefs)
            else:
                #- lookup relative NSIDs iteratively from current
                obj = self
                for name in path:
                    try:
                        obj = getattr(obj, name)

                        #- intermediate values may also be symbolic refs;
                        #- resolving one ends the walk early
                        if follow_symrefs and is_nsid_ref(obj):
                            return self._lookup_symbolic_ref(obj)

                    except AttributeError as err:
                        log.error(
                            'thewired Failed to find value for [{}] in [{}]'.
                            format(namespace_id, self._nsid))
                        raise NamespaceLookupError("{}.{}".format(
                            self._nsid, namespace_id)) from err

            return obj
Ejemplo n.º 6
0
def parse_robots_txt(
        records: Iterable[Iterable[Tuple[int, str, str]]],
        logger: LoggerAdapter) -> Mapping[str, Iterable[Tuple[bool, str]]]:
    """Parses C{robots.txt} records.

    @param records:
        Tokenized records as produced by L{scan_robots_txt}.
    @param logger:
        Problems found while parsing are logged here.
    @return:
        rules_map: C{{ user_agent: (allowed, url_prefix)* }}
        Maps each case-folded user agent name to its allow/disallow
        rules, where C{allowed} is C{True} iff the user agent may visit
        URLs starting with C{url_prefix}.
    """
    rules_map: Dict[str, Iterable[Tuple[bool, str]]] = {}
    unknown_fields: Set[str] = set()
    for record in records:
        agent_seen = False
        # The same list object is stored in rules_map and then extended in
        # place as allow/disallow lines of the record are parsed.
        current: List[Tuple[bool, str]] = []
        for lineno, field, value in record:
            if field == 'user-agent':
                if current:
                    logger.error(
                        'Line %d specifies user agent after rules; '
                        'assuming new record', lineno)
                    current = []
                agent_seen = True
                name = value.casefold()
                if name in rules_map:
                    logger.error(
                        'Line %d specifies user agent "%s", which was '
                        'already addressed in an earlier record; '
                        'ignoring new record', lineno, value)
                else:
                    rules_map[name] = current
            elif field in ('allow', 'disallow'):
                if not agent_seen:
                    logger.error(
                        'Line %d specifies %s rule without a preceding '
                        'user agent line; ignoring line', lineno, field)
                    continue
                try:
                    path = unescape_path(value)
                except ValueError as ex:
                    logger.error('Bad escape in %s URL on line %d: %s',
                                 field, lineno, ex)
                else:
                    # Ignore allow/disallow directives without a path.
                    if path:
                        current.append((field == 'allow', path))
            elif field not in unknown_fields:
                # Unknown fields are allowed for extensions; warn once each.
                unknown_fields.add(field)
                logger.info('Unknown field "%s" (line %d)', field, lineno)
    return rules_map
Ejemplo n.º 7
0
def load_yaml_file(filename=None, dir=defaults.config_dir):
    """
    Load a YAML file and parse it into a dict.

    Raises ValueError when no filename is given. Parse errors are logged
    and an empty dict is returned; I/O errors from open() propagate.
    """
    log = LoggerAdapter(logger, {'name_ext' : 'load_yaml_file'})
    if filename is None:
        raise ValueError("load_yaml_file: need a filename to load.")

    full_path = filename_to_fullpath(dir, filename)

    parsed = dict()
    with open(full_path, 'rt') as fp:
        try:
            parsed = ruamel.yaml.load(fp, ruamel.yaml.RoundTripLoader)
        except ruamel.yaml.YAMLError as err:
            log.error('load_yaml_file: Error loading {}: {}'.format(full_path, str(err)))
    return parsed
Ejemplo n.º 8
0
    def __init__(self, mapping=None, provider_ns=None, fail_to_parent=False, fail_up_height=0):
        '''
        Input:
            mapping: the mapping object to use for lookups

            provider_ns: namespace used to resolve providers by NSID

            fail_to_parent: if True, in the case that the lookup for the specifically
                given id fails, we will return the parent's provider if the parent is within
                <fail_up_height> distance from the requested node.

            fail_up_height: if fail_to_parent is True, this determines the maximum
                distance that the parent can be from the child in order for the Fail Up to
                succeed. Set to math.inf to set to infinite.
        '''
        #- local import: collections.Mapping was removed in Python 3.10;
        #- the ABC lives in collections.abc
        from collections.abc import Mapping

        log_name_ext = {'name_ext' : '{}.__init__'.format(self.__class__.__name__)}
        log = LoggerAdapter(logger, log_name_ext)

        super().__init__()
        self._provider_ns = provider_ns
        self.fail_to_parent = fail_to_parent
        self.fail_up_height = fail_up_height

        #- Check ProviderMap initial mapping for valid Provider objects
        if mapping:
            if isinstance(mapping, Mapping):
                if len(mapping.values()) > 0:
                    log.debug("Checking ProviderMap initial mapping for valid Provider objects")
                    for provider in mapping.values():
                        #- TODO: Duckify by catching this when providers are used, not
                        #-       instantiated
                        #- Bug fix: isinstance() was called with one argument
                        #- and the error path referenced an undefined name 'p'.
                        if provider is not None and not isinstance(provider, Provider):
                            log.error('Invalid provider: {}'.format(str(provider)))
                            msg = 'NamespaceNode providers must be an instance of Provider None, not {}'.format(provider)
                            raise ValueError(msg)

                    providers = list(mapping.keys())
                    log.debug("Setting providers for the attributes: {}".format(providers))
                    self.data = mapping
            else:
                msg = "Non collections.Mapping type passed for ProviderMap mapping"
                raise ValueError(msg)

        else:
            #- no (or empty) mapping given: start with an empty lookup table
            self.data = dict()
Ejemplo n.º 9
0
def temp_hidden_window(logger: logging.LoggerAdapter = None):
    """
    Creates and destroys a temporary Window similar to the way that PySimpleGUI does in
    :func:`popup_get_folder<PySimpleGUI.popup_get_folder>` while creating a file prompt.  Mostly copied from that func.

    :param logger: Logger for error reporting; falls back to the
        module-level ``log`` when None.
    """
    # default to the module-level logger
    logger = log if logger is None else logger
    if not Window.hidden_master_root:
        # if first window being created, make a throwaway, hidden master root.  This stops one user window from
        # becoming the child of another user window. All windows are children of this hidden window
        Window._IncrementOpenCount()
        Window.hidden_master_root = tkinter.Tk()
        Window.hidden_master_root.attributes(
            '-alpha', 0)  # HIDE this window really really really
        try:
            # wm_overrideredirect is not supported by every platform/WM
            Window.hidden_master_root.wm_overrideredirect(True)
        except Exception:
            logger.error('* Error performing wm_overrideredirect *',
                         exc_info=True)
        Window.hidden_master_root.withdraw()

    root = tkinter.Toplevel()
    try:
        root.attributes(
            '-alpha',
            0)  # hide window while building it. makes for smoother 'paint'
        try:
            root.wm_overrideredirect(True)
        except Exception:
            logger.error('* Error performing wm_overrideredirect *',
                         exc_info=True)
        root.withdraw()
    except Exception:
        # best-effort: failing to hide the Toplevel is non-fatal
        pass

    # generator-based context manager: the caller uses the Toplevel here
    yield root

    # tear down the temporary window; when it was the last open window,
    # also destroy the hidden master root so tkinter state is fully reset
    root.destroy()
    if Window.NumOpenWindows == 1:
        Window.NumOpenWindows = 0
        Window.hidden_master_root.destroy()
        Window.hidden_master_root = None
Ejemplo n.º 10
0
def scan_robots_txt(
        lines: Iterable[str],
        logger: LoggerAdapter) -> Iterator[Iterable[Tuple[int, str, str]]]:
    """Tokenizes the contents of a C{robots.txt} file.

    @param lines:
        Contents of a C{robots.txt} file.
    @param logger:
        Problems found while scanning are logged here.
    @return:
        Yields records, where each record is a sequence of
        C{(lineno, token, value)} triples.
    """
    current: List[Tuple[int, str, str]] = []
    for lineno, raw_line in enumerate(lines, 1):
        stripped = raw_line.lstrip()
        if stripped.startswith('#'):
            # Comment-only lines are discarded and do not end records.
            continue
        if not stripped:
            # A blank line terminates the record in progress, if any.
            if current:
                yield current
                current = []
            continue
        if len(stripped) != len(raw_line):
            logger.warning('Line %d has whitespace before field', lineno)

        # Strip a trailing comment, then split "field: value" once.
        payload = stripped.split('#', 1)[0]
        parts = payload.split(':', 1)
        if len(parts) != 2:
            logger.error('Line %d contains no ":"; ignoring line', lineno)
        else:
            current.append((lineno, parts[0].casefold(), parts[1].strip()))

    # Flush the final record when the file does not end with a blank line.
    if current:
        yield current
Ejemplo n.º 11
0
    def _add_ns(self, ns_node, iter=True, overwrite=True):
        """
        Description:
            convenience method for adding namespace nodes rooted at this namespace node
        Input:
            ns_node: namespace node to add
            iter: add this to ns_items dict
            overwrite: passed through to _add_item
        Output:
            return value of self._add_item for the computed sub-NSID
        Raises:
            AttributeError when ns_node has no _nsid; ValueError when
            ns_node's NSID is not a strict sub-NSID of this node's NSID.
        """
        log = LoggerAdapter(logger, {'name_ext': 'NamespaceNode._add_ns'})
        log.debug("'{}' adding sub ns: {}".format(self._nsid, ns_node))

        try:
            nsid = ns_node._nsid
        except AttributeError as err:
            log.error("add_ns called on object without 'nsid' attribute:{}".format(\
                ns_node))
            raise

        #- walk the new node's NSID components until they stop matching our
        #- own NSID prefix; the first non-matching component starts the
        #- sub-NSID relative to this node
        nsid_path = nsid.split('.')
        for n, nsid_x in enumerate(nsid_path):
            try:
                #- must match a prefix
                if nsid_x != self._nsid.split('.')[n]:
                    break
            except IndexError:
                #- new NSID is longer than ours: remainder is the sub-NSID
                break
        else:
            #- loop never broke: the NSID is fully contained in ours,
            #- so there is nothing to add below this node
            raise ValueError("must have a sub-NSID to add namespacenode")

        #- found an nsid component in nsid_x (at index n) that begins the sub_nsid
        log.debug("found sub_nsid start: {}/{}".format(nsid_path[n],
                                                       nsid_path))
        sub_nsid = '.'.join(nsid_path[n:])
        return self._add_item(sub_nsid,
                              ns_node,
                              iter=iter,
                              overwrite=overwrite)
Ejemplo n.º 12
0
    def __getattr__(self, attr):
        """
        Description:
            called when attribute lookup fails. used to implement semantics for provided
            attributes. If Python calls this method we will use this class'
            provider_map to get the provider for this attribute access and then call
            the provider and return the provider's return value as the value of the
            attribute

        Input:
            attr: the name of the attribute that wasn't found via the normal Python
            attribute lookup mechanisms.

        Output:
            Output of the provider returned by the provider factory of this class.
        """
        log = LoggerAdapter(logger, {'name_ext': 'NamespaceNode.__getattr__'})
        log.debug('{}.__getattr__({})'.format(self._nsid, attr))
        if self._provider_map:
            try:
                if callable(self._provider_map[attr]):
                    self._ghost = self._provider_map[attr]()
                #- TODO: allow a string to be set. Use more specific logic
                #- TODO:     to determine if this is an NSID
                #- Bug fix: this branch previously read the undefined global
                #- 'provider_map' and raised NameError for string providers.
                elif isinstance(self._provider_map[attr], str):
                    #- treat as NSID
                    try:
                        provider = self.provider_ns._lookup(
                            self._provider_map[attr])
                        self._ghost = provider()
                    except NamespaceLookupError as err:
                        raise ProviderError from err
                    except TypeError as err:
                        log.error(
                            f'{self._nsid}.{attr}: provider not callable')
            except ProviderError as err:
                log.error('{}: provider error: {}'.format(self._nsid, err))
            except ProviderMapLookupError as err:
                log.error('No mapped provider for {}.{}'.format(
                    self._nsid, attr))
                raise AttributeError from err

            log.debug('{}.{} provider returned: {}'.format(
                self._nsid, attr, self._ghost))
        else:
            raise AttributeError('{} object has no provider_map and no attribute \'{}\''.format(\
                self.__class__.__name__, attr))

        return self._ghost
Ejemplo n.º 13
0
class BaseJob(object):
    """ Base class for job implementation in spvd. """

    # NOTE(review): this class uses Python 2 `except Type, name` syntax in
    # run(); it cannot be imported under Python 3 without porting.

    class BaseError(Exception):
        """ Base class for BaseJob Exceptions. """
        def __init__(self, error):
            """ Init method. """
            Exception.__init__(self, error)

    # statuses a plugin is allowed to report back
    _valid_status = ('FINISHED', 'WARNING', 'ERROR')

    def __init__(self, options, infos, params):
        """ Init method. """

        self.infos = infos

        # remember the previous status so plugins can detect transitions,
        # then reset status_infos for this run
        self.old_status = self.infos['status']['check_status']
        self.old_status_infos = self.infos['status']['status_infos']
        self.infos['status']['status_infos'] = {}

        # one logger per plugin/check pair
        logger_per_job = logging.getLogger(
            "spvd.jobs.%s.%s" % (self.infos['check']['plugin'],
                                 self.infos['check']['plugin_check']))

        if options.nodaemon:
            # foreground mode: funnel everything through the parent logger
            logger = logging.getLogger("spvd.jobs")
        else:
            logger = logger_per_job

        # critical section around logger.handlers
        global __handler_lock__
        with __handler_lock__:
            # only configure each logger once
            if len(logger.handlers) == 0:
                if options.nodaemon:
                    log_handler = logging.StreamHandler(sys.stdout)
                else:
                    # daemon mode: log file per check, <logdir>/<plugin>/<check>.log
                    log_dir = options.logdir + '/' + self.infos['check'][
                        'plugin']
                    if os.path.exists(log_dir) is False:
                        os.mkdir(log_dir)
                    log_file = "%s/%s.log" % (
                        log_dir, self.infos['check']['plugin_check'])
                    log_handler = logging.FileHandler(log_file)

                # extra fields (statusid, plugin, ...) are injected by the
                # LoggerAdapter created below
                formatter_string = '%(asctime)s %(levelname)-8s %(statusid)5s ' + \
                        '%(plugin)s:%(check)s %(group)s %(object)s : %(message)s'
                log_handler.setFormatter(logging.Formatter(formatter_string))
                logger.addHandler(log_handler)

                if params.get('debug', False):
                    logger.setLevel(logging.DEBUG)
                else:
                    logger.setLevel(logging.INFO)

                logger.propagate = False

        # Jobs will always use logger_per_job here, even in nodaemon mode,
        # since "spvd.jobs" will trap all log messages in that case.
        self.log = LoggerAdapter(
            logger_per_job, {
                'plugin': self.infos['check']['plugin'],
                'check': self.infos['check']['plugin_check'],
                'statusid': "#" + str(self.infos['status']['status_id']),
                'group': self.infos['group']['name'],
                'object': self.infos['object']['address']
            })

    def set_check_status(self, check_status, check_message, status_infos=None):
        """ Helper function to prepare check's status. """

        #self.log.warning('This module is using [set_check_status] which is deprecated.'
        #    ' Please upgrade it or fill a bug report if an update does not exist.')

        if check_status not in self._valid_status:
            message = 'Job returned an invalid status <%s>' % check_status
            self.log.error(message)
            raise BaseJob.BaseError(message)

        self.infos['status']['check_message'] = check_message
        self.infos['status']['check_status'] = check_status
        if status_infos:
            self.infos['status']['status_infos'].update(status_infos)

    def run(self):
        """ Starts the job implemented by this plugin. """

        status, message = '', ''
        try:
            # self.go() is implemented by the plugin subclass
            tmp_ret = self.go()
            try:
                # Done in a separate try..except to avoid shadowing
                # TypeError exceptions from plugins
                status, message = tmp_ret
            except TypeError, error:
                # Transitional catch
                # self.log.warning('This module is not returning its status like it should.'
                #    ' This is a deprecated behavior.'
                #    ' Please upgrade it or fill a bug report if an update does not exist.')
                status = self.infos['status']['check_status']
                message = self.infos['status']['check_message']

        except (BaseJob.BaseError, BaseJobRuntimeError), error:
            # Expected exception, nothing to worry about
            self.log.error(str(error))
            status, message = 'ERROR', str(error)

        except Exception, error:
            # Unexpected exception, should log a traceback
            self.log.critical('Fatal error: job stopped')
            self.log.critical(traceback.format_exc())
            status, message = 'ERROR', str(error)
Ejemplo n.º 14
0
class BaseJob(object):
    """ Base class for job implementation in spvd. """

    # NOTE(review): Python 2 `except Type, name` syntax is used in run();
    # this class cannot be imported under Python 3 without porting.

    class BaseError(Exception):
        """ Base class for BaseJob Exceptions. """

        def __init__(self, error):
            """ Init method. """
            Exception.__init__(self, error)

    # statuses a plugin is allowed to report back
    _valid_status = ('FINISHED', 'WARNING', 'ERROR')

    def __init__(self, options, infos, params):
        """ Init method. """

        self.infos = infos

        # keep the previous status for transition detection, then reset
        # status_infos for this run
        self.old_status = self.infos['status']['check_status']
        self.old_status_infos = self.infos['status']['status_infos']
        self.infos['status']['status_infos'] = {}

        # one logger per plugin/check pair
        logger_per_job = logging.getLogger("spvd.jobs.%s.%s" % (self.infos['check']['plugin'],
                                                                self.infos['check']['plugin_check']))

        if options.nodaemon:
            # foreground mode: funnel everything through the parent logger
            logger = logging.getLogger("spvd.jobs")
        else:
            logger = logger_per_job

        # critical section around logger.handlers
        global __handler_lock__
        with __handler_lock__:
            # only configure each logger once
            if len(logger.handlers) == 0:
                if options.nodaemon:
                    log_handler = logging.StreamHandler(sys.stdout)
                else:
                    # daemon mode: log file per check, <logdir>/<plugin>/<check>.log
                    log_dir = options.logdir + '/' + self.infos['check']['plugin']
                    if os.path.exists(log_dir) is False:
                        os.mkdir(log_dir)
                    log_file = "%s/%s.log" % (log_dir, self.infos['check']['plugin_check'])
                    log_handler = logging.FileHandler(log_file)

                # extra fields are injected by the LoggerAdapter created below
                formatter_string = '%(asctime)s %(levelname)-8s %(statusid)5s ' + \
                        '%(plugin)s:%(check)s %(group)s %(object)s : %(message)s'
                log_handler.setFormatter(logging.Formatter(formatter_string))
                logger.addHandler(log_handler)

                if params.get('debug', False):
                    logger.setLevel(logging.DEBUG)
                else:
                    logger.setLevel(logging.INFO)

                logger.propagate = False

        # Jobs will always use logger_per_job here, even in nodaemon mode,
        # since "spvd.jobs" will trap all log messages in that case.
        self.log = LoggerAdapter(logger_per_job, {
            'plugin':   self.infos['check']['plugin'],
            'check':    self.infos['check']['plugin_check'],
            'statusid': "#" + str(self.infos['status']['status_id']),
            'group':    self.infos['group']['name'],
            'object':   self.infos['object']['address']})

    def set_check_status(self, check_status, check_message, status_infos=None):
        """ Helper function to prepare check's status. """

        #self.log.warning('This module is using [set_check_status] which is deprecated.'
        #    ' Please upgrade it or fill a bug report if an update does not exist.')

        if check_status not in self._valid_status:
            message = 'Job returned an invalid status <%s>' % check_status
            self.log.error(message)
            raise BaseJob.BaseError(message)

        self.infos['status']['check_message'] = check_message
        self.infos['status']['check_status'] = check_status
        if status_infos:
            self.infos['status']['status_infos'].update(status_infos)

    def run(self):
        """ Starts the job implemented by this plugin. """

        status, message = '', ''
        try:
            # self.go() is implemented by the plugin subclass
            tmp_ret = self.go()
            try:
                # Done in a separate try..except to avoid shadowing
                # TypeError exceptions from plugins
                status, message = tmp_ret
            except TypeError, error:
                # Transitional catch
                # self.log.warning('This module is not returning its status like it should.'
                #    ' This is a deprecated behavior.'
                #    ' Please upgrade it or fill a bug report if an update does not exist.')
                status = self.infos['status']['check_status']
                message = self.infos['status']['check_message']

        except (BaseJob.BaseError, BaseJobRuntimeError), error:
            # Expected exception, nothing to worry about
            self.log.error(str(error))
            status, message = 'ERROR', str(error)

        except Exception, error:
            # Unexpected exception, should log a traceback
            self.log.critical('Fatal error: job stopped')
            self.log.critical(traceback.format_exc())
            status, message = 'ERROR', str(error)
Ejemplo n.º 15
0
    def parse_submap(self, dictConfig, cur_ns, prev_ns=None):
        """
        Description:
            Generic dictConfig sub parser. Every key becomes a NamespaceNode;
            sub-keys that are themselves mappings are recursed into, while
            immediate values become leaf items on the current node.

        Input:
            dictConfig: current config root
            cur_ns: current namespace root
            prev_ns: previous namespace root - used for raw overwriting

        Output:
            None; directly adds nodes to cur_ns

        Notes:
            Subclasses may override this to support other dictionary
            configuration styles that share the property of a set of
            root-level keys parsed into a collection of namespaces.

            See SdkNamespace class for an example of overriding this method.
        """

        log = LoggerAdapter(logger,
                            {'name_ext': 'NamespaceConfigParser.parse_submap'})
        log.debug('Entering with cur_ns: {}'.format(cur_ns))

        if cur_ns is None:
            log.error('Error parsing config: current namespace node is None.')
            raise ValueError('None is not a valid namespace object')

        log.debug('Iterating over keys: {}'.format(list(dictConfig.keys())))
        for key in dictConfig.keys():
            log.debug('----[cur_ns: {} | current key: {}'.format(cur_ns, key))
            value = dictConfig[key]

            if key == self._raw_marker:
                #- raw case: overwrite the current node on its parent with a
                #- plain dict copy of the value
                raw_dict = dict(value)
                if not prev_ns:
                    raise NamespaceConfigParsingError(
                        "Can't set raw item wihout parent node")
                prev_ns._add_item(cur_ns._name, raw_dict, overwrite=True)

            elif isinstance(value, Mapping):
                log.debug('dictConfig[{}] is another dictConfig'.format(key))

                #- recursive case
                child_ns = cur_ns._add_child(key)
                log.debug(
                    'recursing to parse dict config for key: [{}]'.format(key))
                self.parse_submap(value, cur_ns=child_ns, prev_ns=cur_ns)

            else:
                #- leaf case: assign the immediate value to a bare node
                log.debug('Setting immediate value for node {}.[{}]'.format(
                    cur_ns._nsid, key))
                log.debug('    {}'.format(value))
                cur_ns._add_item(key, value)

        log.debug('exiting: cur_ns: {}'.format(cur_ns))
Ejemplo n.º 16
0
class BaseJob(object):
    """Base class for job implementation in spvd."""

    class BaseError(Exception):
        """Base class for BaseJob Exceptions."""

        def __init__(self, error):
            """Init method."""
            Exception.__init__(self, error)

    # Statuses a plugin check is allowed to report back.
    _valid_status = ("FINISHED", "WARNING", "ERROR")

    def __init__(self, options, infos, params):
        """Prepare job state and the per-plugin logger.

        options: daemon options; reads .nodaemon and .logdir.
        infos: check/status/group/object description dict, mutated in place.
        params: plugin parameters; only the 'debug' flag is read here.
        """

        self.infos = infos

        # Keep the previous status around so run() can detect transitions.
        self.old_status = self.infos["status"]["check_status"]
        self.old_status_infos = self.infos["status"]["status_infos"]
        self.infos["status"]["status_infos"] = {}

        logger_per_job = logging.getLogger(
            "spvd.jobs.%s.%s" % (self.infos["check"]["plugin"],
                                 self.infos["check"]["plugin_check"]))

        # In nodaemon mode the parent "spvd.jobs" logger traps everything.
        if options.nodaemon:
            logger = logging.getLogger("spvd.jobs")
        else:
            logger = logger_per_job

        # critical section around logger.handlers
        global __handler_lock__
        with __handler_lock__:
            if not logger.handlers:
                if options.nodaemon:
                    log_handler = logging.StreamHandler(sys.stdout)
                else:
                    log_dir = os.path.join(options.logdir,
                                           self.infos["check"]["plugin"])
                    # makedirs(exist_ok=True) avoids the check-then-create
                    # race of the old exists()/mkdir() pair and also creates
                    # any missing parent directory.
                    os.makedirs(log_dir, exist_ok=True)
                    log_file = "%s/%s.log" % (
                        log_dir,
                        self.infos["check"]["plugin_check"],
                    )
                    log_handler = logging.FileHandler(log_file)

                formatter_string = (
                    "%(asctime)s %(levelname)-8s %(statusid)5s " +
                    "%(plugin)s:%(check)s %(group)s %(object)s : %(message)s")
                log_handler.setFormatter(logging.Formatter(formatter_string))
                logger.addHandler(log_handler)

                if params.get("debug", False):
                    logger.setLevel(logging.DEBUG)
                else:
                    logger.setLevel(logging.INFO)

                logger.propagate = False

        # Jobs will always use logger_per_job here, even in nodaemon mode,
        # since "spvd.jobs" will trap all log messages in that case.
        self.log = LoggerAdapter(
            logger_per_job,
            {
                "plugin": self.infos["check"]["plugin"],
                "check": self.infos["check"]["plugin_check"],
                "statusid": "#" + str(self.infos["status"]["status_id"]),
                "group": self.infos["group"]["name"],
                "object": self.infos["object"]["address"],
            },
        )

    def set_check_status(self, check_status, check_message, status_infos=None):
        """Helper function to prepare check's status.

        Deprecated entry point kept for old plugins; new plugins should
        return (status, message) from their check method instead (see run()).

        Raises:
            BaseJob.BaseError: if check_status is not one of _valid_status.
        """
        if check_status not in self._valid_status:
            message = "Job returned an invalid status <%s>" % check_status
            self.log.error(message)
            raise BaseJob.BaseError(message)

        self.infos["status"]["check_message"] = check_message
        self.infos["status"]["check_status"] = check_status
        if status_infos:
            self.infos["status"]["status_infos"].update(status_infos)

    def run(self):
        """Starts the job implemented by this plugin; returns self.infos."""

        status, message = "", ""
        try:
            tmp_ret = self.go()
            try:
                # Done in a separate try..except to avoid shadowing
                # TypeError exceptions from plugins
                status, message = tmp_ret
            except TypeError:
                # Transitional catch for plugins that still call
                # set_check_status() instead of returning (status, message).
                status = self.infos["status"]["check_status"]
                message = self.infos["status"]["check_message"]

        except (BaseJob.BaseError, BaseJobRuntimeError) as error:
            # Expected exception, nothing to worry about
            self.log.error(str(error))
            status, message = "ERROR", str(error)

        except Exception as error:
            # Unexpected exception, should log a traceback
            self.log.critical("Fatal error: job stopped")
            self.log.critical(traceback.format_exc())
            status, message = "ERROR", str(error)

        if status not in self._valid_status:
            status, message = "ERROR", "Job returned an invalid status <%s>" % status
            self.log.error(message)

        self.infos["status"]["check_message"] = message
        self.infos["status"]["check_status"] = status

        # Optionally record a history checkpoint on status transitions.
        if (self.infos["check"]["check_infos"].get("history", False) == "true"
                and self.old_status != self.infos["status"]["check_status"]):
            self.log.debug("Saving new history checkpoint: %s -> %s",
                           self.old_status,
                           self.infos["status"]["check_status"])
            self.infos["status"]["status_infos"].update({
                "history-%d-%s" % (
                    int(time.time()),
                    self.infos["status"]["check_status"].lower(),
                ):
                self.infos["status"]["check_message"]
            })

        self.log.debug("Job finished: <%s, %s>", status, message)
        return self.infos

    def go(self):
        """Calls specific check in BaseJob class of the plugin.

        Raises:
            BaseJob.BaseError: when the plugin does not implement the
                method named by infos['check']['plugin_check'].
        """
        plugin_check = self.infos["check"]["plugin_check"]
        if hasattr(self, plugin_check):
            return getattr(self, plugin_check)()
        message = "Job does not implement <%s> method" % plugin_check
        raise BaseJob.BaseError(message)
Ejemplo n.º 17
0
    def _runtime_import(self, importable_name, ignore_errors=True):
        """
        Description:
            Perform an import of the given dotted-name string.
            Starts with the whole string and tries successively smaller
            prefixes.

            Python imports within a block bind the name only to the local
            scope of the enclosing block within which the binding statement
            occurs (import, in this case).

            In order to use this importable_name elsewhere, the object the
            name is locally bound to must be passed into the execution scope
            of whatever relies on this method.
        Input:
            importable_name: string that should resolve to a runtime object
                after needed imports
            ignore_errors: don't raise an error if a prefix fails to import.
                Assume that it's probably okay and that any error will be
                handled by code that uses the return value.

        Output:
            Mapping of names imported to objects that resulted from import
            operations: dict mapping import_name --> module object,
            or None if nothing could be imported.

        Raises:
            ValueError: if importable_name is not a string, or (when
                ignore_errors is False) if a prefix fails to import.
        """
        _nm_ex = {
            'name_ext': '{}._runtime_import'.format(self.__class__.__name__)
        }
        log = LoggerAdapter(logger, _nm_ex)

        all_imported = dict()
        try:
            imported = importlib.import_module(importable_name)
        except ImportError:
            pass
        else:
            #- BUGFIX: return a name -> module mapping; the old code built a
            #- set literal {name, module} instead of a dict entry.
            return {importable_name: imported}

        #- if here, we could not import the name as given
        try:
            importable_name_components = importable_name.split('.')
        except AttributeError:
            msg = ["AttributeError when calling split method."]
            msg.append("Error in values when called from runtime import of \
            formatter keyword args: \"{}\" is not a string".format(
                importable_name))
            log.error(' '.join(msg))
            raise ValueError(
                "Couldn't split importable_name expression into components.")

        #- try successively shorter prefixes of the dotted name
        n = len(importable_name_components)
        while n > 0:
            import_candidate = '.'.join(importable_name_components[0:n])
            try:
                log.debug("Attempting to import '{}'".format(import_candidate))
                imported = importlib.import_module(import_candidate)
                log.debug("Success importing: '{}'".format(import_candidate))
                all_imported[import_candidate] = imported

            except ImportError:
                msg = "Failed to import runtime value: {} (full: {})".format(\
                    import_candidate, importable_name)
                log.error(msg)

                #- it might be that the rest of the components are built by the import
                #- or some other idiosyncracy of the importable_name that prevents this
                #- function as written from being able to tell what it should be
                if not ignore_errors:
                    raise ValueError(msg)
            finally:
                n -= 1

        #- if we got this far, we imported everything that should be needed for the object
        #- to be used
        log.debug(f"all_imported: {all_imported}")
        return all_imported if all_imported else None
Ejemplo n.º 18
0
    def render_view(self):
        """
        Description:
            Abstract Method Implementation.
            Capture the current model state in an AttributeView and return
            the view.

        Output:
            AttributeView holding the rendered (and width-adjusted) data.

        Raises:
            ValueError: if the render method returns a non-string,
                non-iterable object.
        """
        log = LoggerAdapter(logger, {'name_ext': 'AttributeModel.render_view'})
        log.debug("entering: {}".format(self))

        render_method = self.build_formatter_callable()

        #- first try the most specific; assume the attribute is an exact match
        try:
            attr = getattr(self.source_object, self.name)
        except AttributeError:
            #- if there's not an exact match, it may be an expression, so try that too
            #- NOTE(review): eval on self.name -- attribute expressions must
            #- come from trusted configuration, never from external input
            attr = eval('self.source_object.{}'.format(self.name))

        try:
            view_data = render_method(attr)
        except TypeError as err:
            #- the old code only caught TypeError on the exact-match path;
            #- rendering failures on the expression path now degrade the
            #- same way instead of propagating
            log.error('Error rendering AttributeView: {}: {}'.format(
                self.name, err))
            view_data = '_error_'

        #- figure out the width of this view
        #- the render method may have returned a string or an iterable that will iterate
        #- over each line for this view
        if isinstance(view_data, str):
            log.debug("render method for '{}' returned string".format(
                self.name))
            view_width = len(view_data)
            #- create an Iterable to simplify the rest of the processing
            view_data = [view_data]

        elif isinstance(view_data, collections.abc.Iterable):
            #- collections.abc.Iterable: the bare collections.Iterable alias
            #- was removed in Python 3.10
            view_lines = list(view_data)
            msg = [
                "render method for '{}' returned iterable".format(self.name)
            ]
            msg.append(" of length: {}".format(len(view_lines)))
            log.debug(''.join(msg))
            if not view_lines:
                view_width = 0
            elif len(view_lines) == 1:
                log.debug(
                    "render method returned iterable with a single value")
                view_width = len(view_lines[0])
            else:
                msg = "render method returned iterable with multiple values ({})".format(\
                    len(view_lines))
                log.debug(msg)
                #- choose the longest line as the view width
                view_width = len(max(view_lines, key=len))
            #- work on the materialized copy from here on: this supports
            #- one-shot iterators (generators) and avoids mutating a list
            #- the render method may share with the caller
            view_data = view_lines
        else:
            msg = ["render method for {}".format(self.name)]
            msg.append(" returned non-string, non-iterable object.")
            raise ValueError(''.join(msg))
        log.debug("view width for attribute '{}': {}".format(
            self.name, view_width))

        #- normalize each line to self.length characters when a fixed
        #- length is configured
        if self.length:
            if self.length > view_width:
                log.debug("padding view datum for '{}'".format(self.name))
                for n, view_datum in enumerate(view_data[:]):
                    log.debug("padding element {}".format(n))
                    #- pad it with spaces to fit
                    view_data[n] = view_datum + ' ' * (self.length -
                                                       view_width)

            elif self.length < view_width:
                log.debug("trimming view datum for '{}'".format(self.name))
                for n, view_datum in enumerate(view_data[:]):
                    log.debug("trimming element {}".format(n))
                    view_data[n] = view_datum[0:self.length]

        view_data = '\n'.join(view_data)
        log.debug("creating AttributeView with data: '{}'".format(view_data))
        view = AttributeView(view_data, color=self.color)
        return view