Example #1
0
    def single_request(self, this_request):
        """
        Actually perform the request and retry x times if request times out.
        Program will exit if all retries failed!

        Parameters
        ----------
        this_request: requests.session.prepare_request
            object of the prepared request

        Returns
        -------
        requests.Response: response for this request
        """

        response = None

        # on the most verbose level dump the full prepared request
        if log.level == DEBUG3:
            pprint.pprint(vars(this_request))

        for _ in range(self.max_retry_attempts):

            log_message = f"Sending {this_request.method} to '{this_request.url}'"

            if this_request.body is not None:
                log_message += f" with data '{this_request.body}'."

            # FIX: log every attempt; previously this call sat inside the
            # body check above, so body-less requests were never logged
            log.debug2(log_message)

            try:
                response = self.session.send(this_request,
                                             timeout=self.timeout,
                                             verify=self.validate_tls_certs)

            except (ConnectionError, requests.exceptions.ConnectionError,
                    requests.exceptions.ReadTimeout):
                log.warning(f"Request failed, trying again: {log_message}")
                continue
            else:
                break
        else:
            # for/else: the loop exhausted all retries without a single
            # successful send
            do_error_exit(
                f"Giving up after {self.max_retry_attempts} retries.")

        log.debug2("Received HTTP Status %s.", response.status_code)

        # print debugging information
        if log.level == DEBUG3:
            log.debug("Response Body:")
            try:
                pprint.pprint(response.json())
            except json.decoder.JSONDecodeError as e:
                log.error(e)

        return response
    def resolve_relations(self):
        """
        Resolve the 'assigned_object_id' relation, whose target model is
        determined by the companion key 'assigned_object_type'.
        """

        assigned_type = self.data.get("assigned_object_type")
        assigned_id = self.data.get("assigned_object_id")

        # the referenced model depends on a second model key, so the type
        # has to be validated before the id can be resolved
        if assigned_type is not None and assigned_type not in self.data_model.get(
                "assigned_object_type"):

            log.error(
                f"Attribute 'assigned_object_type' for '{self.get_display_name()}' invalid: {assigned_type}"
            )
            do_error_exit(
                f"Error while resolving relations for {self.get_display_name()}"
            )

        # swap the raw NetBox id for the matching inventory object
        if isinstance(assigned_id, int):
            related_class = self.data_model_relation.get(assigned_type)
            self.data["assigned_object_id"] = self.inventory.get_by_id(
                related_class, nb_id=assigned_id)

        super().resolve_relations()
Example #3
0
def open_config_file(config_file):
    """
    Open config file with a ConfigParser and return handler. Bail out if opening or parsing fails

    Parameters
    ----------
    config_file: str
        absolute path of config file to open

    Returns
    -------
    ConfigParser: handler with supplied config file
    """

    if config_file is None or config_file == "":
        do_error_exit("ERROR: Config file not defined.")

    # setup config parser and read config
    config_handler = configparser.ConfigParser(strict=True, allow_no_value=True, empty_lines_in_values=False)

    try:
        # FIX: use a context manager so the file handle is always closed
        # (previously open() was passed inline and never closed)
        with open(config_file) as file_handle:
            config_handler.read_file(file_handle)
    except configparser.Error as e:
        do_error_exit(f"ERROR: Problem while config file parsing: {e}")
    # broad catch is intentional: any I/O problem ends the program here
    except Exception:
        do_error_exit(f"ERROR: Unable to open file '{config_file}'")

    return config_handler
Example #4
0
def get_config_file(config_file):
    """
    get absolute path to provided config file string

    Parameters
    ----------
    config_file: str
        config file path

    Returns
    -------
    str: absolute path to config file
    """

    if config_file is None or config_file == "":
        do_error_exit("ERROR: Config file not defined.")

    # base directory is three levels above this file
    base_dir = os.sep.join(__file__.split(os.sep)[0:-3])

    # FIX: use os.path.isabs() instead of comparing the first character with
    # os.sep, which misclassifies Windows drive-letter paths like 'C:\\...'
    if not os.path.isabs(config_file):
        config_file = f"{base_dir}{os.sep}{config_file}"

    return os.path.realpath(config_file)
Example #5
0
    def __init__(self, settings=None, inventory=None):
        """
        Store settings and inventory, build the API base URL, create the
        HTTP session and verify the NetBox minimum API version.
        """

        self.settings = settings
        self.inventory = inventory

        self.parse_config_settings(settings)

        # flood the console
        if log.level == DEBUG3:
            log.warning(
                "Log level is set to DEBUG3, Request logs will only be printed to console"
            )
            HTTPConnection.debuglevel = 1

        scheme = "http" if bool(self.disable_tls) is True else "https"

        # disable TLS insecure warnings if user explicitly switched off validation
        if bool(self.validate_tls_certs) is False:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

        port_suffix = "" if self.port is None else f":{self.port}"

        self.url = f"{scheme}://{self.host_fqdn}{port_suffix}/api/"

        self.session = self.create_session()

        # check for minimum version
        api_version = self.get_api_version()
        if version.parse(api_version) < version.parse(
                self.minimum_api_version):
            do_error_exit(f"Netbox API version '{api_version}' not supported. "
                          f"Minimum API version: {self.minimum_api_version}")

        self.setup_caching()
Example #6
0
    def get_api_version(self):
        """
        Perform a basic GET request to extract NetBox API version from header

        Returns
        -------
        str: NetBox API version
        """
        response = None
        try:
            response = self.session.get(self.url,
                                        timeout=self.timeout,
                                        verify=self.validate_tls_certs)
        except Exception as e:
            # any connection problem is fatal at this point
            do_error_exit(str(e))

        # FIX: exit with a clear message instead of raising a bare KeyError
        # when the 'API-Version' header is missing from the response
        result = response.headers.get("API-Version")
        if result is None:
            do_error_exit(
                f"Unable to determine NetBox API version from response of '{self.host_fqdn}'")
        result = str(result)

        log.info(f"Successfully connected to NetBox '{self.host_fqdn}'")
        log.debug(f"Detected NetBox API version: {result}")

        return result
Example #7
0
def setup_logging(log_level=None, log_file=None):
    """
    Set up logging for the whole program and return a log handler

    Parameters
    ----------
    log_level: str
        valid log level to set logging to
    log_file: str
        name of the log file to log to

    Returns
    -------
    log handler to use for logging
    """

    log_file_max_size_in_mb = 10
    log_file_max_rotation = 5

    log_format = '%(asctime)s - %(levelname)s: %(message)s'

    if log_level is None or log_level == "":
        do_error_exit("ERROR: log level undefined or empty. Check config please.")

    # FIX: normalize case once; previously the validation compared
    # log_level.upper() while the DEBUG2/DEBUG3 branches compared the raw
    # string, so e.g. "debug2" passed validation but then resolved via
    # getattr(logging, ...) to None, crashing logger.setLevel(None)
    log_level = log_level.upper()

    # check set log level against self defined log level array
    if log_level not in valid_log_levels:
        do_error_exit(f"ERROR: Invalid log level: {log_level}")

    # check the provided log level
    if log_level == "DEBUG2":
        numeric_log_level = DEBUG2
    elif log_level == "DEBUG3":
        numeric_log_level = DEBUG3
        # DEBUG3 additionally turns on root-level debug output
        logging.basicConfig(level=logging.DEBUG, format=log_format)
    else:
        numeric_log_level = getattr(logging, log_level, None)

    log_format = logging.Formatter(log_format)

    # create logger instance
    logger = get_logger()

    logger.setLevel(numeric_log_level)

    # setup stream handler
    # in DEBUG3 the root logger gets redefined, that would print every log message twice
    if log_level != "DEBUG3":
        log_stream = logging.StreamHandler()
        log_stream.setFormatter(log_format)
        logger.addHandler(log_stream)

    # setup log file handler
    if log_file is not None:
        # base directory is three levels up
        base_dir = os.sep.join(__file__.split(os.sep)[0:-3])
        if log_file[0] != os.sep:
            log_file = f"{base_dir}{os.sep}{log_file}"

        log_file_handler = None
        try:
            log_file_handler = RotatingFileHandler(
                filename=log_file,
                maxBytes=log_file_max_size_in_mb * 1024 * 1024,  # Bytes to Megabytes
                backupCount=log_file_max_rotation
            )
        except Exception as e:
            do_error_exit(f"ERROR: Problems setting up log file: {e}")

        log_file_handler.setFormatter(log_format)
        logger.addHandler(log_file_handler)

    return logger
Example #8
0
    def query_current_data(self, netbox_objects_to_query=None):
        """
        Request all current NetBox objects. Use caching whenever possible.
        Objects must provide "last_updated" attribute to support caching for this object type.
        Otherwise it's not possible to query only changed objects since last run. If attribute is
        not present all objects will be requested (looking at you *Interfaces)

        Parameters
        ----------
        netbox_objects_to_query: list of NetBoxObject sub classes
            NetBox items to query

        Raises
        ------
        AttributeError
            if no classes were passed or a class is not a NetBoxObject subclass
        """

        if netbox_objects_to_query is None:
            raise AttributeError(
                f"Attribute netbox_objects_to_query is: '{netbox_objects_to_query}'"
            )

        # query all dependencies
        for nb_object_class in netbox_objects_to_query:

            if nb_object_class not in NetBoxObject.__subclasses__():
                raise AttributeError(
                    f"Class '{nb_object_class.__name__}' must be a "
                    f"subclass of '{NetBoxObject.__name__}'")

            # if objects are multiple times requested but already retrieved
            if nb_object_class in self.resolved_dependencies:
                continue

            # initialize cache variables
            cached_nb_data = list()
            cache_file = f"{self.cache_directory}{os.sep}{nb_object_class.__name__}.cache"
            cache_this_class = False
            latest_update = None

            # check if cache file is accessible
            if self.use_caching is True:
                cache_this_class = True

                if os.path.exists(cache_file) and not os.access(
                        cache_file, os.R_OK):
                    log.warning(
                        f"Got no permission to read existing cache file: {cache_file}"
                    )
                    cache_this_class = False

                if os.path.exists(cache_file) and not os.access(
                        cache_file, os.W_OK):
                    log.warning(
                        f"Got no permission to write to existing cache file: {cache_file}"
                    )
                    cache_this_class = False

            # read data from cache file
            if cache_this_class is True:
                # NOTE(review): the cache is unpickled — assumed to be written
                # only by this program; never point it at untrusted files
                # FIX: close the cache file deterministically via context manager
                # noinspection PyBroadException
                try:
                    with open(cache_file, "rb") as cache_handle:
                        cached_nb_data = pickle.load(cache_handle)
                except Exception:
                    # a missing/corrupt cache simply means a full refresh
                    pass

                if cached_nb_data is None:
                    cached_nb_data = list()

                # get date of latest update in cache file
                if len(cached_nb_data) > 0:
                    latest_update_list = \
                        [x.get("last_updated") for x in cached_nb_data if x.get("last_updated") is not None]

                    if len(latest_update_list) > 0:
                        latest_update = sorted(latest_update_list)[-1]

                        log.debug(
                            f"Successfully read cached data with {len(cached_nb_data)} '{nb_object_class.name}%s'"
                            f", last updated '{latest_update}'" %
                            plural(len(cached_nb_data)))

                    elif self.testing_cache is False:
                        cache_this_class = False

            # in cache-testing mode use the cached objects directly
            if self.testing_cache is True and len(cached_nb_data) > 0:
                for object_data in cached_nb_data:
                    self.inventory.add_object(nb_object_class,
                                              data=object_data,
                                              read_from_netbox=True)

                # mark this object class as retrieved
                self.resolved_dependencies.add(nb_object_class)

                continue

            full_nb_data = None
            brief_nb_data = None
            updated_nb_data = None

            # no cache data found
            if latest_update is None:

                # get all objects of this class
                log.debug(
                    f"Requesting all {nb_object_class.name}s from NetBox")
                full_nb_data = self.request(nb_object_class)

                if full_nb_data.get("results") is None:
                    log.error(
                        f"Result data from NetBox for object {nb_object_class.__name__} missing!"
                    )
                    do_error_exit("Reading data from NetBox failed.")

            else:

                # request a brief list of existing objects
                log.debug(
                    f"Requesting a brief list of {nb_object_class.name}s from NetBox"
                )
                brief_nb_data = self.request(nb_object_class,
                                             params={
                                                 "brief": 1,
                                                 "limit": 500
                                             })
                log.debug("NetBox returned %d results." %
                          len(brief_nb_data.get("results", list())))

                log.debug(
                    f"Requesting the last updates since {latest_update} of {nb_object_class.name}s from NetBox"
                )
                updated_nb_data = self.request(
                    nb_object_class,
                    params={"last_updated__gte": latest_update})
                log.debug("NetBox returned %d results." %
                          len(updated_nb_data.get("results", list())))

                if brief_nb_data.get("results") is None or updated_nb_data.get(
                        "results") is None:
                    log.error(
                        f"Result data from NetBox for object {nb_object_class.__name__} missing!"
                    )
                    do_error_exit("Reading data from NetBox failed.")

            # read a full set from NetBox
            nb_objects = list()
            if full_nb_data is not None:
                nb_objects = full_nb_data.get("results")

            elif self.testing_cache is True:
                nb_objects = cached_nb_data

            # read the delta from NetBox and
            else:

                # keep cached objects that still exist and were not changed
                currently_existing_ids = [
                    x.get("id") for x in brief_nb_data.get("results")
                ]
                changed_ids = [
                    x.get("id") for x in updated_nb_data.get("results")
                ]

                for this_object in cached_nb_data:

                    if this_object.get(
                            "id"
                    ) in currently_existing_ids and this_object.get(
                            "id") not in changed_ids:
                        nb_objects.append(this_object)

                nb_objects.extend(updated_nb_data.get("results"))

            if self.use_caching is True:
                try:
                    # FIX: context manager guarantees the cache file is flushed
                    # and closed; previously the handle from open() was leaked
                    with open(cache_file, "wb") as cache_handle:
                        pickle.dump(nb_objects, cache_handle)
                    if cache_this_class is True:
                        log.debug("Successfully cached %d objects." %
                                  (len(nb_objects)))
                except Exception as e:
                    log.warning(
                        f"Failed to write NetBox data to cache file: {e}")

            log.debug(f"Processing %s returned {nb_object_class.name}%s" %
                      (len(nb_objects), plural(len(nb_objects))))

            for object_data in nb_objects:
                self.inventory.add_object(nb_object_class,
                                          data=object_data,
                                          read_from_netbox=True)

            # mark this object class as retrieved
            self.resolved_dependencies.add(nb_object_class)
Example #9
0
    def request(self,
                object_class,
                req_type="GET",
                data=None,
                params=None,
                nb_id=None):
        """
        Perform a NetBox request for a certain object.

        Parameters
        ----------
        object_class: NetBoxObject sub class
            class definition of the desired NetBox object
        req_type: str
            GET, PATCH, PUT, DELETE
        data: dict
            data which shall be send to NetBox
        params: dict
            dict of URL params which should be passed to NetBox
        nb_id: int
            ID of the NetBox object which will be appended to the requested NetBox URL

        Returns
        -------
        (dict, bool, None): of returned NetBox data. If object was requested to be deleted and it was
                            successful then True will be returned. None if request failed or was empty
        """

        result = None

        request_url = f"{self.url}{object_class.api_path}/"

        # append NetBox ID
        if nb_id is not None:
            request_url += f"{nb_id}/"

        if params is not None and not isinstance(params, dict):
            log.debug(
                f"Params passed to NetBox request need to be a dict, got: {params}"
            )
            params = dict()

        if req_type == "GET":

            if params is None:
                params = dict()

            if "limit" not in params.keys():
                params["limit"] = self.default_netbox_result_limit

            # always exclude config context
            params["exclude"] = "config_context"

        # prepare request
        this_request = self.session.prepare_request(
            requests.Request(req_type, request_url, params=params, json=data))

        # issue request
        response = self.single_request(this_request)

        try:
            result = response.json()
        except json.decoder.JSONDecodeError:
            pass

        if response.status_code == 200:

            # retrieve paginated results
            # FIX: parse each page exactly once; previously response.json()
            # was called three times per iteration, re-parsing the same body
            if this_request.method == "GET" and result is not None:
                page = result
                while page.get("next") is not None:
                    this_request.url = page.get("next")
                    log.debug2(
                        "NetBox results are paginated. Getting next page")

                    response = self.single_request(this_request)
                    page = response.json()
                    result["results"].extend(page.get("results"))

        elif response.status_code in [201, 204]:

            action = "created" if response.status_code == 201 else "deleted"

            if req_type == "DELETE":
                object_name = self.inventory.get_by_id(object_class, nb_id)
                if object_name is not None:
                    object_name = object_name.get_display_name()
            else:
                # FIX: guard against a 201 response without a JSON body
                object_name = None if result is None else result.get(
                    object_class.primary_key)

            log.info(
                f"NetBox successfully {action} {object_class.name} object '{object_name}'."
            )

            if response.status_code == 204:
                result = True

        # token issues
        elif response.status_code == 403:

            do_error_exit("NetBox returned: %s: %s" %
                          (response.reason, grab(result, "detail")))

        # we screw up something else
        elif 400 <= response.status_code < 500:

            log.error(
                f"NetBox returned: {this_request.method} {this_request.path_url} {response.reason}"
            )
            log.error(f"NetBox returned body: {result}")
            result = None

        elif response.status_code >= 500:

            do_error_exit(
                f"NetBox returned: {response.status_code} {response.reason}")

        return result