def get_elevation_svg(
    self,
    face=DeviceFaceChoices.FACE_FRONT,
    user=None,
    unit_width=None,
    unit_height=None,
    legend_width=RACK_ELEVATION_LEGEND_WIDTH_DEFAULT,
    include_images=True,
    base_url=None,
):
    """
    Render this rack as an SVG elevation drawing.

    :param face: Enum of [front, rear] representing the desired side of the rack elevation to render
    :param user: User instance to be used for evaluating device view permissions. If None, all devices will be included.
    :param unit_width: Width in pixels for the rendered drawing
    :param unit_height: Height of each rack unit for the rendered drawing. Note this is not the total
        height of the elevation
    :param legend_width: Width of the unit legend, in pixels
    :param include_images: Embed front/rear device images where available
    :param base_url: Base URL for links and images. If none, URLs will be relative.
    """
    # Fall back to the globally configured dimensions for anything the caller left unset.
    width = unit_width if unit_width is not None else get_settings_or_config("RACK_ELEVATION_DEFAULT_UNIT_WIDTH")
    height = unit_height if unit_height is not None else get_settings_or_config("RACK_ELEVATION_DEFAULT_UNIT_HEIGHT")

    drawing = RackElevationSVG(self, user=user, include_images=include_images, base_url=base_url)
    return drawing.render(face, width, height, legend_width)
def validate(self, attrs):
    """Populate configured defaults for any rack-unit dimensions the request omitted."""
    # setdefault() leaves caller-supplied values untouched; the config lookup is
    # evaluated unconditionally, matching the original eager behavior.
    for attr_name, config_key in (
        ("unit_width", "RACK_ELEVATION_DEFAULT_UNIT_WIDTH"),
        ("unit_height", "RACK_ELEVATION_DEFAULT_UNIT_HEIGHT"),
    ):
        attrs.setdefault(attr_name, get_settings_or_config(config_key))
    return attrs
def __init__(self, object_list, per_page, **kwargs):
    """
    Paginator that tolerates an invalid page size.

    Falls back to the configured PAGINATE_COUNT when ``per_page`` is missing,
    non-numeric, or less than 1.

    :param object_list: Objects to paginate (passed through to the base paginator)
    :param per_page: Requested page size; coerced to int, with fallback on failure
    """
    try:
        per_page = int(per_page)
        if per_page < 1:
            per_page = get_settings_or_config("PAGINATE_COUNT")
    except (TypeError, ValueError):
        # int(None) raises TypeError rather than ValueError; treat both as
        # "no valid per_page supplied" and use the configured default.
        per_page = get_settings_or_config("PAGINATE_COUNT")
    super().__init__(object_list, per_page, **kwargs)
def get_releases(pre_releases=False):
    """
    Fetch the list of known releases from the configured RELEASE_CHECK_URL.

    :param pre_releases: If True, include pre-release/dev-release entries
    :returns: List of (version_string, html_url) tuples; empty on failure or recent failure
    """
    url = get_settings_or_config("RELEASE_CHECK_URL")
    headers = {
        "Accept": "application/vnd.github.v3+json",
    }
    releases = []

    # Check whether this URL has failed recently and shouldn't be retried yet
    try:
        if url == cache.get("latest_release_no_retry"):
            logger.info("Skipping release check; URL failed recently: {}".format(url))
            return []
    except CacheMiss:
        pass

    try:
        logger.debug("Fetching new releases from {}".format(url))
        response = requests.get(url, headers=headers, proxies=settings.HTTP_PROXIES)
        response.raise_for_status()
        # Parse the response body once rather than on every access.
        release_data = response.json()
        total_releases = len(release_data)

        for release in release_data:
            if "tag_name" not in release:
                continue
            if not pre_releases and (release.get("devrelease") or release.get("prerelease")):
                continue
            releases.append((version.parse(release["tag_name"]), release.get("html_url")))
        logger.debug("Found {} releases; {} usable".format(total_releases, len(releases)))
    except requests.exceptions.RequestException:
        # The request failed. Set a flag in the cache to disable future checks to this URL for 15 minutes.
        logger.exception("Error while fetching {}. Disabling checks for 15 minutes.".format(url))
        cache.set("latest_release_no_retry", url, 900)
        return []

    # Cache the most recent release, but only if any were found; max() on an
    # empty sequence would raise ValueError.
    if releases:
        cache.set("latest_release", max(releases), get_settings_or_config("RELEASE_CHECK_TIMEOUT"))

    # Since this is a Celery task, we can't return Version objects as they are not JSON serializable.
    return [(str(version), url) for version, url in releases]
def get_latest_release(pre_releases=False):
    """
    Get latest known Nautobot release from cache, or if not available, queue up a background
    task to populate the cache.

    Returns:
        (Version, str): Latest release version and the release URL, if found in the cache
        ("unknown", None): If not present in the cache at this time
    """
    # Guard clause: release checking is disabled entirely when no URL is configured.
    if not get_settings_or_config("RELEASE_CHECK_URL"):
        logger.debug("Skipping release check; RELEASE_CHECK_URL not defined")
        return "unknown", None

    logger.debug("Checking for most recent release")
    try:
        cached_release = cache.get("latest_release")
    except CacheMiss:
        # Get the releases in the background worker, it will fill the cache
        logger.info("Initiating background task to retrieve updated releases list")
        get_releases.delay(pre_releases=pre_releases)
    else:
        if cached_release:
            logger.debug("Found cached release: {}".format(cached_release))
            return cached_release

    return "unknown", None
def primary_ip(self):
    """
    Return the preferred primary IP for this object.

    IPv4 wins only when the PREFER_IPV4 setting is enabled and an IPv4 address
    exists; otherwise IPv6 is preferred, falling back to IPv4, then None.
    """
    if get_settings_or_config("PREFER_IPV4") and self.primary_ip4:
        return self.primary_ip4
    # Prefer IPv6, fall back to IPv4; yields None when neither is set.
    return self.primary_ip6 or self.primary_ip4 or None
def get_limit(self, request):
    """
    Resolve the page-size limit for a request.

    Reads the limit query parameter if configured; a missing, non-numeric, or
    negative value falls back to PAGINATE_COUNT. A limit of 0 means "no limit",
    capped at MAX_PAGE_SIZE when that setting is non-zero.
    """
    if not self.limit_query_param:
        return get_settings_or_config("PAGINATE_COUNT")

    try:
        requested = int(request.query_params[self.limit_query_param])
    except (KeyError, ValueError):
        return get_settings_or_config("PAGINATE_COUNT")

    if requested < 0:
        # Negative limits are invalid; use the configured default.
        return get_settings_or_config("PAGINATE_COUNT")

    # Enforce maximum page size, if defined
    max_page_size = get_settings_or_config("MAX_PAGE_SIZE")
    if not max_page_size:
        return requested
    return max_page_size if requested == 0 else min(requested, max_page_size)
def get_paginate_count(request):
    """
    Determine the length of a page, using the following in order:

        1. per_page URL query parameter
        2. Saved user preference
        3. PAGINATE_COUNT global setting.
    """
    if "per_page" in request.GET:
        # A valid per_page value is persisted as the user's preference and returned
        # immediately; anything non-numeric silently falls through to the defaults.
        try:
            requested = int(request.GET.get("per_page"))
            if request.user.is_authenticated:
                request.user.set_config("pagination.per_page", requested, commit=True)
            return requested
        except ValueError:
            pass

    configured_default = get_settings_or_config("PAGINATE_COUNT")
    if request.user.is_authenticated:
        return request.user.get_config("pagination.per_page", configured_default)
    return configured_default
def _handle_changed_object(request, sender, instance, **kwargs):
    """
    Fires when an object is created or updated.

    Connected as a signal receiver (post_save / m2m_changed style kwargs). Records an
    ObjectChange when the instance supports it, enqueues webhooks, bumps Prometheus-style
    insert/update counters, and occasionally prunes expired changelog entries.
    """
    m2m_changed = False

    # Determine the type of change being made.
    if kwargs.get("created"):
        # post_save with created=True -> a brand-new object.
        action = ObjectChangeActionChoices.ACTION_CREATE
    elif "created" in kwargs:
        # post_save with created=False (key present but falsy) -> an update.
        action = ObjectChangeActionChoices.ACTION_UPDATE
    elif kwargs.get("action") in ["post_add", "post_remove"] and kwargs["pk_set"]:
        # m2m_changed with objects added or removed
        m2m_changed = True
        action = ObjectChangeActionChoices.ACTION_UPDATE
    else:
        # Any other signal variant (e.g. pre_* m2m actions, or an empty pk_set) is ignored.
        return

    # Record an ObjectChange if applicable
    if hasattr(instance, "to_objectchange"):
        if m2m_changed:
            # An ObjectChange for this request already exists (created by the earlier
            # post_save); update its serialized snapshot in place rather than creating
            # a second changelog entry for the same request.
            ObjectChange.objects.filter(
                changed_object_type=ContentType.objects.get_for_model(instance),
                changed_object_id=instance.pk,
                request_id=request.id,
            ).update(object_data=instance.to_objectchange(action).object_data)
        else:
            objectchange = instance.to_objectchange(action)
            objectchange.user = _get_user_if_authenticated(request, objectchange)
            objectchange.request_id = request.id
            objectchange.save()

    # Enqueue webhooks
    enqueue_webhooks(instance, request.user, request.id, action)

    # Increment metric counters
    if action == ObjectChangeActionChoices.ACTION_CREATE:
        model_inserts.labels(instance._meta.model_name).inc()
    elif action == ObjectChangeActionChoices.ACTION_UPDATE:
        model_updates.labels(instance._meta.model_name).inc()

    # Housekeeping: 0.1% chance of clearing out expired ObjectChanges
    # (probabilistic so the pruning cost isn't paid on every write).
    changelog_retention = get_settings_or_config("CHANGELOG_RETENTION")
    if changelog_retention and random.randint(1, 1000) == 1:
        cutoff = timezone.now() - timedelta(days=changelog_retention)
        ObjectChange.objects.filter(time__lt=cutoff).delete()
def queryset(self):
    """
    Property getter for queryset that acts upon `settings.DISABLE_PREFIX_LIST_HIERARCHY`

    By default we annotate the prefix hierarchy such that child prefixes are indented in the table.
    When `settings.DISABLE_PREFIX_LIST_HIERARCHY` is True, we do not annotate the queryset, and the
    table is rendered as a flat list.

    TODO(john): When the base views support a formal `get_queryset()` method, this approach is not needed
    """
    # Lazily build and memoize the queryset on first access.
    if not self._queryset:
        if get_settings_or_config("DISABLE_PREFIX_LIST_HIERARCHY"):
            flat = Prefix.objects.annotate(parents=Count(None))
            self._queryset = flat.order_by(
                F("vrf__name").asc(nulls_first=True),
                "network",
                "prefix_length",
            )
        else:
            self._queryset = Prefix.objects.annotate_tree()
    return self._queryset
def settings_or_config(key):
    """Get a value from Django settings (if specified there) or Constance configuration (otherwise)."""
    # Thin delegating wrapper; kept so templates/callers have a stable local name.
    value = get_settings_or_config(key)
    return value
def available_ips(self, request, pk=None):
    """
    A convenience method for returning available IP addresses within a prefix. By default, the number
    of IPs returned will be equivalent to PAGINATE_COUNT. An arbitrary limit (up to MAX_PAGE_SIZE, if
    set) may be passed, however results will not be paginated.

    The advisory lock decorator uses a PostgreSQL advisory lock to prevent this API from being
    invoked in parallel, which results in a race condition where multiple insertions can occur.

    GET: list available IPs within the prefix (unpaginated, up to the computed limit).
    POST: allocate the next available IP(s); accepts a single object or a list of objects.
    """
    # Restrict visibility by the requesting user's object permissions.
    prefix = get_object_or_404(Prefix.objects.restrict(request.user), pk=pk)

    # Create the next available IP within the prefix
    if request.method == "POST":
        # Serialize allocations across workers so two requests can't claim the same IP.
        with cache.lock("available-ips", blocking_timeout=5):

            # Normalize to a list of objects
            requested_ips = request.data if isinstance(request.data, list) else [request.data]

            # Determine if the requested number of IPs is available
            available_ips = prefix.get_available_ips()
            if available_ips.size < len(requested_ips):
                # NOTE(review): HTTP 204 ("No Content") must not carry a response body,
                # yet a detail payload is returned here — presumably 409 Conflict was
                # intended; confirm against the API contract before changing.
                return Response(
                    {
                        "detail": "An insufficient number of IP addresses are available within the prefix {} ({} "
                        "requested, {} available)".format(prefix, len(requested_ips), len(available_ips))
                    },
                    status=status.HTTP_204_NO_CONTENT,
                )

            # Assign addresses from the list of available IPs and copy VRF assignment from the parent prefix
            available_ips = iter(available_ips)
            prefix_length = prefix.prefix.prefixlen
            for requested_ip in requested_ips:
                requested_ip["address"] = "{}/{}".format(next(available_ips), prefix_length)
                requested_ip["vrf"] = prefix.vrf.pk if prefix.vrf else None

            # Initialize the serializer with a list or a single object depending on what was requested
            context = {"request": request}
            if isinstance(request.data, list):
                serializer = serializers.IPAddressSerializer(data=requested_ips, many=True, context=context)
            else:
                serializer = serializers.IPAddressSerializer(data=requested_ips[0], context=context)

            # Create the new IP address(es)
            serializer.is_valid(raise_exception=True)
            serializer.save()

            return Response(serializer.data, status=status.HTTP_201_CREATED)

    # Determine the maximum number of IPs to return
    else:
        try:
            limit = int(request.query_params.get("limit", get_settings_or_config("PAGINATE_COUNT")))
        except ValueError:
            # Non-numeric limit parameter: fall back to the configured default.
            limit = get_settings_or_config("PAGINATE_COUNT")
        if get_settings_or_config("MAX_PAGE_SIZE"):
            limit = min(limit, get_settings_or_config("MAX_PAGE_SIZE"))

        # Calculate available IPs within the prefix (stop as soon as the limit is reached;
        # get_available_ips() may yield a very large set).
        ip_list = []
        for index, ip in enumerate(prefix.get_available_ips(), start=1):
            ip_list.append(ip)
            if index == limit:
                break
        serializer = serializers.AvailableIPSerializer(
            ip_list,
            many=True,
            context={
                "request": request,
                "prefix": prefix.prefix,
                "vrf": prefix.vrf,
            },
        )

        return Response(serializer.data)
def test_config_if_no_setting(self):
    """With no Django setting defined, the Constance config value is returned."""
    resolved = get_settings_or_config("BANNER_TOP")
    self.assertEqual(resolved, "¡Hola, mundo!")
def test_null_settings_override_config(self):
    """A Django setting explicitly set to None takes precedence over the config value."""
    resolved = get_settings_or_config("BANNER_TOP")
    self.assertEqual(resolved, None)
def test_empty_settings_override_config(self):
    """A Django setting set to an empty string takes precedence over the config value."""
    resolved = get_settings_or_config("BANNER_TOP")
    self.assertEqual(resolved, "")
def test_settings_override_config(self):
    """A defined Django setting takes precedence over the Constance config value."""
    resolved = get_settings_or_config("BANNER_TOP")
    self.assertEqual(resolved, "Hello, world!")