Esempio n. 1
0
    def sync_full(cls, **kwargs):
        """
        Sync Cisco DNA Center as RQ job.

        With an ``id`` kwarg, look up that RQ job and return its result
        (or ``None`` if the job is unknown).  Otherwise make sure a full
        sync job is running — starting one if needed — and return a dict
        with the job's ``id`` and ``task`` (function name) for polling.
        """
        data = {}

        # Get RQ queue
        queue = get_queue("default")

        # Get RQ Job ID and display results
        if "id" in kwargs:
            requested = queue.fetch_job(str(kwargs["id"]))
            if requested is None:
                return None
            return requested.result

        # Check whether a background full-sync job is already tracked
        try:
            job = cache.get("ciscodnacnetbox_bg")
        except CacheMiss:
            # If not, start full sync task
            job = full_sync.delay(**kwargs)
            cache.set("ciscodnacnetbox_bg", job.id, timeout=600)

        # Get Job Status
        j = queue.fetch_job(cache.get("ciscodnacnetbox_bg"))
        if "finished" == j.get_status():
            # Start again, if cache expired
            job = full_sync.delay(**kwargs)
            cache.set("ciscodnacnetbox_bg", job.id, timeout=600)
            # BUG FIX: report the freshly started job rather than the
            # finished one, so callers poll the job that is actually running.
            j = queue.fetch_job(job.id)
        data["id"] = str(j.id)
        data["task"] = str(j.func_name)
        return data
Esempio n. 2
0
def invalidate_router_cached_configuration(instance, **kwargs):
    """Drop the cached configuration of a router when the instance changes."""
    cached_config_name = f"configuration_router_{instance.pk}"
    try:
        # Probe first: cache.get raises CacheMiss for absent keys, so the
        # delete only runs for entries that actually exist.
        cache.get(cached_config_name)
    except CacheMiss:
        logger.debug(f"unable to find cached config '{cached_config_name}'")
    else:
        cache.delete(cached_config_name)
Esempio n. 3
0
def invalidate_router_cached_configuration(instance, **kwargs):
    """Remove a changed router's cached configuration, if one exists."""
    key = f"configuration_router_{instance.pk}"
    try:
        cache.get(key)
    except CacheMiss:
        # Nothing cached for this router; nothing to invalidate.
        return
    cache.delete(key)
Esempio n. 4
0
def invalidate_cached_configuration_by_template(instance, **kwargs):
    """Invalidate cached configurations of every router using a changed template."""
    routers = Router.objects.filter(configuration_template=instance)
    for router in routers:
        key = f"configuration_router_{router.pk}"
        try:
            # Only delete entries that are actually present in the cache.
            cache.get(key)
        except CacheMiss:
            continue
        cache.delete(key)
Esempio n. 5
0
def get_super_powers_details(request):
    """Return the 'status' field of the user's cached super-powers entry, or None."""
    cache_key = '%s:%s' % (request.user.pk, settings.SUPER_POWERS_KEY)
    try:
        details = cache.get(cache_key)
    except CacheMiss:
        return None
    return details.get('status', None)
Esempio n. 6
0
def get_latest_release(pre_releases=False):
    """
    Get latest known Nautobot release from cache, or if not available, queue up a background task to populate the cache.

    Returns:
        (Version, str): Latest release version and the release URL, if found in the cache
        ("unknown", None): If not present in the cache at this time
    """
    if not get_settings_or_config("RELEASE_CHECK_URL"):
        logger.debug("Skipping release check; RELEASE_CHECK_URL not defined")
        return "unknown", None

    logger.debug("Checking for most recent release")
    try:
        latest_release = cache.get("latest_release")
    except CacheMiss:
        # Get the releases in the background worker, it will fill the cache
        logger.info(
            "Initiating background task to retrieve updated releases list")
        get_releases.delay(pre_releases=pre_releases)
    else:
        if latest_release:
            logger.debug("Found cached release: {}".format(latest_release))
            return latest_release

    return "unknown", None
Esempio n. 7
0
def get_latest_release(pre_releases=False):
    """Return the newest cached release, else queue a lookup and report unknown."""
    if not settings.RELEASE_CHECK_URL:
        logger.debug("Skipping release check; RELEASE_CHECK_URL not defined")
        return "unknown", None

    logger.debug("Checking for most recent release")
    try:
        latest_release = cache.get("latest_release")
    except CacheMiss:
        # Check for an existing job. This can happen if the RQ worker process is not running.
        queue = get_queue("check_releases")
        if queue.jobs:
            logger.warning(
                "Job to check for new releases is already queued; skipping"
            )
        else:
            # Get the releases in the background worker, it will fill the cache
            logger.info(
                "Initiating background task to retrieve updated releases list"
            )
            get_releases.delay(pre_releases=pre_releases)
    else:
        if latest_release:
            logger.debug("Found cached release: {}".format(latest_release))
            return latest_release

    return "unknown", None
Esempio n. 8
0
def has_super_powers(request):
    """Return the user's cached super-powers entry, or False when absent."""
    key = '%s:%s' % (request.user.pk, settings.SUPER_POWERS_KEY)
    try:
        return cache.get(key)
    except CacheMiss:
        return False
Esempio n. 9
0
def get_super_powers_details(request):
    """Fetch 'status' from the user's cached super-powers record, or None."""
    try:
        record = cache.get('%s:%s' % (request.user.pk,
                                      settings.SUPER_POWERS_KEY))
    except CacheMiss:
        return None
    return record.get('status', None)
Esempio n. 10
0
def profile_to_location(handle):
    """
    Resolve *handle* to a location, caching the result for 20 minutes.

    Bump ``key_salt`` to invalidate previously cached entries.
    """
    timeout = 60 * 20
    key_salt = '1'
    key = f'profile_to_location{handle}_{key_salt}'
    try:
        results = cache.get(key)
    except CacheMiss:
        results = None

    # FIX: return a cached hit immediately. The original called cache.set
    # on every invocation — even on a hit — needlessly rewriting the entry
    # and resetting its TTL; siblings in this file (build_stat_results,
    # get_history_cached) set the cache only after recomputing.
    if results:
        return results

    results = profile_to_location_helper(handle)
    cache.set(key, results, timeout)
    return results
Esempio n. 11
0
def build_stat_results(keyword=None):
    """Return the stats payload for *keyword*, cached for 24 hours."""
    timeout = 60 * 60 * 24
    key_salt = '3'
    key = f'build_stat_results_{keyword}_{key_salt}'
    try:
        cached = cache.get(key)
    except CacheMiss:
        cached = None
    # Serve the cached payload, except in DEBUG where it is always rebuilt.
    if cached and not settings.DEBUG:
        return cached

    fresh = build_stat_results_helper(keyword)
    cache.set(key, fresh, timeout)
    return fresh
Esempio n. 12
0
def get_history_cached(breakdown, i):
    """Return gas history for (breakdown, i), memoised for three hours."""
    timeout = 60 * 60 * 3
    key_salt = '0'
    key = f'get_history_cached_{breakdown}_{i}_{key_salt}'

    try:
        hit = cache.get(key)
        if hit:
            return hit
    except CacheMiss:
        pass

    fresh = gas_history(breakdown, i)
    cache.set(key, fresh, timeout)
    return fresh
Esempio n. 13
0
    def token(self) -> str:
        """Return a cached API token, authenticating afresh when none is cached."""

        def _authenticate(host: str, username: str, password: str) -> dict:
            # The endpoint expects multipart form fields, hence files=.
            url = f'{host}/v2/authentication'
            data = {
                'username': (None, username),
                'password': (None, password),
            }
            r = requests.post(url=url, files=data, verify=False)  # nosec
            return r.json()

        try:
            token = cache.get(self.token_key)
        except CacheMiss:
            # No cached token: log in and cache the new one until it expires.
            response = _authenticate(self.host, self.username, self.password)
            token = response.get('secure_token', '')
            expires_in = response.get('expires_in', 3600)
            cache.set(cache_key=self.token_key, data=token, timeout=expires_in)
        return token
Esempio n. 14
0
def get_virtual_machines(vcenter: ClusterVCenter):
    """Return cached VM data for *vcenter*, scheduling a refresh on a cache miss."""
    if not vcenter:
        return None

    logger.debug("Checking for VMs on {}".format(vcenter.server))
    cache_key = get_cache_key(vcenter)
    try:
        vms = cache.get(cache_key)
    except CacheMiss:
        # Get the VMs in the background worker, it will fill the cache
        logger.info(
            "Initiating background task to retrieve VMs from {}".format(
                vcenter.server))
        refresh_virtual_machines.delay(vcenter=vcenter)
        return None

    # A cached 'FAILED' sentinel means the last fetch failed; report nothing.
    if vms != 'FAILED':
        logger.debug("Found cached VMs on {}".format(vcenter.server))
        return vms
    return None
def get_latest_release(pre_releases=False):
    """Return the newest cached release tuple, else ("unknown", None)."""
    if not settings.RELEASE_CHECK_URL:
        logger.debug("skipping release check; RELEASE_CHECK_URL not defined")
        return "unknown", None

    logger.debug("checking for most recent release")
    try:
        latest_release = cache.get("latest_release")
        if latest_release:
            logger.debug(f"found cached release: {latest_release}")
            return latest_release
    except CacheMiss:
        # Avoid queueing duplicates when no worker is draining the queue.
        queue = get_queue("check_releases")
        if queue.jobs:
            logger.warning("job to check for new releases already queued")
        else:
            logger.info(
                "starting background task to retrieve updated releases list"
            )
            get_releases.delay(pre_releases=pre_releases)

    return "unknown", None
Esempio n. 16
0
    def predict_status(self, request):
        """
        Report the prediction-server status for a project.

        Returns HTTP 400 when the ``project`` query param is missing.
        Non-training projects are reported as 'done'.  On a cache miss a
        background probe job is queued and 'queued' is returned for 60s.
        """
        project_id = self.request.query_params.get('project')
        if not project_id:
            return Response(data='query param "project" empty or not provided',
                            status=status.HTTP_400_BAD_REQUEST)
        project = Project.objects.get(pk=project_id)
        if not project.training_project:
            # BUG FIX: the original built this Response without returning it,
            # so non-training projects fell through to the cache lookup below.
            return Response({'status': 'done'})

        cache_key = f'predict_status_{project_id}'
        try:
            resp = cache.get(cache_key)
        except CacheMiss:
            # Probe asynchronously; the job writes the real status to cache.
            save_prediction_server_status_to_cache_job.delay(
                cache_key, cvat_project_id=project_id)
            resp = {
                'status': 'queued',
            }
            cache.set(cache_key=cache_key, data=resp, timeout=60)

        return Response(resp)
Esempio n. 17
0
    def predict_image(self, request):
        """Serve (or queue computation of) a frame prediction for a task."""
        params = self.request.query_params
        frame = params.get('frame')
        task_id = params.get('task')
        if not task_id:
            return Response(data='query param "task" empty or not provided',
                            status=status.HTTP_400_BAD_REQUEST)
        if not frame:
            return Response(data='query param "frame" empty or not provided',
                            status=status.HTTP_400_BAD_REQUEST)

        cache_key = f'predict_image_{task_id}_{frame}'
        try:
            resp = cache.get(cache_key)
        except CacheMiss:
            # Not computed yet: queue the job and report 'queued' for a minute.
            save_frame_prediction_to_cache_job.delay(cache_key,
                                                     task_id=task_id,
                                                     frame=frame)
            resp = {
                'status': 'queued',
            }
            cache.set(cache_key=cache_key, data=resp, timeout=60)

        return Response(resp)
Esempio n. 18
0
def has_super_powers(request):
    """Look up the user's super-powers cache entry; False when missing."""
    cache_key = '%s:%s' % (request.user.pk, settings.SUPER_POWERS_KEY)
    try:
        return cache.get(cache_key)
    except CacheMiss:
        return False
Esempio n. 19
0
def refresh_virtual_machines(vcenter: ClusterVCenter, force=False):
    """
    Fetch all VMs from a vCenter server and cache their stats.

    Returns the collected stats dict ({'timestamp': ..., 'vms': {...}}),
    the previously cached data when a fresh cache entry exists and
    ``force`` is False, or None when the server recently failed or the
    fetch raises (a 'FAILED' sentinel is then cached to suppress retries).
    """
    config = settings.PLUGINS_CONFIG['netbox_vcenter']
    vcenter_cache_key = get_cache_key(vcenter)

    # Check whether this server has failed recently and shouldn't be retried yet
    try:
        cached_data = cache.get(vcenter_cache_key)
        if not force and cached_data == 'FAILED':
            logger.info(
                "Skipping vCenter update; server {} failed recently".format(
                    vcenter.server))
            return

        if not force:
            logger.info(
                "Skipping vCenter update; server {} already in cache".format(
                    vcenter.server))
            return cached_data
    except CacheMiss:
        pass

    service_instance = None
    try:
        logger.debug("Fetching VMs from {}".format(vcenter.server))

        # Connect to the vCenter server
        if vcenter.validate_certificate:
            service_instance = connect.Connect(vcenter.server,
                                               user=vcenter.username,
                                               pwd=vcenter.password)
        else:
            service_instance = connect.ConnectNoSSL(vcenter.server,
                                                    user=vcenter.username,
                                                    pwd=vcenter.password)

        content = service_instance.RetrieveContent()

        vms = get_objects_of_type(content, vim.VirtualMachine)
        all_stats = {'timestamp': time.time(), 'vms': {}}
        dvs_cache = {}
        portgroup_cache = {}
        for vm in vms:
            # BUG FIX: the key was initialized as 'power' but the loop below
            # only ever writes 'powered_on', leaving a dead always-None key
            # and no default for VMs whose powerState is unset.
            vm_stats = {
                'powered_on': None,
                'vcpus': None,
                'memory': None,
                'disk': None,
                'nics': [],
            }

            try:
                if vm.runtime.powerState:
                    vm_stats[
                        'powered_on'] = vm.runtime.powerState == 'poweredOn'
                if vm.config.hardware.numCPU:
                    vm_stats['vcpus'] = vm.config.hardware.numCPU
                if vm.config.hardware.memoryMB:
                    vm_stats['memory'] = vm.config.hardware.memoryMB

                disk_devices = [
                    device for device in vm.config.hardware.device
                    if isinstance(device, vim.vm.device.VirtualDisk)
                ]
                if disk_devices:
                    # Sum and convert from KB to GB
                    total_capacity = 0
                    for device in disk_devices:
                        total_capacity += device.capacityInKB
                    vm_stats['disk'] = round(total_capacity / 1048576)

                for dev in vm.config.hardware.device:
                    if isinstance(dev, vim.vm.device.VirtualEthernetCard):
                        vlan = get_nic_vlan(content, dvs_cache,
                                            portgroup_cache, vm, dev)
                        vm_stats['nics'].append({
                            'label': dev.deviceInfo.label,
                            'mac_address': dev.macAddress,
                            'vlan': vlan,
                        })
            except Exception:
                # Skip this VM but keep processing the rest of the inventory.
                logger.exception(
                    "Error while fetching virtual machine {} from {}".format(
                        vm.name, vcenter.server))
                continue

            # Collect all stats for returning
            all_stats['vms'][vm.name] = vm_stats

        # Cache a list of all VMs
        cache.set(vcenter_cache_key, all_stats, config['CACHE_TIMEOUT'])

        return all_stats
    except Exception:
        # Set a cookie in the cache so we don't keep retrying
        logger.exception("Error while fetching virtual machines from {}. "
                         "Disabling checks for 5 minutes.".format(
                             vcenter.server))
        cache.set(vcenter_cache_key, 'FAILED', config['CACHE_FAILURE_TIMEOUT'])
    finally:
        # Always drop the vSphere session, even on failure paths.
        if service_instance:
            connect.Disconnect(service_instance)