Example #1
    def sync_full(cls, **kwargs):
        """
        Sync Cisco DNA Center as RQ job
        """
        data = {}

        # Get RQ queue
        queue = get_queue("default")

        # Get RQ Job ID and display results
        if "id" in kwargs:
            data = queue.fetch_job(str(kwargs["id"]))
            if data is None:
                return None
            return data.result

        # Check if an RQ job is already ongoing
        try:
            job = cache.get("ciscodnacnetbox_bg")
        except CacheMiss:
            # If not, start full sync task
            job = full_sync.delay(**kwargs)
            cache.set("ciscodnacnetbox_bg", job.id, timeout=600)

        # Get Job Status
        j = queue.fetch_job(cache.get("ciscodnacnetbox_bg"))
        if "finished" == j.get_status():
            # Start again, if cache expired
            job = full_sync.delay(**kwargs)
            cache.set("ciscodnacnetbox_bg", job.id, timeout=600)
        data["id"] = str(j.id)
        data["task"] = str(j.func_name)
        return data
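A minimal usage sketch for the method above (not part of the plugin itself): the class name SyncTasks is an assumed stand-in for whatever class defines sync_full, and only the two calling modes, starting a sync versus polling a job id, mirror the method.

def trigger_or_poll(job_id=None):
    # Hypothetical caller; `SyncTasks` is an assumed name for the class
    # that defines sync_full above.
    if job_id:
        # Poll a previously started RQ job by id (returns its result or None)
        return SyncTasks.sync_full(id=job_id)
    # No id given: start the full sync, or reuse the one tracked under
    # "ciscodnacnetbox_bg"; returns {"id": ..., "task": ...}
    return SyncTasks.sync_full()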
Example #2
def get_releases(pre_releases=False):
    url = settings.RELEASE_CHECK_URL
    releases = []

    try:
        logger.debug(f"fetching new releases from {url}")
        response = requests.get(
            url, headers={"Accept": "application/vnd.github.v3+json"})
        response.raise_for_status()
        total_releases = len(response.json())

        for release in response.json():
            if "tag_name" not in release:
                continue
            if not pre_releases and (release.get("devrelease")
                                     or release.get("prerelease")):
                continue
            releases.append(
                (version.parse(release["tag_name"]), release.get("html_url")))
        logger.debug(
            f"found {total_releases} releases; {len(releases)} usable")
    except requests.exceptions.RequestException:
        logger.exception(f"error while fetching {url}")
        return []

    # Cache the most recent release (skip if nothing usable was returned)
    if releases:
        cache.set("latest_release", max(releases), settings.RELEASE_CHECK_TIMEOUT)

    return releases
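A possible consumer of the "latest_release" entry cached above might look like the sketch below; the function itself is made up, and only the cacheops-style cache/CacheMiss handling matches the examples in this listing.

from cacheops import CacheMiss, cache

def latest_release_notice():
    # Hypothetical reader of the tuple cached by get_releases():
    # (parsed version, html_url)
    try:
        latest_version, release_url = cache.get("latest_release")
    except CacheMiss:
        return None  # nothing cached yet; get_releases() has not run or found nothing
    return f"Release {latest_version} is available at {release_url}"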
Example #3
def activate_super_powers(request):
    cache.set('%s:%s' % (request.user.pk, settings.SUPER_POWERS_KEY),
              data={
                  'started': timezone.now(),
                  'status': PENDING_VALIDATION
              },
              timeout=settings.SUPER_POWERS_DURATION)
    if get_super_powers_details(request) is None:
        return False
    else:
        return True
Example #4
def save_prediction_server_status_to_cache_job(cache_key,
                                               cvat_project_id,
                                               timeout=60):
    cvat_project = Project.objects.get(pk=cvat_project_id)
    api = TrainingServerAPI(host=cvat_project.training_project.host,
                            username=cvat_project.training_project.username,
                            password=cvat_project.training_project.password)
    status = api.get_project_status(
        project_id=cvat_project.training_project.training_id)

    resp = {**status, 'status': 'done'}
    cache.set(cache_key=cache_key, data=resp, timeout=timeout)
Example #5
def profile_to_location(handle):
    timeout = 60 * 20
    key_salt = '1'
    key = f'profile_to_location{handle}_{key_salt}'
    try:
        results = cache.get(key)
    except CacheMiss:
        results = None

    if not results:
        results = profile_to_location_helper(handle)
    cache.set(key, results, timeout)

    return results
Example #6
def build_stat_results(keyword=None):
    timeout = 60 * 60 * 24
    key_salt = '3'
    key = f'build_stat_results_{keyword}_{key_salt}'
    try:
        results = cache.get(key)
    except CacheMiss:
        results = None
    if results and not settings.DEBUG:
        return results

    results = build_stat_results_helper(keyword)
    cache.set(key, results, timeout)

    return results
Example #7
def get_history_cached(breakdown, i):
    timeout = 60 * 60 * 3
    key_salt = '0'
    key = f'get_history_cached_{breakdown}_{i}_{key_salt}'

    try:
        results = cache.get(key)
    except CacheMiss:
        results = None

    if results:
        return results

    results = gas_history(breakdown, i)
    cache.set(key, results, timeout)

    return results
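Examples #5, #6 and #7 all repeat the same get-or-compute idiom; the decorator below is only a generic sketch of that idiom (it is not part of any of the projects above, and the cacheops import mirrors the examples rather than a confirmed dependency). Unlike the originals, it also treats falsy cached values as hits.

from functools import wraps
from cacheops import CacheMiss, cache

def cached_result(key_prefix, timeout, key_salt='0'):
    # Generic form of the pattern above: try the cache first, otherwise
    # compute, store the result with a timeout, and return it.
    def decorator(func):
        @wraps(func)
        def wrapper(*args):
            key = f"{key_prefix}_{'_'.join(str(a) for a in args)}_{key_salt}"
            try:
                return cache.get(key)
            except CacheMiss:
                results = func(*args)
                cache.set(key, results, timeout)
                return results
        return wrapper
    return decorator

# Usage sketch: @cached_result('gas_history', timeout=60 * 60 * 3)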
Example #8
    def token(self) -> str:
        def get_token(host: str, username: str, password: str) -> dict:
            url = f'{host}/v2/authentication'
            data = {
                'username': (None, username),
                'password': (None, password),
            }
            r = requests.post(url=url, files=data, verify=False)  # nosec
            return r.json()

        try:
            token = cache.get(self.token_key)
        except CacheMiss:
            response = get_token(self.host, self.username, self.password)
            token = response.get('secure_token', '')
            expires_in = response.get('expires_in', 3600)
            cache.set(cache_key=self.token_key, data=token, timeout=expires_in)
        return token
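The cached token above would typically be attached to later requests; the method below is only a sketch of such a caller. The endpoint path and the Bearer header format are assumptions, not taken from the same code base.

    def server_version(self) -> dict:
        # Hypothetical authenticated call; assumes `token` is exposed as a
        # property and that the server accepts a Bearer header (assumption).
        url = f'{self.host}/v2/version'
        headers = {'Authorization': f'Bearer {self.token}'}
        r = requests.get(url=url, headers=headers, verify=False)  # nosec
        return r.json()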
Example #9
    def predict_status(self, request):
        project_id = self.request.query_params.get('project')
        if not project_id:
            return Response(data='query param "project" empty or not provided',
                            status=status.HTTP_400_BAD_REQUEST)
        project = Project.objects.get(pk=project_id)
        if not project.training_project:
            return Response({'status': 'done'})

        cache_key = f'predict_status_{project_id}'
        try:
            resp = cache.get(cache_key)
        except CacheMiss:
            save_prediction_server_status_to_cache_job.delay(
                cache_key, cvat_project_id=project_id)
            resp = {
                'status': 'queued',
            }
            cache.set(cache_key=cache_key, data=resp, timeout=60)

        return Response(resp)
Example #10
def activate_super_powers(request):
    if get_super_powers_details(request) is None:
        cache.set('%s:%s' % (request.user.pk, settings.SUPER_POWERS_KEY),
                  data={
                      'started': timezone.now(),
                      'status': PENDING_VALIDATION
                  },
                  timeout=settings.SUPER_POWERS_DURATION)
        return False
    else:
        cache.set('%s:%s' % (request.user.pk, settings.SUPER_POWERS_KEY),
                  data={
                      'started': timezone.now(),
                      'status': VALIDATED
                  },
                  timeout=settings.SUPER_POWERS_DURATION)
        return True
Example #11
    def predict_image(self, request):
        frame = self.request.query_params.get('frame')
        task_id = self.request.query_params.get('task')
        if not task_id:
            return Response(data='query param "task" empty or not provided',
                            status=status.HTTP_400_BAD_REQUEST)
        if not frame:
            return Response(data='query param "frame" empty or not provided',
                            status=status.HTTP_400_BAD_REQUEST)
        cache_key = f'predict_image_{task_id}_{frame}'
        try:
            resp = cache.get(cache_key)
        except CacheMiss:
            save_frame_prediction_to_cache_job.delay(cache_key,
                                                     task_id=task_id,
                                                     frame=frame)
            resp = {
                'status': 'queued',
            }
            cache.set(cache_key=cache_key, data=resp, timeout=60)

        return Response(resp)
Example #12
def save_frame_prediction_to_cache_job(cache_key: str,
                                       task_id: int,
                                       frame: int,
                                       timeout: int = 60):
    task = Task.objects.get(pk=task_id)
    training_project_image = TrainingProjectImage.objects.filter(
        idx=frame, task=task).first()
    if not training_project_image:
        cache.set(cache_key=cache_key,
                  data={
                      'annotation': [],
                      'status': 'done'
                  },
                  timeout=timeout)
        return

    cvat_labels = Label.objects.filter(project__id=task.project_id).all()
    training_project = Project.objects.get(pk=task.project_id).training_project
    api = TrainingServerAPI(host=training_project.host,
                            username=training_project.username,
                            password=training_project.password)
    image = Image.objects.get(frame=frame, data=task.data)
    labels_mapping = {
        TrainingProjectLabel.objects.get(
            cvat_label=cvat_label).training_label_id: cvat_label.id
        for cvat_label in cvat_labels
    }
    annotation = api.get_annotation(
        project_id=training_project.training_id,
        image_id=training_project_image.training_image_id,
        width=image.width,
        height=image.height,
        labels_mapping=labels_mapping,
        frame=frame)
    resp = {'annotation': annotation, 'status': 'done'}
    cache.set(cache_key=cache_key, data=resp, timeout=timeout)
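Examples #9, #11 and #12 together form a queue-then-poll handshake: the view returns {'status': 'queued'} and the RQ job later overwrites the cache entry with 'done'. The client-side loop below is purely illustrative; the endpoint path and query parameters are assumptions based on the view code above.

import time
import requests

def poll_frame_prediction(base_url, task_id, frame, interval=2.0, attempts=30):
    # Hypothetical poller: keep hitting the predict_image endpoint until the
    # background job has replaced the cached 'queued' entry with 'done'.
    for _ in range(attempts):
        resp = requests.get(f'{base_url}/predict/image',
                            params={'task': task_id, 'frame': frame}).json()
        if resp.get('status') == 'done':
            return resp.get('annotation', [])
        time.sleep(interval)
    raise TimeoutError('prediction job did not finish in time')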
Example #13
def refresh_virtual_machines(vcenter: ClusterVCenter, force=False):
    config = settings.PLUGINS_CONFIG['netbox_vcenter']
    vcenter_cache_key = get_cache_key(vcenter)

    # Check whether this server has failed recently and shouldn't be retried yet
    try:
        cached_data = cache.get(vcenter_cache_key)
        if not force and cached_data == 'FAILED':
            logger.info(
                "Skipping vCenter update; server {} failed recently".format(
                    vcenter.server))
            return

        if not force:
            logger.info(
                "Skipping vCenter update; server {} already in cache".format(
                    vcenter.server))
            return cached_data
    except CacheMiss:
        pass

    service_instance = None
    try:
        logger.debug("Fetching VMs from {}".format(vcenter.server))

        # Connect to the vCenter server
        if vcenter.validate_certificate:
            service_instance = connect.Connect(vcenter.server,
                                               user=vcenter.username,
                                               pwd=vcenter.password)
        else:
            service_instance = connect.ConnectNoSSL(vcenter.server,
                                                    user=vcenter.username,
                                                    pwd=vcenter.password)

        content = service_instance.RetrieveContent()

        vms = get_objects_of_type(content, vim.VirtualMachine)
        all_stats = {'timestamp': time.time(), 'vms': {}}
        dvs_cache = {}
        portgroup_cache = {}
        for vm in vms:
            vm_stats = {
                'powered_on': None,
                'vcpus': None,
                'memory': None,
                'disk': None,
                'nics': [],
            }

            try:
                if vm.runtime.powerState:
                    vm_stats[
                        'powered_on'] = vm.runtime.powerState == 'poweredOn'
                if vm.config.hardware.numCPU:
                    vm_stats['vcpus'] = vm.config.hardware.numCPU
                if vm.config.hardware.memoryMB:
                    vm_stats['memory'] = vm.config.hardware.memoryMB

                disk_devices = [
                    device for device in vm.config.hardware.device
                    if isinstance(device, vim.vm.device.VirtualDisk)
                ]
                if disk_devices:
                    # Sum and convert from KB to GB
                    total_capacity = 0
                    for device in disk_devices:
                        total_capacity += device.capacityInKB
                    vm_stats['disk'] = round(total_capacity / 1048576)

                for dev in vm.config.hardware.device:
                    if isinstance(dev, vim.vm.device.VirtualEthernetCard):
                        vlan = get_nic_vlan(content, dvs_cache,
                                            portgroup_cache, vm, dev)
                        vm_stats['nics'].append({
                            'label': dev.deviceInfo.label,
                            'mac_address': dev.macAddress,
                            'vlan': vlan,
                        })
            except Exception:
                logger.exception(
                    "Error while fetching virtual machine {} from {}".format(
                        vm.name, vcenter.server))
                continue

            # Collect all stats for returning
            all_stats['vms'][vm.name] = vm_stats

        # Cache a list of all VMs
        cache.set(vcenter_cache_key, all_stats, config['CACHE_TIMEOUT'])

        return all_stats
    except Exception:
        # Set a cookie in the cache so we don't keep retrying
        logger.exception("Error while fetching virtual machines from {}. "
                         "Disabling checks for 5 minutes.".format(
                             vcenter.server))
        cache.set(vcenter_cache_key, 'FAILED', config['CACHE_FAILURE_TIMEOUT'])
    finally:
        if service_instance:
            connect.Disconnect(service_instance)
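Since refresh_virtual_machines returns None both while the FAILED marker is cached and after a fresh connection error, callers have to tolerate missing data; a minimal sketch of such a caller follows (the helper name is made up).

def get_vm_stats(vcenter, vm_name):
    # Hypothetical helper around refresh_virtual_machines(): tolerate a None
    # return (recent failure or connection error) and unknown VM names.
    all_stats = refresh_virtual_machines(vcenter)
    if not all_stats:
        return None
    return all_stats['vms'].get(vm_name)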