Example #1
    def getItems(self, parent: models.ServicePool, item: typing.Optional[str]):
        # Extract provider
        try:
            if not item:
                return [
                    AssignedService.itemToDict(k, True) for k in
                    parent.cachedUserServices().all().prefetch_related(
                        'properties', 'deployed_service', 'publication')
                ]
            cachedService: models.UserService = parent.cachedUserServices().get(
                uuid=processUuid(item)
            )
            return AssignedService.itemToDict(cachedService, True)
        except Exception:
            logger.exception('getItems')
            raise self.invalidItemException()
    def reduceL1Cache(self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int):
        logger.debug("Reducing L1 cache erasing a service in cache for %s", servicePool)
        # We will try to destroy the newest L1 cache element that is USABLE if the deployer can't cancel a new service creation
        # (the selection loop below is also shown as a standalone sketch after this method)
        cacheItems: typing.List[UserService] = list(
            servicePool.cachedUserServices().filter(
                userServiceManager().getCacheStateFilter(services.UserDeployment.L1_CACHE)
            ).exclude(
                Q(properties__name='destroy_after') & Q(properties__value='y')
            ).order_by(
                '-creation_date'
            ).iterator()
        )

        if not cacheItems:
            logger.debug('There are more services than the configured maximum, but cache L1 could not be reduced because it is already empty')
            return

        if cacheL2 < servicePool.cache_l2_srvs:
            valid = None
            for n in cacheItems:
                if n.needsOsManager():
                    if State.isUsable(n.state) is False or State.isUsable(n.os_state):
                        valid = n
                        break
                else:
                    valid = n
                    break

            if valid is not None:
                valid.moveToLevel(services.UserDeployment.L2_CACHE)
                return

        cache = cacheItems[0]
        cache.removeOrCancel()
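
The loop above that picks a "valid" cache element (growL1Cache below reuses the same rule) can be exercised in isolation. A minimal, self-contained sketch of just that selection rule follows; CachedItem, needs_os_manager, state_usable and os_state_usable are hypothetical stand-ins for UserService, needsOsManager() and State.isUsable(), not UDS API.

from dataclasses import dataclass
from typing import Iterable, Optional

@dataclass
class CachedItem:
    name: str
    needs_os_manager: bool   # stand-in for UserService.needsOsManager()
    state_usable: bool       # stand-in for State.isUsable(item.state)
    os_state_usable: bool    # stand-in for State.isUsable(item.os_state)

def pick_candidate(items: Iterable[CachedItem]) -> Optional[CachedItem]:
    # Mirrors the loop above: with an OS manager, an item is only taken when it
    # is not yet usable or its OS state already reports usable; without an OS
    # manager the first item is always a valid candidate.
    for n in items:
        if n.needs_os_manager:
            if not n.state_usable or n.os_state_usable:
                return n
        else:
            return n
    return None

items = [
    CachedItem('a', needs_os_manager=True, state_usable=True, os_state_usable=False),
    CachedItem('b', needs_os_manager=True, state_usable=False, os_state_usable=False),
    CachedItem('c', needs_os_manager=False, state_usable=True, os_state_usable=False),
]
print(pick_candidate(items).name)  # 'b': the first element that may be moved or removed
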
    def growL1Cache(self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int) -> None:
        """
        This method tries to enlarge L1 cache.

        If for some reason the number of deployed services (counting all: ACTIVE
        and PREPARING, assigned, L1 and L2) is over the maximum allowed service
        deployments, this method will not grow the L1 cache.
        """
        logger.debug('Growing L1 cache creating a new service for %s', servicePool.name)
        # First, we try to assign from L2 cache
        if cacheL2 > 0:
            valid = None
            with transaction.atomic():
                for n in servicePool.cachedUserServices().select_for_update().filter(
                        userServiceManager().getCacheStateFilter(services.UserDeployment.L2_CACHE)
                ).order_by('creation_date'):
                    if n.needsOsManager():
                        if State.isUsable(n.state) is False or State.isUsable(n.os_state):
                            valid = n
                            break
                    else:
                        valid = n
                        break

            if valid is not None:
                valid.moveToLevel(services.UserDeployment.L1_CACHE)
                return
        try:
            # This has a valid publication, or it would not be here
            userServiceManager().createCacheFor(
                typing.cast(ServicePoolPublication, servicePool.activePublication()), services.UserDeployment.L1_CACHE
            )
        except MaxServicesReachedError:
            log.doLog(servicePool, log.ERROR, 'Max number of services reached for this service', log.INTERNAL)
            logger.warning('Max user services reached for %s: %s. Cache not created', servicePool.name, servicePool.max_srvs)
        except Exception:
            logger.exception('Exception')
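
The docstring above describes when growL1Cache may run; these grow/reduce methods are meant to be driven by a periodic cache-maintenance worker that compares the current L1/L2/assigned counts against the pool's configured sizes. The sketch below is only a heavily simplified, hypothetical dispatcher to show that relationship: cache_l1_srvs is assumed by analogy with the cache_l2_srvs field used above, and the real worker applies additional checks (publication state, restraints, max_srvs) that are omitted here.

from types import SimpleNamespace

class _NoopUpdater:
    # Illustrative stub: prints which method a real updater would have run
    def __getattr__(self, name):
        return lambda *args, **kwargs: print(f'{name} called')

def adjust_cache(pool, l1: int, l2: int, assigned: int, updater) -> None:
    # pool stands in for a ServicePool, updater for the object owning the
    # growL1Cache/reduceL1Cache/reduceL2Cache methods shown in this example.
    if l1 < pool.cache_l1_srvs:        # too few L1 elements: promote from L2 or create one
        updater.growL1Cache(pool, l1, l2, assigned)
    elif l1 > pool.cache_l1_srvs:      # too many L1 elements: demote or remove one
        updater.reduceL1Cache(pool, l1, l2, assigned)
    elif l2 > pool.cache_l2_srvs:      # L1 is fine but L2 overflows: remove an L2 element
        updater.reduceL2Cache(pool, l1, l2, assigned)

adjust_cache(SimpleNamespace(cache_l1_srvs=2, cache_l2_srvs=1), l1=1, l2=0, assigned=3, updater=_NoopUpdater())
# -> growL1Cache called
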
Example #4
    def getLogs(self, parent: models.ServicePool, item: str) -> typing.List[typing.Any]:
        try:
            userService = parent.cachedUserServices().get(uuid=processUuid(item))
            logger.debug('Getting logs for %s', userService)
            return log.getLogs(userService)
        except Exception:
            raise self.invalidItemException()
    def reduceL2Cache(self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int):
        logger.debug("Reducing L2 cache erasing a service in cache for %s", servicePool.name)
        if cacheL2 > 0:
            cacheItems: typing.List[UserService] = list(
                servicePool.cachedUserServices().filter(
                    userServiceManager().getCacheStateFilter(services.UserDeployment.L2_CACHE)
                ).order_by('creation_date')
            )
            # TODO: Look first for non-finished cache items and cancel them?
            cache = cacheItems[0]
            cache.removeOrCancel()
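
The TODO above hints at a refinement: cancel a still-preparing L2 element before removing an already finished one. A minimal, hypothetical sketch of that ordering (the (name, is_preparing) tuples are illustrative, not UDS data structures):

from typing import List, Tuple

def choose_l2_victim(items: List[Tuple[str, bool]]) -> str:
    # items are (name, is_preparing) pairs ordered oldest first.
    # Prefer an element that is still preparing (cheap to cancel); otherwise
    # fall back to the oldest finished one, which is what the code above does.
    for name, is_preparing in items:
        if is_preparing:
            return name
    return items[0][0]

print(choose_l2_victim([('a', False), ('b', True), ('c', False)]))  # 'b'
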
Example #6
    def getAssignationForUser(
        self, servicePool: ServicePool, user: User
    ) -> typing.Optional[UserService]:  # pylint: disable=too-many-branches
        if servicePool.service.getInstance().spawnsNew is False:
            assignedUserService = self.getExistingAssignationForUser(
                servicePool, user)
        else:
            assignedUserService = None

        # If the user already has an assigned user service, return it without any more work
        if assignedUserService:
            return assignedUserService

        if servicePool.isRestrained():
            raise InvalidServiceException(
                _('The requested service is restrained'))

        cache: typing.Optional[UserService] = None
        # Now try to locate one from the cache that is already "ready" (must be usable and at level 1)
        with transaction.atomic():
            caches = typing.cast(
                typing.List[UserService],
                servicePool.cachedUserServices().select_for_update().filter(
                    cache_level=services.UserDeployment.L1_CACHE,
                    state=State.USABLE,
                    os_state=State.USABLE)[:1])
            if caches:
                cache = caches[0]
                # Ensure element is reserved correctly on DB
                if servicePool.cachedUserServices().select_for_update().filter(
                        user=None, uuid=typing.cast(
                            UserService, cache).uuid).update(
                                user=user, cache_level=0) != 1:
                    cache = None
            else:
                cache = None

        # Out of previous atomic
        if not cache:
            with transaction.atomic():
                caches = typing.cast(
                    typing.List[UserService],
                    servicePool.cachedUserServices().select_for_update().
                    filter(cache_level=services.UserDeployment.L1_CACHE,
                           state=State.USABLE)[:1])
                if caches:
                    cache = caches[0]
                    if servicePool.cachedUserServices().select_for_update(
                    ).filter(user=None,
                             uuid=typing.cast(UserService, cache).uuid).update(
                                 user=user, cache_level=0) != 1:
                        cache = None
                else:
                    cache = None

        # Out of atomic transaction
        if cache:
            # Early assign
            cache.assignToUser(user)

            logger.debug(
                'Found a cached-ready service from %s for user %s, item %s',
                servicePool, user, cache)
            events.addEvent(servicePool,
                            events.ET_CACHE_HIT,
                            fld1=servicePool.cachedUserServices().filter(
                                cache_level=services.UserDeployment.L1_CACHE,
                                state=State.USABLE).count())
            return cache

        # Cache missed

        # Now find if there is a preparing one
        with transaction.atomic():
            caches = servicePool.cachedUserServices().select_for_update(
            ).filter(cache_level=services.UserDeployment.L1_CACHE,
                     state=State.PREPARING)[:1]
            if caches:
                cache = caches[0]
                if servicePool.cachedUserServices().select_for_update().filter(
                        user=None, uuid=typing.cast(
                            UserService, cache).uuid).update(
                                user=user, cache_level=0) != 1:
                    cache = None
            else:
                cache = None

        # Out of atomic transaction
        if cache:
            cache.assignToUser(user)

            logger.debug(
                'Found a cached-preparing service from %s for user %s, item %s',
                servicePool, user, cache)
            events.addEvent(servicePool,
                            events.ET_CACHE_MISS,
                            fld1=servicePool.cachedUserServices().filter(
                                cache_level=services.UserDeployment.L1_CACHE,
                                state=State.PREPARING).count())
            return cache

        # Can't assign directly from L2 cache... so we check if we can create a new service within the requested limits
        serviceType = servicePool.service.getType()
        if serviceType.usesCache:
            # inCacheL1 = ds.cachedUserServices().filter(UserServiceManager.getCacheStateFilter(services.UserDeployment.L1_CACHE)).count()
            inAssigned = servicePool.assignedUserServices().filter(
                UserServiceManager.getStateFilter()).count()
            # totalL1Assigned = inCacheL1 + inAssigned
            if inAssigned >= servicePool.max_srvs:  # cacheUpdater will drop unnecessary L1 machines, so it's not necessary to check against inCacheL1
                log.doLog(
                    servicePool, log.WARN,
                    'Max number of services reached: {}'.format(
                        servicePool.max_srvs), log.INTERNAL)
                raise MaxServicesReachedError()

        # Can create new service, create it
        events.addEvent(servicePool, events.ET_CACHE_MISS, fld1=0)
        return self.createAssignedFor(servicePool, user)
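
The pattern repeated throughout this method (select a candidate under select_for_update, then claim it with a filtered update and verify that exactly one row changed) is what keeps two concurrent requests from being handed the same cached service. Below is a small, self-contained sketch of that claim step using plain sqlite3 instead of the Django ORM; the userservice table and the claim helper are illustrative, not UDS code.

import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE userservice (uuid TEXT PRIMARY KEY, user TEXT, cache_level INTEGER)')
con.execute("INSERT INTO userservice VALUES ('abc', NULL, 1)")  # one unassigned L1 cached service

def claim(con: sqlite3.Connection, uuid: str, user: str) -> bool:
    # Conditional update: only succeeds while the row is still unassigned.
    # A concurrent winner would already have set `user`, so rowcount would be 0
    # and the caller falls back to the next candidate (or creates a new service).
    cur = con.execute(
        'UPDATE userservice SET user = ?, cache_level = 0 '
        'WHERE uuid = ? AND user IS NULL',
        (user, uuid),
    )
    return cur.rowcount == 1

print(claim(con, 'abc', 'alice'))  # True: first caller wins the cached service
print(claim(con, 'abc', 'bob'))    # False: already claimed, fall back / create new
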