Example 1
 def run(self):
     logger.debug('Publishing')
     servicePoolPub = None
     try:
         now = getSqlDatetime()
         with transaction.atomic():
             servicePoolPub = DeployedServicePublication.objects.select_for_update().get(pk=self._publishId)
             if servicePoolPub.state != State.LAUNCHING:  # If not launching (may have been canceled by the user), just return
                 return
             servicePoolPub.state = State.PREPARING
             servicePoolPub.save()
         pi = servicePoolPub.getInstance()
         state = pi.publish()
         deployedService = servicePoolPub.deployed_service
         deployedService.current_pub_revision += 1
         deployedService.storeValue('toBeReplacedIn', pickle.dumps(now + datetime.timedelta(hours=GlobalConfig.SESSION_EXPIRE_TIME.getInt(True))))
         deployedService.save()
         PublicationFinishChecker.checkAndUpdateState(servicePoolPub, pi, state)
     except DeployedServicePublication.DoesNotExist:  # Deployed service publication has been removed from database, this is ok, just ignore it
         pass
     except Exception:
         logger.exception("Exception launching publication")
         try:
             servicePoolPub.state = State.ERROR
             servicePoolPub.save()
         except Exception:
             logger.error('Error saving ERROR state for pool {}'.format(servicePoolPub))
Example 2
    def addCounter(self, owner_type, owner_id, counterType, counterValue, stamp=None):
        '''
        Adds a new counter stats to database.

        Args:

            owner_type: type of owner (integer, from internal tables)
            owner_id:  id of the owner
            counterType: The type of counter that will receive the value (look at uds.core.util.stats.counters module)
            counterValue: Counter to store. Right now, this must be an integer value (-2G ~ 2G)
            stamp: if not None, this will be used as the date for the counter; otherwise the current date/time is used
                   (this has a granularity of seconds)

        Returns:

            Nothing
        '''
        if stamp is None:
            stamp = getSqlDatetime()

        # To Unix epoch
        stamp = int(time.mktime(stamp.timetuple()))  # pylint: disable=maybe-no-member

        try:
            StatsCounters.objects.create(owner_type=owner_type, owner_id=owner_id, counter_type=counterType, value=counterValue, stamp=stamp)
            return True
        except Exception:
            logger.error('Exception handling counter stats saving (maybe database is full?)')
        return False
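A standalone sketch of the epoch conversion addCounter performs above (standard library only, no UDS API assumed): time.mktime collapses the stamp to whole seconds in the local timezone, which is why the docstring warns about a granularity of seconds.

import datetime
import time

stamp = datetime.datetime(2024, 1, 1, 12, 0, 0)
epoch = int(time.mktime(stamp.timetuple()))        # whole seconds, local timezone
restored = datetime.datetime.fromtimestamp(epoch)  # sub-second precision is lost
assert restored == stamp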
Example 3
 def run(self):
     """
     Look for "hanged" scheduler tasks and reset them
     """
     since = getSqlDatetime() - timedelta(minutes=15)
     with transaction.atomic():
         Scheduler.objects.select_for_update().filter(last_execution__lt=since, state=State.RUNNING).update(owner_server='', state=State.FOR_EXECUTE)
Example 4
    def __log(self, owner_type, owner_id, level, message, source, avoidDuplicates):
        """
        Logs a message associated to owner
        """
        from uds.models import getSqlDatetime
        from uds.models import Log

        # Ensure message fits on space
        message = message[:255]

        qs = Log.objects.filter(owner_id=owner_id, owner_type=owner_type)
        # First, ensure we do not have more logs than the maximum requested, so we can add one more log item
        if qs.count() >= GlobalConfig.MAX_LOGS_PER_ELEMENT.getInt():
            for i in qs.order_by('-created',)[GlobalConfig.MAX_LOGS_PER_ELEMENT.getInt() - 1:]:
                i.delete()

        if avoidDuplicates is True:
            try:
                lg = Log.objects.filter(owner_id=owner_id, owner_type=owner_type, level=level, source=source).order_by('-created', '-id')[0]
                if lg.message == message:
                    # Do not log again, already logged
                    return
            except Exception:  # Log does not exist
                pass

        # now, we add new log
        try:
            Log.objects.create(owner_type=owner_type, owner_id=owner_id, created=getSqlDatetime(), source=source, level=level, data=message)
        except Exception:
            # Some objects will not get logged, such as System administrator objects
            pass
Example 5
    def executeOneDelayedTask(self):
        now = getSqlDatetime()
        filt = Q(execution_time__lt=now) | Q(insert_date__gt=now + timedelta(seconds=30))
        # If next execution is before now or last execution is in the future (clock changed on this server, we take that task as executable)
        try:
            with transaction.atomic():  # Encloses
                # Throws exception if no delayed task is available
                task = dbDelayedTask.objects.select_for_update().filter(filt).order_by('execution_time')[0]  # @UndefinedVariable
                if task.insert_date > now + timedelta(seconds=30):
                    logger.warning('Executed {} due to insert_date being in the future!'.format(task.type))
                taskInstanceDump = encoders.decode(task.instance, 'base64')
                task.delete()
            taskInstance = loads(taskInstanceDump)
        except IndexError:
            return  # No problem, there is no waiting delayed task
        except Exception:
            # Transaction has been rolled back by the "with atomic" block, so here we just return
            # Note that if taskInstance can't be loaded, this task will not be retried
            logger.exception('Executing one task')
            return

        if taskInstance is not None:
            logger.debug('Executing delayedTask:>{0}<'.format(task))
            taskInstance.env = Environment.getEnvForType(taskInstance.__class__)
            DelayedTaskThread(taskInstance).start()
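For reference, a self-contained sketch of the serialize/deserialize round trip the runner relies on: the task instance is pickled and base64-encoded before being stored in the instance column, then decoded and unpickled at execution time. DummyTask is a hypothetical stand-in, not a UDS class.

import base64
import pickle

class DummyTask:
    def __init__(self, payload):
        self.payload = payload

stored = base64.b64encode(pickle.dumps(DummyTask('hello')))  # what goes into task.instance
restored = pickle.loads(base64.b64decode(stored))            # what loads() recovers
assert restored.payload == 'hello'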
Example 6
    def _save(self, name, content):
        name = self.get_valid_name(name)
        try:
            f = self._dbFileForReadWrite(name)
        except DBFile.DoesNotExist:
            now = getSqlDatetime()
            f = DBFile.objects.create(owner=self.owner, name=name, created=now, modified=now)

        f.data = content.read()
        f.modified = getSqlDatetime()
        f.save()

        # Store on cache also
        self._storeInCache(f)

        return name
Example 7
def getServicesPoolsCounters(servicePool, counter_type):
    # pylint: disable=no-value-for-parameter
    try:
        cacheKey = (servicePool and str(servicePool.id) or 'all') + str(counter_type) + str(POINTS) + str(SINCE)
        to = getSqlDatetime()
        since = to - timedelta(days=SINCE)
        val = cache.get(cacheKey)
        if val is None:
            if servicePool is None:
                us = DeployedService()
                complete = True  # Get all deployed services stats
            else:
                us = servicePool
                complete = False
            val = []
            for x in counters.getCounters(us, counter_type, since=since, to=to, limit=POINTS, use_max=USE_MAX, all=complete):
                val.append({'stamp': x[0], 'value': int(x[1])})
            if len(val) > 2:
                cache.put(cacheKey, pickle.dumps(val).encode('zip'), 600)
            else:
                val = [{'stamp': since, 'value': 0}, {'stamp': to, 'value': 0}]
        else:
            val = pickle.loads(val.decode('zip'))

        return val
    except Exception:
        logger.exception('exception')
        raise ResponseError('can\'t create stats for objects!!!')
Example 8
 def get(self, rangeStart=0, rangeEnd=MAX_SEQ):
     '''
     Tries to generate a new unique id in the range provided. This unique id
     is global to "unique ids' database
     '''
     # First look for a name in the range defined
     stamp = getSqlDatetime(True)
     # logger.debug(UniqueId)
     try:
         UniqueId.objects.lock()  # @UndefinedVariable
         flt = self.__filter(rangeStart, rangeEnd)
         try:
             item = flt.filter(assigned=False).order_by('seq')[0]
             UniqueId.objects.filter(id=item.id).update(owner=self._owner, assigned=True, stamp=stamp)  # @UndefinedVariable
             seq = item.seq
         except Exception:  # No free element found
             try:
                 last = flt.filter(assigned=True)[0]  # DB Returns correct order so the 0 item is the last
                 seq = last.seq + 1
             except Exception:  # If there are none assigned in the database
                 seq = rangeStart
             # logger.debug('Found seq {0}'.format(seq))
             if seq > rangeEnd:
                 return -1  # No ids free in range
             UniqueId.objects.create(owner=self._owner, basename=self._baseName, seq=seq, assigned=True, stamp=stamp)  # @UndefinedVariable
         logger.debug('Seq: {}'.format(seq))
         return seq
     except Exception:
         logger.exception('Generating unique id sequence')
         return None
     finally:
         UniqueId.objects.unlock()  # @UndefinedVariable
Example 9
    def publish(self, servicePool, changeLog=None):  # pylint: disable=no-self-use
        '''
        Initiates the publication of a service pool, or raises an exception if this cannot be done
        :param servicePool: Service pool object (db object)
        '''
        if servicePool.publications.filter(state__in=State.PUBLISH_STATES).count() > 0:
            raise PublishException(_('Already publishing. Wait for previous publication to finish and try again'))

        if servicePool.isInMaintenance():
            raise PublishException(_('Service is in maintenance mode and new publications are not allowed'))

        try:
            now = getSqlDatetime()
            dsp = None
            dsp = servicePool.publications.create(state=State.LAUNCHING, state_date=now, publish_date=now, revision=servicePool.current_pub_revision)
            if changeLog:
                servicePool.changelog.create(revision=servicePool.current_pub_revision, log=changeLog, stamp=now)
            DelayedTaskRunner.runner().insert(PublicationLauncher(dsp), 4, PUBTAG + str(dsp.id))
        except Exception as e:
            logger.debug('Caught exception at publish: {0}'.format(e))
            if dsp is not None:
                try:
                    dsp.delete()
                except Exception:
                    logger.info('Could not delete {}'.format(dsp))
            raise PublishException(str(e))
Example 10
    def executeOneJob(self):
        '''
        Looks for the best waiting job and executes it
        '''
        jobInstance = None
        try:
            now = getSqlDatetime()  # Datetimes are based on database server times
            fltr = Q(state=State.FOR_EXECUTE) & (Q(last_execution__gt=now) | Q(next_execution__lt=now))
            with transaction.atomic():
                # If next execution is before now or last execution is in the future (clock changed on this server, we take that task as executable)
                # These params are all set inside fltr (look at __init__)
                job = dbScheduler.objects.select_for_update().filter(fltr).order_by('next_execution')[0]  # @UndefinedVariable
                job.state = State.RUNNING
                job.owner_server = self._hostname
                job.last_execution = now
                job.save()

            jobInstance = job.getInstance()

            if jobInstance is None:
                logger.error('Job instance can\'t be resolved for {0}, removing it'.format(job))
                job.delete()
                return
            logger.debug('Executing job:>{0}<'.format(job.name))
            JobThread(jobInstance, job).start()  # Do not instantiate the thread, just run it
        except IndexError:
            # Do nothing, there is no jobs for execution
            return
        except DatabaseError:
            # This will happen whenever a connection error or a deadlock error happens
            # This in fact means that we have to retry the operation, and the retry will happen on the main loop
            # Look at this http://dev.mysql.com/doc/refman/5.0/en/innodb-deadlocks.html
            # I have got some deadlock errors, but looking at that url, I found that it is not so abnormal
            # logger.debug('Deadlock, no problem at all :-) (sounds hards, but really, no problem, will retry later :-) )')
            raise DatabaseError('Database access problems. Retrying connection')
Example 11
 def releaseOlderThan(self, stamp=None):
     stamp = getSqlDatetime(True) if stamp is None else stamp
     try:
         UniqueId.objects.lock()  # @UndefinedVariable
         UniqueId.objects.filter(owner=self._owner, stamp__lt=stamp).update(assigned=False, owner='', stamp=stamp)  # @UndefinedVariable
         self.__purge()
     finally:
         UniqueId.objects.unlock()  # @UndefinedVariable
Example 12
    def __doCleanup(self, model):
        minTime = time.mktime((getSqlDatetime() - datetime.timedelta(days=GlobalConfig.STATS_DURATION.getInt())).timetuple())

        # Newer Django versions (at least 1.7) do these deletions properly (executing a DELETE FROM ... WHERE ...)
        model.objects.filter(stamp__lt=minTime).delete()

        # Optimize mysql tables after deletions
        optimizeTable(model._meta.db_table)
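A standalone sketch of the cutoff computation above: rows whose integer stamp column is older than STATS_DURATION days get purged. The 30-day figure is a placeholder for GlobalConfig.STATS_DURATION.getInt(), and datetime.now() stands in for getSqlDatetime().

import datetime
import time

STATS_DURATION_DAYS = 30  # placeholder for GlobalConfig.STATS_DURATION.getInt()

now = datetime.datetime.now()  # stands in for getSqlDatetime()
min_time = int(time.mktime((now - datetime.timedelta(days=STATS_DURATION_DAYS)).timetuple()))
# every row with stamp < min_time would be deleted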
Example 13
 def run(self):
     removeFrom = getSqlDatetime() - timedelta(seconds=10)  # We keep the machine for at least 10 seconds before removing it, to avoid connection errors
     removables = UserService.objects.filter(state=State.REMOVABLE, state_date__lt=removeFrom,
                                             deployed_service__service__provider__maintenance_mode=False)[0:UserServiceRemover.removeAtOnce]
     for us in removables:
         try:
             UserServiceManager.manager().remove(us)
         except Exception:
             logger.exception('Exception invoking remove user service {}'.format(us))
Example 14
 def releaseOwnShedules():
     """
     Releases all scheduled tasks being executed by this server
     """
     logger.debug('Releasing all owned scheduled tasks')
     with transaction.atomic():
         dbScheduler.objects.select_for_update().filter(owner_server=platform.node()).update(owner_server='')  # @UndefinedVariable
         dbScheduler.objects.select_for_update().filter(last_execution__lt=getSqlDatetime() - timedelta(minutes=15), state=State.RUNNING).update(owner_server='', state=State.FOR_EXECUTE)  # @UndefinedVariable
         dbScheduler.objects.select_for_update().filter(owner_server='').update(state=State.FOR_EXECUTE)  # @UndefinedVariable
Example 15
 def release(self):
     try:
         UniqueId.objects.lock()  # @UndefinedVariable
         UniqueId.objects.filter(owner=self._owner).update(
             assigned=False, owner="", stamp=getSqlDatetime(True)
         )  # @UndefinedVariable
         self.__purge()
     finally:
         UniqueId.objects.unlock()  # @UndefinedVariable
Example 16
    def get(self, rangeStart=0, rangeEnd=MAX_SEQ):
        """
        Tries to generate a new unique id in the range provided. This unique id
        is global to "unique ids' database
        """
        # First look for a name in the range defined
        stamp = getSqlDatetime(True)
        seq = rangeStart
        # logger.debug(UniqueId)
        counter = 0
        while True:
            counter += 1
            try:
                # logger.debug('Creating new seq in range {}-{}'.format(rangeStart, rangeEnd))
                with transaction.atomic():
                    flt = self.__filter(rangeStart, rangeEnd, forUpdate=True)
                    try:
                        item = flt.filter(assigned=False).order_by('seq')[0]
                        item.owner = self._owner
                        item.assigned = True
                        item.stamp = stamp
                        item.save()
                        # UniqueId.objects.filter(id=item.id).update(owner=self._owner, assigned=True, stamp=stamp)  # @UndefinedVariable
                        seq = item.seq
                        break
                    except IndexError:  # No free element found
                        item = None

                    # No free item was found on the first attempt (i.e. none already created and freed)
                    if item is None:
                        # logger.debug('No free found, creating new one')
                        try:
                            last = flt.filter(assigned=True)[0]  # DB Returns correct order so the 0 item is the last
                            seq = last.seq + 1
                        except IndexError:  # If there is no assigned at database
                            seq = rangeStart
                        # logger.debug('Found seq {0}'.format(seq))
                        if seq > rangeEnd:
                            return -1  # No ids free in range
                        # It may occur under some circumstances that concurrent access yields the same item twice; in
                        # this case we will get a "duplicate key" error,
                        UniqueId.objects.create(owner=self._owner, basename=self._baseName, seq=seq, assigned=True, stamp=stamp)  # @UndefinedVariable
                        break
            except OperationalError:  # Locked, may occur for example on sqlite. We will wait a bit
                # logger.exception('Got database locked')
                if counter % 5 == 0:
                    connection.close()
                time.sleep(1)
            except IntegrityError:  # Concurrent creation, may fail, simply retry
                pass
            except Exception:
                logger.exception('Error')
                return -1

        # logger.debug('Seq: {}'.format(seq))
        return seq
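The loop above is an optimistic-allocation pattern: try to claim or create a row inside a transaction, retry on IntegrityError (a concurrent worker created the same seq) and back off on OperationalError (e.g. a locked sqlite database). A generic skeleton, with a hypothetical allocate() callable standing in for the UniqueId queries:

import time

from django.db import IntegrityError, OperationalError, connection

def allocate_with_retry(allocate, max_tries=50):
    for attempt in range(1, max_tries + 1):
        try:
            return allocate()        # may raise IntegrityError / OperationalError
        except IntegrityError:       # duplicate key: another worker won the race
            continue                 # retry with fresh state
        except OperationalError:     # database locked (e.g. sqlite): back off
            if attempt % 5 == 0:
                connection.close()   # force a fresh connection now and then
            time.sleep(1)
    return -1                        # give up, mirroring the -1 sentinel above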
Example 17
 def run(self):
     since_state = getSqlDatetime() - timedelta(seconds=MAX_STUCK_TIME)
     # Filter for locating machines that are not ready
     for ds in DeployedService.objects.filter(service__provider__maintenance_mode=False):
         logger.debug('Searching for stuck states for {0}'.format(ds))
         # Info states are removed on UserServiceCleaner and VALID_STATES are ok, or if "hanged", checked on "HangedCleaner"
         for us in ds.userServices.filter(state_date__lt=since_state).exclude(state__in=State.INFO_STATES + State.VALID_STATES):
             logger.debug('Found stuck user service {0}'.format(us))
             log.doLog(ds, log.ERROR, 'User service {0} has been hard removed because it\'s stuck'.format(us.friendly_name))
             us.delete()
Example 18
 def __createAssignedAtDb(self, deployedServicePublication, user):
     '''
     Private method to instantiate an assigned element in the database with default state
     '''
     self.__checkMaxDeployedReached(deployedServicePublication.deployed_service)
     now = getSqlDatetime()
     return deployedServicePublication.userServices.create(cache_level=0, state=State.PREPARING, os_state=State.PREPARING,
                                                           state_date=now, creation_date=now, data='',
                                                           deployed_service=deployedServicePublication.deployed_service,
                                                           user=user, in_use=False)
Example 19
 def refresh(self, skey):
     # logger.debug('Refreshing key "%s" for cache "%s"' % (skey, self._owner,))
     try:
         key = self.__getKey(skey)
         c = dbCache.objects.get(pk=key)  # @UndefinedVariable
         c.created = getSqlDatetime()
         c.save()
     except dbCache.DoesNotExist:  # @UndefinedVariable
         logger.debug("Can't refresh cache key %s because it doesn't exists" % skey)
         return
Example 20
 def __createAssignedAtDbForNoPublication(self, deployedService, user):
     '''
     __createCacheAtDb and __createAssignedAtDb use a publication to create the UserService.
     There are cases where deployed services do not have publications (they do not need them), so we need this method to create
     a UserService with no publication, creating it from a DeployedService
     '''
     self.__checkMaxDeployedReached(deployedService)
     now = getSqlDatetime()
     return deployedService.userServices.create(cache_level=0, state=State.PREPARING, os_state=State.PREPARING,
                                                state_date=now, creation_date=now, data='', publication=None, user=user, in_use=False)
Example 21
 def __updateDb(self):
     '''
     Atomically updates the scheduler db to "release" this job
     '''
     with transaction.atomic():
         job = dbScheduler.objects.select_for_update().get(id=self._dbJobId)  # @UndefinedVariable
         job.state = State.FOR_EXECUTE
         job.owner_server = ''
         job.next_execution = getSqlDatetime() + timedelta(seconds=job.frecuency)
         # Update state and last execution time at database
         job.save()
Example 22
 def __createCacheAtDb(self, deployedServicePublication, cacheLevel):
     '''
     Private method to instantiate a cache element in the database with default states
     '''
     # Checks if maxDeployed has been reached and if so, raises an exception
     self.__checkMaxDeployedReached(deployedServicePublication.deployed_service)
     now = getSqlDatetime()
     return deployedServicePublication.userServices.create(cache_level=cacheLevel, state=State.PREPARING, os_state=State.PREPARING,
                                                           state_date=now, creation_date=now, data='',
                                                           deployed_service=deployedServicePublication.deployed_service,
                                                           user=None, in_use=False)
Example 23
    def __insert(self, instance, delay, tag):
        now = getSqlDatetime()
        exec_time = now + timedelta(seconds=delay)
        cls = instance.__class__
        instanceDump = dumps(instance).encode(self.CODEC)
        typeName = str(cls.__module__ + '.' + cls.__name__)

        logger.debug('Inserting delayed task {0} with {1} bytes ({2})'.format(typeName, len(instanceDump), exec_time))

        dbDelayedTask.objects.create(type=typeName, instance=instanceDump,  # @UndefinedVariable
                                     insert_date=now, execution_delay=delay, execution_time=exec_time, tag=tag)
Example 24
 def run(self):
     with transaction.atomic():
         removeFrom = getSqlDatetime() - timedelta(seconds=10)  # We keep the machine for at least 10 seconds before removing it, to avoid connection errors
         removables = UserService.objects.filter(state=State.REMOVABLE, state_date__lt=removeFrom,
                                                 deployed_service__service__provider__maintenance_mode=False)[0:UserServiceRemover.removeAtOnce]
     for us in removables:
         logger.debug('Checking removal of {}'.format(us))
         try:
             if managers.userServiceManager().canRemoveServiceFromDeployedService(us.deployed_service) is True:
                 managers.userServiceManager().remove(us)
         except Exception:
             logger.exception('Exception removing user service')
Example 25
    def run(self):
        since_state = getSqlDatetime() - timedelta(seconds=GlobalConfig.MAX_INITIALIZING_TIME.getInt())
        # Filter for locating machines that are not ready
        flt = Q(state_date__lt=since_state, state=State.PREPARING) | Q(state_date__lt=since_state, state=State.USABLE, os_state=State.PREPARING)

        for ds in DeployedService.objects.exclude(osmanager=None, state__in=State.VALID_STATES, service__provider__maintenance_mode=True):
            logger.debug('Searching for hanged services for {0}'.format(ds))
            for us in ds.userServices.filter(flt):
                logger.debug('Found hanged service {0}'.format(us))
                log.doLog(us, log.ERROR, 'User Service seems to be hanged. Removing it.', log.INTERNAL)
                log.doLog(ds, log.ERROR, 'Removing user service {0} because it seems to be hanged'.format(us.friendly_name))
                us.removeOrCancel()
Example 26
    def run(self):
        try:
            servicePoolPub = DeployedServicePublication.objects.get(pk=self._id)
            if servicePoolPub.state != State.REMOVABLE:
                logger.info('Already removed')

            now = getSqlDatetime()
            activePub = servicePoolPub.deployed_service.activePublication()
            servicePoolPub.deployed_service.userServices.filter(in_use=True).update(in_use=False, state_date=now)
            servicePoolPub.deployed_service.markOldUserServicesAsRemovables(activePub)
        except Exception:
            logger.exception('Trace (treated exception, not fault)')
Example 27
 def createAssignable(self, ds, deployed, user):
     '''
     Creates an assignable service
     '''
     now = getSqlDatetime()
     assignable = ds.userServices.create(cache_level=0, state=State.PREPARING, os_state=State.PREPARING,
                                         state_date=now, creation_date=now, data='', user=user, in_use=False)
     state = deployed.deployForUser(user)
     try:
         UserServiceOpChecker.makeUnique(assignable, deployed, state)
     except Exception as e:
         logger.exception("Exception {0}".format(e))
Example 28
 def free(self, seq):
     try:
         logger.debug("Freeing seq {0} from {1}  ({2})".format(seq, self._owner, self._baseName))
         UniqueId.objects.lock()  # @UndefinedVariable
         flt = (
             self.__filter(0)
             .filter(owner=self._owner, seq=seq)
             .update(owner="", assigned=False, stamp=getSqlDatetime(True))
         )
         if flt > 0:
             self.__purge()
     finally:
         UniqueId.objects.unlock()  # @UndefinedVariable
Example 29
 def get(self, skey, defValue=None):
     now = getSqlDatetime()
     # logger.debug('Requesting key "%s" for cache "%s"' % (skey, self._owner,))
     try:
         key = self.__getKey(skey)
         c = dbCache.objects.get(pk=key)  # @UndefinedVariable
         expired = now > c.created + timedelta(seconds=c.validity)
         if expired:
             return defValue
         val = pickle.loads(c.value.decode(Cache.CODEC))
         return val
     except dbCache.DoesNotExist:  # @UndefinedVariable
         logger.debug("key not found: {}".format(skey))
         return defValue
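The expiry rule used above, isolated into a pure function for clarity (a sketch; the real code inlines the comparison): an entry expires once created plus validity seconds is in the past.

from datetime import datetime, timedelta

def is_expired(created: datetime, validity_seconds: int, now: datetime) -> bool:
    return now > created + timedelta(seconds=validity_seconds)

assert is_expired(datetime(2024, 1, 1), 60, datetime(2024, 1, 1, 0, 2))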
Example 30
    def transfer(self, seq, toUidGen):
        try:
            UniqueId.objects.lock()  # @UndefinedVariable

            obj = UniqueId.objects.get(owner=self._owner, seq=seq)  # @UndefinedVariable
            obj.owner = toUidGen._owner
            obj.basename = toUidGen._baseName
            obj.stamp = getSqlDatetime(True)
            obj.save()

            return True
        except Exception:
            logger.exception('EXCEPTION AT transfer')
            return False
        finally:
            UniqueId.objects.unlock()  # @UndefinedVariable
Example 31
    def run(self):
        try:
            servicePoolPub: ServicePoolPublication = ServicePoolPublication.objects.get(
                pk=self._id)
            if servicePoolPub.state != State.REMOVABLE:
                logger.info('Already removed')

            now = getSqlDatetime()
            activePub: typing.Optional[
                ServicePoolPublication] = servicePoolPub.deployed_service.activePublication(
                )
            servicePoolPub.deployed_service.userServices.filter(
                in_use=True).update(in_use=False, state_date=now)
            servicePoolPub.deployed_service.markOldUserServicesAsRemovables(
                activePub)
        except Exception:
            pass
Example 32
 def __createAssignedAtDbForNoPublication(self, deployedService, user):
     '''
     __createCacheAtDb and __createAssignedAtDb use a publication to create the UserService.
     There are cases where deployed services do not have publications (they do not need them), so we need this method to create
     a UserService with no publication, creating it from a DeployedService
     '''
     self.__checkMaxDeployedReached(deployedService)
     now = getSqlDatetime()
     return deployedService.userServices.create(cache_level=0,
                                                state=State.PREPARING,
                                                os_state=State.PREPARING,
                                                state_date=now,
                                                creation_date=now,
                                                data='',
                                                publication=None,
                                                user=user,
                                                in_use=False)
Example 33
 def __createAssignedAtDb(self, deployedServicePublication, user):
     '''
     Private method to instantiate an assigned element in the database with default state
     '''
     self.__checkMaxDeployedReached(
         deployedServicePublication.deployed_service)
     now = getSqlDatetime()
     return deployedServicePublication.userServices.create(
         cache_level=0,
         state=State.PREPARING,
         os_state=State.PREPARING,
         state_date=now,
         creation_date=now,
         data='',
         deployed_service=deployedServicePublication.deployed_service,
         user=user,
         in_use=False)
Example 34
    def __insert(self, instance: DelayedTask, delay: int, tag: str) -> None:
        now = getSqlDatetime()
        exec_time = now + timedelta(seconds=delay)
        cls = instance.__class__
        instanceDump = encoders.encodeAsStr(pickle.dumps(instance), 'base64')
        typeName = str(cls.__module__ + '.' + cls.__name__)

        logger.debug('Inserting delayed task %s with %s bytes (%s)', typeName,
                     len(instanceDump), exec_time)

        DBDelayedTask.objects.create(
            type=typeName,
            instance=instanceDump,  # @UndefinedVariable
            insert_date=now,
            execution_delay=delay,
            execution_time=exec_time,
            tag=tag)
Example 35
 def __createCacheAtDb(self, publication: ServicePoolPublication,
                       cacheLevel: int) -> UserService:
     """
     Private method to instantiate a cache element in the database with default states
     """
     # Checks if maxDeployed has been reached and if so, raises an exception
     self.__checkMaxDeployedReached(publication.deployed_service)
     now = getSqlDatetime()
     return publication.userServices.create(
         cache_level=cacheLevel,
         state=State.PREPARING,
         os_state=State.PREPARING,
         state_date=now,
         creation_date=now,
         data='',
         deployed_service=publication.deployed_service,
         user=None,
         in_use=False)
Example 36
 def run(self):
     since_state = getSqlDatetime() - timedelta(seconds=self.frecuency)
     for ds in DeployedService.objects.all():
         # Skips checking deployed services in maintenance mode
         if ds.isInMaintenance() is True:
             continue
         # If the pool has an os manager, let it process the unused machines
         if ds.osmanager is not None:
             osm = ds.osmanager.getInstance()
             if osm.processUnusedMachines is True:
                 logger.debug('Processing unused services for {}, {}'.format(ds, ds.osmanager))
                 for us in ds.assignedUserServices().filter(in_use=False, state_date__lt=since_state, state=State.USABLE, os_state=State.USABLE):
                     logger.debug('Found unused assigned service {0}'.format(us))
                     osm.processUnused(us)
         else:  # No os manager, simply remove unused services in specified time
             for us in ds.assignedUserServices().filter(in_use=False, state_date__lt=since_state, state=State.USABLE, os_state=State.USABLE):
                 logger.debug('Found unused assigned service with no OS Manager {0}'.format(us))
                 us.remove()
Example 37
 def createAssignable(self, ds, deployed, user):
     '''
     Creates an assignable service
     '''
     now = getSqlDatetime()
     assignable = ds.userServices.create(cache_level=0,
                                         state=State.PREPARING,
                                         os_state=State.PREPARING,
                                         state_date=now,
                                         creation_date=now,
                                         data='',
                                         user=user,
                                         in_use=False)
     state = deployed.deployForUser(user)
     try:
         UserServiceOpChecker.makeUnique(assignable, deployed, state)
     except Exception as e:
         logger.exception("Exception {0}".format(e))
Example 38
    def publish(self,
                servicePool: ServicePool,
                changeLog: typing.Optional[str] = None):  # pylint: disable=no-self-use
        """
        Initiates the publication of a service pool, or raises an exception if this cannot be done
        :param servicePool: Service pool object (db object)
        :param changeLog: if not None, store change log string on "change log" table
        """
        if servicePool.publications.filter(
                state__in=State.PUBLISH_STATES).count() > 0:
            raise PublishException(
                _('Already publishing. Wait for previous publication to finish and try again'
                  ))

        if servicePool.isInMaintenance():
            raise PublishException(
                _('Service is in maintenance mode and new publications are not allowed'
                  ))

        publication: typing.Optional[ServicePoolPublication] = None
        try:
            now = getSqlDatetime()
            publication = servicePool.publications.create(
                state=State.LAUNCHING,
                state_date=now,
                publish_date=now,
                revision=servicePool.current_pub_revision)
            if changeLog:
                servicePool.changelog.create(
                    revision=servicePool.current_pub_revision,
                    log=changeLog,
                    stamp=now)
            if publication:
                DelayedTaskRunner.runner().insert(
                    PublicationLauncher(publication), 4,
                    PUBTAG + str(publication.id))
        except Exception as e:
            logger.debug('Caught exception at publish: %s', e)
            if publication is not None:
                try:
                    publication.delete()
                except Exception:
                    logger.info('Could not delete %s', publication)
            raise PublishException(str(e))
Example 39
 def run(self):
     configuredAction: CalendarAction
     for configuredAction in CalendarAction.objects.filter(
             service_pool__service__provider__maintenance_mode=False,  # Avoid maintenance
             service_pool__state=states.servicePool.ACTIVE,  # Avoid non-active pools
             next_execution__lt=getSqlDatetime()).order_by('next_execution'):
         logger.info('Executing calendar action %s.%s (%s)',
                     configuredAction.service_pool.name,
                     configuredAction.calendar.name,
                     configuredAction.action)
         try:
             configuredAction.execute()
         except Exception:
             logger.exception(
                 'Got an exception executing calendar access action: %s',
                 configuredAction)
Example 40
    def __log(self, owner_type: int, owner_id: int, level: int, message: str,
              source: str, avoidDuplicates: bool):
        """
        Logs a message associated to owner
        """
        from uds.models import getSqlDatetime
        from uds.models import Log

        # Ensure message fits on space
        message = str(message)[:255]

        qs = Log.objects.filter(owner_id=owner_id, owner_type=owner_type)
        # First, ensure we do not have more than requested logs, and we can put one more log item
        if qs.count() >= GlobalConfig.MAX_LOGS_PER_ELEMENT.getInt():
            for i in qs.order_by('-created')[GlobalConfig.MAX_LOGS_PER_ELEMENT.getInt() - 1:]:
                i.delete()

        if avoidDuplicates is True:
            try:
                lg = Log.objects.filter(owner_id=owner_id,
                                        owner_type=owner_type,
                                        level=level,
                                        source=source).order_by(
                                            '-created', '-id')[0]
                if lg.message == message:
                    # Do not log again, already logged
                    return
            except Exception:  # Log does not exist
                pass

        # now, we add new log
        try:
            Log.objects.create(owner_type=owner_type,
                               owner_id=owner_id,
                               created=getSqlDatetime(),
                               source=source,
                               level=level,
                               data=message)
        except Exception:
            # Some objects will not get logged, such as System administrator objects, but this is fine
            pass
Example 41
 def run(self) -> None:
     since_state = getSqlDatetime() - timedelta(
         seconds=GlobalConfig.CHECK_UNUSED_TIME.getInt())
     # Locate service pools with pending assigned service in use
     outdatedServicePools = ServicePool.objects.annotate(outdated=Count(
         'userServices',
         filter=Q(
             userServices__in_use=False,
             userServices__state_date__lt=since_state,
             userServices__state=State.USABLE,
             userServices__os_state=State.USABLE,
             userServices__cache_level=0,
         ),
     )).filter(outdated__gt=0, state=State.ACTIVE)
     for ds in outdatedServicePools:
         # Skip deployed services in maintenance mode or those that ignore unused services
         if ds.isInMaintenance() is True or ds.ignores_unused:
             continue
         # If the pool has an os manager, let it process the unused machines
         if ds.osmanager:
             osm = ds.osmanager.getInstance()
             if osm.processUnusedMachines is True:
                 logger.debug('Processing unused services for %s, %s', ds,
                              ds.osmanager)
                 for us in ds.assignedUserServices().filter(
                         in_use=False,
                         state_date__lt=since_state,
                         state=State.USABLE,
                         os_state=State.USABLE,
                 ):
                     logger.debug('Found unused assigned service %s', us)
                     osm.processUnused(us)
         else:  # No os manager, simply remove unused services in specified time
             for us in ds.assignedUserServices().filter(
                     in_use=False,
                     state_date__lt=since_state,
                     state=State.USABLE,
                     os_state=State.USABLE,
             ):
                 logger.debug(
                     'Found unused assigned service with no OS Manager %s',
                     us)
                 us.remove()
Example 42
 def run(self):
     with transaction.atomic():
         removeFrom = getSqlDatetime() - timedelta(seconds=10)  # We keep the machine for at least 10 seconds before removing it, to avoid connection errors
         removables = UserService.objects.filter(
             state=State.REMOVABLE,
             state_date__lt=removeFrom,
             deployed_service__service__provider__maintenance_mode=False
         )[0:UserServiceRemover.removeAtOnce]
     for us in removables:
         logger.debug('Checking removal of {}'.format(us))
         try:
             if managers.userServiceManager().canRemoveServiceFromDeployedService(us.deployed_service) is True:
                 managers.userServiceManager().remove(us)
         except Exception:
             logger.exception('Exception removing user service')
Example 43
    def executeOneJob(self):
        '''
        Looks for the best waiting job and executes it
        '''
        jobInstance = None
        try:
            now = getSqlDatetime()  # Datetimes are based on database server times
            fltr = Q(state=State.FOR_EXECUTE) & (Q(last_execution__gt=now)
                                                 | Q(next_execution__lt=now))
            with transaction.atomic():
                # If next execution is before now or last execution is in the future (clock changed on this server, we take that task as executable)
                # These params are all set inside fltr (look at __init__)
                job = dbScheduler.objects.select_for_update().filter(
                    fltr).order_by('next_execution')[0]  # @UndefinedVariable
                job.state = State.RUNNING
                job.owner_server = self._hostname
                job.last_execution = now
                job.save()

            jobInstance = job.getInstance()

            if jobInstance is None:
                logger.error('Job instance can\'t be resolved for {0}, removing it'.format(job))
                job.delete()
                return
            logger.debug('Executing job:>{0}<'.format(job.name))
            JobThread(jobInstance, job).start()  # Do not instantiate the thread, just run it
        except IndexError:
            # Do nothing, there is no jobs for execution
            return
        except DatabaseError as e:
            # This will happen whenever a connection error or a deadlock error happens
            # This in fact means that we have to retry the operation, and the retry will happen on the main loop
            # Look at this http://dev.mysql.com/doc/refman/5.0/en/innodb-deadlocks.html
            # I have got some deadlock errors, but looking at that url, I found that it is not so abnormal
            # logger.debug('Deadlock, no problem at all :-) (sounds hards, but really, no problem, will retry later :-) )')
            raise DatabaseError(
                'Database access problems. Retrying connection ({})'.format(e))
Example 44
    def executeOneDelayedTask(self):
        now = getSqlDatetime()
        filt = Q(execution_time__lt=now) | Q(insert_date__gt=now + timedelta(seconds=30))
        # If next execution is before now or last execution is in the future (clock changed on this server, we take that task as executable)
        taskInstance = None
        try:
            with transaction.atomic():  # Encloses
                task = dbDelayedTask.objects.select_for_update().filter(filt).order_by('execution_time')[0]  # @UndefinedVariable
                taskInstanceDump = encoders.decode(task.instance, 'base64')
                task.delete()
            taskInstance = loads(taskInstanceDump)
        except Exception:
            # Transaction has been rolled back by the "with atomic" block, so here we just return
            # Note that if taskInstance can't be loaded, this task will not be retried
            return

        if taskInstance is not None:
            logger.debug('Executing delayedTask:>{0}<'.format(task))
            taskInstance.env = Environment.getEnvForType(taskInstance.__class__)
            DelayedTaskThread(taskInstance).start()
Example 45
def getServicesPoolsCounters(
        servicePool: typing.Optional[models.ServicePool],
        counter_type: int) -> typing.List[typing.Mapping[str, typing.Any]]:
    try:
        cacheKey = ((servicePool and str(servicePool.id) or 'all') +
                    str(counter_type) + str(POINTS) + str(SINCE))
        to = models.getSqlDatetime()
        since: datetime.datetime = to - datetime.timedelta(days=SINCE)

        cachedValue: typing.Optional[bytes] = cache.get(cacheKey)
        if not cachedValue:
            if not servicePool:
                us = models.ServicePool()
                complete = True  # Get all deployed services stats
            else:
                us = servicePool
                complete = False
            val: typing.List[typing.Mapping[str, typing.Any]] = []
            for x in counters.getCounters(
                    us,
                    counter_type,
                    since=since,
                    to=to,
                    max_intervals=POINTS,
                    use_max=USE_MAX,
                    all=complete,
            ):
                val.append({'stamp': x[0], 'value': int(x[1])})
            if len(val) > 2:
                cache.put(cacheKey, codecs.encode(pickle.dumps(val), 'zip'),
                          600)
            else:
                val = [{'stamp': since, 'value': 0}, {'stamp': to, 'value': 0}]
        else:
            val = pickle.loads(codecs.decode(cachedValue, 'zip'))

        # return [{'stamp': since + datetime.timedelta(hours=i*10), 'value': i*i} for i in range(300)]
        return val
    except Exception:
        logger.exception('exception')
        raise ResponseError('can\'t create stats for objects!!!')
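A round-trip sketch of the cache encoding used above: the value list is pickled, then compressed through the standard 'zip' (zlib) codec, and reversed on read. Standard library only.

import codecs
import pickle

val = [{'stamp': 0, 'value': 1}]
blob = codecs.encode(pickle.dumps(val), 'zip')  # what cache.put stores
assert pickle.loads(codecs.decode(blob, 'zip')) == val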
Example 46
    def run(self):
        since_state = getSqlDatetime() - timedelta(
            seconds=GlobalConfig.MAX_INITIALIZING_TIME.getInt())
        # Filter for locating machines that are not ready
        flt = (Q(state_date__lt=since_state, state=State.PREPARING)
               | Q(state_date__lt=since_state,
                   state=State.USABLE,
                   os_state=State.PREPARING))

        withHangedServices = ServicePool.objects.annotate(hanged=Count(
            'userServices',
            # Rewritten filter for servicePool
            filter=Q(userServices__state_date__lt=since_state,
                     userServices__state=State.PREPARING)
            | Q(userServices__state_date__lt=since_state,
                userServices__state=State.USABLE,
                userServices__os_state=State.PREPARING)
        )).exclude(hanged=0).exclude(service__provider__maintenance_mode=True).filter(state=State.ACTIVE)

        # Type
        servicePool: ServicePool

        for servicePool in withHangedServices:
            logger.debug('Searching for hanged services for %s', servicePool)
            us: UserService
            for us in servicePool.userServices.filter(flt):
                if us.getProperty('destroy_after'):  # It's waiting for removal, skip this very specific case
                    continue
                logger.debug('Found hanged service %s', us)
                log.doLog(us, log.ERROR,
                          'User Service seems to be hanged. Removing it.',
                          log.INTERNAL)
                log.doLog(
                    servicePool, log.ERROR,
                    'Removing user service {} because it seems to be hanged'.format(us.friendly_name))
                us.removeOrCancel()
Example 47
    def continueRemovalOf(self, ds):
        # Recheck that no publication was created at a "bad moment"
        try:
            for p in ds.publications.filter(state=State.PREPARING):
                p.cancel()
        except Exception:
            pass

        try:
            # Now all publications are being canceled, let's try to cancel cache and assigned services
            uServices = ds.userServices.filter(state=State.PREPARING)
            for u in uServices:
                logger.debug('Canceling {0}'.format(u))
                u.cancel()
        except Exception:
            pass

        # First, we remove all publications and user services in "info_state"
        with transaction.atomic():
            ds.userServices.select_for_update().filter(state__in=State.INFO_STATES).delete()

        # Mark usable user services as removable
        now = getSqlDatetime()

        with transaction.atomic():
            ds.userServices.select_for_update().filter(state=State.USABLE).update(state=State.REMOVABLE, state_date=now)

        # When no service is at database, we start with publications
        if ds.userServices.all().count() == 0:
            try:
                logger.debug('All services removed, checking active publication')
                if ds.activePublication() is not None:
                    logger.debug('Active publication found, unpublishing it')
                    ds.unpublish()
                else:
                    logger.debug('No active publication found, removing info states and checking if removal is done')
                    ds.publications.filter(state__in=State.INFO_STATES).delete()
                    if ds.publications.count() == 0:
                        ds.removed()  # Mark it as removed, clean later from database
            except Exception:
                logger.exception('Caught unexpected exception at continueRemovalOf: ')
Example 48
    def addCounter(self,
                   owner_type,
                   owner_id,
                   counterType,
                   counterValue,
                   stamp=None):
        """
        Adds a new counter stats to database.

        Args:

            owner_type: type of owner (integer, from internal tables)
            owner_id:  id of the owner
            counterType: The type of counter that will receive the value (look at uds.core.util.stats.counters module)
            counterValue: Counter to store. Right now, this must be an integer value (-2G ~ 2G)
            stamp: if not None, this will be used as the date for the counter; otherwise the current date/time is used
                   (this has a granularity of seconds)

        Returns:

            Nothing
        """
        if stamp is None:
            stamp = getSqlDatetime()

        # To Unix epoch
        stamp = int(time.mktime(stamp.timetuple()))  # pylint: disable=maybe-no-member

        try:
            StatsCounters.objects.create(owner_type=owner_type,
                                         owner_id=owner_id,
                                         counter_type=counterType,
                                         value=counterValue,
                                         stamp=stamp)
            return True
        except Exception:
            logger.error(
                'Exception handling counter stats saving (maybe database is full?)'
            )
        return False
Example 49
 def get(self, rangeStart=0, rangeEnd=MAX_SEQ):
     '''
     Tries to generate a new unique id in the range provided. This unique id
     is global to "unique ids' database
     '''
     # First look for a name in the range defined
     stamp = getSqlDatetime(True)
     # logger.debug(UniqueId)
     try:
         UniqueId.objects.lock()  # @UndefinedVariable
         flt = self.__filter(rangeStart, rangeEnd)
         try:
             item = flt.filter(assigned=False).order_by('seq')[0]
             UniqueId.objects.filter(id=item.id).update(
                 owner=self._owner, assigned=True,
                 stamp=stamp)  # @UndefinedVariable
             seq = item.seq
         except Exception:  # No free element found
             try:
                 last = flt.filter(assigned=True)[0]  # DB returns correct order so item 0 is the last
                 seq = last.seq + 1
             except Exception:  # If there are none assigned in the database
                 seq = rangeStart
             # logger.debug('Found seq {0}'.format(seq))
             if seq > rangeEnd:
                 return -1  # No ids free in range
             UniqueId.objects.create(owner=self._owner,
                                     basename=self._baseName,
                                     seq=seq,
                                     assigned=True,
                                     stamp=stamp)  # @UndefinedVariable
         logger.debug('Seq: {}'.format(seq))
         return seq
     except Exception:
         logger.exception('Generating unique id sequence')
         return None
     finally:
         UniqueId.objects.unlock()  # @UndefinedVariable
Example 50
 def put(self, skey, value, validity=None):
     # logger.debug('Saving key "%s" for cache "%s"' % (skey, self._owner,))
     if validity is None:
         validity = Cache.DEFAULT_VALIDITY
     key = self.__getKey(skey)
     value = cPickle.dumps(value).encode(Cache.CODEC)
     now = getSqlDatetime()
     try:
         dbCache.objects.create(owner=self._owner,
                                key=key,
                                value=value,
                                created=now,
                                validity=validity)  # @UndefinedVariable
     except Exception:
         # Already exists, modify it
         c = dbCache.objects.get(pk=key)  # @UndefinedVariable
         c.owner = self._owner
         c.key = key
         c.value = value
         c.created = now  # Reuse the DB-based timestamp computed above, consistent with the create branch
         c.validity = validity
         c.save()
Example 51
def getServicesPoolsCounters(
        servicePool: typing.Optional[models.ServicePool],
        counter_type: int) -> typing.List[typing.Dict[str, typing.Any]]:
    # pylint: disable=no-value-for-parameter
    try:
        cacheKey = ((servicePool and str(servicePool.id) or 'all')
                    + str(counter_type) + str(POINTS) + str(SINCE))
        to = models.getSqlDatetime()
        since: datetime.datetime = to - datetime.timedelta(days=SINCE)

        val: typing.Any = cache.get(cacheKey)
        if not val:
            if not servicePool:
                us = models.ServicePool()
                complete = True  # Get all deployed services stats
            else:
                us = servicePool
                complete = False
            val = []
            for x in counters.getCounters(us,
                                          counter_type,
                                          since=since,
                                          to=to,
                                          max_intervals=POINTS,
                                          use_max=USE_MAX,
                                          all=complete):
                val.append({'stamp': x[0], 'value': int(x[1])})
            if len(val) > 2:
                cache.put(cacheKey, encoders.encode(pickle.dumps(val), 'zip'),
                          600)
            else:
                val = [{'stamp': since, 'value': 0}, {'stamp': to, 'value': 0}]
        else:
            val = pickle.loads(typing.cast(bytes, encoders.decode(val, 'zip')))

        return val
    except Exception:
        logger.exception('exception')
        raise ResponseError('can\'t create stats for objects!!!')
Example 52
    def publish(self, servicePool, changeLog=None):  # pylint: disable=no-self-use
        '''
        Initiates the publication of a service pool, or raises an exception if this cannot be done
        :param servicePool: Service pool object (db object)
        '''
        if servicePool.publications.filter(
                state__in=State.PUBLISH_STATES).count() > 0:
            raise PublishException(
                _('Already publishing. Wait for previous publication to finish and try again'
                  ))

        if servicePool.isInMaintenance():
            raise PublishException(
                _('Service is in maintenance mode and new publications are not allowed'
                  ))

        try:
            now = getSqlDatetime()
            dsp = None
            dsp = servicePool.publications.create(
                state=State.LAUNCHING,
                state_date=now,
                publish_date=now,
                revision=servicePool.current_pub_revision)
            if changeLog:
                servicePool.changelog.create(
                    revision=servicePool.current_pub_revision,
                    log=changeLog,
                    stamp=now)
            DelayedTaskRunner.runner().insert(PublicationLauncher(dsp), 4,
                                              PUBTAG + str(dsp.id))
        except Exception as e:
            logger.debug('Caught exception at publish: {0}'.format(e))
            if dsp is not None:
                try:
                    dsp.delete()
                except Exception:
                    logger.info('Could not delete {}'.format(dsp))
            raise PublishException(str(e))
Example 53
    def run(self):
        since_state: datetime = getSqlDatetime() - timedelta(
            seconds=MAX_STUCK_TIME)
        # Filter for locating machine stuck on removing, cancelling, etc..
        # Locate service pools with pending assigned service in use
        servicePoolsWithStucks = (
            ServicePool.objects.annotate(
                stuckCount=Count(
                    'userServices',
                    filter=Q(userServices__state_date__lt=since_state)
                    & (
                        Q(
                            userServices__state=State.PREPARING,
                            userServices__properties__name='destroy_after',
                        )
                        | ~Q(userServices__state__in=State.INFO_STATES + State.VALID_STATES)
                    ),
                )
            )
            .filter(service__provider__maintenance_mode=False, state=State.ACTIVE)
            .exclude(stuckCount=0)
        )

        # Info states are removed by UserServiceCleaner; VALID_STATES are OK, and "hanged" ones are handled by HangedCleaner
        def stuckUserServices(servicePool: ServicePool) -> typing.Iterable[UserService]:
            q = servicePool.userServices.filter(state_date__lt=since_state)
            yield from q.exclude(state__in=State.INFO_STATES + State.VALID_STATES)
            yield from q.filter(state=State.PREPARING, properties__name='destroy_after')

        for servicePool in servicePoolsWithStucks:
            # logger.debug('Searching for stuck states for %s', servicePool.name)
            for stuck in stuckUserServices(servicePool):
                logger.debug('Found stuck user service %s', stuck)
                log.doLog(
                    servicePool,
                    log.ERROR,
                    "User service {} has been hard removed because it's stuck".format(stuck.name),
                )
                # stuck.setState(State.ERROR)
                stuck.delete()
Example #54
 def getItems(self, *args, **kwargs):
     # Optimized query, since this view needs a lot of related info
     d = getSqlDatetime() - datetime.timedelta(seconds=GlobalConfig.RESTRAINT_TIME.getInt())
     return super().getItems(
         overview=kwargs.get('overview', True),
         query=(
             ServicePool.objects.prefetch_related(
                 'service', 'service__provider', 'servicesPoolGroup', 'image',
                 'tags', 'meta', 'account')
             .annotate(valid_count=Count(
                 'userServices',
                 filter=~Q(userServices__state__in=State.INFO_STATES)))
             .annotate(preparing_count=Count(
                 'userServices',
                 filter=Q(userServices__state=State.PREPARING)))
             .annotate(error_count=Count(
                 'userServices',
                 filter=Q(userServices__state=State.ERROR,
                          userServices__state_date__gt=d)))
             .annotate(usage_count=Count(
                 'userServices',
                 filter=Q(userServices__state__in=State.VALID_STATES,
                          userServices__cache_level=0)))))
Example #55
def getServicesPoolsCounters(servicePool, counter_type):
    # pylint: disable=no-value-for-parameter
    try:
        cacheKey = ((str(servicePool.id) if servicePool else 'all')
                    + str(counter_type) + str(POINTS) + str(SINCE))
        to = getSqlDatetime()
        since = to - timedelta(days=SINCE)
        val = cache.get(cacheKey)
        if val is None:
            if servicePool is None:
                us = DeployedService()
                complete = True  # Get all deployed services stats
            else:
                us = servicePool
                complete = False
            val = []
            for x in counters.getCounters(us,
                                          counter_type,
                                          since=since,
                                          to=to,
                                          limit=POINTS,
                                          use_max=USE_MAX,
                                          all=complete):
                val.append({'stamp': x[0], 'value': int(x[1])})
            if len(val) > 2:
                cache.put(cacheKey, encoders.encode(pickle.dumps(val), 'zip'),
                          600)
            else:
                val = [{'stamp': since, 'value': 0}, {'stamp': to, 'value': 0}]
        else:
            val = pickle.loads(encoders.decode(val, 'zip'))

        return val
    except Exception:
        logger.exception('Error generating service pool counters')
        raise ResponseError("can't create stats for objects!!!")
Example #56
    def run(self):
        since_state = getSqlDatetime() - timedelta(
            seconds=GlobalConfig.MAX_INITIALIZING_TIME.getInt())
        # Filter for locating machines that are not ready
        flt = Q(state_date__lt=since_state, state=State.PREPARING) | Q(
            state_date__lt=since_state, state=State.USABLE, os_state=State.PREPARING)

        for ds in DeployedService.objects.exclude(
                osmanager=None,
                state__in=State.VALID_STATES,
                service__provider__maintenance_mode=True):
            logger.debug('Searching for hanged services for {0}'.format(ds))
            for us in ds.userServices.filter(flt):
                logger.debug('Found hanged service {0}'.format(us))
                log.doLog(us, log.ERROR,
                          'User Service seems to be hanged. Removing it.',
                          log.INTERNAL)
                log.doLog(
                    ds, log.ERROR,
                    'Removing user service {0} because it seems to be hanged'.format(
                        us.friendly_name))
                us.removeOrCancel()
Example #57
    def run(self):
        removeAtOnce: int = GlobalConfig.USER_SERVICE_CLEAN_NUMBER.getInt()  # Same, it will work at reload

        with transaction.atomic():
            # Keep the machine for at least 10 seconds before removing it, to avoid connection errors
            removeFrom = getSqlDatetime() - timedelta(seconds=10)
            removableUserServices: typing.Iterable[UserService] = UserService.objects.filter(
                state=State.REMOVABLE,
                state_date__lt=removeFrom,
                deployed_service__service__provider__maintenance_mode=False,
            )[0:removeAtOnce].iterator()

        manager = managers.userServiceManager()
        for removableUserService in removableUserServices:
            logger.debug('Checking removal of %s', removableUserService.name)
            try:
                if manager.canRemoveServiceFromDeployedService(
                        removableUserService.deployed_service) is True:
                    manager.remove(removableUserService)
            except Exception:
                logger.exception('Exception removing user service')
Example #58
 def run(self):
     removeFrom = getSqlDatetime() - timedelta(seconds=GlobalConfig.KEEP_INFO_TIME.getInt())
     DeployedService.objects.filter(state__in=State.INFO_STATES, state_date__lt=removeFrom).delete()
Example #59
def getServicesData(
    request: 'ExtendedHttpRequestWithUser',
) -> typing.Dict[str, typing.Any]:  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
    """Obtains the service data dictionary will all available services for this request

    Arguments:
        request {ExtendedHttpRequest} -- request from where to xtract credentials

    Returns:
        typing.Dict[str, typing.Any] --  Keys has this:
            'services': services,
            'ip': request.ip,
            'nets': nets,
            'transports': validTrans,
            'autorun': autorun

    """
    # Look for services for this authenticator's groups. The user is logged into just
    # one authenticator, so their groups must match those assigned to the service pools
    groups = list(request.user.getGroups())
    availServicePools = list(
        ServicePool.getDeployedServicesForGroups(
            groups,
            request.user))  # Pass in user to get "number_assigned" to optimize
    availMetaPools = list(MetaPool.getForGroups(
        groups,
        request.user))  # Pass in user to get "number_assigned" to optimize
    now = getSqlDatetime()

    # Information for administrators
    nets = ''
    validTrans = ''

    osName = request.os['OS']
    logger.debug('OS: %s', osName)

    if request.user.isStaff():
        nets = ','.join([n.name for n in Network.networksFor(request.ip)])
        tt = []
        t: Transport
        for t in Transport.objects.all().prefetch_related('networks'):
            if t.validForIp(request.ip):
                tt.append(t.name)
        validTrans = ','.join(tt)

    logger.debug('Checking meta pools: %s', availMetaPools)
    services = []

    # Metapool helpers
    def transportIterator(member) -> typing.Iterable[Transport]:
        for t in member.pool.transports.all().order_by('priority'):
            typeTrans = t.getType()
            if (typeTrans and t.validForIp(request.ip)
                    and typeTrans.supportsOs(osName) and t.validForOs(osName)):
                yield t

    def buildMetaTransports(
        transports: typing.Iterable[Transport],
        isLabel: bool,
    ) -> typing.List[typing.Mapping[str, typing.Any]]:
        idd = lambda i: i.uuid if not isLabel else 'LABEL:' + i.label
        return [{
            'id': idd(i),
            'name': i.name,
            'link': html.udsAccessLink(request, 'M' + meta.uuid, idd(i)),
            'priority': 0,
        } for i in transports]

    # Preload all assigned user services for this user
    # Add meta pools data first
    for meta in availMetaPools:
        # Check that we have access to at least one transport on some of its children
        metaTransports: typing.List[typing.Mapping[str, typing.Any]] = []
        in_use = meta.number_in_use > 0  # type: ignore # annotated value

        inAll: typing.Optional[typing.Set[str]] = None
        tmpSet: typing.Set[str]
        if meta.transport_grouping == MetaPool.COMMON_TRANSPORT_SELECT:  # use_common_transports
            # Only keep transports that are present in ALL members
            for member in meta.members.all().order_by('priority'):
                tmpSet = set()
                # First pool: take all valid transports; later pools: keep only those already seen
                for t in transportIterator(member):
                    if inAll is None:
                        tmpSet.add(t.uuid)
                    elif t.uuid in inAll:  # For subsequent, reduce...
                        tmpSet.add(t.uuid)

                inAll = tmpSet
            # inAll now holds ALL common transports
            metaTransports = buildMetaTransports(
                Transport.objects.filter(uuid__in=inAll or []), isLabel=False)
        elif meta.transport_grouping == MetaPool.LABEL_TRANSPORT_SELECT:
            ltrans: typing.MutableMapping[str, Transport] = {}
            for member in meta.members.all().order_by('priority'):
                tmpSet = set()
                # First pool: take all labeled transports; later pools: keep only labels already seen
                for t in transportIterator(member):
                    if not t.label:
                        continue
                    if t.label not in ltrans or ltrans[t.label].priority > t.priority:
                        ltrans[t.label] = t
                    if inAll is None:
                        tmpSet.add(t.label)
                    elif t.label in inAll:  # For subsequent, reduce...
                        tmpSet.add(t.label)

                inAll = tmpSet
            # inAll now holds ALL common labels
            metaTransports = buildMetaTransports(
                (v for k, v in ltrans.items() if k in (inAll or set())),
                isLabel=True)
        else:
            for member in meta.members.all():
                # if pool.isInMaintenance():
                #    continue
                for t in member.pool.transports.all():
                    typeTrans = t.getType()
                    if (typeTrans and t.validForIp(request.ip)
                            and typeTrans.supportsOs(osName)
                            and t.validForOs(osName)):
                        metaTransports = [{
                            'id': 'meta',
                            'name': 'meta',
                            'link': html.udsAccessLink(request, 'M' + meta.uuid, None),
                            'priority': 0,
                        }]
                        break

                # if not in_use and meta.number_in_use:  # Only look for assignation on possible used
                #     assignedUserService = userServiceManager().getExistingAssignationForUser(pool, request.user)
                #     if assignedUserService:
                #         in_use = assignedUserService.in_use

                # Stop when 1 usable pool is found (metaTransports is filled)
                if metaTransports:
                    break

        # If no usable pools, this is not visible
        if metaTransports:
            group = (meta.servicesPoolGroup.as_dict if meta.servicesPoolGroup
                     else ServicePoolGroup.default().as_dict)

            services.append({
                'id': 'M' + meta.uuid,
                'name': meta.name,
                'visual_name': meta.visual_name,
                'description': meta.comments,
                'group': group,
                'transports': metaTransports,
                'imageId': meta.image and meta.image.uuid or 'x',
                'show_transports': len(metaTransports) > 1,
                'allow_users_remove': False,
                'allow_users_reset': False,
                'maintenance': meta.isInMaintenance(),
                'not_accesible': not meta.isAccessAllowed(now),
                'in_use': in_use,
                'to_be_replaced': None,
                'to_be_replaced_text': '',
                'custom_calendar_text': meta.calendar_message,
            })

    # Now generic user service
    for sPool in availServicePools:
        # Skip pools that are part of meta pools
        if sPool.owned_by_meta:
            continue

        use_percent = str(sPool.usage(sPool.usage_count)) + '%'  # type: ignore # annotated value
        use_count = str(sPool.usage_count)  # type: ignore # annotated value
        left_count = str(sPool.max_srvs - sPool.usage_count)  # type: ignore # annotated value

        trans: typing.List[typing.MutableMapping[str, typing.Any]] = []
        # In-memory sort: the transport list is already prefetched and small
        for t in sorted(sPool.transports.all(), key=lambda x: x.priority):
            try:
                typeTrans = t.getType()
            except Exception:
                continue
            if (t.validForIp(request.ip) and typeTrans.supportsOs(osName)
                    and t.validForOs(osName)):
                if typeTrans.ownLink:
                    link = reverse('TransportOwnLink',
                                   args=('F' + sPool.uuid, t.uuid))
                else:
                    link = html.udsAccessLink(request, 'F' + sPool.uuid,
                                              t.uuid)
                trans.append({
                    'id': t.uuid,
                    'name': t.name,
                    'link': link,
                    'priority': t.priority
                })

        # If empty transports, do not include it on list
        if not trans:
            continue

        if sPool.image:
            imageId = sPool.image.uuid
        else:
            imageId = 'x'

        # Check whether the user already has an assigned service in this pool, using the
        # "pre-cached" number of assignments for this pool to avoid extra queries
        in_use = typing.cast(typing.Any, sPool).number_in_use > 0
        # if svr.number_in_use:  # Anotated value got from getDeployedServicesForGroups(...). If 0, no assignation for this user
        #     ads = userServiceManager().getExistingAssignationForUser(svr, request.user)
        #     if ads:
        #         in_use = ads.in_use

        group = (sPool.servicesPoolGroup.as_dict if sPool.servicesPoolGroup
                 else ServicePoolGroup.default().as_dict)

        # Only add toBeReplaced info when enabled, since it adds some overhead per service
        toBeReplaced = (sPool.toBeReplaced(request.user)
                        if typing.cast(typing.Any, sPool).pubs_active > 0
                        and GlobalConfig.NOTIFY_REMOVAL_BY_PUB.getBool(False)
                        else None)
        # tbr = False
        if toBeReplaced:
            toBeReplaced = formats.date_format(toBeReplaced,
                                               'SHORT_DATETIME_FORMAT')
            toBeReplacedTxt = ugettext(
                'This service is about to be replaced by a new version. Please close your session before {} and save all your work to avoid losing it.'
            ).format(toBeReplaced)
        else:
            toBeReplacedTxt = ''

        # Calculate max deployed
        maxDeployed = str(sPool.max_srvs)

        # if sPool.service.getType().usesCache is False:
        #    maxDeployed = sPool.service.getInstance().maxDeployed

        def datator(x) -> str:
            return (x.replace('{use}', use_percent)
                    .replace('{total}', str(sPool.max_srvs))
                    .replace('{usec}', use_count)
                    .replace('{left}', left_count))

        services.append({
            'id': 'F' + sPool.uuid,
            'name': datator(sPool.name),
            'visual_name': datator(
                sPool.visual_name.replace('{use}', use_percent).replace('{total}', maxDeployed)),
            'description': sPool.comments,
            'group': group,
            'transports': trans,
            'imageId': imageId,
            'show_transports': sPool.show_transports,
            'allow_users_remove': sPool.allow_users_remove,
            'allow_users_reset': sPool.allow_users_reset,
            'maintenance': sPool.isInMaintenance(),
            'not_accesible': not sPool.isAccessAllowed(now),
            'in_use': in_use,
            'to_be_replaced': toBeReplaced,
            'to_be_replaced_text': toBeReplacedTxt,
            'custom_calendar_text': sPool.calendar_message,
        })

    # logger.debug('Services: %s', services)

    # Sort services and remove services with no transports...
    services = [
        s for s in sorted(services, key=lambda s: s['name'].upper())
        if s['transports']
    ]

    autorun = False
    if (len(services) == 1 and GlobalConfig.AUTORUN_SERVICE.getBool(False)
            and services[0]['transports']):
        if request.session.get('autorunDone', '0') == '0':
            request.session['autorunDone'] = '1'
            autorun = True

    return {
        'services': services,
        'ip': request.ip,
        'nets': nets,
        'transports': validTrans,
        'autorun': autorun
    }
Example #60
 def run(self):
     removeFrom = getSqlDatetime() - timedelta(seconds=GlobalConfig.KEEP_INFO_TIME.getInt(True))
     ServicePoolPublication.objects.filter(state__in=State.INFO_STATES, state_date__lt=removeFrom).delete()