Example #1
 def store(uuid, data, validator=None, validity=DEFAULT_VALIDITY):
     if validator is not None:
         validator = pickle.dumps(validator)
     try:
         t = TicketStore.objects.get(uuid=uuid)
         t.data = pickle.dumps(data)
         t.stamp = getSqlDatetime()
         t.validity = validity
         t.save()
     except TicketStore.DoesNotExist:
         t = TicketStore.objects.create(uuid=uuid, stamp=getSqlDatetime(), data=pickle.dumps(data), validator=validator, validity=validity)
Example #2
    def nextEvent(self, checkFrom=None, startEvent=True, offset=None):
        """
        Returns next event for this interval
        Returns a list of two elements. First is datetime of event begining, second is timedelta of duration
        """
        logger.debug('Obtaining nextEvent')
        if checkFrom is None:
            checkFrom = getSqlDatetime()

        if offset is None:
            offset = datetime.timedelta(minutes=0)

        cacheKey = six.text_type(hash(self.calendar.modified)) + self.calendar.uuid + six.text_type(
            offset.seconds) + six.text_type(int(time.mktime(checkFrom.timetuple()))) + 'event' + (
                       'x' if startEvent is True else '_')
        next_event = CalendarChecker.cache.get(cacheKey, None)
        if next_event is None:
            logger.debug('Regenerating cached nextEvent')
            next_event = self._updateEvents(checkFrom + offset, startEvent)  # We subtract on check-in, so the "offset" on start & end is taken into account for the next execution (just the inverse of the current one, so we subtract it)
            if next_event is not None:
                next_event += offset
            CalendarChecker.cache.put(cacheKey, next_event, 3600)
        else:
            logger.debug('nextEvent cache hit')
            CalendarChecker.hits += 1

        return next_event
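
# The cache key built above changes whenever the calendar is modified or the query changes
# (offset, check time, start vs. end), so stale entries are never reused. A self-contained
# sketch of the same key composition (plain str instead of six.text_type; all values below
# are made up for illustration):
import datetime
import time

calendar_modified = datetime.datetime(2023, 1, 1, 12, 0)
calendar_uuid = '0123456789abcdef'                 # illustrative uuid
offset = datetime.timedelta(minutes=5)
checkFrom = datetime.datetime(2023, 1, 2, 9, 30)
startEvent = True

cacheKey = (str(hash(calendar_modified)) + calendar_uuid + str(offset.seconds)
            + str(int(time.mktime(checkFrom.timetuple()))) + 'event'
            + ('x' if startEvent else '_'))
print(cacheKey)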
Example #3
    def markOldUserServicesAsRemovables(self, activePub):
        '''
        Used when a new publication is finished.

        Marks as removable all user deployed services that belong to this deployed service, do not belong
        to "activePub", and are not in use.

        Also cancels all preparing user services

        Better see the code, it's easier to understand :-)

        Args:
            activePub: Active publication used as "current" publication to make checks
        '''
        logger.debug('Marking old user services as removable...')
        now = getSqlDatetime()
        if activePub is None:
            logger.error('No active publication, don\'t know what to erase!!! (ds = {0})'.format(self))
            return
        for ap in self.publications.exclude(id=activePub.id):
            for u in ap.userServices.filter(state=states.userService.PREPARING):
                u.cancel()
            with transaction.atomic():
                ap.userServices.exclude(cache_level=0).filter(state=states.userService.USABLE).update(state=states.userService.REMOVABLE, state_date=now)
                ap.userServices.filter(cache_level=0, state=states.userService.USABLE, in_use=False).update(state=states.userService.REMOVABLE, state_date=now)
Example #4
    def get(uuid, invalidate=True, owner=None, secure=False):
        try:
            t = TicketStore.objects.get(uuid=uuid, owner=owner)
            validity = datetime.timedelta(seconds=t.validity)
            now = getSqlDatetime()

            logger.debug('Ticket validity: {} {}'.format(t.stamp + validity, now))
            if t.stamp + validity < now:
                raise TicketStore.InvalidTicket('Not valid anymore')

            if secure is True:
                data = pickle.loads(cryptoManager().decrypt(t.data))
            else:
                data = pickle.loads(t.data)

            # If it has a validator, execute it
            if t.validator is not None:
                validator = pickle.loads(t.validator)

                if validator(data) is False:
                    raise TicketStore.InvalidTicket('Validation failed')

            if invalidate is True:
                t.stamp = now - validity - datetime.timedelta(seconds=1)
                t.save()

            return data
        except TicketStore.DoesNotExist:
            raise TicketStore.InvalidTicket('Does not exist')
Example #5
    def check(self, dtime=None):
        """
        Checks if the given time is a valid event on the calendar
        @param dtime: Datetime object to check
        TODO: performance could be improved by checking a cache first
        """
        if dtime is None:
            dtime = getSqlDatetime()

        # First, try to get data from cache if it is valid
        cacheKey = six.text_type(hash(self.calendar.modified)) + six.text_type(
            dtime.date().toordinal()) + self.calendar.uuid + 'checker'
        cached = CalendarChecker.cache.get(cacheKey, None)

        if cached is not None:
            data = bitarray.bitarray()  # Empty bitarray
            data.frombytes(cached)
            CalendarChecker.cache_hit += 1
        else:
            data = self._updateData(dtime)

            # Now data can be accessed as an array of booleans.
            # Store data on persistent cache
            CalendarChecker.cache.put(cacheKey, data.tobytes(), 3600 * 24)

        return data[dtime.hour * 60 + dtime.minute]
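
# check() above caches one bit per minute of the day (24 * 60 = 1440 bits) and answers the
# query with data[hour * 60 + minute]. A self-contained sketch of that indexing scheme using
# the same third-party bitarray package (the "allowed" window below is invented for
# illustration, it is not read from any calendar):
import datetime
import bitarray

day = bitarray.bitarray(24 * 60)
day.setall(False)
for minute in range(9 * 60, 17 * 60):   # pretend calendar events cover 09:00-17:00
    day[minute] = True

dtime = datetime.datetime(2023, 1, 2, 10, 30)
print(day[dtime.hour * 60 + dtime.minute])   # True: 10:30 falls inside the window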
Example #6
 def __unicode__(self):
     expired = getSqlDatetime() > self.created + timedelta(seconds=self.validity)
     if expired:
         expired = "Expired"
     else:
         expired = "Active"
     return u"{0} {1} = {2} ({3})".format(self.owner, self.key, self.value, expired)
Example #7
    def execute(self, save=True):
        logger.debug('Executing action')
        self.last_execution = getSqlDatetime()
        params = json.loads(self.params)

        saveServicePool = save

        if CALENDAR_ACTION_CACHE_L1['id'] == self.action:
            self.service_pool.cache_l1_srvs = int(params['size'])
        elif CALENDAR_ACTION_CACHE_L2['id'] == self.action:
            self.service_pool.cache_l2_srvs = int(params['size'])
        elif CALENDAR_ACTION_INITIAL['id'] == self.action:
            self.service_pool.initial_srvs = int(params['size'])
        elif CALENDAR_ACTION_MAX['id'] == self.action:
            self.service_pool.max_srvs = int(params['size'])
        elif CALENDAR_ACTION_PUBLISH['id'] == self.action:
            self.service_pool.publish(changeLog='Scheduled publication action')
            saveServicePool = False

        # On save, will regenerate nextExecution
        if save:
            self.save()

        if saveServicePool:
            self.service_pool.save()
Example #8
    def get_stats(owner_type, event_type, **kwargs):
        '''
        Returns the stats events filtered by owner_type, event_type and, optionally, owner_id and a time range

        Note: if someone can get this more optimized, please contribute it!
        '''
        if isinstance(event_type, (list, tuple)):
            fltr = StatsEvents.objects.filter(event_type__in=event_type)
        else:
            fltr = StatsEvents.objects.filter(event_type=event_type)

        if type(owner_type) in (list, tuple):
            fltr = fltr.filter(owner_type__in=owner_type)
        else:
            fltr = fltr.filter(owner_type=owner_type)

        if kwargs.get('owner_id', None) is not None:
            oid = kwargs.get('owner_id')
            if isinstance(oid, (list, tuple)):
                fltr = fltr.filter(owner_id__in=oid)
            else:
                fltr = fltr.filter(owner_id=oid)

        since = kwargs.get('since', None)
        to = kwargs.get('to', None)

        since = since and int(since) or NEVER_UNIX
        to = to and int(to) or getSqlDatetime(True)

        fltr = fltr.filter(stamp__gte=since, stamp__lt=to)

        # We use result as an iterator
        return fltr
Example #9
 def get(self, skey, defValue=None):
     now = getSqlDatetime()
     logger.debug('Requesting key "{}" for cache "{}"'.format(skey, self._owner))
     try:
         key = self.__getKey(skey)
         logger.debug('Key: {}'.format(key))
         c = uds.models.Cache.objects.get(pk=key)  # @UndefinedVariable
         expired = now > c.created + timedelta(seconds=c.validity)
         if expired:
             return defValue
         try:
             logger.debug('value: {}'.format(c.value))
             val = pickle.loads(encoders.decode(c.value, 'base64'))
         except Exception:  # If invalid, simply do not use it
             logger.exception('Invalid pickle from cache')
             c.delete()
             return defValue
         Cache.hits += 1
         return val
     except uds.models.Cache.DoesNotExist:  # @UndefinedVariable
         Cache.misses += 1
         logger.debug('key not found: {}'.format(skey))
         return defValue
     except Exception as e:
         Cache.misses += 1
         logger.debug('Cache inaccessible: {}:{}'.format(skey, e))
         return defValue
Example #10
    def getDeadline(self, chkDateTime=None):
        '''
        Gets the deadline for an access on chkDateTime
        '''
        if chkDateTime is None:
            chkDateTime = getSqlDatetime()

        if self.isAccessAllowed(chkDateTime) is False:
            return -1

        deadLine = None

        for ac in self.calendaraccess_set.all():
            if ac.access == states.action.ALLOW and self.fallbackAccess == states.action.DENY:
                nextE = CalendarChecker(ac.calendar).nextEvent(chkDateTime, False)
                if deadLine is None or deadLine > nextE:
                    deadLine = nextE
            elif ac.access == states.action.DENY:  # DENY
                nextE = CalendarChecker(ac.calendar).nextEvent(chkDateTime, True)
                if deadLine is None or deadLine > nextE:
                    deadLine = nextE

        if deadLine is None:
            if self.fallbackAccess == states.action.ALLOW:
                return None
            else:
                return -1

        return int((deadLine - chkDateTime).total_seconds())
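
# A small sketch of how a caller can interpret getDeadline()'s three possible results; the
# value below is hard-coded for illustration instead of calling the method on a real pool:
deadline = 3600   # e.g. what servicePool.getDeadline() might return

if deadline is None:
    print('access is allowed and no calendar rule will cut it off')
elif deadline == -1:
    print('access is denied (or no usable deadline could be computed)')
else:
    print('access ends in {} seconds'.format(deadline))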
Example #11
    def get(uuid, invalidate=True):
        try:
            t = TicketStore.objects.get(uuid=uuid)
            validity = datetime.timedelta(seconds=t.validity)
            now = getSqlDatetime()

            logger.debug("Ticket validity: {} {}".format(t.stamp + validity, now))
            if t.stamp + validity < now:
                raise TicketStore.InvalidTicket("Not valid anymore")

            data = pickle.loads(t.data)

            # If it has a validator, execute it
            if t.validator is not None:
                validator = pickle.loads(t.validator)

                if validator(data) is False:
                    raise TicketStore.InvalidTicket("Validation failed")

            if invalidate is True:
                t.stamp = now - validity - datetime.timedelta(seconds=1)
                t.save()

            return data
        except TicketStore.DoesNotExist:
            raise TicketStore.InvalidTicket("Does not exists")
Example #12
    def get(uuid, invalidate=True):
        try:
            t = TicketStore.objects.get(uuid=uuid)
            validity = datetime.timedelta(seconds=t.validity)
            now = getSqlDatetime()

            if t.stamp + validity < now:
                raise Exception('Not valid anymore')

            data = pickle.loads(t.data)

            # If it has a validator, execute it
            if t.validator is not None:
                validator = pickle.loads(t.validator)

                if validator(data) is False:
                    raise Exception('Validation failed')

            if invalidate is True:
                t.stamp = now - validity - datetime.timedelta(seconds=1)
                t.save()

            return data
        except TicketStore.DoesNotExist:
            raise Exception('Does not exist')
Example #13
    def addPermission(**kwargs):
        """
        Adds a permission to an object and an user or group
        """
        user = kwargs.get('user', None)
        group = kwargs.get('group', None)

        if user is not None and group is not None:
            raise Exception('Use only user or group, but not both')

        if user is None and group is None:
            raise Exception('Must at least indicate user or group')

        object_type = kwargs.get('object_type', None)

        if object_type is None:
            raise Exception('At least an object type is required')

        object_id = kwargs.get('object_id', None)

        permission = kwargs.get('permission', Permissions.PERMISSION_NONE)

        if user is not None:
            q = Q(user=user)
        else:
            q = Q(group=group)

        try:
            existing = Permissions.objects.filter(q, object_type=object_type, object_id=object_id)[0]
            existing.permission = permission
            existing.save()
            return existing
        except Exception:  # Does not exist
            return Permissions.objects.create(created=getSqlDatetime(), ends=None, user=user, group=group,
                                              object_type=object_type, object_id=object_id, permission=permission)
Example #14
    def stopUsageAccounting(self, service):
        if hasattr(service, 'accounting') is False:
            return

        tmp = service.accounting
        tmp.user_service = None
        tmp.end = getSqlDatetime()
        tmp.save()
Example #15
 def revalidate(uuid, validity=None):
     try:
         t = TicketStore.objects.get(uuid=uuid)
         t.stamp = getSqlDatetime()
         if validity is not None:
             t.validity = validity
         t.save()
     except TicketStore.DoesNotExist:
         raise Exception('Does not exist')
Example #16
    def assignToUser(self, user):
        '''
        Assigns this user deployed service to an user.

        Args:
            user: User to assign to (db record)
        '''
        self.cache_level = 0
        self.state_date = getSqlDatetime()
        self.user = user
Example #17
 def cleanUp():
     '''
     Purges the cache items that are no longer valid.
     '''
     from django.db import transaction
     now = getSqlDatetime()
     with transaction.atomic():
         for v in Cache.objects.all():
             if now > v.created + timedelta(seconds=v.validity):
                 v.delete()
Example #18
 def refresh(self, skey):
     # logger.debug('Refreshing key "%s" for cache "%s"' % (skey, self._owner,))
     try:
         key = self.__getKey(skey)
         c = uds.models.Cache.objects.get(pk=key)  # @UndefinedVariable
         c.created = getSqlDatetime()
         c.save()
     except uds.models.Cache.DoesNotExist:  # @UndefinedVariable
         logger.debug('Can\'t refresh cache key %s because it doesn\'t exist' % skey)
         return
Example #19
    def assignToUser(self, user):
        """
        Assigns this user deployed service to an user.

        Args:
            user: User to assign to (db record)
        """
        self.cache_level = 0
        self.state_date = getSqlDatetime()
        self.user = user
        self.save(update_fields=['cache_level', 'state_date', 'user'])
Example #20
    def create(data, validator=None, validity=DEFAULT_VALIDITY, owner=None, secure=False):
        '''
        validity is in seconds
        '''
        if validator is not None:
            validator = pickle.dumps(validator)
        data = pickle.dumps(data)
        if secure:
            data = cryptoManager().encrypt(data)

        return TicketStore.objects.create(stamp=getSqlDatetime(), data=data, validator=validator, validity=validity, owner=owner).uuid
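
# A minimal round-trip sketch of the ticket API shown in these examples. It assumes a
# configured Django environment; the import path and the payload keys are assumptions for
# illustration, not confirmed by this listing:
from uds.models import TicketStore   # assumed import path

# create() pickles (and optionally encrypts) the payload and returns the new ticket's uuid:
ticket_uuid = TicketStore.create({'user': 'admin', 'service': 'some-service'}, owner='web', secure=True)

# get() checks validity, runs the validator if one was stored, and invalidates the ticket by
# default; owner and secure must match what was used on create():
data = TicketStore.get(ticket_uuid, invalidate=True, owner='web', secure=True)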
Example #21
    def store(uuid, data, validator=None, validity=DEFAULT_VALIDITY, owner=None, secure=False):
        '''
        Stores a ticket. If one with this uuid already exists, it is replaced; otherwise a new one is created.
        validity is in seconds
        '''
        if validator is not None:
            validator = pickle.dumps(validator)

        data = pickle.dumps(data)
        if secure:
            data = cryptoManager().encrypt(data)

        try:
            t = TicketStore.objects.get(uuid=uuid, owner=owner)
            t.data = data
            t.stamp = getSqlDatetime()
            t.validity = validity
            t.save()
        except TicketStore.DoesNotExist:
            t = TicketStore.objects.create(uuid=uuid, stamp=getSqlDatetime(), data=data, validator=validator, validity=validity, owner=owner)
Example #22
    def deleteItem(self, parent, item):
        logger.debug('Deleting rule {} from {}'.format(item, parent))
        try:
            calRule = parent.rules.get(uuid=processUuid(item))
            calRule.calendar.modified = getSqlDatetime()
            calRule.calendar.save()
            calRule.delete()
        except Exception:
            logger.exception('Exception')
            self.invalidItemException()

        return 'deleted'
Example #23
    def setState(self, state):
        '''
        Updates the state (and state date) of this object; the record is not saved to the db here

        Args:
            state: new State to store at record
        '''
        self.state_date = getSqlDatetime()
        self.state = state
Example #24
    def setOsState(self, state):
        """
        Updates the os state (state of the os) of this object and, optionally, saves it

        Args:
            state: new State to store at record

            save: Defaults to true. If false, record will not be saved to db, just modified

        """
        if state != self.os_state:
            self.state_date = getSqlDatetime()
            self.os_state = state
            self.save(update_fields=['os_state', 'state_date'])
Example #25
    def setState(self, state, save=True):
        """
        Updates the state of this object and, optionally, saves it

        Args:
            state: new State to store at record

            save: Defaults to true. If false, record will not be saved to db, just modified

        """
        self.state = state
        self.state_date = getSqlDatetime()
        if save is True:
            self.save()
Example #26
    def setInUse(self, state):
        '''
        Set the "in_use" flag for this user deployed service

        Args:
            state: State to set to the "in_use" flag of this record

        :note: If the state is False (set to not in use), a check for removal of this deployed service is launched.
        '''
        from uds.core.managers.UserServiceManager import UserServiceManager
        self.in_use = state
        self.in_use_date = getSqlDatetime()
        if state is False:  # Service released, check if we should mark it for removal
            # If our publication is not current, mark this for removal
            UserServiceManager.manager().checkForRemoval(self)
Example #27
    def isAccessAllowed(self, chkDateTime=None) -> bool:
        """
        Checks if the access for a service pool is allowed or not (based exclusively on associated calendars)
        """
        if chkDateTime is None:
            chkDateTime = getSqlDatetime()

        access = self.fallbackAccess
        # Let's see if we can access by current datetime
        for ac in self.calendarAccess.order_by('priority'):
            if CalendarChecker(ac.calendar).check(chkDateTime) is True:
                access = ac.access
                break  # Stops on first rule match found

        return access == states.action.ALLOW
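
# The access decision above is "the first calendar rule (by priority) whose calendar matches
# the check time wins; otherwise the pool's fallbackAccess applies". The same rule reduced to
# plain Python, with rules represented as (matches_now, access) pairs for illustration:
ALLOW, DENY = 'ALLOW', 'DENY'

def access_for(rules, fallback):
    for matches_now, access in rules:   # rules assumed already ordered by priority
        if matches_now:
            return access               # stop at the first matching rule
    return fallback

print(access_for([(False, DENY), (True, ALLOW)], fallback=DENY) == ALLOW)   # True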
Example #28
    def setState(self, state):
        '''
        Updates the state (and state date) of this object if the state changed; the record is not saved to the db here

        Args:
            state: new State to store at record
        '''
        logger.debug(' *** Setting state to {} from {} for {}'.format(State.toString(state), State.toString(self.state), self))

        if state != self.state:
            self.state_date = getSqlDatetime()
            self.state = state
Example #29
    def getRestraineds():
        from uds.models.UserService import UserService
        from uds.core.util.Config import GlobalConfig
        from django.db.models import Count

        if GlobalConfig.RESTRAINT_TIME.getInt() <= 0:
            return []  # Do not perform any restraint check if we set the globalconfig to 0 (or less)

        date = getSqlDatetime() - timedelta(seconds=GlobalConfig.RESTRAINT_TIME.getInt())
        min_ = GlobalConfig.RESTRAINT_COUNT.getInt()

        res = []
        for v in UserService.objects.filter(state=states.userService.ERROR, state_date__gt=date).values('deployed_service').annotate(how_many=Count('deployed_service')).order_by('deployed_service'):
            if v['how_many'] >= min_:
                res.append(v['deployed_service'])
        return DeployedService.objects.filter(pk__in=res)
Example #30
 def get(self, skey, defValue=None):
     now = getSqlDatetime()
     # logger.debug('Requesting key "%s" for cache "%s"' % (skey, self._owner,))
     try:
         key = self.__getKey(skey)
         c = uds.models.Cache.objects.get(pk=key)  # @UndefinedVariable
         expired = now > c.created + timedelta(seconds=c.validity)
         if expired:
             return defValue
         val = pickle.loads(c.value.decode(Cache.CODEC))
         Cache.hits += 1
         return val
     except uds.models.Cache.DoesNotExist:  # @UndefinedVariable
         Cache.misses += 1
         logger.debug('key not found: {}'.format(skey))
         return defValue
Example #31
    def markOldUserServicesAsRemovables(self, activePub):
        """
        Used when a new publication is finished.

        Marks as removable all user deployed services that belong to this deployed service, do not belong
        to "activePub", and are not in use.

        Also cancels all preparing user services

        Better see the code, it's easier to understand :-)

        Args:
            activePub: Active publication used as "current" publication to make checks
        """
        now = getSqlDatetime()
        if activePub is None:
            logger.error('No active publication, don\'t know what to erase!!! (ds = {0})'.format(self))
            return
        for ap in self.publications.exclude(id=activePub.id):
            for u in ap.userServices.filter(state=states.userService.PREPARING):
                u.cancel()
            with transaction.atomic():
                ap.userServices.exclude(cache_level=0).filter(state=states.userService.USABLE).update(state=states.userService.REMOVABLE, state_date=now)
                ap.userServices.filter(cache_level=0, state=states.userService.USABLE, in_use=False).update(state=states.userService.REMOVABLE, state_date=now)
Example #32
    def isRestrained(self) -> bool:
        """
        Maybe this deployed service is having problems, and that may block some task in some
        situations.

        To avoid this, we will use a "restrain" policy, where we restrain a deployed service for,
        for example, create new cache elements is reduced.

        The policy to check is that if a Deployed Service has 3 errors in the last 20 Minutes (by default), it is
        considered restrained.

        The time that a service is in restrain mode is 20 minutes by default (1200 secs), but it can be modified
        at globalconfig variables
        """
        from uds.core.util.Config import GlobalConfig

        if GlobalConfig.RESTRAINT_TIME.getInt() <= 0:
            return False  # Do not perform any restraint check if we set the globalconfig to 0 (or less)

        date = getSqlDatetime() - timedelta(seconds=GlobalConfig.RESTRAINT_TIME.getInt())
        if self.userServices.filter(state=states.userService.ERROR, state_date__gt=date).count() >= GlobalConfig.RESTRAINT_COUNT.getInt():
            return True

        return False
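
# The restraint check above boils down to: "RESTRAINT_COUNT or more user services entered the
# ERROR state within the last RESTRAINT_TIME seconds". A plain-Python sketch of that rule (the
# constants are the documented defaults, not read from GlobalConfig):
import datetime

RESTRAINT_TIME = 1200    # seconds (20 minutes)
RESTRAINT_COUNT = 3

def is_restrained(error_dates, now=None):
    now = now or datetime.datetime.now()
    cutoff = now - datetime.timedelta(seconds=RESTRAINT_TIME)
    return sum(1 for d in error_dates if d > cutoff) >= RESTRAINT_COUNT

print(is_restrained([datetime.datetime.now()] * 3))   # True: three recent errors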
Example #33
    def get_grouped(owner_type, counter_type, **kwargs):
        """
        Returns the average stats grouped by interval for owner_type and owner_id (optional)

        Note: if someone can get this more optimized, please contribute it!
        """

        filt = 'owner_type'
        if type(owner_type) in (list, tuple):
            filt += ' in (' + ','.join((str(x) for x in owner_type)) + ')'
        else:
            filt += '=' + str(owner_type)

        owner_id = None
        if kwargs.get('owner_id', None) is not None:
            filt += ' AND OWNER_ID'
            oid = kwargs['owner_id']
            if type(oid) in (list, tuple):
                filt += ' in (' + ','.join(str(x) for x in oid) + ')'
            else:
                filt += '=' + str(oid)

        filt += ' AND counter_type=' + str(counter_type)

        since = kwargs.get('since', None)
        to = kwargs.get('to', None)

        since = since and int(since) or NEVER_UNIX
        to = to and int(to) or getSqlDatetime(True)

        interval = 600  # By default, group items in ten minutes interval (600 seconds)

        limit = kwargs.get('limit', None)

        if limit is not None:
            limit = int(limit)
            elements = limit

            # Protect for division a few lines below... :-)
            if elements < 2:
                elements = 2

            if owner_id is None:
                q = StatsCounters.objects.filter(stamp__gte=since, stamp__lte=to)
            else:
                q = StatsCounters.objects.filter(owner_id=owner_id, stamp__gte=since, stamp__lte=to)

            if type(owner_type) in (list, tuple):
                q = q.filter(owner_type__in=owner_type)
            else:
                q = q.filter(owner_type=owner_type)

            if q.count() > elements:
                first = q.order_by('stamp')[0].stamp
                last = q.order_by('stamp').reverse()[0].stamp
                interval = int((last - first) / (elements - 1))

        stampValue = '{ceil}(stamp/{interval})'.format(ceil=getSqlFnc('CEIL'), interval=interval)
        filt += ' AND stamp>={0} AND stamp<={1} GROUP BY {2} ORDER BY stamp'.format(since, to, stampValue)

        fnc = getSqlFnc('MAX' if kwargs.get('use_max', False) else 'AVG')

        query = ('SELECT -1 as id,-1 as owner_id,-1 as owner_type,-1 as counter_type, ' + stampValue + '*{}'.format(interval) + ' AS stamp,' +
                        getSqlFnc('CEIL') + '({0}(value)) AS value '
                 'FROM {1} WHERE {2}').format(fnc, StatsCounters._meta.db_table, filt)

        logger.debug('Stats query: {0}'.format(query))

        # We use result as an iterator
        return StatsCounters.objects.raw(query)
Example #34
    def execute(self, save=True):
        logger.debug('Executing action')
        self.last_execution = getSqlDatetime()
        params = json.loads(self.params)

        saveServicePool = save

        def sizeVal():
            v = int(params['size'])
            return v if v >= 0 else 0

        executed = False
        if CALENDAR_ACTION_CACHE_L1['id'] == self.action:
            self.service_pool.cache_l1_srvs = sizeVal()
            executed = True
        elif CALENDAR_ACTION_CACHE_L2['id'] == self.action:
            self.service_pool.cache_l2_srvs = sizeVal()
            executed = True
        elif CALENDAR_ACTION_INITIAL['id'] == self.action:
            self.service_pool.initial_srvs = sizeVal()
            executed = True
        elif CALENDAR_ACTION_MAX['id'] == self.action:
            self.service_pool.max_srvs = sizeVal()
            executed = True
        elif CALENDAR_ACTION_PUBLISH['id'] == self.action:
            self.service_pool.publish(changeLog='Scheduled publication action')
            saveServicePool = False
            executed = True
        # Add transport
        elif CALENDAR_ACTION_ADD_TRANSPORT['id'] == self.action:
            try:
                t = Transport.objects.get(uuid=params['transport'])
                self.service_pool.transports.add(t)
                executed = True
            except Exception:
                self.service_pool.log(
                    'Scheduled action not executed because transport is not available anymore'
                )
            saveServicePool = False
        # Remove transport
        elif CALENDAR_ACTION_DEL_TRANSPORT['id'] == self.action:
            try:
                t = Transport.objects.get(uuid=params['transport'])
                self.service_pool.transports.remove(t)
                executed = True
            except Exception:
                self.service_pool.log(
                    'Scheduled action not executed because transport is not available anymore',
                    level=log.ERROR)
            saveServicePool = False
        elif CALENDAR_ACTION_ADD_GROUP['id'] == self.action:
            try:
                auth, grp = params['group'].split('@')
                grp = Authenticator.objects.get(uuid=auth).groups.get(uuid=grp)

            except Exception:
                pass
        elif CALENDAR_ACTION_DEL_GROUP['id'] == self.action:
            pass

        if executed:
            try:
                self.service_pool.log('Executed action {} [{}]'.format(
                    CALENDAR_ACTION_DICT.get(self.action)['description'],
                    self.prettyParams),
                                      level=log.INFO)
            except Exception:
                # Avoid invalid ACTIONS errors on log
                self.service_pool.log(
                    'Action {} is not a valid scheduled action! please, remove it from your list.'
                    .format(self.action))

        # On save, will regenerate nextExecution
        if save:
            self.save()

        if saveServicePool:
            self.service_pool.save()
Example #35
File: User.py Project: spofa/openuds
 def updateLastAccess(self):
     '''
     Updates the last access for this user with the current time of the sql server
     '''
     self.last_access = getSqlDatetime()
     self.save()
Example #36
    def get_grouped(owner_type: typing.Union[str, typing.Iterable[str]], counter_type: str, **kwargs):  # pylint: disable=too-many-locals
        """
        Returns the average stats grouped by interval for owner_type and owner_id (optional)

        Note: if someone can get this more optimized, please contribute it!
        """

        filt = 'owner_type'
        if isinstance(owner_type, Iterable):
            filt += ' in (' + ','.join((str(x) for x in owner_type)) + ')'
        else:
            filt += '=' + str(owner_type)

        owner_id = kwargs.get('owner_id', None)
        if owner_id is not None:
            filt += ' AND OWNER_ID'
            if isinstance(owner_id, Iterable):
                filt += ' in (' + ','.join(str(x) for x in owner_id) + ')'
            else:
                filt += '=' + str(owner_id)

        filt += ' AND counter_type=' + str(counter_type)

        since = kwargs.get('since', None)
        to = kwargs.get('to', None)

        since = int(since) if since else NEVER_UNIX
        to = int(to) if to else getSqlDatetime(True)

        interval = 600  # By default, group items in ten minutes interval (600 seconds)

        elements = kwargs.get('limit', None)

        if elements:
            # Protect against division by "elements-1" a few lines below
            elements = int(elements) if int(elements) > 1 else 2

            if owner_id is None:
                q = StatsCounters.objects.filter(stamp__gte=since, stamp__lte=to)
            else:
                if isinstance(owner_id, Iterable):
                    q = StatsCounters.objects.filter(owner_id__in=owner_id, stamp__gte=since, stamp__lte=to)
                else:
                    q = StatsCounters.objects.filter(owner_id=owner_id, stamp__gte=since, stamp__lte=to)

            if isinstance(owner_type, Iterable):
                q = q.filter(owner_type__in=owner_type)
            else:
                q = q.filter(owner_type=owner_type)

            if q.count() > elements:
                first = q.order_by('stamp')[0].stamp
                last = q.order_by('stamp').reverse()[0].stamp
                interval = int((last - first) / (elements - 1))

        stampValue = '{ceil}(stamp/{interval})'.format(ceil=getSqlFnc('CEIL'), interval=interval)
        filt += ' AND stamp>={0} AND stamp<={1} GROUP BY {2} ORDER BY stamp'.format(since, to, stampValue)

        fnc = getSqlFnc('MAX' if kwargs.get('use_max', False) else 'AVG')

        query = (
            'SELECT -1 as id,-1 as owner_id,-1 as owner_type,-1 as counter_type, ' + stampValue + '*{}'.format(interval) + ' AS stamp,' +
            getSqlFnc('CEIL') + '({0}(value)) AS value '
            'FROM {1} WHERE {2}'
        ).format(fnc, StatsCounters._meta.db_table, filt)

        logger.debug('Stats query: %s', query)

        # We use result as an iterator
        return StatsCounters.objects.raw(query)
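
# A quick worked example of how get_grouped() above shrinks a large sample set to at most
# "limit" points: when more rows than "limit" fall inside the range, the default 600 second
# interval is replaced by the span between the first and last sample divided into
# (elements - 1) buckets. Standalone arithmetic, no database involved:
first = 1600000000            # stamp of the first sample in range
last = first + 24 * 3600      # stamp of the last sample, one day later
elements = 25                 # kwargs['limit']

interval = int((last - first) / (elements - 1))
print(interval)               # 3600 -> roughly one aggregated point per hour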
Example #37
 def publish(self):
     '''
     Performs the publication of the service
     '''
     self._name = 'Publication {}'.format(getSqlDatetime())
     return State.FINISHED
Example #38
    def get(self, rangeStart: int = 0, rangeEnd: int = MAX_SEQ) -> int:
        """
        Tries to generate a new unique id in the range provided. This unique id
        is global to "unique ids' database
        """
        # First look for a name in the range defined
        stamp = getSqlDatetime(True)
        seq = rangeStart
        # logger.debug(UniqueId)
        counter = 0
        while True:
            counter += 1
            try:
                # logger.debug('Creating new seq in range {}-{}'.format(rangeStart, rangeEnd))
                with transaction.atomic():
                    flt = self.__filter(rangeStart, rangeEnd, forUpdate=True)
                    try:
                        item = flt.filter(assigned=False).order_by('seq')[0]
                        item.owner = self._owner
                        item.assigned = True
                        item.stamp = stamp
                        item.save()
                        # UniqueId.objects.filter(id=item.id).update(owner=self._owner, assigned=True, stamp=stamp)  # @UndefinedVariable
                        seq = item.seq
                        break
                    except IndexError:  # No free element found
                        item = None

                    # No item was found on first instance (already created, but freed)
                    if not item:
                        # logger.debug('No free found, creating new one')
                        try:
                            last = flt.filter(
                                assigned=True
                            )[0]  # DB Returns correct order so the 0 item is the last
                            seq = last.seq + 1
                        except IndexError:  # If there is no assigned at database
                            seq = rangeStart
                        # logger.debug('Found seq {0}'.format(seq))
                        if seq > rangeEnd:
                            return -1  # No ids free in range
                        # It may occur in some circumstances that concurrent access gives the same item twice; in this case, we
                        # will get a "duplicate key error",
                        UniqueId.objects.create(
                            owner=self._owner,
                            basename=self._baseName,
                            seq=seq,
                            assigned=True,
                            stamp=stamp)  # @UndefinedVariable
                        break
            except OperationalError:  # Locked, may occur for example on sqlite. We will wait a bit
                # logger.exception('Got database locked')
                if counter % 5 == 0:
                    connection.close()
                time.sleep(1)
            except IntegrityError:  # Concurrent creation, may fail, simply retry
                pass
            except Exception:
                logger.exception('Error')
                return -1

        # logger.debug('Seq: {}'.format(seq))
        return seq
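
# The generator above reuses the lowest freed sequence number and only grows the range when
# nothing has been freed, returning -1 once rangeEnd is exceeded. The same allocation idea in
# a tiny in-memory sketch (without the locking, retries and database the real code handles):
def next_seq(assigned, freed, range_start=0, range_end=100):
    if freed:
        seq = min(freed)        # reuse the lowest freed id first
        freed.remove(seq)
    else:
        seq = max(assigned) + 1 if assigned else range_start
        if seq > range_end:
            return -1           # no ids free in range
    assigned.add(seq)
    return seq

assigned, freed = set(), set()
print(next_seq(assigned, freed))   # 0
print(next_seq(assigned, freed))   # 1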
Example #39
 def release(self) -> None:
     UniqueId.objects.select_for_update().filter(owner=self._owner).update(
         assigned=False, owner='',
         stamp=getSqlDatetime(True))  # @UndefinedVariable
     self.__purge()
Example #40
 def save(self, *args, **kwargs):
     self.stamp = getSqlDatetime()
     return UUIDModel.save(self, *args, **kwargs)
Example #41
 def cleanup():
     now = getSqlDatetime()
     cleanSince = now - datetime.timedelta(seconds=TicketStore.MAX_VALIDITY)
     number = TicketStore.objects.filter(stamp__lt=cleanSince).delete()
     logger.debug('Cleaned {} tickets'.format(number))