Example #1
 def _get_aging_factor(self, lease, time):
     """Returns an aging factor for the preemptability score
     
     This is a convenience function that can be used to "age" a
     preemptability score (allowing leases that have been submitted
     long ago to avoid preemption). The method returns a factor
     between 0 and 1 that can be multiplied by the score, reducing
     the score based on the lease's "age".
     
     Currently, this method uses a hard-coded horizon of 7 days
     (any lease older than 7 days cannot be preempted, and leases
     less than 7 days old are assigned a factor proportional to their age).
     
     Arguments:
     lease -- Lease that is going to be preempted
     time -- Time at which preemption would take place        
     """
     # TODO: Make horizon configurable
     horizon = time - DateTimeDelta(7)
     if lease.submit_time <= horizon:
         return -1
     else:
         seconds = (time - lease.submit_time).seconds
         horizon_seconds = DateTimeDelta(7).seconds
         return float(horizon_seconds - seconds) / horizon_seconds
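
For reference, a DateTimeDelta's seconds attribute holds the delta's total length in seconds, which is what the normalization above relies on. A minimal standalone sketch of the factor computation, with illustrative values:

from mx.DateTime import now, DateTimeDelta

horizon_seconds = DateTimeDelta(7).seconds        # 604800.0 == 7 * 24 * 3600
submit_time = now() - DateTimeDelta(3, 12)        # a lease submitted 3.5 days ago
age_seconds = (now() - submit_time).seconds
factor = float(horizon_seconds - age_seconds) / horizon_seconds   # ~0.5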
Example #2
class ExpireDateMixin(object):
    """ A mixin for providing expire dates. """

    expire_attr = 'expire_date'
    expire_dates = [None, now() - DateTimeDelta(10), now() + DateTimeDelta(10)]

    def __init__(self):
        parent = super(ExpireDateMixin, self)
        if hasattr(parent, '__init__'):
            getattr(parent, '__init__')()

        self.register_attr(self.expire_attr, self.get_expire_date)

    def get_expire_date(self, ident):
        """ Get the expire_date for a given ident.

        @type ident: str
        @param ident: The 'ident' or 'id' of an item

        @rtype: mx.DateTime.DateTime or NoneType
        @return: An expire date or None

        """
        if ident in getattr(self, 'items'):
            return getattr(self, 'items')[ident][self.expire_attr]

        return self._get_cyclic_value(ident, self.expire_dates)
Example #3
    def is_active(self, date=Date(*time.localtime()[:3])):
        # NB: the default date is evaluated once, at import time.
        # IVR 2009-04-29 Lars Gustav Gudbrandsen requested on 2009-04-22 that
        # all employment-related info should be considered active 14 days
        # prior to the actual start day.
        # Jazz 2011-03-23: In order to avoid revoking affiliations/privileges
        # for people changing role/position at UiO we should let the
        # affiliation be valid for a few days after it has been revoked
        # in the authoritative source
        if self.start:
            return ((self.start - DateTimeDelta(14) <= date) and
                    ((not self.end) or (date <= self.end + DateTimeDelta(3))))

        return ((not self.end) or (date <= self.end + DateTimeDelta(3)))
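
The activity window above can be exercised in isolation; a small sketch with made-up dates (not part of the original class):

from mx.DateTime import Date, DateTimeDelta

start, end = Date(2011, 3, 1), Date(2011, 3, 20)
probe = Date(2011, 2, 20)
# active from 14 days before start until 3 days after end
active = (start - DateTimeDelta(14) <= probe) and (probe <= end + DateTimeDelta(3))
# Date(2011, 2, 15) <= probe <= Date(2011, 3, 23)  ->  True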
Example #4
class BasicPersonSource(BaseDataSource):

    birth_dates = [
        now(),
        now() - DateTimeDelta(365 * 100),
        now() - DateTimeDelta(365 * 20)
    ]

    genders = ['M', 'F', None]

    birth_date_attr = 'birth_date'
    gender_attr = 'gender'

    def __init__(self):
        parent = super(BasicPersonSource, self)
        if hasattr(parent, '__init__'):
            getattr(parent, '__init__')()

        self.register_attr(self.birth_date_attr, self.get_birth_date)
        self.register_attr(self.gender_attr, self.get_gender)

    def get_gender(self, ident):
        """ Get the gender for a given ident.

        @type ident: str
        @param ident: The 'ident' or 'id' of an item

        @rtype: str or NoneType
        @return: One of the values in self.genders

        """
        if ident in getattr(self, 'items'):
            return getattr(self, 'items')[ident][self.gender_attr]

        return self._get_cyclic_value(ident, self.genders)

    def get_birth_date(self, ident):
        """ Get the expire_date for a given ident.

        @type ident: str
        @param ident: The 'ident' or 'id' of an item

        @rtype: mx.DateTime.DateTime or NoneType
        @return: One of the values in self.birth_dates

        """
        if ident in getattr(self, 'items'):
            return getattr(self, 'items')[ident][self.birth_date_attr]

        return self._get_cyclic_value(ident, self.birth_dates)
Example #5
def delete_stale_events(cl_events, db):
    """Remove all events of type cl_events older than GRACE_PERIOD.

    cl_events is an iterable listing change_log event types that we want
    expunged. These events cannot require any state change in Cerebrum (other
    than their own deletion). It is the caller's responsibility to check that
    this is so.
    """

    if not isinstance(cl_events, (list, tuple, set)):
        cl_events = [
            cl_events,
        ]

    const = Factory.get("Constants")()
    typeset_request = ", ".join(str(const.ChangeType(x)) for x in cl_events)
    logger.debug("Deleting stale requests: %s", typeset_request)
    for event in db.get_log_events(types=cl_events):
        tstamp = event["tstamp"]
        timeout = cereconf.GRACE_PERIOD
        try:
            params = pickle.loads(event["change_params"])
            if params['timeout'] is not None:
                timeout = DateTimeDelta(params['timeout'])
                logger.debug('Timeout set to %s for %s',
                             (now() + timeout).strftime('%Y-%m-%d'),
                             event['change_id'])

                if timeout > cereconf.MAX_INVITE_PERIOD:
                    logger.warning('Too long timeout (%s days) for %s',
                                   timeout.day,
                                   event['change_id'])
                    timeout = cereconf.MAX_INVITE_PERIOD
        except KeyError:
            pass
        if now() - tstamp <= timeout:
            continue

        logger.debug("Deleting stale event %s (@%s) for entity %s (id=%s)",
                     str(const.ChangeType(event["change_type_id"])),
                     event["tstamp"].strftime("%Y-%m-%d"),
                     fetch_name(event["subject_entity"],
                                db), event["subject_entity"])

        db.remove_log_event(event["change_id"])
        db.commit()

    logger.debug("Deleted all stale requests: %s", typeset_request)
Example #6
def delete_stale_events(cl_events, db):
    """Remove all events of type cl_events older than GRACE_PERIOD.

    cl_events is an iterable listing change_log event types that we want
    expunged. These events cannot require any state change in Cerebrum (other
    than their own deletion). It is the caller's responsibility to check that
    this is so.
    """

    if not isinstance(cl_events, (list, tuple, set)):
        cl_events = [cl_events, ]

    clconst = Factory.get("CLConstants")()
    typeset_request = ", ".join(str(clconst.ChangeType(x))
                                for x in cl_events)
    logger.debug("Deleting stale requests: %s", typeset_request)
    for event in db.get_log_events(types=cl_events):
        tstamp = event["tstamp"]
        timeout = cereconf.GRACE_PERIOD
        try:
            params = json.loads(event["change_params"])
            if params['timeout'] is not None:
                timeout = DateTimeDelta(params['timeout'])
                logger.debug('Timeout set to %s for %s',
                             (now() + timeout).strftime('%Y-%m-%d'),
                             event['change_id'])

                if timeout > cereconf.MAX_INVITE_PERIOD:
                    logger.warning('Too long timeout (%s days) for %s',
                                   timeout.day,
                                   event['change_id'])
                    timeout = cereconf.MAX_INVITE_PERIOD
        except KeyError:
            pass
        if now() - tstamp <= timeout:
            continue

        logger.debug("Deleting stale event %s (@%s) for entity %s (id=%s)",
                     str(clconst.ChangeType(event["change_type_id"])),
                     event["tstamp"].strftime("%Y-%m-%d"),
                     fetch_name(event["subject_entity"], db),
                     event["subject_entity"])

        db.remove_log_event(event["change_id"])
        db.commit()

    logger.debug("Deleted all stale requests: %s", typeset_request)
Example #7
def sync_group(affil, gname, descr, mtype, memb, recurse=True):
    logger.debug(
        ("sync_group(parent:'%s'; groupname:'%s'; description:'%s'; " +
         "membertype:'%s'; members:'%s'; recurse:'%s')") %
        (affil, gname, descr, mtype, memb.keys(), recurse))
    if mtype == constants.entity_group:  # memb has group_name as keys
        members = {}
        for tmp_gname in memb.keys():
            grp = get_group(tmp_gname)
            members[int(grp.entity_id)] = 1
    else:
        # memb has account_id as keys
        members = memb.copy()

    if affil is not None:
        AffiliatedGroups.setdefault(affil, {})[gname] = 1

    try:
        group = get_group(gname)
    except Errors.NotFoundError:
        group = Factory.get('Group')(db)
        group.clear()
        group.populate(
            creator_id=group_creator,
            visibility=constants.group_visibility_all,
            name=gname,
            description=descr,
            group_type=constants.group_type_unknown,
        )
        group.write_db()
    else:
        # Update description if it has changed
        if group.description != descr:
            group.description = descr
            group.write_db()

        if group.is_expired():
            # Extend the group's life by 6 months
            from mx.DateTime import now, DateTimeDelta
            group.expire_date = now() + DateTimeDelta(6 * 30)
            group.write_db()

        # Make sure the group is listed for export to LMS
        if not group.has_spread(constants.spread_lms_group):
            group.add_spread(constants.spread_lms_group)

        for member in group.search_members(group_id=group.entity_id,
                                           member_type=mtype,
                                           member_filter_expired=False):
            member = int(member["member_id"])
            if member in members:
                del members[member]
            else:
                logger.debug("sync_group(): Deleting member %d" % member)
                group.remove_member(member)

    for member in members.keys():
        group.add_member(member)
Example #8
    def group_invite_user(self, inviter, group, email, timeout=3):
        """ This method sets up an event that will allow a user to join a
        group. 

        @type inviter: self.account_class
        @param inviter: The account that is setting up the invitation

        @type group: self.group_class
        @param group: The group that should be joined

        @type email: str
        @param email: The email address of the user that is invited

        @type timeout: int
        @param timeout: The number of days until the confirmation key expires.

        @rtype: dict
        @return: A dictionary with the following keys:
                'confirmation_key': <str> The code that is used to confirm the invitation
                'match_user': <str> A username, if a user exists with that email-address
                'match_user_email': An e-mail address. Not sure why?
        """
        ac = self.account_class(self.db)

        assert hasattr(inviter, 'entity_id')
        assert hasattr(group, 'entity_id')

        timeout = DateTimeDelta(int(timeout))
        if timeout.day < 1:
            raise CerebrumError('Timeout too short (%d)' % timeout.day)
        if timeout > cereconf.MAX_INVITE_PERIOD:
            raise CerebrumError("Timeout too long (%d)" % timeout.day)

        ret = {'confirmation_key': self.vhutils.setup_event_request(
                                       group.entity_id,
                                       self.co.va_group_invitation,
                                       params={'inviter_id': inviter.entity_id,
                                               'group_id': group.entity_id,
                                               'invitee_mail': email,
                                               'timeout': timeout.day,},
                                       change_by=inviter.entity_id)}

        # check if e-mail matches a valid username
        try:
            ac.find_by_name(email)
            ret['match_user'] = ac.account_name
            if ac.np_type in (self.co.fedaccount_type, self.co.virtaccount_type):
                ret['match_user_email'] = ac.get_email_address()
        except NotFoundError:
            pass

        return ret
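
The day attribute used for validation above is the delta's whole-day component; a quick sketch:

from mx.DateTime import DateTimeDelta

DateTimeDelta(3).day      # 3 -- whole days
DateTimeDelta(0.5).day    # 0 -- twelve hours; such a value would trip the 'Timeout too short' check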
Example #9
 def calc_bin_span(self, days):
     """
     Given a desired bin span in days, calculate the number of bins
     that comes closest.
     """
     assert self.span
     if (days % 7) == 0:
         # If user asked for a bin width that is a multiple of 7 days, shift
         # bins to align with weeks.
         self.first += RelativeDateTime(weekday=(Monday,0))
         self.span = self.last - self.first
     self.bin_span = DateTimeDelta(days)
     self.n_bins = int(math.ceil(self.span / self.bin_span))
     if self.n_bins > self.max_bins:
         raise Error('Too many bins (%d, maximum is %d) - make bins '
                     'bigger, or limit date range' %
                     (self.n_bins, self.max_bins))
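
As the n_bins computation relies on, dividing one delta by another yields a plain float ratio; a quick sketch:

import math
from mx.DateTime import DateTimeDelta

span = DateTimeDelta(100)                  # e.g. a 100-day date range
bin_span = DateTimeDelta(7)                # weekly bins
n_bins = int(math.ceil(span / bin_span))   # ceil(14.285...) == 15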
Example #10
def collect_warnable_accounts(database):
    """Collect all FA/VA that are about to expire.

    Both VA and FA are collected as candidates.
    """

    account = Factory.get("Account")(database)
    const = Factory.get("Constants")()
    return set(
        x["account_id"]
        # collect all accounts
        for x in account.list(filter_expired=True)
        # ... that are VA/FA
        if (x["np_type"] in (const.virtaccount_type, const.fedaccount_type) and
            # ... and have an expire date
            x["expire_date"] and
            # ... and that expire date is within the right window
            DateTimeDelta(0) <= x["expire_date"] -
            now() <= get_config("EXPIRE_WARN_WINDOW")))
Example #11
def get_printings(original_story, first_publication_date):
    printings = Story.objects.filter(
        story_version__base_story=original_story).filter(
        issue__publication_date__gt="").order_by('issue__publication_date')
    if len(first_publication_date) <= 4 or 'Q' in first_publication_date:
        first_printings = printings.filter(
            issue__publication_date__icontains=first_publication_date[0:4])
    else:
        end_date = ParseDateTimeUTC(first_publication_date[0:7])
        end_date += DateTimeDelta(120)
        end_date = end_date.strftime("%Y-%m")
        first_printings = printings.filter(
            issue__publication_date__gte=first_publication_date[0:7]).filter(
            issue__publication_date__lte=end_date)
        if not first_printings:
            first_printings = printings.filter(
                issue__publication_date__icontains=first_publication_date[0:4])
    return printings, first_printings
Example #12
def adjustDateTime(adjustDelta, dateTime):
    """
    sometimes a DateTime object has to be adjusted, when illegal components were specified
    adjustDateTime corrects DateTime object with specifications obtained from
    fixTimeComponents()

    input: adjustFlags is tuple/list of flags for adjusting ( increaseDay, increaseHour, increaseMin )
    output: corrected DateTime object
    """

    # Increase day
    dateTime += DateTimeDelta(adjustDelta[0])

    # Increase hour
    dateTime += TimeDelta(adjustDelta[1])

    # Increase minute
    dateTime += DateTimeDeltaFromSeconds(60.0 * adjustDelta[2])

    return dateTime
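
The three constructors used above differ only in which unit the first positional argument denotes; a brief sketch of the equivalences:

from mx.DateTime import DateTimeDelta, TimeDelta, DateTimeDeltaFromSeconds

DateTimeDelta(1)               # one day    (days[, hours[, minutes[, seconds]]])
TimeDelta(1)                   # one hour   (hours[, minutes[, seconds]])
DateTimeDeltaFromSeconds(60)   # one minute
DateTimeDelta(1) == TimeDelta(24) == DateTimeDeltaFromSeconds(86400)   # True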
Example #13
 def handle_match(self, match):
     """Handles cases where the log_regex is matched."""
     message = match.group('message')
     if not self.status_regex.match(message):
         return
     date_time = match.group('date_time')
     # strptime cannot parse the ',ms' suffix; split it off and
     # re-add the milliseconds as a sub-second delta
     date_time, ms = date_time.split(',')
     date_time = DateTime.strptime(date_time, self.date_time_fmt)
     ms = DateTimeDelta(0, 0, 0, int(ms) / 1000.0)
     date_time = date_time + ms
     if message.startswith('LOADING_IMAGE'):
         name = message[message.find(':') + 2:]
         self.last_import = Import(date_time, name)
         self.imports.append(self.last_import)
     elif not hasattr(self, 'last_import') or self.last_import is None:
         return
     elif message.startswith('LOADED_IMAGE'):
         self.last_import.setid_end = date_time
     elif message.startswith('BEGIN_POST_PROCESS'):
         self.last_import.post_process_start = date_time
     elif message.startswith('END_POST_PROCESS'):
         self.last_import.post_process_end = date_time
     elif message.startswith('BEGIN_SAVE_TO_DB'):
         self.last_import.save_to_db_start = date_time
     elif message.startswith('END_SAVE_TO_DB'):
         self.last_import.save_to_db_end = date_time
     elif message.startswith('IMPORT_OVERLAYS'):
         self.last_import.overlays_start = date_time
     elif message.startswith('IMPORT_THUMBNAILING'):
         self.last_import.thumbnailing_start = date_time
     elif message.startswith('IMPORT_DONE'):
         self.last_import.end = date_time
         self.last_import = None
     elif message.startswith('DATASET_STORED'):
         self.last_series = Series(date_time)
         self.last_import.series.append(self.last_series)
     elif message.startswith('DATA_STORED'):
         self.last_import.series[-1].end = date_time
     elif message.startswith('IMPORT_STEP'):
         self.last_series.planes.append(Plane(date_time))
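
Since strptime formats carry no sub-second field, splitting the milliseconds off and re-adding them through the seconds slot (fourth positional argument) is the usual workaround. A standalone sketch using mx.DateTime's own strptime (the log timestamp here is illustrative):

from mx.DateTime import DateTimeDelta, strptime

stamp, ms = '2011-03-23 14:05:59,437'.split(',')
date_time = strptime(stamp, '%Y-%m-%d %H:%M:%S')
date_time += DateTimeDelta(0, 0, 0, int(ms) / 1000.0)   # add back 437 ms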
Example #14
    def test_phonenumberchange_fresh_person(self):
        """Fresh phone numbers should still be available for new persons"""
        cereconf.INDIVIDUATION_PHONE_TYPES['system_fs']['types']['contact_mobile_phone']['delay'] = 7
        cereconf.INDIVIDUATION_PHONE_TYPES['system_fs']['types']['contact_private_mobile']['delay'] = 7

        ou = Factory.get('OU')(self.db)
        co = Factory.get('Constants')(self.db)
        ou.find_stedkode(0, 0, 0, 0)
        pe = self.createPerson(first_name='Miss', last_name='Test2', studnr='008')
        ac = self.createAccount(pe, 'mstest2')

        pe.populate_affiliation(source_system=co.system_fs, ou_id=ou.entity_id,
                affiliation=co.affiliation_student,
                status=co.affiliation_status_student_aktiv)
        pe.write_db()
        pe.populate_contact_info(source_system=co.system_fs,
                                 type=co.contact_mobile_phone,
                                 value='98012345')
        pe.write_db()
        self.db.commit()

        # hack the create_date in change_log
        print self.db.execute("""
            UPDATE [:table schema=cerebrum name=change_log]
            SET tstamp = :tid 
            WHERE 
                subject_entity = :s_id AND
                change_type_id = :ct_id""", 
                    {'s_id': pe.entity_id,
                    'ct_id': co.person_create,
                    'tid': now() - DateTimeDelta(5),})

        d = self.client.callRemote('generate_token',
                id_type="externalid_studentnr", ext_id='008',
                username='******', phone_no='98012345',
                browser_token='a')
        d.addCallback(self.assertEquals, 'true')
        return d
Example #15
        def CheckDateTypes(self):
            dt = DateTime(2002, 6, 15)
            dtd = DateTimeDelta(0, 0, 0, 1)

            self.cur.execute("create table test (t timestamp)")
            self.cur.execute("insert into test(t) values (?)", (dt, ))
            self.cur.execute("select t from test")
            res = self.cur.fetchone()

            self.failUnlessEqual(
                dt, res.t, "DateTime object should have been %s, was %s" %
                (repr(dt), repr(res.t)))

            self.cur.execute("drop table test")
            self.cur.execute("create table test(i interval)")
            self.cur.execute("insert into test(i) values (?)", (dtd, ))
            self.cur.execute("select i from test")
            res = self.cur.fetchone()

            self.failUnlessEqual(
                dtd, res.i,
                "DateTimeDelta object should have been %s, was %s" %
                (repr(dtd), repr(res.i)))
Example #16
def disable_account(account_id, db):
    """Disable account corresponding to account_id.

    Once disabled, an account is just a placeholder for the username. It has
    no other value/associated attributes.

    NB! We keep entity_contact_info around, since we may want to know at least
    some human-'relatable' info about the account after it's disabled.
    """
    account = get_account(account_id, db)
    if not account:
        return

    # If the account has already been deleted, we cannot disable it any
    # further.
    if account.is_deleted():
        return

    delete_common(account.entity_id, db)

    account.expire_date = now() - DateTimeDelta(1)
    account.write_db()
    logger.debug("Disabled account %s (id=%s)", account.account_name,
                 account.entity_id)
Example #17
from logilab.common.db import get_dbapi_compliant_module

from maay.dbentity import ScoredDocument, Document, FileInfo, \
     DBEntity, DocumentProvider, DocumentScore, Word, Node, Result
from maay.texttool import normalizeText, WORDS_RGX, MAX_STORED_SIZE
from mx.DateTime import DateTimeDelta

IntegrityError = None


class MaayAuthenticationError(Exception):
    """raised on db authentication failure"""


ANONYMOUS_AVATARID = '__anonymous__'
WEB_AVATARID = '__may__'
ONE_HOUR = DateTimeDelta(0, 1)


class IQuerier(Interface):
    """defines the High-Level interface to Maay SQL database"""
    def findDocuments(query):
        """returns all Documents matching <query>"""

    def getFileInformations(filename):
        """returns a list of FileInfo instances according
        to the DB's content
        """

    def getIndexedFiles():
        """returns a list of indexed file names as strings
        """
Example #18
    def run(self):
        self.parse_options()

        if self.opt.file is not None:
            lease_elem = ET.parse(self.opt.file).getroot()
            # If a relative starting time is used, replace it with an
            # absolute starting time.
            exact = lease_elem.find("start/exact")
            if exact is not None:
                exact_time = exact.get("time")
                exact.set("time", str(self.__absolute_time(exact_time)))
            lease_xml_str = ET.tostring(lease_elem)
        else:
            if self.opt.preemptible is None:
                preemptible = False
            else:
                preemptible = self.opt.preemptible

            capacity = Capacity([constants.RES_CPU, constants.RES_MEM])
            capacity.set_quantity(constants.RES_CPU, int(self.opt.cpu) * 100)
            capacity.set_quantity(constants.RES_MEM, int(self.opt.mem))
            requested_resources = dict([(i + 1, capacity)
                                        for i in range(self.opt.numnodes)])
            if self.opt.duration == haizea_request_lease.DURATION_UNLIMITED:
                # This is an interim solution (make it run for a century).
                # TODO: Integrate concept of unlimited duration in the lease datastruct
                duration = DateTimeDelta(36500)
            else:
                duration = ISO.ParseTimeDelta(self.opt.duration)

            if self.opt.start == haizea_request_lease.START_NOW:
                lease = Lease(lease_id=None,
                              submit_time=None,
                              requested_resources=requested_resources,
                              start=Timestamp(Timestamp.NOW),
                              duration=Duration(duration),
                              deadline=None,
                              preemptible=preemptible,
                              software=DiskImageSoftwareEnvironment(
                                  self.opt.vmimage, self.opt.vmimagesize),
                              state=None)
            elif self.opt.start == haizea_request_lease.START_BESTEFFORT:
                lease = Lease(lease_id=None,
                              submit_time=None,
                              requested_resources=requested_resources,
                              start=Timestamp(Timestamp.UNSPECIFIED),
                              duration=Duration(duration),
                              deadline=None,
                              preemptible=preemptible,
                              software=DiskImageSoftwareEnvironment(
                                  self.opt.vmimage, self.opt.vmimagesize),
                              state=None)
            else:
                start = self.__absolute_time(self.opt.start)
                lease = Lease(lease_id=None,
                              submit_time=None,
                              requested_resources=requested_resources,
                              start=Timestamp(start),
                              duration=Duration(duration),
                              deadline=None,
                              preemptible=preemptible,
                              software=DiskImageSoftwareEnvironment(
                                  self.opt.vmimage, self.opt.vmimagesize),
                              state=None)

            lease_xml_str = ET.tostring(lease.to_xml())

        server = self.create_rpc_proxy(self.opt.server)

        try:
            lease_id = server.create_lease(lease_xml_str)
            print "Lease submitted correctly."
            print "Lease ID: %i" % lease_id
        except xmlrpclib.Fault, err:
            print >> sys.stderr, "XMLRPC fault: %s" % err.faultString
            if self.opt.debug:
                raise
Example #19
    def __init__(self, opennebula_vm):                        
        # If there is no HAIZEA parameter, the default is to treat the
        # request as an immediate request with unlimited duration
        if OpenNebulaHaizeaVM.HAIZEA_PARAM not in opennebula_vm.template:
            self.start = OpenNebulaHaizeaVM.HAIZEA_START_NOW
            self.duration = OpenNebulaHaizeaVM.HAIZEA_DURATION_UNLIMITED
            self.preemptible = OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE_NO
            self.group = None
        else:
            self.start = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_START]
            self.duration = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_DURATION]
            self.preemptible = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE]
            if OpenNebulaHaizeaVM.HAIZEA_GROUP in opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM]:
                self.group = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_GROUP]
            else:
                self.group = None
                
        self.submit_time = UNIX2DateTime(opennebula_vm.stime)
                
        # Create Timestamp object
        if self.start == OpenNebulaHaizeaVM.HAIZEA_START_NOW:
            self.start = Timestamp(Timestamp.NOW)
        elif self.start == OpenNebulaHaizeaVM.HAIZEA_START_BESTEFFORT:
            self.start = Timestamp(Timestamp.UNSPECIFIED)
        elif self.start[0] == "+":
            # Relative time
            self.start = Timestamp(round_datetime(self.submit_time + ISO.ParseTime(self.start[1:])))
        else:
            self.start = Timestamp(ISO.ParseDateTime(self.start))
            
        # Create Duration object
        if self.duration == OpenNebulaHaizeaVM.HAIZEA_DURATION_UNLIMITED:
            # This is an interim solution (make it run for a century).
            # TODO: Integrate concept of unlimited duration in the lease datastruct
            self.duration = Duration(DateTimeDelta(36500))
        else:
            self.duration = Duration(ISO.ParseTimeDelta(self.duration))

        self.preemptible = (self.preemptible == OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE_YES)

        self.capacity = Capacity([constants.RES_CPU, constants.RES_MEM, constants.RES_DISK])

        # CPU
        # CPUs in VMs are not reported the same as in hosts.
        # There are two template values: CPU and VCPU.
        # CPU reports the percentage of the CPU needed by the VM.
        # VCPU, which is optional, reports how many CPUs are needed.
        cpu = int(float(opennebula_vm.template["CPU"]) * 100)
        if opennebula_vm.template.has_key("VCPU"):
            ncpu = int(opennebula_vm.template["VCPU"])
        else:
            ncpu = 1
        self.capacity.set_ninstances(constants.RES_CPU, ncpu)
        for i in range(ncpu):
            self.capacity.set_quantity_instance(constants.RES_CPU, i+1, cpu)            
        
        # Memory. Unlike hosts, memory is reported directly in MBs
        self.capacity.set_quantity(constants.RES_MEM, int(opennebula_vm.template["MEMORY"]))

        self.one_id = opennebula_vm.id
Example #20
def runtest(lmaManager=None, lma_view=None, HDFmanagers=None):
    # colormap = get_cmap('gist_yarg_r')
    colormap = get_cmap('gist_earth')

    density_maxes = []
    total_counts = []
    all_t = []

    for delta_minutes in minute_intervals:
        time_delta = DateTimeDelta(0, 0, delta_minutes, 0)

        n_frames = int(ceil((end_time - start_time) / time_delta))
        n_cols = 6
        n_rows = int(ceil(float(n_frames) / n_cols))
        w, h = figaspect(float(n_rows) / n_cols)

        xedge = np.arange(b.x[0], b.x[1] + dx, dx)
        yedge = np.arange(b.y[0], b.y[1] + dy, dy)
        x_range = b.x[1] - b.x[0]
        y_range = b.y[1] - b.y[0]

        min_count, max_count = 1, max_count_baseline * delta_minutes

        f = figure(figsize=(w, h))
        p = small_multiples_plot(fig=f, rows=n_rows, columns=n_cols)
        p.label_edges(True)

        for ax in p.multiples.flat:
            ax.yaxis.set_major_formatter(kilo_formatter)
            ax.xaxis.set_major_formatter(kilo_formatter)

        for i in range(n_frames):
            frame_start = start_time + i * time_delta
            frame_end = frame_start + time_delta
            b.sec_of_day = (frame_start.abstime, frame_end.abstime)
            b.t = (frame_start, frame_end)

            do_plot = False
            flash_extent_density = True
            density = None

            if source_density:
                lmaManager.refresh(b)
                lma_view.transformed.cache_is_old()
                x, y, t = lma_view.transformed['x', 'y', 't']
                density, edges = np.histogramdd((x, y), bins=(xedge, yedge))
                do_plot = True
            else:
                for lmaManager in HDFmanagers:
                    # yes, loop through every file every time and reselect data.
                    # so wrong, yet so convenient.
                    h5 = lmaManager.h5file
                    if not flash_extent_density:
                        lmaManager.refresh(b)
                        lma_view = AcuityView(DataSelection(
                            lmaManager.data, b),
                                              mapProj,
                                              bounds=b)
                        # lma_view.transformed.cache_is_old()
                        x, y, t = lma_view.transformed['x', 'y', 't']
                        if x.shape[0] > 1: do_plot = True
                        break
                    else:
                        # assume here that the bounds sec_of_day day is the same as
                        # the dataset day
                        t0, t1 = b.sec_of_day
                        # events = getattr(h5.root.events, lmaManager.table.name)[:]
                        # flashes = getattr(h5.root.flashes, lmaManager.table.name)[:]

                        event_dtype = getattr(h5.root.events,
                                              lmaManager.table.name)[0].dtype
                        events_all = getattr(h5.root.events,
                                             lmaManager.table.name)[:]
                        flashes = getattr(h5.root.flashes,
                                          lmaManager.table.name)

                        def event_yielder(evs, fls):
                            these_events = []
                            for fl in fls:
                                if ((fl['n_points'] > 9) & (t0 < fl['start']) &
                                    (fl['start'] <= t1)):
                                    these_events = evs[evs['flash_id'] ==
                                                       fl['flash_id']]
                                    if len(these_events) != fl['n_points']:
                                        print('not giving all ',
                                              fl['n_points'], ' events? ',
                                              these_events.shape)
                                    for an_ev in these_events:
                                        yield an_ev

                        # events = np.fromiter((an_ev for an_ev in ( events_all[events_all['flash_id'] == fl['flash_id']]
                        #                 for fl in flashes if (
                        #                   (fl['n_points']>9) & (t0 < fl['start']) & (fl['start'] <= t1)
                        #                 )
                        #               ) ), dtype=event_dtype)
                        events = np.fromiter(event_yielder(
                            events_all, flashes),
                                             dtype=event_dtype)

                        # print events['flash_id'].shape

                        ### Flash extent density ###
                        x, y, z = mapProj.fromECEF(*geoProj.toECEF(
                            events['lon'], events['lat'], events['alt']))

                        # Convert to integer grid coordinate bins
                        #      0    1    2    3
                        #   |    |    |    |    |
                        # -1.5  0.0  1.5  3.0  4.5

                        if x.shape[0] > 1:
                            density, edges = extent_density(
                                x, y, events['flash_id'].astype('int32'),
                                b.x[0], b.y[0], dx, dy, xedge, yedge)
                            do_plot = True
                            break
                # print 'density values: ', density.min(), density.max()

            if do_plot:  # need some data
                # density,edges = np.histogramdd((x,y), bins=(xedge,yedge))
                density_plot = p.multiples.flat[i].pcolormesh(
                    xedge,
                    yedge,
                    np.log10(density.transpose()),
                    vmin=-0.2,
                    vmax=np.log10(max_count),
                    cmap=colormap)
                label_string = frame_start.strftime('%H%M:%S')
                text_label = p.multiples.flat[i].text(
                    b.x[0] - pad + x_range * .01,
                    b.y[0] - pad + y_range * .01,
                    label_string,
                    color=(0.5, ) * 3,
                    size=6)
                density_plot.set_rasterized(True)
                density_maxes.append(density.max())
                total_counts.append(density.sum())
                all_t.append(frame_start)
                print(label_string, x.shape, density.max(), density.sum())

        color_scale = ColorbarBase(p.colorbar_ax,
                                   cmap=density_plot.cmap,
                                   norm=density_plot.norm,
                                   orientation='horizontal')
        # color_scale.set_label('count per pixel')
        color_scale.set_label('log10(count per pixel)')

        # moving reference frame correction. all panels will have same limits, based on time of last frame
        view_dt = 0.0  # (frame_start - t0).seconds
        x_ctr = x0 + view_dt * u
        y_ctr = y0 + view_dt * v
        view_x = (x_ctr - view_dx / 2.0 - pad, x_ctr + view_dx / 2.0 + pad)
        view_y = (y_ctr - view_dy / 2.0 - pad, y_ctr + view_dy / 2.0 + pad)
        # view_x  = (b.x[0]+view_dt*u, b.x[1]+view_dt*u)
        # view_y  = (b.y[0]+view_dt*v, b.y[1]+view_dt*v)

        # print 'making timeseries',
        # time_series = figure(figsize=(16,9))
        # ts_ax = time_series.add_subplot(111)
        # ts_ax.plot_date(mx2num(all_t),total_counts,'-', label='total sources', tz=tz)
        # ts_ax.plot_date(mx2num(all_t),density_maxes,'-', label='max pixel', tz=tz)
        # ts_ax.xaxis.set_major_formatter(time_series_x_fmt)
        # ts_ax.legend()
        # time_filename = 'out/LMA-timeseries_%s_%5.2fkm_%5.1fs.pdf' % (start_time.strftime('%Y%m%d_%H%M%S'), dx/1000.0, time_delta.seconds)
        # time_series.savefig(time_filename)
        # print ' ... done'

        print('making multiples', end=' ')
        p.multiples.flat[0].axis(view_x + view_y)
        filename = 'out/LMA-density_%s_%5.2fkm_%5.1fs.pdf' % (
            start_time.strftime('%Y%m%d_%H%M%S'), dx / 1000.0,
            time_delta.seconds)
        f.savefig(filename, dpi=150)
        print(' ... done')
        f.clf()
        # NB: returns after the first minute interval
        return events
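
The framing arithmetic above reduces to delta division and scaling; a minimal sketch with illustrative times:

from math import ceil
from mx.DateTime import DateTime, DateTimeDelta

start_time = DateTime(2009, 6, 10, 20, 0, 0)
end_time = DateTime(2009, 6, 10, 21, 30, 0)
time_delta = DateTimeDelta(0, 0, 10, 0)                     # ten-minute frames
n_frames = int(ceil((end_time - start_time) / time_delta))  # 9
frame_starts = [start_time + i * time_delta for i in range(n_frames)]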
Example #21
class VirtGroup(Group_class, EntityContactInfo, EntityTrait):
    """VirtGroup extension of VirtHome.

    This class tailors Cerebrum.Group:Group to VirtHome's needs.

    TBD: Do we need to make a special version of *_member() functions that
    check the member type? (i.e. can a virtgroup have any other members than
    VA/FA?)
    """

    DEFAULT_GROUP_LIFETIME = DateTimeDelta(180)

    def __init__(self, *rest, **kw):
        self.__super.__init__(*rest, **kw)

        self.legal_chars = set(string.letters + string.digits + " .@-")

    # end __init__

    def populate(self, creator_id, name, description):
        """Populate a VirtGroup's instance, subject to some constraints."""

        assert not self.illegal_name(name), "Invalid group name %s" % (name, )
        assert description.strip()

        self.__super.populate(
            creator_id=creator_id,
            visibility=self.const.group_visibility_all,
            name=name,
            description=description,
            group_type=self.const.group_type_unknown,
        )
        #self.expire_date = now() + self.DEFAULT_GROUP_LIFETIME

    # end populate

    def set_group_resource(self, url):
        """Unconditionally reassign a new URL to this group.

        If URL is None (or ''), we'll remove whatever URL was in the database.
        """

        # Check the URL's validity before doing anything to the database.
        self.verify_group_url(url)
        resources = self.get_contact_info(self.const.system_virthome,
                                          self.const.virthome_group_url)
        if resources:
            # There can be at most one URL...
            r = resources[0]
            # If the old value matches the new one, there is nothing to do.
            if r["contact_value"] == url:
                return

            # If the old value doesn't match the new one, we delete the old one
            # first. Helps us avoid multiple URLs for one group.
            self.delete_contact_info(self.const.system_virthome,
                                     self.const.virthome_group_url)

        if url:
            self.add_contact_info(self.const.system_virthome,
                                  self.const.virthome_group_url, url)

    # end set_group_resource

    def verify_group_url(self, url):
        """Check that the URL at least looks sane.

        We allow empty/None values here.
        """
        if not url:
            return True

        resource = urlparse.urlparse(url)
        if resource.scheme not in (
                "http",
                "https",
                "ftp",
        ):
            raise ValueError("Invalid url for group <%s>: <%s>" %
                             (self.group_name, url))

        return True

    # end verify_group_url

    def illegal_name(self, name):
        """Return a string with error message if groupname is illegal"""

        if not name.strip():
            return "Group name is empty"

        if (name.startswith(" ") or name.endswith(" ")):
            return "Group name cannot start/end with space"

        if any(x not in self.legal_chars for x in name):
            return "Illegal character in group name"

        if name.count("@") != 1:
            return "Group name is missing a realm"

        tmp_name, tmp_realm = name.split("@")
        if tmp_realm != cereconf.VIRTHOME_REALM:
            return "Wrong realm <%s> for VirtGroup <%s>"

        return False
Example #22
def process_affiliations(employment_file,
                         person_file,
                         use_fok,
                         people_to_ignore=None):
    """Parse employment_file and determine all affiliations.

    There are roughly 3 distinct parts:

    #. Cache all the affiliations in Cerebrum
    #. Scan the file and compare the file data with the cache. When there is a
       match, remove the entry from the cache.
    #. Remove from Cerebrum whatever is left in the cache (once we are done
       with the file, the cache contains those entries that were in Cerebrum
       but not in the file).
    """

    expired = load_expired_employees(file(person_file), use_fok, logger)

    # First we cache all existing affiliations. It's a mapping person-id =>
    # mapping (ou-id, affiliation) => status.
    affiliation_cache = cache_db_affiliations()
    person_cache = dict()

    def person_cacher(empid):
        ret = person_cache.get(empid, NotSet)
        if ret is NotSet:
            ret = person_cache[empid] = get_person(empid)
        return ret

    for tpl in make_employment_iterator(file(employment_file), use_fok,
                                        logger):
        if not tpl.valid():
            logger.debug(
                "Ignored invalid entry for person while "
                "processing affiliation: «%s»", tpl.sap_ansattnr)
            continue

        if people_to_ignore and tpl.sap_ansattnr in people_to_ignore:
            logger.debug("Invalid person with sap_id=%s", tpl.sap_ansattnr)
            continue

        if tpl.sap_ansattnr in expired:
            logger.debug(
                "Person sap_id=%s is no longer an employee; "
                "all employment info will be ignored", tpl.sap_ansattnr)
            continue

        # is the entry within a valid time frame?
        # The shift by 180 days has been requested by UiA around 2007-03-27
        if not (tpl.start_date - DateTimeDelta(180) <= today() <=
                tpl.end_date):
            logger.debug("Entry %s has wrong timeframe (start: %s, end: %s)",
                         tpl, tpl.start_date, tpl.end_date)
            continue

        ou_id = get_ou_id(tpl.sap_ou_id)
        if ou_id is None:
            logger.warn(
                "Cannot map SAP OU %s to Cerebrum ou_id (employment "
                "for person sap_id=%s).", tpl.sap_ou_id, tpl.sap_ansattnr)
            continue

        person = person_cacher(tpl.sap_ansattnr)
        if person is None:
            logger.warn("Cannot map SAP ansattnr %s to cerebrum person_id",
                        tpl.sap_ansattnr)
            continue

        (affiliation,
         affiliation_status) = sap_employment2affiliation(tpl.lonnstittel)

        synchronize_affiliations(affiliation_cache, person, ou_id, affiliation,
                                 affiliation_status)

    # We are done with fetching updates from file.
    # Need to write persons
    for p in person_cache.values():
        if p is None:
            continue
        logger.info("Writing cached affs for person id:%s", p.entity_id)
        p.write_db()

    # All the affiliations left in the cache exist in Cerebrum, but NOT in the
    # datafile. Thus delete them!
    remove_affiliations(affiliation_cache)
Example #23
_make_timezone(LocalTimezone, ('LOCAL',))

# -------------------------------------------------
# STANDARD (NON-DAYLIGHT) TIMEZONES
#
# The following timezones do not implement daylight
# saving time. Thus they are Timezone instances,
# and not TimezoneDST instances.

# good old UTC
_make_timezone(Timezone, ('UTC', 'GMT', 'UCT', 'Universal', 'Greenwich'),
               offset='-0000',
               offset_delta=DateTimeDelta(0, 0, 0))

# Argentina and Uruguay (GMT-3)
_make_timezone(Timezone,
               ('Argentina', 'ar', 'Uruguay', 'Uruguai', 'uy'),
               offset='-0300',
               offset_delta=DateTimeDelta(0, 0, -180))

# GMT-4 for LatAms
_make_timezone(
    Timezone,
Example #24
def _make_offset_delta(tup):
    delta = DateTimeDelta(0, int(tup[1]), int(tup[2]))
    if tup[0] == '-':
        delta = -1 * delta
    return delta
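
A hedged usage sketch, assuming the tuple comes from regex-splitting an offset string such as '-0300' (the regex here is illustrative):

import re

sign, hours, minutes = re.match(r'([+-])(\d\d)(\d\d)', '-0300').groups()
_make_offset_delta((sign, hours, minutes)).hours   # -3.0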
Example #25
def get_matching_accs(db):
    """ Get defunct account data.

    This function searches the database for accounts where:
      - account is not expired
      - account is owned by a person with no affiliations
      - account has been quarantined for > 1 year

    :return generator:
        A generator that yields dicts with account and quarantine data
    """
    ac = Factory.get('Account')(db)
    pe = Factory.get('Person')(db)
    co = Factory.get('Constants')(db)

    def _u(db_value):
        if db_value is None:
            return text_type('')
        if isinstance(db_value, bytes):
            return db_value.decode(db.encoding)
        return text_type(db_value)

    def _row_to_quar(row):
        """ list_entity_quarantines row to dict """
        return {
            'q_type': text_type(co.Quarantine(row['quarantine_type'])),
            'q_desc': _u(row['description']),
            'q_date': text_type(row['start_date'].strftime('%Y-%m-%d')),
        }

    logger.debug('caching personal accounts ...')
    owner_type = co.entity_person
    accounts = ac.search(owner_type=owner_type)
    logger.info('found %d accounts with owner_type=%r', len(accounts),
                text_type(owner_type))

    logger.debug('caching account homedirs ...')
    acc2disk = dict(
        (r['account_id'], r['path']) for r in ac.list_account_home())
    logger.info('found %d accounts assigned to a disk', len(acc2disk))

    logger.debug('caching active account quarantines ...')
    acc2quar = defaultdict(list)
    for q in ac.list_entity_quarantines(only_active=True,
                                        entity_types=co.entity_account):
        acc2quar[q['entity_id']].append(q)
    logger.info('found quarantines for %d accounts', len(acc2quar))

    logger.debug('caching person names ...')
    person2name = dict(
        (r['person_id'], r['name'])
        for r in pe.search_person_names(name_variant=co.name_full,
                                        source_system=co.system_cached))
    logger.info('found full names for %d persons', len(person2name))

    # Add person_id to the list if the person has an affiliation
    logger.debug('caching person affiliations ...')
    person_has_affs = set((r['person_id'] for r in pe.list_affiliations()))
    logger.info('found %d persons with affiliations', len(person_has_affs))

    for acc in accounts:
        # Is the account owner still affiliated?
        if acc['owner_id'] in person_has_affs:
            continue

        for quar in acc2quar[acc['account_id']]:
            if (quar['start_date'] + DateTimeDelta(365)) < now():
                break
        else:
            # loop terminated without finding a 'quar' -- i.e. no active
            # quarantine older than one year
            continue

        yield {
            'account_name': _u(acc['name']),
            'full_name': _u(person2name.get(acc['owner_id'])) or u'(not set)',
            'disk_path': _u(acc2disk.get(acc['account_id'])) or u'(not set)',
            'q_type': text_type(co.Quarantine(quar['quarantine_type'])),
            'q_desc': _u(quar['description']),
            'q_date': text_type(quar['start_date'].strftime('%Y-%m-%d')),
        }