Example #1
class RepositoryContentUnit(AutoRetryDocument):
    """
    Represents the link between a repository and the units associated with it.

    This inherits from AutoRetryDocument (a mongoengine.Document subclass) and defines
    the schema for the documents in the repo_content_units collection.

    :ivar repo_id: string representation of the repository id
    :type repo_id: mongoengine.StringField
    :ivar unit_id: string representation of content unit id
    :type unit_id: mongoengine.StringField
    :ivar unit_type_id: string representation of content unit type
    :type unit_type_id: mongoengine.StringField
    :ivar created: ISO8601 representation of the time the association was created
    :type created: pulp.server.db.fields.ISO8601StringField
    :ivar updated: ISO8601 representation of last time a copy, sync, or upload ensured that
                   the association existed
    :type updated: pulp.server.db.fields.ISO8601StringField
    :ivar _ns: The namespace field (Deprecated)
    :type _ns: mongoengine.StringField
    """

    repo_id = StringField(required=True)
    unit_id = StringField(required=True)
    unit_type_id = StringField(required=True)

    created = ISO8601StringField(
        required=True,
        default=lambda: dateutils.format_iso8601_utc_timestamp(
            dateutils.now_utc_timestamp()))
    updated = ISO8601StringField(
        required=True,
        default=lambda: dateutils.format_iso8601_utc_timestamp(
            dateutils.now_utc_timestamp()))

    # For backward compatibility
    _ns = StringField(default='repo_content_units')

    meta = {
        'collection': 'repo_content_units',
        'allow_inheritance': False,
        'indexes': [
            {
                'fields': ['repo_id', 'unit_type_id', 'unit_id'],
                'unique': True
            },
            {
                # Used for reverse lookup of units to repositories
                'fields': ['unit_id']
            }
        ]
    }
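
A note on the created/updated defaults: because each default is a lambda, the timestamp is evaluated when a document is instantiated, not once at class-definition time. Below is a minimal sketch of the same pattern with a plain mongoengine document; the Stamped class and _utc_now_iso8601 helper are hypothetical stand-ins, not part of Pulp.

import datetime

import mongoengine


def _utc_now_iso8601():
    # Stand-in for dateutils.format_iso8601_utc_timestamp(dateutils.now_utc_timestamp()),
    # producing the same 'YYYY-MM-DDTHH:MM:SSZ' shape asserted in the test example
    # further down this page.
    return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')


class Stamped(mongoengine.Document):
    # A callable default is re-evaluated for every new document, so each
    # instance records its own creation time instead of a module-import time.
    created = mongoengine.StringField(required=True, default=_utc_now_iso8601)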
Example #2
    def calculate_next_run(self):
        """
        This algorithm starts by determining when the first call was or should
        have been. If that is in the future, it just returns that time. If not,
        it adds as many intervals as it can without exceeding the current time,
        adds one more interval, and returns the result.

        For a schedule with no historically-recorded or scheduled start time,
        it will run immediately.

        :return:    ISO8601 string representing the next time this call should run.
        :rtype:     str
        """
        now_s, first_run_s, since_first_s, run_every_s, \
            last_scheduled_run_s, expected_runs = self._calculate_times()

        # if first run is in the future, return that time
        if first_run_s > now_s:
            next_run_s = first_run_s
        # if I've never run before and my first run is not in the future, run now!
        elif self.total_run_count == 0:
            next_run_s = now_s
        else:
            next_run_s = last_scheduled_run_s + run_every_s

        return dateutils.format_iso8601_utc_timestamp(next_run_s)
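
The docstring's interval arithmetic can be sketched with plain epoch-second timestamps. The snippet below is a minimal, self-contained illustration of the described behaviour; it uses hypothetical first_run/interval values and does not call the Pulp scheduler's _calculate_times helper.

import time

# Hypothetical schedule: first run was an hour ago, repeating every 10 minutes.
now = time.time()
first_run = now - 3600
interval = 600

if first_run > now:
    # First run is still in the future, so that is the next run.
    next_run = first_run
else:
    # Add as many whole intervals as fit without exceeding 'now',
    # then one more, which lands on the next future occurrence.
    elapsed_intervals = int((now - first_run) // interval)
    next_run = first_run + (elapsed_intervals + 1) * interval

assert next_run > now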
Example #3
def content_unit_obj(content_unit):
    """
    Serialize a content unit.
    """
    serial = db.scrub_mongo_fields(content_unit)
    last_updated = content_unit.get('_last_updated')
    if last_updated:
        content_unit['_last_updated'] = dateutils.format_iso8601_utc_timestamp(last_updated)
    return serial
Example #4
def content_unit_obj(content_unit):
    """
    Serialize a content unit.
    """
    serial = db.scrub_mongo_fields(content_unit)
    serialize_unit_with_serializer(content_unit)
    last_updated = content_unit.get('_last_updated')
    if last_updated:
        content_unit['_last_updated'] = dateutils.format_iso8601_utc_timestamp(last_updated)
    return serial
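
Both serializer variants convert a raw _last_updated epoch value into an ISO8601 string in place. Here is a minimal usage sketch with a plain dict standing in for the Mongo document; the field values are hypothetical, and it assumes pulp.common.dateutils is the module referenced as dateutils above.

from pulp.common import dateutils

# Hypothetical unit document as it might come out of MongoDB.
content_unit = {'_id': 'abc123', 'name': 'example-unit', '_last_updated': 1351074000}

last_updated = content_unit.get('_last_updated')
if last_updated:
    # Epoch seconds -> 'YYYY-MM-DDTHH:MM:SSZ'
    content_unit['_last_updated'] = dateutils.format_iso8601_utc_timestamp(last_updated)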
Example #5
    def __init__(self, repo_id, unit_id, unit_type_id):
        super(RepoContentUnit, self).__init__()

        # Mapping Identity Information
        self.repo_id = repo_id
        self.unit_id = unit_id
        self.unit_type_id = unit_type_id

        # store time in UTC
        utc_timestamp = dateutils.now_utc_timestamp()
        self.created = dateutils.format_iso8601_utc_timestamp(utc_timestamp)
        self.updated = self.created
Example #6
def associate_single_unit(repository, unit):
    """
    Associate a single unit to a repository.

    :param repository: The repository to update.
    :type repository: pulp.server.db.model.Repository
    :param unit: The unit to associate to the repository.
    :type unit: pulp.server.db.model.ContentUnit
    """
    current_timestamp = dateutils.now_utc_timestamp()
    formatted_datetime = dateutils.format_iso8601_utc_timestamp(
        current_timestamp)
    qs = model.RepositoryContentUnit.objects(repo_id=repository.repo_id,
                                             unit_id=unit.id,
                                             unit_type_id=unit.unit_type_id)
    qs.update_one(set_on_insert__created=formatted_datetime,
                  set__updated=formatted_datetime,
                  upsert=True)
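
The set_on_insert__created / set__updated split maps onto MongoDB's $setOnInsert and $set operators: created is written only when the upsert actually inserts a new association, while updated is refreshed on every call, so re-running a sync or copy bumps updated but preserves created. Below is a minimal standalone sketch of that behaviour with a throwaway mongoengine document; the Assoc class and database name are hypothetical, and a local MongoDB is needed to actually run it.

import mongoengine


class Assoc(mongoengine.Document):
    repo_id = mongoengine.StringField(required=True)
    unit_id = mongoengine.StringField(required=True)
    created = mongoengine.StringField()
    updated = mongoengine.StringField()


mongoengine.connect('assoc_sketch_db')  # hypothetical database name

qs = Assoc.objects(repo_id='repo-1', unit_id='unit-1')

# First call inserts the document: both 'created' and 'updated' are written.
qs.update_one(set_on_insert__created='2020-01-01T00:00:00Z',
              set__updated='2020-01-01T00:00:00Z', upsert=True)

# Second call matches the existing document: $setOnInsert is skipped,
# so 'created' keeps its original value while 'updated' moves forward.
qs.update_one(set_on_insert__created='2021-06-01T00:00:00Z',
              set__updated='2021-06-01T00:00:00Z', upsert=True)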
Example #7
    def _next_instance(self, last_run_at=None):
        """
        Returns an instance of this class with the appropriate fields incremented
        and updated to reflect that its task has been queued. The parent
        ScheduledCall gets saved to the database with these updated values.

        :param last_run_at: not used here, but it is part of the superclass
                            function signature

        :return:    a new instance of the same class, but with
                    its date and count fields updated.
        :rtype:     pulp.server.db.model.dispatch.ScheduleEntry
        """
        self._scheduled_call.last_run_at = dateutils.format_iso8601_utc_timestamp(time.time())
        self._scheduled_call.total_run_count += 1
        if self._scheduled_call.remaining_runs:
            self._scheduled_call.remaining_runs -= 1
        if self._scheduled_call.remaining_runs == 0:
            logger.info('disabling schedule with 0 remaining runs: %s' % self._scheduled_call.id)
            self._scheduled_call.enabled = False
        self._scheduled_call.save()
        return self._scheduled_call.as_schedule_entry()
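
Note that `if self._scheduled_call.remaining_runs:` is a truthiness check: a schedule whose remaining_runs is None (unlimited) is never decremented and never disabled, while a finite count counts down and flips enabled to False when it reaches zero. The sketch below isolates that countdown using a hypothetical stand-in object with only the fields the logic touches.

class FakeSchedule(object):
    # Hypothetical stand-in for ScheduledCall; only the countdown fields exist.
    def __init__(self, remaining_runs):
        self.remaining_runs = remaining_runs
        self.total_run_count = 0
        self.enabled = True


def record_run(schedule):
    schedule.total_run_count += 1
    if schedule.remaining_runs:           # None (unlimited) is falsy: no countdown
        schedule.remaining_runs -= 1
    if schedule.remaining_runs == 0:      # None != 0, so unlimited schedules stay enabled
        schedule.enabled = False


limited = FakeSchedule(remaining_runs=2)
record_run(limited)    # remaining_runs -> 1, still enabled
record_run(limited)    # remaining_runs -> 0, enabled -> False

unlimited = FakeSchedule(remaining_runs=None)
record_run(unlimited)  # untouched: None stays None and never equals 0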
Example #8
    def test_formatting_utc_timestamp(self):
        dt = datetime.datetime(2012, 10, 24, 10, 20, tzinfo=dateutils.utc_tz())
        ts = dateutils.datetime_to_utc_timestamp(dt)
        formatted = dateutils.format_iso8601_utc_timestamp(ts)
        self.assertEqual(formatted, '2012-10-24T10:20:00Z')
Example #9
    def setUp(self):
        super(RemoveRepoDuplicateNevra, self).setUp()

        # repo_a is based on the test repo defined in TestPurgeBase
        self.repo_a = platform_model.Repository(repo_id=self.repo.id)
        self.repo_a.save()

        # repo_b is a control repo, that should be untouched by purge functions
        self.repo_b = platform_model.Repository(repo_id='b')
        self.repo_b.save()

        # create units
        unit_key_base = {
            'epoch': '0',
            'version': '0',
            'release': '23',
            'arch': 'noarch',
            'checksumtype': 'sha256',
            '_last_updated': 0,
        }

        units = []
        self.duplicate_unit_ids = set()
        for unit_type in self.UNIT_TYPES:
            unit_key_dupe = unit_key_base.copy()
            unit_key_uniq = unit_key_base.copy()

            # account for slightly different unit key field on drpm
            if unit_type is models.DRPM:
                unit_key_dupe['filename'] = 'dupe'
                unit_key_uniq['filename'] = 'uniq'
            else:
                unit_key_dupe['name'] = 'dupe'
                unit_key_uniq['name'] = 'uniq'

            # create units with duplicate nevra for this type
            # after purging, only one of the three should remain
            for i in range(3):
                unit_dupe = unit_type(**unit_key_dupe)
                # use the unit's python id to guarantee a unique "checksum"
                unit_dupe.checksum = str(id(unit_dupe))
                unit_dupe.save()
                units.append(unit_dupe)
                if i != 0:
                    # after the first unit, stash the "extra" duplicates to make it easier
                    # to modify the unit association updated timestamps for predictable sorting
                    self.duplicate_unit_ids.add(unit_dupe.id)

            # use the incrementing unit count to make the uniq unit's nevra unique
            unit_key_uniq['version'] = str(len(units))

            # create a unit with unique nevra
            unit_uniq = unit_type(**unit_key_uniq)
            unit_uniq.checksum = str(hash(unit_uniq))
            unit_uniq.save()
            units.append(unit_uniq)

        # associate each unit with each repo
        for repo in self.repo_a, self.repo_b:
            for unit in units:
                repo_controller.associate_single_unit(repo, unit)

        # Sanity check: 3 dupe units and 1 uniq unit for n unit types, for each repo
        expected_rcu_count = 4 * len(self.UNIT_TYPES)
        for repo_id in self.repo_a.repo_id, self.repo_b.repo_id:
            self.assertEqual(
                platform_model.RepositoryContentUnit.objects.filter(
                    repo_id=repo_id).count(), expected_rcu_count)

        # To ensure the purge mechanism behavior is predictable for testing,
        # go through the duplicate unit IDs and set their updated time to be in the past,
        # since unit associations were all just created at the same time.
        # The older associations are the ones that should be purged.
        earlier_timestamp = dateutils.now_utc_timestamp() - 3600
        formatted_timestamp = dateutils.format_iso8601_utc_timestamp(
            earlier_timestamp)
        platform_model.RepositoryContentUnit.objects.filter(unit_id__in=self.duplicate_unit_ids)\
            .update(set__updated=formatted_timestamp)
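
Back-dating the duplicates' updated field gives the purge a deterministic ordering to work from. As a sketch of what that ordering enables (a hypothetical query reusing the fixture's names from setUp above, not the actual RemoveRepoDuplicateNevra purge code), the most recently refreshed association sorts first when ordering by updated descending:

# Hypothetical follow-up query over the back-dated duplicate associations.
newest_first = platform_model.RepositoryContentUnit.objects.filter(
    repo_id=self.repo_a.repo_id,
    unit_id__in=self.duplicate_unit_ids).order_by('-updated')
# The association updated most recently sorts first; the back-dated ones
# follow and are the natural candidates for removal.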