Example #1
    def get(self, schedule_id):
        """
        Get the call request and the schedule for the given schedule id
        @param schedule_id: id of the schedule for the call request
        @type  schedule_id: str
        @return: scheduled call report dictionary
        @rtype:  dict
        """
        if isinstance(schedule_id, basestring):
            schedule_id = ObjectId(schedule_id)

        scheduled_call = self.scheduled_call_collection.find_one({'_id': schedule_id})

        if scheduled_call is None:
            raise pulp_exceptions.MissingResource(schedule=str(schedule_id))

        report = scheduled_call_to_report_dict(scheduled_call)
        return report
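The isinstance() check above exists because the collection stores _id as an ObjectId rather than a string. As a standalone illustration (not part of the Pulp code above), here is a minimal sketch of that coercion; the helper name to_object_id is hypothetical, and an invalid string makes the ObjectId constructor raise bson.errors.InvalidId:

from bson import ObjectId
from bson.errors import InvalidId

def to_object_id(schedule_id):
    """Coerce a string id to an ObjectId, passing ObjectId instances through."""
    if isinstance(schedule_id, ObjectId):
        return schedule_id
    try:
        return ObjectId(schedule_id)  # expects a 24-character hex string
    except InvalidId:
        raise ValueError('not a valid ObjectId: %r' % (schedule_id,))

# to_object_id('4f8d8e4be138230ab00000c2')  -> ObjectId('4f8d8e4be138230ab00000c2')
# to_object_id('not-an-id')                 -> ValueError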
Example #2
    def test_expired_object_id(self):
        """
        Make sure that _create_expired_object_id() generates correct ObjectIds.
        """
        expired_oid = _create_expired_object_id(timedelta(seconds=1))

        # The oid should be about a second old, but since we didn't create the now_oid until
        # after we ran the _create_expired_object_id() function, we can't assert that the
        # timedelta is exactly one second. There should definitely be less than two seconds of
        # difference between them, however (unless Nose is really struggling to run the tests),
        # so we can make sure it fits inside the one-to-two-second window.
        now_oid = ObjectId()
        self.assertTrue((now_oid.generation_time -
                         expired_oid.generation_time) >= timedelta(seconds=1))
        self.assertTrue((now_oid.generation_time -
                         expired_oid.generation_time) < timedelta(seconds=2))
        # Also, let's make sure the type is correct
        self.assertTrue(isinstance(expired_oid, ObjectId))
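For context, here is a hypothetical sketch of what a helper like _create_expired_object_id() could look like; the real implementation under test may differ. It leans on ObjectId.from_datetime(), which builds a dummy oid whose generation_time matches the given timestamp:

from datetime import datetime, timedelta
from bson import ObjectId

def _create_expired_object_id(age):
    """Return an ObjectId whose embedded timestamp lies `age` in the past."""
    expired_at = datetime.utcnow() - age       # naive datetimes are treated as UTC
    return ObjectId.from_datetime(expired_at)

# _create_expired_object_id(timedelta(seconds=1)).generation_time is roughly one
# second behind ObjectId().generation_time, which is the window the test checks.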
Example #3
    def test_update_only_config(self):
        # Setup
        manager = manager_factory.event_listener_manager()
        created = manager.create(http.TYPE_ID, {'a' : 'a', 'b' : 'b'}, [event_data.TYPE_REPO_SYNC_STARTED])

        # Test
        new_config = {'a' : 'x', 'c' : 'c'}
        body = {
            'notifier_config' : new_config,
        }

        status, body = self.put('/v2/events/%s/' % created['id'], body)

        # Verify
        self.assertEqual(200, status)

        updated = EventListener.get_collection().find_one({'_id' : ObjectId(created['_id'])})
        expected_config = {'a' : 'x', 'b' : 'b', 'c' : 'c'}
        self.assertEqual(updated['notifier_config'], expected_config)
Example #4
def _repos(v1_database, v2_database):
    v1_coll = v1_database.repos
    v2_coll = v2_database.repos

    # Idempotency: By repo_id
    v2_repo_ids = [x['id'] for x in v2_coll.find({}, {'id': 1})]
    spec = {
        '$and': [
            {
                'id': {
                    '$nin': v2_repo_ids
                }
            },
            {
                'content_types': V1_ISO_REPO
            },
        ]
    }
    missing_v1_repos = v1_coll.find(spec)

    new_repos = []
    for v1_repo in missing_v1_repos:
        repo_object_id = ObjectId()

        # Identifying tag for the CLI
        v2_notes = v1_repo.get('notes', {})
        v2_notes[REPO_NOTE_KEY] = REPO_NOTE_ISO

        v2_repo = {
            '_id': repo_object_id,  # technically not needed but added for clarity
            'id': v1_repo['id'],
            'display_name': v1_repo['name'],
            'description': None,
            'notes': v2_notes,
            'scratchpad': {},
            'content_unit_count': 0
        }
        new_repos.append(v2_repo)

    if new_repos:
        v2_coll.insert(new_repos, safe=True)

    return True
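The insert(list, safe=True) call is the legacy pymongo batch-insert form: passing a list writes all documents in one round trip, and safe=True waits for server acknowledgement. A rough sketch of the equivalent with a modern pymongo client, assuming a local mongod, a database named pulp_database, and a hypothetical repo document (none of which come from the code above):

from bson import ObjectId
from pymongo import MongoClient

v2_coll = MongoClient().pulp_database.repos    # assumed connection and database name

new_repos = [{
    '_id': ObjectId(),             # pre-generated, just like the migration above
    'id': 'demo-repo',             # hypothetical repo id
    'display_name': 'Demo repo',
    'description': None,
    'notes': {},
    'scratchpad': {},
    'content_unit_count': 0,
}]
result = v2_coll.insert_many(new_repos)        # acknowledged by default
print(result.inserted_ids)                     # the pre-generated ObjectId values are preserved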
Example #5
    def update(self, schedule_id, **schedule_updates):
        """
        Update a scheduled call request

        Valid schedule updates:
         * call_request
         * schedule
         * failure_threshold
         * remaining_runs
         * enabled

        @param schedule_id: id of the schedule for the call request
        @type  schedule_id: str
        @param schedule_updates: updates for scheduled call
        @type  schedule_updates: dict
        """
        if isinstance(schedule_id, basestring):
            schedule_id = ObjectId(schedule_id)

        if self.scheduled_call_collection.find_one(schedule_id) is None:
            raise pulp_exceptions.MissingResource(schedule=str(schedule_id))

        validate_schedule_updates(schedule_updates)

        call_request = schedule_updates.pop('call_request', None)

        if call_request is not None:
            schedule_updates['serialized_call_request'] = call_request.serialize()

        schedule = schedule_updates.get('schedule', None)

        if schedule is not None:
            interval, start, runs = dateutils.parse_iso8601_interval(schedule)
            schedule_updates.setdefault('remaining_runs', runs) # honor explicit update
            # XXX (jconnor) it'd be nice to update the next_run if the schedule
            # has changed, but it requires mucking with the internals of the
            # scheduled call instance, which is all encapsulated in the
            # ScheduledCall constructor. The next_run field will be correctly
            # updated after the next run.

        self.scheduled_call_collection.update({'_id': schedule_id}, {'$set': schedule_updates}, safe=True)
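Because the update uses '$set', only the keys present in schedule_updates are rewritten; every other field of the scheduled call document is left untouched. A minimal sketch of the same operation against a plain pymongo collection using the modern update_one call, with the collection and field names assumed from the code above:

from bson import ObjectId
from pymongo import MongoClient

coll = MongoClient().pulp_database.scheduled_calls    # assumed names
schedule_id = ObjectId('4f8d8e4be138230ab00000c2')    # placeholder id

# Disable the schedule and cap its remaining runs; other fields stay as they were.
coll.update_one({'_id': schedule_id},
                {'$set': {'enabled': False, 'remaining_runs': 3}})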
Example #6
def _associate_distribution(v1_distribution, v2_distribution, v2_ass_coll):

    # This functions just like errata associations so check _errata for comments
    # on this approach.

    new_associations = []
    for repo_id in v1_distribution['repoids']:
        new_association = {
            '_id': ObjectId(),
            'repo_id': repo_id,
            'unit_id': v2_distribution['_id'],
            'unit_type_id': 'distribution',
            'owner_type': DEFAULT_OWNER_TYPE,
            'owner_id': DEFAULT_OWNER_ID,
            'created': DEFAULT_CREATED,
            'updated': DEFAULT_UPDATED,
        }
        new_associations.append(new_association)

    if new_associations:
        v2_ass_coll.insert(new_associations, safe=True)
Example #7
def _consumer_bindings(v2_database, v1_consumer):
    v2_coll = v2_database.consumer_bindings
    consumer_id = v1_consumer['id']
    repo_ids = v1_consumer['repoids']

    # Idempotency: Uniqueness is determined by the tuple of consumer and repo ID
    bound_repo_ids = [x['repo_id'] for x in v2_coll.find({'consumer_id' : consumer_id})]
    unbound_repo_ids = set(repo_ids) - set(bound_repo_ids)

    new_bindings = []
    for repo_id in unbound_repo_ids:
        binding = {
            '_id' : ObjectId(),
            'consumer_id' : consumer_id,
            'repo_id' : repo_id,
            'distributor_id' : YUM_DISTRIBUTOR_ID,
            'deleted' : False,
        }
        new_bindings.append(binding)

    if new_bindings:
        v2_coll.insert(new_bindings, safe=True)
Example #8
def _consumer_groups(v1_database, v2_database):
    v1_coll = v1_database.consumergroups  # seriously, no underscore
    v2_coll = v2_database.consumer_groups

    # Idempotency: By consumer group ID
    v1_group_ids = [x['id'] for x in v1_coll.find({}, {'id' : 1})]
    v2_existing_ids = [x['id'] for x in v2_coll.find({}, {'id' : 1})]
    ids_to_add = set(v1_group_ids) - set(v2_existing_ids)

    for group_id in ids_to_add:
        v1_group = v1_coll.find_one({'id' : group_id})
        v2_group = {
            '_id' : ObjectId(),
            'id' : v1_group['id'],
            'display_name' : v1_group['id'],  # no display name in v1
            'description' : v1_group['description'],
            'consumer_ids' : v1_group['consumerids'],  # again, no underscores
            'notes' : v1_group['key_value_pairs'],
            'scratchpad' : None,  # no idea why this is in the model; it doesn't make any sense
        }

        # Likely a small amount of groups, not worried about batching the inserts
        v2_coll.insert(v2_group, safe=True)
Example #9
def _package_group_categories(v1_database, v2_database, report):

    # These act nearly identically to groups, so see the comments in there
    # for more information.

    # Idempotency: As with groups, pre-load the tuples of repo ID to category
    # ID into memory and use that to check whether the category already exists
    # before inserting.

    v2_coll = v2_database.units_package_category
    v2_ass_coll = v2_database.repo_content_units

    # Tuple of repo ID and group ID
    already_added_tuples = [(x['repo_id'], x['id'])
                            for x in v2_coll.find({}, {
                                'repo_id': 1,
                                'id': 1
                            })]

    v1_repos = v1_database.repos.find({}, {
        'id': 1,
        'packagegroupcategories': 1
    })
    for v1_repo in v1_repos:

        for category_id in v1_repo.get('packagegroupcategories', {}).keys():

            # Idempotency check
            if (v1_repo['id'], category_id) in already_added_tuples:
                continue

            v1_category = v1_repo['packagegroupcategories'][category_id]
            v2_category_id = str(uuid.uuid4())
            new_category = {
                '_id': v2_category_id,
                '_storage_path': None,
                '_content_type_id': 'package_category',
                'description': v1_category['description'],
                'display_order': v1_category['display_order'],
                'id': v1_category['id'],
                'name': v1_category['name'],
                'packagegroupids': v1_category['packagegroupids'],
                'repo_id': v1_repo['id'],
                'translated_description':
                v1_category['translated_description'],
                'translated_name': v1_category['translated_name'],
            }
            v2_coll.insert(new_category, safe=True)

            new_association = {
                '_id': ObjectId(),
                'repo_id': v1_repo['id'],
                'unit_id': v2_category_id,
                'unit_type_id': 'package_category',
                'owner_type': DEFAULT_OWNER_TYPE,
                'owner_id': DEFAULT_OWNER_ID,
                'created': DEFAULT_CREATED,
                'updated': DEFAULT_UPDATED,
            }
            v2_ass_coll.insert(new_association, safe=True)

    return True
Example #10
def _package_groups(v1_database, v2_database, report):

    # In v2, the unique identifier for a package group is the
    # pairing of the group ID and the repository it's in. In v1, the package
    # group is embedded in the repo document itself, which is where we get
    # that information from.

    # In v1 the repository owns the relationship to a package group. Don't look
    # at the model class itself; it wasn't added there, but the code will still
    # stuff it in under the key "packagegroups". The value at that key is a dict
    # of group ID to a PackageGroup instance (v1 model).

    # Idempotency: The simplest way to handle this is to pre-load the set of
    # repo ID/group ID tuples into memory and verify each group found in each
    # v1 repo against that to determine whether it has already been added or
    # not. The nice part about this over relying on the uniqueness checks in
    # mongo itself is that we can batch the group inserts.

    v2_coll = v2_database.units_package_group
    v2_ass_coll = v2_database.repo_content_units

    # Tuple of repo ID and group ID
    already_added_tuples = [(x['repo_id'], x['id'])
                            for x in v2_coll.find({}, {
                                'repo_id': 1,
                                'id': 1
                            })]

    v1_repos = v1_database.repos.find({}, {'id': 1, 'packagegroups': 1})
    for v1_repo in v1_repos:

        for group_id in v1_repo.get('packagegroups', {}).keys():

            # Idempotency check
            if (v1_repo['id'], group_id) in already_added_tuples:
                continue

            v1_group = v1_repo['packagegroups'][group_id]
            v2_group_id = str(uuid.uuid4())
            new_group = {
                '_id': v2_group_id,
                '_storage_path': None,
                '_content_type_id': 'package_group',
                'conditional_package_names':
                v1_group['conditional_package_names'],
                'default': v1_group['default'],
                'default_package_names': v1_group['default_package_names'],
                'description': v1_group['description'],
                'display_order': v1_group['display_order'],
                'id': v1_group['id'],
                'langonly': v1_group['langonly'],
                'mandatory_package_names': v1_group['mandatory_package_names'],
                'name': v1_group['name'],
                'optional_package_names': v1_group['optional_package_names'],
                'repo_id': v1_repo['id'],
                'translated_description': v1_group['translated_description'],
                'translated_name': v1_group['translated_name'],
                'user_visible': v1_group['user_visible'],
            }
            v2_coll.insert(new_group, safe=True)

            new_association = {
                '_id': ObjectId(),
                'repo_id': v1_repo['id'],
                'unit_id': v2_group_id,
                'unit_type_id': 'package_group',
                'owner_type': DEFAULT_OWNER_TYPE,
                'owner_id': DEFAULT_OWNER_ID,
                'created': DEFAULT_CREATED,
                'updated': DEFAULT_UPDATED,
            }
            v2_ass_coll.insert(new_association, safe=True)

    return True
Example #11
def _errata(v1_database, v2_database, report):

    v1_coll = v1_database.errata
    v2_coll = v2_database.units_erratum
    v2_ass_coll = v2_database.repo_content_units

    # Idempotency: We're lucky here, the uniqueness is just by ID, so we can
    # do a pre-fetch and determine what needs to be added.

    v2_errata_ids = [x['id'] for x in v2_coll.find({}, {'id': 1})]
    missing_v1_errata = v1_coll.find({'id': {'$nin': v2_errata_ids}})

    for v1_erratum in missing_v1_errata:
        erratum_id = str(uuid.uuid4())
        new_erratum = {
            '_id': erratum_id,
            '_storage_path': None,
            '_content_type_id': 'erratum',
            'description': v1_erratum['description'],
            'from_str': v1_erratum['from_str'],
            'id': v1_erratum['id'],
            'issued': v1_erratum['issued'],
            'pkglist': v1_erratum.get('pkglist', []),
            'pushcount': v1_erratum['pushcount'],
            'reboot_suggested': v1_erratum['reboot_suggested'],
            'references': v1_erratum['references'],
            'release': v1_erratum['release'],
            'rights': v1_erratum['rights'],
            'severity': v1_erratum['severity'],
            'solution': v1_erratum['solution'],
            'status': v1_erratum['status'],
            'summary': v1_erratum['summary'],
            'title': v1_erratum['title'],
            'type': v1_erratum['type'],
            'updated': v1_erratum['updated'],
            'version': v1_erratum['version'],
        }
        v2_coll.insert(new_erratum, safe=True)

        # Most of the upgrade scripts can be cancelled and resumed at any point
        # and will do the right thing. In this case, it's a nightmare to
        # cross-reference the v1 erratum against the v2 _id field, so adding the
        # association here isn't 100% safe in the event the user ctrl+c's the
        # upgrade (which they shouldn't do anyway), but it's close enough.
        new_associations = []
        for repo_id in v1_erratum['repoids']:
            new_association = {
                '_id': ObjectId(),
                'repo_id': repo_id,
                'unit_id': erratum_id,
                'unit_type_id': 'erratum',
                'owner_type': DEFAULT_OWNER_TYPE,
                'owner_id': DEFAULT_OWNER_ID,
                'created': DEFAULT_CREATED,
                'updated': DEFAULT_UPDATED,
            }
            new_associations.append(new_association)

        if new_associations:
            v2_ass_coll.insert(new_associations, safe=True)

    return True
Example #12
File: base.py Project: beav/pulp
    def __init__(self):
        self._id = ObjectId()
        self.id = str(self._id)  # legacy behavior, would love to rid ourselves of this
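The legacy id field is simply the string form of the ObjectId, and the two convert back and forth losslessly. A small illustrative sketch, independent of the Pulp model class:

from bson import ObjectId

_id = ObjectId()
id_str = str(_id)                # 24-character hex string
assert len(id_str) == 24
assert ObjectId(id_str) == _id   # the string round-trips to an equal ObjectId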
Example #13
def _repo_distributors(v1_database, v2_database, report):
    v1_coll = v1_database.repos
    v2_repo_coll = v2_database.repos
    v2_dist_coll = v2_database.repo_distributors

    # Only the yum distributor is added in this process. The export distributor
    # will be added to any repositories that do not have it as part of the
    # normal DB migration process.

    # Idempotency: Only one distributor is added per repo, so check for that
    v2_distributor_repo_ids = [x['repo_id'] for x in v2_dist_coll.find({}, {'repo_id' : 1})]
    spec = {
        '$and' : [
            {'id' : {'$nin' : v2_distributor_repo_ids}},
            {'content_types' : V1_YUM_REPO},
        ]
    }
    missing_v1_repos = v1_coll.find(spec)

    new_distributors = []
    for v1_repo in missing_v1_repos:

        # Sanity check that the repository was added to v2. This should never
        # happen, but we should account for it anyway.
        v2_repo = v2_repo_coll.find_one({'id' : v1_repo['id']})
        if v2_repo is None:
            report.error('Repository [%s] not found in the v2 database; '
                         'distributor addition being canceled' % v1_repo['id'])
            return False

        new_distributor = {
            '_id' : ObjectId(),
            'id' : YUM_DISTRIBUTOR_ID,
            'repo_id' : v1_repo['id'],
            'distributor_type_id' : YUM_DISTRIBUTOR_TYPE_ID,
            'auto_publish' : True,
            'scratchpad' : None,
            'last_publish' : v1_repo['last_sync'], # in v1 sync and publish are the same, so close enough
            'scheduled_publishes' : [],  # likely don't need to revisit, the sync/auto-publish will take care
        }

        config = {
            'relative_url' : v1_repo['relative_path'],
            'http' : False,
            'https' : True,
        }

        # Load values from the static server.conf file
        if not SKIP_SERVER_CONF:
            parser = SafeConfigParser()
            parser.read(V1_SERVER_CONF)

            if parser.has_option('security', 'ssl_ca_certificate'):
                # Read in the contents of the certificate and store in the
                # configuration in the DB.
                ca_filename = parser.get('security', 'ssl_ca_certificate')
                try:
                    # Not bothering with an existence check; let any problems
                    # trigger the warning in the except
                    f = open(ca_filename, 'r')
                    ca_cert_contents = f.read()
                    f.close()
                    config['https_ca'] = ca_cert_contents
                except:
                    report.warning('Could not read SSL CA certificate at [%s] for '
                                   'repository [%s]' % (ca_filename, v1_repo['id']))

        # Load the GPG keys from disk if present
        repo_key_dir = os.path.join(GPG_KEY_ROOT, v1_repo['relative_path'])
        if os.path.exists(repo_key_dir) and not SKIP_GPG_KEYS:
            key_filenames = os.listdir(repo_key_dir)
            if len(key_filenames) > 0:
                filename = os.path.join(repo_key_dir, key_filenames[0])

                try:
                    f = open(filename, 'r')
                    key_contents = f.read()
                    f.close()
                    config['gpgkey'] = key_contents
                except:
                    report.warning('Could not read GPG key at [%s] for '
                                   'repository [%s]' % (filename, v1_repo['id']))

        new_distributor['config'] = config
        new_distributors.append(new_distributor)

    if new_distributors:
        v2_dist_coll.insert(new_distributors, safe=True)

    return True
Example #14
def _repo_importers(v1_database, v2_database, report):
    v1_coll = v1_database.repos
    v2_repo_coll = v2_database.repos
    v2_imp_coll = v2_database.repo_importers

    # Idempotency: There is a single importer per repo, so we can simply check
    # for an importer with the given repo ID
    v2_importer_repo_ids = [x['repo_id'] for x in v2_imp_coll.find({}, {'repo_id' : 1})]
    spec = {
        '$and' : [
            {'id' : {'$nin' : v2_importer_repo_ids}},
            {'content_types' : V1_YUM_REPO},
        ]
    }
    missing_v1_repos = v1_coll.find(spec)

    new_importers = []
    for v1_repo in missing_v1_repos:

        # Sanity check that the repository was added to v2. This should never
        # happen, but we should account for it anyway.
        v2_repo = v2_repo_coll.find_one({'id' : v1_repo['id']})
        if v2_repo is None:
            report.error('Repository [%s] not found in the v2 database; '
                         'importer addition being canceled' % v1_repo['id'])
            return False

        new_importer = {
            '_id' : ObjectId(),
            'id' : YUM_IMPORTER_ID,
            'repo_id' : v1_repo['id'],
            'importer_type_id' : YUM_IMPORTER_TYPE_ID,
            'scratchpad' : None,
            'last_sync' : v1_repo['last_sync'],
            'scheduled_syncs' : [],
        }

        # The configuration intentionally omits the importer configuration
        # values: num_threads, num_old_packages, remove_old, verify_checksum,
        #         verify_size, max_speed
        # Being omitted will cause the yum importer to use the default, which
        # is the desired behavior of the upgrade.

        # Any values that do carry over are set below. To keep consistent with a fresh
        # install, fields without a value aren't defaulted to None but rather omitted entirely.
        config = {}

        if v1_repo['source']:  # will be None for a feedless repo
            config['feed_url'] = v1_repo['source']['url']

        # Load the certificate content into the database. It needs to be written to the
        # working directory as well, but that will be done in the filesystem scripts.
        if v1_repo['feed_ca']:
            if not os.path.exists(v1_repo['feed_ca']):
                continue

            f = open(v1_repo['feed_ca'], 'r')
            cert = f.read()
            f.close()

            config['ssl_ca_cert'] = cert

        if v1_repo['feed_cert']:
            if not os.path.exists(v1_repo['feed_cert']):
                continue

            f = open(v1_repo['feed_cert'], 'r')
            cert = f.read()
            f.close()

            config['ssl_client_cert'] = cert

        # Load values from the static server.conf file
        if not SKIP_SERVER_CONF:
            parser = SafeConfigParser()
            parser.read(V1_SERVER_CONF)

            for o in ('proxy_url', 'proxy_port', 'proxy_user', 'proxy_pass'):
                if parser.has_option('yum', o):
                    config[o] = parser.get('yum', o)

        new_importer['config'] = config
        new_importers.append(new_importer)

    if new_importers:
        v2_imp_coll.insert(new_importers, safe=True)

    return True
Example #15
def _sync_schedules(v1_database, v2_database, report):
    v1_repo_collection = v1_database.repos
    v2_repo_importer_collection = v2_database.repo_importers
    v2_scheduled_call_collection = v2_database.scheduled_calls

    # ugly hack to find out which repos have already been scheduled
    # necessary because $size is not a meta-query and doesn't support $gt, etc
    repos_without_schedules = v2_repo_importer_collection.find(
        {'scheduled_syncs': {
            '$size': 0
        }}, fields=['repo_id'])

    repo_ids_without_schedules = [
        r['repo_id'] for r in repos_without_schedules
    ]

    repos_with_schedules = v2_repo_importer_collection.find(
        {'repo_id': {
            '$nin': repo_ids_without_schedules
        }}, fields=['repo_id'])

    repo_ids_with_schedules = [r['repo_id'] for r in repos_with_schedules]

    repos_to_schedule = v1_repo_collection.find(
        {
            'id': {
                '$nin': repo_ids_with_schedules
            },
            'sync_schedule': {
                '$ne': None
            }
        },
        fields=['id', 'sync_schedule', 'sync_options', 'last_sync'])

    for repo in repos_to_schedule:

        if repo['id'] not in repo_ids_without_schedules:
            report.error('Repository [%s] not found in the v2 database; '
                         'sync scheduling being canceled.' % repo['id'])
            return False

        args = [repo['id']]
        kwargs = {'overrides': {}}
        call_request = CallRequest(sync_with_auto_publish_itinerary,
                                   args,
                                   kwargs,
                                   principal=SystemUser())

        scheduled_call_document = {
            '_id': ObjectId(),
            'id': None,
            'serialized_call_request': None,
            'schedule': repo['sync_schedule'],
            'failure_threshold': None,
            'consecutive_failures': 0,
            'first_run': None,
            'last_run': None,
            'next_run': None,
            'remaining_runs': None,
            'enabled': True
        }

        scheduled_call_document['id'] = str(scheduled_call_document['_id'])

        schedule_tag = resource_tag(dispatch_constants.RESOURCE_SCHEDULE_TYPE,
                                    scheduled_call_document['id'])
        call_request.tags.append(schedule_tag)

        scheduled_call_document['serialized_call_request'] = call_request.serialize()

        if isinstance(repo['sync_options'], dict):
            scheduled_call_document['failure_threshold'] = repo[
                'sync_options'].get('failure_threshold', None)

        interval, start, recurrences = dateutils.parse_iso8601_interval(
            scheduled_call_document['schedule'])

        scheduled_call_document['first_run'] = start or datetime.utcnow()
        scheduled_call_document['remaining_runs'] = recurrences
        scheduled_call_document['next_run'] = _calculate_next_run(
            scheduled_call_document)

        if repo['last_sync'] is not None:
            scheduled_call_document['last_run'] = dateutils.to_naive_utc_datetime(
                dateutils.parse_iso8601_datetime(repo['last_sync']))

        v2_scheduled_call_collection.insert(scheduled_call_document, safe=True)
        v2_repo_importer_collection.update(
            {'repo_id': repo['id']},
            {'$push': {
                'scheduled_syncs': scheduled_call_document['id']
            }},
            safe=True)

    return True
Example #16
    def test_expired_object_id(self):
        expired_oid = self.reaper._create_expired_object_id(timedelta(seconds=1))
        self.assertTrue(isinstance(expired_oid, ObjectId))
        now_oid = ObjectId()
        self.assertTrue(now_oid > expired_oid)
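This shorter variant leans on ObjectId ordering: oids compare by their raw 12-byte value, whose leading four bytes are the creation timestamp, so an oid generated now sorts after one whose embedded time lies in the past. A standalone sketch of the same property, outside any test class:

import time
from bson import ObjectId

older = ObjectId()
time.sleep(1)          # guarantee a later embedded timestamp
newer = ObjectId()

assert newer > older                                    # byte-wise comparison
assert newer.generation_time >= older.generation_time   # timestamps agree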