Example #1
File: database.py Project: hgschmie/pulp
def _create_or_update_type(type_def):
    """
    This method creates or updates a type definition in MongoDB.

    :param type_def: the type definition to update or create. If a type definition with the same
                     id as an existing type is given, the existing type is updated; otherwise it
                     is created.
    :type  type_def: ContentType

    :return: This method will always return None
    :rtype:  None
    """
    # Make sure a collection exists for the type
    database = pulp_db.get_database()
    collection_name = unit_collection_name(type_def.id)

    if collection_name not in database.collection_names():
        pulp_db.get_collection(collection_name, create=True)

    # Add or update an entry in the types list
    content_type_collection = ContentType.get_collection()
    content_type = ContentType(
        type_def.id, type_def.display_name, type_def.description, type_def.unit_key,
        type_def.search_indexes, type_def.referenced_types)
    # no longer rely on _id = id
    existing_type = content_type_collection.find_one({'id': type_def.id}, fields=[])
    if existing_type is not None:
        content_type._id = existing_type['_id']
    # XXX this still causes a potential race condition when 2 users are updating the same type
    content_type_collection.save(content_type, safe=True)
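A minimal call sketch (not from the Pulp source) may help orient the example above; it assumes the TypeDefinition constructor used in the test examples later in this listing (id, display_name, description, unit_key, search_indexes, referenced_types), an illustrative unit key, and an assumed import path.

# Hypothetical usage sketch; the import path and unit key are assumptions.
from pulp.plugins.types.model import TypeDefinition

rpm_type = TypeDefinition('rpm', 'RPM', 'RPM Packages',
                          ['name', 'version', 'release', 'arch'],  # unit key
                          None,                                    # search indexes
                          [])                                      # referenced types

# Creates the units_rpm collection if it is missing and upserts the 'rpm'
# entry in the content_types collection.
_create_or_update_type(rpm_type)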
Example #2
def migrate(*args, **kwargs):
    """
    For each repository with a yum distributor, clean up the old yum distributor's
    mess and re-publish the repository with the new distributor.
    """
    if not api._is_initialized():
        api.initialize()

    distributor_collection = get_collection('repo_distributors')
    yum_distributors = list(
        distributor_collection.find(
            {'distributor_type_id': YUM_DISTRIBUTOR_ID}))

    repo_collection = get_collection('repos')
    repo_ids = list(set(d['repo_id'] for d in yum_distributors))
    repos = dict((r['id'], r)
                 for r in repo_collection.find({'id': {
                     '$in': repo_ids
                 }}))

    for d in yum_distributors:
        repo = repos[d['repo_id']]
        config = d['config'] or {}

        if d['last_publish'] is None:
            continue

        _clear_working_dir(repo)
        _clear_old_publish_dirs(repo, config)
        _re_publish_repository(repo, d)

    _remove_legacy_publish_dirs()
Example #3
def _create_or_update_type(type_def):
    """
    This method creates or updates a type definition in MongoDB.

    :param type_def: the type definition to update or create. If a type definition with the same
                     id as an existing type is given, the existing type is updated; otherwise it
                     is created.
    :type  type_def: ContentType

    :return: This method will always return None
    :rtype:  None
    """
    # Make sure a collection exists for the type
    database = connection.get_database()
    collection_name = unit_collection_name(type_def.id)

    if collection_name not in database.collection_names():
        connection.get_collection(collection_name, create=True)

    # Add or update an entry in the types list
    content_type_collection = ContentType.get_collection()
    content_type = ContentType(type_def.id, type_def.display_name,
                               type_def.description, type_def.unit_key,
                               type_def.search_indexes,
                               type_def.referenced_types)
    # no longer rely on _id = id
    existing_type = content_type_collection.find_one({'id': type_def.id},
                                                     projection=[])
    if existing_type is not None:
        content_type._id = existing_type['_id']
    # XXX this still causes a potential race condition when 2 users are updating the same type
    content_type_collection.save(content_type)
Example #4
def migrate(*args, **kwargs):
    """
    For each repository with a yum distributor, clean up the old yum distributor's
    mess and re-publish the repository with the new distributor.
    """

    distributor_collection = get_collection('repo_distributors')
    yum_distributors = list(distributor_collection.find({'distributor_type_id': YUM_DISTRIBUTOR_ID}))

    repo_collection = get_collection('repos')
    repo_ids = list(set(d['repo_id'] for d in yum_distributors))
    repos = dict((r['id'], r) for r in repo_collection.find({'id': {'$in': repo_ids}}))

    for d in yum_distributors:
        repo = repos[d['repo_id']]
        config = d['config'] or {}

        if d['last_publish'] is None:
            continue

        _clear_working_dir(repo)
        _clear_old_publish_dirs(repo, config)
        _re_publish_repository(repo, d)

    _remove_legacy_publish_dirs()
Example #5
def migrate(*args, **kwargs):
    schedule_collection = connection.get_collection('scheduled_calls')
    importer_collection = connection.get_collection('repo_importers')
    distributor_collection = connection.get_collection('repo_distributors')

    map(functools.partial(convert_schedule, schedule_collection.save), schedule_collection.find())
    move_scheduled_syncs(importer_collection, schedule_collection)
    move_scheduled_publishes(distributor_collection, schedule_collection)
Example #6
def migrate(*args, **kwargs):
    schedule_collection = connection.get_collection('scheduled_calls')
    importer_collection = connection.get_collection('repo_importers')
    distributor_collection = connection.get_collection('repo_distributors')

    map(functools.partial(convert_schedule, schedule_collection.save), schedule_collection.find())
    move_scheduled_syncs(importer_collection, schedule_collection)
    move_scheduled_publishes(distributor_collection, schedule_collection)
Example #7
def repositories_with_yum_importers():
    repo_importer_collection = get_collection('repo_importers')
    repo_yum_importers = repo_importer_collection.find({'importer_type_id': _TYPE_YUM_IMPORTER},
                                                       fields=['repo_id'])
    yum_repo_ids = [i['repo_id'] for i in repo_yum_importers]
    repo_collection = get_collection('repos')
    yum_repos = repo_collection.find({'repo_id': {'$in': yum_repo_ids}},
                                     fields=['repo_id', 'scratchpad'])
    return list(yum_repos)
Example #8
    def tearDown(self):
        super(Migration0004Tests, self).tearDown()

        # Delete any sample data added for the test
        types_db.clean()

        RepoContentUnit.get_collection().remove()
        get_collection('repo_importers').remove()
        model.Repository.drop_collection()
Example #9
    def tearDown(self):
        super(Migration0004Tests, self).tearDown()

        # Delete any sample data added for the test
        types_db.clean()

        RepoContentUnit.get_collection().remove()
        get_collection('repo_importers').remove()
        model.Repository.drop_collection()
Example #10
def _migrate_rpmlike_units(unit_type):
    """
    This function performs the migration on RPMs, DRPMs, and SRPMs. These all have the same schema
    when it comes to checksumtype, so they can be treated the same way.

    :param unit_type:          The unit_type_id, as found in pulp_rpm.common.ids.
    :type  unit_type:          basestring
    """
    repos = connection.get_collection('repos')
    repo_content_units = connection.get_collection('repo_content_units')
    unit_collection = connection.get_collection('units_%s' % unit_type)

    for unit in unit_collection.find():
        try:
            sanitized_type = verification.sanitize_checksum_type(unit['checksumtype'])
            if sanitized_type != unit['checksumtype']:
                # Let's see if we can get away with changing its checksumtype to the sanitized
                # value. If this works, we won't have to do anything else.
                unit_collection.update({'_id': unit['_id']},
                                       {'$set': {'checksumtype': sanitized_type}})
        except errors.DuplicateKeyError:
            # Looks like there is already an identical unit with the sanitized checksum type. This
            # means we need to remove the current unit, but first we will need to change any
            # references to this unit to point to the other.
            conflicting_unit = unit_collection.find_one(
                {'name': unit['name'], 'epoch': unit['epoch'], 'version': unit['version'],
                 'release': unit['release'], 'arch': unit['arch'], 'checksum': unit['checksum'],
                 'checksumtype': sanitized_type})
            for rcu in repo_content_units.find({'unit_type_id': unit_type, 'unit_id': unit['_id']}):
                # Now we must either switch the rcu from pointing to unit to pointing to
                # conflicting_unit, or delete the rcu if there is already one in the same repo.
                try:
                    msg = _('Updating %(repo_id)s to contain %(type)s %(conflicting)s instead of '
                            '%(old_id)s.')
                    msg = msg % {'repo_id': rcu['repo_id'], 'type': unit_type,
                                 'conflicting': conflicting_unit['_id'], 'old_id': unit['_id']}
                    _logger.debug(msg)
                    repo_content_units.update({'_id': rcu['_id']},
                                              {'$set': {'unit_id': conflicting_unit['_id']}})
                except errors.DuplicateKeyError:
                    # We will delete this RepoContentUnit since the sha1 RPM is already in the
                    # repository.
                    msg = _('Removing %(type)s %(old_id)s from repo %(repo_id)s since it conflicts '
                            'with %(conflicting)s.')
                    msg = msg % {'repo_id': rcu['repo_id'], 'type': unit_type,
                                 'conflicting': conflicting_unit['_id'], 'old_id': unit['_id']}
                    _logger.debug(msg)
                    repo_content_units.remove({'_id': rcu['_id']})
                    # In this case, we now need to decrement the repository's "content_unit_counts"
                    # for this unit_type by one, since we removed a unit from a repository.
                    repos.update(
                        {'id': rcu['repo_id']},
                        {'$inc': {'content_unit_counts.%s' % unit_type: -1}})
            # Now that we have removed or altered all references to the "sha" Unit, we need to
            # remove it since it is a duplicate.
            unit_collection.remove({'_id': unit['_id']})
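The duplicate handling above hinges on what verification.sanitize_checksum_type() returns. A rough sketch of the assumed behavior (mapping the legacy 'sha' alias to its canonical name) follows; the real helper lives in Pulp's verification module and may differ.

# Rough sketch of the assumed behavior of verification.sanitize_checksum_type();
# the real Pulp helper may differ.
_CHECKSUM_ALIASES = {'sha': 'sha1'}  # legacy alias seen in old yum metadata

def sanitize_checksum_type(checksum_type):
    """Return the canonical, lowercase name for a possibly legacy checksum type."""
    checksum_type = checksum_type.lower()
    return _CHECKSUM_ALIASES.get(checksum_type, checksum_type)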
Example #11
    def create_user(login, password=None, name=None, roles=None):
        """
        Creates a new Pulp user and adds it to the specified roles.

        @param login: login name / unique identifier for the user
        @type  login: str

        @param password: password for login credentials
        @type  password: str

        @param name: user's full name
        @type  name: str

        @param roles: list of roles user will belong to
        @type  roles: list

        @raise DuplicateResource: if there is already a user with the requested login
        @raise InvalidValue: if any of the fields are unacceptable
        """

        existing_user = get_collection('users').find_one({'login': login})
        if existing_user is not None:
            raise DuplicateResource(login)

        invalid_values = []

        if login is None or _USER_LOGIN_REGEX.match(login) is None:
            invalid_values.append('login')
        if invalid_type(name, basestring):
            invalid_values.append('name')
        if invalid_type(roles, list):
            invalid_values.append('roles')

        if invalid_values:
            raise InvalidValue(invalid_values)

        # Use the login for name of the user if one was not specified
        name = name or login
        roles = roles or None

        # Creation
        create_me = model.User(login=login, name=name, roles=roles)
        create_me.set_password(password)
        create_me.save()

        # Grant permissions
        permission_manager = factory.permission_manager()
        permission_manager.grant_automatic_permissions_for_user(
            create_me.login)

        # Retrieve the user to return the SON object
        created = get_collection('users').find_one({'login': login})
        created.pop('password')

        return created
Example #12
    def create_user(login, password=None, name=None, roles=None):
        """
        Creates a new Pulp user and adds it to the specified roles.

        @param login: login name / unique identifier for the user
        @type  login: str

        @param password: password for login credentials
        @type  password: str

        @param name: user's full name
        @type  name: str

        @param roles: list of roles user will belong to
        @type  roles: list

        @raise DuplicateResource: if there is already a user with the requested login
        @raise InvalidValue: if any of the fields are unacceptable
        """

        existing_user = get_collection('users').find_one({'login': login})
        if existing_user is not None:
            raise DuplicateResource(login)

        invalid_values = []

        if login is None or _USER_LOGIN_REGEX.match(login) is None:
            invalid_values.append('login')
        if invalid_type(name, basestring):
            invalid_values.append('name')
        if invalid_type(roles, list):
            invalid_values.append('roles')

        if invalid_values:
            raise InvalidValue(invalid_values)

        # Use the login for name of the user if one was not specified
        name = name or login
        roles = roles or None

        # Creation
        create_me = model.User(login=login, name=name, roles=roles)
        create_me.set_password(password)
        create_me.save()

        # Grant permissions
        permission_manager = factory.permission_manager()
        permission_manager.grant_automatic_permissions_for_user(create_me.login)

        # Retrieve the user to return the SON object
        created = get_collection('users').find_one({'login': login})
        created.pop('password')

        return created
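A hedged usage sketch (not from the Pulp source) follows; it assumes create_user() is reachable as shown, that 'super-users' is an existing role name, and that DuplicateResource and InvalidValue come from Pulp's exception module.

# Hypothetical usage of create_user(); the login, password and role are illustrative.
try:
    created = create_user('jdoe', password='letmein', name='Jane Doe',
                          roles=['super-users'])
    print(created['login'], created['name'])
except DuplicateResource:
    print('a user with that login already exists')
except InvalidValue as e:
    print('invalid fields: %s' % e)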
Example #13
def repositories_with_yum_importers():
    repo_importer_collection = get_collection('repo_importers')
    repo_yum_importers = repo_importer_collection.find(
        {'importer_type_id': _TYPE_YUM_IMPORTER}, fields=['repo_id'])
    yum_repo_ids = [i['repo_id'] for i in repo_yum_importers]
    repo_collection = get_collection('repos')
    yum_repos = repo_collection.find({'repo_id': {
        '$in': yum_repo_ids
    }},
                                     fields=['repo_id', 'scratchpad'])
    return list(yum_repos)
Example #14
def _remove_prestodelta_repo_units():
    """
    Remove all prestodelta repo_content_units since they should not have been created
    to begin with.
    """
    metadata_collection = get_collection('units_yum_repo_metadata_file')
    repo_units_collection = get_collection('repo_content_units')
    for presto_unit in metadata_collection.find({'data_type': 'prestodelta'}):
        # remove any repo content units that reference it; the unit itself will
        # be removed by the orphan cleanup at some point in the future
        repo_units_collection.remove({'unit_id': presto_unit['_id']})
Example #15
def _remove_prestodelta_repo_units():
    """
    Remove all prestodelta repo_content_units since they should not have been created
    to begin with.
    """
    metadata_collection = get_collection('units_yum_repo_metadata_file')
    repo_units_collection = get_collection('repo_content_units')
    for presto_unit in metadata_collection.find({'data_type': 'prestodelta'}):
        # remove any repo content units that reference it; the unit itself will
        # be removed by the orphan cleanup at some point in the future
        repo_units_collection.remove({'unit_id': presto_unit['_id']})
Example #16
    def test_with_db(self):
        REPO_ID = 'repo123'
        repo_collection = get_collection('repos')
        repo_collection.save({'id': REPO_ID, 'content_unit_count': 0})

        assoc_collection = RepoContentUnit.get_collection()
        assoc_collection.insert({
            'repo_id': REPO_ID,
            'unit_type_id': 'rpm',
            'unit_id': 'unit1'
        })
        assoc_collection.insert({
            'repo_id': REPO_ID,
            'unit_type_id': 'rpm',
            'unit_id': 'unit2'
        })

        self.module.migrate()

        repo = repo_collection.find({'id': REPO_ID})[0]

        self.assertTrue('content_unit_count' not in repo)
        self.assertEqual(repo['content_unit_counts'], {'rpm': 2})

        # cleanup
        repo_collection.remove({'id': REPO_ID})
        assoc_collection.remove({'repo_id': REPO_ID})
Example #17
def _update_indexes(type_def, unique):

    collection_name = unit_collection_name(type_def.id)
    collection = connection.get_collection(collection_name, create=False)

    if unique:
        index_list = [type_def.unit_key]  # treat the key as a compound key
    else:
        index_list = type_def.search_indexes

    if index_list is None:
        return

    for index in index_list:

        if isinstance(index, (list, tuple)):
            msg = 'Ensuring index [%s] (unique: %s) on type definition [%s]'
            msg = msg % (', '.join(index), unique, type_def.id)
            _logger.debug(msg)
            mongo_index = _create_index_keypair(index)
        else:
            msg = 'Ensuring index [%s] (unique: %s) on type definition [%s]'
            msg = msg % (index, unique, type_def.id)
            _logger.debug(msg)
            mongo_index = index

        index_name = collection.ensure_index(mongo_index, unique=unique)

        if index_name is not None:
            _logger.debug('Index [%s] created on type definition [%s]' %
                          (index_name, type_def.id))
        else:
            _logger.debug('Index already existed on type definition [%s]' %
                          type_def.id)
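For compound keys, _create_index_keypair() presumably turns the list of field names into an index specification that ensure_index() accepts. A hedged sketch of that assumed behavior is shown below; the ASCENDING direction matches what the unit-key tests later in this listing verify.

# Sketch of the assumed behavior of _create_index_keypair(); the real helper may differ.
from pymongo import ASCENDING

def _create_index_keypair(index_fields):
    """Turn ['compound_1', 'compound_2'] into [('compound_1', ASCENDING), ...]."""
    return [(field, ASCENDING) for field in index_fields]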
Example #18
 def __init__(self):
     """
     Call super with collection and fields.
     """
     collection = connection.get_collection('units_python_package')
     super(Package, self).__init__(collection, ('name', 'version'))
     self.fields.add('_filename')
Example #19
 def __init__(self):
     """
     Call super with collection and fields.
     """
     key_fields = ("data_type", "repo_id")
     collection = connection.get_collection("units_yum_repo_metadata_file")
     super(YumMetadataFile, self).__init__(collection, key_fields)
Example #20
def migrate(*args, **kwargs):
    """
    For each repository with a yum distributor, clean up the old yum distributor's
    mess and re-publish the repository with the new distributor.
    """
    if not api._is_initialized():
        api.initialize()

    distributor_collection = get_collection('repo_distributors')
    yum_distributors = list(
        distributor_collection.find({'distributor_type_id': YUM_DISTRIBUTOR_ID}))

    repo_ids = list(set(d['repo_id'] for d in yum_distributors))
    repo_objs = model.Repository.objects(repo_id__in=repo_ids)
    repos = dict((repo_obj.repo_id, repo_obj.to_transfer_repo()) for repo_obj in repo_objs)

    for d in yum_distributors:
        repo = repos[d['repo_id']]
        config = d['config'] or {}

        if d.get('last_publish') is None:
            continue

        _clear_working_dir(repo)
        _clear_old_publish_dirs(repo, config)
        _re_publish_repository(repo, d)

    _remove_legacy_publish_dirs()
Example #21
def _migrate_rpm_repositories():
    '''
    This migration takes care of adding export_distributor to all the old rpm repos
    with no export_distributor already associated with them. Since we have renamed iso_distributor
    to export_distributor, it also removes any iso_distributor associated with an rpm repo.
    '''
    collection = get_collection('repo_distributors')
    for repo_distributor in collection.find():

        # Check only for rpm repos
        if repo_distributor['distributor_type_id'] == ids.TYPE_ID_DISTRIBUTOR_YUM:

            # Check if an export_distributor exists for the same repo
            if collection.find_one({'repo_id': repo_distributor['repo_id'],
                                    'distributor_type_id': ids.TYPE_ID_DISTRIBUTOR_EXPORT}) is None:
                # If not, create a new one with default config
                export_distributor = model.Distributor(
                    repo_id=repo_distributor['repo_id'],
                    distributor_id=ids.EXPORT_DISTRIBUTOR_ID,
                    distributor_type_id=ids.TYPE_ID_DISTRIBUTOR_EXPORT,
                    config=EXPORT_DISTRIBUTOR_CONFIG,
                    auto_publish=False)
                collection.save(export_distributor)

            # Remove iso_distributor associated with the repo
            iso_distributor = collection.find_one(
                {'repo_id': repo_distributor['repo_id'], 'distributor_type_id': 'iso_distributor'})
            if iso_distributor is not None:
                collection.remove(iso_distributor)
Example #22
 def __init__(self):
     """
     Call super with collection and fields.
     """
     key_fields = ("name", "checksum", "size")
     collection = connection.get_collection("units_iso")
     super(ISO, self).__init__(collection, key_fields)
Example #23
    def setUp(self):
        super(self.__class__, self).setUp()
        self.repo_importers = get_collection('repo_importers')

        importers = (
            {"repo_id": "proxy",
             "importer_type_id": "iso_importer", "last_sync": "2013-04-09T16:57:06-04:00",
             "scheduled_syncs": [], "scratchpad": None,
             "config": {
                 "proxy_user": "******",
                 "feed_url": "http://pkilambi.fedorapeople.org/test_file_repo/",
                 "proxy_url": "localhost", "proxy_password": "******", "proxy_port": 3128,
                 "id": "proxy"},
             "id": "iso_importer"},
            # This one has only the configs that were changed set
            {'repo_id': 'test', 'importer_type_id': 'iso_importer',
             'config': {
                 'feed_url': 'http://feed.com/isos', 'num_threads': 42,
                 'proxy_url': 'proxy.com', 'proxy_user': '******',
                 'remove_missing_units': False, 'validate_units': True},
             'id': 'iso_importer'},
            # This is here just to make sure we ignore it with our query, since this
            # migration should only alter ISOImporters
            {'repo_id': 'a_yum_repo', 'importer_type_id': 'yum_importer',
             'config': {'feed_url': 'This should not change.'}},
        )

        for importer in importers:
            self.repo_importers.save(importer)
Example #24
def migrate(*args, **kwargs):
    """
    Add last_updated and last_override_config to the importer collection.
    """
    updated_key = 'last_updated'
    config_key = 'last_override_config'
    collection = get_collection('repo_importers')

    for importer in collection.find():
        modified = False

        if config_key not in importer:
            importer[config_key] = {}
            modified = True

        # If the key doesn't exist, or does exist but has no value, set it based on the
        # last sync time, if possible. Otherwise, set it to now.
        if not importer.get(updated_key, None):
            try:
                importer[updated_key] = isodate.parse_datetime(importer['last_sync'])
            # The attribute doesn't exist, or parsing failed. It's safe to set a newer timestamp.
            except:  # noqa: E722
                importer[updated_key] = datetime.datetime.now(tz=isodate.UTC)
            modified = True

        if modified:
            collection.save(importer)
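To make the backfill above concrete, here is a hedged before/after sketch of a single importer document (field values are made up).

# Hedged before/after sketch of the last_updated backfill (values made up).
import isodate

before = {'repo_id': 'zoo', 'importer_type_id': 'yum_importer',
          'last_sync': '2013-04-09T16:57:06-04:00'}

# After migrate(): last_override_config is added, and last_updated is parsed from
# last_sync (falling back to "now" when last_sync is missing or unparsable).
after = {'repo_id': 'zoo', 'importer_type_id': 'yum_importer',
         'last_sync': '2013-04-09T16:57:06-04:00',
         'last_override_config': {},
         'last_updated': isodate.parse_datetime('2013-04-09T16:57:06-04:00')}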
Example #25
def migrate(*args, **kwargs):
    """
    Migrate existing yum importers to use the new configuration key names.

    This migration consolidates verify_checksum and verify_size into a single
    config value. For simplicity, the value for verify_checksum is used as the new setting
    and verify_size is discarded.

    The newest flag in the old config was redundant; the num_old_packages serves the
    same purpose. The newest flag is discarded.

    The purge_orphaned flag was a carry over from v1 and has no effect. It's documented in
    the old yum importer but I'm not sure it was actually used. This migration will attempt
    to delete it anyway just in case.
    """

    repo_importers = get_collection('repo_importers')

    rename_query = {'$rename': {
        'config.feed_url'         : 'config.feed',
        'config.ssl_verify'       : 'config.ssl_validation',
        'config.proxy_url'        : 'config.proxy_host',
        'config.proxy_user'       : 'config.proxy_username',
        'config.proxy_pass'       : 'config.proxy_password',
        'config.num_threads'      : 'config.max_downloads',
        'config.verify_checksum'  : 'config.validate', # see comment above
        'config.remove_old'       : 'config.remove_missing',
        'config.num_old_packages' : 'config.retain_old_count',
    }}
    repo_importers.update({'importer_type_id': 'yum_importer'}, rename_query, safe=True, multi=True)

    remove_query = {'$unset' : {'config.newest' : 1,
                                'config.verify_size' : 1,
                                'config.purge_orphaned' : 1}}
    repo_importers.update({'importer_type_id': 'yum_importer'}, remove_query, safe=True, multi=True)
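As an illustration of what the $rename and $unset updates above do to one yum importer document, a hedged before/after sketch (field values are made up):

# Hedged before/after illustration of the rename/unset queries (values made up).
before = {'importer_type_id': 'yum_importer',
          'config': {'feed_url': 'http://example.com/repo/',
                     'num_threads': 5,
                     'verify_checksum': True,
                     'verify_size': True,
                     'newest': True}}

after = {'importer_type_id': 'yum_importer',
         'config': {'feed': 'http://example.com/repo/',
                    'max_downloads': 5,
                    'validate': True}}  # newest and verify_size removed by $unset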
Example #26
def migrate(*args, **kwargs):
    """
    Migrate existing errata to have the key "from" instead of "from_str"
    """
    errata_collection = get_collection('units_erratum')
    rename_query = {'$rename': {'from_str': 'from'}}
    errata_collection.update({}, rename_query, multi=True)
Example #27
def _update_indexes(type_def, unique):

    collection_name = unit_collection_name(type_def.id)
    collection = connection.get_collection(collection_name, create=False)

    if unique:
        index_list = [type_def.unit_key]  # treat the key as a compound key
    else:
        index_list = type_def.search_indexes

    if index_list is None:
        return

    for index in index_list:

        if isinstance(index, (list, tuple)):
            msg = "Ensuring index [%s] (unique: %s) on type definition [%s]"
            msg = msg % (", ".join(index), unique, type_def.id)
            _logger.debug(msg)
            mongo_index = _create_index_keypair(index)
        else:
            msg = "Ensuring index [%s] (unique: %s) on type definition [%s]"
            msg = msg % (index, unique, type_def.id)
            _logger.debug(msg)
            mongo_index = index

        index_name = collection.ensure_index(mongo_index, unique=unique)

        if index_name is not None:
            _logger.debug("Index [%s] created on type definition [%s]" % (index_name, type_def.id))
        else:
            _logger.debug("Index already existed on type definition [%s]" % type_def.id)
Example #28
    def test_update_unit_key_multiple_fields(self):
        """
        Tests that a multiple field unit key is built as a single, compound index
        """

        # Setup
        unit_key = ['compound_1', 'compound_2']
        type_def = TypeDefinition('rpm', 'RPM', 'RPM Packages', unit_key, None,
                                  [])

        # Test
        types_db._update_unit_key(type_def)

        # Verify
        collection_name = types_db.unit_collection_name(type_def.id)
        collection = pulp_db.get_collection(collection_name)

        index_dict = collection.index_information()

        self.assertEqual(2, len(index_dict))  # default (_id) + unit key

        index = index_dict['compound_1_1_compound_2_1']
        self.assertTrue(index['unique'])

        keys = index['key']
        self.assertEqual(2, len(keys))
        self.assertEqual('compound_1', keys[0][0])
        self.assertEqual(types_db.ASCENDING, keys[0][1])
        self.assertEqual('compound_2', keys[1][0])
        self.assertEqual(types_db.ASCENDING, keys[1][1])
Example #29
    def test_drop_indexes(self):
        """
        Tests that updating an existing collection with a different set of indexes
        correctly changes them.
        """

        # Setup
        old_key = ['compound_1', 'compound_2']

        type_def = TypeDefinition('rpm', 'RPM', 'RPM Packages', old_key, None,
                                  [])
        types_db._update_unit_key(type_def)

        # Test
        new_key = ['new_1']
        type_def.unit_key = new_key

        types_db._drop_indexes(type_def)
        types_db._update_unit_key(type_def)

        # Verify
        collection_name = types_db.unit_collection_name(type_def.id)
        collection = pulp_db.get_collection(collection_name)

        index_dict = collection.index_information()

        self.assertEqual(2, len(index_dict))  # default (_id) + new one
Example #30
    def test_drop_indexes(self):
        """
        Tests that updating an existing collection with a different set of indexes
        correctly changes them.
        """

        # Setup
        old_key = ['compound_1', 'compound_2']

        type_def = TypeDefinition('rpm', 'RPM', 'RPM Packages', old_key, None, [])
        types_db._update_unit_key(type_def)

        # Test
        new_key = ['new_1']
        type_def.unit_key = new_key

        types_db._drop_indexes(type_def)
        types_db._update_unit_key(type_def)

        # Verify
        collection_name = types_db.unit_collection_name(type_def.id)
        collection = pulp_db.get_collection(collection_name)

        index_dict = collection.index_information()

        self.assertEqual(2, len(index_dict)) # default (_id) + new one
Example #31
    def test_update_unit_key_multiple_fields(self):
        """
        Tests that a multiple field unit key is built as a single, compound index
        """

        # Setup
        unit_key = ['compound_1', 'compound_2']
        type_def = TypeDefinition('rpm', 'RPM', 'RPM Packages', unit_key, None, [])

        # Test
        types_db._update_unit_key(type_def)

        # Verify
        collection_name = types_db.unit_collection_name(type_def.id)
        collection = pulp_db.get_collection(collection_name)

        index_dict = collection.index_information()

        self.assertEqual(2, len(index_dict)) # default (_id) + unit key

        index = index_dict['compound_1_1_compound_2_1']
        self.assertTrue(index['unique'])

        keys = index['key']
        self.assertEqual(2, len(keys))
        self.assertEqual('compound_1', keys[0][0])
        self.assertEqual(types_db.ASCENDING, keys[0][1])
        self.assertEqual('compound_2', keys[1][0])
        self.assertEqual(types_db.ASCENDING, keys[1][1])
Example #32
def _migrate_task_status():
    """
    Find all task_status documents in an incomplete state and set the state to canceled.
    """
    task_status = connection.get_collection('task_status')
    task_status.update({'state': {'$in': CALL_INCOMPLETE_STATES}},
                       {'$set': {'state': CALL_CANCELED_STATE }}, multi=True)
Example #33
def migrate(*args, **kwargs):
    """
    Add last_updated and last_override_config to the importer collection.
    """
    updated_key = 'last_updated'
    config_key = 'last_override_config'
    collection = get_collection('repo_importers')

    for importer in collection.find():
        modified = False

        if config_key not in importer:
            importer[config_key] = {}
            modified = True

        # If the key doesn't exist, or does exist but has no value, set it based on the
        # last sync time, if possible. Otherwise, set it to now.
        if not importer.get(updated_key, None):
            try:
                importer[updated_key] = isodate.parse_datetime(importer['last_sync'])
            # The attribute doesn't exist, or parsing failed. It's safe to set a newer timestamp.
            except:  # noqa: E722
                importer[updated_key] = datetime.datetime.now(tz=isodate.UTC)
            modified = True

        if modified:
            collection.save(importer)
Example #34
    def test_update_unit_key_single_field(self):
        """
        Tests a single field unit key is handled correctly.
        """

        # Setup
        unit_key = 'individual_1',
        type_def = TypeDefinition('rpm', 'RPM', 'RPM Packages', unit_key, None, [])

        # Test
        types_db._update_unit_key(type_def)

        # Verify
        collection_name = types_db.unit_collection_name(type_def.id)
        collection = pulp_db.get_collection(collection_name)

        index_dict = collection.index_information()

        self.assertEqual(2, len(index_dict)) # default (_id) + unit key

        index = index_dict['individual_1_1']
        self.assertTrue(index['unique'])

        keys = index['key']
        self.assertEqual(1, len(keys))
        self.assertEqual('individual_1', keys[0][0])
        self.assertEqual(types_db.ASCENDING, keys[0][1])
Example #35
def _update_indexes(type_def, unique):

    collection_name = unit_collection_name(type_def.id)
    collection = pulp_db.get_collection(collection_name, create=False)

    if unique:
        index_list = [type_def.unit_key] # treat the key as a compound key
    else:
        index_list = type_def.search_indexes

    if index_list is None:
        return

    for index in index_list:

        if isinstance(index, (list, tuple)):
            LOG.debug('Ensuring index [%s] (unique: %s) on type definition [%s]' % (', '.join(index), unique, type_def.id))
            mongo_index = _create_index_keypair(index)
        else:
            LOG.debug('Ensuring index [%s] (unique: %s) on type definition [%s]' % (index, unique, type_def.id))
            mongo_index = index

        index_name = collection.ensure_index(mongo_index, unique=unique, drop_dups=False)

        if index_name is not None:
            LOG.debug('Index [%s] created on type definition [%s]' % (index_name, type_def.id))
        else:
            LOG.debug('Index already existed on type definition [%s]' % type_def.id)
Example #36
    def setUp(self):
        super(self.__class__, self).setUp()
        self.repo_importers = get_collection('repo_importers')

        importers = (
            {"repo_id": "proxy",
             "importer_type_id": "iso_importer", "last_sync": "2013-04-09T16:57:06-04:00",
             "scheduled_syncs": [], "scratchpad": None,
             "config": {
                "proxy_user": "******",
                "feed_url": "http://pkilambi.fedorapeople.org/test_file_repo/",
                "proxy_url": "localhost", "proxy_password": "******", "proxy_port": 3128,
                "id": "proxy" },
             "id": "iso_importer"},
            # This one has only the configs that were changed set
            {'repo_id': 'test', 'importer_type_id': 'iso_importer',
             'config': {
                'feed_url': 'http://feed.com/isos', 'num_threads': 42,
                'proxy_url': 'proxy.com', 'proxy_user': '******',
                'remove_missing_units': False, 'validate_units': True},
             'id': 'iso_importer'},
            # This is here just to make sure we ignore it with our query, since this
            # migration should only alter ISOImporters
            {'repo_id': 'a_yum_repo', 'importer_type_id': 'yum_importer',
             'config': {'feed_url': 'This should not change.'}},
        )

        for importer in importers:
            self.repo_importers.save(importer, safe=True)
Example #37
 def __init__(self):
     """
     Call super with collection and fields.
     """
     collection = connection.get_collection('units_python_package')
     super(Package, self).__init__(collection, ('name', 'version'))
     self.fields.add('_filename')
Example #38
    def test_update_unit_key_single_field(self):
        """
        Tests a single field unit key is handled correctly.
        """

        # Setup
        unit_key = 'individual_1',
        type_def = TypeDefinition('rpm', 'RPM', 'RPM Packages', unit_key, None,
                                  [])

        # Test
        types_db._update_unit_key(type_def)

        # Verify
        collection_name = types_db.unit_collection_name(type_def.id)
        collection = pulp_db.get_collection(collection_name)

        index_dict = collection.index_information()

        self.assertEqual(2, len(index_dict))  # default (_id) + unit key

        index = index_dict['individual_1_1']
        self.assertTrue(index['unique'])

        keys = index['key']
        self.assertEqual(1, len(keys))
        self.assertEqual('individual_1', keys[0][0])
        self.assertEqual(types_db.ASCENDING, keys[0][1])
Example #39
 def __init__(self):
     """
     Call super with collection and fields.
     """
     key_fields = ('data_type', 'repo_id')
     collection = connection.get_collection('units_yum_repo_metadata_file')
     super(YumMetadataFile, self).__init__(collection, key_fields)
Example #40
 def __init__(self):
     """
     Call super with collection and fields.
     """
     key_fields = ('name', 'checksum', 'size')
     collection = connection.get_collection('units_iso')
     super(ISO, self).__init__(collection, key_fields)
Example #41
 def __init__(self):
     """
     Call super with collection and fields.
     """
     key_fields = ("distribution_id", "family", "variant", "version", "arch")
     collection = connection.get_collection("units_distribution")
     super(DistributionPlan, self).__init__(collection, key_fields, join_leaf=False)
     self.fields.add("files")
Example #42
 def setUp(self):
     self.clean()
     super(TestMigration_0005, self).setUp()
     for collection in [
             connection.get_collection(n, True) for n in TEST_COLLECTIONS
     ]:
         for unit in TEST_UNITS:
             collection.save(unit, safe=True)
Example #43
def remove_repodata_from_scratchpad(repo_id):
    repo_collection = get_collection('repos')
    repo = repo_collection.find_one({'repo_id': repo_id},
                                    fields=['scratchpad'])
    repo['scratchpad'].pop('repodata', None)
    repo_collection.update({'repo_id': repo_id},
                           {'$set': {
                               'scratchpad': repo['scratchpad']
                           }})
Example #44
def srpm_plan():
    """
    Factory to create an SRPM migration plan.

    :return: A configured plan.
    :rtype: Plan
    """
    collection = connection.get_collection('units_srpm')
    return package_plan(collection)
Example #45
def srpm_plan():
    """
    Factory to create an SRPM migration plan.

    :return: A configured plan.
    :rtype: Plan
    """
    collection = connection.get_collection('units_srpm')
    return package_plan(collection)
Example #46
File: database.py Project: omps/pulp
def _create_or_update_type(type_def):

    # Make sure a collection exists for the type
    database = pulp_db.get_database()
    collection_name = unit_collection_name(type_def.id)

    if collection_name not in database.collection_names():
        pulp_db.get_collection(collection_name, create=True)

    # Add or update an entry in the types list
    content_type_collection = ContentType.get_collection()
    content_type = ContentType(type_def.id, type_def.display_name, type_def.description,
                               type_def.unit_key, type_def.search_indexes, type_def.referenced_types)
    # no longer rely on _id = id
    existing_type = content_type_collection.find_one({'id': type_def.id}, fields=[])
    if existing_type is not None:
        content_type._id = existing_type['_id']
    # XXX this still causes a potential race condition when 2 users are updating the same type
    content_type_collection.save(content_type, safe=True)
Example #47
def migrate(*args, **kwargs):
    """
    Regenerates the 'content_unit_counts' attribute of each repository, and
    removes the obsolete attribute 'content_unit_count'. The normal use case
    will be that the 'content_unit_counts' attribute does not yet exist, but
    this migration is idempotent just in case.
    """
    managers.RepoManager().rebuild_content_unit_counts()
    repo_collection = connection.get_collection('repos')
    repo_collection.update({}, {'$unset': {'content_unit_count': 1}})
Example #48
    def setUp(self):
        super(BaseMigrationTests, self).setUp()

        self.distributors_collection = get_collection('repo_distributors')

        self.root_test_dir = tempfile.mkdtemp(prefix='test_0016_migration_')
        self.http_publish_dir = os.path.join(self.root_test_dir, 'http', 'repos')
        self.https_publish_dir = os.path.join(self.root_test_dir, 'https', 'repos')

        self.migration_module = _import_all_the_way(MIGRATION_MODULE)
Example #49
def image_plan():
    """
    Factory to create an image migration plan.

    :return: A configured plan.
    :rtype: Plan
    """
    key_fields = ('image_id',)
    collection = connection.get_collection('units_docker_image')
    return Plan(collection, key_fields, join_leaf=False)
Example #50
def drpm_plan():
    """
    Factory to create a DRPM migration plan.

    :return: A configured plan.
    :rtype: Plan
    """
    key_fields = ("epoch", "version", "release", "filename", "checksumtype", "checksum")
    collection = connection.get_collection("units_drpm")
    return Plan(collection, key_fields)
Example #51
def migrate(*args, **kwargs):
    """
    Make sure last_updated field is set for every distributor.
    """
    updated_key = 'last_updated'
    collection = get_collection('repo_distributors')
    for distributor in collection.find():
        if distributor.get(updated_key) is None:
            distributor[updated_key] = dateutils.now_utc_datetime_with_tzinfo()
            collection.save(distributor)
Example #52
def manifest_plan():
    """
    Factory to create a manifest migration plan.

    :return: A configured plan.
    :rtype: Plan
    """
    key_fields = ('digest', )
    collection = connection.get_collection('units_docker_manifest')
    return Plan(collection, key_fields)
Example #53
def iso_plan():
    """
    Factory to create an ISO migration plan.

    :return: A configured plan.
    :rtype: Plan
    """
    key_fields = ('name', 'checksum', 'size')
    collection = connection.get_collection('units_iso')
    return Plan(collection, key_fields)
Example #54
def distribution_plan():
    """
    Factory to create a Distribution migration plan.

    :return: A configured plan.
    :rtype: Plan
    """
    key_fields = ('distribution_id', 'family', 'variant', 'version', 'arch')
    collection = connection.get_collection('units_distribution')
    return Plan(collection, key_fields, join_leaf=False)
Example #55
def yum_metadata_plan():
    """
    Factory to create a YUM metadata migration plan.

    :return: A configured plan.
    :rtype: Plan
    """
    key_fields = ('data_type', 'repo_id')
    collection = connection.get_collection('units_yum_repo_metadata_file')
    return Plan(collection, key_fields)
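The plan factories in the last several examples all share the same shape. A minimal sketch (not from the Pulp source) of how a migration driver might collect them is shown below; in reality the docker and rpm/iso plans live in separate plugins.

# Minimal sketch (not from the Pulp source) of collecting the plan factories
# shown above; each factory opens its own unit collection when called.
PLAN_FACTORIES = (iso_plan, drpm_plan, srpm_plan, image_plan, manifest_plan,
                  distribution_plan, yum_metadata_plan)

def build_plans():
    """Instantiate one Plan per content type."""
    return [factory() for factory in PLAN_FACTORIES]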