Example #1
def _repo_distributors(v1_database, v2_database, report):
    v1_coll = v1_database.repos
    v2_repo_coll = v2_database.repos
    v2_dist_coll = v2_database.repo_distributors

    # Idempotency: Only one distributor is added per repo, so check for that
    v2_distributor_repo_ids = [
        x['repo_id'] for x in v2_dist_coll.find({}, {'repo_id': 1})
    ]
    spec = {
        '$and': [
            {'id': {'$nin': v2_distributor_repo_ids}},
            {'content_types': V1_ISO_REPO},
        ]
    }
    missing_v1_repos = v1_coll.find(spec)

    new_distributors = []
    for v1_repo in missing_v1_repos:

        # Sanity check that the repository was added to v2. This should never
        # happen, but we should account for it anyway.
        v2_repo = v2_repo_coll.find_one({'id': v1_repo['id']})
        if v2_repo is None:
            report.error('Repository [%s] not found in the v2 database; '
                         'distributor addition being canceled' % v1_repo['id'])
            return False

        new_distributor = {
            '_id': ObjectId(),
            'id': ISO_DISTRIBUTOR_ID,
            'repo_id': v1_repo['id'],
            'distributor_type_id': ISO_DISTRIBUTOR_TYPE_ID,
            'auto_publish': True,
            'scratchpad': None,
            # In v1, sync and publish are the same, so last_sync is close enough
            'last_publish': v1_repo['last_sync'],
            # Scheduling a publish doesn't exist in v1, so leave this empty
            'scheduled_publishes': [],
        }

        config = {
            'relative_url': v1_repo['relative_path'],
            'http': False,
            'https': True,
        }

        new_distributor['config'] = config
        new_distributors.append(new_distributor)

    if new_distributors:
        v2_dist_coll.insert(new_distributors, safe=True)

    return True
Example #2
File: crud.py Project: taftsanders/pulp
    def get(self, event_listener_id):
        """
        Retrieves the given event listener if it exists. If not, an exception
        is raised.

        @param event_listener_id: listener to retrieve
        @type  event_listener_id: str

        @return: listener instance from the database
        @rtype:  dict

        @raise MissingResource: if no listener exists at the given ID
        """
        collection = EventListener.get_collection()

        try:
            id = ObjectId(event_listener_id)
        except InvalidId:
            raise MissingResource(event_listener=event_listener_id), \
                None, sys.exc_info()[2]

        listener = collection.find_one({'_id': id})

        if listener is None:
            raise MissingResource(event_listener=event_listener_id)
        else:
            return listener
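The try/except around ObjectId() above is the standard way to validate a user-supplied ID string before querying. A minimal standalone sketch of the same pattern, using the bson package that pymongo ships with (the original code imports ObjectId through pulp.server.compat):

from bson.errors import InvalidId
from bson.objectid import ObjectId


def is_valid_object_id(value):
    # True only if the string parses as a valid MongoDB ObjectId
    try:
        ObjectId(value)
        return True
    except InvalidId:
        return False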
Example #3
def _consumers(v1_database, v2_database):
    v1_coll = v1_database.consumers
    v2_coll = v2_database.consumers

    # Idempotency: consumer_id is unique, so only process consumers whose ID is
    # not in v2 already
    v2_ids = [x['id'] for x in v2_coll.find({}, {'id' : 1})]
    missing_v1_consumers = v1_coll.find({'id' : {'$nin' : v2_ids}})

    for v1_consumer in missing_v1_consumers:
        v2_consumer = {
            '_id' : ObjectId(),
            'id' : v1_consumer['id'],
            'display_name' : v1_consumer['id'],
            'description' : v1_consumer['description'],
            'notes' : v1_consumer['key_value_pairs'],
            'capabilities' : v1_consumer['capabilities'],
            'certificate' : v1_consumer['certificate'],
        }
        v2_coll.insert(v2_consumer)

        # Ideally, this should be atomic with the consumer. That's also horribly
        # complicated to attempt to honor. So while there's a small chance the
        # following call could be interrupted and the bindings for this consumer
        # lost, the chance is low enough that I'm willing to take it.
        # jdob, Oct 23, 2012

        _consumer_bindings(v2_database, v1_consumer)

        # This suffers from the same atomic issue, but is even less risky since
        # the consumer will resend this eventually anyway, so if it gets lost
        # it will be replaced.
        _unit_profile(v2_database, v1_consumer)
Example #4
File: users.py Project: tomlanyon/pulp
def _roles(v1_database, v2_database):
    v1_roles_coll = v1_database.roles
    v2_roles_coll = v2_database.roles

    all_v1_roles = list(v1_roles_coll.find())
    v2_roles_to_add = []

    for v1_role in all_v1_roles:

        # Idempotency: If there's already a v2 role whose id is the name in v1,
        # don't re-add it.

        existing = v2_roles_coll.find_one({'id': v1_role['name']})
        if existing is not None:
            continue

        # Skip adding default consumer-users role from v1 database to v2
        # since it is no longer required in v2

        if v1_role['name'] == 'consumer-users':
            continue

        v2_role = {
            '_id': ObjectId(),
            'id': v1_role['name'],
            'display_name': v1_role['name'],
            'description': None,
            'permissions': v1_role['permissions'],
        }
        v2_roles_to_add.append(v2_role)

    if v2_roles_to_add:
        v2_roles_coll.insert(v2_roles_to_add, safe=True)
Example #5
File: users.py Project: tomlanyon/pulp
def _users(v1_database, v2_database):
    v1_coll = v1_database.users
    v2_coll = v2_database.users

    # Idempotency: Check already upgraded users by login
    v2_logins = [x['login'] for x in list(v2_coll.find({}, {'login': 1}))]
    missing_v1_users = list(v1_coll.find({'login': {'$nin': v2_logins}}))

    v2_users_to_add = []
    v1_consumer_user_logins = []

    for v1_user in missing_v1_users:
        if 'consumer-users' in v1_user['roles']:
            v1_consumer_user_logins.append(v1_user['login'])
            continue

        v2_user = {
            '_id': ObjectId(),
            'id': v1_user['id'],
            'login': v1_user['login'],
            'password': v1_user['password'],
            'name': v1_user['name'] or v1_user['login'],
            'roles': v1_user['roles'],
        }
        v2_users_to_add.append(v2_user)

    if v2_users_to_add:
        v2_coll.insert(v2_users_to_add, safe=True)

    return v1_consumer_user_logins
Example #6
def _associate_package(v1_database, v2_database, v1_id, v2_id, unit_type):

    v1_coll = v1_database.repos
    v2_coll = v2_database.repo_content_units

    # Idempotency: Easiest to let mongo handle it on insert

    repos_with_package = v1_coll.find({'packages': v1_id}, {'id': 1})

    for repo in repos_with_package:
        new_association = {
            '_id': ObjectId(),
            'repo_id': repo['id'],
            'unit_id': v2_id,
            'unit_type_id': unit_type,
            'owner_type': DEFAULT_OWNER_TYPE,
            'owner_id': DEFAULT_OWNER_ID,
            'created': DEFAULT_CREATED,
            'updated': DEFAULT_UPDATED,
        }
        try:
            v2_coll.insert(new_association, safe=True)
        except DuplicateKeyError:
            # Still hate this model, still the simplest
            pass
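The "let mongo handle it" idempotency above only works if the association collection carries a unique index over the fields that identify an association; otherwise the re-inserted documents would silently duplicate instead of raising DuplicateKeyError. A sketch of that precondition, with the index fields assumed rather than confirmed from the original schema:

from pymongo import ASCENDING

# Assumed unique compound index; without one, re-running the upgrade would
# insert duplicate association documents instead of raising DuplicateKeyError.
v2_coll.ensure_index(
    [('repo_id', ASCENDING), ('unit_id', ASCENDING),
     ('unit_type_id', ASCENDING)],
    unique=True)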
Example #7
File: users.py Project: tomlanyon/pulp
def _permissions(v1_database, v2_database, v1_consumer_user_logins):
    v1_coll = v1_database.permissions
    v2_coll = v2_database.permissions

    # Idempotency: The resource can be used as a uniqueness check
    v2_resources = [
        x['resource'] for x in v2_coll.find({}, {'resource': 1})
    ]
    missing_v1_permissions = list(
        v1_coll.find({'resource': {'$nin': v2_resources}}))

    v2_permissions_to_add = []

    for v1_permission in missing_v1_permissions:
        # Before migrating a v1 permission, remove any v1 consumer users from it
        if v1_consumer_user_logins:
            for user in list(v1_permission['users'].keys()):
                if user in v1_consumer_user_logins:
                    del v1_permission['users'][user]

        v2_permission = {
            '_id': ObjectId(),
            'resource': v1_permission['resource'],
            'users': v1_permission['users'],
        }
        v2_permissions_to_add.append(v2_permission)

    if v2_permissions_to_add:
        v2_coll.insert(v2_permissions_to_add, safe=True)
Example #8
File: crud.py Project: taftsanders/pulp
    def update(self,
               event_listener_id,
               notifier_config=None,
               event_types=None):
        """
        Changes the configuration of an existing event listener. The notifier
        type cannot be changed; in such cases the event listener should be
        deleted and a new one created.

        If specified, the notifier_config follows the given conventions:
        - If a key is specified with a value of None, the effect is that the
          key is removed from the configuration
        - If an existing key is unspecified, its value is unaffected

        Event types must be the *complete* list of event types to listen for.
        This method does not support deltas on the event types.

        @param event_listener_id: listener being edited
        @type  event_listener_id: str

        @param notifier_config: contains only configuration properties to change
        @type  notifier_config: dict

        @param event_types: complete list of event types that should be fired on
        @type  event_types: list

        @return: updated listener instance from the database
        """
        collection = EventListener.get_collection()

        # Validation
        existing = self.get(event_listener_id)  # will raise MissingResource

        # Munge the existing configuration if it was specified
        if notifier_config is not None:
            munged_config = dict(existing['notifier_config'])

            remove_us = [
                k for k in notifier_config.keys() if notifier_config[k] is None
            ]
            for k in remove_us:
                munged_config.pop(k, None)
                notifier_config.pop(k)

            munged_config.update(notifier_config)
            existing['notifier_config'] = munged_config

        # Update the event list
        if event_types is not None:
            _validate_event_types(event_types)
            existing['event_types'] = event_types

        # Update the database
        collection.save(existing)

        # Reload to return
        existing = collection.find_one({'_id': ObjectId(event_listener_id)})
        return existing
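A minimal walk-through of the notifier_config conventions the docstring describes, using hypothetical keys and values:

existing_config = {'a': 'a', 'b': 'b'}
notifier_config = {'a': 'x', 'b': None, 'c': 'c'}  # None removes key 'b'

munged_config = dict(existing_config)
remove_us = [k for k in notifier_config.keys() if notifier_config[k] is None]
for k in remove_us:
    munged_config.pop(k, None)
    notifier_config.pop(k)
munged_config.update(notifier_config)

assert munged_config == {'a': 'x', 'c': 'c'}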
Example #9
def _isos(v1_database, v2_database, report):

    v1_repo_coll = v1_database.repos
    v2_ass_coll = v2_database.repo_content_units
    v2_iso_coll = v2_database.units_iso

    # Idempotency: I still dislike this as a strategy, but the easiest approach
    # is to attempt the insert and let the uniqueness check kick out anything
    # that's already been added.

    v1_files = v1_database.file.find()
    for v1_file in v1_files:
        new_iso_id = str(uuid.uuid4())

        v2_iso = {
            '_id': new_iso_id,
            '_content_type_id': 'iso',
            'name': v1_file['filename'],
            'size': v1_file['size'],
        }

        # Checksum is stored as a dict from type to checksum, but in v1 we
        # only ever used sha256. The model has been flattened in 2.0 to just
        # store the checksum itself.
        v2_iso['checksum'] = v1_file['checksum'].values()[0]

        try:
            v2_iso_coll.insert(v2_iso, safe=True)
        except DuplicateKeyError:
            # Still try to do the association in the event the unit already
            # existed, so find the existing unit for its ID.
            spec = dict((k, v2_iso[k]) for k in ('name', 'checksum', 'size'))
            existing = v2_iso_coll.find_one(spec)
            new_iso_id = existing['_id']

        repos_with_iso = v1_repo_coll.find({'files': v1_file['_id']},
                                           {'id': 1})
        for v1_repo in repos_with_iso:
            new_association = {
                '_id': ObjectId(),
                'repo_id': v1_repo['id'],
                'unit_id': new_iso_id,
                'unit_type_id': 'iso',
                'owner_type': DEFAULT_OWNER_TYPE,
                'owner_id': DEFAULT_OWNER_ID,
                'created': DEFAULT_CREATED,
                'updated': DEFAULT_UPDATED,
            }
            try:
                v2_ass_coll.insert(new_association, safe=True)
            except DuplicateKeyError:
                # Still hate this model, still the simplest
                pass

    return True
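The checksum flattening above, sketched with a hypothetical v1 value:

# v1 stored the checksum as a dict from type to digest; only sha256 was used
v1_checksum = {'sha256': '9f86d081884c7d65'}  # digest shortened, illustrative

# v2 stores just the digest itself (Python 2: dict.values() returns a list)
v2_checksum = v1_checksum.values()[0]
assert v2_checksum == '9f86d081884c7d65'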
Example #10
    def test_delete_schedule(self):
        sync_options = {'override_config': {}}
        schedule_data = {'schedule': 'R1/P1DT'}
        schedule_id = self.schedule_manager.create_sync_schedule(self.repo_id,
                                                                 self.importer_type_id,
                                                                 sync_options,
                                                                 schedule_data)
        collection = ScheduledCall.get_collection()
        schedule = collection.find_one(ObjectId(schedule_id))
        self.assertFalse(schedule is None)

        self.schedule_manager.delete_sync_schedule(self.repo_id,
                                                   self.importer_type_id,
                                                   schedule_id)
        schedule = collection.find_one(ObjectId(schedule_id))
        self.assertTrue(schedule is None)

        schedule_list = self._importer_manager.list_sync_schedules(self.repo_id)
        self.assertFalse(schedule_id in schedule_list)
Example #11
def _repo_groups(v1_database, v2_database, report):
    v1_coll = v1_database.repos
    v2_coll = v2_database.repo_groups

    # Idempotency: Two-fold. All group IDs will be collected and groups created
    # from those IDs, using the ID to determine if it already exists. The second
    # is the addition of repo IDs to the group, which will be handled by mongo.

    # I should probably use a map reduce here, but frankly this is simpler and
    # I'm not terribly worried about either the mongo performance or memory
    # consumption from the approach below.
    repo_and_group_ids = [
        (x['id'], x['groupid']) for x in
        v1_coll.find({}, {'id': 1, 'groupid': 1, 'content_types': 1})
        if 'groupid' in x
    ]
    repo_ids_by_group = {}
    for repo_id, group_id_list in repo_and_group_ids:

        # Yes, "groupid" in the repo is actually a list. Ugh.
        for group_id in group_id_list:
            l = repo_ids_by_group.setdefault(group_id, [])
            l.append(repo_id)

    v1_group_ids = repo_ids_by_group.keys()
    existing_v2_group_ids = [
        x['id'] for x in v2_coll.find({'id': {'$in': v1_group_ids}}, {'id': 1})
    ]

    missing_group_ids = set(v1_group_ids) - set(existing_v2_group_ids)

    new_groups = []
    for group_id in missing_group_ids:
        new_group = {
            '_id': ObjectId(),
            'id': group_id,
            'display_name': None,
            'description': None,
            'repo_ids': [],
            'notes': {},
        }
        new_groups.append(new_group)

    if new_groups:
        v2_coll.insert(new_groups, safe=True)

    for group_id, repo_ids in repo_ids_by_group.items():
        v2_coll.update({'id': group_id},
                       {'$addToSet': {'repo_ids': {'$each': repo_ids}}})

    return True
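The second idempotency leg, adding repo IDs to groups, relies on $addToSet with $each being a no-op for values already present in the array. A small sketch with illustrative IDs:

update_doc = {'$addToSet': {'repo_ids': {'$each': ['repo-1', 'repo-2']}}}
v2_coll.update({'id': 'demo-group'}, update_doc)
v2_coll.update({'id': 'demo-group'}, update_doc)  # second run changes nothing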
Example #12
    def test_delete_schedule(self):
        publish_options = {'override_config': {}}
        schedule_data = {'schedule': 'R1/P1DT'}
        schedule_id = self.schedule_manager.create_publish_schedule(self.repo_id,
                                                                    self.distributor_id,
                                                                    publish_options,
                                                                    schedule_data)
        collection = ScheduledCall.get_collection()
        schedule = collection.find_one(ObjectId(schedule_id))
        self.assertFalse(schedule is None)

        self.schedule_manager.delete_publish_schedule(self.repo_id,
                                                      self.distributor_id,
                                                      schedule_id)
        schedule = collection.find_one(ObjectId(schedule_id))
        self.assertTrue(schedule is None)

        schedule_list = self._distributor_manager.list_publish_schedules(self.repo_id,
                                                                         self.distributor_id)
        self.assertFalse(schedule_id in schedule_list)
Example #13
    def disable(self, schedule_id):
        """
        Disable a scheduled call request without removing it
        @deprecated: use update instead
        @param schedule_id: id of the schedule for the call request
        @type  schedule_id: str
        """
        if isinstance(schedule_id, basestring):
            schedule_id = ObjectId(schedule_id)

        update = {'$set': {'enabled': False}}
        self.scheduled_call_collection.update({'_id': schedule_id}, update, safe=True)
Example #14
    def enable(self, schedule_id):
        """
        Enable a previously disabled scheduled call request
        @deprecated: use update instead
        @param schedule_id: id of the schedule for the call request
        @type  schedule_id: str
        """
        if isinstance(schedule_id, basestring):
            schedule_id = ObjectId(schedule_id)

        update = {'$set': {'enabled': True}}
        self.scheduled_call_collection.update({'_id': schedule_id}, update, safe=True)
Example #15
    def test_find_by_schedule_id(self):
        schedule_id = str(ObjectId())
        call_request = call.CallRequest(find_dummy_call)
        call_report = call.CallReport.from_call_request(call_request)
        call_report.schedule_id = schedule_id
        task = Task(call_request, call_report)
        self.set_task_queue([task])

        call_report_list = self.coordinator.find_call_reports(
            schedule_id=schedule_id)
        self.assertEqual(len(call_report_list), 1)
        self.assertEqual(call_report_list[0].schedule_id, schedule_id)
Example #16
    def remove(self, schedule_id):
        """
        Remove a scheduled call request
        @param schedule_id: id of the schedule for the call request
        @type  schedule_id: str
        """
        if isinstance(schedule_id, basestring):
            schedule_id = ObjectId(schedule_id)

        if ScheduledCall.get_collection().find_one(schedule_id) is None:
            raise pulp_exceptions.MissingResource(schedule=str(schedule_id))

        self.scheduled_call_collection.remove({'_id': schedule_id}, safe=True)
Example #17
    def test_delete(self):
        # Setup
        manager = manager_factory.event_listener_manager()
        created = manager.create(http.TYPE_ID, {'a' : 'a'}, [event_data.TYPE_REPO_SYNC_STARTED])

        # Test
        status, body = self.delete('/v2/events/%s/' % created['id'])

        # Verify
        self.assertEqual(200, status)

        deleted = EventListener.get_collection().find_one({'_id' : ObjectId(created['_id'])})
        self.assertTrue(deleted is None)
Example #18
def scheduler_complete_callback(call_request, call_report):
    """
    Call back for call request results and rescheduling
    """
    scheduled_call_collection = ScheduledCall.get_collection()
    schedule_id = call_report.schedule_id
    scheduled_call = scheduled_call_collection.find_one({'_id': ObjectId(schedule_id)})

    if scheduled_call is None: # schedule was deleted while call was running
        return

    scheduler = dispatch_factory.scheduler()
    scheduler.update_last_run(scheduled_call, call_report)
Example #19
def _initialize_content_types(v2_database):

    # See module-level docstring for information about why this exists.

    # Collection initialization
    types_coll = v2_database.content_types
    migrations_coll = v2_database.migration_trackers

    # These calls mimic what the base Model class does, which is why
    # DESCENDING is used.
    types_coll.ensure_index([('id', DESCENDING)], unique=True)
    migrations_coll.ensure_index([('name', DESCENDING)], unique=True)

    # Idempotency: The type definition id is the uniqueness. There are so few
    # that we can simply iterate over each one to see if it is already in the
    # v2 database.

    for type_def in TYPE_DEFS:
        existing = types_coll.find_one({'id': type_def['id']})
        if not existing:
            # The ObjectId will be added by mongo, no need to do it here
            types_coll.insert(type_def, safe=True)

            # Apply the uniqueness for the unit key. This is necessary as the
            # upgrade calls in this module rely on that for idempotency checks.
            units_coll = getattr(v2_database,
                                 _units_collection_name(type_def['id']))

            # These indexes mimic how the types database code creates indexes,
            # which is admittedly different from how Model uses DESCENDING above.
            unit_index = [(k, ASCENDING) for k in type_def['unit_key']]
            units_coll.ensure_index(unit_index, unique=True)

            # These are less important to the actual execution of the upgrade,
            # but I'd rather have these created now than after the data is
            # inserted
            for search_index in type_def['search_indexes']:
                units_coll.ensure_index(search_index, unique=False)

        existing = migrations_coll.find_one({'name': type_def['display_name']})
        if not existing:
            new_migration = {
                '_id': ObjectId(),
                'name': type_def['display_name'],
                'version': 0,
            }
            migrations_coll.insert(new_migration, safe=True)

    return True
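For reference, a hypothetical TYPE_DEFS entry, inferred only from the fields the loop above reads ('id', 'display_name', 'unit_key', 'search_indexes'); the values are illustrative:

EXAMPLE_TYPE_DEF = {
    'id': 'iso',                               # unique, per the index above
    'display_name': 'ISO',
    'unit_key': ['name', 'checksum', 'size'],  # becomes the unique unit index
    'search_indexes': ['name'],                # non-unique secondary indexes
}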
Example #20
    def _insert_scheduled_v2_repo(self, repo_id, schedule):
        importer_id = ObjectId()
        schedule_id = ObjectId()

        importer_doc = {'importer_id': importer_id,
                        'importer_type_id': yum_repos.YUM_IMPORTER_TYPE_ID,
                        'scheduled_syncs': [str(schedule_id)]}
        self.tmp_test_db.database.repo_importers.update({'repo_id': repo_id}, {'$set': importer_doc}, safe=True)

        call_request = CallRequest(sync_with_auto_publish_itinerary, [repo_id], {'overrides': {}})
        interval, start, recurrences = dateutils.parse_iso8601_interval(schedule)
        scheduled_call_doc = {'_id': schedule_id,
                              'id': str(schedule_id),
                              'serialized_call_request': call_request.serialize(),
                              'schedule': schedule,
                              'failure_threshold': None,
                              'consecutive_failures': 0,
                              'first_run': start or datetime.datetime.utcnow(),
                              'next_run': None,
                              'last_run': None,
                              'remaining_runs': recurrences,
                              'enabled': True}
        scheduled_call_doc['next_run'] = all_repos._calculate_next_run(scheduled_call_doc)
        self.tmp_test_db.database.scheduled_calls.insert(scheduled_call_doc, safe=True)
Example #21
File: crud.py Project: taftsanders/pulp
    def delete(self, event_listener_id):
        """
        Deletes the event listener with the given ID.

        @param event_listener_id: database ID for the event listener
        @type  event_listener_id: str

        @raise MissingResource: if no listener exists at the given ID
        """
        collection = EventListener.get_collection()

        self.get(event_listener_id)  # check for MissingResource

        collection.remove({'_id': ObjectId(event_listener_id)})
Example #22
def _drpms(v1_database, v2_database, report):
    v2_coll = v2_database.units_drpm
    v1_coll = v1_database.repos
    v2_ass_coll = v2_database.repo_content_units
    repos = v1_coll.find()
    for repo in repos:
        deltarpms = presto_parser.get_deltas(repo)
        new_associations = []
        for nevra, dpkg in deltarpms.items():
            for drpm in dpkg.deltas.values():
                drpm_id = str(uuid.uuid4())
                new_drpm = {
                    "_id": drpm_id,
                    "_storage_path": os.path.join(DIR_DRPM, drpm.filename),
                    "_content_type_id": 'drpm',
                    "checksumtype": drpm.checksum_type,
                    "sequence": drpm.sequence,
                    "checksum": drpm.checksum,
                    "filename": drpm.filename,
                    "new_package": nevra,
                    "epoch": drpm.epoch,
                    "version": drpm.version,
                    "release": drpm.release,
                    "size": drpm.size,
                }
                try:
                    v2_coll.insert(new_drpm, safe=True)
                except DuplicateKeyError:
                    # Still hate this model, still the simplest
                    pass
                new_association = {
                    '_id': ObjectId(),
                    'repo_id': repo['id'],
                    'unit_id': drpm_id,
                    'unit_type_id': 'drpm',
                    'owner_type': DEFAULT_OWNER_TYPE,
                    'owner_id': DEFAULT_OWNER_ID,
                    'created': DEFAULT_CREATED,
                    'updated': DEFAULT_UPDATED,
                }
                new_associations.append(new_association)
        if new_associations:
            try:
                v2_ass_coll.insert(new_associations, safe=True)
            except DuplicateKeyError:
                # Still hate this model, still the simplest
                pass
Example #23
def _unit_profile(v2_database, v1_consumer):
    v2_coll = v2_database.consumer_unit_profiles
    consumer_id = v1_consumer['id']

    # Idempotency: There's only a single profile stored in v1, so this check
    # is simply if there's a profile for the consumer
    existing = v2_coll.find_one({'consumer_id' : consumer_id})
    if existing:
        return

    unit_profile = {
        '_id' : ObjectId(),
        'consumer_id' : consumer_id,
        'content_type' : RPM_TYPE,
        'profile' : v1_consumer['package_profile']
    }
    v2_coll.insert(unit_profile, safe=True)
Example #24
    def test_expired_object_id(self):
        """
        Make sure that _create_expired_object_id() generates correct ObjectIds.
        """
        expired_oid = _create_expired_object_id(timedelta(seconds=1))

        # The expired oid should be about a second old, but since we didn't
        # create now_oid until after running _create_expired_object_id(), we
        # can't assert that the timedelta is exactly one second. There should
        # definitely be less than two seconds of difference between them,
        # however (unless Nose is really struggling to run the tests), so we
        # can make sure it fits inside the window of 1 - 2 seconds.
        now_oid = ObjectId()
        self.assertTrue(
            (now_oid.generation_time - expired_oid.generation_time) >= timedelta(seconds=1))
        self.assertTrue(
            (now_oid.generation_time - expired_oid.generation_time) < timedelta(seconds=2))
        # Also, let's make sure the type is correct
        self.assertTrue(isinstance(expired_oid, ObjectId))
Example #25
    def test_update_only_event_types(self):
        # Setup
        manager = manager_factory.event_listener_manager()
        created = manager.create(http.TYPE_ID, {'a' : 'a', 'b' : 'b'}, [event_data.TYPE_REPO_SYNC_STARTED])

        # Test
        new_event_types = [event_data.TYPE_REPO_SYNC_FINISHED]
        body = {
            'event_types' : new_event_types,
        }

        status, body = self.put('/v2/events/%s/' % created['id'], body)

        # Verify
        self.assertEqual(200, status)

        updated = EventListener.get_collection().find_one({'_id' : ObjectId(created['_id'])})
        self.assertEqual(updated['event_types'], new_event_types)
Example #26
    def get(self, schedule_id):
        """
        Get the call request and the schedule for the given schedule id
        @param schedule_id: id of the schedule for the call request
        @type  schedule_id: str
        @return: scheduled call report dictionary
        @rtype:  dict
        """
        if isinstance(schedule_id, basestring):
            schedule_id = ObjectId(schedule_id)

        scheduled_call = self.scheduled_call_collection.find_one({'_id': schedule_id})

        if scheduled_call is None:
            raise pulp_exceptions.MissingResource(schedule=str(schedule_id))

        report = scheduled_call_to_report_dict(scheduled_call)
        return report
Example #27
def _repos(v1_database, v2_database):
    v1_coll = v1_database.repos
    v2_coll = v2_database.repos

    # Idempotency: By repo_id
    v2_repo_ids = [x['id'] for x in v2_coll.find({}, {'id': 1})]
    spec = {
        '$and': [
            {'id': {'$nin': v2_repo_ids}},
            {'content_types': V1_ISO_REPO},
        ]
    }
    missing_v1_repos = v1_coll.find(spec)

    new_repos = []
    for v1_repo in missing_v1_repos:
        id = ObjectId()

        # Identifying tag for the CLI
        v2_notes = v1_repo.get('notes', {})
        v2_notes[REPO_NOTE_KEY] = REPO_NOTE_ISO

        v2_repo = {
            '_id': id,  # technically not needed but added for clarity
            'id': v1_repo['id'],
            'display_name': v1_repo['name'],
            'description': None,
            'notes': v2_notes,
            'scratchpad': {},
            'content_unit_count': 0
        }
        new_repos.append(v2_repo)

    if new_repos:
        v2_coll.insert(new_repos, safe=True)

    return True
Example #28
    def test_update_only_config(self):
        # Setup
        manager = manager_factory.event_listener_manager()
        created = manager.create(http.TYPE_ID, {'a' : 'a', 'b' : 'b'}, [event_data.TYPE_REPO_SYNC_STARTED])

        # Test
        new_config = {'a' : 'x', 'c' : 'c'}
        body = {
            'notifier_config' : new_config,
        }

        status, body = self.put('/v2/events/%s/' % created['id'], body)

        # Verify
        self.assertEqual(200, status)

        updated = EventListener.get_collection().find_one({'_id' : ObjectId(created['_id'])})
        expected_config = {'a' : 'x', 'b' : 'b', 'c' : 'c'}
        self.assertEqual(updated['notifier_config'], expected_config)
Example #29
File: reaper.py Project: preethit/pulp-1
def _create_expired_object_id(age):
    """
    By default, MongoDB uses a primary key that has the date that each document was created encoded
    into it. This method generates a pulp.server.compat.ObjectId that corresponds to the timestamp of
    now minus age, where age is a timedelta. For example, if age is 60 seconds, this will
    return an ObjectId that has the UTC time that it was 60 seconds ago encoded into it. This is
    useful in this module, as we want to automatically delete documents that are older than a
    particular age, and so we can issue a remove query to MongoDB for objects with _id attributes
    that are less than the ObjectId returned by this method.

    :param age: A timedelta representing the relative time in the past that you wish an ObjectId
                to be generated against.
    :type  age: datetime.timedelta
    :return:    An ObjectId containing the encoded time (now - age).
    :rtype:     pulp.server.compat.ObjectId
    """
    now = datetime.now(dateutils.utc_tz())
    expired_datetime = now - age
    expired_object_id = ObjectId.from_datetime(expired_datetime)
    return expired_object_id
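The reaping query the docstring describes, sketched with a hypothetical collection:

from datetime import timedelta

# Remove every document created more than 30 days ago; `some_collection` is a
# hypothetical pymongo collection, not part of the original module.
expired_oid = _create_expired_object_id(timedelta(days=30))
some_collection.remove({'_id': {'$lt': expired_oid}}, safe=True)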
Example #30
def _create_expired_object_id(age):
    """
    By default, MongoDB uses a primary key that has the date that each document was created encoded
    into it. This method generates a pulp.server.compat.ObjectId that corresponds to the timestamp of
    now minus age, where age is a timedelta. For example, if age is 60 seconds, this will
    return an ObjectId that has the UTC time that it was 60 seconds ago encoded into it. This is
    useful in this module, as we want to automatically delete documents that are older than a
    particular age, and so we can issue a remove query to MongoDB for objects with _id attributes
    that are less than the ObjectId returned by this method.

    :param age: A timedelta representing the relative time in the past that you wish an ObjectId
                to be generated against.
    :type  age: datetime.timedelta
    :return:    An ObjectId containing the encoded time (now - age).
    :rtype:     pulp.server.compat.ObjectId
    """
    now = datetime.now(dateutils.utc_tz())
    expired_datetime = now - age
    expired_object_id = ObjectId.from_datetime(expired_datetime)
    return expired_object_id
Example #31
    def update(self, schedule_id, **schedule_updates):
        """
        Update a scheduled call request

        Valid schedule updates:
         * call_request
         * schedule
         * failure_threshold
         * remaining_runs
         * enabled

        @param schedule_id: id of the schedule for the call request
        @type  schedule_id: str
        @param schedule_updates: updates for scheduled call
        @type  schedule_updates: dict
        """
        if isinstance(schedule_id, basestring):
            schedule_id = ObjectId(schedule_id)

        if self.scheduled_call_collection.find_one(schedule_id) is None:
            raise pulp_exceptions.MissingResource(schedule=str(schedule_id))

        validate_schedule_updates(schedule_updates)

        call_request = schedule_updates.pop('call_request', None)

        if call_request is not None:
            schedule_updates['serialized_call_request'] = call_request.serialize()

        schedule = schedule_updates.get('schedule', None)

        if schedule is not None:
            interval, start, runs = dateutils.parse_iso8601_interval(schedule)
            schedule_updates.setdefault('remaining_runs', runs) # honor explicit update
            # XXX (jconnor) it'd be nice to update the next_run if the schedule
            # has changed, but it requires mucking with the internals of the
            # scheduled call instance, which is all encapsulated in the
            # ScheduledCall constructor
            # the next_run field will be correctly updated after the next run

        self.scheduled_call_collection.update({'_id': schedule_id}, {'$set': schedule_updates}, safe=True)
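A usage sketch of dateutils.parse_iso8601_interval as unpacked here and in Example #20; the schedule string and resulting values are illustrative:

# R3 = three recurrences, starting 2013-01-01 UTC, repeating daily (P1D)
interval, start, recurrences = dateutils.parse_iso8601_interval(
    'R3/2013-01-01T00:00:00Z/P1D')
# interval    -> datetime.timedelta(days=1)
# start       -> datetime for 2013-01-01T00:00:00 UTC
# recurrences -> 3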
Example #32
File: reaper.py Project: ashcrow/pulp
    def _create_expired_object_id(self, delta):
        now = datetime.now(dateutils.utc_tz())
        expired_datetime = now - delta
        expired_object_id = ObjectId.from_datetime(expired_datetime)
        return expired_object_id