def _do_sync(self, repo, importer_instance, transfer_repo, conduit, call_config):
    """
    Once all of the preparation for a sync has taken place, this call will
    perform the sync, making the necessary database updates. It returns the
    sync result instance (already saved to the database). This call does not
    have any behavior based on the success/failure of the sync; it is up to
    the caller to raise an exception in the event of a failed sync if that
    behavior is desired.
    """
    importer_coll = RepoImporter.get_collection()
    sync_result_coll = RepoSyncResult.get_collection()

    repo_id = repo['id']
    repo_importer = importer_coll.find_one({'repo_id': repo_id})

    # Perform the sync
    sync_start_timestamp = _now_timestamp()
    sync_end_timestamp = None
    result = None

    try:
        sync_report = importer_instance.sync_repo(transfer_repo, conduit, call_config)
    except Exception, e:
        sync_end_timestamp = _now_timestamp()
        result = RepoSyncResult.error_result(
            repo_id, repo_importer['id'], repo_importer['importer_type_id'],
            sync_start_timestamp, sync_end_timestamp, e, sys.exc_info()[2])
        _LOG.exception(_('Exception caught from plugin during sync for repo [%(r)s]' % {'r': repo_id}))
        raise PulpExecutionException(), None, sys.exc_info()[2]
def _do_publish(self, repo, distributor_id, distributor_instance, transfer_repo, conduit,
                call_config):
    distributor_coll = RepoDistributor.get_collection()
    publish_result_coll = RepoPublishResult.get_collection()
    repo_id = repo['id']

    # Perform the publish
    publish_start_timestamp = _now_timestamp()
    try:
        publish_report = distributor_instance.publish_repo(transfer_repo, conduit, call_config)
    except Exception, e:
        publish_end_timestamp = _now_timestamp()

        # Reload the distributor in case the scratchpad is set by the plugin
        repo_distributor = distributor_coll.find_one({'repo_id': repo_id, 'id': distributor_id})
        repo_distributor['last_publish'] = publish_end_timestamp
        distributor_coll.save(repo_distributor, safe=True)

        # Add a publish history entry for the run
        result = RepoPublishResult.error_result(
            repo_id, repo_distributor['id'], repo_distributor['distributor_type_id'],
            publish_start_timestamp, publish_end_timestamp, e, sys.exc_info()[2])
        publish_result_coll.save(result, safe=True)

        _LOG.exception(_('Exception caught from plugin during publish for repo [%(r)s]' % {'r': repo_id}))
        raise PulpExecutionException(), None, sys.exc_info()[2]
def update_unit_count(repo_id, unit_type_id, delta):
    """
    Updates the total count of units associated with the repo. Each repo has
    an attribute 'content_unit_counts' which is a dict where keys are content
    type IDs and values are the number of content units of that type in the
    repository, e.g. {'rpm': 12, 'srpm': 3}.

    :param repo_id: identifies the repo
    :type repo_id: str
    :param unit_type_id: identifies the unit type to update
    :type unit_type_id: str
    :param delta: amount by which to change the total count
    :type delta: int
    """
    spec = {'id': repo_id}
    operation = {'$inc': {'content_unit_counts.%s' % unit_type_id: delta}}
    repo_coll = Repo.get_collection()

    if delta:
        try:
            repo_coll.update(spec, operation, safe=True)
        except pymongo.errors.OperationFailure:
            message = 'There was a problem updating repository %s' % repo_id
            raise PulpExecutionException(message), None, sys.exc_info()[2]
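Because the counter update is a single MongoDB $inc, it is atomic per document and safe under concurrent syncs. A minimal sketch of the semantics using pymongo directly, assuming a local mongod; the database and repo names are illustrative:

import pymongo

client = pymongo.MongoClient()
repos = client['demo_db']['repos']
repos.insert({'id': 'zoo', 'content_unit_counts': {'rpm': 12, 'srpm': 3}})

# $inc adjusts one type's counter in place; a negative delta decrements it,
# and a previously unseen type key is created on first use.
repos.update({'id': 'zoo'}, {'$inc': {'content_unit_counts.rpm': 5}})
print repos.find_one({'id': 'zoo'})['content_unit_counts']   # {'rpm': 17, 'srpm': 3}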
def _do_publish(self, group, distributor_id, distributor_instance, conduit, call_config):
    distributor_coll = RepoGroupDistributor.get_collection()
    publish_result_coll = RepoGroupPublishResult.get_collection()
    group_id = group.id

    # Perform the publish
    publish_start_timestamp = _now_timestamp()
    try:
        report = distributor_instance.publish_group(group, conduit, call_config)
    except Exception, e:
        publish_end_timestamp = _now_timestamp()

        # Reload the distributor in case the scratchpad is changed by the plugin
        distributor = distributor_coll.find_one({'id': distributor_id, 'repo_group_id': group_id})
        distributor['last_publish'] = publish_end_timestamp
        distributor_coll.save(distributor)

        # Add a publish history entry for the run
        result = RepoGroupPublishResult.error_result(
            group_id, distributor_id, distributor['distributor_type_id'],
            publish_start_timestamp, publish_end_timestamp, e, sys.exc_info()[2])
        publish_result_coll.save(result, safe=True)

        _LOG.exception('Exception caught from plugin during publish call for group [%s]' % group_id)
        raise PulpExecutionException(e), None, sys.exc_info()[2]
def __call__(self, *args, **kwargs):
    try:
        return self.__method(*args, **kwargs)
    except Exception, e:
        msg = str(e)
        tb = sys.exc_info()[2]
        raise PulpExecutionException(msg), None, tb
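This __call__ belongs to a wrapper object that converts any exception raised by the wrapped method into a PulpExecutionException while keeping the original traceback. A self-contained sketch of the same pattern, with a stand-in exception class and an illustrative wrapper name:

import sys

class PulpExecutionException(Exception):  # stand-in for the real class
    pass

class ExceptionTranslator(object):  # name is illustrative, not Pulp's
    def __init__(self, method):
        self.__method = method

    def __call__(self, *args, **kwargs):
        try:
            return self.__method(*args, **kwargs)
        except Exception, e:
            tb = sys.exc_info()[2]
            # Python 2's three-expression raise keeps the original traceback,
            # so the re-raised error still points into the wrapped method.
            raise PulpExecutionException(str(e)), None, tb

def flaky():
    raise ValueError('boom')

ExceptionTranslator(flaky)()  # raises PulpExecutionException: boom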
def resolve_dependencies_by_units(repo_id, units, options):
    """
    Calculates dependencies for the given set of units in the given repository.

    :param repo_id: identifies the repository
    :type repo_id: str
    :param units: list of database representations of units to resolve dependencies for
    :type units: list
    :param options: dict of options to pass the importer to drive the resolution
    :type options: dict or None
    :return: report from the plugin
    :rtype: object
    :raise MissingResource: if the repo does not exist or does not have an importer
    """
    # Validation
    repo_query_manager = manager_factory.repo_query_manager()
    importer_manager = manager_factory.repo_importer_manager()

    # The following will raise MissingResource as appropriate
    repo = repo_query_manager.get_repository(repo_id)
    repo_importer = importer_manager.get_importer(repo_id)

    try:
        importer_instance, plugin_config = plugin_api.get_importer_by_id(
            repo_importer['importer_type_id'])
    except plugin_exceptions.PluginNotFound:
        raise MissingResource(repo_id), None, sys.exc_info()[2]

    # Package for the importer call
    call_config = PluginCallConfiguration(plugin_config, repo_importer['config'], options)
    transfer_repo = common_utils.to_transfer_repo(repo)
    transfer_repo.working_dir = common_utils.importer_working_dir(
        repo_importer['importer_type_id'], repo_id, mkdir=True)
    conduit = DependencyResolutionConduit(repo_id, repo_importer['id'])

    # Convert all of the units into the plugin standard representation
    transfer_units = []

    # Preload all the type defs so we don't hammer the database unnecessarily
    type_defs = {}
    all_type_def_ids = set([u['unit_type_id'] for u in units])
    for def_id in all_type_def_ids:
        type_def = types_db.type_definition(def_id)
        type_defs[def_id] = type_def

    for unit in units:
        type_id = unit['unit_type_id']
        u = conduit_common_utils.to_plugin_associated_unit(unit, type_defs[type_id])
        transfer_units.append(u)

    # Invoke the importer
    try:
        dep_report = importer_instance.resolve_dependencies(transfer_repo, transfer_units,
                                                            conduit, call_config)
    except Exception, e:
        raise PulpExecutionException(), None, sys.exc_info()[2]

    # Hand the plugin's report back to the caller, as documented above
    return dep_report
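A hedged usage sketch (requires a configured Pulp 2 server; the repo ID is illustrative). The units argument takes the database association documents, each of which carries the 'unit_type_id' field used above:

units = list(RepoContentUnit.get_collection().find({'repo_id': 'zoo'}))
report = resolve_dependencies_by_units('zoo', units, options=None)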
def set_importer(repo_id, importer_type_id, repo_plugin_config):
    """
    Configures an importer to be used for the given repository.

    Keep in mind this method is written assuming a single importer for a repo.
    The domain model technically supports multiple importers, but this call is
    what enforces the single importer behavior.

    :param repo_id: identifies the repo
    :type repo_id: str
    :param importer_type_id: identifies the type of importer being added; must
           correspond to an importer loaded at server startup
    :type importer_type_id: str
    :param repo_plugin_config: configuration values for the importer; may be None
    :type repo_plugin_config: dict
    :raise MissingResource: if repo_id does not represent a valid repo
    :raise InvalidImporterConfiguration: if the importer cannot be initialized
           for the given repo
    """
    RepoImporterManager.validate_importer_config(repo_id, importer_type_id, repo_plugin_config)
    importer_coll = RepoImporter.get_collection()

    repo_obj = model.Repository.objects.get_repo_or_missing_resource(repo_id)
    importer_instance, plugin_config = plugin_api.get_importer_by_id(importer_type_id)

    # Convention is that a value of None means unset. Remove any keys that
    # are explicitly set to None so the plugin will default them.
    if repo_plugin_config is not None:
        clean_config = dict([(k, v) for k, v in repo_plugin_config.items() if v is not None])
    else:
        clean_config = None

    # Let the importer plugin verify the configuration
    call_config = PluginCallConfiguration(plugin_config, clean_config)
    transfer_repo = repo_obj.to_transfer_repo()

    # Remove old importer if one exists
    try:
        RepoImporterManager.remove_importer(repo_id)
    except MissingResource:
        pass  # it didn't exist, so no harm done

    # Let the importer plugin initialize the repository
    try:
        importer_instance.importer_added(transfer_repo, call_config)
    except Exception:
        _logger.exception('Error initializing importer [%s] for repo [%s]' %
                          (importer_type_id, repo_id))
        raise PulpExecutionException(), None, sys.exc_info()[2]

    # Database Update
    importer_id = importer_type_id  # use the importer type ID as the importer ID
    importer = RepoImporter(repo_id, importer_id, importer_type_id, clean_config)
    importer_coll.save(importer)

    return importer
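A hedged call sketch, only meaningful inside a configured Pulp 2 server; 'yum_importer' and the feed URL are illustrative. Note how a None value requests plugin defaulting, per the convention above:

config = {
    'feed': 'https://example.com/repos/zoo/',
    'ssl_validation': None,   # None means "unset"; the key is stripped before the plugin sees it
}
importer = set_importer('zoo', 'yum_importer', config)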
def unregister(consumer_id):
    """
    Unregisters the given consumer.

    :param consumer_id: identifies the consumer being unregistered
    :type consumer_id: str
    :raises MissingResource: if the given consumer does not exist
    :raises OperationFailed: if any part of the unregister process fails; the
            exception will contain information on which sections failed
    :raises PulpExecutionException: if there is an error updating the database
            collection
    """
    ConsumerManager.get_consumer(consumer_id)

    # Remove associated bindings
    manager = factory.consumer_bind_manager()
    manager.consumer_deleted(consumer_id)

    # Remove associated profiles
    manager = factory.consumer_profile_manager()
    manager.consumer_deleted(consumer_id)

    # Notify agent
    agent_consumer = factory.consumer_agent_manager()
    agent_consumer.unregistered(consumer_id)

    # Remove the consumer from any groups it was a member of
    group_manager = factory.consumer_group_manager()
    group_manager.remove_consumer_from_groups(consumer_id)

    # Delete any scheduled unit installs
    schedule_manager = factory.consumer_schedule_manager()
    for schedule in schedule_manager.get(consumer_id):
        # using "delete" on utils skips validation that the consumer exists
        schedule_utils.delete(schedule.id)

    # Database Updates
    try:
        Consumer.get_collection().remove({'id': consumer_id}, safe=True)
    except Exception:
        _logger.exception('Error updating database collection while removing consumer [%s]' %
                          consumer_id)
        raise PulpExecutionException("database-error"), None, sys.exc_info()[2]

    factory.consumer_history_manager().record_event(consumer_id, 'consumer_unregistered')
def remove_distributor(repo_group_id, distributor_id, force=False):
    """
    Removes a distributor from a group.

    @param repo_group_id: identifies the group
    @type repo_group_id: str
    @param distributor_id: identifies the distributor on the group
    @type distributor_id: str
    @param force: if true, the distributor will be removed from the database
                  regardless of whether or not the plugin's clean up method
                  raises an exception

    @raise MissingResource: if there is no group or distributor with the given ID
    @raise PulpExecutionException: if the distributor raises an error on cleanup
    """
    distributor_coll = RepoGroupDistributor.get_collection()

    # Validation - calls will raise MissingResource
    group = manager_factory.repo_group_query_manager().get_group(repo_group_id)
    distributor = RepoGroupDistributorManager.get_distributor(repo_group_id, distributor_id)

    # Call the distributor's cleanup method
    distributor_type_id = distributor['distributor_type_id']
    distributor_instance, plugin_config = plugin_api.get_group_distributor_by_id(
        distributor_type_id)

    call_config = PluginCallConfiguration(plugin_config, distributor['config'])

    transfer_group = common_utils.to_transfer_repo_group(group)
    transfer_group.working_dir = common_utils.distributor_working_dir(
        distributor_type_id, repo_group_id)

    try:
        distributor_instance.distributor_removed(transfer_group, call_config)
    except Exception:
        _logger.exception('Exception cleaning up distributor [%s] on group [%s]' %
                          (distributor_id, repo_group_id))
        if not force:
            raise PulpExecutionException(), None, sys.exc_info()[2]

    # Clean up the database
    distributor_coll.remove(distributor, safe=True)
def create_bind_payload(self, repo_id, distributor_id, binding_config):
    """
    Requests the distributor plugin to generate the consumer bind payload.

    @param repo_id: identifies the repo being bound
    @type repo_id: str
    @param distributor_id: identifies the distributor
    @type distributor_id: str
    @param binding_config: configuration applicable only to the binding whose
           payload is being created; may be None
    @type binding_config: object or None

    @return: payload object to pass to the consumer
    @rtype: dict

    @raise MissingResource: if the repo or distributor do not exist
    @raise PulpExecutionException: if the distributor raises an error
    """
    # Input Validation
    repo_distributor = self.get_distributor(repo_id, distributor_id)
    repo = Repo.get_collection().find_one({'id': repo_id})

    distributor_type_id = repo_distributor['distributor_type_id']
    distributor_instance, plugin_config = plugin_api.get_distributor_by_id(distributor_type_id)

    # Let the distributor plugin verify the configuration
    call_config = PluginCallConfiguration(plugin_config, repo_distributor['config'])
    transfer_repo = common_utils.to_transfer_repo(repo)
    transfer_repo.working_dir = common_utils.distributor_working_dir(distributor_type_id, repo_id)

    try:
        payload = distributor_instance.create_consumer_payload(transfer_repo, call_config,
                                                               binding_config)
        return payload
    except Exception:
        msg = _('Exception raised from distributor [%(d)s] generating consumer payload')
        msg = msg % {'d': distributor_id}
        _logger.exception(msg)
        raise PulpExecutionException(), None, sys.exc_info()[2]
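A hedged usage sketch, assuming a configured server and that the manager factory exposes this manager under the name shown; IDs are illustrative:

manager = manager_factory.repo_distributor_manager()
payload = manager.create_bind_payload('zoo', 'yum_distributor', binding_config=None)
# payload is a plain dict the consumer uses to configure its side of the bind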
def grant_automatic_permissions_for_resource(self, resource):
    """
    Grant CRUDE permissions for a newly created resource to the current principal.

    :param resource: resource path to grant permissions to
    :type resource: str
    :raises PulpExecutionException: if the current principal is the system
            principal, which cannot be granted automatic permissions
    """
    principal_manager = factory.principal_manager()
    user = principal_manager.get_principal()
    if principal_manager.is_system_principal():
        raise PulpExecutionException(
            _('Cannot grant automatic permissions for [%(user)s] on resource [%(resource)s]')
            % {'user': user, 'resource': resource})
    self.grant(resource, user['login'], authorization.OPERATION_NAMES)
def auto_publish_for_repo(self, repo_id):
    """
    Calls publish on all distributors that are configured to be automatically
    called for the given repo. Each distributor is called serially. The order
    in which they are executed is determined simply by distributor ID (sorted
    ascending alphabetically).

    All automatic distributors will be called, regardless of whether or not
    one raises an error. All failed publish calls will be combined into a
    single exception.

    If no distributors are configured for automatic publishing, this call
    does nothing.

    @param repo_id: identifies the repo
    @type repo_id: str

    @raise PulpExecutionException: if one or more of the distributors errors
           during publishing; the exception will contain information on all failures
    """
    # Retrieve all auto publish distributors for the repo
    auto_distributors = self.auto_distributors(repo_id)

    if len(auto_distributors) == 0:
        return

    # Call publish on each matching distributor, keeping a running track
    # of failed calls
    error_runs = []  # contains tuples of dist_id and error string

    for dist in auto_distributors:
        dist_id = dist['id']
        try:
            self.publish(repo_id, dist_id, None)
        except Exception:
            logger.exception('Exception on auto distribute call for repo [%s] distributor [%s]' %
                             (repo_id, dist_id))
            error_string = traceback.format_exc()
            error_runs.append((dist_id, error_string))

    if len(error_runs) > 0:
        # Surface all of the collected failures in a single exception
        raise PulpExecutionException(error_runs)
def grant_automatic_permissions_for_resource(self, resource):
    """
    Grant CRUDE permissions for a newly created resource to the current principal.

    @param resource: resource path to grant permissions to
    @type resource: str

    @raise PulpExecutionException: if the current principal is the system
           principal, which cannot be granted automatic permissions
    """
    principal_manager = factory.principal_manager()
    user = principal_manager.get_principal()
    if principal_manager.is_system_principal():
        raise PulpExecutionException(
            _('Cannot grant automatic permissions for [%s] on resource [%s]') % (user, resource))

    # CRUDE: Create, Read, Update, Delete, Execute
    operations = [self.CREATE, self.READ, self.UPDATE, self.DELETE, self.EXECUTE]
    self.grant(resource, user['login'], operations)
        RepoContentUnit.get_collection().remove({'repo_id': repo_id}, safe=True)
    except Exception, e:
        msg = _('Error updating one or more database collections while removing repo [%(r)s]')
        msg = msg % {'r': repo_id}
        logger.exception(msg)
        error_tuples.append(e)

    # remove the repo from any groups it was a member of
    group_manager = manager_factory.repo_group_manager()
    group_manager.remove_repo_from_groups(repo_id)

    if len(error_tuples) > 0:
        pe = PulpExecutionException()
        pe.child_exceptions = error_tuples
        raise pe

@staticmethod
def update_repo(repo_id, delta):
    """
    Updates metadata about the given repository. Only the following fields
    may be updated through this call:
    * display_name
    * description

    Other fields found in delta will be ignored.

    :param repo_id: identifies the repo
    :type repo_id: str
def __init__(self, vertex):
    PulpExecutionException.__init__(self, vertex)
    self.vertex = vertex
        RepoPublishResult.get_collection().remove({'repo_id': repo_id}, safe=True)

        # Remove all associations from the repo
        RepoContentUnit.get_collection().remove({'repo_id': repo_id}, safe=True)
    except Exception, e:
        msg = _('Error updating one or more database collections while removing repo [%(r)s]')
        msg = msg % {'r': repo_id}
        logger.exception(msg)
        error_tuples.append(e)

    # remove the repo from any groups it was a member of
    group_manager = manager_factory.repo_group_manager()
    group_manager.remove_repo_from_groups(repo_id)

    if len(error_tuples) > 0:
        pe = PulpExecutionException()
        pe.child_exceptions = error_tuples
        raise pe

@staticmethod
def update_repo(repo_id, delta):
    """
    Updates metadata about the given repository. Only the following fields
    may be updated through this call:
    * display_name
    * description

    Other fields found in delta will be ignored.

    :param repo_id: identifies the repo
    :type repo_id: str
def add_distributor(repo_id, distributor_type_id, repo_plugin_config, auto_publish,
                    distributor_id=None):
    """
    Adds an association from the given repository to a distributor. The
    association will be tracked through the distributor_id; each distributor
    on a given repository must have a unique ID. If this is not specified,
    one will be generated. If a distributor already exists on the repo for
    the given ID, the existing one will be removed and replaced with the
    newly configured one.

    :param repo_id: identifies the repo
    :type repo_id: str
    :param distributor_type_id: identifies the distributor; must correspond to
           a distributor loaded at server startup
    :type distributor_type_id: str
    :param repo_plugin_config: configuration the repo will use with this
           distributor; may be None
    :type repo_plugin_config: dict
    :param auto_publish: if true, this distributor will be invoked at the end
           of every sync
    :type auto_publish: bool
    :param distributor_id: unique ID to refer to this distributor for this repo
    :type distributor_id: str
    :return: ID assigned to the distributor (only valid in conjunction with the repo)
    :raise MissingResource: if the given repo_id does not refer to a valid repo
    :raise InvalidValue: if the distributor ID is provided and unacceptable
    :raise InvalidDistributorConfiguration: if the distributor plugin does not
           accept the given configuration
    """
    distributor_coll = RepoDistributor.get_collection()

    repo_obj = model.Repository.objects.get_repo_or_missing_resource(repo_id)

    if not plugin_api.is_valid_distributor(distributor_type_id):
        raise InvalidValue(['distributor_type_id'])

    # Determine the ID for this distributor on this repo; will be unique for
    # all distributors on this repository but not globally
    if distributor_id is None:
        distributor_id = str(uuid.uuid4())
    else:
        # Validate if one was passed in
        if not is_distributor_id_valid(distributor_id):
            raise InvalidValue(['distributor_id'])

    distributor_instance, plugin_config = plugin_api.get_distributor_by_id(distributor_type_id)

    # Convention is that a value of None means unset. Remove any keys that
    # are explicitly set to None so the plugin will default them.
    if repo_plugin_config is not None:
        clean_config = dict([(k, v) for k, v in repo_plugin_config.items() if v is not None])
    else:
        clean_config = None

    # Let the distributor plugin verify the configuration
    call_config = PluginCallConfiguration(plugin_config, clean_config)
    config_conduit = RepoConfigConduit(distributor_type_id)
    transfer_repo = repo_obj.to_transfer_repo()
    result = distributor_instance.validate_config(transfer_repo, call_config, config_conduit)

    # For backward compatibility with plugins that don't yet return the tuple
    if isinstance(result, bool):
        valid_config = result
        message = None
    else:
        valid_config, message = result

    if not valid_config:
        raise PulpDataException(message)

    # Remove the old distributor if it exists
    try:
        RepoDistributorManager.remove_distributor(repo_id, distributor_id)
    except MissingResource:
        pass  # if it didn't exist, no problem

    # Let the distributor plugin initialize the repository
    try:
        distributor_instance.distributor_added(transfer_repo, call_config)
    except Exception:
        msg = _('Error initializing distributor [%(d)s] for repo [%(r)s]')
        msg = msg % {'d': distributor_type_id, 'r': repo_id}
        _logger.exception(msg)
        raise PulpExecutionException(), None, sys.exc_info()[2]

    # Database Update
    distributor = RepoDistributor(repo_id, distributor_id, distributor_type_id, clean_config,
                                  auto_publish)
    distributor_coll.save(distributor, safe=True)

    return distributor
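A hedged call sketch, requiring a configured Pulp 2 server; the type ID and config values are illustrative:

dist = add_distributor(
    repo_id='zoo',
    distributor_type_id='yum_distributor',
    repo_plugin_config={'relative_url': 'zoo', 'http': None},  # None keys are stripped
    auto_publish=True)                                         # publish after every sync
print dist['id']   # a generated UUID, since distributor_id was omitted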
class RepoDistributorManager(object):
    def get_distributor(self, repo_id, distributor_id):
        """
        Returns an individual distributor on the given repo.

        @param repo_id: identifies the repo
        @type repo_id: str
        @param distributor_id: identifies the distributor
        @type distributor_id: str

        @return: key-value pairs describing the distributor
        @rtype: dict

        @raise MissingResource: if either the repo doesn't exist or there is
               no distributor with the given ID
        """
        distributor = RepoDistributor.get_collection().find_one(
            {'repo_id': repo_id, 'id': distributor_id})

        if distributor is None:
            raise MissingResource(distributor=distributor_id)

        return distributor

    def get_distributors(self, repo_id):
        """
        Returns all distributors on the given repo.

        @param repo_id: identifies the repo
        @type repo_id: str

        @return: list of key-value pairs describing the distributors; empty
                 list if there are none for the given repo
        @rtype: list

        @raise MissingResource: if the given repo doesn't exist
        """
        repo = Repo.get_collection().find_one({'id': repo_id})
        if repo is None:
            raise MissingResource(repository=repo_id)

        distributors = list(RepoDistributor.get_collection().find({'repo_id': repo_id}))
        return distributors

    @staticmethod
    def find_by_repo_list(repo_id_list):
        """
        Returns serialized versions of all distributors for the given repos.
        Any IDs that do not refer to valid repos are ignored and will not
        raise an error.

        @param repo_id_list: list of repo IDs to fetch distributors for
        @type repo_id_list: list of str

        @return: list of serialized distributors
        @rtype: list of dict
        """
        spec = {'repo_id': {'$in': repo_id_list}}
        projection = {'scratchpad': 0}
        return list(RepoDistributor.get_collection().find(spec, projection))

    @staticmethod
    def add_distributor(repo_id, distributor_type_id, repo_plugin_config, auto_publish,
                        distributor_id=None):
        """
        Adds an association from the given repository to a distributor. The
        association will be tracked through the distributor_id; each
        distributor on a given repository must have a unique ID. If this is
        not specified, one will be generated. If a distributor already exists
        on the repo for the given ID, the existing one will be removed and
        replaced with the newly configured one.
        :param repo_id: identifies the repo
        :type repo_id: str
        :param distributor_type_id: identifies the distributor; must correspond
               to a distributor loaded at server startup
        :type distributor_type_id: str
        :param repo_plugin_config: configuration the repo will use with this
               distributor; may be None
        :type repo_plugin_config: dict
        :param auto_publish: if true, this distributor will be invoked at the
               end of every sync
        :type auto_publish: bool
        :param distributor_id: unique ID to refer to this distributor for this repo
        :type distributor_id: str
        :return: ID assigned to the distributor (only valid in conjunction with the repo)
        :raise MissingResource: if the given repo_id does not refer to a valid repo
        :raise InvalidValue: if the distributor ID is provided and unacceptable
        :raise InvalidDistributorConfiguration: if the distributor plugin does
               not accept the given configuration
        """
        repo_coll = Repo.get_collection()
        distributor_coll = RepoDistributor.get_collection()

        # Validation
        repo = repo_coll.find_one({'id': repo_id})
        if repo is None:
            raise MissingResource(repository=repo_id)

        if not plugin_api.is_valid_distributor(distributor_type_id):
            raise InvalidValue(['distributor_type_id'])

        # Determine the ID for this distributor on this repo; will be unique
        # for all distributors on this repository but not globally
        if distributor_id is None:
            distributor_id = str(uuid.uuid4())
        else:
            # Validate if one was passed in
            if not is_distributor_id_valid(distributor_id):
                raise InvalidValue(['distributor_id'])

        distributor_instance, plugin_config = plugin_api.get_distributor_by_id(
            distributor_type_id)

        # Convention is that a value of None means unset. Remove any keys that
        # are explicitly set to None so the plugin will default them.
        if repo_plugin_config is not None:
            clean_config = dict([(k, v) for k, v in repo_plugin_config.items() if v is not None])
        else:
            clean_config = None

        # Let the distributor plugin verify the configuration
        call_config = PluginCallConfiguration(plugin_config, clean_config)
        transfer_repo = common_utils.to_transfer_repo(repo)
        transfer_repo.working_dir = common_utils.distributor_working_dir(
            distributor_type_id, repo_id)
        config_conduit = RepoConfigConduit(distributor_type_id)

        try:
            result = distributor_instance.validate_config(transfer_repo, call_config,
                                                          config_conduit)

            # For backward compatibility with plugins that don't yet return the tuple
            if isinstance(result, bool):
                valid_config = result
                message = None
            else:
                valid_config, message = result
        except Exception, e:
            logger.exception('Exception received from distributor [%s] while validating config' %
                             distributor_type_id)
            raise PulpDataException(e.args), None, sys.exc_info()[2]

        if not valid_config:
            raise PulpDataException(message)

        # Remove the old distributor if it exists
        try:
            RepoDistributorManager.remove_distributor(repo_id, distributor_id)
        except MissingResource:
            pass  # if it didn't exist, no problem

        # Let the distributor plugin initialize the repository
        try:
            distributor_instance.distributor_added(transfer_repo, call_config)
        except Exception:
            logger.exception('Error initializing distributor [%s] for repo [%s]' %
                             (distributor_type_id, repo_id))
            raise PulpExecutionException(), None, sys.exc_info()[2]

        # Database Update
        distributor = RepoDistributor(repo_id, distributor_id, distributor_type_id, clean_config,
                                      auto_publish)
        distributor_coll.save(distributor, safe=True)

        return distributor
        # Remove all associations from the repo
        RepoContentUnit.get_collection().remove({'repo_id': repo_id}, safe=True)
    except Exception, e:
        _LOG.exception('Error updating one or more database collections while removing repo [%s]' %
                       repo_id)
        error_tuples.append((_('Database Removal Error'), e.args))

    # remove the repo from any groups it was a member of
    group_manager = manager_factory.repo_group_manager()
    group_manager.remove_repo_from_groups(repo_id)

    if len(error_tuples) > 0:
        raise PulpExecutionException(error_tuples)

def update_repo(self, repo_id, delta):
    """
    Updates metadata about the given repository. Only the following fields
    may be updated through this call:
    * display_name
    * description

    Other fields found in delta will be ignored.

    :param repo_id: identifies the repo
    :type repo_id: str
    :param delta: dict of attributes and their new values to change
    :type delta: dict
def sync(repo_id, sync_config_override=None):
    """
    Performs a synchronize operation on the given repository.

    The given repo must have an importer configured. The identity of the
    importer is not a parameter to this call; if multiple importers are
    eventually supported this will have to change to indicate which importer
    to use.

    This method is intentionally limited to synchronizing a single repo.
    Performing multiple repository syncs concurrently will require a more
    global view of the server and must be handled outside the scope of this
    class.

    @param repo_id: identifies the repo to sync
    @type repo_id: str
    @param sync_config_override: optional config containing values to use for
           this sync only
    @type sync_config_override: dict

    @return: TaskResult containing the sync result and any spawned
             auto-publish task IDs
    @rtype: TaskResult

    @raise MissingResource: if repo_id does not refer to a valid repo or the
           repo does not have an importer set
    @raise PulpExecutionException: if the importer indicates a failed sync
    """
    repo_coll = Repo.get_collection()

    # Validation
    repo = repo_coll.find_one({'id': repo_id})
    if repo is None:
        raise MissingResource(repo_id)

    importer_instance, importer_config = RepoSyncManager._get_importer_instance_and_config(
        repo_id)

    if importer_instance is None:
        raise MissingResource(repo_id)

    importer_manager = manager_factory.repo_importer_manager()
    repo_importer = importer_manager.get_importer(repo_id)

    # Assemble the data needed for the sync
    conduit = RepoSyncConduit(repo_id, repo_importer['id'])
    call_config = PluginCallConfiguration(importer_config, repo_importer['config'],
                                          sync_config_override)
    transfer_repo = common_utils.to_transfer_repo(repo)
    transfer_repo.working_dir = common_utils.get_working_directory()

    # Fire events around the call
    fire_manager = manager_factory.event_fire_manager()
    fire_manager.fire_repo_sync_started(repo_id)
    sync_result = RepoSyncManager._do_sync(repo, importer_instance, transfer_repo, conduit,
                                           call_config)
    fire_manager.fire_repo_sync_finished(sync_result)

    if sync_result['result'] == RepoSyncResult.RESULT_FAILED:
        raise PulpExecutionException(_('Importer indicated a failed response'))

    # Queue an auto-publish for each distributor configured for it
    repo_publish_manager = manager_factory.repo_publish_manager()
    auto_distributors = repo_publish_manager.auto_distributors(repo_id)

    spawned_tasks = []
    for distributor in auto_distributors:
        distributor_id = distributor['id']
        spawned_tasks.append(repo_publish_manager.queue_publish(repo_id, distributor_id).task_id)

    return TaskResult(sync_result, spawned_tasks=spawned_tasks)
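A hedged usage sketch; the repo ID and override key are illustrative, and the call requires a repo with an importer configured:

result = sync('zoo', sync_config_override={'num_threads': 1})
# result is a TaskResult; IDs of the queued auto-publish tasks ride along with it
print result.spawned_tasks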
def import_uploaded_unit(repo_id, unit_type_id, unit_key, unit_metadata, upload_id,
                         override_config=None):
    """
    Called to trigger the importer's handling of an uploaded unit. This
    should not be called until the bits have finished uploading. The importer
    is then responsible for moving the file to the correct location, adding
    it to the Pulp server's inventory, and associating it with the repository.

    This call will first call is_valid_upload to check the integrity of the
    destination repository. See that method's documentation for exception
    possibilities.

    :param repo_id: identifies the repository into which the unit is uploaded
    :type repo_id: str
    :param unit_type_id: type of unit being uploaded
    :type unit_type_id: str
    :param unit_key: unique identifier for the unit (user-specified)
    :type unit_key: dict
    :param unit_metadata: any user-specified information about the unit
    :type unit_metadata: dict
    :param upload_id: upload being imported
    :type upload_id: str
    :return: A SyncReport indicating the success or failure of the upload
    :rtype: pulp.plugins.model.SyncReport
    """
    # If it doesn't raise an exception, it's good to go
    ContentUploadManager.is_valid_upload(repo_id, unit_type_id)

    repo_query_manager = manager_factory.repo_query_manager()
    importer_manager = manager_factory.repo_importer_manager()

    repo = repo_query_manager.find_by_id(repo_id)
    repo_importer = importer_manager.get_importer(repo_id)

    try:
        importer_instance, plugin_config = plugin_api.get_importer_by_id(
            repo_importer['importer_type_id'])
    except plugin_exceptions.PluginNotFound:
        raise MissingResource(repo_id), None, sys.exc_info()[2]

    # Assemble the data needed for the import
    conduit = UploadConduit(repo_id, repo_importer['id'])
    call_config = PluginCallConfiguration(plugin_config, repo_importer['config'],
                                          override_config)
    transfer_repo = repo_common_utils.to_transfer_repo(repo)

    file_path = ContentUploadManager._upload_file_path(upload_id)

    # Invoke the importer
    try:
        return importer_instance.upload_unit(transfer_repo, unit_type_id, unit_key,
                                             unit_metadata, file_path, conduit, call_config)
    except PulpException:
        msg = _('Error from the importer while importing uploaded unit to repository [%(r)s]')
        msg = msg % {'r': repo_id}
        logger.exception(msg)
        raise
    except Exception, e:
        msg = _('Error from the importer while importing uploaded unit to repository [%(r)s]')
        msg = msg % {'r': repo_id}
        logger.exception(msg)
        raise PulpExecutionException(e), None, sys.exc_info()[2]
class RepoImporterManager(object):
    def get_importer(self, repo_id):
        """
        Returns metadata about an importer associated with the given repo.

        @return: key-value pairs describing the importer in use
        @rtype: dict

        @raise MissingResource: if the repo does not exist or has no
               importer associated
        """
        importer = RepoImporter.get_collection().find_one({'repo_id': repo_id})

        if importer is None:
            raise MissingResource(repository=repo_id)

        return importer

    def get_importers(self, repo_id):
        """
        Returns a list of all importers associated with the given repo.

        @return: list of key-value pairs describing the importers in use;
                 empty list if the repo has no importers
        @rtype: list of dict

        @raise MissingResource: if the given repo doesn't exist
        """
        repo = Repo.get_collection().find_one({'id': repo_id})
        if repo is None:
            raise MissingResource(repo_id)

        importers = list(RepoImporter.get_collection().find({'repo_id': repo_id}))
        return importers

    @staticmethod
    def find_by_repo_list(repo_id_list):
        """
        Returns serialized versions of all importers for the given repos. Any
        IDs that do not refer to valid repos are ignored and will not raise
        an error.

        @param repo_id_list: list of repo IDs to fetch importers for
        @type repo_id_list: list of str

        @return: list of serialized importers
        @rtype: list of dict
        """
        spec = {'repo_id': {'$in': repo_id_list}}
        projection = {'scratchpad': 0}
        importers = list(RepoImporter.get_collection().find(spec, projection))

        # Process any scheduled syncs and get schedule details using schedule id
        for importer in importers:
            scheduled_sync_ids = importer.get('scheduled_syncs', None)
            if scheduled_sync_ids is not None:
                scheduled_sync_details = list(
                    ScheduledCall.get_collection().find({"id": {"$in": scheduled_sync_ids}}))
                importer['scheduled_syncs'] = [s["schedule"] for s in scheduled_sync_details]

        return importers

    def set_importer(self, repo_id, importer_type_id, repo_plugin_config):
        """
        Configures an importer to be used for the given repository.

        Keep in mind this method is written assuming a single importer for a
        repo. The domain model technically supports multiple importers, but
        this call is what enforces the single importer behavior.

        @param repo_id: identifies the repo
        @type repo_id: str
        @param importer_type_id: identifies the type of importer being added;
               must correspond to an importer loaded at server startup
        @type importer_type_id: str
        @param repo_plugin_config: configuration values for the importer; may be None
        @type repo_plugin_config: dict

        @raise MissingResource: if repo_id does not represent a valid repo
        @raise InvalidImporterConfiguration: if the importer cannot be
               initialized for the given repo
        """
        repo_coll = Repo.get_collection()
        importer_coll = RepoImporter.get_collection()

        # Validation
        repo = repo_coll.find_one({'id': repo_id})
        if repo is None:
            raise MissingResource(repo_id)

        if not plugin_api.is_valid_importer(importer_type_id):
            raise InvalidValue(['importer_type_id'])

        importer_instance, plugin_config = plugin_api.get_importer_by_id(importer_type_id)

        # Convention is that a value of None means unset. Remove any keys that
        # are explicitly set to None so the plugin will default them.
        if repo_plugin_config is not None:
            clean_config = dict([(k, v) for k, v in repo_plugin_config.items() if v is not None])
        else:
            clean_config = None

        # Let the importer plugin verify the configuration
        call_config = PluginCallConfiguration(plugin_config, clean_config)
        transfer_repo = common_utils.to_transfer_repo(repo)
        transfer_repo.working_dir = common_utils.importer_working_dir(importer_type_id, repo_id)

        query_manager = manager_factory.repo_query_manager()
        related_repos = query_manager.find_with_importer_type(importer_type_id)

        transfer_related_repos = []
        for r in related_repos:
            all_configs = [d['config'] for d in r['importers']]
            trr = common_utils.to_related_repo(r, all_configs)
            transfer_related_repos.append(trr)

        try:
            result = importer_instance.validate_config(transfer_repo, call_config,
                                                       transfer_related_repos)

            # For backward compatibility with plugins that don't yet return the tuple
            if isinstance(result, bool):
                valid_config = result
                message = None
            else:
                valid_config, message = result
        except Exception, e:
            _LOG.exception('Exception received from importer [%s] while validating config' %
                           importer_type_id)
            raise PulpDataException(e.args), None, sys.exc_info()[2]

        if not valid_config:
            raise PulpDataException(message)

        # Remove old importer if one exists
        try:
            self.remove_importer(repo_id)
        except MissingResource:
            pass  # it didn't exist, so no harm done

        # Let the importer plugin initialize the repository
        try:
            importer_instance.importer_added(transfer_repo, call_config)
        except Exception:
            _LOG.exception('Error initializing importer [%s] for repo [%s]' %
                           (importer_type_id, repo_id))
            raise PulpExecutionException(), None, sys.exc_info()[2]

        # Database Update
        importer_id = importer_type_id  # use the importer type ID as the importer ID
        importer = RepoImporter(repo_id, importer_id, importer_type_id, clean_config)
        importer_coll.save(importer, safe=True)

        return importer
def import_uploaded_unit(repo_id, unit_type_id, unit_key, unit_metadata, upload_id,
                         override_config=None):
    """
    Called to trigger the importer's handling of an uploaded unit. This
    should not be called until the bits have finished uploading. The importer
    is then responsible for moving the file to the correct location, adding
    it to the Pulp server's inventory, and associating it with the repository.

    This call will first call is_valid_upload to check the integrity of the
    destination repository. See that method's documentation for exception
    possibilities.

    :param repo_id: identifies the repository into which the unit is uploaded
    :type repo_id: str
    :param unit_type_id: type of unit being uploaded
    :type unit_type_id: str
    :param unit_key: unique identifier for the unit (user-specified)
    :type unit_key: dict
    :param unit_metadata: any user-specified information about the unit
    :type unit_metadata: dict
    :param upload_id: upload being imported
    :type upload_id: str
    :return: A dictionary describing the success or failure of the upload. It
             must contain the following keys:
               'success_flag': bool. Indicates whether the upload was successful
               'summary': json-serializable object, providing summary
               'details': json-serializable object, providing details
    :rtype: dict
    :raises MissingResource: if the upload request was for a non-existent repository
    :raises PulpCodedException: if the import was unsuccessful and was handled
            by the importer
    :raises PulpException: if the import was unsuccessful and was not handled
            by the importer
    :raises PulpExecutionException: if an unexpected error occurred during the upload
    """
    # If it doesn't raise an exception, it's good to go
    ContentUploadManager.is_valid_upload(repo_id, unit_type_id)

    repo_obj = model.Repository.objects.get_repo_or_missing_resource(repo_id)
    repo_importer = model.Importer.objects.get_or_404(repo_id=repo_id)

    try:
        importer_instance, plugin_config = plugin_api.get_importer_by_id(
            repo_importer['importer_type_id'])
    except plugin_exceptions.PluginNotFound:
        raise MissingResource(repo_id), None, sys.exc_info()[2]

    # Assemble the data needed for the import
    conduit = UploadConduit(repo_id, repo_importer['id'])
    call_config = PluginCallConfiguration(plugin_config, repo_importer['config'],
                                          override_config)
    transfer_repo = repo_obj.to_transfer_repo()

    file_path = ContentUploadManager._upload_file_path(upload_id)

    # Invoke the importer
    try:
        result = importer_instance.upload_unit(transfer_repo, unit_type_id, unit_key,
                                               unit_metadata, file_path, conduit, call_config)
        if not result['success_flag']:
            raise PulpCodedException(
                error_code=error_codes.PLP0047, repo_id=transfer_repo.id,
                importer_id=repo_importer['importer_type_id'], unit_type=unit_type_id,
                summary=result['summary'], details=result['details'])
        repo_controller.rebuild_content_unit_counts(repo_obj)
        return result
    except PulpException:
        msg = _('Error from the importer while importing uploaded unit to repository [%(r)s]')
        msg = msg % {'r': repo_id}
        logger.exception(msg)
        raise
    except Exception, e:
        msg = _('Error from the importer while importing uploaded unit to repository [%(r)s]')
        msg = msg % {'r': repo_id}
        logger.exception(msg)
        raise PulpExecutionException(e), None, sys.exc_info()[2]
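A hedged usage sketch of the newer contract; the IDs, unit key, and upload ID are illustrative and the call requires a configured server. A False 'success_flag' never reaches the caller, because it is escalated to PulpCodedException (PLP0047) inside the call:

report = import_uploaded_unit(
    repo_id='zoo',
    unit_type_id='rpm',
    unit_key={'name': 'walrus', 'version': '5.21'},
    unit_metadata={},
    upload_id='0123abcd')
print report['summary'], report['details']   # 'success_flag' is True here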
def sync(self, repo_id, sync_config_override=None):
    """
    Performs a synchronize operation on the given repository.

    The given repo must have an importer configured. The identity of the
    importer is not a parameter to this call; if multiple importers are
    eventually supported this will have to change to indicate which importer
    to use.

    This method is intentionally limited to synchronizing a single repo.
    Performing multiple repository syncs concurrently will require a more
    global view of the server and must be handled outside the scope of this
    class.

    @param repo_id: identifies the repo to sync
    @type repo_id: str
    @param sync_config_override: optional config containing values to use for
           this sync only
    @type sync_config_override: dict

    @return: The synchronization report.
    @rtype: L{pulp.server.plugins.model.SyncReport}

    @raise MissingResource: if repo_id does not refer to a valid repo or the
           repo does not have an importer set
    @raise PulpExecutionException: if the importer indicates a failed sync
    """
    repo_coll = Repo.get_collection()

    # Validation
    repo = repo_coll.find_one({'id': repo_id})
    if repo is None:
        raise MissingResource(repo_id)

    importer_instance, importer_config = self._get_importer_instance_and_config(repo_id)

    if importer_instance is None:
        raise MissingResource(repo_id)

    dispatch_context = dispatch_factory.context()
    dispatch_context.set_cancel_control_hook(importer_instance.cancel_sync_repo)

    importer_manager = manager_factory.repo_importer_manager()
    repo_importer = importer_manager.get_importer(repo_id)

    # Assemble the data needed for the sync
    conduit = RepoSyncConduit(repo_id, repo_importer['id'],
                              RepoContentUnit.OWNER_TYPE_IMPORTER, repo_importer['id'])
    call_config = PluginCallConfiguration(importer_config, repo_importer['config'],
                                          sync_config_override)
    transfer_repo = common_utils.to_transfer_repo(repo)
    transfer_repo.working_dir = common_utils.importer_working_dir(
        repo_importer['importer_type_id'], repo_id, mkdir=True)

    # Fire events around the call
    fire_manager = manager_factory.event_fire_manager()
    fire_manager.fire_repo_sync_started(repo_id)
    sync_result = self._do_sync(repo, importer_instance, transfer_repo, conduit, call_config)
    fire_manager.fire_repo_sync_finished(sync_result)

    dispatch_context.clear_cancel_control_hook()

    if sync_result['result'] == RepoSyncResult.RESULT_FAILED:
        raise PulpExecutionException(_('Importer indicated a failed response'))

    return sync_result
class AgentManager(object):
    """
    The agent manager.
    """

    @staticmethod
    def unregister(consumer_id):
        """
        Notification that a consumer (agent) has been unregistered. This
        ensures that all registration artifacts have been cleaned up. Then,
        we fire off a task to lazily delete the agent queue.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        """
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        context = Context(consumer)
        agent = PulpAgent()
        agent.consumer.unregister(context)
        url = context.url
        name = context.address.split('/')[-1]
        task_tags = [tags.resource_tag(tags.ACTION_AGENT_QUEUE_DELETE, consumer_id)]
        delete_queue.apply_async(args=[url, name, consumer_id],
                                 countdown=QUEUE_DELETE_DELAY, tags=task_tags)

    @staticmethod
    def bind(consumer_id, repo_id, distributor_id, options):
        """
        Request the agent to perform the specified bind. This method will be
        called after the server-side representation of the binding has been
        created.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param repo_id: A repository ID.
        :type repo_id: str
        :param distributor_id: A distributor ID.
        :type distributor_id: str
        :param options: The options are handler specific.
        :type options: dict
        :return: The task created by the bind
        :rtype: dict
        """
        # track agent operations using a pseudo task
        task_id = str(uuid4())
        task_tags = [
            tags.resource_tag(tags.RESOURCE_CONSUMER_TYPE, consumer_id),
            tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
            tags.resource_tag(tags.RESOURCE_REPOSITORY_DISTRIBUTOR_TYPE, distributor_id),
            tags.action_tag(tags.ACTION_AGENT_BIND)
        ]
        task = TaskStatus(task_id, 'agent', tags=task_tags).save()

        # agent request
        consumer_manager = managers.consumer_manager()
        binding_manager = managers.consumer_bind_manager()
        consumer = consumer_manager.get_consumer(consumer_id)
        binding = binding_manager.get_bind(consumer_id, repo_id, distributor_id)
        agent_bindings = AgentManager._bindings([binding])
        context = Context(consumer, task_id=task_id, action='bind', consumer_id=consumer_id,
                          repo_id=repo_id, distributor_id=distributor_id)
        agent = PulpAgent()
        agent.consumer.bind(context, agent_bindings, options)

        # bind action tracking
        consumer_manager = managers.consumer_bind_manager()
        consumer_manager.action_pending(consumer_id, repo_id, distributor_id,
                                        Bind.Action.BIND, task_id)

        return task

    @staticmethod
    def unbind(consumer_id, repo_id, distributor_id, options):
        """
        Request the agent to perform the specified unbind.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param repo_id: A repository ID.
        :type repo_id: str
        :param distributor_id: A distributor ID.
        :type distributor_id: str
        :param options: The options are handler specific.
        :type options: dict
        :return: The task created by the unbind
        :rtype: dict
        """
        # track agent operations using a pseudo task
        task_id = str(uuid4())
        task_tags = [
            tags.resource_tag(tags.RESOURCE_CONSUMER_TYPE, consumer_id),
            tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
            tags.resource_tag(tags.RESOURCE_REPOSITORY_DISTRIBUTOR_TYPE, distributor_id),
            tags.action_tag(tags.ACTION_AGENT_UNBIND)
        ]
        task = TaskStatus(task_id, 'agent', tags=task_tags).save()

        # agent request
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        binding = dict(repo_id=repo_id, distributor_id=distributor_id)
        bindings = AgentManager._unbindings([binding])
        context = Context(consumer, task_id=task_id, action='unbind', consumer_id=consumer_id,
                          repo_id=repo_id, distributor_id=distributor_id)
        agent = PulpAgent()
        agent.consumer.unbind(context, bindings, options)

        # unbind action tracking
        manager = managers.consumer_bind_manager()
        manager.action_pending(consumer_id, repo_id, distributor_id,
                               Bind.Action.UNBIND, task_id)

        return task

    @staticmethod
    def install_content(consumer_id, units, options):
        """
        Install content units on a consumer.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param units: A list of content units to be installed.
        :type units: list of: { type_id:<str>, unit_key:<dict> }
        :param options: Install options; based on unit type.
        :type options: dict
        :return: A task used to track the agent request.
        :rtype: dict
        """
        # track agent operations using a pseudo task
        task_id = str(uuid4())
        task_tags = [
            tags.resource_tag(tags.RESOURCE_CONSUMER_TYPE, consumer_id),
            tags.action_tag(tags.ACTION_AGENT_UNIT_INSTALL)
        ]
        task = TaskStatus(task_id, 'agent', tags=task_tags).save()

        # agent request
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        conduit = ProfilerConduit()
        collated = Units(units)
        for typeid, units in collated.items():
            pc = AgentManager._profiled_consumer(consumer_id)
            profiler, cfg = AgentManager._profiler(typeid)
            units = AgentManager._invoke_plugin(profiler.install_units, pc, units,
                                                options, cfg, conduit)
            collated[typeid] = units
        units = collated.join()
        context = Context(consumer, task_id=task_id, consumer_id=consumer_id)
        agent = PulpAgent()
        agent.content.install(context, units, options)
        history_manager = managers.consumer_history_manager()
        history_manager.record_event(consumer_id, 'content_unit_installed', {'units': units})
        return task

    @staticmethod
    def update_content(consumer_id, units, options):
        """
        Update content units on a consumer.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param units: A list of content units to be updated.
        :type units: list of: { type_id:<str>, unit_key:<dict> }
        :param options: Update options; based on unit type.
        :type options: dict
        :return: A task used to track the agent request.
        :rtype: dict
        """
        # track agent operations using a pseudo task
        task_id = str(uuid4())
        task_tags = [
            tags.resource_tag(tags.RESOURCE_CONSUMER_TYPE, consumer_id),
            tags.action_tag(tags.ACTION_AGENT_UNIT_UPDATE)
        ]
        task = TaskStatus(task_id, 'agent', tags=task_tags).save()

        # agent request
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        conduit = ProfilerConduit()
        collated = Units(units)
        for typeid, units in collated.items():
            pc = AgentManager._profiled_consumer(consumer_id)
            profiler, cfg = AgentManager._profiler(typeid)
            units = AgentManager._invoke_plugin(profiler.update_units, pc, units,
                                                options, cfg, conduit)
            collated[typeid] = units
        units = collated.join()
        context = Context(consumer, task_id=task_id, consumer_id=consumer_id)
        agent = PulpAgent()
        agent.content.update(context, units, options)
        return task

    @staticmethod
    def uninstall_content(consumer_id, units, options):
        """
        Uninstall content units on a consumer.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param units: A list of content units to be uninstalled.
        :type units: list of: { type_id:<str>, unit_key:<dict> }
        :param options: Uninstall options; based on unit type.
        :type options: dict
        :return: A task used to track the agent request.
        :rtype: dict
        """
        # track agent operations using a pseudo task
        task_id = str(uuid4())
        task_tags = [
            tags.resource_tag(tags.RESOURCE_CONSUMER_TYPE, consumer_id),
            tags.action_tag(tags.ACTION_AGENT_UNIT_UNINSTALL)
        ]
        task = TaskStatus(task_id, 'agent', tags=task_tags).save()

        # agent request
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        conduit = ProfilerConduit()
        collated = Units(units)
        for typeid, units in collated.items():
            pc = AgentManager._profiled_consumer(consumer_id)
            profiler, cfg = AgentManager._profiler(typeid)
            units = AgentManager._invoke_plugin(profiler.uninstall_units, pc, units,
                                                options, cfg, conduit)
            collated[typeid] = units
        units = collated.join()
        context = Context(consumer, task_id=task_id, consumer_id=consumer_id)
        agent = PulpAgent()
        agent.content.uninstall(context, units, options)
        history_manager = managers.consumer_history_manager()
        history_manager.record_event(consumer_id, 'content_unit_uninstalled', {'units': units})
        return task

    def cancel_request(self, consumer_id, task_id):
        """
        Cancel an agent request associated with the specified task ID.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param task_id: The task ID associated with the request.
        :type task_id: str
        """
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        context = Context(consumer)
        agent = PulpAgent()
        agent.cancel(context, task_id)

    @staticmethod
    def _invoke_plugin(call, *args, **kwargs):
        try:
            return call(*args, **kwargs)
        except InvalidUnitsRequested, e:
            trace = sys.exc_info()[2]
            raise PulpDataException(e.message), None, trace
        except Exception:
            raise PulpExecutionException(), None, sys.exc_info()[2]
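_invoke_plugin draws the line between bad input and a plugin crash: InvalidUnitsRequested becomes a PulpDataException, anything else a PulpExecutionException, and both re-raises keep the plugin's traceback. A self-contained sketch of the same translation, with stand-in exception classes:

import sys

class InvalidUnitsRequested(Exception):       # stand-in for the plugin exception
    def __init__(self, message, units):
        Exception.__init__(self, message)
        self.message = message
        self.units = units

class PulpDataException(Exception): pass      # stand-ins for the server exceptions
class PulpExecutionException(Exception): pass

def invoke_plugin(call, *args, **kwargs):
    try:
        return call(*args, **kwargs)
    except InvalidUnitsRequested, e:
        # caller/data error: change the type but keep the plugin's traceback
        raise PulpDataException(e.message), None, sys.exc_info()[2]
    except Exception:
        # anything else is an execution failure inside the plugin
        raise PulpExecutionException(), None, sys.exc_info()[2]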
class AgentManager(object):
    """
    The agent manager.
    """

    def unregistered(self, consumer_id):
        """
        Notification that a consumer (agent) has been unregistered. This
        ensures that all registration artifacts have been cleaned up.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        """
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        agent = PulpAgent(consumer)
        agent.consumer.unregistered()

    def bind(self, consumer_id, repo_id, distributor_id, options):
        """
        Request the agent to perform the specified bind. This method will be
        called after the server-side representation of the binding has been
        created.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param repo_id: A repository ID.
        :type repo_id: str
        :param distributor_id: A distributor ID.
        :type distributor_id: str
        :param options: The options are handler specific.
        :type options: dict
        """
        # agent request
        consumer_manager = managers.consumer_manager()
        binding_manager = managers.consumer_bind_manager()
        consumer = consumer_manager.get_consumer(consumer_id)
        binding = binding_manager.get_bind(consumer_id, repo_id, distributor_id)
        agent_bindings = self.__bindings([binding])
        agent = PulpAgent(consumer)
        agent.consumer.bind(agent_bindings, options)

        # request tracking
        action_id = factory.context().call_request_id
        consumer_manager = managers.consumer_bind_manager()
        consumer_manager.action_pending(consumer_id, repo_id, distributor_id,
                                        Bind.Action.BIND, action_id)

    def unbind(self, consumer_id, repo_id, distributor_id, options):
        """
        Request the agent to perform the specified unbind.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param repo_id: A repository ID.
        :type repo_id: str
        :param distributor_id: A distributor ID.
        :type distributor_id: str
        :param options: The options are handler specific.
        :type options: dict
        """
        # agent request
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        binding = dict(repo_id=repo_id, distributor_id=distributor_id)
        bindings = self.__unbindings([binding])
        agent = PulpAgent(consumer)
        agent.consumer.unbind(bindings, options)

        # request tracking
        action_id = factory.context().call_request_id
        manager = managers.consumer_bind_manager()
        manager.action_pending(consumer_id, repo_id, distributor_id,
                               Bind.Action.UNBIND, action_id)

    def install_content(self, consumer_id, units, options):
        """
        Install content units on a consumer.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param units: A list of content units to be installed.
        :type units: list of: { type_id:<str>, unit_key:<dict> }
        :param options: Install options; based on unit type.
        :type options: dict
        """
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        conduit = ProfilerConduit()
        collated = Units(units)
        for typeid, units in collated.items():
            pc = self.__profiled_consumer(consumer_id)
            profiler, cfg = self.__profiler(typeid)
            units = self.__invoke_plugin(profiler.install_units, pc, units,
                                         options, cfg, conduit)
            collated[typeid] = units
        units = collated.join()
        agent = PulpAgent(consumer)
        agent.content.install(units, options)

    def update_content(self, consumer_id, units, options):
        """
        Update content units on a consumer.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param units: A list of content units to be updated.
        :type units: list of: { type_id:<str>, unit_key:<dict> }
        :param options: Update options; based on unit type.
        :type options: dict
        """
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        conduit = ProfilerConduit()
        collated = Units(units)
        for typeid, units in collated.items():
            pc = self.__profiled_consumer(consumer_id)
            profiler, cfg = self.__profiler(typeid)
            units = self.__invoke_plugin(profiler.update_units, pc, units,
                                         options, cfg, conduit)
            collated[typeid] = units
        units = collated.join()
        agent = PulpAgent(consumer)
        agent.content.update(units, options)

    def uninstall_content(self, consumer_id, units, options):
        """
        Uninstall content units on a consumer.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param units: A list of content units to be uninstalled.
        :type units: list of: { type_id:<str>, unit_key:<dict> }
        :param options: Uninstall options; based on unit type.
        :type options: dict
        """
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        conduit = ProfilerConduit()
        collated = Units(units)
        for typeid, units in collated.items():
            pc = self.__profiled_consumer(consumer_id)
            profiler, cfg = self.__profiler(typeid)
            units = self.__invoke_plugin(profiler.uninstall_units, pc, units,
                                         options, cfg, conduit)
            collated[typeid] = units
        units = collated.join()
        agent = PulpAgent(consumer)
        agent.content.uninstall(units, options)

    def send_profile(self, consumer_id):
        """
        Send the content profile(s).

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        """
        _LOG.info(consumer_id)

    def cancel_request(self, consumer_id, task_id):
        """
        Cancel an agent request associated with the specified task ID.

        :param consumer_id: The consumer ID.
        :type consumer_id: str
        :param task_id: The task ID associated with the request.
        :type task_id: str
        """
        manager = managers.consumer_manager()
        consumer = manager.get_consumer(consumer_id)
        agent = PulpAgent(consumer)
        agent.cancel(task_id)

    def __invoke_plugin(self, call, *args, **kwargs):
        try:
            return call(*args, **kwargs)
        except InvalidUnitsRequested, e:
            raise PulpDataException(e.units, e.message)
        except Exception:
            raise PulpExecutionException(), None, sys.exc_info()[2]
class RepoGroupDistributorManager(object):

    @staticmethod
    def get_distributor(repo_group_id, distributor_id):
        """
        Returns an individual distributor on the given repo group, raising
        an exception if one does not exist at the given ID.

        @param repo_group_id: identifies the repo group
        @type repo_group_id: str

        @param distributor_id: identifies the distributor
        @type distributor_id: str

        @return: SON representation of the distributor
        @rtype: dict

        @raise MissingResource: if either there is no distributor for the
               given group ID/distributor ID pair or the group itself does
               not exist
        """
        # Check the group's existence for the exception contract first
        manager_factory.repo_group_query_manager().get_group(repo_group_id)

        # Check for the distributor now that we know the group exists
        spec = {
            'repo_group_id': repo_group_id,
            'id': distributor_id,
        }
        distributor = RepoGroupDistributor.get_collection().find_one(spec)
        if distributor is None:
            raise MissingResource(repo_group=repo_group_id, distributor=distributor_id)

        return distributor

    @staticmethod
    def find_distributors(repo_group_id):
        """
        Returns all distributors on the given repo group, returning an empty
        list if none exist.

        @param repo_group_id: identifies the repo group
        @type repo_group_id: str

        @return: list of SON representations of the group's distributors
        @rtype: list

        @raise MissingResource: if the group does not exist
        """
        group = RepoGroup.get_collection().find_one({'id': repo_group_id})
        if group is None:
            raise MissingResource(repo_group=repo_group_id)

        spec = {'repo_group_id': repo_group_id}
        distributors = list(RepoGroupDistributor.get_collection().find(spec))
        return distributors

    @staticmethod
    def add_distributor(repo_group_id, distributor_type_id, group_plugin_config,
                        distributor_id=None):
        """
        Adds an association from the given repository group to a distributor.
        The association will be tracked through the distributor_id; each
        distributor on a given group must have a unique ID. If this is not
        specified, one will be generated. If a distributor already exists on
        the group with a given ID, the existing one will be removed and
        replaced with the newly configured one.

        @param repo_group_id: identifies the repo group
        @type repo_group_id: str

        @param distributor_type_id: type of distributor being added; must
               reference one of the installed group distributors
        @type distributor_type_id: str

        @param group_plugin_config: config to use for the distributor for
               this group alone
        @type group_plugin_config: dict

        @param distributor_id: if specified, the newly added distributor will
               be referenced by this value and the group id; if omitted one
               will be generated
        @type distributor_id: str

        @return: database representation of the added distributor
        @rtype: dict

        @raise MissingResource: if the group doesn't exist
        @raise InvalidValue: if a distributor ID is provided and is not valid
        @raise PulpDataException: if the plugin indicates the config is invalid
        @raise PulpExecutionException: if the plugin raises an exception while
               initializing the newly added distributor
        """
        distributor_coll = RepoGroupDistributor.get_collection()
        query_manager = manager_factory.repo_group_query_manager()

        # Validation
        group = query_manager.get_group(repo_group_id)  # will raise MissingResource

        if not plugin_api.is_valid_group_distributor(distributor_type_id):
            raise InvalidValue(['distributor_type_id'])

        # Determine the ID for the distributor on this group
        if distributor_id is None:
            distributor_id = str(uuid.uuid4())
        else:
            # Validate if one was passed in
            if not is_distributor_id_valid(distributor_id):
                raise InvalidValue(['distributor_id'])

        distributor_instance, plugin_config = plugin_api.get_group_distributor_by_id(
            distributor_type_id)

        # Convention is that a value of None means unset. Remove any keys that
        # are explicitly set to None so the plugin will default them.
        clean_config = None
        if group_plugin_config is not None:
            clean_config = dict([(k, v) for k, v in group_plugin_config.items()
                                 if v is not None])

        # Assemble the call configuration for the plugin
        call_config = PluginCallConfiguration(plugin_config, clean_config)
        transfer_group = common_utils.to_transfer_repo_group(group)
        config_conduit = RepoConfigConduit(distributor_type_id)

        # Request the plugin validate the configuration
        try:
            is_valid, message = distributor_instance.validate_config(
                transfer_group, call_config, config_conduit)
            if not is_valid:
                raise PulpDataException(message)
        except Exception, e:
            msg = _('Exception received from distributor [%(d)s] while validating config')
            msg = msg % {'d': distributor_type_id}
            _logger.exception(msg)
            raise PulpDataException(e.args), None, sys.exc_info()[2]

        # Remove the old distributor if it exists
        try:
            RepoGroupDistributorManager.remove_distributor(
                repo_group_id, distributor_id, force=False)
        except MissingResource:
            pass  # if it didn't exist, no problem

        # Invoke the appropriate plugin lifecycle method
        try:
            distributor_instance.distributor_added(transfer_group, call_config)
        except Exception, e:
            _logger.exception('Error initializing distributor [%s] for group [%s]' %
                              (distributor_type_id, repo_group_id))
            raise PulpExecutionException(), None, sys.exc_info()[2]
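# The None-stripping step in add_distributor is worth isolating: callers may
# pass keys explicitly set to None to mean "use the plugin's default", so
# those keys are dropped before the config reaches the plugin. A minimal
# standalone sketch of that convention; clean_plugin_config and the config
# keys shown are illustrative, not part of Pulp's API.
def clean_plugin_config(user_config):
    """Drop keys explicitly set to None so the plugin falls back to defaults."""
    if user_config is None:
        return None
    return dict((k, v) for k, v in user_config.items() if v is not None)


# 'auto_publish' is left to the plugin default; 'http' stays overridden.
assert clean_plugin_config({'http': True, 'auto_publish': None}) == {'http': True}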
    def import_uploaded_unit(self, repo_id, unit_type_id, unit_key, unit_metadata,
                             upload_id):
        """
        Called to trigger the importer's handling of an uploaded unit. This
        should not be called until the bits have finished uploading. The
        importer is then responsible for moving the file to the correct
        location, adding it to the Pulp server's inventory, and associating
        it with the repository.

        This call will first call is_valid_upload to check the integrity of
        the destination repository. See that method's documentation for
        exception possibilities.

        @param repo_id: identifies the repository into which the unit is uploaded
        @type repo_id: str

        @param unit_type_id: type of unit being uploaded
        @type unit_type_id: str

        @param unit_key: unique identifier for the unit (user-specified)
        @type unit_key: dict

        @param unit_metadata: any user-specified information about the unit
        @type unit_metadata: dict

        @param upload_id: upload being imported
        @type upload_id: str
        """
        # If it doesn't raise an exception, it's good to go
        self.is_valid_upload(repo_id, unit_type_id)

        repo_query_manager = manager_factory.repo_query_manager()
        importer_manager = manager_factory.repo_importer_manager()

        repo = repo_query_manager.find_by_id(repo_id)
        repo_importer = importer_manager.get_importer(repo_id)

        try:
            importer_instance, plugin_config = plugin_api.get_importer_by_id(
                repo_importer['importer_type_id'])
        except plugin_exceptions.PluginNotFound:
            raise MissingResource(repo_id), None, sys.exc_info()[2]

        # Assemble the data needed for the import
        conduit = UploadConduit(
            repo_id, repo_importer['id'], RepoContentUnit.OWNER_TYPE_USER,
            manager_factory.principal_manager().get_principal()['login'])
        call_config = PluginCallConfiguration(plugin_config, repo_importer['config'],
                                              None)
        transfer_repo = repo_common_utils.to_transfer_repo(repo)
        transfer_repo.working_dir = repo_common_utils.importer_working_dir(
            repo_importer['importer_type_id'], repo_id, mkdir=True)
        file_path = self._upload_file_path(upload_id)

        # Invoke the importer
        try:
            importer_instance.upload_unit(transfer_repo, unit_type_id, unit_key,
                                          unit_metadata, file_path, conduit,
                                          call_config)
        except PulpException:
            _LOG.exception(
                'Error from the importer while importing uploaded unit to repository [%s]'
                % repo_id)
            raise
        except Exception, e:
            _LOG.exception(
                'Error from the importer while importing uploaded unit to repository [%s]'
                % repo_id)
            raise PulpExecutionException(e), None, sys.exc_info()[2]
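# The three-argument raise used throughout these managers, e.g.
# raise PulpExecutionException(e), None, sys.exc_info()[2], is the Python 2
# idiom for re-raising under a new exception type while preserving the
# original traceback, so the bubbled-up exception still points at the plugin
# frame that actually failed. A minimal sketch of the idiom; WrappedError and
# call_plugin are hypothetical names, not Pulp API.
import sys


class WrappedError(Exception):
    """Illustrative stand-in for a wrapper type like PulpExecutionException."""
    pass


def call_plugin(plugin_call):
    try:
        return plugin_call()
    except Exception, e:
        # Re-raise as the wrapper type while keeping the original traceback
        # (sys.exc_info()[2]) intact for debugging.
        raise WrappedError(e), None, sys.exc_info()[2]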