def _do_publish(repo, distributor_id, distributor_instance, transfer_repo, conduit, call_config):
    """
    Invoke the distributor plugin's publish for a single repo/distributor pair.

    On any exception from the plugin, this stamps the distributor's
    ``last_publish``, writes an error entry to the publish history collection,
    and re-raises the failure wrapped in a ``PulpExecutionException`` (the
    original traceback is preserved via the Python 2 three-expression raise).

    :param repo:                 repository document; must contain an 'id' key
    :param distributor_id:       identifies the distributor on this repo
    :param distributor_instance: distributor plugin instance to invoke
    :param transfer_repo:        plugin-facing representation of the repo
    :param conduit:              conduit the plugin uses to talk to core pulp
    :param call_config:          configuration values exposed to the plugin
    """
    distributor_coll = RepoDistributor.get_collection()
    publish_result_coll = RepoPublishResult.get_collection()
    repo_id = repo['id']

    # Perform the publish
    publish_start_timestamp = _now_timestamp()
    try:
        # Add the register_sigterm_handler decorator to the publish_repo call, so that we can
        # respond to signals by calling the Distributor's cancel_publish_repo() method.
        publish_repo = register_sigterm_handler(
            distributor_instance.publish_repo, distributor_instance.cancel_publish_repo)
        publish_report = publish_repo(transfer_repo, conduit, call_config)
    except Exception, e:
        publish_end_timestamp = _now_timestamp()

        # Reload the distributor in case the scratchpad is set by the plugin
        # NOTE(review): find_one may return None if the distributor was removed while the
        # publish ran; the subscripts below would then raise TypeError and mask the
        # original plugin exception — confirm upstream guarantees the distributor exists.
        repo_distributor = distributor_coll.find_one(
            {'repo_id' : repo_id, 'id' : distributor_id})
        repo_distributor['last_publish'] = publish_end_timestamp
        distributor_coll.save(repo_distributor, safe=True)

        # Add a publish history entry for the run
        result = RepoPublishResult.error_result(
            repo_id, repo_distributor['id'], repo_distributor['distributor_type_id'],
            publish_start_timestamp, publish_end_timestamp, e, sys.exc_info()[2])
        publish_result_coll.save(result, safe=True)

        logger.exception(
            _('Exception caught from plugin during publish for repo [%(r)s]' % {'r' : repo_id}))
        # Re-raise as a PulpExecutionException but keep the original traceback
        # (Python 2 `raise instance, None, traceback` form).
        raise PulpExecutionException(), None, sys.exc_info()[2]
def sync(repo_id, sync_config_override=None):
    """
    Performs a synchronize operation on the given repository and triggers publishs for
    distributors with autopublish enabled. The given repo must have an importer configured.

    This method is intentionally limited to synchronizing a single repo. Performing multiple
    repository syncs concurrently will require a more global view of the server and must be
    handled outside the scope of this class.

    :param repo_id: identifies the repo to sync
    :type  repo_id: str
    :param sync_config_override: optional config containing values to use for this sync only
    :type  sync_config_override: dict

    :return: TaskResult containing sync results and a list of spawned tasks
    :rtype:  pulp.server.async.tasks.TaskResult

    :raise pulp_exceptions.MissingResource: if specified repo does not exist, or it does not
        have an importer and associated plugin
    :raise pulp_exceptions.PulpExecutionException: if the task fails.
    """
    repo_obj = model.Repository.objects.get_repo_or_missing_resource(repo_id)
    transfer_repo = repo_obj.to_transfer_repo()

    # The importer is stored in its own collection; a repo without one cannot sync.
    importer_collection = RepoImporter.get_collection()
    repo_importer = importer_collection.find_one({'repo_id': repo_obj.repo_id})
    if repo_importer is None:
        raise pulp_exceptions.MissingResource(repository=repo_id)

    # A missing plugin for the configured importer type is reported the same way
    # as a missing importer.
    try:
        importer, imp_config = plugin_api.get_importer_by_id(repo_importer['importer_type_id'])
    except plugin_exceptions.PluginNotFound:
        raise pulp_exceptions.MissingResource(repository=repo_id)

    # Layer the per-call override on top of the stored importer config.
    call_config = PluginCallConfiguration(imp_config, repo_importer['config'],
                                          sync_config_override)
    transfer_repo.working_dir = common_utils.get_working_directory()
    conduit = RepoSyncConduit(repo_id, repo_importer['id'])
    sync_result_collection = RepoSyncResult.get_collection()

    # Fire events around the call
    fire_manager = manager_factory.event_fire_manager()
    fire_manager.fire_repo_sync_started(repo_id)

    # Perform the sync
    sync_start_timestamp = _now_timestamp()
    sync_result = None

    try:
        # Replace the Importer's sync_repo() method with our register_sigterm_handler decorator,
        # which will set up cancel_sync_repo() as the target for the signal handler
        sync_repo = register_sigterm_handler(importer.sync_repo, importer.cancel_sync_repo)
        sync_report = sync_repo(transfer_repo, conduit, call_config)
    except Exception, e:
        sync_end_timestamp = _now_timestamp()
        sync_result = RepoSyncResult.error_result(
            repo_obj.repo_id, repo_importer['id'], repo_importer['importer_type_id'],
            sync_start_timestamp, sync_end_timestamp, e, sys.exc_info()[2])
        # NOTE(review): the error result is built but not saved, and the visible code ends
        # at this re-raise without a success path (no finished event, no TaskResult return)
        # — confirm against the full upstream source whether this excerpt is truncated.
        raise
def _do_sync(repo, importer_instance, transfer_repo, conduit, call_config): """ Once all of the preparation for a sync has taken place, this call will perform the sync, making the necessary database updates. It returns the sync result instance (already saved to the database). This call does not have any behavior based on the success/failure of the sync; it is up to the caller to raise an exception in the event of a failed sync if that behavior is desired. """ importer_coll = RepoImporter.get_collection() sync_result_coll = RepoSyncResult.get_collection() repo_id = repo['id'] repo_importer = importer_coll.find_one({'repo_id': repo_id}) # Perform the sync sync_start_timestamp = _now_timestamp() sync_end_timestamp = None result = None try: # Replace the Importer's sync_repo() method with our register_sigterm_handler decorator, # which will set up cancel_sync_repo() as the target for the signal handler sync_repo = register_sigterm_handler(importer_instance.sync_repo, importer_instance.cancel_sync_repo) sync_report = sync_repo(transfer_repo, conduit, call_config) except Exception, e: sync_end_timestamp = _now_timestamp() result = RepoSyncResult.error_result( repo_id, repo_importer['id'], repo_importer['importer_type_id'], sync_start_timestamp, sync_end_timestamp, e, sys.exc_info()[2]) raise
def sync(repo_id, sync_config_override=None, scheduled_call_id=None): """ Performs a synchronize operation on the given repository and triggers publishes for distributors with auto-publish enabled. The given repo must have an importer configured. This method is intentionally limited to synchronizing a single repo. Performing multiple repository syncs concurrently will require a more global view of the server and must be handled outside the scope of this class. :param repo_id: identifies the repo to sync :type repo_id: str :param sync_config_override: optional config containing values to use for this sync only :type sync_config_override: dict :param scheduled_call_id: id of scheduled call that dispatched this task :type scheduled_call_id: str :return: TaskResult containing sync results and a list of spawned tasks :rtype: pulp.server.async.tasks.TaskResult :raise pulp_exceptions.MissingResource: if specified repo does not exist, or it does not have an importer and associated plugin :raise pulp_exceptions.PulpExecutionException: if the task fails. 
""" repo_obj = model.Repository.objects.get_repo_or_missing_resource(repo_id) transfer_repo = repo_obj.to_transfer_repo() repo_importer = model.Importer.objects.get_or_404(repo_id=repo_id) try: importer, imp_config = plugin_api.get_importer_by_id(repo_importer.importer_type_id) except plugin_exceptions.PluginNotFound: raise pulp_exceptions.MissingResource(repository=repo_id) call_config = PluginCallConfiguration(imp_config, repo_importer.config, sync_config_override) transfer_repo.working_dir = common_utils.get_working_directory() conduit = RepoSyncConduit(repo_id, repo_importer.importer_type_id, repo_importer.id) sync_result_collection = RepoSyncResult.get_collection() # Fire an events around the call fire_manager = manager_factory.event_fire_manager() fire_manager.fire_repo_sync_started(repo_id) # Perform the sync sync_start_timestamp = _now_timestamp() sync_result = None try: # Replace the Importer's sync_repo() method with our register_sigterm_handler decorator, # which will set up cancel_sync_repo() as the target for the signal handler sync_repo = register_sigterm_handler(importer.sync_repo, importer.cancel_sync_repo) sync_report = sync_repo(transfer_repo, conduit, call_config) except Exception, e: sync_end_timestamp = _now_timestamp() sync_result = RepoSyncResult.error_result( repo_obj.repo_id, repo_importer['id'], repo_importer['importer_type_id'], sync_start_timestamp, sync_end_timestamp, e, sys.exc_info()[2]) raise
def _do_publish(repo_obj, dist_id, dist_inst, transfer_repo, conduit, call_config):
    """
    Publish the repository using the given distributor.

    :param repo_obj: repository object
    :type  repo_obj: pulp.server.db.model.Repository
    :param dist_id: identifies the distributor
    :type  dist_id: str
    :param dist_inst: instance of the distributor
    :type  dist_inst: dict
    :param transfer_repo: dict representation of a repo for the plugins to use
    :type  transfer_repo: pulp.plugins.model.Repository
    :param conduit: allows the plugin to interact with core pulp
    :type  conduit: pulp.plugins.conduits.repo_publish.RepoPublishConduit
    :param call_config: allows the plugin to retrieve values
    :type  call_config: pulp.plugins.config.PluginCallConfiguration

    :return: publish result containing information about the publish
    :rtype:  pulp.server.db.model.repository.RepoPublishResult

    :raises pulp_exceptions.PulpCodedException: if the publish report's success flag is falsey
    """
    distributor_coll = RepoDistributor.get_collection()
    publish_result_coll = RepoPublishResult.get_collection()

    publish_start_timestamp = _now_timestamp()
    try:
        # Add the register_sigterm_handler decorator to the publish_repo call, so that we can
        # respond to signals by calling the Distributor's cancel_publish_repo() method.
        publish_repo = register_sigterm_handler(dist_inst.publish_repo,
                                                dist_inst.cancel_publish_repo)
        publish_report = publish_repo(transfer_repo, conduit, call_config)
        # A report that explicitly carries success_flag=False is treated as a failure
        # and converted into an exception so the error path below handles it.
        if publish_report is not None and hasattr(publish_report, 'success_flag') \
                and not publish_report.success_flag:
            raise pulp_exceptions.PulpCodedException(
                error_code=error_codes.PLP0034, repository_id=repo_obj.repo_id,
                distributor_id=dist_id
            )
    except Exception, e:
        publish_end_timestamp = _now_timestamp()

        # Reload the distributor in case the scratchpad is set by the plugin
        # NOTE(review): the reloaded document is saved back unmodified — sibling versions
        # of this function set last_publish before saving; this save looks redundant
        # (or a dropped assignment) — confirm against upstream history. Also, find_one
        # returning None here would make the subscripts below raise and mask the
        # original exception.
        repo_distributor = distributor_coll.find_one(
            {'repo_id': repo_obj.repo_id, 'id': dist_id})
        distributor_coll.save(repo_distributor, safe=True)

        # Add a publish history entry for the run
        result = RepoPublishResult.error_result(
            repo_obj.repo_id, repo_distributor['id'], repo_distributor['distributor_type_id'],
            publish_start_timestamp, publish_end_timestamp, e, sys.exc_info()[2])
        publish_result_coll.save(result, safe=True)

        _logger.exception(
            _('Exception caught from plugin during publish for repo [%(r)s]'
              % {'r': repo_obj.repo_id}))
        raise
def _do_publish(repo_obj, dist_id, dist_inst, transfer_repo, conduit, call_config):
    """
    Publish the repository using the given distributor.

    :param repo_obj: repository object
    :type  repo_obj: pulp.server.db.model.Repository
    :param dist_id: identifies the distributor
    :type  dist_id: str
    :param dist_inst: instance of the distributor
    :type  dist_inst: dict
    :param transfer_repo: dict representation of a repo for the plugins to use
    :type  transfer_repo: pulp.plugins.model.Repository
    :param conduit: allows the plugin to interact with core pulp
    :type  conduit: pulp.plugins.conduits.repo_publish.RepoPublishConduit
    :param call_config: allows the plugin to retrieve values
    :type  call_config: pulp.plugins.config.PluginCallConfiguration

    :return: publish result containing information about the publish
    :rtype:  pulp.server.db.model.repository.RepoPublishResult

    :raises pulp_exceptions.PulpCodedException: if the publish report's success flag is falsey
    """
    publish_result_coll = RepoPublishResult.get_collection()
    publish_start_timestamp = _now_timestamp()
    try:
        # Add the register_sigterm_handler decorator to the publish_repo call, so that we can
        # respond to signals by calling the Distributor's cancel_publish_repo() method.
        publish_repo = register_sigterm_handler(dist_inst.publish_repo,
                                                dist_inst.cancel_publish_repo)
        publish_report = publish_repo(transfer_repo, conduit, call_config)
        # A report explicitly marked unsuccessful is logged and converted into an
        # exception so the error path below records it in the publish history.
        if publish_report is not None and hasattr(publish_report, 'success_flag') \
                and not publish_report.success_flag:
            _logger.info(publish_report.summary)
            raise pulp_exceptions.PulpCodedException(
                error_code=error_codes.PLP0034, repository_id=repo_obj.repo_id,
                distributor_id=dist_id, summary=publish_report.summary
            )
    except Exception, e:
        exception_timestamp = _now_timestamp()

        # Reload the distributor in case the scratchpad is set by the plugin
        # NOTE(review): get_or_404 inside this handler will raise if the distributor was
        # removed mid-publish, masking the original exception — confirm this is acceptable.
        dist = model.Distributor.objects.get_or_404(repo_id=repo_obj.repo_id,
                                                    distributor_id=dist_id)

        # Add a publish history entry for the run
        result = RepoPublishResult.error_result(
            repo_obj.repo_id, dist.distributor_id, dist.distributor_type_id,
            publish_start_timestamp, exception_timestamp, e, sys.exc_info()[2])
        publish_result_coll.save(result, safe=True)

        _logger.exception(
            _('Exception caught from plugin during publish for repo [%(r)s]'
              % {'r': repo_obj.repo_id}))
        raise
def _do_publish(repo, distributor_id, distributor_instance, transfer_repo, conduit, call_config): distributor_coll = RepoDistributor.get_collection() publish_result_coll = RepoPublishResult.get_collection() repo_id = repo['id'] # Perform the publish publish_start_timestamp = _now_timestamp() try: # Add the register_sigterm_handler decorator to the publish_repo call, so that we can # respond to signals by calling the Distributor's cancel_publish_repo() method. publish_repo = register_sigterm_handler( distributor_instance.publish_repo, distributor_instance.cancel_publish_repo) publish_report = publish_repo(transfer_repo, conduit, call_config) if publish_report is not None and hasattr(publish_report, 'success_flag') \ and not publish_report.success_flag: raise PulpCodedException(error_code=error_codes.PLP0034, repository_id=repo_id, distributor_id=distributor_id) except Exception, e: publish_end_timestamp = _now_timestamp() # Reload the distributor in case the scratchpad is set by the plugin repo_distributor = distributor_coll.find_one({ 'repo_id': repo_id, 'id': distributor_id }) repo_distributor['last_publish'] = publish_end_timestamp distributor_coll.save(repo_distributor, safe=True) # Add a publish history entry for the run result = RepoPublishResult.error_result( repo_id, repo_distributor['id'], repo_distributor['distributor_type_id'], publish_start_timestamp, publish_end_timestamp, e, sys.exc_info()[2]) publish_result_coll.save(result, safe=True) _logger.exception( _('Exception caught from plugin during publish for repo [%(r)s]' % {'r': repo_id})) raise
def _do_sync(repo, importer_instance, transfer_repo, conduit, call_config): """ Once all of the preparation for a sync has taken place, this call will perform the sync, making the necessary database updates. It returns the sync result instance (already saved to the database). This call does not have any behavior based on the success/failure of the sync; it is up to the caller to raise an exception in the event of a failed sync if that behavior is desired. """ importer_coll = RepoImporter.get_collection() sync_result_coll = RepoSyncResult.get_collection() repo_id = repo['id'] repo_importer = importer_coll.find_one({'repo_id': repo_id}) # Perform the sync sync_start_timestamp = _now_timestamp() sync_end_timestamp = None result = None try: # Replace the Importer's sync_repo() method with our register_sigterm_handler decorator, # which will set up cancel_sync_repo() as the target for the signal handler sync_repo = register_sigterm_handler( importer_instance.sync_repo, importer_instance.cancel_sync_repo) sync_report = sync_repo(transfer_repo, conduit, call_config) except Exception, e: sync_end_timestamp = _now_timestamp() result = RepoSyncResult.error_result( repo_id, repo_importer['id'], repo_importer['importer_type_id'], sync_start_timestamp, sync_end_timestamp, e, sys.exc_info()[2]) _logger.exception( _('Exception caught from plugin during sync for repo [%(r)s]' % {'r': repo_id})) raise