def clean(self):
    Bind.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    RepoImporter.get_collection().remove()
    RepoContentUnit.get_collection().remove()
    unit_db.clean()
def clean(self):
    super(RepoPublishConduitTests, self).clean()
    mock_plugins.reset()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
def clean(self):
    super(RepoDistributorManagerTests, self).clean()
    mock_plugins.MOCK_DISTRIBUTOR.reset_mock()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
def test_import(self):
    # Setup
    self.populate()
    pulp_conf.set('server', 'storage_dir', self.parentfs)
    dist = NodesHttpDistributor()
    repo = Repository(self.REPO_ID)
    cfg = {
        'protocol': 'file',
        'http': {'alias': self.alias},
        'https': {'alias': self.alias},
        'file': {'alias': self.alias},
    }
    conduit = RepoPublishConduit(self.REPO_ID, constants.HTTP_DISTRIBUTOR)
    dist.publish_repo(repo, conduit, cfg)
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    RepoContentUnit.get_collection().remove()
    unit_db.clean()

    # Test
    importer = NodesHttpImporter()
    publisher = dist.publisher(repo, cfg)
    manifest_url = 'file://' + publisher.manifest_path()
    cfg = dict(manifest_url=manifest_url, strategy=constants.MIRROR_STRATEGY)
    conduit = RepoSyncConduit(
        self.REPO_ID,
        constants.HTTP_IMPORTER,
        RepoContentUnit.OWNER_TYPE_IMPORTER,
        constants.HTTP_IMPORTER)
    importer.sync_repo(repo, conduit, cfg)

    # Verify
    units = conduit.get_units()
    self.assertEquals(len(units), self.NUM_UNITS)
def clean(self):
    super(RepoManagerTests, self).clean()
    Repo.get_collection().remove()
    RepoImporter.get_collection().remove()
    RepoDistributor.get_collection().remove()
    dispatch.TaskStatus.objects().delete()
def tearDown(self):
    PulpItineraryTests.tearDown(self)
    Consumer.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    Bind.get_collection().remove()
    mock_plugins.reset()
def tearDown(self):
    super(ConsumerTest, self).tearDown()
    Consumer.get_collection().remove(safe=True)
    Repo.get_collection().remove(safe=True)
    RepoDistributor.get_collection().remove(safe=True)
    Bind.get_collection().remove(safe=True)
    mock_plugins.reset()
def tearDown(self):
    base.PulpWebserviceTests.tearDown(self)
    Consumer.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    Bind.get_collection().remove()
    mock_plugins.reset()
def tearDown(self):
    super(BindManagerTests, self).tearDown()
    Consumer.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    Bind.get_collection().remove()
    mock_plugins.reset()
def setUp(self):
    super(Migration0004Tests, self).setUp()
    # Special way to import modules that start with a number
    self.migration = _import_all_the_way(
        'pulp_rpm.plugins.migrations.0004_pkg_group_category_repoid')
    factory.initialize()
    types_db.update_database([TYPE_DEF_GROUP, TYPE_DEF_CATEGORY])
    # Create the repositories necessary for the tests
    self.source_repo_id = 'source-repo'  # where units were copied from with the bad code
    self.dest_repo_id = 'dest-repo'  # where bad units were copied to
    source_repo = Repo(self.source_repo_id, '')
    Repo.get_collection().insert(source_repo, safe=True)
    dest_repo = Repo(self.dest_repo_id, '')
    Repo.get_collection().insert(dest_repo, safe=True)
    source_importer = RepoImporter(self.source_repo_id, 'yum_importer', 'yum_importer', {})
    RepoImporter.get_collection().insert(source_importer, safe=True)
    dest_importer = RepoImporter(self.dest_repo_id, 'yum_importer', 'yum_importer', {})
    RepoImporter.get_collection().insert(dest_importer, safe=True)
def clean(self):
    super(RepoSyncConduitTests, self).clean()
    types_database.clean()
    mock_plugins.reset()
    RepoContentUnit.get_collection().remove()
    Repo.get_collection().remove()
def clean(self):
    super(RepoSyncManagerTests, self).clean()
    Repo.get_collection().remove()
    RepoImporter.get_collection().remove()
    RepoSyncResult.get_collection().remove()
    # Reset the state of the mock's tracker variables
    MockRepoPublishManager.reset()
def tearDown(self):
    PulpRPMTests.tearDown(self)
    Consumer.get_collection().remove()
    Repo.get_collection().remove()
    RepoContentUnit.get_collection().remove()
    RepoDistributor.get_collection().remove()
    database.clean()
    plugins.finalize()
def tearDown(self):
    super(self.__class__, self).tearDown()
    Consumer.get_collection().remove()
    ConsumerGroup.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    Bind.get_collection().remove()
    mock_plugins.reset()
def setUp(self):
    super(QueryTests, self).setUp()
    Repo.get_collection().remove()
    RepoContentUnit.get_collection().remove()
    unit_db.clean()
    self.define_plugins()
    plugin_api._create_manager()
    plugin_api._MANAGER.importers.add_plugin(constants.HTTP_IMPORTER, NodesHttpImporter, {})
def setUp(self):
    base.PulpWebserviceTests.setUp(self)
    Consumer.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    Bind.get_collection().remove()
    plugin_api._create_manager()
    mock_plugins.install()
def setUp(self):
    super(BindManagerTests, self).setUp()
    Consumer.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    Bind.get_collection().remove()
    plugin_api._create_manager()
    mock_plugins.install()
def tearDown(self):
    super(TestDoSync, self).tearDown()
    mock_plugins.reset()
    manager_factory.reset()
    Repo.get_collection().remove()
    RepoImporter.get_collection().remove()
    RepoSyncResult.get_collection().remove()
    MockRepoPublishManager.reset()
def setUp(self):
    PulpItineraryTests.setUp(self)
    Consumer.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    Bind.get_collection().remove()
    plugin_api._create_manager()
    mock_plugins.install()
    mock_agent.install()
def setUp(self):
    super(self.__class__, self).setUp()
    Consumer.get_collection().remove()
    ConsumerGroup.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    Bind.get_collection().remove()
    plugin_api._create_manager()
    mock_plugins.install()
def tearDown(self):
    super(Migration0004Tests, self).tearDown()
    # Delete any sample data added for the test
    types_db.clean()
    RepoContentUnit.get_collection().remove()
    RepoImporter.get_collection().remove()
    Repo.get_collection().remove()
def setUp(self):
    super(QueryTests, self).setUp()
    Repo.get_collection().remove()
    RepoContentUnit.get_collection().remove()
    unit_db.clean()
    for type_id in ALL_TYPES:
        unit_db.type_definition = Mock(return_value=dict(id=type_id, unit_key=UNIT_METADATA))
        unit_db.type_units_unit_key = Mock(return_value=['A', 'B', 'C', 'N'])
    plugin_api._create_manager()
    plugin_api._MANAGER.importers.add_plugin(constants.HTTP_IMPORTER, NodesHttpImporter, {})
def tearDown(self):
    super(BaseProfilerConduitTests, self).tearDown()
    Consumer.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    Bind.get_collection().remove()
    RepoContentUnit.get_collection().remove()
    UnitProfile.get_collection().remove()
    typedb.clean()
    factory.reset()
def clean(self):
    super(DependencyManagerTests, self).clean()
    database.clean()
    Repo.get_collection().remove()
    RepoImporter.get_collection().remove()
    RepoContentUnit.get_collection().remove()
    mock_plugins.MOCK_IMPORTER.resolve_dependencies.return_value = None
def clean(self, units_only=False, plugins=False):
    RepoContentUnit.get_collection().remove()
    unit_db.clean()
    if units_only:
        return
    Bind.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    RepoImporter.get_collection().remove()
    if plugins:
        plugin_api._MANAGER.distributors.plugins = {}
def tearDown(self):
    WebTest.tearDown(self)
    shutil.rmtree(self.parentfs)
    shutil.rmtree(self.childfs)
    Consumer.get_collection().remove()
    Bind.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    RepoImporter.get_collection().remove()
    RepoContentUnit.get_collection().remove()
    unit_db.clean()
def setUp(self):
    super(BaseProfilerConduitTests, self).setUp()
    Consumer.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    Bind.get_collection().remove()
    RepoContentUnit.get_collection().remove()
    UnitProfile.get_collection().remove()
    plugin_api._create_manager()
    typedb.update_database([self.TYPE_1_DEF, self.TYPE_2_DEF])
    mock_plugins.install()
def clean(self, just_units=False, purge_plugins=False):
    RepoContentUnit.get_collection().remove()
    unit_db.clean()
    if just_units:
        return
    Bind.get_collection().remove()
    Repo.get_collection().remove()
    RepoDistributor.get_collection().remove()
    RepoImporter.get_collection().remove()
    if purge_plugins:
        plugin_api._MANAGER.importers.plugins = {}
        plugin_api._MANAGER.distributors.plugins = {}
def test_migrate_duplicates_doesnt_delete_from_source_repo(self):
    """
    This tests the correct behavior when we try to change the repo_id on an object and end up
    causing a duplicate error due to our uniqueness constraint. It also makes sure units are
    not deleted from the source repository when they still belong to it.
    """
    # Put two units here with the same IDs but two different repo_ids, and then run the
    # migration.
    source_repo_group_id = add_unit('group', self.source_repo_id, ids.TYPE_ID_PKG_GROUP)
    dest_repo_group_id = add_unit('group', self.dest_repo_id, ids.TYPE_ID_PKG_GROUP)
    # Associate the source_repo_group_id with both source and destination repos
    associate_unit(source_repo_group_id, self.source_repo_id, ids.TYPE_ID_PKG_GROUP)
    associate_unit(source_repo_group_id, self.dest_repo_id, ids.TYPE_ID_PKG_GROUP)
    associate_unit(dest_repo_group_id, self.dest_repo_id, ids.TYPE_ID_PKG_GROUP)

    # Migrate should not cause a DuplicateKeyError
    self.migration.migrate()

    # Verify that both groups remain, because the migration should not have removed either
    group_collection = types_db.type_units_collection(ids.TYPE_ID_PKG_GROUP)
    all_groups = list(group_collection.find())
    self.assertEqual(len(all_groups), 2)
    self.assertEqual(
        group_collection.find({'id': 'group', 'repo_id': self.dest_repo_id}).count(), 1)
    self.assertEqual(
        group_collection.find({'id': 'group', 'repo_id': self.source_repo_id}).count(), 1)

    # Make sure that there are two associations, and that they are correct.
    query_manager = factory.repo_unit_association_query_manager()
    dest_units = query_manager.get_units(self.dest_repo_id)
    self.assertEqual(len(dest_units), 1)
    dest_unit = dest_units[0]
    self.assertEqual(dest_unit['unit_type_id'], ids.TYPE_ID_PKG_GROUP)
    self.assertEqual(dest_unit['unit_id'], dest_repo_group_id)
    source_units = query_manager.get_units(self.source_repo_id)
    self.assertEqual(len(source_units), 1)
    source_unit = source_units[0]
    self.assertEqual(source_unit['unit_type_id'], ids.TYPE_ID_PKG_GROUP)
    self.assertEqual(source_unit['unit_id'], source_repo_group_id)

    # Verify the repo counts
    self.assertEqual(
        Repo.get_collection().find({'id': 'source-repo'})[0]['content_unit_counts'],
        {'package_group': 1})
    self.assertEqual(
        Repo.get_collection().find({'id': 'dest-repo'})[0]['content_unit_counts'],
        {'package_group': 1})
def test_sync_with_sync_config_override(self):
    """
    Tests a sync when passing in an individual config of override options.
    """
    # Setup
    importer_config = {'thor': 'thor'}
    self.repo_manager.create_repo('repo-1')
    self.importer_manager.set_importer('repo-1', 'mock-importer', importer_config)

    # Test
    sync_config_override = {'clint': 'hawkeye'}
    self.sync_manager.sync('repo-1', sync_config_override=sync_config_override)

    # Verify
    repo = Repo.get_collection().find_one({'id': 'repo-1'})
    repo_importer = RepoImporter.get_collection().find_one(
        {'repo_id': 'repo-1', 'id': 'mock-importer'})

    # Database
    self.assertTrue(repo_importer['last_sync'] is not None)
    self.assertTrue(assert_last_sync_time(repo_importer['last_sync']))

    # Call into the importer
    sync_args = mock_plugins.MOCK_IMPORTER.sync_repo.call_args[0]
    self.assertEqual(repo['id'], sync_args[0].id)
    self.assertTrue(sync_args[1] is not None)
    self.assertEqual({}, sync_args[2].plugin_config)
    self.assertEqual(importer_config, sync_args[2].repo_plugin_config)
    self.assertEqual(sync_config_override, sync_args[2].override_config)
def clean(self):
    super(DistributorScratchpadMixinTests, self).clean()
    types_database.clean()
    Repo.get_collection().remove()
def test_sync(self, mock_finished, mock_started, mock_auto_distributors, mock_queue_publish):
    """
    Tests sync under normal conditions where everything is configured correctly. No importer
    config is specified.
    """
    # Setup
    sync_config = {'bruce': 'hulk', 'tony': 'ironman'}
    self.repo_manager.create_repo('repo-1')
    self.importer_manager.set_importer('repo-1', 'mock-importer', sync_config)
    mock_auto_distributors.return_value = [{'id': 'my_distributor'}]
    mock_queue_publish.return_value.task_id = 'abc123'

    # Test
    report = self.sync_manager.sync('repo-1', sync_config_override=None)

    # Verify
    repo = Repo.get_collection().find_one({'id': 'repo-1'})
    repo_importer = RepoImporter.get_collection().find_one(
        {'repo_id': 'repo-1', 'id': 'mock-importer'})

    # Database
    self.assertTrue(repo_importer['last_sync'] is not None)
    self.assertTrue(assert_last_sync_time(repo_importer['last_sync']))

    # Call into the Importer
    sync_args = mock_plugins.MOCK_IMPORTER.sync_repo.call_args[0]
    self.assertEqual(repo['id'], sync_args[0].id)
    self.assertTrue(sync_args[1] is not None)
    self.assertEqual({}, sync_args[2].plugin_config)
    self.assertEqual(sync_config, sync_args[2].repo_plugin_config)
    self.assertEqual({}, sync_args[2].override_config)

    # History Entry
    history = list(RepoSyncResult.get_collection().find({'repo_id': 'repo-1'}))
    self.assertEqual(1, len(history))
    self.assertEqual('repo-1', history[0]['repo_id'])
    self.assertEqual(RepoSyncResult.RESULT_SUCCESS, history[0]['result'])
    self.assertEqual('mock-importer', history[0]['importer_id'])
    self.assertEqual('mock-importer', history[0]['importer_type_id'])
    self.assertTrue(history[0]['started'] is not None)
    self.assertTrue(history[0]['completed'] is not None)
    self.assertEqual(10, history[0]['added_count'])
    self.assertEqual(1, history[0]['removed_count'])
    self.assertTrue(history[0]['summary'] is not None)
    self.assertTrue(history[0]['details'] is not None)
    self.assertTrue(history[0]['error_message'] is None)
    self.assertTrue(history[0]['exception'] is None)
    self.assertTrue(history[0]['traceback'] is None)

    self.assertEqual(1, mock_started.call_count)
    self.assertEqual('repo-1', mock_started.call_args[0][0])
    self.assertEqual(1, mock_finished.call_count)
    self.assertEqual('repo-1', mock_finished.call_args[0][0]['repo_id'])

    # auto publish tests
    mock_auto_distributors.assert_called_once_with('repo-1')
    mock_queue_publish.assert_called_once_with('repo-1', 'my_distributor')
    self.assertTrue(isinstance(report, TaskResult))
    self.assertEqual(report.spawned_tasks, [{'task_id': 'abc123'}])
def clean(self):
    base.PulpServerTests.clean(self)
    Repo.get_collection().remove()
    RepoImporter.get_collection().remove()
def clean(self):
    super(RepoUnitAssociationManagerTests, self).clean()
    database.clean()
    RepoContentUnit.get_collection().remove()
    RepoImporter.get_collection().remove()
    Repo.get_collection().remove()
def tearDown(self):
    super(RepoGroupTests, self).tearDown()
    self.manager = None
    Repo.get_collection().remove(safe=True)
    RepoGroup.get_collection().remove(safe=True)
    RepoGroupDistributor.get_collection().remove(safe=True)
def update_importer_config(repo_id, importer_config):
    """
    Attempts to update the saved configuration for the given repo's importer. The importer
    will be asked if the new configuration is valid. If not, this method will raise an error
    and the existing configuration will remain unchanged.

    :param repo_id: identifies the repo
    :type repo_id: str
    :param importer_config: new configuration values to use for this repo
    :type importer_config: dict
    :raise MissingResource: if the given repo does not exist
    :raise MissingResource: if the given repo does not have an importer
    :raise InvalidConfiguration: if the plugin indicates the given configuration is invalid
    """
    repo_coll = Repo.get_collection()
    importer_coll = RepoImporter.get_collection()

    # Input Validation
    repo = repo_coll.find_one({'id': repo_id})
    if repo is None:
        raise MissingResource(repo_id)

    repo_importer = importer_coll.find_one({'repo_id': repo_id})
    if repo_importer is None:
        raise MissingResource(repo_id)

    importer_type_id = repo_importer['importer_type_id']
    importer_instance, plugin_config = plugin_api.get_importer_by_id(importer_type_id)

    # The supplied config is a delta of changes to make to the existing config.
    # The plugin expects a full configuration, so we apply those changes to
    # the original config and pass that to the plugin's validate method.
    merged_config = dict(repo_importer['config'])

    # The convention is that None in an update is removing the value and
    # setting it to the default. Find all such properties in this delta and
    # remove them from the existing config if they are there.
    unset_property_names = [k for k in importer_config if importer_config[k] is None]
    for key in unset_property_names:
        merged_config.pop(key, None)
        importer_config.pop(key, None)

    # Whatever is left over are the changed/added values, so merge them in.
    merged_config.update(importer_config)

    # Let the importer plugin verify the configuration
    call_config = PluginCallConfiguration(plugin_config, merged_config)
    transfer_repo = common_utils.to_transfer_repo(repo)
    transfer_repo.working_dir = common_utils.importer_working_dir(importer_type_id, repo_id)

    try:
        result = importer_instance.validate_config(transfer_repo, call_config)

        # For backward compatibility with plugins that don't yet return the tuple
        if isinstance(result, bool):
            valid_config = result
            message = None
        else:
            valid_config, message = result
    except Exception, e:
        msg = _('Exception received from importer [%(i)s] while validating config for repo '
                '[%(r)s]')
        msg = msg % {'i': importer_type_id, 'r': repo_id}
        logger.exception(msg)
        raise PulpDataException(e.args), None, sys.exc_info()[2]
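# A minimal, self-contained sketch of the delta-merge convention used by
# update_importer_config above: a key set to None in the update is removed from the stored
# config so the plugin default applies. The helper name and the 'feed'/'max_speed' keys are
# illustrative assumptions, not part of the Pulp API.
def _merge_config_delta(stored_config, config_delta):
    """Hypothetical helper mirroring the merge logic above."""
    merged = dict(stored_config)
    for key in [k for k, v in config_delta.items() if v is None]:
        # None means: drop the stored value and fall back to the plugin default
        merged.pop(key, None)
    merged.update((k, v) for k, v in config_delta.items() if v is not None)
    return merged

# Example (assumed key names): unset 'max_speed', change 'feed'
# _merge_config_delta({'feed': 'http://example.com/old', 'max_speed': '1000'},
#                     {'feed': 'http://example.com/new', 'max_speed': None})
# -> {'feed': 'http://example.com/new'}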
def update_distributor_config(repo_id, distributor_id, distributor_config, auto_publish=None):
    """
    Attempts to update the saved configuration for the given distributor. The distributor
    will be asked if the new configuration is valid. If not, this method will raise an error
    and the existing configuration will remain unchanged.

    :param repo_id: identifies the repo
    :type repo_id: str
    :param distributor_id: identifies the distributor on the repo
    :type distributor_id: str
    :param distributor_config: new configuration values to use
    :type distributor_config: dict
    :param auto_publish: if true, this distributor is used automatically during a sync
                         operation
    :type auto_publish: bool
    :return: the updated distributor
    :rtype: dict
    :raise MissingResource: if the given repo or distributor doesn't exist
    :raise PulpDataException: if the plugin rejects the given changes
    """
    repo_coll = Repo.get_collection()
    distributor_coll = RepoDistributor.get_collection()

    # Input Validation
    repo = repo_coll.find_one({'id': repo_id})
    if repo is None:
        raise MissingResource(repository=repo_id)

    repo_distributor = distributor_coll.find_one({'repo_id': repo_id, 'id': distributor_id})
    if repo_distributor is None:
        raise MissingResource(distributor=distributor_id)

    distributor_type_id = repo_distributor['distributor_type_id']
    distributor_instance, plugin_config = plugin_api.get_distributor_by_id(
        distributor_type_id)

    # The supplied config is a delta of changes to make to the existing config.
    # The plugin expects a full configuration, so we apply those changes to
    # the original config and pass that to the plugin's validate method.
    merged_config = dict(repo_distributor['config'])

    # The convention is that None in an update is removing the value and
    # setting it to the default. Find all such properties in this delta and
    # remove them from the existing config if they are there.
    unset_property_names = [k for k in distributor_config if distributor_config[k] is None]
    for key in unset_property_names:
        merged_config.pop(key, None)
        distributor_config.pop(key, None)

    # Whatever is left over are the changed/added values, so merge them in.
    merged_config.update(distributor_config)

    # Let the distributor plugin verify the configuration
    call_config = PluginCallConfiguration(plugin_config, merged_config)
    transfer_repo = common_utils.to_transfer_repo(repo)
    config_conduit = RepoConfigConduit(distributor_type_id)

    try:
        result = distributor_instance.validate_config(transfer_repo, call_config,
                                                      config_conduit)

        # For backward compatibility with plugins that don't yet return the tuple
        if isinstance(result, bool):
            valid_config = result
            message = None
        else:
            valid_config, message = result
    except Exception, e:
        msg = _('Exception raised from distributor [%(d)s] while validating config for repo '
                '[%(r)s]')
        msg = msg % {'d': distributor_type_id, 'r': repo_id}
        _logger.exception(msg)
        raise PulpDataException(e.args), None, sys.exc_info()[2]
def add_distributor(self, repo_id, distributor_type_id, repo_plugin_config, auto_publish,
                    distributor_id=None):
    """
    Adds an association from the given repository to a distributor.

    The association will be tracked through the distributor_id; each distributor on a given
    repository must have a unique ID. If this is not specified, one will be generated. If a
    distributor already exists on the repo for the given ID, the existing one will be removed
    and replaced with the newly configured one.

    @param repo_id: identifies the repo
    @type repo_id: str
    @param distributor_type_id: identifies the distributor; must correspond to a distributor
                                loaded at server startup
    @type distributor_type_id: str
    @param repo_plugin_config: configuration the repo will use with this distributor; may be
                               None
    @type repo_plugin_config: dict
    @param auto_publish: if true, this distributor will be invoked at the end of every sync
    @type auto_publish: bool
    @param distributor_id: unique ID to refer to this distributor for this repo
    @type distributor_id: str
    @return: ID assigned to the distributor (only valid in conjunction with the repo)
    @raise MissingResource: if the given repo_id does not refer to a valid repo
    @raise InvalidValue: if the distributor ID is provided and unacceptable
    @raise InvalidDistributorConfiguration: if the distributor plugin does not accept the
                                            given configuration
    """
    repo_coll = Repo.get_collection()
    distributor_coll = RepoDistributor.get_collection()

    # Validation
    repo = repo_coll.find_one({'id': repo_id})
    if repo is None:
        raise MissingResource(repository=repo_id)
    if not plugin_api.is_valid_distributor(distributor_type_id):
        raise InvalidValue(['distributor_type_id'])

    # Determine the ID for this distributor on this repo; will be
    # unique for all distributors on this repository but not globally
    if distributor_id is None:
        distributor_id = str(uuid.uuid4())
    else:
        # Validate if one was passed in
        if not is_distributor_id_valid(distributor_id):
            raise InvalidValue(['distributor_id'])

    distributor_instance, plugin_config = plugin_api.get_distributor_by_id(
        distributor_type_id)

    # Convention is that a value of None means unset. Remove any keys that
    # are explicitly set to None so the plugin will default them.
    if repo_plugin_config is not None:
        clean_config = dict([(k, v) for k, v in repo_plugin_config.items() if v is not None])
    else:
        clean_config = None

    # Let the distributor plugin verify the configuration
    call_config = PluginCallConfiguration(plugin_config, clean_config)
    transfer_repo = common_utils.to_transfer_repo(repo)
    transfer_repo.working_dir = common_utils.distributor_working_dir(
        distributor_type_id, repo_id)

    query_manager = manager_factory.repo_query_manager()
    related_repos = query_manager.find_with_distributor_type(distributor_type_id)

    transfer_related_repos = []
    for r in related_repos:
        all_configs = [d['config'] for d in r['distributors']]
        trr = common_utils.to_related_repo(r, all_configs)
        transfer_related_repos.append(trr)

    try:
        result = distributor_instance.validate_config(transfer_repo, call_config,
                                                      transfer_related_repos)

        # For backward compatibility with plugins that don't yet return the tuple
        if isinstance(result, bool):
            valid_config = result
            message = None
        else:
            valid_config, message = result
    except Exception, e:
        _LOG.exception('Exception received from distributor [%s] while validating config'
                       % distributor_type_id)
        raise PulpDataException(e.args), None, sys.exc_info()[2]
def clean(self):
    super(RepoSyncConduitTests, self).clean()
    RepoContentUnit.get_collection().remove()
    Repo.get_collection().remove()
def update_distributor_config(self, repo_id, distributor_id, distributor_config):
    """
    Attempts to update the saved configuration for the given distributor. The distributor
    will be asked if the new configuration is valid. If not, this method will raise an error
    and the existing configuration will remain unchanged.

    @param repo_id: identifies the repo
    @type repo_id: str
    @param distributor_id: identifies the distributor on the repo
    @type distributor_id: str
    @param distributor_config: new configuration values to use
    @type distributor_config: dict
    @return: the updated distributor
    @rtype: dict
    @raise MissingResource: if the given repo or distributor doesn't exist
    @raise PulpDataException: if the plugin rejects the given changes
    """
    repo_coll = Repo.get_collection()
    distributor_coll = RepoDistributor.get_collection()

    # Input Validation
    repo = repo_coll.find_one({'id': repo_id})
    if repo is None:
        raise MissingResource(repository=repo_id)

    repo_distributor = distributor_coll.find_one({'repo_id': repo_id, 'id': distributor_id})
    if repo_distributor is None:
        raise MissingResource(distributor=distributor_id)

    distributor_type_id = repo_distributor['distributor_type_id']
    distributor_instance, plugin_config = plugin_api.get_distributor_by_id(
        distributor_type_id)

    # The supplied config is a delta of changes to make to the existing config.
    # The plugin expects a full configuration, so we apply those changes to
    # the original config and pass that to the plugin's validate method.
    merged_config = dict(repo_distributor['config'])

    # The convention is that None in an update is removing the value and
    # setting it to the default. Find all such properties in this delta and
    # remove them from the existing config if they are there.
    unset_property_names = [k for k in distributor_config if distributor_config[k] is None]
    for key in unset_property_names:
        merged_config.pop(key, None)
        distributor_config.pop(key, None)

    # Whatever is left over are the changed/added values, so merge them in.
    merged_config.update(distributor_config)

    # Let the distributor plugin verify the configuration
    call_config = PluginCallConfiguration(plugin_config, merged_config)
    transfer_repo = common_utils.to_transfer_repo(repo)
    transfer_repo.working_dir = common_utils.distributor_working_dir(
        distributor_type_id, repo_id)

    query_manager = manager_factory.repo_query_manager()
    related_repos = query_manager.find_with_distributor_type(distributor_type_id)

    transfer_related_repos = []
    for r in related_repos:
        # Don't include the repo being updated in this list
        if r['id'] == repo_id:
            continue
        all_configs = [d['config'] for d in r['distributors']]
        trr = common_utils.to_related_repo(r, all_configs)
        transfer_related_repos.append(trr)

    try:
        result = distributor_instance.validate_config(transfer_repo, call_config,
                                                      transfer_related_repos)

        # For backward compatibility with plugins that don't yet return the tuple
        if isinstance(result, bool):
            valid_config = result
            message = None
        else:
            valid_config, message = result
    except Exception, e:
        _LOG.exception('Exception raised from distributor [%s] while validating config for '
                       'repo [%s]' % (distributor_type_id, repo_id))
        raise PulpDataException(e.args), None, sys.exc_info()[2]
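# The validate_config backward-compatibility handling above (older plugins return a bare
# bool, newer ones a (valid, message) tuple) can be factored into a small helper. This is a
# minimal sketch for illustration; the helper name is an assumption, not code from Pulp.
def _unpack_validation_result(result):
    """Normalize a plugin validate_config() return value to (valid, message)."""
    if isinstance(result, bool):
        return result, None
    return result

# _unpack_validation_result(True)            -> (True, None)
# _unpack_validation_result((False, 'bad'))  -> (False, 'bad')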
    Tests cleanup is successful if the distributor list is malformed.
    """
    # Test
    distributors = 'bad data'

    # Verify
    try:
        self.manager.create_and_configure_repo('repo-1', distributor_list=distributors)
        self.fail()
    except exceptions.InvalidValue, e:
        self.assertEqual(e.property_names[0], 'distributor_list')

    # Verify the repo was deleted
    repo = Repo.get_collection().find_one({'id': 'repo-1'})
    self.assertTrue(repo is None)

def test_create_and_configure_bad_distributor_in_list(self):
    """
    Tests cleanup is successful if the distributor list is malformed.
    """
    # Test
    distributors = ['bad-data']

    # Verify
    try:
        self.manager.create_and_configure_repo('repo-1', distributor_list=distributors)
        self.fail()
# Delete the repository working directory
repo_working_dir = common_utils.repository_working_dir(repo_id, mkdir=False)
if os.path.exists(repo_working_dir):
    try:
        shutil.rmtree(repo_working_dir)
    except Exception, e:
        logger.exception('Error while deleting repo working dir [%s] for repo [%s]' %
                         (repo_working_dir, repo_id))
        error_tuples.append(e)

# Database Updates
try:
    Repo.get_collection().remove({'id': repo_id}, safe=True)

    # Remove all importers and distributors from the repo. This is likely already done by
    # the calls to other methods in this manager, but in case those failed we still want
    # to attempt to keep the database clean.
    RepoDistributor.get_collection().remove({'repo_id': repo_id}, safe=True)
    RepoImporter.get_collection().remove({'repo_id': repo_id}, safe=True)
    RepoSyncResult.get_collection().remove({'repo_id': repo_id}, safe=True)
    RepoPublishResult.get_collection().remove({'repo_id': repo_id}, safe=True)
def clean(self):
    super(ImporterScratchPadMixinTests, self).clean()
    types_database.clean()
    Repo.get_collection().remove()
def publish_history(self, repo_id, distributor_id, limit=None, sort=constants.SORT_DESCENDING,
                    start_date=None, end_date=None):
    """
    Returns publish history entries for the given repo, sorted from most recent to oldest.
    If there are no entries, an empty list is returned.

    :param repo_id: identifies the repo
    :type repo_id: str
    :param distributor_id: identifies the distributor to retrieve history for
    :type distributor_id: str
    :param limit: if specified, the query will only return up to this amount of entries.
                  The default is to return the entire publish history.
    :type limit: int
    :param sort: indicates the sort direction of the results, which are sorted by start date.
                 Options are "ascending" and "descending". Descending is the default.
    :type sort: str
    :param start_date: if specified, no events prior to this date will be returned. Expected
                       to be an iso8601 datetime string.
    :type start_date: str
    :param end_date: if specified, no events after this date will be returned. Expected to
                     be an iso8601 datetime string.
    :type end_date: str
    :return: list of publish history result instances
    :rtype: list
    :raise MissingResource: if repo_id does not reference a valid repo
    :raise InvalidValue: if one or more of the options have invalid values
    """
    # Validation
    repo = Repo.get_collection().find_one({'id': repo_id})
    if repo is None:
        raise MissingResource(repo_id)

    dist = RepoDistributor.get_collection().find_one({'repo_id': repo_id,
                                                      'id': distributor_id})
    if dist is None:
        raise MissingResource(distributor_id)

    invalid_values = []

    # Verify the limit makes sense
    if limit is not None:
        try:
            limit = int(limit)
            if limit < 1:
                invalid_values.append('limit')
        except ValueError:
            invalid_values.append('limit')

    # Verify the sort direction is valid
    if sort not in constants.SORT_DIRECTION:
        invalid_values.append('sort')

    # Verify that start_date and end_date are valid
    if start_date is not None:
        try:
            dateutils.parse_iso8601_datetime(start_date)
        except (ValueError, isodate.ISO8601Error):
            invalid_values.append('start_date')
    if end_date is not None:
        try:
            dateutils.parse_iso8601_datetime(end_date)
        except (ValueError, isodate.ISO8601Error):
            invalid_values.append('end_date')

    # Report any invalid values
    if invalid_values:
        raise InvalidValue(invalid_values)

    # Assemble the mongo search parameters
    search_params = {'repo_id': repo_id, 'distributor_id': distributor_id}
    date_range = {}
    if start_date:
        date_range['$gte'] = start_date
    if end_date:
        date_range['$lte'] = end_date
    if len(date_range) > 0:
        search_params['started'] = date_range

    # Retrieve the entries
    cursor = RepoPublishResult.get_collection().find(search_params)
    # Sort the results on the 'started' field. By default, descending order is used.
    cursor.sort('started', direction=constants.SORT_DIRECTION[sort])
    if limit is not None:
        cursor.limit(limit)

    return list(cursor)
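# A minimal usage sketch for publish_history, assuming a manager instance exposing it and an
# existing repo/distributor pair; the repo ID, distributor ID, and dates are examples only.
def example_recent_publishes(manager, repo_id, distributor_id):
    # Hypothetical caller: fetch the ten most recent publish results within a date window.
    return manager.publish_history(
        repo_id, distributor_id,
        limit=10,
        sort=constants.SORT_DESCENDING,
        start_date='2013-01-01T00:00:00Z',
        end_date='2013-12-31T00:00:00Z')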
def tearDown(self):
    super(QueryTests, self).tearDown()
    Repo.get_collection().remove()
    RepoContentUnit.get_collection().remove()
    unit_db.clean()
def clean(self):
    super(RepoManagerTests, self).clean()
    Repo.get_collection().remove()
    RepoImporter.get_collection().remove()
    RepoDistributor.get_collection().remove()
def publish(repo_id, distributor_id, publish_config_override=None):
    """
    Requests the given distributor publish the repository it is configured on.

    The publish operation is executed synchronously in the caller's thread and will block
    until it is completed. The caller must take the necessary steps to address the fact that
    a publish call may be time intensive.

    @param repo_id: identifies the repo being published
    @type repo_id: str
    @param distributor_id: identifies the repo's distributor to publish
    @type distributor_id: str
    @param publish_config_override: optional config values to use for this publish call only
    @type publish_config_override: dict, None

    :return: report of the details of the publish
    :rtype: pulp.server.db.model.repository.RepoPublishResult
    """
    repo_coll = Repo.get_collection()
    distributor_coll = RepoDistributor.get_collection()

    # Validation
    repo = repo_coll.find_one({'id': repo_id})
    if repo is None:
        raise MissingResource(repo_id)

    repo_distributor = distributor_coll.find_one({'repo_id': repo_id, 'id': distributor_id})
    if repo_distributor is None:
        raise MissingResource(repository=repo_id, distributor=distributor_id)

    distributor_instance, distributor_config = RepoPublishManager.\
        _get_distributor_instance_and_config(repo_id, distributor_id)

    # Assemble the data needed for the publish
    conduit = RepoPublishConduit(repo_id, distributor_id)
    call_config = PluginCallConfiguration(distributor_config, repo_distributor['config'],
                                          publish_config_override)
    transfer_repo = common_utils.to_transfer_repo(repo)
    transfer_repo.working_dir = common_utils.distributor_working_dir(
        repo_distributor['distributor_type_id'], repo_id, mkdir=True)

    # Fire events describing the publish state
    fire_manager = manager_factory.event_fire_manager()
    fire_manager.fire_repo_publish_started(repo_id, distributor_id)
    result = RepoPublishManager._do_publish(repo, distributor_id, distributor_instance,
                                            transfer_repo, conduit, call_config)
    fire_manager.fire_repo_publish_finished(result)

    return result
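# A minimal usage sketch for publish(); the distributor ID and override keys below are
# illustrative assumptions, not values defined in this module. Note that the call blocks
# until the distributor finishes.
def example_publish_with_override(repo_id):
    # Hypothetical override passed only for this publish call; the stored distributor
    # config is left unchanged.
    return publish(repo_id, 'yum_distributor',
                   publish_config_override={'http': False, 'https': True})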
def clean(self):
    super(RepoGroupCollectionTests, self).clean()
    Repo.get_collection().remove()
    RepoGroup.get_collection().remove()
def add_distributor(repo_id, distributor_type_id, repo_plugin_config, auto_publish,
                    distributor_id=None):
    """
    Adds an association from the given repository to a distributor.

    The association will be tracked through the distributor_id; each distributor on a given
    repository must have a unique ID. If this is not specified, one will be generated. If a
    distributor already exists on the repo for the given ID, the existing one will be removed
    and replaced with the newly configured one.

    :param repo_id: identifies the repo
    :type repo_id: str
    :param distributor_type_id: identifies the distributor; must correspond to a distributor
                                loaded at server startup
    :type distributor_type_id: str
    :param repo_plugin_config: configuration the repo will use with this distributor; may be
                               None
    :type repo_plugin_config: dict
    :param auto_publish: if true, this distributor will be invoked at the end of every sync
    :type auto_publish: bool
    :param distributor_id: unique ID to refer to this distributor for this repo
    :type distributor_id: str
    :return: ID assigned to the distributor (only valid in conjunction with the repo)
    :raise MissingResource: if the given repo_id does not refer to a valid repo
    :raise InvalidValue: if the distributor ID is provided and unacceptable
    :raise InvalidDistributorConfiguration: if the distributor plugin does not accept the
                                            given configuration
    """
    repo_coll = Repo.get_collection()
    distributor_coll = RepoDistributor.get_collection()

    # Validation
    repo = repo_coll.find_one({'id': repo_id})
    if repo is None:
        raise MissingResource(repository=repo_id)
    if not plugin_api.is_valid_distributor(distributor_type_id):
        raise InvalidValue(['distributor_type_id'])

    # Determine the ID for this distributor on this repo; will be
    # unique for all distributors on this repository but not globally
    if distributor_id is None:
        distributor_id = str(uuid.uuid4())
    else:
        # Validate if one was passed in
        if not is_distributor_id_valid(distributor_id):
            raise InvalidValue(['distributor_id'])

    distributor_instance, plugin_config = plugin_api.get_distributor_by_id(
        distributor_type_id)

    # Convention is that a value of None means unset. Remove any keys that
    # are explicitly set to None so the plugin will default them.
    if repo_plugin_config is not None:
        clean_config = dict([(k, v) for k, v in repo_plugin_config.items() if v is not None])
    else:
        clean_config = None

    # Let the distributor plugin verify the configuration
    call_config = PluginCallConfiguration(plugin_config, clean_config)
    transfer_repo = common_utils.to_transfer_repo(repo)
    config_conduit = RepoConfigConduit(distributor_type_id)
    result = distributor_instance.validate_config(transfer_repo, call_config, config_conduit)

    # For backward compatibility with plugins that don't yet return the tuple
    if isinstance(result, bool):
        valid_config = result
        message = None
    else:
        valid_config, message = result

    if not valid_config:
        raise PulpDataException(message)

    # Remove the old distributor if it exists
    try:
        RepoDistributorManager.remove_distributor(repo_id, distributor_id)
    except MissingResource:
        pass  # if it didn't exist, no problem

    # Let the distributor plugin initialize the repository
    try:
        distributor_instance.distributor_added(transfer_repo, call_config)
    except Exception:
        msg = _('Error initializing distributor [%(d)s] for repo [%(r)s]')
        msg = msg % {'d': distributor_type_id, 'r': repo_id}
        _logger.exception(msg)
        raise PulpExecutionException(), None, sys.exc_info()[2]

    # Database Update
    distributor = RepoDistributor(repo_id, distributor_id, distributor_type_id, clean_config,
                                  auto_publish)
    distributor_coll.save(distributor, safe=True)

    return distributor
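# A minimal usage sketch for add_distributor(); the distributor type, config keys, and ID
# below are illustrative assumptions. Keys explicitly set to None are stripped before
# validation, so the plugin falls back to its own default for 'checksum_type' here.
def example_add_distributor(repo_id):
    return add_distributor(
        repo_id, 'yum_distributor',
        repo_plugin_config={'relative_url': 'repos/' + repo_id, 'checksum_type': None},
        auto_publish=True,
        distributor_id='yum_distributor')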
def remove_repodata_from_scratchpad(repo_id):
    repo_collection = Repo.get_collection()
    repo = repo_collection.find_one({'id': repo_id}, fields=['scratchpad'])
    repo['scratchpad'].pop('repodata', None)
    repo_collection.update({'id': repo_id},
                           {'$set': {'scratchpad': repo['scratchpad']}},
                           safe=True)
def sync(repo_id, sync_config_override=None):
    """
    Performs a synchronize operation on the given repository.

    The given repo must have an importer configured. The identity of the importer is not a
    parameter to this call; if multiple importers are eventually supported this will have to
    change to indicate which importer to use.

    This method is intentionally limited to synchronizing a single repo. Performing multiple
    repository syncs concurrently will require a more global view of the server and must be
    handled outside the scope of this class.

    @param repo_id: identifies the repo to sync
    @type repo_id: str
    @param sync_config_override: optional config containing values to use for this sync only
    @type sync_config_override: dict

    @return: The synchronization report.
    @rtype: L{pulp.server.plugins.model.SyncReport}

    @raise MissingResource: if repo_id does not refer to a valid repo
    @raise OperationFailed: if the given repo does not have an importer set
    """
    repo_coll = Repo.get_collection()

    # Validation
    repo = repo_coll.find_one({'id': repo_id})
    if repo is None:
        raise MissingResource(repo_id)

    importer_instance, importer_config = RepoSyncManager._get_importer_instance_and_config(
        repo_id)

    if importer_instance is None:
        raise MissingResource(repo_id)

    importer_manager = manager_factory.repo_importer_manager()
    repo_importer = importer_manager.get_importer(repo_id)

    # Assemble the data needed for the sync
    conduit = RepoSyncConduit(repo_id, repo_importer['id'],
                              RepoContentUnit.OWNER_TYPE_IMPORTER, repo_importer['id'])
    call_config = PluginCallConfiguration(importer_config, repo_importer['config'],
                                          sync_config_override)
    transfer_repo = common_utils.to_transfer_repo(repo)
    transfer_repo.working_dir = common_utils.importer_working_dir(
        repo_importer['importer_type_id'], repo_id, mkdir=True)

    # Fire events around the call
    fire_manager = manager_factory.event_fire_manager()
    fire_manager.fire_repo_sync_started(repo_id)
    sync_result = RepoSyncManager._do_sync(repo, importer_instance, transfer_repo, conduit,
                                           call_config)
    fire_manager.fire_repo_sync_finished(sync_result)

    if sync_result['result'] == RepoSyncResult.RESULT_FAILED:
        raise PulpExecutionException(_('Importer indicated a failed response'))

    repo_publish_manager = manager_factory.repo_publish_manager()
    auto_distributors = repo_publish_manager.auto_distributors(repo_id)
    spawned_tasks = []
    for distributor in auto_distributors:
        distributor_id = distributor['id']
        spawned_tasks.append(
            repo_publish_manager.queue_publish(repo_id, distributor_id).task_id)

    return TaskResult(sync_result, spawned_tasks=spawned_tasks)
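# A minimal usage sketch for sync(); the override key mirrors the per-call override pattern
# exercised in the tests above and is an illustrative assumption. A successful sync returns
# a TaskResult whose spawned_tasks lists any auto-publish tasks that were queued.
def example_sync_with_override(repo_id):
    return sync(repo_id, sync_config_override={'max_speed': '500'})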