def test_download_cancelled_in_succeeded(self, mock_started, mock_cancel):
    request_list = []
    _dir = self.populate_content(PRIMARY, 0, 5)
    for n in range(0, 5):
        unit_key = {
            'name': 'unit_%d' % n,
            'version': '1.0.%d' % n,
            'release': '1',
            'checksum': str(uuid4())
        }
        request = Request(
            TYPE_ID,
            unit_key,
            'file://%s/unit_%d' % (_dir, n),
            os.path.join(self.downloaded, 'unit_%d' % n))
        request_list.append(request)
    downloader = LocalFileDownloader(DownloaderConfig())
    container = ContentContainer(path=self.tmp_dir)
    container.refresh = Mock()
    event = CancelEvent(2)
    report = container.download(event, downloader, request_list)
    self.assertTrue(mock_started.called)
    self.assertTrue(mock_cancel.called)
    self.assertEqual(report.total_passes, 1)
    self.assertEqual(report.total_sources, 2)
    self.assertEqual(len(report.downloads), 1)
    self.assertEqual(report.downloads[PRIMARY_ID].total_succeeded, 5)
    self.assertEqual(report.downloads[PRIMARY_ID].total_failed, 0)
def __init__(self, sync_conduit, config):
    """
    Initialize an ISOSyncRun.

    :param sync_conduit: the sync conduit to use for this sync run.
    :type  sync_conduit: pulp.plugins.conduits.repo_sync.RepoSyncConduit
    :param config:       plugin configuration
    :type  config:       pulp.plugins.config.PluginCallConfiguration
    """
    self.sync_conduit = sync_conduit
    self._remove_missing_units = config.get(
        importer_constants.KEY_UNITS_REMOVE_MISSING,
        default=constants.CONFIG_UNITS_REMOVE_MISSING_DEFAULT)
    self._validate_downloads = config.get(
        importer_constants.KEY_VALIDATE,
        default=constants.CONFIG_VALIDATE_DEFAULT)
    self._repo_url = encode_unicode(config.get(importer_constants.KEY_FEED))
    # The _repo_url must end in a trailing slash, because we will use urljoin
    # to determine the path to PULP_MANIFEST later
    if self._repo_url[-1] != '/':
        self._repo_url = self._repo_url + '/'

    # Cast our config parameters to the correct types and use them to build a Downloader
    max_speed = config.get(importer_constants.KEY_MAX_SPEED)
    if max_speed is not None:
        max_speed = float(max_speed)
    max_downloads = config.get(importer_constants.KEY_MAX_DOWNLOADS)
    if max_downloads is not None:
        max_downloads = int(max_downloads)
    else:
        max_downloads = constants.CONFIG_MAX_DOWNLOADS_DEFAULT
    ssl_validation = config.get_boolean(importer_constants.KEY_SSL_VALIDATION)
    ssl_validation = ssl_validation if ssl_validation is not None else \
        constants.CONFIG_VALIDATE_DEFAULT
    downloader_config = {
        'max_speed': max_speed,
        'max_concurrent': max_downloads,
        'ssl_client_cert': config.get(importer_constants.KEY_SSL_CLIENT_CERT),
        'ssl_client_key': config.get(importer_constants.KEY_SSL_CLIENT_KEY),
        'ssl_ca_cert': config.get(importer_constants.KEY_SSL_CA_CERT),
        'ssl_validation': ssl_validation,
        'proxy_url': config.get(importer_constants.KEY_PROXY_HOST),
        'proxy_port': config.get(importer_constants.KEY_PROXY_PORT),
        'proxy_username': config.get(importer_constants.KEY_PROXY_USER),
        'proxy_password': config.get(importer_constants.KEY_PROXY_PASS)
    }
    downloader_config = DownloaderConfig(**downloader_config)

    # We will pass self as the event_listener, so that we can receive the
    # callbacks in this class
    if self._repo_url.lower().startswith('file'):
        self.downloader = LocalFileDownloader(downloader_config, self)
    else:
        self.downloader = HTTPThreadedDownloader(downloader_config, self)
    self.progress_report = SyncProgressReport(sync_conduit)
def test_publish(self, mock_repo_ctrl):
    # Setup
    self.populate()
    with mock_config.patch({'server': {'storage_dir': self.parentfs}}):
        # Test
        dist = NodesHttpDistributor()
        repo = Repository(self.REPO_ID)
        conduit = RepoPublishConduit(self.REPO_ID, constants.HTTP_DISTRIBUTOR)
        dist.publish_repo(repo, conduit, self.dist_conf())
        # Verify
        conf = DownloaderConfig()
        downloader = LocalFileDownloader(conf)
        pub = dist.publisher(repo, self.dist_conf())
        url = pathlib.url_join(pub.base_url, pub.manifest_path())
        working_dir = self.childfs
        manifest = RemoteManifest(url, downloader, working_dir)
        manifest.fetch()
        manifest.fetch_units()
        units = [u for u, r in manifest.get_units()]
        self.assertEqual(len(units), self.NUM_UNITS)
        for n in range(0, self.NUM_UNITS):
            unit = units[n]
            created = self.units[n]
            for p, v in unit['unit_key'].items():
                self.assertEqual(created[p], v)
            for p, v in unit['metadata'].items():
                if p in ('_ns', '_content_type_id'):
                    continue
                self.assertEqual(created[p], v)
            self.assertEqual(created.get('_storage_path'), unit['storage_path'])
            self.assertEqual(unit['type_id'], self.UNIT_TYPE_ID)
def test_download_to_stream(self):
    request_list = []
    _dir, cataloged = self.populate_catalog(ORPHANED, 0, 10)
    _dir, cataloged = self.populate_catalog(UNIT_WORLD, 0, 10)
    _dir = self.populate_content(PRIMARY, 0, 20)
    # unit-world
    for n in range(0, 10):
        request = Request(
            cataloged[n].type_id,
            cataloged[n].unit_key,
            'file://%s/unit_%d' % (_dir, n),
            StringIO())
        request_list.append(request)
    # primary
    for n in range(11, 20):
        unit_key = {
            'name': 'unit_%d' % n,
            'version': '1.0.%d' % n,
            'release': '1',
            'checksum': str(uuid4())
        }
        request = Request(
            TYPE_ID,
            unit_key,
            'file://%s/unit_%d' % (_dir, n),
            StringIO())
        request_list.append(request)
    downloader = LocalFileDownloader(DownloaderConfig())
    listener = Mock()
    container = ContentContainer(path=self.tmp_dir)
    container.threaded = False
    container.refresh = Mock()

    # test
    report = container.download(downloader, request_list, listener)

    # validation
    # unit-world
    for i in range(0, 10):
        request = request_list[i]
        self.assertTrue(request.downloaded)
        self.assertEqual(len(request.errors), 0)
        fp = request.destination
        s = fp.getvalue()
        self.assertTrue(UNIT_WORLD in s)
    # primary
    for i in range(11, len(request_list)):
        request = request_list[i]
        self.assertTrue(request.downloaded)
        self.assertEqual(len(request.errors), 0)
        fp = request.destination
        s = fp.getvalue()
        self.assertTrue(PRIMARY in s)
    self.assertEqual(report.total_sources, 2)
    self.assertEqual(len(report.downloads), 2)
    self.assertEqual(report.downloads[PRIMARY_ID].total_succeeded, 9)
    self.assertEqual(report.downloads[PRIMARY_ID].total_failed, 0)
    self.assertEqual(report.downloads[UNIT_WORLD].total_succeeded, 10)
    self.assertEqual(report.downloads[UNIT_WORLD].total_failed, 0)
def test_download_with_errors(self):
    request_list = []
    _dir, cataloged = self.populate_catalog(ORPHANED, 0, 10)
    _dir, cataloged = self.populate_catalog(UNDERGROUND, 0, 10)
    _dir, cataloged = self.populate_catalog(UNIT_WORLD, 0, 10)
    shutil.rmtree(_dir)
    _dir = self.populate_content(PRIMARY, 0, 20)
    # unit-world
    for n in range(0, 10):
        request = Request(
            cataloged[n].type_id,
            cataloged[n].unit_key,
            'file://%s/unit_%d' % (_dir, n),
            os.path.join(self.downloaded, 'unit_%d' % n))
        request_list.append(request)
    # primary
    for n in range(11, 20):
        unit_key = {
            'name': 'unit_%d' % n,
            'version': '1.0.%d' % n,
            'release': '1',
            'checksum': str(uuid4())
        }
        request = Request(
            TYPE_ID,
            unit_key,
            'file://%s/unit_%d' % (_dir, n),
            os.path.join(self.downloaded, 'unit_%d' % n))
        request_list.append(request)
    downloader = LocalFileDownloader(DownloaderConfig())
    listener = Mock()
    container = ContentContainer(path=self.tmp_dir)
    container.refresh = Mock()
    event = Event()

    # test
    report = container.download(event, downloader, request_list, listener)

    # validation
    # unit-world: the unit-world content was removed above, so each request
    # fails once there, falls back to the underground source, and succeeds
    # with exactly one recorded error.
    for i in range(0, 10):
        request = request_list[i]
        self.assertTrue(request.downloaded, msg='URL: %s' % request.url)
        self.assertEqual(len(request.errors), 1)
        with open(request.destination) as fp:
            s = fp.read()
            self.assertTrue(UNDERGROUND in s)
    # primary
    for i in range(11, len(request_list)):
        request = request_list[i]
        self.assertTrue(request.downloaded, msg='URL: %s' % request.url)
        self.assertEqual(len(request.errors), 0)
        with open(request.destination) as fp:
            s = fp.read()
            self.assertTrue(PRIMARY in s)
    self.assertEqual(report.total_sources, 2)
    self.assertEqual(len(report.downloads), 3)
    self.assertEqual(report.downloads[PRIMARY_ID].total_succeeded, 9)
    self.assertEqual(report.downloads[PRIMARY_ID].total_failed, 0)
    self.assertEqual(report.downloads[UNDERGROUND].total_succeeded, 10)
    self.assertEqual(report.downloads[UNDERGROUND].total_failed, 0)
    self.assertEqual(report.downloads[UNIT_WORLD].total_succeeded, 0)
    self.assertEqual(report.downloads[UNIT_WORLD].total_failed, 10)
def test_download_cancelled_during_refreshing(self):
    downloader = LocalFileDownloader(DownloaderConfig())
    container = ContentContainer(path=self.tmp_dir)
    container.collated = Mock()
    event = CancelEvent(1)
    report = container.download(event, downloader, [])
    self.assertFalse(container.collated.called)
    self.assertEqual(report.total_passes, 0)
    self.assertEqual(report.total_sources, 2)
    self.assertEqual(len(report.downloads), 0)
def test_publisher(self):
    # setup
    units = self.populate()

    # test
    # publish
    repo_id = 'test_repo'
    base_url = 'file://'
    publish_dir = os.path.join(self.tmpdir, 'nodes/repos')
    virtual_host = (publish_dir, publish_dir)
    with HttpPublisher(base_url, virtual_host, repo_id) as p:
        p.publish(units)
        p.commit()

    # verify
    conf = DownloaderConfig()
    downloader = LocalFileDownloader(conf)
    manifest_path = p.manifest_path()
    working_dir = os.path.join(self.tmpdir, 'working_dir')
    os.makedirs(working_dir)
    url = pathlib.url_join(base_url, manifest_path)
    manifest = RemoteManifest(url, downloader, working_dir)
    manifest.fetch()
    manifest.fetch_units()
    self.assertTrue(manifest.has_valid_units())
    units = manifest.get_units()
    n = 0
    for unit, ref in units:
        self.assertEqual(
            manifest.publishing_details[constants.BASE_URL],
            pathlib.url_join(base_url, publish_dir, repo_id))
        if n == 0:  # TARBALL
            path = pathlib.join(publish_dir, repo_id, unit[constants.TARBALL_PATH])
            self.assertTrue(os.path.isfile(path))
        else:
            path = pathlib.join(publish_dir, repo_id, unit[constants.RELATIVE_PATH])
            self.assertTrue(os.path.islink(path))
            self.assertEqual(unit[constants.FILE_SIZE], os.path.getsize(path))
        if n == 0:  # TARBALL
            path = pathlib.join(publish_dir, repo_id, unit[constants.TARBALL_PATH])
            tb = tarfile.open(path)
            try:
                files = sorted(tb.getnames())
            finally:
                tb.close()
            self.assertEqual(len(files), self.NUM_TARED_FILES)
        else:
            path = pathlib.join(publish_dir, repo_id, unit[constants.RELATIVE_PATH])
            with open(path, 'rb') as fp:
                unit_content = fp.read()
                self.assertEqual(unit_content, unit_content)
        self.assertEqual(unit['unit_key']['n'], n)
        n += 1
def test_download_with_unsupported_url(self):
    request_list = []
    _dir, cataloged = self.populate_catalog(UNSUPPORTED_PROTOCOL, 0, 10)
    _dir = self.populate_content(PRIMARY, 0, 20)
    # unit-world
    for n in range(0, 10):
        request = Request(
            cataloged[n].type_id,
            cataloged[n].unit_key,
            'file://%s/unit_%d' % (_dir, n),
            os.path.join(self.downloaded, 'unit_%d' % n))
        request_list.append(request)
    # primary
    for n in range(11, 20):
        unit_key = {
            'name': 'unit_%d' % n,
            'version': '1.0.%d' % n,
            'release': '1',
            'checksum': str(uuid4())
        }
        request = Request(
            TYPE_ID,
            unit_key,
            'file://%s/unit_%d' % (_dir, n),
            os.path.join(self.downloaded, 'unit_%d' % n))
        request_list.append(request)
    downloader = LocalFileDownloader(DownloaderConfig())
    listener = MockListener()
    container = ContentContainer(path=self.tmp_dir)
    container.refresh = Mock()
    event = Event()
    report = container.download(event, downloader, request_list, listener)
    for i in range(0, len(request_list)):
        request = request_list[i]
        self.assertTrue(request.downloaded)
        self.assertEqual(len(request.errors), 0)
        with open(request.destination) as fp:
            s = fp.read()
            self.assertTrue(PRIMARY in s)
    self.assertEqual(listener.download_started.call_count, len(request_list))
    self.assertEqual(listener.download_succeeded.call_count, len(request_list))
    self.assertEqual(listener.download_failed.call_count, 0)
    self.assertEqual(report.total_passes, 1)
    self.assertEqual(report.total_sources, 2)
    self.assertEqual(len(report.downloads), 1)
    self.assertEqual(report.downloads[PRIMARY_ID].total_succeeded, 19)
    self.assertEqual(report.downloads[PRIMARY_ID].total_failed, 0)
def test_round_trip(self):
    # Setup
    units = []
    manifest_path = os.path.join(self.tmp_dir, MANIFEST_FILE_NAME)
    for i in range(0, self.NUM_UNITS):
        unit = dict(unit_id=i, type_id='T', unit_key={})
        units.append(unit)
    units_path = os.path.join(self.tmp_dir, UNITS_FILE_NAME)
    writer = UnitWriter(units_path)
    for u in units:
        writer.add(u)
    writer.close()
    manifest = Manifest(manifest_path, self.MANIFEST_ID)
    manifest.units_published(writer)
    manifest.write()

    # Test
    cfg = DownloaderConfig()
    downloader = LocalFileDownloader(cfg)
    working_dir = os.path.join(self.tmp_dir, 'working_dir')
    os.makedirs(working_dir)
    path = os.path.join(self.tmp_dir, MANIFEST_FILE_NAME)
    url = 'file://%s' % path
    manifest = RemoteManifest(url, downloader, working_dir)
    manifest.fetch()
    manifest.fetch_units()

    # Verify
    self.assertTrue(manifest.is_valid())
    self.assertTrue(manifest.has_valid_units())
    units_in = []
    for unit, ref in manifest.get_units():
        units_in.append(unit)
        _unit = ref.fetch()
        self.assertEqual(unit, _unit)
    self.verify(units, units_in)
    # should already be unzipped
    self.assertTrue(manifest.is_valid())
    self.assertTrue(manifest.has_valid_units())
    self.assertFalse(manifest.units_path().endswith('.gz'))
    units_in = []
    for unit, ref in manifest.get_units():
        units_in.append(unit)
        _unit = ref.fetch()
        self.assertEqual(unit, _unit)
    self.verify(units, units_in)
def test_download_with_errors(self):
    request_list = []
    _dir, cataloged = self.populate_catalog(ORPHANED, 0, 1000)
    _dir, cataloged = self.populate_catalog(UNDERGROUND, 0, 1000)
    _dir, cataloged = self.populate_catalog(UNIT_WORLD, 0, 1000)
    shutil.rmtree(_dir)
    _dir = self.populate_content(PRIMARY, 0, 2000)
    # unit-world
    for n in range(0, 1000):
        request = Request(
            cataloged[n].type_id,
            cataloged[n].unit_key,
            'file://%s/unit_%d' % (_dir, n),
            os.path.join(self.downloaded, 'unit_%d' % n))
        request_list.append(request)
    # primary
    for n in range(1001, 2000):
        unit_key = {
            'name': 'unit_%d' % n,
            'version': '1.0.%d' % n,
            'release': '1',
            'checksum': str(uuid4())
        }
        request = Request(
            TYPE_ID,
            unit_key,
            'file://%s/unit_%d' % (_dir, n),
            os.path.join(self.downloaded, 'unit_%d' % n))
        request_list.append(request)
    downloader = LocalFileDownloader(DownloaderConfig())
    event = Event()
    threshold = len(request_list) * 0.10  # cancel after 10% started
    listener = TestListener(event, threshold)
    container = ContentContainer(path=self.tmp_dir)
    container.refresh = Mock()

    # test
    report = container.download(event, downloader, request_list, listener)

    # validation
    self.assertEqual(report.total_sources, 2)
    self.assertEqual(len(report.downloads), 2)
    self.assertTrue(0 < report.downloads[UNDERGROUND].total_succeeded < 500)
    self.assertEqual(report.downloads[UNDERGROUND].total_failed, 0)
    self.assertEqual(report.downloads[UNIT_WORLD].total_succeeded, 0)
    self.assertTrue(0 < report.downloads[UNIT_WORLD].total_failed < 1000)
def build_downloader(url, nectar_config):
    """
    Return a Nectar downloader for a URL with the given nectar config.

    :param url: The URL is used to determine the scheme so the correct type
                of downloader can be created.
    :type  url: basestring
    :param nectar_config: The configuration that should be used with the downloader
    :type  nectar_config: nectar.config.DownloaderConfig
    :return: A configured downloader.
    :rtype:  nectar.downloaders.base.Downloader
    :raise ValueError: When the URL scheme is not supported.
    """
    url = urlparse(url)
    scheme = url.scheme.lower()
    if scheme == 'file':
        return LocalFileDownloader(nectar_config)
    if scheme in ('http', 'https'):
        return HTTPThreadedDownloader(nectar_config)
    raise ValueError(_('Scheme "{s}" not supported').format(s=url.scheme))
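# A minimal usage sketch for build_downloader above (illustrative, not from
# the original sources): dispatch happens on the URL scheme, so file:// yields
# a LocalFileDownloader, http(s):// an HTTPThreadedDownloader, and anything
# else a ValueError. The bare DownloaderConfig() and the example URLs are
# assumptions; real callers derive the config from importer settings.
from nectar.config import DownloaderConfig

config = DownloaderConfig()
local = build_downloader('file:///var/exports/repo', config)    # LocalFileDownloader
remote = build_downloader('https://example.com/repo', config)   # HTTPThreadedDownloader
try:
    build_downloader('ftp://example.com/repo', config)
except ValueError:
    pass  # scheme "ftp" is not supported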
def initialize(self):
    """
    Set up the nectar downloader.

    Originally based on the ISO sync setup.
    """
    config = self.get_config()
    self._validate_downloads = config.get(importer_constants.KEY_VALIDATE, default=True)
    self._repo_url = encode_unicode(config.get(importer_constants.KEY_FEED))
    # The _repo_url must end in a trailing slash, because we will use
    # urljoin to determine the path later
    if self._repo_url[-1] != '/':
        self._repo_url = self._repo_url + '/'

    downloader_config = importer_config_to_nectar_config(config.flatten())

    # We will pass self as the event_listener, so that we can receive the
    # callbacks in this class
    if self._repo_url.lower().startswith('file'):
        self.downloader = LocalFileDownloader(downloader_config, self)
    else:
        self.downloader = HTTPThreadedDownloader(downloader_config, self)
def get_downloader(config, url, **options):
    """
    Get a configured downloader.

    :param config: A plugin configuration.
    :type  config: pulp.plugins.config.PluginCallConfiguration
    :param url: A URL.
    :type  url: str
    :param options: Extended configuration.
    :type  options: dict
    :return: A configured downloader.
    :rtype:  nectar.downloaders.base.Downloader
    :raise ValueError: when the URL scheme is not supported.
    """
    url = urlparse(url)
    nectar_config = importer_config_to_nectar_config(config.flatten())
    scheme = url.scheme.lower()
    if scheme == 'file':
        return LocalFileDownloader(nectar_config)
    if scheme in ('http', 'https'):
        return HTTPThreadedDownloader(nectar_config)
    raise ValueError(_('Scheme "{s}" not supported').format(s=url.scheme))
class LocalDownloader(BaseDownloader):
    """
    Used when the source for puppet modules is a directory local to the Pulp
    server.
    """

    def retrieve_metadata(self, progress_report):
        """
        Retrieves all metadata documents needed to fulfill the configuration
        set for the repository. The progress report will be updated as the
        downloads take place.

        :param progress_report: used to communicate the progress of this operation
        :type  progress_report: pulp_puppet.importer.sync_progress.ProgressReport
        :return: list of JSON documents describing all modules to import
        :rtype:  list
        """
        feed = self.config.get(constants.CONFIG_FEED)
        source_dir = feed[len('file://'):]
        metadata_filename = os.path.join(source_dir, constants.REPO_METADATA_FILENAME)

        # Only do one query for this implementation
        progress_report.metadata_query_finished_count = 0
        progress_report.metadata_query_total_count = 1
        progress_report.metadata_current_query = metadata_filename
        progress_report.update_progress()

        config = importer_config_to_nectar_config(self.config.flatten())
        listener = LocalMetadataDownloadEventListener(progress_report)
        self.downloader = LocalFileDownloader(config, listener)

        url = os.path.join(feed, constants.REPO_METADATA_FILENAME)
        destination = StringIO()
        request = DownloadRequest(url, destination)
        self.downloader.download([request])
        config.finalize()
        self.downloader = None

        for report in listener.failed_reports:
            raise FileRetrievalException(report.error_msg)

        return [destination.getvalue()]

    def retrieve_module(self, progress_report, module):
        """
        Retrieves the given module and returns where on disk it can be found.
        It is the caller's job to copy this file to where Pulp wants it to
        live as its final resting place. This downloader will then be allowed
        to clean up the downloaded file in the cleanup_module call.

        :param progress_report: used if any updates need to be made as the
               download runs
        :type  progress_report: pulp_puppet.importer.sync_progress.ProgressReport
        :param module: module to download
        :type  module: pulp_puppet.common.model.Module
        :return: full path to the temporary location where the module file is
        :rtype:  str
        """
        # Determine the full path to the existing module on disk. This assumes
        # a structure where the modules are located in the same directory as
        # specified in the feed.
        feed = self.config.get(constants.CONFIG_FEED)
        source_dir = feed[len('file://'):]
        module_filename = module.filename()
        full_filename = os.path.join(source_dir, module_filename)

        if not os.path.exists(full_filename):
            raise FileNotFoundException(full_filename)

        return full_filename

    def retrieve_modules(self, progress_report, module_list):
        """
        Batch version of the retrieve_module method.

        :param progress_report: used if any updates need to be made as the
               download runs
        :type  progress_report: pulp_puppet.importer.sync_progress.ProgressReport
        :param module_list: list of modules to be downloaded
        :type  module_list: iterable
        :return: list of full paths to the temporary locations where the
                 modules are
        :rtype:  list
        """
        return [self.retrieve_module(progress_report, module)
                for module in module_list]

    def cancel(self):
        """
        Cancel the current operation.
        """
        downloader = self.downloader
        if downloader is None:
            return
        downloader.cancel()
        downloader.config.finalize()

    def cleanup_module(self, module):
        """
        Called once the unit has been copied into Pulp's storage location to
        let the downloader do any post-processing it needs (for instance,
        deleting any temporary copies of the file).

        :param module: module to clean up
        :type  module: pulp_puppet.common.model.Module
        """
        # We don't want to delete the original location on disk, so do
        # nothing here.
        pass
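# A standalone sketch of the download-into-memory pattern retrieve_metadata
# relies on (illustrative only; the file:// path below is hypothetical).
# Giving DownloadRequest a StringIO destination makes LocalFileDownloader
# write the file's contents into the in-memory buffer instead of onto disk.
from StringIO import StringIO

from nectar.config import DownloaderConfig
from nectar.downloaders.local import LocalFileDownloader
from nectar.request import DownloadRequest

config = DownloaderConfig()
downloader = LocalFileDownloader(config)
destination = StringIO()
request = DownloadRequest('file:///var/exports/puppet/modules.json', destination)
downloader.download([request])  # blocks until the request list is processed
config.finalize()
metadata_document = destination.getvalue()  # the raw metadata text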
def get_downloader(self, conduit, config, url):
    """
    Get a downloader suited to the URL scheme: threaded HTTP for http(s)
    URLs, local-file for file URLs.
    """
    if url.startswith('http'):
        return HTTPThreadedDownloader(nectar_config(config))
    if url.startswith('file'):
        return LocalFileDownloader(nectar_config(config))
    raise ValueError('unsupported url')