Code example #1: asserting per-repo unit counts after a duplicate-NEVRA purge
    def test_remove_repo_duplicate_nevra_unit_counts(self):
        # ensure that the unit associations are correct for each repo after purge
        purge.remove_repo_duplicate_nevra(self.conduit.repo_id)

        # duplicate removal should have removed two duplicates of each type for repo A
        expected_rcu_count_a = 2 * len(self.UNIT_TYPES)
        self.assertEqual(platform_model.RepositoryContentUnit.objects.filter(
            repo_id=self.repo_a.repo_id).count(), expected_rcu_count_a)

        # repo B counts should be unchanged, since its duplicates were not purged
        expected_rcu_count_b = 4 * len(self.UNIT_TYPES)
        self.assertEqual(platform_model.RepositoryContentUnit.objects.filter(
            repo_id=self.repo_b.repo_id).count(), expected_rcu_count_b)

        # get a set of all the unit ids associated with the purged repo and demonstrate
        # that none of the duplicate unit ids are still associated with it
        repo_rcu = platform_model.RepositoryContentUnit.objects.filter(repo_id=self.repo_a.repo_id)
        repo_rcu_ids = set([rcu.unit_id for rcu in repo_rcu])
        self.assertFalse(self.duplicate_unit_ids.intersection(repo_rcu_ids))
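
For orientation, here is a minimal, hypothetical sketch of the policy this test verifies: within one repo, units that share a NEVRA key are duplicates, and only the most recently associated one survives a purge. The helper name and the (nevra, unit_id, updated) tuple layout are illustrative assumptions; the real purge.remove_repo_duplicate_nevra works on RepositoryContentUnit documents in the database.

    # Hypothetical sketch only -- not Pulp's purge.remove_repo_duplicate_nevra.
    # associations: iterable of (nevra_key, unit_id, updated_timestamp) tuples
    # for a single repo. Returns the unit ids a purge would dissociate: every
    # association except the newest one per NEVRA key.
    def duplicate_nevra_unit_ids(associations):
        newest = {}
        for nevra, unit_id, updated in associations:
            if nevra not in newest or updated > newest[nevra][1]:
                newest[nevra] = (unit_id, updated)
        keep = set(unit_id for unit_id, _ in newest.values())
        return set(unit_id for _, unit_id, _ in associations) - keep

The assertions above check exactly this outcome: repo A's remaining associations contain none of the known duplicate unit ids, while repo B is untouched.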
Code example #2: the run() sync workflow that invokes the purge, with mirror failover
    def run(self):
        """
        Steps through the entire workflow of a repo sync.

        :return:    A SyncReport detailing how the sync went
        :rtype:     pulp.plugins.model.SyncReport
        """
        # An empty list could be returned if _parse_as_mirrorlist()
        # was not able to find any valid url
        if not self.sync_feed:
            raise PulpCodedException(error_code=error_codes.RPM1004, reason='Not found')
        url_count = 0
        for url in self.sync_feed:
            # Verify that we have a feed url.
            # if there is no feed url, then we have nothing to sync
            if url is None:
                raise PulpCodedException(error_code=error_codes.RPM1005)
            # using this tmp dir ensures that cleanup leaves nothing behind, since
            # we delete it below
            self.tmp_dir = tempfile.mkdtemp(dir=self.working_dir)
            url_count += 1
            try:
                with self.update_state(self.progress_report['metadata']):
                    metadata_files = self.check_metadata(url)
                    metadata_files = self.get_metadata(metadata_files)

                    # Save the default checksum from the metadata
                    self.save_default_metadata_checksum_on_repo(metadata_files)

                with self.update_state(self.content_report) as skip:
                    if not (skip or self.skip_repomd_steps):
                        self.update_content(metadata_files, url)

                _logger.info(_('Downloading additional units.'))

                with self.update_state(self.distribution_report,
                                       models.Distribution._content_type_id) as skip:
                    if not skip:
                        dist_sync = DistSync(self, url)
                        dist_sync.run()

                with self.update_state(self.progress_report['errata'], ids.TYPE_ID_ERRATA) as skip:
                    if not (skip or self.skip_repomd_steps):
                        self.get_errata(metadata_files)

                with self.update_state(self.progress_report['comps']) as skip:
                    if not (skip or self.skip_repomd_steps):
                        self.get_comps_file_units(metadata_files, group.process_group_element,
                                                  group.GROUP_TAG)
                        self.get_comps_file_units(metadata_files, group.process_category_element,
                                                  group.CATEGORY_TAG)
                        self.get_comps_file_units(metadata_files, group.process_environment_element,
                                                  group.ENVIRONMENT_TAG)
                        self.get_comps_file_units(metadata_files, group.process_langpacks_element,
                                                  group.LANGPACKS_TAG)

                with self.update_state(self.progress_report['purge_duplicates']) as skip:
                    if not (skip or self.skip_repomd_steps):
                        purge.remove_repo_duplicate_nevra(self.conduit.repo_id)

            except PulpCodedException as e:
                # Check if the caught exception indicates that the mirror is bad.
                # Try next mirror in the list without raising the exception.
                # In case it was the last mirror in the list, raise the exception.
                bad_mirror_exceptions = [error_codes.RPM1004, error_codes.RPM1006]
                if (e.error_code in bad_mirror_exceptions) and \
                        url_count != len(self.sync_feed):
                    continue
                else:
                    self._set_failed_state(e)
                    raise

            except Exception as e:
                # In case other exceptions were caught that are not related to the state of
                # the mirror, raise the exception immediately and do not iterate through the
                # rest of the mirrors.
                _logger.exception(e)
                self._set_failed_state(e)
                report = self.conduit.build_failure_report(self._progress_summary,
                                                           self.progress_report)
                return report
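
Every step in run() is wrapped in `with self.update_state(...) as skip:`. A minimal sketch of that context-manager pattern follows; the state names and the simplified skip decision are assumptions (the real update_state also consults the importer's skip configuration), but the lifecycle is the point: mark the section running, yield a skip flag, and mark it finished or failed on exit.

    import contextlib

    # Sketch of the update_state pattern, assuming a dict-based report section.
    @contextlib.contextmanager
    def update_state(report_section, content_type=None, skip_types=()):
        report_section['state'] = 'RUNNING'
        skip = content_type is not None and content_type in skip_types
        try:
            yield skip
        except Exception:
            # An exception in the step's body marks the section failed and
            # then propagates to the surrounding sync loop.
            report_section['state'] = 'FAILED'
            raise
        else:
            report_section['state'] = 'SKIPPED' if skip else 'FINISHED'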
Code example #3: the same run() workflow re-wrapped to a narrower line length (note that this variant omits the langpacks comps step from example #2)
    def run(self):
        """
        Steps through the entire workflow of a repo sync.

        :return:    A SyncReport detailing how the sync went
        :rtype:     pulp.plugins.model.SyncReport
        """
        # An empty list could be returned if _parse_as_mirrorlist()
        # was not able to find any valid url
        if not self.sync_feed:
            raise PulpCodedException(error_code=error_codes.RPM1004,
                                     reason='Not found')
        url_count = 0
        for url in self.sync_feed:
            # Verify that we have a feed url.
            # if there is no feed url, then we have nothing to sync
            if url is None:
                raise PulpCodedException(error_code=error_codes.RPM1005)
            # using this tmp dir ensures that cleanup leaves nothing behind, since
            # we delete it below
            self.tmp_dir = tempfile.mkdtemp(dir=self.working_dir)
            url_count += 1
            try:
                with self.update_state(self.progress_report['metadata']):
                    metadata_files = self.check_metadata(url)
                    metadata_files = self.get_metadata(metadata_files)

                    # Save the default checksum from the metadata
                    self.save_default_metadata_checksum_on_repo(metadata_files)

                with self.update_state(self.content_report) as skip:
                    if not (skip or self.skip_repomd_steps):
                        self.update_content(metadata_files, url)

                _logger.info(_('Downloading additional units.'))

                with self.update_state(
                        self.distribution_report,
                        models.Distribution._content_type_id) as skip:
                    if not skip:
                        dist_sync = DistSync(self, url)
                        dist_sync.run()

                with self.update_state(self.progress_report['errata'],
                                       ids.TYPE_ID_ERRATA) as skip:
                    if not (skip or self.skip_repomd_steps):
                        self.get_errata(metadata_files)

                with self.update_state(self.progress_report['comps']) as skip:
                    if not (skip or self.skip_repomd_steps):
                        self.get_comps_file_units(metadata_files,
                                                  group.process_group_element,
                                                  group.GROUP_TAG)
                        self.get_comps_file_units(
                            metadata_files, group.process_category_element,
                            group.CATEGORY_TAG)
                        self.get_comps_file_units(
                            metadata_files, group.process_environment_element,
                            group.ENVIRONMENT_TAG)

                with self.update_state(
                        self.progress_report['purge_duplicates']) as skip:
                    if not (skip or self.skip_repomd_steps):
                        purge.remove_repo_duplicate_nevra(self.conduit.repo_id)

            except PulpCodedException as e:
                # Check if the caught exception indicates that the mirror is bad.
                # Try next mirror in the list without raising the exception.
                # In case it was the last mirror in the list, raise the exception.
                bad_mirror_exceptions = [
                    error_codes.RPM1004, error_codes.RPM1006
                ]
                if (e.error_code in bad_mirror_exceptions) and \
                        url_count != len(self.sync_feed):
                    continue
                else:
                    self._set_failed_state(e)
                    raise

            except Exception as e:
                # In case other exceptions were caught that are not related to the state of
                # the mirror, raise the exception immediately and do not iterate through the
                # rest of the mirrors.
                _logger.exception(e)
                self._set_failed_state(e)
                report = self.conduit.build_failure_report(
                    self._progress_summary, self.progress_report)
                return report