Example #1
    def publish_repo(self, repo, publish_conduit, config):
        publish_start_time = time.time()
        _LOG.info("Start publish time %s" % publish_start_time)
        progress_status = {
            "rpms":               {"state": "NOT_STARTED"},
            "errata":             {"state": "NOT_STARTED"},
            "distribution":       {"state": "NOT_STARTED"},
            "metadata":           {"state": "NOT_STARTED"},
            "packagegroups":      {"state": "NOT_STARTED"},
            "isos":               {"state": "NOT_STARTED"},
            "publish_http":       {"state": "NOT_STARTED"},
            "publish_https":      {"state": "NOT_STARTED"},
            }

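        # relay each step's progress status to the server via the conduit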
        def progress_callback(type_id, status):
            progress_status[type_id] = status
            publish_conduit.set_progress(progress_status)

        self.repo_working_dir = repo_working_dir = repo.working_dir

        if self.cancelled:
            return publish_conduit.build_failure_report(self.summary, self.details)

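        # unit types the caller asked to skip, plus an optional errata date-range filter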
        skip_types = config.get("skip") or []
        repo_exporter = RepoExporter(repo_working_dir, skip=skip_types)
        date_filter = repo_exporter.create_date_range_filter(config)
        groups_xml_path = None
        updateinfo_xml_path = None
        if date_filter:
            # export errata by date and associated rpm units
            progress_status["errata"]["state"] = "STARTED"
            criteria = UnitAssociationCriteria(type_ids=[TYPE_ID_ERRATA], unit_filters=date_filter)
            errata_units = publish_conduit.get_units(criteria=criteria)
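            # gather the repo's rpm-type units (plus drpms) and keep only those referenced by the selected errata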
            rpm_units = self.__get_rpm_units(publish_conduit)
            drpm_criteria = UnitAssociationCriteria(type_ids=TYPE_ID_DRPM)
            rpm_units += publish_conduit.get_units(criteria=drpm_criteria)
            rpm_units = repo_exporter.get_errata_rpms(errata_units, rpm_units)
            rpm_summary, rpm_errors = repo_exporter.export_rpms(rpm_units, progress_callback=progress_callback)
            if self.cancelled:
                return publish_conduit.build_failure_report(self.summary, self.details)
            updateinfo_xml_path = updateinfo.updateinfo(errata_units, repo_working_dir)
            progress_status["errata"]["num_success"] = len(errata_units)
            progress_status["errata"]["state"] = "FINISHED"
            self.summary = dict(self.summary.items() + rpm_summary.items())
            self.summary["num_errata_units_exported"] = len(errata_units)
            self.details["errors"] = rpm_errors
        else:
            # export everything
            # export rpms
            rpm_units = self.__get_rpm_units(publish_conduit)
            drpm_criteria = UnitAssociationCriteria(type_ids=TYPE_ID_DRPM)
            rpm_units += publish_conduit.get_units(criteria=drpm_criteria)
            rpm_summary, rpm_errors = repo_exporter.export_rpms(rpm_units, progress_callback=progress_callback)
            # export package groups
            groups_xml_path = None
            if "packagegroup" not in skip_types:
                progress_status["packagegroups"]["state"] = "STARTED"
                criteria = UnitAssociationCriteria(type_ids=[TYPE_ID_PKG_GROUP, TYPE_ID_PKG_CATEGORY])
                existing_units = publish_conduit.get_units(criteria=criteria)
                existing_groups = filter(lambda u : u.type_id in [TYPE_ID_PKG_GROUP], existing_units)
                existing_cats = filter(lambda u : u.type_id in [TYPE_ID_PKG_CATEGORY], existing_units)
                groups_xml_path = comps_util.write_comps_xml(repo_working_dir, existing_groups, existing_cats)
                self.summary["num_package_groups_exported"] = len(existing_groups)
                self.summary["num_package_categories_exported"] = len(existing_cats)
                progress_status["packagegroups"]["state"] = "FINISHED"
            else:
                progress_status["packagegroups"]["state"] = "SKIPPED"
                _LOG.info("packagegroup unit type in skip list [%s]; skipping export" % skip_types)

            if self.cancelled:
                return publish_conduit.build_failure_report(self.summary, self.details)

            # export errata
            updateinfo_xml_path = None
            if 'erratum' not in skip_types:
                progress_status["errata"]["state"] = "STARTED"
                criteria = UnitAssociationCriteria(type_ids=[TYPE_ID_ERRATA])
                errata_units = publish_conduit.get_units(criteria=criteria)
                progress_status["errata"]["state"] = "IN_PROGRESS"
                updateinfo_xml_path = updateinfo.updateinfo(errata_units, repo_working_dir)
                progress_status["errata"]["num_success"] = len(errata_units)
                progress_status["errata"]["state"] = "FINISHED"
                self.summary["num_errata_units_exported"] = len(errata_units)
            else:
                #errata_summary, errata_errors = repo_exporter.export_errata(errata_units, progress_callback=progress_callback)
                progress_status["errata"]["state"] = "SKIPPED"
                _LOG.info("errata unit type in skip list [%s]; skipping export" % skip_types)
            # distro units
            criteria = UnitAssociationCriteria(type_ids=[TYPE_ID_DISTRO])
            distro_units = publish_conduit.get_units(criteria=criteria)
            distro_summary, distro_errors = repo_exporter.export_distributions(distro_units, progress_callback=progress_callback)
            # sum up summary and details
            self.details["errors"] = rpm_errors + distro_errors
            self.summary = dict(self.summary.items() + rpm_summary.items() + distro_summary.items())
        # generate metadata
        metadata_status, metadata_errors = metadata.generate_yum_metadata(
            repo_working_dir, publish_conduit, config, progress_callback, is_cancelled=self.cancelled,
            group_xml_path=groups_xml_path, updateinfo_xml_path=updateinfo_xml_path, repo_scratchpad=publish_conduit.get_repo_scratchpad())
        _LOG.info("metadata generation complete at target location %s" % repo_working_dir)
        self.details["errors"] += metadata_errors
        # build iso and publish via HTTPS
        self._publish_isos(repo, config, progress_callback=progress_callback)
        _LOG.info("Publish complete:  summary = <%s>, details = <%s>" % (self.summary, self.details))
        # remove exported content from the working directory
        iso_util.cleanup_working_dir(self.repo_working_dir)
        if self.details["errors"]:
            return publish_conduit.build_failure_report(self.summary, self.details)
        return publish_conduit.build_success_report(self.summary, self.details)
Example #2
    def publish_group(self, repo_group, publish_conduit, config):
        """
        see parent class for doc string
        """
        self.group_working_dir = group_working_dir = repo_group.working_dir
        skip_types = config.get("skip") or []
        self.init_group_progress()
        self.group_progress_status["group-id"] = repo_group.id

        # progress callback for group status
        def group_progress_callback(type_id, status):
            self.group_progress_status[type_id] = status
            publish_conduit.set_progress(self.group_progress_status)

        # loop through each repo in the group and perform exports
        for repoid in repo_group.repo_ids:
            _LOG.info("Exporting repo %s " % repoid)
            summary = {}
            details = {}
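            # per-repo progress status; rolled into the group-level status once this repo is exported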
            progress_status = {
                                    "rpms":               {"state": "NOT_STARTED"},
                                    "errata":             {"state": "NOT_STARTED"},
                                    "distribution":       {"state": "NOT_STARTED"},
                                    "packagegroups":      {"state": "NOT_STARTED"},}
            def progress_callback(type_id, status):
                progress_status[type_id] = status
                publish_conduit.set_progress(progress_status)
            repo_working_dir = "%s/%s" % (group_working_dir, repoid)
            repo_exporter = RepoExporter(repo_working_dir, skip=skip_types)
            # check if any datefilter is set on the distributor
            date_filter = repo_exporter.create_date_range_filter(config)
            _LOG.info("repo working dir %s" % repo_working_dir)
            if date_filter:
                # If a date range is specified, we only export the errata within that range
                # and associated rpm units. This might change once we have dates associated
                # to other units.
                criteria = UnitAssociationCriteria(type_ids=[TYPE_ID_ERRATA], unit_filters=date_filter)
                errata_units = publish_conduit.get_units(repoid, criteria=criteria)
                # we only include binary and source; drpms are not associated to errata
                criteria = UnitAssociationCriteria(type_ids=[TYPE_ID_RPM, TYPE_ID_SRPM])
                rpm_units = publish_conduit.get_units(repoid, criteria=criteria)
                rpm_units = repo_exporter.get_errata_rpms(errata_units, rpm_units)
                rpm_status, rpm_errors = repo_exporter.export_rpms(rpm_units, progress_callback=progress_callback)
                if self.canceled:
                    return publish_conduit.build_failure_report(summary, details)

                # generate metadata
                metadata_status, metadata_errors = metadata.generate_metadata(
                       repo_working_dir, publish_conduit, config, progress_callback)
                _LOG.info("metadata generation complete at target location %s" % repo_working_dir)
                # export errata and generate updateinfo xml
                errata_status, errata_errors = repo_exporter.export_errata(errata_units, progress_callback=progress_callback)

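                # roll up per-repo counts and any errors from the export steps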
                summary["num_package_units_attempted"] = len(rpm_units)
                summary["num_package_units_exported"] = len(rpm_units) - len(rpm_errors)
                summary["num_package_units_errors"] = len(rpm_errors)
                details["errors"] = rpm_errors +  errata_errors + metadata_errors
            else:

                # export rpm units(this includes binary, source and delta)
                criteria = UnitAssociationCriteria(type_ids=[TYPE_ID_RPM, TYPE_ID_SRPM, TYPE_ID_DRPM])
                rpm_units = publish_conduit.get_units(repoid, criteria)
                rpm_status, rpm_errors = repo_exporter.export_rpms(rpm_units, progress_callback=progress_callback)
                summary["num_package_units_attempted"] = len(rpm_units)
                summary["num_package_units_exported"] = len(rpm_units) - len(rpm_errors)
                summary["num_package_units_errors"] = len(rpm_errors)

                # export package groups information and generate comps.xml
                groups_xml_path = None
                if "packagegroup" not in skip_types:
                    progress_status["packagegroups"]["state"] = "STARTED"
                    criteria = UnitAssociationCriteria(type_ids=[TYPE_ID_PKG_GROUP, TYPE_ID_PKG_CATEGORY])
                    existing_units = publish_conduit.get_units(repoid, criteria)
                    existing_groups = filter(lambda u : u.type_id in [TYPE_ID_PKG_GROUP], existing_units)
                    existing_cats = filter(lambda u : u.type_id in [TYPE_ID_PKG_CATEGORY], existing_units)
                    groups_xml_path = comps_util.write_comps_xml(repo_working_dir, existing_groups, existing_cats)
                    summary["num_package_groups_exported"] = len(existing_groups)
                    summary["num_package_categories_exported"] = len(existing_cats)
                    progress_status["packagegroups"]["state"] = "FINISHED"
                else:
                    progress_status["packagegroups"]["state"] = "SKIPPED"
                    _LOG.info("packagegroup unit type in skip list [%s]; skipping export" % skip_types)

                if self.canceled:
                    return publish_conduit.build_failure_report(summary, details)

                # generate metadata
                metadata_status, metadata_errors = metadata.generate_metadata(
                        repo_working_dir, publish_conduit, config, progress_callback, groups_xml_path)
                _LOG.info("metadata generation complete at target location %s" % repo_working_dir)

                # export errata units and associated rpms
                criteria = UnitAssociationCriteria(type_ids=[TYPE_ID_ERRATA])
                errata_units = publish_conduit.get_units(repoid, criteria)
                errata_status, errata_errors = repo_exporter.export_errata(errata_units, progress_callback=progress_callback)
                summary["num_errata_units_exported"] = len(errata_units)

                # export distributions
                criteria = UnitAssociationCriteria(type_ids=[TYPE_ID_DISTRO])
                distro_units = publish_conduit.get_units(repoid, criteria)
                distro_status, distro_errors = repo_exporter.export_distributions(distro_units, progress_callback=progress_callback)
                summary["num_distribution_units_attempted"] = len(distro_units)
                summary["num_distribution_units_exported"] = len(distro_units) - len(distro_errors)
                summary["num_distribution_units_errors"] = len(distro_errors)

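                # record this repo's progress in the group-level status and report it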
                self.group_progress_status["repositories"][repoid] = progress_status
                self.set_progress("repositories", self.group_progress_status["repositories"], group_progress_callback)

                details["errors"] = rpm_errors + distro_errors + errata_errors + metadata_errors
                self.group_summary[repoid] = summary
                self.group_details[repoid] = details

        # generate and publish isos
        self._publish_isos(repo_group, config, progress_callback=group_progress_callback)
        _LOG.info("Publish complete:  summary = <%s>, details = <%s>" % (self.group_summary, self.group_details))

        # remove exported content from the working directory
        iso_util.cleanup_working_dir(self.group_working_dir)

        # check for any errors
        if any(self.group_details[repoid]["errors"] for repoid in self.group_details):
            return publish_conduit.build_failure_report(self.group_summary, self.group_details)

        return publish_conduit.build_success_report(self.group_summary, self.group_details)