def download_succeeded(self, report):
    """
    Handle a completed RPM download.

    The downloaded file is removed when this method exits. After the
    superclass verifies the download, the package header is read so the
    signing key can be stored on the unit, the unit is added and its content
    imported, and the unit is associated when it passes the signature filter.

    :param report: report for the finished download; ``report.data`` is the
        unit and ``report.destination`` is the path of the downloaded file
    """
    with util.deleting(report.destination):
        unit = report.data
        try:
            super(RPMListener, self).download_succeeded(report)
        except (verification.VerificationException, util.InvalidChecksumType):
            # Verification failed, so the base class did not add the unit.
            return
        # The header must be readable so the signature can be extracted;
        # if it cannot be read, the unit is never added.
        pkg_headers = rpm_parse.package_headers(report.destination)
        unit['signing_key'] = rpm_parse.package_signature(pkg_headers)
        added_unit = self.sync.add_rpm_unit(self.metadata_files, unit)
        try:
            added_unit.safe_import_content(report.destination)
        except verification.VerificationException as exc:
            # Size verification failed on import; report it and re-raise.
            failure_details = {
                constants.NAME: unit.name,
                constants.UNIT_KEY: unit.unit_key,
                constants.ERROR_CODE: constants.ERROR_SIZE_VERIFICATION,
                constants.ERROR_KEY_EXPECTED_SIZE: unit.size,
                constants.ERROR_KEY_ACTUAL_SIZE: exc[0],
            }
            self.sync.progress_report['content'].failure(unit, failure_details)
            raise
        if self.sync.signature_filter_passed(added_unit):
            self.sync.associate_rpm_unit(added_unit)
        if not added_unit.downloaded:
            added_unit.downloaded = True
            added_unit.save()
def test_package_signature_from_header(self):
    """
    Verify package_signature() extracts the 8-character short key id
    from the header of a real signed RPM fixture.
    """
    sample_rpm_filename = os.path.join(DATA_DIR, 'walrus-5.21-1.noarch.rpm')
    headers = rpm.package_headers(sample_rpm_filename)
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...))
    self.assertIsInstance(headers, rpm_module.hdr)
    signature = rpm.package_signature(headers)
    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(signature, 'f78fb195')
    self.assertEqual(len(signature), 8)
def download_succeeded(self, report):
    """
    Process one successfully downloaded RPM, cleaning up the file afterwards.

    :param report: download report; carries the unit in ``report.data`` and
        the downloaded file path in ``report.destination``
    """
    destination = report.destination
    with util.deleting(destination):
        unit = report.data
        try:
            super(RPMListener, self).download_succeeded(report)
        except (verification.VerificationException, util.InvalidChecksumType):
            # verification failed, unit not added
            return
        # we need to read package header in order to extract signature info
        # unit is not added if header cannot be read
        headers = rpm_parse.package_headers(destination)
        unit["signing_key"] = rpm_parse.package_signature(headers)
        added_unit = self.sync.add_rpm_unit(self.metadata_files, unit)
        try:
            added_unit.safe_import_content(destination)
        except verification.VerificationException as e:
            # Record the size mismatch in the progress report, then propagate.
            actual_size = e[0]
            self.sync.progress_report["content"].failure(unit, {
                constants.NAME: unit.name,
                constants.UNIT_KEY: unit.unit_key,
                constants.ERROR_CODE: constants.ERROR_SIZE_VERIFICATION,
                constants.ERROR_KEY_EXPECTED_SIZE: unit.size,
                constants.ERROR_KEY_ACTUAL_SIZE: actual_size,
            })
            raise
        if self.sync.signature_filter_passed(added_unit):
            self.sync.associate_rpm_unit(added_unit)
        if added_unit.downloaded:
            return
        added_unit.downloaded = True
        added_unit.save()
def _extract_drpm_data(drpm_filename):
    """
    Extract a dict of information for a given DRPM.

    :param drpm_filename: full path to the package to analyze
    :type  drpm_filename: str

    :return: dict of data about the package
    :rtype: dict
    """
    delta_headers = rpm_parse.drpm_package_info(drpm_filename)
    try:
        # "handle" rpm-only drpms (without rpm header)
        rpm_headers = rpm_parse.package_headers(drpm_filename)
    except rpm.error:
        raise RPMOnlyDRPMsAreNotSupported(drpm_filename)

    old_nevr = rpm_parse.nevr(delta_headers["old_nevr"])
    new_nevr = rpm_parse.nevr(delta_headers["nevr"])
    old_name, old_epoch, old_version, old_release = old_nevr
    new_name, new_epoch, new_version, new_release = new_nevr

    # The delta filename embeds both the old and the new EVR strings.
    filename = "drpms/%s-%s_%s.%s.drpm" % (
        new_name,
        rpm_parse.evr_to_str(*rpm_parse.nevr_to_evr(*old_nevr)),
        rpm_parse.evr_to_str(*rpm_parse.nevr_to_evr(*new_nevr)),
        rpm_headers['arch'])

    drpm_data = {
        'signing_key': rpm_parse.package_signature(rpm_headers),
        'arch': rpm_headers['arch'],
        'sequence': delta_headers["old_nevr"] + "-" + delta_headers["seq"],
        'epoch': str(new_epoch),
        'oldepoch': str(old_epoch),
        'version': str(new_version),
        'oldversion': str(old_version),
        'release': new_release,
        'oldrelease': old_release,
        'new_package': new_name,
        'size': os.stat(drpm_filename)[stat.ST_SIZE],
        'filename': filename,
    }
    return _encode_as_utf8(drpm_data)
def _extract_drpm_data(drpm_filename):
    """
    Extract a dict of information for a given DRPM.

    :param drpm_filename: full path to the package to analyze
    :type  drpm_filename: str

    :return: dict of data about the package
    :rtype: dict
    """
    delta_info = rpm_parse.drpm_package_info(drpm_filename)
    try:
        # "handle" rpm-only drpms (without rpm header)
        rpm_headers = rpm_parse.package_headers(drpm_filename)
    except rpm.error:
        raise RPMOnlyDRPMsAreNotSupported(drpm_filename)

    old_nevr = rpm_parse.nevr(delta_info["old_nevr"])
    new_nevr = rpm_parse.nevr(delta_info["nevr"])
    old_name, old_epoch, old_version, old_release = old_nevr
    new_name, new_epoch, new_version, new_release = new_nevr

    drpm_data = dict()
    drpm_data.update(
        signing_key=rpm_parse.package_signature(rpm_headers),
        arch=rpm_headers['arch'],
        sequence=delta_info["old_nevr"] + "-" + delta_info["seq"],
        epoch=str(new_epoch),
        oldepoch=str(old_epoch),
        version=str(new_version),
        oldversion=str(old_version),
        release=new_release,
        oldrelease=old_release,
        new_package=new_name,
        size=os.stat(drpm_filename)[stat.ST_SIZE],
    )
    # Filename embeds the old and new EVR so each delta is unique on disk.
    old_evr = rpm_parse.nevr_to_evr(*old_nevr)
    new_evr = rpm_parse.nevr_to_evr(*new_nevr)
    drpm_data['filename'] = "drpms/%s-%s_%s.%s.drpm" % (
        new_name, rpm_parse.evr_to_str(*old_evr),
        rpm_parse.evr_to_str(*new_evr), drpm_data['arch'])
    return _encode_as_utf8(drpm_data)
def download_succeeded(self, report):
    """
    Handle a successfully downloaded RPM.

    Lets the superclass verify the download, reads the package header,
    adds the unit via the sync object, imports its content, and marks the
    unit downloaded. The downloaded file is deleted on exit.

    :param report: report for the completed download; ``report.data`` holds
        the unit and ``report.destination`` the downloaded file's path
    """
    with util.deleting(report.destination):
        unit = report.data
        try:
            super(RPMListener, self).download_succeeded(report)
        except (verification.VerificationException, util.InvalidChecksumType):
            # verification failed, unit not added
            return
        # we need to read package header in order to extract signature info
        # unit is not added if header cannot be read
        headers = rpm_parse.package_headers(report.destination)
        added_unit = self.sync.add_rpm_unit(self.metadata_files, unit)
        added_unit.safe_import_content(report.destination)
        if not added_unit.downloaded:
            added_unit.downloaded = True
            # NOTE(review): the signature is only recorded when the unit was
            # not already marked downloaded -- confirm that previously
            # downloaded units are meant to keep their existing 'signature'.
            added_unit['signature'] = rpm_parse.package_signature(headers)
            added_unit.save()
def download_succeeded(self, report):
    """
    Finish processing a downloaded RPM and clean up the temporary file.

    :param report: download report; ``report.data`` is the unit and
        ``report.destination`` is the path of the downloaded file
    """
    destination = report.destination
    with util.deleting(destination):
        unit = report.data
        try:
            super(RPMListener, self).download_succeeded(report)
        except (verification.VerificationException, util.InvalidChecksumType):
            # Checksum verification failed; the base class did not add the unit.
            return
        # Read the package header so the signing key can be recorded on the
        # unit; a unit with an unreadable header is never added.
        unit['signing_key'] = rpm_parse.package_signature(
            rpm_parse.package_headers(destination))
        added_unit = self.sync.add_rpm_unit(self.metadata_files, unit)
        added_unit.safe_import_content(destination)
        if self.sync.signature_filter_passed(added_unit):
            self.sync.associate_rpm_unit(added_unit)
        if added_unit.downloaded:
            return
        added_unit.downloaded = True
        added_unit.save()
def _extract_rpm_data(type_id, rpm_filename):
    """
    Extract a dict of information for a given RPM or SRPM.

    :param type_id: The type of the unit that is being generated
    :type  type_id: str
    :param rpm_filename: full path to the package to analyze
    :type  rpm_filename: str

    :return: dict of data about the package
    :rtype: dict

    :raises PulpCodedValidationException: RPM1002 when a source package is
        uploaded under a non-SRPM type id, RPM1003 when a binary package is
        uploaded under a non-RPM type id
    """
    rpm_data = dict()

    # Read the RPM header attributes for use later
    headers = rpm_parse.package_headers(rpm_filename)

    for k in ['name', 'version', 'release', 'epoch']:
        rpm_data[k] = headers[k]

    # normalize a missing epoch to the string "0"
    if rpm_data['epoch'] is not None:
        rpm_data['epoch'] = str(rpm_data['epoch'])
    else:
        rpm_data['epoch'] = str(0)

    # Consistency fix: the original read this flag both as
    # headers['sourcepackage'] and headers[rpm.RPMTAG_SOURCEPACKAGE];
    # evaluate it once and use one accessor throughout.
    is_source = bool(headers['sourcepackage'])

    if is_source:
        # a "nosrc" source package carries the NOSOURCE tag
        if RPMTAG_NOSOURCE in headers.keys():
            rpm_data['arch'] = 'nosrc'
        else:
            rpm_data['arch'] = 'src'
    else:
        rpm_data['arch'] = headers['arch']

    # construct filename from metadata (BZ #1101168)
    if is_source:
        if type_id != models.SRPM._content_type_id.default:
            raise PulpCodedValidationException(error_code=error_codes.RPM1002)
        rpm_basefilename = "%s-%s-%s.src.rpm" % (headers['name'],
                                                 headers['version'],
                                                 headers['release'])
    else:
        if type_id != models.RPM._content_type_id.default:
            raise PulpCodedValidationException(error_code=error_codes.RPM1003)
        rpm_basefilename = "%s-%s-%s.%s.rpm" % (headers['name'],
                                                headers['version'],
                                                headers['release'],
                                                headers['arch'])

    rpm_data['relativepath'] = rpm_basefilename
    rpm_data['filename'] = rpm_basefilename

    # This format is, and has always been, incorrect. As of the new yum importer, the
    # plugin will generate these from the XML snippet because the API into RPM headers
    # is atrocious. This is the end game for this functionality anyway, moving all of
    # that metadata derivation into the plugin, so this is just a first step.
    # I'm leaving these in and commented to show how not to do it.
    # rpm_data['requires'] = [(r,) for r in headers['requires']]
    # rpm_data['provides'] = [(p,) for p in headers['provides']]

    rpm_data['buildhost'] = headers['buildhost']
    rpm_data['license'] = headers['license']
    rpm_data['vendor'] = headers['vendor']
    rpm_data['description'] = headers['description']
    rpm_data['build_time'] = headers[rpm.RPMTAG_BUILDTIME]
    # Use the mtime of the file to match what is in the generated xml from
    # rpm_parse.get_package_xml(..)
    file_stat = os.stat(rpm_filename)
    rpm_data['time'] = file_stat[stat.ST_MTIME]

    rpm_data['signing_key'] = rpm_parse.package_signature(headers)

    return _encode_as_utf8(rpm_data)
def _extract_rpm_data(type_id, rpm_filename):
    """
    Extract a dict of information for a given RPM or SRPM.

    :param type_id: The type of the unit that is being generated
    :type  type_id: str
    :param rpm_filename: full path to the package to analyze
    :type  rpm_filename: str

    :return: dict of data about the package
    :rtype: dict
    """
    # Read the RPM header attributes for use later
    headers = rpm_parse.package_headers(rpm_filename)

    rpm_data = {field: headers[field]
                for field in ('name', 'version', 'release', 'epoch')}

    # normalize a missing epoch to the string "0"
    epoch = rpm_data['epoch']
    rpm_data['epoch'] = str(epoch) if epoch is not None else str(0)

    if headers['sourcepackage']:
        # a "nosrc" source package carries the NOSOURCE tag
        rpm_data['arch'] = 'nosrc' if RPMTAG_NOSOURCE in headers.keys() else 'src'
    else:
        rpm_data['arch'] = headers['arch']

    # construct filename from metadata (BZ #1101168)
    if headers[rpm.RPMTAG_SOURCEPACKAGE]:
        if type_id != models.SRPM._content_type_id.default:
            raise PulpCodedValidationException(error_code=error_codes.RPM1002)
        rpm_basefilename = "%s-%s-%s.src.rpm" % (
            headers['name'], headers['version'], headers['release'])
    else:
        if type_id != models.RPM._content_type_id.default:
            raise PulpCodedValidationException(error_code=error_codes.RPM1003)
        rpm_basefilename = "%s-%s-%s.%s.rpm" % (
            headers['name'], headers['version'], headers['release'],
            headers['arch'])

    rpm_data['relativepath'] = rpm_basefilename
    rpm_data['filename'] = rpm_basefilename

    # This format is, and has always been, incorrect. As of the new yum importer, the
    # plugin will generate these from the XML snippet because the API into RPM headers
    # is atrocious. This is the end game for this functionality anyway, moving all of
    # that metadata derivation into the plugin, so this is just a first step.
    # I'm leaving these in and commented to show how not to do it.
    # rpm_data['requires'] = [(r,) for r in headers['requires']]
    # rpm_data['provides'] = [(p,) for p in headers['provides']]

    for field in ('buildhost', 'license', 'vendor', 'description'):
        rpm_data[field] = headers[field]
    rpm_data['build_time'] = headers[rpm.RPMTAG_BUILDTIME]
    # Use the mtime of the file to match what is in the generated xml from
    # rpm_parse.get_package_xml(..)
    rpm_data['time'] = os.stat(rpm_filename)[stat.ST_MTIME]

    rpm_data['signing_key'] = rpm_parse.package_signature(headers)

    return _encode_as_utf8(rpm_data)
def _handle_package(repo, type_id, unit_key, metadata, file_path, conduit, config):
    """
    Handles the upload for an RPM, SRPM or DRPM.

    This inspects the package contents to determine field values. The unit_key
    and metadata fields overwrite field values determined through package
    inspection.

    :param repo: The repository to import the package into
    :type  repo: pulp.server.db.model.Repository
    :param type_id: The type_id of the package being uploaded
    :type  type_id: str
    :param unit_key: A dictionary of fields to overwrite introspected field values
    :type  unit_key: dict
    :param metadata: A dictionary of fields to overwrite introspected field values, or None
    :type  metadata: dict or None
    :param file_path: The path to the uploaded package
    :type  file_path: str
    :param conduit: provides access to relevant Pulp functionality
    :type  conduit: pulp.plugins.conduits.upload.UploadConduit
    :param config: plugin configuration for the repository
    :type  config: pulp.plugins.config.PluginCallConfiguration

    :raises PulpCodedException PLP1005: if the checksum type from the user is not recognized
    :raises PulpCodedException PLP1013: if the checksum value from the user does not validate

    NOTE(review): ``conduit`` is accepted but never used in this body --
    presumably kept for the upload-handler call signature; confirm.
    """
    # Build the unit model from the package itself: DRPMs via header
    # extraction, RPMs/SRPMs via the generated primary-XML snippet.
    try:
        if type_id == models.DRPM._content_type_id.default:
            unit = models.DRPM(**_extract_drpm_data(file_path))
        else:
            repodata = rpm_parse.get_package_xml(file_path,
                                                 sumtype=util.TYPE_SHA256)
            package_xml = (utils.fake_xml_element(
                repodata['primary'],
                constants.COMMON_NAMESPACE).find(primary.PACKAGE_TAG))
            unit = primary.process_package_element(package_xml)
    # NOTE(review): this broad except converts any parsing failure (including
    # programming errors) into RPM1016; the original exception is discarded.
    except Exception:
        raise PulpCodedException(error_codes.RPM1016)

    # metadata can be None
    metadata = metadata or {}
    model_class = plugin_api.get_unit_model_by_id(type_id)
    update_fields_inbound(model_class, unit_key or {})
    update_fields_inbound(model_class, metadata or {})

    # Compute every supported checksum of the uploaded file in one pass.
    with open(file_path) as fp:
        sums = util.calculate_checksums(fp, models.RpmBase.DEFAULT_CHECKSUM_TYPES)

    # validate checksum if possible
    if metadata.get('checksum'):
        checksumtype = metadata.pop('checksum_type', util.TYPE_SHA256)
        checksumtype = util.sanitize_checksum_type(checksumtype)
        if checksumtype not in sums:
            raise PulpCodedException(error_code=error_codes.RPM1009,
                                     checksumtype=checksumtype)
        if metadata['checksum'] != sums[checksumtype]:
            raise PulpCodedException(error_code=platform_errors.PLP1013)
        _LOGGER.debug(_('Upload checksum matches.'))

    # Save all uploaded RPMs with sha256 in the unit key, since we can now publish with other
    # types, regardless of what is in the unit key.
    unit.checksumtype = util.TYPE_SHA256
    unit.checksum = sums[util.TYPE_SHA256]
    # keep all available checksum values on the model
    unit.checksums = sums

    # Update the RPM-extracted data with anything additional the user specified.
    # Allow the user-specified values to override the extracted ones.
    for key, value in metadata.items():
        setattr(unit, key, value)
    for key, value in unit_key.items():
        setattr(unit, key, value)

    if type_id != models.DRPM._content_type_id.default:
        # Extract/adjust the repodata snippets
        unit.signing_key = rpm_parse.package_signature(
            rpm_parse.package_headers(file_path))
        # construct filename from metadata (BZ #1101168)
        if type_id == models.SRPM._content_type_id.default:
            rpm_basefilename = "%s-%s-%s.src.rpm" % (unit.name,
                                                     unit.version,
                                                     unit.release)
        else:
            rpm_basefilename = "%s-%s-%s.%s.rpm" % (unit.name,
                                                    unit.version,
                                                    unit.release,
                                                    unit.arch)
        unit.relativepath = rpm_basefilename
        unit.filename = rpm_basefilename
        _update_files(unit, repodata)
        unit.modify_xml(repodata)

    # check if the unit has duplicate nevra
    purge.remove_unit_duplicate_nevra(unit, repo)

    unit.set_storage_path(os.path.basename(file_path))
    try:
        unit.save_and_import_content(file_path)
    except TypeError:
        raise ModelInstantiationError()
    except NotUniqueError:
        # The unit already exists; fetch the stored copy and associate it.
        unit = unit.__class__.objects.filter(**unit.unit_key).first()

    if rpm_parse.signature_enabled(config):
        rpm_parse.filter_signature(unit, config)
    repo_controller.associate_single_unit(repo, unit)