def create_metafile(self, datafile):
	"""Write the metadata file for a source package.

	datafile is the (uncompressed) payload tarball; only its size is
	read here. Returns the path of the temporary metadata file.
	"""
	# Default every missing key to the empty string so the PACKAGE_INFO
	# template never raises KeyError on optional fields.
	info = collections.defaultdict(lambda: "")

	# Generic package information including Pakfire information.
	info.update({
		"pakfire_version" : PAKFIRE_VERSION,
		"type"            : "source",
	})

	# Include distribution information.
	info.update(self.pakfire.distro.info)
	info.update(self.pkg.info)

	# Size is the size of the (uncompressed) datafile.
	info["inst_size"] = self.getsize(datafile)

	# Update package information for string formatting.
	requires = [PACKAGE_INFO_DEPENDENCY_LINE % r for r in self.pkg.requires]
	info.update({
		"groups"   : " ".join(self.pkg.groups),
		"requires" : "\n".join(requires),
	})

	# Format description, wrapped to 80 columns.
	description = [PACKAGE_INFO_DESCRIPTION_LINE % l \
		for l in util.text_wrap(self.pkg.description, length=80)]
	info["description"] = "\n".join(description)

	# Build information.
	info.update({
		# Package is built right now.
		"build_time" : int(time.time()),
		"build_id"   : uuid.uuid4(),
	})

	# Arches equals supported arches.
	info["arch"] = self.pkg.supported_arches

	# Set UUID.
	# XXX replace this by the payload hash
	info.update({
		"uuid" : uuid.uuid4(),
	})

	metafile = self.mktemp()

	# Use a context manager so the file is closed even if the
	# template rendering fails.
	with open(metafile, "w") as f:
		f.write(PACKAGE_INFO % info)

	return metafile
def create_metafile(self, datafile):
	"""Write the metadata file for a binary package.

	Extracts datafile (the payload tarball) into a temporary directory,
	runs the dependency tracker over it, and renders the PACKAGE_INFO
	template. Returns the path of the temporary metadata file.
	"""
	# Default every missing key to the empty string so the PACKAGE_INFO
	# template never raises KeyError on optional fields.
	info = collections.defaultdict(lambda: "")

	# Extract datafile in temporary directory and scan for dependencies.
	tmpdir = self.mktemp(directory=True)

	if self.payload_compression == "xz":
		archive = tar.InnerTarFileXz.open(datafile)
	else:
		archive = tar.InnerTarFile.open(datafile)

	# Ensure the archive is closed even if extraction fails.
	try:
		archive.extractall(path=tmpdir)
	finally:
		archive.close()

	# Run the dependency tracker.
	self.pkg.track_dependencies(self.builder, tmpdir)

	# Generic package information including Pakfire information.
	info.update({
		"pakfire_version" : PAKFIRE_VERSION,
		"uuid"            : self.pkg.uuid,
		"type"            : "binary",
	})

	# Include distribution information.
	info.update(self.pakfire.distro.info)
	info.update(self.pkg.info)

	# Update package information for string formatting.
	info["groups"] = " ".join(self.pkg.groups)

	# All dependency lists are rendered the same way; loop instead of
	# repeating the join expression seven times.
	for dep_type in ("prerequires", "requires", "provides", "conflicts",
			"obsoletes", "recommends", "suggests"):
		deps = getattr(self.pkg, dep_type)
		info[dep_type] = "\n".join(
			PACKAGE_INFO_DEPENDENCY_LINE % d for d in deps)

	# Format description, wrapped to 80 columns.
	description = [PACKAGE_INFO_DESCRIPTION_LINE % l \
		for l in util.text_wrap(self.pkg.description, length=80)]
	info["description"] = "\n".join(description)

	# Build information.
	info.update({
		# Package is built right now.
		"build_time" : int(time.time()),
		"build_id"   : uuid.uuid4(),
	})

	# Installed size (equals size of the uncompressed tarball).
	info.update({
		"inst_size" : self.getsize(datafile),
	})

	metafile = self.mktemp()

	# Use a context manager so the file is closed even if the
	# template rendering fails.
	with open(metafile, "w") as f:
		f.write(PACKAGE_INFO % info)

	return metafile
def dump(self, short=False, long=False, filelist=False):
	"""Return a human-readable description of this package.

	short    -- return a single "name.arch : summary" line.
	long     -- additionally include maintainer/build/dependency details.
	filelist -- additionally append one line per file in the package.
	"""
	if short:
		return "%s.%s : %s" % (self.name, self.arch, self.summary)

	items = [
		(_("Name"), self.name),
	]

	# Show supported arches if available.
	if hasattr(self, "supported_arches") and self.supported_arches != "all":
		arch = "%s (%s)" % (self.arch, self.supported_arches)
	else:
		arch = self.arch
	items.append((_("Arch"), arch))

	items += [
		(_("Version"), self.version),
		(_("Release"), self.release),
	]

	if self.size:
		items.append((_("Size"), util.format_size(self.size)))

	if self.inst_size:
		items.append((_("Installed size"), util.format_size(self.inst_size)))

	# Filter out the dummy repository.
	if self.repo != self.pakfire.repos.dummy:
		items.append((_("Repo"), self.repo.name))

	items += [
		(_("Summary"), self.summary),
		(_("Groups"), " ".join(self.groups)),
		(_("URL"), self.url),
		(_("License"), self.license),
	]

	# Print the caption only on the first line of a multi-line value.
	caption = _("Description")
	for line in util.text_wrap(self.description):
		items.append((caption, line))
		caption = ""

	if long:
		if self.maintainer:
			items.append((_("Maintainer"), self.maintainer))

		items.append((_("Vendor"), self.vendor))

		items.append((_("UUID"), self.uuid))
		items.append((_("Build ID"), self.build_id))
		items.append((_("Build date"), self.build_date))
		items.append((_("Build host"), self.build_host))

		caption = _("Signatures")
		for sig in self.signatures:
			items.append((caption, sig))
			caption = ""

		caption = _("Provides")
		for prov in sorted(self.provides):
			items.append((caption, prov))
			caption = ""

		caption = _("Pre-requires")
		for req in sorted(self.prerequires):
			items.append((caption, req))
			caption = ""

		caption = _("Requires")
		for req in sorted(self.requires):
			items.append((caption, req))
			caption = ""

		caption = _("Conflicts")
		for req in sorted(self.conflicts):
			items.append((caption, req))
			caption = ""

		caption = _("Obsoletes")
		for req in sorted(self.obsoletes):
			items.append((caption, req))
			caption = ""

		caption = _("Recommends")
		for req in sorted(self.recommends):
			items.append((caption, req))
			caption = ""

		caption = _("Suggests")
		for req in sorted(self.suggests):
			items.append((caption, req))
			caption = ""

	# Append filelist if requested.
	if filelist:
		for filename in self.filelist:
			items.append((_("File"), filename))

	# Pad all captions to the width of the longest one.
	# (Renamed from "format"/"str" to avoid shadowing builtins.)
	line_format = "%%-%ds : %%s" % (max([len(k) for k, v in items]))

	lines = []
	for caption, value in items:
		lines.append(line_format % (caption, value))

	lines.append("") # New line at the end

	# XXX why do we need to decode this?
	return "\n".join([line.decode("utf-8") for line in lines])