def gen_repo_conf(self):
    """Write reprepro's conf/distributions (and conf/updates when a
    debian-installer component is present), export the signing key,
    and (re)export or update the repository.

    Side effects: creates files under self.fs and runs reprepro via do().
    """
    self.fs.mkdir_p("conf")
    fp = self.fs.open("conf/distributions", "w")
    need_update = False
    # One distribution stanza per configured attribute set.
    for att in self.attrs:
        fp.write("Origin: " + self.origin + "\n")
        fp.write("Label: " + self.origin + "\n")
        fp.write("Suite: " + codename2suite[att.codename] + "\n")
        fp.write("Codename: " + att.codename + "\n")
        fp.write("Architectures: " + " ".join(att.arch) + "\n")
        # 'main/debian-installer' is listed separately as a udeb
        # component, so it is filtered out of the Components lines.
        fp.write("Components: " + " ".join(att.components.difference(
            set(["main/debian-installer"]))) + "\n")
        fp.write("UDebComponents: " + " ".join(att.components.difference(
            set(["main/debian-installer"]))) + "\n")
        fp.write("Description: " + self.description + "\n")
        fp.write("SignWith: " + self.keyid + "\n")
        if 'main/debian-installer' in att.components:
            # Reference the 'di' update rule and (re)write conf/updates.
            # NOTE(review): conf/updates is reopened with "w" for every
            # matching attribute set, so only the last one wins — confirm
            # this is intended.
            fp.write("Update: di\n")
            ufp = self.fs.open("conf/updates", "w")
            ufp.write("Name: di\n")
            ufp.write("Method: " + att.mirror + "\n")
            ufp.write("VerifyRelease: blindtrust\n")
            ufp.write("Components: \n")
            ufp.write("GetInRelease: no\n")
            # It would be nicer, to use this
            # ufp.write( "Architectures: " + " ".join (att.arch) + "\n" )
            # But we end up with 'armel amd64' sometimes.
            # So lets just use the init_attr...
            if self.init_attr:
                ufp.write(
                    "Architectures: "
                    + " ".join(
                        self.init_attr.arch)
                    + "\n")
            else:
                ufp.write("Architectures: " + " ".join(att.arch) + "\n")
            ufp.write("UDebComponents: main>main\n")
            ufp.close()
            need_update = True
        # blank line terminates the stanza
        fp.write("\n")
    fp.close()
    # Publish the public signing key next to the repo.
    export_key(self.keyid, self.fs.fname("/repo.pub"))
    if need_update:
        # 'update' pulls the d-i udebs from the mirror and exports.
        cmd = 'reprepro --export=force --basedir "%s" update' % self.fs.path
        do(cmd, env_add={'GNUPGHOME': "/var/cache/elbe/gnupg"})
    else:
        # No update rule: just (re)export every codename.
        for att in self.attrs:
            cmd = 'reprepro --basedir "%s" export %s' % (self.fs.path,
                                                         att.codename)
            do(cmd, env_add={'GNUPGHOME': "/var/cache/elbe/gnupg"})
def pbuild(self, p):
    """Fetch the pbuilder sources described by XML node *p* and build them.

    Supported source tags: 'git', 'svn' and 'src-pkg'; anything else is
    logged and the build is attempted on whatever is already present.
    """
    self.pdebuild_init()
    src_path = os.path.join(self.builddir, "pdebuilder", "current")
    # LOCALMACHINE is a placeholder for the host as seen from the
    # qemu user network.
    src_uri = p.text('.').replace("LOCALMACHINE", "10.0.2.2").strip()
    logging.info("Retrieve pbuild sources: %s", src_uri)
    if p.tag == 'git':
        do("git clone %s %s" % (src_uri, src_path))
        try:
            do("cd %s; git reset --hard %s" % (src_path,
                                               p.et.attrib['revision']))
        except (KeyError, IndexError):
            # No 'revision' attribute given: build HEAD.
            # (attrib is a dict, so a missing key raises KeyError;
            # the original code only caught IndexError and would
            # crash instead of falling back to HEAD)
            pass
    elif p.tag == 'svn':
        do("svn co --non-interactive %s %s" % (src_uri, src_path))
    elif p.tag == 'src-pkg':
        pdb_path = os.path.join(self.builddir, "pdebuilder")
        os.mkdir(pdb_path)
        apt_args = '--yes -q --download-only'
        if self.xml.prj.has('noauth'):
            apt_args += ' --allow-unauthenticated'
        do('cd "%s";apt-get source %s "%s"' % (pdb_path, apt_args, src_uri))
        do('dpkg-source -x %s/*.dsc "%s"' % (pdb_path, src_path))
    else:
        logging.info("Unknown pbuild source: %s", p.tag)
    # pdebuild_build(-1) means use all cpus
    self.pdebuild_build(cpuset=-1, profile="", cross=False)
def _includedsc(self, path, codename, components=None):
    """Add the source package *path* to *codename* via 'reprepro includedsc'.

    Rolls over to a new repo volume when the configured maxsize would be
    exceeded, either by the new package or by the current disk usage.
    """
    if self.maxsize:
        if self.fs.disk_usage("") + get_dsc_size(path) > self.maxsize:
            self.new_repo_volume()
        if self.fs.disk_usage("") > self.maxsize:
            self.new_repo_volume()
    opts = [
        "--keepunreferencedfiles",
        "--export=never",
        '--basedir "%s"' % self.fs.path,
        "--priority normal",
        "--section misc",
    ]
    if components is not None:
        # Compatibility with old callers that pass a single string.
        if isinstance(components, str):
            components = [components]
        opts.append('--component "%s"' % '|'.join(components))
    do('reprepro %s includedsc %s %s' % (' '.join(opts), codename, path))
def add_binary_blob(hd, target):
    """dd every <binary> child of *hd* into the image at its offset."""
    imagename = os.path.join(target, hd.text("name"))
    for binary in hd:
        if binary.tag != "binary":
            continue
        # Optional attributes default to offset 0 and blocksize 1.
        offset = binary.et.attrib.get("offset", 0)
        bs = binary.et.attrib.get("blocksize", 1)
        blob = binary.et.text
        # use file from target/ dir if binary path starts with /
        if blob[0] == '/':
            bf = os.path.join(target, 'target', blob[1:])
            print(bf)
        else:
            # use file from /var/cache/elbe/<uuid> project dir
            bf = os.path.join(target, blob)
        do('dd if="%s" of="%s" seek="%s" bs="%s" conv=notrunc'
           % (bf, imagename, offset, bs))
def add_key(self, key):
    """Install an apt signing key into the chroot.

    The key text is written to tmp/key.pub inside the rootfs, imported
    with 'apt-key add', and the temporary file removed afterwards.
    """
    keyfile = self.rfs.fname("tmp/key.pub")
    # Write the key directly instead of interpolating it into a
    # double-quoted shell 'echo' command: key material may contain
    # characters ('"', '$', '`') that break shell quoting.
    with open(keyfile, "w") as f:
        f.write(key)
        # 'echo' appended a trailing newline; keep that behavior.
        f.write("\n")
    with self.rfs:
        chroot(self.rfs.path, 'apt-key add /tmp/key.pub')
    do('rm -f %s' % keyfile)
def pdebuild_build(self, cpuset, profile):
    """Run pdebuild on the current pdebuilder package and publish results.

    :param cpuset: CPU to pin the build to via taskset; -1 means no pinning.
    :param profile: comma-separated DEB_BUILD_PROFILES value.

    On success the debianized control file is removed from the repo and
    the built *.changes are included; on failure the error is logged.
    """
    # check whether we have to use taskset to run pdebuild
    # this might be useful, when things like java dont
    # work with multithreading
    #
    if cpuset != -1:
        cpuset_cmd = 'taskset %d ' % cpuset
    else:
        # cpuset == -1 means empty cpuset_cmd
        cpuset_cmd = ''
    try:
        do('cd "%s"; %s pdebuild --debbuildopts "-j%s -sa" '
           '--configfile "%s" '
           '--use-pdebuild-internal --buildresult "%s"' % (
               os.path.join(self.builddir, "pdebuilder", "current"),
               cpuset_cmd,
               cfg['pbuilder_jobs'],
               os.path.join(self.builddir, "pbuilderrc"),
               os.path.join(self.builddir, "pbuilder", "result")),
           env_add={'DEB_BUILD_PROFILES': profile.replace(",", " ")})
        # Replace the source's control file in the repo with the
        # freshly built binary packages.
        self.repo.remove(os.path.join(self.builddir,
                                      "pdebuilder",
                                      "current",
                                      "debian",
                                      "control"))
        self.repo.include(os.path.join(self.builddir,
                                       "pbuilder",
                                       "result",
                                       "*.changes"))
    except CommandError:
        logging.exception("Package fails to build.\n"
                          "Please make sure, that the submited package "
                          "builds in pbuilder")
def execute_prj(self, _buildenv, target, builddir):
    """Convert an existing image artifact to another format via qemu-img.

    Reads 'dst' and 'fmt' from the node attributes; registers the new
    artifact with the default packer and optionally drops the source
    artifact unless keep_src is set.

    Raises:
        FinetuningException: if the source artifact is unknown.
    """
    src = self.node.et.text
    dst = self.node.et.attrib['dst']
    fmt = self.node.et.attrib['fmt']
    if src not in target.images:
        # Fixed garbled message ("Valid Artifcact are").
        logging.error(
            "Artifact '%s' does not exist.\n"
            "Valid artifacts are: %s",
            src, ", ".join([str(i) for i in target.images]))
        raise FinetuningException("Artifact '%s' does not exist" % src)
    src_fname = os.path.join(builddir, src)
    dst_fname = os.path.join(builddir, dst)
    cmd = 'qemu-img convert -O "%s" "%s" "%s"' % (fmt, src_fname, dst_fname)
    do(cmd)
    target.images.append(dst)
    target.image_packers[dst] = default_packer
    if not self.node.bool_attr('keep_src'):
        target.images.remove(src)
        del target.image_packers[src]
def enqueue(self, queue, db):
    """Mark the project busy, record the version in the DB, and enqueue.

    Raises whatever db.save_version raises; the busy flag is rolled back
    first so the project is not left stuck.
    """
    self.old_status = db.set_busy(self.project.builddir,
                                  ["build_done", "has_changes"])
    self.name = self.project.xml.text("project/name")
    self.version = self.project.xml.text("project/version")
    # Create the database entry now. This has the advantage that the
    # user will see an error message immediately, if he tries to use
    # the same version number twice. The time-consuming part is creating
    # the package archive, which is done in execute.
    try:
        db.save_version(self.project.builddir, self.description)
    except BaseException:
        # Roll back the busy state before propagating — intentionally
        # broad so even KeyboardInterrupt/SystemExit restore it.
        db.reset_busy(self.project.builddir, self.old_status)
        raise
    if self.project.savesh_file:
        logging.info("save version script:")
        # Hook script gets "builddir version name" as a single argument.
        do(self.project.savesh_file + ' "%s %s %s"' % (
            self.project.builddir,
            self.project.xml.text("project/version"),
            self.project.xml.text("project/name")),
           allow_fail=True)
    logging.info("Enqueueing project to save package archive")
    AsyncWorkerJob.enqueue(self, queue, db)
def execute_img(self, _buildenv, target, builddir, loop_dev):
    """Change a partition's type id on the loop device with fdisk."""
    number = self.node.et.attrib['part']
    type_id = self.node.et.attrib['type']
    # Drive fdisk interactively: t(ype), partition number, new id, w(rite).
    do(f'fdisk {loop_dev}', stdin=f't\n{number}\n{type_id}\nw\n')
def _includedeb(self, path, codename, component):
    """Add the binary package *path* to *codename*/*component*.

    Starts a new repo volume first when maxsize would be exceeded.
    """
    if self.maxsize:
        projected = self.fs.disk_usage("") + os.path.getsize(path)
        if projected > self.maxsize:
            self.new_repo_volume()
    do('reprepro --keepunreferencedfiles --export=never '
       '--basedir "%s" -C %s includedeb %s %s'
       % (self.fs.path, component, codename, path))
def pdebuild_build(self, cpuset, profile, cross):
    """Build the current pdebuilder package, natively or as a cross build.

    :param cpuset: CPU to pin the build to via taskset; -1 means no pinning.
    :param profile: comma-separated DEB_BUILD_PROFILES value.
    :param cross: if True, cross-build via 'pbuilder build --host-arch'.

    On success the debianized control file is removed from the repo and
    the built *.changes are included; on failure the error is logged.
    """
    # check whether we have to use taskset to run pdebuild
    # this might be useful, when things like java dont
    # work with multithreading
    #
    if cpuset != -1:
        cpuset_cmd = 'taskset %d ' % cpuset
    else:
        # cpuset == -1 means empty cpuset_cmd
        cpuset_cmd = ''
    profile_list = profile.split(",")
    # Only 'nodoc' and 'nocheck' double as DEB_BUILD_OPTIONS values.
    # (membership test instead of chained equality comparisons)
    deb_build_opts = [i for i in profile_list if i in ("nodoc", "nocheck")]
    try:
        if cross:
            do('cd "%s"; dpkg-source -b .; %s '
               'pbuilder build --host-arch %s --configfile "%s" '
               '--basetgz "%s" --buildresult "%s" '
               '../*.dsc' % (
                   os.path.join(self.builddir, "pdebuilder", "current"),
                   cpuset_cmd,
                   self.arch,
                   os.path.join(self.builddir, "cross_pbuilderrc"),
                   os.path.join(self.builddir, "pbuilder_cross", "base.tgz"),
                   os.path.join(self.builddir, "pbuilder_cross", "result")),
               env_add={'DEB_BUILD_PROFILES': profile.replace(",", " "),
                        'DEB_BUILD_OPTIONS': " ".join(deb_build_opts)})
            pbuilderdir = "pbuilder_cross"
        else:
            do('cd "%s"; %s pdebuild --debbuildopts "-j%s -sa" '
               '--configfile "%s" '
               '--use-pdebuild-internal --buildresult "%s"' % (
                   os.path.join(self.builddir, "pdebuilder", "current"),
                   cpuset_cmd,
                   cfg['pbuilder_jobs'],
                   os.path.join(self.builddir, "pbuilderrc"),
                   os.path.join(self.builddir, "pbuilder", "result")),
               env_add={'DEB_BUILD_PROFILES': profile.replace(",", " "),
                        'DEB_BUILD_OPTIONS': " ".join(deb_build_opts)})
            pbuilderdir = "pbuilder"
        # Replace the source's control file in the repo with the
        # freshly built binary packages.
        self.repo.remove(os.path.join(self.builddir,
                                      "pdebuilder",
                                      "current",
                                      "debian",
                                      "control"))
        self.repo.include(os.path.join(self.builddir,
                                       pbuilderdir,
                                       "result",
                                       "*.changes"))
    except CommandError:
        logging.exception("Package fails to build.\n"
                          "Please make sure, that the submitted package "
                          "builds in pbuilder")
def build_host_sysroot(self, pkgs, hostsysrootpath):
    """Create a fresh amd64 host sysroot at *hostsysrootpath* with *pkgs*.

    Raises:
        AptCacheUpdateError: if updating the apt cache fails.
        AptCacheCommitError: if committing the package installation fails.
    """
    do('rm -rf %s; mkdir "%s"' % (hostsysrootpath, hostsysrootpath))
    self.host_sysrootenv = BuildEnv(self.xml,
                                    hostsysrootpath,
                                    clean=True,
                                    arch="amd64",
                                    hostsysroot=True)
    # Import keyring
    self.host_sysrootenv.import_keys()
    logging.info("Keys imported")
    with self.host_sysrootenv:
        try:
            cache = self.get_rpcaptcache(env=self.host_sysrootenv,
                                         norecommend=True)
            cache.update()
        except Exception as e:
            # chain the original failure for easier debugging
            raise AptCacheUpdateError(e) from e
        for p in pkgs:
            try:
                cache.mark_install(p, None)
            except KeyError:
                logging.exception("No Package %s", p)
            except SystemError:
                logging.exception("Unable to correct problems in "
                                  "package %s", p)
        try:
            cache.commit()
        except SystemError as e:
            logging.exception("Commiting changes failed")
            raise AptCacheCommitError(str(e)) from e
        self.gen_licenses("sysroot-host", self.host_sysrootenv,
                          [p.name for p in cache.get_installed_pkgs()])
    # This is just a sysroot, some directories
    # need to be removed.
    #
    # This can move into finetuning in the
    # second implementation step.
    for d in ('/boot', '/dev', '/etc', '/home', '/media', '/mnt',
              '/proc', '/root', '/run', '/sys', '/tmp', '/var'):
        self.host_sysrootenv.rfs.rmtree(d)
def execute_img(self, _buildenv, target, builddir, loop_dev):
    """Extract one partition of the loop device into a new image artifact."""
    part = self.node.et.attrib['part']
    name = self.node.et.text
    imgpath = os.path.join(builddir, name)
    do('dd if=%sp%s of="%s"' % (loop_dev, part, imgpath))
    # Register the extracted partition as a packable artifact.
    target.images.append(name)
    target.image_packers[name] = default_packer
def execute_img(self, _buildenv, _target, builddir, loop_dev):
    """Mount one partition and run the node's embedded shell script in it."""
    raw = self.node.et.text.strip("\n")
    # Strip per-line leading whitespace that comes from XML indentation.
    script = '\n'.join([ln.lstrip(" \t") for ln in raw.splitlines()])
    mountpoint = os.path.join(builddir, 'imagemnt')
    partdev = "%sp%s" % (loop_dev, self.node.et.attrib["part"])
    with ImgMountFilesystem(mountpoint, partdev) as mnt:
        # ELBE_MNT tells the script where the partition is mounted.
        do("/bin/sh", stdin=script, env_add={"ELBE_MNT": mnt.path})
def pack_file(self, builddir, fname):
    """Compress builddir/fname with self.cmd.

    Returns the new file name (with self.suffix appended), or None if
    the compressor failed.
    """
    fpath = os.path.join(builddir, fname)
    try:
        do('%s "%s"' % (self.cmd, fpath))
    except CommandError:
        # in case of an error, we just return None
        # which means, that the orig file does not
        # exist anymore
        return None
    return fname + self.suffix
def execute_img(self, _buildenv, _target, builddir, loop_dev):
    """Copy a build artifact onto a mounted partition of the image."""
    part = self.node.et.attrib['part']
    artifact = self.node.et.attrib['artifact']
    mountpoint = os.path.join(builddir, 'imagemnt')
    partdev = "%sp%s" % (loop_dev, part)
    with ImgMountFilesystem(mountpoint, partdev) as mnt:
        # Destination path is resolved inside the mounted filesystem.
        do('cp "%s" "%s"' % (os.path.join(builddir, artifact),
                             mnt.fname(self.node.et.text)))
def execute(self, _buildenv, target):
    """Move files matching the node's 'path' glob to the destination.

    A destination starting with '/' is taken relative to the target
    root; otherwise it is resolved one level above it.
    """
    text = self.node.et.text
    if text[0] == '/':
        dest = text[1:]
    else:
        dest = os.path.join('..', text)
    for match in target.glob(self.node.et.attrib['path']):
        do("mv -v %s %s" % (match, dest))
def do_image_hd(hd, fslabel, target, grub_version, grub_fw_type=None):
    """Create and partition a disk image described by XML node *hd*.

    Creates a sparse image file, writes a gpt/msdos partition table with
    pyparted, creates filesystems for labelled partitions and optionally
    installs grub.

    Returns the image file name (relative, as given in the XML).
    """
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-branches
    sector_size = 512
    s = size_to_int(hd.text("size"))
    size_in_sectors = s // sector_size
    imagename = os.path.join(target, hd.text("name"))
    do('rm -f "%s"' % imagename, allow_fail=True)
    # Create a sparse file of the full image size; use a context
    # manager so the handle is closed even if truncate fails.
    with open(imagename, "wb") as f:
        f.truncate(size_in_sectors * sector_size)
    imag = parted.Device(imagename)
    if hd.tag == "gpthd":
        disk = parted.freshDisk(imag, "gpt")
    else:
        disk = parted.freshDisk(imag, "msdos")
    # Pick the grub installer matching the requested version.
    if grub_version == 202:
        grub = grubinstaller202(grub_fw_type)
    elif grub_version == 97:
        grub = grubinstaller97(grub_fw_type)
    else:
        grub = grubinstaller_base()
    current_sector = size_to_int(
        hd.text("first_partition_sector", default="2048"))
    for part in hd:
        if part.tag == "partition":
            ppart = create_partition(disk, part, parted.PARTITION_NORMAL,
                                     fslabel, size_in_sectors,
                                     current_sector)
            if part.text("label") in fslabel:
                create_label(disk, part, ppart, fslabel, target, grub)
        elif part.tag == "extended":
            ppart = create_partition(disk, part, parted.PARTITION_EXTENDED,
                                     fslabel, size_in_sectors,
                                     current_sector)
            create_logical_partitions(disk, part, ppart, fslabel,
                                      target, grub)
        else:
            # Non-partition children do not advance the cursor.
            continue
        current_sector += ppart.getLength()
    disk.commit()
    if hd.has("grub-install") and grub_version:
        grub.install(target)
    return hd.text("name")
def losetup(self):
    """Attach the image region to a free loop device; return its path.

    When losetup exits with code 1 (typically "device busy"), sync and
    retry once; any other failure propagates.
    """
    cmd = ('losetup --offset %d --sizelimit %d --find --show "%s"'
           % (self.offset, self.size, self.filename))
    try:
        out = get_command_out(cmd)
    except CommandError as e:
        if e.returncode != 1:
            raise
        do('sync')
        out = get_command_out(cmd)
    return out.rstrip('\n')
def _includedsc(self, path, codename, component):
    """Add the source package *path* to *codename*/*component*.

    Starts a new repo volume first when maxsize would be exceeded,
    either by the new package or by the current disk usage.
    """
    if self.maxsize:
        projected = self.fs.disk_usage("") + get_dsc_size(path)
        if projected > self.maxsize:
            self.new_repo_volume()
        if self.fs.disk_usage("") > self.maxsize:
            self.new_repo_volume()
    do('reprepro --keepunreferencedfiles --export=never '
       '--basedir "%s" -C %s -P normal -S misc includedsc %s %s'
       % (self.fs.path, component, codename, path))
def execute(self, _buildenv, target):
    """Recursively remove files matching the node's glob.

    Basenames listed in the space-separated 'exclude' attribute are
    spared.
    """
    exclude = []
    if 'exclude' in self.node.et.attrib:
        exclude = self.node.et.attrib['exclude'].split(' ')
    for entry in target.glob(self.node.et.text):
        if os.path.basename(entry) in exclude:
            continue
        do("rm -rvf '%s'" % entry)
def execute_prj(self, buildenv, target, builddir):
    """Attach the image to a loop device and run the nested image actions."""
    imgpath = os.path.join(builddir, self.node.et.attrib['img'])
    # --partscan makes the kernel expose the partitions as /dev/loopXpN.
    loop_dev = get_command_out(
        'losetup --find --show --partscan "%s"' % imgpath).strip()
    try:
        for child in self.node:
            ImageFinetuningAction(child).execute_img(
                buildenv, target, builddir, loop_dev)
    finally:
        # Always detach the loop device, even if an action failed.
        do('losetup --detach "%s"' % loop_dev)
def _removesrc(self, srcname, codename, components=None):
    """Remove the source package *srcname* from *codename* via reprepro.

    :param components: optional component name or list of names to
        restrict the removal to (old callers may pass a single string).
    """
    cmd = 'reprepro %s removesrc %s %s'
    # Quote the basedir like the other repo helpers do, so paths
    # containing spaces don't break the shell command.
    global_opt = ['--basedir "%s"' % self.fs.path]
    if components is not None:
        # Compatibility with old callers
        if isinstance(components, str):
            components = [components]
        global_opt.append('--component "%s"' % '|'.join(components))
    global_opt = ' '.join(global_opt)
    do(cmd % (global_opt, codename, srcname),
       env_add={'GNUPGHOME': '/var/cache/elbe/gnupg'})
def buildiso(self, fname, options=""): files = [] if self.volume_count == 0: new_path = '"' + self.fs.path + '"' do("genisoimage %s -o %s -J -joliet-long -R %s" % (options, fname, new_path)) files.append(fname) else: for i in self.volume_indexes: volfs = self.get_volume_fs(i) newname = fname + ("%02d" % i) do("genisoimage %s -o %s -J -joliet-long -R %s" % (options, newname, volfs.path)) files.append(newname) return files
def losetup(self):
    """Attach the image region to a free loop device; return its path.

    Retries indefinitely while losetup exits with code 1 (typically
    "device busy"), syncing and sleeping one second between attempts;
    any other failure propagates.
    """
    cmd = ('losetup --offset %d --sizelimit %d --find --show "%s"'
           % (self.offset, self.size, self.filename))
    while True:
        try:
            loopdev = get_command_out(cmd)
        except CommandError as e:
            if e.returncode != 1:
                raise
            do('sync')
            time.sleep(1)
        else:
            break
    # NOTE(review): .decode() implies get_command_out() returns bytes
    # here — confirm; a sibling implementation of losetup() calls
    # .rstrip('\n') without decoding first.
    return loopdev.decode().rstrip('\n')
def _include(self, path, codename, components=None):
    """Add a .changes upload at *path* to *codename* via 'reprepro include'."""
    opts = [
        "--ignore=wrongdistribution",
        "--ignore=surprisingbinary",
        "--keepunreferencedfiles",
        "--export=never",
        '--basedir "%s"' % self.fs.path,
        "--priority normal",
        "--section misc",
    ]
    if components is not None:
        # Compatibility with old callers that pass a single string.
        if isinstance(components, str):
            components = [components]
        opts.append('--component "%s"' % '|'.join(components))
    do('reprepro %s include %s %s' % (' '.join(opts), codename, path))
def pack_file(self, builddir, fname):
    """Archive builddir/fname into a sparse-aware tarball and remove it.

    Returns the archive name (fname plus self.suffix), or None if the
    tar step failed.
    """
    fpath = os.path.join(builddir, fname)
    try:
        do('tar cv%sf "%s" --sparse -C "%s" "%s"'
           % (self.flag,
              fpath + self.suffix,
              os.path.dirname(fpath),
              os.path.basename(fpath)))
        do('rm -f "%s"' % fpath)
    except CommandError:
        # in case of an error, we just return None
        # which means, that the orig file does not
        # exist anymore.
        #
        # Even if it actually exists, it might be
        # much to big to download it and remove
        # the sparsity.
        return None
    return fname + self.suffix
def set_xml(self, xmlpath):
    """Replace the project's XML description with the file at *xmlpath*.

    Falls back to builddir/source.xml when no path is given. Resets the
    cached buildenv/targetfs/sysroot state and re-creates what can be
    re-created from existing directories.

    Raises:
        IncompatibleArchitectureException: if the new XML's architecture
            differs from the current project's.
    """
    # Use supplied XML file, if given, otherwise change to source.xml
    if not xmlpath:
        xmlpath = os.path.join(self.builddir, "source.xml")
    newxml = ElbeXML(xmlpath,
                     buildtype=self.override_buildtype,
                     skip_validate=self.skip_validate,
                     url_validation=self.url_validation)
    # New XML file has to have the same architecture
    oldarch = self.xml.text("project/arch", key="arch")
    newarch = newxml.text("project/arch", key="arch")
    if newarch != oldarch:
        raise IncompatibleArchitectureException(oldarch, newarch)
    # Throw away old APT cache, targetfs and buildenv
    self.targetfs = None
    self.buildenv = None
    # dont create sysroot instance, it should be build from scratch
    # each time, because the pkglist including the -dev packages is
    # tracked nowhere.
    self.sysrootenv = None
    do('rm -rf %s' % self.sysrootpath)
    self.xml = newxml
    # Create a new BuildEnv instance, if we have a build directory
    if self.has_full_buildenv():
        self.buildenv = BuildEnv(self.xml, self.chrootpath, clean=False)
    # Create TargetFs instance, if the target directory exists.
    # We use the old content of the directory if no rebuild is done, so
    # don't clean it (yet).
    if os.path.exists(self.targetpath):
        self.targetfs = TargetFs(self.targetpath, self.xml, clean=False)
    else:
        self.targetfs = None
def do_sdk(self, sdk):
    """Extract the self-extracting SDK installer *sdk* in a temp dir and
    run self.script with the SDK environment file exported as
    ELBE_SDK_ENV.
    """
    with TmpdirFilesystem() as tmp:
        # Make a copy of the installer
        shutil.copyfile(sdk, tmp.fname(sdk))
        # Let's work in our temp dir from now on
        # NOTE(review): *sdk* is reused as a relative path after
        # chdir — this presumably only works when sdk is a bare file
        # name; confirm against callers.
        os.chdir(tmp.path)
        # The script is self extracting; it needs to be executable
        os.chmod(sdk, 0o744)
        # Extract here with 'yes' to all answers
        do("./%s -y -d ." % sdk)
        # Get environment file
        env = tmp.glob("environment-setup*")[0]
        # NOTE! This script requires binfmt to be installed.
        do("/bin/sh", stdin=self.script, env_add={"ELBE_SDK_ENV": env})
def __exit__(self, typ, value, traceback):
    """Leave the build environment: unmount and detach the local repo."""
    self.rfs.__exit__(typ, value, traceback)
    self.cdrom_umount()
    if os.path.exists(self.path + '/repo'):
        # Move the generated repo out of the rootfs and drop the apt
        # configuration that pointed at it.
        do("mv %s/repo %s/../" % (self.path, self.path))
        do("rm %s/etc/apt/sources.list.d/local.list" % self.path)
        do("rm %s/etc/apt/trusted.gpg.d/elbe-localrepo.gpg" % self.path)