def do_install_disk(self, disk, disk_name, cr, workdir, oe_builddir, bootimg_dir, kernel_dir, native_sysroot): """ Called after all partitions have been prepared and assembled into a disk image. In this case, we install the MBR. """ mbrfile = "%s/syslinux/" % bootimg_dir if cr._ptable_format == 'gpt': mbrfile += "gptmbr.bin" else: mbrfile += "mbr.bin" if not os.path.exists(mbrfile): msger.error( "Couldn't find %s. If using the -e option, do you have the right MACHINE set in local.conf? If not, is the bootimg_dir path correct?" % mbrfile) full_path = cr._full_path(workdir, disk_name, "direct") msger.debug("Installing MBR on disk %s as %s with size %s bytes" \ % (disk_name, full_path, disk['min_size'])) rc = runner.show( ['dd', 'if=%s' % mbrfile, 'of=%s' % full_path, 'conv=notrunc']) if rc != 0: raise MountError("Unable to set MBR to %s" % full_path)
def exec_native_cmd(cmd_and_args, native_sysroot, catch=3): """ Execute native command, catching stderr, stdout Need to execute as_shell if the command uses wildcards Always need to execute native commands as_shell """ native_paths = \ "export PATH=%s/sbin:%s/usr/sbin:%s/usr/bin:$PATH" % \ (native_sysroot, native_sysroot, native_sysroot) native_cmd_and_args = "%s;%s" % (native_paths, cmd_and_args) msger.debug("exec_native_cmd: %s" % cmd_and_args) args = cmd_and_args.split() msger.debug(args) rc, out = exec_cmd(native_cmd_and_args, True, catch) if rc == 127: # shell command-not-found msger.error("A native (host) program required to build the image " "was not found (see details above). Please make sure " "it's installed and try again.") return (rc, out)
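# A minimal usage sketch for exec_native_cmd() above, assuming only its
# documented behaviour: it prepends "export PATH=<sysroot>/sbin:<sysroot>/usr/sbin:
# <sysroot>/usr/bin:$PATH" and runs the command as a shell command, so tools
# staged in the native sysroot are found before any host copies. The sysroot
# path and the mkdosfs invocation are hypothetical placeholders.
native_sysroot = "/path/to/tmp/sysroots/x86_64-linux"
rc, out = exec_native_cmd("mkdosfs -n boot -C /tmp/boot.img 8192", native_sysroot)
if rc != 0:
    msger.error("mkdosfs failed with exit code %s" % rc)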
def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir, kernel_dir, native_sysroot): """ Called to do the actual content population for a partition i.e. it 'prepares' the partition to be incorporated into the image. In this case, prepare content for legacy bios boot partition. """ if not bootimg_dir: bootimg_dir = get_bitbake_var("STAGING_DATADIR") if not bootimg_dir: msger.error("Couldn't find STAGING_DATADIR, exiting\n") # just so the result notes display it cr.set_bootimg_dir(bootimg_dir) staging_kernel_dir = kernel_dir staging_data_dir = bootimg_dir hdddir = "%s/hdd/boot" % cr_workdir install_cmd = "install -m 0644 %s/bzImage %s/vmlinuz" \ % (staging_kernel_dir, hdddir) tmp = exec_cmd(install_cmd) install_cmd = "install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" \ % (staging_data_dir, hdddir) tmp = exec_cmd(install_cmd) du_cmd = "du -bks %s" % hdddir rc, out = exec_cmd(du_cmd) blocks = int(out.split()[0]) blocks += BOOTDD_EXTRA_SPACE # Ensure total sectors is an integral number of sectors per # track or mcopy will complain. Sectors are 512 bytes, and we # generate images with 32 sectors per track. This calculation is # done in blocks, thus the mod by 16 instead of 32. blocks += (16 - (blocks % 16)) # dosfs image, created by mkdosfs bootimg = "%s/boot.img" % cr_workdir dosfs_cmd = "mkdosfs -n boot -S 512 -C %s %d" % (bootimg, blocks) exec_native_cmd(dosfs_cmd, native_sysroot) mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir) exec_native_cmd(mcopy_cmd, native_sysroot) syslinux_cmd = "syslinux %s" % bootimg exec_native_cmd(syslinux_cmd, native_sysroot) chmod_cmd = "chmod 644 %s" % bootimg exec_cmd(chmod_cmd) du_cmd = "du -Lbms %s" % bootimg rc, out = exec_cmd(du_cmd) bootimg_size = out.split()[0] part.set_size(bootimg_size) part.set_source_file(bootimg)
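# A small worked example of the block-rounding arithmetic used above: the du
# figures here are 1 KB blocks, sectors are 512 bytes and the images are
# generated with 32 sectors per track, so one track is 16 KB and the count is
# padded up to a multiple of 16 (an exact multiple still gains a full extra
# 16 blocks). The starting value is an illustrative placeholder.
blocks = 8200                     # e.g. du output plus BOOTDD_EXTRA_SPACE
blocks += (16 - (blocks % 16))    # 8200 % 16 == 8, so 8 more blocks are added
assert blocks == 8208 and blocks % 16 == 0   # 8208 KB == 513 tracks of 16 KB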
def close(self): if self.created: try: self.cleanup() self.device = None except MountError, e: msger.error("%s" % e)
def do_stage_partition(self, part, cr, workdir, oe_builddir, bootimg_dir, kernel_dir, native_sysroot): """ Special content staging hook called before do_prepare_partition(), normally empty. For galileo, we need to stage just the boot/ dir in the deploy dir. """ bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE") if not bootimg_dir: msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n") # just so the result notes display it cr.set_bootimg_dir(bootimg_dir) hdddir = "%s/hdd" % workdir hdd_boot_dir = "%s/hdd/boot" % workdir boot_dir = "%s/boot" % bootimg_dir rm_cmd = "rm -rf %s" % workdir exec_cmd(rm_cmd) msger.debug("Copying %s to %s" % (boot_dir, hdd_boot_dir)) shutil.copytree(bootimg_dir+"/boot/", hdd_boot_dir) machine = get_bitbake_var("MACHINE_ARCH") if not machine: msger.error("Couldn't find MACHINE, exiting\n") install_cmd = "install -m 0644 %s/image-micro-%s.cpio.gz %s" % \ (bootimg_dir, machine, hdddir) tmp = exec_cmd(install_cmd)
def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir, kernel_dir, krootfs_dir, native_sysroot): """ Called to do the actual content population for a partition i.e. it 'prepares' the partition to be incorporated into the image. In this case, prepare content for legacy bios boot partition. """ if part.rootfs is None: if not 'ROOTFS_DIR' in krootfs_dir: msg = "Couldn't find --rootfs-dir, exiting" msger.error(msg) rootfs_dir = krootfs_dir['ROOTFS_DIR'] else: if part.rootfs in krootfs_dir: rootfs_dir = krootfs_dir[part.rootfs] elif os.path.isdir(part.rootfs): rootfs_dir = part.rootfs else: msg = "Couldn't find --rootfs-dir=%s connection" msg += " or it is not a valid path, exiting" msger.error(msg % part.rootfs) part.set_rootfs(rootfs_dir) part.prepare_rootfs(cr_workdir, oe_builddir, rootfs_dir, native_sysroot)
def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir, kernel_dir, krootfs_dir, native_sysroot): """ Called to do the actual content population for a partition i.e. it 'prepares' the partition to be incorporated into the image. In this case, prepare content for legacy bios boot partition. """ if part.rootfs is None: if not 'ROOTFS_DIR' in krootfs_dir: msg = "Couldn't find --rootfs-dir, exiting" msger.error(msg) rootfs_dir = krootfs_dir['ROOTFS_DIR'] else: if part.rootfs in krootfs_dir: rootfs_dir = krootfs_dir[part.rootfs] elif part.rootfs: rootfs_dir = part.rootfs else: msg = "Couldn't find --rootfs-dir=%s connection" msg += " or it is not a valid path, exiting" msger.error(msg % part.rootfs) real_rootfs_dir = self.__get_rootfs_dir(rootfs_dir) part.set_rootfs(real_rootfs_dir) part.prepare_rootfs(cr_workdir, oe_builddir, real_rootfs_dir, native_sysroot)
def set_runtime(self, runtime): if runtime not in ("bootstrap", "native"): msger.error("Invalid runtime mode: %s" % runtime) if misc.get_distro()[0] in ("tizen", "Tizen"): runtime = "native" self.create['runtime'] = runtime
def _do_chroot_tar(cls, target, cmd=[]): mountfp_xml = os.path.splitext(target)[0] + '.xml' if not os.path.exists(mountfp_xml): raise errors.CreatorError("No mount point file found for this tar " "image, please check %s" % mountfp_xml) import tarfile tar = tarfile.open(target, 'r') tmpdir = misc.mkdtemp() tar.extractall(path=tmpdir) tar.close() mntdir = misc.mkdtemp() loops = [] for (mp, label, name, size, fstype) in load_mountpoints(mountfp_xml): if fstype in ("ext2", "ext3", "ext4"): myDiskMount = fs_related.ExtDiskMount elif fstype == "btrfs": myDiskMount = fs_related.BtrfsDiskMount elif fstype in ("vfat", "msdos"): myDiskMount = fs_related.VfatDiskMount else: msger.error("Cannot support fstype: %s" % fstype) name = os.path.join(tmpdir, name) size = size * 1024L * 1024L loop = myDiskMount(fs_related.SparseLoopbackDisk(name, size), os.path.join(mntdir, mp.lstrip('/')), fstype, size, label) try: msger.verbose("Mount %s to %s" % (mp, mntdir + mp)) fs_related.makedirs(os.path.join(mntdir, mp.lstrip('/'))) loop.mount() except: loop.cleanup() for lp in reversed(loops): chroot.cleanup_after_chroot("img", lp, None, mntdir) shutil.rmtree(tmpdir, ignore_errors=True) raise loops.append(loop) try: if len(cmd) != 0: cmdline = "/usr/bin/env HOME=/root " + ' '.join(cmd) else: cmdline = "/usr/bin/env HOME=/root /bin/bash" chroot.chroot(mntdir, None, cmdline) except: raise errors.CreatorError("Failed to chroot to %s." % target) finally: for loop in reversed(loops): chroot.cleanup_after_chroot("img", loop, None, mntdir) shutil.rmtree(tmpdir, ignore_errors=True)
def postoptparse(self): if self.options.verbose: msger.set_loglevel('verbose') if self.options.debug: msger.set_loglevel('debug') if self.options.logfile: msger.set_interactive(False) msger.set_logfile(self.options.logfile) configmgr.create['logfile'] = self.options.logfile if self.options.config: configmgr.reset() configmgr._siteconf = self.options.config if self.options.outdir is not None: configmgr.create['outdir'] = self.options.outdir if self.options.cachedir is not None: configmgr.create['cachedir'] = self.options.cachedir os.environ['ZYPP_LOCKFILE_ROOT'] = configmgr.create['cachedir'] if self.options.local_pkgs_path is not None: if not os.path.exists(self.options.local_pkgs_path): msger.error('Local pkgs directory: \'%s\' not exist' \ % self.options.local_pkgs_path) configmgr.create['local_pkgs_path'] = self.options.local_pkgs_path if self.options.release: configmgr.create['release'] = self.options.release if self.options.record_pkgs: configmgr.create['record_pkgs'] = [] for infotype in self.options.record_pkgs.split(','): if infotype not in ('name', 'content', 'license'): raise errors.Usage('Invalid pkg recording: %s, valid ones:' ' "name", "content", "license"' \ % infotype) configmgr.create['record_pkgs'].append(infotype) if self.options.arch is not None: supported_arch = sorted(rpmmisc.archPolicies.keys(), reverse=True) if self.options.arch in supported_arch: configmgr.create['arch'] = self.options.arch else: raise errors.Usage('Invalid architecture: "%s".\n' ' Supported architectures are: \n' ' %s\n' % (self.options.arch, ', '.join(supported_arch))) if self.options.pkgmgr is not None: configmgr.create['pkgmgr'] = self.options.pkgmgr if self.options.runtime: configmgr.create['runtime'] = self.options.runtime if self.options.compress_disk_image is not None: configmgr.create['compress_disk_image'] = \ self.options.compress_disk_image
def runtool(cmdln_or_args, catch=1): """ wrapper for most of the subprocess calls input: cmdln_or_args: can be both args and cmdln str (shell=True) catch: 0, quietly run 1, only STDOUT 2, only STDERR 3, both STDOUT and STDERR return: (rc, output) if catch==0, no output is captured """ if catch not in (0, 1, 2, 3): # invalid catch selection, will cause exception, that's good return None if isinstance(cmdln_or_args, list): cmd = cmdln_or_args[0] shell = False else: import shlex cmd = shlex.split(cmdln_or_args)[0] shell = True if catch != 3: dev_null = os.open("/dev/null", os.O_WRONLY) if catch == 0: sout = dev_null serr = dev_null elif catch == 1: sout = subprocess.PIPE serr = dev_null elif catch == 2: sout = dev_null serr = subprocess.PIPE elif catch == 3: sout = subprocess.PIPE serr = subprocess.STDOUT try: p = subprocess.Popen(cmdln_or_args, stdout=sout, stderr=serr, shell=shell) (sout, serr) = p.communicate() # combine stdout and stderr, filter None out out = ''.join(filter(None, [sout, serr])) except OSError, e: if e.errno == 2: # [Errno 2] No such file or directory msger.error('Cannot run command: %s, lost dependency?' % cmd) else: raise # relay finally: if catch != 3: os.close(dev_null) return (p.returncode, out)
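# Example calls for runtool() above, assuming only the catch semantics spelled
# out in its docstring; the commands themselves are arbitrary placeholders.
rc, out = runtool(['ls', '-l', '/'], catch=1)   # list form: shell=False, stdout captured, stderr discarded
rc, out = runtool("ls /tmp/*.ks", catch=3)      # string form: shell=True, stdout and stderr combined
rc, out = runtool(['sync'], catch=0)            # quiet run, nothing is captured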
def _mount_instroot(self, base_on=None): if base_on and os.path.isfile(base_on): self.__imgdir = os.path.dirname(base_on) imgname = os.path.basename(base_on) self._base_on(base_on) self._set_image_size(misc.get_file_size(self._image)) # here, self._instloops must be [] self._instloops.append({ "mountpoint": "/", "label": self.name, "name": imgname, "size": self.__image_size or 4096, "fstype": self.__fstype or "ext3", "fsopts": None, "loop": None }) self._check_imgdir() for loop in self._instloops: fstype = loop['fstype'] mp = os.path.join(self._instroot, loop['mountpoint'].lstrip('/')) size = loop['size'] * 1024 * 1024 imgname = loop['name'] fsopts = loop['fsopts'] dargs = [ fs.SparseLoopbackDisk(os.path.join(self.__imgdir, imgname), size), mp, fstype, self._blocksize, loop['label'] ] dkwargs = {"fsopts": fsopts} if fstype in ("ext2", "ext3", "ext4"): MyDiskMount = fs.ExtDiskMount elif fstype == "btrfs": MyDiskMount = fs.BtrfsDiskMount dkwargs["subvolumes"] = loop["subvolumes"] dkwargs["snapshots"] = loop["snapshots"] elif fstype in ("vfat", "msdos"): MyDiskMount = fs.VfatDiskMount else: msger.error('Cannot support fstype: %s' % fstype) loop['loop'] = MyDiskMount(*dargs, **dkwargs) loop['uuid'] = loop['loop'].uuid try: msger.verbose('Mounting image "%s" on "%s"' % (imgname, mp)) fs.makedirs(mp) loop['loop'].mount() except MountError as e: raise
def _mount_instroot(self, base_on=None): if base_on and os.path.isfile(base_on): self.__imgdir = os.path.dirname(base_on) imgname = os.path.basename(base_on) self._base_on(base_on) self._set_image_size(misc.get_file_size(self._image)) # here, self._instloops must be [] self._instloops.append({ "mountpoint": "/", "label": self.name, "name": imgname, "size": self.__image_size or 4096L, "fstype": self.__fstype or "ext3", "extopts": None, "loop": None }) self._check_imgdir() for loop in self._instloops: fstype = loop['fstype'] mp = os.path.join(self._instroot, loop['mountpoint'].lstrip('/')) size = loop['size'] * 1024L * 1024L imgname = loop['name'] if fstype in ("ext2", "ext3", "ext4"): MyDiskMount = fs.ExtDiskMount elif fstype == "btrfs": MyDiskMount = fs.BtrfsDiskMount elif fstype in ("vfat", "msdos"): MyDiskMount = fs.VfatDiskMount else: msger.error('Cannot support fstype: %s' % fstype) loop['loop'] = MyDiskMount(fs.SparseLoopbackDisk( os.path.join(self.__imgdir, imgname), size), mp, fstype, self._blocksize, loop['label']) if fstype in ("ext2", "ext3", "ext4"): loop['loop'].extopts = loop['extopts'] try: msger.verbose('Mounting image "%s" on "%s"' % (imgname, mp)) fs.makedirs(mp) loop['loop'].mount() except MountError, e: raise
def precmd(self, argv): # check help before cmd if '-h' in argv or '?' in argv or '--help' in argv or 'help' in argv: return argv if len(argv) == 1: return ['help', argv[0]] if os.geteuid() != 0: msger.error("Root permission is required, abort") try: w = pwd.getpwuid(os.geteuid()) except KeyError: msger.warning("Might fail in compressing stage for undetermined user") return argv
def __get_rootfs_dir(rootfs_dir): if os.path.isdir(rootfs_dir): return rootfs_dir bitbake_env_lines = find_bitbake_env_lines(rootfs_dir) if not bitbake_env_lines: msg = "Couldn't get bitbake environment, exiting." msger.error(msg) image_rootfs_dir = find_artifact(bitbake_env_lines, "IMAGE_ROOTFS") if not os.path.isdir(image_rootfs_dir): msg = "No valid artifact IMAGE_ROOTFS from image named" msg += " %s has been found at %s, exiting.\n" % \ (rootfs_dir, image_rootfs_dir) msger.error(msg) return image_rootfs_dir
def __run_parted(self, args): """ Run parted with arguments specified in the 'args' list. """ args.insert(0, self.parted) msger.debug(args) rc, out = runner.runtool(args, catch = 3) out = out.strip() if out: msger.debug('"parted" output: %s' % out) if rc != 0: # We don't throw exception when return code is not 0, because # parted always fails to reload part table with loop devices. This # prevents us from distinguishing real errors based on return # code. msger.error("WARNING: parted returned '%s' instead of 0 (use --debug for details)" % rc)
def main(self, argv=None): if argv is None: argv = sys.argv else: argv = argv[:] # don't modify caller's list self.optparser = self.get_optparser() if self.optparser: try: argv = self.preoptparse(argv) self.options, args = self.optparser.parse_args(argv) except cmdln.CmdlnUserError, ex: msg = "%s: %s\nTry '%s help' for info.\n"\ % (self.name, ex, self.name) msger.error(msg) except cmdln.StopOptionProcessing, ex: return 0
def _install_syslinux(self): mbrfile = "%s/syslinux/" % self.bootimg_dir if self._ptable_format == 'gpt': mbrfile += "gptmbr.bin" else: mbrfile += "mbr.bin" if not os.path.exists(mbrfile): msger.error("Couldn't find %s. If using the -e option, do you have the right MACHINE set in local.conf? If not, is the bootimg_dir path correct?" % mbrfile) for disk_name, disk in self.__instimage.disks.items(): full_path = self._full_path(self.__imgdir, disk_name, "direct") msger.debug("Installing MBR on disk %s as %s with size %s bytes" \ % (disk_name, full_path, disk['min_size'])) rc = runner.show(['dd', 'if=%s' % mbrfile, 'of=%s' % full_path, 'conv=notrunc']) if rc != 0: raise MountError("Unable to set MBR to %s" % full_path)
def _parse_siteconf(self, siteconf): if not siteconf: return if not os.path.exists(siteconf): msger.warning("cannot read config file: %s" % siteconf) return parser = ConfigParser.SafeConfigParser() parser.read(siteconf) for section in parser.sections(): if section in self.DEFAULTS: getattr(self, section).update(dict(parser.items(section))) # append common section items to other sections for section in self.DEFAULTS.keys(): if section != "common": getattr(self, section).update(self.common) # check and normalize the scheme of proxy url if self.create['proxy']: m = re.match('^(\w+)://.*', self.create['proxy']) if m: scheme = m.group(1) if scheme not in ('http', 'https', 'ftp', 'socks'): msger.error("%s: proxy scheme is incorrect" % siteconf) else: msger.warning("%s: proxy url w/o scheme, use http as default" % siteconf) self.create['proxy'] = "http://" + self.create['proxy'] proxy.set_proxies(self.create['proxy'], self.create['no_proxy']) # bootstrap option handling self.set_runtime(self.create['runtime']) if isinstance(self.bootstrap['packages'], basestring): packages = self.bootstrap['packages'].replace('\n', ' ') if packages.find(',') != -1: packages = packages.split(',') else: packages = packages.split() self.bootstrap['packages'] = packages
def precmd(self, argv): # check help before cmd if '-h' in argv or '?' in argv or '--help' in argv or 'help' in argv: return argv if len(argv) == 1: return ['help', argv[0]] if os.geteuid() != 0: msger.error("Root permission is required, abort") return argv
def do_stage_partition(self, part, cr, workdir, oe_builddir, bootimg_dir, kernel_dir, native_sysroot): """ Special content staging hook called before do_prepare_partition(), normally empty. For the iot-devkit, we need to stage just the boot/ dir in the deploy dir. """ if not bootimg_dir: msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n") # just so the result notes display it cr.set_bootimg_dir(bootimg_dir) hdddir = "%s/hdd/boot" % workdir boot_dir = "%s/boot" % bootimg_dir rm_cmd = "rm -rf %s" % workdir exec_cmd(rm_cmd) msger.debug("Copying %s to %s" % (boot_dir, hdddir)) shutil.copytree(bootimg_dir+"/boot/", hdddir)
def __copy_kernel_and_initramfs(self, isodir, version, index): bootdir = self._instroot + "/boot" shutil.copyfile(bootdir + "/vmlinuz-" + version, isodir + "/isolinux/vmlinuz" + index) isDracut = False if os.path.exists(bootdir + "/initramfs-" + version + ".img"): shutil.copyfile(bootdir + "/initramfs-" + version + ".img", isodir + "/isolinux/initrd" + index + ".img") isDracut = True elif os.path.exists(bootdir + "/initrd-" + version + ".img"): shutil.copyfile(bootdir + "/initrd-" + version + ".img", isodir + "/isolinux/initrd" + index + ".img") else: msger.error("No initrd or initramfs found for %s" % (version,)) is_xen = False if os.path.exists(bootdir + "/xen.gz-" + version[:-3]): shutil.copyfile(bootdir + "/xen.gz-" + version[:-3], isodir + "/isolinux/xen" + index + ".gz") is_xen = True return (is_xen, isDracut)
def __set_ksconf(self, ksconf): if not os.path.isfile(ksconf): msger.error('Cannot find ks file: %s' % ksconf) self.__ksconf = ksconf self._parse_kickstart(ksconf)
% (self.name, ex, self.name) msger.error(msg) except cmdln.StopOptionProcessing, ex: return 0 else: # optparser=None means no process for opts self.options, args = None, argv[1:] if not args: return self.emptyline() self.postoptparse() if os.geteuid() != 0 and args[0] != 'help': msger.error('root permission is required to continue, abort') return self.cmd(args) def do_auto(self, subcmd, opts, *args): """${cmd_name}: auto detect image type from magic header Usage: ${name} ${cmd_name} <ksfile> ${cmd_option_list} """ def parse_magic_line(re_str, pstr, ptype='mic'): ptn = re.compile(re_str) m = ptn.match(pstr) if not m or not m.groups():
def _root_confirm(): """Make sure command is called by root There are a lot of commands needed to be run during creating images, some of them must be run with root privilege like mount, kpartx""" if os.geteuid() != 0: msger.error('Root permission is required to continue, abort')
else: argv = argv[:] # don't modify caller's list self.optparser = self.get_optparser() if self.optparser: try: argv = self.preoptparse(argv) self.options, args = self.optparser.parse_args(argv) except cmdln.CmdlnUserError, ex: msg = "%s: %s\nTry '%s help' for info.\n"\ % (self.name, ex, self.name) msger.error(msg) except cmdln.StopOptionProcessing, ex: return 0 else: # optparser=None means no process for opts self.options, args = None, argv[1:] if not args: return self.emptyline() self.postoptparse() if os.geteuid() != 0 and args[0] != 'help': msger.error('root permission is required to continue, abort') return self.cmd(args)
def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir, kernel_dir, rootfs_dir, native_sysroot): """ Called to do the actual content population for a partition i.e. it 'prepares' the partition to be incorporated into the image. In this case, prepare content for an EFI (grub) boot partition. """ if not bootimg_dir: bootimg_dir = get_bitbake_var("HDDDIR") if not bootimg_dir: msger.error("Couldn't find HDDDIR, exiting\n") # just so the result notes display it cr.set_bootimg_dir(bootimg_dir) staging_kernel_dir = kernel_dir staging_data_dir = bootimg_dir hdddir = "%s/hdd/boot" % cr_workdir install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \ (staging_kernel_dir, hdddir) tmp = exec_cmd(install_cmd) shutil.copyfile("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir, "%s/grub.cfg" % cr_workdir) cp_cmd = "cp %s/EFI/BOOT/* %s/EFI/BOOT" % (staging_data_dir, hdddir) exec_cmd(cp_cmd, True) shutil.move("%s/grub.cfg" % cr_workdir, "%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir) du_cmd = "du -bks %s" % hdddir rc, out = exec_cmd(du_cmd) blocks = int(out.split()[0]) extra_blocks = part.get_extra_block_count(blocks) if extra_blocks < BOOTDD_EXTRA_SPACE: extra_blocks = BOOTDD_EXTRA_SPACE blocks += extra_blocks msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \ (extra_blocks, part.mountpoint, blocks)) # Ensure total sectors is an integral number of sectors per # track or mcopy will complain. Sectors are 512 bytes, and we # generate images with 32 sectors per track. This calculation is # done in blocks, thus the mod by 16 instead of 32. blocks += (16 - (blocks % 16)) # dosfs image, created by mkdosfs bootimg = "%s/boot.img" % cr_workdir dosfs_cmd = "mkdosfs -n efi -C %s %d" % (bootimg, blocks) exec_native_cmd(dosfs_cmd, native_sysroot) mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir) exec_native_cmd(mcopy_cmd, native_sysroot) chmod_cmd = "chmod 644 %s" % bootimg exec_cmd(chmod_cmd) du_cmd = "du -Lbms %s" % bootimg rc, out = exec_cmd(du_cmd) bootimg_size = out.split()[0] part.set_size(bootimg_size) part.set_source_file(bootimg)
def postoptparse(self): abspath = lambda pth: os.path.abspath(os.path.expanduser(pth)) if self.options.verbose: msger.set_loglevel('verbose') if self.options.debug: msger.set_loglevel('debug') if self.options.logfile: logfile_abs_path = abspath(self.options.logfile) if os.path.isdir(logfile_abs_path): raise errors.Usage("logfile's path %s should be file" % self.options.logfile) if not os.path.exists(os.path.dirname(logfile_abs_path)): os.makedirs(os.path.dirname(logfile_abs_path)) msger.set_interactive(False) msger.set_logfile(logfile_abs_path) configmgr.create['logfile'] = self.options.logfile if self.options.config: configmgr.reset() configmgr._siteconf = self.options.config if self.options.outdir is not None: configmgr.create['outdir'] = abspath(self.options.outdir) if self.options.cachedir is not None: configmgr.create['cachedir'] = abspath(self.options.cachedir) os.environ['ZYPP_LOCKFILE_ROOT'] = configmgr.create['cachedir'] for cdir in ('outdir', 'cachedir'): if os.path.exists(configmgr.create[cdir]) \ and not os.path.isdir(configmgr.create[cdir]): msger.error('Invalid directory specified: %s' \ % configmgr.create[cdir]) if self.options.local_pkgs_path is not None: if not os.path.exists(self.options.local_pkgs_path): msger.error('Local pkgs directory: \'%s\' not exist' \ % self.options.local_pkgs_path) configmgr.create['local_pkgs_path'] = self.options.local_pkgs_path if self.options.release: configmgr.create['release'] = self.options.release.rstrip('/') if self.options.record_pkgs: configmgr.create['record_pkgs'] = [] for infotype in self.options.record_pkgs.split(','): if infotype not in ('name', 'content', 'license', 'vcs'): raise errors.Usage('Invalid pkg recording: %s, valid ones:' ' "name", "content", "license", "vcs"' \ % infotype) configmgr.create['record_pkgs'].append(infotype) if self.options.arch is not None: supported_arch = sorted(rpmmisc.archPolicies.keys(), reverse=True) if self.options.arch in supported_arch: configmgr.create['arch'] = self.options.arch else: raise errors.Usage('Invalid architecture: "%s".\n' ' Supported architectures are: \n' ' %s' % (self.options.arch, ', '.join(supported_arch))) if self.options.pkgmgr is not None: configmgr.create['pkgmgr'] = self.options.pkgmgr if self.options.runtime: configmgr.set_runtime(self.options.runtime) if self.options.pack_to is not None: configmgr.create['pack_to'] = self.options.pack_to if self.options.copy_kernel: configmgr.create['copy_kernel'] = self.options.copy_kernel if self.options.install_pkgs: configmgr.create['install_pkgs'] = [] for pkgtype in self.options.install_pkgs.split(','): if pkgtype not in ('source', 'debuginfo', 'debugsource'): raise errors.Usage('Invalid parameter specified: "%s", ' 'valid values: source, debuginfo, ' 'debusource' % pkgtype) configmgr.create['install_pkgs'].append(pkgtype) if self.options.enabletmpfs: configmgr.create['enabletmpfs'] = self.options.enabletmpfs if self.options.repourl: for item in self.options.repourl: try: key, val = item.split('=') except: continue configmgr.create['repourl'][key] = val
def postoptparse(self): abspath = lambda pth: os.path.abspath(os.path.expanduser(pth)) if self.options.verbose: msger.set_loglevel('verbose') if self.options.debug: msger.set_loglevel('debug') if self.options.logfile: msger.set_interactive(False) msger.set_logfile(self.options.logfile) configmgr.create['logfile'] = self.options.logfile if self.options.config: configmgr.reset() configmgr._siteconf = self.options.config if self.options.outdir is not None: configmgr.create['outdir'] = abspath(self.options.outdir) if self.options.cachedir is not None: configmgr.create['cachedir'] = abspath(self.options.cachedir) os.environ['ZYPP_LOCKFILE_ROOT'] = configmgr.create['cachedir'] if self.options.local_pkgs_path is not None: if not os.path.exists(self.options.local_pkgs_path): msger.error('Local pkgs directory: \'%s\' not exist' \ % self.options.local_pkgs_path) configmgr.create['local_pkgs_path'] = self.options.local_pkgs_path if self.options.release: configmgr.create['release'] = self.options.release if self.options.record_pkgs: configmgr.create['record_pkgs'] = [] for infotype in self.options.record_pkgs.split(','): if infotype not in ('name', 'url', 'content', 'license'): raise errors.Usage('Invalid pkg recording: %s, valid ones:' ' "name", "url", "content", "license"' \ % infotype) configmgr.create['record_pkgs'].append(infotype) if self.options.arch is not None: supported_arch = sorted(rpmmisc.archPolicies.keys(), reverse=True) if self.options.arch in supported_arch: configmgr.create['arch'] = self.options.arch else: raise errors.Usage('Invalid architecture: "%s".\n' ' Supported architectures are: \n' ' %s' % (self.options.arch, ', '.join(supported_arch))) if self.options.pkgmgr is not None: configmgr.create['pkgmgr'] = self.options.pkgmgr if self.options.runtime: configmgr.create['runtime'] = self.options.runtime if self.options.pack_to is not None: configmgr.create['pack_to'] = self.options.pack_to if self.options.copy_kernel: configmgr.create['copy_kernel'] = self.options.copy_kernel if self.options.tokenmap: tokenmap = {} for pair in self.options.tokenmap.split(","): token, value = pair.split(":",1) tokenmap[token] = value if not "RELEASE" in tokenmap and self.options.release: tokenmap["RELEASE"] = self.options.release if not "BUILD_ID" in tokenmap and "RELEASE" in tokenmap: tokenmap["BUILD_ID"] = tokenmap["RELEASE"] configmgr.create['tokenmap'] = tokenmap
def installPkgs(self, package_objects): if not self.ts: self.__initialize_transaction() # Set filters probfilter = 0 for flag in self.probFilterFlags: probfilter |= flag self.ts.setProbFilter(probfilter) localpkgs = self.localpkgs.keys() for po in package_objects: pkgname = po.name() if pkgname in localpkgs: rpmpath = self.localpkgs[pkgname] else: rpmpath = self.getLocalPkgPath(po) if not os.path.exists(rpmpath): # Maybe it is a local repo baseurl = str(po.repoInfo().baseUrls()[0]) baseurl = baseurl.strip() location = zypp.asKindPackage(po).location() location = str(location.filename()) if baseurl.startswith("file:/"): rpmpath = baseurl[5:] + "/%s" % (location) if not os.path.exists(rpmpath): raise RpmError("Error: %s doesn't exist" % rpmpath) h = rpmmisc.readRpmHeader(self.ts, rpmpath) self.ts.addInstall(h, rpmpath, 'u') unresolved_dependencies = self.ts.check() if not unresolved_dependencies: self.ts.order() cb = rpmmisc.RPMInstallCallback(self.ts) installlogfile = "%s/__catched_stderr.buf" % (self.instroot) # start to catch stderr output from librpm msger.enable_logstderr(installlogfile) errors = self.ts.run(cb.callback, '') if errors is None: pass elif len(errors) == 0: msger.warning('scriptlet or other non-fatal errors occurred ' 'during transaction.') else: for e in errors: msger.warning(e[0]) msger.error('Could not run transaction.') # stop catch msger.disable_logstderr() self.ts.closeDB() self.ts = None else: for pkg, need, needflags, sense, key in unresolved_dependencies: package = '-'.join(pkg) if needflags == rpm.RPMSENSE_LESS: deppkg = ' < '.join(need) elif needflags == rpm.RPMSENSE_EQUAL: deppkg = ' = '.join(need) elif needflags == rpm.RPMSENSE_GREATER: deppkg = ' > '.join(need) else: deppkg = '-'.join(need) if sense == rpm.RPMDEP_SENSE_REQUIRES: msger.warning("[%s] Requires [%s], which is not provided" \ % (package, deppkg)) elif sense == rpm.RPMDEP_SENSE_CONFLICTS: msger.warning("[%s] Conflicts with [%s]" %(package,deppkg)) raise RepoError("Unresolved dependencies, transaction failed.")
def installPkgs(self, package_objects): if not self.ts: self.__initialize_transaction() """ Set filters """ probfilter = 0 for flag in self.probFilterFlags: probfilter |= flag self.ts.setProbFilter(probfilter) localpkgs = self.localpkgs.keys() for po in package_objects: pkgname = po.name() if pkgname in localpkgs: rpmpath = self.localpkgs[pkgname] else: rpmpath = self.getLocalPkgPath(po) if not os.path.exists(rpmpath): """ Maybe it is a local repo """ baseurl = str(po.repoInfo().baseUrls()[0]) baseurl = baseurl.strip() location = zypp.asKindPackage(po).location() location = str(location.filename()) if baseurl.startswith("file:/"): rpmpath = baseurl[5:] + "/%s" % (location) if not os.path.exists(rpmpath): raise RpmError("Error: %s doesn't exist" % rpmpath) h = rpmmisc.readRpmHeader(self.ts, rpmpath) self.ts.addInstall(h, rpmpath, 'u') unresolved_dependencies = self.ts.check() if not unresolved_dependencies: self.ts.order() cb = rpmmisc.RPMInstallCallback(self.ts) installlogfile = "%s/__catched_stderr.buf" % ( self.creator._instroot) msger.enable_logstderr(installlogfile) errors = self.ts.run(cb.callback, '') if errors is None: pass elif len(errors) == 0: msger.warning( 'scriptlet or other non-fatal errors occurred during transaction.' ) else: for e in errors: msger.warning(e[0]) msger.error('Could not run transaction.') msger.disable_logstderr() self.ts.closeDB() self.ts = None else: for pkg, need, needflags, sense, key in unresolved_dependencies: package = '-'.join(pkg) if needflags == rpm.RPMSENSE_LESS: deppkg = ' < '.join(need) elif needflags == rpm.RPMSENSE_EQUAL: deppkg = ' = '.join(need) elif needflags == rpm.RPMSENSE_GREATER: deppkg = ' > '.join(need) else: deppkg = '-'.join(need) if sense == rpm.RPMDEP_SENSE_REQUIRES: msger.warning("[%s] Requires [%s], which is not provided" % (package, deppkg)) elif sense == rpm.RPMDEP_SENSE_CONFLICTS: msger.warning("[%s] Conflicts with [%s]" % (package, deppkg)) raise RepoError("Unresolved dependencies, transaction failed.")
def main(parser, args, argv): """mic create entry point.""" #args is argparser namespace, argv is the input cmd line if args is None: raise errors.Usage("Invalid arguments") if not os.path.exists(args.ksfile): raise errors.CreatorError("Can't find the file: %s" % args.ksfile) if os.geteuid() != 0: msger.error("Root permission is required, abort") try: w = pwd.getpwuid(os.geteuid()) except KeyError: msger.warning("Might fail in compressing stage for undetermined user") abspath = lambda pth: os.path.abspath(os.path.expanduser(pth)) if args.logfile: logfile_abs_path = abspath(args.logfile) if os.path.isdir(logfile_abs_path): raise errors.Usage("logfile's path %s should be file" % args.logfile) configmgr.create['logfile'] = logfile_abs_path configmgr.set_logfile() if args.subcommand == "auto": do_auto(parser, args.ksfile, argv) return if args.interactive: msger.enable_interactive() else: msger.disable_interactive() if args.verbose: msger.set_loglevel('VERBOSE') if args.debug: try: import rpm rpm.setVerbosity(rpm.RPMLOG_NOTICE) except ImportError: pass msger.set_loglevel('DEBUG') #check the imager type createrClass = None for subcmd, klass in pluginmgr.get_plugins('imager').iteritems(): if subcmd == args.subcommand and hasattr(klass, 'do_create'): createrClass = klass if createrClass is None: raise errors.CreatorError("Can't support subcommand %s" % args.subcommand) if args.config: configmgr.reset() configmgr._siteconf = args.config if args.outdir is not None: configmgr.create['outdir'] = abspath(args.outdir) if args.cachedir is not None: configmgr.create['cachedir'] = abspath(args.cachedir) os.environ['ZYPP_LOCKFILE_ROOT'] = configmgr.create['cachedir'] for cdir in ('outdir', 'cachedir'): if os.path.exists(configmgr.create[cdir]) \ and not os.path.isdir(configmgr.create[cdir]): raise errors.Usage('Invalid directory specified: %s' \ % configmgr.create[cdir]) if not os.path.exists(configmgr.create[cdir]): os.makedirs(configmgr.create[cdir]) if os.getenv('SUDO_UID', '') and os.getenv('SUDO_GID', ''): os.chown(configmgr.create[cdir], int(os.getenv('SUDO_UID')), int(os.getenv('SUDO_GID'))) if args.local_pkgs_path is not None: if not os.path.exists(args.local_pkgs_path): raise errors.Usage('Local pkgs directory: \'%s\' not exist' \ % args.local_pkgs_path) configmgr.create['local_pkgs_path'] = args.local_pkgs_path if args.release: configmgr.create['release'] = args.release.rstrip('/') if args.record_pkgs: configmgr.create['record_pkgs'] = [] for infotype in args.record_pkgs.split(','): if infotype not in ('name', 'content', 'license', 'vcs'): raise errors.Usage('Invalid pkg recording: %s, valid ones:' ' "name", "content", "license", "vcs"' \ % infotype) configmgr.create['record_pkgs'].append(infotype) if args.strict_mode: configmgr.create['strict_mode'] = args.strict_mode if args.arch is not None: supported_arch = sorted(rpmmisc.archPolicies.keys(), reverse=True) if args.arch in supported_arch: configmgr.create['arch'] = args.arch else: raise errors.Usage('Invalid architecture: "%s".\n' ' Supported architectures are: \n' ' %s' % (args.arch, ', '.join(supported_arch))) if args.pkgmgr is not None: configmgr.create['pkgmgr'] = args.pkgmgr if args.runtime: configmgr.set_runtime(args.runtime) if args.pack_to is not None: configmgr.create['pack_to'] = args.pack_to if args.copy_kernel: configmgr.create['copy_kernel'] = args.copy_kernel if args.install_pkgs: configmgr.create['install_pkgs'] = [] for pkgtype in args.install_pkgs.split(','): if pkgtype not in ('source', 'debuginfo', 'debugsource'): raise 
errors.Usage('Invalid parameter specified: "%s", ' 'valid values: source, debuginfo, ' 'debusource' % pkgtype) configmgr.create['install_pkgs'].append(pkgtype) if args.check_pkgs: for pkg in args.check_pkgs.split(','): configmgr.create['check_pkgs'].append(pkg) if args.enabletmpfs: configmgr.create['enabletmpfs'] = args.enabletmpfs if args.repourl: for item in args.repourl: try: key, val = item.split('=') except: continue configmgr.create['repourl'][key] = val if args.repo: for optvalue in args.repo: repo = {} for item in optvalue.split(';'): try: key, val = item.split('=') except: continue repo[key.strip()] = val.strip() if 'name' in repo: configmgr.create['extrarepos'][repo['name']] = repo if args.ignore_ksrepo: configmgr.create['ignore_ksrepo'] = args.ignore_ksrepo if args.run_script: configmgr.create['run_script'] = args.run_script if args.tpk_install: configmgr.create['tpk_install'] = args.tpk_install creater = createrClass() creater.do_create(args)