def install_mbr(self, root_dir, mbr_device, size):
    """Copy the syslinux master boot record onto mbr_device.

    root_dir and size are accepted for interface compatibility with the
    other bootloader implementations but are not used here.
    """
    mbr_image = os.path.join(self.image_root, 'boot', 'extlinux', 'mbr.bin')
    if not os.path.exists(mbr_image):
        raise RuntimeError('syslinux MBR not found at "%s"' % mbr_image)
    # notrunc: overwrite the first sectors without truncating the device
    logCall('dd if="%s" of="%s" conv=notrunc' % (mbr_image, mbr_device))
def _mount_dev(self):
    """Context-manager generator: make the chroot's /dev usable by grub2.

    Sets up three things before yielding, and undoes them afterwards:
    bind-mounts /dev, stubs out losetup, and symlinks the root device
    into /dev/disk/by-uuid.  (Presumably wrapped with
    contextlib.contextmanager at the definition site — decorator not
    visible in this chunk.)
    """
    # Temporarily bind-mount the jobslave /dev into the chroot so
    # grub2-install can see the loop device it's targeting.
    logCall("mount -o bind /dev %s/dev" % self.image_root)
    # /etc/grub.d/10_linux tries to find the backing device for loop
    # devices, on the assumption that it's a block device with cryptoloop
    # on top. Replace losetup with a stub while running mkconfig so it
    # keeps the loop device name and all the right UUIDs get emitted.
    losetup = util.joinPaths(self.image_root, '/sbin/losetup')
    os.rename(losetup, losetup + '.bak')
    with open(losetup, 'w') as f_losetup:
        # Stub just echoes its first argument back (the loop device name).
        print >> f_losetup, '#!/bin/sh'
        print >> f_losetup, 'echo "$1"'
    os.chmod(losetup, 0755)
    # In order for the root device to be detected as a FS UUID and not
    # /dev/loop0 there needs to be a link in /dev/disk/by-uuid, which
    # doesn't happen with the jobmaster's containerized environment.
    link_path = None
    if self.root_device.uuid:
        link_path = util.joinPaths(self.image_root, '/dev/disk/by-uuid',
                self.root_device.uuid)
        util.mkdirChain(os.path.dirname(link_path))
        util.removeIfExists(link_path)
        os.symlink(self.root_device.devPath, link_path)
    try:
        yield
    finally:
        # Best-effort cleanup: restore the real losetup and unmount /dev.
        # NOTE(review): the bare except deliberately swallows all cleanup
        # errors, including a failed umount — confirm that is intended.
        try:
            if link_path:
                os.unlink(link_path)
            os.rename(losetup + '.bak', losetup)
            logCall("umount %s/dev" % self.image_root)
        except:
            pass
def install_mbr(self, root_dir, mbr_device, size):
    """
    Install grub into the MBR.
    """
    if not os.path.exists(util.joinPaths(self.image_root, self.grub_path)):
        log.info("grub not found. skipping setup.")
        return
    # Assumed:
    # * raw hdd image at mbr_device is bind mounted at root_dir/disk.img
    # * The size requested is an integer multiple of the cylinder size
    cylSize = self.geometry.bytesPerCylinder
    assert not (size % cylSize), "The size passed in here must be cylinder aligned"
    cylinders = size / cylSize
    # IMPORTANT: Use "rootnoverify" here, since some versions of grub
    # have trouble test-mounting the partition inside disk1.img (RBL-8193)
    script = '\n'.join([
        "device (hd0) /disk.img",
        "geometry (hd0) %d %d %d",
        "rootnoverify (hd0,0)",
        "setup (hd0)",
    ]) % (cylinders, self.geometry.heads, self.geometry.sectors)
    logCall('echo -e "%s" | '
            'chroot %s sh -c "%s --no-floppy --batch"'
            % (script, root_dir, self.grub_path))
def zipArchive(self, source, dest=None):
    """Recursively zip the directory *source*; return the archive path.

    If *dest* is not given, the archive is written next to *source* with
    a ".zip" suffix.
    """
    assert os.path.isdir(source)
    dest = dest or source + ".zip"
    # Run zip from the parent directory so archive paths are relative.
    parent, base = os.path.split(source)
    logCall(["/usr/bin/zip", "-r", dest, base], cwd=parent)
    return dest
def install(self):
    """Generate grub2 configs"""
    cfgname = '/boot/grub2/grub.cfg'
    cfgDir = os.path.dirname(util.joinPaths(self.image_root, cfgname))
    util.mkdirChain(cfgDir)
    # grub2-mkconfig needs a functional /dev inside the chroot.
    with self._mount_dev():
        logCall('chroot %s grub2-mkconfig -o %s'
                % (self.image_root, cfgname))
    dracut.DracutGenerator(self.image_root).generateFromGrub2()
def create(self):
    """Allocate the sparse backing file for the image via dd seek."""
    # NB: blocksize is unrelated to the one in constants.py, and is
    # completely arbitrary.
    blocksize = 512
    last_block = (self.totalSize - 1) / blocksize
    if last_block < 0:
        last_block = 0
    logCall('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' % (
        self.image, last_block, blocksize))
def install_mbr(self, root_dir, mbr_device, size):
    """Install grub2 into the MBR."""
    # Neither grub2-mkconfig nor grub2-install correctly detect the
    # partitioning type because everything is on loop mounts. Hence we
    # force it to load the right one(s).
    with self._mount_dev():
        cmd = ("chroot %s /usr/sbin/grub2-install /disk.img "
               "--modules=part_msdos" % self.image_root)
        logCall(cmd)
def makeBlankFS(self, image, fsType, size, fsLabel=None):
    """Create a sparse file of *size* bytes, format it, return the
    Filesystem object (not yet mounted)."""
    if os.path.exists(image):
        util.rmtree(image)
    util.mkdirChain(os.path.split(image)[0])
    # Seek to the last 4k block and write one block to extend the file.
    lastBlock = (size / 4096) - 1
    logCall("dd if=/dev/zero of=%s count=1 seek=%d bs=4096"
            % (image, lastBlock))
    fs = bootable_image.Filesystem(image, fsType, size, fsLabel=fsLabel)
    fs.format()
    return fs
def write(self):
    """Build the hard-disk image, split it into gzipped 1 GB chunks, and
    package everything as a Citrix XenServer XVA tar archive."""
    # Output setup
    topDir = os.path.join(self.workDir, 'ova_base')
    util.mkdirChain(topDir)
    outputDir = os.path.join(constants.finishedDir, self.UUID)
    util.mkdirChain(outputDir)
    deliverable = os.path.join(outputDir, self.basefilename + self.suffix)
    # Build the filesystem images
    image_path = os.path.join(self.workDir, 'hdimage')
    disk = self.makeHDImage(image_path)
    # Open a manifest for tar so that it writes out files in the optimal
    # order.
    manifest_path = os.path.join(self.workDir, 'files')
    manifest = open(manifest_path, 'w')
    # Write the ova.xml file
    ovaName = 'ova.xml'
    ovaPath = os.path.join(topDir, ovaName)
    self.createXVA(ovaPath, disk.totalSize)
    print >>manifest, ovaName
    # Split the HD image into 1GB (not GiB) chunks
    label = 'xvda'
    chunk_dir = os.path.join(topDir, label)
    chunkPrefix = os.path.join(chunk_dir, 'chunk-')
    util.mkdirChain(os.path.split(chunkPrefix)[0])
    self.status('Splitting hard disk image')
    infile = open(image_path, 'rb')
    n = 0
    # tocopy counts remaining bytes; copyfileobj returns bytes copied
    # per chunk (capped at sizeLimit), so it reaches exactly 0 at EOF.
    tocopy = os.stat(image_path).st_size
    while True:
        chunkname = '%s%04d.gz' % (chunkPrefix, n)
        outfile = gzip.GzipFile(chunkname, 'wb')
        tocopy -= util.copyfileobj(infile, outfile, sizeLimit=1000000000)
        outfile.close()
        print >>manifest, chunkname
        if not tocopy:
            break
        n += 1
    infile.close()
    # Delete the FS image to free up temp space
    os.unlink(image_path)
    # Create XVA file
    manifest.close()
    self.status('Creating XVA Image')
    # -T: take the member list (and ordering) from the manifest file.
    logCall('tar -cv -f "%s" -C "%s" -T "%s"' % \
            (deliverable, topDir, manifest_path))
    self.outputFileList.append((deliverable, 'Citrix XenServer (TM) Image'),)
    self.postOutput(self.outputFileList)
def generateOne(self, kver, rdPath):
    """Rebuild the initrd at *rdPath* for kernel version *kver* using
    mkinitrd inside the image chroot."""
    log.info("Rebuilding initrd for kernel %s", kver)
    cmd = ['/usr/sbin/chroot', self.image_root,
           '/sbin/mkinitrd', '-f', '--allow-missing']
    cmd.extend('--with=' + driver for driver in self.MODULES)
    if is_RH(self.image_root):
        # Red Hat guests need the Xen block driver preloaded.
        cmd.append('--preload=xenblk')
    cmd.extend([rdPath, kver])
    logCall(cmd)
def generateOne(self, kver, rdPath):
    """Build the initrd at *rdPath* for kernel *kver* using dracut
    inside the image chroot.

    The btrfs dracut module is dropped when the image has no
    /sbin/btrfs binary.
    """
    # Work on a copy: the original code called discard() directly on
    # self.DRACUT_MODULES, permanently mutating what appears to be a
    # class-level set and so disabling btrfs for every later image/kernel
    # processed in the same process.
    modules = set(self.DRACUT_MODULES)
    if not os.path.exists(util.joinPaths(self.image_root, '/sbin/btrfs')):
        modules.discard('btrfs')
    args = ['/usr/sbin/chroot', self.image_root,
            '/sbin/dracut', '--force',
            '--add=' + ' '.join(modules),
            '--add-drivers=' + ' '.join(self.MODULES),
            ]
    args.extend([rdPath, kver])
    logCall(args)
def gzip(self, source, dest=None):
    """Compress *source* (a file or a directory) and return the output
    path.  Directories become .tar.gz, plain files become .gz."""
    if os.path.isdir(source):
        dest = dest or source + ".tar.gz"
        parDir, targetDir = os.path.split(source)
        logCall("tar -C %s -cv %s | gzip > %s" % (parDir, targetDir, dest))
        return dest
    dest = dest or source + ".gz"
    logCall("gzip -c %s > %s" % (source, dest))
    return dest
def loopAttach(image, offset=None, size=None):
    """Attach *image* to the first free loop device; return the device path.

    NOTE(review): there is a race between querying `losetup -f` and the
    attach below — another process could grab the device in between.
    """
    pipe = os.popen("losetup -f")
    dev = pipe.read().strip()
    pipe.close()
    cmd = ["losetup"]
    if offset:
        cmd.extend(["-o", str(offset)])
    if size:
        cmd.extend(["--sizelimit", str(size)])
    cmd.extend([dev, image])
    logCall(cmd)
    return dev
def writeLayer(self, basePath, layersDir, imgSpec, withDeletions=False):
    """Tar up *basePath* as a docker layer under *layersDir* and write
    its VERSION and json manifest files.  Returns the layer id.

    withDeletions: temporarily convert overlayfs whiteouts to docker
    (.wh.*) form while the tarball is created.
    """
    layerId = imgSpec.dockerImageId
    if imgSpec.parent:
        parentLayerId = imgSpec.parent.dockerImageId
    else:
        parentLayerId = None
    layerDir = os.path.join(layersDir, layerId)
    util.mkdirChain(layerDir)
    tarball = os.path.join(layerDir, 'layer.tar')
    self.status('Creating layer')
    if withDeletions:
        ovlfs2docker(basePath)
    logCall('tar -C %s -cpPsf %s .' % (basePath, tarball))
    if withDeletions:
        # Restore overlayfs whiteout form after tarring.
        docker2ovlfs(basePath)
    file(os.path.join(layerDir, "VERSION"), "w").write("1.0")
    # XXX for some reason the layer sizes reported by docker are smaller
    # than the tar file
    st = os.stat(tarball)
    imgSpec.layerSize = layerSize = st.st_size
    layerCtime = datetime.datetime.utcfromtimestamp(st.st_ctime).isoformat() + 'Z'
    if imgSpec.nvf.flavor.satisfies(deps.parseFlavor('is: x86_64')):
        arch = 'amd64'
    else:
        arch = '386'
    config = Manifest(Env=[ "PATH=/usr/sbin:/usr/bin:/sbin:/bin" ])
    self.status('Creating manifest')
    manifest = imgSpec._manifest = Manifest(id=layerId, Size=layerSize,
            Comment="Created by Conary command: conary update '%s'" %
                imgSpec.nvf.asString(),
            created=layerCtime,
            config=config,
            os='linux',
            docker_version='1.8.1',
            # The checksum isn't used by docker 1.7+, so let's not
            # generate it anymore
            # checksum=TarSum.checksum(tarball, formatted=True),
            Architecture=arch,
            comment="Created by the SAS App Engine",
            container_config=dict(),
            )
    if parentLayerId:
        manifest['parent'] = parentLayerId
    if imgSpec._parsedDockerfile:
        imgSpec._parsedDockerfile.toManifest(manifest)
    if imgSpec.parent:
        # Parent manifest fields fill in anything this layer didn't set.
        manifest.merge(imgSpec.parent._manifest)
    mfile = file(os.path.join(layerDir, "json"), "w")
    manifest.save(mfile)
    return layerId
def mountOverlayFs(cls, unpackDir, thisLayerId, prevMount):
    """Mount an overlayfs with *prevMount* as the lower layer and this
    layer's directory as the upper; return the merged mount point."""
    layerRoot = os.path.join(unpackDir, thisLayerId)
    ovlfsDir = layerRoot + '.ovffs'
    ovlWorkDir = layerRoot + '.work'
    util.mkdirChain(ovlfsDir)
    util.mkdirChain(ovlWorkDir)
    log.debug("Mounting layer %s on top of %s",
              thisLayerId, cls._path(prevMount))
    mountCmd = ["mount", "-t", "overlay", os.path.basename(ovlfsDir),
                ovlfsDir,
                "-olowerdir={0},upperdir={1},workdir={2}".format(
                    prevMount, layerRoot, ovlWorkDir)]
    logCall(mountCmd)
    return ovlfsDir
def mount(self, mountPoint):
    """Attach the backing device and mount it at *mountPoint*.

    No-op for swap/none/unallocated pseudo-filesystems.
    """
    if self.fsType in ("swap", "none", "unallocated"):
        return
    self.attach()
    mountOpts = []
    if self.fsType in ("ext3", "ext4"):
        # Turn off data integrity during install
        mountOpts.append("data=writeback,barrier=0")
    optStr = "-o %s" % (",".join(mountOpts)) if mountOpts else ""
    logCall("mount -n -t %s %s %s %s"
            % (self.fsType, self.devPath, mountPoint, optStr))
    self.mounted = True
    self.mountPoint = mountPoint
def retrieveTemplates(self):
    """Ask the jobmaster for anaconda-templates, poll until they are
    built, unpack them, and return (unified-template-dir,
    netclient_protocol_version)."""
    self.status("Retrieving ISO template")
    log.info("requesting anaconda-templates for " + self.arch)
    tmpDir = tempfile.mkdtemp(dir=constants.tmpDir)
    cclient = self.getConaryClient( \
            tmpDir, getArchFlavor(self.baseFlavor).freeze())
    cclient.cfg.installLabelPath.append(
            versions.Label(constants.templatesLabel))
    uJob = self._getUpdateJob(cclient, 'anaconda-templates')
    if not uJob:
        raise RuntimeError, "Failed to find anaconda-templates"
    kernels = self.findImageSubtrove('kernel')
    # Pick the lowest-sorted kernel trove, or None if there are none.
    kernelTup = kernels and sorted(kernels)[0] or None
    params = {
            'templateTup': self._getNVF(uJob),
            'kernelTup': kernelTup,
            }
    # Request parameters travel as a base64-encoded pickle in the URL.
    params = base64.urlsafe_b64encode(cPickle.dumps(params, 2))
    url = '%stemplates/getTemplate?p=%s' % (self.cfg.masterUrl, params)
    noStart = False
    path = None
    # Poll the master until the template build is DONE.  After the first
    # request, nostart=1 is appended so re-polls don't kick off another
    # build.  NOTE(review): there is no upper bound on this loop.
    while True:
        conn = urllib2.urlopen(url)
        response = conn.read()
        conn.close()
        status, path = response.split()[:2]
        if status == 'DONE':
            break
        elif status == 'NOT_FOUND':
            raise RuntimeError("Failed to request templates. "
                    "Check the jobmaster logfile.")
        if not noStart:
            noStart = True
            url += '&nostart=1'
        time.sleep(5)
    templatePath = os.path.join(self.cfg.templateCache, path)
    templateDir = tempfile.mkdtemp('templates-')
    logCall(['/bin/tar', '-xf', templatePath, '-C', templateDir])
    metadata = cPickle.load(open(templatePath + '.metadata', 'rb'))
    ncpv = metadata['netclient_protocol_version']
    return os.path.join(templateDir, 'unified'), ncpv
def createVMDK(hdImage, outfile, size, geometry, adapter, hwVersion,
        streaming=False):
    """Convert the raw disk image *hdImage* to a VMDK at *outfile* via
    the raw2vmdk helper.  *streaming* selects stream-optimized output."""
    cmd = ['/usr/bin/raw2vmdk',
           '-C', str(geometry.cylindersRequired(size)),
           '-H', str(geometry.heads),
           '-S', str(geometry.sectors),
           '-A', adapter,
           '-V', str(hwVersion)]
    if streaming:
        cmd.append('-s')
    cmd.extend([hdImage, outfile])
    logCall(cmd)
def _extractLayer(self, unpackDir, tarFile):
    """Extract a docker layer tarball into *unpackDir*, honoring docker
    whiteout markers.

    A member named ``.wh.<name>`` with mode 0 means "<name> was deleted
    in this layer": the corresponding path already present in unpackDir
    is removed, and the marker file itself is deleted after extraction.
    """
    util.mkdirChain(unpackDir)
    # Walk the files in the tar file, looking for .wh.* whiteouts.
    toDeleteAfter = set()
    tf = tarfile.open(tarFile)
    try:
        for tinfo in tf:
            bname = os.path.basename(tinfo.name)
            if bname.startswith('.wh.') and tinfo.mode == 0:
                util.rmtree(util.joinPaths(unpackDir,
                        os.path.dirname(tinfo.name), bname[4:]),
                        ignore_errors=True)
                toDeleteAfter.add(util.joinPaths(unpackDir, tinfo.name))
    finally:
        # The handle was previously leaked; close it before extracting.
        tf.close()
    logCall(["tar", "-C", unpackDir, "-xf", tarFile])
    for fname in toDeleteAfter:
        util.removeIfExists(fname)
def _package(self, outputDir, layersDir, imgSpec):
    """Write the repositories index and archive it with every layer in
    the image's ancestry chain into a single .tar.gz; return its path."""
    reposData = {}
    toArchive = ['repositories']
    img = imgSpec
    while img:
        toArchive.append(img.dockerImageId)
        for name, tagToId in img.tags.items():
            reposData.setdefault(name, {}).update(tagToId)
        img = img.parent
    json.dump(reposData,
            file(os.path.join(layersDir, 'repositories'), "w"))
    self.status('Packaging layers')
    baseName = self.sanitizeBaseFileName(imgSpec.name + '.tar.gz')
    tarball = os.path.join(outputDir, baseName)
    logCall('tar -C %s -cpPsS --to-stdout %s | gzip > %s'
            % (layersDir, ' '.join(toArchive), tarball))
    return tarball
def write(self):
    """Install the file tree and publish it as a gzipped tarball."""
    self.swapSize = self.getBuildData("swapSize") * 1048576
    basePath = os.path.join(self.workDir, self.basefilename)
    util.mkdirChain(basePath)
    outputDir = os.path.join(constants.finishedDir, self.UUID)
    util.mkdirChain(outputDir)
    tarball = os.path.join(outputDir, self.basefilename + '.tar.gz')
    self.installFileTree(basePath, no_mbr=True)
    # Report how much space the installed tree actually consumes.
    stats = os.statvfs(basePath)
    installedSize = (stats.f_blocks - stats.f_bavail) * stats.f_frsize
    log.info("Installed size: %.1f MB", installedSize / 1e6)
    self.status('Creating tarball')
    logCall('tar -C %s -cpPsS --to-stdout ./ | gzip > %s'
            % (basePath, tarball))
    self.postOutput(((tarball, 'Tar File'),),
            attributes={'installed_size': installedSize})
def umountChrootMounts(self, dest):
    """Unmount every stray mount point under *dest* (the chroot root).

    Mount points that this object manages itself (listed in
    self.mountDict) are skipped; everything else below the chroot is
    unmounted, deepest paths first.
    """
    mntlist = set()
    # The original code left /proc/mounts open; close it deterministically.
    mounts = open("/proc/mounts", "r")
    try:
        for line in mounts:
            mntpoint = line.strip().split(" ")[1]
            if not mntpoint.startswith(dest):
                continue
            # Ignore actual managed filesystem mounts
            name = mntpoint[len(dest):]
            if not name:
                name = "/"
            if name in self.mountDict:
                continue
            mntlist.add(mntpoint)
    finally:
        mounts.close()
    # unmount in reverse sorted order to get /foo/bar before /foo
    for mntpoint in sorted(mntlist, reverse=True):
        logCall("umount -n %s" % mntpoint)
def umount(self):
    """Unmount this filesystem, retrying a few times on failure, then
    detach the backing device.

    Raises RuntimeError when the mount point cannot be freed; the files
    still held open are logged first to aid debugging.
    """
    if self.fsType in ("swap", "none", "unallocated"):
        return
    if not self.devPath or not self.mounted:
        return
    try:
        logCall("umount -n %s" % self.mountPoint)
    except RuntimeError:
        log.warning("Unmount of %s from %s failed - trying again",
                self.devPath, self.mountPoint)
        succeeded = False
        for _attempt in range(5):
            logCall("sync")
            time.sleep(1)
            try:
                logCall("umount -n %s" % self.mountPoint)
            except RuntimeError:
                continue
            succeeded = True
            break
        if not succeeded:
            log.error("Unmount failed because these files "
                    "were still open:")
            for path in sorted(helperfuncs.getMountedFiles(self.mountPoint)):
                log.error(path)
            raise RuntimeError("Failed to unmount %s" % self.devPath)
    self.detach()
    self.mounted = False
def _downloadParentImage(self, imgSpec, unpackDir, layersDir):
    """Download the parent docker image tarball, unpack its layers under
    *layersDir*, materialize the whole ancestry tree at a single unpack
    directory, and record per-layer manifests, sizes, and tags."""
    log.debug('Downloading parent image %s', imgSpec.dockerImageId)
    self.status('Downloading parent image')
    resp = self.response.getImage(imgSpec.url)
    tmpf = tempfile.TemporaryFile(dir=self.workDir)
    util.copyfileobj(resp, tmpf)
    tmpf.seek(0)
    self.status('Unpacking parent image')
    errcode, stdout, stderr = logCall(["tar", "-C", layersDir, "-zxf", "-"],
            stdin=tmpf)
    tmpf.close()
    parentImageDir = os.path.join(unpackDir, imgSpec.dockerImageId)
    log.debug('Unpacking parent image as %s', self._path(parentImageDir))
    layerFilesStack = []
    # Walk up the ancestry, collecting each layer's tarball and metadata.
    layer = imgSpec
    while layer is not None:
        layerId = layer.dockerImageId
        layer._unpackDir = parentImageDir
        layerFilesStack.append(
                (layerId, os.path.join(layersDir, layerId, "layer.tar")))
        layer.layerSize = os.stat(layerFilesStack[-1][1]).st_size
        # Read the layer's json metadata once.  The original code loaded
        # the same file a second time just to look up 'parent'.
        mfData = json.load(file(os.path.join(layersDir, layerId, 'json')))
        layer._manifest = Manifest(mfData)
        parent = mfData.get('parent')
        if parent is not None and not layer.parent:
            layer.parent = ImageSpec(dockerImageId=parent)
            layer.parent.children.append(layer)
        layer = layer.parent
    # We now extract all layers, top-to-bottom, in the same directory.
    while layerFilesStack:
        layerId, layerFile = layerFilesStack.pop()
        log.debug(" Extracting parent layer %s on %s", layerId,
                self._path(parentImageDir))
        self._extractLayer(parentImageDir, layerFile)
    # Map image ids to their (name, tag) pairs from the repositories file.
    idToNameTags = {}
    reposFile = os.path.join(layersDir, 'repositories')
    if os.path.isfile(reposFile):
        repos = json.load(file(reposFile))
        for name, tagToId in repos.iteritems():
            for tag, imgid in tagToId.iteritems():
                idToNameTags.setdefault(imgid, set()).add((name, tag))
    # Walk list again, to compute tags
    layer = imgSpec
    while layer is not None:
        layerId = layer.dockerImageId
        layer.updateNamesAndTags(idToNameTags.get(layerId))
        layer = layer.parent
def runTagScripts(self):
    """Rewrite and execute the Conary tag scripts inside the image chroot.

    The generated conary-tag-script is rebuilt with a single leading
    /sbin/ldconfig (dropping any ldconfig lines it already contained),
    then each tag script is run under ``sh -x`` with output captured to
    <script>.output.  On failure, both the script and its output are
    logged before the original exception is re-raised.
    """
    dest = self.root
    self.status("Running tag scripts")
    outScript = os.path.join(dest, "root", "conary-tag-script")
    inScript = outScript + ".in"
    outs = open(outScript, "wt")
    ins = open(inScript, "rt")
    # Run ldconfig exactly once, first; strip duplicates from the input.
    outs.write("/sbin/ldconfig\n")
    for line in ins:
        if not line.startswith("/sbin/ldconfig"):
            outs.write(line)
    ins.close()
    outs.close()
    os.unlink(os.path.join(dest, "root", "conary-tag-script.in"))
    for tagScript in ("conary-tag-script", "conary-tag-script-kernel"):
        # tagPath is the chroot-relative path (starts at /root/...).
        tagPath = util.joinPaths(os.path.sep, "root", tagScript)
        if not os.path.exists(util.joinPaths(dest, tagPath)):
            continue
        try:
            logCall("chroot %s sh -c 'sh -x %s > %s 2>&1'"
                    % (dest, tagPath, tagPath + ".output"))
        except Exception, e:
            # Capture exc info so the original traceback can be re-raised
            # after the diagnostics below (which may themselves fail).
            exc, e, bt = sys.exc_info()
            try:
                log.warning("error executing %s: %s", tagPath, e)
                log.warning("script contents:")
                f = file(util.joinPaths(dest, tagPath), "r")
                log.warning("----------------\n" + f.read())
                f.close()
                log.warning("script output:")
                f = file(util.joinPaths(dest, tagPath + ".output"), "r")
                log.warning("----------------\n" + f.read())
                f.close()
            except:
                log.warning("error recording tag handler output")
            # Python 2 three-argument raise: preserve the traceback.
            raise exc, e, bt
def format(self): self.attach() try: if self.fsType in ("ext3", "ext4"): cmd = ["mkfs." + self.fsType, "-F", "-b", "4096", "-L", self.fsLabel, self.devPath] if self.size: cmd.append(str(self.size / 4096)) logCall(cmd) logCall(["tune2fs", "-i", "0", "-c", "0", self.devPath]) elif self.fsType == "xfs": logCall(["mkfs.xfs", "-L", self.fsLabel, self.devPath]) elif self.fsType == "swap": logCall(["mkswap", "-L", self.fsLabel, self.devPath]) elif self.fsType in ("none", "unallocated"): pass else: raise RuntimeError, "Invalid filesystem type: %s" % self.fsType self._get_uuid() finally: self.detach()
def install(self):
    """Write extlinux/bootman configs, build the initrd, and (when
    do_install is set) install extlinux into the boot sector."""
    # Create extlinux configs
    bootloader.writeBootmanConfigs(self)
    logCall('chroot "%s" /sbin/bootman' % self.image_root)
    self.mkinitrd()
    if not self.do_install:
        return
    # Bind-mount /dev so extlinux can write to the boot sector
    image_dev = os.path.join(self.image_root, 'dev')
    logCall('mount -n --bind /dev "%s"' % image_dev)
    try:
        util.mkdirChain(os.path.join(self.image_root, 'boot', 'extlinux'))
        logCall('chroot "%s" /sbin/extlinux --install '
                '--heads %s --sectors %s /boot/extlinux/'
                % (self.image_root, self.geometry.heads,
                    self.geometry.sectors))
    finally:
        logCall('umount -n "%s"' % image_dev)
def createOva(self):
    """
    Create a new tar archive @ self.ovaPath.
    The ova is a tar consisting of the ovf and the disk file(s).
    """
    self.ovaFileName = self.sanitizedImageName + '.' + constants.OVA_EXTENSION
    self.ovaPath = os.path.join(self.outputDir, self.ovaFileName)
    # Member order matters: ovf first (-cv creates), then manifest and
    # disk appended (-rv).
    members = [
        ('-cv', self.workingDir, self.ovfFileName),
        ('-rv', self.workingDir, self.manifestFileName),
        ('-rv', self.outputDir, self.diskFileName),
    ]
    for mode, srcDir, fileName in members:
        logCall('tar -C %s %s %s -f %s'
                % (srcDir, mode, fileName, self.ovaPath))
    return self.ovaPath
def loopDetach(dev):
    """Detach loop device *dev*; failures are ignored (best-effort
    cleanup, e.g. when the device was never attached)."""
    logCall(["losetup", "-d", dev], ignoreErrors=True)
def writeChild(self, unpackDir, layersDir, imgSpec):
    """Build a child docker image layer on top of its (possibly remote)
    parent, using overlayfs mounts to assemble the ancestry, then write
    the resulting layer tarball via writeLayer."""
    if imgSpec.parent.url:
        pImageId = imgSpec.parent.dockerImageId
        parentLayerDir = os.path.join(layersDir, pImageId)
        if not os.path.exists(parentLayerDir):
            # First child image in this hierarchy to be built
            self._downloadParentImage(imgSpec.parent, unpackDir, layersDir)
        else:
            imgSpec.parent._unpackDir = os.path.join(unpackDir, pImageId)
            assert os.path.isdir(imgSpec.parent._unpackDir)
            self._setLayerSize(imgSpec.parent, layersDir)
    # At this point, the parent layer should be on the filesystem, and
    # should have been unpacked
    dockerImageId = imgSpec.dockerImageId = self.getImageId(imgSpec.nvf)
    log.debug("Building child image %s, layer %s", imgSpec.name,
            imgSpec.dockerImageId)
    thisLayerContent = os.path.join(unpackDir, dockerImageId)
    util.mkdirChain(thisLayerContent)
    # Collect the chain from this image up to the nearest ancestor that
    # is already unpacked on disk.
    imgHierarchy = [imgSpec]
    img = imgSpec.parent
    while img:
        imgHierarchy.append(img)
        if img._unpackDir is not None:
            break
        img = img.parent
    mountResources = []
    # The base doesn't need to be unioned
    prevMount = imgHierarchy.pop()._unpackDir
    # Replay the chain top-down: intermediate layers are extracted in
    # place (or onto the shared overlay), the final layer gets its own
    # overlayfs mount.
    while imgHierarchy:
        thisLayer = imgHierarchy.pop()
        thisLayerId = thisLayer.dockerImageId
        if thisLayerId != imgSpec.dockerImageId:
            # Intermediate image
            layerFile = os.path.join(layersDir, thisLayerId, 'layer.tar')
            if thisLayer._lastChild:
                # Uncompress this image onto the parent
                log.debug("Extracting layer %s on %s", thisLayerId,
                        self._path(prevMount))
                self._extractLayer(prevMount, layerFile)
                thisLayer._unpackDir = prevMount
                continue
            # Did we already set up an overlay?
            if mountResources:
                # Uncompress this layer on top of the overlay
                prevMount = mountResources[0]
                log.debug("Extracting layer %s on %s", thisLayerId,
                        self._path(prevMount))
                self._extractLayer(prevMount, layerFile)
                continue
        # If no other overlay was set up, or it's the current image, set
        # one up
        ovlfsDir = self.mountOverlayFs(unpackDir, thisLayerId, prevMount)
        mountResources.append(ovlfsDir)
        prevMount = ovlfsDir
    basePath = mountResources[-1]
    log.debug("Installing %s into %s", imgSpec.nvf.asString(),
            self._path(basePath))
    self.installFilesInExistingTree(basePath, imgSpec.nvf)
    # Tear down the overlay mounts before tarring the layer.
    while mountResources:
        ovldir = mountResources.pop()
        logCall(["umount", "-f", ovldir])
    thisLayerDir = os.path.join(unpackDir, imgSpec.dockerImageId)
    self.writeLayer(thisLayerDir, layersDir, imgSpec, withDeletions=True)