Example #1
    def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
        self.log.debug("builder_should_create_target_image called for Docker plugin - doing all our work here then stopping the process")
        tdlobj = oz.TDL.TDL(xmlstring=template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])
        # At this point our input base_image is available as builder.base_image.data
        # We simply mount it up in libguestfs and tar out the results as builder.target_image.data
        wrap_metadata = parameter_cast_to_bool(parameters.get('create_docker_metadata', True))
        compress_type = parameters.get('compress', None)
        if compress_type:
            if compress_type in self.compress_commands:
                compress_command = self.compress_commands[compress_type]
            else:
                raise Exception("Passed unknown compression type (%s) for Docker plugin" % (compress_type))
        else:
            compress_command = None
        guestfs_handle = launch_inspect_and_mount(builder.base_image.data, readonly = True)
        storagedir = os.path.dirname(builder.target_image.data)

        # guestfs lets us mount locally via the API, which is cool, but requires that
        # we call a blocking function to activate the mount, which requires a thread
        # We also need a temp dir to mount it to - do our best to clean up when things
        # go wrong
        tempdir = None
        fuse_thread = None
        try:
            tempdir = tempfile.mkdtemp(dir=storagedir)
            self.log.debug("Mounting input image locally at (%s)" % (tempdir))
            guestfs_handle.mount_local(tempdir)
            def _run_guestmount(g):
                g.mount_local_run()
            self.log.debug("Launching mount_local_run thread")
            fuse_thread = threading.Thread(group=None, target=_run_guestmount, args=(guestfs_handle,))
            fuse_thread.start()
            self.log.debug("Creating tar of entire image")
            # Use acls and xattrs to ensure SELinux data is not lost
            tarcmd = [ 'tar',  '-cf', builder.target_image.data, '-C', tempdir, '--acls', '--xattrs' ]
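            # Note: --acls and --xattrs are GNU tar options; very old tar releases do not
            # understand them and the check_call below will fail on such hosts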
            # User may pass in a comma separated list of excludes to override this
            # Default to ./etc/fstab as many people have complained this does not belong in Docker images
            tar_excludes = parameters.get('tar_excludes', './etc/fstab').split(',')
            for exclude in tar_excludes:
                tarcmd.append('--exclude=%s' % (exclude.strip()))
            tarcmd.append('./')
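            # Illustrative final command with hypothetical paths and the default exclude:
            #   ['tar', '-cf', '/storage/target_image', '-C', '/storage/tmpXXXXXX',
            #    '--acls', '--xattrs', '--exclude=./etc/fstab', './']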
            self.log.debug("Command: %s" % (str(tarcmd)))
            subprocess.check_call(tarcmd)
            if wrap_metadata:
                self.log.debug("Estimating size of tar contents to include in Docker metadata")
                size = 0
                for root, dirs, files in os.walk(tempdir):
                    for name in files:
                        fp = os.path.join(root,name)
                        if os.path.isfile(fp) and not os.path.islink(fp):
                            size += os.path.getsize(fp)
                self.log.debug("Total real file content size (%d)" % (size))
        except Exception as e:
            self.log.exception(e)
            raise
Example #2
    def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
        self.log.debug("builder_should_create_target_image called for Docker plugin - doing all our work here then stopping the process")
        tdlobj = oz.TDL.TDL(xmlstring=template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])
        # At this point our input base_image is available as builder.base_image.data
        # We simply mount it up in libguestfs and tar out the results as builder.target_image.data
        wrap_metadata = parameter_cast_to_bool(parameters.get('create_docker_metadata', True))
        compress_type = parameters.get('compress', None)
        if compress_type:
            if compress_type in self.compress_commands:
                compress_command = self.compress_commands[compress_type]
            else:
                raise Exception("Passed unknown compression type (%s) for Docker plugin" % (compress_type))
        else:
            compress_command = None
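        # A minimal sketch (assumed contents) of the self.compress_commands mapping used
        # above; each value is a shell template taking (input, output), which matches the
        # "compress_command % (rawimage, compimage)" call near the end of this method:
        #   self.compress_commands = {'gzip': 'gzip -c %s > %s',
        #                             'xz':   'xz --stdout %s > %s'}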
        guestfs_handle = launch_inspect_and_mount(builder.base_image.data, readonly = True)
        storagedir = os.path.dirname(builder.target_image.data)

        # guestfs lets us mount locally via the API, which is cool, but requires that
        # we call a blocking function to activate the mount, which requires a thread
        # We also need a temp dir to mount it to - do our best to clean up when things
        # go wrong
        tempdir = None
        fuse_thread = None
        try:
            tempdir = tempfile.mkdtemp(dir=storagedir)
            self.log.debug("Mounting input image locally at (%s)" % (tempdir))
            guestfs_handle.mount_local(tempdir)
            def _run_guestmount(g):
                g.mount_local_run()
            self.log.debug("Launching mount_local_run thread")
            fuse_thread = threading.Thread(group=None, target=_run_guestmount, args=(guestfs_handle,))
            fuse_thread.start()
            self.log.debug("Creating tar of entire image")
            # NOTE - we used to capture xattrs here but have reverted the change for now
            #        as SELinux xattrs break things in unexpected ways and the tar feature
            #        to allow selective inclusion is broken
            # TODO: Follow up with tar maintainers and docker image creators to find out what
            #       if any xattrs we really need to capture here
            tarcmd = [ 'tar',  '-cf', builder.target_image.data, '-C', tempdir ]
            # User may pass in a comma separated list of additional options to the tar command
            tar_options = parameters.get('tar_options', None)
            if tar_options:
                tar_options_list=tar_options.split(',')
                for option in tar_options_list:
                    tarcmd.append(option.strip())
            # User may pass in a comma separated list of excludes to override this
            # Default to ./etc/fstab as many people have complained this does not belong in Docker images
            tar_excludes = parameters.get('tar_excludes', './etc/fstab').split(',')
            for exclude in tar_excludes:
                tarcmd.append('--exclude=%s' % (exclude.strip()))
            tarcmd.append('./')
            self.log.debug("Command: %s" % (str(tarcmd)))
            subprocess.check_call(tarcmd)
            if wrap_metadata:
                self.log.debug("Estimating size of tar contents to include in Docker metadata")
                size = 0
                for root, dirs, files in os.walk(tempdir):
                    for name in files:
                        fp = os.path.join(root,name)
                        if os.path.isfile(fp) and not os.path.islink(fp):
                            size += os.path.getsize(fp)
                self.log.debug("Total real file content size (%d)" % (size))
        except Exception as e:
            self.log.exception(e)
            raise
        finally:
            if tempdir:
                try:
                    subprocess.check_call( ['umount', '-f', tempdir] )
                    os.rmdir(tempdir)
                except Exception as e:
                    self.log.exception(e)
                    self.log.error("WARNING: Could not unmount guest at (%s) - may still be mounted" % (tempdir) )
            if fuse_thread:
                fuse_thread.join(30.0)
                if fuse_thread.is_alive():
                    self.log.error("Guestfs local mount thread is still active - FUSE filesystem still mounted at (%s)" % (tempdir) )

        if wrap_metadata:
            # Get any parameters and if they are not set, create our defaults
            # Docker image names should not have uppercase characters
            # https://fedorahosted.org/cloud/ticket/131
            repository = parameters.get('repository',tdlobj.name).lower()
            tag = parameters.get('tag','latest')
            docker_image_id = parameters.get('docker_image_id', self._generate_docker_id())
            cmd = parameters.get('docker_cmd', 'null')
            env = parameters.get('docker_env', 'null')
            label = parameters.get('docker_label', 'null')

            # Dynamically set the architecture label if requested by config
            set_arch_label = parameters.get('docker_set_arch_label')
            if set_arch_label:
                if label == 'null':
                    label = dict()
                label["architecture"] = tdlobj.arch

            rdict = { repository: { tag: docker_image_id } }
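            # e.g. with repository='fedora' and tag='latest' this gives (hypothetical id):
            #   {'fedora': {'latest': 'deadbeefcafe...'}}
            # and is dumped below as the 'repositories' file inside the output tar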

            dockerversion = parameters.get('dockerversion', '0.11.1')
            if dockerversion not in self.docker_templates_dict:
                raise Exception("No docker JSON template available for specified docker version (%s)" % (dockerversion))
            docker_json_template=self.docker_templates_dict[dockerversion]
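            # The templates themselves are not shown here; judging by the format keys
            # passed below and the v2 handling that deletes 'Size' and 'id', each one is
            # (approximately, assumed) a JSON string of this shape:
            #   '{{"id": "{idstring}", "comment": "{commentstring}", "created": "{createdtime}",
            #     "os": "{os}", "architecture": "{arch}", "Size": {size}}}'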

            arch = tdlobj.arch
            if arch == "x86_64":
                arch = "amd64"
            elif arch == "armv7hl":
                arch = "armhfp"
            tdict = { }
            tdict['commentstring'] = parameters.get('comment', 'Created by Image Factory')
            tdict['os'] = parameters.get('os', 'linux')
            tdict['createdtime'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
            tdict['arch'] = arch
            tdict['idstring'] = docker_image_id
            tdict['cmd'] = cmd
            tdict['env'] = env
            tdict['label'] = label
            tdict['size'] = size

            image_json = docker_json_template.format(**tdict)

            # v2 images
            # TODO: Something significantly less hacky looking.....
            if dockerversion == "1.10.1":
                shasum = self._file_sha256(builder.target_image.data)
                image_v2_config = json.loads(image_json)
                # The new top level JSON file is a light modification of the layer JSON
                del image_v2_config['Size']
                del image_v2_config['id']
                image_v2_config['history'] = [ { 'comment': image_v2_config['comment'],
                                               'created': image_v2_config['created'] } ]
                image_v2_config['rootfs'] = { 'diff_ids': [ "sha256:%s" % (shasum) ],
                                            'type': 'layers' }

                # Docker wants this config file to be named after its own sha256 sum
                image_v2_config_id = hashlib.sha256(json.dumps(image_v2_config)).hexdigest()

                image_v2_manifest = [ { "Config": "%s.json" % (image_v2_config_id),
                                        "Layers": [ "%s/layer.tar" % (docker_image_id) ],
                                        "RepoTags": [ "%s:%s" % (repository, tag) ] } ]

            # Create directory
            storagedir = os.path.dirname(builder.target_image.data)
            tempdir = None
            try:
                tempdir = tempfile.mkdtemp(dir=storagedir)
                self.log.debug("Creating docker image directory structure in (%s)" % (tempdir))

                repositories_path = os.path.join(tempdir,'repositories')
                repositories = open(repositories_path,"w")
                json.dump(rdict, repositories)
                repositories.close()

                if dockerversion == "1.10.1":
                    config_path = os.path.join(tempdir, '%s.json' % (image_v2_config_id))
                    config = open(config_path, "w")
                    json.dump(image_v2_config, config)
                    config.close()

                    manifest_path = os.path.join(tempdir, 'manifest.json')
                    manifest = open(manifest_path, "w")
                    json.dump(image_v2_manifest, manifest)
                    manifest.close()

                imagedir = os.path.join(tempdir, docker_image_id)
                os.mkdir(imagedir)

                jsonfile_path = os.path.join(imagedir,'json')
                jsonfile = open(jsonfile_path,'w')
                jsonfile.write(image_json)
                jsonfile.close()

                versionfile_path = os.path.join(imagedir,'VERSION')
                versionfile = open(versionfile_path, 'w')
                # TODO - Track version developments and compatibility
                versionfile.write("1.0")
                versionfile.close()

                layerfile_path = os.path.join(imagedir,'layer.tar')
                shutil.move(builder.target_image.data, layerfile_path)

                outtar = tarfile.TarFile(name=builder.target_image.data, mode="w")
                # It turns out that in at least some configurations or versions, Docker will
                # complain if the repositories file is not the last file in the archive
                # we add our single image directory first and then the repositories file to
                # avoid this
                outtar.add(imagedir, arcname=docker_image_id)
                outtar.add(repositories_path, arcname='repositories')
                if dockerversion == "1.10.1":
                    outtar.add(config_path, arcname='%s.json' % (image_v2_config_id))
                    outtar.add(manifest_path, arcname='manifest.json')
                outtar.close()
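                # Resulting archive layout (the legacy "docker load" format), per the
                # add() calls above:
                #   <docker_image_id>/json
                #   <docker_image_id>/VERSION
                #   <docker_image_id>/layer.tar
                #   repositories
                #   <image_v2_config_id>.json   (dockerversion 1.10.1 only)
                #   manifest.json               (dockerversion 1.10.1 only)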
            finally:
                if tempdir:
                    try:
                        shutil.rmtree(tempdir)
                    except:
                        self.log.warning("Error encountered when removing temp dir (%s) - may not have been deleted" % (tempdir))

        if compress_command:
            self.log.debug("Compressing tar file using %s" % (compress_type))
            rawimage =  builder.target_image.data
            compimage =  builder.target_image.data + ".tmp.%s" % (compress_type)
            result = subprocess.call(compress_command % ( rawimage, compimage), shell = True)
            if result:
                raise Exception("Compression of image failed")
            self.log.debug("Compression complete, replacing original")
            os.unlink(rawimage)
            os.rename(compimage, rawimage)
            self.log.debug("Done")
        return False
Example #3
    def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
        self.log.debug("builder_should_create_target_image called for Docker plugin - doing all our work here then stopping the process")
        tdlobj = oz.TDL.TDL(xmlstring=template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])
        if tdlobj.arch != "x86_64":
            raise Exception("Docker plugin currently supports only x86_64 images")
        # At this point our input base_image is available as builder.base_image.data
        # We simply mount it up in libguestfs and tar out the results as builder.target_image.data
        compress_type = parameters.get('compress', None)
        if compress_type:
            if compress_type in self.compress_commands:
                compress_command = self.compress_commands[compress_type]
            else:
                raise Exception("Passed unknown compression type (%s) for Docker plugin" % (compress_type))
        else:
            compress_command = None
        guestfs_handle = launch_inspect_and_mount(builder.base_image.data, readonly = True)
        self.log.debug("Creating tar of root directory of input image %s saving as output image %s" % 
                       (builder.base_image.data, builder.target_image.data) )
        guestfs_handle.tar_out_opts("/", builder.target_image.data)
        wrap_metadata = parameter_cast_to_bool(parameters.get('create_docker_metadata', True))
        if wrap_metadata:
            # Get any parameters and if they are not set, create our defaults
            repository = parameters.get('repository',tdlobj.name)
            tag = parameters.get('tag','latest')
            docker_image_id = parameters.get('docker_image_id', self._generate_docker_id())
            rdict = { repository: { tag: docker_image_id } }
                       
            tdict = { }
            tdict['commentstring'] = parameters.get('comment', 'Created by Image Factory')
            tdict['dockerversion'] = parameters.get('dockerversion', '1.0.0')
            tdict['os'] = parameters.get('os', 'linux')
            tdict['createdtime'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
            tdict['arch'] = "amd64"
            tdict['idstring'] = docker_image_id
            size = 0
            self.log.debug("Reading raw tar file to generate unpacked size estimate")
            tar =  tarfile.open(builder.target_image.data, "r")
            try:
                for tarinfo in tar:
                    if tarinfo.isfile():
                        size += tarinfo.size
            finally:
                tar.close()
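            # Summing tarinfo.size over regular-file members approximates the unpacked
            # rootfs size; it becomes the 'size' field in the Docker metadata below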
            tdict['size'] = size

            image_json = self.docker_json_template.format(**tdict) 

            # Create directory
            storagedir = os.path.dirname(builder.target_image.data)
            tempdir = None
            try:
                tempdir = tempfile.mkdtemp(dir=storagedir)
                self.log.debug("Creating docker image directory structure in (%s)" % (tempdir))

                repositories_path = os.path.join(tempdir,'repositories')
                repositories = open(repositories_path,"w")
                json.dump(rdict, repositories)
                repositories.close()

                imagedir = os.path.join(tempdir, docker_image_id)
                os.mkdir(imagedir)

                jsonfile_path = os.path.join(imagedir,'json')
                jsonfile = open(jsonfile_path,'w')
                jsonfile.write(image_json)
                jsonfile.close()

                versionfile_path = os.path.join(imagedir,'VERSION')
                versionfile = open(versionfile_path, 'w')
                versionfile.write(tdict['dockerversion'])
                versionfile.close()

                layerfile_path = os.path.join(imagedir,'layer.tar')
                shutil.move(builder.target_image.data, layerfile_path)

                outtar = tarfile.TarFile(name=builder.target_image.data, mode="w")
                # this, conveniently, adds our temp dir recursively but with the leading path elements stripped
                # this is what docker wants
                outtar.add(tempdir,arcname="")
                outtar.close()
            finally:
                if tempdir:
                    try:
                        shutil.rmtree(tempdir)
                    except:
                        self.log.warning("Error encountered when removing temp dir (%s) - may not have been deleted" % (tempdir))

        if compress_command:
            self.log.debug("Compressing tar file using %s" % (compress_type))
            rawimage =  builder.target_image.data
            compimage =  builder.target_image.data + ".tmp.%s" % (compress_type)
            result = subprocess.call(compress_command % ( rawimage, compimage), shell = True)
            if result:
                raise Exception("Compression of image failed")
            self.log.debug("Compression complete, replacing original")
            os.unlink(rawimage)
            os.rename(compimage, rawimage)
            self.log.debug("Done")
        return False
Example #4
    def builder_should_create_target_image(self, builder, target, image_id,
                                           template, parameters):
        self.log.debug(
            "builder_should_create_target_image called for Docker plugin - doing all our work here then stopping the process"
        )
        tdlobj = oz.TDL.TDL(
            xmlstring=template.xml,
            rootpw_required=self.app_config["tdl_require_root_pw"])
        # At this point our input base_image is available as builder.base_image.data
        # We simply mount it up in libguestfs and tar out the results as builder.target_image.data
        wrap_metadata = parameter_cast_to_bool(
            parameters.get('create_docker_metadata', True))
        compress_type = parameters.get('compress', None)
        if compress_type:
            if compress_type in self.compress_commands:
                compress_command = self.compress_commands[compress_type]
            else:
                raise Exception(
                    "Passed unknown compression type (%s) for Docker plugin" %
                    (compress_type))
        else:
            compress_command = None
        guestfs_handle = launch_inspect_and_mount(builder.base_image.data,
                                                  readonly=True)
        storagedir = os.path.dirname(builder.target_image.data)

        # guestfs lets us mount locally via the API, which is cool, but requires that
        # we call a blocking function to activate the mount, which requires a thread
        # We also need a temp dir to mount it to - do our best to clean up when things
        # go wrong
        tempdir = None
        fuse_thread = None
        try:
            tempdir = tempfile.mkdtemp(dir=storagedir)
            self.log.debug("Mounting input image locally at (%s)" % (tempdir))
            guestfs_handle.mount_local(tempdir)

            def _run_guestmount(g):
                g.mount_local_run()

            self.log.debug("Launching mount_local_run thread")
            fuse_thread = threading.Thread(group=None,
                                           target=_run_guestmount,
                                           args=(guestfs_handle, ))
            fuse_thread.start()
            self.log.debug("Creating tar of entire image")
            # NOTE - we used to capture xattrs here but have reverted the change for now
            #        as SELinux xattrs break things in unexpected ways and the tar feature
            #        to allow selective inclusion is broken
            # TODO: Follow up with tar maintainers and docker image creators to find out what
            #       if any xattrs we really need to capture here
            tarcmd = ['tar', '-cf', builder.target_image.data, '-C', tempdir]
            # User may pass in a comma separated list of additional options to the tar command
            tar_options = parameters.get('tar_options', None)
            if tar_options:
                tar_options_list = tar_options.split(',')
                for option in tar_options_list:
                    tarcmd.append(option.strip())
            # User may pass in a comma separated list of excludes to override this
            # Default to ./etc/fstab as many people have complained this does not belong in Docker images
            tar_excludes = parameters.get('tar_excludes',
                                          './etc/fstab').split(',')
            for exclude in tar_excludes:
                tarcmd.append('--exclude=%s' % (exclude.strip()))
            tarcmd.append('./')
            self.log.debug("Command: %s" % (str(tarcmd)))
            subprocess.check_call(tarcmd)
            if wrap_metadata:
                self.log.debug(
                    "Estimating size of tar contents to include in Docker metadata"
                )
                size = 0
                for root, dirs, files in os.walk(tempdir):
                    for name in files:
                        fp = os.path.join(root, name)
                        if os.path.isfile(fp) and not os.path.islink(fp):
                            size += os.path.getsize(fp)
                self.log.debug("Total real file content size (%d)" % (size))
        except Exception as e:
            self.log.exception(e)
            raise
Example #5
    def create_base_image(self, builder, template, parameters):
        self.log.info(
            'create_base_image() called for TinMan plugin - creating a BaseImage'
        )

        self.tdlobj = oz.TDL.TDL(
            xmlstring=template.xml,
            rootpw_required=self.app_config["tdl_require_root_pw"])
        if parameters:
            self.parameters = parameters
        else:
            self.parameters = {}

        # TODO: Standardize reference scheme for the persistent image objects in our builder
        #   Having local short-name copies like this may well be a good idea though they
        #   obscure the fact that these objects are in a container "upstream" of our plugin object
        self.base_image = builder.base_image

        # Set to the image object that is actively being created or modified
        # Used in the logging helper function above
        self.active_image = self.base_image

        try:
            self._init_oz()
            self.guest.diskimage = self.base_image.data
            self.activity("Cleaning up any old Oz guest")
            self.guest.cleanup_old_guest()
            self.activity("Generating JEOS install media")
            self.threadsafe_generate_install_media(self.guest)
            self.percent_complete = 10

            # We want to save this later for use by RHEV-M and Condor clouds
            libvirt_xml = ""
            gfs = None

            try:
                self.activity("Generating JEOS disk image")
                # Newer Oz versions introduce a configurable disk size in TDL
                # We must still detect that it is present and pass it in this call
                try:
                    disksize = getattr(self.guest, "disksize")
                except AttributeError:
                    disksize = 10
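                # (equivalent shorthand: disksize = getattr(self.guest, "disksize", 10))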
                self.guest.generate_diskimage(size=disksize)
                # TODO: If we already have a base install reuse it
                #  subject to some rules about updates to underlying repo
                self.activity("Execute JEOS install")
                libvirt_xml = self.guest.install(self.app_config["timeout"])
                self.base_image.parameters['libvirt_xml'] = libvirt_xml
                self.image = self.guest.diskimage
                self.log.debug(
                    "Base install complete - Doing customization and ICICLE generation"
                )
                self.percent_complete = 30
                # Power users may wish to avoid ever booting the guest after the installer is finished
                # They can do so by passing in a { "generate_icicle": False } KV pair in the parameters dict
                if parameter_cast_to_bool(
                        self.parameters.get("generate_icicle", True)):
                    if parameter_cast_to_bool(
                            self.parameters.get("offline_icicle", False)):
                        self.guest.customize(libvirt_xml)
                        gfs = launch_inspect_and_mount(self.image,
                                                       readonly=True)

                        # Monkey-patching is bad
                        # TODO: Work with Chris to incorporate a more elegant version of this into Oz itself
                        def libguestfs_execute_command(gfs, cmd, timeout):
                            stdout = gfs.sh(cmd)
                            return (stdout, None, 0)

                        self.guest.guest_execute_command = libguestfs_execute_command
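                        # The shim mimics the (stdout, stderr, returncode) tuple the real
                        # guest_execute_command apparently returns, letting do_icicle()
                        # run its queries through libguestfs instead of over SSH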
                        builder.base_image.icicle = self.guest.do_icicle(gfs)
                    else:
                        builder.base_image.icicle = self.guest.customize_and_generate_icicle(
                            libvirt_xml)
                else:
                    # koji errs out if this value is None - set to an empty ICICLE instead
                    builder.base_image.icicle = "<icicle></icicle>"
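                    # (ICICLE here is the XML manifest of installed packages that Oz
                    # normally generates; an empty document stands in for it)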
                    self.guest.customize(libvirt_xml)
                self.log.debug("Customization and ICICLE generation complete")
                self.percent_complete = 50
            finally:
                self.activity("Cleaning up install artifacts")
                if self.guest:
                    self.guest.cleanup_install()
                if self.install_script_object:
                    # NamedTemporaryFile - removed on close
                    self.install_script_object.close()
                if gfs:
                    shutdown_and_close(gfs)

            self.log.debug("Generated disk image (%s)" %
                           (self.guest.diskimage))
            # OK great, we now have a customized KVM image

        finally:
            pass
Example #6
    def builder_should_create_target_image(self, builder, target, image_id,
                                           template, parameters):
        self.log.debug(
            "builder_should_create_target_image called for Docker plugin - doing all our work here then stopping the process"
        )
        tdlobj = oz.TDL.TDL(
            xmlstring=template.xml,
            rootpw_required=self.app_config["tdl_require_root_pw"])
        if tdlobj.arch != "x86_64":
            raise Exception(
                "Docker plugin currently supports only x86_64 images")
        # At this point our input base_image is available as builder.base_image.data
        # We simply mount it up in libguestfs and tar out the results as builder.target_image.data
        compress_type = parameters.get('compress', None)
        if compress_type:
            if compress_type in self.compress_commands:
                compress_command = self.compress_commands[compress_type]
            else:
                raise Exception(
                    "Passed unknown compression type (%s) for Docker plugin" %
                    (compress_type))
        else:
            compress_command = None
        guestfs_handle = launch_inspect_and_mount(builder.base_image.data,
                                                  readonly=True)
        self.log.debug(
            "Creating tar of root directory of input image %s saving as output image %s"
            % (builder.base_image.data, builder.target_image.data))
        guestfs_handle.tar_out_opts("/", builder.target_image.data)
        wrap_metadata = parameter_cast_to_bool(
            parameters.get('create_docker_metadata', True))
        if wrap_metadata:
            # Get any parameters and if they are not set, create our defaults
            repository = parameters.get('repository', tdlobj.name)
            tag = parameters.get('tag', 'latest')
            docker_image_id = parameters.get('docker_image_id',
                                             self._generate_docker_id())
            rdict = {repository: {tag: docker_image_id}}

            dockerversion = parameters.get('dockerversion', '0.11.1')
            if dockerversion not in self.docker_templates_dict:
                raise Exception(
                    "No docker JSON template available for specified docker version (%s)"
                    % (dockerversion))
            docker_json_template = self.docker_templates_dict[dockerversion]

            tdict = {}
            tdict['commentstring'] = parameters.get(
                'comment', 'Created by Image Factory')
            tdict['os'] = parameters.get('os', 'linux')
            tdict['createdtime'] = datetime.datetime.utcnow().strftime(
                '%Y-%m-%dT%H:%M:%SZ')
            tdict['arch'] = "amd64"
            tdict['idstring'] = docker_image_id
            size = 0
            self.log.debug(
                "Reading raw tar file to generate unpacked size estimate")
            tar = tarfile.open(builder.target_image.data, "r")
            try:
                for tarinfo in tar:
                    if tarinfo.isfile():
                        size += tarinfo.size
            finally:
                tar.close()
            tdict['size'] = size

            image_json = docker_json_template.format(**tdict)

            # Create directory
            storagedir = os.path.dirname(builder.target_image.data)
            tempdir = None
            try:
                tempdir = tempfile.mkdtemp(dir=storagedir)
                self.log.debug(
                    "Creating docker image directory structure in (%s)" %
                    (tempdir))

                repositories_path = os.path.join(tempdir, 'repositories')
                repositories = open(repositories_path, "w")
                json.dump(rdict, repositories)
                repositories.close()

                imagedir = os.path.join(tempdir, docker_image_id)
                os.mkdir(imagedir)

                jsonfile_path = os.path.join(imagedir, 'json')
                jsonfile = open(jsonfile_path, 'w')
                jsonfile.write(image_json)
                jsonfile.close()

                versionfile_path = os.path.join(imagedir, 'VERSION')
                versionfile = open(versionfile_path, 'w')
                # TODO - Track version developments and compatibility
                versionfile.write("1.0")
                versionfile.close()

                layerfile_path = os.path.join(imagedir, 'layer.tar')
                shutil.move(builder.target_image.data, layerfile_path)

                outtar = tarfile.TarFile(name=builder.target_image.data,
                                         mode="w")
                # It turns out that in at least some configurations or versions, Docker will
                # complain if the repositories file is not the last file in the archive
                # we add our single image directory first and then the repositories file to
                # avoid this
                outtar.add(imagedir, arcname=docker_image_id)
                outtar.add(repositories_path, arcname='repositories')
                outtar.close()
            finally:
                if tempdir:
                    try:
                        shutil.rmtree(tempdir)
                    except:
                        self.log.warning(
                            "Error encountered when removing temp dir (%s) - may not have been deleted"
                            % (tempdir))

        if compress_command:
            self.log.debug("Compressing tar file using %s" % (compress_type))
            rawimage = builder.target_image.data
            compimage = builder.target_image.data + ".tmp.%s" % (compress_type)
            result = subprocess.call(compress_command % (rawimage, compimage),
                                     shell=True)
            if result:
                raise Exception("Compression of image failed")
            self.log.debug("Compression complete, replacing original")
            os.unlink(rawimage)
            os.rename(compimage, rawimage)
            self.log.debug("Done")
        return False
Example #7
    def builder_should_create_target_image(self, builder, target, image_id,
                                           template, parameters):
        self.log.debug(
            "builder_should_create_target_image called for Docker plugin - doing all our work here then stopping the process"
        )
        tdlobj = oz.TDL.TDL(
            xmlstring=template.xml,
            rootpw_required=self.app_config["tdl_require_root_pw"])
        if tdlobj.arch != "x86_64":
            raise Exception(
                "Docker plugin currently supports only x86_64 images")
        # At this point our input base_image is available as builder.base_image.data
        # We simply mount it up in libguestfs and tar out the results as builder.target_image.data
        wrap_metadata = parameter_cast_to_bool(
            parameters.get('create_docker_metadata', True))
        compress_type = parameters.get('compress', None)
        if compress_type:
            if compress_type in self.compress_commands:
                compress_command = self.compress_commands[compress_type]
            else:
                raise Exception(
                    "Passed unknown compression type (%s) for Docker plugin" %
                    (compress_type))
        else:
            compress_command = None
        guestfs_handle = launch_inspect_and_mount(builder.base_image.data,
                                                  readonly=True)
        storagedir = os.path.dirname(builder.target_image.data)

        # guestfs lets us mount locally via the API, which is cool, but requires that
        # we call a blocking function to activate the mount, which requires a thread
        # We also need a temp dir to mount it to - do our best to clean up when things
        # go wrong
        tempdir = None
        fuse_thread = None
        try:
            tempdir = tempfile.mkdtemp(dir=storagedir)
            self.log.debug("Mounting input image locally at (%s)" % (tempdir))
            guestfs_handle.mount_local(tempdir)

            def _run_guestmount(g):
                g.mount_local_run()

            self.log.debug("Launching mount_local_run thread")
            fuse_thread = threading.Thread(group=None,
                                           target=_run_guestmount,
                                           args=(guestfs_handle, ))
            fuse_thread.start()
            self.log.debug("Creating tar of entire image")
            # Use acls and xattrs to ensure SELinux data is not lost
            # Use --sparse to avoid exploding large empty files from input image
            tarcmd = [
                'tar', '-cf', builder.target_image.data, '-C', tempdir,
                '--sparse', '--acls', '--xattrs', './'
            ]
            subprocess.check_call(tarcmd)
            if wrap_metadata:
                self.log.debug(
                    "Estimating size of tar contents to include in Docker metadata"
                )
                size = 0
                for root, dirs, files in os.walk(tempdir):
                    for name in files:
                        fp = os.path.join(root, name)
                        if os.path.isfile(fp) and not os.path.islink(fp):
                            size += os.path.getsize(fp)
                self.log.debug("Total real file content size (%d)" % (size))
        except Exception as e:
            self.log.exception(e)
            raise