Example #1
def extract_tarball(path, target):
    """Extract the tarball into target.

    If there are any errors, delete the folder being extracted to.
    """
    # TODO(cmaloney): Validate that extraction will pass, as much as possible, before unpacking.
    # TODO(cmaloney): Unpack into a temporary directory, then move into place to
    # prevent a partial extraction from ever lying around on the filesystem.
    try:
        assert os.path.exists(path), \
            "Path doesn't exist but should: {}".format(path)
        make_directory(target)

        # TODO(tweidner): https://jira.mesosphere.com/browse/DCOS-48220
        # Make this cross-platform via Python's tarfile module once
        # https://bugs.python.org/issue21872 is fixed.
        if is_windows:
            subprocess.check_call(['bsdtar', '-xf', path, '-C', target])
        else:
            subprocess.check_call(['tar', '-xf', path, '-C', target])

    except BaseException:
        # If there are errors, we can't really cope since we are already in an error state.
        rmtree(target, ignore_errors=True)
        raise
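
A possible fix for the second TODO is to unpack into a temporary sibling directory and rename it into place, so a failed extraction never leaves a partial tree at target. A minimal sketch of that approach (Linux branch only), under the hypothetical name _extract_tarball_atomic:

import os
import shutil
import subprocess
import tempfile

def _extract_tarball_atomic(path, target):
    # Stage in a sibling directory so the final rename stays on one
    # filesystem, where os.rename is atomic on POSIX.
    staging = tempfile.mkdtemp(dir=os.path.dirname(os.path.abspath(target)))
    try:
        subprocess.check_call(['tar', '-xf', path, '-C', staging])
        os.rename(staging, target)
    except BaseException:
        shutil.rmtree(staging, ignore_errors=True)
        raise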
Example #2
def remove_directory(path):
    """recursively removes a directory tree. fails silently if the tree does not exist"""
    if is_windows:
        # The Python library on Windows does not handle symbolic links in
        # directories well, so call out to the cmd prompt instead.
        path = path.replace('/', '\\')
        if os.path.exists(path):
            subprocess.call(['cmd.exe', '/c', 'rmdir', '/s', '/q', path])
    else:
        subprocess.check_call(['rm', '-rf', path])
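
A standard-library alternative is shutil.rmtree with an onerror hook; clearing the read-only bit covers the usual Windows failure mode, though whether it matches the symlink behavior above on every platform is not verified here. A hedged sketch:

import os
import shutil
import stat

def _remove_directory_stdlib(path):
    # Fail silently if the tree does not exist, like the version above.
    if not os.path.exists(path):
        return

    def _clear_readonly_and_retry(func, failed_path, exc_info):
        # Common Windows failure: read-only files. Clear the bit and retry.
        os.chmod(failed_path, stat.S_IWRITE)
        func(failed_path)

    shutil.rmtree(path, onerror=_clear_readonly_and_retry)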
Example #3
def copy_file(src_path, dst_path):
    """copy a single directory item from one location to another"""
    if is_windows:
        # Use the cmd built-in for the copy, since Python's libraries may not
        # handle symbolic links and the other things that can be thrown at
        # them.
        src = src_path.replace('/', '\\')
        dst = dst_path.replace('/', '\\')
        subprocess.check_call(['cmd.exe', '/c', 'copy', src, dst])
    else:
        subprocess.check_call(['cp', src_path, dst_path])
Example #4
def copy_directory(src_path, dst_path):
    """copy recursively a directory tree from one location to another"""
    if is_windows:
        # Use cmd's xcopy for the copy, since Python's libraries may not
        # handle symbolic links and the other things that can be thrown at
        # them.
        src = src_path.replace('/', '\\')
        dst = dst_path.replace('/', '\\')
        subprocess.check_call(
            ['cmd.exe', '/c', 'xcopy', src, dst, '/E', '/B', '/I'])
    else:
        subprocess.check_call(['cp', '-r', src_path, dst_path])
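
For comparison, the standard-library equivalents of Examples #3 and #4 are shutil.copy2 and shutil.copytree; whether they cope with the symlink cases the comments worry about is exactly what the cmd/cp shell-outs avoid having to answer. A sketch:

import shutil

def _copy_file_stdlib(src_path, dst_path):
    # copy2 preserves metadata; it follows symlinks by default.
    shutil.copy2(src_path, dst_path)

def _copy_directory_stdlib(src_path, dst_path):
    # symlinks=True recreates symlinks instead of copying their targets.
    # Note: copytree requires that dst_path does not already exist.
    shutil.copytree(src_path, dst_path, symlinks=True)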
Example #5
    def checkout_to(self, directory):
        # Clone into `src/`.
        check_call(["git", "clone", "-q", self.src_repo_path, directory])

        # Make sure we got the right commit as HEAD.
        assert get_git_sha1(directory + "/.git", "HEAD") == self.commit

        # Check out from the bare repo in the cache folder at the specific sha1.
        check_call([
            "git", "--git-dir", directory + "/.git", "--work-tree", directory,
            "checkout", "-f", "-q", self.commit
        ])
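
Both this example and Example #7 call a get_git_sha1 helper that is not shown. A plausible implementation, assuming it simply resolves a ref to a full sha1 inside the given git directory (the project's actual helper may differ):

import subprocess

def get_git_sha1(git_dir, ref):
    # Resolve `ref` (e.g. "HEAD" or a branch name) to a full commit sha1.
    return subprocess.check_output(
        ['git', '--git-dir', git_dir, 'rev-parse', ref]).decode().strip()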
Example #6
def _do_build_docker(name, path):
    path_sha = pkgpanda.build.hash_folder_abs(path, os.path.dirname(path))
    container_name = 'dcos/dcos-builder:{}_dockerdir-{}'.format(name, path_sha)

    log.debug("Attempting to pull docker: %s", container_name)
    pulled = False
    try:
        # TODO(cmaloney): Rather than pushing / pulling from Docker Hub,
        # upload the exported tarball as a build artifact.
        subprocess.check_call(['docker', 'pull', container_name])
        pulled = True
        # TODO(cmaloney): Differentiate different failures of running the process better here
    except subprocess.CalledProcessError:
        pulled = False

    if not pulled:
        log.debug("Pull failed, building instead: %s", container_name)
        # Pull failed, build it
        subprocess.check_call(['docker', 'build', '-t', container_name, path])

        # TODO(cmaloney): Push the built docker image on successful package build to both
        # 1) commit-<commit_id>
        # 2) Dockerfile-<file_contents_sha1>
        # 3) bootstrap-<bootstrap_id>
        # So we can track back the builder id for a given commit or bootstrap id, and reproduce whatever
        # we need. The Dockerfile-<sha1> is useful for making sure we don't rebuild more than
        # necessary.
        try:
            subprocess.check_call(['docker', 'push', container_name])
        except subprocess.CalledProcessError:
            log.warning(
                "docker push of dcos-builder failed. This means it will be very difficult "
                "for this build to be reproduced (others will have a different / non-identical "
                "base docker for most packages).")

    # Mark the image as latest so it will be used when building packages.
    # First, extract the docker client version string.
    try:
        docker_version = subprocess.check_output(
            ['docker', 'version', '-f', '{{.Client.Version}}']).decode()
    except subprocess.CalledProcessError:
        # If the above command fails, we know we have an older version of docker.
        # Older versions of docker spit out an entirely different format.
        docker_version = subprocess.check_output(
            ['docker', 'version']).decode().split("\n")[0].split()[2]

    # only use force tag if using docker version 1.9 or earlier
    container_name_t = 'dcos/dcos-builder:{}_dockerdir-latest'.format(name)
    if LooseVersion(docker_version) < LooseVersion('1.10'):
        args = ['docker', 'tag', '-f', container_name, container_name_t]
    else:
        args = ['docker', 'tag', container_name, container_name_t]
    subprocess.check_call(args)
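
The final tagging step gates on the docker client version with distutils.version.LooseVersion, which compares dotted version strings component-wise rather than lexically; that is what makes '1.9.1' sort below '1.10'. In isolation:

from distutils.version import LooseVersion

# Component-wise: 9 < 10, even though '1.9.1' > '1.10' as plain strings.
assert LooseVersion('1.9.1') < LooseVersion('1.10')
assert '1.9.1' > '1.10'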
Example #7
    def checkout_to(self, directory):
        # Fetch into a bare repository so that, on a host which has a cache,
        # we only fetch the new commits.
        fetch_git(self.bare_folder, self.url)

        # Warn if the ref_origin is set and gives a different sha1 than the
        # current ref.
        try:
            origin_commit = get_git_sha1(self.bare_folder, self.ref_origin)
        except Exception as ex:
            raise ValidationError(
                "Unable to find sha1 of ref_origin {}: {}".format(
                    self.ref_origin, ex))
        if self.ref != origin_commit:
            logger.warning(
                "Current ref doesn't match the ref origin. "
                "Package ref should probably be updated to pick up "
                "new changes to the code:" +
                " Current: {}, Origin: {}".format(self.ref, origin_commit))

        # Clone into `src/`.
        if is_windows:
            # Note: Mesos requires autocrlf to be set on Windows otherwise it does not build.
            check_call([
                "git", "clone", "-q", "--config", "core.autocrlf=true",
                self.bare_folder, directory
            ])
        else:
            check_call(["git", "clone", "-q", self.bare_folder, directory])

        # Check out from the bare repo in the cache folder at the specific sha1.
        check_call([
            "git", "--git-dir", directory + "/.git", "--work-tree", directory,
            "checkout", "-f", "-q", self.ref
        ])
Example #8
    def stop_all(self):
        if not self.__active:
            log.warning("Not stopping services: service management is not active")
            return
        if not os.path.exists(self.__unit_directory):
            log.warning("Not stopping services: %s does not exist", self.__unit_directory)
            return
        names = list(filter(
            lambda n: os.path.isfile(os.path.join(self.__unit_directory, n)),
            os.listdir(self.__unit_directory)))
        try:
            cmd = ["systemctl", "stop"] + names
            if not self.__block:
                cmd.append("--no-block")
            check_call(cmd)
        except CalledProcessError as ex:
            # If the service doesn't exist, don't error. This happens when a
            # bootstrap tarball has just been extracted but nothing has been
            # started yet during the first activation.
            log.warning(ex)
            if ex.returncode != 5:
                raise
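
As the comment notes, systemctl signals "unit not loaded" with exit status 5, which is why that code is tolerated. The same pattern isolated for a single unit, under the hypothetical name stop_unit_if_loaded:

from subprocess import CalledProcessError, check_call

def stop_unit_if_loaded(name):
    try:
        check_call(['systemctl', 'stop', name])
    except CalledProcessError as ex:
        # Exit status 5 means the unit is not loaded; treat that as success.
        if ex.returncode != 5:
            raise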
Example #9
def extract_archive(archive, dst_dir):
    archive_type = _identify_archive_type(archive)

    if archive_type == 'tar':
        if is_windows:
            check_call(["bsdtar", "-xf", archive, "-C", dst_dir])
        else:
            check_call(
                ["tar", "-xf", archive, "--strip-components=1", "-C", dst_dir])
    elif archive_type == 'zip':
        if is_windows:
            check_call([
                "powershell.exe", "-command", "expand-archive", "-path",
                archive, "-destinationpath", dst_dir
            ])
        else:
            check_call(["unzip", "-x", archive, "-d", dst_dir])
        # The unzip binary does not support '--strip-components=1'.
        _strip_first_path_component(dst_dir)
    else:
        raise ValidationError("Unsupported archive: {}".format(
            os.path.basename(archive)))
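
_identify_archive_type and _strip_first_path_component come from the same module and are not shown. A plausible sketch of the first, assuming it classifies by file extension (the real implementation may differ):

import os

def _identify_archive_type(archive):
    name = os.path.basename(archive)
    if name.endswith(('.tar', '.tar.gz', '.tgz', '.tar.xz')):
        return 'tar'
    if name.endswith('.zip'):
        return 'zip'
    return 'unknown'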
Example #10
def fetch_git(bare_folder, git_uri):
    # Do a git clone if the cache folder doesn't exist yet, otherwise
    # do a git pull of everything.
    if not os.path.exists(bare_folder):
        check_call(
            ["git", "clone", "--mirror", "--progress", git_uri, bare_folder])
    else:
        check_call([
            "git", "--git-dir", bare_folder, "remote", "set-url", "origin",
            git_uri
        ])
        check_call(
            ["git", "--git-dir", bare_folder, "remote", "update", "origin"])

    return bare_folder
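
Typical usage pairs this with the checkout in Example #7: mirror once into a shared bare cache, then clone locally from it. An illustration with hypothetical paths:

# Hypothetical cache location and URL, for illustration only.
bare = fetch_git('/var/cache/git/mesos.git',
                 'https://github.com/example/mesos.git')
# Later clones from `bare` (as in Example #7) are local and fast.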
Example #11
def make_installer_docker(variant, variant_info, installer_info):
    bootstrap_id = variant_info['bootstrap']
    assert len(bootstrap_id) > 0

    image_version = util.dcos_image_commit[:18] + '-' + bootstrap_id[:18]
    genconf_tar = "dcos-genconf." + image_version + ".tar"
    installer_filename = "packages/cache/dcos_generate_config." + pkgpanda.util.variant_prefix(
        variant) + "sh"
    bootstrap_filename = bootstrap_id + ".bootstrap.tar.xz"
    bootstrap_active_filename = bootstrap_id + ".active.json"
    installer_bootstrap_filename = installer_info[
        'bootstrap'] + '.bootstrap.tar.xz'
    bootstrap_latest_filename = pkgpanda.util.variant_prefix(
        variant) + 'bootstrap.latest'
    latest_complete_filename = pkgpanda.util.variant_prefix(
        variant) + 'complete.latest.json'
    packages_dir = 'packages'
    docker_image_name = 'mesosphere/dcos-genconf:' + image_version

    # TODO(cmaloney): All of this should use package_resources
    with tempfile.TemporaryDirectory() as build_dir:
        assert build_dir[-1] != '/'

        print("Setting up build environment")

        def dest_path(filename):
            return build_dir + '/' + filename

        def copy_to_build(src_prefix, filename):
            dest_filename = dest_path(filename)
            os.makedirs(os.path.dirname(dest_filename), exist_ok=True)
            copy_file(os.getcwd() + '/' + src_prefix + '/' + filename,
                      dest_filename)

        def fill_template(base_name, format_args):
            pkgpanda.util.write_string(
                dest_path(base_name),
                pkg_resources.resource_string(
                    __name__, 'bash/' + base_name +
                    '.in').decode().format(**format_args))

        fill_template(
            'Dockerfile', {
                'installer_bootstrap_filename': installer_bootstrap_filename,
                'bootstrap_filename': bootstrap_filename,
                'bootstrap_active_filename': bootstrap_active_filename,
                'bootstrap_latest_filename': bootstrap_latest_filename,
                'latest_complete_filename': latest_complete_filename,
                'packages_dir': packages_dir
            })

        fill_template(
            'installer_internal_wrapper', {
                'variant': pkgpanda.util.variant_str(variant),
                'bootstrap_id': bootstrap_id,
                'dcos_image_commit': util.dcos_image_commit
            })

        subprocess.check_call(
            ['chmod', '+x',
             dest_path('installer_internal_wrapper')])

        # TODO(cmaloney) make this use make_bootstrap_artifacts / that set
        # rather than manually keeping everything in sync
        copy_to_build('packages/cache/bootstrap', bootstrap_filename)
        copy_to_build('packages/cache/bootstrap', installer_bootstrap_filename)
        copy_to_build('packages/cache/bootstrap', bootstrap_active_filename)
        copy_to_build('packages/cache/bootstrap', bootstrap_latest_filename)
        copy_to_build('packages/cache/complete', latest_complete_filename)
        for package_id in variant_info['packages']:
            package_name = pkgpanda.PackageId(package_id).name
            copy_to_build(
                'packages/cache/', packages_dir + '/' + package_name + '/' +
                package_id + '.tar.xz')

        # Copy across gen_extra if it exists
        if os.path.exists('gen_extra'):
            copy_directory('gen_extra', dest_path('gen_extra'))
        else:
            make_directory(dest_path('gen_extra'))

        print("Building docker container in " + build_dir)
        subprocess.check_call(
            ['docker', 'build', '-t', docker_image_name, build_dir])

        print("Building", installer_filename)
        pkgpanda.util.write_string(
            installer_filename,
            pkg_resources.resource_string(
                __name__, 'bash/dcos_generate_config.sh.in').decode().format(
                    genconf_tar=genconf_tar,
                    docker_image_name=docker_image_name,
                    variant=variant) + '\n#EOF#\n')
        with open(genconf_tar, 'wb') as genconf_file:
            subprocess.check_call(['docker', 'save', docker_image_name],
                                  stdout=genconf_file)
        with open(installer_filename, 'ab') as installer_file:
            subprocess.check_call(['tar', 'cvf', '-', genconf_tar],
                                  stdout=installer_file)
        subprocess.check_call(['chmod', '+x', installer_filename])

        # Cleanup
        subprocess.check_call(['rm', genconf_tar])

    return installer_filename
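
The chmod and rm shell-outs near the end have direct standard-library equivalents that avoid spawning processes. A hedged sketch, where _make_executable is a hypothetical helper:

import os
import stat

def _make_executable(path):
    # Equivalent of `chmod +x path`: add execute bits for user/group/other.
    st = os.stat(path)
    os.chmod(path, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

# Usage in the function above would be:
#     _make_executable(installer_filename)
#     os.remove(genconf_tar)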
Example #12
    def activate(self, packages):
        # Ensure the new set is reasonable.
        validate_compatible(packages, self.__roles)

        # Build the absolute paths for the running config, new config location,
        # and where to archive the config.
        active_names = self.get_active_names()
        active_dirs = list(
            map(self._make_abs, self.__well_known_dirs + ["active"]))

        new_names = [name + ".new" for name in active_names]
        new_dirs = [name + ".new" for name in active_dirs]

        old_names = [name + ".old" for name in active_names]

        log.info("Remove all pre-existing new and old directories")
        for name in chain(new_names, old_names):
            if os.path.exists(name):
                if os.path.isdir(name):
                    remove_directory(name)
                else:
                    os.remove(name)

        log.info(
            "Remove unit files staged for an activation that didn't occur.")
        if not self.__skip_systemd_dirs:
            self.systemd.remove_staged_unit_files()

        log.debug("Make the directories for the new config: " +
                  ", ".join(new_dirs))
        for name in new_dirs:
            os.makedirs(name)

        def symlink_all(src, dest):
            if not os.path.isdir(src):
                return

            symlink_tree(src, dest)

        log.info("Set the new LD_LIBRARY_PATH, PATH.")
        env_contents = env_header.format(
            "/opt/mesosphere" if self.__fake_path else self.__root)
        env_export_contents = env_export_header.format(
            "/opt/mesosphere" if self.__fake_path else self.__root)

        active_buildinfo_full = {}

        dcos_service_configuration = self._get_dcos_configuration_template()

        log.info("Building up the set of users.")
        sysusers = UserManagement(self.__manage_users, self.__add_users)

        def _get_service_files(_dir):
            service_files = []
            for root, directories, filenames in os.walk(_dir):
                for filename in filter(lambda name: name.endswith(".service"),
                                       filenames):
                    service_files.append(os.path.join(root, filename))
            return service_files

        def _get_service_names(_dir):
            service_files = list(
                map(os.path.basename, _get_service_files(_dir)))

            if not service_files:
                return []

            return list(
                map(lambda name: os.path.splitext(name)[0], service_files))

        # Add the folders, config in each package.
        for package in packages:
            # Package folders
            # NOTE: Since `active` is at the end of the folder list, it is
            # dropped by zip() (which stops at the shorter list). This is the
            # desired behavior, since `active` is populated later.
            # Do the basename since some well known dirs are full paths (dcos.target.wants)
            # while inside the packages they are always top level directories.
            for new, dir_name in zip(new_dirs, self.__well_known_dirs):
                dir_name = os.path.basename(dir_name)
                pkg_dir = os.path.join(package.path, dir_name)

                assert os.path.isabs(new)
                assert os.path.isabs(pkg_dir)

                try:
                    symlink_all(pkg_dir, new)

                    # Symlink all applicable role-based config
                    for role in self.__roles:
                        role_dir = os.path.join(
                            package.path, "{0}_{1}".format(dir_name, role))
                        symlink_all(role_dir, new)

                except ConflictingFile as ex:
                    raise ValidationError(
                        "Two packages are trying to install the same file {0} or "
                        "two roles in the set of roles {1} are causing a package "
                        "to try activating multiple versions of the same file. "
                        "One of the package files is {2}.".format(
                            ex.dest, self.__roles, ex.src))

            log.info("Add %s to the active folder", package.name)
            os.symlink(
                package.path,
                os.path.join(self._make_abs("active.new"), package.name))

            # Add to the environment and environment.export contents

            env_contents += "# package: {0}\n".format(package.id)
            env_export_contents += "# package: {0}\n".format(package.id)

            for k, v in package.environment.items():
                env_contents += "{0}={1}\n".format(k, v)
                env_export_contents += "export {0}={1}\n".format(k, v)

            env_contents += "\n"
            env_export_contents += "\n"

            # Add to the buildinfo
            try:
                active_buildinfo_full[package.name] = load_json(
                    os.path.join(package.path, "buildinfo.full.json"))
            except FileNotFoundError:
                # TODO(cmaloney): These only come from setup-packages. Should update
                # setup-packages to add a buildinfo.full for those packages
                active_buildinfo_full[package.name] = None

            # NOTE: It is critical that the state dir, the package name, and the user name are
            # all the same. Otherwise, on upgrades we might remove access to files by chowning
            # them to something incompatible. We survive the first upgrade because everything
            # goes from root to specific users, and root can access all user files.
            if package.username is not None:
                sysusers.add_user(package.username, package.group)

            # Ensure the state directory exists
            # TODO(cmaloney): On upgrade take a snapshot?
            if self.__manage_state_dir:
                state_dir_path = self.__state_dir_root + '/' + package.name
                if package.state_directory:
                    make_directory(state_dir_path)
                    if package.username and not is_windows:
                        uid = sysusers.get_uid(package.username)
                        check_call(['chown', '-R', str(uid), state_dir_path])

            if package.sysctl:
                service_names = _get_service_names(package.path)

                if not service_names:
                    raise ValueError(
                        "service name required for sysctl could not be determined for {package}"
                        .format(package=package.id))

                for service in service_names:
                    if service in package.sysctl:
                        dcos_service_configuration["sysctl"][
                            service] = package.sysctl[service]

        log.info("Prepare new systemd units for activation.")
        if not self.__skip_systemd_dirs:
            new_wants_dir = self._make_abs(self.__systemd_dir + ".new")
            if os.path.exists(new_wants_dir):
                self.systemd.stage_new_units(new_wants_dir)

        dcos_service_configuration_file = os.path.join(
            self._make_abs("etc.new"), DCOS_SERVICE_CONFIGURATION_FILE)
        write_json(dcos_service_configuration_file, dcos_service_configuration)

        log.info("Write out the new environment file.")
        new_env = self._make_abs("environment.new")
        write_string(new_env, env_contents)

        log.info("Write out the new environment.export file")
        new_env_export = self._make_abs("environment.export.new")
        write_string(new_env_export, env_export_contents)

        log.info("Write out the buildinfo of every active package")
        new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
        write_json(new_buildinfo_meta, active_buildinfo_full)

        self.swap_active(".new")
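
All of the new state is written under ".new" names, and swap_active(".new") (not shown here) rotates it into place. The core of one such rotation might look like this hypothetical sketch:

import os

def _swap_one(path):
    # Keep the previous version as `path + '.old'` for rollback, then
    # promote `path + '.new'` into place.
    if os.path.exists(path):
        os.rename(path, path + '.old')
    os.rename(path + '.new', path)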