def test_recovery_archive(tmpdir):
    # Recover from the "archive" state correctly.
    shutil.copytree("resources/install_recovery_archive", str(tmpdir.join("install")), symlinks=True)
    install = Install(str(tmpdir.join("install")), "resources/systemd", True, False, True)

    action, _ = install.recover_swap_active()
    assert action

    # TODO(cmaloney): expect_fs
    expect_fs(
        str(tmpdir.join("install")),
        {
            ".gitignore": None,
            "active": ["mesos"],
            "active.buildinfo.full.json": None,
            "active.old": ["mesos"],
            "bin": ["mesos", "mesos-dir"],
            "dcos.target.wants": [".gitignore"],
            "environment": None,
            "environment.export": None,
            "environment.old": None,
            "etc": [".gitignore"],
            "include": [".gitignore"],
            "lib": ["libmesos.so"]
        })
def set_app_attrs_from_config():
    current_app.install = Install(
        current_app.config['DCOS_ROOT'],
        current_app.config['DCOS_CONFIG_DIR'],
        current_app.config['DCOS_ROOTED_SYSTEMD'],
        manage_systemd=True,
        block_systemd=False)
    current_app.repository = Repository(current_app.config['DCOS_REPO_DIR'])
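# A minimal usage sketch for the helper above. Assumption: it runs inside a
# Flask application context, since it reads flask.current_app; the app wiring
# and config values below are hypothetical.
from flask import Flask

app = Flask(__name__)
app.config.update(
    DCOS_ROOT='/opt/mesosphere',
    DCOS_CONFIG_DIR='/etc/mesosphere',
    DCOS_ROOTED_SYSTEMD=True,
    DCOS_REPO_DIR='/opt/mesosphere/packages')

with app.app_context():
    # Attaches .install and .repository to the app for later request handlers.
    set_app_attrs_from_config()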
def test_recovery_move_new(tmpdir):
    # Recover from the "move_new" state correctly.
    shutil.copytree("resources/install_recovery_move", str(tmpdir.join("install")), symlinks=True)
    install = Install(str(tmpdir.join("install")), "resources/systemd", True, False, True)

    action, _ = install.recover_swap_active()
    assert action

    # TODO(cmaloney): expect_fs
    expect_fs(
        str(tmpdir.join("install")),
        {
            ".gitignore": None,
            "active": ["mesos"],
            "active.buildinfo.full.json": None,
            "bin": ["mesos", "mesos-dir"],
            "dcos.target.wants": [".gitignore"],
            "environment": None,
            "environment.export": None,
            "etc": [".gitignore"],
            "include": [".gitignore"],
            "lib": ["libmesos.so"]
        })
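# Reading the expect_fs specs above (semantics inferred from the fixture data,
# not from expect_fs itself): a value of None appears to assert the entry is a
# plain file, while a list asserts the entry is a directory containing exactly
# those names.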
def make_bootstrap_tarball(package_store, packages, variant):
    # Convert filenames to package ids
    pkg_ids = list()
    for pkg_path in packages:
        # Get the package id from the given package path
        filename = os.path.basename(pkg_path)
        if not filename.endswith(".tar.xz"):
            raise BuildError("Packages must be packaged / end with a .tar.xz. Got {}".format(filename))
        pkg_id = filename[:-len(".tar.xz")]
        pkg_ids.append(pkg_id)

    bootstrap_cache_dir = package_store.get_bootstrap_cache_dir()

    # Filename is output_name.<sha-1>.{active.json|.bootstrap.tar.xz}
    bootstrap_id = hash_checkout(pkg_ids)
    latest_name = "{}/{}bootstrap.latest".format(bootstrap_cache_dir, pkgpanda.util.variant_prefix(variant))
    output_name = bootstrap_cache_dir + '/' + bootstrap_id + '.'

    # bootstrap tarball = <sha1 of packages in tarball>.bootstrap.tar.xz
    bootstrap_name = "{}bootstrap.tar.xz".format(output_name)
    active_name = "{}active.json".format(output_name)

    def mark_latest():
        # Ensure latest is always written
        write_string(latest_name, bootstrap_id)

        print("bootstrap: {}".format(bootstrap_name))
        print("active: {}".format(active_name))
        print("latest: {}".format(latest_name))
        return bootstrap_id

    if os.path.exists(bootstrap_name):
        print("Bootstrap already up to date, not recreating")
        return mark_latest()

    check_call(['mkdir', '-p', bootstrap_cache_dir])

    # Try downloading.
    if package_store.try_fetch_bootstrap_and_active(bootstrap_id):
        print("Bootstrap already up to date, not recreating. Downloaded from repository-url.")
        return mark_latest()

    print("Unable to download from cache. Building.")

    print("Creating bootstrap tarball for variant {}".format(variant))

    work_dir = tempfile.mkdtemp(prefix='mkpanda_bootstrap_tmp')

    def make_abs(path):
        return os.path.join(work_dir, path)

    pkgpanda_root = make_abs("opt/mesosphere")
    repository = Repository(os.path.join(pkgpanda_root, "packages"))

    # Fetch all the packages to the root
    for pkg_path in packages:
        filename = os.path.basename(pkg_path)
        pkg_id = filename[:-len(".tar.xz")]

        def local_fetcher(id, target):
            shutil.unpack_archive(pkg_path, target, "gztar")
        repository.add(local_fetcher, pkg_id, False)

    # Activate the packages inside the repository.
    # Do generate dcos.target.wants inside the root so that we don't
    # try messing with /etc/systemd/system.
    install = Install(
        root=pkgpanda_root,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        skip_systemd_dirs=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(repository.load_packages(pkg_ids))

    # Mark the tarball as a bootstrap tarball/filesystem so that
    # dcos-setup.service will fire.
    make_file(make_abs("opt/mesosphere/bootstrap"))

    # Write out an active.json for the bootstrap tarball
    write_json(active_name, pkg_ids)

    # Rewrite all the symlinks to point to /opt/mesosphere
    rewrite_symlinks(work_dir, work_dir, "/")

    make_tar(bootstrap_name, pkgpanda_root)
    shutil.rmtree(work_dir)

    # Update latest last so that we don't ever use partially-built things.
    write_string(latest_name, bootstrap_id)

    print("Built bootstrap")

    return mark_latest()
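# A standalone sketch of the filename to package-id convention the loop above
# relies on (the example path is made up):
import os

def pkg_id_from_tarball(pkg_path):
    # "<name>--<version>.tar.xz" becomes "<name>--<version>"
    filename = os.path.basename(pkg_path)
    if not filename.endswith(".tar.xz"):
        raise ValueError("expected a .tar.xz, got {}".format(filename))
    return filename[:-len(".tar.xz")]

assert pkg_id_from_tarball("/build/mesos--1.2.3.tar.xz") == "mesos--1.2.3"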
def build(package_store, name, variant, clean_after_build, recursive=False):
    assert isinstance(package_store, PackageStore)
    print("Building package {} variant {}".format(name, pkgpanda.util.variant_str(variant)))
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    package_dir = package_store.get_package_folder(name)

    def src_abs(name):
        return package_dir + '/' + name

    def cache_abs(filename):
        return package_store.get_package_cache_folder(name) + '/' + filename

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    assert (name, variant) in package_store.packages, \
        "Programming error: name, variant should have been validated to be valid before calling build()."
    buildinfo = copy.deepcopy(package_store.get_buildinfo(name, variant))

    if 'name' in buildinfo:
        raise BuildError("'name' is not allowed in buildinfo.json, it is implicitly the name of the "
                         "folder containing the buildinfo.json")

    # Convert single_source -> sources
    try:
        sources = expand_single_source_alias(name, buildinfo)
    except ValidationError as ex:
        raise BuildError("Invalid buildinfo.json for package: {}".format(ex)) from ex

    # Save the final sources back into buildinfo so it gets written into
    # buildinfo.json. This also means buildinfo.json is always expanded form.
    buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            # TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
            cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name
            check_call(['mkdir', '-p', cache_dir])
            fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
            fetchers[src_name] = fetcher
            checkout_ids[src_name] = fetcher.get_id()
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (And that should be
        # tested at some point). For now disallowing identical saves hassle.
        assert_no_duplicate_keys(checkout_id, buildinfo['sources'][src_name])
        buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1 of the buildinfo.json + build file to the build ids
    build_ids = {"sources": checkout_ids}
    build_ids['build'] = pkgpanda.util.sha1(src_abs(buildinfo['build_script']))
    build_ids['pkgpanda_version'] = pkgpanda.build.constants.version
    build_ids['variant'] = '' if variant is None else variant

    extra_dir = src_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder(extra_dir)
        build_ids['extra_source'] = extra_id
        buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = buildinfo['docker']
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)
    build_ids['docker'] = docker_id

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    build_ids['environment'] = buildinfo['environment']

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()
    # Verify all requires are in the repository.
    if 'requires' in buildinfo:
        # Final package has the same requires as the build.
        pkginfo['requires'] = buildinfo['requires']

        # TODO(cmaloney): Pull generating the full set of requires out into a function.
        to_check = copy.deepcopy(buildinfo['requires'])
        if type(to_check) != list:
            raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
        while to_check:
            requires_info = to_check.pop(0)
            requires_name, requires_variant = expand_require(requires_info)

            if requires_name in active_package_variants:
                # TODO(cmaloney): If one package depends on the <default>
                # variant of a package and 1+ others depends on a non-<default>
                # variant then update the dependency to the non-default variant
                # rather than erroring.
                if requires_variant != active_package_variants[requires_name]:
                    # TODO(cmaloney): Make this contain the chains of
                    # dependencies which contain the conflicting packages.
                    # a -> b -> c -> d {foo}
                    # e {bar} -> d {baz}
                    raise BuildError(
                        "Dependency on multiple variants of the same package {}. "
                        "variants: {} {}".format(
                            requires_name,
                            requires_variant,
                            active_package_variants[requires_name]))

                # The package {requires_name, variant} is already a
                # dependency; don't process it again / move on to the next.
                continue

            active_package_variants[requires_name] = requires_variant

            # Figure out the last build of the dependency, add that as the
            # fully expanded dependency.
            requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
            if not os.path.exists(requires_last_build):
                if recursive:
                    # Build the dependency
                    build(package_store, requires_name, requires_variant, clean_after_build, recursive)
                else:
                    raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                     "the dependency".format(requires_name, requires_variant))

            try:
                pkg_id_str = load_string(requires_last_build)
                auto_deps.add(pkg_id_str)
                pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
                pkg_requires = pkg_buildinfo['requires']
                pkg_path = repository.package_path(pkg_id_str)
                pkg_tar = pkg_id_str + '.tar.xz'
                if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
                    raise BuildError(
                        "The build tarball {} referred to by the last_build file of the "
                        "dependency {} variant {} doesn't exist. Rebuild the dependency.".format(
                            pkg_tar, requires_name, requires_variant))

                active_package_ids.add(pkg_id_str)

                # Mount the package into the docker container.
                cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
                os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

                # Add the dependencies of the package to the set which will be
                # activated.
                # TODO(cmaloney): All these 'transitive' dependencies shouldn't
                # be available to the package being built, only what depends on
                # them directly.
                to_check += pkg_requires
            except ValidationError as ex:
                raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
            except PackageError as ex:
                raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    build_ids['requires'] = list(active_package_ids)
    version_base = hash_checkout(build_ids)
    version = None
    if "version_extra" in buildinfo:
        version = "{0}-{1}".format(buildinfo["version_extra"], version_base)
    else:
        version = version_base

    pkg_id = PackageId.from_parts(name, version)

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    buildinfo['build_ids'] = build_ids
    buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    buildinfo['name'] = name
    buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
        return pkg_path

    # Try downloading.
    dl_path = package_store.try_fetch_by_id(pkg_id)
    if dl_path:
        print("Package up to date. Not re-building. Downloaded from repository-url.")
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
        print(dl_path, pkg_path)
        assert dl_path == pkg_path
        return pkg_path

    # Fall out and do the build since it couldn't be downloaded
    print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id, json.dumps(buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    def clean():
        # Run a docker container to remove src/ and result/
        cmd = DockerCmd()
        cmd.volumes = {
            package_store.get_package_cache_folder(name): "/pkg/:rw",
        }
        cmd.container = "ubuntu:14.04.4"
        cmd.run(["rm", "-rf", "/pkg/src", "/pkg/result"])

    clean()

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = cache_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that overrides the outer one.
        id_obj = PackageId(dep)
        add_package_file(repository, package_store.get_package_path(id_obj))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = cache_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = cache_abs('src/' + src_name)
            os.mkdir(root)
            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Copy over environment settings
    pkginfo['environment'] = buildinfo['environment']

    # Whether pkgpanda should on the host make sure a `/var/lib` state directory is available
    pkginfo['state_directory'] = buildinfo.get('state_directory', False)
    if pkginfo['state_directory'] not in [True, False]:
        raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")

    username = buildinfo.get('username')
    if not (username is None or isinstance(username, str)):
        raise BuildError("username in buildinfo.json must be either not set (no user for this"
                         " package), or a user name string")
    if username:
        try:
            pkgpanda.UserManagement.validate_username(username)
        except ValidationError as ex:
            raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
    pkginfo['username'] = username

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(
        root=install_dir,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")
    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(cache_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(cache_abs("src/buildinfo.full.json"), buildinfo)
    write_json(cache_abs("result/buildinfo.full.json"), buildinfo)
    write_json(cache_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        cache_abs("src"): "/pkg/src:rw",
        # The build script
        src_abs(buildinfo['build_script']): "/pkg/build:ro",
        # Getting the result out
        cache_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>"
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run([
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, cache_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean()
    return pkg_path
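# For reference, a buildinfo.json shape that satisfies what build() reads
# above, shown as a Python dict. Every value here is hypothetical;
# 'dcos-builder:latest' is only the documented default in the older build()
# variant further down.
example_buildinfo = {
    "docker": "dcos-builder:latest",       # image for the build container
    "build_script": "build",               # mounted at /pkg/build in the container
    "single_source": {                     # expanded to 'sources' by expand_single_source_alias
        "kind": "url_extract",
        "url": "https://example.com/mesos-1.2.3.tar.gz",
        "sha1": "0000000000000000000000000000000000000000",
    },
    "environment": {"SOME_FLAG": "1"},     # hashed into build_ids['environment']
    "requires": ["mesos"],                 # dependencies, walked transitively above
    "state_directory": True,               # ask pkgpanda for a /var/lib state dir on the host
    "username": "dcos_example",            # optional package user
    "version_extra": "rc1",                # prefixed onto the hash-based version
}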
def main():
    arguments = docopt(__doc__, version="Pkgpanda Package Manager")
    umask(0o022)

    # NOTE: Changing root or repository will likely break actually running packages.
    install = Install(
        os.path.abspath(arguments['--root']),
        os.path.abspath(arguments['--config-dir']),
        arguments['--rooted-systemd'],
        not arguments['--no-systemd'],
        not arguments['--no-block-systemd'])
    repository = Repository(os.path.abspath(arguments['--repository']))

    try:
        if arguments['setup']:
            actions.setup(install, repository)
            sys.exit(0)

        if arguments['list']:
            print_repo_list(repository.list())
            sys.exit(0)

        if arguments['active']:
            for pkg in sorted(install.get_active()):
                print(pkg)
            sys.exit(0)

        if arguments['add']:
            actions.add_package_file(repository, arguments['<package-tarball>'])
            sys.exit(0)

        if arguments['fetch']:
            for package_id in arguments['<id>']:
                actions.fetch_package(
                    repository,
                    arguments['--repository-url'],
                    package_id,
                    os.getcwd())
            sys.exit(0)

        if arguments['activate']:
            actions.activate_packages(
                install,
                repository,
                arguments['<id>'],
                not arguments['--no-systemd'],
                not arguments['--no-block-systemd'])
            sys.exit(0)

        if arguments['swap']:
            actions.swap_active_package(
                install,
                repository,
                arguments['<package-id>'],
                not arguments['--no-systemd'],
                not arguments['--no-block-systemd'])
            sys.exit(0)

        if arguments['remove']:
            for package_id in arguments['<id>']:
                actions.remove_package(install, repository, package_id)
            sys.exit(0)

        if arguments['uninstall']:
            uninstall(install, repository)
            sys.exit(0)

        if arguments['check']:
            checks = find_checks(install, repository)
            if arguments['--list']:
                list_checks(checks)
                sys.exit(0)
            # Run all checks
            sys.exit(run_checks(checks, install, repository))
    # The specific errors must be caught before the generic Exception handler,
    # otherwise these handlers are unreachable.
    except ValidationError as ex:
        print("Validation Error: {0}".format(ex))
        sys.exit(1)
    except PackageError as ex:
        print("Package Error: {0}".format(ex))
        sys.exit(1)
    except Exception as ex:
        print("ERROR: {0}".format(ex))
        sys.exit(1)

    print("unknown command")
    sys.exit(1)
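# main() above parses its CLI with docopt(__doc__, ...). A module docstring
# consistent with the option keys it reads might look like the following
# (illustrative only, not the project's actual usage text):
EXAMPLE_USAGE = """Pkgpanda Package Manager

Usage:
  pkgpanda setup [options]
  pkgpanda list [options]
  pkgpanda active [options]
  pkgpanda add <package-tarball> [options]
  pkgpanda fetch <id>... [--repository-url=<url>] [options]
  pkgpanda activate <id>... [options]
  pkgpanda swap <package-id> [options]
  pkgpanda remove <id>... [options]
  pkgpanda uninstall [options]
  pkgpanda check [--list] [options]

Options:
  --root=<path>          Install root.
  --config-dir=<path>    Configuration directory.
  --repository=<path>    Local package repository.
  --rooted-systemd       Keep systemd units under the install root.
  --no-systemd           Don't manage systemd units.
  --no-block-systemd     Don't block waiting on systemd units.
"""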
def make_bootstrap_tarball(packages_dir, packages, variant, repository_url):
    # Convert filenames to package ids
    pkg_ids = list()
    for pkg_path in packages:
        # Get the package id from the given package path
        filename = os.path.basename(pkg_path)
        if not filename.endswith(".tar.xz"):
            raise BuildError("Packages must be packaged / end with a .tar.xz. Got {}".format(filename))
        pkg_id = filename[:-len(".tar.xz")]
        pkg_ids.append(pkg_id)

    # Filename is output_name.<sha-1>.{active.json|.bootstrap.tar.xz}
    bootstrap_id = hash_checkout(pkg_ids)
    latest_name = "{}/{}bootstrap.latest".format(packages_dir, pkgpanda.util.variant_prefix(variant))
    output_name = packages_dir + '/' + bootstrap_id + '.'

    # bootstrap tarball = <sha1 of packages in tarball>.bootstrap.tar.xz
    bootstrap_name = "{}bootstrap.tar.xz".format(output_name)
    active_name = "{}active.json".format(output_name)

    def mark_latest():
        # Ensure latest is always written
        write_string(latest_name, bootstrap_id)

        print("bootstrap: {}".format(bootstrap_name))
        print("active: {}".format(active_name))
        print("latest: {}".format(latest_name))
        return bootstrap_name

    if os.path.exists(bootstrap_name):
        print("Bootstrap already up to date, not recreating")
        return mark_latest()

    # Try downloading.
    if repository_url:
        tmp_bootstrap = bootstrap_name + '.tmp'
        tmp_active = active_name + '.tmp'
        try:
            # Normalize to no trailing slash for repository_url
            repository_url = repository_url.rstrip('/')
            bootstrap_url = repository_url + '/bootstrap/{}.bootstrap.tar.xz'.format(bootstrap_id)
            active_url = repository_url + '/bootstrap/{}.active.json'.format(bootstrap_id)
            print("Attempting to download", bootstrap_name, "from", bootstrap_url)
            download(tmp_bootstrap, bootstrap_url, packages_dir)
            print("Attempting to download", active_name, "from", active_url)
            download(tmp_active, active_url, packages_dir)

            # Move into place
            os.rename(tmp_bootstrap, bootstrap_name)
            os.rename(tmp_active, active_name)

            print("Bootstrap already up to date, not recreating. Downloaded from repository-url.")
            return mark_latest()
        except FetchError:
            # Remove any partially-downloaded temporaries before falling back.
            try:
                os.remove(tmp_bootstrap)
            except OSError:
                pass
            try:
                os.remove(tmp_active)
            except OSError:
                pass

            # Fall out and do the build since the command errored.
            print("Unable to download from cache. Building.")

    print("Creating bootstrap tarball for variant {}".format(variant))

    work_dir = tempfile.mkdtemp(prefix='mkpanda_bootstrap_tmp')

    def make_abs(path):
        return os.path.join(work_dir, path)

    pkgpanda_root = make_abs("opt/mesosphere")
    repository = Repository(os.path.join(pkgpanda_root, "packages"))

    # Fetch all the packages to the root
    for pkg_path in packages:
        filename = os.path.basename(pkg_path)
        pkg_id = filename[:-len(".tar.xz")]

        def local_fetcher(id, target):
            shutil.unpack_archive(pkg_path, target, "gztar")
        repository.add(local_fetcher, pkg_id, False)

    # Activate the packages inside the repository.
    # Do generate dcos.target.wants inside the root so that we don't
    # try messing with /etc/systemd/system.
    install = Install(pkgpanda_root, None, True, False, True, True, True)
    install.activate(repository.load_packages(pkg_ids))

    # Mark the tarball as a bootstrap tarball/filesystem so that
    # dcos-setup.service will fire.
    make_file(make_abs("opt/mesosphere/bootstrap"))

    # Write out an active.json for the bootstrap tarball
    write_json(active_name, pkg_ids)

    # Rewrite all the symlinks to point to /opt/mesosphere
    rewrite_symlinks(work_dir, work_dir, "/")
    make_tar(bootstrap_name, pkgpanda_root)
    shutil.rmtree(work_dir)

    # Update latest last so that we don't ever use partially-built things.
    write_string(latest_name, bootstrap_id)

    print("Built bootstrap")
    return mark_latest()
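# Artifact layout produced by the function above, for a hypothetical
# bootstrap_id "abc123" and variant "installer" (the default variant would
# drop the "installer." prefix on the latest pointer):
#   <packages_dir>/abc123.bootstrap.tar.xz       the bootstrap tarball
#   <packages_dir>/abc123.active.json            package ids inside the tarball
#   <packages_dir>/installer.bootstrap.latest    pointer to the latest bootstrap_id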
def main():
    arguments = docopt(
        __doc__.format(
            default_config_dir=constants.config_dir,
            default_root=constants.install_root,
            default_repository=constants.repository_base,
        ),
    )
    umask(0o022)

    # NOTE: Changing root or repository will likely break actually running packages.
    install = Install(
        os.path.abspath(arguments['--root']),
        os.path.abspath(arguments['--config-dir']),
        arguments['--rooted-systemd'],
        not arguments['--no-systemd'],
        not arguments['--no-block-systemd'],
        manage_users=True,
        add_users=not os.path.exists('/etc/mesosphere/manual_host_users'),
        manage_state_dir=True)
    repository = Repository(os.path.abspath(arguments['--repository']))

    try:
        if arguments['setup']:
            actions.setup(install, repository)
            sys.exit(0)

        if arguments['list']:
            print_repo_list(repository.list())
            sys.exit(0)

        if arguments['active']:
            for pkg in sorted(install.get_active()):
                print(pkg)
            sys.exit(0)

        if arguments['add']:
            actions.add_package_file(repository, arguments['<package-tarball>'])
            sys.exit(0)

        if arguments['fetch']:
            for package_id in arguments['<id>']:
                actions.fetch_package(
                    repository,
                    arguments['--repository-url'],
                    package_id,
                    os.getcwd())
            sys.exit(0)

        if arguments['activate']:
            actions.activate_packages(
                install,
                repository,
                arguments['<id>'],
                not arguments['--no-systemd'],
                not arguments['--no-block-systemd'])
            sys.exit(0)

        if arguments['swap']:
            actions.swap_active_package(
                install,
                repository,
                arguments['<package-id>'],
                not arguments['--no-systemd'],
                not arguments['--no-block-systemd'])
            sys.exit(0)

        if arguments['remove']:
            for package_id in arguments['<id>']:
                try:
                    actions.remove_package(install, repository, package_id)
                except PackageNotFound:
                    pass
            sys.exit(0)

        if arguments['uninstall']:
            uninstall(install, repository)
            sys.exit(0)

        if arguments['check']:
            checks = find_checks(install, repository)
            if arguments['--list']:
                list_checks(checks)
                sys.exit(0)
            # Run all checks
            sys.exit(run_checks(checks, install, repository))
    except ValidationError as ex:
        print("Validation Error: {0}".format(ex))
        sys.exit(1)
    except PackageError as ex:
        print("Package Error: {0}".format(ex))
        sys.exit(1)
    except Exception as ex:
        print("ERROR: {0}".format(ex))
        sys.exit(1)

    print("unknown command")
    sys.exit(1)
def install():
    return Install(resources_test_dir("install"), resources_test_dir("systemd"), True, False, True)
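# Presumably a pytest fixture in the original test module (the decorator is an
# assumption based on the pytest-style tests above). A test would consume it
# by naming it as a parameter:
def test_list_active(install):  # hypothetical test using the fixture
    for pkg in sorted(install.get_active()):
        print(pkg)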
def main():
    arguments = docopt(__doc__, version="Pkgpanda Package Manager")
    umask(0o022)

    # NOTE: Changing root or repository will likely break actually running packages.
    install = Install(
        os.path.abspath(arguments['--root']),
        os.path.abspath(arguments['--config-dir']),
        arguments['--rooted-systemd'],
        not arguments['--no-systemd'],
        not arguments['--no-block-systemd'])
    repository = Repository(os.path.abspath(arguments['--repository']))

    if arguments['setup']:
        try:
            setup(install, repository)
        except ValidationError as ex:
            print("Validation Error: {0}".format(ex))
            sys.exit(1)
        sys.exit(0)

    if arguments['list']:
        print_repo_list(repository.list())
        sys.exit(0)

    if arguments['active']:
        for pkg in sorted(install.get_active()):
            print(pkg)
        sys.exit(0)

    if arguments['add']:
        add_to_repository(repository, arguments['<package-tarball>'])
        sys.exit(0)

    if arguments['fetch']:
        def fetcher(id, target):
            return requests_fetcher(arguments['--repository-url'], id, target, os.getcwd())

        for pkg_id in arguments['<id>']:
            # TODO(cmaloney): Make this not use escape sequences when not at a
            # `real` terminal.
            sys.stdout.write("\rFetching: {0}".format(pkg_id))
            sys.stdout.flush()
            try:
                repository.add(fetcher, pkg_id)
            except FetchError as ex:
                print("\nUnable to fetch package {0}: {1}".format(pkg_id, ex))
                sys.exit(1)
            sys.stdout.write("\rFetched: {0}\n".format(pkg_id))
            sys.stdout.flush()
        sys.exit(0)

    if arguments['activate']:
        do_activate(install, repository, arguments['<id>'], arguments['--no-systemd'], arguments['--no-block-systemd'])
        sys.exit(0)

    if arguments['swap']:
        active = install.get_active()
        # TODO(cmaloney): I guarantee there is a better way to write this and
        # I've written the same logic before...
        packages_by_name = dict()
        for id_str in active:
            pkg_id = PackageId(id_str)
            packages_by_name[pkg_id.name] = pkg_id

        new_id = PackageId(arguments['<package-id>'])
        if new_id.name not in packages_by_name:
            print("ERROR: No package with name {} currently active to swap with.".format(new_id.name))
            sys.exit(1)

        packages_by_name[new_id.name] = new_id
        new_active = list(map(str, packages_by_name.values()))
        # Activate with the new package name
        do_activate(install, repository, new_active, arguments['--no-systemd'], arguments['--no-block-systemd'])
        sys.exit(0)

    if arguments['remove']:
        # Make sure none of the packages are active
        active_packages = install.get_active()
        active = active_packages.intersection(set(arguments['<id>']))
        if len(active) > 0:
            print("Refusing to remove active packages {0}".format(" ".join(sorted(list(active)))))
            sys.exit(1)

        for pkg_id in arguments['<id>']:
            sys.stdout.write("\rRemoving: {0}".format(pkg_id))
            sys.stdout.flush()
            try:
                # Validate package id, that package is installed.
                PackageId(pkg_id)
                repository.remove(pkg_id)
            except ValidationError:
                print("\nInvalid package id {0}".format(pkg_id))
                sys.exit(1)
            except OSError as ex:
                print("\nError removing package {0}".format(pkg_id))
                print(ex)
                sys.exit(1)
            sys.stdout.write("\rRemoved: {0}\n".format(pkg_id))
            sys.stdout.flush()
        sys.exit(0)

    if arguments['uninstall']:
        uninstall(install, repository)
        sys.exit(0)

    if arguments['check']:
        checks = find_checks(install, repository)
        if arguments['--list']:
            list_checks(checks)
            sys.exit(0)
        # Run all checks
        sys.exit(run_checks(checks, install, repository))

    print("unknown command")
    sys.exit(1)
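# A worked example of the swap bookkeeping above (ids hypothetical): with
# active packages {"mesos--1.0", "java--8"} and <package-id> "mesos--2.0",
# packages_by_name maps each name to its id, so the "mesos" entry is replaced
# and new_active becomes ["mesos--2.0", "java--8"] (order aside) before
# do_activate() re-activates the set.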
def build(variant, package_dir, name, repository_url, clean_after_build):
    print("Building package {} variant {}".format(name, variant or "<default>"))
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    def pkg_abs(name):
        return package_dir + '/' + name

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    buildinfo = load_buildinfo(package_dir, variant)

    if 'name' in buildinfo:
        raise BuildError("'name' is not allowed in buildinfo.json, it is implicitly the name of the "
                         "folder containing the buildinfo.json")

    # Make sure build_script is only set on variants
    if 'build_script' in buildinfo and variant is None:
        raise BuildError("build_script can only be set on package variants")

    # Convert single_source -> sources
    try:
        sources = expand_single_source_alias(name, buildinfo)
    except ValidationError as ex:
        raise BuildError("Invalid buildinfo.json for package: {}".format(ex)) from ex

    # Save the final sources back into buildinfo so it gets written into
    # buildinfo.json. This also means buildinfo.json is always expanded form.
    buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            if src_info['kind'] not in pkgpanda.build.src_fetchers.all_fetchers:
                raise ValidationError("No known way to fetch src with kind '{}'. Known kinds: {}".format(
                    src_info['kind'],
                    pkgpanda.build.src_fetchers.all_fetchers.keys()))

            cache_dir = pkg_abs("cache")
            if not os.path.exists(cache_dir):
                os.mkdir(cache_dir)

            fetchers[src_name] = pkgpanda.build.src_fetchers.all_fetchers[src_info['kind']](
                src_name, src_info, package_dir)
            checkout_ids[src_name] = fetchers[src_name].get_id()
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (And that should be
        # tested at some point). For now disallowing identical saves hassle.
        assert_no_duplicate_keys(checkout_id, buildinfo['sources'][src_name])
        buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1sum of the buildinfo.json + build file to the build ids
    build_ids = {"sources": checkout_ids}
    build_ids['build'] = pkgpanda.util.sha1(pkg_abs("build"))
    build_ids['pkgpanda_version'] = pkgpanda.build.constants.version
    build_ids['variant'] = '' if variant is None else variant

    extra_dir = pkg_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder(extra_dir)
        build_ids['extra_source'] = extra_id
        buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = buildinfo.get('docker', 'dcos-builder:latest')
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)
    build_ids['docker'] = docker_id

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    build_ids['environment'] = buildinfo.get('environment', {})

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()
    # Verify all requires are in the repository.
    if 'requires' in buildinfo:
        # Final package has the same requires as the build.
        pkginfo['requires'] = buildinfo['requires']

        # TODO(cmaloney): Pull generating the full set of requires out into a function.
        to_check = copy.deepcopy(buildinfo['requires'])
        if type(to_check) != list:
            raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
        while to_check:
            requires_info = to_check.pop(0)
            requires_name, requires_variant = expand_require(requires_info)

            if requires_name in active_package_variants:
                # TODO(cmaloney): If one package depends on the <default>
                # variant of a package and 1+ others depends on a non-<default>
                # variant then update the dependency to the non-default variant
                # rather than erroring.
                if requires_variant != active_package_variants[requires_name]:
                    # TODO(cmaloney): Make this contain the chains of
                    # dependencies which contain the conflicting packages.
                    # a -> b -> c -> d {foo}
                    # e {bar} -> d {baz}
                    raise BuildError("Dependency on multiple variants of the same package {}. "
                                     "variants: {} {}".format(
                                         requires_name,
                                         requires_variant,
                                         active_package_variants[requires_name]))

                # The package {requires_name, variant} is already a
                # dependency; don't process it again / move on to the next.
                continue

            active_package_variants[requires_name] = requires_variant

            # Figure out the last build of the dependency, add that as the
            # fully expanded dependency.
            require_package_dir = os.path.normpath(pkg_abs('../' + requires_name))
            last_build = require_package_dir + '/' + last_build_filename(requires_variant)
            if not os.path.exists(last_build):
                raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                 "the dependency".format(requires_name, requires_variant))

            try:
                pkg_id_str = load_string(last_build)
                auto_deps.add(pkg_id_str)
                pkg_buildinfo = load_buildinfo(require_package_dir, requires_variant)
                pkg_requires = pkg_buildinfo.get('requires', list())
                pkg_path = repository.package_path(pkg_id_str)
                pkg_tar = pkg_id_str + '.tar.xz'
                if not os.path.exists(require_package_dir + '/' + pkg_tar):
                    raise BuildError("The build tarball {} referred to by the last_build file of the "
                                     "dependency {} variant {} doesn't exist. Rebuild the dependency.".format(
                                         pkg_tar, requires_name, requires_variant))

                active_package_ids.add(pkg_id_str)

                # Mount the package into the docker container.
                cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
                os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

                # Add the dependencies of the package to the set which will be
                # activated.
                # TODO(cmaloney): All these 'transitive' dependencies shouldn't
                # be available to the package being built, only what depends on
                # them directly.
                to_check += pkg_requires
            except ValidationError as ex:
                raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
            except PackageError as ex:
                raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    build_ids['requires'] = list(active_package_ids)
    version_base = hash_checkout(build_ids)
    version = None
    if "version_extra" in buildinfo:
        version = "{0}-{1}".format(buildinfo["version_extra"], version_base)
    else:
        version = version_base

    pkg_id = PackageId.from_parts(name, version)

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    buildinfo['build_ids'] = build_ids
    buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    buildinfo['name'] = name
    buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = pkg_abs("{}.tar.xz".format(pkg_id))

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        check_call(["mkdir", "-p", pkg_abs("cache")])
        write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))
        return pkg_path

    # Try downloading.
    if repository_url:
        tmp_filename = pkg_path + '.tmp'
        try:
            # Normalize to no trailing slash for repository_url
            repository_url = repository_url.rstrip('/')
            url = repository_url + '/packages/{0}/{1}.tar.xz'.format(pkg_id.name, str(pkg_id))
            print("Attempting to download", pkg_id, "from", url)
            download(tmp_filename, url, package_dir)
            os.rename(tmp_filename, pkg_path)

            print("Package up to date. Not re-building. Downloaded from repository-url.")
            # TODO(cmaloney): Updating / filling last_build should be moved out of
            # the build function.
            check_call(["mkdir", "-p", pkg_abs("cache")])
            write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))
            return pkg_path
        except FetchError:
            # Remove any partially-downloaded temporary before falling back.
            try:
                os.remove(tmp_filename)
            except OSError:
                pass

            # Fall out and do the build since the command errored.
            print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id, json.dumps(buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    clean(package_dir)

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = pkg_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that overrides the outer one.
        id_obj = PackageId(dep)
        add_to_repository(repository, pkg_abs('../{0}/{1}.tar.xz'.format(id_obj.name, dep)))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = pkg_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = pkg_abs('src/' + src_name)
            os.mkdir(root)
            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Copy over environment settings
    if 'environment' in buildinfo:
        pkginfo['environment'] = buildinfo['environment']

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(install_dir, None, True, False, True, True)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")
    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(pkg_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(pkg_abs("src/buildinfo.full.json"), buildinfo)
    write_json(pkg_abs("result/buildinfo.full.json"), buildinfo)
    write_json(pkg_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        pkg_abs("src"): "/pkg/src:rw",
        # The build script
        pkg_abs(buildinfo.get('build_script', 'build')): "/pkg/build:ro",
        # Getting the result out
        pkg_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>"
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run([
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(pkg_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    check_call(["mkdir", "-p", pkg_abs("cache")])
    write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, pkg_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean(package_dir)
    return pkg_path
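# Both build() variants drive a DockerCmd by setting .container, .volumes
# ({host_path: "container_path:mode"}), and .environment, then calling .run().
# A minimal stand-in with that interface (an assumption, not the project's
# implementation) could look like:
from subprocess import check_call

class DockerCmdSketch:
    def __init__(self):
        self.container = None
        self.volumes = {}
        self.environment = {}

    def run(self, cmd):
        # Translate the attributes into a `docker run` invocation.
        docker = ['docker', 'run']
        for host_path, container_spec in self.volumes.items():
            docker += ['-v', '{}:{}'.format(host_path, container_spec)]
        for name, value in self.environment.items():
            docker += ['-e', '{}={}'.format(name, value)]
        check_call(docker + [self.container] + cmd)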
def build(package_store, name, variant, clean_after_build, recursive=False): assert isinstance(package_store, PackageStore) print("Building package {} variant {}".format(name, pkgpanda.util.variant_str(variant))) tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo") repository = Repository(tmpdir.name) package_dir = package_store.get_package_folder(name) def src_abs(name): return package_dir + '/' + name def cache_abs(filename): return package_store.get_package_cache_folder(name) + '/' + filename # Build pkginfo over time, translating fields from buildinfo. pkginfo = {} # Build up the docker command arguments over time, translating fields as needed. cmd = DockerCmd() assert (name, variant) in package_store.packages, \ "Programming error: name, variant should have been validated to be valid before calling build()." builder = IdBuilder(package_store.get_buildinfo(name, variant)) final_buildinfo = dict() builder.add('name', name) builder.add('variant', pkgpanda.util.variant_str(variant)) # Convert single_source -> sources if builder.has('sources'): if builder.has('single_source'): raise BuildError('Both sources and single_source cannot be specified at the same time') sources = builder.take('sources') elif builder.has('single_source'): sources = {name: builder.take('single_source')} builder.replace('single_source', 'sources', sources) else: builder.add('sources', {}) sources = dict() print("NOTICE: No sources specified") final_buildinfo['sources'] = sources # Construct the source fetchers, gather the checkout ids from them checkout_ids = dict() fetchers = dict() try: for src_name, src_info in sorted(sources.items()): # TODO(cmaloney): Switch to a unified top level cache directory shared by all packages cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name check_call(['mkdir', '-p', cache_dir]) fetcher = get_src_fetcher(src_info, cache_dir, package_dir) fetchers[src_name] = fetcher checkout_ids[src_name] = fetcher.get_id() except ValidationError as ex: raise BuildError("Validation error when fetching sources for package: {}".format(ex)) for src_name, checkout_id in checkout_ids.items(): # NOTE: single_source buildinfo was expanded above so the src_name is # always correct here. # Make sure we never accidentally overwrite something which might be # important. Fields should match if specified (And that should be # tested at some point). For now disallowing identical saves hassle. assert_no_duplicate_keys(checkout_id, final_buildinfo['sources'][src_name]) final_buildinfo['sources'][src_name].update(checkout_id) # Add the sha1 of the buildinfo.json + build file to the build ids builder.update('sources', checkout_ids) build_script = src_abs(builder.take('build_script')) # TODO(cmaloney): Change dest name to build_script_sha1 builder.replace('build_script', 'build', pkgpanda.util.sha1(build_script)) builder.add('pkgpanda_version', pkgpanda.build.constants.version) extra_dir = src_abs("extra") # Add the "extra" folder inside the package as an additional source if it # exists if os.path.exists(extra_dir): extra_id = hash_folder(extra_dir) builder.add('extra_source', extra_id) final_buildinfo['extra_source'] = extra_id # Figure out the docker name. docker_name = builder.take('docker') cmd.container = docker_name # Add the id of the docker build environment to the build_ids. 
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)
    builder.update('docker', docker_id)

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    pkginfo['environment'] = builder.take('environment')

    # Whether pkgpanda should make sure a `/var/lib` state directory is
    # available on the host.
    pkginfo['state_directory'] = builder.take('state_directory')
    if pkginfo['state_directory'] not in [True, False]:
        raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")

    username = None
    if builder.has('username'):
        username = builder.take('username')
        if not isinstance(username, str):
            raise BuildError("username in buildinfo.json must be either not set (no user for this"
                             " package), or a user name string")
        try:
            pkgpanda.UserManagement.validate_username(username)
        except ValidationError as ex:
            raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
    pkginfo['username'] = username

    group = None
    if builder.has('group'):
        group = builder.take('group')
        if not isinstance(group, str):
            raise BuildError("group in buildinfo.json must be either not set (use default group for this user)"
                             ", or group must be a string")
        try:
            pkgpanda.UserManagement.validate_group_name(group)
        except ValidationError as ex:
            raise BuildError("group in buildinfo.json didn't meet the validation rules. {}".format(ex))
    pkginfo['group'] = group

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()

    # Final package has the same requires as the build.
    requires = builder.take('requires')
    pkginfo['requires'] = requires

    if builder.has("sysctl"):
        pkginfo["sysctl"] = builder.take("sysctl")

    # TODO(cmaloney): Pull generating the full set of requires out into a function.
    to_check = copy.deepcopy(requires)
    if type(to_check) != list:
        raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
    while to_check:
        requires_info = to_check.pop(0)
        requires_name, requires_variant = expand_require(requires_info)

        if requires_name in active_package_variants:
            # TODO(cmaloney): If one package depends on the <default>
            # variant of a package and 1+ others depends on a non-<default>
            # variant then update the dependency to the non-default variant
            # rather than erroring.
            if requires_variant != active_package_variants[requires_name]:
                # TODO(cmaloney): Make this contain the chains of
                # dependencies which contain the conflicting packages.
                # a -> b -> c -> d {foo}
                # e {bar} -> d {baz}
                raise BuildError(
                    "Dependency on multiple variants of the same package {}. variants: {} {}".format(
                        requires_name,
                        requires_variant,
                        active_package_variants[requires_name]))

            # The package {requires_name, requires_variant} is already a
            # dependency, don't process it again / move on to the next.
            continue

        active_package_variants[requires_name] = requires_variant

        # Figure out the last build of the dependency, add that as the
        # fully expanded dependency.
        requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
        if not os.path.exists(requires_last_build):
            if recursive:
                # Build the dependency
                build(package_store, requires_name, requires_variant, clean_after_build, recursive)
            else:
                raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                 "the dependency".format(requires_name, requires_variant))

        try:
            pkg_id_str = load_string(requires_last_build)
            auto_deps.add(pkg_id_str)
            pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
            pkg_requires = pkg_buildinfo['requires']
            pkg_path = repository.package_path(pkg_id_str)
            pkg_tar = pkg_id_str + '.tar.xz'
            if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
                raise BuildError(
                    "The build tarball {} referred to by the last_build file of the dependency {} "
                    "variant {} doesn't exist. Rebuild the dependency.".format(
                        pkg_tar,
                        requires_name,
                        requires_variant))

            active_package_ids.add(pkg_id_str)

            # Mount the package into the docker container.
            cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
            os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

            # Add the dependencies of the package to the set which will be
            # activated.
            # TODO(cmaloney): All these 'transitive' dependencies shouldn't
            # be available to the package being built, only what depends on
            # them directly.
            to_check += pkg_requires
        except ValidationError as ex:
            raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
        except PackageError as ex:
            raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    builder.update('requires', list(active_package_ids))
    version_extra = None
    if builder.has('version_extra'):
        version_extra = builder.take('version_extra')

    build_ids = builder.get_build_ids()
    version_base = hash_checkout(build_ids)
    version = None
    if version_extra:
        version = "{0}-{1}".format(version_extra, version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Everything must have been extracted by now. If it wasn't, then we just
    # had a hard error that it was set but not used, as well as didn't include
    # it in the calculation of the PackageId.
    builder = None

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    final_buildinfo['build_ids'] = build_ids
    final_buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    final_buildinfo['name'] = name
    final_buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
        return pkg_path

    # Try downloading.
    dl_path = package_store.try_fetch_by_id(pkg_id)
    if dl_path:
        print("Package up to date. Not re-building. Downloaded from repository-url.")
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
        print(dl_path, pkg_path)
        assert dl_path == pkg_path
        return pkg_path

    # Fall out and do the build since it couldn't be downloaded
    print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id,
        json.dumps(final_buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    def clean():
        # Run a docker container to remove src/ and result/
        cmd = DockerCmd()
        cmd.volumes = {
            package_store.get_package_cache_folder(name): "/pkg/:rw",
        }
        cmd.container = "ubuntu:14.04.4"
        cmd.run("package-cleaner", ["rm", "-rf", "/pkg/src", "/pkg/result"])

    clean()

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = cache_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that overrides the outer one.
        id_obj = PackageId(dep)
        add_package_file(repository, package_store.get_package_path(id_obj))
        package = repository.load(dep)
        active_packages.append(package)

    # Checkout all the sources into their respective 'src/' folders.
    try:
        src_dir = cache_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = cache_abs('src/' + src_name)
            os.mkdir(root)
            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(
        root=install_dir,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(active_packages)

    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(cache_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(cache_abs("src/buildinfo.full.json"), final_buildinfo)
    write_json(cache_abs("result/buildinfo.full.json"), final_buildinfo)
    write_json(cache_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        cache_abs("src"): "/pkg/src:rw",
        # The build script
        build_script: "/pkg/build:ro",
        # Getting the result out
        cache_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>",
        "NUM_CORES": multiprocessing.cpu_count()
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run("package-builder", [
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, cache_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean()
    return pkg_path
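# --- Hedged usage sketch (not part of the original module) -----------------
# Illustrates driving build() end to end. The packages_dir path and package
# name are illustrative assumptions, and the PackageStore(packages_dir,
# repository_url) constructor call is inferred from how package_store is used
# above (get_buildinfo, get_package_cache_folder, try_fetch_by_id, ...).
def _example_build_package(name="mesos", packages_dir="/tmp/packages"):
    package_store = PackageStore(packages_dir, None)  # None: no repository-url
    # recursive=True makes missing dependencies build themselves instead of
    # raising "No last build file found for dependency ...".
    return build(package_store, name, variant=None,
                 clean_after_build=True, recursive=True)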
def main():
    arguments = docopt(
        __doc__.format(
            default_config_dir=constants.config_dir,
            default_root=constants.install_root,
            default_repository=constants.repository_base,
        ),
    )
    umask(0o022)

    # NOTE: Changing root or repository will likely break actually running packages.
    install = Install(
        os.path.abspath(arguments['--root']),
        os.path.abspath(arguments['--config-dir']),
        arguments['--rooted-systemd'],
        not arguments['--no-systemd'],
        not arguments['--no-block-systemd'],
        manage_users=True,
        add_users=not os.path.exists('/etc/mesosphere/manual_host_users'),
        manage_state_dir=True)
    repository = Repository(os.path.abspath(arguments['--repository']))

    try:
        if arguments['setup']:
            actions.setup(install, repository)
            sys.exit(0)

        if arguments['list']:
            print_repo_list(repository.list())
            sys.exit(0)

        if arguments['active']:
            for pkg in sorted(install.get_active()):
                print(pkg)
            sys.exit(0)

        if arguments['add']:
            actions.add_package_file(repository, arguments['<package-tarball>'])
            sys.exit(0)

        if arguments['fetch']:
            for package_id in arguments['<id>']:
                actions.fetch_package(
                    repository,
                    arguments['--repository-url'],
                    package_id,
                    os.getcwd())
            sys.exit(0)

        if arguments['activate']:
            actions.activate_packages(
                install,
                repository,
                arguments['<id>'],
                not arguments['--no-systemd'],
                not arguments['--no-block-systemd'])
            sys.exit(0)

        if arguments['swap']:
            actions.swap_active_package(
                install,
                repository,
                arguments['<package-id>'],
                not arguments['--no-systemd'],
                not arguments['--no-block-systemd'])
            sys.exit(0)

        if arguments['remove']:
            for package_id in arguments['<id>']:
                try:
                    actions.remove_package(install, repository, package_id)
                except PackageNotFound:
                    pass
            sys.exit(0)

        if arguments['uninstall']:
            uninstall(install, repository)
            sys.exit(0)

        if arguments['check']:
            checks = find_checks(install, repository)
            if arguments['--list']:
                list_checks(checks)
                sys.exit(0)
            # Run all checks
            sys.exit(run_checks(checks, install, repository))
    except ValidationError as ex:
        print("Validation Error: {0}".format(ex), file=sys.stderr)
        sys.exit(1)
    except PackageError as ex:
        print("Package Error: {0}".format(ex), file=sys.stderr)
        sys.exit(1)
    except Exception as ex:
        print("ERROR: {0}".format(ex), file=sys.stderr)
        sys.exit(1)

    print("unknown command", file=sys.stderr)
    sys.exit(1)
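# --- Hedged usage sketch (not part of the original module) -----------------
# main() is docopt-driven and always exits via sys.exit(), so tests typically
# exercise it by swapping sys.argv and catching SystemExit. The flag set below
# is an assumption based on the arguments main() reads; the authoritative
# usage string lives in this module's __doc__, which is not shown here.
def _example_cli_list(repository_dir="/tmp/repository"):
    import sys
    sys.argv = ["pkgpanda", "list", "--repository", repository_dir]
    try:
        main()  # docopt parses sys.argv[1:]; `list` prints the repo contents
    except SystemExit as ex:
        assert ex.code == 0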
def install():
    # Positional flags: rooted_systemd=True, manage_systemd=False, block_systemd=True
    return Install("resources/install", "resources/systemd", True, False, True)
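# --- Hedged usage sketch (not part of the original module) -----------------
# A test consuming install() might look like the following, assuming install()
# is registered as a pytest fixture. The assertion is deliberately weak
# because the exact contents of resources/install are not shown here.
def test_install_lists_active(install):
    # Same call main() uses for the `active` subcommand.
    active = sorted(install.get_active())
    assert isinstance(active, list)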