def mock_do_build_packages(cache_repository_url, tree_variants):
    make_directory('packages/cache/bootstrap')
    write_string("packages/cache/bootstrap/bootstrap_id.bootstrap.tar.xz", "bootstrap_contents")
    write_json("packages/cache/bootstrap/bootstrap_id.active.json", ['a--b', 'c--d'])
    write_string("packages/cache/bootstrap/bootstrap.latest", "bootstrap_id")
    write_string("packages/cache/bootstrap/installer.bootstrap.latest", "installer_bootstrap_id")
    write_json("packages/cache/bootstrap/installer_bootstrap_id.active.json", ['c--d', 'e--f'])
    write_string("packages/cache/bootstrap/downstream.installer.bootstrap.latest", "downstream_installer_bootstrap_id")
    write_json("packages/cache/bootstrap/downstream_installer_bootstrap_id.active.json", [])
    make_directory('packages/cache/complete')
    write_json(
        "packages/cache/complete/complete.latest.json",
        {'bootstrap': 'bootstrap_id', 'packages': ['a--b', 'c--d']})
    write_json(
        "packages/cache/complete/installer.complete.latest.json",
        {'bootstrap': 'installer_bootstrap_id', 'packages': ['c--d', 'e--f']})
    write_json(
        "packages/cache/complete/downstream.installer.complete.latest.json",
        {'bootstrap': 'installer_bootstrap_id', 'packages': []})

    return {
        None: {"bootstrap": "bootstrap_id", "packages": ["a--b", "c--d"]},
        "installer": {"bootstrap": "installer_bootstrap_id", "packages": ["c--d", "e--f"]},
        "downstream.installer": {"bootstrap": "downstream_installer_bootstrap_id", "packages": []}
    }
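# Note (added): the mapping above keys tree variants by name (None is the
# default variant) and pairs each with its bootstrap id and the package ids
# baked into that bootstrap, mirroring what the real do_build_packages() is
# expected to return so tests can exercise the release pipeline without
# building anything.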
def generate_node_upgrade_script(gen_out, installed_cluster_version, serve_dir=SERVE_DIR):
    # installed_cluster_version: Current installed version on the cluster
    # installer_version: Version we are upgrading to

    bootstrap_url = gen_out.arguments['bootstrap_url']

    installer_version = gen.calc.entry['must']['dcos_version']

    package_list = ' '.join(package['id'] for package in gen_out.cluster_packages.values())

    bash_script = gen.template.parse_str(node_upgrade_template).render({
        'dcos_image_commit': util.dcos_image_commit,
        'generation_date': util.template_generation_date,
        'bootstrap_url': bootstrap_url,
        'cluster_packages': package_list,
        'installed_cluster_version': installed_cluster_version,
        'installer_version': installer_version})

    upgrade_script_path = '/upgrade/' + uuid.uuid4().hex
    make_directory(serve_dir + upgrade_script_path)
    write_string(serve_dir + upgrade_script_path + '/dcos_node_upgrade.sh', bash_script)
    print("Node upgrade script URL: " + bootstrap_url + upgrade_script_path + '/dcos_node_upgrade.sh')

    return 0
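# Minimal sketch (added, not in the original source) of the gen.template flow
# used above, assuming gen templates use the mustache-style '{{ name }}'
# placeholder syntax; the version string is a made-up example.
def _sketch_render_template():
    rendered = gen.template.parse_str('version={{ installer_version }}').render(
        {'installer_version': '1.13.0'})  # hypothetical value
    assert rendered == 'version=1.13.0'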
def fetch_artifacts(filenames, src_dir, dest_dir):
    # If all the dest files already exist, no-op
    dest_files = [dest_dir + '/' + filename for filename in filenames]
    if all(map(os.path.exists, dest_files)):
        return

    # Make sure the source files exist
    src_files = [src_dir + '/' + filename for filename in filenames]
    for filename in src_files:
        if not os.path.exists(filename):
            log.error("Internal Error: %s not found. Should have been in the installer container.", filename)
            raise FileNotFoundError(filename)

    make_directory(dest_dir)
    do_move_atomic(src_dir, dest_dir, filenames)
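# Sketch (added, not in the original source) of the visible fetch_artifacts
# contract. tmp_dir is a hypothetical empty scratch directory, and the final
# assert assumes do_move_atomic leaves the files in dest_dir, as its name
# suggests.
def _sketch_fetch_artifacts(tmp_dir):
    src, dest = tmp_dir + '/artifacts', tmp_dir + '/serve'
    make_directory(src)
    write_string(src + '/a.txt', 'contents')
    fetch_artifacts(['a.txt'], src, dest)  # populates dest via do_move_atomic
    assert os.path.exists(dest + '/a.txt')
    fetch_artifacts(['a.txt'], src, dest)  # every dest file exists: returns immediately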
def generate_node_upgrade_win_script(gen_out, installed_cluster_version, serve_dir=SERVE_DIR):
    # installed_cluster_version: Current installed version on the cluster
    # installer_version: Version we are upgrading to

    bootstrap_url = gen_out.arguments['bootstrap_url']

    if gen_out.arguments['master_discovery'] == 'static':
        master_list = gen_out.arguments['master_list']
    elif gen_out.arguments['master_discovery'] == 'master_http_loadbalancer':
        master_list = gen_out.arguments['exhibitor_address'] + ':2181'
    else:
        master_list = 'zk-1.zk:2181,zk-2.zk:2181,zk-3.zk:2181,zk-4.zk:2181,zk-5.zk:2181'

    installer_version = gen.calc.entry['must']['dcos_version']

    node_upgrade_template_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        '../gen/build_deploy/powershell/dcos_node_upgrade.ps1.in')
    with open(node_upgrade_template_path, 'r') as f:
        node_upgrade_template = f.read()

    powershell_script = gen.template.parse_str(node_upgrade_template).render({
        'dcos_image_commit': util.dcos_image_commit,
        'generation_date': util.template_generation_date,
        'bootstrap_url': bootstrap_url,
        'master_list': master_list,
        'installed_cluster_version': installed_cluster_version,
        'installer_version': installer_version})

    upgrade_script_path = '/windows/upgrade/' + uuid.uuid4().hex
    make_directory(serve_dir + upgrade_script_path)
    write_string(serve_dir + upgrade_script_path + '/dcos_node_upgrade.ps1', powershell_script)
    print("Windows agent upgrade script URL: " + bootstrap_url + upgrade_script_path + '/dcos_node_upgrade.ps1')

    return 0
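# Note (added): master_list resolution above, by master_discovery mode:
#   'static'                   -> the configured master_list as-is
#   'master_http_loadbalancer' -> exhibitor_address + ':2181'
#   anything else              -> a hardcoded five-node zk-N.zk:2181 list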
def upload(self,
           destination_path: str,
           blob: Optional[bytes] = None,
           local_path: Optional[str] = None,
           no_cache: bool = False,
           content_type: Optional[str] = None):
    # TODO(cmaloney): Don't discard the extra no_cache / content_type. We ideally want to be
    # able to test those are set.
    destination_full_path = self.__full_path(destination_path)
    make_directory(os.path.dirname(destination_full_path))

    assert local_path is None or blob is None
    if local_path:
        self.__copy(local_path, destination_full_path)
    else:
        assert isinstance(blob, bytes)
        with open(destination_full_path, 'wb') as f:
            f.write(blob)
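# Hypothetical usage (added, not in the original source): the two mutually
# exclusive upload modes. `storage` is any instance of this provider class;
# the destination paths and payloads are made up for illustration.
def _sketch_upload_modes(storage):
    storage.upload('stable/commit.txt', blob=b'abc123')  # write raw bytes
    storage.upload('stable/bootstrap.tar.xz', local_path='/tmp/bootstrap.tar.xz')  # copy a file (assumes it exists)
    # Passing both blob and local_path trips the `local_path is None or blob is None`
    # assertion above; passing neither fails the isinstance(blob, bytes) check.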
def make_serve_dir(gen_out):
    make_directory(SERVE_DIR)
    gen.build_deploy.bash.generate(gen_out, SERVE_DIR)

    # Copy cached artifacts.
    cached_packages = sorted(
        i['filename'] for i in gen_out.cluster_packages.values()
        if i['filename'] not in gen_out.stable_artifacts
    )
    bootstrap_files = [
        "bootstrap/{}.bootstrap.tar.xz".format(gen_out.arguments['bootstrap_id']),
        "bootstrap/{}.active.json".format(gen_out.arguments['bootstrap_id'])
    ]
    fetch_artifacts(
        bootstrap_files + cached_packages,
        ARTIFACT_DIR,
        SERVE_DIR,
    )

    # Write some package metadata
    pkgpanda.util.write_json(CLUSTER_PACKAGES_PATH, gen_out.cluster_packages)
def activate(self, packages):
    # Ensure the new set is reasonable.
    validate_compatible(packages, self.__roles)

    # Build the absolute paths for the running config, new config location,
    # and where to archive the config.
    active_names = self.get_active_names()
    active_dirs = list(map(self._make_abs, self.__well_known_dirs + ["active"]))

    new_names = [name + ".new" for name in active_names]
    new_dirs = [name + ".new" for name in active_dirs]

    old_names = [name + ".old" for name in active_names]

    # Remove all pre-existing new and old directories
    for name in chain(new_names, old_names):
        if os.path.exists(name):
            if os.path.isdir(name):
                remove_directory(name)
            else:
                os.remove(name)

    # Remove unit files staged for an activation that didn't occur.
    if not self.__skip_systemd_dirs:
        self.systemd.remove_staged_unit_files()

    # Make the directories for the new config
    for name in new_dirs:
        os.makedirs(name)

    def symlink_all(src, dest):
        if not os.path.isdir(src):
            return

        symlink_tree(src, dest)

    # Set the new LD_LIBRARY_PATH, PATH.
    env_contents = env_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
    env_export_contents = env_export_header.format("/opt/mesosphere" if self.__fake_path else self.__root)

    active_buildinfo_full = {}

    dcos_service_configuration = self._get_dcos_configuration_template()

    # Building up the set of users
    sysusers = UserManagement(self.__manage_users, self.__add_users)

    def _get_service_files(_dir):
        service_files = []
        for root, directories, filenames in os.walk(_dir):
            for filename in filter(lambda name: name.endswith(".service"), filenames):
                service_files.append(os.path.join(root, filename))
        return service_files

    def _get_service_names(_dir):
        service_files = list(map(os.path.basename, _get_service_files(_dir)))

        if not service_files:
            return []

        return list(map(lambda name: os.path.splitext(name)[0], service_files))

    # Add the folders, config in each package.
    for package in packages:
        # Package folders
        # NOTE: Since active is at the end of the folder list it will be
        # removed by the zip. This is the desired behavior, since it will be
        # populated later.
        # Do the basename since some well known dirs are full paths (dcos.target.wants)
        # while inside the packages they are always top level directories.
        for new, dir_name in zip(new_dirs, self.__well_known_dirs):
            dir_name = os.path.basename(dir_name)
            pkg_dir = os.path.join(package.path, dir_name)

            assert os.path.isabs(new)
            assert os.path.isabs(pkg_dir)

            try:
                symlink_all(pkg_dir, new)

                # Symlink all applicable role-based config
                for role in self.__roles:
                    role_dir = os.path.join(package.path, "{0}_{1}".format(dir_name, role))
                    symlink_all(role_dir, new)
            except ConflictingFile as ex:
                raise ValidationError(
                    "Two packages are trying to install the same file {0} or "
                    "two roles in the set of roles {1} are causing a package "
                    "to try activating multiple versions of the same file. "
                    "One of the package files is {2}.".format(ex.dest, self.__roles, ex.src))

        # Add to the active folder
        os.symlink(package.path, os.path.join(self._make_abs("active.new"), package.name))

        # Add to the environment and environment.export contents
        env_contents += "# package: {0}\n".format(package.id)
        env_export_contents += "# package: {0}\n".format(package.id)

        for k, v in package.environment.items():
            env_contents += "{0}={1}\n".format(k, v)
            env_export_contents += "export {0}={1}\n".format(k, v)

        env_contents += "\n"
        env_export_contents += "\n"

        # Add to the buildinfo
        try:
            active_buildinfo_full[package.name] = load_json(os.path.join(package.path, "buildinfo.full.json"))
        except FileNotFoundError:
            # TODO(cmaloney): These only come from setup-packages. Should update
            # setup-packages to add a buildinfo.full for those packages
            active_buildinfo_full[package.name] = None

        # NOTE: It is critical that the state dir, the package name and the user name are all
        # the same. Otherwise on upgrades we might remove access to files by changing their
        # chown to something incompatible. We survive the first upgrade because everything
        # goes from root to specific users, and root can access all user files.
        if package.username is not None:
            sysusers.add_user(package.username, package.group)

        # Ensure the state directory exists
        # TODO(cmaloney): On upgrade take a snapshot?
        if self.__manage_state_dir:
            state_dir_path = self.__state_dir_root + '/' + package.name
            if package.state_directory:
                make_directory(state_dir_path)
                if package.username and not is_windows:
                    uid = sysusers.get_uid(package.username)
                    check_call(['chown', '-R', str(uid), state_dir_path])

        if package.sysctl:
            service_names = _get_service_names(package.path)

            if not service_names:
                raise ValueError("service name required for sysctl could not be determined for {package}".format(
                    package=package.id))

            for service in service_names:
                if service in package.sysctl:
                    dcos_service_configuration["sysctl"][service] = package.sysctl[service]

    # Prepare new systemd units for activation.
    if not self.__skip_systemd_dirs:
        new_wants_dir = self._make_abs(self.__systemd_dir + ".new")
        if os.path.exists(new_wants_dir):
            self.systemd.stage_new_units(new_wants_dir)

    dcos_service_configuration_file = os.path.join(self._make_abs("etc.new"), DCOS_SERVICE_CONFIGURATION_FILE)
    write_json(dcos_service_configuration_file, dcos_service_configuration)

    # Write out the new environment file.
    new_env = self._make_abs("environment.new")
    write_string(new_env, env_contents)

    # Write out the new environment.export file
    new_env_export = self._make_abs("environment.export.new")
    write_string(new_env_export, env_export_contents)

    # Write out the buildinfo of every active package
    new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
    write_json(new_buildinfo_meta, active_buildinfo_full)

    self.swap_active(".new")
def make_installer_docker(variant, variant_info, installer_info):
    bootstrap_id = variant_info['bootstrap']
    assert len(bootstrap_id) > 0

    image_version = util.dcos_image_commit[:18] + '-' + bootstrap_id[:18]
    genconf_tar = "dcos-genconf." + image_version + ".tar"
    installer_filename = "packages/cache/dcos_generate_config." + pkgpanda.util.variant_prefix(variant) + "sh"
    bootstrap_filename = bootstrap_id + ".bootstrap.tar.xz"
    bootstrap_active_filename = bootstrap_id + ".active.json"
    installer_bootstrap_filename = installer_info['bootstrap'] + '.bootstrap.tar.xz'
    bootstrap_latest_filename = pkgpanda.util.variant_prefix(variant) + 'bootstrap.latest'
    latest_complete_filename = pkgpanda.util.variant_prefix(variant) + 'complete.latest.json'
    packages_dir = 'packages'
    docker_image_name = 'mesosphere/dcos-genconf:' + image_version

    # TODO(cmaloney): All of this should use package_resources
    with tempfile.TemporaryDirectory() as build_dir:
        assert build_dir[-1] != '/'

        print("Setting up build environment")

        def dest_path(filename):
            return build_dir + '/' + filename

        def copy_to_build(src_prefix, filename):
            dest_filename = dest_path(filename)
            os.makedirs(os.path.dirname(dest_filename), exist_ok=True)
            copy_file(os.getcwd() + '/' + src_prefix + '/' + filename, dest_filename)

        def fill_template(base_name, format_args):
            pkgpanda.util.write_string(
                dest_path(base_name),
                pkg_resources.resource_string(__name__, 'bash/' + base_name + '.in').decode().format(**format_args))

        fill_template('Dockerfile', {
            'installer_bootstrap_filename': installer_bootstrap_filename,
            'bootstrap_filename': bootstrap_filename,
            'bootstrap_active_filename': bootstrap_active_filename,
            'bootstrap_latest_filename': bootstrap_latest_filename,
            'latest_complete_filename': latest_complete_filename,
            'packages_dir': packages_dir})

        fill_template('installer_internal_wrapper', {
            'variant': pkgpanda.util.variant_str(variant),
            'bootstrap_id': bootstrap_id,
            'dcos_image_commit': util.dcos_image_commit})

        if not is_windows:
            subprocess.check_call(['chmod', '+x', dest_path('installer_internal_wrapper')])

        # TODO(cmaloney) make this use make_bootstrap_artifacts / that set
        # rather than manually keeping everything in sync
        copy_to_build('packages/cache/bootstrap', bootstrap_filename)
        copy_to_build('packages/cache/bootstrap', installer_bootstrap_filename)
        copy_to_build('packages/cache/bootstrap', bootstrap_active_filename)
        copy_to_build('packages/cache/bootstrap', bootstrap_latest_filename)
        copy_to_build('packages/cache/complete', latest_complete_filename)
        for package_id in variant_info['packages']:
            package_name = pkgpanda.PackageId(package_id).name
            copy_to_build('packages/cache/', packages_dir + '/' + package_name + '/' + package_id + '.tar.xz')

        # Copy across gen_extra if it exists
        if os.path.exists('gen_extra'):
            copy_directory('gen_extra', dest_path('gen_extra'))
        else:
            make_directory(dest_path('gen_extra'))

        print("Building docker container in " + build_dir)
        subprocess.check_call(['docker', 'build', '-t', docker_image_name, build_dir])

        print("Building", installer_filename)
        pkgpanda.util.write_string(
            installer_filename,
            pkg_resources.resource_string(__name__, 'bash/dcos_generate_config.sh.in').decode().format(
                genconf_tar=genconf_tar,
                docker_image_name=docker_image_name,
                variant=variant) + '\n#EOF#\n')
        subprocess.check_call(['docker', 'save', docker_image_name], stdout=open(genconf_tar, 'w'))
        subprocess.check_call(['tar', 'cvf', '-', genconf_tar], stdout=open(installer_filename, 'a'))
        subprocess.check_call(['chmod', '+x', installer_filename])

        # Cleanup
        subprocess.check_call(['rm', genconf_tar])

    return installer_filename
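# Note (added): the resulting dcos_generate_config.<variant>.sh is
# self-extracting: a bash script ending at the '#EOF#' marker, followed by an
# appended tar stream containing the `docker save` output of the dcos-genconf
# image, so the script can docker-load that embedded image when run.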
def download(self, path, local_path):
    dirname = os.path.dirname(local_path)
    if dirname:
        make_directory(dirname)
    self.download_inner(path, local_path)
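# Hypothetical usage (added, not in the original source): download creates the
# parent directory of local_path on demand, so a nested target path is fine.
#
#   storage.download('stable/commit.txt', '/tmp/scratch/nested/commit.txt')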
def __copy(self, full_source_path, full_destination_path):
    make_directory(os.path.dirname(full_destination_path))
    copy_file(full_source_path, full_destination_path)