def add(self, fetcher, id, warn_added=True):
    """Fetch package `id` into this repository via `fetcher`.

    Args:
        fetcher: callable `(package_id, target_dir)` which downloads /
            extracts the package contents into `target_dir`.
        id: package id string; validated by constructing a `PackageId`
            (which raises on a malformed id).
        warn_added: when True, print a notice if the package is already
            present.

    Returns:
        True if the package was fetched and moved into place,
        False if it already existed.
    """
    # Validate the package id (raises if malformed).
    PackageId(id)

    # NOTE: original computed self.package_path(id) twice into two
    # variables; a single lookup is sufficient.
    pkg_path = self.package_path(id)

    # If the package already exists there is nothing to do.
    if os.path.exists(pkg_path):
        if warn_added:
            print("Package already added.")
        return False

    # TODO(cmaloney): Supply a temporary directory to extract to
    # Then swap that into place, preventing partially-extracted things from
    # becoming an issue.

    # Appending _tmp so there is very little chance of us running into the
    # rm of another package, since all our PackageID strings are SHA-1, so
    # they never end with `_tmp`. `{sha}_tmp` is still a valid version
    # number however so other code doing directory scans will be fine with
    # the temp folders.
    tmp_path = pkg_path + '_tmp'

    # Cleanup artifacts (if any) laying around from previous partial
    # package extractions.
    remove_directory(tmp_path)

    fetcher(id, tmp_path)
    # Publish the fully-extracted package in one rename (atomic when tmp
    # and final path share a filesystem, which they do by construction).
    os.rename(tmp_path, pkg_path)
    return True
def add(self, fetcher, id, warn_added=True):
    """Download package `id` with `fetcher` and install it in the repository.

    Returns False when the package directory already exists (optionally
    printing a notice), True after a successful fetch-and-rename.
    """
    # Raises if `id` is not a well-formed package id.
    PackageId(id)

    # Bail out early when the package directory is already present.
    package_path = self.package_path(id)
    if os.path.exists(package_path):
        if warn_added:
            print("Package already added.")
        return False

    # TODO(cmaloney): Supply a temporary directory to extract to
    # Then swap that into place, preventing partially-extracted things from
    # becoming an issue.
    pkg_path = self.package_path(id)

    # `_tmp` can never collide with a real package id: ids end in a SHA-1,
    # which never ends with `_tmp`, yet `{sha}_tmp` still parses as a valid
    # version, so directory scans elsewhere tolerate the temp folders.
    tmp_path = pkg_path + '_tmp'

    # Drop leftovers from any earlier partial extraction.
    remove_directory(tmp_path)

    fetcher(id, tmp_path)
    os.rename(tmp_path, pkg_path)
    return True
def remove_recursive(self, path):
    """Recursively delete `path` (resolved through `self.__full_path`).

    Raises:
        ValueError: if either the given path or its resolved form is
            suspiciously short (a guard against wiping '/' or other
            base-system directories).
    """
    full_path = self.__full_path(path)
    # Make sure we're not going to delete something too horrible / in the
    # base system. Adjust as needed.
    # Explicit raise instead of `assert`: asserts are stripped under
    # `python -O`, which would silently remove this rm-rf safety check.
    if len(path) <= 5 or len(full_path) <= 5:
        raise ValueError(
            "Refusing to recursively remove suspiciously short path: "
            "{!r} -> {!r}".format(path, full_path))
    remove_directory(full_path)
def uninstall(install, repository):
    """Deactivate every package and remove all DC/OS state on this host.

    Args:
        install: Install instance describing the active DC/OS layout.
        repository: package repository (currently unused here; kept so the
            call signature matches callers).

    Exits the process with status 1 rather than removing '/' if any
    computed removal target is the filesystem root.
    """
    print("Uninstalling DC/OS")
    # Remove dcos.target
    # TODO(cmaloney): Make this not quite so magical
    print("Removing dcos.target")
    # Build the path once; the original recomputed it for print and remove.
    dcos_target = os.path.dirname(install.systemd_dir) + "/dcos.target"
    print(dcos_target)
    remove_file(dcos_target)

    # Cleanup all systemd units
    # TODO(cmaloney): This is much more work than we need to do the job
    print("Deactivating all packages")
    install.activate([])

    # NOTE: All python libs need to be loaded before this so they are
    # in-memory before we do the delete

    # Remove all well known files, directories
    # TODO(cmaloney): This should be a method of Install.
    print("Removing all runtime / activation directories")
    active_names = install.get_active_names()
    new_names = [name + '.new' for name in active_names]
    old_names = [name + '.old' for name in active_names]
    all_names = active_names + new_names + old_names
    assert len(all_names) > 0

    # Safety valve: never rm -rf '/'. Report what would have been removed
    # and bail out instead.
    if '/' in all_names + [install.root]:
        print("Cowardly refusing to rm -rf '/' as part of uninstall.", file=sys.stderr)
        print("Uninstall directories: ", ','.join(all_names + [install.root]), file=sys.stderr)
        sys.exit(1)

    # sys.exit above raises, so no `else:` needed for the removal loop.
    for name in all_names:
        remove_directory(name)

    # Removing /opt/mesosphere
    remove_directory(install.root)
def activate(self, packages):
    """Stage and atomically activate the given set of packages.

    Builds a parallel ".new" copy of every well-known directory plus the
    environment / buildinfo / service-configuration files, populates them
    by symlinking each package's contents in, then swaps ".new" into
    place via `self.swap_active(".new")`.
    """
    # Ensure the new set is reasonable.
    validate_compatible(packages, self.__roles)

    # Build the absolute paths for the running config, new config location,
    # and where to archive the config.
    active_names = self.get_active_names()
    active_dirs = list(map(self._make_abs, self.__well_known_dirs + ["active"]))

    new_names = [name + ".new" for name in active_names]
    new_dirs = [name + ".new" for name in active_dirs]
    old_names = [name + ".old" for name in active_names]

    # Remove all pre-existing new and old directories
    for name in chain(new_names, old_names):
        if os.path.exists(name):
            if os.path.isdir(name):
                remove_directory(name)
            else:
                os.remove(name)

    # Remove unit files staged for an activation that didn't occur.
    if not self.__skip_systemd_dirs:
        self.systemd.remove_staged_unit_files()

    # Make the directories for the new config
    for name in new_dirs:
        os.makedirs(name)

    # Symlink every entry of `src` into `dest`; no-op for non-directories
    # (missing package subdirs are simply skipped).
    def symlink_all(src, dest):
        if not os.path.isdir(src):
            return
        symlink_tree(src, dest)

    # Set the new LD_LIBRARY_PATH, PATH.
    env_contents = env_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
    env_export_contents = env_export_header.format("/opt/mesosphere" if self.__fake_path else self.__root)

    active_buildinfo_full = {}

    dcos_service_configuration = self._get_dcos_configuration_template()

    # Building up the set of users
    sysusers = UserManagement(self.__manage_users, self.__add_users)

    # Collect absolute paths of all *.service files under `_dir`.
    def _get_service_files(_dir):
        service_files = []
        for root, directories, filenames in os.walk(_dir):
            for filename in filter(lambda name: name.endswith(".service"), filenames):
                service_files.append(os.path.join(root, filename))
        return service_files

    # Service names = basenames of the *.service files, extension stripped.
    def _get_service_names(_dir):
        service_files = list(map(os.path.basename, _get_service_files(_dir)))
        if not service_files:
            return []
        return list(map(lambda name: os.path.splitext(name)[0], service_files))

    # Add the folders, config in each package.
    for package in packages:
        # Package folders
        # NOTE: Since active is at the end of the folder list it will be
        # removed by the zip. This is the desired behavior, since it will be
        # populated later.
        # Do the basename since some well known dirs are full paths (dcos.target.wants)
        # while inside the packages they are always top level directories.
        for new, dir_name in zip(new_dirs, self.__well_known_dirs):
            dir_name = os.path.basename(dir_name)
            pkg_dir = os.path.join(package.path, dir_name)

            assert os.path.isabs(new)
            assert os.path.isabs(pkg_dir)

            try:
                symlink_all(pkg_dir, new)

                # Symlink all applicable role-based config
                for role in self.__roles:
                    role_dir = os.path.join(package.path, "{0}_{1}".format(dir_name, role))
                    symlink_all(role_dir, new)
            except ConflictingFile as ex:
                raise ValidationError("Two packages are trying to install the same file {0} or "
                                      "two roles in the set of roles {1} are causing a package "
                                      "to try activating multiple versions of the same file. "
                                      "One of the package files is {2}.".format(ex.dest, self.__roles, ex.src))

        # Add to the active folder
        os.symlink(package.path, os.path.join(self._make_abs("active.new"), package.name))

        # Add to the environment and environment.export contents
        env_contents += "# package: {0}\n".format(package.id)
        env_export_contents += "# package: {0}\n".format(package.id)
        for k, v in package.environment.items():
            env_contents += "{0}={1}\n".format(k, v)
            env_export_contents += "export {0}={1}\n".format(k, v)
        env_contents += "\n"
        env_export_contents += "\n"

        # Add to the buildinfo
        try:
            active_buildinfo_full[package.name] = load_json(os.path.join(package.path, "buildinfo.full.json"))
        except FileNotFoundError:
            # TODO(cmaloney): These only come from setup-packages. Should update
            # setup-packages to add a buildinfo.full for those packages
            active_buildinfo_full[package.name] = None

        # NOTE: It is critical the state dir, the package name and the user name are all the
        # same. Otherwise on upgrades we might remove access to a files by changing their chown
        # to something incompatible. We survive the first upgrade because everything goes from
        # root to specific users, and root can access all user files.
        if package.username is not None:
            sysusers.add_user(package.username, package.group)

        # Ensure the state directory exists
        # TODO(cmaloney): On upgrade take a snapshot?
        if self.__manage_state_dir:
            state_dir_path = self.__state_dir_root + '/' + package.name
            if package.state_directory:
                make_directory(state_dir_path)
                # NOTE(review): chown assumed to apply only when the state dir
                # was just ensured — nesting reconstructed from collapsed
                # source; confirm against upstream.
                if package.username and not is_windows:
                    uid = sysusers.get_uid(package.username)
                    check_call(['chown', '-R', str(uid), state_dir_path])

        if package.sysctl:
            service_names = _get_service_names(package.path)

            if not service_names:
                raise ValueError("service name required for sysctl could not be determined for {package}".format(
                    package=package.id))

            for service in service_names:
                if service in package.sysctl:
                    dcos_service_configuration["sysctl"][service] = package.sysctl[service]

    # Prepare new systemd units for activation.
    if not self.__skip_systemd_dirs:
        new_wants_dir = self._make_abs(self.__systemd_dir + ".new")
        if os.path.exists(new_wants_dir):
            self.systemd.stage_new_units(new_wants_dir)

    dcos_service_configuration_file = os.path.join(self._make_abs("etc.new"), DCOS_SERVICE_CONFIGURATION_FILE)
    write_json(dcos_service_configuration_file, dcos_service_configuration)

    # Write out the new environment file.
    new_env = self._make_abs("environment.new")
    write_string(new_env, env_contents)

    # Write out the new environment.export file
    new_env_export = self._make_abs("environment.export.new")
    write_string(new_env_export, env_export_contents)

    # Write out the buildinfo of every active package
    new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
    write_json(new_buildinfo_meta, active_buildinfo_full)

    # Flip everything staged above into place.
    self.swap_active(".new")
def remove(self, id):
    """Delete the on-disk contents of package `id`.

    Raises:
        PackageNotFound: if no directory exists for the given id.
    """
    target = self.package_path(id)
    if not os.path.exists(target):
        raise PackageNotFound(id)
    remove_directory(target)
def activate(self, packages):
    """Stage and atomically activate the given set of packages.

    Builds a parallel ".new" copy of every well-known directory plus the
    environment / buildinfo / service-configuration files, populates them
    by symlinking each package's contents in, then swaps ".new" into
    place via `self.swap_active(".new")`.
    """
    # Ensure the new set is reasonable.
    validate_compatible(packages, self.__roles)

    # Build the absolute paths for the running config, new config location,
    # and where to archive the config.
    active_names = self.get_active_names()
    active_dirs = list(map(self._make_abs, self.__well_known_dirs + ["active"]))

    new_names = [name + ".new" for name in active_names]
    new_dirs = [name + ".new" for name in active_dirs]
    old_names = [name + ".old" for name in active_names]

    # Remove all pre-existing new and old directories
    for name in chain(new_names, old_names):
        if os.path.exists(name):
            if os.path.isdir(name):
                remove_directory(name)
            else:
                os.remove(name)

    # Remove unit files staged for an activation that didn't occur.
    if not self.__skip_systemd_dirs:
        self.systemd.remove_staged_unit_files()

    # Make the directories for the new config
    for name in new_dirs:
        os.makedirs(name)

    # Symlink every entry of `src` into `dest`; no-op for non-directories
    # (missing package subdirs are simply skipped).
    def symlink_all(src, dest):
        if not os.path.isdir(src):
            return
        symlink_tree(src, dest)

    # Set the new LD_LIBRARY_PATH, PATH.
    env_contents = env_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
    env_export_contents = env_export_header.format("/opt/mesosphere" if self.__fake_path else self.__root)

    active_buildinfo_full = {}

    dcos_service_configuration = self._get_dcos_configuration_template()

    # Building up the set of users
    sysusers = UserManagement(self.__manage_users, self.__add_users)

    # Collect absolute paths of all *.service files under `_dir`.
    def _get_service_files(_dir):
        service_files = []
        for root, directories, filenames in os.walk(_dir):
            for filename in filter(lambda name: name.endswith(".service"), filenames):
                service_files.append(os.path.join(root, filename))
        return service_files

    # Service names = basenames of the *.service files, extension stripped.
    def _get_service_names(_dir):
        service_files = list(map(os.path.basename, _get_service_files(_dir)))
        if not service_files:
            return []
        return list(map(lambda name: os.path.splitext(name)[0], service_files))

    # Add the folders, config in each package.
    for package in packages:
        # Package folders
        # NOTE: Since active is at the end of the folder list it will be
        # removed by the zip. This is the desired behavior, since it will be
        # populated later.
        # Do the basename since some well known dirs are full paths (dcos.target.wants)
        # while inside the packages they are always top level directories.
        for new, dir_name in zip(new_dirs, self.__well_known_dirs):
            dir_name = os.path.basename(dir_name)
            pkg_dir = os.path.join(package.path, dir_name)

            assert os.path.isabs(new)
            assert os.path.isabs(pkg_dir)

            try:
                symlink_all(pkg_dir, new)

                # Symlink all applicable role-based config
                for role in self.__roles:
                    role_dir = os.path.join(package.path, "{0}_{1}".format(dir_name, role))
                    symlink_all(role_dir, new)
            except ConflictingFile as ex:
                raise ValidationError("Two packages are trying to install the same file {0} or "
                                      "two roles in the set of roles {1} are causing a package "
                                      "to try activating multiple versions of the same file. "
                                      "One of the package files is {2}.".format(ex.dest, self.__roles, ex.src))

        # Add to the active folder
        os.symlink(package.path, os.path.join(self._make_abs("active.new"), package.name))

        # Add to the environment and environment.export contents
        env_contents += "# package: {0}\n".format(package.id)
        env_export_contents += "# package: {0}\n".format(package.id)
        for k, v in package.environment.items():
            env_contents += "{0}={1}\n".format(k, v)
            env_export_contents += "export {0}={1}\n".format(k, v)
        env_contents += "\n"
        env_export_contents += "\n"

        # Add to the buildinfo
        try:
            active_buildinfo_full[package.name] = load_json(os.path.join(package.path, "buildinfo.full.json"))
        except FileNotFoundError:
            # TODO(cmaloney): These only come from setup-packages. Should update
            # setup-packages to add a buildinfo.full for those packages
            active_buildinfo_full[package.name] = None

        # NOTE: It is critical the state dir, the package name and the user name are all the
        # same. Otherwise on upgrades we might remove access to a files by changing their chown
        # to something incompatible. We survive the first upgrade because everything goes from
        # root to specific users, and root can access all user files.
        if package.username is not None:
            sysusers.add_user(package.username, package.group)

        # Ensure the state directory exists
        # TODO(cmaloney): On upgrade take a snapshot?
        if self.__manage_state_dir:
            state_dir_path = self.__state_dir_root + '/' + package.name
            if package.state_directory:
                make_directory(state_dir_path)
                # NOTE(review): chown assumed to apply only when the state dir
                # was just ensured — nesting reconstructed from collapsed
                # source; confirm against upstream.
                if package.username and not is_windows:
                    uid = sysusers.get_uid(package.username)
                    check_call(['chown', '-R', str(uid), state_dir_path])

        if package.sysctl:
            service_names = _get_service_names(package.path)

            if not service_names:
                raise ValueError("service name required for sysctl could not be determined for {package}".format(
                    package=package.id))

            for service in service_names:
                if service in package.sysctl:
                    dcos_service_configuration["sysctl"][service] = package.sysctl[service]

    # Prepare new systemd units for activation.
    if not self.__skip_systemd_dirs:
        new_wants_dir = self._make_abs(self.__systemd_dir + ".new")
        if os.path.exists(new_wants_dir):
            self.systemd.stage_new_units(new_wants_dir)

    dcos_service_configuration_file = os.path.join(self._make_abs("etc.new"), DCOS_SERVICE_CONFIGURATION_FILE)
    write_json(dcos_service_configuration_file, dcos_service_configuration)

    # Write out the new environment file.
    new_env = self._make_abs("environment.new")
    write_string(new_env, env_contents)

    # Write out the new environment.export file
    new_env_export = self._make_abs("environment.export.new")
    write_string(new_env_export, env_export_contents)

    # Write out the buildinfo of every active package
    new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
    write_json(new_buildinfo_meta, active_buildinfo_full)

    # Flip everything staged above into place.
    self.swap_active(".new")
def remove(self, id):
    """Remove package `id` from the repository.

    Raises PackageNotFound when no directory exists for the id.
    """
    pkg_dir = self.package_path(id)
    if os.path.exists(pkg_dir):
        remove_directory(pkg_dir)
    else:
        raise PackageNotFound(id)