def postinst_dependencies(problem, problem_path, debian_path, install_path):
    """
    Handles the generation of the postinst script for additional dependencies.

    Builds a bash postinst maintainer script that, at package install time,
    pip-installs the problem's Python requirements and runs its
    install_dependencies script (both from the problem's deployed __files
    directory).

    Args:
        problem: the problem object.
        problem_path: the problem directory.
        debian_path: the deb's DEBIAN directory.
        install_path: the staging directory that becomes the deployed
            __files directory.

    Raises:
        Exception: if the problem declares both a pip_requirements field and
            a requirements.txt file (ambiguous requirement sources).
    """
    postinst_template = ["#!/bin/bash"]
    requirements_path = join(problem_path, "requirements.txt")
    dependencies_path = join(problem_path, "install_dependencies")
    staging_requirements_path = join(install_path, "requirements.txt")
    # Paths as they will exist AFTER the deb is installed; these go into the
    # postinst script, not the staging tree.
    deployed_requirements_path = join(get_problem_root(problem["name"], absolute=True), "__files", "requirements.txt")
    deployed_setup_path = join(get_problem_root(problem["name"], absolute=True), "__files", "install_dependencies")
    listed_requirements = problem.get("pip_requirements", [])

    # Write or copy the requirements to the staging directory.
    if len(listed_requirements) > 0:
        if isfile(requirements_path):
            raise Exception("Problem has both a pip_requirements field and requirements.txt.")
        with open(staging_requirements_path, "w") as f:
            f.writelines("\n".join(listed_requirements))
    elif isfile(requirements_path):
        copy(requirements_path, staging_requirements_path)

    if isfile(staging_requirements_path):
        postinst_template.append("pip3 install -r {}".format(deployed_requirements_path))

    if isfile(dependencies_path):
        copy(dependencies_path, join(install_path, "install_dependencies"))
        # Ensure it is executable (owner read/execute only).
        chmod(join(install_path, "install_dependencies"), 0o500)
        postinst_template.append("bash -c '{}'".format(deployed_setup_path))

    chmod(debian_path, 0o775)
    postinst_path = join(debian_path, "postinst")
    with open(postinst_path, "w") as f:
        # dpkg requires maintainer scripts to be executable.
        chmod(postinst_path, 0o775)
        contents = "\n".join(postinst_template)
        f.write(contents)
def get_all_problems():
    """
    Collect every installed problem.

    Returns:
        A dict mapping each problem directory name under PROBLEM_ROOT to its
        loaded problem object. Directories whose problem definition is
        missing are silently skipped.
    """
    installed = {}
    if not os.path.isdir(PROBLEM_ROOT):
        return installed
    for entry in os.listdir(PROBLEM_ROOT):
        try:
            installed[entry] = get_problem(get_problem_root(entry, absolute=True))
        except FileNotFoundError:
            # Not a valid problem directory — ignore it.
            pass
    return installed
def undeploy_problems(args, config):
    """
    Main entrypoint for problem undeployment.

    Args:
        args: parsed CLI arguments (problem_paths, bundle, instances,
            num_instances).
        config: deployment configuration (unused here; kept for a uniform
            entrypoint signature).

    Raises:
        FatalException: if a bundle or problem cannot be found, or if another
            deployment is in progress.
    """
    problem_names = args.problem_paths

    if args.bundle:
        # Expand each bundle argument into its constituent problem names.
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path, absolute=True)
                if isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # Before undeploying problems, load in already_deployed instances so we
    # only attempt to remove instances that actually exist.
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[problem["name"]] = []
        for instance in get_all_problem_instances(path):
            already_deployed[problem["name"]].append(instance["instance_number"])

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error("Cannot undeploy while other deployment in progress. If you believe this is an error, "
                     "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            problem_root = get_problem_root(problem_name, absolute=True)
            if isdir(problem_root):
                problem = get_problem(problem_root)
                # Restrict the requested instances to those actually deployed.
                instances = list(filter(lambda x: x in already_deployed[problem["name"]], instance_list))
                if len(instances) == 0:
                    logger.warn("No deployed instances %s were found for problem '%s'.", instance_list, problem["name"])
                else:
                    logger.debug("Undeploying problem '%s'.", problem["name"])
                    # Bug fix: remove only the filtered, actually-deployed
                    # instances (previously the full requested instance_list
                    # was passed, contradicting the success log below).
                    remove_instances(problem_name, instances)
                    logger.info("Problem instances %s were successfully removed from '%s'.", instances, problem["name"])
            else:
                logger.error("Problem '%s' doesn't appear to be installed.", problem_name)
                raise FatalException
    finally:
        # Always release the lock, even when undeployment fails.
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment.

    Args:
        args: parsed CLI arguments (problem_paths, bundle, instances,
            num_instances, redeploy, dry, secret, deployment_directory,
            debug).
        config: deployment configuration providing default_user and
            deploy_secret.

    Raises:
        FatalException: on invalid argument combinations, missing bundles or
            problems, or a concurrent deployment.
    """
    global deploy_config, port_map, inv_port_map
    deploy_config = config

    try:
        user = getpwnam(deploy_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.", deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1 or args.num_instances > 1):
        logger.error("Cannot specify deployment directory if deploying multiple problems or instances.")
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warn("Overriding deploy_secret with user supplied secret '%s'.", args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        # Expand each bundle argument into its constituent problem names.
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path, absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # Before deploying problems, load in port_map and already_deployed
    # instances so existing deployments can be skipped and ports reused.
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"], instance["instance_number"])
                inv_port_map[(problem["name"], instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error("Cannot deploy while other deployment in progress. If you believe this is an error, "
                     "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    # Bug fix: only take the lock for real deployments. Previously the lock
    # was written unconditionally but removed only when not args.dry, so a
    # dry run left a stale deploy.lock that blocked all later deployments.
    if not args.dry:
        with open(lock_file, "w") as f:
            f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                # Skip instances that are already deployed.
                todo_instance_list = list(set(instance_list) - set(already_deployed.get(problem_name, [])))

            if args.dry and isdir(problem_name):
                deploy_problem(problem_name, instances=todo_instance_list,
                               test=args.dry, deployment_directory=args.deployment_directory,
                               debug=args.debug)
            elif isdir(join(get_problem_root(problem_name, absolute=True))):
                deploy_problem(join(get_problem_root(problem_name, absolute=True)),
                               instances=todo_instance_list, test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            else:
                logger.error("Problem '%s' doesn't appear to be installed.", problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        if not args.dry:
            os.remove(lock_file)
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment.

    Args:
        args: parsed CLI arguments (problem_paths, bundle, num_instances,
            dry, deployment_directory).
        config: deployment configuration providing DEFAULT_USER.

    Raises:
        Exception: on invalid argument combinations, missing bundles or
            problems, or a concurrent deployment.
    """
    global deploy_config, port_map
    deploy_config = config

    try:
        user = getpwnam(deploy_config.DEFAULT_USER)
    except KeyError as e:
        # The shell user doesn't exist yet; create it before deploying.
        print("DEFAULT_USER {} does not exist. Creating now.".format(deploy_config.DEFAULT_USER))
        create_user(deploy_config.DEFAULT_USER)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1 or args.num_instances > 1):
        raise Exception("Cannot specify deployment directory if deploying multiple problems or instances.")

    problems = args.problem_paths

    if args.bundle:
        # Expand each bundle argument into its constituent problem names.
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path, absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    raise Exception("Could not get bundle.")
        problems = bundle_problems

    # before deploying problems, load in port_map
    for path, problem in get_all_problems().items():
        for instance in get_all_problem_instances(path):
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"], instance["instance_number"])

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        raise Exception(
            "Cannot deploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")

    # Dry runs neither take nor release the lock.
    if not args.dry:
        with open(lock_file, "w") as f:
            f.write("1")

    try:
        for path in problems:
            if args.dry and os.path.isdir(path):
                deploy_problem(path, instances=args.num_instances, test=args.dry, deployment_directory=args.deployment_directory)
            elif os.path.isdir(os.path.join(get_problem_root(path, absolute=True))):
                deploy_problem(os.path.join(get_problem_root(path, absolute=True)), instances=args.num_instances, test=args.dry, deployment_directory=args.deployment_directory)
            else:
                raise Exception("Problem path {} cannot be found".format(path))
    except Exception as e:
        # Best-effort: report the failure but fall through so the lock is
        # still released in the finally block.
        traceback.print_exc()
    finally:
        if not args.dry:
            os.remove(lock_file)
def problem_builder(args, config):
    """
    Main entrypoint for package building operations.

    Packages every problem found under the last path in args.problem_paths
    into a .deb, then recurses on the remaining paths.

    Args:
        args: parsed CLI arguments (problem_paths, staging_dir, out, ignore).
        config: build configuration (unused here; kept for a uniform
            entrypoint signature).

    Raises:
        FatalException: if no problems are found under the given path.
    """
    # Grab a problem_path
    problem_base_path = args.problem_paths.pop()
    problem_paths = find_problems(problem_base_path)

    if len(problem_paths) == 0:
        # Consistency fix: use the module logger (as everywhere else in this
        # file) instead of the root logger via logging.critical.
        logger.critical("No problems found under '%s'!", problem_base_path)
        raise FatalException

    for problem_path in problem_paths:
        problem = get_problem(problem_path)
        logger.debug("Starting to package: '%s'.", problem["name"])

        # Staging layout: __staging/DEBIAN holds control files and the
        # problem data lands under its eventual install root.
        paths = {}
        if args.staging_dir is None:
            paths["staging"] = join(problem_path, "__staging")
        else:
            paths["staging"] = join(args.staging_dir, "__staging")
        paths["debian"] = join(paths["staging"], "DEBIAN")
        paths["data"] = join(paths["staging"], get_problem_root(problem["name"]))
        paths["install_data"] = join(paths["data"], "__files")

        # Make all of the directories, order does not matter with makedirs
        [makedirs(staging_path) for _, staging_path in paths.items() if not isdir(staging_path)]

        args.ignore.append("__staging")
        full_copy(problem_path, paths["data"], ignore=args.ignore)

        # note that this chmod does not work correct if on a vagrant shared folder,
        # so we need to package the problems elsewhere
        chmod(paths["data"], 0o750)

        problem_to_control(problem, paths["debian"])
        postinst_dependencies(problem, problem_path, paths["debian"], paths["install_data"])

        deb_directory = args.out if args.out is not None else getcwd()

        def format_deb_file_name(problem):
            """
            Prepare the file name of the deb package according to deb policy.

            Args:
                problem: the problem object

            Returns:
                An acceptable file name for the problem.
            """
            raw_package_name = "{}-{}-{}.deb".format(
                sanitize_name(problem.get("organization", "ctf")),
                sanitize_name(problem.get("pkg_name", problem["name"])),
                sanitize_name(problem.get("version", "1.0-0")))
            return raw_package_name

        deb_path = join(deb_directory, format_deb_file_name(problem))

        shell = spur.LocalShell()
        result = shell.run(["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

        if result.return_code != 0:
            logger.error("Error building problem deb for '%s'.", problem["name"])
            logger.error(result.output)
        else:
            logger.info("Problem '%s' packaged successfully.", problem["name"])

        # Typo fix in the log message: "Clearning" -> "Cleaning".
        logger.debug("Cleaning up '%s' staging directory '%s'.", problem["name"], paths["staging"])
        rmtree(paths["staging"])

    # Recurse until every requested base path has been processed.
    if len(args.problem_paths) >= 1:
        return problem_builder(args, config)
def undeploy_problems(args, config):
    """
    Main entrypoint for problem undeployment.

    Args:
        args: parsed CLI arguments (problem_paths, bundle, instances,
            num_instances).
        config: deployment configuration (unused here; kept for a uniform
            entrypoint signature).

    Raises:
        FatalException: if a bundle or problem cannot be found, or if another
            deployment is in progress.
    """
    problem_names = args.problem_paths

    if args.bundle:
        # Expand each bundle argument into its constituent problem names.
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path, absolute=True)
                if isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # Before undeploying problems, load in already_deployed instances so we
    # only attempt to remove instances that actually exist.
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[problem["name"]] = []
        for instance in get_all_problem_instances(path):
            already_deployed[problem["name"]].append(
                instance["instance_number"])

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot undeploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            problem_root = get_problem_root(problem_name, absolute=True)
            if isdir(problem_root):
                problem = get_problem(problem_root)
                # Restrict the requested instances to those actually deployed.
                instances = list(
                    filter(lambda x: x in already_deployed[problem["name"]],
                           instance_list))
                if len(instances) == 0:
                    logger.warn(
                        "No deployed instances %s were found for problem '%s'.",
                        instance_list, problem["name"])
                else:
                    logger.debug("Undeploying problem '%s'.", problem["name"])
                    # Bug fix: remove only the filtered, actually-deployed
                    # instances (previously the full requested instance_list
                    # was passed, contradicting the success log below).
                    remove_instances(problem_name, instances)
                    logger.info(
                        "Problem instances %s were successfully removed from '%s'.",
                        instances, problem["name"])
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        # Always release the lock, even when undeployment fails.
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment.

    Args:
        args: parsed CLI arguments (problem_paths, bundle, instances,
            num_instances, redeploy, dry, secret, deployment_directory,
            debug).
        config: deployment configuration providing default_user and
            deploy_secret.

    Raises:
        FatalException: on invalid argument combinations, missing bundles or
            problems, or a concurrent deployment.
    """
    global deploy_config, port_map, inv_port_map
    deploy_config = config

    try:
        user = getpwnam(deploy_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1
                                                  or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple problems or instances."
        )
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warn("Overriding deploy_secret with user supplied secret '%s'.",
                    args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        # Expand each bundle argument into its constituent problem names.
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path, absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # Before deploying problems, load in port_map and already_deployed
    # instances so existing deployments can be skipped and ports reused.
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot deploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    # Bug fix: only take the lock for real deployments. Previously the lock
    # was written unconditionally but removed only when not args.dry, so a
    # dry run left a stale deploy.lock that blocked all later deployments.
    if not args.dry:
        with open(lock_file, "w") as f:
            f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                # Skip instances that are already deployed.
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            if args.dry and isdir(problem_name):
                deploy_problem(problem_name, instances=todo_instance_list,
                               test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            elif isdir(join(get_problem_root(problem_name, absolute=True))):
                deploy_problem(join(
                    get_problem_root(problem_name, absolute=True)),
                               instances=todo_instance_list, test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        if not args.dry:
            os.remove(lock_file)
def postinst_dependencies(problem, problem_path, debian_path, install_path):
    """
    Handles the generation of the postinst script for additional dependencies.

    Builds a bash postinst maintainer script that, at package install time,
    pip-installs the problem's Python requirements and runs its
    install_dependencies script (both from the problem's deployed __files
    directory).

    Args:
        problem: the problem object.
        problem_path: the problem directory.
        debian_path: the deb's DEBIAN directory.
        install_path: the staging directory that becomes the deployed
            __files directory.

    Raises:
        FatalException: if the problem declares both a pip_requirements field
            and a requirements.txt file (ambiguous requirement sources).
    """
    postinst_template = ["#!/bin/bash"]
    requirements_path = join(problem_path, "requirements.txt")
    dependencies_path = join(problem_path, "install_dependencies")
    staging_requirements_path = join(install_path, "requirements.txt")
    # Paths as they will exist AFTER the deb is installed; these go into the
    # postinst script, not the staging tree.
    deployed_requirements_path = join(
        get_problem_root(problem["name"], absolute=True), "__files",
        "requirements.txt")
    deployed_setup_path = join(
        get_problem_root(problem["name"], absolute=True), "__files",
        "install_dependencies")
    listed_requirements = problem.get("pip_requirements", [])

    # Write or copy the requirements to the staging directory.
    if len(listed_requirements) > 0:
        if isfile(requirements_path):
            logger.error(
                "Problem '%s' has both a pip_requirements field and requirements.txt.",
                problem["name"])
            raise FatalException
        with open(staging_requirements_path, "w") as f:
            f.writelines("\n".join(listed_requirements))
    elif isfile(requirements_path):
        copy(requirements_path, staging_requirements_path)

    # Only read the file back for logging when debug output is enabled.
    if logger.getEffectiveLevel() <= logging.DEBUG and isfile(
            staging_requirements_path):
        with open(staging_requirements_path, "r") as f:
            logger.debug("python requirements:\n%s", f.read())

    if isfile(staging_requirements_path):
        postinst_template.append(
            "pip3 install -r {}".format(deployed_requirements_path))

    if isfile(dependencies_path):
        copy(dependencies_path, join(install_path, "install_dependencies"))
        # Ensure it is executable (owner read/execute only).
        chmod(join(install_path, "install_dependencies"), 0o500)
        postinst_template.append("bash -c '{}'".format(deployed_setup_path))

    chmod(debian_path, 0o775)
    postinst_path = join(debian_path, "postinst")
    with open(postinst_path, "w") as f:
        # dpkg requires maintainer scripts to be executable.
        chmod(postinst_path, 0o775)
        contents = "\n".join(postinst_template)
        f.write(contents)
        # postinst_template always has a she-bang, so anything beyond one
        # line means real install steps were generated.
        if len(postinst_template) > 1:
            logger.debug("post install:\n%s", contents)
def problem_builder(args, config):
    """
    Main entrypoint for package building operations.

    Packages every problem found under the last path in args.problem_paths
    into a .deb, then recurses on the remaining paths.

    Args:
        args: parsed CLI arguments (problem_paths, staging_dir, out, ignore).
        config: build configuration (unused here; kept for a uniform
            entrypoint signature).

    Raises:
        FatalException: if no problems are found under the given path.
    """
    # Grab a problem_path
    problem_base_path = args.problem_paths.pop()
    problem_paths = find_problems(problem_base_path)

    if len(problem_paths) == 0:
        # Consistency fix: use the module logger (as everywhere else in this
        # file) instead of the root logger via logging.critical.
        logger.critical("No problems found under '%s'!", problem_base_path)
        raise FatalException

    for problem_path in problem_paths:
        problem = get_problem(problem_path)
        logger.debug("Starting to package: '%s'.", problem["name"])

        # Staging layout: __staging/DEBIAN holds control files and the
        # problem data lands under its eventual install root.
        paths = {}
        if args.staging_dir is None:
            paths["staging"] = join(problem_path, "__staging")
        else:
            paths["staging"] = join(args.staging_dir, "__staging")
        paths["debian"] = join(paths["staging"], "DEBIAN")
        paths["data"] = join(paths["staging"], get_problem_root(problem["name"]))
        paths["install_data"] = join(paths["data"], "__files")

        # Make all of the directories, order does not matter with makedirs
        [
            makedirs(staging_path) for _, staging_path in paths.items()
            if not isdir(staging_path)
        ]

        args.ignore.append("__staging")
        full_copy(problem_path, paths["data"], ignore=args.ignore)

        # note that this chmod does not work correct if on a vagrant shared folder,
        # so we need to package the problems elsewhere
        chmod(paths["data"], 0o750)

        problem_to_control(problem, paths["debian"])
        postinst_dependencies(problem, problem_path, paths["debian"],
                              paths["install_data"])

        deb_directory = args.out if args.out is not None else getcwd()

        def format_deb_file_name(problem):
            """
            Prepare the file name of the deb package according to deb policy.

            Args:
                problem: the problem object

            Returns:
                An acceptable file name for the problem.
            """
            raw_package_name = "{}-{}-{}.deb".format(
                sanitize_name(problem.get("organization", "ctf")),
                sanitize_name(problem.get("pkg_name", problem["name"])),
                sanitize_name(problem.get("version", "1.0-0")))
            return raw_package_name

        deb_path = join(deb_directory, format_deb_file_name(problem))

        shell = spur.LocalShell()
        result = shell.run(
            ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

        if result.return_code != 0:
            logger.error("Error building problem deb for '%s'.",
                         problem["name"])
            logger.error(result.output)
        else:
            logger.info("Problem '%s' packaged successfully.", problem["name"])

        # Typo fix in the log message: "Clearning" -> "Cleaning".
        logger.debug("Cleaning up '%s' staging directory '%s'.",
                     problem["name"], paths["staging"])
        rmtree(paths["staging"])

    # Recurse until every requested base path has been processed.
    if len(args.problem_paths) >= 1:
        return problem_builder(args, config)