def problem_to_control(problem, debian_path):
    """
    Converts problem.json to a deb control file.

    Args:
        problem: deserialized problem.json (dict)
        debian_path: path to the DEBIAN directory
    """
    # a-z, digits 0-9, plus + and minus - signs, and periods
    package_name = problem.get("pkg_name", problem["name"])
    sanitized_name = "{}-{}".format(
        sanitize_name(package_name), get_pid_hash(problem, True))

    control = deepcopy(DEB_DEFAULTS)
    control.update(**{
        "Package": sanitized_name,
        "Version": problem.get("version", "1.0-0"),
        "Architecture": problem.get("architecture", "all"),
        "Maintainer": problem["author"],
        "Description": problem.get(
            "pkg_description",
            # replace the newlines to prevent a crash
            problem["description"].replace("\n", "")),
    })

    if "pkg_dependencies" in problem:
        control["Depends"] = ", ".join(problem.get("pkg_dependencies", []))

    contents = ""
    for option, value in sorted(control.items()):
        contents += "{}: {}\n".format(option, value)

    with open(join(debian_path, "control"), "w") as control_file:
        control_file.write(contents)

    logger.debug("Control file contents:\n%s", contents)
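# Usage sketch (illustrative only): how problem_to_control might be invoked
# while staging a Debian package build. The staging paths below are
# hypothetical, and the surrounding packaging steps (copying payload files,
# running dpkg-deb) are assumed rather than taken from this module.
#
#     import json
#     import os
#
#     staging = "/tmp/staging/my-problem"            # hypothetical staging tree
#     debian_path = os.path.join(staging, "DEBIAN")
#     os.makedirs(debian_path, exist_ok=True)
#
#     with open("/path/to/problem/problem.json") as f:   # hypothetical path
#         problem = json.load(f)
#
#     problem_to_control(problem, debian_path)
#     # dpkg-deb --build would then be run against `staging` (assumed step).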
def deploy_problem(problem_directory,
                   instances=None,
                   test=False,
                   deployment_directory=None,
                   debug=False,
                   restart_xinetd=True):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances. Defaults to False.
        deployment_directory: If not None, the challenge will be deployed
            here instead of the instance user's home directory
        debug: Output debug info
        restart_xinetd: Whether to restart xinetd upon deployment of this
            set of instances for a problem. Defaults to True, as used by
            tests, but is typically passed as False from deploy_problems,
            which handles multiple problems.
    """
    if instances is None:
        instances = [0]
    global current_problem, current_instance

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["name"]

    instance_list = []
    need_restart_xinetd = False

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"],
            instance_number=instance_number)
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object,
            problem_directory,
            instance_number,
            staging_directory,
            deployment_directory=deployment_directory)
        instance_list.append((instance_number, instance))

    deployment_json_dir = join(
        DEPLOYED_ROOT,
        "{}-{}".format(sanitize_name(problem_object["name"]),
                       get_pid_hash(problem_object, True)))
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]
        deployment_directory = instance["deployment_directory"]

        logger.debug(
            "...Copying problem files %s to deployment directory %s.",
            instance["files"], deployment_directory)
        deploy_files(problem_path, deployment_directory, instance["files"],
                     problem.user, problem.__class__)

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            # This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError:
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            if instance["service_file"] is not None:
                install_user_service(instance["service_file"],
                                     instance["socket_file"])
                # set to true, this will signal an xinetd restart
                need_restart_xinetd = True

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        unique = problem_object["name"] + problem_object["author"] + str(
            instance_number) + deploy_config.deploy_secret

        deployment_info = {
            "user": problem.user,
            "deployment_directory": deployment_directory,
            "service": None if instance["service_file"] is None
            else os.path.basename(instance["service_file"]),
            "socket": None if instance["socket_file"] is None
            else os.path.basename(instance["socket_file"]),
            "server": problem.server,
            "description": problem.description,
            "flag": problem.flag,
            "flag_sha1": problem.flag_sha1,
            "instance_number": instance_number,
            "should_symlink": not isinstance(problem, Service)
            and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]]
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)

        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(
                json.dumps(deployment_info, indent=4,
                           separators=(", ", ": ")))

        logger.debug(
            "The instance deployment information can be found at '%s'.",
            instance_info_path)

    # restart xinetd
    if restart_xinetd and need_restart_xinetd:
        execute(["service", "xinetd", "restart"], timeout=60)

    logger.info("Problem instances %s were successfully deployed for '%s'.",
                instances, problem_object["name"])
    return need_restart_xinetd
def deploy_problem(
        problem_directory,
        instances=None,
        test=False,
        deployment_directory=None,
        debug=False,
        restart_xinetd=True,
        containerize=False,
):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances. Defaults to False.
        deployment_directory: If not None, the challenge will be deployed
            here instead of the instance user's home directory
        debug: Output debug info
        restart_xinetd: Whether to restart xinetd upon deployment of this
            set of instances for a problem. Defaults to True, as used by
            tests, but is typically passed as False from deploy_problems,
            which handles multiple problems.
        containerize: Deployment is occurring in a container. This flag is
            used by containerize and external tools like cmgr that deploy
            challenges in an isolated environment.
    """
    if instances is None:
        instances = [0]
    global current_problem, current_instance, port_map

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["unique_name"]

    instance_list = []
    need_restart_xinetd = False

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    problem_deb_location = (
        os.path.join(DEB_ROOT, sanitize_name(problem_object["unique_name"])) +
        ".deb")
    try:
        subprocess.run(
            "DEBIAN_FRONTEND=noninteractive apt-get -y install " +
            f"--reinstall {problem_deb_location}",
            shell=True,
            check=True,
            stdout=subprocess.PIPE,
        )
    except subprocess.CalledProcessError:
        logger.error("An error occurred while installing problem packages.")
        raise FatalException
    logger.debug("Reinstalled problem's deb package to fulfill dependencies")

    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"],
            instance_number=instance_number)
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object,
            problem_directory,
            instance_number,
            staging_directory,
            deployment_directory=deployment_directory,
        )
        instance_list.append((instance_number, instance))

    deployment_json_dir = join(
        DEPLOYED_ROOT,
        "{}-{}".format(sanitize_name(problem_object["name"]),
                       get_pid_hash(problem_object, True)),
    )
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]
        deployment_directory = instance["deployment_directory"]

        logger.debug(
            "...Copying problem files %s to deployment directory %s.",
            instance["files"],
            deployment_directory,
        )
        deploy_files(
            problem_path,
            deployment_directory,
            instance["files"],
            problem.user,
            problem.__class__,
        )

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            # This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError:
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            if instance["service_file"] is not None:
                install_user_service(instance["service_file"],
                                     instance["socket_file"])
                # set to true, this will signal an xinetd restart
                need_restart_xinetd = True

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        deployment_info = {
            "user": problem.user,
            "deployment_directory": deployment_directory,
            "service": None if instance["service_file"] is None
            else os.path.basename(instance["service_file"]),
            "socket": None if instance["socket_file"] is None
            else os.path.basename(instance["socket_file"]),
            "server": problem.server,
            "description": problem.description,
            "hints": problem.hints,
            "flag": problem.flag,
            "flag_sha1": problem.flag_sha1,
            "instance_number": instance_number,
            "should_symlink": not isinstance(problem, Service)
            and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]],
            "docker_challenge": isinstance(problem, DockerChallenge)
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)

        # pass along image digest so webui can launch the correct image
        if isinstance(problem, DockerChallenge):
            deployment_info["instance_digest"] = problem.image_digest
            deployment_info["port_info"] = {
                n: p.dict() for n, p in problem.ports.items()
            }

        port_map[(current_problem, instance_number)] = deployment_info.get(
            "port", None)

        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(
                json.dumps(deployment_info, indent=4,
                           separators=(", ", ": ")))

        logger.debug(
            "The instance deployment information can be found at '%s'.",
            instance_info_path,
        )

    # restart xinetd
    if restart_xinetd and need_restart_xinetd:
        execute(["service", "xinetd", "restart"], timeout=60)

    logger.info(
        "Problem instances %s were successfully deployed for '%s'.",
        instances,
        problem_object["unique_name"],
    )
    return need_restart_xinetd