def problem_to_control(problem, debian_path):
    """
    Convert problem.json to a deb control file.

    Args:
        problem: deserialized problem.json (dict)
        debian_path: path to the DEBIAN directory
    """
    # Package names may use a-z, digits 0-9, plus + and minus - signs,
    # and periods; sanitize_name enforces this.
    package_name = problem.get("pkg_name", problem["name"])
    sanitized_name = sanitize_name(package_name)

    control = deepcopy(DEB_DEFAULTS)
    control.update(**{
        "Package": sanitized_name,
        "Version": problem.get("version", "1.0-0"),
        "Architecture": problem.get("architecture", "all"),
        "Maintainer": problem["author"],
        "Description": problem.get("pkg_description", problem["description"])
    })

    if "pkg_dependencies" in problem:
        control["Depends"] = ", ".join(problem.get("pkg_dependencies", []))

    # Emit fields sorted for deterministic output; join is linear, unlike
    # repeated string concatenation.
    contents = "".join(
        "{}: {}\n".format(option, value)
        for option, value in sorted(control.items()))

    # FIX: use a context manager so the control file is closed even if the
    # write raises (previously an explicit open/close pair).
    with open(join(debian_path, "control"), "w") as control_file:
        control_file.write(contents)

    logger.debug("Control file contents:\n%s", contents)
def __init__(self): """ Connnects to the docker daemon""" # will be used as the tag on the docker image if hasattr(self, "name"): self.problem_name = sanitize_name(self.name) else: self.problem_name = "problem" # use an explicit remote docker daemon per the configuration try: tls_config = docker.tls.TLSConfig( ca_cert=self.docker_ca_cert, client_cert=(self.docker_client_cert, self.docker_client_key), verify=True) self.client = docker.DockerClient(base_url=self.docker_host, tls=tls_config) self.api_client = docker.APIClient(base_url=self.docker_host, tls=tls_config) logger.debug("Connecting to docker daemon with config") # Docker options not set in configuration so use the environment to # configure (could be local or remote) except AttributeError: logger.debug("Connecting to docker daemon with env") self.client = docker.from_env() # throws an exception if the server returns an error: docker.errors.APIError self.client.ping()
def create_service_file(problem, instance_number, path):
    """
    Create a systemd service file for the given problem.

    Args:
        problem: the instantiated problem object
        instance_number: the instance number
        path: the location to drop the service file
    Returns:
        The path to the created service file
    """
    template = """[Unit]
Description={} instance

[Service]
Type={}
ExecStart={}

[Install]
WantedBy=default.target
"""
    service_spec = problem.service()
    unit_prefix = sanitize_name(problem.name)

    rendered = template.format(
        problem.name,
        service_spec['Type'],
        service_spec['ExecStart'],
    )

    service_file_path = join(
        path, "{}_{}.service".format(unit_prefix, instance_number))
    with open(service_file_path, "w") as handle:
        handle.write(rendered)

    return service_file_path
def bundle_to_control(bundle, debian_path):
    """
    Create a deb control file for a bundle.

    Args:
        bundle: the bundle object.
        debian_path: path to the DEBIAN directory
    """
    control = object_copy.deepcopy(DEB_DEFAULTS)
    control.update(
        **{
            "Package": sanitize_name(bundle["name"]),
            "Version": bundle.get("version", "1.0-0"),
            "Architecture": bundle.get("architecture", "all"),
            "Maintainer": bundle["author"],
            "Description": bundle["description"],
        }
    )

    # A bundle package depends on every problem it contains.
    control["Depends"] = ", ".join(bundle["problems"])

    contents = "".join(
        "{}: {}\n".format(option, value) for option, value in control.items())

    # FIX: use a context manager so the control file is closed even if the
    # write raises (previously an explicit open/close pair).
    with open(join(debian_path, "control"), "w") as control_file:
        control_file.write(contents)
def problem_to_control(problem, debian_path):
    """
    Convert problem.json to a deb control file.

    Args:
        problem: deserialized problem.json (dict)
        debian_path: path to the DEBIAN directory
    """
    #a-z, digits 0-9, plus + and minus - signs, and periods
    package_name = problem.get("pkg_name", problem["name"])
    sanitized_name = sanitize_name(package_name)

    control = deepcopy(DEB_DEFAULTS)
    control.update(
        **{
            "Package": sanitized_name,
            "Version": problem.get("version", "1.0-0"),
            "Architecture": problem.get("architecture", "all"),
            "Maintainer": problem["author"],
            "Description": problem.get("pkg_description", problem["description"])
        })

    if "pkg_dependencies" in problem:
        control["Depends"] = ", ".join(problem.get("pkg_dependencies", []))

    # Sorted for deterministic field order in the generated control file.
    contents = ""
    for option, value in sorted(control.items()):
        contents += "{}: {}\n".format(option, value)

    # FIX: context manager guarantees the handle is closed even when the
    # write raises (previously an explicit open/close pair).
    with open(join(debian_path, "control"), "w") as control_file:
        control_file.write(contents)

    logger.debug("Control file contents:\n%s", contents)
def bundle_to_control(bundle, debian_path):
    """
    Create a deb control file for a bundle.

    Args:
        bundle: the bundle object.
        debian_path: path to the DEBIAN directory
    """
    control = object_copy.deepcopy(DEB_DEFAULTS)
    control.update(
        **{
            "Package": sanitize_name(bundle["name"]),
            "Version": bundle.get("version", "1.0-0"),
            "Architecture": bundle.get("architecture", "all"),
            "Maintainer": bundle["author"],
            "Description": bundle["description"]
        })

    # Need to install problems and bundle dependencies.
    pkg_dependencies = bundle["problems"] + bundle.get("pkg_dependencies", [])
    control["Depends"] = ", ".join(pkg_dependencies)

    # Sorted for deterministic field order in the generated control file.
    contents = ""
    for option, value in sorted(control.items()):
        contents += "{}: {}\n".format(option, value)

    # FIX: context manager guarantees the handle is closed even when the
    # write raises (previously an explicit open/close pair).
    with open(join(debian_path, "control"), "w") as control_file:
        control_file.write(contents)

    logger.debug("Control file contents:\n%s", contents)
def format_deb_file_name(problem):
    """
    Prepare the file name of the deb package according to deb policy.

    Args:
        problem: the problem object
    Returns:
        An acceptable file name for the problem.
    """
    org_part = sanitize_name(problem.get("organization", "ctf"))
    pkg_part = sanitize_name(problem.get("pkg_name", problem["name"]))
    version_part = sanitize_name(problem.get("version", "1.0-0"))
    return "{}-{}-{}.deb".format(org_part, pkg_part, version_part)
def format_deb_file_name(bundle):
    """
    Prepare the file name of the deb package according to deb policy.

    Args:
        bundle: the bundle object
    Returns:
        An acceptable file name for the bundle.
    """
    org_part = sanitize_name(bundle.get("organization", "ctf"))
    name_part = sanitize_name(bundle["name"])
    version_part = sanitize_name(bundle.get("version", "1.0-0"))
    return "{}-{}-bundle-{}.deb".format(org_part, name_part, version_part)
def format_deb_file_name(bundle):
    """
    Prepare the file name of the deb package according to deb policy.

    Args:
        bundle: the bundle object
    Returns:
        An acceptable file name for the bundle.
    """
    # Sanitize each component, then assemble in one pass.
    fields = (
        sanitize_name(bundle.get("organization", "ctf")),
        sanitize_name(bundle["name"]),
        sanitize_name(bundle.get("version", "1.0-0")),
    )
    return "{}-{}-bundle-{}.deb".format(*fields)
def install_bundle(args):
    """
    "Installs" a bundle (validates it and stores a copy).

    "Bundles" are just JSON problem unlock weightmaps which are exposed
    to and used by the web server. All problems specified in a bundle
    must already be installed.

    Args:
        args: parsed CLI namespace; args.bundle_path is the path to the
            bundle JSON file.
    Raises:
        FatalException: when no path is given, the bundle is already
            installed, or a referenced problem is missing.
    """
    if not args.bundle_path:
        logger.error("No problem source path specified")
        raise FatalException
    bundle_path = args.bundle_path

    bundle_obj = get_bundle(bundle_path)

    # Refuse to clobber an existing installation of the same bundle.
    if os.path.isdir(join(BUNDLE_ROOT, sanitize_name(bundle_obj["name"]))):
        logger.error(
            f"A bundle with name {bundle_obj['name']} is " + "already installed"
        )
        raise FatalException

    # Validate that every problem named in the dependency weightmaps is
    # already installed under PROBLEM_ROOT before storing anything.
    for problem_name, info in bundle_obj["dependencies"].items():
        if not os.path.isdir(join(PROBLEM_ROOT, problem_name)):
            logger.error(
                f"Problem {problem_name} must be installed " +
                "before installing bundle"
            )
            raise FatalException
        for dependency_name in info["weightmap"]:
            if not os.path.isdir(join(PROBLEM_ROOT, dependency_name)):
                logger.error(
                    f"Problem {dependency_name} must be installed " +
                    "before installing bundle"
                )
                raise FatalException

    # Store a copy as <BUNDLE_ROOT>/<sanitized name>/bundle.json.
    bundle_destination = join(
        BUNDLE_ROOT, sanitize_name(bundle_obj["name"]), "bundle.json"
    )
    os.makedirs(os.path.dirname(bundle_destination), exist_ok=True)
    shutil.copy(bundle_path, bundle_destination)
    logger.info(f"Installed bundle {bundle_obj['name']}")
def get_username(problem_name, instance_number):
    """
    Determine the username for a given problem instance.

    Given limitation of 32char linux usernames with useradd, truncates
    generated username to 28chars. This allows up to 1000 instances of
    problems with usernames that do require truncation.

    Args:
        problem_name: the problem name (will be sanitized and truncated)
        instance_number: the instance number appended to the name
    Returns:
        The generated username string.
    Raises:
        Exception: if the resulting username would exceed 32 characters.
    """
    # BUG FIX: the format string was the literal "******", which ignores its
    # arguments and yields the same username for every instance (and makes
    # the length check below unreachable). The sibling get_username at the
    # bottom of this file shows the intended "{}_{}" form.
    username = "{}_{}".format(sanitize_name(problem_name)[0:28], instance_number)
    if len(username) > 32:
        raise Exception(
            "Unable to create more than 1000 instances of this problem. Shorten problem name.")
    return username
def format_deb_file_name(problem):
    """
    Prepare the file name of the deb package according to deb policy.

    Args:
        problem: the problem object
    Returns:
        An acceptable file name for the problem.
    """
    return "{}.deb".format(sanitize_name(problem["unique_name"]))
def uninstall_bundle(args):
    """
    Uninstall a bundle by deleting it from the shell servers.

    Problems referenced within the bundle are not affected.

    Args:
        args: parsed CLI namespace; args.bundle_name is the name of the
            installed bundle to remove.
    Raises:
        FatalException: when no bundle name was given.
    """
    if not args.bundle_name:
        logger.error("No bundle name specified")
        raise FatalException
    bundle_name = args.bundle_name

    bundle_dir = join(BUNDLE_ROOT, sanitize_name(bundle_name))
    # NOTE(review): a missing bundle only logs an error and does not raise
    # FatalException (unlike the guard above) — confirm this best-effort
    # behavior is intended.
    if not os.path.isdir(bundle_dir):
        logger.error(f"Bundle '{bundle_name}' is not installed")
    else:
        shutil.rmtree(bundle_dir)
        logger.info(f"Bundle '{bundle_name}' successfully removed")
def publish(args):
    """
    Main entrypoint for publish.

    Collects every installed problem (with its deployed instances) and
    every bundle, then prints the combined structure as JSON on stdout.
    """
    output = {"problems": [], "bundles": []}

    for unique_name, problem in get_all_problems().items():
        logger.debug("Finding instances of %s", unique_name)
        problem["instances"] = get_all_problem_instances(unique_name)
        problem["sanitized_name"] = sanitize_name(problem["name"])
        problem["unique_name"] = unique_name
        output["problems"].append(problem)

    output["bundles"].extend(get_all_bundles().values())

    print(json.dumps(output, indent=2))
def create_service_file(problem, instance_number, path):
    """
    Create a systemd service file for the given problem.

    Args:
        problem: the instantiated problem object
        instance_number: the instance number
        path: the location to drop the service file
    Returns:
        The path to the created service file
    """
    template = """[Unit]
Description={} instance

[Service]
User={}
WorkingDirectory={}
Type={}
ExecStart={}
Restart={}

[Install]
WantedBy=shell_manager.target
"""
    service_spec = problem.service()
    unit_prefix = sanitize_name(problem.name)

    # oneshot units must not auto-restart; everything else always restarts
    restart_policy = "no" if service_spec["Type"] == "oneshot" else "always"

    rendered = template.format(
        problem.name,
        problem.user,
        problem.directory,
        service_spec["Type"],
        service_spec["ExecStart"],
        restart_policy,
    )

    service_file_path = join(
        path, "{}_{}.service".format(unit_prefix, instance_number))
    with open(service_file_path, "w") as handle:
        handle.write(rendered)

    return service_file_path
def create_instance_user(problem_name, instance_number):
    """
    Generates a username based on the problem name, creating the user if
    it does not already exist.

    Args:
        problem_name: The name of the problem
        instance_number: The unique number for this instance
    Returns:
        A tuple containing the username and home directory
    """
    converted_name = sanitize_name(problem_name)
    # BUG FIX: the format string was the literal "******", which ignores its
    # arguments and produces the same username for every problem/instance.
    # The sibling get_username in this file shows the intended "{}_{}" form.
    username = "{}_{}".format(converted_name, instance_number)

    try:
        # Check if the user already exists.
        user = getpwnam(username)
        return username, user.pw_dir
    except KeyError:
        home_directory = create_user(username, deploy_config.HOME_DIRECTORY_ROOT)
        return username, home_directory
def create_instance_user(problem_name, instance_number):
    """
    Ensure a system user exists for the given problem instance.

    Args:
        problem_name: The name of the problem
        instance_number: The unique number for this instance
    Returns:
        A tuple (username, new) where new is True if the user was just
        created and False if it already existed.
    """
    converted_name = sanitize_name(problem_name)
    username = get_username(converted_name, instance_number)

    try:
        # Check if the user already exists; getpwnam raises KeyError when
        # the account is missing.
        user = getpwnam(username)
        new = False
    except KeyError:
        create_user(username)
        new = True

    return username, new
def get_username(problem_name, instance_number):
    """ Determine the username for a given problem instance. """
    sanitized = sanitize_name(problem_name)
    return "{}_{}".format(sanitized, instance_number)
def containerize(metadata, seed, flag_fmt):
    """
    Build and deploy a challenge inside a Docker container, then register
    the resulting instance JSON with shell_manager.

    Args:
        metadata: the problem metadata dict (must contain "name")
        seed: seed value passed into the in-container build as SEED
        flag_fmt: flag format string passed into the build as FORMAT
    Returns:
        None. Returns early (after logging an error) if a Dockerfile
        already exists or the deploy did not produce exactly one
        instance JSON.
    """
    logger.info(f"containerize: {metadata['name']}")
    if os.path.isfile("Dockerfile"):
        logger.error(
            "Error: cannot containerize, problem already contains a Dockerfile"
        )
        return None

    # Add a Dockerfile to support shimming the challenge deploy
    dockerfile = os.path.join(os.path.dirname(__file__), "static", "docker",
                              "Dockerfile.containerize")
    shutil.copyfile(dockerfile, "Dockerfile")

    # Use DockerChallenge to shim a deployment within a container. The actual
    # challenge will be built as a standard class within the image. Load with
    # default variables and configuration settings
    Problem = update_problem_class(DockerChallenge, metadata, "", "", "")
    builder = Problem()
    builder.problem_name = sanitize_name(metadata["name"])

    # standard DockerChallenge build sequence
    builder.initialize()
    builder.initialize_docker({"SEED": str(seed), "FORMAT": flag_fmt})

    # fetch static downloads from image
    html_static = os.path.join(builder.web_root, STATIC_FILE_ROOT)
    builder.copy_from_image(html_static)

    # Copy static downloads to local HTTP server
    static = glob.glob(os.path.join(STATIC_FILE_ROOT, "*"))
    if len(static) > 1:
        logger.warn(
            f"more than one static dir for containerized instance: {static}")
    for src in static:
        dst = os.path.join(html_static, os.path.basename(src))
        # remove target directory (not always cleaned/removed on undeploy)
        if os.path.isdir(dst):
            logger.warn(f"removing stale static directory: {dst}")
            shutil.rmtree(dst)
        logger.debug(f"moving {src} to {html_static}")
        shutil.move(src, html_static)

    # fetch instance json from image; glob is relative to the local copy
    # named after DEPLOYED_ROOT's basename
    builder.copy_from_image(DEPLOYED_ROOT)
    local = os.path.join(os.path.basename(DEPLOYED_ROOT), "**", "*.json")
    deployed = glob.glob(local, recursive=True)
    if len(deployed) != 1:
        logger.error("Error challenge failed to deploy in a container")
        return None

    # load instance to allow patching
    instance = None
    with open(deployed[0]) as instance_json:
        instance = json.load(instance_json)

    # add DockerChallenge style fields
    instance["docker_challenge"] = True
    instance["instance_digest"] = builder.image_digest
    instance["port_info"] = {n: p.dict() for n, p in builder.ports.items()}

    # remove invalid fields
    instance["service"] = None
    instance["server"] = None  # shell only knows internal docker host
    if "socket" in instance:
        del instance["socket"]
    if "port" in instance:
        del instance["port"]

    # hint to front end
    instance["containerize"] = True

    # write patched instance json to "register" it with shell_manager
    json_dst = os.path.join(*pathlib.Path(deployed[0]).parts[1:])
    dst = os.path.join(DEPLOYED_ROOT, json_dst)
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    with open(dst, 'w') as out:
        json.dump(instance, out)
def test_problem_name_sanitization(problem_name, expected_name):
    """Test valid problem names are correctly sanitized."""
    assert util.sanitize_name(problem_name) == expected_name
def test_invalid_prob_name_sanitization(problem_name):
    """Test invalid problem names raise an exception during sanitization."""
    with pytest.raises(Exception) as excep_info:
        util.sanitize_name(problem_name)
    message = str(excep_info.value)
    assert "Can not sanitize an empty field" in message
def deploy_problem(problem_directory, instances=None, test=False,
                   deployment_directory=None, debug=False):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances or not.
            Defaults to False.
        deployment_directory: If not None, the challenge will be deployed
            here instead of their home directory
        debug: If True, keep the staging directory for inspection.
    """
    # BUG FIX: the default was the mutable literal [0]; use the None
    # sentinel pattern (as the newer deploy_problem in this codebase does).
    if instances is None:
        instances = [0]

    global current_problem, current_instance

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["name"]

    instance_list = []

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    # Generate each requested instance into its own staging directory.
    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"],
            instance_number=instance_number)
        # NOTE(review): deployment_directory is only assigned while still
        # None, so in test mode later instances reuse the first instance's
        # staging "deployed" dir — confirm this is intended.
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object, problem_directory, instance_number,
            staging_directory, deployment_directory=deployment_directory)
        instance_list.append((instance_number, instance))

    deployment_json_dir = join(DEPLOYED_ROOT,
                               sanitize_name(problem_object["name"]))
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]
        deployment_directory = instance["deployment_directory"]

        logger.debug("...Copying problem files %s to deployment directory %s.",
                     instance["files"], deployment_directory)
        deploy_files(problem_path, deployment_directory, instance["files"],
                     problem.user, problem.__class__)

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            # This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError as e:
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            install_user_service(instance["service_file"],
                                 instance["socket_file"])

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        # NOTE(review): 'unique' is computed but never used in this version
        # (no iid field below); kept so the deploy_config attribute access
        # still happens — confirm it can be removed.
        unique = problem_object["name"] + problem_object["author"] + str(
            instance_number) + deploy_config.deploy_secret

        deployment_info = {
            "user": problem.user,
            "deployment_directory": deployment_directory,
            "service": os.path.basename(instance["service_file"]),
            "socket": None if instance["socket_file"] is None else
                os.path.basename(instance["socket_file"]),
            "server": problem.server,
            "description": problem.description,
            "flag": problem.flag,
            "instance_number": instance_number,
            "should_symlink":
                not isinstance(problem, Service) and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]]
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)

        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(json.dumps(deployment_info, indent=4,
                               separators=(", ", ": ")))

        logger.debug("The instance deployment information can be found at '%s'.",
                     instance_info_path)

    logger.info("Problem instances %s were successfully deployed for '%s'.",
                instances, problem_object["name"])
def deploy_problem(problem_directory, instances=1, test=False, deployment_directory=None):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The number of instances to deploy. Defaults to 1.
            (Note: unlike newer versions, this is a count, not a list.)
        test: Whether the instances are test instances or not. Defaults to False.
        deployment_directory: If not None, the challenge will be deployed here
            instead of their home directory
    """
    global current_problem, current_instance

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["name"]

    instance_list = []

    # Generate each instance into its own staging directory.
    for instance_number in range(instances):
        current_instance = instance_number
        print('Generating instance {} of "{}".'.format(instance_number, problem_object["name"]))
        staging_directory = generate_staging_directory()
        # NOTE(review): deployment_directory is only assigned while still
        # None, so in test mode later instances reuse the first instance's
        # staging "deployed" dir — confirm this is intended.
        if test and deployment_directory is None:
            deployment_directory = os.path.join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object,
            problem_directory,
            instance_number,
            staging_directory,
            deployment_directory=deployment_directory,
        )
        instance_list.append(instance)

    deployment_json_dir = os.path.join(DEPLOYED_ROOT, sanitize_name(problem_object["name"]))
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in enumerate(instance_list):
        print('Deploying instance {} of "{}".'.format(instance_number, problem_object["name"]))
        problem_path = os.path.join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]
        deployment_directory = instance["deployment_directory"]

        deploy_files(problem_path, deployment_directory, instance["files"], problem.user)

        if test is True:
            print("Description: {}".format(problem.description))
            print("Deployment Directory: {}".format(deployment_directory))

            # Tear down the test instance: kill its processes, delete the
            # test user and its home, and redirect the instance JSON into
            # the staging directory instead of DEPLOYED_ROOT.
            # This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError as e:
                pass

            execute(["userdel", problem.user])
            shutil.rmtree(instance["home_directory"])

            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            install_user_service(instance["service_file"])

            # delete staging directory
            shutil.rmtree(instance["staging_directory"])

        # Stable per-instance identifier derived from problem identity and
        # the deploy secret.
        unique = problem_object["name"] + problem_object["author"] + str(instance_number) + deploy_config.DEPLOY_SECRET
        iid = md5(unique.encode("utf-8")).hexdigest()

        deployment_info = {
            "user": problem.user,
            "service": os.path.basename(instance["service_file"]),
            "server": problem.server,
            "description": problem.description,
            "flag": problem.flag,
            "iid": iid,
            "instance_number": instance_number,
            "files": [f.to_dict() for f in problem.files],
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port

        instance_info_path = os.path.join(deployment_json_dir, "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(json.dumps(deployment_info, indent=4, separators=(", ", ": ")))

        print("The instance deployment information can be found at {}.".format(instance_info_path))
def deploy_problem(
        problem_directory,
        instances=None,
        test=False,
        deployment_directory=None,
        debug=False,
        restart_xinetd=True,
        containerize=False,
):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances. Defaults to False.
        deployment_directory: If not None, the challenge will be deployed
            here instead of their home directory
        debug: Output debug info
        restart_xinetd: Whether to restart xinetd upon deployment of this
            set of instances for a problem. Defaults True as used by tests,
            but typically is used with False from deploy_problems, which
            takes in multiple problems.
        containerize: Deployment is occuring in a container. This flag is
            used by containerize and external tools like cmgr that deploy
            challenges in an isolated environment.
    Returns:
        True if xinetd still needs a restart (i.e. a service file was
        installed but restart_xinetd was False), else False.
    """
    # None-sentinel for the mutable default instance list.
    if instances is None:
        instances = [0]
    global current_problem, current_instance, port_map

    problem_object = get_problem(problem_directory)

    current_problem = problem_object["unique_name"]

    instance_list = []

    need_restart_xinetd = False

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    # Reinstall the problem's deb package so apt resolves any runtime
    # dependencies before instance generation.
    problem_deb_location = (
        os.path.join(DEB_ROOT, sanitize_name(problem_object["unique_name"])) +
        ".deb")
    try:
        subprocess.run(
            "DEBIAN_FRONTEND=noninteractive apt-get -y install " +
            f"--reinstall {problem_deb_location}",
            shell=True,
            check=True,
            stdout=subprocess.PIPE,
        )
    except subprocess.CalledProcessError:
        logger.error("An error occurred while installing problem packages.")
        raise FatalException
    logger.debug("Reinstalled problem's deb package to fulfill dependencies")

    # Generate each requested instance into its own staging directory.
    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"],
            instance_number=instance_number)
        # NOTE(review): deployment_directory is only assigned while still
        # None, so in test mode later instances reuse the first instance's
        # staging "deployed" dir — confirm this is intended.
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object,
            problem_directory,
            instance_number,
            staging_directory,
            deployment_directory=deployment_directory,
        )
        instance_list.append((instance_number, instance))

    deployment_json_dir = join(
        DEPLOYED_ROOT,
        "{}-{}".format(sanitize_name(problem_object["name"]),
                       get_pid_hash(problem_object, True)),
    )
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]
        deployment_directory = instance["deployment_directory"]

        logger.debug(
            "...Copying problem files %s to deployment directory %s.",
            instance["files"],
            deployment_directory,
        )
        deploy_files(
            problem_path,
            deployment_directory,
            instance["files"],
            problem.user,
            problem.__class__,
        )

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            # This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError as e:
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)
            if instance["service_file"] is not None:
                install_user_service(instance["service_file"],
                                     instance["socket_file"])
                # set to true, this will signal restart xinetd
                need_restart_xinetd = True

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        deployment_info = {
            "user":
                problem.user,
            "deployment_directory":
                deployment_directory,
            "service":
                None if instance["service_file"] is None else os.path.basename(
                    instance["service_file"]),
            "socket":
                None if instance["socket_file"] is None else os.path.basename(
                    instance["socket_file"]),
            "server":
                problem.server,
            "description":
                problem.description,
            "hints":
                problem.hints,
            "flag":
                problem.flag,
            "flag_sha1":
                problem.flag_sha1,
            "instance_number":
                instance_number,
            "should_symlink":
                not isinstance(problem, Service) and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]],
            "docker_challenge":
                isinstance(problem, DockerChallenge)
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)

        # pass along image digest so webui can launch the correct image
        if isinstance(problem, DockerChallenge):
            deployment_info["instance_digest"] = problem.image_digest
            deployment_info["port_info"] = {
                n: p.dict() for n, p in problem.ports.items()
            }

        # Record the allocated port (or None) for this problem/instance.
        port_map[(current_problem, instance_number)] = deployment_info.get(
            "port", None)

        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(
                json.dumps(deployment_info, indent=4, separators=(", ", ": ")))

        logger.debug(
            "The instance deployment information can be found at '%s'.",
            instance_info_path,
        )

    # restart xinetd
    if restart_xinetd and need_restart_xinetd:
        execute(["service", "xinetd", "restart"], timeout=60)

    logger.info(
        "Problem instances %s were successfully deployed for '%s'.",
        instances,
        problem_object["unique_name"],
    )
    return need_restart_xinetd
def deploy_problem(problem_directory, instances=None, test=False,
                   deployment_directory=None, debug=False):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances or not.
            Defaults to False.
        deployment_directory: If not None, the challenge will be deployed
            here instead of their home directory
        debug: If True, keep the staging directory for inspection.
    """
    # BUG FIX: the default was the mutable literal [0]; use the None
    # sentinel pattern (as the newest deploy_problem variant does).
    if instances is None:
        instances = [0]

    global current_problem, current_instance

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["name"]

    instance_list = []

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    # Generate each requested instance into its own staging directory.
    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"],
            instance_number=instance_number)
        # NOTE(review): deployment_directory is only assigned while still
        # None, so in test mode later instances reuse the first instance's
        # staging "deployed" dir — confirm this is intended.
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object, problem_directory, instance_number,
            staging_directory, deployment_directory=deployment_directory)
        instance_list.append((instance_number, instance))

    deployment_json_dir = join(DEPLOYED_ROOT,
                               sanitize_name(problem_object["name"]))
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]
        deployment_directory = instance["deployment_directory"]

        logger.debug("...Copying problem files %s to deployment directory %s.",
                     instance["files"], deployment_directory)
        deploy_files(problem_path, deployment_directory, instance["files"],
                     problem.user, problem.__class__)

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            # This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError as e:
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            install_user_service(instance["service_file"],
                                 instance["socket_file"])

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        # NOTE(review): 'unique' is computed but never used in this version;
        # kept so the deploy_config attribute access still happens —
        # confirm it can be removed.
        unique = problem_object["name"] + problem_object["author"] + str(
            instance_number) + deploy_config.deploy_secret

        deployment_info = {
            "user": problem.user,
            "deployment_directory": deployment_directory,
            "service": None if instance["service_file"] is None else
                os.path.basename(instance["service_file"]),
            "socket": None if instance["socket_file"] is None else
                os.path.basename(instance["socket_file"]),
            "server": problem.server,
            "description": problem.description,
            "flag": problem.flag,
            "flag_sha1": problem.flag_sha1,
            "instance_number": instance_number,
            "should_symlink":
                not isinstance(problem, Service) and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]]
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)

        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(json.dumps(deployment_info, indent=4,
                               separators=(", ", ": ")))

        logger.debug("The instance deployment information can be found at '%s'.",
                     instance_info_path)

    logger.info("Problem instances %s were successfully deployed for '%s'.",
                instances, problem_object["name"])
def package_problem(problem_path, staging_path=None, out_path=None,
                    ignore_files=None):
    """
    Does the work of packaging a single problem.

    Args:
        problem_path (str): path to the problem directory
        staging_path (str, optional): path to a temporary.
            staging directory for packaging this problem.
        out_path (str, optional): path to an output directory
            for the resultant .deb package.
        ignore_files (list of str, optional): filenames to exclude
            when packaging this problem.
    Returns:
        tuple (str, str): the name of the package,
        the absolute path to the packaged problem
    """
    # BUG FIX: the default was a mutable [] that this function appends to,
    # so "__staging" accumulated across calls and callers' lists were
    # mutated. Copy into a fresh list instead.
    ignore_files = list(ignore_files) if ignore_files is not None else []

    problem = get_problem(problem_path)
    logger.debug("Starting to package: '%s'.", problem["name"])

    # Create staging directories needed for packaging
    paths = {}
    if staging_path is None:
        paths["staging"] = join(problem_path, "__staging")
    else:
        paths["staging"] = join(staging_path, "__staging")
    paths["debian"] = join(paths["staging"], "DEBIAN")
    paths["data"] = join(paths["staging"], get_problem_root(problem["name"]))
    paths["install_data"] = join(paths["data"], "__files")
    for path in paths.values():
        if not isdir(path):
            makedirs(path)

    # Copy the problem files to the staging directory, never including the
    # staging directory itself.
    ignore_files.append("__staging")
    full_copy(problem_path, paths["data"], ignore=ignore_files)
    # note that this chmod does not work correct if on a vagrant shared folder,
    # so we need to package the problems elsewhere
    chmod(paths["data"], 0o750)
    problem_to_control(problem, paths["debian"])
    postinst_dependencies(problem, problem_path, paths["debian"],
                          paths["install_data"])

    # Package the staging directory as a .deb
    def format_deb_file_name(problem):
        """
        Prepare the file name of the deb package according to deb policy.

        Args:
            problem: the problem object
        Returns:
            An acceptable file name for the problem.
        """
        raw_package_name = "{}-{}-{}.deb".format(
            sanitize_name(problem.get("organization", "ctf")),
            sanitize_name(problem.get("pkg_name", problem["name"])),
            sanitize_name(problem.get("version", "1.0-0")))
        return raw_package_name

    deb_directory = out_path if out_path is not None else getcwd()
    deb_path = join(deb_directory, format_deb_file_name(problem))

    # fakeroot makes the packaged files appear root-owned without needing
    # real root privileges.
    shell = spur.LocalShell()
    result = shell.run(
        ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])
    if result.return_code != 0:
        logger.error("Error building problem deb for '%s'.", problem["name"])
        logger.error(result.output)
        raise FatalException
    else:
        logger.info("Problem '%s' packaged successfully.", problem["name"])

    # Remove the staging directory
    logger.debug("Cleaning up '%s' staging directory '%s'.",
                 problem["name"], paths["staging"])
    rmtree(paths["staging"])

    return (sanitize_name(problem.get("pkg_name", problem["name"])),
            os.path.abspath(deb_path))