def migrate_problems(args, config): """ Main entrypoint for problem migration. """ additional_defaults = {} for default_pair in args.set_defaults: if ":" in default_pair: field, value = default_pair.split(":") additional_defaults[field] = value for problem_path in args.problem_paths: problem = get_problem(problem_path) problem_copy = deepcopy(problem) logger.debug("Migrating '%s' from legacy %s format.", problem["name"], args.legacy_format) migrater = MIGRATION_TABLE[args.legacy_format] updated_problem = migrater(problem_path, problem_copy, overrides=additional_defaults) if args.dry: print(updated_problem) else: logger.info("Updated '%s' to the new problem format.", problem["name"]) set_problem(problem_path, updated_problem)
def get_all_problems():
    """ Returns a dictionary of name-hash:object mappings """
    problems = {}
    # Nothing installed yet — return the empty mapping.
    if not os.path.isdir(PROBLEM_ROOT):
        return problems
    for entry in os.listdir(PROBLEM_ROOT):
        try:
            problems[entry] = get_problem(
                get_problem_root(entry, absolute=True))
        except FileNotFoundError:
            # Directory entry without a loadable problem definition; skip.
            pass
    return problems
def get_all_problems():
    """ Returns a dictionary of name:object mappings """
    found = {}
    if os.path.isdir(PROBLEM_ROOT):
        for candidate in os.listdir(PROBLEM_ROOT):
            root = get_problem_root(candidate, absolute=True)
            try:
                found[candidate] = get_problem(root)
            except FileNotFoundError:
                # Not a valid problem directory; ignore it.
                continue
    return found
def containerize_problems(args):
    """ Main entrypoint for problem containerization

    Builds a container image for each requested (problem, instance) pair.
    Stages each problem's sources into a scratch directory and invokes the
    image build from inside it.
    """
    # determine what we are deploying
    problem_names = args.problem_names
    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))
    logger.debug(f"Containerizing: {problem_names} {instance_list}")

    # build base images required
    ensure_base_images()

    deploy_init(contain=True)

    flag_fmt = args.flag_format if args.flag_format else FLAG_FMT

    for name in problem_names:
        if not os.path.isdir(get_problem_root(name, absolute=True)):
            logger.error(f"'{name}' is not an installed problem")
            continue
        logger.debug(f"Problem : {name}")

        src = get_problem_root(name, absolute=True)
        metadata = get_problem(src)
        cur_instances = [
            i["instance_number"] for i in get_all_problem_instances(name)
        ]
        logger.debug(f"Existing : {cur_instances}")

        origwd = os.getcwd()
        for instance in instance_list:
            if instance in cur_instances:
                # logger.warn is a deprecated alias; use warning()
                logger.warning(f"Instance already deployed: {instance}")
                continue
            logger.debug(f"Instance : {instance}")

            # copy source files to a staging directory and switch to it
            staging = generate_staging_directory(problem_name=name,
                                                 instance_number=instance)
            dst = os.path.join(staging, "_containerize")
            shutil.copytree(src, dst)
            try:
                os.chdir(dst)
                # build the image
                containerize(metadata, instance, flag_fmt)
            finally:
                # always return to the original directory, even if the
                # build raised — otherwise later iterations run from a
                # stale (possibly deleted) staging directory
                os.chdir(origwd)
def install_problem(problem_path, allow_reinstall=False):
    """
    Install a problem from a source directory.

    Args:
        problem_path: path to the problem source directory
        allow_reinstall: allow overwriting an already-installed problem

    Raises:
        FatalException: if the package installation fails.
    """
    problem_obj = get_problem(problem_path)
    if (
        os.path.isdir(get_problem_root_hashed(problem_obj, absolute=True))
        and not allow_reinstall
    ):
        logger.error(
            f"Problem {problem_obj['unique_name']} is already installed. You may specify --reinstall to reinstall an updated version from the specified directory."
        )
        return
    logger.info(f"Installing problem {problem_obj['unique_name']}...")

    acquire_lock()
    # Everything after acquiring the lock runs under try/finally so the
    # lock is released even if staging or packaging raises (previously a
    # failure in package_problem() left the lock held forever).
    try:
        staging_dir_path = generate_staging_directory(
            problem_name=problem_obj["unique_name"]
        )
        logger.debug(
            f"{problem_obj['unique_name']}: created staging directory"
            + f" ({staging_dir_path})"
        )

        generated_deb_path = package_problem(
            problem_path, staging_path=staging_dir_path, out_path=DEB_ROOT
        )
        logger.debug(f"{problem_obj['unique_name']}: created debian package")

        try:
            subprocess.run(
                "DEBIAN_FRONTEND=noninteractive apt-get -y install "
                + f"--reinstall {generated_deb_path}",
                shell=True,
                check=True,
                stdout=subprocess.PIPE,
            )
        except subprocess.CalledProcessError:
            logger.error("An error occurred while installing problem packages.")
            raise FatalException
    finally:
        release_lock()

    logger.debug(f"{problem_obj['unique_name']}: installed package")
    logger.info(f"{problem_obj['unique_name']} installed successfully")
def package_problem(problem_path, staging_path=None, out_path=None,
                    ignore_files=None):
    """
    Does the work of packaging a single problem.

    Args:
        problem_path (str): path to the problem directory
        staging_path (str, optional): path to a temporary
            staging directory for packaging this problem.
        out_path (str, optional): path to an output directory
            for the resultant .deb package.
        ignore_files (list of str, optional): filenames to exclude
            when packaging this problem.

    Returns:
        str: the absolute path to the packaged problem

    Raises:
        FatalException: if dpkg-deb fails to build the package.
    """
    # Copy the caller's list: the original code appended "__staging"
    # directly to ignore_files, mutating the argument across calls.
    ignore_files = [] if ignore_files is None else list(ignore_files)

    problem = get_problem(problem_path)
    logger.debug("Starting to package: '%s'.", problem["name"])

    # Create staging directories needed for packaging
    paths = {}
    if staging_path is None:
        paths["staging"] = join(problem_path, "__staging")
    else:
        paths["staging"] = join(staging_path, "__staging")
    paths["debian"] = join(paths["staging"], "DEBIAN")
    paths["data"] = join(paths["staging"], get_problem_root_hashed(problem))
    paths["install_data"] = join(paths["data"], "__files")
    for path in paths.values():
        if not isdir(path):
            makedirs(path)

    # Copy the problem files to the staging directory
    ignore_files.append("__staging")
    full_copy(problem_path, paths["data"], ignore=ignore_files)

    # note that this chmod does not work correct if on a vagrant shared folder,
    # so we need to package the problems elsewhere
    chmod(paths["data"], 0o750)

    problem_to_control(problem, paths["debian"])
    postinst_dependencies(problem, problem_path, paths["debian"],
                          paths["install_data"])

    # Package the staging directory as a .deb
    def format_deb_file_name(problem):
        """
        Prepare the file name of the deb package according to deb policy.

        Args:
            problem: the problem object

        Returns:
            An acceptable file name for the problem.
        """
        raw_package_name = "{}.deb".format(
            sanitize_name(problem["unique_name"]))
        return raw_package_name

    deb_directory = out_path if out_path is not None else getcwd()
    deb_path = join(deb_directory, format_deb_file_name(problem))

    shell = spur.LocalShell()
    result = shell.run(
        ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

    if result.return_code != 0:
        logger.error("Error building problem deb for '%s'.", problem["name"])
        logger.error(result.output)
        raise FatalException
    else:
        logger.debug("Problem '%s' packaged successfully.",
                     problem["unique_name"])

    # Remove the staging directory
    logger.debug("Cleaning up '%s' staging directory '%s'.",
                 problem["name"], paths["staging"])
    rmtree(paths["staging"])

    return os.path.abspath(deb_path)
def deploy_problem(
    problem_directory,
    instances=None,
    test=False,
    deployment_directory=None,
    debug=False,
    restart_xinetd=True,
    containerize=False,
):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances. Defaults to False.
        deployment_directory: If not None, the challenge will be deployed
            here instead of their home directory
        debug: Output debug info
        restart_xinetd: Whether to restart xinetd upon deployment of this
            set of instances for a problem. Defaults True as used by tests,
            but typically is used with False from deploy_problems, which
            takes in multiple problems.
        containerize: Deployment is occuring in a container. This flag is
            used by containerize and external tools like cmgr that deploy
            challenges in an isolated environment.

    Returns:
        bool: whether an xinetd restart is (still) needed — True when a
        service file was installed but restart_xinetd was False.

    Raises:
        FatalException: if reinstalling the problem's .deb package fails.
    """
    if instances is None:
        instances = [0]
    # Module-level state shared with other deployment helpers.
    global current_problem, current_instance, port_map

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["unique_name"]

    instance_list = []
    need_restart_xinetd = False

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    # Reinstall the packaged .deb so apt pulls in declared dependencies.
    problem_deb_location = (
        os.path.join(DEB_ROOT, sanitize_name(problem_object["unique_name"])) +
        ".deb")
    try:
        subprocess.run(
            "DEBIAN_FRONTEND=noninteractive apt-get -y install " +
            f"--reinstall {problem_deb_location}",
            shell=True,
            check=True,
            stdout=subprocess.PIPE,
        )
    except subprocess.CalledProcessError:
        logger.error("An error occurred while installing problem packages.")
        raise FatalException
    logger.debug("Reinstalled problem's deb package to fulfill dependencies")

    # First pass: generate every instance into its own staging directory.
    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"],
            instance_number=instance_number)
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object,
            problem_directory,
            instance_number,
            staging_directory,
            deployment_directory=deployment_directory,
        )
        instance_list.append((instance_number, instance))

    # Deployment metadata lives under a name-hash directory so multiple
    # versions of a problem can coexist.
    deployment_json_dir = join(
        DEPLOYED_ROOT,
        "{}-{}".format(sanitize_name(problem_object["name"]),
                       get_pid_hash(problem_object, True)),
    )
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]
        deployment_directory = instance["deployment_directory"]

        logger.debug(
            "...Copying problem files %s to deployment directory %s.",
            instance["files"],
            deployment_directory,
        )
        deploy_files(
            problem_path,
            deployment_directory,
            instance["files"],
            problem.user,
            problem.__class__,
        )

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            # This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError as e:
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            # Test runs keep their metadata in the staging directory.
            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            if instance["service_file"] is not None:
                install_user_service(instance["service_file"],
                                     instance["socket_file"])
                # set to true, this will signal restart xinetd
                need_restart_xinetd = True

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        deployment_info = {
            "user":
            problem.user,
            "deployment_directory":
            deployment_directory,
            "service":
            None if instance["service_file"] is None else os.path.basename(
                instance["service_file"]),
            "socket":
            None if instance["socket_file"] is None else os.path.basename(
                instance["socket_file"]),
            "server":
            problem.server,
            "description":
            problem.description,
            "hints":
            problem.hints,
            "flag":
            problem.flag,
            "flag_sha1":
            problem.flag_sha1,
            "instance_number":
            instance_number,
            "should_symlink":
            not isinstance(problem, Service) and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]],
            "docker_challenge":
            isinstance(problem, DockerChallenge)
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)
        # pass along image digest so webui can launch the correct image
        if isinstance(problem, DockerChallenge):
            deployment_info["instance_digest"] = problem.image_digest
            deployment_info["port_info"] = {
                n: p.dict()
                for n, p in problem.ports.items()
            }

        port_map[(current_problem,
                  instance_number)] = deployment_info.get("port", None)

        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(
                json.dumps(deployment_info, indent=4, separators=(", ", ": ")))

        logger.debug(
            "The instance deployment information can be found at '%s'.",
            instance_info_path,
        )

    # restart xinetd
    if restart_xinetd and need_restart_xinetd:
        execute(["service", "xinetd", "restart"], timeout=60)

    logger.info(
        "Problem instances %s were successfully deployed for '%s'.",
        instances,
        problem_object["unique_name"],
    )
    return need_restart_xinetd
def deploy_problems(args):
    """ Main entrypoint for problem deployment

    Deploys the requested instances of each named problem (or all
    installed problems when the single name "all" is given), then always
    restarts xinetd (unless --no-restart) and persists the port map.
    """
    global FLAG_FMT
    if args.flag_format:
        FLAG_FMT = args.flag_format
        logger.info(f"Deploying with custom flag format: {FLAG_FMT}")

    shared_config, local_config, port_map = deploy_init(args.containerize)

    # NOTE(review): need_restart_xinetd is assigned but never used here;
    # the restart decision below relies only on args.no_restart.
    need_restart_xinetd = False

    try:
        user = getpwnam(shared_config.default_user)
    except KeyError as e:
        logger.info(
            "default_user '%s' does not exist. Creating the user now.",
            shared_config.default_user,
        )
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    # Containerized deployment handles exactly one (problem, instance).
    if args.containerize and (len(problem_names) > 1
                              or len(instance_list) > 1):
        logger.error("can only deploy a single instance per container")
        return

    acquire_lock()
    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)
            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            # Static-flag problems only ever get a single instance (0).
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location,
                               instances=instances_to_deploy,
                               test=args.dry,
                               debug=args.debug,
                               restart_xinetd=False,
                               containerize=args.containerize)
            else:
                logger.info(
                    "No additional instances to deploy for '%s'.",
                    problem_object["unique_name"],
                )
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(PORT_MAP_PATH, "w") as f:
            # Tuple keys are not JSON-serializable; store their repr().
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()
def bundle_problems(args, config):
    """
    Main entrypoint for generating problem bundles.

    Accepts either a bundle source directory or a bundle.json file,
    verifies all referenced problems are installed, and builds a .deb
    for the bundle.

    Raises:
        FatalException: if the bundle or any referenced problem is missing.
    """
    bundle_path = args.bundle_path
    if os.path.isdir(args.bundle_path):
        bundle = get_bundle(args.bundle_path)
        bundle_path = join(args.bundle_path, "bundle.json")
    elif os.path.isfile(args.bundle_path):
        # Use a context manager so the file handle is not leaked
        # (was: json.loads(open(...).read())).
        with open(args.bundle_path) as bundle_file:
            bundle = json.loads(bundle_file.read())
    else:
        logger.error("No bundle could be found at '%s'", args.bundle_path)
        raise FatalException

    logger.debug("Starting to bundle: '%s'.", bundle["name"])

    # Every problem referenced by the bundle must already be installed.
    for problem_name in bundle["problems"]:
        installed_path = get_problem_root(problem_name, absolute=True)
        if not isdir(installed_path) or not get_problem(installed_path):
            logger.error("'%s' is not an installed problem.", problem_name)
            raise FatalException

    paths = {"working": getcwd() if args.out is None else args.out}
    if args.staging_dir:
        paths["staging"] = join(args.staging_dir, "__staging")
    else:
        paths["staging"] = join(paths["working"], "__staging")
    paths["debian"] = join(paths["staging"], "DEBIAN")
    paths["bundle_root"] = join(paths["staging"],
                                get_bundle_root(bundle["name"]))

    # Plain loop instead of a side-effect list comprehension.
    for staging_path in paths.values():
        if not isdir(staging_path):
            makedirs(staging_path)

    # note that this chmod does not work correct if on a vagrant shared folder,
    # so we need to package the problems elsewhere
    chmod(dirname(paths["bundle_root"]), 0o750)

    bundle_to_control(bundle, paths["debian"])
    copied_bundle_path = join(paths["bundle_root"], "bundle.json")
    copyfile(bundle_path, copied_bundle_path)

    def format_deb_file_name(bundle):
        """
        Prepare the file name of the deb package according to deb policy.

        Args:
            bundle: the bundle object

        Returns:
            An acceptable file name for the bundle.
        """
        raw_package_name = "{}-{}-bundle-{}.deb".format(
            sanitize_name(bundle.get("organization", "ctf")),
            sanitize_name(bundle["name"]),
            sanitize_name(bundle.get("version", "1.0-0")))
        return raw_package_name

    deb_path = join(paths["working"], format_deb_file_name(bundle))

    shell = spur.LocalShell()
    result = shell.run(
        ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

    if result.return_code != 0:
        logger.error("Error building bundle deb for '%s'.", bundle["name"])
        logger.error(result.output)
    else:
        logger.info("Bundle '%s' packaged successfully.", bundle["name"])

    # Fixed "Clearning" typo in the log message.
    logger.debug("Cleaning up '%s' staging directory '%s'.", bundle["name"],
                 paths["staging"])

    rmtree(paths["staging"])
def deploy_problem(problem_directory,
                   instances=[0],
                   test=False,
                   deployment_directory=None,
                   debug=False):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances or not.
            Defaults to False.
        deployment_directory: If not None, the challenge will be deployed
            here instead of their home directory
        debug: keep staging directories for inspection
    """
    # NOTE(review): mutable default argument (instances=[0]); harmless here
    # since the list is never mutated, but a None sentinel would be safer.
    global current_problem, current_instance

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["name"]

    instance_list = []

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    # First pass: generate each instance into its own staging directory.
    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"],
            instance_number=instance_number)
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object,
            problem_directory,
            instance_number,
            staging_directory,
            deployment_directory=deployment_directory)
        instance_list.append((instance_number, instance))

    deployment_json_dir = join(DEPLOYED_ROOT,
                               sanitize_name(problem_object["name"]))
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]
        deployment_directory = instance["deployment_directory"]

        logger.debug("...Copying problem files %s to deployment directory %s.",
                     instance["files"], deployment_directory)
        deploy_files(problem_path, deployment_directory, instance["files"],
                     problem.user, problem.__class__)

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            #This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError as e:
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            # Test runs write their metadata into the staging directory.
            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            install_user_service(instance["service_file"],
                                 instance["socket_file"])

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        # NOTE(review): 'unique' is computed but never used in this version.
        unique = problem_object["name"] + problem_object["author"] + str(
            instance_number) + deploy_config.deploy_secret

        deployment_info = {
            "user":
            problem.user,
            "deployment_directory":
            deployment_directory,
            "service":
            None if instance["service_file"] is None else os.path.basename(
                instance["service_file"]),
            "socket":
            None if instance["socket_file"] is None else os.path.basename(
                instance["socket_file"]),
            "server":
            problem.server,
            "description":
            problem.description,
            "flag":
            problem.flag,
            "flag_sha1":
            problem.flag_sha1,
            "instance_number":
            instance_number,
            "should_symlink":
            not isinstance(problem, Service) and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]]
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)

        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(
                json.dumps(deployment_info, indent=4, separators=(", ", ": ")))

        logger.debug(
            "The instance deployment information can be found at '%s'.",
            instance_info_path)

    logger.info("Problem instances %s were successfully deployed for '%s'.",
                instances, problem_object["name"])
def deploy_problem(problem_directory,
                   instances=None,
                   test=False,
                   deployment_directory=None,
                   debug=False):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances or not.
            Defaults to False.
        deployment_directory: If not None, the challenge will be deployed
            here instead of their home directory
        debug: keep staging directories for inspection
    """
    # None-sentinel replaces the mutable default argument instances=[0].
    if instances is None:
        instances = [0]
    global current_problem, current_instance

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["name"]

    instance_list = []

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    # First pass: generate each instance into its own staging directory.
    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"],
            instance_number=instance_number)
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object,
            problem_directory,
            instance_number,
            staging_directory,
            deployment_directory=deployment_directory)
        instance_list.append((instance_number, instance))

    deployment_json_dir = join(DEPLOYED_ROOT,
                               sanitize_name(problem_object["name"]))
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]
        deployment_directory = instance["deployment_directory"]

        logger.debug("...Copying problem files %s to deployment directory %s.",
                     instance["files"], deployment_directory)
        deploy_files(problem_path, deployment_directory, instance["files"],
                     problem.user, problem.__class__)

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            #This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError as e:
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            install_user_service(instance["service_file"],
                                 instance["socket_file"])

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        # NOTE(review): 'unique' is computed but never used in this version.
        unique = problem_object["name"] + problem_object["author"] + str(
            instance_number) + deploy_config.deploy_secret

        deployment_info = {
            "user": problem.user,
            "deployment_directory": deployment_directory,
            # BUGFIX: guard against a None service_file — the sibling
            # version already guards; the unguarded basename() call here
            # raised TypeError for problems without a service.
            "service":
            None if instance["service_file"] is None else os.path.basename(
                instance["service_file"]),
            "socket":
            None if instance["socket_file"] is None else os.path.basename(
                instance["socket_file"]),
            "server": problem.server,
            "description": problem.description,
            "flag": problem.flag,
            "flag_sha1": problem.flag_sha1,
            "instance_number": instance_number,
            "should_symlink":
            not isinstance(problem, Service) and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]]
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)

        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(
                json.dumps(deployment_info, indent=4, separators=(", ", ": ")))

        logger.debug(
            "The instance deployment information can be found at '%s'.",
            instance_info_path)

    logger.info("Problem instances %s were successfully deployed for '%s'.",
                instances, problem_object["name"])
def deploy_problems(args):
    """ Main entrypoint for problem deployment

    Loads (or bootstraps) the persistent port map, deploys requested
    instances of each named problem, then restarts xinetd (unless
    --no-restart) and writes the updated port map back out.
    """
    global shared_config, local_config, port_map
    shared_config = get_shared_config()
    local_config = get_local_config()

    # NOTE(review): need_restart_xinetd is assigned but never used here.
    need_restart_xinetd = False

    try:
        user = getpwnam(shared_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    shared_config.default_user)
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == 'all':
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v['unique_name'] for k, v in get_all_problems().items()
        ]

    # Attempt to load the port_map from file
    try:
        port_map_path = join(SHARED_ROOT, 'port_map.json')
        with open(port_map_path, 'r') as f:
            port_map = json.load(f)
            # Keys were persisted as repr() of tuples; rebuild the tuples.
            port_map = {literal_eval(k): v for k, v in port_map.items()}
    except FileNotFoundError:
        # If it does not exist, create it
        for path, problem in get_all_problems().items():
            for instance in get_all_problem_instances(path):
                port_map[(problem["unique_name"],
                          instance["instance_number"])] = instance.get(
                              "port", None)
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)
    except IOError:
        logger.error(f"Error loading port map from {port_map_path}")
        raise

    acquire_lock()

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)
            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            # Static-flag problems only ever get a single instance (0).
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy or is_static_flag:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location,
                               instances=instances_to_deploy,
                               test=args.dry,
                               debug=args.debug,
                               restart_xinetd=False)
            else:
                logger.info("No additional instances to deploy for '%s'.",
                            problem_object["unique_name"])
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(port_map_path, 'w') as f:
            # Tuple keys are not JSON-serializable; store their repr().
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()
def problem_builder(args, config):
    """
    Main entrypoint for package building operations.

    Pops one base path off args.problem_paths, packages every problem
    found under it as a .deb, then recurses until all paths are consumed.

    Raises:
        FatalException: if no problems are found under a base path.
    """
    # Grab a problem_path
    problem_base_path = args.problem_paths.pop()

    problem_paths = find_problems(problem_base_path)
    if len(problem_paths) == 0:
        logging.critical("No problems found under '%s'!", problem_base_path)
        raise FatalException

    for problem_path in problem_paths:
        problem = get_problem(problem_path)

        logger.debug("Starting to package: '%s'.", problem["name"])

        paths = {}
        if args.staging_dir is None:
            paths["staging"] = join(problem_path, "__staging")
        else:
            paths["staging"] = join(args.staging_dir, "__staging")
        paths["debian"] = join(paths["staging"], "DEBIAN")
        paths["data"] = join(paths["staging"],
                             get_problem_root(problem["name"]))
        paths["install_data"] = join(paths["data"], "__files")

        # Make all of the directories, order does not matter with makedirs
        for staging_path in paths.values():
            if not isdir(staging_path):
                makedirs(staging_path)

        # Build a fresh ignore list instead of appending to args.ignore:
        # the original mutated args.ignore once per problem, accumulating
        # duplicate "__staging" entries across iterations and recursion.
        ignore = list(args.ignore) + ["__staging"]
        full_copy(problem_path, paths["data"], ignore=ignore)

        # note that this chmod does not work correct if on a vagrant shared folder,
        # so we need to package the problems elsewhere
        chmod(paths["data"], 0o750)

        problem_to_control(problem, paths["debian"])
        postinst_dependencies(problem, problem_path, paths["debian"],
                              paths["install_data"])

        deb_directory = args.out if args.out is not None else getcwd()

        def format_deb_file_name(problem):
            """
            Prepare the file name of the deb package according to deb policy.

            Args:
                problem: the problem object

            Returns:
                An acceptable file name for the problem.
            """
            raw_package_name = "{}-{}-{}.deb".format(
                sanitize_name(problem.get("organization", "ctf")),
                sanitize_name(problem.get("pkg_name", problem["name"])),
                sanitize_name(problem.get("version", "1.0-0")))
            return raw_package_name

        deb_path = join(deb_directory, format_deb_file_name(problem))

        shell = spur.LocalShell()
        result = shell.run(
            ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

        if result.return_code != 0:
            logger.error("Error building problem deb for '%s'.",
                         problem["name"])
            logger.error(result.output)
        else:
            logger.info("Problem '%s' packaged successfully.",
                        problem["name"])

        # Fixed "Clearning" typo in the log message.
        logger.debug("Cleaning up '%s' staging directory '%s'.",
                     problem["name"], paths["staging"])

        rmtree(paths["staging"])

    # Recurse on any remaining base paths.
    if len(args.problem_paths) >= 1:
        return problem_builder(args, config)
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment.

    Resolves the requested problems (directly named, or expanded from
    bundles), takes a file-based deployment lock, deploys the requested
    instance numbers of each problem, then restarts xinetd if needed and
    releases the lock.

    Args:
        args: parsed CLI arguments (problem_paths, bundle, secret, dry,
            instances, num_instances, redeploy, deployment_directory,
            debug, no_restart).
        config: deployment configuration; stored into the module-global
            deploy_config.

    Raises:
        FatalException: on conflicting options, missing bundles, packaging or
            installation failure, or when another deployment holds the lock.
    """
    # Module-level state: deploy_config is shared config; port_map and
    # inv_port_map are rebuilt below from already-deployed instances.
    global deploy_config, port_map, inv_port_map
    deploy_config = config
    need_restart_xinetd = False

    # Existence check only: getpwnam raises KeyError if the default user
    # does not exist, in which case we create it.
    try:
        user = getpwnam(deploy_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    # A fixed deployment directory only makes sense for a single
    # problem/instance; refuse ambiguous combinations.
    if args.deployment_directory is not None and (len(args.problem_paths) > 1
                                                  or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple problems or instances."
        )
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warning(
            "Overriding deploy_secret with user supplied secret '%s'.",
            args.secret)

    problem_names = args.problem_paths

    # With --bundle, each path names a bundle (file or installed bundle
    # root); replace problem_names with the union of their problem lists.
    if args.bundle:
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(
                    bundle_path, absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed
    # instances so we can skip redeployments and detect port collisions
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    # Crude cross-process mutex: the presence of the lock file means a
    # deployment is in progress; it is removed in the finally block below.
    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot deploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    # Explicit instance numbers win over a 0..num_instances-1 range.
    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if isdir(get_problem_root(problem_name, absolute=True)):
                # problem_name is already an installed package
                deploy_location = get_problem_root(problem_name, absolute=True)
            elif isdir(problem_name) and args.dry:
                # dry run - avoid installing package
                deploy_location = problem_name
            elif isdir(problem_name):
                # problem_name is a source dir - convert to .deb and install
                try:
                    if not os.path.isdir(TEMP_DEB_DIR):
                        os.mkdir(TEMP_DEB_DIR)
                    generated_deb_path = package_problem(
                        problem_name, out_path=TEMP_DEB_DIR)
                except FatalException:
                    logger.error("An error occurred while packaging %s.",
                                 problem_name)
                    raise
                try:
                    # reinstall flag ensures package will be overwritten if
                    # version is the same, maintaining previous 'dpkg -i'
                    # behavior.
                    # NOTE(review): shell=True with an interpolated path is
                    # shell-injection-prone if the path contains shell
                    # metacharacters — consider a list argv without shell.
                    subprocess.run(
                        'apt-get install --reinstall {}'.format(
                            generated_deb_path),
                        shell=True,
                        check=True,
                        stdout=subprocess.PIPE)
                except subprocess.CalledProcessError:
                    logger.error(
                        "An error occurred while installing problem packages.")
                    raise FatalException
                deploy_location = get_problem_root_hashed(
                    get_problem(problem_name), absolute=True)
            else:
                logger.error(
                    "'%s' is neither an installed package, nor a valid problem directory",
                    problem_name)
                raise FatalException

            # Avoid redeploying already-deployed instances
            # (already_deployed is keyed by the names returned from
            # get_all_problems(), presumed to match problem_name here —
            # TODO confirm).
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            # Defer the xinetd restart until after all problems deploy.
            need_restart_xinetd = deploy_problem(
                deploy_location,
                instances=todo_instance_list,
                test=args.dry,
                deployment_directory=args.deployment_directory,
                debug=args.debug,
                restart_xinetd=False)
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart and need_restart_xinetd:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Always release the deployment lock, even on failure.
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
def problem_builder(args, config):
    """
    Main entrypoint for package building operations.

    Validates that at least one problem path was supplied, then packages
    every problem found under each base path into a Debian package built
    via fakeroot/dpkg-deb.

    Args:
        args: parsed CLI arguments (problem_paths, staging_dir, out, ignore).
        config: shell manager configuration (unused here; kept for interface
            compatibility with the other entrypoints).

    Raises:
        FatalException: if no paths were given or a base path contains no
            problems.
    """
    if not args.problem_paths:
        print(
            "usage: shell_manager package [-h] [-s STAGING_DIR] [-o OUT] [-i IGNORE] problem_path"
        )
        print(
            "shell_manager bundle: error: the following arguments are required: problem_path"
        )
        raise FatalException

    # Iterate instead of the original tail recursion.
    while args.problem_paths:
        # Grab a problem_path
        problem_base_path = args.problem_paths.pop()

        problem_paths = find_problems(problem_base_path)
        if len(problem_paths) == 0:
            # Use the module logger (was logging.critical) for consistency
            # with the rest of the file.
            logger.critical("No problems found under '%s'!", problem_base_path)
            raise FatalException

        for problem_path in problem_paths:
            problem = get_problem(problem_path)
            logger.debug("Starting to package: '%s'.", problem["name"])

            # Lay out the staging tree that mirrors the final .deb contents.
            paths = {}
            if args.staging_dir is None:
                paths["staging"] = join(problem_path, "__staging")
            else:
                paths["staging"] = join(args.staging_dir, "__staging")
            paths["debian"] = join(paths["staging"], "DEBIAN")
            paths["data"] = join(paths["staging"],
                                 get_problem_root(problem["name"]))
            paths["install_data"] = join(paths["data"], "__files")

            # Make all of the directories; order does not matter with
            # makedirs. A plain loop replaces the original side-effecting
            # list comprehension.
            for staging_path in paths.values():
                if not isdir(staging_path):
                    makedirs(staging_path)

            args.ignore.append("__staging")
            full_copy(problem_path, paths["data"], ignore=args.ignore)

            # note that this chmod does not work correctly if on a vagrant
            # shared folder, so we need to package the problems elsewhere
            chmod(paths["data"], 0o750)

            problem_to_control(problem, paths["debian"])
            postinst_dependencies(problem, problem_path, paths["debian"],
                                  paths["install_data"])

            deb_directory = args.out if args.out is not None else getcwd()

            def format_deb_file_name(problem):
                """
                Prepare the file name of the deb package according to deb policy.

                Args:
                    problem: the problem object

                Returns:
                    An acceptable file name for the problem.
                """
                raw_package_name = "{}-{}-{}.deb".format(
                    sanitize_name(problem.get("organization", "ctf")),
                    sanitize_name(problem.get("pkg_name", problem["name"])),
                    sanitize_name(problem.get("version", "1.0-0")))
                return raw_package_name

            deb_path = join(deb_directory, format_deb_file_name(problem))

            shell = spur.LocalShell()
            result = shell.run(
                ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])
            if result.return_code != 0:
                logger.error("Error building problem deb for '%s'.",
                             problem["name"])
                logger.error(result.output)
            else:
                logger.info("Problem '%s' packaged successfully.",
                            problem["name"])

            # Fixed typo in the original message ("Clearning up").
            logger.debug("Cleaning up '%s' staging directory '%s'.",
                         problem["name"], paths["staging"])
            rmtree(paths["staging"])
def undeploy_problems(args, config):
    """
    Main entrypoint for problem undeployment.

    Resolves the requested problems (directly named, or expanded from
    bundles), takes the deployment lock file, removes the requested
    instances that are actually deployed, and always releases the lock.

    Args:
        args: parsed CLI arguments (problem_paths, bundle, instances,
            num_instances).
        config: shell manager configuration (unused here; kept for interface
            compatibility with the other entrypoints).

    Raises:
        FatalException: on missing bundles, a held deployment lock, or an
            uninstalled problem.
    """
    problem_names = args.problem_paths

    # With --bundle, each path names a bundle (file or installed bundle
    # root); replace problem_names with the union of their problem lists.
    if args.bundle:
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path, absolute=True)
                if isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[problem["name"]] = []
        for instance in get_all_problem_instances(path):
            already_deployed[problem["name"]].append(instance["instance_number"])

    # Crude cross-process mutex shared with deploy_problems; released in
    # the finally block below.
    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot undeploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    # Explicit instance numbers win over a 0..num_instances-1 range.
    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            problem_root = get_problem_root(problem_name, absolute=True)
            if isdir(problem_root):
                problem = get_problem(problem_root)
                # Only consider instances that are actually deployed.
                instances = [i for i in instance_list
                             if i in already_deployed[problem["name"]]]
                if len(instances) == 0:
                    # logger.warning replaces the deprecated logger.warn alias.
                    logger.warning(
                        "No deployed instances %s were found for problem '%s'.",
                        instance_list, problem["name"])
                else:
                    logger.debug("Undeploying problem '%s'.", problem["name"])
                    # Bug fix: remove only the deployed subset (instances),
                    # not the full requested instance_list, matching the
                    # success message below.
                    remove_instances(problem_name, instances)
                    logger.info(
                        "Problem instances %s were successfully removed from '%s'.",
                        instances, problem["name"])
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        # Always release the deployment lock, even on failure.
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
def undeploy_problems(args, config):
    """
    Main entrypoint for problem undeployment.

    Resolves the requested problems (directly named, or expanded from
    bundles), takes the deployment lock file, removes the requested
    instances that are actually deployed, and always releases the lock.

    Args:
        args: parsed CLI arguments (problem_paths, bundle, instances,
            num_instances).
        config: shell manager configuration (unused here; kept for interface
            compatibility with the other entrypoints).

    Raises:
        FatalException: on missing bundles, a held deployment lock, or an
            uninstalled problem.
    """
    problem_names = args.problem_paths

    # With --bundle, each path names a bundle (file or installed bundle
    # root); replace problem_names with the union of their problem lists.
    if args.bundle:
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path, absolute=True)
                if isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[problem["name"]] = []
        for instance in get_all_problem_instances(path):
            already_deployed[problem["name"]].append(
                instance["instance_number"])

    # Crude cross-process mutex shared with deploy_problems; released in
    # the finally block below.
    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot undeploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    # Explicit instance numbers win over a 0..num_instances-1 range.
    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            problem_root = get_problem_root(problem_name, absolute=True)
            if isdir(problem_root):
                problem = get_problem(problem_root)
                # Only consider instances that are actually deployed.
                instances = [i for i in instance_list
                             if i in already_deployed[problem["name"]]]
                if len(instances) == 0:
                    # logger.warning replaces the deprecated logger.warn alias.
                    logger.warning(
                        "No deployed instances %s were found for problem '%s'.",
                        instance_list, problem["name"])
                else:
                    logger.debug("Undeploying problem '%s'.", problem["name"])
                    # Bug fix: remove only the deployed subset (instances),
                    # not the full requested instance_list, matching the
                    # success message below.
                    remove_instances(problem_name, instances)
                    logger.info(
                        "Problem instances %s were successfully removed from '%s'.",
                        instances, problem["name"])
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        # Always release the deployment lock, even on failure.
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
def bundle_problems(args, config):
    """
    Main entrypoint for generating problem bundles.

    Verifies every problem in the bundle is installed, stages the bundle
    metadata, and builds a Debian package via fakeroot/dpkg-deb.

    Args:
        args: parsed CLI arguments (bundle_path, staging_dir, out).
        config: shell manager configuration (unused here; kept for interface
            compatibility with the other entrypoints).

    Raises:
        Exception: if the bundle cannot be found or references an
            uninstalled problem.
    """
    bundle_path = args.bundle_path
    if os.path.isdir(args.bundle_path):
        bundle = get_bundle(args.bundle_path)
        bundle_path = join(args.bundle_path, "bundle.json")
    elif os.path.isfile(args.bundle_path):
        # Close the file handle deterministically (the original leaked it).
        with open(args.bundle_path) as bundle_file:
            bundle = json.load(bundle_file)
    else:
        raise Exception("No bundle {}".format(args.bundle_path))

    # Every problem referenced by the bundle must already be installed.
    for problem_name in bundle["problems"]:
        installed_path = get_problem_root(problem_name, absolute=True)
        if not isdir(installed_path) or not get_problem(installed_path):
            raise Exception(
                "'{}' is not an installed problem.".format(problem_name))

    # Lay out the staging tree that mirrors the final .deb contents.
    paths = {"working": getcwd() if args.out is None else args.out}
    if args.staging_dir:
        paths["staging"] = join(args.staging_dir, "__staging")
    else:
        paths["staging"] = join(paths["working"], "__staging")
    paths["debian"] = join(paths["staging"], "DEBIAN")
    paths["bundle_root"] = join(paths["staging"],
                                get_bundle_root(bundle["name"]))

    # Bug fix: the original iterated over the undefined name 'path'
    # (NameError at runtime); iterate over the 'paths' dict instead.
    for staging_path in paths.values():
        if not isdir(staging_path):
            makedirs(staging_path)

    # note that this chmod does not work correctly if on a vagrant shared
    # folder, so we need to package the problems elsewhere
    chmod(dirname(paths["bundle_root"]), 0o750)

    bundle_to_control(bundle, paths["debian"])

    copied_bundle_path = join(paths["bundle_root"], "bundle.json")
    copyfile(bundle_path, copied_bundle_path)

    def format_deb_file_name(bundle):
        """
        Prepare the file name of the deb package according to deb policy.

        Args:
            bundle: the bundle object

        Returns:
            An acceptable file name for the bundle.
        """
        raw_package_name = "{}-{}-bundle-{}.deb".format(
            bundle.get("organization", "ctf"), bundle["name"],
            bundle.get("version", "1.0-0"))
        return sanitize_name(raw_package_name)

    deb_path = join(paths["working"], format_deb_file_name(bundle))

    shell = spur.LocalShell()
    result = shell.run(
        ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])
    if result.return_code != 0:
        print("Error building bundle deb for '{}'".format(bundle["name"]))
        print(result.output)
    else:
        print("Bundle '{}' packaged successfully.".format(bundle["name"]))

    print("Cleaning up staging directory '{}'.".format(paths["staging"]))
    rmtree(paths["staging"])