def get_instance_status(instance):
    """Return a status dict (instance_number, port, flag, connection, service)
    for a deployed problem instance.

    Args:
        instance: deployment metadata dict for one instance

    Returns:
        dict with "connection" (could we TCP-connect to the port?) and
        "service" (does systemd consider the unit healthy?) booleans.
    """
    status = {
        "instance_number": instance["instance_number"],
        "port": instance.get("port"),
        "flag": instance["flag"],
    }
    status["connection"] = False
    if "port" in instance:
        try:
            # Context manager guarantees the socket is closed even when
            # connect() raises (the original leaked the fd in that case).
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.connect(("localhost", instance["port"]))
            status["connection"] = True
        except ConnectionRefusedError:
            pass
    if instance["service"]:
        result = execute(["systemctl", "is-failed", instance["service"]], allow_error=True)
    else:
        result = execute(["systemctl", "is-failed"], allow_error=True)
    # 'systemctl is-failed' exits 0 when the unit IS failed; exit code 1 is
    # treated here as "not failed", i.e. healthy.
    status["service"] = result.return_code == 1
    # A port that refuses connections means the service is effectively down,
    # regardless of what systemd reports.
    if status["port"] is not None and not status["connection"]:
        status["service"] = False
    return status
def remove_instances(problem_name, instances_to_remove):
    """Remove all files and metadata for a given list of instances.

    Best-effort teardown: each step logs and skips on failure so one broken
    instance does not abort cleanup of the rest.

    Args:
        problem_name: unique name of the deployed problem
        instances_to_remove: list of instance numbers to tear down
    """
    deployed_instances = get_all_problem_instances(problem_name)
    deployment_json_dir = join(DEPLOYED_ROOT, problem_name)
    for instance in deployed_instances:
        instance_number = instance["instance_number"]
        if instance["instance_number"] in instances_to_remove:
            logger.debug(f"Removing instance {instance_number} of {problem_name}")

            # Remove the xinetd service definition
            service = instance["service"]
            if service:
                logger.debug("...Removing xinetd service '%s'.", service)
                try:
                    os.remove(join(XINETD_SERVICE_PATH, service))
                except FileNotFoundError:
                    # unit file already gone — not fatal
                    logger.error("xinetd service definition missing, skipping")

            # Remove the deployed instance directory
            directory = instance["deployment_directory"]
            logger.debug("...Removing deployment directory '%s'.", directory)
            try:
                shutil.rmtree(directory)
            except FileNotFoundError:
                logger.error("deployment directory missing, skipping")

            # Kill any active problem processes
            if instance.get("port", None):
                port = instance["port"]
                logger.debug(f"...Killing any processes running on port {port}")
                try:
                    execute(["fuser", "-k", "-TERM", "-n", "tcp", str(port)])
                except RunProcessError as e:
                    logger.error(
                        "error killing processes, skipping - {}".format(str(e))
                    )

            # Remove the problem user
            user = instance["user"]
            logger.debug("...Removing problem user '%s'.", user)
            try:
                execute(["userdel", user])
            except RunProcessError as e:
                logger.error(
                    "error removing problem user, skipping - {}".format(str(e))
                )

            # Remove the internal instance metadata
            deployment_json_path = join(
                deployment_json_dir, "{}.json".format(instance_number)
            )
            logger.debug("...Removing instance metadata '%s'.", deployment_json_path)
            os.remove(deployment_json_path)
    logger.info(
        "Problem instances %s were successfully removed for '%s'.",
        instances_to_remove,
        problem_name,
    )
def get_instance_status(instance):
    """Return a status dict (instance_number, port, flag, connection, service)
    for a deployed problem instance, logging when a port cannot be reached.
    """
    status = {
        "instance_number": instance["instance_number"],
        "port": instance["port"] if "port" in instance else None,
        "flag": instance["flag"],
    }
    status["connection"] = False
    if "port" in instance:
        port = instance["port"]
        try:
            # XXX: assumes that the challenge is hosted locally
            # Context manager ensures the socket is closed even when
            # connect() raises (the original leaked it on failure).
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.connect(("localhost", port))
            status["connection"] = True
        except ConnectionRefusedError:
            logger.debug(
                f"instance: {instance['instance_number']} has port: {port} but can't connect"
            )
    if instance["service"]:
        result = execute(["systemctl", "is-failed", instance["service"]], allow_error=True)
    else:
        result = execute(["systemctl", "is-failed"], allow_error=True)
    # exit code 1 from 'systemctl is-failed' means "not failed" -> healthy
    status["service"] = result.return_code == 1
    # A refusing port overrides systemd's opinion of the unit
    if status["port"] is not None and not status["connection"]:
        status["service"] = False
    return status
def remove_instances(path, instance_list):
    """
    Remove all files under deployment directory and metadata for a
    given list of instances.

    Args:
        path: problem name; normalized to the lowercase-hyphenated form
              used at deploy time
        instance_list: list of instance numbers to remove
    """
    # Normalize the problem name the same way deployment did
    path = path.lower().replace(" ", "-")
    problem_instances = get_all_problem_instances(path)
    deployment_json_dir = join(DEPLOYED_ROOT, path)
    for instance in problem_instances:
        instance_number = instance["instance_number"]
        if instance["instance_number"] in instance_list:
            logger.debug("Removing instance {} of '{}'.".format(
                instance_number, path))
            directory = instance["deployment_directory"]
            user = instance["user"]
            service = instance["service"]
            # NOTE: dropped the unused 'socket = instance["socket"]' local —
            # it shadowed the socket module and raised KeyError for
            # instances without a "socket" entry.
            deployment_json_path = join(deployment_json_dir,
                                        "{}.json".format(instance_number))

            logger.debug("...Removing xinetd service '%s'.", service)
            os.remove(join(XINETD_SERVICE_PATH, service))

            logger.debug("...Removing deployment directory '%s'.", directory)
            shutil.rmtree(directory)
            os.remove(deployment_json_path)

            logger.debug("...Removing problem user '%s'.", user)
            execute(["userdel", user])
    # One restart after all service files have been removed
    if problem_instances:
        execute(["service", "xinetd", "restart"], timeout=60)
def remove_instances(problem_name, instances_to_remove):
    """Remove all files and metadata for a given list of instances.

    Removal of files is best-effort: already-missing paths are logged and
    skipped so one partially-removed instance does not abort cleanup of the
    remaining instances (matches the sibling best-effort variant).
    """
    deployed_instances = get_all_problem_instances(problem_name)
    deployment_json_dir = join(DEPLOYED_ROOT, problem_name)
    for instance in deployed_instances:
        instance_number = instance["instance_number"]
        if instance["instance_number"] in instances_to_remove:
            logger.debug(
                f"Removing instance {instance_number} of {problem_name}")

            # Remove the xinetd service definition (may already be gone)
            service = instance["service"]
            if service:
                logger.debug("...Removing xinetd service '%s'.", service)
                try:
                    os.remove(join(XINETD_SERVICE_PATH, service))
                except FileNotFoundError:
                    logger.error("xinetd service definition missing, skipping")

            # Remove the deployed instance directory (may already be gone)
            directory = instance["deployment_directory"]
            logger.debug("...Removing deployment directory '%s'.", directory)
            try:
                shutil.rmtree(directory)
            except FileNotFoundError:
                logger.error("deployment directory missing, skipping")

            # Remove the problem user
            user = instance["user"]
            logger.debug("...Removing problem user '%s'.", user)
            execute(["userdel", user])

            # Remove the internal instance metadata
            deployment_json_path = join(deployment_json_dir,
                                        "{}.json".format(instance_number))
            logger.debug("...Removing instance metadata '%s'.",
                         deployment_json_path)
            os.remove(deployment_json_path)
    logger.info("Problem instances %s were successfully removed for '%s'.",
                instances_to_remove, problem_name)
def remove_instances(path, instance_list):
    """
    Remove all files under deployment directory and metadata for a
    given list of instances.

    Args:
        path: the problem name/path
        instance_list: list of instance numbers to remove
    """
    problem_instances = get_all_problem_instances(path)
    deployment_json_dir = join(DEPLOYED_ROOT, path)
    removed_any = False
    for instance in problem_instances:
        instance_number = instance["instance_number"]
        if instance["instance_number"] in instance_list:
            logger.debug("Removing instance {} of '{}'.".format(
                instance_number, path))
            directory = instance["deployment_directory"]
            user = instance["user"]
            service = instance["service"]
            # NOTE: dropped the unused 'socket = instance["socket"]' local —
            # it shadowed the socket module.
            deployment_json_path = join(deployment_json_dir,
                                        "{}.json".format(instance_number))

            logger.debug("...Removing xinetd service '%s'.", service)
            os.remove(join(XINETD_SERVICE_PATH, service))
            removed_any = True

            logger.debug("...Removing deployment directory '%s'.", directory)
            shutil.rmtree(directory)
            os.remove(deployment_json_path)

            logger.debug("...Removing problem user '%s'.", user)
            execute(["userdel", user])
    # Restart xinetd once at the end rather than once per removed instance
    if removed_any:
        execute(["service", "xinetd", "restart"], timeout=60)
def make_no_aslr_wrapper(self, exec_path, output="no_aslr_wrapper"):
    """
    Compile a setgid wrapper that disables ASLR for the wrapped binary.

    Args:
        exec_path: path to the binary the wrapper should exec
        output: filename for the compiled wrapper

    Returns:
        The name of the file generated.
    """
    wrapper_source = "no_aslr_wrapper.c"
    gcc_cmd = [
        "gcc",
        "-o",
        output,
        '-DBINARY_PATH="{}"'.format(exec_path),
        join(EXTRA_ROOT, wrapper_source),
    ]
    execute(gcc_cmd)
    # register the wrapper so it is deployed as an executable
    self.files.append(ExecutableFile(output))
    return output
def undeploy_problems(args):
    """
    Main entrypoint for problem undeployment.

    Does not remove the installed packages (apt-get remove
    [sanitized name with hash]).

    Does not remove the problem from the web server (delete it from the
    mongo db).

    Args:
        args: parsed CLI arguments; uses problem_names, instances and
              num_instances.
    """
    problem_names = args.problem_names
    if len(problem_names) == 0:
        logger.error("No problem name(s) specified")
        raise FatalException
    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to undeploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]
    acquire_lock()
    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))
    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            # Only remove instances that are actually deployed
            instances_to_remove = copy(instance_list)
            deployed_instances = set()
            for instance in get_all_problem_instances(problem_name):
                deployed_instances.add(instance["instance_number"])
            instances_to_remove = list(
                set(instances_to_remove).intersection(deployed_instances))
            if len(instances_to_remove) == 0:
                logger.warning(
                    f"No deployed instances found for {problem_name}")
                continue
            remove_instances(problem_name, instances_to_remove)
    finally:
        # Ensure the lock is released even if the xinetd restart fails
        # (the original leaked the lock in that case).
        try:
            execute(["service", "xinetd", "restart"], timeout=60)
        finally:
            release_lock()
def compiler_setup(self):
    """
    Setup function for compiled challenges.

    Builds via the challenge's makefile when one is given, otherwise invokes
    the configured compiler directly on the listed sources.
    """
    if self.program_name is None:
        raise Exception("Must specify program_name for compiled challenge.")
    if self.makefile is not None:
        # A makefile takes precedence over a direct compiler invocation
        execute(["make", "-f", self.makefile])
    elif len(self.compiler_sources) > 0:
        cmd = [self.compiler]
        cmd.extend(self.compiler_flags)
        cmd.extend(self.compiler_sources)
        cmd.extend(["-o", self.program_name])
        execute(cmd)
    self.compiled_files = [ExecutableFile(self.program_name)]
def compiler_setup(self):
    """
    Setup function for compiled challenges.

    Builds via the makefile when present, otherwise compiles the listed
    sources directly. The binary is registered as a compiled file unless a
    Remote challenge will register it itself.
    """
    if self.program_name is None:
        raise Exception("Must specify program_name for compiled challenge.")
    if self.makefile is not None:
        execute(["make", "-f", self.makefile])
    elif len(self.compiler_sources) > 0:
        build = [self.compiler, *self.compiler_flags, *self.compiler_sources]
        build += ["-o", self.program_name]
        execute(build)
    if not isinstance(self, Remote):
        # only add the setgid executable if Remote is not handling it
        self.compiled_files = [ExecutableFile(self.program_name)]
def make_no_aslr_wrapper(self, exec_path, output="no_aslr_wrapper"):
    """
    Compile a setgid wrapper that disables ASLR for the wrapped binary.

    Args:
        exec_path: path to the binary the wrapper should exec
        output: filename for the compiled wrapper

    Returns:
        The name of the file generated.
    """
    # the wrapper source ships alongside this module under static/
    wrapper_src = os.path.join(
        os.path.dirname(__file__), "static", "no_aslr_wrapper.c")
    execute(
        ["gcc", "-o", output,
         '-DBINARY_PATH="{}"'.format(exec_path), wrapper_src])
    self.files.append(ExecutableFile(output))
    return output
def remove_instance_state(instance):
    """
    Removes state for an instance that is deployed to a host.
    Includes: service files, deployment directory, users.

    Each step is best-effort: failures are logged and the remaining steps
    still run.
    """
    # Remove the xinetd service definition
    service = instance["service"]
    if service:
        logger.debug("...Removing xinetd service '%s'.", service)
        try:
            os.remove(join(XINETD_SERVICE_PATH, service))
        except FileNotFoundError:
            logger.error("xinetd service definition missing, skipping")

    # Remove the deployed instance directory
    directory = instance["deployment_directory"]
    logger.debug("...Removing deployment directory '%s'.", directory)
    try:
        shutil.rmtree(directory)
    except FileNotFoundError:
        logger.error("deployment directory missing, skipping")

    # Kill any active instance processes
    # (fixed: the original used an f-string with no placeholders)
    logger.debug("...Killing any instance processes")
    try:
        # NOTE(review): shell=True with an interpolated username; the value
        # comes from our own deployment metadata, but a list-based pipeline
        # would be safer against odd usernames.
        subprocess.check_output(f"pgrep {instance['user']} | xargs -r kill -15", shell=True)
    except CalledProcessError as e:
        logger.error(
            "error killing processes, skipping - {}".format(str(e))
        )

    # Remove the problem user
    user = instance["user"]
    logger.debug("...Removing problem user '%s'.", user)
    try:
        execute(["userdel", user])
    except RunProcessError as e:
        logger.error(
            "error removing problem user, skipping - {}".format(str(e))
        )
def remove_instances(path, instance_list):
    """
    Remove all files under deployment directory and metadata for a
    given list of instances (systemd-managed deployments).
    """
    problem_instances = get_all_problem_instances(path)
    deployment_json_dir = join(DEPLOYED_ROOT, path)
    for instance in problem_instances:
        instance_number = instance["instance_number"]
        if instance["instance_number"] in instance_list:
            logger.debug("Removing instance {} of '{}'.".format(
                instance_number, path))
            directory = instance["deployment_directory"]
            user = instance["user"]
            service = instance["service"]
            # renamed from 'socket' to avoid shadowing the socket module
            socket_unit = instance["socket"]
            deployment_json_path = join(deployment_json_dir,
                                        "{}.json".format(instance_number))

            logger.debug("...Removing systemd service '%s'.", service)
            if socket_unit is not None:
                execute(["systemctl", "stop", socket_unit], timeout=60)
                execute(["systemctl", "disable", socket_unit], timeout=60)
                os.remove(join(SYSTEMD_SERVICE_PATH, socket_unit))
            try:
                # the service may not be running; a failed stop is fine
                execute(["systemctl", "stop", service], timeout=60)
            except RunProcessError:
                pass
            execute(["systemctl", "disable", service], timeout=60)
            os.remove(join(SYSTEMD_SERVICE_PATH, service))

            logger.debug("...Removing deployment directory '%s'.", directory)
            shutil.rmtree(directory)
            os.remove(deployment_json_path)

            logger.debug("...Removing problem user '%s'.", user)
            execute(["userdel", user])
def install_user_service(service_file, socket_file):
    """
    Installs the service file and socket file into the xinetd service
    directory, sets the service to start on boot, and starts the service now.

    Args:
        service_file: The path to the systemd service file to install
        socket_file: The path to the systemd socket file to install
    """
    # nothing to install for challenges without a service definition
    if service_file is None:
        return
    service_name = os.path.basename(service_file)
    logger.debug("...Installing user service '%s'.", service_name)
    # drop the definition into xinetd's service directory and reload
    destination = os.path.join(XINETD_SERVICE_PATH, service_name)
    shutil.copy2(service_file, destination)
    execute(["service", "xinetd", "restart"], timeout=60)
def remove_instance_state(instance):
    """
    Removes state for an instance that is deployed to a host.
    Includes: service files, deployment directory, users.

    Each step logs and continues on failure so partial state is still
    cleaned up.
    """
    service = instance["service"]
    directory = instance["deployment_directory"]
    user = instance["user"]

    # Tear down the xinetd service definition, if one was installed
    if service:
        logger.debug("...Removing xinetd service '%s'.", service)
        try:
            os.remove(join(XINETD_SERVICE_PATH, service))
        except FileNotFoundError:
            logger.error("xinetd service definition missing, skipping")

    # Tear down the deployed files
    logger.debug("...Removing deployment directory '%s'.", directory)
    try:
        shutil.rmtree(directory)
    except FileNotFoundError:
        logger.error("deployment directory missing, skipping")

    # Terminate any processes still bound to the instance's port
    port = instance.get("port", None)
    if port:
        logger.debug(f"...Killing any processes running on port {port}")
        try:
            execute(["fuser", "-k", "-TERM", "-n", "tcp", str(port)])
        except RunProcessError as e:
            logger.error("error killing processes, skipping - {}".format(
                str(e)))

    # Finally drop the per-instance user account
    logger.debug("...Removing problem user '%s'.", user)
    try:
        execute(["userdel", user])
    except RunProcessError as e:
        logger.error("error removing problem user, skipping - {}".format(
            str(e)))
def remove_instances(path, instance_list):
    """
    Remove all files under deployment directory and metadata for a
    given list of instances (systemd-managed deployments).
    """
    problem_instances = get_all_problem_instances(path)
    deployment_json_dir = join(DEPLOYED_ROOT, path)
    for instance in problem_instances:
        instance_number = instance["instance_number"]
        if instance["instance_number"] in instance_list:
            logger.debug("Removing instance {} of '{}'.".format(instance_number, path))
            directory = instance["deployment_directory"]
            user = instance["user"]
            service = instance["service"]
            # renamed from 'socket' to avoid shadowing the socket module
            sock_unit = instance["socket"]
            deployment_json_path = join(deployment_json_dir, "{}.json".format(instance_number))

            logger.debug("...Removing systemd service '%s'.", service)
            if sock_unit is not None:
                execute(["systemctl", "stop", sock_unit], timeout=60)
                execute(["systemctl", "disable", sock_unit], timeout=60)
                os.remove(join(SYSTEMD_SERVICE_PATH, sock_unit))
            try:
                # the service may not be running; a failed stop is fine
                execute(["systemctl", "stop", service], timeout=60)
            except RunProcessError:
                pass
            execute(["systemctl", "disable", service], timeout=60)
            os.remove(join(SYSTEMD_SERVICE_PATH, service))

            logger.debug("...Removing deployment directory '%s'.", directory)
            shutil.rmtree(directory)
            os.remove(deployment_json_path)

            logger.debug("...Removing problem user '%s'.", user)
            execute(["userdel", user])
def get_instance_status(instance):
    """Return a status dict (iid, port, flag, connection, service) for a
    deployed instance whose unit runs under the problem user's systemd.
    """
    status = {
        "iid": instance["iid"],
        "port": instance["port"] if "port" in instance else None,
        "flag": instance["flag"]
    }
    status["connection"] = False
    if "port" in instance:
        try:
            # Context manager closes the socket even when connect() raises
            # (the original leaked the fd on a failed connect).
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.connect(("localhost", instance["port"]))
            status["connection"] = True
        except ConnectionRefusedError:
            pass
    # 'systemctl --user is-failed' run as the problem user; exit code 1 is
    # treated as "unit not failed" -> healthy
    result = execute("sudo su -l {} bash -c 'systemctl --user is-failed {}'".format(instance["user"], instance["service"]), allow_error=True)
    status["service"] = result.return_code == 1
    # A refusing port overrides systemd's view of the unit
    if status["port"] is not None and not status["connection"]:
        status["service"] = False
    return status
def install_user_service(service_file):
    """
    Installs the service file into the systemd service directory, sets the
    service to start on boot, and starts the service now.

    Args:
        service_file: The path to the systemd service file to install
    """
    unit_name = os.path.basename(service_file)

    # install the unit into systemd's service directory
    shutil.copy2(service_file, os.path.join(SYSTEMD_SERVICE_PATH, unit_name))

    # pick up the new unit, enable it at boot, and (re)start it now
    execute(["systemctl", "daemon-reload"], timeout=60)
    execute(["systemctl", "enable", unit_name], timeout=60)
    execute(["systemctl", "restart", unit_name], timeout=60)
def install_user_service(home_directory, user, service_file): """ Installs the service file into the systemd user directory for the provided user, sets the service to start on boot, and starts the service now. Args: home_directory: The home directory for the user provided user: The user that will run the service service_file: The path to the systemd service file to install """ # make user service directory service_dir_path = os.path.join(home_directory, ".config", "systemd", "user") if not os.path.isdir(service_dir_path): os.makedirs(service_dir_path) # make target directory for enabled services default_target_path = os.path.join(service_dir_path, "default.target.wants") if not os.path.isdir(default_target_path): os.makedirs(default_target_path) userpw = getpwnam(user) os.chown(default_target_path, userpw.pw_uid, userpw.pw_gid) # copy service file service_path = os.path.join(service_dir_path, os.path.basename(service_file)) shutil.copy2(service_file, service_path) # enable automatic starting of user services execute("loginctl enable-linger {}".format(user)) execute("systemctl restart user@{}.service".format(userpw.pw_uid), timeout=60) # set environment variable so "su -l problem_user" will correctly populate it # this is due to a known issue with su -l with open(os.path.join(home_directory, ".profile"), "w") as f: f.write("export XDG_RUNTIME_DIR=/run/user/{}\n".format(userpw.pw_uid)) # enable and restart the service execute("su -l {} bash -c 'systemctl --user daemon-reload; systemctl --user enable {}; systemctl --user restart {}'".format( user, os.path.basename(service_file), os.path.basename(service_file)))
def deploy_problems(args):
    """
    Main entrypoint for problem deployment.

    Loads (or builds) the persistent port map, then deploys the requested
    instances of each named problem under a deployment lock. The port map is
    written back out and the lock released even on failure.

    Args:
        args: parsed CLI arguments; uses problem_names, instances,
              num_instances, redeploy, dry, debug and no_restart.
    """
    global shared_config, local_config, port_map
    shared_config = get_shared_config()
    local_config = get_local_config()
    need_restart_xinetd = False  # NOTE(review): assigned but never read here
    try:
        user = getpwnam(shared_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    shared_config.default_user)
        create_user(shared_config.default_user)

    problem_names = args.problem_names
    if len(problem_names) == 1 and problem_names[0] == 'all':
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v['unique_name'] for k, v in get_all_problems().items()
        ]

    # Attempt to load the port_map from file
    try:
        port_map_path = join(SHARED_ROOT, 'port_map.json')
        with open(port_map_path, 'r') as f:
            port_map = json.load(f)
            # JSON keys are strings: each one is the repr() of a
            # (problem_unique_name, instance_number) tuple
            port_map = {literal_eval(k): v for k, v in port_map.items()}
    except FileNotFoundError:
        # If it does not exist, create it from the currently deployed instances
        for path, problem in get_all_problems().items():
            for instance in get_all_problem_instances(path):
                port_map[(problem["unique_name"],
                          instance["instance_number"])] = instance.get("port", None)
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)
    except IOError:
        logger.error(f"Error loading port map from {port_map_path}")
        raise

    acquire_lock()

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)
            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                # static-flag problems only ever get a single shared instance
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy or is_static_flag:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location, instances=instances_to_deploy,
                               test=args.dry, debug=args.debug,
                               restart_xinetd=False)
            else:
                logger.info("No additional instances to deploy for '%s'.",
                            problem_object["unique_name"])
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)
        # Write out updated port map
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)
        release_lock()
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment.

    Expands bundles into problem lists, records already-deployed instances
    and their ports, then deploys the requested instances under a lock file.

    Args:
        args: parsed CLI arguments
        config: deployment configuration (becomes the global deploy_config)
    """
    global deploy_config, port_map, inv_port_map
    deploy_config = config
    need_restart_xinetd = False
    try:
        user = getpwnam(deploy_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1
                                                  or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple problems or instances."
        )
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warning(
            "Overriding deploy_secret with user supplied secret '%s'.",
            args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        # the given paths are bundles; expand them into their problem lists
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(
                    bundle_path, absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot deploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                # remove already deployed instances
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))
            if args.dry and isdir(problem_name):
                # dry run against a source directory
                need_restart_xinetd = deploy_problem(
                    problem_name,
                    instances=todo_instance_list,
                    test=args.dry,
                    deployment_directory=args.deployment_directory,
                    debug=args.debug,
                    restart_xinetd=False)
            elif isdir(join(get_problem_root(problem_name, absolute=True))):
                # installed problem package
                need_restart_xinetd = deploy_problem(
                    join(get_problem_root(problem_name, absolute=True)),
                    instances=todo_instance_list,
                    test=args.dry,
                    deployment_directory=args.deployment_directory,
                    debug=args.debug,
                    restart_xinetd=False)
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart and need_restart_xinetd:
            execute(["service", "xinetd", "restart"], timeout=60)
        logger.debug("Releasing lock file %s", lock_file)
        # dry runs never created side effects worth locking against again
        if not args.dry:
            os.remove(lock_file)
def deploy_problem(problem_directory, instances=[0], test=False,
                   deployment_directory=None, debug=False):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances or not.
              Defaults to False.
        deployment_directory: If not None, the challenge will be
                              deployed here instead of their home directory
        debug: keep staging directories for inspection instead of removing them
    """
    # NOTE(review): mutable default argument 'instances=[0]' — never mutated
    # here, but fragile if that ever changes.
    global current_problem, current_instance

    problem_object = get_problem(problem_directory)

    current_problem = problem_object["name"]

    instance_list = []

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    # Phase 1: generate every requested instance into a staging directory
    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"],
            instance_number=instance_number)
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object,
            problem_directory,
            instance_number,
            staging_directory,
            deployment_directory=deployment_directory)
        instance_list.append((instance_number, instance))

    deployment_json_dir = join(DEPLOYED_ROOT,
                               sanitize_name(problem_object["name"]))
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    # Phase 2: install each staged instance (or clean it up in test mode)
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]
        deployment_directory = instance["deployment_directory"]

        logger.debug("...Copying problem files %s to deployment directory %s.",
                     instance["files"], deployment_directory)
        deploy_files(problem_path, deployment_directory, instance["files"],
                     problem.user, problem.__class__)

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            #This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError as e:
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            # test instances keep their metadata in the staging directory
            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            install_user_service(instance["service_file"],
                                 instance["socket_file"])

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        # NOTE(review): 'unique' is computed but never used below — presumably
        # a leftover from an earlier flag/seed scheme; confirm before removing.
        unique = problem_object["name"] + problem_object["author"] + str(
            instance_number) + deploy_config.deploy_secret

        deployment_info = {
            "user":
            problem.user,
            "deployment_directory":
            deployment_directory,
            "service":
            None if instance["service_file"] is None else os.path.basename(
                instance["service_file"]),
            "socket":
            None if instance["socket_file"] is None else os.path.basename(
                instance["socket_file"]),
            "server":
            problem.server,
            "description":
            problem.description,
            "flag":
            problem.flag,
            "flag_sha1":
            problem.flag_sha1,
            "instance_number":
            instance_number,
            "should_symlink":
            not isinstance(problem, Service) and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]]
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)

        # persist per-instance metadata for status/undeploy tooling
        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(
                json.dumps(deployment_info, indent=4, separators=(", ", ": ")))

        logger.debug(
            "The instance deployment information can be found at '%s'.",
            instance_info_path)

    logger.info("Problem instances %s were successfully deployed for '%s'.",
                instances, problem_object["name"])
def install_user_service(service_file, socket_file):
    """
    Installs the service file and socket file into the systemd service
    directory, sets the service to start on boot, and starts the service now.

    Args:
        service_file: The path to the systemd service file to install
        socket_file: The path to the systemd socket file to install;
                     may be None for services without socket activation
    """
    service_name = os.path.basename(service_file)
    logger.debug("...Installing user service '%s'.", service_name)

    # copy service file
    service_path = os.path.join(SYSTEMD_SERVICE_PATH, service_name)
    shutil.copy2(service_file, service_path)

    if socket_file is not None:
        socket_name = os.path.basename(socket_file)
        # copy socket file
        socket_path = os.path.join(SYSTEMD_SERVICE_PATH, socket_name)
        shutil.copy2(socket_file, socket_path)
        execute(["systemctl", "enable", socket_name], timeout=60)
        # if this is a redeployment of a web challenge, it is necessary to
        # stop all instances of the running service before restarting the
        # socket.
        try:
            execute(["systemctl", "stop", service_name], timeout=60)
        except RunProcessError:
            pass
        execute(["systemctl", "restart", socket_name], timeout=60)

    execute(["systemctl", "daemon-reload"], timeout=60)
    execute(["systemctl", "enable", service_name], timeout=60)

    # socket-activated services are started on demand; only start directly
    # when there is no socket unit
    if socket_file is None:
        execute(["systemctl", "restart", service_name], timeout=60)
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment.

    Resolves each requested name to an installed package (packaging and
    installing source directories on the fly when needed), then deploys the
    requested instances under a lock file.

    Args:
        args: parsed CLI arguments
        config: deployment configuration (becomes the global deploy_config)
    """
    global deploy_config, port_map, inv_port_map
    deploy_config = config
    need_restart_xinetd = False
    try:
        user = getpwnam(deploy_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1
                                                  or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple problems or instances."
        )
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warning(
            "Overriding deploy_secret with user supplied secret '%s'.",
            args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        # the given paths are bundles; expand them into their problem lists
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(
                    bundle_path, absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot deploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if isdir(get_problem_root(problem_name, absolute=True)):
                # problem_name is already an installed package
                deploy_location = get_problem_root(problem_name, absolute=True)
            elif isdir(problem_name) and args.dry:
                # dry run - avoid installing package
                deploy_location = problem_name
            elif isdir(problem_name):
                # problem_name is a source dir - convert to .deb and install
                try:
                    if not os.path.isdir(TEMP_DEB_DIR):
                        os.mkdir(TEMP_DEB_DIR)
                    generated_deb_path = package_problem(
                        problem_name, out_path=TEMP_DEB_DIR)
                except FatalException:
                    logger.error("An error occurred while packaging %s.",
                                 problem_name)
                    raise
                try:
                    # reinstall flag ensures package will be overwritten if
                    # version is the same, maintaining previous 'dpkg -i'
                    # behavior
                    subprocess.run(
                        'apt-get install --reinstall {}'.format(
                            generated_deb_path),
                        shell=True,
                        check=True,
                        stdout=subprocess.PIPE)
                except subprocess.CalledProcessError:
                    logger.error(
                        "An error occurred while installing problem packages.")
                    raise FatalException
                deploy_location = get_problem_root_hashed(
                    get_problem(problem_name), absolute=True)
            else:
                logger.error(
                    "'%s' is neither an installed package, nor a valid problem directory",
                    problem_name)
                raise FatalException

            # Avoid redeploying already-deployed instances
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            need_restart_xinetd = deploy_problem(
                deploy_location,
                instances=todo_instance_list,
                test=args.dry,
                deployment_directory=args.deployment_directory,
                debug=args.debug,
                restart_xinetd=False)
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart and need_restart_xinetd:
            execute(["service", "xinetd", "restart"], timeout=60)
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
def deploy_problem(problem_directory, instances=None, test=False, deployment_directory=None, debug=False):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances or not. Defaults to False.
        deployment_directory: If not None, the challenge will be deployed here
            instead of their home directory
        debug: If True, keep the staging directory after deployment instead of
            removing it (it can still be cleaned with "shell_manager clean").
    """
    # Avoid a mutable default argument; the effective default is still [0].
    if instances is None:
        instances = [0]
    global current_problem, current_instance

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["name"]

    instance_list = []

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    # Generate every requested instance into its own staging directory first,
    # so a generation failure aborts before anything is deployed.
    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"], instance_number=instance_number)
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(problem_object, problem_directory,
                                     instance_number, staging_directory,
                                     deployment_directory=deployment_directory)
        instance_list.append((instance_number, instance))

    deployment_json_dir = join(DEPLOYED_ROOT, sanitize_name(problem_object["name"]))
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]

        deployment_directory = instance["deployment_directory"]

        logger.debug("...Copying problem files %s to deployment directory %s.",
                     instance["files"], deployment_directory)
        deploy_files(problem_path, deployment_directory, instance["files"],
                     problem.user, problem.__class__)

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            # This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError:
                # Best-effort cleanup; the test user may have no processes.
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            # Test instances write their metadata into the staging directory.
            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            install_user_service(instance["service_file"], instance["socket_file"])

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        deployment_info = {
            "user": problem.user,
            "deployment_directory": deployment_directory,
            "service": os.path.basename(instance["service_file"]),
            "socket": None if instance["socket_file"] is None else os.path.basename(instance["socket_file"]),
            "server": problem.server,
            "description": problem.description,
            "flag": problem.flag,
            "instance_number": instance_number,
            "should_symlink": not isinstance(problem, Service) and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]]
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)

        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(json.dumps(deployment_info, indent=4, separators=(", ", ": ")))

        logger.debug("The instance deployment information can be found at '%s'.",
                     instance_info_path)

    logger.info("Problem instances %s were successfully deployed for '%s'.",
                instances, problem_object["name"])
def deploy_problem(problem_directory, instances=1, test=False, deployment_directory=None):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The number of instances to deploy. Defaults to 1.
        test: Whether the instances are test instances or not. Defaults to False.
        deployment_directory: If not None, the challenge will be deployed here
            instead of their home directory
    """
    global current_problem, current_instance

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["name"]

    instance_list = []

    # Generate all instances up front so a failure aborts before deployment.
    for instance_number in range(instances):
        current_instance = instance_number
        print('Generating instance {} of "{}".'.format(instance_number, problem_object["name"]))
        staging_directory = generate_staging_directory()
        if test and deployment_directory is None:
            deployment_directory = os.path.join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object,
            problem_directory,
            instance_number,
            staging_directory,
            deployment_directory=deployment_directory,
        )
        instance_list.append(instance)

    deployment_json_dir = os.path.join(DEPLOYED_ROOT, sanitize_name(problem_object["name"]))
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in enumerate(instance_list):
        print('Deploying instance {} of "{}".'.format(instance_number, problem_object["name"]))
        problem_path = os.path.join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]

        deployment_directory = instance["deployment_directory"]
        deploy_files(problem_path, deployment_directory, instance["files"], problem.user)

        if test:
            print("Description: {}".format(problem.description))
            print("Deployment Directory: {}".format(deployment_directory))

            # This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError:
                # Best-effort cleanup; the test user may have no processes.
                pass

            execute(["userdel", problem.user])
            shutil.rmtree(instance["home_directory"])

            # Test instances write their metadata into the staging directory.
            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            install_user_service(instance["service_file"])

            # delete staging directory
            shutil.rmtree(instance["staging_directory"])

        # Derive a deterministic instance id from the problem identity and
        # the deployment secret.
        unique = problem_object["name"] + problem_object["author"] + str(instance_number) + deploy_config.DEPLOY_SECRET
        iid = md5(unique.encode("utf-8")).hexdigest()

        deployment_info = {
            "user": problem.user,
            "service": os.path.basename(instance["service_file"]),
            "server": problem.server,
            "description": problem.description,
            "flag": problem.flag,
            "iid": iid,
            "instance_number": instance_number,
            "files": [f.to_dict() for f in problem.files],
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port

        instance_info_path = os.path.join(deployment_json_dir, "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(json.dumps(deployment_info, indent=4, separators=(", ", ": ")))

        print("The instance deployment information can be found at {}.".format(instance_info_path))
def deploy_problems(args):
    """
    Main entrypoint for problem deployment.

    Deploys the requested instances of each named problem (or of all installed
    problems when the single name "all" is given), then restarts xinetd if any
    deployment installed a service, writes the updated port map, and releases
    the deployment lock.
    """
    global FLAG_FMT
    if args.flag_format:
        FLAG_FMT = args.flag_format
        logger.info(f"Deploying with custom flag format: {FLAG_FMT}")

    shared_config, local_config, port_map = deploy_init(args.containerize)

    need_restart_xinetd = False

    # Ensure the default user exists before deploying anything.
    try:
        getpwnam(shared_config.default_user)
    except KeyError:
        logger.info(
            "default_user '%s' does not exist. Creating the user now.",
            shared_config.default_user,
        )
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    if args.containerize and (len(problem_names) > 1 or len(instance_list) > 1):
        logger.error("can only deploy a single instance per container")
        return

    acquire_lock()
    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)
            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)

            # Static-flag problems only ever get a single instance.
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                # deploy_problem returns whether it installed an xinetd
                # service; accumulate so xinetd is restarted once, below,
                # only when some deployment actually needs it.
                need_restart_xinetd = deploy_problem(
                    source_location,
                    instances=instances_to_deploy,
                    test=args.dry,
                    debug=args.debug,
                    restart_xinetd=False,
                    containerize=args.containerize) or need_restart_xinetd
            else:
                logger.info(
                    "No additional instances to deploy for '%s'.",
                    problem_object["unique_name"],
                )
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart and need_restart_xinetd:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(PORT_MAP_PATH, "w") as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()
def deploy_problem(
    problem_directory,
    instances=None,
    test=False,
    deployment_directory=None,
    debug=False,
    restart_xinetd=True,
    containerize=False,
):
    """
    Deploys the problem specified in problem_directory.

    Args:
        problem_directory: The directory storing the problem
        instances: The list of instances to deploy. Defaults to [0]
        test: Whether the instances are test instances. Defaults to False.
        deployment_directory: If not None, the challenge will be deployed here
            instead of their home directory
        debug: Output debug info
        restart_xinetd: Whether to restart xinetd upon deployment of this set
            of instances for a problem. Defaults True as used by tests, but
            typically is used with False from deploy_problems, which takes in
            multiple problems.
        containerize: Deployment is occuring in a container. This flag is used
            by containerize and external tools like cmgr that deploy
            challenges in an isolated environment.

    Returns:
        bool: whether xinetd still needs restarting by the caller (True only
        when a service file was installed and it was not restarted here).
    """
    if instances is None:
        instances = [0]
    global current_problem, current_instance, port_map

    problem_object = get_problem(problem_directory)
    current_problem = problem_object["unique_name"]

    instance_list = []

    need_restart_xinetd = False

    logger.debug("Beginning to deploy problem '%s'.", problem_object["name"])

    # Reinstall the problem's deb package so apt pulls in any dependencies
    # that may have been removed since the problem was installed.
    problem_deb_location = (
        os.path.join(DEB_ROOT, sanitize_name(problem_object["unique_name"])) +
        ".deb")
    try:
        # NOTE(review): shell=True with an interpolated path; the name is
        # produced by sanitize_name upstream — keep in mind if that changes.
        subprocess.run(
            "DEBIAN_FRONTEND=noninteractive apt-get -y install " +
            f"--reinstall {problem_deb_location}",
            shell=True,
            check=True,
            stdout=subprocess.PIPE,
        )
    except subprocess.CalledProcessError:
        logger.error("An error occurred while installing problem packages.")
        raise FatalException
    logger.debug("Reinstalled problem's deb package to fulfill dependencies")

    # Generate every instance into a staging directory first so a failure
    # aborts before anything reaches the live deployment locations.
    for instance_number in instances:
        current_instance = instance_number
        staging_directory = generate_staging_directory(
            problem_name=problem_object["name"],
            instance_number=instance_number)
        if test and deployment_directory is None:
            deployment_directory = join(staging_directory, "deployed")

        instance = generate_instance(
            problem_object,
            problem_directory,
            instance_number,
            staging_directory,
            deployment_directory=deployment_directory,
        )
        instance_list.append((instance_number, instance))

    deployment_json_dir = join(
        DEPLOYED_ROOT,
        "{}-{}".format(sanitize_name(problem_object["name"]),
                       get_pid_hash(problem_object, True)),
    )
    if not os.path.isdir(deployment_json_dir):
        os.makedirs(deployment_json_dir)

    # ensure that the deployed files are not world-readable
    os.chmod(DEPLOYED_ROOT, 0o750)

    # all instances generated without issue. let's do something with them
    for instance_number, instance in instance_list:
        problem_path = join(instance["staging_directory"], PROBLEM_FILES_DIR)
        problem = instance["problem"]

        deployment_directory = instance["deployment_directory"]

        logger.debug(
            "...Copying problem files %s to deployment directory %s.",
            instance["files"],
            deployment_directory,
        )
        deploy_files(
            problem_path,
            deployment_directory,
            instance["files"],
            problem.user,
            problem.__class__,
        )

        if test:
            logger.info("Test instance %d information:", instance_number)
            logger.info("...Description: %s", problem.description)
            logger.info("...Deployment Directory: %s", deployment_directory)

            logger.debug("Cleaning up test instance side-effects.")
            logger.debug("...Killing user processes.")
            # This doesn't look great.
            try:
                execute("killall -u {}".format(problem.user))
                sleep(0.1)
            except RunProcessError:
                # Best-effort cleanup; the test user may have no processes.
                pass

            logger.debug("...Removing test user '%s'.", problem.user)
            execute(["userdel", problem.user])

            # Test instances write their metadata into the staging directory.
            deployment_json_dir = instance["staging_directory"]
        else:
            # copy files to the web root
            logger.debug("...Copying web accessible files: %s",
                         instance["web_accessible_files"])
            for source, destination in instance["web_accessible_files"]:
                if not os.path.isdir(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))
                shutil.copy2(source, destination)

            if instance["service_file"] is not None:
                install_user_service(instance["service_file"],
                                     instance["socket_file"])
                # set to true, this will signal restart xinetd
                need_restart_xinetd = True

            # keep the staging directory if run with debug flag
            # this can still be cleaned up by running "shell_manager clean"
            if not debug:
                shutil.rmtree(instance["staging_directory"])

        deployment_info = {
            "user": problem.user,
            "deployment_directory": deployment_directory,
            "service": None if instance["service_file"] is None else os.path.basename(instance["service_file"]),
            "socket": None if instance["socket_file"] is None else os.path.basename(instance["socket_file"]),
            "server": problem.server,
            "description": problem.description,
            "hints": problem.hints,
            "flag": problem.flag,
            "flag_sha1": problem.flag_sha1,
            "instance_number": instance_number,
            "should_symlink": not isinstance(problem, Service) and len(instance["files"]) > 0,
            "files": [f.to_dict() for f in instance["files"]],
            "docker_challenge": isinstance(problem, DockerChallenge)
        }

        if isinstance(problem, Service):
            deployment_info["port"] = problem.port
            logger.debug("...Port %d has been allocated.", problem.port)

        # pass along image digest so webui can launch the correct image
        if isinstance(problem, DockerChallenge):
            deployment_info["instance_digest"] = problem.image_digest
            deployment_info["port_info"] = {
                n: p.dict() for n, p in problem.ports.items()
            }

        port_map[(current_problem, instance_number)] = deployment_info.get("port", None)

        instance_info_path = os.path.join(deployment_json_dir,
                                          "{}.json".format(instance_number))
        with open(instance_info_path, "w") as f:
            f.write(
                json.dumps(deployment_info, indent=4, separators=(", ", ": ")))

        logger.debug(
            "The instance deployment information can be found at '%s'.",
            instance_info_path,
        )

    # restart xinetd
    if restart_xinetd and need_restart_xinetd:
        execute(["service", "xinetd", "restart"], timeout=60)

    logger.info(
        "Problem instances %s were successfully deployed for '%s'.",
        instances,
        problem_object["unique_name"],
    )
    return need_restart_xinetd