def deploy_init(contain):
    global shared_config, local_config, port_map, containerize
    containerize = contain
    shared_config = get_shared_config()
    local_config = get_local_config()

    # Attempt to load the port_map from file
    try:
        with open(PORT_MAP_PATH, "r") as f:
            port_map = json.load(f)
            port_map = {literal_eval(k): v for k, v in port_map.items()}
    except FileNotFoundError:
        # If it does not exist, create it from the deployed instances
        port_map = {}
        for path, problem in get_all_problems().items():
            for instance in get_all_problem_instances(path):
                port_map[(problem["unique_name"],
                          instance["instance_number"])] = instance.get(
                              "port", None)
        with open(PORT_MAP_PATH, "w") as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)
    except IOError:
        logger.error(f"Error loading port map from {PORT_MAP_PATH}")
        raise

    return shared_config, local_config, port_map

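# NOTE: JSON objects cannot have tuple keys, which is why the port map is
# stringified with repr() on write and rebuilt with literal_eval() on read.
# A minimal, self-contained sketch of that round trip (the file name and
# sample data below are illustrative, not part of the real deployment):
def _port_map_roundtrip_demo(path="port_map_demo.json"):
    import json
    from ast import literal_eval

    port_map = {("my-problem", 0): 4000, ("my-problem", 1): None}

    # Write: JSON keys must be strings, so a tuple key becomes e.g.
    # "('my-problem', 0)"
    with open(path, "w") as f:
        json.dump({repr(k): v for k, v in port_map.items()}, f)

    # Read: literal_eval() safely parses the stringified tuples back into keys
    with open(path, "r") as f:
        restored = {literal_eval(k): v for k, v in json.load(f).items()}

    assert restored == port_map
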
def remove_instances(path, instance_list):
    """
    Remove all files under the deployment directory and metadata
    for a given list of instances
    """
    path = path.lower().replace(" ", "-")
    problem_instances = get_all_problem_instances(path)
    deployment_json_dir = join(DEPLOYED_ROOT, path)
    for instance in problem_instances:
        instance_number = instance["instance_number"]
        if instance_number in instance_list:
            logger.debug("Removing instance {} of '{}'.".format(
                instance_number, path))

            directory = instance["deployment_directory"]
            user = instance["user"]
            service = instance["service"]
            socket = instance["socket"]
            deployment_json_path = join(deployment_json_dir,
                                        "{}.json".format(instance_number))

            logger.debug("...Removing xinetd service '%s'.", service)
            os.remove(join(XINETD_SERVICE_PATH, service))

            logger.debug("...Removing deployment directory '%s'.", directory)
            shutil.rmtree(directory)
            os.remove(deployment_json_path)

            logger.debug("...Removing problem user '%s'.", user)
            execute(["userdel", user])

    if problem_instances:
        execute(["service", "xinetd", "restart"], timeout=60)

def remove_instances(problem_name, instances_to_remove):
    """Remove all files and metadata for a given list of instances."""
    deployed_instances = get_all_problem_instances(problem_name)
    deployment_json_dir = join(DEPLOYED_ROOT, problem_name)
    for instance in deployed_instances:
        instance_number = instance["instance_number"]
        if instance_number in instances_to_remove:
            logger.debug(f"Removing instance {instance_number} of {problem_name}")

            # Remove the xinetd service definition
            service = instance["service"]
            if service:
                logger.debug("...Removing xinetd service '%s'.", service)
                try:
                    os.remove(join(XINETD_SERVICE_PATH, service))
                except FileNotFoundError:
                    logger.error("xinetd service definition missing, skipping")

            # Remove the deployed instance directory
            directory = instance["deployment_directory"]
            logger.debug("...Removing deployment directory '%s'.", directory)
            try:
                shutil.rmtree(directory)
            except FileNotFoundError:
                logger.error("deployment directory missing, skipping")

            # Kill any active problem processes
            if instance.get("port", None):
                port = instance["port"]
                logger.debug(f"...Killing any processes running on port {port}")
                try:
                    execute(["fuser", "-k", "-TERM", "-n", "tcp", str(port)])
                except RunProcessError as e:
                    logger.error(
                        "error killing processes, skipping - {}".format(str(e))
                    )

            # Remove the problem user
            user = instance["user"]
            logger.debug("...Removing problem user '%s'.", user)
            try:
                execute(["userdel", user])
            except RunProcessError as e:
                logger.error(
                    "error removing problem user, skipping - {}".format(str(e))
                )

            # Remove the internal instance metadata
            deployment_json_path = join(
                deployment_json_dir, "{}.json".format(instance_number)
            )
            logger.debug("...Removing instance metadata '%s'.",
                         deployment_json_path)
            os.remove(deployment_json_path)

    logger.info(
        "Problem instances %s were successfully removed for '%s'.",
        instances_to_remove,
        problem_name,
    )

def remove_instances(path, instance_list):
    """
    Remove all files under the deployment directory and metadata
    for a given list of instances
    """
    problem_instances = get_all_problem_instances(path)
    deployment_json_dir = join(DEPLOYED_ROOT, path)
    for instance in problem_instances:
        instance_number = instance["instance_number"]
        if instance_number in instance_list:
            logger.debug("Removing instance {} of '{}'.".format(
                instance_number, path))

            directory = instance["deployment_directory"]
            user = instance["user"]
            service = instance["service"]
            socket = instance["socket"]
            deployment_json_path = join(deployment_json_dir,
                                        "{}.json".format(instance_number))

            logger.debug("...Removing xinetd service '%s'.", service)
            os.remove(join(XINETD_SERVICE_PATH, service))
            execute(["service", "xinetd", "restart"], timeout=60)

            logger.debug("...Removing deployment directory '%s'.", directory)
            shutil.rmtree(directory)
            os.remove(deployment_json_path)

            logger.debug("...Removing problem user '%s'.", user)
            execute(["userdel", user])

def remove_instances(problem_name, instances_to_remove):
    """Remove all files and metadata for a given list of instances."""
    deployed_instances = get_all_problem_instances(problem_name)
    deployment_json_dir = join(DEPLOYED_ROOT, problem_name)
    for instance in deployed_instances:
        instance_number = instance["instance_number"]
        if instance_number in instances_to_remove:
            logger.debug(
                f"Removing instance {instance_number} of {problem_name}")

            containerize = instance.get("containerize", False)
            if not containerize:
                remove_instance_state(instance)

            # Remove the internal instance metadata
            deployment_json_path = join(deployment_json_dir,
                                        "{}.json".format(instance_number))
            logger.debug("...Removing instance metadata '%s'.",
                         deployment_json_path)
            os.remove(deployment_json_path)

    logger.info(
        "Problem instances %s were successfully removed for '%s'.",
        instances_to_remove,
        problem_name,
    )

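# remove_instance_state() is not shown in this section. A plausible sketch,
# assuming it consolidates the per-instance teardown steps from the other
# remove_instances() variants (xinetd service file, deployment directory,
# problem user) - the helper's exact body is inferred, not confirmed:
def remove_instance_state(instance):
    """Best-effort removal of an instance's on-disk and system state."""
    service = instance["service"]
    if service:
        logger.debug("...Removing xinetd service '%s'.", service)
        try:
            os.remove(join(XINETD_SERVICE_PATH, service))
        except FileNotFoundError:
            logger.error("xinetd service definition missing, skipping")

    directory = instance["deployment_directory"]
    logger.debug("...Removing deployment directory '%s'.", directory)
    try:
        shutil.rmtree(directory)
    except FileNotFoundError:
        logger.error("deployment directory missing, skipping")

    user = instance["user"]
    logger.debug("...Removing problem user '%s'.", user)
    try:
        execute(["userdel", user])
    except RunProcessError as e:
        logger.error("error removing problem user, skipping - {}".format(str(e)))
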
def remove_instances(problem_name, instances_to_remove):
    """Remove all files and metadata for a given list of instances."""
    deployed_instances = get_all_problem_instances(problem_name)
    deployment_json_dir = join(DEPLOYED_ROOT, problem_name)
    for instance in deployed_instances:
        instance_number = instance["instance_number"]
        if instance_number in instances_to_remove:
            logger.debug(
                f"Removing instance {instance_number} of {problem_name}")

            service = instance["service"]
            if service:
                logger.debug("...Removing xinetd service '%s'.", service)
                os.remove(join(XINETD_SERVICE_PATH, service))

            directory = instance["deployment_directory"]
            logger.debug("...Removing deployment directory '%s'.", directory)
            shutil.rmtree(directory)

            user = instance["user"]
            logger.debug("...Removing problem user '%s'.", user)
            execute(["userdel", user])

            deployment_json_path = join(deployment_json_dir,
                                        "{}.json".format(instance_number))
            logger.debug("...Removing instance metadata '%s'.",
                         deployment_json_path)
            os.remove(deployment_json_path)

    logger.info("Problem instances %s were successfully removed for '%s'.",
                instances_to_remove, problem_name)

def containerize_problems(args):
    """
    Main entrypoint for problem containerization
    """
    # determine what we are deploying
    problem_names = args.problem_names
    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))
    logger.debug(f"Containerizing: {problem_names} {instance_list}")

    # build base images required
    ensure_base_images()

    deploy_init(contain=True)

    flag_fmt = args.flag_format if args.flag_format else FLAG_FMT

    for name in problem_names:
        if not os.path.isdir(get_problem_root(name, absolute=True)):
            logger.error(f"'{name}' is not an installed problem")
            continue
        logger.debug(f"Problem : {name}")
        src = get_problem_root(name, absolute=True)
        metadata = get_problem(src)

        cur_instances = [
            i["instance_number"] for i in get_all_problem_instances(name)
        ]
        logger.debug(f"Existing : {cur_instances}")

        origwd = os.getcwd()
        for instance in instance_list:
            if instance in cur_instances:
                logger.warning(f"Instance already deployed: {instance}")
                continue
            logger.debug(f"Instance : {instance}")

            # copy source files to a staging directory and switch to it
            staging = generate_staging_directory(problem_name=name,
                                                 instance_number=instance)
            dst = os.path.join(staging, "_containerize")
            shutil.copytree(src, dst)
            os.chdir(dst)

            # build the image
            containerize(metadata, instance, flag_fmt)

            # return to the original directory
            os.chdir(origwd)

def undeploy_problems(args):
    """
    Main entrypoint for problem undeployment

    Does not remove the installed packages
    (apt-get remove [sanitized name with hash]).

    Does not remove the problem from the web server
    (delete it from the mongo db).
    """
    problem_names = args.problem_names

    if len(problem_names) == 0:
        logger.error("No problem name(s) specified")
        raise FatalException

    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to undeploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]

    acquire_lock()

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue

            instances_to_remove = copy(instance_list)
            deployed_instances = set()
            for instance in get_all_problem_instances(problem_name):
                deployed_instances.add(instance["instance_number"])
            instances_to_remove = list(
                set(instances_to_remove).intersection(deployed_instances))

            if len(instances_to_remove) == 0:
                logger.warning(
                    f"No deployed instances found for {problem_name}")
                continue

            remove_instances(problem_name, instances_to_remove)
    finally:
        execute(["service", "xinetd", "restart"], timeout=60)
        release_lock()

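# acquire_lock()/release_lock() are defined elsewhere in the module. A minimal
# sketch consistent with the explicit deploy.lock handling visible in the
# older deploy_problems()/undeploy_problems() variants below (the exact
# messages are copied from those variants; treat the bodies as assumptions):
def acquire_lock():
    """Create the deployment lock file, failing if one already exists."""
    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot deploy while other deployment in progress. If you believe "
            "this is an error, run 'shell_manager clean'")
        raise FatalException
    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")


def release_lock():
    """Remove the deployment lock file."""
    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    logger.debug("Releasing lock file %s", lock_file)
    os.remove(lock_file)
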
def remove_instances(path, instance_list):
    """
    Remove all files under the deployment directory and metadata
    for a given list of instances
    """
    problem_instances = get_all_problem_instances(path)
    deployment_json_dir = join(DEPLOYED_ROOT, path)
    for instance in problem_instances:
        instance_number = instance["instance_number"]
        if instance_number in instance_list:
            logger.debug("Removing instance {} of '{}'.".format(
                instance_number, path))

            directory = instance["deployment_directory"]
            user = instance["user"]
            service = instance["service"]
            socket = instance["socket"]
            deployment_json_path = join(deployment_json_dir,
                                        "{}.json".format(instance_number))

            logger.debug("...Removing systemd service '%s'.", service)
            if socket is not None:
                execute(["systemctl", "stop", socket], timeout=60)
                execute(["systemctl", "disable", socket], timeout=60)
                os.remove(join(SYSTEMD_SERVICE_PATH, socket))
            try:
                execute(["systemctl", "stop", service], timeout=60)
            except RunProcessError:
                pass
            execute(["systemctl", "disable", service], timeout=60)
            os.remove(join(SYSTEMD_SERVICE_PATH, service))

            logger.debug("...Removing deployment directory '%s'.", directory)
            shutil.rmtree(directory)
            os.remove(deployment_json_path)

            logger.debug("...Removing problem user '%s'.", user)
            execute(["userdel", user])

def undeploy_problems(args, config):
    """
    Main entrypoint for problem undeployment
    """
    problem_names = args.problem_paths

    if args.bundle:
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before undeploying problems, load in already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[problem["name"]] = []
        for instance in get_all_problem_instances(path):
            already_deployed[problem["name"]].append(
                instance["instance_number"])

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot undeploy while other deployment in progress. If you "
            "believe this is an error, run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            problem_root = get_problem_root(problem_name, absolute=True)
            if isdir(problem_root):
                problem = get_problem(problem_root)
                instances = list(
                    filter(lambda x: x in already_deployed[problem["name"]],
                           instance_list))
                if len(instances) == 0:
                    logger.warning(
                        "No deployed instances %s were found for problem '%s'.",
                        instance_list, problem["name"])
                else:
                    logger.debug("Undeploying problem '%s'.", problem["name"])
                    remove_instances(problem_name, instance_list)
                    logger.info(
                        "Problem instances %s were successfully removed from '%s'.",
                        instances, problem["name"])
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)

def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment
    """
    global deploy_config, port_map, inv_port_map
    deploy_config = config

    try:
        user = getpwnam(deploy_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (
            len(args.problem_paths) > 1 or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple "
            "problems or instances.")
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warning(
            "Overriding deploy_secret with user supplied secret '%s'.",
            args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot deploy while other deployment in progress. If you believe "
            "this is an error, run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                # remove already deployed instances
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            if args.dry and isdir(problem_name):
                deploy_problem(problem_name,
                               instances=todo_instance_list,
                               test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            elif isdir(join(get_problem_root(problem_name, absolute=True))):
                deploy_problem(join(
                    get_problem_root(problem_name, absolute=True)),
                               instances=todo_instance_list,
                               test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        if not args.dry:
            os.remove(lock_file)

def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment
    """
    global deploy_config, port_map, inv_port_map
    deploy_config = config

    need_restart_xinetd = False

    try:
        user = getpwnam(deploy_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (
            len(args.problem_paths) > 1 or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple "
            "problems or instances.")
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warning(
            "Overriding deploy_secret with user supplied secret '%s'.",
            args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot deploy while other deployment in progress. If you believe "
            "this is an error, run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if isdir(get_problem_root(problem_name, absolute=True)):
                # problem_name is already an installed package
                deploy_location = get_problem_root(problem_name, absolute=True)
            elif isdir(problem_name) and args.dry:
                # dry run - avoid installing package
                deploy_location = problem_name
            elif isdir(problem_name):
                # problem_name is a source dir - convert to .deb and install
                try:
                    if not os.path.isdir(TEMP_DEB_DIR):
                        os.mkdir(TEMP_DEB_DIR)
                    generated_deb_path = package_problem(problem_name,
                                                         out_path=TEMP_DEB_DIR)
                except FatalException:
                    logger.error("An error occurred while packaging %s.",
                                 problem_name)
                    raise
                try:
                    # reinstall flag ensures package will be overwritten if
                    # version is the same, maintaining previous 'dpkg -i' behavior
                    subprocess.run(
                        'apt-get install --reinstall {}'.format(
                            generated_deb_path),
                        shell=True,
                        check=True,
                        stdout=subprocess.PIPE)
                except subprocess.CalledProcessError:
                    logger.error(
                        "An error occurred while installing problem packages.")
                    raise FatalException
                deploy_location = get_problem_root_hashed(
                    get_problem(problem_name), absolute=True)
            else:
                logger.error(
                    "'%s' is neither an installed package, nor a valid "
                    "problem directory", problem_name)
                raise FatalException

            # Avoid redeploying already-deployed instances
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            need_restart_xinetd = deploy_problem(
                deploy_location,
                instances=todo_instance_list,
                test=args.dry,
                deployment_directory=args.deployment_directory,
                debug=args.debug,
                restart_xinetd=False)
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart and need_restart_xinetd:
            execute(["service", "xinetd", "restart"], timeout=60)

        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)

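# Side note on the apt-get call above: the shell=True string form works, but a
# list argv avoids the shell entirely and stays correct if the generated .deb
# path ever contains spaces or shell metacharacters. A minimal equivalent
# sketch with the same flags and error handling (an alternative, not the
# confirmed implementation):
def _install_deb(generated_deb_path):
    try:
        subprocess.run(
            ["apt-get", "install", "--reinstall", generated_deb_path],
            check=True,
            stdout=subprocess.PIPE)
    except subprocess.CalledProcessError:
        logger.error("An error occurred while installing problem packages.")
        raise FatalException
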
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment
    """
    global deploy_config, port_map
    deploy_config = config

    try:
        user = getpwnam(deploy_config.DEFAULT_USER)
    except KeyError as e:
        print("DEFAULT_USER {} does not exist. Creating now.".format(
            deploy_config.DEFAULT_USER))
        create_user(deploy_config.DEFAULT_USER)

    if args.deployment_directory is not None and (
            len(args.problem_paths) > 1 or args.num_instances > 1):
        raise Exception(
            "Cannot specify deployment directory if deploying multiple "
            "problems or instances.")

    problems = args.problem_paths

    if args.bundle:
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    raise Exception("Could not get bundle.")
        problems = bundle_problems

    # before deploying problems, load in port_map
    for path, problem in get_all_problems().items():
        for instance in get_all_problem_instances(path):
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        raise Exception(
            "Cannot deploy while other deployment in progress. If you believe "
            "this is an error, run 'shell_manager clean'")

    if not args.dry:
        with open(lock_file, "w") as f:
            f.write("1")

    try:
        for path in problems:
            if args.dry and os.path.isdir(path):
                deploy_problem(
                    path,
                    instances=args.num_instances,
                    test=args.dry,
                    deployment_directory=args.deployment_directory)
            elif os.path.isdir(os.path.join(get_problem_root(path,
                                                             absolute=True))):
                deploy_problem(
                    os.path.join(get_problem_root(path, absolute=True)),
                    instances=args.num_instances,
                    test=args.dry,
                    deployment_directory=args.deployment_directory)
            else:
                raise Exception("Problem path {} cannot be found".format(path))
    except Exception as e:
        traceback.print_exc()
    finally:
        if not args.dry:
            os.remove(lock_file)

def deploy_problems(args):
    """
    Main entrypoint for problem deployment
    """
    global FLAG_FMT
    if args.flag_format:
        FLAG_FMT = args.flag_format
        logger.info(f"Deploying with custom flag format: {FLAG_FMT}")

    shared_config, local_config, port_map = deploy_init(args.containerize)

    need_restart_xinetd = False

    try:
        user = getpwnam(shared_config.default_user)
    except KeyError as e:
        logger.info(
            "default_user '%s' does not exist. Creating the user now.",
            shared_config.default_user,
        )
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    if args.containerize and (len(problem_names) > 1 or len(instance_list) > 1):
        logger.error("can only deploy a single instance per container")
        return

    acquire_lock()

    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)

            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location,
                               instances=instances_to_deploy,
                               test=args.dry,
                               debug=args.debug,
                               restart_xinetd=False,
                               containerize=args.containerize)
            else:
                logger.info(
                    "No additional instances to deploy for '%s'.",
                    problem_object["unique_name"],
                )
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(PORT_MAP_PATH, "w") as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()

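# Hypothetical CLI wiring for deploy_problems(). The flag names below are
# illustrative assumptions, but every attribute is one the function actually
# reads (problem_names, num_instances, instances, flag_format, dry, debug,
# redeploy, no_restart, containerize):
def _build_deploy_parser():
    import argparse
    parser = argparse.ArgumentParser(prog="shell_manager deploy")
    parser.add_argument("problem_names", nargs="+")
    parser.add_argument("-n", "--num-instances", dest="num_instances",
                        type=int, default=1)
    parser.add_argument("-i", "--instances", type=int, nargs="*")
    parser.add_argument("--flag-format", dest="flag_format", default=None)
    parser.add_argument("--dry", action="store_true")
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--redeploy", action="store_true")
    parser.add_argument("--no-restart", dest="no_restart", action="store_true")
    parser.add_argument("--containerize", action="store_true")
    return parser

# e.g. deploy_problems(_build_deploy_parser().parse_args(["my-problem", "-n", "2"]))
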
def deploy_problems(args):
    """
    Main entrypoint for problem deployment
    """
    global shared_config, local_config, port_map
    shared_config = get_shared_config()
    local_config = get_local_config()

    need_restart_xinetd = False

    try:
        user = getpwnam(shared_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    shared_config.default_user)
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == 'all':
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v['unique_name'] for k, v in get_all_problems().items()
        ]

    # Attempt to load the port_map from file
    try:
        port_map_path = join(SHARED_ROOT, 'port_map.json')
        with open(port_map_path, 'r') as f:
            port_map = json.load(f)
            port_map = {literal_eval(k): v for k, v in port_map.items()}
    except FileNotFoundError:
        # If it does not exist, create it
        for path, problem in get_all_problems().items():
            for instance in get_all_problem_instances(path):
                port_map[(problem["unique_name"],
                          instance["instance_number"])] = instance.get(
                              "port", None)
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)
    except IOError:
        logger.error(f"Error loading port map from {port_map_path}")
        raise

    acquire_lock()

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)

            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy or is_static_flag:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location,
                               instances=instances_to_deploy,
                               test=args.dry,
                               debug=args.debug,
                               restart_xinetd=False)
            else:
                logger.info("No additional instances to deploy for '%s'.",
                            problem_object["unique_name"])
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()