def deploy_init(contain):
    """Prepare global deployment state.

    Loads the shared and local configs into module globals, records whether
    this deployment targets a container, and loads (or rebuilds) the on-disk
    port map.

    Args:
        contain: truthy when deploying into a container.

    Returns:
        Tuple of (shared_config, local_config, port_map).

    Raises:
        IOError: if the port map file exists but cannot be read.
    """
    global shared_config, local_config, port_map, containerize
    containerize = contain
    shared_config = get_shared_config()
    local_config = get_local_config()

    # Attempt to load the port_map from file
    try:
        with open(PORT_MAP_PATH, "r") as f:
            port_map = json.load(f)
            # JSON keys are repr()'d (unique_name, instance_number) tuples;
            # revive them with literal_eval.
            port_map = {literal_eval(key): port for key, port in port_map.items()}
    except FileNotFoundError:
        # No port map on disk yet -- rebuild it from deployed instances.
        for problem_path, problem in get_all_problems().items():
            for instance in get_all_problem_instances(problem_path):
                map_key = (problem["unique_name"], instance["instance_number"])
                port_map[map_key] = instance.get("port", None)
        with open(PORT_MAP_PATH, "w") as f:
            json.dump({repr(key): port for key, port in port_map.items()}, f)
    except IOError:
        logger.error(f"Error loading port map from {PORT_MAP_PATH}")
        raise
    return shared_config, local_config, port_map
def undeploy_problems(args):
    """
    Main entrypoint for problem undeployment.

    Does not remove the installed packages (apt-get remove
    [sanitized name with hash]).

    Does not remove the problem from the web server (delete it from
    the mongo db).

    Args:
        args: parsed CLI namespace (problem_names, instances, num_instances).

    Raises:
        FatalException: if no problem names were given.
    """
    problem_names = args.problem_names
    if len(problem_names) == 0:
        logger.error("No problem name(s) specified")
        raise FatalException
    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to undeploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]

    acquire_lock()

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            # Only touch instances that are actually deployed.
            # (Previously instance_list was copied into instances_to_remove
            # and the copy was immediately overwritten -- a dead store.)
            deployed_instances = {
                instance["instance_number"]
                for instance in get_all_problem_instances(problem_name)
            }
            instances_to_remove = list(set(instance_list) & deployed_instances)
            if len(instances_to_remove) == 0:
                logger.warning(
                    f"No deployed instances found for {problem_name}")
                continue
            remove_instances(problem_name, instances_to_remove)
    finally:
        # xinetd must pick up removed service configs even on partial failure.
        execute(["service", "xinetd", "restart"], timeout=60)
        release_lock()
def undeploy_problems(args, config):
    """
    Main entrypoint for problem undeployment.

    Args:
        args: parsed CLI namespace (problem_paths, bundle, instances,
            num_instances).
        config: deployment configuration (unused here; kept for interface
            parity with deploy_problems).

    Raises:
        FatalException: on missing bundles, uninstalled problems, or a
            concurrent deployment holding the lock file.
    """
    problem_names = args.problem_paths

    if args.bundle:
        # Expand each bundle (file path or installed bundle name) into its
        # member problem names.
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before undeploying problems, load in already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[problem["name"]] = []
        for instance in get_all_problem_instances(path):
            already_deployed[problem["name"]].append(
                instance["instance_number"])

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error("Cannot undeploy while other deployment in progress. "
                     "If you believe this is an error, "
                     "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            problem_root = get_problem_root(problem_name, absolute=True)
            if isdir(problem_root):
                problem = get_problem(problem_root)
                instances = list(
                    filter(lambda x: x in already_deployed[problem["name"]],
                           instance_list))
                if len(instances) == 0:
                    # logger.warn is deprecated; use warning().
                    logger.warning(
                        "No deployed instances %s were found for problem '%s'.",
                        instance_list, problem["name"])
                else:
                    logger.debug("Undeploying problem '%s'.", problem["name"])
                    # Bug fix: remove only the instances that are actually
                    # deployed (matches the success message below); previously
                    # the full requested instance_list was passed.
                    remove_instances(problem_name, instances)
                    logger.info(
                        "Problem instances %s were successfully removed from '%s'.",
                        instances, problem["name"])
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment.

    Deploys (or dry-runs) the requested instances of each problem in
    args.problem_paths, or of every problem in the given bundles when
    args.bundle is set. Already-deployed instances are skipped unless
    args.redeploy is set.

    Args:
        args: parsed CLI namespace (problem_paths, bundle, instances,
            num_instances, redeploy, dry, debug, deployment_directory,
            secret).
        config: deployment configuration; stored in module-global
            deploy_config.

    Raises:
        FatalException: on invalid arguments, missing bundles/problems, or a
            concurrent deployment holding the lock file.
    """
    global deploy_config, port_map, inv_port_map
    deploy_config = config

    # Ensure the default user exists; create it on first deployment.
    try:
        getpwnam(deploy_config.default_user)
    except KeyError:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1
                                                  or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple problems or instances."
        )
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        # logger.warn is deprecated; use warning().
        logger.warning(
            "Overriding deploy_secret with user supplied secret '%s'.",
            args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        # Expand bundles (file path or installed bundle name) into members.
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error("Cannot deploy while other deployment in progress. "
                     "If you believe this is an error, "
                     "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                # remove already deployed instances
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            if args.dry and isdir(problem_name):
                deploy_problem(problem_name,
                               instances=todo_instance_list,
                               test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            elif isdir(join(get_problem_root(problem_name, absolute=True))):
                deploy_problem(join(
                    get_problem_root(problem_name, absolute=True)),
                               instances=todo_instance_list,
                               test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        # Bug fix: the lock file is created unconditionally above, so it must
        # also be removed unconditionally. The old `if not args.dry` guard
        # left a stale lock after dry runs, blocking later deployments.
        os.remove(lock_file)
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment.

    Resolves each entry in args.problem_paths to an installed problem package
    (packaging and installing source directories on the fly), then deploys the
    requested instances, skipping already-deployed ones unless args.redeploy.
    xinetd is restarted once at the end if any deployed problem requires it.

    Args:
        args: parsed CLI namespace (problem_paths, bundle, instances,
            num_instances, redeploy, dry, debug, deployment_directory,
            secret, no_restart).
        config: deployment configuration; stored in module-global
            deploy_config.

    Raises:
        FatalException: on bad arguments, missing bundles/problems, packaging
            or installation failures, or a concurrent deployment lock.
    """
    global deploy_config, port_map, inv_port_map
    deploy_config = config
    need_restart_xinetd = False

    # Ensure the default user exists; create it on first deployment.
    try:
        getpwnam(deploy_config.default_user)
    except KeyError:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1
                                                  or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple problems or instances."
        )
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warning(
            "Overriding deploy_secret with user supplied secret '%s'.",
            args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        # Expand bundles (file path or installed bundle name) into members.
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(
                    bundle_path, absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error("Cannot deploy while other deployment in progress. "
                     "If you believe this is an error, "
                     "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if isdir(get_problem_root(problem_name, absolute=True)):
                # problem_name is already an installed package
                deploy_location = get_problem_root(problem_name, absolute=True)
            elif isdir(problem_name) and args.dry:
                # dry run - avoid installing package
                deploy_location = problem_name
            elif isdir(problem_name):
                # problem_name is a source dir - convert to .deb and install
                try:
                    if not os.path.isdir(TEMP_DEB_DIR):
                        os.mkdir(TEMP_DEB_DIR)
                    generated_deb_path = package_problem(
                        problem_name, out_path=TEMP_DEB_DIR)
                except FatalException:
                    logger.error("An error occurred while packaging %s.",
                                 problem_name)
                    raise
                try:
                    # reinstall flag ensures package will be overwritten if
                    # version is the same, maintaining previous 'dpkg -i'
                    # behavior. List-form argv (no shell=True) keeps paths
                    # with spaces or shell metacharacters safe.
                    subprocess.run(
                        ["apt-get", "install", "--reinstall",
                         generated_deb_path],
                        check=True,
                        stdout=subprocess.PIPE)
                except subprocess.CalledProcessError:
                    logger.error(
                        "An error occurred while installing problem packages.")
                    raise FatalException
                deploy_location = get_problem_root_hashed(
                    get_problem(problem_name), absolute=True)
            else:
                logger.error(
                    "'%s' is neither an installed package, nor a valid problem directory",
                    problem_name)
                raise FatalException

            # Avoid redeploying already-deployed instances
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            # Bug fix: accumulate with `or` -- previously each iteration
            # overwrote the flag, so a later problem that needed no restart
            # clobbered an earlier problem's restart request.
            need_restart_xinetd = deploy_problem(
                deploy_location,
                instances=todo_instance_list,
                test=args.dry,
                deployment_directory=args.deployment_directory,
                debug=args.debug,
                restart_xinetd=False) or need_restart_xinetd
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart and need_restart_xinetd:
            execute(["service", "xinetd", "restart"], timeout=60)
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
def undeploy_problems(args, config):
    """
    Main entrypoint for problem undeployment.

    Args:
        args: parsed CLI namespace (problem_paths, bundle, instances,
            num_instances).
        config: deployment configuration (unused here; kept for interface
            parity with deploy_problems).

    Raises:
        FatalException: on missing bundles, uninstalled problems, or a
            concurrent deployment holding the lock file.
    """
    problem_names = args.problem_paths

    if args.bundle:
        # Expand each bundle (file path or installed bundle name) into its
        # member problem names.
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[problem["name"]] = []
        for instance in get_all_problem_instances(path):
            already_deployed[problem["name"]].append(
                instance["instance_number"])

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error("Cannot undeploy while other deployment in progress. "
                     "If you believe this is an error, "
                     "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            problem_root = get_problem_root(problem_name, absolute=True)
            if isdir(problem_root):
                problem = get_problem(problem_root)
                instances = list(
                    filter(lambda x: x in already_deployed[problem["name"]],
                           instance_list))
                if len(instances) == 0:
                    # logger.warn is deprecated; use warning().
                    logger.warning(
                        "No deployed instances %s were found for problem '%s'.",
                        instance_list, problem["name"])
                else:
                    logger.debug("Undeploying problem '%s'.", problem["name"])
                    # Bug fix: remove only the deployed instances (matches the
                    # success log below); previously the full requested
                    # instance_list was passed.
                    remove_instances(problem_name, instances)
                    logger.info(
                        "Problem instances %s were successfully removed from '%s'.",
                        instances, problem["name"])
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment.

    Deploys (or dry-runs) the requested instances of each problem in
    args.problem_paths, or of every problem in the given bundles when
    args.bundle is set. Already-deployed instances are skipped unless
    args.redeploy is set.

    Args:
        args: parsed CLI namespace (problem_paths, bundle, instances,
            num_instances, redeploy, dry, debug, deployment_directory,
            secret).
        config: deployment configuration; stored in module-global
            deploy_config.

    Raises:
        FatalException: on invalid arguments, missing bundles/problems, or a
            concurrent deployment holding the lock file.
    """
    global deploy_config, port_map, inv_port_map
    deploy_config = config

    # Ensure the default user exists; create it on first deployment.
    try:
        getpwnam(deploy_config.default_user)
    except KeyError:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1
                                                  or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple problems or instances."
        )
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        # logger.warn is deprecated; use warning().
        logger.warning(
            "Overriding deploy_secret with user supplied secret '%s'.",
            args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        # Expand bundles (file path or installed bundle name) into members.
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error("Cannot deploy while other deployment in progress. "
                     "If you believe this is an error, "
                     "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                # remove already deployed instances
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            if args.dry and isdir(problem_name):
                deploy_problem(problem_name,
                               instances=todo_instance_list,
                               test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            elif isdir(join(get_problem_root(problem_name, absolute=True))):
                deploy_problem(join(
                    get_problem_root(problem_name, absolute=True)),
                               instances=todo_instance_list,
                               test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        # Bug fix: the lock file is created unconditionally above, so it must
        # also be removed unconditionally. The old `if not args.dry` guard
        # left a stale lock after dry runs, blocking later deployments.
        os.remove(lock_file)
def deploy_problems(args, config):
    """ Main entrypoint for problem deployment """
    global deploy_config, port_map
    deploy_config = config

    # Make sure the default shell user exists before deploying anything.
    try:
        user = getpwnam(deploy_config.DEFAULT_USER)
    except KeyError as e:
        print("DEFAULT_USER {} does not exist. Creating now.".format(
            deploy_config.DEFAULT_USER))
        create_user(deploy_config.DEFAULT_USER)

    multi_target = len(args.problem_paths) > 1 or args.num_instances > 1
    if args.deployment_directory is not None and multi_target:
        raise Exception("Cannot specify deployment directory if deploying multiple problems or instances.")

    problems = args.problem_paths

    if args.bundle:
        # Replace the path list with every problem named by the bundles.
        expanded = []
        for b_path in args.problem_paths:
            if os.path.isfile(b_path):
                expanded.extend(get_bundle(b_path)["problems"])
            else:
                sources = get_bundle_root(b_path, absolute=True)
                if not os.path.isdir(sources):
                    raise Exception("Could not get bundle.")
                expanded.extend(get_bundle(sources)["problems"])
        problems = expanded

    # before deploying problems, load in port_map
    for prob_path, prob in get_all_problems().items():
        for inst in get_all_problem_instances(prob_path):
            if "port" in inst:
                port_map[inst["port"]] = (prob["name"], inst["instance_number"])

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        raise Exception(
            "Cannot deploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")

    if not args.dry:
        with open(lock_file, "w") as f:
            f.write("1")

    try:
        for path in problems:
            if args.dry and os.path.isdir(path):
                # Dry runs may target a source directory directly.
                deploy_problem(
                    path,
                    instances=args.num_instances,
                    test=args.dry,
                    deployment_directory=args.deployment_directory)
            else:
                installed_root = os.path.join(
                    get_problem_root(path, absolute=True))
                if not os.path.isdir(installed_root):
                    raise Exception(
                        "Problem path {} cannot be found".format(path))
                deploy_problem(
                    installed_root,
                    instances=args.num_instances,
                    test=args.dry,
                    deployment_directory=args.deployment_directory)
    except Exception as e:
        # Deployment errors are reported but deliberately not re-raised so
        # the lock is still cleaned up below.
        traceback.print_exc()
    finally:
        if not args.dry:
            os.remove(lock_file)
def deploy_problems(args):
    """
    Main entrypoint for problem deployment.

    Deploys the requested instances of each named installed problem, or of
    all installed problems when the single name "all" is given. Static-flag
    problems only ever get instance 0. Already-deployed instances are skipped
    unless args.redeploy is set. Uninstalled problems are logged and skipped.
    The updated port map is written out and xinetd restarted (unless
    args.no_restart) even on failure.
    """
    global FLAG_FMT
    if args.flag_format:
        FLAG_FMT = args.flag_format
        logger.info(f"Deploying with custom flag format: {FLAG_FMT}")

    shared_config, local_config, port_map = deploy_init(args.containerize)

    # Ensure the default user exists; create it on first deployment.
    # (The unused `user`/`e` bindings were dropped.)
    try:
        getpwnam(shared_config.default_user)
    except KeyError:
        logger.info(
            "default_user '%s' does not exist. Creating the user now.",
            shared_config.default_user,
        )
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    if args.containerize and (len(problem_names) > 1 or
                              len(instance_list) > 1):
        logger.error("can only deploy a single instance per container")
        return

    acquire_lock()
    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)
            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                # Static-flag problems share one deployment: instance 0 only.
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location,
                               instances=instances_to_deploy,
                               test=args.dry,
                               debug=args.debug,
                               restart_xinetd=False,
                               containerize=args.containerize)
            else:
                logger.info(
                    "No additional instances to deploy for '%s'.",
                    problem_object["unique_name"],
                )
    finally:
        # Restart xinetd unless specified. Service must be manually restarted.
        # Fix: the dead `need_restart_xinetd` local was removed -- it was
        # assigned once and never read (this branch checks only no_restart).
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(PORT_MAP_PATH, "w") as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()
def deploy_problems(args):
    """ Main entrypoint for problem deployment

    Deploys the requested instances of each named installed problem (or of
    all installed problems when the single name 'all' is given), loading or
    rebuilding the on-disk port map first. The port map is written back and
    xinetd restarted (unless args.no_restart) in the finally block, even on
    failure.
    """
    global shared_config, local_config, port_map
    shared_config = get_shared_config()
    local_config = get_local_config()

    # NOTE(review): assigned but never read in this revision -- the finally
    # block below checks only args.no_restart. Looks vestigial; confirm.
    need_restart_xinetd = False

    # Ensure the default user exists; create it on first deployment.
    try:
        user = getpwnam(shared_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    shared_config.default_user)
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == 'all':
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v['unique_name'] for k, v in get_all_problems().items()
        ]

    # Attempt to load the port_map from file
    try:
        port_map_path = join(SHARED_ROOT, 'port_map.json')
        with open(port_map_path, 'r') as f:
            port_map = json.load(f)
            # JSON keys are repr()'d (unique_name, instance_number) tuples;
            # convert them back with literal_eval.
            port_map = {literal_eval(k): v for k, v in port_map.items()}
    except FileNotFoundError:
        # If it does not exist, create it
        for path, problem in get_all_problems().items():
            for instance in get_all_problem_instances(path):
                port_map[(problem["unique_name"],
                          instance["instance_number"])] = instance.get(
                              "port", None)
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)
    except IOError:
        logger.error(f"Error loading port map from {port_map_path}")
        raise

    acquire_lock()

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            # Uninstalled problems are logged and skipped, not fatal.
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)

            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                # Static-flag problems share one deployment: instance 0 only.
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            # NOTE(review): the `or is_static_flag` arm means --redeploy can
            # never redeploy a static-flag problem (instance 0 is always
            # filtered out once deployed). Presumably intentional -- confirm.
            if not args.redeploy or is_static_flag:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location,
                               instances=instances_to_deploy,
                               test=args.dry,
                               debug=args.debug,
                               restart_xinetd=False)
            else:
                logger.info("No additional instances to deploy for '%s'.",
                            problem_object["unique_name"])
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()