Example #1
0
def containerize_problems(args):
    """Main entrypoint for problem containerization.

    Builds a container image for each requested (problem, instance)
    pair, skipping problems that are not installed and instances that
    are already deployed.

    Args:
        args: parsed CLI arguments; uses problem_names, instances,
            num_instances, and flag_format.
    """

    # determine what we are deploying
    problem_names = args.problem_names
    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    logger.debug(f"Containerizing: {problem_names} {instance_list}")

    # build base images required
    ensure_base_images()

    deploy_init(contain=True)
    flag_fmt = args.flag_format if args.flag_format else FLAG_FMT

    for name in problem_names:
        if not os.path.isdir(get_problem_root(name, absolute=True)):
            logger.error(f"'{name}' is not an installed problem")
            continue

        logger.debug(f"Problem  : {name}")
        src = get_problem_root(name, absolute=True)
        metadata = get_problem(src)

        cur_instances = [
            i["instance_number"] for i in get_all_problem_instances(name)
        ]
        logger.debug(f"Existing : {cur_instances}")

        origwd = os.getcwd()
        try:
            for instance in instance_list:
                if instance in cur_instances:
                    # logger.warn is a deprecated alias of logger.warning
                    logger.warning(f"Instance already deployed: {instance}")
                    continue

                logger.debug(f"Instance : {instance}")

                # copy source files to a staging directory and switch to it
                staging = generate_staging_directory(problem_name=name,
                                                     instance_number=instance)
                dst = os.path.join(staging, "_containerize")
                shutil.copytree(src, dst)
                os.chdir(dst)

                # build the image
                containerize(metadata, instance, flag_fmt)
        finally:
            # return to the original directory even if containerization fails,
            # so later iterations (and the caller) see a sane cwd
            os.chdir(origwd)
Example #2
0
def get_all_problems():
    """Return a dictionary of name-hash:object mappings.

    Scans PROBLEM_ROOT for installed problems; directories whose
    metadata cannot be loaded are silently skipped.
    """

    problems = {}
    if os.path.isdir(PROBLEM_ROOT):
        for name in os.listdir(PROBLEM_ROOT):
            try:
                problems[name] = get_problem(
                    get_problem_root(name, absolute=True))
            except FileNotFoundError:
                # not a valid problem directory; skip it
                pass
    return problems
Example #3
0
def get_all_problems():
    """Return a dictionary of name:object mappings.

    Scans PROBLEM_ROOT for installed problems; directories whose
    metadata cannot be loaded are silently skipped.
    """

    problems = {}
    if os.path.isdir(PROBLEM_ROOT):
        for name in os.listdir(PROBLEM_ROOT):
            try:
                problems[name] = get_problem(
                    get_problem_root(name, absolute=True))
            except FileNotFoundError:
                # not a valid problem directory; skip it
                pass
    return problems
Example #4
0
def undeploy_problems(args):
    """
    Main entrypoint for problem undeployment

    Does not remove the installed packages (apt-get remove [sanitized name with hash]).
    Does not remove the problem from the web server (delete it from the mongo db).

    Args:
        args: parsed CLI arguments; uses problem_names, instances,
            and num_instances.

    Raises:
        FatalException: if no problem names were given.
    """

    problem_names = args.problem_names

    if len(problem_names) == 0:
        logger.error("No problem name(s) specified")
        raise FatalException

    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to undeploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]

    acquire_lock()

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue

            # Only remove instances that are actually deployed.
            # (Compute the intersection directly; copying instance_list
            # first was redundant.)
            deployed_instances = {
                instance["instance_number"]
                for instance in get_all_problem_instances(problem_name)
            }
            instances_to_remove = list(
                set(instance_list) & deployed_instances)

            if len(instances_to_remove) == 0:
                logger.warning(
                    f"No deployed instances found for {problem_name}")
                continue

            remove_instances(problem_name, instances_to_remove)
    finally:
        # xinetd must be restarted so removed services stop being offered
        execute(["service", "xinetd", "restart"], timeout=60)
        release_lock()
Example #5
0
def postinst_dependencies(problem, problem_path, debian_path, install_path):
    """
    Handles the generation of the postinst script for additional dependencies.

    The postinst script installs the problem's pip requirements and runs
    its install_dependencies script (if present) at package install time.

    Args:
        problem: the problem object.
        problem_path: the problem directory.
        debian_path: the deb's DEBIAN directory.
        install_path: the staging directory where requirements.txt and
            install_dependencies are copied for packaging.
    """

    # The script always starts with a she-bang; dependency-install
    # commands are appended below as needed.
    postinst_template = ["#!/bin/bash"]

    requirements_path = join(problem_path, "requirements.txt")
    dependencies_path = join(problem_path, "install_dependencies")

    staging_requirements_path = join(install_path, "requirements.txt")

    # Paths where these files will live once the package is installed;
    # these are what the postinst script references at install time.
    deployed_requirements_path = join(
        get_problem_root(problem["name"], absolute=True), "__files",
        "requirements.txt")
    deployed_setup_path = join(
        get_problem_root(problem["name"], absolute=True), "__files",
        "install_dependencies")

    listed_requirements = problem.get("pip_requirements", [])

    # Fall back to python3 when the requested version is missing or
    # unrecognized.
    pip_python_version = problem.get("pip_python_version")
    valid_pip_python_versions = ["2", "3", "3.6"]
    if pip_python_version not in valid_pip_python_versions:
        pip_python_version = "3"

    # Write or copy the requirements to the staging directory.
    if len(listed_requirements) > 0:
        # pip_requirements and requirements.txt are mutually exclusive.
        if isfile(requirements_path):
            logger.error(
                "Problem '%s' has both a pip_requirements field and requirements.txt.",
                problem["name"])
            raise FatalException

        with open(staging_requirements_path, "w") as f:
            f.writelines("\n".join(listed_requirements))

    elif isfile(requirements_path):
        copy(requirements_path, staging_requirements_path)

    if logger.getEffectiveLevel() <= logging.DEBUG and isfile(
            staging_requirements_path):
        with open(staging_requirements_path, "r") as f:
            logger.debug("python requirements:\n%s", f.read())

    if isfile(staging_requirements_path):
        # Install requirements from their deployed location at postinst time.
        postinst_template.append("python{ver} -m pip install -r {path}".format(
            ver=pip_python_version, path=deployed_requirements_path))

    if isfile(dependencies_path):
        copy(dependencies_path, join(install_path, "install_dependencies"))

        # Ensure it is executable
        chmod(join(install_path, "install_dependencies"), 0o500)

        postinst_template.append("bash -c '{}'".format(deployed_setup_path))

    chmod(debian_path, 0o775)

    postinst_path = join(debian_path, "postinst")
    with open(postinst_path, "w") as f:
        chmod(postinst_path, 0o775)
        contents = "\n".join(postinst_template)
        f.write(contents)

        # post_template always has a she-bang.
        if len(postinst_template) > 1:
            logger.debug("post install:\n%s", contents)
Example #6
0
def problem_builder(args, config):
    """
    Main entrypoint for package building operations.

    Builds a .deb package for every problem found under each of the given
    problem paths, staging files in a temporary directory that is removed
    afterwards.

    Args:
        args: parsed CLI arguments (problem_paths, staging_dir, out, ignore).
        config: the shell manager configuration object.

    Raises:
        FatalException: if no paths were given or a path contains no problems.
    """

    if not args.problem_paths:
        print(
            "usage: shell_manager package [-h] [-s STAGING_DIR] [-o OUT] [-i IGNORE] problem_path"
        )
        print(
            "shell_manager bundle: error: the following arguments are required: problem_path"
        )
        raise FatalException

    # Process every given base path iteratively instead of tail-recursing
    # once per path.
    while args.problem_paths:
        problem_base_path = args.problem_paths.pop()

        problem_paths = find_problems(problem_base_path)

        if len(problem_paths) == 0:
            logging.critical("No problems found under '%s'!", problem_base_path)
            raise FatalException

        for problem_path in problem_paths:
            problem = get_problem(problem_path)

            logger.debug("Starting to package: '%s'.", problem["name"])

            # Lay out the staging tree: DEBIAN control dir plus the
            # problem's install root.
            paths = {}
            if args.staging_dir is None:
                paths["staging"] = join(problem_path, "__staging")
            else:
                paths["staging"] = join(args.staging_dir, "__staging")

            paths["debian"] = join(paths["staging"], "DEBIAN")
            paths["data"] = join(paths["staging"],
                                 get_problem_root(problem["name"]))
            paths["install_data"] = join(paths["data"], "__files")

            # Make all of the directories; order does not matter with makedirs.
            # (A plain loop, not a side-effect list comprehension.)
            for staging_path in paths.values():
                if not isdir(staging_path):
                    makedirs(staging_path)

            # Exclude the staging dir itself without mutating args.ignore
            # (the original append-per-problem accumulated duplicates).
            ignore = list(args.ignore)
            ignore.append("__staging")

            full_copy(problem_path, paths["data"], ignore=ignore)

            # note that this chmod does not work correct if on a vagrant shared folder,
            # so we need to package the problems elsewhere
            chmod(paths["data"], 0o750)

            problem_to_control(problem, paths["debian"])

            postinst_dependencies(problem, problem_path, paths["debian"],
                                  paths["install_data"])

            deb_directory = args.out if args.out is not None else getcwd()

            def format_deb_file_name(problem):
                """
                Prepare the file name of the deb package according to deb policy.

                Args:
                    problem: the problem object

                Returns:
                    An acceptable file name for the problem.
                """

                raw_package_name = "{}-{}-{}.deb".format(
                    sanitize_name(problem.get("organization", "ctf")),
                    sanitize_name(problem.get("pkg_name", problem["name"])),
                    sanitize_name(problem.get("version", "1.0-0")))

                return raw_package_name

            deb_path = join(deb_directory, format_deb_file_name(problem))

            shell = spur.LocalShell()
            result = shell.run(
                ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

            if result.return_code != 0:
                logger.error("Error building problem deb for '%s'.",
                             problem["name"])
                logger.error(result.output)
            else:
                logger.info("Problem '%s' packaged successfully.", problem["name"])

            logger.debug("Cleaning up '%s' staging directory '%s'.",
                         problem["name"], paths["staging"])

            rmtree(paths["staging"])
Example #7
0
def deploy_problems(args, config):
    """Main entrypoint for problem deployment.

    Deploys the requested instances of each problem (or of every problem
    in the given bundles), packaging and installing source directories as
    needed, under a file-based deployment lock.

    Args:
        args: parsed CLI arguments.
        config: the deployment configuration object.

    Raises:
        FatalException: on invalid arguments, missing bundles/problems,
            packaging/installation failures, or a held deployment lock.
    """

    global deploy_config, port_map, inv_port_map
    deploy_config = config

    need_restart_xinetd = False

    try:
        # getpwnam raises KeyError when the user does not exist
        getpwnam(deploy_config.default_user)
    except KeyError:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1 or
                                                  args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple problems or instances."
        )
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warning(
            "Overriding deploy_secret with user supplied secret '%s'.",
            args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        # Expand bundle files/names into their constituent problem names
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(
                    bundle_path, absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot deploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if isdir(get_problem_root(problem_name, absolute=True)):
                # problem_name is already an installed package
                deploy_location = get_problem_root(problem_name, absolute=True)
            elif isdir(problem_name) and args.dry:
                # dry run - avoid installing package
                deploy_location = problem_name
            elif isdir(problem_name):
                # problem_name is a source dir - convert to .deb and install
                try:
                    if not os.path.isdir(TEMP_DEB_DIR):
                        os.mkdir(TEMP_DEB_DIR)
                    generated_deb_path = package_problem(problem_name, out_path=TEMP_DEB_DIR)
                except FatalException:
                    logger.error("An error occurred while packaging %s.", problem_name)
                    raise
                try:
                    # reinstall flag ensures package will be overwritten if version is the same,
                    # maintaining previous 'dpkg -i' behavior.
                    # Use an argument list (shell=False) so the deb path is
                    # passed safely even if it contains spaces or shell
                    # metacharacters.
                    subprocess.run(
                        ["apt-get", "install", "--reinstall",
                         generated_deb_path],
                        check=True,
                        stdout=subprocess.PIPE)
                except subprocess.CalledProcessError:
                    logger.error("An error occurred while installing problem packages.")
                    raise FatalException
                deploy_location = get_problem_root_hashed(get_problem(problem_name), absolute=True)
            else:
                logger.error("'%s' is neither an installed package, nor a valid problem directory",
                             problem_name)
                raise FatalException

            # Avoid redeploying already-deployed instances
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            # Accumulate rather than overwrite, so an earlier problem's
            # restart requirement is not lost by a later one.
            need_restart_xinetd = deploy_problem(
                deploy_location,
                instances=todo_instance_list,
                test=args.dry,
                deployment_directory=args.deployment_directory,
                debug=args.debug,
                restart_xinetd=False) or need_restart_xinetd
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart and need_restart_xinetd:
            execute(["service", "xinetd", "restart"], timeout=60)

        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
Example #8
0
def deploy_problems(args):
    """Main entrypoint for problem deployment.

    Deploys the requested instances of each installed problem (or of all
    problems when 'all' is given) under the deployment lock, restarting
    xinetd and persisting the port map afterwards.

    Args:
        args: parsed CLI arguments.
    """

    global FLAG_FMT
    if args.flag_format:
        FLAG_FMT = args.flag_format
        logger.info(f"Deploying with custom flag format: {FLAG_FMT}")

    shared_config, local_config, port_map = deploy_init(args.containerize)

    try:
        # getpwnam raises KeyError when the user does not exist
        getpwnam(shared_config.default_user)
    except KeyError:
        logger.info(
            "default_user '%s' does not exist. Creating the user now.",
            shared_config.default_user,
        )
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    if args.containerize and (len(problem_names) > 1
                              or len(instance_list) > 1):
        logger.error("can only deploy a single instance per container")
        return

    acquire_lock()
    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)

            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            # Static-flag problems only ever have a single instance (0)
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location,
                               instances=instances_to_deploy,
                               test=args.dry,
                               debug=args.debug,
                               restart_xinetd=False,
                               containerize=args.containerize)
            else:
                logger.info(
                    "No additional instances to deploy for '%s'.",
                    problem_object["unique_name"],
                )
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(PORT_MAP_PATH, "w") as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()
Example #9
0
def bundle_problems(args, config):
    """
    Main entrypoint for generating problem bundles.

    Verifies every problem in the bundle is installed, stages the bundle
    metadata, and packages it into a .deb file.

    Args:
        args: parsed CLI arguments (bundle_path, out, staging_dir).
        config: the shell manager configuration object.

    Raises:
        FatalException: if the bundle or one of its problems cannot be found.
    """

    bundle_path = args.bundle_path
    if os.path.isdir(args.bundle_path):
        bundle = get_bundle(args.bundle_path)
        bundle_path = join(args.bundle_path, "bundle.json")
    elif os.path.isfile(args.bundle_path):
        # Use a context manager so the file handle is closed promptly
        with open(args.bundle_path) as f:
            bundle = json.load(f)
    else:
        logger.error("No bundle could be found at '%s'", args.bundle_path)
        raise FatalException

    logger.debug("Starting to bundle: '%s'.", bundle["name"])

    # Every problem referenced by the bundle must already be installed
    for problem_name in bundle["problems"]:
        installed_path = get_problem_root(problem_name, absolute=True)
        if not isdir(installed_path) or not get_problem(installed_path):
            logger.error("'%s' is not an installed problem.", problem_name)
            raise FatalException

    paths = {"working": getcwd() if args.out is None else args.out}

    if args.staging_dir:
        paths["staging"] = join(args.staging_dir, "__staging")
    else:
        paths["staging"] = join(paths["working"], "__staging")

    paths["debian"] = join(paths["staging"], "DEBIAN")
    paths["bundle_root"] = join(paths["staging"],
                                get_bundle_root(bundle["name"]))

    # Create any missing staging directories (plain loop instead of a
    # side-effect list comprehension)
    for staging_path in paths.values():
        if not isdir(staging_path):
            makedirs(staging_path)

    # note that this chmod does not work correct if on a vagrant shared folder,
    # so we need to package the problems elsewhere
    chmod(dirname(paths["bundle_root"]), 0o750)

    bundle_to_control(bundle, paths["debian"])

    copied_bundle_path = join(paths["bundle_root"], "bundle.json")
    copyfile(bundle_path, copied_bundle_path)

    def format_deb_file_name(bundle):
        """
        Prepare the file name of the deb package according to deb policy.

        Args:
            bundle: the bundle object

        Returns:
           An acceptable file name for the bundle.
        """

        raw_package_name = "{}-{}-bundle-{}.deb".format(
            sanitize_name(bundle.get("organization", "ctf")),
            sanitize_name(bundle["name"]),
            sanitize_name(bundle.get("version", "1.0-0")))

        return raw_package_name

    deb_path = join(paths["working"], format_deb_file_name(bundle))

    shell = spur.LocalShell()
    result = shell.run(
        ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

    if result.return_code != 0:
        logger.error("Error building bundle deb for '%s'.", bundle["name"])
        logger.error(result.output)
    else:
        logger.info("Bundle '%s' packaged successfully.", bundle["name"])

    logger.debug("Cleaning up '%s' staging directory '%s'.", bundle["name"],
                 paths["staging"])

    rmtree(paths["staging"])
Example #10
0
def undeploy_problems(args, config):
    """Main entrypoint for problem undeployment.

    Removes the requested deployed instances of each problem (or of every
    problem in the given bundles) under the deployment lock file.

    Args:
        args: parsed CLI arguments.
        config: the deployment configuration object.

    Raises:
        FatalException: if a bundle or problem cannot be found, or if
            another deployment is in progress.
    """

    problem_names = args.problem_paths

    if args.bundle:
        # Expand bundles into their constituent problem names
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path, absolute=True)
                if isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before undeploying problems, load in already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[problem["name"]] = []
        for instance in get_all_problem_instances(path):
            already_deployed[problem["name"]].append(instance["instance_number"])

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error("Cannot undeploy while other deployment in progress. If you believe this is an error, "
                         "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            problem_root = get_problem_root(problem_name, absolute=True)
            if isdir(problem_root):
                problem = get_problem(problem_root)
                # .get() guards against installed problems that were not
                # present in the already_deployed scan
                deployed = already_deployed.get(problem["name"], [])
                instances = [i for i in instance_list if i in deployed]
                if len(instances) == 0:
                    logger.warning("No deployed instances %s were found for problem '%s'.", instance_list, problem["name"])
                else:
                    logger.debug("Undeploying problem '%s'.", problem["name"])
                    # Remove only the instances that are actually deployed;
                    # previously the full requested instance_list was passed
                    # even though only `instances` was reported as removed.
                    remove_instances(problem_name, instances)
                    logger.info("Problem instances %s were successfully removed from '%s'.", instances, problem["name"])
            else:
                logger.error("Problem '%s' doesn't appear to be installed.", problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
Example #11
0
def deploy_problems(args, config):
    """Main entrypoint for problem deployment.

    Deploys the requested instances of each problem (or of every problem
    in the given bundles) under the deployment lock file.

    Args:
        args: parsed CLI arguments.
        config: the deployment configuration object.

    Raises:
        FatalException: on invalid arguments, missing bundles/problems,
            or a held deployment lock.
    """

    global deploy_config, port_map, inv_port_map
    deploy_config = config

    try:
        # getpwnam raises KeyError when the user does not exist
        getpwnam(deploy_config.default_user)
    except KeyError:
        logger.info("default_user '%s' does not exist. Creating the user now.", deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1 or args.num_instances > 1):
        logger.error("Cannot specify deployment directory if deploying multiple problems or instances.")
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        # logger.warn is a deprecated alias of logger.warning
        logger.warning("Overriding deploy_secret with user supplied secret '%s'.", args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        # Expand bundles into their constituent problem names
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path, absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"], instance["instance_number"])
                inv_port_map[(problem["name"], instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error("Cannot deploy while other deployment in progress. If you believe this is an error, "
                         "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                # remove already deployed instances
                todo_instance_list = list(set(instance_list) - set(already_deployed.get(problem_name, [])))

            if args.dry and isdir(problem_name):
                deploy_problem(problem_name, instances=todo_instance_list, test=args.dry,
                                deployment_directory=args.deployment_directory, debug=args.debug)
            elif isdir(join(get_problem_root(problem_name, absolute=True))):
                deploy_problem(join(get_problem_root(problem_name, absolute=True)), instances=todo_instance_list,
                                test=args.dry, deployment_directory=args.deployment_directory, debug=args.debug)
            else:
                logger.error("Problem '%s' doesn't appear to be installed.", problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        # Always release the lock: previously dry runs skipped removal,
        # leaving a stale lock file that blocked subsequent deployments.
        os.remove(lock_file)
Example #12
0
def deploy_problems(args):
    """ Main entrypoint for problem deployment """

    global shared_config, local_config, port_map
    shared_config = get_shared_config()
    local_config = get_local_config()

    need_restart_xinetd = False

    try:
        user = getpwnam(shared_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    shared_config.default_user)
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == 'all':
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v['unique_name'] for k, v in get_all_problems().items()
        ]

    # Attempt to load the port_map from file
    try:
        port_map_path = join(SHARED_ROOT, 'port_map.json')
        with open(port_map_path, 'r') as f:
            port_map = json.load(f)
            port_map = {literal_eval(k): v for k, v in port_map.items()}
    except FileNotFoundError:
        # If it does not exist, create it
        for path, problem in get_all_problems().items():
            for instance in get_all_problem_instances(path):
                port_map[(problem["unique_name"],
                          instance["instance_number"])] = instance.get(
                              "port", None)
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)
    except IOError:
        logger.error(f"Error loading port map from {port_map_path}")
        raise

    acquire_lock()

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)

            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy or is_static_flag:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location,
                               instances=instances_to_deploy,
                               test=args.dry,
                               debug=args.debug,
                               restart_xinetd=False)
            else:
                logger.info("No additional instances to deploy for '%s'.",
                            problem_object["unique_name"])
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()
Example #13
0
def package_problem(problem_path,
                    staging_path=None,
                    out_path=None,
                    ignore_files=None):
    """
    Does the work of packaging a single problem.

    Args:
        problem_path (str): path to the problem directory
        staging_path (str, optional): path to a temporary
            staging directory for packaging this problem.
        out_path (str, optional): path to an output directory
            for the resultant .deb package.
        ignore_files (list of str, optional): filenames to exclude
            when packaging this problem.
    Returns:
        tuple (str, str): the name of the package,
            the absolute path to the packaged problem
    Raises:
        FatalException: if dpkg-deb fails to build the package.
    """
    # Work on a local copy. The previous mutable default ([]) was shared
    # across calls, and the .append("__staging") below both grew it on
    # every invocation and leaked into caller-supplied lists.
    ignore_files = list(ignore_files) if ignore_files is not None else []

    problem = get_problem(problem_path)
    logger.debug("Starting to package: '%s'.", problem["name"])

    # Create staging directories needed for packaging
    paths = {}
    if staging_path is None:
        paths["staging"] = join(problem_path, "__staging")
    else:
        paths["staging"] = join(staging_path, "__staging")
    paths["debian"] = join(paths["staging"], "DEBIAN")
    paths["data"] = join(paths["staging"], get_problem_root(problem["name"]))
    paths["install_data"] = join(paths["data"], "__files")
    for path in paths.values():
        if not isdir(path):
            makedirs(path)

    # Copy the problem files to the staging directory, excluding the
    # staging directory itself to avoid copying it into itself.
    ignore_files.append("__staging")
    full_copy(problem_path, paths["data"], ignore=ignore_files)
    # note that this chmod does not work correct if on a vagrant shared folder,
    # so we need to package the problems elsewhere
    chmod(paths["data"], 0o750)
    problem_to_control(problem, paths["debian"])
    postinst_dependencies(problem, problem_path, paths["debian"],
                          paths["install_data"])

    # Package the staging directory as a .deb
    def format_deb_file_name(problem):
        """
        Prepare the file name of the deb package according to deb policy.

        Args:
            problem: the problem object

        Returns:
            An acceptable file name for the problem.
        """
        raw_package_name = "{}-{}-{}.deb".format(
            sanitize_name(problem.get("organization", "ctf")),
            sanitize_name(problem.get("pkg_name", problem["name"])),
            sanitize_name(problem.get("version", "1.0-0")))

        return raw_package_name

    deb_directory = out_path if out_path is not None else getcwd()
    deb_path = join(deb_directory, format_deb_file_name(problem))
    shell = spur.LocalShell()
    result = shell.run(
        ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])
    if result.return_code != 0:
        logger.error("Error building problem deb for '%s'.", problem["name"])
        logger.error(result.output)
        raise FatalException
    else:
        logger.info("Problem '%s' packaged successfully.", problem["name"])

    # Remove the staging directory
    logger.debug("Cleaning up '%s' staging directory '%s'.", problem["name"],
                 paths["staging"])
    rmtree(paths["staging"])

    return (sanitize_name(problem.get("pkg_name", problem["name"])),
            os.path.abspath(deb_path))
Exemple #14
0
def postinst_dependencies(problem, problem_path, debian_path, install_path):
    """
    Handles the generation of the postinst script for additional dependencies.

    Args:
        problem: the problem object.
        problem_path: the problem directory.
        debian_path: the deb's DEBIAN directory.
        install_path: the staged __files directory for install-time assets.
    """
    # The postinst script always starts with a she-bang; installation
    # commands are appended below only when dependencies exist.
    script_lines = ["#!/bin/bash"]

    source_requirements = join(problem_path, "requirements.txt")
    source_setup_script = join(problem_path, "install_dependencies")

    staged_requirements = join(install_path, "requirements.txt")

    # Paths where the files will live once the package is installed;
    # these are what the postinst commands must reference.
    deployed_files_root = join(
        get_problem_root(problem["name"], absolute=True), "__files")
    deployed_requirements = join(deployed_files_root, "requirements.txt")
    deployed_setup_script = join(deployed_files_root, "install_dependencies")

    inline_requirements = problem.get("pip_requirements", [])

    # Write or copy the requirements to the staging directory.
    if inline_requirements:
        # Having both an inline pip_requirements list and a
        # requirements.txt file is ambiguous, so refuse to continue.
        if isfile(source_requirements):
            logger.error("Problem '%s' has both a pip_requirements field and requirements.txt.", problem["name"])
            raise FatalException

        with open(staged_requirements, "w") as f:
            f.writelines("\n".join(inline_requirements))

    elif isfile(source_requirements):
        copy(source_requirements, staged_requirements)

    if logger.getEffectiveLevel() <= logging.DEBUG and isfile(staged_requirements):
        with open(staged_requirements, "r") as f:
            logger.debug("python requirements:\n%s", f.read())

    if isfile(staged_requirements):
        script_lines.append("pip3 install -r {}".format(deployed_requirements))

    if isfile(source_setup_script):
        copy(source_setup_script, join(install_path, "install_dependencies"))

        # Ensure it is executable
        chmod(join(install_path, "install_dependencies"), 0o500)

        script_lines.append("bash -c '{}'".format(deployed_setup_script))

    chmod(debian_path, 0o775)

    postinst_path = join(debian_path, "postinst")
    with open(postinst_path, "w") as f:
        chmod(postinst_path, 0o775)
        contents = "\n".join(script_lines)
        f.write(contents)

        # The script always has a she-bang, so only log when actual
        # commands were appended.
        if len(script_lines) > 1:
            logger.debug("post install:\n%s", contents)
Exemple #15
0
def problem_builder(args, config):
    """
    Main entrypoint for package building operations.

    Packages every problem found under each of the supplied problem
    base paths into a .deb file.

    Args:
        args: argparse namespace providing problem_paths, staging_dir,
            ignore, and out.
        config: deploy configuration (unused here; kept for interface
            compatibility).

    Raises:
        FatalException: if a base path contains no problems.
    """

    def format_deb_file_name(problem):
        """
        Prepare the file name of the deb package according to deb policy.

        Args:
            problem: the problem object

        Returns:
            An acceptable file name for the problem.
        """
        raw_package_name = "{}-{}-{}.deb".format(
            sanitize_name(problem.get("organization", "ctf")),
            sanitize_name(problem.get("pkg_name", problem["name"])),
            sanitize_name(problem.get("version", "1.0-0"))
        )

        return raw_package_name

    # Build the ignore list once, without mutating args.ignore: the old
    # code appended "__staging" on every recursive pass, accumulating
    # duplicates in the caller-visible list.
    ignore = list(args.ignore) + ["__staging"]

    # Iterate rather than tail-recurse over the base paths (the old
    # recursion added a stack frame per path).
    while True:
        problem_base_path = args.problem_paths.pop()

        problem_paths = find_problems(problem_base_path)

        if len(problem_paths) == 0:
            # Use the module logger (not the root logger) like the rest
            # of this file.
            logger.critical("No problems found under '%s'!", problem_base_path)
            raise FatalException

        for problem_path in problem_paths:
            problem = get_problem(problem_path)

            logger.debug("Starting to package: '%s'.", problem["name"])

            # Staging layout: DEBIAN control dir plus the problem's
            # install-time data directory.
            paths = {}
            if args.staging_dir is None:
                paths["staging"] = join(problem_path, "__staging")
            else:
                paths["staging"] = join(args.staging_dir, "__staging")

            paths["debian"] = join(paths["staging"], "DEBIAN")
            paths["data"] = join(paths["staging"], get_problem_root(problem["name"]))
            paths["install_data"] = join(paths["data"], "__files")

            # Make all of the directories; order does not matter with makedirs
            for staging_path in paths.values():
                if not isdir(staging_path):
                    makedirs(staging_path)

            full_copy(problem_path, paths["data"], ignore=ignore)

            # note that this chmod does not work correct if on a vagrant shared folder,
            # so we need to package the problems elsewhere
            chmod(paths["data"], 0o750)

            problem_to_control(problem, paths["debian"])

            postinst_dependencies(problem, problem_path,
                                  paths["debian"], paths["install_data"])

            deb_directory = args.out if args.out is not None else getcwd()

            deb_path = join(deb_directory, format_deb_file_name(problem))

            shell = spur.LocalShell()
            result = shell.run(["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

            # A failed build is logged but deliberately non-fatal here so
            # the remaining problems still get packaged.
            if result.return_code != 0:
                logger.error("Error building problem deb for '%s'.", problem["name"])
                logger.error(result.output)
            else:
                logger.info("Problem '%s' packaged successfully.", problem["name"])

            logger.debug("Cleaning up '%s' staging directory '%s'.", problem["name"], paths["staging"])

            rmtree(paths["staging"])

        if not args.problem_paths:
            return
Exemple #16
0
def deploy_problems(args, config):
    """
    Main entrypoint for problem deployment.

    Loads the existing port map and deployed instances, takes the
    deployment lock, deploys the requested instances of each problem
    (or bundle), and releases the lock.

    Args:
        args: argparse namespace (problem_paths, bundle, instances,
            num_instances, redeploy, dry, debug, secret,
            deployment_directory).
        config: deploy configuration object.

    Raises:
        FatalException: on invalid arguments, missing bundles/problems,
            or a concurrent deployment.
    """

    global deploy_config, port_map, inv_port_map
    deploy_config = config

    # Only checking that the default user exists; create it otherwise.
    try:
        getpwnam(deploy_config.default_user)
    except KeyError:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    # A fixed deployment directory only makes sense for a single instance
    # of a single problem.
    if args.deployment_directory is not None and (len(args.problem_paths) > 1
                                                  or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple problems or instances."
        )
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warn("Overriding deploy_secret with user supplied secret '%s'.",
                    args.secret)

    problem_names = args.problem_paths

    # With --bundle, expand each bundle (file path or installed bundle
    # name) into its member problem list.
    if args.bundle:
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot deploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                # remove already deployed instances
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            if args.dry and isdir(problem_name):
                deploy_problem(problem_name,
                               instances=todo_instance_list,
                               test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            elif isdir(join(get_problem_root(problem_name, absolute=True))):
                deploy_problem(join(
                    get_problem_root(problem_name, absolute=True)),
                               instances=todo_instance_list,
                               test=args.dry,
                               deployment_directory=args.deployment_directory,
                               debug=args.debug)
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        # The lock is created unconditionally above, so it must be removed
        # unconditionally here. The previous "if not args.dry" guard left
        # a stale lock after every dry run, blocking subsequent deploys
        # until a manual 'shell_manager clean' (undeploy_problems already
        # removes it unconditionally).
        os.remove(lock_file)
Exemple #17
0
def undeploy_problems(args, config):
    """
    Main entrypoint for problem undeployment.

    Removes the requested (and actually deployed) instances of each
    problem or bundle, holding the deployment lock while doing so.

    Args:
        args: argparse namespace (problem_paths, bundle, instances,
            num_instances).
        config: deploy configuration object.

    Raises:
        FatalException: on missing bundles/problems or a concurrent
            deployment.
    """

    problem_names = args.problem_paths

    # With --bundle, expand each bundle (file path or installed bundle
    # name) into its member problem list.
    if args.bundle:
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before undeploying problems, load in already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[problem["name"]] = []
        for instance in get_all_problem_instances(path):
            already_deployed[problem["name"]].append(
                instance["instance_number"])

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot undeploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            problem_root = get_problem_root(problem_name, absolute=True)
            if isdir(problem_root):
                problem = get_problem(problem_root)
                # Only act on instances that are actually deployed.
                instances = [
                    i for i in instance_list
                    if i in already_deployed[problem["name"]]
                ]
                if len(instances) == 0:
                    logger.warn(
                        "No deployed instances %s were found for problem '%s'.",
                        instance_list, problem["name"])
                else:
                    logger.debug("Undeploying problem '%s'.", problem["name"])
                    # Pass the filtered list: the old code passed the raw
                    # instance_list, removing instances beyond what the
                    # success message below claims.
                    remove_instances(problem_name, instances)
                    logger.info(
                        "Problem instances %s were successfully removed from '%s'.",
                        instances, problem["name"])
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        logger.debug("Releasing lock file %s", lock_file)
        os.remove(lock_file)
def bundle_problems(args, config):
    """
    Main entrypoint for generating problem bundles.

    Packages a bundle (given as an installed bundle directory or a
    bundle.json file) into a .deb, verifying that every member problem
    is installed first.

    Args:
        args: argparse namespace (bundle_path, out, staging_dir).
        config: deploy configuration (unused here; kept for interface
            compatibility).

    Raises:
        Exception: if the bundle or one of its problems cannot be found.
    """

    bundle_path = args.bundle_path
    if os.path.isdir(args.bundle_path):
        bundle = get_bundle(args.bundle_path)
        bundle_path = join(args.bundle_path, "bundle.json")
    elif os.path.isfile(args.bundle_path):
        bundle = json.loads(open(args.bundle_path).read())
    else:
        raise Exception("No bundle {}".format(args.bundle_path))

    # Every problem referenced by the bundle must already be installed.
    for problem_name in bundle["problems"]:
        installed_path = get_problem_root(problem_name, absolute=True)
        if not isdir(installed_path) or not get_problem(installed_path):
            raise Exception("'{}' is not an installed problem.".format(problem_name))

    paths = {"working": getcwd() if args.out is None else args.out}

    if args.staging_dir:
        paths["staging"] = join(args.staging_dir, "__staging")
    else:
        paths["staging"] = join(paths["working"], "__staging")

    paths["debian"] = join(paths["staging"], "DEBIAN")
    paths["bundle_root"] = join(paths["staging"], get_bundle_root(bundle["name"]))

    # Bug fix: this loop previously iterated "path.items()" — a NameError,
    # since the dict is named "paths".
    for staging_path in paths.values():
        if not isdir(staging_path):
            makedirs(staging_path)

    # note that this chmod does not work correct if on a vagrant shared folder,
    # so we need to package the problems elsewhere
    chmod(dirname(paths["bundle_root"]), 0o750)

    bundle_to_control(bundle, paths["debian"])

    copied_bundle_path = join(paths["bundle_root"], "bundle.json")
    copyfile(bundle_path, copied_bundle_path)

    def format_deb_file_name(bundle):
        """
        Prepare the file name of the deb package according to deb policy.

        Args:
            bundle: the bundle object

        Returns:
           An acceptable file name for the bundle.
        """
        raw_package_name = "{}-{}-bundle-{}.deb".format(
            bundle.get("organization", "ctf"), bundle["name"], bundle.get("version", "1.0-0")
        )

        return sanitize_name(raw_package_name)

    deb_path = join(paths["working"], format_deb_file_name(bundle))

    shell = spur.LocalShell()
    result = shell.run(["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

    if result.return_code != 0:
        print("Error building bundle deb for '{}'".format(bundle["name"]))
        print(result.output)
    else:
        print("Bundle '{}' packaged successfully.".format(bundle["name"]))

    print("Cleaning up staging directory '{}'.".format(paths["staging"]))

    rmtree(paths["staging"])