Code example #1
File: status.py  Project: tefah/picoCTF
def clean(args):
    """ Main entrypoint for clean """
    # remove staging directories
    if os.path.isdir(STAGING_ROOT):
        logger.info("Removing the staging directories")
        shutil.rmtree(STAGING_ROOT)

    # remove lock file
    release_lock()
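
The acquire_lock() / release_lock() helpers used throughout these examples are not shown on this page. A minimal sketch of the idea, assuming a simple file-based lock (the LOCK_FILE path and both function bodies are hypothetical, not the project's actual implementation):

import os

LOCK_FILE = "/tmp/shell_manager.lock"  # hypothetical path, for illustration only

def acquire_lock():
    # O_CREAT | O_EXCL fails if the lock file already exists, so only one
    # process can hold the lock at a time.
    fd = os.open(LOCK_FILE, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    os.close(fd)

def release_lock():
    # Remove the lock file; ignore the case where it was never created.
    try:
        os.remove(LOCK_FILE)
    except FileNotFoundError:
        pass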
Code example #2
File: deploy.py  Project: tefah/picoCTF
def undeploy_problems(args):
    """
    Main entrypoint for problem undeployment

    Does not remove the installed packages (apt-get remove [sanitized name with hash]).
    Does not remove the problem from the web server (delete it from the mongo db).
    """

    problem_names = args.problem_names

    if len(problem_names) == 0:
        logger.error("No problem name(s) specified")
        raise FatalException

    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to undeploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]

    acquire_lock()

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue

            instances_to_remove = copy(instance_list)
            deployed_instances = set()
            for instance in get_all_problem_instances(problem_name):
                deployed_instances.add(instance["instance_number"])
            instances_to_remove = list(
                set(instances_to_remove).intersection(deployed_instances))

            if len(instances_to_remove) == 0:
                logger.warning(
                    f"No deployed instances found for {problem_name}")
                continue

            remove_instances(problem_name, instances_to_remove)
    finally:
        execute(["service", "xinetd", "restart"], timeout=60)
        release_lock()
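
The instance-selection step above reduces to a set intersection between the instances requested on the command line and the instances actually deployed for the problem. A standalone sketch of that filtering, using made-up sample data:

requested = [0, 1, 2, 3]   # from --instances, or range(num_instances)
deployed = {1, 3, 7}       # instance_number values reported by get_all_problem_instances

# Only instances that are both requested and deployed can be removed.
instances_to_remove = sorted(set(requested) & deployed)
print(instances_to_remove)  # [1, 3]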
Code example #3
def install_problem(problem_path, allow_reinstall=False):
    """
    Install a problem from a source directory.

    Args:
        problem_path: path to the problem source directory
        allow_reinstall: if True, reinstall the problem even if it is
            already installed
    """
    problem_obj = get_problem(problem_path)
    if (
        os.path.isdir(get_problem_root_hashed(problem_obj, absolute=True))
        and not allow_reinstall
    ):
        logger.error(
            f"Problem {problem_obj['unique_name']} is already installed. You may specify --reinstall to reinstall an updated version from the specified directory."
        )
        return
    logger.info(f"Installing problem {problem_obj['unique_name']}...")

    acquire_lock()

    staging_dir_path = generate_staging_directory(
        problem_name=problem_obj["unique_name"]
    )
    logger.debug(
        f"{problem_obj['unique_name']}: created staging directory"
        + f" ({staging_dir_path})"
    )

    generated_deb_path = package_problem(
        problem_path, staging_path=staging_dir_path, out_path=DEB_ROOT
    )
    logger.debug(f"{problem_obj['unique_name']}: created debian package")

    try:
        subprocess.run(
            "DEBIAN_FRONTEND=noninteractive apt-get -y install "
            + f"--reinstall {generated_deb_path}",
            shell=True,
            check=True,
            stdout=subprocess.PIPE,
        )
    except subprocess.CalledProcessError:
        logger.error("An error occurred while installing problem packages.")
        raise FatalException
    finally:
        release_lock()
    logger.debug(f"{problem_obj['unique_name']}: installed package")
    logger.info(f"{problem_obj['unique_name']} installed successfully")
Code example #4
File: install.py  Project: penny-1995/picoCTF
def uninstall_problem(problem_name):
    """
    Uninstalls a given problem, which means that the generated debian package
    and source files within the SHARED_ROOT directory are removed.

    An uninstalled problem will no longer appear when listing problems, even
    if deployed instances remain (undeploying all instances of a problem
    before uninstallation is recommended).

    Additionally, any assigned instance ports for the problem will be
    removed from the port map.
    """
    acquire_lock()

    try:
        # Remove .deb package used to install dependencies on deployment
        os.remove(join(DEB_ROOT, problem_name + '.deb'))

        # Remove problem source used for templating instances
        shutil.rmtree(join(PROBLEM_ROOT, problem_name))

        # Remove any ports assigned to this problem from the port map
        port_map_path = join(SHARED_ROOT, 'port_map.json')
        with open(port_map_path, 'r') as f:
            port_map = json.load(f)
            port_map = {literal_eval(k): v for k, v in port_map.items()}

        port_map = {k: v for k, v in port_map.items() if k[0] != problem_name}

        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

    except Exception as e:
        logger.error(f"An error occurred while uninstalling {problem_name}:")
        logger.error(f'{str(e)}')
        raise FatalException

    logger.info(f"{problem_name} removed successfully")
    release_lock()
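
The port map is keyed by (problem_name, instance_number) tuples, which JSON cannot represent directly, so these examples serialize keys with repr() and recover them with ast.literal_eval(). A self-contained round-trip sketch (the problem name and ports are sample data):

import json
from ast import literal_eval

port_map = {("my-problem-abc123", 0): 8000, ("my-problem-abc123", 1): 8001}

# Serialize: JSON object keys must be strings, so repr() the tuple keys.
serialized = json.dumps({repr(k): v for k, v in port_map.items()})

# Deserialize: literal_eval() turns "('my-problem-abc123', 0)" back into a tuple.
restored = {literal_eval(k): v for k, v in json.loads(serialized).items()}
assert restored == port_map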
Code example #5
File: deploy.py  Project: tefah/picoCTF
def deploy_problems(args):
    """ Main entrypoint for problem deployment """

    global FLAG_FMT
    if args.flag_format:
        FLAG_FMT = args.flag_format
        logger.info(f"Deploying with custom flag format: {FLAG_FMT}")

    shared_config, local_config, port_map = deploy_init(args.containerize)

    need_restart_xinetd = False

    try:
        user = getpwnam(shared_config.default_user)
    except KeyError as e:
        logger.info(
            "default_user '%s' does not exist. Creating the user now.",
            shared_config.default_user,
        )
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == "all":
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v["unique_name"] for k, v in get_all_problems().items()
        ]

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    if args.containerize and (len(problem_names) > 1
                              or len(instance_list) > 1):
        logger.error("can only deploy a single instance per container")
        return

    acquire_lock()
    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)

            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location,
                               instances=instances_to_deploy,
                               test=args.dry,
                               debug=args.debug,
                               restart_xinetd=False,
                               containerize=args.containerize)
            else:
                logger.info(
                    "No additional instances to deploy for '%s'.",
                    problem_object["unique_name"],
                )
    finally:
        # Restart xinetd unless --no-restart was given; otherwise the
        # service must be restarted manually
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(PORT_MAP_PATH, "w") as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()
Code example #6
File: deploy.py  Project: penny-1995/picoCTF
def deploy_problems(args):
    """ Main entrypoint for problem deployment """

    global shared_config, local_config, port_map
    shared_config = get_shared_config()
    local_config = get_local_config()

    need_restart_xinetd = False

    try:
        user = getpwnam(shared_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    shared_config.default_user)
        create_user(shared_config.default_user)

    problem_names = args.problem_names

    if len(problem_names) == 1 and problem_names[0] == 'all':
        # Shortcut to deploy n instances of all problems
        problem_names = [
            v['unique_name'] for k, v in get_all_problems().items()
        ]

    # Attempt to load the port_map from file
    try:
        port_map_path = join(SHARED_ROOT, 'port_map.json')
        with open(port_map_path, 'r') as f:
            port_map = json.load(f)
            port_map = {literal_eval(k): v for k, v in port_map.items()}
    except FileNotFoundError:
        # If it does not exist, create it
        for path, problem in get_all_problems().items():
            for instance in get_all_problem_instances(path):
                port_map[(problem["unique_name"],
                          instance["instance_number"])] = instance.get(
                              "port", None)
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)
    except IOError:
        logger.error(f"Error loading port map from {port_map_path}")
        raise

    acquire_lock()

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if not isdir(get_problem_root(problem_name, absolute=True)):
                logger.error(f"'{problem_name}' is not an installed problem")
                continue
            source_location = get_problem_root(problem_name, absolute=True)

            problem_object = get_problem(source_location)

            instances_to_deploy = copy(instance_list)
            is_static_flag = problem_object.get("static_flag", False)
            if is_static_flag is True:
                instances_to_deploy = [0]

            # Avoid redeploying already-deployed instances
            if not args.redeploy or is_static_flag:
                already_deployed = set()
                for instance in get_all_problem_instances(problem_name):
                    already_deployed.add(instance["instance_number"])
                instances_to_deploy = list(
                    set(instances_to_deploy) - already_deployed)

            if instances_to_deploy:
                deploy_problem(source_location,
                               instances=instances_to_deploy,
                               test=args.dry,
                               debug=args.debug,
                               restart_xinetd=False)
            else:
                logger.info("No additional instances to deploy for '%s'.",
                            problem_object["unique_name"])
    finally:
        # Restart xinetd unless --no-restart was given; otherwise the
        # service must be restarted manually
        if not args.no_restart:
            execute(["service", "xinetd", "restart"], timeout=60)

        # Write out updated port map
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)

        release_lock()
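
All of these entry points read their options from an args namespace (problem_names, instances, num_instances, redeploy, dry, debug, no_restart, containerize, flag_format). The attribute names come from the code above, but the flag spellings below are only a guess at how the shell_manager CLI wires its subcommands, shown to make the examples easier to follow:

import argparse

parser = argparse.ArgumentParser(prog="shell_manager")
sub = parser.add_subparsers(dest="command")

deploy = sub.add_parser("deploy")
deploy.add_argument("problem_names", nargs="*")
deploy.add_argument("-n", "--num-instances", type=int, default=1, dest="num_instances")
deploy.add_argument("-i", "--instances", type=int, nargs="*")
deploy.add_argument("-r", "--redeploy", action="store_true")
deploy.add_argument("--dry", action="store_true")
deploy.add_argument("--debug", action="store_true")
deploy.add_argument("--no-restart", action="store_true", dest="no_restart")
deploy.add_argument("--containerize", action="store_true")
deploy.add_argument("--flag-format", dest="flag_format")

args = parser.parse_args(["deploy", "my-problem", "-n", "2"])
# deploy_problems(args) would then deploy instances 0 and 1 of "my-problem".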