Example #1
 def list_systems(self):
     """ Method to query Cobbler and list all systems defined within its database
     """
     _method_ = "CobblerDiscoveryPlugin.list_systems"
     systems = []
     if self.shell is None:
         self.shell = spur.LocalShell()
     with self.shell:
         try:
             process = self.shell.spawn(["sh", "-c", "sudo -S %s" % COBBLER_CMD_SYSTEM_LIST])
             process.stdin_write(SUDO_PASSWORD + "\n")
             result = process.wait_for_result()
             # iterate through output if command executed successfully
             if result.return_code == 0:
                 for line in result.output.decode('ascii').splitlines():
                     system = line.strip()
                     systems.append(system)
                     logging.info("%s::Found system to be imported from Cobbler::%s", _method_, system)
             else:
                 logging.error("%s::Failed to retrieve systems from Cobbler", _method_)
         except (spur.ssh.NoSuchCommandError, spur.ssh.ConnectionError) as e:
             logging.exception(e)
             logging.error("%s::Connection error - host = %s", _method_, self.shell._hostname)
             raise exceptions.ConnectionException(e)
     return systems
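The pattern above (spawn a "sudo -S" command, feed the password on stdin, then wait for the result) can be shown in isolation. A minimal sketch, assuming spur is installed; the command and password are placeholders:

import spur

shell = spur.LocalShell()
# sudo -S reads the password from stdin; allow_error=True makes
# wait_for_result() return instead of raising on a non-zero exit.
process = shell.spawn(["sh", "-c", "sudo -S whoami"], allow_error=True)
process.stdin_write("hunter2\n")  # placeholder password
result = process.wait_for_result()
if result.return_code == 0:
    print(result.output.decode("ascii").strip())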
Example #2
def local_update(repo_path, deb_paths=[]):
    """
    Updates a local deb repository by copying debs and running scanpackages.

    Args:
        repo_path: the path to the local repository.
        deb_paths: list of problem deb paths to copy.
    """

    if not exists(repo_path):
        logger.info("Creating repository at '%s'.", repo_path)
        makedirs(repo_path)
    elif not isdir(repo_path):
        logger.error("Repository '%s' is not a directory!", repo_path)
        raise FatalException

    for deb_path in deb_paths:
        copy2(deb_path, repo_path)

    shell = spur.LocalShell()
    result = shell.run(["dpkg-scanpackages", ".", "/dev/null"], cwd=repo_path)

    packages_path = join(repo_path, "Packages.gz")
    with gzip.open(packages_path, "wb") as packages:
        packages.write(result.output)

    logger.info("Repository '%s' updated successfully. Copied %d packages.",
                repo_path, len(deb_paths))
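A hypothetical invocation of local_update, assuming the repository directory and the .deb file exist; both paths are placeholders:

local_update("/srv/deb-repo", deb_paths=["./out/my-problem.deb"])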
Example #3
 def _create_challenge_dirs(self):
     self.full_path = os.path.join(self.conf('path'), challenges.HTTP01.URI_ROOT_PATH)
     self.shells = [spur.SshShell(h, missing_host_key=spur.ssh.MissingHostKey.accept) for h in self.conf('host')]
     if not self.conf('nolocal'):
         self.shells.insert(0, spur.LocalShell())
     for s in self.shells:
         s.run(['mkdir', '--mode', '0755', '--parents', self.full_path])
         stat = s.run(['stat', '--format', '%U:%G', self.full_path])
         user_group = stat.output.strip()
         s.run(['chown', user_group, self.full_path])
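Example #3 passes missing_host_key=spur.ssh.MissingHostKey.accept, which skips host-key verification entirely. spur also offers stricter policies; a short sketch with placeholder connection details:

import spur
import spur.ssh

# raise_error (the default) refuses unknown hosts, warn logs and
# continues, accept trusts anything -- use accept only for test hosts.
shell = spur.SshShell(
    hostname="build.example.com",  # placeholder host
    username="deploy",             # placeholder user
    missing_host_key=spur.ssh.MissingHostKey.warn,
)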
Example #4
 @classmethod
 def setUpClass(cls):
     cls.shell = spur.LocalShell()
     cls.shell.run(
         shlex.split(
             'psql test_original -f tests/files/original_test_db.sql'))
     cls.shell.run(
         shlex.split(
             'psql test_updated -f tests/files/updated_test_db.sql'))
     with open('tests/files/json_schema.json', "r") as schema:
         cls.schema = json.load(schema)
Example #5
 def discover_resources(self, resource_label='*', offline=False, add=False):
     """ Method to query all data from all systems defined within Cobbler's database
     """
     _method_ = "CobblerDiscoveryPlugin.discover_resources"
     systems = self.list_systems()
     if self.shell is None:
         self.shell = spur.LocalShell()
     for system in systems:
         if resource_label != '*' and resource_label != system:
             continue
         with self.shell:
             try:
                 process = self.shell.spawn(["sh", "-c", "sudo -S %s %s" % (COBBLER_CMD_SYSTEM_DUMPVARS, system)])
                 process.stdin_write(SUDO_PASSWORD + "\n")
                 result = process.wait_for_result()
                 resource_ipv4 = None
                 resource_breed = None
                 # iterate through output if command executed successfully
                 if result.return_code == 0:
                     for line in result.output.decode('ascii').splitlines():
                         # Parse the output to extract the resource data
                         if COBBLER_TAG_ADDRESS in line:
                             resource_ipv4 = line.split(':')[1].strip()
                         elif COBBLER_TAG_BREED in line:
                             resource_breed = line.split(':')[1].strip()
                 else:
                     logging.error("%s::Failed to import data from Cobbler for system::%s", _method_, system)
                 # Figure out if there is a valid resource type for this system
                 resource_type = COBBLER_DIC_TYPES.get(resource_breed)
                 resource = self.format_resource(system, resource_type, resource_ipv4, DEFAULT_RESOURCE_USER)
                 if resource_type is None:
                     print("Warning:: Cannot find a suitable resource type for %s" % resource)
                 else:
                     print("Discovered from %s: %s" % (self.get_type(), resource))
                     rc1, message = validate_label(system)
                     if rc1 != 0:
                         print("Warning:: %s" % message)
                     rc2, message = validate_address(resource_ipv4)
                     if rc2 != 0:
                         print("Warning:: %s" % message)
                     if add:
                         if rc1 == 0 and rc2 == 0:
                             rc, message = add_resource(system, resource_type, resource_ipv4,
                                                        DEFAULT_RESOURCE_USER, DEFAULT_RESOURCE_PWD,
                                                        offline=offline)
                             if rc == 0:
                                 print("Resource imported: %s" % system)
                             else:
                                 print("Import error: %s" % message)
                         else:
                             print("Cannot import: %s - conflicting resource exists" % system)
             except (spur.ssh.NoSuchCommandError, spur.ssh.ConnectionError) as e:
                 logging.exception(e)
                 logging.error("%s::Connection error - host = %s", _method_, self.shell._hostname)
                 raise exceptions.ConnectionException(e)
Example #6
def get_branches():
    json_filename = '/tmp/branches.json'
    branches_file = os.path.expanduser('~/backupws/branches.py')
    instance_path = os.path.expanduser('~/instance')
    command = 'python {branches} -s -p {instance} -f {name}'.format(
        name=json_filename, branches=branches_file, instance=instance_path)
    shell = spur.LocalShell()
    shell.run(shlex.split(command))
    with open(json_filename, "r") as dest:
        branches = json.load(dest)
    return branches
Example #7
def get_docker_password():
    """
    Retrieve the Docker Hub password from the macOS keychain.

    TODO: generalize to support Linux password stores.
    """
    if sys.platform == "darwin":
        cmd = "security find-internet-password -s id.docker.com -a {} -w"
    else:
        cmd = "pass show web/hub.docker.com/{}"
    pswd = spur.LocalShell().run(cmd.format(DOCKER_USER).split())
    return pswd.output.strip()
Example #8
    def run(self, cmd):
        """
            :param cmd: Shell Command. list
            Usage:
                from psystem import shell
                local_shell = shell.Local()
                local_shell.run(['echo', '-n', 'hello'])
        """
        shell = spur.LocalShell()
        result = shell.run(cmd)

        return result.output
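Note that shell.run raises spur.RunProcessError when the command exits non-zero, so this thin wrapper propagates failures as exceptions. A sketch of the two ways to handle that, using "false" as a stand-in for a failing command:

import spur

shell = spur.LocalShell()

# Option 1: default behaviour -- a non-zero exit raises RunProcessError.
try:
    shell.run(["false"])
except spur.RunProcessError as e:
    print("failed with return code", e.return_code)

# Option 2: opt out of raising and inspect the result yourself.
result = shell.run(["false"], allow_error=True)
assert result.return_code != 0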
Example #9
def clone_repo(base_path, repo):
    """
    go to the base_path and clone the repo
    """
    shell = spur.LocalShell()
    output = shell.run(["git","clone", repo], base_path)
    if output == 1:
        logging.info("The repo exists")
    else:
        logging.error("F**k")
        logging.info(output.output)
    return output
Example #10
def db_dump(service):
    service_obj = find_service(service)
    config = load_config(service)
    params = {
        'host': service_obj.node.ip_address,
        'port': service_obj.ports['5432'],
        'timestamp': datetime.now().strftime("%Y%m%d%H%M")
    }
    db_password = config.get('secrets')['NMPI_DATABASE_PASSWORD']
    cmd = "pg_dump --clean --create --insert --host={host} --port={port} --username=nmpi_dbadmin --dbname=nmpi --file=nmpi_v2_dump_{timestamp}.sql".format(
        **params)
    shell = spur.LocalShell()
    shell.run(shlex.split(cmd), update_env={"PGPASSWORD": db_password})
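update_env adds to (or overrides entries in) the environment the child process inherits rather than replacing it wholesale, which is why PGPASSWORD can be injected here without losing PATH. A quick sketch with a placeholder value:

import spur

shell = spur.LocalShell()
# Only PGPASSWORD is added; the rest of the environment is inherited.
result = shell.run(["sh", "-c", "echo $PGPASSWORD"],
                   update_env={"PGPASSWORD": "s3cret"})
print(result.output)  # b's3cret\n'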
Example #11
def get_token():
    """
    Retrieve the DigitalOcean API token from the macOS keychain.

    TODO: generalize to support Linux password stores.
    """
    if sys.platform == "darwin":
        cmd = [
            'security', 'find-generic-password', '-s',
            'DigitalOcean API token', '-w'
        ]
    else:
        cmd = ['pass', 'show', 'tokens/digitalocean']
    token = spur.LocalShell().run(cmd, encoding='utf-8')
    return token.output.strip()
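Unlike Example #7, this version passes encoding='utf-8', so result.output arrives as str and needs no .decode(). A minimal comparison:

import spur

shell = spur.LocalShell()
raw = shell.run(["echo", "hello"])                     # output is bytes
text = shell.run(["echo", "hello"], encoding="utf-8")  # output is str
assert raw.output == b"hello\n"
assert text.output == "hello\n"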
Example #12
def create_folders(base_path, account):
    """create a working folder structure based on the account"""
    wanted_path = define_path(base_path, account)
    logging.debug(wanted_path)
    try:
        shell = spur.LocalShell()
        output = shell.run(["mkdir","-p", wanted_path])
        logging.info("Created working folder on {0}".format(wanted_path))
        return True
    except OSError as e:
        if e.errno == 17:  # EEXIST
            logging.info(wanted_path + " " + e.strerror)
            return True
        else:
            logging.error(wanted_path + " " + e.strerror)
            return False
Example #13
def build(service, colour):
    """Build a Docker image locally and push to Docker Hub."""
    repo = git.Repo('.', search_parent_directories=True)
    git_tag = repo.head.commit.hexsha[:7]
    if repo.is_dirty():
        git_tag += "z"

    shell = spur.LocalShell()
    config = load_config(service)
    image = config["image"]
    logger.info(
        "Building image '{}' for service '{}', environment '{}', version {}".
        format(image, service, colour, git_tag))

    # build image
    build_directory = os.getcwd()
    dockerfile = config["dockerfile"]
    cmd = "docker build -t {} -f {} .".format(image, dockerfile)

    # write version information
    with open(join(build_directory, "build_info.json"), "w") as fp:
        json.dump(
            {
                "git": git_tag,
                "colour": colour,
                "date": datetime.now().isoformat()
            }, fp)

    click.echo("Building image")
    result = shell.run(cmd.split(), cwd=build_directory, allow_error=True)
    logger.debug(result.output)
    if result.return_code != 0:
        click.echo(result.output)
        raise click.Abort()

    # tag image
    colour_tag = colour or "latest"
    for tag in (colour_tag, git_tag):
        cmd = "docker tag {} {}:{}".format(image, image, tag)
        shell.run(cmd.split())

    # push image
    cmd = "docker push {}:{}".format(image, colour_tag)
    click.echo("Pushing image")
    result = shell.run(cmd.split())
    logger.debug(result.output)
    logger.info("Pushed image {}:{}".format(image, colour_tag))
Example #14
def send_key(username, host):
    # copy the user's public key to the host
    local_shell = spur.LocalShell()
    local_shell.run([
        "scp", "-i", config.ADMIN_KEY_PATH, config.KEY_STORAGE_PATH % username,
        "ubuntu@%s:%s/%s.pub" % (host, config.ADMIN_HOME, username)
    ])

    shell = spur.SshShell(hostname=host,
                          username=config.ADMIN_USERNAME,
                          private_key_file=config.ADMIN_KEY_PATH,
                          missing_host_key=spur.ssh.MissingHostKey.warn)

    # create user
    shell.run(["sudo", "useradd", "-m", username])
    shell.run(["sudo", "usermod", "-aG", "sudo", username])
    shell.run(["sudo", "mkdir", "/home/%s/.ssh" % username])

    # save key
    shell.run([
        "sudo", "sh", "-c",
        "cat %s/%s.pub >> /home/%s/.ssh/authorized_keys" %
        (config.ADMIN_HOME, username, username)
    ])
    shell.run([
        "sudo", "chown", "-R",
        "%s:%s" % (username, username),
        "/home/%s/.ssh" % username
    ])
    shell.run(
        ["sudo", "chmod", "600",
         "/home/%s/.ssh/authorized_keys" % username])
    shell.run(["sudo", "chmod", "700", "/home/%s/.ssh" % username])

    # configure server
    shell.run(["sudo", "chsh", "-s", "/bin/bash", username])
    local_shell.run([
        "scp", "-i", config.KEY_STORAGE_PATH % username, config.VIMRC_PATH,
        "%s@%s:~/.vimrc" % (username, host)
    ])
    local_shell.run([
        "scp", "-i", config.KEY_STORAGE_PATH % username, config.BASHRC_PATH,
        "%s@%s:~/.bashrc" % (username, host)
    ])
Example #15
def run_makefile(make_directory):
    """
    Runs a makefile in a given directory.

    Args:
        make_directory: directory where the Makefile is located.
    """

    make_path = path.join(make_directory, "Makefile")

    if not path.isfile(make_path):
        raise InternalException(make_path + " does not exist.")

    shell = spur.LocalShell()

    try:
        shell.run(["make", "-C", make_directory])
    except Exception as e:
        raise InternalException(str(e))
Example #16
def db_restore(service, filename):
    service_obj = find_service(service)
    config = load_config(service)
    params = {
        'host': service_obj.node.ip_address,
        'port': service_obj.ports['5432'],
        'filename': filename
    }
    db_password = config.get('secrets')['NMPI_DATABASE_PASSWORD']
    shell = spur.LocalShell()
    psql = "psql -h {host} -p {port} --username=postgres".format(**params)
    cmd = """echo "CREATE USER nmpi_dbadmin WITH PASSWORD '{}';" | """.format(
        db_password) + psql
    print(cmd)
    #print shlex.split(cmd)
    pg_password = getpass("Enter the password for the 'postgres' user: ")
    shell.run(["sh", "-c", cmd], update_env={"PGPASSWORD": pg_password})
    cmd = psql + " < {filename}".format(**params)
    print(cmd)
    #print shlex.split(cmd)
    shell.run(["sh", "-c", cmd], update_env={"PGPASSWORD": pg_password})
Example #17
def main():
    opcao = input('Use the last configuration? s/n ')
    if opcao == 's':
        try:
            config = load()
        except Exception:
            print(
                'Could not load the last configuration; please restart '
                'the program with new settings'
            )
            return
    elif opcao == 'n':
        config = {}
        config['hostname'] = input('hostname:')
        config['username'] = input('username:')
        config['password'] = input('password:')
        config['local_directory'] = input('local directory:')
        config['remote_directory'] = input('remote directory:')
        save(config)
    else:
        print('Invalid option')
        return
    remote_shell = spur.SshShell(hostname=config['hostname'],
                                 username=config['username'],
                                 password=config['password'])
    local_shell = spur.LocalShell()
    observer = Observer()
    bh = BeagleHandler(remote_shell, local_shell, config['local_directory'],
                       config['remote_directory'])
    observer.schedule(bh, config['local_directory'], recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    except spur.CouldNotChangeDirectoryError:
        print(
            "Could not find the directories; please check that they exist"
        )
    observer.join()
Example #18
def terminal_open():
    """
    Opens a new terminal session

    :return: the id of the terminal session
    """

    term_num = len(current_app.config['terminal'].keys())

    current_app.config['terminal'][term_num] = {}

    if hasSpur:
        s = spur.LocalShell()

        current_app.config['terminal'][term_num]['shell'] = s
    else:
        current_app.config['terminal'][term_num]['shell'] = None

    current_app.config['terminal'][term_num]['workingdir'] = str(
        Path().resolve())

    return str(term_num)
Example #19
    def __init__(self,
                 hostname: Optional[str] = None,
                 username: Optional[str] = None,
                 password: Optional[str] = None,
                 cwd: Optional[PathLike] = None,
                 executable: Optional[PathLike] = None,
                 env_additions: Optional[Dict[str, str]] = None,
                 connect: bool = True,
                 **connection_kwargs) -> None:
        # ADD DOCSTRING -- note circular issue: if using an executable
        #  other than /bin/bash, you should either pass it to the
        #  constructor or set it before connecting; otherwise some
        #  environment variables may be missing or incorrect. When first
        #  connecting, environment variables are read from the output of
        #  printenv, which is affected by the executable used to run the
        #  command. So even though $SHELL will be read and used as the
        #  default executable if one is not explicitly set before
        #  connecting, other environment variables will reflect having
        #  been read from a bash shell.

        self._env_additions = env_additions or dict()
        self._environ = None
        self._cwd = cwd
        self._executable = executable
        self.connected = False
        if hostname == 'localhost':
            self._shell = spur.LocalShell()
            self._hostname = socket.gethostname()
            self._username = getpass.getuser()
            self._port = None
        else:
            self._shell = None
            self._hostname = hostname
            self._username = username
            self._port = connection_kwargs.pop('port', None)
            if connect:
                self.connect(password=password, **connection_kwargs)
Example #20
    def __init__(self,
                 exe,
                 cloud=False,
                 cloud_hostname=None,
                 cloud_username=None,
                 cloud_private_key_file=None):
        """Constructor for an AI player.

        'name' is the name of the player (string).
        'profile' is the profile to use for the AI (Profile).
        'level' is the difficulty level to use (string).
        """

        self.__positionCommand = 'position startpos'
        self.__haveMoves = False

        self.readyToConfigure = False
        self.__options = None

        self.ready = False
        self.__inCallback = False
        self.eng_process = None
        self.cloud = cloud

        self.STATE_IDLE = 'IDLE'
        self.STATE_CONNECTING = 'CONNECTING'

        self.options = {}
        # self.set_options = ()
        self.available_options = {}
        self.engine_info = {}
        self.__queuedCommands = []
        self.eng_process = None
        # "engines/stockfish4-mac-64"
        try:
            if not cloud:
                shell = spur.LocalShell()
                self.eng_process = shell.spawn(exe,
                                               stdout=subprocess.PIPE,
                                               store_pid=True)

            else:
                print "Trying cloud connect.."
                print "cloud_hostname : {0}".format(cloud_hostname)
                print "cloud_username : {0}".format(cloud_username)
                print "cloud_private_key_file : {0}".format(
                    cloud_private_key_file)

                shell = spur.SshShell(
                    hostname=cloud_hostname,
                    username=cloud_username,
                    private_key_file=cloud_private_key_file,
                    missing_host_key=paramiko.AutoAddPolicy())
                self.eng_process = shell.spawn([exe],
                                               stdout=subprocess.PIPE,
                                               store_pid=True,
                                               allow_error=True)

            process_stdout = self.eng_process._stdout if cloud else self.eng_process._subprocess.stdout

            self.buffer = Queue()
            t = Thread(target=self.enqueue_output,
                       args=(process_stdout, self.buffer))
            t.daemon = True  # thread dies with the program
            t.start()

        except OSError:
            print "OS error in starting engine"
Example #21
def problem_builder(args, config):
    """
    Main entrypoint for package building operations.
    """

    if not args.problem_paths:
        print(
            "usage: shell_manager package [-h] [-s STAGING_DIR] [-o OUT] [-i IGNORE] problem_path"
        )
        print(
            "shell_manager bundle: error: the following arguments are required: problem_path"
        )
        raise FatalException

    # Grab a problem_path
    problem_base_path = args.problem_paths.pop()

    problem_paths = find_problems(problem_base_path)

    if len(problem_paths) == 0:
        logging.critical("No problems found under '%s'!", problem_base_path)
        raise FatalException

    for problem_path in problem_paths:
        problem = get_problem(problem_path)

        logger.debug("Starting to package: '%s'.", problem["name"])

        paths = {}
        if args.staging_dir is None:
            paths["staging"] = join(problem_path, "__staging")
        else:
            paths["staging"] = join(args.staging_dir, "__staging")

        paths["debian"] = join(paths["staging"], "DEBIAN")
        paths["data"] = join(paths["staging"],
                             get_problem_root(problem["name"]))
        paths["install_data"] = join(paths["data"], "__files")

        # Make all of the directories, order does not matter with makedirs
        for staging_path in paths.values():
            if not isdir(staging_path):
                makedirs(staging_path)

        args.ignore.append("__staging")

        full_copy(problem_path, paths["data"], ignore=args.ignore)

        # note that this chmod does not work correctly if on a vagrant shared folder,
        # so we need to package the problems elsewhere
        chmod(paths["data"], 0o750)

        problem_to_control(problem, paths["debian"])

        postinst_dependencies(problem, problem_path, paths["debian"],
                              paths["install_data"])

        deb_directory = args.out if args.out is not None else getcwd()

        def format_deb_file_name(problem):
            """
            Prepare the file name of the deb package according to deb policy.

            Args:
                problem: the problem object

            Returns:
            An acceptable file name for the problem.
            """

            raw_package_name = "{}-{}-{}.deb".format(
                sanitize_name(problem.get("organization", "ctf")),
                sanitize_name(problem.get("pkg_name", problem["name"])),
                sanitize_name(problem.get("version", "1.0-0")))

            return raw_package_name

        deb_path = join(deb_directory, format_deb_file_name(problem))

        shell = spur.LocalShell()
        result = shell.run(
            ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

        if result.return_code != 0:
            logger.error("Error building problem deb for '%s'.",
                         problem["name"])
            logger.error(result.output)
        else:
            logger.info("Problem '%s' packaged successfully.", problem["name"])

        logger.debug("Clearning up '%s' staging directory '%s'.",
                     problem["name"], paths["staging"])

        rmtree(paths["staging"])

    if len(args.problem_paths) >= 1:
        return problem_builder(args, config)
Example #22
 def create_shell(self):
     return spur.LocalShell()
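A factory method like this keeps callers shell-agnostic: a subclass can return an spur.SshShell and everything downstream keeps working. A sketch under that assumption; the classes and connection details are hypothetical:

import spur
import spur.ssh

class LocalRunner:
    def create_shell(self):
        return spur.LocalShell()

class RemoteRunner(LocalRunner):  # hypothetical subclass
    def __init__(self, host, user):
        self.host = host
        self.user = user

    def create_shell(self):
        return spur.SshShell(hostname=self.host, username=self.user,
                             missing_host_key=spur.ssh.MissingHostKey.warn)

# Call sites depend only on the create_shell() interface.
print(LocalRunner().create_shell().run(["uname", "-a"]).output)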
Example #23
import sys

import spur
import spur.ssh

if len(sys.argv) < 4:
    print("Usage: python load_testing <pem_file> <testing_script> <tsung_host> <copy_down=False>")
    exit(1)

pem_file = sys.argv[1]
testing_script = sys.argv[2]
tsung_host = sys.argv[3]
tsung_home_dir = "ec2-user@{0}:~".format(tsung_host)
copy_down = False

if len(sys.argv) == 5 and sys.argv[4] == "true":
    copy_down = True

local_shell = spur.LocalShell()
scp_result = local_shell.run(["scp", "-i", pem_file, testing_script, tsung_home_dir])
print(scp_result.output)

ec2_shell = spur.SshShell(
	hostname=tsung_host,
	username="******",
	private_key_file=pem_file,
	missing_host_key=spur.ssh.MissingHostKey.accept
)

testing_file_name = testing_script.split("/")[-1]
print(testing_file_name)
tsung_result = ec2_shell.run(["tsung", "-f", testing_file_name, "start"])
print(tsung_result.output)
Example #24
def package_problem(problem_path,
                    staging_path=None,
                    out_path=None,
                    ignore_files=None):
    """
    Does the work of packaging a single problem.

    Args:
        problem_path (str): path to the problem directory
        staging_path (str, optional): path to a temporary.
            staging directory for packaging this problem.
        out_path (str, optional): path to an output directory
            for the resultant .deb package.
        ignore_files (list of str, optional): filenames to exclude
            when packaging this problem.
    Returns:
        str: the absolute path to the packaged problem
    """
    if ignore_files is None:
        ignore_files = []
    problem = get_problem(problem_path)
    logger.debug("Starting to package: '%s'.", problem["name"])

    # Create staging directories needed for packaging
    paths = {}
    if staging_path is None:
        paths["staging"] = join(problem_path, "__staging")
    else:
        paths["staging"] = join(staging_path, "__staging")
    paths["debian"] = join(paths["staging"], "DEBIAN")
    paths["data"] = join(paths["staging"], get_problem_root_hashed(problem))
    paths["install_data"] = join(paths["data"], "__files")
    for path in paths.values():
        if not isdir(path):
            makedirs(path)

    # Copy the problem files to the staging directory
    ignore_files.append("__staging")
    full_copy(problem_path, paths["data"], ignore=ignore_files)
    # note that this chmod does not work correctly if on a vagrant shared folder,
    # so we need to package the problems elsewhere
    chmod(paths["data"], 0o750)
    problem_to_control(problem, paths["debian"])
    postinst_dependencies(problem, problem_path, paths["debian"],
                          paths["install_data"])

    # Package the staging directory as a .deb
    def format_deb_file_name(problem):
        """
        Prepare the file name of the deb package according to deb policy.

        Args:
            problem: the problem object

        Returns:
            An acceptable file name for the problem.
        """

        raw_package_name = "{}.deb".format(
            sanitize_name(problem["unique_name"]))
        return raw_package_name

    deb_directory = out_path if out_path is not None else getcwd()
    deb_path = join(deb_directory, format_deb_file_name(problem))
    shell = spur.LocalShell()
    result = shell.run(
        ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])
    if result.return_code != 0:
        logger.error("Error building problem deb for '%s'.", problem["name"])
        logger.error(result.output)
        raise FatalException
    else:
        logger.debug("Problem '%s' packaged successfully.",
                     problem["unique_name"])

    # Remove the staging directory
    logger.debug("Cleaning up '%s' staging directory '%s'.", problem["name"],
                 paths["staging"])
    rmtree(paths["staging"])

    return os.path.abspath(deb_path)
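A hypothetical call to package_problem; the paths are placeholders, and the problem directory must hold whatever metadata get_problem() expects:

deb_path = package_problem("problems/buffer-overflow", out_path="/tmp/debs")
print("Built:", deb_path)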
Example #25
    def __init__(self, cpus: int,
                 mem: int,
                 hostname: str,
                 datacenter: str,
                 count: int,
                 transient=False):

        # Instantiate IBM cloud API object

        self._sl_client = SoftLayer.create_client_from_env()
        self.cloud_mgr = VSManager(self._sl_client)

        self._cpus = cpus
        self._mem = mem
        self._hostname = hostname
        self._count = count
        self._transient = transient

        # Load redis password
        self.redis_pw = os.environ['REDIS_PW']

        self.own_ip = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr']
        print("Determined that own IP is", self.own_ip)

        # Restart redis server
        print("Starting local redis server")
        start_redis_script = """
        if [ "$(docker ps -aq -f name=redis)" ]; then \
            docker rm -f redis ; \
        fi ; \
        docker run -d -p 6379:6379 --name redis redis --requirepass {0}
        """.format(self.redis_pw)
        result = self._shell_run_script(spur.LocalShell(), start_redis_script)
        if result.return_code != 0:
            print("Error while starting local redis server")
            print(result.stderr_output.decode('utf-8'))
            exit(-1)

        # Build and save worker docker image
        print("Building worker docker image")
        build_docker_image = """
            docker build . -t invaders && \
            docker save invaders | pigz > invaders.tar.gz
        """
        result = self._shell_run_script(spur.LocalShell(), build_docker_image)
        if result.return_code != 0:
            print("Error while building worker docker image")
            print(result.stderr_output.decode('utf-8'))
            exit(-1)

        # Create all the hostnames
        hostnames = [self._hostname + '-' + str(i) for i in range(self._count)]
        # Keep aside only the ones that are not already instantiated on IBM Cloud
        instances_list = self.cloud_mgr.list_instances()
        hostnames_list = [e['hostname'] for e in instances_list]

        # List of the VMs to instantiate
        hostnames_nonexistant = [h for h in hostnames if h not in hostnames_list]

        datacenters = [datacenter] * len(hostnames_nonexistant)

        if len(hostnames_nonexistant) > 0:
            print("Requesting the VMs...")
            vm_settings = [{
                'hostname': h,
                'domain': 'IBM.cloud',
                'datacenter': d,
                'dedicated': False,
                'private': True,
                'cpus': self._cpus,
                'os_code': 'CENTOS_7_64',
                'local_disk': False,
                'memory': self._mem * 1024,
                'tags': 'worker, ga',
                'nic_speed': 100
            }
                for h, d in zip(hostnames_nonexistant, datacenters)]

            # Request the machines 10 at a time
            vm_settings = [vm_settings[x:x + 10] for x in range(0, len(vm_settings), 10)]
            for s in vm_settings:
                self.cloud_mgr.create_instances(config_list=s)

        # Get the IDs of the VMs we'll use
        self._vm_ids = [e['id'] for e in self.cloud_mgr.list_instances() if e['hostname'] in hostnames]

        print("Waiting for the VMs to be available + set-up (in background thread)")
        self.pool = ThreadPool(processes=10)  # Limit to 10 to avoid hitting the API calls limit
        self._setup_results = self.pool.map_async(self._setup_vm, self._vm_ids)
Example #26
def bundle_problems(args, config):
    """
    Main entrypoint for generating problem bundles.
    """

    bundle_path = args.bundle_path
    if os.path.isdir(args.bundle_path):
        bundle = get_bundle(args.bundle_path)
        bundle_path = join(args.bundle_path, "bundle.json")
    elif os.path.isfile(args.bundle_path):
        bundle = json.loads(open(args.bundle_path).read())
    else:
        logger.error("No bundle could be found at '%s'", args.bundle_path)
        raise FatalException

    logger.debug("Starting to bundle: '%s'.", bundle["name"])

    for problem_name in bundle["problems"]:
        installed_path = get_problem_root(problem_name, absolute=True)
        if not isdir(installed_path) or not get_problem(installed_path):
            logger.error("'%s' is not an installed problem.", problem_name)
            raise FatalException

    paths = {"working": getcwd() if args.out is None else args.out}

    if args.staging_dir:
        paths["staging"] = join(args.staging_dir, "__staging")
    else:
        paths["staging"] = join(paths["working"], "__staging")

    paths["debian"] = join(paths["staging"], "DEBIAN")
    paths["bundle_root"] = join(paths["staging"],
                                get_bundle_root(bundle["name"]))

    for staging_path in paths.values():
        if not isdir(staging_path):
            makedirs(staging_path)

    # note that this chmod does not work correctly if on a vagrant shared folder,
    # so we need to package the problems elsewhere
    chmod(dirname(paths["bundle_root"]), 0o750)

    bundle_to_control(bundle, paths["debian"])

    copied_bundle_path = join(paths["bundle_root"], "bundle.json")
    copyfile(bundle_path, copied_bundle_path)

    def format_deb_file_name(bundle):
        """
        Prepare the file name of the deb package according to deb policy.

        Args:
            bundle: the bundle object

        Returns:
           An acceptable file name for the bundle.
        """

        raw_package_name = "{}-{}-bundle-{}.deb".format(
            sanitize_name(bundle.get("organization", "ctf")),
            sanitize_name(bundle["name"]),
            sanitize_name(bundle.get("version", "1.0-0")))

        return raw_package_name

    deb_path = join(paths["working"], format_deb_file_name(bundle))

    shell = spur.LocalShell()
    result = shell.run(
        ["fakeroot", "dpkg-deb", "--build", paths["staging"], deb_path])

    if result.return_code != 0:
        logger.error("Error building bundle deb for '%s'.", bundle["name"])
        logger.error(result.output)
    else:
        logger.info("Bundle '%s' packaged successfully.", bundle["name"])

    logger.debug("Clearning up '%s' staging directory '%s'.", bundle["name"],
                 paths["staging"])

    rmtree(paths["staging"])
Example #27
def run_process(cwd: str,
                args: list,
                shell=None,
                sudo: Union[bool, str] = None,
                show: bool = False,
                stdout: str = None,
                stdin: str = None,
                timeout: int = 5) -> Tuple[bool, Any]:
    """
    Executes an external process via subprocess.check_output
    :param cwd: Working directory
    :param args: List of command plus its arguments
    :param shell: Either a spur.LocalShell or a spur.SshShell
    :param sudo: Username (or True for root) to use with sudo, False for no sudo
    :param show: Log executed command at info priority before executing
    :param stdout: String to fill with std_out data
    :param stdin: String to supply as std_in data
    :param timeout: Timeout for the process in seconds
    :return: A boolean success flag and the whole output
    :rtype: Tuple[bool, Any]

    """

    log("Running:", cwd, args, lvl=verbose)

    # if shell is None and sudo is None:
    #     check_root()

    def build_command(*things):
        """Construct a command adding sudo if necessary"""

        if sudo not in (None, False, "False"):
            if isinstance(sudo, bool) and sudo is True or sudo == "True":
                user = "******"
            elif isinstance(sudo, str):
                user = sudo
            else:
                log("Malformed run_process call:", things, lvl=error)
                return

            log("Using sudo with user:"******"sudo", "-H", "-u", user] + list(things)
        else:
            log("Not using sudo", lvl=verbose)
            cmd = []
            for thing in things:
                cmd += [thing]

        return cmd

    if shell is None:
        log("Running on local shell", lvl=verbose)
        shell = spur.LocalShell()
    else:
        log("Running on remote shell:", shell, lvl=debug)

    command = build_command(*args)
    log(command, lvl=verbose)

    try:
        if show:
            log("Executing:", command)

        if stdin is not None:
            process = shell.spawn(command,
                                  cwd=cwd,
                                  store_pid=True,
                                  stdout=stdout)
            process.stdin_write(stdin)

            try:
                process._process_stdin.close()  # Local
            except AttributeError:
                process._stdin.close()  # SSH

            begin = time.time()
            waiting = 0.0
            while waiting < timeout and process.is_running():
                waiting = time.time() - begin
            if waiting >= timeout:
                log("Sending SIGHUP", lvl=warn)
                process.send_signal(signal.SIGHUP)

                time.sleep(0.5)
                if process.is_running():
                    log("Sending SIGKILL", lvl=error)
                    process.send_signal(signal.SIGKILL)

            process = process.wait_for_result()
        else:
            process = shell.run(command, cwd=cwd, stdout=stdout)

        decoded = str(process.output, encoding="utf-8")
        log(decoded.replace("\\n", "\n"), lvl=verbose)

        return True, process
    except spur.RunProcessError as e:
        log(
            "Uh oh, the teapot broke again! Error:",
            e,
            type(e),
            lvl=verbose,
            pretty=True,
        )
        log(command, e.args, e.return_code, e.output, lvl=verbose)
        if e.stderr_output not in ("", None, False):
            log("Error output:", e.stderr_output, lvl=error)
        return False, e
    except spur.NoSuchCommandError as e:
        log("Command was not found:", e, type(e), lvl=verbose, pretty=True)
        log(args)
        return False, e
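A usage sketch for run_process, exercising the main flags from the docstring; directories and commands are placeholders, and the sudo variant assumes the caller may run sudo non-interactively:

# Plain local run, no sudo:
success, result = run_process("/tmp", ["ls", "-la"])
if success:
    print(result.output.decode("utf-8"))

# As root via sudo, feeding stdin, with a 10-second timeout:
success, result = run_process("/tmp", ["cat"], sudo=True,
                              stdin="hello\n", timeout=10)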
Example #28
import os
import base64

from nose.tools import istest, assert_equal
import spur
import tempman

from .testing import test_path

_local = spur.LocalShell()


@istest
def html_is_printed_to_stdout_if_output_file_is_not_set():
    docx_path = test_path("single-paragraph.docx")
    result = _local.run(["mammoth", docx_path])
    assert_equal(b"", result.stderr_output)
    assert_equal(b"<p>Walking on imported air</p>", result.output)


@istest
def html_is_written_to_file_if_output_file_is_set():
    with tempman.create_temp_dir() as temp_dir:
        output_path = os.path.join(temp_dir.path, "output.html")
        docx_path = test_path("single-paragraph.docx")
        result = _local.run(["mammoth", docx_path, output_path])
        assert_equal(b"", result.stderr_output)
        assert_equal(b"", result.output)
        with open(output_path) as output_file:
            assert_equal("<p>Walking on imported air</p>", output_file.read())
Example #29
    def _setup_vm(self, id_):
        self.cloud_mgr.wait_for_ready(id_)

        ip = None
        pw = None

        # Sometimes the OS password is not ready on time, so retry 30 times
        for i in range(30):
            try:
                vm_info = self.cloud_mgr.get_instance(id_)
                ip = vm_info['primaryBackendIpAddress']
                pw = vm_info['operatingSystem']['passwords'][0]['password']
            except KeyError:
                sleep(10)
            else:
                break

        assert ip is not None, "Could not retrieve IP address for " + str(id_)
        assert pw is not None, "Could not retrieve password for " + str(id_)

        local_shell = spur.LocalShell()
        shell = spur.SshShell(hostname=ip, username='root', password=pw,
                              missing_host_key=spur.ssh.MissingHostKey.accept,
                              load_system_host_keys=False)

        # Configure the VM
        vm_config_script = """
            ip route replace default via {0} ; \
            yum install -y epel-release && \
            yum install -y wget pxz lbzip2 pigz rsync && \

            wget -q https://get.docker.com/ -O docker_install.sh && \
            sh docker_install.sh && \
            
            sysctl -w net.ipv4.ip_forward=1 && \
            systemctl restart network && \
            
            systemctl enable docker && \
            systemctl restart docker
        """.format(self.own_ip)

        result = self._shell_run_script(shell, vm_config_script)
        if result.return_code != 0:
            print("Error while setting up the VM", id_)
            print(result.stderr_output.decode('utf-8'))
            exit(-1)

        # Uploading the docker image on the VMs
        docker_copy_script = """
            /usr/bin/rsync --verbose --inplace -r \
            --rsh="/usr/bin/sshpass -p {0} \
            ssh -Tx -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Compression=no -l root" \
            invaders.tar.gz \
            {1}:/root/ga/
        """.format(pw, ip)

        result = self._shell_run_script(local_shell, docker_copy_script)
        if result.return_code != 0:
            print("Error while uploading docker image on VM", id_)
            print(result.stderr_output.decode('utf-8'))
            exit(-1)

        # Decompressing the docker image, loading it
        # + changing sshd settings to allow for simultaneous connections

        docker_load_script = """
            cd /root/ga ; \
            docker rm -f invaders ; \
            docker rm -f /invaders ; \
            cat invaders.tar.gz | pigz -d  | docker load ; \
            docker run -d -p 6379:6379 -e \"REDIS_PW={0}\" -e \"REDIS_IP={1}\" --name invaders invaders
            """.format(self.redis_pw, self.own_ip)

        result = self._shell_run_script(shell, docker_load_script)
        if result.return_code != 0:
            print("Error while loading docker image on VM", id_)
            print(result.stderr_output.decode('utf-8'))
            exit(-1)

        return ip, pw
Example #30
def get_isomer(source,
               url,
               destination,
               upgrade=False,
               release=None,
               shell=None,
               sudo=None):
    """Grab a copy of Isomer somehow"""
    success = False
    log("Beginning get_isomer:",
        source,
        url,
        destination,
        upgrade,
        release,
        shell,
        sudo,
        lvl=debug)

    if url in ("", None) and source == "git" and not upgrade:
        abort(EXIT_ISOMER_URL_REQUIRED)

    if source in ("git", "github"):
        if not upgrade or not os.path.exists(
                os.path.join(destination, "repository")):
            log("Cloning repository from", url)
            success, result = run_process(destination,
                                          ["git", "clone", url, "repository"],
                                          shell, sudo)
            if not success:
                log(result, lvl=error)
                abort(50000)

        if upgrade:
            log("Updating repository from", url)

            if release is not None:
                log("Checking out release:", release)
                success, result = run_process(
                    os.path.join(destination, "repository"),
                    ["git", "checkout", "tags/" + release],
                    shell,
                    sudo,
                )
                if not success:
                    log(result, lvl=error)
                    abort(50000)
            else:
                log("Pulling latest")
                success, result = run_process(
                    os.path.join(destination, "repository"),
                    ["git", "pull", "origin", "master"],
                    shell,
                    sudo,
                )
                if not success:
                    log(result, lvl=error)
                    abort(50000)

        repository = os.path.join(destination, "repository")
        log("Initializing submodules")
        success, result = run_process(repository, ["git", "submodule", "init"],
                                      shell, sudo)
        if not success:
            log(result, lvl=error)
            abort(50000)

        #log("Pulling frontend")
        #success, result = run_process(
        #    os.path.join(repository, "frontend"),
        #    ["git", "pull", "origin", "master"],
        #    shell,
        #    sudo,
        #)
        #if not success:
        #    log(result, lvl=error)
        #    abort(50000)

        log("Updating frontend")
        success, result = run_process(repository,
                                      ["git", "submodule", "update"], shell,
                                      sudo)
        if not success:
            log(result, lvl=error)
            abort(50000)
    elif source == "link":
        if shell is not None:
            log(
                "Remote Linking? Are you sure? Links will be local, "
                "they cannot span over any network.",
                lvl=warn,
            )

        path = os.path.abspath(url)

        if not os.path.exists(os.path.join(destination, "repository")):
            log("Linking repository from", path)
            success, result = run_process(destination,
                                          ["ln", "-s", path, "repository"],
                                          shell, sudo)
            if not success:
                log(result, lvl=error)
                abort(50000)
        else:
            log("Repository already exists!", lvl=warn)

        if not os.path.exists(
                os.path.join(destination, "repository", "frontend", "src")):
            log("Linking frontend")
            success, result = run_process(
                destination,
                [
                    "ln", "-s",
                    os.path.join(path, "frontend"), "repository/frontend"
                ],
                shell,
                sudo,
            )
            if not success:
                log(result, lvl=error)
                abort(50000)
        else:
            log("Frontend already present")
    elif source == "copy":
        log("Copying local repository")

        path = os.path.realpath(os.path.expanduser(url))
        target = os.path.join(destination, "repository")

        if shell is None:
            shell = spur.LocalShell()
        else:
            log("Copying to remote")

        log("Copying %s to %s" % (path, target), lvl=verbose)

        shell.upload_dir(path, target, [".tox*", "node_modules*"])

        if sudo is not None:
            success, result = run_process("/", ["chown", sudo, "-R", target])
            if not success:
                log("Could not change ownership to", sudo, lvl=warn)
                abort(50000)
        return True
    else:
        log("Invalid source selected. "
            "Currently, only git, github, copy, link are supported ")

    return success