Example #1
def perform_put(dispatcher, intent):
    """
    See :py:class:`Put`.
    """
    return Effect(
        Run(command='printf -- %s > %s' %
            (shell_quote(intent.content), shell_quote(intent.path))))
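Throughout these examples, shell_quote is presumably an alias for Python's shlex.quote (pipes.quote on Python 2). A minimal sketch, under that assumption, of why the quoting keeps the generated printf command safe; the content and path are made-up values:

from shlex import quote as shell_quote  # assumption: shell_quote == shlex.quote

content = "hello $USER; rm -rf /"  # hostile-looking content stays inert once quoted
path = "/tmp/out file.txt"         # a path with spaces survives as one shell word
print('printf -- %s > %s' % (shell_quote(content), shell_quote(path)))
# printf -- 'hello $USER; rm -rf /' > '/tmp/out file.txt'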
Example #2
def perform_put(dispatcher, intent):
    """
    See :py:class:`Put`.
    """
    return Effect(Run(command='printf -- %s > %s'
                              % (shell_quote(intent.content),
                                 shell_quote(intent.path))))
Example #3
def get_command_line(instance_type, env, message, data, mode, open,
                     tensorboard, command_str):
    """
    Return a string representing the full floyd command entered in the command line
    """
    floyd_command = ["floyd", "run"]
    floyd_command.append('--' + INSTANCE_NAME_MAP[instance_type])
    if env != DEFAULT_ENV:
        floyd_command += ["--env", env]
    if message:
        floyd_command += ["--message", shell_quote(message)]
    if data:
        for data_item in data:
            floyd_command += ["--data", data_item]
    if tensorboard:
        floyd_command.append("--tensorboard")
    if mode != "job":
        floyd_command += ["--mode", mode]
        if mode == 'jupyter':
            if not open:
                floyd_command.append("--no-open")
    else:
        if command_str:
            floyd_command.append(shell_quote(command_str))
    return ' '.join(floyd_command)
Example #4
def get_command_line(instance_type, env, message, data, mode, open_notebook,
                     tensorboard, command_str):
    """
    Return a string representing the full floyd command entered in the command line
    """
    floyd_command = ["floyd", "run"]
    if instance_type:
        floyd_command.append('--' + INSTANCE_NAME_MAP[instance_type])
    if env and env != DEFAULT_ENV:
        floyd_command += ["--env", env]
    if message:
        floyd_command += ["--message", shell_quote(message)]
    if data:
        for data_item in data:
            parts = data_item.split(':')

            if len(parts) > 1:
                data_item = normalize_data_name(
                    parts[0], use_data_config=False) + ':' + parts[1]

            floyd_command += ["--data", data_item]
    if tensorboard:
        floyd_command.append("--tensorboard")
    if mode and mode != "job":
        floyd_command += ["--mode", mode]
        if mode == 'jupyter':
            if not open_notebook:
                floyd_command.append("--no-open")
    else:
        if command_str:
            floyd_command.append(shell_quote(command_str))
    return ' '.join(floyd_command)
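A hypothetical invocation of the helper above; INSTANCE_NAME_MAP and DEFAULT_ENV are stand-in values for illustration, not the real floyd-cli definitions:

INSTANCE_NAME_MAP = {'g1': 'gpu'}  # stand-in value
DEFAULT_ENV = 'tensorflow'         # stand-in value

print(get_command_line('g1', 'pytorch', 'first try', ['mnist'], 'job',
                       False, True, 'python train.py'))
# floyd run --gpu --env pytorch --message 'first try' --data mnist --tensorboard 'python train.py'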
Example #5
def setup_git_deploy(host_IP_address, admin_privkey_path, git_ssh_path, live_path, local_repo_path,
                     src_ref_SHA1):
    if live_path.endswith('/') or not os.path.isabs(live_path):
        bad_path = u"%s" % (live_path,)
        error_message = u"live_path is: '%s': but the path must be absolute and not end with /" % (bad_path)
        raise PathFormatError(error_message)

    q_live_path = shell_quote(live_path)
    q_update_hook_path = shell_quote('%s/.git/hooks/post-update' % (live_path,))

    run('rm -rf %s' % (q_live_path,))
    run_git('init %s' % (q_live_path,))
    write(GIT_DEPLOY_POST_UPDATE_HOOK_TEMPLATE % (live_path,), q_update_hook_path)
    run('chmod -f +x %s' % (q_update_hook_path,))

    unique_tag = tag_local_repo(host_IP_address, local_repo_path, src_ref_SHA1)
    local_git_push = ['/usr/bin/git',
                        '--git-dir=%s' % (local_repo_path,),
                        'push',
                        'website@%s:%s' % (host_IP_address, live_path),
                        '%s:%s' % (unique_tag, unique_tag)]

    env = {}
    env.update(os.environ)
    env['GIT_SSH'] = git_ssh_path
    env['PRIVATE_KEY'] = admin_privkey_path
    subprocess.check_call(local_git_push, env=env)

    q_unique_tag = shell_quote(unique_tag)
    with cd(live_path):
        run_git('checkout %s' % (q_unique_tag,))
        run_git('checkout -b %s' % (q_unique_tag,))
Example #6
def install_key(key_spec):
    if 'url' in key_spec:
        return ('wget -qO - {} | apt-key add -'.format(shell_quote(key_spec['url'])),)

    elif 'keyid' in key_spec:
        keyserver = key_spec.get('keyserver', 'hkp://keyserver.ubuntu.com:80')
        keyid = key_spec['keyid']
        return ('apt-key adv --keyserver {} --recv {}'.format(shell_quote(keyserver), shell_quote(keyid)),)

    else:
        raise Exception('Not sure what to do with key description: {}'.format(key_spec))
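For illustration, the two accepted key_spec shapes yield commands like these (the URL and key ID are placeholders):

print(install_key({'url': 'https://example.com/repo.key'})[0])
# wget -qO - https://example.com/repo.key | apt-key add -
print(install_key({'keyid': '0xACCAF35C'})[0])
# apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 0xACCAF35C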
Example #7
    def cmd_push(self):
        for f_in, f_out in it.chain(
                zip(self.args["<file>"], self.args["<file>"]),
                zip(self.args["<file_in>"], self.args["<file_out>"])):
            rfolder = random_string()
            self.run_ssh_command("mkdir "+shell_quote(rfolder))
            path = osp.join(rfolder, os.path.basename(f_out))
            self.run_scp_command(f_in, path)
            self.run_ssh_command("chown :{1} {0} && chmod g+r {0}".format(
                shell_quote(path), self.www_group))

            print(self.encode_url(path))
Example #8
def save_environment(directory, cluster, package_source):
    """
    Report environment variables describing the cluster.
    The variables are printed on standard output and also
    saved in "environment.env" file.

    :param FilePath directory: The variables are saved in this directory.
    :param Cluster cluster: The cluster.
    :param PackageSource package_source: The source of Flocker omnibus package.
    """
    environment_variables = get_trial_environment(cluster, package_source)
    environment_strings = list()
    for environment_variable in environment_variables:
        environment_strings.append(
            "export {name}={value};\n".format(
                name=environment_variable, value=shell_quote(environment_variables[environment_variable])
            )
        )
    environment = "".join(environment_strings)
    print ("The following variables describe the cluster:")
    print (environment)
    env_file = directory.child("environment.env")
    env_file.setContent(environment)
    print ("The variables are also saved in {}".format(env_file.path))
    print ("Be sure to preserve the required files.")
Example #9
    def cmd_push(self):
        for f_in, f_out in it.chain(
            zip(self.args["<file>"], self.args["<file>"]),
            zip(self.args["<file_in>"], self.args["<file_out>"]),
        ):
            rfolder = random_string()
            self.run_ssh_command("mkdir " + shell_quote(rfolder))
            path = osp.join(rfolder, os.path.basename(f_out))
            self.run_scp_command(f_in, path)
            self.run_ssh_command(
                "chown :{1} {0} && chmod g+r {0}".format(
                    shell_quote(path), self.www_group
                )
            )

            print(self.encode_url(path))
Example #10
def install_source(source_name, source_spec):
    for key_spec in source_spec.get('keys') or ():
        for line in install_key(key_spec):
            yield line

    for line in source_spec.get('sources') or ():
        yield 'echo {} >> /etc/apt/sources.list.d/{}.list'.format(shell_quote(line), source_name)
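A sketch with a hypothetical source_spec; the quoting kicks in because the deb line contains spaces:

spec = {'sources': ['deb http://deb.example.com stable main']}
for line in install_source('example', spec):
    print(line)
# echo 'deb http://deb.example.com stable main' >> /etc/apt/sources.list.d/example.list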
Example #11
def save_environment(directory, cluster, package_source):
    """
    Report environment variables describing the cluster.
    The variables are printed on standard output and also
    saved in "environment.env" file.

    :param FilePath directory: The variables are saved in this directory.
    :param Cluster cluster: The cluster.
    :param PackageSource package_source: The source of Flocker omnibus package.
    """
    environment_variables = get_trial_environment(cluster, package_source)
    environment_strings = list()
    for environment_variable in environment_variables:
        environment_strings.append(
            "export {name}={value};\n".format(
                name=environment_variable,
                value=shell_quote(
                    environment_variables[environment_variable]
                ),
            )
        )
    environment = ''.join(environment_strings)
    print("The following variables describe the cluster:")
    print(environment)
    env_file = directory.child("environment.env")
    env_file.setContent(environment)
    print("The variables are also saved in {}".format(
        env_file.path
    ))
    print("Be sure to preserve the required files.")
Example #12
def _ExecInDocker(container_name,
                  cmd_array,
                  workdir=None,
                  logfile=None,
                  detach=False):
    """Execute in docker container."""
    if not workdir:
        workdir = "/tmp"
    opts = ["-t", "-w", workdir]
    if detach:
        opts += ["-d"]
    # TODO(drpng): avoid quoting hell.
    base_cmd = ["exec"] + opts + [container_name]
    if logfile:
        # The logfile is in the container.
        cmd = " ".join(shell_quote(x) for x in cmd_array)
        cmd += " >& %s" % logfile
        full_cmd = base_cmd + ["bash", "-c", cmd]
    else:
        full_cmd = base_cmd + cmd_array
    ret = _RunDocker(full_cmd)
    if ret != 0:
        sys.stderr.write("Failed to exec within %s: %s" %
                         (container_name, cmd_array))
        sys.exit(ret)
Example #13
    def run(self,
            args,
            stdin_string=None,
            env_extend=None,
            binary_output=False):
        # Allow overriding default settings. If a piece of code really wants to
        # set own PATH or CIB_file, we must allow it. I.e. it wants to run
        # a pacemaker tool on a CIB in a file but cannot afford the risk of
        # changing the CIB in the file specified by the user.
        env_vars = self._env_vars.copy()
        env_vars.update(dict(env_extend) if env_extend else dict())

        log_args = " ".join([shell_quote(x) for x in args])
        self._logger.debug(
            "Running: {args}\nEnvironment:{env_vars}{stdin_string}".format(
                args=log_args,
                stdin_string=("" if not stdin_string else (
                    "\n--Debug Input Start--\n{0}\n--Debug Input End--".format(
                        stdin_string))),
                env_vars=("" if not env_vars else ("\n" + "\n".join([
                    "  {0}={1}".format(key, val)
                    for key, val in sorted(env_vars.items())
                ])))))
        self._reporter.process(
            reports.run_external_process_started(log_args, stdin_string,
                                                 env_vars))

        try:
            process = subprocess.Popen(
                args,
                # Some commands react differently if they get anything via stdin
                stdin=(subprocess.PIPE if stdin_string is not None else None),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=(
                    lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)),
                close_fds=True,
                shell=False,
                env=env_vars,
                # decodes newlines and in python3 also converts bytes to str
                universal_newlines=(not self._python2 and not binary_output))
            out_std, out_err = process.communicate(stdin_string)
            retval = process.returncode
        except OSError as e:
            raise LibraryError(
                reports.run_external_process_error(log_args, e.strerror))

        self._logger.debug(
            ("Finished running: {args}\nReturn value: {retval}" +
             "\n--Debug Stdout Start--\n{out_std}\n--Debug Stdout End--" +
             "\n--Debug Stderr Start--\n{out_err}\n--Debug Stderr End--"
             ).format(args=log_args,
                      retval=retval,
                      out_std=out_std,
                      out_err=out_err))
        self._reporter.process(
            reports.run_external_process_finished(log_args, retval, out_std,
                                                  out_err))
        return out_std, out_err, retval
Example #14
    def _remote_yarsnap(self, cmd):
        if "args" in globals() and hasattr(globals()["args"], "verbosity") and globals()["args"].verbosity > 0:
            cmd += ["-"+"v"*globals()["args"].verbosity]

        cmd_call = shlex.split(self.rsh)
        if self.host[1] is not None:
            cmd_call += ["{1}@{0}".format(*self.host)]
        else:
            cmd_call += [self.host[0]]
        cmd_call += [self.rsh_yarsnap]
        cmd_call += [" ".join([shell_quote(c) for c in cmd])]

        self.logger.info("issuing remote yarsnap: %s", " ".join([shell_quote(c) for c in cmd_call]))
        try:
            return subprocess.check_output(cmd_call, stderr=sys.stderr)
        except subprocess.CalledProcessError as e:
            raise e
Example #15
def setup_git_deploy(host_IP_address, admin_privkey_path, git_ssh_path, live_path, local_repo_gitdir,
                     src_ref_SHA1):
    if live_path.endswith('/') or not os.path.isabs(live_path):
        bad_path = u"%s" % (live_path,)
        error_message = u"live_path is: '%s': but the path must be absolute and not end with /" % (bad_path)
        raise PathFormatError(error_message)

    q_live_path = shell_quote(live_path)
    q_update_hook_path = shell_quote('%s/.git/hooks/post-update' % (live_path,))

    run('rm -rf %s' % (q_live_path,))
    run_git('init %s' % (q_live_path,))
    write(GIT_DEPLOY_POST_UPDATE_HOOK_TEMPLATE % (live_path,), q_update_hook_path)
    run('chmod -f +x %s' % (q_update_hook_path,))

    tag_push_checkout(local_repo_gitdir, src_ref_SHA1, host_IP_address, live_path, git_ssh_path,
                      admin_privkey_path)
Example #16
    def run(
        self, args, stdin_string=None, env_extend=None, binary_output=False
    ):
        #Reset environment variables by empty dict is desired here.  We need to
        #get rid of defaults - we do not know the context and environment of the
        #library.  So executable must be specified with full path.
        env_vars = dict(env_extend) if env_extend else dict()
        env_vars.update(self._env_vars)

        log_args = " ".join([shell_quote(x) for x in args])
        msg = "Running: {args}"
        if stdin_string:
            msg += "\n--Debug Input Start--\n{stdin}\n--Debug Input End--"
        self._logger.debug(msg.format(args=log_args, stdin=stdin_string))
        self._reporter.process(
            reports.run_external_process_started(log_args, stdin_string)
        )

        try:
            process = subprocess.Popen(
                args,
                # Some commands react differently if they get anything via stdin
                stdin=(subprocess.PIPE if stdin_string is not None else None),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=(
                    lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
                ),
                close_fds=True,
                shell=False,
                env=env_vars,
                # decodes newlines and in python3 also converts bytes to str
                universal_newlines=(not self._python2 and not binary_output)
            )
            out_std, out_err = process.communicate(stdin_string)
            retval = process.returncode
        except OSError as e:
            raise LibraryError(
                reports.run_external_process_error(log_args, e.strerror)
            )

        self._logger.debug(
            (
                "Finished running: {args}\nReturn value: {retval}"
                + "\n--Debug Stdout Start--\n{out_std}\n--Debug Stdout End--"
                + "\n--Debug Stderr Start--\n{out_err}\n--Debug Stderr End--"
            ).format(
                args=log_args,
                retval=retval,
                out_std=out_std,
                out_err=out_err
            )
        )
        self._reporter.process(reports.run_external_process_finished(
            log_args, retval, out_std, out_err
        ))
        return out_std, out_err, retval
Example #17
File: alias.py Project: pubs/pubs
    def command(self, conf, args):
        """Uses a shell function so that arguments can be used in the command
        as shell arguments.
        """
        subprocess.call(
            'pubs_alias_fun () {{\n{}\n}}\npubs_alias_fun {}'.format(
                self.definition,
                ' '.join([shell_quote(a) for a in args.arguments])
            ), shell=True)
Example #18
    def vset(self, variable_name, value):
        """Variable set front-end method. If the type of the value is str,
             then --format=string will be used. Otherwise the value will be
             converted to json and --format=json will be used.

        Arguments:
        variable_name -- str, variable name to set
        value         -- Any type that is JSON-encodable or str.
        """
        if type(value) is str:
            format = 'string'
        else:
            format = 'json'
            value = json.dumps(value)

        args = (format, shell_quote(variable_name), shell_quote(value))

        return self.command('vset --exact -y --format=%s %s %s' % args)
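For illustration (the variable names are made up), the strings handed to self.command would look like:

# vset('site_name', 'My Site')  ->  vset --exact -y --format=string site_name 'My Site'
# vset('maintenance', True)     ->  vset --exact -y --format=json maintenance true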
Example #19
    def _issue_rsync(self, params):
        rsync_call = ["rsync"] + params

        self.logger.info("issuing rsync: %s", " ".join([shell_quote(s) for s in rsync_call]))

        print >>sys.stderr, ""
        ret = subprocess.call(rsync_call, stdout=sys.stderr, stderr=sys.stderr)
        print >>sys.stderr, ""

        if ret != 0:
            raise Exception("rsync failed")
Example #20
    def publish(self, log_rec):  # deploy?
        #self._load_custom_mod()
        #self._call_custom_hook('pre_publish')
        prod_config = self.get_config('prod')
        rsync_cmd = prod_config.get('rsync_cmd', 'rsync')
        if not rsync_cmd.isalpha():
            rsync_cmd = shell_quote(rsync_cmd)
        # TODO: add -e 'ssh -o "NumberOfPasswordPrompts 0"' to fail if
        # ssh keys haven't been set up.
        rsync_flags = prod_config.get('rsync_flags', 'avzPk')
        local_site_path = self.output_path
        if not local_site_path.endswith('/'):
            local_site_path += '/'  # not just cosmetic; rsync needs this
        assert os.path.exists(local_site_path + 'index.html')
        remote_host = prod_config['remote_host']
        remote_user = prod_config['remote_user']
        remote_path = prod_config['remote_path']
        remote_slug = "%s@%s:'%s'" % (remote_user,
                                      remote_host,
                                      shell_quote(remote_path))

        full_rsync_cmd = '%s -%s %s %s' % (rsync_cmd,
                                           rsync_flags,
                                           local_site_path,
                                           remote_slug)
        log_rec['rsync_cmd'] = full_rsync_cmd
        print 'Executing', full_rsync_cmd
        try:
            rsync_output = subprocess.check_output(full_rsync_cmd, shell=True)
        except subprocess.CalledProcessError as cpe:
            log_rec['rsync_exit_code'] = cpe.returncode
            rsync_output = cpe.output
            print rsync_output
            log_rec.failure('publish failed: rsync got exit code {rsync_exit_code}')
            return False
        else:
            print rsync_output
            log_rec.success()
        return True
Example #21
    def run(self,
            args,
            stdin_string=None,
            env_extend=None,
            binary_output=False):
        #Reset environment variables by empty dict is desired here.  We need to
        #get rid of defaults - we do not know the context and environment of the
        #library.  So executable must be specified with full path.
        env_vars = dict(env_extend) if env_extend else dict()
        env_vars.update(self._env_vars)

        log_args = " ".join([shell_quote(x) for x in args])
        msg = "Running: {args}"
        if stdin_string:
            msg += "\n--Debug Input Start--\n{stdin}\n--Debug Input End--"
        self._logger.debug(msg.format(args=log_args, stdin=stdin_string))
        self._reporter.process(
            reports.run_external_process_started(log_args, stdin_string))

        try:
            process = subprocess.Popen(
                args,
                # Some commands react differently if they get anything via stdin
                stdin=(subprocess.PIPE if stdin_string is not None else None),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=(
                    lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)),
                close_fds=True,
                shell=False,
                env=env_vars,
                # decodes newlines and in python3 also converts bytes to str
                universal_newlines=(not self._python2 and not binary_output))
            out_std, out_err = process.communicate(stdin_string)
            retval = process.returncode
        except OSError as e:
            raise LibraryError(
                reports.run_external_process_error(log_args, e.strerror))

        self._logger.debug(
            ("Finished running: {args}\nReturn value: {retval}" +
             "\n--Debug Stdout Start--\n{out_std}\n--Debug Stdout End--" +
             "\n--Debug Stderr Start--\n{out_err}\n--Debug Stderr End--"
             ).format(args=log_args,
                      retval=retval,
                      out_std=out_std,
                      out_err=out_err))
        self._reporter.process(
            reports.run_external_process_finished(log_args, retval, out_std,
                                                  out_err))
        return out_std, out_err, retval
Example #22
    def run(
        self, args, ignore_stderr=False, stdin_string=None, env_extend=None,
        binary_output=False
    ):
        env_vars = dict(env_extend) if env_extend else dict()
        env_vars.update(self._env_vars)

        log_args = " ".join([shell_quote(x) for x in args])
        msg = "Running: {args}"
        if stdin_string:
            msg += "\n--Debug Input Start--\n{stdin}\n--Debug Input End--"
        self._logger.debug(msg.format(args=log_args, stdin=stdin_string))
        self._reporter.process(
            reports.run_external_process_started(log_args, stdin_string)
        )

        try:
            process = subprocess.Popen(
                args,
                # Some commands react differently if they get anything via stdin
                stdin=(subprocess.PIPE if stdin_string is not None else None),
                stdout=subprocess.PIPE,
                stderr=(
                    subprocess.PIPE if ignore_stderr else subprocess.STDOUT
                ),
                preexec_fn=(
                    lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
                ),
                close_fds=True,
                shell=False,
                env=env_vars,
                # decodes newlines and in python3 also converts bytes to str
                universal_newlines=(not self._python2 and not binary_output)
            )
            output, dummy_stderror = process.communicate(stdin_string)
            retval = process.returncode
        except OSError as e:
            raise LibraryError(
                reports.run_external_process_error(log_args, e.strerror)
            )

        self._logger.debug(
            (
                "Finished running: {args}\nReturn value: {retval}"
                + "\n--Debug Output Start--\n{output}\n--Debug Output End--"
            ).format(args=log_args, retval=retval, output=output)
        )
        self._reporter.process(
            reports.run_external_process_finished(log_args, retval, output)
        )
        return output, retval
Example #23
File: core.py Project: ra2003/chert
    def publish(self, log_rec):  # deploy?
        #self._load_custom_mod()
        #self._call_custom_hook('pre_publish')
        prod_config = self.get_config('prod')
        rsync_cmd = prod_config.get('rsync_cmd', 'rsync')
        if not rsync_cmd.isalpha():
            rsync_cmd = shell_quote(rsync_cmd)
        # TODO: add -e 'ssh -o "NumberOfPasswordPrompts 0"' to fail if
        # ssh keys haven't been set up.
        rsync_flags = prod_config.get('rsync_flags', 'avzPk')
        local_site_path = self.output_path
        if not local_site_path.endswith('/'):
            local_site_path += '/'  # not just cosmetic; rsync needs this
        assert os.path.exists(local_site_path + 'index.html')
        remote_host = prod_config['remote_host']
        remote_user = prod_config['remote_user']
        remote_path = prod_config['remote_path']
        remote_slug = "%s@%s:'%s'" % (remote_user, remote_host,
                                      shell_quote(remote_path))

        full_rsync_cmd = '%s -%s %s %s' % (rsync_cmd, rsync_flags,
                                           local_site_path, remote_slug)
        log_rec['rsync_cmd'] = full_rsync_cmd
        print 'Executing', full_rsync_cmd
        try:
            rsync_output = subprocess.check_output(full_rsync_cmd, shell=True)
        except subprocess.CalledProcessError as cpe:
            log_rec['rsync_exit_code'] = cpe.returncode
            rsync_output = cpe.output
            print rsync_output
            log_rec.failure(
                'publish failed: rsync got exit code {rsync_exit_code}')
            return False
        else:
            print rsync_output
            log_rec.success()
        return True
Example #24
def _shell_join(seq):
    """
    Convert a nested list of strings to a shell command.

    Each string in the list is escaped as necessary to allow it to be
    passed to a shell as a single word. If an item is a list, it is a
    nested command, which will be escaped first, and then added as a
    single word to the top-level command.

    For example, ['su', 'root', '-c', ['apt-get', 'update']] becomes
    "su root -c 'apt-get update'".
    """
    result = []
    for word in seq:
        if isinstance(word, (tuple, MutableSequence)):
            word = _shell_join(word)
        escaped = shell_quote(word)
        result.append(escaped)
    return ' '.join(result)
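Assuming shell_quote is shlex.quote, the docstring example plus one deeper level of nesting behaves like this:

print(_shell_join(['su', 'root', '-c', ['apt-get', 'update']]))
# su root -c 'apt-get update'
print(_shell_join(['ssh', 'host', ['su', 'root', '-c', ['apt-get', 'update']]]))
# ssh host 'su root -c '"'"'apt-get update'"'"''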
Example #25
def tag_push_checkout(local_repo_gitdir, src_ref_SHA1, host_IP_address, live_path, git_ssh_path,
                             admin_privkey_path):
    unique_tag = tag_local_repo(host_IP_address, local_repo_gitdir, src_ref_SHA1)
    local_git_push = ['/usr/bin/git',
                      '--git-dir=%s' % (local_repo_gitdir,),
                      'push',
                      'website@%s:%s' % (host_IP_address, live_path),
                      '%s:%s' % (unique_tag, unique_tag)]

    env = {}
    env.update(os.environ)
    env['GIT_SSH'] = git_ssh_path
    env['PRIVATE_KEY'] = admin_privkey_path
    subprocess.check_call(local_git_push, env=env)

    q_unique_tag = shell_quote(unique_tag)
    with cd(live_path):
        run_git('checkout %s' % (q_unique_tag,))
        run_git('checkout -b %s' % (q_unique_tag,))
Example #26
def create_put_command(content, path):
    return 'printf -- %s > %s' % (shell_quote(content), shell_quote(path))
Example #27
def tag_local_repo(host_IP_address, local_repo, src_ref_SHA1):
    unique_tag_name = make_unique_tag_name(host_IP_address, src_ref_SHA1)
    command_string = ('/usr/bin/git --git-dir=%s tag %s %s'
                      % (shell_quote(local_repo), shell_quote(unique_tag_name), shell_quote(src_ref_SHA1)))
    subprocess.check_call(command_string.split())
    return unique_tag_name
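One caveat: command_string.split() re-tokenizes the quoted string on whitespace, which only stays correct while no argument actually needed quoting. A sketch of the failure mode, assuming shell_quote is shlex.quote:

from shlex import quote as shell_quote  # assumption

cmd = '/usr/bin/git --git-dir=%s tag v1 HEAD' % shell_quote('/repos/my repo')
print(cmd.split())
# ['/usr/bin/git', "--git-dir='/repos/my", "repo'", 'tag', 'v1', 'HEAD']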
Example #28
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    cluster = None
    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)

        if options['distribution'] in ('centos-7',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                capture_journal(reactor, node.address, remote_logs_file)

        if not options["no-pull"]:
            yield perform(
                make_dispatcher(reactor),
                parallel([
                    run_remotely(
                        username='******',
                        address=node.address,
                        commands=task_pull_docker_images()
                    ) for node in cluster.agent_nodes
                ]),
            )

        result = yield run_tests(
            reactor=reactor,
            cluster=cluster,
            trial_args=options['trial-args'])
    except:
        result = 1
        raise
    finally:
        # Unless the tests failed, and the user asked to keep the nodes, we
        # delete them.
        if not options['keep']:
            runner.stop_cluster(reactor)
        else:
            print "--keep specified, not destroying nodes."
            if cluster is None:
                print ("Didn't finish creating the cluster.")
            else:
                print ("To run acceptance tests against these nodes, "
                       "set the following environment variables: ")

                environment_variables = get_trial_environment(cluster)

                for environment_variable in environment_variables:
                    print "export {name}={value};".format(
                        name=environment_variable,
                        value=shell_quote(
                            environment_variables[environment_variable]),
                    )

    raise SystemExit(result)
Example #29
    def startDatabase(self):
        """
        Start the database and initialize the subservice.
        """
        def createConnection():
            try:
                createDatabaseConn = self.produceConnection(
                    "schema creation", "postgres")
            except postgres.DatabaseError as e:
                log.error(
                    "Unable to connect to database for schema creation:"
                    " {error}",
                    error=e)
                raise

            createDatabaseCursor = createDatabaseConn.cursor()

            if postgres.__name__ == "pg8000":
                createDatabaseConn.realConnection.autocommit = True
            elif postgres.__name__ == "pgdb":
                createDatabaseCursor.execute("commit")
            else:
                raise InternalDataStoreError(
                    "Unknown Postgres DBM module: {}".format(postgres))

            return createDatabaseConn, createDatabaseCursor

        monitor = PostgresMonitor(self)
        # check consistency of initdb and postgres?

        options = []
        options.append("-c listen_addresses={}".format(
            shell_quote(",".join(self.listenAddresses))))
        if self.socketDir:
            options.append("-c unix_socket_directories={}".format(
                shell_quote(self.socketDir.path)))
        if self.port:
            options.append("-c port={}".format(shell_quote(self.port)))
        options.append("-c shared_buffers={:d}".format(
            self.sharedBuffers)  # int: don't quote
                       )
        options.append("-c max_connections={:d}".format(
            self.maxConnections)  # int: don't quote
                       )
        options.append("-c standard_conforming_strings=on")
        options.append("-c unix_socket_permissions=0770")
        options.extend(self.options)
        if self.logDirectory:  # tell postgres to rotate logs
            options.append("-c log_directory={}".format(
                shell_quote(self.logDirectory)))
            options.append("-c log_truncate_on_rotation=on")
            options.append("-c log_filename=postgresql_%w.log")
            options.append("-c log_rotation_age=1440")
            options.append("-c logging_collector=on")

        options.append("-c log_line_prefix=%t")
        if self.testMode:
            options.append("-c log_statement=all")

        args = [
            self._pgCtl,
            "start",
            "--log={}".format(self.logFile),
            "--timeout=86400",  # Plenty of time for a long cluster upgrade
            "-w",  # Wait for startup to complete
            "-o",
            " ".join(options),  # Options passed to postgres
        ]

        log.info("Requesting postgres start via: {args}", args=args)
        self.reactor.spawnProcess(
            monitor,
            self._pgCtl,
            args,
            env=self.env,
            path=self.workingDir.path,
            uid=self.uid,
            gid=self.gid,
        )
        self.monitor = monitor

        def gotStatus(result):
            """
            Grab the postgres pid from the pgCtl status call in case we need
            to kill it directly later on in hardStop().  Useful in conjunction
            with the DataStoreMonitor so we can shut down if DataRoot has been
            removed/renamed/unmounted.
            """
            reResult = re.search(r"PID: (\d+)\D", result)
            if reResult is not None:
                self._postgresPid = int(reResult.group(1))
            self.ready(*createConnection())
            self.deactivateDelayedShutdown()

        def gotReady(result):
            """
            We started postgres; we're responsible for stopping it later.
            Call pgCtl status to get the pid.
            """
            log.info("{cmd} exited", cmd=self._pgCtl)
            self.shouldStopDatabase = True
            d = Deferred()
            statusMonitor = CapturingProcessProtocol(d, None)
            self.reactor.spawnProcess(
                statusMonitor,
                self._pgCtl,
                [self._pgCtl, "status"],
                env=self.env,
                path=self.workingDir.path,
                uid=self.uid,
                gid=self.gid,
            )
            return d.addCallback(gotStatus)

        def couldNotStart(f):
            """
            There was an error trying to start postgres.  Try to connect
            because it might already be running.  In this case, we won't
            be the one to stop it.
            """
            d = Deferred()
            statusMonitor = CapturingProcessProtocol(d, None)
            self.reactor.spawnProcess(
                statusMonitor,
                self._pgCtl,
                [self._pgCtl, "status"],
                env=self.env,
                path=self.workingDir.path,
                uid=self.uid,
                gid=self.gid,
            )
            return d.addCallback(gotStatus).addErrback(giveUp)

        def giveUp(f):
            """
            We can't start postgres or connect to a running instance.  Shut
            down.
            """
            log.critical("Can't start or connect to postgres: {failure.value}",
                         failure=f)
            self.deactivateDelayedShutdown()
            self.reactor.stop()

        self.monitor.completionDeferred.addCallback(gotReady).addErrback(
            couldNotStart)
Example #30
    def run_ssh_command(self, command):
        logging.debug(command)
        return self.ssh_exec(
            self.ssh_args
            + ["cd {0} && {1}".format(shell_quote(self.basefolder), command)]
        )
Example #31
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_file = open("%s.log" % base_path.basename(), "a")
    log_writer = eliot_logging_service(log_file=log_file,
                                       reactor=reactor,
                                       capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger('before', 'shutdown', log_writer.stopService)

    cluster = None
    try:
        cluster = yield runner.start_cluster(reactor)

        if options['distribution'] in ('centos-7', ):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                capture_journal(reactor, node.address, remote_logs_file)

        if not options["no-pull"]:
            yield perform(
                make_dispatcher(reactor),
                parallel([
                    run_remotely(username='******',
                                 address=node.address,
                                 commands=task_pull_docker_images())
                    for node in cluster.agent_nodes
                ]),
            )

        result = yield run_tests(reactor=reactor,
                                 cluster=cluster,
                                 trial_args=options['trial-args'])
    except:
        result = 1
        raise
    finally:
        # Unless the tests failed, and the user asked to keep the nodes, we
        # delete them.
        if not options['keep']:
            runner.stop_cluster(reactor)
        else:
            print "--keep specified, not destroying nodes."
            if cluster is None:
                print("Didn't finish creating the cluster.")
            else:
                print(
                    "To run acceptance tests against these nodes, "
                    "set the following environment variables: ")

                environment_variables = get_trial_environment(cluster)

                for environment_variable in environment_variables:
                    print "export {name}={value};".format(
                        name=environment_variable,
                        value=shell_quote(
                            environment_variables[environment_variable]),
                    )

    raise SystemExit(result)
Example #32
def cmd_preprocess(args):

    proxy = get_apt_proxy()

    with open(args.input_file, 'r') as f:
        data = yaml.safe_load(f.read()) or {}

    output = [
        '#!/usr/bin/env bash',
        '',
        'set -e',
        'export DEBIAN_FRONTEND=noninteractive',
        '',
    ]

    if proxy:
        output.append('echo {} > /etc/apt/apt.conf.d/80proxy'.format(shell_quote(
            'Acquire::http::Proxy "{}";'.format(proxy))))

    # TODO: We really only need wget if we're going to have to fetch a key.
    output.append('apt-get update')  # Must update first; otherwise, there are no package lists.
    output.extend(apt_install(('wget',)))

    for wkk_name, wkk_spec in (data.get('well_known_keys') or {}).items():
        if not wkk_spec:
            continue
        if wkk_name == 'debian-archive-keyring':
            output.extend(apt_install(('debian-archive-keyring',)))
            output.append('cp -a /usr/share/keyrings/debian-archive-keyring.gpg /etc/apt/trusted.gpg.d/')
        else:
            raise ValueError('Unrecognized well-known key name: {}'.format(wkk_name))
    
    for key_spec in data.get('keys') or ():
        output.extend(install_key(key_spec))
    for source_name, source_spec in (data.get('sources') or {}).items():
        output.extend(install_source(source_name, source_spec))
    for ppa_spec in data.get('ppas') or ():
        output.extend(install_ppa(ppa_spec))

    output.extend(('apt-get update', 'apt-get dist-upgrade -yq'))

    output.extend(apt_install(data.get('packages') or ()))

    output.extend(dpkg_install(data.get('binary_packages') or ()))
    
    output.extend((
        'apt-get autoremove -y',
        'apt-get clean',
        'rm -rf /var/lib/apt/lists/*',
        ))

    if proxy:
        output.append('rm /etc/apt/apt.conf.d/80proxy')

    output = '\n'.join(output)
    if not args.output_file:
        print(output)
    else:
        skip_write = False
        if os.path.exists(args.output_file):
            with open(args.output_file, 'r') as f:
                existing_output = f.read()
            if existing_output == output:
                print('{}: Output would be unchanged; not modifying the output file!'.format(sys.argv[0]))
                skip_write = True
        if not skip_write:
            with open(args.output_file, 'w') as f:
                f.write(output)
Example #33
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    cluster = None
    results = []
    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)
        if options['distribution'] in ('centos-7',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(capture_journal(reactor,
                                               node.address,
                                               remote_logs_file)
                               )
        elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(capture_upstart(reactor,
                                               node.address,
                                               remote_logs_file)
                               )
        gather_deferreds(results)

        if options['apps-per-node'] > 0:
            config = _build_config(cluster, options['template'],
                                   options['apps-per-node'])
            yield _configure(reactor, cluster, config)

        result = 0

    except BaseException:
        result = 1
        raise
    finally:
        if options['no-keep'] or result == 1:
            runner.stop_cluster(reactor)
        else:
            if cluster is None:
                print("Didn't finish creating the cluster.")
                runner.stop_cluster(reactor)
            else:
                print("The following variables describe the cluster:")
                environment_variables = get_trial_environment(cluster)
                for environment_variable in environment_variables:
                    print("export {name}={value};".format(
                        name=environment_variable,
                        value=shell_quote(
                            environment_variables[environment_variable]),
                    ))
                print("Be sure to preserve the required files.")

    raise SystemExit(result)
Example #34
def create_put_command(content, path):
    # Escape printf format markers
    content = content.replace('\\', '\\\\').replace('%', '%%')
    return 'printf -- %s > %s' % (shell_quote(content), shell_quote(path))
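This variant additionally escapes printf's own metacharacters; without that step, % and \ sequences inside content would be interpreted as format directives rather than written out literally. A quick check:

print(create_put_command('50% off', '/tmp/sale'))
# printf -- '50%% off' > /tmp/sale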
Example #35
    def command(self, conf, args):
        """Execute a git command in the pubs directory"""
        self.shell(' '.join([shell_quote(a) for a in args.arguments]),
                   command=True)
Example #36
def get_command_str():
    return ' '.join([sys.executable] + [shell_quote(v) for v in sys.argv])
Example #37
    def startDatabase(self):
        """
        Start the database and initialize the subservice.
        """

        def createConnection():
            createDatabaseConn = self.produceConnection(
                "schema creation", "postgres"
            )
            createDatabaseCursor = createDatabaseConn.cursor()
            createDatabaseCursor.execute("commit")
            return createDatabaseConn, createDatabaseCursor

        monitor = _PostgresMonitor(self)
        pgCtl = self.pgCtl()
        # check consistency of initdb and postgres?

        options = []
        options.append(
            "-c listen_addresses={}"
            .format(shell_quote(",".join(self.listenAddresses)))
        )
        if self.socketDir:
            options.append(
                "-k {}"
                .format(shell_quote(self.socketDir.path))
            )
        if self.port:
            options.append(
                "-c port={}".format(shell_quote(self.port))
            )
        options.append(
            "-c shared_buffers={:d}"
            .format(self.sharedBuffers)  # int: don't quote
        )
        options.append(
            "-c max_connections={:d}"
            .format(self.maxConnections)  # int: don't quote
        )
        options.append("-c standard_conforming_strings=on")
        options.append("-c unix_socket_permissions=0770")
        options.extend(self.options)
        if self.logDirectory:  # tell postgres to rotate logs
            options.append(
                "-c log_directory={}".format(shell_quote(self.logDirectory))
            )
            options.append("-c log_truncate_on_rotation=on")
            options.append("-c log_filename=postgresql_%w.log")
            options.append("-c log_rotation_age=1440")
            options.append("-c logging_collector=on")

        log.warn(
            "Requesting postgres start via {cmd} {opts}",
            cmd=pgCtl, opts=options
        )
        self.reactor.spawnProcess(
            monitor, pgCtl,
            [
                pgCtl,
                "start",
                "-l", self.logFile,
                "-t 86400",  # Give plenty of time for a long cluster upgrade
                "-w",
                # XXX what are the quoting rules for '-o'?  do I need to repr()
                # the path here?
                "-o",
                " ".join(options),
            ],
            env=self.env, path=self.workingDir.path,
            uid=self.uid, gid=self.gid,
        )
        self.monitor = monitor

        def gotStatus(result):
            """
            Grab the postgres pid from the pgCtl status call in case we need
            to kill it directly later on in hardStop().  Useful in conjunction
            with the DataStoreMonitor so we can shut down if DataRoot has been
            removed/renamed/unmounted.
            """
            reResult = re.search(r"PID: (\d+)\D", result)
            if reResult is not None:
                self._postgresPid = int(reResult.group(1))
            self.ready(*createConnection())
            self.deactivateDelayedShutdown()

        def gotReady(result):
            """
            We started postgres; we're responsible for stopping it later.
            Call pgCtl status to get the pid.
            """
            log.warn("{cmd} exited", cmd=pgCtl)
            self.shouldStopDatabase = True
            d = Deferred()
            statusMonitor = CapturingProcessProtocol(d, None)
            self.reactor.spawnProcess(
                statusMonitor, pgCtl, [pgCtl, "status"],
                env=self.env, path=self.workingDir.path,
                uid=self.uid, gid=self.gid,
            )
            return d.addCallback(gotStatus)

        def couldNotStart(f):
            """
            There was an error trying to start postgres.  Try to connect
            because it might already be running.  In this case, we won't
            be the one to stop it.
            """
            d = Deferred()
            statusMonitor = CapturingProcessProtocol(d, None)
            self.reactor.spawnProcess(
                statusMonitor, pgCtl, [pgCtl, "status"],
                env=self.env, path=self.workingDir.path,
                uid=self.uid, gid=self.gid,
            )
            return d.addCallback(gotStatus).addErrback(giveUp)

        def giveUp(f):
            """
            We can't start postgres or connect to a running instance.  Shut
            down.
            """
            log.failure("Can't start or connect to postgres", f)
            self.deactivateDelayedShutdown()
            self.reactor.stop()

        self.monitor.completionDeferred.addCallback(
            gotReady).addErrback(couldNotStart)
Example #38
    def put_bytes(self, in_data, out_path):
        if in_data:
            put_cmd = "cat - > %s" % shell_quote(out_path)
        else:
            put_cmd = "cat /dev/null > %s" % shell_quote(out_path)
        self.exec_command(put_cmd, in_data=in_data, sudoable=False)
Example #39
def decide_dmg_action(curr_instance_path, default_app_folder_path,
                      give_us_permissions_cb, kill_other_instances=None,
                      skip_mount_point_check=False):
    TRACE('starting decide_dmg_action %s, %s, %s, %s', curr_instance_path, default_app_folder_path, give_us_permissions_cb, kill_other_instances)
    curr_instance_path = curr_instance_path.rstrip('/')
    if not skip_mount_point_check:
        if not is_on_installer(curr_instance_path):
            return
    prev_installation = find_previous_installation()
    if prev_installation and not is_on_installer(prev_installation):
        app_folder_path = prev_installation
    else:
        app_folder_path = default_app_folder_path
    TRACE('find_previous_installation() returned %r, installing into %r', prev_installation, app_folder_path)
    app_target = os.path.join(app_folder_path, u'%s.app' % (BUILD_KEY,))
    launch_exe = os.path.join(app_target, u'Contents', u'MacOS', BUILD_KEY)
    launch_args = [launch_exe, u'/firstrunupdatemanual' if os.path.exists(app_target) else u'/firstrun']
    if kill_other_instances:
        kill_other_instances()
    did_exist_previously = os.path.exists(app_target)
    handle, temp_file_path = tempfile.mkstemp()
    try:
        os.close(handle)
    except Exception:
        unhandled_exc_handler()

    noqtn = '--noqtn' if MAC_VERSION >= LEOPARD else ''
    try:
        with open(temp_file_path, 'w') as temp_file:
            script = u'#!/bin/bash\n/bin/rm -rf %(app_target)s\n[ ! -e %(app_target)s ] && /usr/bin/ditto %(noqtn)s %(curr_instance_path)s %(app_target)s\n'
            script %= dict(app_target=shell_quote(app_target), curr_instance_path=shell_quote(curr_instance_path), noqtn=noqtn)
            script = script.encode('utf-8')
            temp_file.write(script)
        success = False
        try:
            proc = subprocess.Popen(['/bin/sh', temp_file_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
            stdout, stderr = proc.communicate()
            returncode = proc.returncode
            if stdout:
                TRACE('installer script stdout: %s' % (stdout,))
            if stderr:
                TRACE('installer script stderr: %s' % (stderr,))
            proc.stdout.close()
            proc.stderr.close()
            TRACE('installer script returncode: %s', returncode)
            success = _plist_identical(curr_instance_path, app_target)
        except OSError as e:
            if did_exist_previously and not os.path.exists(app_target):
                report_bad_assumption('In first pass, double-click install deleted old installation at %s but failed to install new one', app_target)
            if e.errno != errno.EACCES:
                raise

        if not success:
            try:
                if not give_us_permissions_cb:
                    return
                safe_activate_translation()
                msg = trans(u'Please enter your computer admin password for Dropbox to finish installing.')
                retval, output = give_us_permissions_cb('/bin/sh', [temp_file_path], msg + u'\n\n')
                if output:
                    TRACE('installer script with elevated permissions output: %s', output)
                TRACE('installer script with elevated permissions returned %s', retval)
                if not retval:
                    return
            except Exception:
                unhandled_exc_handler()
                return

    finally:
        try:
            os.unlink(temp_file_path)
        except Exception:
            unhandled_exc_handler()

    pool = NSAutoreleasePool.alloc().init()
    try:
        ws = NSWorkspace.sharedWorkspace()
        ws.noteFileSystemChanged_(app_target)
    except Exception:
        pass
    finally:
        del pool

    return launch_args
Example #40
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger('before', 'shutdown', log_writer.stopService)

    cluster = None
    results = []
    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)
        if options['distribution'] in ('centos-7', ):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(
                    capture_journal(reactor, node.address, remote_logs_file))
        elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(
                    capture_upstart(reactor, node.address, remote_logs_file))
        gather_deferreds(results)

        if options['apps-per-node'] > 0:
            config = _build_config(cluster, options['template'],
                                   options['apps-per-node'])
            yield _configure(reactor, cluster, config)

        result = 0

    except BaseException:
        result = 1
        raise
    finally:
        if options['no-keep'] or result == 1:
            runner.stop_cluster(reactor)
        else:
            if cluster is None:
                print("Didn't finish creating the cluster.")
                runner.stop_cluster(reactor)
            else:
                print("The following variables describe the cluster:")
                environment_variables = get_trial_environment(cluster)
                for environment_variable in environment_variables:
                    print("export {name}={value};".format(
                        name=environment_variable,
                        value=shell_quote(
                            environment_variables[environment_variable]),
                    ))
                print("Be sure to preserve the required files.")

    raise SystemExit(result)
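
The export lines printed above use shell_quote so that values containing spaces or shell metacharacters survive a copy-paste back into a shell. A quick illustration, assuming shell_quote is shlex.quote (pipes.quote on Python 2); the variable names and values are illustrative, not real cluster output:

from shlex import quote as shell_quote

variables = {
    'FLOCKER_ACCEPTANCE_CONTROL_NODE': '203.0.113.7',
    'FLOCKER_ACCEPTANCE_VOLUME_BACKEND': 'openstack region',
}
for name in variables:
    print("export {name}={value};".format(
        name=name, value=shell_quote(variables[name])))
# The second value prints as 'openstack region': quoted because of the space.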
Ejemplo n.º 44
0
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    cluster = None
    results = []

    setup_succeeded = False
    reached_finally = False

    def cluster_cleanup():
        if not reached_finally:
            print "interrupted..."
        print "stopping cluster"
        return runner.stop_cluster(reactor)

    cleanup_trigger_id = reactor.addSystemEventTrigger('before', 'shutdown',
                                                       cluster_cleanup)

    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)
        if options['distribution'] in ('centos-7',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(
                    capture_journal(reactor, node.address, remote_logs_file))
        elif options['distribution'] in ('ubuntu-14.04',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(
                    capture_upstart(reactor, node.address, remote_logs_file))
        gather_deferreds(results)

        if not options["no-pull"]:
            yield perform(
                make_dispatcher(reactor),
                parallel([
                    run_remotely(
                        username='******',
                        address=node.address,
                        commands=task_pull_docker_images()
                    ) for node in cluster.agent_nodes
                ]),
            )

        setup_succeeded = True
        result = yield run_tests(
            reactor=reactor,
            cluster=cluster,
            trial_args=options['trial-args'])

    finally:
        reached_finally = True
        # We delete the nodes if the user hasn't asked to keep them
        # or if we failed to provision the cluster.
        if not setup_succeeded:
            print "cluster provisioning failed"
        elif not options['keep']:
            print "not keeping cluster"
        else:
            print "--keep specified, not destroying nodes."
            print ("To run acceptance tests against these nodes, "
                   "set the following environment variables: ")

            environment_variables = get_trial_environment(cluster)

            for environment_variable in environment_variables:
                print "export {name}={value};".format(
                    name=environment_variable,
                    value=shell_quote(
                        environment_variables[environment_variable]),
                )
            reactor.removeSystemEventTrigger(cleanup_trigger_id)

    raise SystemExit(result)
Ejemplo n.º 45
0
def shell_word(s):
    if s != shell_quote(s):
        raise ValueError('not a word: %r' % s)
    return s
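
shell_word is a guard: it accepts a string only if shell_quote would pass it through unchanged, i.e. the string needs no quoting at all. A quick illustration, assuming shell_quote is shlex.quote:

shell_word('backup.tar.gz')  # returned unchanged: quoting would not alter it
shell_word('two words')      # raises ValueError("not a word: 'two words'")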
Ejemplo n.º 46
0
    def remove_remote(self, path):
        folder, file = osp.split(path)

        self.run_ssh_command("rm " + shell_quote(path))
        self.run_ssh_command("rmdir " + shell_quote(folder))
Ejemplo n.º 47
0
    def install(self):
        """Installer"""
        logger = logging.getLogger(self.name)
        options = self.options
        parts = self.buildout['buildout']['parts-directory']

        name = 'buildout-node'
        node_dir = os.path.join(parts, self.name)
        if not os.path.isdir(node_dir):
            os.makedirs(node_dir)

        node_binary = self.get_binary(options)

        if node_binary is None:
            args = {}
            if 'url' not in options:
                args = dict(
                    v=self.get_version(options),
                    a='x86_64' in os.uname() and 'x64' or 'x86',
                )
                if sys.platform.startswith('linux'):
                    args['p'] = 'linux'
                elif sys.platform == 'darwin':
                    args['p'] = 'darwin'

            if 'p' in args:
                binary_url = options.get('binary-url', self.binary_format)
                options['url'] = url = binary_url.format(**args)
                logger.info('Using binary distribution at %s', url)

                from zc.buildout.download import Download
                from archive import extract

                # Use the buildout download infrastructure
                manager = Download(options=self.buildout['buildout'],
                                   offline=self.buildout['buildout'].get('offline') == 'true')

                # The buildout download utility expects us to know whether or
                # not we have a download cache, which causes fun errors.  This
                # is probably a bug, but this test should be safe regardless.
                if manager.download_cache:
                    filename = manager.download_cached(url)[0]
                else:
                    filename = manager.download(url)[0]

                destination = self.get_node_directory(options)

                # Finally, extract the archive.  The binary distribution urls
                # are defined in this file, so we can safely assume they're
                # gzipped tarballs.  This prevents an error when downloaded
                # into a temporary file.
                extract(filename, destination, ext=".tar.gz")

            else:
                if 'url' not in options:
                    options['url'] = url = self.source_format.format(**args)
                logger.info('Using source distribution at %s', options['url'])
                import zc.recipe.cmmi
                options['environment'] = (
                    'PYTHONPATH=tools:deps/v8/tools:../../deps/v8/tools'
                )

                node = zc.recipe.cmmi.Recipe(
                    self.buildout, name, options)
                node.install()

            node_binary = self.get_binary(options)

        node_bin = os.path.dirname(node_binary)

        npms = options.get('npms', '')
        if npms:
            npms = ' '.join([npm.strip() for npm in npms.split()
                             if npm.strip()])
            cmd_data = {'node_dir': shell_quote(node_dir),
                        'node_bin': shell_quote(node_bin),
                        'cache': os.path.expanduser('~/.npm'),
                        'npms': npms}
            cmd_prefix = (
                'export HOME=%(node_dir)s;'
                'export PATH=%(node_bin)s:"$PATH";'
                'echo "prefix=$HOME" > $HOME/.npmrc;'
                'echo "cache=%(cache)s" >> $HOME/.npmrc;'
                '%(node_bin)s/npm set color false;'
                '%(node_bin)s/npm set unicode false;') % cmd_data

            if self.buildout['buildout'].get('offline') == 'true':
                cmd = cmd_prefix + \
                    '%(node_bin)s/npm ls %(npms)s --global --json' % cmd_data
                import zc.buildout
                try:
                    output = subprocess.check_output(cmd, shell=True)
                    output_json = json.loads(output)
                    installed_npms = output_json.get('dependencies')
                    # if npm reports a discrepancy, error out
                    if not installed_npms or \
                            len(installed_npms) != len(npms.split()):
                        raise zc.buildout.UserError(
                            "Couldn't install %r npms in offline mode" % npms)
                    logger.debug('Using existing npm install for %r' % npms)
                except subprocess.CalledProcessError:
                    # npm fails if install has not yet happened
                    raise zc.buildout.UserError(
                        "Couldn't install %r npms in offline mode" % npms)

            else:
                cmd = cmd_prefix + \
                    '%(node_bin)s/npm install -g %(npms)s' % cmd_data
                p = subprocess.Popen(cmd, shell=True)
                p.wait()

        return self.install_scripts()
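
The offline branch above verifies an existing global install by asking npm itself and comparing counts. A reduced sketch of just that check; the helper name is made up, while the npm flags match those used in the recipe:

import json
import subprocess

def global_npms_present(npm, npms):
    # Ask npm to report installed global packages as JSON.
    cmd = '%s ls %s --global --json' % (npm, npms)
    try:
        output = subprocess.check_output(cmd, shell=True)
    except subprocess.CalledProcessError:
        return False  # npm exits non-zero if the install never happened
    installed = json.loads(output).get('dependencies') or {}
    # Require one installed entry per requested package.
    return len(installed) == len(npms.split())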
Ejemplo n.º 48
0
def get_command_str():
    return " ".join([sys.executable] + [shell_quote(v) for v in sys.argv])
Ejemplo n.º 49
0
    def startDatabase(self):
        """
        Start the database and initialize the subservice.
        """
        def createConnection():
            try:
                createDatabaseConn = self.produceConnection(
                    "schema creation", "postgres"
                )
            except postgres.DatabaseError as e:
                log.error(
                    "Unable to connect to database for schema creation:"
                    " {error}",
                    error=e
                )
                raise

            createDatabaseCursor = createDatabaseConn.cursor()

            if postgres.__name__ == "pg8000":
                createDatabaseConn.realConnection.autocommit = True
            elif postgres.__name__ == "pgdb":
                createDatabaseCursor.execute("commit")
            else:
                raise InternalDataStoreError(
                    "Unknown Postgres DBM module: {}".format(postgres)
                )

            return createDatabaseConn, createDatabaseCursor

        monitor = PostgresMonitor(self)
        # check consistency of initdb and postgres?

        options = []
        options.append(
            "-c listen_addresses={}"
            .format(shell_quote(",".join(self.listenAddresses)))
        )
        if self.socketDir:
            options.append(
                "-c unix_socket_directories={}"
                .format(shell_quote(self.socketDir.path))
            )
        if self.port:
            options.append(
                "-c port={}".format(shell_quote(self.port))
            )
        options.append(
            "-c shared_buffers={:d}"
            .format(self.sharedBuffers)  # int: don't quote
        )
        options.append(
            "-c max_connections={:d}"
            .format(self.maxConnections)  # int: don't quote
        )
        options.append("-c standard_conforming_strings=on")
        options.append("-c unix_socket_permissions=0770")
        options.extend(self.options)
        if self.logDirectory:  # tell postgres to rotate logs
            options.append(
                "-c log_directory={}".format(shell_quote(self.logDirectory))
            )
            options.append("-c log_truncate_on_rotation=on")
            options.append("-c log_filename=postgresql_%w.log")
            options.append("-c log_rotation_age=1440")
            options.append("-c logging_collector=on")

        options.append("-c log_line_prefix=%t")
        if self.testMode:
            options.append("-c log_statement=all")

        args = [
            self._pgCtl, "start",
            "--log={}".format(self.logFile),
            "--timeout=86400",  # Plenty of time for a long cluster upgrade
            "-w",  # Wait for startup to complete
            "-o", " ".join(options),  # Options passed to postgres
        ]

        log.info("Requesting postgres start via: {args}", args=args)
        self.reactor.spawnProcess(
            monitor, self._pgCtl, args,
            env=self.env, path=self.workingDir.path,
            uid=self.uid, gid=self.gid,
        )
        self.monitor = monitor

        def gotStatus(result):
            """
            Grab the postgres pid from the pgCtl status call in case we need
            to kill it directly later on in hardStop().  Useful in conjunction
            with the DataStoreMonitor so we can shut down if DataRoot has been
            removed/renamed/unmounted.
            """
            reResult = re.search(r"PID: (\d+)\D", result)
            if reResult is not None:
                self._postgresPid = int(reResult.group(1))
            self.ready(*createConnection())
            self.deactivateDelayedShutdown()

        def gotReady(result):
            """
            We started postgres; we're responsible for stopping it later.
            Call pgCtl status to get the pid.
            """
            log.info("{cmd} exited", cmd=self._pgCtl)
            self.shouldStopDatabase = True
            d = Deferred()
            statusMonitor = CapturingProcessProtocol(d, None)
            self.reactor.spawnProcess(
                statusMonitor, self._pgCtl, [self._pgCtl, "status"],
                env=self.env, path=self.workingDir.path,
                uid=self.uid, gid=self.gid,
            )
            return d.addCallback(gotStatus)

        def couldNotStart(f):
            """
            There was an error trying to start postgres.  Try to connect
            because it might already be running.  In this case, we won't
            be the one to stop it.
            """
            d = Deferred()
            statusMonitor = CapturingProcessProtocol(d, None)
            self.reactor.spawnProcess(
                statusMonitor, self._pgCtl, [self._pgCtl, "status"],
                env=self.env, path=self.workingDir.path,
                uid=self.uid, gid=self.gid,
            )
            return d.addCallback(gotStatus).addErrback(giveUp)

        def giveUp(f):
            """
            We can't start postgres or connect to a running instance.  Shut
            down.
            """
            log.critical(
                "Can't start or connect to postgres: {failure.value}",
                failure=f
            )
            self.deactivateDelayedShutdown()
            self.reactor.stop()

        self.monitor.completionDeferred.addCallback(
            gotReady).addErrback(couldNotStart)
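
Each -c value above is quoted individually because the whole options list is later collapsed into the single string handed to pg_ctl's -o flag; quoting keeps every name=value pair one token. A small illustration, assuming shell_quote is shlex.quote:

from shlex import quote as shell_quote

listen = ",".join(["127.0.0.1", "::1"])
print("-c listen_addresses={}".format(shell_quote(listen)))
# -c listen_addresses=127.0.0.1,::1    (commas and colons need no quotes)
print("-c log_directory={}".format(shell_quote("/var/log/pg data")))
# -c log_directory='/var/log/pg data'  (the space forces quoting)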
Ejemplo n.º 50
0
def create_put_command(content, path):
    # Escape printf format markers
    content = content.replace('\\', '\\\\').replace('%', '%%')
    return 'printf -- %s > %s' % (shell_quote(content), shell_quote(path))
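
Because the quoted content ends up as printf's format string, any literal % or backslash must be doubled first, or printf would interpret it as a conversion or escape. For example, assuming shell_quote is shlex.quote:

print(create_put_command('100% done', '/tmp/status'))
# printf -- '100%% done' > /tmp/status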
Ejemplo n.º 51
0
    def __str__(self):
        return ' '.join(shell_quote(a) for a in self.list)
Ejemplo n.º 52
0
    def install(self):
        """Installer"""
        logger = logging.getLogger(self.name)
        options = self.options
        parts = self.buildout['buildout']['parts-directory']

        name = 'buildout-node'
        node_dir = os.path.join(parts, self.name)
        if not os.path.isdir(node_dir):
            os.makedirs(node_dir)

        node_binary = self.get_binary(options)

        if node_binary is None:
            args = {}
            if 'url' not in options:
                args = dict(
                    v=self.get_version(options),
                    a='x86_64' in os.uname() and 'x64' or 'x86',
                )
                if sys.platform.startswith('linux'):
                    args['p'] = 'linux'
                elif sys.platform == 'darwin':
                    args['p'] = 'darwin'

            if 'p' in args:
                options['url'] = url = self.binary_format.format(**args)
                logger.info('Using binary distribution at %s', url)
                
                from zc.buildout.download import Download
                from archive import extract

                # Use the buildout download infrastructure
                manager = Download(options=self.buildout['buildout'])
                
                # The buildout download utility expects us to know whether or
                # not we have a download cache, which causes fun errors.  This
                # is probably a bug, but this test should be safe regardless.
                if manager.download_cache:
                    filename = manager.download_cached(url)[0]
                else:
                    filename = manager.download(url)[0]

                destination = os.path.join(
                    self.buildout['buildout']['parts-directory'], name)

                # Finally, extract the archive.  The binary distribution urls
                # are defined in this file, so we can safely assume they're
                # gzipped tarballs.  This prevents an error when downloaded
                # into a temporary file.
                extract(filename, destination, ext=".tar.gz")

            else:
                if 'url' not in options:
                    options['url'] = url = self.source_format.format(**args)
                logger.info('Using source distribution at %s', options['url'])
                import hexagonit.recipe.cmmi
                options['environment'] = (
                    'PYTHONPATH=tools:deps/v8/tools:../../deps/v8/tools'
                )

                node = hexagonit.recipe.cmmi.Recipe(self.buildout, name, options)
                node.install()

            node_binary = self.get_binary(options)

        node_bin = os.path.dirname(node_binary)

        scripts = options.get('scripts', '').split()
        scripts = [script.strip() for script in scripts
                   if script.strip()]

        npms = options.get('npms', '')
        if npms:
            npms = ' '.join([npm.strip() for npm in npms.split()
                             if npm.strip()])
            
            p = subprocess.Popen((
                'export HOME=%(node_dir)s;'
                'export PATH=%(node_bin)s:$PATH;'
                'echo "prefix=$HOME\n" > $HOME/.npmrc;'
                '%(node_bin)s/npm set color false;'
                '%(node_bin)s/npm set unicode false;'
                '%(node_bin)s/npm install -sg %(npms)s') % {
                    'node_dir': shell_quote(node_dir),
                    'node_bin': shell_quote(node_bin),
                    'npms': npms},
                shell=True)
            p.wait()

            for script in scripts:
                if script in ['node']:
                    continue
                filename = os.path.join(node_bin, script)
                if os.path.isfile(filename):
                    fd = open(filename)
                    data = fd.read()
                    fd.close()
                    fd = open(filename, 'w')
                    fd.seek(0)
                    data = data.split('\n')
                    data[0] = '#!%s' % node_binary
                    fd.write('\n'.join(data))
                    fd.close()

        for script in ('node', 'npm'):
            if script not in scripts:
                scripts.append(script)

        node_path = options.get('node-path', '').split()
        node_path.insert(0, os.path.join(node_dir, 'lib', 'node_modules'))
        node_path = ':'.join(node_path)
        options['initialization'] = (
            'import os;\nos.environ["NODE_PATH"] = %r' % node_path
        )

        paths = [os.path.join(node_dir, 'bin'), node_bin]
        all_scripts = []
        for p in paths:
            if os.path.isdir(p):
                all_scripts.extend(os.listdir(p))

        typos = []
        for script in scripts:
            if script not in all_scripts:
                typos.append(script)
        if typos:
            import zc.buildout
            typos = ', '.join([repr(s) for s in typos])
            all_scripts = [repr(s) for s in all_scripts]
            all_scripts = ', '.join(sorted(all_scripts))
            raise zc.buildout.UserError((
                'Script(s) {0} not found in {1[0]};{1[1]}.\n'
                'You may have a typo in your buildout config.\n'
                'Available scripts are: {2}'
            ).format(typos, paths, all_scripts))

        options['eggs'] = 'gp.recipe.node'
        options['arguments'] = '%r, (%r, %r), sys.argv[0]' % (
            node_binary,
            os.path.join(node_dir, 'bin'),
            node_bin,
        )
        options['scripts'] = '\n'.join(scripts)
        options['entry-points'] = '\n'.join([
            '%s=gp.recipe.node.script:main' % s for s in scripts
        ])
        from zc.recipe.egg import Scripts
        rscripts = Scripts(self.buildout, self.name, options)
        return rscripts.install()
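
The loop above that rewrites each installed npm script pins its interpreter line to the node binary built by the recipe. A compact equivalent of that rewrite, with an illustrative helper name:

def pin_shebang(filename, node_binary):
    # Replace the script's first line with a shebang for our node build.
    with open(filename) as fd:
        lines = fd.read().split('\n')
    lines[0] = '#!%s' % node_binary
    with open(filename, 'w') as fd:
        fd.write('\n'.join(lines))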
Ejemplo n.º 53
0
def quoted_string(s):
    return shell_quote(s)
Ejemplo n.º 54
0
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    def cluster_cleanup():
        print("stopping cluster")
        return runner.stop_cluster(reactor)

    cleanup_trigger_id = reactor.addSystemEventTrigger('before', 'shutdown',
                                                       cluster_cleanup)

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    yield runner.ensure_keys(reactor)
    cluster = yield runner.start_cluster(reactor)

    managed_config_file = options['cert-directory'].child("managed.yaml")
    managed_config = create_managed_config(options['config'], cluster)
    managed_config_file.setContent(
        yaml.safe_dump(managed_config, default_flow_style=False)
    )

    if options['distribution'] in ('centos-7',):
        remote_logs_file = open("remote_logs.log", "a")
        for node in cluster.all_nodes:
            capture_journal(reactor, node.address,
                            remote_logs_file).addErrback(write_failure)
    elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
        remote_logs_file = open("remote_logs.log", "a")
        for node in cluster.all_nodes:
            capture_upstart(reactor, node.address,
                            remote_logs_file).addErrback(write_failure)

    flocker_client = _make_client(reactor, cluster)
    yield _wait_for_nodes(reactor, flocker_client, len(cluster.agent_nodes))

    if options['no-keep']:
        print("not keeping cluster")
    else:
        environment_variables = get_trial_environment(cluster)
        environment_strings = list()
        for environment_variable in environment_variables:
            environment_strings.append(
                "export {name}={value};\n".format(
                    name=environment_variable,
                    value=shell_quote(
                        environment_variables[environment_variable]
                    ),
                )
            )
        environment = ''.join(environment_strings)
        print("The following variables describe the cluster:")
        print(environment)
        env_file = options['cert-directory'].child("environment.env")
        env_file.setContent(environment)
        print("The variables are also saved in {}".format(
            env_file.path
        ))
        print("Be sure to preserve the required files.")

        reactor.removeSystemEventTrigger(cleanup_trigger_id)
Ejemplo n.º 55
0
    def install(self):
        """Installer"""
        logger = logging.getLogger(self.name)
        options = self.options
        parts = self.buildout['buildout']['parts-directory']

        name = 'buildout-node'
        node_dir = os.path.join(parts, self.name)
        if not os.path.isdir(node_dir):
            os.makedirs(node_dir)

        node_binary = self.get_binary(options)

        if node_binary is None:
            args = {}
            if 'url' not in options:
                args = dict(
                    v=self.get_version(options),
                    a='x86_64' in os.uname() and 'x64' or 'x86',
                )
                if sys.platform.startswith('linux'):
                    args['p'] = 'linux'
                elif sys.platform == 'darwin':
                    args['p'] = 'darwin'

            if 'p' in args:
                options['url'] = url = self.binary_format.format(**args)
                logger.info('Using binary distribution at %s', url)

                from zc.buildout.download import Download
                from archive import extract

                # Use the buildout download infrastructure
                manager = Download(options=self.buildout['buildout'])

                # The buildout download utility expects us to know whether or
                # not we have a download cache, which causes fun errors.  This
                # is probably a bug, but this test should be safe regardless.
                if manager.download_cache:
                    filename = manager.download_cached(url)[0]
                else:
                    filename = manager.download(url)[0]

                destination = self.get_node_directory(options)

                # Finally, extract the archive.  The binary distribution urls
                # are defined in this file, so we can safely assume they're
                # gzipped tarballs.  This prevents an error when downloaded
                # into a temporary file.
                extract(filename, destination, ext=".tar.gz")

            else:
                if 'url' not in options:
                    options['url'] = url = self.source_format.format(**args)
                logger.info('Using source distribution at %s', options['url'])
                import zc.recipe.cmmi
                options['environment'] = (
                    'PYTHONPATH=tools:deps/v8/tools:../../deps/v8/tools'
                )

                node = zc.recipe.cmmi.Recipe(
                    self.buildout, name, options)
                node.install()

            node_binary = self.get_binary(options)

        node_bin = os.path.dirname(node_binary)

        npms = options.get('npms', '')
        if npms:
            npms = ' '.join([npm.strip() for npm in npms.split()
                             if npm.strip()])

            cmd = (
                'export HOME=%(node_dir)s;'
                'export PATH=%(node_bin)s:$PATH;'
                'echo "prefix=$HOME\n" > $HOME/.npmrc;'
                'echo "cache=%(cache)s\n" >> $HOME/.npmrc;'
                '%(node_bin)s/npm set color false;'
                '%(node_bin)s/npm set unicode false;'
                '%(node_bin)s/npm install -g %(npms)s') % {
                    'node_dir': shell_quote(node_dir),
                    'node_bin': shell_quote(node_bin),
                    'cache': os.path.expanduser('~/.npm'),
                    'npms': npms}
            p = subprocess.Popen(cmd, shell=True)
            p.wait()

        return self.install_scripts()
Ejemplo n.º 56
0
    def run(
        self, args, stdin_string=None, env_extend=None, binary_output=False
    ):
        # Allow overriding default settings. If a piece of code really wants
        # to set its own PATH or CIB_file, we must allow it, e.g. when it
        # needs to run a pacemaker tool on a CIB stored in a file but cannot
        # afford the risk of changing the CIB in the file specified by the
        # user.
        env_vars = self._env_vars.copy()
        env_vars.update(dict(env_extend) if env_extend else dict())

        log_args = " ".join([shell_quote(x) for x in args])
        self._logger.debug(
            "Running: {args}\nEnvironment:{env_vars}{stdin_string}".format(
                args=log_args,
                stdin_string=("" if not stdin_string else (
                    "\n--Debug Input Start--\n{0}\n--Debug Input End--"
                    .format(stdin_string)
                )),
                env_vars=("" if not env_vars else (
                    "\n" + "\n".join([
                        "  {0}={1}".format(key, val)
                        for key, val in sorted(env_vars.items())
                    ])
                ))
            )
        )
        self._reporter.process(
            reports.run_external_process_started(
                log_args, stdin_string, env_vars
            )
        )

        try:
            process = subprocess.Popen(
                args,
                # Some commands react differently if they get anything via stdin
                stdin=(subprocess.PIPE if stdin_string is not None else None),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=(
                    lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
                ),
                close_fds=True,
                shell=False,
                env=env_vars,
                # decodes newlines and in python3 also converts bytes to str
                universal_newlines=(not self._python2 and not binary_output)
            )
            out_std, out_err = process.communicate(stdin_string)
            retval = process.returncode
        except OSError as e:
            raise LibraryError(
                reports.run_external_process_error(log_args, e.strerror)
            )

        self._logger.debug(
            (
                "Finished running: {args}\nReturn value: {retval}"
                + "\n--Debug Stdout Start--\n{out_std}\n--Debug Stdout End--"
                + "\n--Debug Stderr Start--\n{out_err}\n--Debug Stderr End--"
            ).format(
                args=log_args,
                retval=retval,
                out_std=out_std,
                out_err=out_err
            )
        )
        self._reporter.process(reports.run_external_process_finished(
            log_args, retval, out_std, out_err
        ))
        return out_std, out_err, retval
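
Joining the shell-quoted argv, as log_args does above, yields a line that can be copied straight out of the debug log and re-run verbatim. For instance, assuming shell_quote is shlex.quote:

from shlex import quote as shell_quote

args = ["crm_resource", "--resource", "my ip", "--cleanup"]
print(" ".join(shell_quote(x) for x in args))
# crm_resource --resource 'my ip' --cleanup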
Ejemplo n.º 57
0
        return False
    elif e_machine == 0x3e:  # x64
        return True
    raise Exception(
        "[!] Unexpected e_machine value in 64-bit check: %s (e_machine: 0x%x)"
        % (filepath, e_machine))


if __name__ == "__main__":
    if len(sys.argv) < 4 or "--" not in sys.argv:
        print(USAGE)
        exit()

    script_options = sys.argv[1:sys.argv.index("--")]
    target_invocation = " ".join(
        shell_quote(i) for i in sys.argv[sys.argv.index("--") + 1:])

    # parse script options
    to_process = script_options[0]

    # parse and remove optional switches
    num_workers = 4
    continuously_monitor = False
    debug = False
    worker_index = -1
    monitor_index = -1
    debug_index = -1
    for i, option in enumerate(script_options):
        if option.startswith("--workers="):
            num_workers = int(option.split('=')[1])
            worker_index = i