def build_singularity_run_command(
    container_command,
    image,
    volumes=[],
    env=[],
    working_directory=DEFAULT_WORKING_DIRECTORY,
    singularity_cmd=DEFAULT_SINGULARITY_COMMAND,
    run_extra_arguments=DEFAULT_RUN_EXTRA_ARGUMENTS,
    sudo=DEFAULT_SUDO,
    sudo_cmd=DEFAULT_SUDO_COMMAND,
):
    command_parts = []
    # http://singularity.lbl.gov/docs-environment-metadata
    for (key, value) in env:
        command_parts.extend(["SINGULARITYENV_%s=%s" % (key, value)])
    command_parts += _singularity_prefix(
        singularity_cmd=singularity_cmd,
        sudo=sudo,
        sudo_cmd=sudo_cmd,
    )
    command_parts.append("exec")
    for volume in volumes:
        command_parts.extend(["-B", shlex_quote(str(volume))])
    if working_directory:
        command_parts.extend(["--pwd", shlex_quote(working_directory)])
    if run_extra_arguments:
        command_parts.append(run_extra_arguments)
    full_image = image
    command_parts.append(shlex_quote(full_image))
    command_parts.append(container_command)
    return " ".join(command_parts)
Example #2
    def test_item_removed(self):
        # Emit item_removed event for an item that is in a playlist
        results = self.lib.items(u'path:{0}'.format(shlex_quote(
            os.path.join(self.music_dir, 'd', 'e', 'f.mp3'))))
        item = results[0]
        beets.plugins.send('item_removed', item=item)

        # Emit item_removed event for an item that is not in a playlist
        results = self.lib.items(u'path:{0}'.format(shlex_quote(
            os.path.join(self.music_dir, 'x', 'y', 'z.mp3'))))
        item = results[0]
        beets.plugins.send('item_removed', item=item)

        # Emit cli_exit event
        beets.plugins.send('cli_exit', lib=self.lib)

        # Check playlist with absolute paths
        playlist_path = os.path.join(self.playlist_dir, 'absolute.m3u')
        with open(playlist_path, 'r') as f:
            lines = [line.strip() for line in f.readlines()]

        self.assertEqual(lines, [
            os.path.join(self.music_dir, 'a', 'b', 'c.mp3'),
            os.path.join(self.music_dir, 'nonexisting.mp3'),
        ])

        # Check playlist with relative paths
        playlist_path = os.path.join(self.playlist_dir, 'relative.m3u')
        with open(playlist_path, 'r') as f:
            lines = [line.strip() for line in f.readlines()]

        self.assertEqual(lines, [
            os.path.join('a', 'b', 'c.mp3'),
            'nonexisting.mp3',
        ])
Example #3
    def startup_command(self, ctx, **kwds):
        """Return a shell command used to startup this instance.

        Among other common planemo kwds, this should respect the
        ``daemon`` keyword.
        """
        daemon = kwds.get("daemon", False)
        # TODO: Allow running dockerized Galaxy here instead.
        setup_venv_command = setup_venv(ctx, kwds)
        run_script = "%s $COMMON_STARTUP_ARGS" % shlex_quote(os.path.join(self.galaxy_root, "run.sh"))
        if daemon:
            run_script += " --daemon"
            self.env["GALAXY_RUN_ALL"] = "1"
        else:
            run_script += " --server-name %s" % shlex_quote(self.server_name)
        server_ini = os.path.join(self.config_directory, "galaxy.ini")
        self.env["GALAXY_CONFIG_FILE"] = server_ini
        if parse_version(kwds.get('galaxy_python_version') or DEFAULT_PYTHON_VERSION) >= parse_version('3'):
            # We need to start under gunicorn
            self.env['APP_WEBSERVER'] = 'gunicorn'
            self.env['GUNICORN_CMD_ARGS'] = "--bind={host}:{port} --name={server_name}".format(
                host=kwds.get('host', '127.0.0.1'),
                port=kwds['port'],
                server_name=self.server_name,
            )
        cd_to_galaxy_command = ['cd', self.galaxy_root]
        return shell_join(
            cd_to_galaxy_command,
            setup_venv_command,
            setup_common_startup_args(),
            run_script,
        )
Example #4
 def make_test_dockerfile(self, docker_file):
     """Used to determine if we need to rebuild the image"""
     self.setup_lines()
     docker_lines = docker_file.docker_lines + [
         "RUN echo {0}".format(shlex_quote(self.action))
       , "RUN echo {0}".format(" ".join(self.folders))
       , "RUN echo {0}".format(shlex_quote(self.default_cmd))
       ]
     return DockerFile(docker_lines=docker_lines, mtime=docker_file.mtime)
Example #5
def new_cmd(session, name, cmd, mode, logdir, shell):
    if isinstance(cmd, (list, tuple)):
        cmd = " ".join(shlex_quote(str(v)) for v in cmd)
    if mode == 'tmux':
        return name, "tmux send-keys -t {}:{} {} Enter".format(session, name, shlex_quote(cmd))
    elif mode == 'child':
        return name, "{} >{}/{}.{}.out 2>&1 & echo kill $! >>{}/kill.sh".format(cmd, logdir, session, name, logdir)
    elif mode == 'nohup':
        return name, "nohup {} -c {} >{}/{}.{}.out 2>&1 & echo kill $! >>{}/kill.sh".format(shell, shlex_quote(cmd), logdir, session, name, logdir)
Example #6
 def wrap_cmd(session, name, cmd):
     if isinstance(cmd, list):
         cmd = ' '.join(shlex_quote(str(arg)) for arg in cmd)
     if args.mode == 'tmux':
         return 'tmux send-keys -t {}:{} {} Enter'.format(session, name, shlex_quote(cmd))
     elif args.mode == 'child':
         return '{} > {}/{}.{}.out 2>&1 & echo kill $! >> {}/kill.sh'.format(
             cmd, args.logdir, session, name, args.logdir
         )
Example #7
 def make_first_dockerfile(self, docker_file):
     """
     Makes the dockerfile for when we don't already have this image
     It will just perform the action after the normal docker lines.
     """
     self.setup_lines()
     docker_lines = docker_file.docker_lines + [
           "RUN {0} -c {1}".format(shlex_quote(self.shell), shlex_quote(self.action))
         , "CMD {0}".format(self.default_cmd)
         ]
     return DockerFile(docker_lines=docker_lines, mtime=docker_file.mtime)
Example #8
def process_in_parallel(tag, total_range_size, binary, output_dir):
    """Run the specified binary cfg.NUM_GPUS times in parallel, each time as a
    subprocess that uses one GPU. The binary must accept the command line
    arguments `--range {start} {end}` that specify a data processing range.
    """
    # Snapshot the current cfg state in order to pass to the inference
    # subprocesses
    cfg_file = os.path.join(output_dir, '{}_range_config.yaml'.format(tag))
    with open(cfg_file, 'w') as f:
        yaml.dump(cfg, stream=f)
    subprocess_env = os.environ.copy()
    processes = []
    subinds = np.array_split(range(total_range_size), cfg.NUM_GPUS)
    for i in range(cfg.NUM_GPUS):
        start = subinds[i][0]
        end = subinds[i][-1] + 1
        subprocess_env['CUDA_VISIBLE_DEVICES'] = str(i)
        cmd = '{binary} --range {start} {end} --cfg {cfg_file} NUM_GPUS 1'
        cmd = cmd.format(
            binary=shlex_quote(binary),
            start=int(start),
            end=int(end),
            cfg_file=shlex_quote(cfg_file)
        )
        logger.info('{} range command {}: {}'.format(tag, i, cmd))
        if i == 0:
            subprocess_stdout = subprocess.PIPE
        else:
            filename = os.path.join(
                output_dir, '%s_range_%s_%s.stdout' % (tag, start, end)
            )
            subprocess_stdout = open(filename, 'w')  # NOQA (close below)
        p = subprocess.Popen(
            cmd,
            shell=True,
            env=subprocess_env,
            stdout=subprocess_stdout,
            stderr=subprocess.STDOUT,
            bufsize=1
        )
        processes.append((i, p, start, end, subprocess_stdout))
    # Log output from inference processes and collate their results
    outputs = []
    for i, p, start, end, subprocess_stdout in processes:
        log_subprocess_output(i, p, output_dir, tag, start, end)
        if isinstance(subprocess_stdout, file):  # NOQA (Python 2 for now)
            subprocess_stdout.close()
        range_file = os.path.join(
            output_dir, '%s_range_%s_%s.pkl' % (tag, start, end)
        )
        range_data = pickle.load(open(range_file))
        outputs.append(range_data)
    return outputs
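As a side note, the start/end values above come from numpy's array_split; a quick standalone check (the sizes are invented) of how a range is divided across GPUs:

import numpy as np

subinds = np.array_split(range(10), 4)  # e.g. total_range_size=10, cfg.NUM_GPUS=4
print([(int(s[0]), int(s[-1]) + 1) for s in subinds])
# [(0, 3), (3, 6), (6, 8), (8, 10)]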
def run_on_control_instead(args, sys_argv):
    argv = [arg for arg in sys_argv][1:]
    argv.remove('--control')
    executable = 'commcare-cloud'
    branch = getattr(args, 'branch', 'master')
    cmd_parts = [
        executable, args.env_name, 'ssh', 'control', '-t',
        'source ~/init-ansible && git fetch --prune && git checkout {branch} '
        '&& git reset --hard origin/{branch} && source ~/init-ansible && {cchq} {cchq_args}'
        .format(branch=branch, cchq=executable, cchq_args=' '.join([shlex_quote(arg) for arg in argv]))
    ]

    print_command(' '.join([shlex_quote(part) for part in cmd_parts]))
    os.execvp(executable, cmd_parts)
Example #10
def __add_arg(args, arg, value):
    optarg = '--%s' % arg
    if isinstance(value, bool):
        if value is True:
            args.append(optarg)
    elif isinstance(value, string_types):
        # the = in --optarg=value is usually, but not always, optional
        if value.startswith('='):
            args.append(shlex_quote(optarg + value))
        else:
            args.append(optarg)
            args.append(shlex_quote(value))
    else:
        for v in value:
            __add_arg(args, arg, v)
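A hypothetical call pattern for __add_arg above (option names and values are invented; assumes the snippet's shlex_quote and six string_types imports are in scope):

args = []
__add_arg(args, 'verbose', True)          # bare flag
__add_arg(args, 'name', 'my job')         # value gets shell-quoted
__add_arg(args, 'exclude', ['a b', 'c'])  # lists recurse, one flag per value
print(args)
# ['--verbose', '--name', "'my job'", '--exclude', "'a b'", '--exclude', 'c']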
 def run(self, args, manage_args):
     environment = get_environment(args.env_name)
     public_vars = environment.public_vars
     # the default 'cchq' is redundant with ansible/group_vars/all.yml
     cchq_user = public_vars.get('cchq_user', 'cchq')
     deploy_env = environment.meta_config.deploy_env
     # the paths here are redundant with ansible/group_vars/all.yml
     if args.release:
         code_dir = '/home/{cchq_user}/www/{deploy_env}/releases/{release}'.format(
             cchq_user=cchq_user, deploy_env=deploy_env, release=args.release)
     else:
         code_dir = '/home/{cchq_user}/www/{deploy_env}/current'.format(
             cchq_user=cchq_user, deploy_env=deploy_env)
     remote_command = (
         'bash -c "cd {code_dir}; python_env/bin/python manage.py {args}"'
         .format(
             cchq_user=cchq_user,
             code_dir=code_dir,
             args=' '.join(shlex_quote(arg) for arg in manage_args),
         )
     )
     args.server = args.server or 'django_manage:0'
     if args.tmux:
         args.remote_command = remote_command
         Tmux(self.parser).run(args, [])
     else:
         ssh_args = ['sudo -u {cchq_user} {remote_command}'.format(
             cchq_user=cchq_user,
             remote_command=remote_command,
         )]
         if manage_args and manage_args[0] in ["shell", "dbshell"]:
             # force ssh to allocate a pseudo-terminal
             ssh_args = ['-t'] + ssh_args
         Ssh(self.parser).run(args, ssh_args)
 def run(self, args, ssh_args):
     environment = get_environment(args.env_name)
     public_vars = environment.public_vars
     if args.server == '-':
         args.server = 'django_manage:0'
     # the default 'cchq' is redundant with ansible/group_vars/all.yml
     cchq_user = public_vars.get('cchq_user', 'cchq')
     # Name tabs like "droberts (2018-04-13)"
     window_name_expression = '"`whoami` (`date +%Y-%m-%d`)"'
     if args.remote_command:
         ssh_args = [
             '-t',
             r'sudo -iu {cchq_user} tmux attach \; new-window -n {window_name} {remote_command} '
             r'|| sudo -iu {cchq_user} tmux new -n {window_name} {remote_command}'
             .format(
                 cchq_user=cchq_user,
                 remote_command=shlex_quote('{} ; bash'.format(args.remote_command)),
                 window_name=window_name_expression,
             )
         ] + ssh_args
     else:
         ssh_args = [
             '-t',
             'sudo -iu {cchq_user} tmux attach || sudo -iu {cchq_user} tmux new -n {window_name}'
             .format(cchq_user=cchq_user, window_name=window_name_expression)
         ]
     Ssh(self.parser).run(args, ssh_args)
    def ansible_playbook(environment, playbook, *cmd_args):
        if os.path.isabs(playbook):
            playbook_path = playbook
        else:
            playbook_path = os.path.join(ANSIBLE_DIR, '{playbook}'.format(playbook=playbook))
        cmd_parts = (
            'ansible-playbook',
            playbook_path,
            '-i', environment.paths.inventory_source,
            '-e', '@{}'.format(environment.paths.vault_yml),
            '-e', '@{}'.format(environment.paths.public_yml),
            '-e', '@{}'.format(environment.paths.generated_yml),
            '--diff',
        ) + get_limit() + cmd_args

        public_vars = environment.public_vars
        cmd_parts += get_user_arg(public_vars, unknown_args, use_factory_auth)

        if has_arg(unknown_args, '-D', '--diff') or has_arg(unknown_args, '-C', '--check'):
            puts(colored.red("Options --diff and --check not allowed. Please remove -D, --diff, -C, --check."))
            puts("These ansible-playbook options are managed automatically by commcare-cloud and cannot be set manually.")
            return 2  # exit code

        ask_vault_pass = public_vars.get('commcare_cloud_use_vault', True)
        if ask_vault_pass:
            cmd_parts += ('--vault-password-file={}/echo_vault_password.sh'.format(ANSIBLE_DIR),)

        cmd_parts_with_common_ssh_args = get_common_ssh_args(environment, use_factory_auth=use_factory_auth)
        cmd_parts += cmd_parts_with_common_ssh_args
        cmd = ' '.join(shlex_quote(arg) for arg in cmd_parts)
        print_command(cmd)
        env_vars = ansible_context.env_vars
        if ask_vault_pass:
            env_vars['ANSIBLE_VAULT_PASSWORD'] = environment.get_ansible_vault_password()
        return subprocess.call(cmd_parts, env=env_vars)
def format_identity_file(ssh_key_file):
    """Render identity file option."""
    if ssh_key_file:
        safe_key_path = shlex_quote(str(ssh_key_file))
        return '-i {}'.format(safe_key_path)
    else:
        return ''
Example #15
    def _write_load_config_sh(self, environments_dir, quiet):
        puts(colored.blue("Let's get you set up to run commcare-cloud."))

        if not environments_dir:
            environments_dir = self._determine_environments_dir(quiet=quiet)

        commcare_cloud_dir = os.path.expanduser("~/.commcare-cloud")
        if not os.path.exists(commcare_cloud_dir):
            os.makedirs(commcare_cloud_dir)
        load_config_file = os.path.expanduser("~/.commcare-cloud/load_config.sh")
        if not os.path.exists(load_config_file) or \
                ask("Overwrite your ~/.commcare-cloud/load_config.sh?", quiet=quiet):
            with open(load_config_file, 'w') as f:
                f.write(textwrap.dedent("""
                    # auto-generated with `manage-commcare-cloud configure`:
                    export COMMCARE_CLOUD_ENVIRONMENTS={COMMCARE_CLOUD_ENVIRONMENTS}
                    export PATH=$PATH:{virtualenv_path}
                    source {PACKAGE_BASE}/.bash_completion
                """.format(
                    COMMCARE_CLOUD_ENVIRONMENTS=shlex_quote(environments_dir),
                    virtualenv_path=get_virtualenv_bin_path(),
                    PACKAGE_BASE=PACKAGE_BASE,
                )).strip())
        puts(colored.blue("Add the following to your ~/.bash_profile:"))
        puts(colored.cyan("source ~/.commcare-cloud/load_config.sh"))
        puts(colored.blue(
            "and then open a new shell. "
            "You should be able to run `commcare-cloud` without entering your virtualenv."))
Example #16
File: filter.py Project: amol9/redcmd
    def match(self, name):
        dirpath = dirname(name)

        if not any([len(i) > 0 for i in [self.ext_list, self.regex_list, self.glob_list]]):
            self.glob_list.append("*")  # no filters, return all

        ext_matches = []
        for e in self.ext_list:
            ext_matches.extend(self.glob(dirpath, "*." + e))

        glob_matches = []
        for p in self.glob_list:
            glob_matches.extend(self.glob(dirpath, p))

        regex_matches = []
        if len(self.regex_list) > 0:
            allf = self.glob(dirpath, "*")
            for r in self.regex_list:
                cr = re.compile(r)
                for p in allf:
                    if cr.match(basename(p)) is not None:
                        regex_matches.append(p)

        nodups = list(set(ext_matches + glob_matches + regex_matches))

        prefix = basename(name)
        if prefix != "":
            lf = ListFilter(nodups)
            result = lf.match(prefix)
        else:
            result = nodups

        return [shlex_quote(joinpath(dirpath, n)) for n in result]
Example #17
    def _launch_legacy(self, image, env_override, volumes):
        """Legacy launch method for use when the container interface is not enabled
        """
        raw_cmd = self.docker_cmd(image, env_override=env_override, volumes=volumes)

        log.info("Starting docker container for IE {0} with command [{1}]".format(
            self.attr.viz_id,
            ' '.join([shlex_quote(x) for x in raw_cmd])
        ))
        p = Popen(raw_cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            log.error("Container Launch error\n\n%s\n%s" % (stdout, stderr))
            return None
        else:
            container_id = stdout.strip()
            log.debug("Container id: %s" % container_id)
            inspect_data = self.inspect_container(container_id)
            port_mappings = self.get_container_port_mapping(inspect_data)
            self.attr.docker_hostname = self.get_container_host(inspect_data)
            host_port = self._find_port_mapping(port_mappings)[-1]
            log.debug("Container host/port: %s:%s", self.attr.docker_hostname, host_port)

            # Now we configure our proxy_request object and we manually specify
            # the port to map to and ensure the proxy is available.
            self.attr.proxy_request = self.trans.app.proxy_manager.setup_proxy(
                self.trans,
                host=self.attr.docker_hostname,
                port=host_port,
                proxy_prefix=self.attr.proxy_prefix,
                route_name=self.attr.viz_id,
                container_ids=[container_id],
            )
            # These variables then become available for use in templating URLs
            self.attr.proxy_url = self.attr.proxy_request['proxy_url']
Example #18
    def setup_lines(self):
        """
        Setup convenience lines for copying and waiting for copying
        """
        if getattr(self, "_setup_lines", None):
            return
        self._setup_lines = True

        # Make the shared volume name the same as this image name so it doesn't change every time
        shared_name = self["shared_name"] = self.image_name().replace('/', '__')

        # underscored names for our folders
        def without_last_slash(val):
            while val and val.endswith("/"):
                val = val[:-1]
            return val
        folders_underscored = self["folders_underscored"] = [(shlex_quote(name.replace("_", "__").replace("/", "_")), shlex_quote(without_last_slash(name))) for name in self.folders]

        self["move_from_volume"] = " ; ".join(
              "echo {0} && rm -rf {0} && mkdir -p $(dirname {0}) && mv /{1}/{2} {0}".format(name, self.shared_name, underscored)
              for underscored, name in self.folders_underscored
            )

        self["move_into_volume"] = " ; ".join(
              "echo {0} && mkdir -p {0} && mv {0} /{1}/{2}".format(name, self.shared_name, underscored)
              for underscored, name in self.folders_underscored
            )
Example #19
def explore(args):
    if not os.path.exists(args.db):
        print("Unable to find database `{}`".format(args.db))
        return
    conn = sqlite3.connect(args.db)
    create_results(conn)
    call(['sqlite3', '-column', '-header', shlex_quote(args.db)])
Example #20
    def attempt_port_bind(port):
        uwsgi_command = [
            "uwsgi",
            "--http",
            "%s:%s" % (host, port),
            "--pythonpath",
            os.path.join(galaxy_root, "lib"),
            "--yaml",
            yaml_config_path,
            "--module",
            "galaxy.webapps.galaxy.buildapp:uwsgi_app_factory()",
            "--enable-threads",
            "--die-on-term",
        ]

        handle_uwsgi_cli_command = getattr(
            config_object, "handle_uwsgi_cli_command", None
        )
        if handle_uwsgi_cli_command is not None:
            handle_uwsgi_cli_command(uwsgi_command)

        # we don't want to quote every argument but we don't want to print unquoted ones either, so do this
        log.info("Starting uwsgi with command line: %s", ' '.join([shlex_quote(x) for x in uwsgi_command]))
        p = subprocess.Popen(
            uwsgi_command,
            cwd=galaxy_root,
            preexec_fn=os.setsid,
        )
        return UwsgiServerWrapper(
            p, name, host, port
        )
Example #21
File: pypi.py Project: mbr/unleash
def publish_release():
    with in_tmpexport(commit) as td, VirtualEnv.temporary() as ve:
        if opts['dry_run']:
            log.info('Creating source distribution, no upload (dry-run)')
            subprocess.check_output(
                [ve.python, 'setup.py', 'sdist'],
                cwd=td,
            )
        else:
            ve.pip_install(td)

            args = [ve.python, 'setup.py',
                    'sdist',
                    'upload'
                    ]

            if opts['sign']:
                args.append('-s')

                if opts['identity'] is not None:
                    args.extend(['-i', shlex_quote(opts['identity'])])
                    log.info('Uploading signed source distribution to PyPI, '
                             'using key \'{}\''.format(opts['identity']))
                else:
                    log.info('Uploading signed source distribution to PyPI '
                             'using default identity')
            else:
                log.info('Uploading unsigned source distribution to PyPI')

            log.debug('Running {}'.format(args))
            ve.check_output(
                args,
                cwd=td,
            )
Example #22
File: run.py Project: hanke/datalad
def normalize_command(command):
    """Convert `command` to the string representation.
    """
    if isinstance(command, list):
        command = list(map(assure_unicode, command))
        if len(command) == 1 and command[0] != "--":
            # This is either a quoted compound shell command or a simple
            # one-item command. Pass it as is.
            #
            # FIXME: This covers the predominant command-line case, but, for
            # Python API callers, it means values like ["./script with spaces"]
            # requires additional string-like escaping, which is inconsistent
            # with the handling of multi-item lists (and subprocess's
            # handling). Once we have a way to detect "running from Python API"
            # (discussed in gh-2986), update this.
            command = command[0]
        else:
            if command and command[0] == "--":
                # Strip disambiguation marker. Note: "running from Python API"
                # FIXME from below applies to this too.
                command = command[1:]
            command = " ".join(shlex_quote(c) for c in command)
    else:
        command = assure_unicode(command)
    return command
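A few illustrative calls to normalize_command above (inputs are made up; assumes datalad's assure_unicode and the shlex_quote import are in scope):

print(normalize_command("echo hi"))                 # plain strings pass through
print(normalize_command(["./script with spaces"]))  # single-item list passed as is
print(normalize_command(["--", "ls", "my dir"]))    # marker stripped, items quoted
# echo hi
# ./script with spaces
# ls 'my dir'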
Example #23
File: stack.py Project: renskiy/fabricio
    def save_new_settings(self, configuration, image):
        self.rotate_sentinel_images()

        labels = [(self.configuration_label, b64encode(configuration).decode())]
        try:
            digests = self._get_digests(self.images)
            digests_bucket = json.dumps(digests, sort_keys=True)
            digests_bucket = b64encode(digests_bucket.encode()).decode()
            labels.append((self.digests_label, digests_bucket))
        except fabricio.host_errors:
            pass

        dockerfile = (
            'FROM {image}\n'
            'LABEL {labels}\n'
        ).format(
            image=image or 'scratch',
            labels=' '.join(itertools.starmap('{0}={1}'.format, labels)),
        )
        build_command = 'echo {dockerfile} | docker build --tag {tag} -'.format(
            dockerfile=shlex_quote(dockerfile),
            tag=self.current_settings_tag,
        )

        try:
            fabricio.run(build_command)
        except fabricio.host_errors as error:
            fabricio.log(
                'WARNING: {error}'.format(error=error),
                output=sys.stderr,
                color=colors.red,
            )
Example #24
 def build(self):
     xunit_report_file = self.xunit_report_file
     sd_report_file = self.structured_report_file
     cmd = "sh run_tests.sh $COMMON_STARTUP_ARGS --report_file %s" % shlex_quote(self.html_report_file)
     if xunit_report_file:
         cmd += " --xunit_report_file %s" % shlex_quote(xunit_report_file)
     if sd_report_file:
         cmd += " --structured_data_report_file %s" % shlex_quote(sd_report_file)
     if self.installed:
         cmd += ' -installed'
     elif self.failed:
         sd = StructuredData(sd_report_file)
         tests = " ".join(sd.failed_ids)
         cmd += " %s" % tests
     else:
         cmd += ' functional.test_toolbox'
     return cmd
Example #25
 def test_path_query_with_nonexisting_playlist(self):
     q = u'playlist:{0}'.format(shlex_quote(os.path.join(
         self.playlist_dir,
         self.playlist_dir,
         'nonexisting.m3u',
     )))
     results = self.lib.items(q)
     self.assertEqual(set(results), set())
Example #26
def exec_fab_command(env_name, *extra_args):
    cmd_parts = (
        'fab', '-f', FABFILE,
        env_name,
    ) + tuple(extra_args)
    cmd = ' '.join(shlex_quote(arg) for arg in cmd_parts)
    print_command(cmd)
    return subprocess.call(cmd_parts)
Example #27
def print_command(command):
    """
    commcare-cloud commands by convention print the underlying command they execute

    Use this function to do so
    """
    if isinstance(command, (list, tuple)):
        command = ' '.join(shlex_quote(arg) for arg in command)
    print(colored.cyan(command), file=sys.stderr)
Example #28
def ssh_run_cmd(connect_string, cmd, remote_cwd=None, *args, **kwargs):
    if remote_cwd:
        cmd_real = 'mkdir -p {0} ; cd {0} ; '.format(shlex_quote(remote_cwd))
    else:
        cmd_real = ''

    cmd_real += escape_cmd_for_ssh(cmd)
    ssh_cmd = ['ssh', '-q', '-oStrictHostKeyChecking=no', '-oUserKnownHostsFile=/dev/null', connect_string, cmd_real]
    return run_cmd(ssh_cmd, *args, **kwargs)
Example #29
 def get_display_text(self, quote=True):
     """
     Returns a string containing the value that would be displayed to the user in the tool interface.
     When quote is True (default), the string is escaped for e.g. command-line usage.
     """
     rval = self.input.value_to_display_text(self.value) or ''
     if quote:
         return shlex_quote(rval)
     return rval
 def run(self, args, ssh_args):
     address = self.lookup_server_address(args)
     if ':' in address:
         address, port = address.split(':')
         ssh_args = ['-p', port] + ssh_args
     cmd_parts = [self.command, address] + ssh_args
     cmd = ' '.join(shlex_quote(arg) for arg in cmd_parts)
     print_command(cmd)
     return subprocess.call(cmd_parts)
Example #31
 def fmt(val):
     if isinstance(val, float):
         val = round(val, 4)
     return shlex_quote(format_flag_val(val))
Example #32
 def _run_in_container(self, node, *command):
     command = ' '.join([shlex_quote(str(arg)) for arg in command])
     self._get_container(node).exec_run(command)
Example #33
 def _exec_attr(self):
     return "${python_exe} -um guild.op_main %s ${flag_args}" % shlex_quote(
         self._script_module()
     )
Example #34
def create_commands(session,
                    num_workers,
                    remotes,
                    env_id,
                    logdir,
                    shell='bash',
                    policy='lstm',
                    mode='tmux',
                    visualise=False):
    # for launching the TF workers and for launching tensorboard
    base_cmd = [
        'CUDA_VISIBLE_DEVICES=', sys.executable, 'worker.py', '--log-dir',
        logdir, '--env-id', env_id, '--num-workers',
        str(num_workers)
    ]

    if visualise:
        base_cmd += ['--visualise']

    if remotes is None:
        remotes = ["1"] * num_workers
    else:
        remotes = remotes.split(',')
        assert len(remotes) == num_workers

    cmds_map = [
        new_cmd(session, "ps", base_cmd + ["--job-name", "ps"], mode, logdir,
                shell)
    ]
    for i in range(num_workers):
        cmds_map += [
            new_cmd(
                session, "w-%d" % i, base_cmd + [
                    "--job-name", "worker", "--task",
                    str(i), "--remotes", remotes[i], "--policy", policy
                ], mode, logdir, shell)
        ]

    cmds_map += [
        new_cmd(session, "tb",
                ["tensorboard", "--logdir", logdir, "--port", "12345"], mode,
                logdir, shell)
    ]
    if mode == 'tmux':
        cmds_map += [new_cmd(session, "htop", ["htop"], mode, logdir, shell)]

    windows = [v[0] for v in cmds_map]

    notes = []
    cmds = [
        "mkdir -p {}".format(logdir),
        "echo {} {} > {}/cmd.sh".format(
            sys.executable,
            ' '.join([shlex_quote(arg) for arg in sys.argv if arg != '-n']),
            logdir),
    ]
    if mode == 'nohup' or mode == 'child':
        cmds += ["echo '#!/bin/sh' >{}/kill.sh".format(logdir)]
        notes += ["Run `source {}/kill.sh` to kill the job".format(logdir)]
    if mode == 'tmux':
        notes += [
            "Use `tmux attach -t {}` to watch process output".format(session)
        ]
        notes += [
            "Use `tmux kill-session -t {}` to kill the job".format(session)
        ]
    else:
        notes += [
            "Use `tail -f {}/*.out` to watch process output".format(logdir)
        ]
    notes += [
        "Point your browser to http://localhost:12345 to see Tensorboard"
    ]

    if mode == 'tmux':
        cmds += [
            "kill $( lsof -i:12345 -t ) > /dev/null 2>&1",  # kill any process using tensorboard's port
            "kill $( lsof -i:12222-{} -t ) > /dev/null 2>&1".format(
                num_workers +
                12222),  # kill any processes using ps / worker ports
            "tmux kill-session -t {}".format(session),
            "tmux new-session -s {} -n {} -d {}".format(
                session, windows[0], shell)
        ]
        for w in windows[1:]:
            cmds += [
                "tmux new-window -t {} -n {} {}".format(session, w, shell)
            ]
        cmds += ["sleep 1"]
    for window, cmd in cmds_map:
        cmds += [cmd]

    return cmds, notes
Example #35
def create_commands(session,
                    num_workers,
                    remotes,
                    logdir,
                    shell='bash',
                    visualise=False):
    # for launching the workers and for launching TensorBoard
    base_cmd = [
        'CUDA_VISIBLE_DEVICES=', sys.executable, 'worker.py', '--log-dir',
        logdir, '--num-workers',
        str(num_workers)
    ]

    # add the visualisation flag if requested
    if visualise:
        base_cmd += ['--visualise']

    if remotes is None:
        remotes = ["1"] * num_workers
    else:
        remotes = remotes.split(',')
        assert len(remotes) == num_workers

    cmds_map = [
        new_cmd(session, "ps", base_cmd + ["--job-name", "ps"], logdir, shell)
    ]

    # for each worker
    for i in range(num_workers):
        cmds_map += [
            new_cmd(
                session, "w-%d" % i, base_cmd + [
                    "--job-name", "worker", "--task",
                    str(i), "--remotes", remotes[i]
                ], logdir, shell)
        ]

    cmds_map += [
        new_cmd(session, "tb",
                ["tensorboard", "--logdir", logdir, "--port", "6006"], logdir,
                shell)
    ]

    notes = []
    cmds = [
        "mkdir -p {}".format(logdir),
        "echo {} {} > {}/cmd.sh".format(
            sys.executable,
            ' '.join([shlex_quote(arg) for arg in sys.argv if arg != '-n']),
            logdir),
    ]
    cmds += ["echo '#!/bin/sh' >{}/kill.sh".format(logdir)]
    notes += ["运行 `source {}/kill.sh` 命令来结束各个进程".format(logdir)]

    notes += ["运行 `tail -f {}/*.out` 命令来查看各个进程的输出".format(logdir)]
    notes += ["在浏览器中打开 http://localhost:6006 ,查看 TensorBoard 运行的结果"]

    for window, cmd in cmds_map:
        cmds += [cmd]

    return cmds, notes
Example #36
def create_commands(session, num_workers, log_dir, shell='bash'):
    # for launching the TF workers and for launching tensorboard
    base_cmd = ['CUDA_VISIBLE_DEVICES=', sys.executable, 'worker.py']

    cmds_map = [
        new_cmd(
            session, "ps",
            base_cmd + ["--job-name", "ps", "--num-workers",
                        str(num_workers)])
    ]
    for i in range(num_workers):
        cmds_map += [
            new_cmd(
                session, "w-%d" % i, base_cmd + [
                    "--job-name", "worker", "--task",
                    str(i), "--num-workers",
                    str(num_workers)
                ])
        ]

    cmds_map += [
        new_cmd(session, "tb", [
            "tensorboard", "--logdir", log_dir, "--port", "{}".format(TB_PORT)
        ])
    ]
    cmds_map += [new_cmd(session, "htop", ["htop"])]

    windows = [v[0] for v in cmds_map]

    notes = []
    cmds = [
        "mkdir -p {}".format(log_dir),
        "echo {} {} > {}/cmd.sh".format(
            sys.executable,
            ' '.join([shlex_quote(arg) for arg in sys.argv if arg != '-n']),
            log_dir),
    ]
    notes += [
        "Use `tmux attach -t {}` to watch process output".format(session)
    ]
    notes += ["Use `tmux kill-session -t {}` to kill the job".format(session)]
    notes += [
        "Point your browser to http://localhost:{} to see Tensorboard".format(
            TB_PORT)
    ]

    cmds += [
        "kill $( lsof -i:{} -t ) > /dev/null 2>&1".format(
            TB_PORT),  # kill any process using tensorboard's port
        "kill $( lsof -i:{}-{} -t ) > /dev/null 2>&1".format(
            PS_PORT, num_workers +
            PS_PORT),  # kill any processes using ps / worker ports
        "tmux kill-session -t {}".format(session),
        "tmux new-session -s {} -n {} -d {}".format(session, windows[0], shell)
    ]
    for w in windows[1:]:
        cmds += ["tmux new-window -t {} -n {} {}".format(session, w, shell)]
    cmds += ["sleep 1"]
    for window, cmd in cmds_map:
        cmds += [cmd]

    return cmds, notes
Example #37
def test_flags_reqs_f(d):
    p = get_exists_path('libsimpleflag')
    reqs_file = d.write_to('reqs', [shlex_quote(p) + ' -DCGET_FLAG=On'])
    d.cmds(install_cmds(url='-f {}'.format(reqs_file), alias=p))
Example #38
def run_ansible_module(environment,
                       ansible_context,
                       inventory_group,
                       module,
                       module_args,
                       become=True,
                       become_user=None,
                       use_factory_auth=False,
                       quiet=False,
                       extra_args=()):
    extra_args = tuple(extra_args)
    if not quiet:
        extra_args = ("--diff", ) + extra_args
    else:
        extra_args = ("--one-line", ) + extra_args

    cmd_parts = (
        'ansible',
        inventory_group,
        '-m',
        module,
        '-i',
        environment.paths.inventory_source,
        '-a',
        module_args,
    ) + extra_args

    environment.create_generated_yml()
    public_vars = environment.public_vars
    cmd_parts += get_user_arg(public_vars,
                              extra_args,
                              use_factory_auth=use_factory_auth)
    become = become or bool(become_user)
    become_user = become_user
    needs_secrets = False
    env_vars = ansible_context.env_vars

    if become:
        cmd_parts += ('--become', )
        needs_secrets = True
        if become_user:
            cmd_parts += ('--become-user', become_user)

    if needs_secrets:
        cmd_parts += (
            '-e',
            '@{}'.format(environment.paths.public_yml),
            '-e',
            '@{}'.format(environment.paths.generated_yml),
        )
        cmd_parts += environment.secrets_backend.get_extra_ansible_args()
        env_vars.update(
            environment.secrets_backend.get_extra_ansible_env_vars())

    cmd_parts_with_common_ssh_args = get_common_ssh_args(
        environment, use_factory_auth=use_factory_auth)
    cmd_parts += cmd_parts_with_common_ssh_args
    cmd = ' '.join(shlex_quote(arg) for arg in cmd_parts)
    if not quiet:
        print_command(cmd)
    return subprocess.call(cmd_parts, env=env_vars)
def build_docker_run_command(
    container_command,
    image,
    interactive=False,
    terminal=False,
    tag=None,
    volumes=[],
    volumes_from=DEFAULT_VOLUMES_FROM,
    memory=DEFAULT_MEMORY,
    env_directives=[],
    working_directory=DEFAULT_WORKING_DIRECTORY,
    name=None,
    net=DEFAULT_NET,
    run_extra_arguments=DEFAULT_RUN_EXTRA_ARGUMENTS,
    docker_cmd=DEFAULT_DOCKER_COMMAND,
    sudo=DEFAULT_SUDO,
    sudo_cmd=DEFAULT_SUDO_COMMAND,
    auto_rm=DEFAULT_AUTO_REMOVE,
    set_user=DEFAULT_SET_USER,
    host=DEFAULT_HOST,
):
    command_parts = _docker_prefix(
        docker_cmd=docker_cmd,
        sudo=sudo,
        sudo_cmd=sudo_cmd,
        host=host
    )
    command_parts.append("run")
    if interactive:
        command_parts.append("-i")
    if terminal:
        command_parts.append("-t")
    for env_directive in env_directives:
        # e.g. -e "GALAXY_SLOTS=$GALAXY_SLOTS"
        # These are environment variable expansions so we don't quote these.
        command_parts.extend(["-e", env_directive])
    for volume in volumes:
        # Volume strings may contain environment-variable expansions, so only shell-quote them when no "$" is present.
        volume_str = str(volume)
        if "$" not in volume_str:
            volume_for_cmd_line = shlex_quote(volume_str)
        else:
            # e.g. $_GALAXY_JOB_TMP_DIR:$_GALAXY_JOB_TMP_DIR:rw so don't single quote.
            volume_for_cmd_line = '"%s"' % volume_str
        command_parts.extend(["-v", volume_for_cmd_line])
    if volumes_from:
        command_parts.extend(["--volumes-from", shlex_quote(str(volumes_from))])
    if memory:
        command_parts.extend(["-m", shlex_quote(memory)])
    if name:
        command_parts.extend(["--name", shlex_quote(name)])
    if working_directory:
        command_parts.extend(["-w", shlex_quote(working_directory)])
    if net:
        command_parts.extend(["--net", shlex_quote(net)])
    if auto_rm:
        command_parts.append("--rm")
    if run_extra_arguments:
        command_parts.append(run_extra_arguments)
    if set_user:
        user = set_user
        if set_user == DEFAULT_SET_USER:
            # If future-us is ever in here and fixing this for docker-machine just
            # use cwltool.docker_id - it takes care of this default nicely.
            euid = os.geteuid()
            egid = os.getgid()

            user = "******" % (euid, egid)
        command_parts.extend(["--user", user])
    full_image = image
    if tag:
        full_image = "%s:%s" % (full_image, tag)
    command_parts.append(shlex_quote(full_image))
    command_parts.append(container_command)
    return " ".join(command_parts)
Example #40
def git_log(cfg, filenames, rev_range=None):
    '''
        A git log implementation that allows more flexibility:
        
        - Follow multiple files
        - Exclude commits we don't want to see
    '''

    if len(filenames) == 0:
        print("Specify at least one file to log")

    elif len(filenames) == 1 and not cfg.excluded_commits:
        if rev_range:
            cmd = 'git log --follow -p %s %s' % (rev_range, filenames[0])
        else:
            cmd = 'git log --follow -p %s' % (filenames[0])

        os.system(cmd)
    else:
        # To show renames properly, we have to switch to the root
        # of the repository and then specify the potentially renamed file
        # for each commit
        oldcwd = os.getcwd()
        git_toplevel = str(sh.git('rev-parse', '--show-toplevel')).strip()

        # Use git log to generate lists of commits for each file, sort
        commit_data = []
        for fname in filenames:
            commit_data += _get_commits(fname, rev_range)

        if not len(commit_data):
            return

        # Sort the results by timestamp
        if len(filenames) > 1:
            commit_data.sort(reverse=True)

        # Create an index of filenames per commit id
        fname_by_commit = {}
        for _, commit, fname in commit_data:
            fname_by_commit.setdefault(commit, []).append(fname)

        # Uniquify (http://www.peterbe.com/plog/uniqifiers-benchmark)
        seen = set()
        seen_add = seen.add
        commits = [
            c for _, c, _ in commit_data if not (c in seen or seen_add(c))
        ]

        # Finally, display them
        try:
            os.chdir(git_toplevel)

            commands = []

            for commit in commits:
                if not cfg.is_commit_excluded(commit):
                    for i, fname in enumerate(sorted(fname_by_commit[commit])):
                        fname = shlex_quote(fname)
                        # git log --follow only allows a single filename, so
                        # merge the outputs together using separate commands
                        if i == 0:
                            commands.append(
                                'git log -p -1 --follow --color %s -- %s' %
                                (commit, fname))
                        else:
                            commands.append(
                                'git log -p -1 --follow --color --pretty='
                                ' %s -- %s' % (commit, fname))

            _multi_output(commands)

        finally:
            os.chdir(oldcwd)
Example #41
 def save(self, *args, **kwargs):  # pylint: disable=arguments-differ
     self.value = shlex_quote(self.value)
     return super().save(*args, **kwargs)
Example #42
def git_diff(filenames, rev_range):
    commands = [
        'git diff --follow -w --color %s %s' % (rev_range, shlex_quote(f))
        for f in filenames
    ]
    _multi_output(commands)
Example #43
def create_commands(session, args, shell='bash'):
    # for launching the TF workers and for launching tensorboard
    base_cmd = [
        sys.executable,
        'worker.py',
        '--log',
        args.log,
        '--env-id',
        args.env_id,
        '--num-workers',
        str(args.num_workers),
        '--num-ps',
        str(args.num_ps),
        '--alg',
        args.alg,
        '--model',
        args.model,
        '--max-step',
        args.max_step,
        '--t-max',
        args.t_max,
        '--eps-step',
        args.eps_step,
        '--eps',
        args.eps,
        '--eps-eval',
        args.eps_eval,
        '--gamma',
        args.gamma,
        '--lr',
        args.lr,
        '--decay',
        args.decay,
        '--sync',
        args.sync,
        '--update-freq',
        args.update_freq,
        '--eval-freq',
        args.eval_freq,
        '--eval-num',
        args.eval_num,
        '--prediction-step',
        args.prediction_step,
        '--dim',
        args.dim,
        '--f-num',
        args.f_num,
        '--f-pad',
        args.f_pad,
        '--f-stride',
        args.f_stride,
        '--f-size',
        args.f_size,
        '--h-dim',
        args.h_dim,
        '--branch',
        args.branch,
        '--config',
        args.config,
        '--buf',
        args.buf,
    ]

    if len(args.gpu) > 0:
        base_cmd += ['--gpu', 1]

    if args.remotes is None:
        args.remotes = ["1"] * args.num_workers
    else:
        args.remotes = args.remotes.split(',')
        assert len(args.remotes) == args.num_workers

    cmds_map = []
    for i in range(args.num_ps + 1):
        prefix = ['CUDA_VISIBLE_DEVICES=']
        cmds_map += [
            new_cmd(session, "ps-%d" % i,
                    prefix + base_cmd + ["--job-name", "ps", "--task",
                                         str(i)], args.mode, args.log, shell)
        ]

    for i in range(args.num_workers):
        prefix = []
        if len(args.gpu) > 0:
            prefix = [
                'CUDA_VISIBLE_DEVICES=%d' % args.gpu[(i % len(args.gpu))]
            ]
        else:
            prefix = ['CUDA_VISIBLE_DEVICES=']
        cmds_map += [
            new_cmd(
                session, "w-%d" % i, prefix + base_cmd + [
                    "--job-name", "worker", "--task",
                    str(i), "--remotes", args.remotes[i]
                ], args.mode, args.log, shell)
        ]

    cmds_map += [
        new_cmd(session, "tb",
                ["tensorboard", "--logdir", args.log, "--port", "12345"],
                args.mode, args.log, shell)
    ]
    cmds_map += [
        new_cmd(
            session, "test", prefix + base_cmd +
            ["--job-name", "test", "--task",
             str(args.num_workers)], args.mode, args.log, shell)
    ]
    windows = [v[0] for v in cmds_map]

    notes = []
    cmds = [
        "mkdir -p {}".format(args.log),
        "echo {} {} > {}/cmd.sh".format(
            sys.executable,
            ' '.join([shlex_quote(arg) for arg in sys.argv if arg != '-n']),
            args.log),
    ]
    if args.mode == 'nohup' or args.mode == 'child':
        cmds += ["echo '#!/bin/sh' >{}/kill.sh".format(args.log)]
        notes += ["Run `source {}/kill.sh` to kill the job".format(args.log)]
    if args.mode == 'tmux':
        notes += [
            "Use `tmux attach -t {}` to watch process output".format(session)
        ]
        notes += [
            "Use `tmux kill-session -t {}` to kill the job".format(session)
        ]
    else:
        notes += [
            "Use `tail -f {}/*.out` to watch process output".format(args.log)
        ]
    notes += [
        "Point your browser to http://localhost:12345 to see Tensorboard"
    ]

    if args.mode == 'tmux':
        cmds += [
            "tmux kill-session -t {}".format(session),
            "tmux new-session -s {} -n {} -d {}".format(
                session, windows[0], shell)
        ]
        for w in windows[1:]:
            cmds += [
                "tmux new-window -t {} -n {} {}".format(session, w, shell)
            ]
        cmds += ["sleep 1"]
    for window, cmd in cmds_map:
        cmds += [cmd]

    return cmds, notes
Example #44
def mull_targets(
    targets,
    involucro_context=None,
    command="build",
    channels=DEFAULT_CHANNELS,
    namespace="biocontainers",
    test='true',
    test_files=None,
    image_build=None,
    name_override=None,
    repository_template=DEFAULT_REPOSITORY_TEMPLATE,
    dry_run=False,
    conda_version=None,
    verbose=False,
    binds=DEFAULT_BINDS,
    rebuild=True,
    oauth_token=None,
    hash_func="v2",
    singularity=False,
    singularity_image_dir="singularity_import",
):
    targets = list(targets)
    if involucro_context is None:
        involucro_context = InvolucroContext()

    image_function = v1_image_name if hash_func == "v1" else v2_image_name
    if len(targets) > 1 and image_build is None:
        # Force an image build in this case - this seems hacky and probably
        # shouldn't work this way, but the single-target case breaks otherwise.
        image_build = "0"

    repo_template_kwds = {
        "namespace":
        namespace,
        "image":
        image_function(targets,
                       image_build=image_build,
                       name_override=name_override)
    }
    repo = string.Template(repository_template).safe_substitute(
        repo_template_kwds)

    if not rebuild or "push" in command:
        repo_name = repo_template_kwds["image"].split(":", 1)[0]
        repo_data = quay_repository(repo_template_kwds["namespace"], repo_name)
        if not rebuild:
            tags = repo_data.get("tags", [])

            target_tag = None
            if ":" in repo_template_kwds["image"]:
                image_name_parts = repo_template_kwds["image"].split(":")
                assert len(
                    image_name_parts
                ) == 2, ": not allowed in image name [%s]" % repo_template_kwds[
                    "image"]
                target_tag = image_name_parts[1]

            if tags and (target_tag is None or target_tag in tags):
                raise BuildExistsException()
        if "push" in command and "error_type" in repo_data and oauth_token:
            # Explicitly create the repository so it can be built as public.
            create_repository(repo_template_kwds["namespace"], repo_name,
                              oauth_token)

    for channel in channels:
        if channel.startswith('file://'):
            bind_path = channel[len('file://'):].lstrip('/')  # strip the URL scheme (lstrip alone would also eat path characters)
            binds.append('/%s:/%s' % (bind_path, bind_path))

    channels = ",".join(channels)
    target_str = ",".join(map(conda_build_target_str, targets))
    bind_str = ",".join(binds)
    involucro_args = [
        '-f',
        '%s/invfile.lua' % DIRNAME,
        '-set',
        "CHANNELS='%s'" % channels,
        '-set',
        "TARGETS='%s'" % target_str,
        '-set',
        "REPO='%s'" % repo,
        '-set',
        "BINDS='%s'" % bind_str,
    ]

    if DEST_BASE_IMAGE:
        involucro_args.extend(
            ["-set", "DEST_BASE_IMAGE='%s'" % DEST_BASE_IMAGE])
    if CONDA_IMAGE:
        involucro_args.extend(["-set", "CONDA_IMAGE='%s'" % CONDA_IMAGE])
    if verbose:
        involucro_args.extend(["-set", "VERBOSE='1'"])
    if singularity:
        singularity_image_name = repo_template_kwds['image']
        involucro_args.extend(["-set", "SINGULARITY='1'"])
        involucro_args.extend(
            ["-set",
             "SINGULARITY_IMAGE_NAME='%s'" % singularity_image_name])
        involucro_args.extend(
            ["-set",
             "SINGULARITY_IMAGE_DIR='%s'" % singularity_image_dir])
        involucro_args.extend(
            ["-set", "USER_ID='%s:%s'" % (os.getuid(), os.getgid())])
    if test:
        involucro_args.extend(["-set", "TEST=%s" % shlex_quote(test)])
    if conda_version is not None:
        verbose = "--verbose" if verbose else "--quiet"
        involucro_args.extend([
            "-set",
            "PREINSTALL='conda install %s --yes conda=%s'" %
            (verbose, conda_version)
        ])
    involucro_args.append(command)
    if test_files:
        test_bind = []
        for test_file in test_files:
            if ':' not in test_file:
                if os.path.exists(test_file):
                    test_bind.append(
                        "%s:%s/%s" %
                        (test_file, DEFAULT_WORKING_DIR, test_file))
            else:
                if os.path.exists(test_file.split(':')[0]):
                    test_bind.append(test_file)
        if test_bind:
            involucro_args.insert(6, '-set')
            involucro_args.insert(7, "TEST_BINDS='%s'" % ",".join(test_bind))
    print(" ".join(involucro_context.build_command(involucro_args)))
    if not dry_run:
        ensure_installed(involucro_context, True)
        if singularity:
            if not os.path.exists(singularity_image_dir):
                safe_makedirs(singularity_image_dir)
            with open(os.path.join(singularity_image_dir, 'Singularity'),
                      'w+') as sin_def:
                fill_template = SINGULARITY_TEMPLATE % {'container_test': test}
                sin_def.write(fill_template)
        with PrintProgress():
            ret = involucro_context.exec_command(involucro_args)
        if singularity:
            # we cannot remove this folder as it contains the image, which is owned by root
            pass
            # shutil.rmtree('./singularity_import')
        return ret
    return 0
Example #45
def shlex_join(strings):
    return ' '.join(shlex_quote(s) for s in strings)
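For example (assumes the shlex_quote import used by shlex_join above, e.g. from shlex import quote as shlex_quote):

print(shlex_join(["rm", "-rf", "my dir/*.tmp"]))
# rm -rf 'my dir/*.tmp'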
Example #46
def get_compile_command(click_ctx):
    """
    Returns a normalized compile command depending on cli context.

    The command will be normalized by:
        - expanding options short to long
        - removing values that are already default
        - sorting the arguments
        - removing one-off arguments like '--upgrade'
        - removing arguments that don't change build behaviour like '--verbose'
    """
    from piptools.scripts.compile import cli

    # Map of the compile cli options (option name -> click.Option)
    compile_options = {option.name: option for option in cli.params}

    left_args = []
    right_args = []

    for option_name, value in click_ctx.params.items():
        option = compile_options[option_name]

        # Get the latest option name (usually it'll be a long name)
        option_long_name = option.opts[-1]

        # Collect variadic args separately, they will be added
        # at the end of the command later
        if option.nargs < 0:
            right_args.extend([shlex_quote(force_text(val)) for val in value])
            continue

        # Exclude one-off options (--upgrade/--upgrade-package/--rebuild/...)
        # or options that don't change compile behaviour (--verbose/--dry-run/...)
        if option_long_name in COMPILE_EXCLUDE_OPTIONS:
            continue

        # Skip options without a value
        if option.default is None and not value:
            continue

        # Skip options with a default value
        if option.default == value:
            continue

        # Use a file name for file-like objects
        if isinstance(value, LazyFile):
            value = value.name

        # Convert value to the list
        if not isinstance(value, (tuple, list)):
            value = [value]

        for val in value:
            # Flags don't have a value, thus add to args true or false option long name
            if option.is_flag:
                # If there are false-options, choose an option name depending on a value
                if option.secondary_opts:
                    # Get the latest false-option
                    secondary_option_long_name = option.secondary_opts[-1]
                    arg = option_long_name if val else secondary_option_long_name
                # There are no false-options, use true-option
                else:
                    arg = option_long_name
                left_args.append(shlex_quote(arg))
            # Append to args the option with a value
            else:
                left_args.append(
                    "{option}={value}".format(
                        option=option_long_name, value=shlex_quote(force_text(val))
                    )
                )

    return " ".join(["pip-compile"] + sorted(left_args) + sorted(right_args))
Example #47
def new_cmd(session, name, cmd):
    if isinstance(cmd, (list, tuple)):
        cmd = " ".join(shlex_quote(str(v)) for v in cmd)
    return name, "tmux send-keys -t {}:{} {} Enter".format(
        session, name, shlex_quote(cmd))
 def _stringify_kwopt_string(self, flag, val):
     """
     """
     return '{flag} {value}'.format(flag=flag, value=shlex_quote(str(val)))
Example #49
def test_reqs_f(d):
    reqs_file = d.write_to('reqs', [shlex_quote(get_exists_path('libsimple'))])
    d.cmds(
        install_cmds(url='-f {}'.format(reqs_file),
                     lib='simple',
                     alias=get_exists_path('libsimple')))
 def _stringify_kwopt_list(self, flag, val):
     """
     """
     if isinstance(val, string_types):
         return self._stringify_kwopt_string(flag, val)
     return ' '.join(['{flag} {value}'.format(flag=flag, value=shlex_quote(str(v))) for v in val])
Example #51
def test_comments_reqs_f(d):
    p = get_exists_path('libsimple')
    reqs_file = d.write_to(
        'reqs', [shlex_quote(p) + ' #A comment', '# Another comment'])
    d.cmds(install_cmds(url='-f {}'.format(reqs_file), alias=p))
Example #52
def build_docker_run_command(
    container_command,
    image,
    interactive=False,
    terminal=False,
    tag=None,
    volumes=[],
    volumes_from=DEFAULT_VOLUMES_FROM,
    memory=DEFAULT_MEMORY,
    env_directives=[],
    working_directory=DEFAULT_WORKING_DIRECTORY,
    name=None,
    net=DEFAULT_NET,
    run_extra_arguments=DEFAULT_RUN_EXTRA_ARGUMENTS,
    docker_cmd=DEFAULT_DOCKER_COMMAND,
    sudo=DEFAULT_SUDO,
    sudo_cmd=DEFAULT_SUDO_COMMAND,
    auto_rm=DEFAULT_AUTO_REMOVE,
    set_user=DEFAULT_SET_USER,
    host=DEFAULT_HOST,
):
    command_parts = _docker_prefix(docker_cmd=docker_cmd,
                                   sudo=sudo,
                                   sudo_cmd=sudo_cmd,
                                   host=host)
    command_parts.append("run")
    if interactive:
        command_parts.append("-i")
    if terminal:
        command_parts.append("-t")
    for env_directive in env_directives:
        command_parts.extend(["-e", shlex_quote(env_directive)])
    for volume in volumes:
        command_parts.extend(["-v", shlex_quote(str(volume))])
    if volumes_from:
        command_parts.extend(
            ["--volumes-from",
             shlex_quote(str(volumes_from))])
    if memory:
        command_parts.extend(["-m", shlex_quote(memory)])
    if name:
        command_parts.extend(["--name", shlex_quote(name)])
    if working_directory:
        command_parts.extend(["-w", shlex_quote(working_directory)])
    if net:
        command_parts.extend(["--net", shlex_quote(net)])
    if auto_rm:
        command_parts.append("--rm")
    if run_extra_arguments:
        command_parts.append(run_extra_arguments)
    if set_user:
        user = set_user
        if set_user == DEFAULT_SET_USER:
            user = str(os.geteuid())
        command_parts.extend(["-u", user])
    full_image = image
    if tag:
        full_image = "%s:%s" % (full_image, tag)
    command_parts.append(shlex_quote(full_image))
    command_parts.append(container_command)
    return " ".join(command_parts)
Example #53
def run(cmd, show_cmd=False, stdout=False, logfile=None, can_fail=False, workdir=None, stdin_data=None, return_stdout=True, buffer_size=4096, **kwargs):
    """Run a command in shell.

    @param show_cmd: show command in stdout/log
    @type show_cmd: bool
    @param stdout: print output to stdout
    @type stdout: bool
    @param logfile: save output to logfile
    @type logfile: str
    @param can_fail: when set, retcode is returned instead of raising RuntimeError
    @type can_fail: bool
    @param workdir: change current directory to workdir before starting a command
    @type workdir: str
    @param stdin_data: stdin data passed to a command
    @type stdin_data: str
    @param buffer_size: size of buffer for reading from proc's stdout, use -1 for line-buffering
    @type buffer_size: int
    @param return_stdout: return command stdout as a function result (turn off when working with large data; None is returned instead of stdout)
    @type return_stdout: bool
    @return: (command return code, merged stdout+stderr)
    @rtype: (int, str) or (int, None)
    """
    if type(cmd) in (list, tuple):
        cmd = " ".join((shlex_quote(i) for i in cmd))

    universal_newlines = kwargs.get('universal_newlines', False)

    log = None
    if logfile:
        logfile = os.path.join(workdir or "", logfile)
        # What happens to log file if it exists already? If show_cmd is True,
        # it will be overwritten. Otherwise the command output will just be
        # appended to the existing file.
        mode = 'a' if not show_cmd and os.path.exists(logfile) else 'w'
        if not universal_newlines:
            mode += 'b'
        log = open(logfile, mode)

    try:

        if show_cmd:
            command = "COMMAND: %s\n%s\n" % (
                cmd,
                "-" * min(len(cmd) + 9, 79)
            )
            if stdout:
                print(command, end='')
            if logfile:
                if six.PY3 and not universal_newlines:
                    # Log file opened as binary, encode the command
                    command = bytes(command, encoding='utf-8')
                log.write(command)

        stdin = None
        if stdin_data is not None:
            stdin = subprocess.PIPE

        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, stdin=stdin,
                                cwd=workdir, **kwargs)

        if stdin_data is not None:
            class StdinThread(threading.Thread):
                def run(self):
                    proc.stdin.write(stdin_data)
                    proc.stdin.close()
            stdin_thread = StdinThread()
            stdin_thread.daemon = True
            stdin_thread.start()

        output = "" if universal_newlines else b""
        sentinel = "" if universal_newlines else b""
        while True:
            if buffer_size == -1:
                lines = proc.stdout.readline()
            else:
                try:
                    lines = proc.stdout.read(buffer_size)
                except (IOError, OSError) as ex:
                    import errno
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise

            if lines == sentinel:
                break
            if stdout:
                if not universal_newlines:
                    sys.stdout.write(lines.decode('utf-8'))
                else:
                    sys.stdout.write(lines)
            if logfile:
                log.write(lines)
            if return_stdout:
                output += lines
        proc.wait()

    finally:
        if logfile:
            log.close()

    if stdin_data is not None:
        stdin_thread.join()

    err_msg = "ERROR running command: %s" % cmd
    if logfile:
        err_msg += "\nFor more details see %s" % logfile

    if proc.returncode != 0 and not can_fail:
        exc = RuntimeError(err_msg)
        exc.output = output
        raise exc

    if proc.returncode != 0 and show_cmd:
        print(err_msg, file=sys.stderr)

    if not return_stdout:
        output = None

    return (proc.returncode, output)
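A minimal usage sketch for the run() helper above; the commands and paths are illustrative.

# Illustrative only: assumes the run() helper above is in scope.
# Passing a list gets every element shlex-quoted before joining; passing a plain
# string hands it to the shell unchanged.
retcode, output = run(["ls", "-l", "/tmp"],
                      show_cmd=True,
                      stdout=True,
                      can_fail=True,
                      universal_newlines=True)
if retcode != 0:
    print("listing failed:\n%s" % output)

# For large outputs, skip collecting stdout entirely (output comes back as None).
retcode, _ = run("du -sh /tmp", can_fail=True, return_stdout=False)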
Example #54
def process_in_parallel(tag,
                        total_range_size,
                        binary,
                        output_dir,
                        load_ckpt,
                        load_detectron,
                        opts=''):
    """Run the specified binary NUM_GPUS times in parallel, each time as a
    subprocess that uses one GPU. The binary must accept the command line
    arguments `--range {start} {end}` that specify a data processing range.
    """
    # Snapshot the current cfg state in order to pass to the inference
    # subprocesses
    cfg_file = os.path.join(output_dir, '{}_range_config.yaml'.format(tag))
    with open(cfg_file, 'w') as f:
        yaml.dump(cfg, stream=f)
    subprocess_env = os.environ.copy()
    processes = []
    NUM_GPUS = torch.cuda.device_count()
    subinds = np.array_split(range(total_range_size), NUM_GPUS)
    # Determine GPUs to use
    cuda_visible_devices = os.environ.get('CUDA_VISIBLE_DEVICES')
    if cuda_visible_devices:
        gpu_inds = list(map(int, cuda_visible_devices.split(',')))
        assert -1 not in gpu_inds, \
            'Hiding GPU indices using the \'-1\' index is not supported'
    else:
        gpu_inds = range(cfg.NUM_GPUS)
    gpu_inds = list(gpu_inds)
    # Run the binary in cfg.NUM_GPUS subprocesses
    for i, gpu_ind in enumerate(gpu_inds):
        start = subinds[i][0]
        # pdb.set_trace()
        end = subinds[i][-1] + 1
        subprocess_env['CUDA_VISIBLE_DEVICES'] = str(gpu_ind)
        cmd = (
            'python3 {binary} --range {start} {end} --cfg {cfg_file} --set {opts} '
            '--output_dir {output_dir}')
        if load_ckpt is not None:
            cmd += ' --load_ckpt {load_ckpt}'
        elif load_detectron is not None:
            cmd += ' --load_detectron {load_detectron}'
        cmd = cmd.format(binary=shlex_quote(binary),
                         start=int(start),
                         end=int(end),
                         cfg_file=shlex_quote(cfg_file),
                         output_dir=output_dir,
                         load_ckpt=load_ckpt,
                         load_detectron=load_detectron,
                         opts=' '.join([shlex_quote(opt) for opt in opts]))
        logger.info('{} range command {}: {}'.format(tag, i, cmd))
        if i == 0:
            subprocess_stdout = subprocess.PIPE
        else:
            filename = os.path.join(
                output_dir, '%s_range_%s_%s.stdout' % (tag, start, end))
            subprocess_stdout = open(filename, 'w')
        p = subprocess.Popen(cmd,
                             shell=True,
                             env=subprocess_env,
                             stdout=subprocess_stdout,
                             stderr=subprocess.STDOUT,
                             bufsize=1)
        # pdb.set_trace()
        processes.append((i, p, start, end, subprocess_stdout))
    # Log output from inference processes and collate their results

    outputs = []
    for i, p, start, end, subprocess_stdout in processes:
        log_subprocess_output(i, p, output_dir, tag, start, end)
        if isinstance(subprocess_stdout, IOBase):
            subprocess_stdout.close()
        range_file = os.path.join(output_dir,
                                  '%s_range_%s_%s.pkl' % (tag, start, end))
        range_data = pickle.load(open(range_file, 'rb'))
        outputs.append(range_data)
    return outputs
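As the docstring above states, the launched binary must accept `--range {start} {end}`; below is a minimal sketch of a compatible entry point. The extra flags process_in_parallel passes (--cfg, --set, --output_dir, --load_ckpt, ...) are tolerated via parse_known_args, and the pickle file name mirrors the '<tag>_range_<start>_<end>.pkl' pattern the parent collates ('detection' is a hypothetical tag).

# Sketch of an entry point compatible with process_in_parallel's contract.
import argparse
import os
import pickle


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--range', nargs=2, type=int, metavar=('START', 'END'),
                        required=True,
                        help='process items with indices in [START, END)')
    parser.add_argument('--output_dir', default='.')
    # --cfg/--set/--load_ckpt/--load_detectron etc. are accepted but ignored here.
    args, _unknown = parser.parse_known_args()

    start, end = args.range
    results = {'processed': list(range(start, end))}  # placeholder work

    out_path = os.path.join(args.output_dir,
                            'detection_range_%s_%s.pkl' % (start, end))
    with open(out_path, 'wb') as f:
        pickle.dump(results, f)


if __name__ == '__main__':
    main()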
Example #55
    def run(self, args, manage_args):
        environment = get_environment(args.env_name)
        public_vars = environment.public_vars
        # the default 'cchq' is redundant with ansible/group_vars/all.yml
        cchq_user = public_vars.get('cchq_user', 'cchq')
        deploy_env = environment.meta_config.deploy_env
        # the paths here are redundant with ansible/group_vars/all.yml
        if args.release:
            code_dir = '/home/{cchq_user}/www/{deploy_env}/releases/{release}'.format(
                cchq_user=cchq_user,
                deploy_env=deploy_env,
                release=args.release)
        else:
            code_dir = '/home/{cchq_user}/www/{deploy_env}/current'.format(
                cchq_user=cchq_user, deploy_env=deploy_env)

        def _get_ssh_args(remote_command):
            return [
                'sudo -iu {cchq_user} bash -c {remote_command}'.format(
                    cchq_user=cchq_user,
                    remote_command=shlex_quote(remote_command),
                )
            ]

        if args.tee_file:
            rc = Ssh(self.parser).run(
                args,
                _get_ssh_args('cd {code_dir}; [[ -f {tee_file} ]]'.format(
                    code_dir=code_dir, tee_file=shlex_quote(args.tee_file))))
            if rc in (0, 1):
                file_already_exists = (rc == 0)
            else:
                return rc

            if file_already_exists:
                puts(
                    color_error(
                        "Refusing to --tee to a file that already exists ({})".
                        format(args.tee_file)))
                return 1

            tee_file_cmd = ' | tee {}'.format(shlex_quote(args.tee_file))
        else:
            tee_file_cmd = ''

        python_env = 'python_env-3.6'
        remote_command = (
            'cd {code_dir}; {python_env}/bin/python manage.py {args}{tee_file_cmd}'
            .format(
                python_env=python_env,
                cchq_user=cchq_user,
                code_dir=code_dir,
                args=' '.join(shlex_quote(arg) for arg in manage_args),
                tee_file_cmd=tee_file_cmd,
            ))
        if args.tmux:
            args.remote_command = remote_command
            return Tmux(self.parser).run(args, [])
        else:
            ssh_args = _get_ssh_args(remote_command)
            return Ssh(self.parser).run(args, ssh_args)
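The essential move above is quoting the entire remote command once so it survives as a single argument to `bash -c`; a self-contained illustration follows (the user and paths are hypothetical).

from six.moves import shlex_quote

remote_command = "cd /home/cchq/www/production/current; python_env-3.6/bin/python manage.py shell"
ssh_arg = 'sudo -iu {user} bash -c {cmd}'.format(user='cchq',
                                                 cmd=shlex_quote(remote_command))
# The whole command ends up inside one pair of single quotes:
# sudo -iu cchq bash -c 'cd /home/cchq/www/production/current; python_env-3.6/bin/python manage.py shell'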
Example #56
    def run(self,
            app='org.mozilla.geckoview_example',
            intent=None,
            env=[],
            profile=None,
            url=None,
            no_install=None,
            no_wait=None,
            fail_if_running=None,
            restart=None):
        from mozrunner.devices.android_device import verify_android_device, _get_device
        from six.moves import shlex_quote

        if app == 'org.mozilla.geckoview_example':
            activity_name = 'org.mozilla.geckoview_example.GeckoViewActivity'
        elif app == 'org.mozilla.geckoview.test':
            activity_name = 'org.mozilla.geckoview.test.TestRunnerActivity'
        elif 'fennec' in app or 'firefox' in app:
            activity_name = 'org.mozilla.gecko.BrowserApp'
        else:
            raise RuntimeError('Application not recognized: {}'.format(app))

        # `verify_android_device` respects `DEVICE_SERIAL` if it is set and sets it otherwise.
        verify_android_device(self, app=app, install=not no_install)
        device_serial = os.environ.get('DEVICE_SERIAL')
        if not device_serial:
            print('No ADB devices connected.')
            return 1

        device = _get_device(self.substs, device_serial=device_serial)

        args = []
        if profile:
            if os.path.isdir(profile):
                host_profile = profile
                # Always /data/local/tmp, rather than `device.test_root`, because GeckoView only
                # takes its configuration file from /data/local/tmp, and we want to follow suit.
                target_profile = '/data/local/tmp/{}-profile'.format(app)
                device.rm(target_profile, recursive=True, force=True)
                device.push(host_profile, target_profile)
                self.log(
                    logging.INFO, "run", {
                        'host_profile': host_profile,
                        'target_profile': target_profile
                    },
                    'Pushed profile from host "{host_profile}" to target "{target_profile}"'
                )
            else:
                target_profile = profile
                self.log(logging.INFO, "run",
                         {'target_profile': target_profile},
                         'Using profile from target "{target_profile}"')

            args = ['--profile', shlex_quote(target_profile)]

        extras = {}
        for i, e in enumerate(env):
            extras['env{}'.format(i)] = e
        if args:
            extras['args'] = " ".join(args)
        extras['use_multiprocess'] = True  # Only GVE and TRA process this extra.

        if env or args:
            restart = True

        if restart:
            fail_if_running = False
            self.log(logging.INFO, "run", {'app': app},
                     'Stopping {app} to ensure clean restart.')
            device.stop_application(app)

        # We'd prefer to log the actual `am start ...` command, but it's not trivial to wire the
        # device's logger to mach's logger.
        self.log(logging.INFO, "run", {
            'app': app,
            'activity_name': activity_name
        }, 'Starting {app}/{activity_name}.')

        device.launch_application(app_name=app,
                                  activity_name=activity_name,
                                  intent=intent,
                                  extras=extras,
                                  url=url,
                                  wait=not no_wait,
                                  fail_if_running=fail_if_running)

        return 0
Example #57
def _sanitize_param_dict(param_dict):
    # Stringify keys and shell-quote values so they can be embedded in a command line.
    return {
        str(key): shlex_quote(str(value))
        for key, value in param_dict.items()
    }
Example #58
def _str_optional(s):
    return "NULL" if s is None else "'{}'".format(shlex_quote(str(s)))
Example #59
File: train.py  Project: pustar/human-rl
def create_commands(session,
                    num_workers,
                    remotes,
                    env_id,
                    logdir,
                    shell='bash',
                    mode='tmux',
                    hparams=None):
    # for launching the TF workers and for launching tensorboard
    base_cmd = [
        'CUDA_VISIBLE_DEVICES=', sys.executable, 'worker.py', '--log-dir', logdir, '--env-id',
        env_id, '--num-workers', str(num_workers)
    ]
    if hparams is not None:
        for k, v in hparams.items():
            base_cmd.append("--" + k)
            if isinstance(v, list):
                base_cmd.extend(v)
            elif isinstance(v, str):
                base_cmd.append(v)
            else:
                base_cmd.append(str(v))

        os.makedirs(logdir, exist_ok=True)
        hparams["env_id"] = env_id

        import json
        with open(logdir + '/hparams.json', 'w') as f:
            json.dump(hparams, f, indent=4)

    if remotes is None:
        remotes = ["1"] * num_workers
    else:
        remotes = remotes.split(',')
        assert len(remotes) == num_workers

    cmds_map = [new_cmd(session, "ps", base_cmd + ["--job-name", "ps"], mode, logdir, shell)]

    for i in range(num_workers):
        cmds_map += [
            new_cmd(session, "w-%d" % i,
                    base_cmd + ["--job-name", "worker", "--task", str(i), "--remotes", remotes[i]],
                    mode, logdir, shell)
        ]

    cmds_map += [
        new_cmd(session, "tb", ["tensorboard", "--logdir", logdir, "--port", "12345"], mode, logdir,
                shell)
    ]
    if mode == 'tmux':
        cmds_map += [new_cmd(session, "htop", ["htop"], mode, logdir, shell)]

    windows = [v[0] for v in cmds_map]

    notes = []

    cmds = [
        "mkdir -p {}".format(logdir),
        "mkdir -p {}".format(os.path.join(logdir, "logs")),
        "echo {} {} > {}/cmd.sh".format(sys.executable, ' '.join(
            [shlex_quote(arg) for arg in sys.argv if arg != '-n']), logdir),
    ]
    if mode == 'nohup' or mode == 'child':
        cmds += ["echo '#!/bin/sh' >{}/kill.sh".format(logdir)]
        notes += ["Run `source {}/kill.sh` to kill the job".format(logdir)]
    if mode == 'tmux':
        notes += ["Use `tmux attach -t {}` to watch process output".format(session)]
        notes += ["Use `tmux kill-session -t {}` to kill the job".format(session)]
    else:
        notes += ["Use `tail -f {}/*.out` to watch process output".format(logdir)]
    notes += ["Point your browser to http://localhost:12345 to see Tensorboard"]

    if mode == 'tmux':
        cmds += [
            "tmux kill-session -t {}".format(session), "tmux new-session -s {} -n {} -d {}".format(
                session, windows[0], shell)
        ]
        for w in windows[1:]:
            cmds += ["tmux new-window -t {} -n {} {}".format(session, w, shell)]
        cmds += ["sleep 1"]
    for window, cmd in cmds_map:
        cmds += [cmd]

    return cmds, notes
Example #60
def get_compile_command(click_ctx):
    """
    Returns a normalized compile command based on the cli context.

    The command is normalized by:
        - expanding short options to their long form
        - removing values that are already the default
        - sorting the arguments
        - removing one-off arguments like '--upgrade'
        - removing arguments that don't change build behaviour, like '--verbose'
    """
    from piptools.scripts.compile import cli

    # Map of the compile cli options (option name -> click.Option)
    compile_options = {option.name: option for option in cli.params}

    left_args = []
    right_args = []

    for option_name, value in click_ctx.params.items():
        option = compile_options[option_name]

        # Get the latest option name (usually it'll be a long name)
        option_long_name = option.opts[-1]

        # Collect variadic args separately, they will be added
        # at the end of the command later
        if option.nargs < 0:
            # These will necessarily be src_files
            # Re-add click-stripped '--' if any start with '-'
            if any(val.startswith("-") and val != "-" for val in value):
                right_args.append("--")
            right_args.extend([shlex_quote(force_text(val)) for val in value])
            continue

        # Exclude one-off options (--upgrade/--upgrade-package/--rebuild/...)
        # or options that don't change compile behaviour (--verbose/--dry-run/...)
        if option_long_name in COMPILE_EXCLUDE_OPTIONS:
            continue

        # Skip options without a value
        if option.default is None and not value:
            continue

        # Skip options with a default value
        if option.default == value:
            continue

        # Use a file name for file-like objects
        if isinstance(value, LazyFile):
            value = value.name

        # Convert value to a list
        if not isinstance(value, (tuple, list)):
            value = [value]

        for val in value:
            # Flags carry no value, so append the true or false option's long name
            if option.is_flag:
                # If there are false-options, pick the name that matches the value
                if option.secondary_opts:
                    # Use the latest false-option
                    secondary_option_long_name = option.secondary_opts[-1]
                    arg = option_long_name if val else secondary_option_long_name
                # There is no false-option, so use the true-option
                else:
                    arg = option_long_name
                left_args.append(shlex_quote(arg))
            # Append the option together with its value
            else:
                if isinstance(val, six.string_types) and is_url(val):
                    val = redact_auth_from_url(val)
                if option.name == "pip_args":
                    # shlex_quote would produce functional but noisily quoted results,
                    # e.g. --pip-args='--cache-dir='"'"'/tmp/with spaces'"'"''
                    # Instead, we try to get more legible quoting via repr:
                    left_args.append("{option}={value}".format(
                        option=option_long_name,
                        value=repr(fs_str(force_text(val)))))
                else:
                    left_args.append("{option}={value}".format(
                        option=option_long_name,
                        value=shlex_quote(force_text(val))))

    return " ".join(["pip-compile"] + sorted(left_args) + sorted(right_args))