Example #1
def _bowtie2_index_command(bowtie2_path, reference):
    (reference_root, _) = os.path.splitext(reference)
    return '{bowtie2}-build {reference_in} {bt2_index_base}'.format(**{
        'bowtie2': shlex.quote(bowtie2_path),
        'reference_in': shlex.quote(reference),
        'bt2_index_base': shlex.quote(reference_root)
    })
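A minimal usage sketch, assuming the helper above is in scope; the bowtie2 path and reference file are hypothetical. shlex.quote keeps the space in the reference path intact when the string is split back into argv form:

import shlex
import subprocess

cmd = _bowtie2_index_command('/opt/bowtie2/bowtie2', '/data/my genome.fa')
print(cmd)  # /opt/bowtie2/bowtie2-build '/data/my genome.fa' '/data/my genome'
subprocess.run(shlex.split(cmd), check=True)  # raises CalledProcessError if indexing fails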
Example #2
    def sync_to_worker_if_possible(self):
        """Syncs the local logdir on driver to worker if possible.

        Requires ray cluster to be started with the autoscaler. Also requires
        rsync to be installed.
        """
        if self.worker_ip == self.local_ip:
            return
        ssh_key = get_ssh_key()
        ssh_user = get_ssh_user()
        global _log_sync_warned
        if ssh_key is None or ssh_user is None:
            if not _log_sync_warned:
                logger.error("Log sync requires cluster to be setup with "
                             "`ray up`.")
                _log_sync_warned = True
            return
        if not distutils.spawn.find_executable("rsync"):
            logger.error("Log sync requires rsync to be installed.")
            return
        source = '{}/'.format(self.local_dir)
        target = '{}@{}:{}/'.format(ssh_user, self.worker_ip, self.local_dir)
        final_cmd = (("""rsync -savz -e "ssh -i {} -o ConnectTimeout=120s """
                      """-o StrictHostKeyChecking=no" {} {}""").format(
                          quote(ssh_key), quote(source), quote(target)))
        logger.info("Syncing results to %s", str(self.worker_ip))
        sync_process = subprocess.Popen(
            final_cmd, shell=True, stdout=self.logfile)
        sync_process.wait()
Example #3
def svg2png(svg_filepath, png_filepath, dpi):
    """
    Convert svg to png with inkscape.
    Delete svg file.

    :param str svg_filepath:    SVG file path
    :param str png_filepath:    PNG file path
    :param int dpi:             DotPerInch for resolution
    :return:
    """
    quoted_svg_filepath = shlex.quote(svg_filepath)
    quoted_png_filepath = shlex.quote(png_filepath)
    command = "inkscape -d {dpi} -z -f {svg_filepath} -e {png_filepath}".format(
        svg_filepath=quoted_svg_filepath,
        png_filepath=quoted_png_filepath,
        dpi=dpi)
    # execute command
    status = subprocess.call(shlex.split(command))
    if status != 0:
        exit(status)

    # remove temporary svg file
    if os.path.exists(svg_filepath):
        try:
            os.unlink(svg_filepath)
        except IOError:
            pass
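A short usage sketch; the file paths are hypothetical and Inkscape must be on PATH for the call to succeed. Note that the flags built above (-z, -f, -e) match the pre-1.0 Inkscape command line; Inkscape 1.x renamed several of these options.

# Render a 300 dpi PNG from a temporary SVG; svg2png deletes the SVG afterwards.
svg2png('/tmp/figure.svg', '/tmp/figure.png', dpi=300)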
Example #4
def _novoalign_command(path, args, ncpu, reference, sample_name, read1, read2=None):
    """
    Args:
        path (str): path to aligner executable
        args (str): raw arguments to be passed to the aligner
        ncpu: number of alignment threads to launch
        reference: (str): reference filename
        sample_name (str): sample name, used in the SAM read group (@RG) tag
        read1 (str): absolute path to read1 fastq[.gz|.bz2]
        read2 (str): absolute path to read2 fastq[.gz|.bz2]

    Returns:
        string: command to execute the novoalign aligner
    """

    import os

    aligner_command = '{novoalign} -d {dbname} -f {read1} {read2} {paired_string} -c {ncpu} -o SAM {bam_string} {novoalign_args}'.format(**{
        'novoalign': path,
        'dbname': shlex.quote(reference + '.idx'),
        'read1': shlex.quote(read1),
        'read2': shlex.quote(read2) if read2 else '',
        'paired_string': '-i PE 500,100' if read2 else '',
        'ncpu': shlex.quote(str(ncpu)),
        'bam_string': shlex.quote('@RG\\tID:{sample_name}\\tSM:{sample_name}'.format(sample_name=sample_name)),
        'novoalign_args': ' '.join(map(shlex.quote, shlex.split(args)))
    })
    return aligner_command
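For illustration, a small sketch of the read-group quoting used above (the sample name is hypothetical): shlex.quote keeps the whole @RG string a single shell token even though it contains spaces and literal \t separators.

import shlex

bam_string = shlex.quote('@RG\\tID:{s}\\tSM:{s}'.format(s='sample 1'))
print(bam_string)  # '@RG\tID:sample 1\tSM:sample 1' (single-quoted, one argument)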
Example #5
def png_add_mask_and_drop_shadow(source_filepath, mask_filepath, destination_filepath, shadow_offset=10):
    """
    Resize the source png to 205x280 (75 dpi), cut it out with the 75 dpi mask, then add a drop shadow

    :param str source_filepath: Path to source png file
    :param str mask_filepath:   Path to mask png file
    :param str destination_filepath: Path to save result file
    :param int shadow_offset: Offset of shadow in pixels
    :return:
    """
    quoted_source_filepath = shlex.quote(source_filepath)
    quoted_mask_filepath = shlex.quote(mask_filepath)
    quoted_destination_filepath = shlex.quote(destination_filepath)

    command = "convert \( {source_filepath} -resize 205x280 {mask_filepath} -alpha Off -compose copyopacity -composite \) \
          -background black \( +clone -shadow 60x{offset}+{offset}+{offset} \) +swap \
          -compose Over -composite +repage \
          {destination_filepath}".format(
        source_filepath=quoted_source_filepath,
        mask_filepath=quoted_mask_filepath,
        destination_filepath=quoted_destination_filepath,
        offset=shadow_offset)

    # execute command
    status = subprocess.call(shlex.split(command))
    if status != 0:
        exit(status)
Example #6
def write_symlinks(changes):
    for obj in changes:
        new = obj["new_filename"]
        old = obj["filenames"]
        for f in old:
            print("rm "+shlex.quote(f))
            print("ln -s "+shlex.quote(os.path.abspath(new))+" "+shlex.quote(f))
Example #7
    def bash_complete(self, prefix, line, begidx, endidx):
        """Attempts BASH completion."""
        splt = line.split()
        cmd = splt[0]
        func = self.bash_complete_funcs.get(cmd, None)
        fnme = self.bash_complete_files.get(cmd, None)
        if func is None or fnme is None:
            return set()
        idx = n = 0
        for n, tok in enumerate(splt):
            if tok == prefix:
                idx = line.find(prefix, idx)
                if idx >= begidx:
                    break
            prev = tok
        if len(prefix) == 0:
            prefix = '""'
            n += 1
        else:
            prefix = shlex.quote(prefix)

        script = BASH_COMPLETE_SCRIPT.format(
            filename=fnme, line=' '.join(shlex.quote(p) for p in splt),
            comp_line=shlex.quote(line), n=n, func=func, cmd=cmd,
            end=endidx + 1, prefix=prefix, prev=shlex.quote(prev))
        try:
            out = subprocess.check_output(
                ['bash'], input=script, universal_newlines=True,
                stderr=subprocess.PIPE, env=builtins.__xonsh_env__.detype())
        except subprocess.CalledProcessError:
            out = ''

        rtn = set(out.splitlines())
        return rtn
Example #8
def xcheck_host_envar(conf, name, wafname=None):
	wafname = wafname or name

	chost, chost_envar = get_chost_stuff(conf)

	specific = None
	if chost:
		specific = os.environ.get('%s_%s' % (chost_envar, name), None)

	if specific:
		value = Utils.to_list(specific)
		conf.env[wafname] += value
		conf.msg('Will use cross-compilation %s from %s_%s' \
		 % (name, chost_envar, name),
		 " ".join(quote(x) for x in value))
		return


	envar = os.environ.get('HOST_%s' % name, None)
	if envar is None:
		return

	value = Utils.to_list(envar) if envar != '' else [envar]

	conf.env[wafname] = value
	conf.msg('Will use cross-compilation %s from HOST_%s' \
	 % (name, name),
	 " ".join(quote(x) for x in value))
Example #9
def update_platform(workspace_id: int, platform_id: int, platform_data) -> dict:
    """
    Update the platform entry

    :param workspace_id: The Workspace ID
    :param platform_id: The Platform ID
    :param platform_data: Dictionary containing the platform 'name' and 'url'
    :return: The updated platform definition
    """
    platform_name = shlex.quote(platform_data['name'])
    platform_url = shlex.quote(platform_data['url'])
    session = db_session()
    workspace = session.query(Workspace).filter(Workspace.id == workspace_id).first()
    if workspace is None:
        raise NotFound("workspace with id {} could not be found".format(workspace_id))

    platform = session.query(Platform). \
        filter(Platform.workspace == workspace). \
        filter(Platform.id == platform_id). \
        first()
    if platform is None:
        raise NotFound("Platform with id {} could not be found".format(platform_id))

    if platform_name != platform.name:
        existing_platforms = session.query(Platform). \
            filter(Platform.workspace == workspace). \
            filter(Platform.name == platform_data['name']). \
            all()
        if len(existing_platforms) > 0:
            raise NameConflict("Platform with name {} already exists".format(platform_data['name']))

    platform.name = platform_name
    platform.url = platform_url
    update_workspace_descriptor(platform.workspace)
    session.commit()
    return platform.as_dict()
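The handler above only reads 'name' and 'url' from the supplied data; a hypothetical payload and call might look like this (IDs and URL are made up):

platform_data = {"name": "my-platform", "url": "http://127.0.0.1:5000"}
# updated = update_platform(workspace_id=1, platform_id=2, platform_data=platform_data)
# Raises NotFound for an unknown workspace or platform id, NameConflict on duplicate names.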
Example #10
def complete_from_bash(prefix, line, begidx, endidx, ctx):
    """Completes based on results from BASH completion."""
    update_bash_completion()
    splt = line.split()
    cmd = splt[0]
    func = BASH_COMPLETE_FUNCS.get(cmd, None)
    fnme = BASH_COMPLETE_FILES.get(cmd, None)
    if func is None or fnme is None:
        return set()
    idx = n = 0
    for n, tok in enumerate(splt):
        if tok == prefix:
            idx = line.find(prefix, idx)
            if idx >= begidx:
                break
        prev = tok
    if len(prefix) == 0:
        prefix = '""'
        n += 1
    else:
        prefix = shlex.quote(prefix)

    script = BASH_COMPLETE_SCRIPT.format(
        filename=fnme, line=' '.join(shlex.quote(p) for p in splt),
        comp_line=shlex.quote(line), n=n, func=func, cmd=cmd,
        end=endidx + 1, prefix=prefix, prev=shlex.quote(prev))
    try:
        out = subprocess.check_output(
            [xp.bash_command()], input=script, universal_newlines=True,
            stderr=subprocess.PIPE, env=builtins.__xonsh_env__.detype())
    except (subprocess.CalledProcessError, FileNotFoundError):
        out = ''

    rtn = set(out.splitlines())
    return rtn
Example #11
    def file(self, root, filename):
        self.file_count += 1

        fullpath = os.path.join(root, filename)

        byte_size = os.lstat(fullpath).st_size

        self.size_total += byte_size

        self.ctx.current_file = fullpath

        local_vars = {
            '_': os.path.basename(filename),
            'p': fullpath,
            'ap': os.path.abspath(fullpath),
            'apq': shlex.quote(os.path.abspath(fullpath)),
            'pq': shlex.quote(fullpath),
            'q': shlex.quote(filename),
        }

        fmt = string.Formatter()
        for (literal_text, field_name, format_spec, _) in fmt.parse(self.fmt_str):
            if literal_text is not None:
                sys.stdout.write(literal_text)

            if field_name is not None:
                value = eval(field_name, self.global_vars, local_vars)  # pylint: disable=W0123
                sys.stdout.write(format(value, format_spec))
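A standalone sketch of the formatting mechanism used above (the template and path are hypothetical): string.Formatter().parse() splits the format string into literal text and fields, and each field name is eval'd against a dict of per-file variables, so a template such as 'cp {pq} {apq}.bak' expands into shell-safe commands.

import os
import shlex
import string
import sys

fullpath = "photos/my holiday.jpg"       # hypothetical file
local_vars = {
    "pq": shlex.quote(fullpath),
    "apq": shlex.quote(os.path.abspath(fullpath)),
}
fmt_str = "cp {pq} {apq}.bak\n"           # hypothetical template
for literal_text, field_name, format_spec, _ in string.Formatter().parse(fmt_str):
    if literal_text is not None:
        sys.stdout.write(literal_text)
    if field_name is not None:
        value = eval(field_name, {}, local_vars)  # same eval-based lookup as above
        sys.stdout.write(format(value, format_spec))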
Example #12
	def send_notify(self, uid, title, body):
		icon,name,surname = self._get_icon(uid)
		summary = title + ", "  + name + ' ' + surname
		summary = shlex.quote(summary)
		body = shlex.quote(self._sanitize(body))
		os.system("notify-send -i {icon} {summary} {body}".format(
			icon = icon, summary = summary, body = body))
Example #13
 def get_command(self, file, **options):
     # on darwin open returns immediately resulting in the temp
     # file removal while app is opening
     command = "open -a /Applications/Preview.app"
     command = "(%s %s; sleep 20; rm -f %s)&" % (command, quote(file),
                                                 quote(file))
     return command
Example #14
async def async_exec(*args, display=False):
    """Execute, return code & log."""
    argsp = []
    for arg in args:
        if os.path.isfile(arg):
            argsp.append("\\\n  {}".format(shlex.quote(arg)))
        else:
            argsp.append(shlex.quote(arg))
    printc('cyan', *argsp)
    try:
        kwargs = {'loop': LOOP, 'stdout': asyncio.subprocess.PIPE,
                  'stderr': asyncio.subprocess.STDOUT}
        if display:
            kwargs['stderr'] = asyncio.subprocess.PIPE
        proc = await asyncio.create_subprocess_exec(*args, **kwargs)
    except FileNotFoundError as err:
        printc(FAIL, "Could not execute {}. Did you install test requirements?"
               .format(args[0]))
        raise err

    if not display:
        # Read stdout into log
        stdout, _ = await proc.communicate()
    else:
        # read child's stdout/stderr concurrently (capture and display)
        stdout, _ = await asyncio.gather(
            read_stream(proc.stdout, sys.stdout.write),
            read_stream(proc.stderr, sys.stderr.write))
    exit_code = await proc.wait()
    stdout = stdout.decode('utf-8')
    return exit_code, stdout
Example #15
    def run_configure_script(self):
        "runs configure-landscape, returns output (LDS hostname)"

        ldscreds = self.config.getopt('landscapecreds')
        args = {"bin": self.lscape_configure_bin,
                "admin_email": shlex.quote(ldscreds['admin_email']),
                "admin_name": shlex.quote(ldscreds['admin_name']),
                "sys_email": shlex.quote(ldscreds['system_email']),
                "maas_host": shlex.quote(
                    self.config.getopt('maascreds')['api_host'])}

        cmd = ("{bin} --admin-email {admin_email} "
               "--admin-name {admin_name} "
               "--system-email {sys_email} "
               "--maas-host {maas_host}".format(**args))

        log.debug("Running landscape configure: {}".format(cmd))

        out = utils.get_command_output(cmd, timeout=None)

        if out['status']:
            log.error("Problem with configuring Landscape: {}.".format(out))
            raise Exception("Error configuring Landscape.")

        return out['output'].strip()
Example #16
    def send_command(self, server_cmd):
        """
        Sends the given command to all screen sessions with the world's screen
        name.

        :raises WorldIsOfflineError:
            if the world is offline.

        .. warning::

            There is no guarantee that the server reacted to the command.
        """
        pids = self.pids()

        # Break if the world is offline.
        if not pids:
            raise WorldIsOfflineError(self)

        # Translate the server command for *cross-server* support.
        server_cmd = self._server.translate_command(server_cmd)

        # Quote the command.
        # The '\n' simulates pressing the ENTER key in a screen session.
        server_cmd += "\n\n"
        server_cmd = shlex.quote(server_cmd)

        # Send the command to the server.
        for pid in pids:
            sys_cmd = "screen -S {0}.{1} -p 0 -X stuff {2}"\
                      .format(pid, shlex.quote(self.screen_name()), server_cmd)
            sys_cmd = shlex.split(sys_cmd)
            subprocess.call(sys_cmd)
        return None
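For illustration, the command construction above with hypothetical values; quoting keeps the multi-line payload as a single 'stuff' argument once shlex.split tokenizes the command:

import shlex

pid, screen_name = 4242, "minecraft_1"                        # hypothetical
server_cmd = shlex.quote("say Server restarting soon" + "\n\n")
sys_cmd = "screen -S {0}.{1} -p 0 -X stuff {2}".format(
    pid, shlex.quote(screen_name), server_cmd)
print(shlex.split(sys_cmd))
# ['screen', '-S', '4242.minecraft_1', '-p', '0', '-X', 'stuff', 'say Server restarting soon\n\n']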
Example #17
    def start(self, collection, docker, ping, database_name):
        """Launches a cAdvisor container on the instance."""
        options = self.options
        volumes = {
            '/': {'bind': '/rootfs', 'ro': True},
            '/var/run': {'bind': '/var/run', 'ro': False},
            '/sys': {'bind': '/sys', 'ro': True},
            '/var/lib/docker': {'bind': '/var/lib/docker', 'ro': True}
        }

        logger.debug("cAdvisor: Writing stats to %s" % database_name)
        command_args = " ".join([
            "-storage_driver=influxdb",
            "-log_dir=/",
            "-storage_driver_db=%s" % quote(database_name),
            "-storage_driver_host=%s:%d" % (quote(options.host),
                                            options.port),
            "-storage_driver_user=%s" % quote(options.user),
            "-storage_driver_password=%s" % quote(options.password),
            "-storage_driver_secure=%d" % options.secure,
            # TODO: Calculate based on the run time.
            "-storage_driver_buffer_duration=5s"
        ])
        yield docker.run_containers(collection, self.info.name,
                                    None, command_args, volumes,
                                    ports={8080: 8080})

        yield self.wait(collection, ping)
Example #18
def create_catalogue(workspace_id: int, catalogue_data):
    """
    Creates a catalogue in the given workspace. A catalogue is defined by its name and url. These are given as
    json data

    :param workspace_id: Workspace ID of the target workspace, where the catalogue should get created.
    :return: Catalogue descriptor
    """
    catalogue_name = shlex.quote(catalogue_data['name'])
    catalogue_url = shlex.quote(catalogue_data['url'])
    session = db_session()
    workspace = session.query(Workspace).filter(Workspace.id == workspace_id).first()
    if workspace is None:
        raise NotFound("workspace with id {} could not be found".format(workspace_id))

    existing_catalogues = session.query(Catalogue). \
        filter(Catalogue.workspace == workspace). \
        filter(Catalogue.name == catalogue_data['name']). \
        all()

    if len(existing_catalogues) > 0:
        raise NameConflict("catalogue with name {} already exists".format(catalogue_data['name']))
    catalogue = Catalogue(name=catalogue_name, url=catalogue_url, workspace=workspace)
    session.add(catalogue)
    session.commit()
    update_workspace_descriptor(catalogue.workspace)
    return catalogue.as_dict()
Example #19
def update_catalogue(workspace_id, catalogue_id, catalogue_data):
    """
    Updates a specific catalogue by its id. The catalogue
    takes the name and url given in the json parameter.
    :param workspace_id: The Workspace ID
    :param catalogue_id: The Catalogue ID
    :return: The updated Catalogue descriptor
    """
    catalogue_name = shlex.quote(catalogue_data['name'])
    catalogue_url = shlex.quote(catalogue_data['url'])
    session = db_session()
    workspace = session.query(Workspace).filter(Workspace.id == workspace_id).first()
    if workspace is None:
        raise NotFound("workspace with id {} could not be found".format(workspace_id))

    catalogue = session.query(Catalogue). \
        filter(Catalogue.workspace == workspace). \
        filter(Catalogue.id == catalogue_id). \
        first()
    if catalogue is None:
        raise NotFound("catalogue with id {} could not be found".format(catalogue_id))

    if catalogue_name != catalogue.name:
        existing_catalogues = session.query(Catalogue). \
            filter(Catalogue.workspace == workspace). \
            filter(Catalogue.name == catalogue_data['name']). \
            all()
        if len(existing_catalogues) > 0:
            raise NameConflict("catalogue with name {} already exists".format(catalogue_data['name']))

    catalogue.name = catalogue_name
    catalogue.url = catalogue_url
    session.commit()
    update_workspace_descriptor(catalogue.workspace)
    return catalogue.as_dict()
Example #20
	def runExtensionCommand(self, command, filefilter, defaultext):
		import shlex
		of = ('%of' in command)
		html = ('%html' in command)
		if of:
			if defaultext and not filefilter:
				filefilter = '*'+defaultext
			fileName = QFileDialog.getSaveFileName(self,
				self.tr('Export document'), '', filefilter)[0]
			if not fileName:
				return
			if defaultext and not QFileInfo(fileName).suffix():
				fileName += defaultext
		else:
			fileName = 'out' + defaultext
		basename = '.%s.retext-temp' % self.currentTab.getBaseName()
		if html:
			tmpname = basename+'.html'
			self.saveHtml(tmpname)
		else:
			tmpname = basename + self.currentTab.getActiveMarkupClass().default_extension
			self.currentTab.writeTextToFile(tmpname)
		command = command.replace('%of', shlex.quote(fileName))
		command = command.replace('%html' if html else '%if', shlex.quote(tmpname))
		try:
			Popen(str(command), shell=True).wait()
		except Exception as error:
			errorstr = str(error)
			QMessageBox.warning(self, '', self.tr('Failed to execute the command:')
			+ '\n' + errorstr)
		QFile(tmpname).remove()
Example #21
    def clone(self, remote_address, target_directory=None, bare=False, recursive=True,
                branch=None, no_checkout=False, add_as_origin=False):
        if target_directory is not None:
            target_directory = Path(target_directory)
            self._set_wkdir(target_directory.dirname)
        cmd = [
            "clone",
            "--bare" if bare else "",
            "--recursive" if recursive else "",
            "--branch {}".format(shlex.quote(branch)) if branch is not None else "",
            "--no-checkout" if no_checkout else "",
            remote_address,
##            shlex.quote(Path(remote_address).nice_full_path),
            shlex.quote(Path(target_directory).basename) if target_directory is not None else ""
        ]
        if target_directory is None:
            self._local = Path(os.path.join(self._wkdir.full_path, Path(remote_address).basename))
        else:
            self._local = Path(target_directory)
        self._set_cmd(cmd)._run()
        if add_as_origin:
            self._set_cmd([
                "remote",
                'add',
                'origin',
                remote_address
            ])._run()
        return self
Example #22
    def install(
            self,
            ssh_client: paramiko.client.SSHClient,
            cluster: FlintrockCluster):

        print("[{h}] Installing Spark...".format(
            h=ssh_client.get_transport().getpeername()[0]))

        try:
            if self.version:
                with ssh_client.open_sftp() as sftp:
                    sftp.put(
                        localpath=os.path.join(SCRIPTS_DIR, 'install-spark.sh'),
                        remotepath='/tmp/install-spark.sh')
                    sftp.chmod(path='/tmp/install-spark.sh', mode=0o755)
                url = self.download_source.format(v=self.version)
                ssh_check_output(
                    client=ssh_client,
                    command="""
                        set -e
                        /tmp/install-spark.sh {url}
                        rm -f /tmp/install-spark.sh
                    """.format(url=shlex.quote(url)))
            else:
                ssh_check_output(
                    client=ssh_client,
                    command="""
                        set -e
                        sudo yum install -y git
                        sudo yum install -y java-devel
                        """)
                ssh_check_output(
                    client=ssh_client,
                    command="""
                        set -e
                        git clone {repo} spark
                        cd spark
                        git reset --hard {commit}
                        if [ -e "make-distribution.sh" ]; then
                            ./make-distribution.sh -Phadoop-2.6
                        else
                            ./dev/make-distribution.sh -Phadoop-2.6
                        fi
                    """.format(
                        repo=shlex.quote(self.git_repository),
                        commit=shlex.quote(self.git_commit)))
            ssh_check_output(
                client=ssh_client,
                command="""
                    set -e
                    for f in $(find spark/bin -type f -executable -not -name '*.cmd'); do
                        sudo ln -s "$(pwd)/$f" "/usr/local/bin/$(basename $f)"
                    done
                    echo "export SPARK_HOME='$(pwd)/spark'" >> .bashrc
                """)
        except Exception as e:
            # TODO: This should be a more specific exception.
            print("Error: Failed to install Spark.", file=sys.stderr)
            print(e, file=sys.stderr)
            raise
Example #23
def xcheck_host_prog(conf, name, tool, wafname=None):
	wafname = wafname or name

	chost, chost_envar = get_chost_stuff(conf)

	specific = None
	if chost:
		specific = os.environ.get('%s_%s' % (chost_envar, name))

	if specific:
		value = Utils.to_list(specific)
		conf.env[wafname] += value
		conf.msg('Will use cross-compilation %s from %s_%s' % (name, chost_envar, name),
		 " ".join(quote(x) for x in value))
		return
	else:
		envar = os.environ.get('HOST_%s' % name)
		if envar is not None:
			value = Utils.to_list(envar)
			conf.env[wafname] = value
			conf.msg('Will use cross-compilation %s from HOST_%s' % (name, name),
			 " ".join(quote(x) for x in value))
			return

	if conf.env[wafname]:
		return

	value = None
	if chost:
		value = '%s-%s' % (chost, tool)

	if value:
		conf.env[wafname] = value
		conf.msg('Will use cross-compilation %s from CHOST' % wafname, value)
Example #24
        def run(self):
            from subprocess import call
            from pathlib import Path
            import os.path
            import inspect

            import shlex

            tm_path = shlex.quote(self.input()['tm'].fn)
            lm_path = shlex.quote(self.input()['lm']['blm'].fn)

            home = os.path.expanduser('~')
            dir_name = Path(inspect.stack()[-1][1]).absolute().parent
            current_dir_from_home = dir_name.relative_to(home)

            print(current_dir_from_home)

            toktagger_cmd = 'cd {cdir}; source {venv}; ./toktagger.py -t {tm}  -l {lm} -f /'.format(
                cdir='~/' + shlex.quote(str(current_dir_from_home)),
                venv=shlex.quote('venv/bin/activate'),
                tm=tm_path,
                lm=lm_path)
            print(toktagger_cmd)
            parallel_cmd = 'parallel {params} -k --block-size {blocksize} --pipe {cmd}'.format(
                params=self.parallel_params,
                blocksize=self.parallel_blocksize,
                cmd=shlex.quote(toktagger_cmd))

            cmd = shlex.split(parallel_cmd)
            print('running... ', parallel_cmd)

            with self.input()['input'].open(
                'r') as in_file, self.output().open('w') as out_file:
                retcode = call(cmd, stdin=in_file, stdout=out_file)
            assert retcode == 0
Example #25
 def disp_kv(key, val):
     """ display a shell-escaped version of value ``val`` of ``key``,
         using terminal 'dim' attribute for read-only variables.
     """
     return (self.dim(shlex.quote(val))
             if key in self.server.readonly_env
             else shlex.quote(val))
Example #26
def daemux_start(transitions, session="pmjq", shell='sh'):
    """Instantiate the transitions, each in its own tmux window"""
    for t in transitions:
        t = normalize(t)
        commands = []
        # Template "directories" deals with watch-able templates
        # that use a list as input
        for dirs_key in [x for x in ["inputs", "outputs", "errors"]
                         if x in t]:
            commands.append("watch -n1 "+shlex.quote(
                COMMAND_TEMPLATES['directories'].format(
                    dirs=' '.join(
                        map(lambda d:
                            os.path.dirname(smart_unquote(d)),
                            t[dirs_key])))))
        # Template "stderr" deals with the log files
        if "stderr" in t:
            commands.append("watch -n1 "+shlex.quote(
                COMMAND_TEMPLATES['stderr'].format(
                    stderr=os.path.dirname(
                        smart_unquote(t['stderr'])))))
        # The command
        if shell == "sh":
            commands.append(pmjq_command(t))
        elif shell == 'fish':
            commands.append(pmjq_command(t, redirect='^'))
        # The other templates can be used as is
        for k in [k for k in COMMAND_TEMPLATES
                  if k not in ['directories', 'stderr', 'cmd']]:
            if k in t:
                commands.append(COMMAND_TEMPLATES[k].format(**t))
        for i, cmd in enumerate(commands):
            daemux.start(cmd, session=session, window=t['id'], pane=i,
                         layout='tiled')
Example #27
    def keygen(self, filename='', passphrase=''):
        """
        Generate a public/private key pair and store them in ``filename``, encrypted by ``passphrase``

        :param str filename:    File name to store the private key in. The file name for the public key
                                will be derived from this name by suffixing it with ``.pub``
        :param str passphrase:  The passphrase used for encrypting the private key. Note that the passphrase
                                is only accepted if it is longer than 4 characters. If the passphrase is
                                empty or too short, ssh-keygen will ask for a passphrase using the system's
                                ssh-askpass mechanism.
        """
        self._arguments.extend([
            '-q',
            '-t', self._algorithm,
            '-b', str(self._keylength),
            '-O', 'clear',
            '-O', 'permit-pty',
            '-C', shlex.quote('{user}@{host}'.format(user=os.getlogin(), host=os.uname().nodename)),
            '-f', shlex.quote(filename)
        ])
        if passphrase and len(passphrase) > 4:
            self._arguments.extend([
                '-N', shlex.quote(passphrase)
            ])
        self._execute()
Example #28
 def __str__(self):
     ret = ''
     for attr in 'cmd', 'ret_code', 'out', 'err':
         value = getattr(self, attr, None)
         if value is not None and str(value).strip():
             mesg = ''
             if attr == 'cmd' and self.cmd_kwargs.get('stdin_files'):
                 mesg += 'cat'
                 for file_path in self.cmd_kwargs.get('stdin_files'):
                     mesg += ' ' + quote(file_path)
                 mesg += ' | '
             if attr == 'cmd' and isinstance(value, list):
                 mesg += ' '.join(quote(item) for item in value)
             else:
                 mesg = str(value).strip()
             if attr == 'cmd' and self.cmd_kwargs.get('stdin_str'):
                 mesg += ' <<<%s' % quote(self.cmd_kwargs.get('stdin_str'))
             if len(mesg.splitlines()) > 1:
                 fmt = self.JOB_LOG_FMT_M
             else:
                 fmt = self.JOB_LOG_FMT_1
             if not mesg.endswith('\n'):
                 mesg += '\n'
             ret += fmt % {
                 'cmd_key': self.cmd_key,
                 'attr': attr,
                 'mesg': mesg}
     return ret.rstrip()
Example #29
def _snap_command(path, args, ncpu, reference, output_folder, sample_name, read1, read2=None):
    """
    Args:
        path (str): path to aligner executable
        args (str): raw arguments to be passed to the aligner
        ncpu: number of alignment threads to launch
        reference: (str): reference filename
        output_folder (str): directory for aligner output
        sample_name (str): 
        read1 (str): absolute path to read1 fastq[.gz|.bz2]
        read2 (str): absolute path to read2 fastq[.gz|.bz2]

    Returns:
        string: command to execute the snap aligner
    """
    import os

    aligner_command = '{snap} {single_or_paired} {ref_dir} {read1} {read2} -t {ncpu} -b {snap_args} -o sam -'.format(**{
        'snap': path,
        'single_or_paired': 'paired' if read2 else 'single',
        'ref_dir': shlex.quote(os.path.join(output_folder, 'reference', 'snap')),
        'read1': shlex.quote(read1),
        'read2': shlex.quote(read2) if read2 else '',
        'ncpu': shlex.quote(str(ncpu)),
        'snap_args': ' '.join(map(shlex.quote, shlex.split(args)))
    })
    return aligner_command
Example #30
    def configure(
            self,
            ssh_client: paramiko.client.SSHClient,
            cluster: FlintrockCluster):

        template_paths = [
            'spark/conf/spark-env.sh',
            'spark/conf/slaves',
        ]

        ssh_check_output(
            client=ssh_client,
            command="mkdir -p spark/conf",
        )

        for template_path in template_paths:
            ssh_check_output(
                client=ssh_client,
                command="""
                    echo {f} > {p}
                """.format(
                    f=shlex.quote(
                        get_formatted_template(
                            path=os.path.join(THIS_DIR, "templates", template_path),
                            mapping=generate_template_mapping(
                                cluster=cluster,
                                spark_executor_instances=self.spark_executor_instances,
                                hadoop_version=self.hadoop_version,
                                spark_version=self.version or self.git_commit,
                            ))),
                    p=shlex.quote(template_path)))
Example #31
def mpi_run(settings, nics, env, command, stdout=None, stderr=None):
    """
    Runs mpi_run.

    Args:
        settings: Settings for running MPI.
                  Note: settings.num_proc and settings.hosts must not be None.
        nics: Interfaces to include by MPI.
        env: Environment dictionary to use for running command.
        command: Command and arguments to run as a list of strings.
        stdout: Stdout of the mpi process.
                Only used when settings.run_func_mode is True.
        stderr: Stderr of the mpi process.
                Only used when settings.run_func_mode is True.
    """
    mpi_impl_flags, impl_binding_args = _get_mpi_implementation_flags(
        settings.tcp_flag)
    if mpi_impl_flags is None:
        raise Exception(_MPI_NOT_FOUND_ERROR_MSG)

    ssh_port_arg = '-mca plm_rsh_args \"-p {ssh_port}\"'.format(
        ssh_port=settings.ssh_port) if settings.ssh_port else ''

    # if user does not specify any hosts, mpirun by default uses local host.
    # There is no need to specify localhost.
    hosts_arg = '-H {hosts}'.format(hosts=settings.hosts)

    tcp_intf_arg = '-mca btl_tcp_if_include {nics}'.format(
        nics=','.join(nics)) if nics else ''
    nccl_socket_intf_arg = '-x NCCL_SOCKET_IFNAME={nics}'.format(
        nics=','.join(nics)) if nics else ''

    # On large cluster runs (e.g. Summit), we need extra settings to work around OpenMPI issues
    if settings.num_hosts and settings.num_hosts >= _LARGE_CLUSTER_THRESHOLD:
        mpi_impl_flags.append('-mca plm_rsh_no_tree_spawn true')
        mpi_impl_flags.append('-mca plm_rsh_num_concurrent {}'.format(
            settings.num_hosts))

    binding_args = settings.binding_args if settings.binding_args else ' '.join(
        impl_binding_args)

    # Pass all the env variables to the mpirun command.
    mpirun_command = (
        'mpirun --allow-run-as-root --tag-output '
        '-np {num_proc} {hosts_arg} '
        '{binding_args} '
        '{mpi_args} '
        '{ssh_port_arg} '
        '{tcp_intf_arg} '
        '{nccl_socket_intf_arg} '
        '{output_filename_arg} '
        '{env} {extra_mpi_args} {command}'  # expect a lot of environment variables
        .format(num_proc=settings.num_proc,
                hosts_arg=hosts_arg,
                binding_args=binding_args,
                mpi_args=' '.join(mpi_impl_flags),
                tcp_intf_arg=tcp_intf_arg,
                nccl_socket_intf_arg=nccl_socket_intf_arg,
                ssh_port_arg=ssh_port_arg,
                output_filename_arg='--output-filename ' +
                settings.output_filename if settings.output_filename else '',
                env=' '.join('-x %s' % key for key in sorted(env.keys())
                             if env_util.is_exportable(key)),
                extra_mpi_args=settings.extra_mpi_args
                if settings.extra_mpi_args else '',
                command=' '.join(quote(par) for par in command)))

    if settings.verbose >= 2:
        print(mpirun_command)

    # we need the driver's PATH in env to run mpirun,
    # env for mpirun is different to env encoded in mpirun_command
    if 'PATH' not in env and 'PATH' in os.environ:
        env = copy.copy(env)  # copy env so we do not leak env modifications
        env['PATH'] = os.environ['PATH']

    # Execute the mpirun command.
    if settings.run_func_mode:
        exit_code = safe_shell_exec.execute(mpirun_command,
                                            env=env,
                                            stdout=stdout,
                                            stderr=stderr)
        if exit_code != 0:
            raise RuntimeError(
                "mpirun failed with exit code {exit_code}".format(
                    exit_code=exit_code))
    else:
        os.execve('/bin/sh', ['/bin/sh', '-c', mpirun_command], env)
Example #32
def connect(ssh_cmd, rhostport, python, stderr, options):
    portl = []

    if re.sub(r'.*@', '', rhostport or '').count(':') > 1:
        if rhostport.count(']') or rhostport.count('['):
            result = rhostport.split(']')
            rhost = result[0].strip('[')
            if len(result) > 1:
                result[1] = result[1].strip(':')
                if result[1] != '':
                    portl = ['-p', str(int(result[1]))]
        # can't disambiguate IPv6 colons and a port number. pass the hostname
        # through.
        else:
            rhost = rhostport
    else:  # IPv4
        l = (rhostport or '').rsplit(':', 1)
        rhost = l[0]
        if len(l) > 1:
            portl = ['-p', str(int(l[1]))]

    if rhost == '-':
        rhost = None

    z = zlib.compressobj(1)
    content = readfile('sshuttle.assembler')
    optdata = ''.join("%s=%r\n" % (k, v) for (k, v) in list(options.items()))
    optdata = optdata.encode("UTF8")
    content2 = (empackage(z, 'sshuttle') +
                empackage(z, 'sshuttle.cmdline_options', optdata) +
                empackage(z, 'sshuttle.helpers') +
                empackage(z, 'sshuttle.ssnet') +
                empackage(z, 'sshuttle.hostwatch') +
                empackage(z, 'sshuttle.server') + b"\n")

    pyscript = r"""
                import sys, os;
                verbosity=%d;
                sys.stdin = os.fdopen(0, "rb");
                exec(compile(sys.stdin.read(%d), "assembler.py", "exec"))
                """ % (helpers.verbose or 0, len(content))
    pyscript = re.sub(r'\s+', ' ', pyscript.strip())

    if not rhost:
        # ignore the --python argument when running locally; we already know
        # which python version works.
        argv = [sys.executable, '-c', pyscript]
    else:
        if ssh_cmd:
            sshl = shlex.split(ssh_cmd)
        else:
            sshl = ['ssh']
        if python:
            pycmd = "'%s' -c '%s'" % (python, pyscript)
        else:
            pycmd = ("P=python3; $P -V 2>%s || P=python; "
                     "exec \"$P\" -c %s") % (os.devnull, quote(pyscript))
            pycmd = ("exec /bin/sh -c %s" % quote(pycmd))
        argv = (sshl + portl + [rhost, '--', pycmd])
    (s1, s2) = socket.socketpair()

    def setup():
        # runs in the child process
        s2.close()

    s1a, s1b = os.dup(s1.fileno()), os.dup(s1.fileno())
    s1.close()
    debug2('executing: %r\n' % argv)
    p = ssubprocess.Popen(argv,
                          stdin=s1a,
                          stdout=s1b,
                          preexec_fn=setup,
                          close_fds=True,
                          stderr=stderr)
    os.close(s1a)
    os.close(s1b)
    s2.sendall(content)
    s2.sendall(content2)
    return p, s2
Example #33
def connect(ssh_cmd, rhostport, python, stderr, options):
    username, password, port, host = parse_hostport(rhostport)
    if username:
        rhost = "{}@{}".format(username, host)
    else:
        rhost = host

    z = zlib.compressobj(1)
    content = get_module_source('sshuttle.assembler')
    optdata = ''.join("%s=%r\n" % (k, v) for (k, v) in list(options.items()))
    optdata = optdata.encode("UTF8")
    content2 = (empackage(z, 'sshuttle') +
                empackage(z, 'sshuttle.cmdline_options', optdata) +
                empackage(z, 'sshuttle.helpers') +
                empackage(z, 'sshuttle.ssnet') +
                empackage(z, 'sshuttle.hostwatch') +
                empackage(z, 'sshuttle.server') + b"\n")

    pyscript = r"""
                import sys, os;
                verbosity=%d;
                sys.stdin = os.fdopen(0, "rb");
                exec(compile(sys.stdin.read(%d), "assembler.py", "exec"))
                """ % (helpers.verbose or 0, len(content))
    pyscript = re.sub(r'\s+', ' ', pyscript.strip())

    if not rhost:
        # ignore the --python argument when running locally; we already know
        # which python version works.
        argv = [sys.executable, '-c', pyscript]
    else:
        if ssh_cmd:
            sshl = shlex.split(ssh_cmd)
        else:
            sshl = ['ssh']
        if port is not None:
            portl = ["-p", str(port)]
        else:
            portl = []
        if python:
            pycmd = "'%s' -c '%s'" % (python, pyscript)
        else:
            pycmd = ("P=python3; $P -V 2>%s || P=python; "
                     "exec \"$P\" -c %s") % (os.devnull, quote(pyscript))
            pycmd = ("/bin/sh -c {}".format(quote(pycmd)))

        if password is not None:
            os.environ['SSHPASS'] = str(password)
            argv = (["sshpass", "-e"] + sshl + portl + [rhost, '--', pycmd])

        else:
            argv = (sshl + portl + [rhost, '--', pycmd])
    (s1, s2) = socket.socketpair()

    def setup():
        # runs in the child process
        s2.close()

    s1a, s1b = os.dup(s1.fileno()), os.dup(s1.fileno())
    s1.close()
    debug2('executing: %r\n' % argv)
    p = ssubprocess.Popen(argv,
                          stdin=s1a,
                          stdout=s1b,
                          preexec_fn=setup,
                          close_fds=True,
                          stderr=stderr)
    os.close(s1a)
    os.close(s1b)
    s2.sendall(content)
    s2.sendall(content2)
    return p, s2
Example #34
import os
import sys
import shlex
import subprocess
import tempfile
import asm_processor

dir_path = os.path.dirname(os.path.realpath(__file__))
prelude = os.path.join(dir_path, "prelude.s")

all_args = sys.argv[1:]
sep1 = all_args.index('--')
sep2 = all_args.index('--', sep1 + 1)

compiler = all_args[:sep1]

assembler = all_args[sep1 + 1:sep2]
assembler_sh = ' '.join(shlex.quote(x) for x in assembler)

compile_args = all_args[sep2 + 1:]
in_file = compile_args[-1]
out_ind = compile_args.index('-o')
out_file = compile_args[out_ind + 1]
del compile_args[-1]
del compile_args[out_ind + 1]
del compile_args[out_ind]

in_dir = os.path.split(os.path.realpath(in_file))[0]
opt_flags = [
    x for x in compile_args
    if x in ['-g3', '-g', '-O1', '-O2', '-framepointer']
]
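This wrapper splits its own argument vector on two '--' separators: the compiler invocation, then the assembler invocation, then the compile arguments. A hypothetical invocation (tool names and flags are made up) would be split like this:

# e.g.  wrapper.py ido5.3/cc -O2 -g -- mips-as -march=vr4300 -- -c -o build/foo.o src/foo.c
argv_tail = ['ido5.3/cc', '-O2', '-g', '--',
             'mips-as', '-march=vr4300', '--',
             '-c', '-o', 'build/foo.o', 'src/foo.c']
sep1 = argv_tail.index('--')
sep2 = argv_tail.index('--', sep1 + 1)
compiler = argv_tail[:sep1]              # ['ido5.3/cc', '-O2', '-g']
assembler = argv_tail[sep1 + 1:sep2]     # ['mips-as', '-march=vr4300']
compile_args = argv_tail[sep2 + 1:]      # in_file is 'src/foo.c', out_file is 'build/foo.o'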
Example #35
            signal.signal(signal.SIGINT, lambda x, y: True)
            # Set the prompt
            args.append('--init-command')
            prompt_cmd = '''functions --copy fish_prompt original_fish_prompt
            function fish_prompt
                echo -n '[gst-{}] '(original_fish_prompt)
            end'''.format(gst_version)
            args.append(prompt_cmd)
        elif args[0].endswith('zsh'):
            tmpdir = tempfile.TemporaryDirectory()
            # Let the GC remove the tmp file
            tmprc = open(os.path.join(tmpdir.name, '.zshrc'), 'w')
            zshrc = os.path.expanduser('~/.zshrc')
            if os.path.exists(zshrc):
                with open(zshrc, 'r') as src:
                    shutil.copyfileobj(src, tmprc)
            tmprc.write(
                '\nexport PROMPT="[gst-{}] $PROMPT"'.format(gst_version))
            tmprc.flush()
            env['ZDOTDIR'] = tmpdir.name
    try:
        if options.only_environment:
            for name, value in env.items():
                print('{}={}'.format(name, shlex.quote(value)))
                print('export {}'.format(name))
        else:
            exit(subprocess.call(args, close_fds=False, env=env))

    except subprocess.CalledProcessError as e:
        exit(e.returncode)
Example #36
 def command_string(self) -> str:
     return ' '.join(shlex.quote(c) for c in self.command)
Example #37
def sublime(paths):
    quoted_paths = [shlex.quote(p) for p in paths]
    return App(command=_cmd(["subl", "-n"] + quoted_paths), xClass="Subl")
Example #38
 def path(self) -> str:
     return quote('/dev/md/' + self.name)
Example #39
 def path(self) -> str:
     # If the LV or VG name has a hyphen '-', LVM doubles it in the path
     return quote('/dev/mapper/' + self.vg_name.replace('-', '--') + '-' +
                  self.lv_name.replace('-', '--'))
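A tiny worked example of the hyphen-doubling rule described in the comment (volume names are hypothetical):

from shlex import quote

vg_name, lv_name = "my-vg", "data-01"    # hypothetical LVM names
path = quote('/dev/mapper/' + vg_name.replace('-', '--') + '-' +
             lv_name.replace('-', '--'))
print(path)  # /dev/mapper/my--vg-data--01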
Example #40
def run_workflow_using_multiple_service_tables(
        input_datas,
        path_to_knime_executable,
        path_to_knime_workflow,
        input_service_table_node_ids,
        output_service_table_node_ids,
        *,
        live_passthru_stdout_stderr=False,
        output_as_pandas_dataframes=True,
        input_json_filename_pattern="input_%d.json",
        output_json_filename_pattern="output_%d.json",
    ):
    """Executes the requested KNIME workflow, feeding the supplied data
    to the Container Input (Table) nodes in that workflow and returning the
    output from the workflow's Container Output (Table) nodes."""

    abspath_to_knime_workflow = Path(path_to_knime_workflow).absolute()
    if not Path(path_to_knime_executable).exists():
        raise ValueError(f"Executable not found: {path_to_knime_executable}")

    with tempfile.TemporaryDirectory() as temp_dir:
        logging.debug(f"using temp dir: {temp_dir}")

        option_flags_input_service_table_nodes = []
        for node_id, data in zip(input_service_table_node_ids, input_datas):
            input_json_filename = input_json_filename_pattern % node_id
            input_json_filepath = Path(temp_dir, input_json_filename)

            # Support pandas DataFrame-like inputs.
            try:
                data = convert_dataframe_to_knime_friendly_dict(data)
            except AttributeError:
                pass

            with open(input_json_filepath, "w") as input_json_fh:
                json.dump(data, input_json_fh)

            option_flags_input_service_table_nodes.append(
                f'-option={node_id},inputPathOrUrl,"{input_json_filepath}",String'
            )

        option_flags_output_service_table_nodes = []
        expected_output_json_files = []
        for node_id in output_service_table_node_ids:
            output_json_filename = output_json_filename_pattern % node_id
            output_json_filepath = Path(temp_dir, output_json_filename)

            option_flags_output_service_table_nodes.append(
                f'-option={node_id},outputPathOrUrl,"{output_json_filepath}",String',
            )
            expected_output_json_files.append(output_json_filepath)

        data_dir = Path(temp_dir, "knime_data")

        # shlex.quote handles executable paths containing spaces, etc.
        # On Windows, cmd shell requires double-quotes, hence replace()
        shell_command = " ".join([
            shlex.quote(path_to_knime_executable).replace("'", '"'),
            "-nosplash",
            "-debug",
            "--launcher.suppressErrors",
            "-application org.knime.product.KNIME_BATCH_APPLICATION",
            f"-data {data_dir}",
            f'-workflowDir="{abspath_to_knime_workflow}"',
            " ".join(option_flags_input_service_table_nodes),
            " ".join(option_flags_output_service_table_nodes),
        ])
        logging.info(f"knime invocation: {shell_command}")

        result = subprocess.run(
            shell_command,
            shell=True,
            stdout=subprocess.PIPE if not live_passthru_stdout_stderr else None,
            stderr=subprocess.PIPE if not live_passthru_stdout_stderr else None,
        )
        logging.info(f"exit code from KNIME execution: {result.returncode}")

        knime_outputs = []
        try:
            for output_json_filepath in expected_output_json_files:
                with open(output_json_filepath) as output_json_fh:
                    single_node_knime_output = json.load(output_json_fh)
                knime_outputs.append(single_node_knime_output)
        except FileNotFoundError:
            logging.error(f"captured stdout: {result.stdout}")
            logging.error(f"captured stderr: {result.stderr}")
            raise ChildProcessError("Output from KNIME not found")

        if output_as_pandas_dataframes:
            try:
                import pandas as pd
                for i, output in enumerate(knime_outputs):
                    df_columns = list(
                        k for d in output['table-spec']
                        for k, v in d.items()
                    )
                    knime_outputs[i] = pd.DataFrame(
                        output['table-data'],
                        columns=df_columns
                    )
            except ImportError:
                logging.warning("requested output as DataFrame not possible")
            except Exception as e:
                logging.error("error while converting KNIME output to DataFrame")
                raise e

        if result.returncode != 0:
            logging.info(f"captured stdout: {result.stdout}")
            logging.info(f"captured stderr: {result.stderr}")

    return knime_outputs
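A hedged usage sketch: every path and node id below is hypothetical, and the input dict shape simply mirrors the 'table-spec'/'table-data' layout that the function reads back from the output nodes.

input_data = {
    "table-spec": [{"id": "long"}, {"name": "string"}],
    "table-data": [[1, "alpha"], [2, "beta"]],
}
outputs = run_workflow_using_multiple_service_tables(
    [input_data],
    "/opt/knime/knime",                  # hypothetical KNIME executable
    "/workflows/my_workflow",            # hypothetical workflow directory
    input_service_table_node_ids=[1],
    output_service_table_node_ids=[2],
)
df = outputs[0]  # a pandas DataFrame when pandas is installed and output_as_pandas_dataframes is True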
Example #41
 def path(self) -> str:
     prefix = 'PARTUUID=' if self.partition else 'UUID='
     return '"$(findfs ' + quote(prefix + self.uuid) + ')"'
Example #42
 def path(self) -> str:
     return quote(self.mountpoint)
Example #43
def get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
                            override_cluster_name):
    """Create the cluster head node, which in turn creates the workers."""
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        head_node_tags = {
            TAG_RAY_NODE_TYPE: "head",
        }
        nodes = provider.non_terminated_nodes(head_node_tags)
        if len(nodes) > 0:
            head_node = nodes[0]
        else:
            head_node = None

        if not head_node:
            confirm("This will create a new cluster", yes)
        elif not no_restart:
            confirm("This will restart cluster services", yes)

        launch_hash = hash_launch_conf(config["head_node"], config["auth"])
        if head_node is None or provider.node_tags(head_node).get(
                TAG_RAY_LAUNCH_CONFIG) != launch_hash:
            if head_node is not None:
                confirm("Head node config out-of-date. It will be terminated",
                        yes)
                logger.info(
                    "get_or_create_head_node: "
                    "Terminating outdated head node {}".format(head_node))
                provider.terminate_node(head_node)
            logger.info("get_or_create_head_node: Launching new head node...")
            head_node_tags[TAG_RAY_LAUNCH_CONFIG] = launch_hash
            head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
                config["cluster_name"])
            provider.create_node(config["head_node"], head_node_tags, 1)

        nodes = provider.non_terminated_nodes(head_node_tags)
        assert len(nodes) == 1, "Failed to create head node."
        head_node = nodes[0]

        # TODO(ekl) right now we always update the head node even if the hash
        # matches. We could prompt the user for what they want to do here.
        runtime_hash = hash_runtime_conf(config["file_mounts"], config)
        logger.info("get_or_create_head_node: Updating files on head node...")

        # Rewrite the auth config so that the head node can update the workers
        remote_key_path = "~/ray_bootstrap_key.pem"
        remote_config = copy.deepcopy(config)
        remote_config["auth"]["ssh_private_key"] = remote_key_path

        # Adjust for new file locations
        new_mounts = {}
        for remote_path in config["file_mounts"]:
            new_mounts[remote_path] = remote_path
        remote_config["file_mounts"] = new_mounts
        remote_config["no_restart"] = no_restart

        # Now inject the rewritten config and SSH key into the head node
        remote_config_file = tempfile.NamedTemporaryFile(
            "w", prefix="ray-bootstrap-")
        remote_config_file.write(json.dumps(remote_config))
        remote_config_file.flush()
        config["file_mounts"].update({
            remote_key_path:
            config["auth"]["ssh_private_key"],
            "~/ray_bootstrap_config.yaml":
            remote_config_file.name
        })

        if restart_only:
            init_commands = config["head_start_ray_commands"]
        elif no_restart:
            init_commands = config["head_setup_commands"]
        else:
            init_commands = (config["head_setup_commands"] +
                             config["head_start_ray_commands"])

        updater = NodeUpdaterThread(
            node_id=head_node,
            provider_config=config["provider"],
            provider=provider,
            auth_config=config["auth"],
            cluster_name=config["cluster_name"],
            file_mounts=config["file_mounts"],
            initialization_commands=config["initialization_commands"],
            setup_commands=init_commands,
            runtime_hash=runtime_hash,
        )
        updater.start()
        updater.join()

        # Refresh the node cache so we see the external ip if available
        provider.non_terminated_nodes(head_node_tags)

        if config.get("provider", {}).get("use_internal_ips", False) is True:
            head_node_ip = provider.internal_ip(head_node)
        else:
            head_node_ip = provider.external_ip(head_node)

        if updater.exitcode != 0:
            logger.error("get_or_create_head_node: "
                         "Updating {} failed".format(head_node_ip))
            sys.exit(1)
        logger.info(
            "get_or_create_head_node: "
            "Head node up-to-date, IP address is: {}".format(head_node_ip))

        monitor_str = "tail -n 100 -f /tmp/ray/session_*/logs/monitor*"
        use_docker = bool(config["docker"]["container_name"])
        if override_cluster_name:
            modifiers = " --cluster-name={}".format(
                quote(override_cluster_name))
        else:
            modifiers = ""
        print("To monitor auto-scaling activity, you can run:\n\n"
              "  ray exec {} {}{}{}\n".format(
                  config_file, "--docker " if use_docker else " ",
                  quote(monitor_str), modifiers))
        print("To open a console on the cluster:\n\n"
              "  ray attach {}{}\n".format(config_file, modifiers))

        print("To ssh manually to the cluster, run:\n\n"
              "  ssh -i {} {}@{}\n".format(config["auth"]["ssh_private_key"],
                                           config["auth"]["ssh_user"],
                                           head_node_ip))
    finally:
        provider.cleanup()
Example #44
 def path(self) -> str:
     prefix = 'PARTLABEL=' if self.partition else 'LABEL='
     return '"$(findfs ' + quote(prefix + self.label) + ')"'
Example #45
 def copy(self, src, dest):
     dest = shlex.quote(dest)
     src = shlex.quote(src)
     self.execute(f"cp {src} {dest}")
Example #46
 def path(self) -> str:
     return quote(self.datapath)
Example #47
def build(options: BuildOptions):
    try:
        subprocess.check_call(['docker', '--version'])
    except Exception:
        print(
            'cibuildwheel: Docker not found. Docker is required to run Linux builds. '
            'If you\'re building on Travis CI, add `services: [docker]` to your .travis.yml. '
            'If you\'re building on Circle CI in Linux, add a `setup_remote_docker` step to your .circleci/config.yml',
            file=sys.stderr)
        exit(2)

    python_configurations = get_python_configurations(options.build_selector)
    platforms = [
        ('cp', 'manylinux_x86_64', options.manylinux_images['x86_64']),
        ('cp', 'manylinux_i686', options.manylinux_images['i686']),
        ('cp', 'manylinux_aarch64', options.manylinux_images['aarch64']),
        ('cp', 'manylinux_ppc64le', options.manylinux_images['ppc64le']),
        ('cp', 'manylinux_s390x', options.manylinux_images['s390x']),
        ('pp', 'manylinux_x86_64', options.manylinux_images['pypy_x86_64']),
    ]

    for implementation, platform_tag, docker_image in platforms:
        platform_configs = [
            c for c in python_configurations
            if c.identifier.startswith(implementation)
            and c.identifier.endswith(platform_tag)
        ]
        if not platform_configs:
            continue

        container_name = 'cibuildwheel-{}'.format(uuid.uuid4())
        try:
            call([
                'docker',
                'create',
                '--env',
                'CIBUILDWHEEL',
                '--name',
                container_name,
                '-i',
                '-v',
                '/:/host',  # ignored on CircleCI
                docker_image,
                '/bin/bash'
            ])

            call([
                'docker', 'cp',
                os.path.abspath(options.project_dir) + '/.',
                container_name + ':/project'
            ])

            call(['docker', 'start', container_name])

            for config in platform_configs:
                if options.dependency_constraints:
                    constraints_file = options.dependency_constraints.get_for_python_version(
                        config.version)

                    # `docker cp` causes 'no space left on device' error when
                    # a container is running and the host filesystem is
                    # mounted. https://github.com/moby/moby/issues/38995
                    # Use `docker exec` instead.
                    with open(constraints_file, 'rb') as f:
                        call(
                            [
                                'docker', 'exec', '-i', container_name, 'sh',
                                '-c', 'cat > /constraints.txt'
                            ],
                            input=f.read(),
                        )

                call(['docker', 'exec', '-i', container_name, '/bin/bash'],
                     universal_newlines=True,
                     input='''
                        # give xtrace output an extra level of indent inside docker
                        PS4='    + '

                        set -o errexit
                        set -o xtrace
                        mkdir -p /output
                        cd /project

                        PYBIN="{config_python_bin}"

                        export PATH="$PYBIN:$PATH"
                        {environment_exports}

                        # check the active python and pip are in PYBIN
                        if [ "$(which pip)" != "$PYBIN/pip" ]; then
                        echo "cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it."
                        exit 1
                        fi
                        if [ "$(which python)" != "$PYBIN/python" ]; then
                        echo "cibuildwheel: pip available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it."
                        exit 1
                        fi

                        if [ ! -z {before_build} ]; then
                            sh -c {before_build}
                        fi

                        # Build the wheel
                        rm -rf /tmp/built_wheel
                        mkdir /tmp/built_wheel
                        pip wheel . -w /tmp/built_wheel --no-deps {build_verbosity_flag}
                        built_wheel=(/tmp/built_wheel/*.whl)

                        # repair the wheel
                        rm -rf /tmp/repaired_wheels
                        mkdir /tmp/repaired_wheels
                        # NOTE: 'built_wheel' here is a bash array of glob matches; "$built_wheel" returns
                        # the first element
                        if [[ "$built_wheel" == *none-any.whl ]] || [ -z {repair_command} ]; then
                            # pure Python wheel or empty repair command
                            mv "$built_wheel" /tmp/repaired_wheels
                        else
                            sh -c {repair_command} repair_command "$built_wheel"
                        fi
                        repaired_wheels=(/tmp/repaired_wheels/*.whl)

                        if [ ! -z {test_command} ]; then
                            # Set up a virtual environment to install and test from, to make sure
                            # there are no dependencies that were pulled in at build time.
                            pip install {dependency_install_flags} virtualenv
                            venv_dir=`mktemp -d`/venv
                            python -m virtualenv --no-download "$venv_dir"
                            export __CIBW_VIRTUALENV_PATH__=$venv_dir

                            # run the tests in a subshell to keep that `activate`
                            # script from polluting the env
                            (
                                source "$venv_dir/bin/activate"

                                echo "Running tests using `which python`"

                                if [ ! -z {before_test} ]; then
                                    sh -c {before_test}
                                fi

                                # Install the wheel we just built
                                # Note: If auditwheel produced two wheels, it's because the earlier produced wheel
                                # conforms to multiple manylinux standards. These multiple versions of the wheel are
                                # functionally the same, differing only in name, wheel metadata, and possibly the
                                # external shared libraries they include, so it doesn't matter which one we run the tests on.
                                # Let's just pick the first one.
                                pip install "${{repaired_wheels[0]}}"{test_extras}

                                # Install any requirements to run the tests
                                if [ ! -z "{test_requires}" ]; then
                                    pip install {test_requires}
                                fi

                                # Run the tests from a different directory
                                pushd $HOME
                                sh -c {test_command}
                                popd
                            )
                            # exit if tests failed (needed for older bash versions)
                            if [ $? -ne 0 ]; then
                              exit 1;
                            fi

                            # clean up
                            rm -rf "$venv_dir"
                        fi

                        # we're all done here; move it to output
                        mv "${{repaired_wheels[@]}}" /output
                        for repaired_wheel in "${{repaired_wheels[@]}}"; do
                            chown {uid}:{gid} "/output/$(basename "$repaired_wheel")"
                        done
                    '''.format(
                         config_python_bin=config.path + '/bin',
                         test_requires=' '.join(options.test_requires),
                         test_extras=options.test_extras,
                         test_command=shlex.quote(
                             prepare_command(options.test_command,
                                             project='/project') if options.
                             test_command else ''),
                         before_build=shlex.quote(
                             prepare_command(options.before_build,
                                             project='/project') if options.
                             before_build else ''),
                         build_verbosity_flag=' '.join(
                             get_build_verbosity_extra_flags(
                                 options.build_verbosity)),
                         repair_command=shlex.quote(
                             prepare_command(options.repair_command,
                                             wheel='"$1"',
                                             dest_dir='/tmp/repaired_wheels')
                             if options.repair_command else ''),
                         environment_exports='\n'.join(
                             options.environment.as_shell_commands()),
                         uid=os.getuid(),
                         gid=os.getgid(),
                         before_test=shlex.quote(
                             prepare_command(options.before_test,
                                             project='/project') if options.
                             before_test else ''),
                         dependency_install_flags='-c /constraints.txt'
                         if options.dependency_constraints else '',
                     ))

            # copy the output back into the host
            call([
                'docker', 'cp', container_name + ':/output/.',
                os.path.abspath(options.output_dir)
            ])
        except subprocess.CalledProcessError as error:
            troubleshoot(options.project_dir, error)
            exit(1)
        finally:
            # Still gets executed, even when 'exit(1)' gets called
            call(['docker', 'rm', '--force', '-v', container_name])
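A small stand-alone sketch (not cibuildwheel code; the helper name is made up) of how the `{before_build}` / `{test_command}` placeholders above behave once substituted via shlex.quote: an absent command renders as '' so the bash test `[ ! -z '' ]` skips the step, while a real command stays one quoted word for `sh -c`:

import shlex

def render_step(before_build):
    # Mirrors the `if [ ! -z {before_build} ]` pattern from the script above.
    quoted = shlex.quote(before_build if before_build else '')
    return "if [ ! -z {q} ]; then\n    sh -c {q}\nfi".format(q=quoted)

print(render_step(''))                    # test against '' is false, step skipped
print(render_step('pip install -U pip'))  # single-quoted command passed to sh -c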
Example #48
0
 def hardlink(self, src, dest):
     dest = shlex.quote(dest)
     src = shlex.quote(src)
     self.execute(f"ln {src} {dest}")
Example #49
0
 def create_remote_file(self, vm, filename, content):
     self.loop.run_until_complete(
         vm.run_for_stdio('cat > {}'.format(shlex.quote(filename)),
                          user='******',
                          input=content.encode('utf-8')))
Example #50
0
 def __repr__(self):
     """Create representation of Task."""
     return '<Task({}): command={}, config={}>'.format(
         self.taskname,
         repr(' '.join([quote(opt) for opt in self._build_command()])),
         repr(self.config))
Example #51
0
File: run.py Project: rhdxmr/virtme
def main():
    args = _ARGPARSER.parse_args()

    arch = architectures.get(args.arch)
    is_native = (args.arch == uname.machine)

    qemu = qemu_helpers.Qemu(arch.qemuname)
    qemu.probe()

    need_initramfs = args.force_initramfs or qemu.cannot_overmount_virtfs

    config = mkinitramfs.Config()

    kimg,dtb,modfiles,moddir = find_kernel_and_mods(arch, args)
    config.modfiles = modfiles
    if config.modfiles:
        need_initramfs = True

    qemuargs = [qemu.qemubin]
    kernelargs = []

    # Put the '-name' flag first so it's easily visible in ps, top, etc.
    if args.name:
        qemuargs.extend(['-name', args.name])
        kernelargs.append('virtme_hostname=%s' % args.name)

    # Set up virtfs
    export_virtfs(qemu, arch, qemuargs, args.root, '/dev/root', readonly=(not args.rw))

    guest_tools_path = guest_tools.find_guest_tools()
    if guest_tools_path is None:
        raise ValueError("couldn't find guest tools -- virtme is installed incorrectly")

    export_virtfs(qemu, arch, qemuargs, guest_tools_path,
                  'virtme.guesttools')

    initcmds = ['mkdir -p /run/virtme/guesttools',
                '/bin/mount -n -t 9p -o ro,version=9p2000.L,trans=virtio,access=any virtme.guesttools /run/virtme/guesttools',
                'exec /run/virtme/guesttools/virtme-init']

    # Map modules
    if moddir is not None:
        export_virtfs(qemu, arch, qemuargs, moddir, 'virtme.moddir')

    # Set up mounts
    mount_index = 0
    for dirtype, dirarg in itertools.chain((('rwdir', i) for i in args.rwdir),
                                           (('rodir', i) for i in args.rodir)):
        m = _RWDIR_RE.match(dirarg)
        if not m:
            arg_fail('invalid --%s parameter %r' % (dirtype, dirarg))
        if m.group(2) is not None:
            guestpath = m.group(1)
            hostpath = m.group(2)
        else:
            hostpath = m.group(1)
            guestpath = os.path.relpath(hostpath, args.root)
            if guestpath.startswith('..'):
                arg_fail('%r is not inside the root' % hostpath)

        idx = mount_index
        mount_index += 1
        tag = 'virtme.initmount%d' % idx
        export_virtfs(qemu, arch, qemuargs, hostpath, tag, readonly=(dirtype != 'rwdir'))
        kernelargs.append('virtme_initmount%d=%s' % (idx, guestpath))

    # Turn on KVM if available
    if is_native:
        qemuargs.extend(['-machine', 'accel=kvm:tcg'])

    # Add architecture-specific options
    qemuargs.extend(arch.qemuargs(is_native))

    # Set up / override baseline devices
    qemuargs.extend(['-parallel', 'none'])
    qemuargs.extend(['-net', 'none'])

    if not args.graphics and not args.script_sh and not args.script_exec:
        # It would be nice to use virtconsole, but it's terminally broken
        # in current kernels.  Nonetheless, I'm configuring the console
        # manually to make it easier to tweak in the future.
        qemuargs.extend(['-echr', '1'])
        qemuargs.extend(['-serial', 'none'])
        qemuargs.extend(['-chardev', 'stdio,id=console,signal=off,mux=on'])

        # We should be using the new-style -device serialdev,chardev=xyz,
        # but many architecture-specific serial devices don't support that.
        qemuargs.extend(['-serial', 'chardev:console'])

        qemuargs.extend(['-mon', 'chardev=console'])

        kernelargs.extend(arch.earlyconsole_args())
        kernelargs.extend(arch.serial_console_args())
        qemuargs.extend(arch.qemu_nodisplay_args())

        # PS/2 probing is slow; give the kernel a hint to speed it up.
        kernelargs.extend(['psmouse.proto=exps'])

        # Fix the terminal defaults (and set iutf8 because that's a better
        # default nowadays).  I don't know of any way to keep this up to date
        # after startup, though.
        try:
            terminal_size = os.get_terminal_size()
            kernelargs.extend(['virtme_stty_con=rows %d cols %d iutf8' %
                               (terminal_size.lines, terminal_size.columns)])
        except OSError as e:
            # don't die if running with a non-TTY stdout
            if e.errno != errno.ENOTTY:
                raise

        # Propagate the terminal type
        if 'TERM' in os.environ:
            kernelargs.extend(['TERM=%s' % os.environ['TERM']])

    if args.balloon:
        qemuargs.extend(['-balloon', 'virtio'])

    if args.disk:
        qemuargs.extend(['-device', '%s,id=scsi' % arch.virtio_dev_type('scsi')])

        for i,d in enumerate(args.disk):
            namefile = d.split('=', 1)
            if len(namefile) != 2:
                arg_fail('invalid argument to --disk')
            name,fn = namefile
            if '=' in fn or ',' in fn:
                arg_fail("--disk filenames cannot contain '=' or ','")
            if '=' in name or ',' in name:
                arg_fail("--disk device names cannot contain '=' or ','")
            driveid = 'disk%d' % i
            qemuargs.extend(['-drive', 'if=none,id=%s,file=%s' % (driveid, fn),
                             '-device', 'scsi-hd,drive=%s,vendor=virtme,product=disk,serial=%s' % (driveid, name)])

    has_script = False

    def do_script(shellcmd, use_exec=False):
        if args.graphics:
            arg_fail('scripts and --graphics are mutually exclusive')

        nonlocal has_script
        nonlocal need_initramfs
        if has_script:
            arg_fail('conflicting script options')
        has_script = True
        need_initramfs = True  # TODO: Fix this

        # Turn off default I/O
        qemuargs.extend(arch.qemu_nodisplay_args())

        # Send kernel logs to stderr
        qemuargs.extend(['-serial', 'none'])
        qemuargs.extend(['-chardev', 'file,id=console,path=/proc/self/fd/2'])

        # We should be using the new-style -device serialdev,chardev=xyz,
        # but many architecture-specific serial devices don't support that.
        qemuargs.extend(['-serial', 'chardev:console'])

        # Set up a virtserialport for script I/O
        qemuargs.extend(['-chardev', 'stdio,id=stdio,signal=on,mux=off'])
        qemuargs.extend(['-device', arch.virtio_dev_type('serial')])
        qemuargs.extend(['-device', 'virtserialport,name=virtme.scriptio,chardev=stdio'])

        # Scripts shouldn't reboot
        qemuargs.extend(['-no-reboot'])

        # Ask virtme-init to run the script
        config.virtme_data[b'script'] = """#!/bin/sh

        {prefix}{shellcmd}
        """.format(shellcmd=shellcmd, prefix="exec " if use_exec else "").encode('ascii')

        # Nasty issue: QEMU will set O_NONBLOCK on fds 0, 1, and 2.
        # This isn't inherently bad, but it can cause a problem if
        # another process is reading from 1 or writing to 0, which is
        # exactly what happens if you're using a terminal and you
        # redirect some, but not all, of the tty fds.  Work around it
        # by giving QEMU private copies of the open object if either
        # of them is a terminal.
        for oldfd,mode in ((0,os.O_RDONLY), (1,os.O_WRONLY), (2,os.O_WRONLY)):
            if os.isatty(oldfd):
                try:
                    newfd = os.open('/proc/self/fd/%d' % oldfd, mode)
                except OSError:
                    pass
                else:
                    os.dup2(newfd, oldfd)
                    os.close(newfd)

    if args.script_sh is not None:
        do_script(args.script_sh)

    if args.script_exec is not None:
        do_script(shlex.quote(args.script_exec), use_exec=True)

    if args.net:
        qemuargs.extend(['-net', 'nic,model=virtio'])
        qemuargs.extend(['-net', 'user'])
        kernelargs.extend(['virtme.dhcp'])

    if args.pwd:
        rel_pwd = os.path.relpath(os.getcwd(), args.root)
        if rel_pwd.startswith('..'):
            print('current working directory is not contained in the root')
            return 1
        kernelargs.append('virtme_chdir=%s' % rel_pwd)

    if need_initramfs:
        if args.busybox is not None:
            config.busybox = args.busybox
        else:
            config.busybox = mkinitramfs.find_busybox(args.root, is_native)
            if config.busybox is None:
                print('virtme-run: initramfs is needed, and no busybox was found',
                      file=sys.stderr)
                return 1

        # Set up the initramfs (warning: hack ahead)
        tmpfd,tmpname = tempfile.mkstemp('irfs')
        os.unlink(tmpname)
        tmpfile = os.fdopen(tmpfd, 'r+b')
        mkinitramfs.mkinitramfs(tmpfile, config)
        tmpfile.flush()
        fcntl.fcntl(tmpfd, fcntl.F_SETFD, 0)
        initrdpath = '/proc/self/fd/%d' % tmpfile.fileno()
    else:
        # No initramfs!  Warning: this is slower than using an initramfs
        # because the kernel will wait for device probing to finish.
        # Sigh.
        kernelargs.extend([
            'rootfstype=9p',
            'rootflags=version=9p2000.L,trans=virtio,access=any',
            'raid=noautodetect',
            'rw' if args.rw else 'ro',
        ])
        initrdpath = None
        initcmds.insert(0, 'mount -t tmpfs run /run')

    # Now that we're done setting up kernelargs, append user-specified args
    # and then initargs
    kernelargs.extend(args.kopt)

    # Unknown options get turned into arguments to init, which is annoying
    # because we're explicitly passing '--' to set the arguments directly.
    # Fortunately, 'init=' will clear any arguments parsed so far, so make
    # sure that 'init=' appears directly before '--'.
    kernelargs.append('init=/bin/sh')
    kernelargs.append('--')
    kernelargs.extend(['-c', ';'.join(initcmds)])

    if args.xen is None:
        # Load a normal kernel
        qemuargs.extend(['-kernel', kimg])
        if kernelargs:
            qemuargs.extend(['-append',
                             ' '.join(quote_karg(a) for a in kernelargs)])
        if initrdpath is not None:
            qemuargs.extend(['-initrd', initrdpath])
        if dtb is not None:
            qemuargs.extend(['-dtb', dtb])
    else:
        # Use multiboot syntax to load Xen
        qemuargs.extend(['-kernel', args.xen])
        qemuargs.extend(['-initrd', '%s %s%s' % (
            kimg,
            ' '.join(quote_karg(a).replace(',', ',,') for a in kernelargs),
            (',%s' % initrdpath) if initrdpath is not None else '')])

    # Handle --qemu-opt(s)
    qemuargs.extend(args.qemu_opt)
    if args.qemu_opts is not None:
        qemuargs.extend(args.qemu_opts)

    if args.show_command:
        print(' '.join(shlex.quote(a) for a in qemuargs))

    # Go!
    if not args.dry_run:
        os.execv(qemu.qemubin, qemuargs)
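A minimal, stand-alone illustration (not part of virtme; the argument values are invented) of the `--show-command` output built near the end of main(): each QEMU argument is quoted individually so the printed line can be re-run in a shell even when arguments contain spaces:

import shlex

qemuargs = ['qemu-system-x86_64', '-name', 'my vm',
            '-append', 'console=ttyS0 psmouse.proto=exps TERM=xterm-256color']
print(' '.join(shlex.quote(a) for a in qemuargs))
# qemu-system-x86_64 -name 'my vm' -append 'console=ttyS0 psmouse.proto=exps TERM=xterm-256color'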
Example #52
0
def call(args, input=None, universal_newlines=False):
    print('+ ' + ' '.join(shlex.quote(a) for a in args))
    subprocess.run(args,
                   input=input,
                   universal_newlines=universal_newlines,
                   check=True)
Example #53
0
def run_on_master(
    cluster,
    system_paasta_config,
    cmd_parts,
    timeout=None,
    err_code=-1,
    graceful_exit=False,
    stdin=None,
):
    """Find connectable master for :cluster: and :system_paasta_config: args and
    invoke command from :cmd_parts:, wrapping it in ssh call.

    :returns (exit code, output)

    :param cluster: cluster to find master in
    :param system_paasta_config: system configuration to lookup master data
    :param cmd_parts: passed into paasta_tools.utils._run as command along with
        ssh bits
    :param timeout: see paasta_tools.utils._run documentation (default: None)
    :param err_code: code to return along with error message when something goes
        wrong (default: -1)
    :param graceful_exit: wrap command in a bash script that waits for input and
        kills the original command; trap SIGINT and send newline into stdin
    """
    try:
        master = connectable_master(cluster, system_paasta_config)
    except NoMasterError as e:
        return (err_code, str(e))

    if graceful_exit:
        # signals don't travel over ssh, kill process when anything lands on stdin instead
        cmd_parts.append(
            # send process to background and capture its pid
            '& p=$!; ' +
            # wait for stdin with timeout in a loop, exit when original process finished
            'while ! read -t1; do ! kill -0 $p 2>/dev/null && kill $$; done; '
            +
            # kill original process if loop finished (something on stdin)
            'kill $p; wait', )
        stdin = subprocess.PIPE
        stdin_interrupt = True
        popen_kwargs = {'preexec_fn': os.setsid}
    else:
        stdin_interrupt = False
        popen_kwargs = {}

    cmd_parts = [
        'ssh', '-q', '-t', '-t', '-A', master,
        "sudo /bin/bash -c %s" % quote(' '.join(cmd_parts))
    ]

    log.debug("Running %s" % ' '.join(cmd_parts))

    return _run(
        cmd_parts,
        timeout=timeout,
        stream=True,
        stdin=stdin,
        stdin_interrupt=stdin_interrupt,
        popen_kwargs=popen_kwargs,
    )
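A hedged sketch (toy hostname and command, not PaaSTA code) of the final ssh invocation that run_on_master() assembles: the whole remote command is collapsed into one shell word with quote(), so the remote `sudo /bin/bash -c` receives it intact:

from shlex import quote

master = 'paasta-master.example.com'  # hypothetical master hostname
cmd_parts = ['paasta_metastatus', '-vv']

ssh_cmd = ['ssh', '-q', '-t', '-t', '-A', master,
           "sudo /bin/bash -c %s" % quote(' '.join(cmd_parts))]
print(' '.join(ssh_cmd))
# ssh -q -t -t -A paasta-master.example.com sudo /bin/bash -c 'paasta_metastatus -vv'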
Example #54
0
def get_or_create_head_node(config,
                            config_file,
                            no_restart,
                            restart_only,
                            yes,
                            override_cluster_name,
                            _provider=None,
                            _runner=subprocess):
    """Create the cluster head node, which in turn creates the workers."""
    provider = (_provider or get_node_provider(config["provider"],
                                               config["cluster_name"]))

    config = copy.deepcopy(config)
    raw_config_file = config_file  # used for printing to the user
    config_file = os.path.abspath(config_file)
    try:
        head_node_tags = {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
        }
        nodes = provider.non_terminated_nodes(head_node_tags)
        if len(nodes) > 0:
            head_node = nodes[0]
        else:
            head_node = None

        if not head_node:
            cli_logger.confirm(yes, "No head node found. "
                               "Launching a new cluster.",
                               _abort=True)
            cli_logger.old_confirm("This will create a new cluster", yes)
        elif not no_restart:
            cli_logger.old_confirm("This will restart cluster services", yes)

        if head_node:
            if restart_only:
                cli_logger.confirm(
                    yes, "Updating cluster configuration and "
                    "restarting the cluster Ray runtime. "
                    "Setup commands will not be run due to `{}`.\n",
                    cf.bold("--restart-only"),
                    _abort=True)
            elif no_restart:
                cli_logger.print(
                    "Cluster Ray runtime will not be restarted due "
                    "to `{}`.", cf.bold("--no-restart"))
                cli_logger.confirm(yes, "Updating cluster configuration and "
                                   "running setup commands.",
                                   _abort=True)
            else:
                cli_logger.print(
                    "Updating cluster configuration and running full setup.")
                cli_logger.confirm(
                    yes,
                    cf.bold("Cluster Ray runtime will be restarted."),
                    _abort=True)
        cli_logger.newline()

        # TODO(ekl) this logic is duplicated in node_launcher.py (keep in sync)
        head_node_config = copy.deepcopy(config["head_node"])
        if "head_node_type" in config:
            head_node_tags[TAG_RAY_USER_NODE_TYPE] = config["head_node_type"]
            head_node_config.update(config["available_node_types"][
                config["head_node_type"]]["node_config"])

        launch_hash = hash_launch_conf(head_node_config, config["auth"])
        if head_node is None or provider.node_tags(head_node).get(
                TAG_RAY_LAUNCH_CONFIG) != launch_hash:
            with cli_logger.group("Acquiring an up-to-date head node"):
                if head_node is not None:
                    cli_logger.print(
                        "Currently running head node is out-of-date with "
                        "cluster configuration")
                    cli_logger.print(
                        "hash is {}, expected {}",
                        cf.bold(
                            provider.node_tags(head_node).get(
                                TAG_RAY_LAUNCH_CONFIG)), cf.bold(launch_hash))
                    cli_logger.confirm(yes, "Relaunching it.", _abort=True)
                    cli_logger.old_confirm(
                        "Head node config out-of-date. It will be terminated",
                        yes)

                    cli_logger.old_info(
                        logger, "get_or_create_head_node: "
                        "Shutting down outdated head node {}", head_node)

                    provider.terminate_node(head_node)
                    cli_logger.print("Terminated head node {}", head_node)

                cli_logger.old_info(
                    logger,
                    "get_or_create_head_node: Launching new head node...")

                head_node_tags[TAG_RAY_LAUNCH_CONFIG] = launch_hash
                head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
                    config["cluster_name"])
                provider.create_node(head_node_config, head_node_tags, 1)
                cli_logger.print("Launched a new head node")

                start = time.time()
                head_node = None
                with cli_logger.timed("Fetching the new head node"):
                    while True:
                        if time.time() - start > 50:
                            cli_logger.abort(
                                "Head node fetch timed out.")  # todo: msg
                            raise RuntimeError("Failed to create head node.")
                        nodes = provider.non_terminated_nodes(head_node_tags)
                        if len(nodes) == 1:
                            head_node = nodes[0]
                            break
                        time.sleep(1)
                cli_logger.newline()

        with cli_logger.group(
                "Setting up head node",
                _numbered=("<>", 1, 1),
                # cf.bold(provider.node_tags(head_node)[TAG_RAY_NODE_NAME]),
                _tags=dict()):  # add id, ARN to tags?

            # TODO(ekl) right now we always update the head node even if the
            # hash matches.
            # We could prompt the user for what they want to do here.
            # No need to pass in cluster_sync_files because we use this
            # hash to set up the head node
            (runtime_hash, file_mounts_contents_hash) = hash_runtime_conf(
                config["file_mounts"], None, config)

            cli_logger.old_info(
                logger,
                "get_or_create_head_node: Updating files on head node...")

            # Rewrite the auth config so that the head
            # node can update the workers
            remote_config = copy.deepcopy(config)

            # drop proxy options if they exist, otherwise
            # head node won't be able to connect to workers
            remote_config["auth"].pop("ssh_proxy_command", None)

            if "ssh_private_key" in config["auth"]:
                remote_key_path = "~/ray_bootstrap_key.pem"
                remote_config["auth"]["ssh_private_key"] = remote_key_path

            # Adjust for new file locations
            new_mounts = {}
            for remote_path in config["file_mounts"]:
                new_mounts[remote_path] = remote_path
            remote_config["file_mounts"] = new_mounts
            remote_config["no_restart"] = no_restart

            # Now inject the rewritten config and SSH key into the head node
            remote_config_file = tempfile.NamedTemporaryFile(
                "w", prefix="ray-bootstrap-")
            remote_config_file.write(json.dumps(remote_config))
            remote_config_file.flush()
            config["file_mounts"].update(
                {"~/ray_bootstrap_config.yaml": remote_config_file.name})

            if "ssh_private_key" in config["auth"]:
                config["file_mounts"].update({
                    remote_key_path:
                    config["auth"]["ssh_private_key"],
                })
            cli_logger.print("Prepared bootstrap config")

            if restart_only:
                init_commands = []
                ray_start_commands = config["head_start_ray_commands"]
            elif no_restart:
                init_commands = config["head_setup_commands"]
                ray_start_commands = []
            else:
                init_commands = config["head_setup_commands"]
                ray_start_commands = config["head_start_ray_commands"]

            if not no_restart:
                warn_about_bad_start_command(ray_start_commands)

            updater = NodeUpdaterThread(
                node_id=head_node,
                provider_config=config["provider"],
                provider=provider,
                auth_config=config["auth"],
                cluster_name=config["cluster_name"],
                file_mounts=config["file_mounts"],
                initialization_commands=config["initialization_commands"],
                setup_commands=init_commands,
                ray_start_commands=ray_start_commands,
                process_runner=_runner,
                runtime_hash=runtime_hash,
                file_mounts_contents_hash=file_mounts_contents_hash,
                docker_config=config.get("docker"))
            updater.start()
            updater.join()

            # Refresh the node cache so we see the external ip if available
            provider.non_terminated_nodes(head_node_tags)

            if config.get("provider", {}).get("use_internal_ips",
                                              False) is True:
                head_node_ip = provider.internal_ip(head_node)
            else:
                head_node_ip = provider.external_ip(head_node)

            if updater.exitcode != 0:
                # todo: this does not follow the mockup and is not good enough
                cli_logger.abort("Failed to setup head node.")

                cli_logger.old_error(
                    logger, "get_or_create_head_node: "
                    "Updating {} failed", head_node_ip)
                sys.exit(1)

            cli_logger.old_info(
                logger, "get_or_create_head_node: "
                "Head node up-to-date, IP address is: {}", head_node_ip)

        monitor_str = "tail -n 100 -f /tmp/ray/session_*/logs/monitor*"
        if override_cluster_name:
            modifiers = " --cluster-name={}".format(
                quote(override_cluster_name))
        else:
            modifiers = ""

        if cli_logger.old_style:
            print("To monitor autoscaling activity, you can run:\n\n"
                  "  ray exec {} {}{}\n".format(config_file,
                                                quote(monitor_str), modifiers))
            print("To open a console on the cluster:\n\n"
                  "  ray attach {}{}\n".format(config_file, modifiers))

            print("To get a remote shell to the cluster manually, run:\n\n"
                  "  {}\n".format(
                      updater.cmd_runner.remote_shell_command_str()))

        cli_logger.newline()
        with cli_logger.group("Useful commands"):
            cli_logger.print("Monitor autoscaling with")
            cli_logger.print(cf.bold("  ray exec {}{} {}"), raw_config_file,
                             modifiers, quote(monitor_str))

            cli_logger.print("Connect to a terminal on the cluster head")
            cli_logger.print(cf.bold("  ray attach {}{}"), raw_config_file,
                             modifiers)
    finally:
        provider.cleanup()
Example #55
0
async def wrap_notarization_with_sudo(config,
                                      key_config,
                                      all_paths,
                                      path_attr="zip_path"):
    """Wrap the notarization requests with sudo.

    Apple creates a lockfile per user for notarization. To notarize concurrently,
    we use sudo against a set of accounts (``config['local_notarization_accounts']``).

    Args:
        config (dict): the running config
        key_config (dict): the config for this signing key
        all_paths (list): the list of ``App`` objects
        path_attr (str, optional): the attribute that the zip path is under.
            Defaults to ``zip_path``

    Raises:
        IScriptError: on failure

    Returns:
        dict: uuid to log path

    """
    futures = []
    accounts = config["local_notarization_accounts"]
    counter = 0
    uuids = {}

    for app in all_paths:
        app.check_required_attrs([path_attr, "parent_dir"])

    while counter < len(all_paths):
        futures = []
        for account in accounts:
            app = all_paths[counter]
            app.notarization_log_path = f"{app.parent_dir}-notarization.log"
            bundle_id = get_bundle_id(key_config["base_bundle_id"],
                                      counter=str(counter))
            zip_path = getattr(app, path_attr)
            # XXX potentially run the notarization + get_uuid_from_log in a
            #     helper function per app, so we can retry them individually on
            #     error. That would also let us record the path per UUID,
            #     should we need that complexity later.
            #     Not doing that now, so notarization errors are more visible.
            base_cmdln = " ".join([
                "xcrun",
                "altool",
                "--notarize-app",
                "-f",
                zip_path,
                "--primary-bundle-id",
                '"{}"'.format(bundle_id),
                "-u",
                key_config["apple_notarization_account"],
                "--asc-provider",
                key_config["apple_asc_provider"],
                "--password",
            ])
            cmd = [
                "sudo", "su", account, "-c", base_cmdln + " {}".format(
                    shlex.quote(key_config["apple_notarization_password"]))
            ]
            log_cmd = ["sudo", "su", account, "-c", base_cmdln + " ********"]
            futures.append(
                asyncio.ensure_future(
                    retry_async(
                        run_command,
                        args=[cmd],
                        kwargs={
                            "log_path": app.notarization_log_path,
                            "log_cmd": log_cmd,
                            "exception": IScriptError
                        },
                        retry_exceptions=(IScriptError, ),
                        attempts=10,
                    )))
            counter += 1
            if counter >= len(all_paths):
                break
        await raise_future_exceptions(futures)
    for app in all_paths:
        uuids[get_uuid_from_log(
            app.notarization_log_path)] = app.notarization_log_path
    return uuids
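A short illustration (made-up account and password, not iscript code) of the quoting detail above: the notarization password is shell-quoted into the real command, while the logged command substitutes a mask so the secret never reaches the log file:

import shlex

base_cmdln = "xcrun altool --notarize-app -u account@example.com --password"
password = "s3cr3t with spaces"  # hypothetical secret

cmd = ["sudo", "su", "notary01", "-c", base_cmdln + " " + shlex.quote(password)]
log_cmd = ["sudo", "su", "notary01", "-c", base_cmdln + " ********"]

print(cmd[-1])      # ... --password 's3cr3t with spaces'
print(log_cmd[-1])  # ... --password ********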
Example #56
0
    def _safe_load_file(self, file_path):
        '''Takes a file path and loads it in a safe way, avoiding possible
        injections'''

        return os.path.expanduser(shlex.quote(file_path))
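A tiny demonstration (not from the project above) of what shlex.quote does to a hostile path in this kind of helper: anything containing shell metacharacters is wrapped in single quotes so it cannot be interpreted by a shell:

import shlex

for p in ["notes.txt", "my file.txt", "a;rm -rf ~.txt"]:
    print(shlex.quote(p))
# notes.txt
# 'my file.txt'
# 'a;rm -rf ~.txt'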
Example #57
0
    def insert_sosflow(self, sosd_path, sos_analysis_path, run_path, ppn):
        """Insert a new component at start of list to launch sosflow daemon.
        Should be called only once."""
        assert self.run_components[0].name != 'sosflow'

        # sos_args must be calculated before adding sosflow as a RunComponent,
        # as get_total_nodes() needs to return only application nodes and not
        # any nodes required by sosflow.

        num_listeners = self._get_total_sosflow_component_nodes()
        # return if no components are setup to use sosflow. That is,
        # sosflow=False in `codes` for all components
        if num_listeners == 0:
            return

        # From Kevin Huck, U of Oregon
        max_listeners_per_aggregator = 64
        num_aggregators = math.ceil(num_listeners /
                                    max_listeners_per_aggregator)

        # Add sos aggregators to be run
        #   common aggregator parameters
        sos_args = [
            '-l',
            str(num_listeners), '-a',
            str(num_aggregators), '-w',
            shlex.quote(run_path)
        ]
        sos_cmd = ' '.join([sosd_path] + sos_args)
        sos_fork_cmd = sos_cmd + ' -k @LISTENER_RANK@ -r listener'

        #   now add each aggregator, starting with the analysis aggregator
        listener_node_offset = 0
        for i in range(num_aggregators):
            sosd_args = sos_args + [
                '-k',
                str(i),
                '-r',
                'aggregator',
            ]

            rc_name = 'sosflow_aggregator_' + str(i)
            rc_exe_path = sosd_path

            # If sos analysis is enabled, the first aggregator should be
            # the sos analysis script instead of a plain sosd aggregator.
            if i == 0 and self.sosflow_analysis:
                rc_name = "sosflow_analysis"
                rc_exe_path = sos_analysis_path
                sosd_args = [sosd_path] + sosd_args

            self.node_layout.add_node({rc_name: 1})

            rc = RunComponent(rc_name,
                              rc_exe_path,
                              sosd_args,
                              nprocs=1,
                              sleep_after=5,
                              working_dir=self.run_path)
            rc.env['sos_cmd'] = sos_cmd
            rc.env['SOS_FORK_COMMAND'] = sos_fork_cmd
            rc.env['SOS_CMD_PORT'] = '22500'
            rc.env['SOS_EVPATH_MEETUP'] = run_path
            rc.env['TAU_SOS'] = '1'
            self.run_components.insert(i, rc)

            listener_node_offset += 1

        # add env vars to each run, including sosflow daemon
        # NOTE: not sure how many if any are required for sosd, but
        # should not hurt to have them, and simplifies the offset
        # calculation
        for rc in self.run_components:
            # ignore component if not setup to use sosflow
            if not rc.linked_with_sosflow:
                continue

            # TODO: is this actually used directly?
            rc.env['sos_cmd'] = sos_cmd
            rc.env['SOS_FORK_COMMAND'] = sos_fork_cmd

            code_node = self.node_layout.get_node_containing_code(rc.name)

            # TODO: we don't yet know how SOSFLOW will support apps that
            # do node sharing, so for now require that there is no
            # sharing.
            assert len(code_node) == 1

            code_procs_per_node = code_node[rc.name]
            code_nodes = int(math.ceil(rc.nprocs / code_procs_per_node))

            # Set the TCP port that the listener will listen to,
            # and the port that clients will attempt to connect to.
            rc.env['SOS_CMD_PORT'] = '22500'

            # Set the directory where the SOS listeners and aggregators
            # will use to establish EVPath links to each other
            rc.env['SOS_EVPATH_MEETUP'] = run_path

            # Tell TAU that it should connect to SOS
            # and send TAU data to SOS when adios_close(),
            # adios_advance_step() calls are made,
            # and when the application terminates.
            rc.env['TAU_SOS'] = '1'

            # Tell SOS how many application ranks per node there are
            # How do you get this information?
            # TODO: This will change when we have the ability to set a
            # different number of procs per node
            rc.env['SOS_APP_RANKS_PER_NODE'] = str(code_procs_per_node)

            # Tell SOS what 'rank' its listeners should start with
            # the aggregator was 'rank' 0, so this node's listener will be 1
            # This offset is the node count where this fob component starts
            rc.env['SOS_LISTENER_RANK_OFFSET'] = str(listener_node_offset)

            # TODO: this assumes node exclusive. To support node sharing
            # with custom layouts, will need to know layout here and
            # calculate actual node usage. This potentially duplicates
            # functionality needed in the workflow; they should eventually
            # converge so they are using the same model.
            listener_node_offset += code_nodes
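A worked example (values invented) of the aggregator arithmetic used in insert_sosflow: with at most 64 listeners per aggregator, the number of aggregators is the ceiling of listeners divided by 64:

import math

max_listeners_per_aggregator = 64
for num_listeners in (1, 64, 65, 200):
    num_aggregators = math.ceil(num_listeners / max_listeners_per_aggregator)
    print(num_listeners, '->', num_aggregators)
# 1 -> 1, 64 -> 1, 65 -> 2, 200 -> 4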
Example #58
0
def archive_file_scan(path, extensions, launch_prefix=""):

    ext = os.path.splitext(path)[1][1:].lower()
    # print(path)
    # print(ext)
    try:
        if ext == 'rar':
            matches = 0
            count = 0
            line = launch_prefix + "unrar lb -p- " + shlex.quote(
                path) + " " + shlex.quote(os.path.dirname(path)) + os.sep
            result = subprocess.run(shlex.split(line), stdout=subprocess.PIPE)
            file_list = result.stdout.decode("utf-8", 'ignore').split("\n")
            # print(file_list)
            for fi in file_list:
                for ty in extensions:
                    if fi[len(ty) * -1:].lower() == ty:
                        matches += 1
                        break
                    elif is_ignorable_file(fi):
                        count -= 1
                        break
                    elif is_music_related(fi):
                        matches += 5
                count += 1
            if count > 200:
                #print("RAR archive has many files")
                #print("   --- " + path)
                return 0
            if matches == 0:
                #print("RAR archive does not appear to contain audio files")
                #print("   --- " + path)
                return 0
            if count == 0:
                #print("Archive has no files")
                #print("   --- " + path)
                return 0

        elif ext == '7z':
            matches = 0
            count = 0
            line = launch_prefix + "7z l " + shlex.quote(
                path)  # + " " + shlex.quote(os.path.dirname(path)) + os.sep
            result = subprocess.run(shlex.split(line), stdout=subprocess.PIPE)
            file_list = result.stdout.decode("utf-8", 'ignore').split("\n")
            # print(file_list)

            for fi in file_list:

                if '....A' not in fi:
                    continue
                for ty in extensions:
                    if fi[len(ty) * -1:].lower() == ty:
                        matches += 1
                        break
                    elif is_ignorable_file(fi):
                        count -= 1
                        break
                    elif is_music_related(fi):
                        matches += 5
                count += 1

            if count > 200:
                #print("7z archive has many files")
                #print("   --- " + path)
                return 0
            if matches == 0:
                #print("7z archive does not appear to contain audio files")
                #print("   --- " + path)
                return 0
            if count == 0:
                #print("7z archive has no files")
                #print("   --- " + path)
                return 0

        elif ext == "zip":

            zip_ref = zipfile.ZipFile(path, 'r')
            matches = 0
            count = 0
            #print(zip_ref.namelist())
            for fi in zip_ref.namelist():
                for ty in extensions:
                    if fi[len(ty) * -1:].lower() == ty:
                        matches += 1
                        break
                    elif is_ignorable_file(fi):
                        count -= 1
                        break
                    elif is_music_related(fi):
                        matches += 5
                count += 1
            if count == 0:
                #print("Archive has no files")
                #print("   --- " + path)
                return 0
            if count > 300:
                #print("Zip archive has many files")
                #print("   --- " + path)
                return 0
            if matches == 0:
                #print("Zip archive does not appear to contain audio files")
                #print("   --- " + path)
                return 0
        else:
            return 0

    except:
        print("Archive test error")

        return 0

    if count == 0:
        return 0

    ratio = matches / count
    if count < 5 and matches > 0:
        ratio = 100
    return ratio
Example #59
0
def main():
    arg_parser = argparse.ArgumentParser()

    arg_parser.add_argument(
        '--llvm_version',
        help='Version number MAJOR.MINOR of the LLVM toolchain',
        required=True)

    arg_parser.add_argument(
        '--disassembler',
        help='Path to disassembler, or just "binja", if installed.',
        required=True)

    arg_parser.add_argument(
        '--workspace_dir',
        help='Directory in which intermediate and final files are placed',
        required=True)

    arg_parser.add_argument('--binary',
                            help='Path to the binary to be lifted',
                            required=True)

    arg_parser.add_argument('--clang',
                            help='Path to clang, if not using remill-clang',
                            required=False,
                            default="")

    arg_parser.add_argument('--dry_run',
                            help='Should the actual commands be executed?',
                            default=False,
                            required=False)

    arg_parser.add_argument('--legacy_mode',
                            help='Are we producing legacy mode bitcode?',
                            default=False,
                            required=False,
                            action='store_true')

    arg_parser.add_argument(
        '--extra_args',
        help=
        'A space-delimited list of any extra arguments to pass to the lifter.',
        default="",
        required=False)

    args, command_args = arg_parser.parse_known_args()

    # Set up the workspace.
    args.workspace_dir = os.path.realpath(args.workspace_dir)
    bin_dir = os.path.join(args.workspace_dir, 'bin')
    lib_dir = os.path.join(args.workspace_dir, 'lib')
    obj_dir = os.path.join(args.workspace_dir, 'obj')
    lifted_obj_dir = os.path.join(args.workspace_dir, 'lifted_obj')
    cfg_dir = os.path.join(args.workspace_dir, 'cfg')
    bc_dir = os.path.join(args.workspace_dir, 'bc')
    log_dir = os.path.join(args.workspace_dir, 'log')

    print("mkdir -p {}".format(args.workspace_dir))
    print("mkdir -p {}".format(bin_dir))
    print("mkdir -p {}".format(lifted_obj_dir))
    print("mkdir -p {}".format(lib_dir))
    print("mkdir -p {}".format(obj_dir))
    print("mkdir -p {}".format(cfg_dir))
    print("mkdir -p {}".format(bc_dir))
    print("mkdir -p {}".format(log_dir))

    make_directory(args.workspace_dir)
    make_directory(bin_dir)
    make_directory(lifted_obj_dir)
    make_directory(lib_dir)
    make_directory(obj_dir)
    make_directory(cfg_dir)
    make_directory(bc_dir)
    make_directory(log_dir)

    path = os.path.realpath(args.binary)
    path_hash = hash_file(path)

    binary = os.path.join(obj_dir, path_hash)
    lifted_binary = os.path.join(lifted_obj_dir, path_hash)
    cfg = os.path.join(cfg_dir, "{}.cfg".format(path_hash))
    bitcode = os.path.join(bc_dir, "{}.bc".format(path_hash))
    log = os.path.join(log_dir, "{}.log".format(path_hash))

    # Copy the binary into the workspace's object directory.
    print("cp {} {}".format(path, binary))
    print("chmod a+x {}".format(binary))
    if not os.path.isfile(binary):
        shutil.copyfile(args.binary, binary)
        make_executable(binary)

    # Copy the shared libraries into the workspace's object directory, and then
    # add symbolic links from the workspace's library directory into the object
    # directory.
    libs = []
    for name, path in binary_libraries(binary):
        path_hash = hash_file(path)
        library = os.path.join(obj_dir, "{}.so".format(path_hash))

        if not os.path.isfile(library):
            shutil.copyfile(path, library)
            make_executable(library)

        sym_name = os.path.join(lib_dir, name)
        if os.path.exists(sym_name):
            os.remove(sym_name)

        try:
            os.symlink(library, sym_name)
        except:
            pass

        print("cp {} {}".format(path, library))
        print("chmod a+x {}".format(library))
        print("rm {}".format(sym_name))
        print("ln {} {}".format(library, sym_name))

        libs.append(sym_name)

    os_name = 'linux'
    binary_name = os.path.basename(args.binary)
    address_size, arch, is_pie = binary_info(binary)

    # Disassembler settings
    da = ''
    if 'binja' == args.disassembler or 'binaryninja' == args.disassembler:
        da = 'binja'
    else:
        ida_version = {
            "x86_avx": "idal",
            "amd64_avx": "idal64",
            "aarch64": "idal64"
        }[arch]
        da = quote(os.path.join(args.disassembler, ida_version))

    # Disassemble the binary.
    disass_args = [
        'mcsema-disass', '--arch', arch, '--os', os_name, '--binary',
        quote(binary), '--output',
        quote(cfg), '--entrypoint', 'main', '--disassembler', da, '--log_file',
        quote(log)
    ]

    if is_pie:
        disass_args.append("--pie-mode")

    disass_args.extend(command_args)

    print(" ".join(disass_args))
    ret = subprocess.call(disass_args)
    if ret:
        return ret

    # Lift the binary.
    mcsema_lift_args = [
        'mcsema-lift-{}'.format(args.llvm_version), '--arch', arch, '--os',
        os_name, '--cfg', cfg, '--output', bitcode
    ]

    if args.extra_args != "":
        for arg in args.extra_args.split(' '):
            mcsema_lift_args.append(arg)

    if args.legacy_mode:
        mcsema_lift_args.append('--legacy_mode')

    print(" ".join(mcsema_lift_args))
    ret = subprocess.call(mcsema_lift_args)
    if ret:
        return ret

    # Not compiling a binary.
    if args.legacy_mode:
        return 0

    # Build up the command-line invocation to clang.
    clang_args = []

    if (args.clang != ""):
        clang_args = [os.path.join(args.clang)]
    else:
        clang_args = [
            os.path.join('remill-clang-{}'.format(args.llvm_version))
        ]

    clang_args += [
        '-rdynamic', is_pie and '-fPIC' or '', is_pie and '-pie' or '', '-o',
        lifted_binary, bitcode,
        '/usr/local/lib/libmcsema_rt{}-{}.a'.format(address_size,
                                                    args.llvm_version), '-lm'
    ]

    for lib in libs:
        clang_args.append(lib)

    # Compile back to an executable.
    print(" ".join(clang_args))
    ret = subprocess.call(clang_args)
    if ret:
        return ret

    # Create two scripts to run the original and native.
    run_native = os.path.join(bin_dir, binary_name)
    with open(run_native, "w") as f:
        f.write("""#!/usr/bin/env bash
LD_LIBRARY_PATH={} {} "$@"
""".format(lib_dir, binary))

    run_lifted = os.path.join(bin_dir, "{}.lifted".format(binary_name))
    with open(run_lifted, "w") as f:
        f.write("""#!/usr/bin/env bash
LD_LIBRARY_PATH={} {} "$@"
""".format(lib_dir, lifted_binary))

    make_executable(run_native)
    make_executable(run_lifted)

    return 0
def _get_uncommented_code(path: pathlib.Path, *, iquotes_options: str,
                          compiler: str) -> bytes:
    command = """{} {} -fpreprocessed -dD -E {}""".format(
        compiler, iquotes_options, shlex.quote(str(path)))
    return subprocess.check_output(command, shell=True)