Example #1
def munge_command(inputvar, srcext, objextname, cmd):
    cmd = shlex.split(cmd.strip())
    munged = []
    # The first thing on the line will be the compiler itself; throw
    # that out.  Find dummy.srcext and dummy.objext, and substitute
    # appropriate Makefile variable names. Also, determine what objext
    # actually is.
    dummy_srcext = "dummy." + srcext
    objext = None
    for arg in cmd[1:]:
        if arg == dummy_srcext:
            munged.append(inputvar) # either $< or $^, depending
        elif arg.startswith("dummy."):
            munged.append("$@")
            objext = arg[len("dummy."):]
        else:
            if shellquote(arg) != arg:
                raise SystemExit("error: command {!r}: "
                                 "cannot put {!r} into a makefile"
                                 .format(cmd, arg))
            munged.append(arg)

    if not objext:
        raise SystemExit("error: command {!r}: failed to determine {}"
                         .format(cmd, objextname))

    return " ".join(munged), objext
Example #2
def run_cmd(cmd, env=None, cwd=None, allow_fail=False):
    cmd_str = " ".join(shellquote(arg) for arg in cmd)
    print("+ " + cmd_str)
    if allow_fail:
        subprocess.call(cmd, env=env, cwd=cwd)
    else:
        subprocess.check_call(cmd, env=env, cwd=cwd)
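
A hypothetical invocation of run_cmd above (command and file names invented); the quoting only affects the echoed line, since the list form is executed without a shell:

run_cmd(["tar", "-czf", "my archive.tgz", "src dir"])
# prints: + tar -czf 'my archive.tgz' 'src dir'
run_cmd(["false"], allow_fail=True)  # non-zero exit tolerated, no exception raised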
Example #3
    def test_even_more_moves(self):
        root = self.mkdtemp()
        self.watchmanCommand("watch", root)
        base = self.watchmanCommand("find", root, ".")
        clock = base["clock"]

        self.suspendWatchman()
        # Explicitly using the shell to run these commands
        # as the original test case wasn't able to reproduce
        # the problem with whatever sequence and timing of
        # operations was produced by the original php test
        subprocess.check_call(
            (
                "cd %s && "
                "mkdir d1 d2 && "
                "touch d1/a && "
                "mkdir d3 && "
                "mv d1 d2 d3 && "
                "mv d3/* . && "
                "mv d1 d2 d3 && "
                "mv d3/* . && "
                "mv d1/a d2"
            )
            % shellquote(root),
            shell=True,
        )
        self.resumeWatchman()
        self.assertFileList(root, files=["d1", "d2", "d2/a", "d3"], cursor=clock)
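
The %s interpolation above stays safe even for awkward temp paths because of shellquote; a quick sketch with an invented root:

from shlex import quote as shellquote

root = "/tmp/watchman test"
print("cd %s && mkdir d1 d2" % shellquote(root))
# -> cd '/tmp/watchman test' && mkdir d1 d2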
Example #4
    def run(self, options):
        "Run a command using your application's environment"
        self.set_env(self.read_env(options))

        cmd = ' '.join(shellquote(arg) for arg in options.command)
        p = Process(cmd, stdout=sys.stdout, stderr=sys.stderr)
        p.wait()
Example #5
    def build_cmd(self):
        default_command = self.default_index()
        con_info = self.connection_info()
        for project in self.conf:
            if project == '.defaults':
                continue
            for operation in self.conf[project]:
                if operation in self.allowed_operations:
                    unit = self.conf[project][operation]['unit']
                    count = self.conf[project][operation]['count']
                    raw_regex = self.conf[project][self.RAW_REGEX]
                    
                    if not raw_regex:
                        if project.startswith('.') or project.startswith(self.PROJ_PREFIX):
                            this_project = '^' + re.escape(project + '.') + '.*$'
                        else:
                            this_project = '^' + re.escape(self.PROJ_PREFIX + project + '.') + '.*$'
                    else:
                        this_project = project
                    # shellquote instead of hand-rolled single quotes, so a regex
                    # containing quote characters cannot break the command string
                    default_command = default_command + " --exclude " + shellquote(this_project)
                    self.curator_settings[operation].setdefault(unit, {}).setdefault(count, []).append(this_project)
                    self.logger.debug('Using [%s] [%d] for [%s]', unit, count, this_project)
                else:
                    if operation not in self.allowed_params:
                        self.logger.error('an unsupported or unknown operation ' + operation + ' was provided... Record skipped')
        
        self.commands.append(default_command)
        for operation in self.curator_settings:
            for unit in self.curator_settings[operation]:
                for value in self.curator_settings[operation][unit]:

                    # construct a regex alternation matching every project regex
                    # for this operation/time-unit (each entry was already escaped
                    # and anchored when it was added above)
                    # shellquote to protect any shell special chars in the constructed regex
                    tab_cmd = '/usr/bin/curator --loglevel ' + self.curator_log_level + ' ' \
                            + con_info + ' ' + operation + ' indices --timestring %Y.%m.%d' \
                            + ' --older-than ' + str(value) + ' --time-unit ' + unit \
                            + ' --regex ' \
                            + shellquote('(' + '|'.join(
                                self.curator_settings[operation][unit][value]) + ')')
                    self.commands.append(tab_cmd)
Example #6
    def default_index(self):
        """
        Default command is always present, even if an empty configuration is
        used. If no configuration is supplied, default indices are deleted
        every $CURATOR_DEFAULT_DAYS days.
        """
        default_command = ''
        if '.defaults' in self.conf and 'delete' in self.conf['.defaults']:
            unit = self.conf['.defaults']['delete']['unit']
            count = self.conf['.defaults']['delete']['count']
            base_default_cmd = '/usr/bin/curator --loglevel ' + self.curator_log_level + ' ' \
                    + self.connection_info() + ' delete indices --timestring %Y.%m.%d'
            default_command = base_default_cmd \
                    + ' --older-than ' + str(count) \
                    + ' --time-unit ' + unit \
                    + ' --exclude ' + shellquote('^' + re.escape('.searchguard.') + '.*$') \
                    + ' --exclude ' + shellquote('^' + re.escape('.kibana') + '.*$')
        return default_command
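
A worked example of the exclude-pattern construction used above (project name invented):

import re
from shlex import quote as shellquote

pattern = '^' + re.escape('myproject' + '.') + '.*$'
print(pattern)              # ^myproject\..*$
print(shellquote(pattern))  # '^myproject\..*$' -- safe to splice into the command string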
Example #7
def _print_env_diff(env):
    current_keys = set(os.environ.keys())
    wanted_env = set(env.keys())

    unset_keys = current_keys.difference(wanted_env)
    for k in sorted(unset_keys):
        print("+ unset %s" % k)

    added_keys = wanted_env.difference(current_keys)
    for k in wanted_env.intersection(current_keys):
        if os.environ[k] != env[k]:
            added_keys.add(k)

    for k in sorted(added_keys):
        if ("PATH" in k) and (os.pathsep in env[k]):
            print("+ %s=\\" % k)
            for elem in env[k].split(os.pathsep):
                print("+      %s%s\\" % (shellquote(elem), os.pathsep))
        else:
            print("+ %s=%s \\" % (k, shellquote(env[k])))
Example #8
    def run(self, options):
        "Run a command using your application's environment"
        self.set_env(self.read_env(options))

        if compat.ON_WINDOWS:
            # do not quote on Windows, subprocess will handle it for us
            # using the MSFT quoting rules
            cmd = options.command
        else:
            cmd = ' '.join(shellquote(arg) for arg in options.command)

        p = Process(cmd, stdout=sys.stdout, stderr=sys.stderr)
        p.wait()
Example #9
def run_cmd(cmd, env=None, cwd=None, allow_fail=False):
    print("---")
    try:
        cmd_str = " \\\n+      ".join(shellquote(arg) for arg in cmd)
    except TypeError:
        # eg: one of the elements is None
        raise RunCommandError("problem quoting cmd: %r" % cmd)

    if env:
        assert isinstance(env, Env)
        _print_env_diff(env)

        # Convert from our Env type to a regular dict.
        # This is needed because python3 looks up b'PATH' and 'PATH'
        # and emits an error if both are present.  In our Env type
        # we'll return the same value for both requests, but we don't
        # have duplicate potentially conflicting values which is the
        # spirit of the check.
        env = dict(env.items())

    if cwd:
        print("+ cd %s && \\" % shellquote(cwd))
        # Our long path escape sequence may confuse cmd.exe, so if the cwd
        # is short enough, strip that off.
        if is_windows() and (len(cwd) < 250) and cwd.startswith("\\\\?\\"):
            cwd = cwd[4:]

    print("+ %s" % cmd_str)

    if allow_fail:
        return subprocess.call(cmd, env=env, cwd=cwd)

    try:
        return subprocess.check_call(cmd, env=env, cwd=cwd)
    except (TypeError, ValueError, OSError) as exc:
        raise RunCommandError(
            "%s while running `%s` with env=%r\nos.environ=%r"
            % (str(exc), cmd_str, env, os.environ)
        )
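
A hypothetical transcript of run_cmd together with _print_env_diff (command, paths, and environment values all invented):

# run_cmd(["gcc", "-c", "my file.c"], env=build_env, cwd="/tmp/build dir")
# ---
# + unset OLD_VAR
# + CC=gcc \
# + cd '/tmp/build dir' && \
# + gcc \
# +      -c \
# +      'my file.c'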
Example #10
    def test_fishy(self):
        root = self.mkdtemp()

        os.mkdir(os.path.join(root, "foo"))
        self.touchRelative(root, "foo", "a")

        self.watchmanCommand("watch", root)
        base = self.watchmanCommand("find", root, ".")
        clock = base["clock"]

        self.suspendWatchman()
        # Explicitly using the shell to run these commands
        # as the original test case wasn't able to reproduce
        # the problem with whatever sequence and timing of
        # operations was produced by the original php test
        subprocess.check_call(
            "cd %s && mv foo bar && ln -s bar foo" % shellquote(root), shell=True
        )

        self.resumeWatchman()
        self.assertFileList(root, files=["bar", "bar/a", "foo"], cursor=clock)
Example #11
    problem_def = testset[args.problem]
    problem_validator = eval(problem_def['validator'])
    problem_cases = get_cases(problem_def, args.cases)
    program = get_program(args.program)

    if args.show_example:
        show_example(problem_def['defaults'], next(problem_cases)[1])
        sys.exit()

    failed_cases = []
    for case_num, case_def in problem_cases:
        print('Running case %d... ' % (case_num,), end='')
        case_ret = run_and_score_case(
            program, problem_def['defaults'], case_def, problem_validator)
        if case_ret is None:
            print('OK!')
        else:
            failed_cases.append(case_num)
            print('Failed:')
            print(case_ret)

    if failed_cases:
        print('\nSome test cases have failed. '
              'To rerun the failing cases execute:')
        testset_opt = ''
        if args.testset:
            testset_opt = ' --testset %s' % (shellquote(args.testset),)
        cases_opt = '--cases ' + ','.join([str(fc) for fc in failed_cases])
        print('python validator.py%s %s %s %s' %
              (testset_opt, cases_opt, args.problem, program))
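
With invented values, the rerun line printed above would read:

# python validator.py --testset 'extra set.json' --cases 2,5 maxflow './my solver' --fast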
Example #12
    -channel RGB +level-colors \#400000, -channel ALL
    \( +clone -channel RGB +level-colors \#a00000, -channel ALL -blur 0x{blurlen0} \)
    \( +clone -channel RGB +level-colors \#ff2000, -channel ALL -blur 0x{blurlen1} -motion-blur 0x{traillen1}+120
       -rotate 90 -wave -{wave1a},{wave1l} -rotate -90 \)
    \( -clone 0--1 -layers merge -channel RGB -channel ALL -motion-blur 0x{traillen2}+110
       -rotate 90 -wave {wave2a},{wave2l} -rotate -90 \)
    \( +clone -modulate 125,80,115.0 -channel A +level 0%,90% -channel ALL
       -resize {fac3pc}% -motion-blur 0x{traillen3}+100 -resize {fac3ipc}%
       -rotate 90 -roll +{rolllen3}+0 -wave {wave3a},{wave3l} -roll -{rolllen3}+0 -rotate -90 \)
    \( +clone -modulate 115,65,103.0 -channel A +level 0%,75% -channel ALL
       -resize {fac4pc}% -motion-blur 0x{traillen4}+90 -resize {fac4ipc}%
       -rotate 90 -roll +{rolllen4}+0 -wave {wave4a},{wave4l} -roll -{rolllen4}+0 -rotate -90 \)
    \( +clone -modulate 45,65,100.0 -channel A +level 0%,50% -channel ALL
       -resize {fac5pc}% -motion-blur 0x{traillen5}+90 -resize {fac5ipc}% """

                           # -geometry +0-{shiftuplen5}
                           """
       -rotate 90 -roll +{rolllen5}+0 -wave {wave5a},{wave5l} -roll -{rolllen5}+0 -rotate -90 \)
    -layers merge """.format(**values))

else:

    raise ValueError("Unknown format: '{}'".format(format))

print("Running:  {convert} {inputfile} {innercmd} {inputfile}".format(
    convert=convertexe,
    inputfile=inputfile,
    innercmd=" ".join([shellquote(x) for x in innercmd])))

subprocess.check_output([convertexe, inputfile] + innercmd + [inputfile])
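
Note the division of labour here: check_output receives an argument list, which needs no shell quoting at all; shellquote only protects the human-readable "Running:" line. A minimal sketch of the same pattern (file names invented, ImageMagick's convert assumed on PATH):

import subprocess
from shlex import quote as shellquote

cmd = ["convert", "in file.png", "-blur", "0x3", "in file.png"]
print("Running: " + " ".join(shellquote(x) for x in cmd))  # copy-pasteable log line
subprocess.check_output(cmd)  # list form: no shell involved, no quoting pitfalls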
Example #13
def _spawn_next_transfer(method, file, ip, port, username, ssh_private_key, eo,
                         reo, procs, psprocs, psdst):
    # type: (str, tuple, str, int, str, pathlib.Path, str, str, list,
    #        list, list) -> None
    """Spawn the next transfer given a file tuple
    :param str method: transfer method
    :param tuple file: file tuple
    :param str ip: ip address
    :param int port: port
    :param str username: username
    :param pathlib.Path ssh_private_key: ssh private key
    :param str eo: extra options
    :param str reo: rsync extra options
    :param list procs: process list
    :param list psprocs: split files process list
    :param list psdst: split files dstpath list
    """
    src = file[0]
    dst = file[1]
    begin = file[2]
    end = file[3]
    if method == 'multinode_scp':
        if begin is None and end is None:
            cmd = ('scp -o StrictHostKeyChecking=no '
                   '-o UserKnownHostsFile={} -p {} -i {} '
                   '-P {} {} {}@{}:"{}"'.format(os.devnull, eo,
                                                ssh_private_key, port,
                                                shellquote(src), username, ip,
                                                shellquote(dst)))
        else:
            cmd = ('ssh -T -x -o StrictHostKeyChecking=no '
                   '-o UserKnownHostsFile={} {} -i {} '
                   '-p {} {}@{} \'cat > "{}"\''.format(os.devnull, eo,
                                                       ssh_private_key, port,
                                                       username, ip,
                                                       shellquote(dst)))
    elif method == 'multinode_rsync+ssh':
        if begin is not None or end is not None:
            raise RuntimeError('cannot rsync with file offsets')
        cmd = ('rsync {} -e "ssh -T -x -o StrictHostKeyChecking=no '
               '-o UserKnownHostsFile={} {} -i {} -p {}" {} {}@{}:"{}"'.format(
                   reo, os.devnull, eo, ssh_private_key, port, shellquote(src),
                   username, ip, shellquote(dst)))
    else:
        raise ValueError('Unknown transfer method: {}'.format(method))
    if begin is None and end is None:
        procs.append(util.subprocess_nowait(cmd, shell=True))
    else:
        proc = util.subprocess_attach_stdin(cmd, shell=True)
        with open(src, 'rb') as f:
            f.seek(begin, 0)
            curr = begin
            while curr < end:
                buf = f.read(_MAX_READ_BLOCKSIZE_BYTES)
                if buf is None or len(buf) == 0:
                    break
                proc.stdin.write(buf)
                curr += len(buf)
            proc.stdin.close()
        psprocs.append(proc)
        dstsp = dst.split('.')
        if dstsp[-1].startswith(_FILE_SPLIT_PREFIX):
            dstpath = '.'.join(dstsp[:-1])
        else:
            dstpath = dst
        psdst.append(dstpath)
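
A small sketch of the split-suffix handling at the end of _spawn_next_transfer (the prefix value is assumed, not taken from the example):

_FILE_SPLIT_PREFIX = 'xsplit'  # assumed value
dst = 'backup/archive.tar.xsplit07'
dstsp = dst.split('.')             # ['backup/archive', 'tar', 'xsplit07']
if dstsp[-1].startswith(_FILE_SPLIT_PREFIX):
    print('.'.join(dstsp[:-1]))    # backup/archive.tar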
Example #14
        except ValidatorException as e:
            failed_cases.append(case_num)
            print('Failed:')
            print(str(e))

    print('\nValidation result: %d/%d cases pass.\n' % (
        len(ok_cases), len(ok_cases) + len(failed_cases)))

    tot_meas = {}
    for nc, meas in ok_cases:
        for k, v in meas.items():
            tot_meas[k] = tot_meas.get(k, 0) + v
    for k, v in tot_meas.items():
        print("For passing cases total %s: %s" % (k, v))

    if failed_cases:
        print('\nSome test cases have failed. '
              'To rerun the failing cases execute:')
        misc_opts = ''
        if args.verbose:
            misc_opts = ' --verbose'
        if args.timeout_multiplier:
            misc_opts += ' --timeout-multiplier ' + args.timeout_multiplier
        if args.testset:
            misc_opts = '%s --testset %s' % (
                misc_opts, shellquote(args.testset),)
        cases_opt = '--cases ' + ','.join([str(fc) for fc in failed_cases])
        print('python validator.py%s %s %s %s' %
              (misc_opts, cases_opt, args.problem, program))

Example #15
base_default_cmd = '/usr/bin/curator --loglevel ' + curlvl + ' ' + connection_info + ' delete indices --timestring %Y.%m.%d'
default_command = base_default_cmd + ' --older-than ' + str(default_value) + ' --time-unit ' + default_time_unit + ' --exclude .searchguard* --exclude .kibana*'

proj_prefix = 'project.'

for project in decoded:
    if project == '.defaults':
        continue
    for operation in decoded[project]:
        if operation in allowed_operations:
            for unit in decoded[project][operation]:
                value = int(decoded[project][operation][unit])

                if unit in allowed_units:
                    default_command = default_command + " --exclude " + shellquote(re.escape(project + '.') + '*')

                    if unit.lower() == "weeks":
                        unit = "days"
                        value = value * 7

                    if project.startswith('.') or project.startswith(proj_prefix):
                        this_project = project
                    else:
                        this_project = proj_prefix + project
                    curator_settings[operation].setdefault(unit, {}).setdefault(value, []).append(this_project)
                else:
                    if unit.lower() == "hours":
                        logger.error('time unit "hours" is not supported because index-level granularity is in days')
                    else:
                        logger.error('an unknown time unit of ' + unit + ' was provided... Record skipped')
Example #16
def run_cmd(cmd, env=None, cwd=None):
    cmd_str = " ".join(shellquote(arg) for arg in cmd)
    print("+ " + cmd_str)
    subprocess.check_call(cmd, env=env, cwd=cwd)
Example #17
    def log_and_spawn(self, cmd, spawn_fn=None):
        # a default argument cannot reference self, so resolve it at call time
        spawn_fn = spawn_fn or self.compiler.spawn
        print("Running:", " ".join([shellquote(x) for x in cmd]))
        spawn_fn(cmd)
Example #18
def get_program(args):
    return ' '.join([shellquote(a) for a in args])
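
Hypothetical use of get_program (arguments invented):

print(get_program(['./my solver', '--fast']))  # -> './my solver' --fast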
Example #19
            failed_cases.append(case_num)
            print('Failed:')
            print(str(e))

    print('\nValidation result: %d/%d cases pass.\n' %
          (len(ok_cases), len(ok_cases) + len(failed_cases)))

    tot_meas = {}
    for nc, meas in ok_cases:
        for k, v in meas.items():
            tot_meas[k] = tot_meas.get(k, 0) + v
    for k, v in tot_meas.items():
        print("For passing cases total %s: %s" % (k, v))

    if failed_cases:
        print('\nSome test cases have failed. '
              'To rerun the failing cases execute:')
        misc_opts = ''
        if args.verbose:
            misc_opts = ' --verbose'
        if args.timeout_multiplier:
            misc_opts += ' --timeout-multiplier ' + args.timeout_multiplier
        if args.testset:
            misc_opts = '%s --testset %s' % (
                misc_opts,
                shellquote(args.testset),
            )
        cases_opt = '--cases ' + ','.join([str(fc) for fc in failed_cases])
        print('python validator.py%s %s %s %s' %
              (misc_opts, cases_opt, args.problem, program))
Example #20
def sys_login(username, password):
    # shellquote already wraps its argument in quotes; the extra literal double
    # quotes around it would be passed to echo verbatim
    return subprocess.check_output('echo ' + shellquote(password[:40]) + ' | sudo -S -u ' + shellquote(username[:25]) + ' groups', shell=True)[:-1]
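
Piping the password through echo still leaves it visible in process listings; a shell-free sketch of the same idea using subprocess.run (Python 3.5+; behaviour assumed equivalent, not taken from the example):

import subprocess

def sys_login_noshell(username, password):
    # feed the password to sudo -S on stdin instead of echo | sudo
    out = subprocess.run(
        ["sudo", "-S", "-u", username[:25], "groups"],
        input=(password[:40] + "\n").encode(),
        stdout=subprocess.PIPE, check=True).stdout
    return out.rstrip(b"\n")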
Example #21
def run_cmd(cmd, env=None, cwd=None):
    cmd_str = ' '.join(shellquote(arg) for arg in cmd)
    print('+ ' + cmd_str)
    subprocess.check_call(cmd, env=env, cwd=cwd)
Example #22
    problem_def = testset[args.problem]
    problem_validator = eval(problem_def['validator'])
    problem_cases = get_cases(problem_def, args.cases)
    program = get_program(args.program)

    if args.show_example:
        show_example(problem_def['defaults'], next(problem_cases)[1])
        sys.exit()

    failed_cases = []
    for case_num, case_def in problem_cases:
        print('Running case %d... ' % (case_num, ), end='')
        case_ret = run_and_score_case(program, problem_def['defaults'],
                                      case_def, problem_validator)
        if case_ret is None:
            print('OK!')
        else:
            failed_cases.append(case_num)
            print('Failed:')
            print(case_ret)

    if failed_cases:
        print('\nSome test cases have failed. '
              'To rerun the failing cases execute:')
        testset_opt = ''
        if args.testset:
            testset_opt = ' --testset %s' % (shellquote(args.testset), )
        cases_opt = '--cases ' + ','.join([str(fc) for fc in failed_cases])
        print('python validator.py%s %s %s %s' %
              (testset_opt, cases_opt, args.problem, program))
Example #23
                program, problem_def['defaults'], case_def, problem_validator)
            ok_cases.append((case_num, case_meas))
            print('OK!')
        except ValidatorException as e:
            failed_cases.append(case_num)
            print('Failed:')
            print(str(e))

    print('\nValidation result: %d/%d cases pass.\n' % (
        len(ok_cases), len(ok_cases) + len(failed_cases)))

    tot_meas = {}
    for nc, meas in ok_cases:
        for k, v in meas.items():
            tot_meas[k] = tot_meas.get(k, 0) + v
    for k, v in tot_meas.items():
        print("For passing cases total %s: %s" % (k, v))

    if failed_cases:
        print('\nSome test cases have failed. '
              'To rerun the failing cases execute:')
        misc_opts = ''
        if args.verbose:
            misc_opts = ' --verbose'
        if args.testset:
            misc_opts = '%s --testset %s' % (
                misc_opts, shellquote(args.testset),)
        cases_opt = '--cases ' + ','.join([str(fc) for fc in failed_cases])
        print('python validator.py%s %s %s %s' %
              (misc_opts, cases_opt, args.problem, program))
Example #25
def cromwell(
    uri,
    inputs,
    input_file,
    json_only,
    empty,
    check_quant,
    rundir=None,
    options_file=None,
    jarfile=None,
    config=None,
    path=None,
    **kwargs,
):
    path = path or []

    # load WDL document
    doc = load(uri, path, check_quant=check_quant, read_source=read_source)

    # validate the provided inputs and prepare Cromwell-style JSON
    target, _, input_json = runner_input(doc, inputs, input_file, empty)

    if json_only:
        print(json.dumps(input_json, indent=2))
        sys.exit(0)

    try:
        rundir = provision_run_dir(target.name, rundir)
    except FileExistsError:
        die("--dir must be an existing directory or one that can be created")
    os.makedirs(os.path.join(rundir, "cromwell"))

    # write the JSON inputs file
    input_json_filename = None
    print("Cromwell input: " + json.dumps(input_json, indent=2),
          file=sys.stderr)
    input_json_filename = os.path.join(rundir, "inputs.json")
    with open(input_json_filename, "w") as outfile:
        print(json.dumps(input_json, indent=2), file=outfile)

    # write Cromwell options
    cromwell_options = {
        "final_workflow_log_dir": os.path.join(rundir, "cromwell")
    }

    if options_file:
        with open(options_file, "r") as infile:
            more_options = json.load(infile)
            for k in more_options:
                if k not in [
                        "final_workflow_log_dir", "use_relative_output_paths"
                ]:
                    cromwell_options[k] = more_options[k]

    cromwell_options_filename = os.path.join(rundir, "cromwell",
                                             "options.json")
    with open(cromwell_options_filename, "w") as options_json:
        print(json.dumps(cromwell_options, indent=2), file=options_json)

    # setup Cromwell config file
    config_setting = None
    if config:
        config_setting = "-Dconfig.file={}".format(config)
    elif "CROMWELL_CONFIG" in os.environ:
        config_setting = "-Dconfig.file={}".format(
            os.getenv("CROMWELL_CONFIG"))

    # launch Cromwell
    jarpath = ensure_cromwell_jar(jarfile)
    cromwell_cmd = ["java", "-DLOG_LEVEL=warn", "-DLOG_MODE=pretty"]
    cromwell_cmd.extend([config_setting] if config_setting else [])
    cromwell_cmd.extend([
        "-jar",
        jarpath,
        "run",
        (os.path.abspath(uri) if "://" not in uri else uri),
        "-o",
        cromwell_options_filename,
        "-i",
        input_json_filename,
    ])

    for p in path:
        # FIXME issue #131
        cromwell_cmd.append("--imports")
        cromwell_cmd.append(p)
    print(" ".join(["+"] + [shellquote(s) for s in cromwell_cmd]),
          file=sys.stderr)
    proc = subprocess.Popen(cromwell_cmd,
                            cwd=os.path.join(rundir, "cromwell"),
                            stdout=subprocess.PIPE)

    # stream in Cromwell stdout, which mixes a bunch of stuff. tee it to stderr
    # while recording it so we can go back to look for the output JSON later.
    cromwell_output_lines = []
    while proc.poll() is None:
        line = proc.stdout.readline()
        if line:
            line = str(line, "utf-8").rstrip()
            print(line, file=sys.stderr)
            cromwell_output_lines.append(line)
    assert isinstance(proc.returncode, int)

    # deal with Cromwell outputs

    # remove world-write permissions from created temp files
    subprocess.call(["chmod", "-Rf", "o-w", rundir])

    if proc.returncode == 0:
        # sniff for the outputs JSON as the last subsequence of stdout lines
        # delimited by { and }
        last_lbrace = None
        last_rbrace = None
        try:
            last_lbrace = max(loc
                              for loc, val in enumerate(cromwell_output_lines)
                              if val == "{")
            last_rbrace = max(loc
                              for loc, val in enumerate(cromwell_output_lines)
                              if val == "}")
        except ValueError:
            pass
        try:
            if last_lbrace is None or last_rbrace is None or last_lbrace >= last_rbrace:
                raise KeyError
            outputs_json = json.loads("\n".join(
                cromwell_output_lines[last_lbrace:(last_rbrace + 1)]))
        except (KeyError, ValueError):
            die("failed to find outputs JSON in Cromwell standard output")
        runner_organize_outputs(target, outputs_json, rundir)

    sys.exit(proc.returncode)
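
A toy illustration of the brace-scan that recovers the outputs JSON (transcript lines invented):

import json

lines = ["[info] workflow finished", "{", '  "outputs": {"x": 1}', "}", "[info] exit 0"]
lb = max(i for i, v in enumerate(lines) if v == "{")   # 1
rb = max(i for i, v in enumerate(lines) if v == "}")   # 3
print(json.loads("\n".join(lines[lb:rb + 1])))         # {'outputs': {'x': 1}}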
Example #26
def extract_outtakes(filename, maxpercode=0):
    re_lengths = re.compile(br'debug_popen: len\(stdin\):(\d+) => returncode:(-?\d+)'
                            br' len\(stdout\):(\d+) len\(stderr\):(\d+)')
    valid_retcodes = set(range(100))
    retcounters = Counter()
    with open(filename, 'rb') as fp:
        count = 0
        cfgfile = None
        command = None
        while True:
            line = fp.readline()
            if not line:
                break
            prefix = b'debug_popen: '
            m = re_lengths.match(line)
            if m:
                lin, retcode, lout, lerr = [int(x) for x in m.groups()]
                if retcode in valid_retcodes:
                    continue
                if 1 <= maxpercode <= retcounters[retcode]:
                    continue
                retcounters[retcode] += 1
                otdir = os.path.join(DESTDIR, "c_%s_%s" % (str(retcode), str(count)))
                try:
                    os.makedirs(otdir)
                except OSError as exc:
                    if exc.errno != errno.EEXIST:
                        raise
                if command:
                    fname = os.path.join(otdir, 'callfmt.sh')
                    with open(fname, 'wb') as cfg:
                        if lin > 0:
                            command = command + b' < stdin.txt'
                        cfg.write(command + b'\n')
                    os.chmod(fname, 0o755)
                    command = None
                with open(os.path.join(otdir, 'retcode.txt'), 'wb') as cfg:
                    cfg.write(bytestr(str(retcode)) + b'\n')
                if cfgfile:
                    shutil.copyfile(cfgfile, os.path.join(otdir, os.path.basename(cfgfile)))
                    cfgfile = None
                for chan, chanlen in zip(['stdin', 'stdout', 'stderr'], [lin, lout, lerr]):
                    if chanlen == 0:
                        continue
                    line = fp.readline()
                    if line == b'debug_popenio: ' + bytestr(chan) + b':"""\\\n':
                        data = fp.read(chanlen)
                        with open(os.path.join(otdir, '%s.txt' % chan), 'wb') as cfp:
                            cfp.write(data)
                        fp.readline()  # This should be """
            elif line.startswith(prefix):
                line = line[len(prefix):]
                line = line.rstrip(b'\r\n')
                args = re_ansi.split(line)
                cmdargs = []
                if len(args) > 1 and not args[0] and not args[-1]:
                    for idx, arg in enumerate(args[1:-1]):
                        if idx % 2 == 1:
                            if arg == b' ':
                                continue
                            else:
                                write(b"Unexpected debug_popen line: " + line, fp=sys.stderr)
                        uarg = arg.decode('raw-unicode-escape')
                        # isabs, not abspath: abspath() is always truthy
                        if idx > 0 and os.path.isabs(arg) and os.path.isfile(arg):
                            cfgfile = uarg
                            uarg = os.path.basename(uarg)
                        cmdargs.append(shellquote(uarg).encode('raw-unicode-escape'))
                if cmdargs:
                    command = b' '.join(cmdargs)
                    count += 1
    return 0
Example #27
def cromwell(uri,
             inputs,
             json_only,
             empty,
             check_quant,
             rundir=None,
             path=None,
             **kwargs):
    path = path or []

    # load WDL document
    doc = WDL.load(uri, path, check_quant=check_quant, import_uri=import_uri)

    # validate the provided inputs
    target, input_dict = cromwell_input(doc, inputs, empty)

    if json_only:
        print(json.dumps(input_dict, indent=2))
        sys.exit(0)

    # provision a run directory
    if rundir:
        rundir = os.path.abspath(rundir)
        try:
            os.makedirs(rundir, exist_ok=False)
        except FileExistsError:
            die("workflow directory already exists: " + rundir)
    else:
        now = datetime.today()
        try:
            rundir = os.path.join(
                os.getcwd(),
                now.strftime("%Y%m%d_%H%M%S") + "_" + target.name)
            os.makedirs(rundir, exist_ok=False)
        except FileExistsError:
            rundir = os.path.join(
                os.getcwd(),
                now.strftime("%Y%m%d_%H%M%S_") + str(now.microsecond) + "_" +
                target.name,
            )
            os.makedirs(rundir, exist_ok=False)
    print("+ mkdir -p " + rundir, file=sys.stderr)
    os.makedirs(os.path.join(rundir, "cromwell"))

    # write the JSON inputs file
    input_json_filename = None
    print("Cromwell input: " + json.dumps(input_dict, indent=2),
          file=sys.stderr)
    input_json_filename = os.path.join(rundir, "inputs.json")
    with open(input_json_filename, "w") as input_json:
        print(json.dumps(input_dict, indent=2), file=input_json)

    # write Cromwell options
    cromwell_options = {
        "final_workflow_log_dir": os.path.join(rundir, "cromwell")
    }
    cromwell_options_filename = os.path.join(rundir, "cromwell",
                                             "options.json")
    with open(cromwell_options_filename, "w") as options_json:
        print(json.dumps(cromwell_options, indent=2), file=options_json)

    # launch Cromwell
    jarpath = ensure_cromwell_jar()
    cromwell_cmd = [
        "java",
        "-DLOG_LEVEL=warn",
        "-DLOG_MODE=pretty",
        "-jar",
        jarpath,
        "run",
        (os.path.abspath(uri) if "://" not in uri else uri),
        "-o",
        cromwell_options_filename,
        "-i",
        input_json_filename,
    ]
    for p in path:
        cromwell_cmd.append("--imports")
        cromwell_cmd.append(p)
    print(" ".join(["+"] + [shellquote(s) for s in cromwell_cmd]),
          file=sys.stderr)
    proc = subprocess.Popen(cromwell_cmd,
                            cwd=os.path.join(rundir, "cromwell"),
                            stdout=subprocess.PIPE)

    # stream in Cromwell stdout, which mixes a bunch of stuff. tee it to stderr
    # while recording it so we can go back to look for the output JSON later.
    cromwell_output_lines = []
    while proc.poll() is None:
        line = proc.stdout.readline()
        if line:
            line = str(line, "utf-8").rstrip()
            print(line, file=sys.stderr)
            cromwell_output_lines.append(line)
    assert isinstance(proc.returncode, int)

    # deal with Cromwell outputs

    # remove world-write permissions from created temp files
    subprocess.call(["chmod", "-Rf", "o-w", rundir])

    if proc.returncode == 0:
        # sniff for the outputs JSON as the last subsequence of stdout lines
        # delimited by { and }
        last_lbrace = None
        last_rbrace = None
        try:
            last_lbrace = max(loc
                              for loc, val in enumerate(cromwell_output_lines)
                              if val == "{")
            last_rbrace = max(loc
                              for loc, val in enumerate(cromwell_output_lines)
                              if val == "}")
        except ValueError:
            pass
        try:
            if last_lbrace is None or last_rbrace is None or last_lbrace >= last_rbrace:
                raise KeyError
            outputs_json = json.loads("\n".join(
                cromwell_output_lines[last_lbrace:(last_rbrace + 1)]))
        except (KeyError, ValueError):
            die("failed to find outputs JSON in Cromwell standard output")
        organize_cromwell_outputs(target, outputs_json, rundir)

    sys.exit(proc.returncode)
Example #28
if default_time_unit.lower() == "weeks":
    default_time_unit = "days"
    default_value = default_value * 7

base_default_cmd = '/usr/bin/curator --loglevel ' + curlvl + ' ' + connection_info + ' delete indices --timestring %Y.%m.%d'
default_command = base_default_cmd + ' --older-than ' + str(default_value) + ' --time-unit ' + default_time_unit + ' --exclude .searchguard* --exclude .kibana*'

for project in decoded:
    if project == '.defaults':
        continue
    for operation in decoded[project]:
        if operation in allowed_operations:
            for unit in decoded[project][operation]:
                value = int(decoded[project][operation][unit])

                if unit in allowed_units:
                    default_command = default_command + " --exclude " + shellquote(re.escape(project + '.') + '*')

                    if unit.lower() == "weeks":
                        unit = "days"
                        value = value * 7

                    curator_settings[operation].setdefault(unit, {}).setdefault(value, []).append(project)
                else:
                    if unit.lower() == "hours":
                        logger.error('time unit "hours" is not supported because index-level granularity is in days')
                    else:
                        logger.error('an unknown time unit of ' + unit + ' was provided... Record skipped')
        else:
            logger.error('an unsupported or unknown operation ' + operation + ' was provided... Record skipped')

my_cron = CronTab()
Example #29
def _singlenode_transfer(dest, src, dst, username, ssh_private_key, rls):
    # type: (DestinationSettings, str, str, str, pathlib.Path, dict) -> None
    """Transfer data to a single node
    :param DestinationSettings dest: destination settings
    :param str src: source path
    :param str dst: destination path
    :param str username: username
    :param pathlib.Path ssh_private_key: ssh private key
    :param dict rls: remote login settings
    """
    # get remote settings
    _rls = next(iter(rls.values()))
    ip = _rls.remote_login_ip_address
    port = _rls.remote_login_port
    del _rls
    # modify dst with relative dest
    if util.is_not_empty(dest.relative_destination_path):
        dst = '{}{}'.format(dst, dest.relative_destination_path)
        # create relative path on host
        logger.debug('creating remote directory: {}'.format(dst))
        dirs = ['mkdir -p {}'.format(dst)]
        mkdircmd = ('ssh -T -x -o StrictHostKeyChecking=no '
                    '-o UserKnownHostsFile={} -i {} -p {} {}@{} {}'.format(
                        os.devnull, ssh_private_key, port, username, ip,
                        util.wrap_commands_in_shell(dirs)))
        rc = util.subprocess_with_output(mkdircmd,
                                         shell=True,
                                         suppress_output=True)
        if rc == 0:
            logger.info('remote directories created on {}'.format(dst))
        else:
            logger.error('remote directory creation failed')
            return
        del dirs
    # determine if recursive flag must be set
    psrc = pathlib.Path(src)
    recursive = '-r' if psrc.is_dir() else ''
    # set command source path and adjust dst path
    if recursive:
        cmdsrc = '.'
    else:
        cmdsrc = shellquote(src)
    # transfer data
    if dest.data_transfer.method == 'scp':
        cmd = ('scp -o StrictHostKeyChecking=no '
               '-o UserKnownHostsFile={} -p {} {} -i {} '
               '-P {} {} {}@{}:"{}"'.format(
                   os.devnull, dest.data_transfer.scp_ssh_extra_options,
                   recursive, ssh_private_key, port, cmdsrc, username, ip,
                   shellquote(dst)))
    elif dest.data_transfer.method == 'rsync+ssh':
        cmd = ('rsync {} {} -e "ssh -T -x -o StrictHostKeyChecking=no '
               '-o UserKnownHostsFile={} {} -i {} -p {}" {} {}@{}:"{}"'.format(
                   dest.data_transfer.rsync_extra_options, recursive,
                   os.devnull, dest.data_transfer.scp_ssh_extra_options,
                   ssh_private_key, port, cmdsrc, username, ip,
                   shellquote(dst)))
    else:
        raise ValueError('Unknown transfer method: {}'.format(
            dest.data_transfer.method))
    logger.info('begin ingressing data from {} to {}'.format(src, dst))
    start = datetime.datetime.now()
    rc = util.subprocess_with_output(cmd,
                                     shell=True,
                                     cwd=src if recursive else None)
    diff = datetime.datetime.now() - start
    if rc == 0:
        logger.info(
            'finished ingressing data from {0} to {1} in {2:.2f} sec'.format(
                src, dst, diff.total_seconds()))
    else:
        logger.error(
            'data ingress from {} to {} failed with return code: {}'.format(
                src, dst, rc))
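
For reference, the scp branch above renders to a single string handed to the shell; with invented values (empty extra options, recursive transfer) it looks like:

# scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p  -r -i key.pem -P 22 . user@10.0.0.4:"'/data/dest dir'"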