Example #1
# Imports and test fixture assumed for running this snippet standalone
# (helper names as provided by DataLad's test utilities):
from datalad.api import Dataset
from datalad.core.local.run import format_command
from datalad.tests.utils import eq_, with_tempfile


@with_tempfile(mkdir=True)
def test_format_command_strip_leading_dashes(path):
    ds = Dataset(path).create()
    eq_(format_command(ds, ["--", "cmd", "--opt"]), "cmd --opt")
    eq_(format_command(ds, ["--"]), "")
    # Can repeat to escape.
    eq_(format_command(ds, ["--", "--", "ok"]), "-- ok")
    # String stays as is.
    eq_(format_command(ds, "--"), "--")
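The helper exercised above, format_command, is the same entry point the remaining examples build on: it normalizes the command (including the leading "--" stripping tested here) and fills str.format-style placeholders. A minimal sketch of calling it directly, assuming sequence values are joined with spaces as datalad run does; the dataset location is hypothetical:

from datalad.api import Dataset
from datalad.core.local.run import format_command

ds = Dataset("/tmp/format-command-demo").create()  # hypothetical location
cmd = format_command(ds, "cat {inputs} > {outputs}",
                     inputs=["a.dat", "b.dat"],
                     outputs=["merged.dat"])
# Expected (assuming space-joined sequences): "cat a.dat b.dat > merged.dat"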
Example #2
def _datalad_format_command(ds, spec):
    """Adjust `spec` to use `datalad run`-style formatting.

    Create "*_array" keys and format commands with DataLad's `format_command`.
    """
    from datalad.core.local.run import format_command
    # DataLad's GlobbedPaths _should_ be the same as ours, but let's use
    # DataLad's to avoid potential discrepancies with datalad-run's behavior.
    from datalad.core.local.run import GlobbedPaths

    batch_parameters = spec.get("_resolved_batch_parameters") or [{}]
    spec["_command_array"] = []
    spec["_inputs_array"] = []
    spec["_outputs_array"] = []
    for cp in batch_parameters:
        fmt_kwds = {}
        for key in ["inputs", "outputs"]:
            if key in spec:
                parametrized = [io.format(p=cp) for io in spec[key]]
                gp = GlobbedPaths(parametrized)
                spec["_{}_array".format(key)].append(gp.expand(dot=False))
                fmt_kwds[key] = gp
        fmt_kwds["p"] = cp
        cmd_str = spec.get("_container_command_str",
                           spec["_resolved_command_str"])
        spec["_command_array"].append(format_command(ds, cmd_str, **fmt_kwds))

    exinputs = spec.get("_extra_inputs", [])
    spec["_extra_inputs_array"] = [exinputs] * len(batch_parameters)
Example #3
    def fn(dset, results):
        ds_repo = dset.repo
        header = """\
#!/bin/sh
#
# This file was generated by running (the equivalent of)
#
#   datalad rerun --script={script}{since} {revision}
#
# in {ds}{path}\n"""
        ofh.write(header.format(
            script=script,
            since="" if since is None else " --since=" + since,
            revision=ds_repo.get_hexsha(revision),
            ds='dataset {} at '.format(dset.id) if dset.id else '',
            path=dset.path))

        for res in results:
            if res["status"] != "ok":
                yield res
                return

            if "run_info" not in res:
                continue

            run_info = res["run_info"]
            cmd = run_info["cmd"]

            expanded_cmd = format_command(
                dset, cmd,
                **dict(run_info,
                       dspath=dset.path,
                       pwd=op.join(dset.path, run_info["pwd"])))

            msg = res["run_message"]
            if msg == _format_cmd_shorty(expanded_cmd):
                msg = ''

            ofh.write(
                "\n" + "".join("# " + ln
                               for ln in msg.splitlines(True)) +
                "\n")
            commit_descr = ds_repo.describe(res["commit"])
            ofh.write('# (record: {})\n'.format(
                commit_descr if commit_descr else res["commit"]))

            ofh.write(expanded_cmd + "\n")
        if ofh is not sys.stdout:
            ofh.close()

        if ofh is sys.stdout:
            yield None
        else:
            yield get_status_dict(
                "run", ds=dset, status="ok",
                path=script,
                message=("Script written to %s", script))
Example #4
    def fn(dset, results):
        header = """\
#!/bin/sh
#
# This file was generated by running (the equivalent of)
#
#   datalad rerun --script={script}{since} {revision}
#
# in {ds}{path}\n"""
        ofh.write(header.format(
            script=script,
            since="" if since is None else " --since=" + since,
            revision=dset.repo.get_hexsha(revision),
            ds='dataset {} at '.format(dset.id) if dset.id else '',
            path=dset.path))

        for res in results:
            if res["status"] != "ok":
                yield res
                return

            if "run_info" not in res:
                continue

            run_info = res["run_info"]
            cmd = run_info["cmd"]

            expanded_cmd = format_command(
                dset, cmd,
                **dict(run_info,
                       dspath=dset.path,
                       pwd=op.join(dset.path, run_info["pwd"])))

            msg = res["run_message"]
            if msg == _format_cmd_shorty(expanded_cmd):
                msg = ''

            ofh.write(
                "\n" + "".join("# " + ln
                               for ln in msg.splitlines(True)) +
                "\n")
            commit_descr = dset.repo.describe(res["commit"])
            ofh.write('# (record: {})\n'.format(
                commit_descr if commit_descr else res["commit"]))

            ofh.write(expanded_cmd + "\n")
        if ofh is not sys.stdout:
            ofh.close()

        if ofh is sys.stdout:
            yield None
        else:
            yield get_status_dict(
                "run", ds=dset, status="ok",
                path=script,
                message=("Script written to %s", script))
Example #5
        op.join(dataset.path, "participants.tsv"),
        op.join(dataset.path, "participants.json")
    ]
    from datalad.core.local.run import format_command
    # TODO: This pattern is likely incomplete. Also: run probably needs to break
    # down format_command into smaller pieces (only mere substitutions are needed here).
    # TODO: Post-run issue: globs in outputs need to be evaluated AFTER execution
    # (again); the matching files may not exist yet.

    outputs = [subject_dir] + participants
    task = dataset.config.get("datalad.run.substitutions.bids-task")
    if task and task != "None":
        outputs.append(
            op.join(
                dataset.path,
                format_command(dataset,
                               "task-{bids-task}_{bids-modality}.json")))
    # we expect location to be a directory (with DICOMS somewhere beneath)
    if not op.isdir(location):
        raise ValueError("%s is not a directory" % location)

    from datalad.utils import with_pathsep
    # append location with /* to specify inputs for containers-run
    # we need to get those files, but nothing from within a possible .datalad
    # for example
    inputs = [with_pathsep(location) + "*", rel_spec_path]

    run_results = list()
    with patch.dict('os.environ', {
            'HIRNI_STUDY_SPEC': rel_spec_path,
            'HIRNI_SPEC2BIDS_SUBJECT': subject
    }):
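The {bids-task} and {bids-modality} placeholders in the format_command call above are not supplied as keyword arguments; they are expected to come from the dataset's datalad.run.substitutions.* configuration, the same keys the snippet queries with dataset.config.get. A rough, plain-Python illustration of that substitution step with invented values:

# Stand-in for the configured substitutions (values are invented); in the
# snippet above they would live under datalad.run.substitutions.bids-task
# and datalad.run.substitutions.bids-modality.
subs = {"bids-task": "rest", "bids-modality": "bold"}
print("task-{bids-task}_{bids-modality}.json".format(**subs))
# -> task-rest_bold.json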