Esempio n. 1
0
def generate_overlaps(singles, mapped):
    """
    Generate improper partitions, i.e. those with some overlap between subsets.

    :param Iterable[str] singles: collection of flag-like option names
    :param dict[str, NoneType] mapped: flag-like option name mapped to null
    :return Iterable[(str, dict[str, NoneType])]: collection of pairs in which
        first component of each pair is collection of flags for CLI-like
        specification simulation, and second component is specification of
        remaining flags as pipeline args for project config
    """
    shared = set(singles) & set(mapped.keys())
    assert set() == shared, "Nonempty intersection: {}".format(
        ", ".join(shared))
    # Extend the CLI-style flags with each nonempty subset of the mapped ones.
    overlapping_singles = []
    for extra in powerset(mapped.keys(), min_items=1):
        overlapping_singles.append(list(singles) + list(extra))
    # Extend the mapping with each nonempty subset of the CLI-style flags.
    overlapping_maps = []
    for subset in powerset(singles, min_items=1):
        augmented = copy.copy(mapped)
        augmented.update({f: None for f in subset})
        overlapping_maps.append(augmented)
    result = [(s, mapped) for s in overlapping_singles]
    result.extend((singles, m) for m in overlapping_maps)
    return result
Esempio n. 2
0
class GrabProjectDataTests:
    """ Tests for grabbing Sample-independent Project configuration data. """

    @named_param(argnames="data", argvalues=[None, [], {}])
    def test_no_data(self, data):
        """ Parsing empty Project/data yields empty data subset. """
        assert {} == grab_project_data(data)

    @named_param(
        argnames="sections",
        argvalues=powerset(SAMPLE_INDEPENDENT_PROJECT_SECTIONS, nonempty=True))
    def test_does_not_need_all_sample_independent_data(
            self, sections, basic_project_data, sample_independent_data):
        """ Subset of all known independent data that's present is grabbed. """
        mapping = PathExAttMap(sample_independent_data)
        expected = {
            section: value
            for section, value in basic_project_data.items()
            if section in sections
        }
        compare_mappings(expected, grab_project_data(mapping))

    @named_param(
        argnames="extra_data",
        argvalues=powerset(
            [{NEW_PIPES_KEY: [{"b": 1}, {"c": 2}]}, {"pipeline_config": {}}],
            nonempty=True))
    def test_grabs_only_sample_independent_data(
            self, sample_independent_data, extra_data):
        """ Only Project data defined as Sample-independent is retrieved. """

        # Build the argument for the call under test: the independent data
        # augmented with each piece of extra (Sample-dependent) data.
        data = copy.deepcopy(sample_independent_data)
        for extra in extra_data:
            data.update(extra)

        # Convert to the correct argument type for this test case.
        mapping = PathExAttMap(data)

        # Only the Sample-independent subset should come back.
        compare_mappings(sample_independent_data, grab_project_data(mapping))
Esempio n. 3
0
def generate_flags_partitions(flags):
    """
    Generate all partitions of a CLI flag options.

    Each partition will be such that each flag is either designated for CLI
    specification or for project config specification, but not both.

    :param Iterable[str] flags: collection of flag-like options to partition
    :return Iterable[(str, dict[str, NoneType])]: collection of pairs in which
        first component of each pair is collection of flags for CLI-like
        specification simulation, and second component is specification of
        remaining flags as pipeline args for project config
    """
    partitions = []
    for cli_flags in powerset(flags):
        # Whatever isn't designated for the CLI goes to the project config.
        config_flags = {f: None for f in flags if f not in cli_flags}
        partitions.append((cli_flags, config_flags))
    return partitions
Esempio n. 4
0
def test_check_all_requires_iterable_transformations_argument(
        commands, transforms, expectation):
    """ If transformations arg is non-null, it must be iterable. """

    def invoke():
        return piper_utils.determine_uncallable(
            commands, transformations=transforms)

    # An Exception subclass as expectation means the call must raise it;
    # otherwise the expectation is a predicate on the call's result.
    exceptional = isinstance(expectation, type) and \
        issubclass(expectation, Exception)
    if exceptional:
        with pytest.raises(expectation):
            invoke()
    else:
        assert expectation(invoke())


@pytest.mark.parametrize("commands",
                         powerset(["ls", "picard.jar", "$ENVVAR"],
                                  nonempty=True))
def test_transformation_accumulation(commands):
    """ Accumulation of transformations works as expected """
    mapjar = lambda c: "java -jar {}".format(c)
    envjar = "env.jar"
    transforms = [(lambda c: c == "$ENVVAR", lambda _: envjar),
                  (lambda c: c.endswith(".jar"), mapjar)]
    exps = {
        "ls": "ls",
        "picard.jar": mapjar("picard.jar"),
        "$ENVVAR": mapjar(envjar)
    }
    with mock.patch.object(piper_utils,
                           "is_command_callable",
                           return_value=False):
        res = piper_utils.determine_uncallable(commands,
Esempio n. 5
0

@pytest.mark.parametrize(["optargs", "expected"], [([
    ("-X", None), ("--revert", 1), ("-O", "outfile"),
    ("--execute-locally", None), ("-I", ["file1", "file2"])
], "-X --revert 1 -O outfile --execute-locally -I file1 file2")])
def test_build_cli_extra(optargs, expected, ordwrap):
    """ Check that CLI optargs are rendered as expected. """
    rendered = build_cli_extra(ordwrap(optargs))
    # Show both values to ease debugging on failure.
    print("expected: {}".format(expected))
    print("observed: {}".format(rendered))
    assert expected == rendered


@pytest.mark.parametrize("optargs",
                         powerset([(None, "a"), (1, "one")], nonempty=True))
def test_illegal_cli_extra_input_is_exceptional(optargs, ordwrap):
    """ Non-string keys are illegal and cause a TypeError. """
    args = ordwrap(optargs)
    with pytest.raises(TypeError):
        build_cli_extra(args)


def test_dests_by_subparser_return_type():
    """ Check if the return type is dict of lists keyed by subcommand name """
    parser, msgs_by_cmd = build_parser()
    # Call once and reuse; the original rebuilt the mapping for each assertion.
    dests = parser.dests_by_subparser()
    assert isinstance(dests, dict)
    # Generator expressions suffice for all(); no need to materialize lists.
    assert all(isinstance(v, list) for v in dests.values())
    assert all(cmd in dests for cmd in msgs_by_cmd)
Esempio n. 6
0
from looper.const import PIPELINE_REQUIREMENTS_KEY
from looper.pipeline_interface import PL_KEY, PROTOMAP_KEY
from looper.pipereqs import KEY_EXEC_REQ, KEY_FILE_REQ, KEY_FOLDER_REQ
from looper.project_piface_group import ProjectPifaceGroup
import oldtests
from oldtests.helpers import build_pipeline_iface
from ubiquerg import powerset

__author__ = "Vince Reuter"
__email__ = "*****@*****.**"

# Requirement specifications expected to validate successfully.
GOOD_EXEC_REQS_DATA = [(name, KEY_EXEC_REQ) for name in ["ls", "date"]]
GOOD_PATH_REQS_DATA = [("$HOME", KEY_FOLDER_REQ), (__file__, KEY_FILE_REQ)]
GOOD_REQS_MAPS = list(map(
    dict, powerset(GOOD_PATH_REQS_DATA + GOOD_EXEC_REQS_DATA, nonempty=True)))
GOOD_REQS_LISTS = list(map(
    list, powerset([r for r, _ in GOOD_EXEC_REQS_DATA], nonempty=True)))

# Requirement specifications expected to fail validation.
BAD_EXEC_REQS_DATA = [(name, KEY_EXEC_REQ) for name in [__file__, "$HOME"]]
BAD_PATH_REQS_DATA = [("not-a-file", KEY_FILE_REQ), ("notdir", KEY_FOLDER_REQ)]
BAD_REQS_MAPS = [
    dict(combo)
    for combo in powerset(BAD_EXEC_REQS_DATA + BAD_PATH_REQS_DATA,
                          nonempty=True)
]
BAD_REQS_LISTS = [
    list(combo)
    for combo in powerset([r for r, _ in BAD_PATH_REQS_DATA], nonempty=True)
]

ANNS_FILE_NAME = "anns.csv"
Esempio n. 7
0
    [PSample, LSample, PeppySampleSubtype, LooperSampleSubtype])
def test_sample_yaml_includes_filepath(tmpdir, data_type, sample):
    """ A Sample's disk representation includes key-value for that path. """
    path = sample.to_yaml(subs_folder_path=tmpdir.strpath)
    assert os.path.isfile(path)
    with open(path, 'r') as fh:
        data = yaml.load(fh, yaml.SafeLoader)
    # The file should record its own location under the dedicated key.
    assert SAMPLE_YAML_FILE_KEY in data
    assert path == data[SAMPLE_YAML_FILE_KEY]


PIPEKEYS = list(PIPELINE_TO_REQD_INFILES_BY_SAMPLE.keys())
OUTA = "outa"
OUTB = "outb"
DUMMY_OUTPUTS = [("A", OUTA), ("B", OUTB)]
# Every subset of the dummy outputs, each rendered as a mapping.
OUTPUT_COMBOS = list(map(dict, powerset(DUMMY_OUTPUTS)))
# One spec per way of assigning an output combo to each pipeline key.
OUTPUT_SPECS = [
    dict(assignment)
    for assignment in itertools.product(
        *([(pk, oc) for oc in OUTPUT_COMBOS] for pk in PIPEKEYS))
]


@pytest.fixture
def prj(tmpdir, request):
    """ Write annotations file, piface file, and prjcfg file, and provide Project instance. """
    testpath = tmpdir.strpath
    for pipe in PIPEKEYS:
        pipe_path = os.path.join(testpath, pipe)
        with open(pipe_path, 'w'):
            print("Touching pipe file: {}".format(pipe_path))
Esempio n. 8
0
    EXS_KEY, HEADLINE, DETAIL_LINES, LONG_DESC_KEY, SHORT_DESC_KEY
from ubiquerg import powerset

__author__ = "Vince Reuter"
__email__ = "*****@*****.**"


@pytest.mark.parametrize("blank_line_sep", [False, True])
@pytest.mark.parametrize(
    "pool",
    build_args_space(
        allow_empty=False,
        **{
            EXS_KEY: [{
                EXS_KEY: items
            } for items in powerset([CODE_EX1, CODE_EX2], nonempty=True)]
        }))
def test_examples(pool, ds_spec, blank_line_sep):
    """ Check that number of example blocks parsed is as expected. """
    # Hack for post-hoc modification of specification fixture
    setattr(ds_spec, "space_between_examples", blank_line_sep)
    rendered = ds_spec.render()
    chunks = lucidoc.RstDocstringParser().examples(rendered)
    expected_count = rendered.count(RST_EXAMPLE_TAG)
    # Count the parsed chunks that are fenced code blocks with content.
    observed_count = len([
        c for c in chunks if c.startswith("```") and not c.strip() == "```"
    ])
    assert expected_count == observed_count, \
        "{} example(s) and {} example tag(s)\nExamples chunks: {}".\
            format(observed_count, expected_count, chunks)
Esempio n. 9
0

SHORT_DESC_KEY = "headline"
LONG_DESC_KEY = "detail"
DESC_KEY = "description"
PAR_KEY = "params"
RET_KEY = "returns"
ERR_KEY = "raises"
EXS_KEY = "examples"


# Pools of candidate values for each docstring tag type.
DESC_POOL = [
    {SHORT_DESC_KEY: headline, LONG_DESC_KEY: detail}
    for headline, detail in [(None, None), (HEADLINE, None),
                             (None, DETAIL_LINES), (HEADLINE, DETAIL_LINES)]
]
PARAM_POOL = [
    {PAR_KEY: items}
    for items in powerset([BOOL_PARAM, FUNC_PARAM, ITER_PARAM, UNION_PARAM])
]
RETURN_POOL = [{RET_KEY: items} for items in [RETURN, RETURN_MUTLI_LINE]]
ERROR_POOL = [{ERR_KEY: items} for items in powerset([VALUE_ERROR, TYPE_ERROR])]
CODE_POOL = [{EXS_KEY: items} for items in powerset([CODE_EX1, CODE_EX2])]
SPACE_POOL = [
    {"pre_tags_space": pre, "trailing_space": trail}
    for pre, trail in itertools.product([False, True], repeat=2)
]


def build_args_space(allow_empty, **kwargs):
    """
    Create collection of docstring specification parameter schemes.

    :param bool allow_empty: Whether an empty scheme (no parameters) is valid.
    :param kwargs: hook for direct specification for specific tag type(s) of
Esempio n. 10
0
def pytest_generate_tests(metafunc):
    """ Module-level test case parameterization """
    requested = metafunc.fixturenames
    # Only tests that request the fixture get the protected-keys parameter.
    if "protected" in requested:
        metafunc.parametrize("protected", powerset(DATA.keys()))
    # Tests should hold regardless of parser style.
    metafunc.parametrize("parse_style", PARSERS.keys())
Esempio n. 11
0
        elif data != pi_with_resources.select_pipeline(pipe):
            unequal.append(pipe)

    assert len(known) == seen
    assert [] == missing, get_err_msg(missing, "missing")
    try:
        assert [] == unequal
    except AssertionError:
        print(get_err_msg(unequal, "with unmatched data"))
        print("KNOWN: {}".format(known))
        print("ITERPIPES: {}".format(", ".join(pi_with_resources.iterpipes())))
        raise


@pytest.mark.parametrize("exclude",
                         powerset(PipelineInterface.REQUIRED_SECTIONS))
def test_requires_pipelines_and_protocol_mapping(basic_pipe_iface_data,
                                                 bundled_piface, exclude):
    """ Check PipelineInterface's requirement for important sections. """
    config = copy.deepcopy(bundled_piface)
    # Sanity check: the starting config must carry every required section.
    absent = [
        s for s in PipelineInterface.REQUIRED_SECTIONS if s not in config
    ]
    assert [] == absent, \
        "Missing PI config section(s): {}".format(", ".join(absent))
    # Drop the excluded sections and verify that none survive.
    config = {k: v for k, v in config.items() if k not in exclude}
    assert [] == [s for s in exclude if s in config]
Esempio n. 12
0
    ret2 = parser.returns(ds2)
    assert isinstance(ret1, lucidoc.RetTag)
    assert isinstance(ret2, lucidoc.RetTag)
    assert ret1.typename == ret2.typename
    assert ret1.description == ret2.description


@pytest.mark.parametrize("tag", [":raise", ":raises"])
@pytest.mark.parametrize(
    "pool",
    build_args_space(
        allow_empty=False,
        **{
            ERR_KEY: [{
                ERR_KEY: items
            } for items in powerset([VALUE_ERROR, TYPE_ERROR], nonempty=True)]
        }))
def test_raise_vs_raises(tag, pool, ds_spec):
    """ Both styles of exception docstring are allowed. """
    parser = lucidoc.RstDocstringParser()
    original = ds_spec.render()
    # Normalize both exception-tag spellings to the parameterized one.
    rewritten = original.replace(":raise", tag).replace(":raises", tag)
    errs1 = parser.raises(original)
    errs2 = parser.raises(rewritten)
    assert len(errs1) > 0
    assert len(errs1) == len(errs2)
    for errs in (errs1, errs2):
        assert all(isinstance(e, lucidoc.ErrTag) for e in errs)
    paired = list(zip(errs1, errs2))
    assert all(a.typename == b.typename for a, b in paired)
    assert all(a.description == b.description for a, b in paired)