import os
import sys
import shutil
import filecmp
from unittest.mock import patch

import py
import pytest

# The module under test; this import assumes the package layout of the
# chewBBACA repository. The `pickle_loader` helper and the parametrized
# `test_args`/`expected` values are supplied by the surrounding test
# suite and are not defined here.
from CHEWBBACA import chewBBACA


def test_createschema_empty_pairs(test_args, expected):
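    """Run CreateSchema through the CLI with arguments that should be rejected
    and verify that the exit message contains the expected text."""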
    with pytest.raises(SystemExit) as e:
        with patch.object(sys, 'argv', test_args):
            chewBBACA.main()

    assert e.type == SystemExit
    assert expected in e.value.code
def test_modules_exit_codes(test_args, expected):
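    """Run the CLI with the given arguments and verify that it exits with the
    expected exit code."""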
    with pytest.raises(SystemExit) as e:
        with patch.object(sys, 'argv', test_args):
            chewBBACA.main()

    assert e.type == SystemExit
    assert e.value.code == expected
def test_allelecall_valid(test_args, expected):
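    """Run AlleleCall with valid arguments and compare the output files
    against the expected results."""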
    with patch.object(sys, 'argv', test_args):
        capture = py.io.StdCapture()
        chewBBACA.main()
        stdout, stderr = capture.reset()

    # check text printed to stdout
    assert 'Writing output files' in stdout

    # check output files
    output_files = []
    for root, dirs, files in os.walk(test_args[7]):
        output_files.extend(os.path.join(root, file)
                            for file in files
                            if 'logging_info.txt' != file)

    expected_files = [os.path.join(expected, file)
                      for file in os.listdir(expected)
                      if 'logging_info.txt' != file]

    files = output_files + expected_files
    basename_dict = {}
    for f in files:
        basename = os.path.basename(f)
        basename_dict.setdefault(basename, []).append(f)

    # assert that files in each pair are equal
    file_cmps = []
    for k, v in basename_dict.items():
        file_cmps.append(filecmp.cmp(v[0], v[1], shallow=False))

    assert all(file_cmps)
def test_invalid_input(test_args, expected):
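    """Run the CLI with invalid inputs and verify that the exit message
    contains the expected text."""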

    # create empty dir for empty dir test
    if 'empty_dir' in test_args[3] and not os.path.isdir(test_args[3]):
        os.mkdir(test_args[3])

    with pytest.raises(SystemExit) as e:
        with patch.object(sys, 'argv', test_args):
            chewBBACA.main()

    assert e.type == SystemExit
    assert expected in e.value.code
def test_schemaEvaluator_invalid_input(test_args, expected):
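    """Run SchemaEvaluator with invalid inputs, remove any output directory
    that was created, and verify the exit message."""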

    with pytest.raises(SystemExit) as e:
        with patch.object(sys, "argv", test_args):
            chewBBACA.main()

    # remove the output directory in case it was created; ignore the error
    # if it does not exist
    try:
        shutil.rmtree(test_args[5])
    except Exception:
        pass

    assert e.type == SystemExit
    assert expected in e.value.code
def test_createschema_valid(test_args, expected):
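    """Run CreateSchema with valid arguments and compare the created schema
    files and configuration against the expected schema."""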
    with patch.object(sys, 'argv', test_args):
        capture = py.io.StdCapture()
        chewBBACA.main()
        stdout, stderr = capture.reset()

    # check output files
    schema_seed = os.path.join(test_args[5], 'schema_seed')
    output_files = [
        os.path.join(schema_seed, file) for file in os.listdir(schema_seed)
        if 'short' != file
    ]
    output_files.sort()

    expected_seed = os.path.join(expected, 'schema_seed')
    expected_files = [
        os.path.join(expected_seed, file) for file in os.listdir(expected_seed)
        if 'short' != file
    ]
    expected_files.sort()

    # get config files
    genes_lists = [output_files.pop(0), expected_files.pop(0)]
    schemas_configs = [output_files.pop(0), expected_files.pop(0)]

    # compare configs
    assert sorted(pickle_loader(genes_lists[0])) == sorted(
        pickle_loader(genes_lists[1]))
    assert pickle_loader(schemas_configs[0]) == pickle_loader(
        schemas_configs[1])

    files = output_files + expected_files
    basename_dict = {}
    for f in files:
        basename = os.path.basename(f)
        basename_dict.setdefault(basename, []).append(f)

    # assert that files in each pair are equal
    file_cmps = []
    for k, v in basename_dict.items():
        file_cmps.append(filecmp.cmp(v[0], v[1], shallow=False))

    assert all(file_cmps)
def test_schemaEvaluator_valid(test_args, expected):
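    """Run SchemaEvaluator with valid arguments and compare the HTML, main,
    protein and exception output files against the expected results."""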
    with patch.object(sys, "argv", test_args):
        capture = py.io.StdCapture()
        chewBBACA.main()
        stdout, stderr = capture.reset()

    # check if the report has been created
    assert "The report has been created." in stdout

    # check output HTML files
    output_html_files = [
        os.path.join(test_args[5], "html_files", file)
        for file in os.listdir(os.path.join(test_args[5], "html_files"))
    ]
    output_html_files.sort()

    expected_html_files = [
        os.path.join(expected, "html_files", file)
        for file in os.listdir(os.path.join(expected, "html_files"))
    ]
    expected_html_files.sort()

    html_files = output_html_files + expected_html_files
    basename_html_dict = {}
    for f1 in html_files:
        basename1 = os.path.basename(f1)
        basename_html_dict.setdefault(basename1, []).append(f1)

    # assert that files in each pair are equal
    file_cmps_html = []
    for k, v in basename_html_dict.items():
        file_cmps_html.append(filecmp.cmp(v[0], v[1], shallow=False))

    assert all(file_cmps_html)

    # check output MAIN files
    output_main_files = [
        os.path.join(test_args[5], "SchemaEvaluator_pre_computed_data", file)
        for file in os.listdir(
            os.path.join(test_args[5], "SchemaEvaluator_pre_computed_data")
        )
        if "prot_files" != file
    ]
    output_main_files.sort()

    expected_main_files = [
        os.path.join(expected, "SchemaEvaluator_pre_computed_data", file)
        for file in os.listdir(
            os.path.join(expected, "SchemaEvaluator_pre_computed_data")
        )
        if "prot_files" != file
    ]
    expected_main_files.sort()

    main_files = output_main_files + expected_main_files
    basename_main_dict = {}
    for f2 in main_files:
        basename2 = os.path.basename(f2)
        basename_main_dict.setdefault(basename2, []).append(f2)

    # assert that files in each pair are equal
    file_cmps_main = []
    for k2, v2 in basename_main_dict.items():
        file_cmps_main.append(filecmp.cmp(v2[0], v2[1], shallow=False))

    assert all(file_cmps_main)

    # check output PROTEIN files
    output_prot_files = [
        os.path.join(
            test_args[5], "SchemaEvaluator_pre_computed_data", "prot_files", file
        )
        for file in os.listdir(
            os.path.join(
                test_args[5], "SchemaEvaluator_pre_computed_data", "prot_files"
            )
        )
        if "exceptions" != file
    ]
    output_prot_files.sort()

    expected_prot_files = [
        os.path.join(expected, "SchemaEvaluator_pre_computed_data", "prot_files", file)
        for file in os.listdir(
            os.path.join(expected, "SchemaEvaluator_pre_computed_data", "prot_files")
        )
        if "exceptions" != file
    ]
    expected_prot_files.sort()

    prot_files = output_prot_files + expected_prot_files
    basename_prot_dict = {}
    for f in prot_files:
        basename3 = os.path.basename(f)
        basename_prot_dict.setdefault(basename3, []).append(f)

    # assert that files in each pair are equal
    file_cmps_prot = []
    for k3, v3 in basename_prot_dict.items():
        file_cmps_prot.append(filecmp.cmp(v3[0], v3[1], shallow=False))

    assert all(file_cmps_prot)

    # check output EXCEPTION files
    output_exc_files = [
        os.path.join(
            test_args[5],
            "SchemaEvaluator_pre_computed_data",
            "prot_files",
            "exceptions",
            file,
        )
        for file in os.listdir(
            os.path.join(
                test_args[5],
                "SchemaEvaluator_pre_computed_data",
                "prot_files",
                "exceptions",
            )
        )
    ]
    output_exc_files.sort()

    expected_exc_files = [
        os.path.join(
            expected,
            "SchemaEvaluator_pre_computed_data",
            "prot_files",
            "exceptions",
            file,
        )
        for file in os.listdir(
            os.path.join(
                expected,
                "SchemaEvaluator_pre_computed_data",
                "prot_files",
                "exceptions",
            )
        )
    ]
    expected_exc_files.sort()

    exc_files = output_exc_files + expected_exc_files
    basename_exc_dict = {}
    for f in exc_files:
        basename4 = os.path.basename(f)
        basename_exc_dict.setdefault(basename4, []).append(f)

    # assert that files in each pair are equal
    file_cmps_exc = []
    for k4, v4 in basename_exc_dict.items():
        file_cmps_exc.append(filecmp.cmp(v4[0], v4[1], shallow=False))

    assert all(file_cmps_exc)
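

# The basename-pairing comparison above is repeated for every output
# directory. A helper along the following lines could factor that pattern
# out; this is only a sketch and not part of the original test module, and
# the function name is hypothetical.
def assert_paired_files_equal(output_files, expected_files):
    """Group files from both lists by basename and assert each pair is identical."""
    basename_dict = {}
    for f in output_files + expected_files:
        basename_dict.setdefault(os.path.basename(f), []).append(f)

    for basename, pair in basename_dict.items():
        # every basename must be present in both the output and expected sets
        assert len(pair) == 2, f'{basename} is present in only one directory'
        assert filecmp.cmp(pair[0], pair[1], shallow=False), \
            f'{basename} differs from the expected file'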