Example #1
import subprocess
from pathlib import Path

from semgrep.semgrep_main import invoke_semgrep


def test_api(capsys, run_semgrep_in_tmp):
    # Test that the exposed Python API works and prints nothing to stdout or stderr
    output = invoke_semgrep(
        Path("rules/eqeq.yaml"),
        [
            Path("targets/bad/invalid_python.py"),
            Path("targets/basic/stupid.py")
        ],
    )

    captured = capsys.readouterr()
    assert len(output["errors"]) == 1
    assert len(output["results"]) == 1
    assert captured.out == ""
    assert captured.err == ""

    # Check that logging output isn't picked up by the default root handler and printed to stderr
    x = subprocess.run(
        [
            "python3",
            "-c",
            "from semgrep.semgrep_main import invoke_semgrep; from pathlib import Path; invoke_semgrep(Path('rules/eqeq.yaml'),[Path('targets/bad/invalid_python.py'), Path('targets/basic/stupid.py')],)",
        ],
        encoding="utf-8",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    assert x.stdout == ""
    assert x.stderr == ""
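
The subprocess check above verifies that merely importing and calling invoke_semgrep writes nothing to stdout or stderr, i.e. the library's log records never fall through to the root handler. A minimal sketch of the standard library-side pattern that achieves this, using a NullHandler (illustrative; not necessarily semgrep's actual logging setup):

import logging

# Library side: attach a NullHandler to the package logger so that,
# absent any application configuration, records are swallowed instead
# of triggering logging's last-resort stderr handler.
logger = logging.getLogger("semgrep")
logger.addHandler(logging.NullHandler())

logger.warning("dropped silently")  # nothing reaches stderr

# Application side: opting in makes the same records visible.
logging.basicConfig(level=logging.INFO)
logger.warning("now emitted via the root handler")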
Example #2
File: test.py  Project: tkisason/semgrep
def invoke_semgrep_multi(filename, *args, **kwargs):
    # Return a (filename, error, output) triple instead of raising, so one
    # bad rule file doesn't abort a batch run over many configs.
    try:
        output = invoke_semgrep(filename, *args, **kwargs)
    except Exception as error:
        return (filename, error, {})
    else:
        return (filename, None, output)
Example #3
from pathlib import Path
from typing import Any, List, Optional, Tuple


def invoke_semgrep_multi(
        config: Path, targets: List[Path],
        **kwargs: Any) -> Tuple[Path, Optional[Exception], Any]:
    try:
        output = invoke_semgrep(config, targets, **kwargs)
    except Exception as error:
        return (config, error, {})
    else:
        return (config, None, output)
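
Both invoke_semgrep_multi variants return a (config, error, output) triple rather than raising, which makes them safe to fan out across worker processes. A minimal usage sketch with multiprocessing.Pool (the rule and target paths below are hypothetical placeholders):

import multiprocessing
from functools import partial
from pathlib import Path

configs = [Path("rules/eqeq.yaml"), Path("rules/other.yaml")]  # hypothetical
targets = [Path("targets/basic/stupid.py")]                    # hypothetical

if __name__ == "__main__":
    with multiprocessing.Pool(processes=4) as pool:
        results = pool.map(partial(invoke_semgrep_multi, targets=targets), configs)

    for config, error, output in results:
        if error is not None:
            print(f"{config} failed: {error}")
        else:
            print(f"{config}: {len(output['results'])} findings")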
Example #4
File: test.py  Project: mtkis22/semgrep
import sys
from pathlib import Path

# YML_EXTENSIONS, invoke_semgrep, score_output_json, debug_print and
# confusion_matrix_to_string are defined elsewhere in this test module.


def generate_file_pairs(location: Path, ignore_todo: bool, strict: bool,
                        semgrep_verbose: bool, unsafe: bool) -> None:
    filenames = list(location.rglob("*"))
    no_tests = []
    tested = []
    semgrep_error = []
    print("starting tests...")
    for filename in filenames:
        if (filename.suffix in YML_EXTENSIONS
                and not filename.name.startswith(".")
                and not filename.parent.name.startswith(".")):
            # find all files that share the yaml file's name (minus its extension), or live in a folder with the same name as the yaml file
            yaml_file_name_without_ext = filename.with_suffix("")

            children_test_files = [
                p for p in filenames
                if str(p.with_suffix("")) == str(yaml_file_name_without_ext)
            ]
            # remove yaml files from the test lists
            test_files = [
                path for path in children_test_files
                if path.suffix not in YML_EXTENSIONS and path.is_file()
            ]
            if not test_files:
                no_tests.append(filename)
                continue
            # invoke semgrep
            try:
                output_json = invoke_semgrep(
                    filename,
                    test_files,
                    no_git_ignore=True,
                    no_rewrite_rule_ids=True,
                    strict=strict,
                    dangerously_allow_arbitrary_code_execution_from_rules=unsafe,
                )
                tested.append((filename,
                               score_output_json(output_json, test_files,
                                                 ignore_todo)))
            except Exception as ex:
                print(
                    f"semgrep error running with config {filename} on {test_files}:\n{ex}"
                )
                semgrep_error.append(filename)

    if semgrep_error and strict:
        print("exiting due to semgrep/config errors and strict flag")
        sys.exit(1)

    print(f"{len(no_tests)} yaml files missing tests")
    debug_print(f"missing tests: {no_tests}")
    print(f"{len(tested)} yaml files tested")
    print("check id scoring:")
    print("=" * 80)
    failed_tests = []
    total_confusion = [0, 0, 0, 0]

    for (filename, (output, expected_reported_by_check_id,
                    num_todo)) in tested:
        print(filename)
        if not output:
            print(f"  no checks fired (TODOs: {num_todo})")
        for check_id, (tp, tn, fp, fn) in output.items():
            good = (fp == 0) and (fn == 0)
            if not good:
                failed_tests.append((filename, check_id,
                                     expected_reported_by_check_id[check_id]))
            status = "✔" if good else "✖"
            todo_text = f"(TODOs: {num_todo})" if num_todo > 0 else ""
            confusion = [tp, tn, fp, fn]
            # add to the total confusion matrix
            total_confusion = [
                running + new for running, new in zip(total_confusion, confusion)
            ]
            print(
                f"  {status} - {check_id.ljust(60)}{confusion_matrix_to_string(confusion)} {todo_text}"
            )

    print("=" * 80)
    print(
        f"final confusion matrix: {confusion_matrix_to_string(total_confusion)}"
    )
    print("=" * 80)

    if len(failed_tests) > 0:
        print(f"failing rule files: ")
        for (filename, check_id, failed_test_files) in failed_tests:
            print(f" ✖ FAILED rule file: {filename} check: {check_id}")
            for test_file_path, (expected,
                                 reported) in failed_test_files.items():
                print(
                    f"              in test: {test_file_path}, expected lines: {sorted(expected)} != reported: {sorted(reported)}"
                )
        print(
            f"{len(failed_tests)} checks failed tests (run with verbose flag for more details)"
        )
        sys.exit(1)
    else:
        print("all tests passed")
        sys.exit(0)
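
score_output_json, debug_print, and confusion_matrix_to_string are helpers defined elsewhere in this test module. As a reading aid, here is a hypothetical sketch of a formatter compatible with the [tp, tn, fp, fn] lists built above (the real helper's output may differ):

from typing import List

def confusion_matrix_to_string(confusion: List[int]) -> str:
    # The lists above are ordered [tp, tn, fp, fn].
    tp, tn, fp, fn = confusion
    return f"TP: {tp} TN: {tn} FP: {fp} FN: {fn}"

# confusion_matrix_to_string([3, 10, 0, 1]) -> "TP: 3 TN: 10 FP: 0 FN: 1"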