Example #1
0
def update_tests(testfile,
                 include_private=False,
                 skip_patterns=None,
                 include_classes=True):
    """Given a testing file, load in as a GridRunner, load the module again,
       and update with new tests not found. Optionally take patterns
       to skip. This is akin to check_tests in check.py, but instead we
       update the runner and save the file.

       Arguments:
          - testfile (str) : the yaml test file
          - include_private (bool) : include "private" functions
          - skip_patterns (list) : list of test keys (patterns) to exclude
          - include_classes (bool) : include classes in update (True)

       Returns:
          the updated GridRunner (also saved back to testfile)
    """
    if not os.path.exists(testfile):
        sys.exit(f"{testfile} does not exist.")

    if not re.search("[.](yml|yaml)$", testfile):
        sys.exit("Test file must have yml|yaml extension.")

    skip_patterns = skip_patterns or []
    runner = GridRunner(testfile)

    # Keep a lookup of test keys already present in the config
    existing = set()

    for name, section in runner.config.items():
        # Get either the file path, module name, or relative path
        filename = extract_modulename(section.get("filename"),
                                      os.path.dirname(testfile))
        existing.update(section.get("tests", {}).keys())
        functions = extract_functions(filename,
                                      include_private,
                                      quiet=True,
                                      include_classes=include_classes)

        # Regular expression for existing takes into account different import
        # paths. NOTE: with no patterns at all, "|".join yields "()$" which
        # matches every key — so only filter when patterns are present.
        patterns = list(existing) + skip_patterns
        regex = ("(%s)$" % "|".join(patterns)) if patterns else None
        for key, params in functions.items():

            # Make sure functions start with same name as previous level
            if name in key:
                key = name + ".".join(
                    [x for x in key.split(name)[1:] if x != "."])

            if regex is None or not re.search(regex, key):
                print(f"Adding function {key}")
                runner.config[name][key] = params

    # Save back to file
    runner.save(testfile)
    return runner
Example #2
0
def test_classes():
    """Ensure that gridtest loads and runs tests defined on classes.
    """
    from gridtest.main.test import GridRunner

    runner = GridRunner(os.path.join(here, "modules", "car-tests.yml"))
    loaded = runner.get_tests()

    car_test = loaded["car.Car.1"]
    assert not car_test.result
    assert "isinstance" in car_test.params
    car_test.run()
    assert type(car_test.result).__name__ == car_test.params["isinstance"]
Example #3
0
def main(args, extra):
    """Entry point for inspecting grids in a grids file.

    Loads grids from the first positional input (default "grids.yml"). With
    no grid name given, print the available grid names; with a name, count
    argument sets, print a single argument, export to json, or print all
    argument sets.
    """
    # Default file for grids is grids.yml
    if not args.input:
        args.input = ["grids.yml"]
    input_file = args.input.pop(0)

    if not os.path.exists(input_file):
        sys.exit(f"{input_file} does not exist.")

    runner = GridRunner(input_file)
    grids = runner.get_grids()

    # If no name specified, print grid listing
    if args.input:
        name = args.input[0]
        if name in grids:
            grid = grids[name]
        else:
            sys.exit(f"{name} is not a valid grid name in {input_file}")

        if args.arg and args.arg not in grid.args:
            sys.exit(f"{args.arg} is not an argument in grid {grid}.")

        # Print count (length) of a variable, or number of grids
        if args.count and args.arg:
            # Scalar argument values have no len(); fall back to printing them
            try:
                print(
                    f"Variable {args.arg} has length {len(grid.args[args.arg])}."
                )
            except TypeError:
                print(f"{grid.args[args.arg]}")

        # Just count of global
        elif args.count:
            print(f"{len(list(grid))} argument sets produced.")

        # Just print the argument
        elif args.arg:
            print(grid.args[args.arg])

        # Export data to file (avoid shadowing the outer grids lookup)
        elif args.export:
            argsets = list(grid)
            write_json(argsets, args.export)
        else:
            for argset in grid:
                print(argset)
    else:
        print("\n".join(list(grids.keys())))
Example #4
0
def test_gridrunner(runner):
    """Run a GridRunner on the basic fixture, then on the temp-substitute file.
    """
    assert "basic" in runner.config
    assert len(runner.config["basic"]["tests"]) >= 5
    assert runner.run() == 0
    assert runner.run(parallel=False) == 0

    # Repeat with a runner built from the temporary-substitution test file
    from gridtest.main.test import GridRunner

    temp_runner = GridRunner(os.path.join(here, "modules", "temp-tests.yml"))
    assert "temp" in temp_runner.config
    assert len(temp_runner.config["temp"]["tests"]) >= 2
    assert temp_runner.run() == 0
    assert temp_runner.run(parallel=False) == 0
Example #5
0
def main(args, extra):
    """Build a GridRunner from the test file and run it, forwarding all
    command line options; the process exits with the runner's return code.
    """
    options = {
        "nproc": args.nproc,
        "parallel": not args.serial,
        "verbose": args.verbose,
        "regexp": args.pattern,
        "name": args.name,
        "interactive": args.interactive,
        "cleanup": not args.no_cleanup,
        "save": args.save,
        "save_compact": args.save_compact,
        "save_report": args.save_report,
        "report_template": args.report_template,
        "save_metrics": args.save_metrics,
    }
    runner = GridRunner(args.filename)
    sys.exit(runner.run(**options))
Example #6
0
def get_missing_tests(testfile,
                      include_private=False,
                      skip_patterns=None,
                      include_classes=True):
    """Given a testing file, load in as a GridRunner, load the module again,
       and check if new tests need to be generated. Optionally take patterns
       to skip. If no new tests are added, we return 0. Otherwise, we exit with
       1. This is similar to black linting, and is intended for running in CI
       to pass if a user has written all tests to correpond with their module
       (akin to a more rigorous coverage tool).

       Arguments:
          - testfile (str) : the yaml test file
          - include_private (bool) : include "private" functions
          - skip_patterns (list) : list of test keys (patterns) to exclude
          - include_classes (bool) : include classes in the check (True)

       Returns:
          list of test keys found in the modules but missing from testfile
    """
    if not os.path.exists(testfile):
        sys.exit(f"{testfile} does not exist.")

    if not re.search("[.](yml|yaml)$", testfile):
        sys.exit("Test file must have yml|yaml extension.")

    skip_patterns = skip_patterns or []
    runner = GridRunner(testfile)

    # The config holds the filename we derive tests from, and tests
    files = []
    existing = set()

    for section in runner.config.values():
        # Get either the file path, module name, or relative path
        filename = extract_modulename(section.get("filename"),
                                      os.path.dirname(testfile))
        files.append(filename)
        existing.update(section.get("tests", {}).keys())

    # Keep track of new sections seen
    sections = []

    # Regular expression for existing takes into account different import
    # paths. With no patterns, "|".join yields "()$" which matches every
    # key, so only apply the regex filter when patterns exist.
    patterns = list(existing) + skip_patterns
    regex = ("(%s)$" % "|".join(patterns)) if patterns else None

    # Import each file as a module, or a module name, exit on error
    for filename in files:
        functions = extract_functions(
            filename,
            include_private=include_private,
            quiet=True,
            include_classes=include_classes,
        )
        # extract_functions returns a mapping of test key -> params directly
        # (see update_tests); the previous .get("tests", {}) lookup always
        # produced an empty dict, so missing tests were never detected.
        sections += [
            k for k in functions
            if k not in existing and (regex is None or not re.search(regex, k))
        ]
    return sections
Example #7
0
def test_substitute_gridtest():
    """Check that test instantiation substitutes template variables.
    """
    from gridtest.main.test import GridRunner

    runner = GridRunner(os.path.join(here, "modules", "temp-tests.yml"))

    # Before any tests are created, the raw config still holds the templates
    raw = runner.config["temp"]["tests"]
    assert "{% tmp_dir %}" in raw["temp.create_directory"][0]["args"]["dirname"]
    assert "{% tmp_path %}" in raw["temp.write_file"][0]["args"]["filename"]

    tests = runner.get_tests()

    # Helper to read a test argument fresh each time
    def arg(key, name):
        return tests[key].params["args"][name]

    # After instantiation the templates have been substituted
    assert "{% tmp_dir %}" not in arg("temp.create_directory.0", "dirname")
    assert "{% tmp_path %}" not in arg("temp.write_file.0", "filename")

    # tmp_dir creates the directory for the test; the file does not exist yet
    assert os.path.exists(arg("temp.create_directory.0", "dirname"))
    assert not os.path.exists(arg("temp.write_file.0", "filename"))

    # Run every test without cleaning up outputs
    for test in tests.values():
        test.run(cleanup=False)

    # Outputs are left in place when cleanup is disabled
    assert os.path.exists(arg("temp.create_directory.0", "dirname"))
    assert os.path.exists(arg("temp.write_file.0", "filename"))

    # Run again, this time cleaning up after each test
    for test in tests.values():
        test.run(cleanup=True)
Example #8
0
def get_runner(args):
    """If the user provides a gridtest file to load, return a runner.

    Arguments:
       - args : parsed arguments; args.input is the optional test yaml file

    Returns:
       a GridRunner for args.input, or None when no input was provided.
       Exits when the input file is missing or the runner cannot be created.
    """
    runner = None
    if args.input is not None:
        if not os.path.exists(args.input):
            sys.exit(f"Input file {args.input} does not exist.")
        # Narrow catch: a bare except would also swallow SystemExit and
        # KeyboardInterrupt; include the original error in the message.
        try:
            runner = GridRunner(args.input)
        except Exception as e:
            sys.exit(
                "Error creating GridRunner, try running shell without test yaml file to debug."
                f" ({e})"
            )
    return runner
Example #9
0
def test_istrue_isfalse():
    """Verify istrue/isfalse statements are loaded, then substituted on run.
    """
    from gridtest.main.test import GridRunner

    runner = GridRunner(os.path.join(here, "modules", "truefalse-tests.yml"))
    tests = runner.get_tests()

    # This test should carry both istrue and isfalse statements
    test = tests["truefalse.add.0"]
    assert "istrue" in test.params
    assert "isfalse" in test.params

    # Before running, the result template is still unparsed
    assert test.params["istrue"] == "isinstance({{ result }}, float)"
    assert test.params["isfalse"] == "isinstance({{ result }}, int)"

    # Running fills in the result value
    test.run()
    assert test.result == 3.0
    assert test.params["istrue"] == "isinstance(3.0, float)"
    assert test.params["isfalse"] == "isinstance(3.0, int)"
Example #10
0
def test_metrics():
    """Check that metrics specifications load and invalid specs are rejected.
    """
    from gridtest.main.test import GridRunner

    metrics_file = os.path.join(here, "modules", "metrics.yml")
    runner = GridRunner(metrics_file)
    tests = runner.get_tests()

    # The list plus min/max specification should expand into three tests
    expanded = [name for name in tests if "gotosleep" in name]
    assert len(expanded) == 3
    runner.run()

    # An invalid argument specification must cause get_tests to exit
    runner = GridRunner(metrics_file)
    entry = runner.config["metrics"]["tests"]["metrics.gotosleep"][0]
    entry["args"]["seconds"]["invalid"] = 1

    with pytest.raises(SystemExit):
        runner.get_tests()
Example #11
0
def test_grids():
    """Exercise loading grids from file and constructing Grid objects directly.
    """
    from gridtest.main.grids import Grid
    from gridtest.main.test import GridRunner
    from grids.script import get_pokemon_id

    grids_file = os.path.join(here, "grids", "grids.yml")
    runner = GridRunner(grids_file)

    # Each grid loaded via the runner should be iterable
    for key, loaded in runner.get_grids().items():
        print(list(loaded))

    # Case 1: a grid with a custom function
    spec = {"functions": {"pid": get_pokemon_id}, "count": 10}
    g = Grid(name="generate_pids", params=spec)
    assert g.name == "generate_pids"
    assert len(list(g)) == 10

    # Case 2: a grid using a system function referenced by name
    spec = {
        "functions": {"pid": "random.choice"},
        "count": 10,
        "args": {"seq": [[1, 2, 3]]},
    }
    g = Grid(name="random_choice", params=spec)
    assert g.name == "random_choice"
    assert len(list(g)) == 10
    assert len(g.argsets) == 0

    # Case 3: enabling cache keeps generated argument sets
    spec["cache"] = True
    g = Grid(name="random_choice", params=spec)
    assert len(g.argsets) == 10

    # Case 4: generating without args yields empty argument sets
    g = Grid(name="generate_empty", params={"count": 10})
    assert len(list(g)) == 10

    # Case 5: single-level lists are parameterized into a matrix
    g = Grid("generate_matrix",
             params={"args": {"x": [1, 2, 3], "y": [1, 2, 3]}})
    assert len(list(g)) == 9

    # Case 6: lists of lists are used verbatim as input arguments
    spec = {
        "args": {
            "x": [[1, 2, 3], [4, 5, 6]],
            "y": [[1, 2, 3], [4, 5, 6]],
        }
    }
    g = Grid("generate_lists_matrix", params=spec)
    assert len(list(g)) == 4

    # Case 7: min, max and by with one argument
    g = Grid("generate_by_min_max",
             params={"args": {"x": {"min": 0, "max": 10, "by": 2}}})
    assert len(list(g)) == 5

    # Case 8: min, max, and by over two arguments
    spec = {
        "args": {
            "y": {"min": 0, "max": 10, "by": 2},
            "x": {"min": 10, "max": 20, "by": 2},
        }
    }
    g = Grid("generate_by_min_max_twovars", params=spec)
    assert len(list(g)) == 25
Example #12
0
def runner():
    """Provide a GridRunner built from the basic test fixture file."""
    from gridtest.main.test import GridRunner

    return GridRunner(os.path.join(here, "modules", "basic-tests.yml"))