Example #1
def test_pytask_execute_task_setup(marker_name, expectation):
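    # Setting up a task with the given marker should either pass or raise,
    # exactly as encoded in the parametrized `expectation` context manager.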
    task = Task(base_name="task", path=Path(), function=None)
    kwargs = {"reason": ""} if marker_name == "skip_ancestor_failed" else {}
    task.markers = [Mark(marker_name, (), kwargs)]

    with expectation:
        pytask_execute_task_setup(task)
Example #2
def test_live_execution_sequentially(capsys, tmp_path):
    path = tmp_path.joinpath("task_module.py")
    task = Task(base_name="task_example", path=path, function=lambda x: x)
    task.short_name = "task_module.py::task_example"

    live_manager = LiveManager()
    live = LiveExecution(
        live_manager=live_manager,
        n_entries_in_table=20,
        verbose=1,
        editor_url_scheme="no_link",
    )

    live_manager.start()
    live.update_running_tasks(task)
    live_manager.pause()

    # Test pause removes the table.
    captured = capsys.readouterr()
    assert "Task" not in captured.out
    assert "Outcome" not in captured.out
    assert "task_module.py::task_example" not in captured.out
    assert "running" not in captured.out
    assert "Completed: 0/x" not in captured.out

    live_manager.resume()
    live_manager.start()
    live_manager.stop()

    # Test table with running task.
    captured = capsys.readouterr()
    assert "Task" in captured.out
    assert "Outcome" in captured.out
    assert "task_module.py::task_example" in captured.out
    assert "running" in captured.out
    assert "Completed: 0/x" in captured.out

    live_manager.start()

    report = ExecutionReport(task=task,
                             outcome=TaskOutcome.SUCCESS,
                             exc_info=None)

    live_manager.resume()
    live.update_reports(report)
    live_manager.stop()

    # Test final table with reported outcome.
    captured = capsys.readouterr()
    assert "Task" in captured.out
    assert "Outcome" in captured.out
    assert "task_module.py::task_example" in captured.out
    assert "running" not in captured.out
    assert TaskOutcome.SUCCESS.symbol in captured.out
    assert "Completed: 1/x" in captured.out
Example #3
@pytest.fixture()
def dag():
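    # Build a small linear DAG ".::0" -> ".::1" -> ... -> ".::4" in which every
    # node carries a Task instance.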
    dag = nx.DiGraph()
    for i in range(4):
        dag.add_node(f".::{i}",
                     task=Task(base_name=str(i), path=Path(), function=None))
        dag.add_node(f".::{i + 1}",
                     task=Task(base_name=str(i + 1),
                               path=Path(),
                               function=None))
        dag.add_edge(f".::{i}", f".::{i + 1}")

    return dag
Example #4
def test_remove_marks_from_task(
    markers, marker_name, expected_markers, expected_others
):
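    # remove_marks should strip every marker matching `marker_name` from the task
    # and return the removed markers separately.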
    task = Task(base_name="name", path=Path(), function=None, markers=markers)
    _, result_markers = remove_marks(task, marker_name)
    assert task.markers == expected_others
    assert result_markers == expected_markers
Example #5
def test_format_task_id(
    base_name,
    short_name,
    editor_url_scheme,
    use_short_name,
    relative_to,
    expected,
):
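    # The rendered task id should respect the short name, the editor URL scheme,
    # and the path given in `relative_to`.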
    path = _THIS_FILE

    task = Task(base_name=base_name, path=path, function=task_func)
    if short_name is not None:
        task.short_name = short_name

    result = format_task_id(task, editor_url_scheme, use_short_name, relative_to)
    assert result == expected
Example #6
def test_live_execution_displays_skips_and_persists(capsys, tmp_path, verbose,
                                                    outcome):
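    # Skipped and persisted tasks should only appear in the table when verbosity
    # is at least 2; otherwise the table stays empty and is not rendered.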
    path = tmp_path.joinpath("task_module.py")
    task = Task(base_name="task_example", path=path, function=lambda x: x)
    task.short_name = "task_module.py::task_example"

    live_manager = LiveManager()
    live = LiveExecution(
        live_manager=live_manager,
        n_entries_in_table=20,
        verbose=verbose,
        editor_url_scheme="no_link",
    )

    live_manager.start()
    live.update_running_tasks(task)
    live_manager.pause()

    report = ExecutionReport(task=task, outcome=outcome, exc_info=None)

    live_manager.resume()
    live.update_reports(report)
    live_manager.stop()

    # Test final table with reported outcome.
    captured = capsys.readouterr()

    if verbose < 2 and outcome in (
            TaskOutcome.SKIP,
            TaskOutcome.SKIP_UNCHANGED,
            TaskOutcome.SKIP_PREVIOUS_FAILED,
            TaskOutcome.PERSISTENCE,
    ):
        # An empty table is not shown.
        assert "Task" not in captured.out
        assert "Outcome" not in captured.out

        assert "task_module.py::task_example" not in captured.out
        assert f"│ {outcome.symbol}" not in captured.out
    else:
        assert "Task" in captured.out
        assert "Outcome" in captured.out
        assert "task_module.py::task_example" in captured.out
        assert f"│ {outcome.symbol}" in captured.out

    assert "running" not in captured.out
Example #7
def test_find_shortest_uniquely_identifiable_names_for_tasks(tmp_path):
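    # Create tasks that are unique either by base name, by module name, or by the
    # parent folder, and check that the shortest unambiguous id is produced.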
    tasks = []
    expected = {}

    dir_identifiable_by_base_name = tmp_path.joinpath(
        "identifiable_by_base_name")
    dir_identifiable_by_base_name.mkdir()
    path_identifiable_by_base_name = dir_identifiable_by_base_name.joinpath(
        "t.py")

    for base_name in ("base_name_ident_0", "base_name_ident_1"):
        task = Task(base_name=base_name,
                    path=path_identifiable_by_base_name,
                    function=None)
        tasks.append(task)
        expected[task.name] = "t.py::" + base_name

    dir_identifiable_by_module_name = tmp_path.joinpath(
        "identifiable_by_module")
    dir_identifiable_by_module_name.mkdir()

    for module in ("t.py", "m.py"):
        module_path = dir_identifiable_by_module_name / module
        task = Task(base_name="task_a", path=module_path, function=None)
        tasks.append(task)
        expected[task.name] = module + "::task_a"

    dir_identifiable_by_folder = tmp_path / "identifiable_by_folder"
    dir_identifiable_by_folder_a = dir_identifiable_by_folder / "a"
    dir_identifiable_by_folder_a.mkdir(parents=True)
    dir_identifiable_by_folder_b = dir_identifiable_by_folder / "b"
    dir_identifiable_by_folder_b.mkdir()

    for base_path in (dir_identifiable_by_folder_a,
                      dir_identifiable_by_folder_b):
        module_path = base_path / "t.py"
        task = Task(base_name="task_t", path=module_path, function=None)
        tasks.append(task)
        expected[task.name] = base_path.name + "/t.py::task_t"

    result = _find_shortest_uniquely_identifiable_name_for_tasks(tasks)
    assert result == expected
Example #8
def test_check_if_root_nodes_are_available():
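    # The DAG with the available root node passes the check; adding a missing
    # node makes it raise.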
    dag = nx.DiGraph()

    root = Path.cwd() / "src"

    path = root.joinpath("task_dummy")
    task = Task(base_name="task", path=path, function=None)
    task.path = path
    task.base_name = "task_dummy"
    dag.add_node(task.name, task=task)

    available_node = Node.from_path(root.joinpath("available_node"))
    dag.add_node(available_node.name, node=available_node)
    dag.add_edge(available_node.name, task.name)

    with does_not_raise():
        _check_if_root_nodes_are_available(dag)

    missing_node = Node.from_path(root.joinpath("missing_node"))
    dag.add_node(missing_node.name, node=missing_node)
    dag.add_edge(missing_node.name, task.name)

    with pytest.raises(ResolvingDependenciesError):
        _check_if_root_nodes_are_available(dag)
Example #9
def test_live_execution_displays_subset_of_table(capsys, tmp_path,
                                                 n_entries_in_table):
    path = tmp_path.joinpath("task_module.py")
    running_task = Task(base_name="task_running",
                        path=path,
                        function=lambda x: x)
    running_task.short_name = "task_module.py::task_running"

    live_manager = LiveManager()
    live = LiveExecution(
        live_manager=live_manager,
        n_entries_in_table=n_entries_in_table,
        verbose=1,
        editor_url_scheme="no_link",
        n_tasks=2,
    )

    live_manager.start()
    live.update_running_tasks(running_task)
    live_manager.stop(transient=False)

    captured = capsys.readouterr()
    assert "Task" in captured.out
    assert "Outcome" in captured.out
    assert "::task_running" in captured.out
    assert " running " in captured.out
    assert "Completed: 0/2" in captured.out

    completed_task = Task(base_name="task_completed",
                          path=path,
                          function=lambda x: x)
    completed_task.short_name = "task_module.py::task_completed"
    live.update_running_tasks(completed_task)
    report = ExecutionReport(task=completed_task,
                             outcome=TaskOutcome.SUCCESS,
                             exc_info=None)

    live_manager.resume()
    live.update_reports(report)
    live_manager.stop()

    # Test whether the completed task's report is shown, depending on the table size.
    captured = capsys.readouterr()
    assert "Task" in captured.out
    assert "Outcome" in captured.out
    assert "::task_running" in captured.out
    assert " running " in captured.out
    assert "Completed: 1/2" in captured.out

    if n_entries_in_table == 1:
        assert "task_module.py::task_completed" not in captured.out
        assert "│ ." not in captured.out
    else:
        assert "task_module.py::task_completed" in captured.out
        assert "│ ." in captured.out
Example #10
def test_pytask_resolve_dependencies_create_dag():
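    # Every node in the resulting DAG should belong either to the task itself or
    # to one of its two dependencies.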
    root = Path.cwd() / "src"
    task = Task(
        base_name="task_dummy",
        path=root,
        function=None,
        depends_on={
            0: Node.from_path(root / "node_1"),
            1: Node.from_path(root / "node_2"),
        },
    )

    dag = pytask_resolve_dependencies_create_dag([task])

    assert all(
        any(i in node for i in ["node_1", "node_2", "task"])
        for node in dag.nodes)
Example #11
import os
import time
import uuid
from threading import Thread, current_thread, active_count
import redis
from pytask import Task
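
# Producer loop: every second a new Task is created and pushed onto the "effecty"
# list for a consumer, while a background thread waits for the reply on the
# task's "respond_to" list.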

# Fall back to the standard Redis port when REDIS_PORT is unset.
redis_client = redis.StrictRedis(host='localhost',
                                 port=int(os.environ.get('REDIS_PORT', 6379)),
                                 db=0)


def wait_for_it(list_key):
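    # Block until a reply appears on `list_key`, then report which thread handled it.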
    results = redis_client.brpop(list_key)
    print("survey sez: {}".format(results))
    print("current thread: {}".format(current_thread()))
    print("{} threads total".format(active_count()))


while True:
    time.sleep(1)
    new_task = Task(uuid.uuid1(), uuid.uuid1())
    print("sending {}".format(new_task.json_message()))

    redis_client.publish('incoming', new_task.message["respond_to"])
    redis_client.lpush('effecty', new_task.json_message())

    t = Thread(target=wait_for_it, args=(new_task.message["respond_to"], ))
    t.start()
Example #12
@pytest.mark.unit
def test_node_and_neighbors(dag):
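    # A node's neighborhood consists of the node itself plus its direct
    # predecessor and successor.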
    for i in range(1, 4):
        nodes = sorted(node_and_neighbors(dag, f".::{i}"))
        assert nodes == [f".::{j}" for j in range(i - 1, i + 2)]


@pytest.mark.unit
@pytest.mark.parametrize(
    "tasks, expectation, expected",
    [
        pytest.param(
            [
                Task(
                    base_name="1",
                    path=Path(),
                    function=None,
                    markers=[Mark("try_last", (), {})],
                )
            ],
            does_not_raise(),
            {".::1": -1},
            id="test try_last",
        ),
        pytest.param(
            [
                Task(
                    base_name="1",
                    path=Path(),
                    function=None,
                    markers=[Mark("try_first", (), {})],
                )
Example #13
def test_get_marks_from_task(markers, marker_name, expected):
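    # get_marks should return exactly those markers of the task whose name
    # matches `marker_name`.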
    task = Task(base_name="name", path=Path(), function=None, markers=markers)
    result = get_marks(task, marker_name)
    assert result == expected
Example #14
def test_set_marks_to_task(markers):
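    # set_marks should attach the given markers to the task.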
    task = Task(base_name="name", path=Path(), function=None)
    result = set_marks(task, markers)
    assert result.markers == markers
Example #15
def test_has_mark_for_task(markers, marker_name, expected):
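    # has_mark should report whether the task carries at least one marker with
    # the given name.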
    task = Task(base_name="name", path=Path(), function=None, markers=markers)
    result = has_mark(task, marker_name)
    assert result is expected
Example #16
def test_live_execution_skips_do_not_crowd_out_displayed_tasks(
        capsys, tmp_path):
    path = tmp_path.joinpath("task_module.py")
    task = Task(base_name="task_example", path=path, function=lambda x: x)
    task.short_name = "task_module.py::task_example"

    live_manager = LiveManager()
    live = LiveExecution(
        live_manager=live_manager,
        n_entries_in_table=20,
        verbose=1,
        editor_url_scheme="no_link",
    )

    live_manager.start()
    live.update_running_tasks(task)
    live_manager.stop()

    # Test table with running task.
    captured = capsys.readouterr()
    assert "Task" in captured.out
    assert "Outcome" in captured.out
    assert "task_module.py::task_example" in captured.out
    assert "running" in captured.out

    # Add one displayed report and many more reports that are not displayed to
    # crowd out the valid one.
    successful_task = Task(base_name="task_success",
                           path=path,
                           function=lambda x: x)
    successful_task.short_name = "task_module.py::task_success"

    tasks = []
    for i in range(25):
        skipped_task = Task(base_name=f"task_skip_{i}",
                            path=path,
                            function=lambda x: x)
        skipped_task.short_name = f"task_module.py::task_skip_{i}"
        tasks.append(skipped_task)

    live_manager.start()
    live.update_running_tasks(successful_task)
    for task in tasks:
        live.update_running_tasks(task)
    live_manager.stop()

    captured = capsys.readouterr()
    assert "running" in captured.out
    assert "task_success" in captured.out
    for i in range(25):
        assert f"task_skip_{i}" in captured.out

    live_manager.resume()
    report = ExecutionReport(task=successful_task,
                             outcome=TaskOutcome.SUCCESS,
                             exc_info=None)
    live.update_reports(report)
    for task in tasks:
        report = ExecutionReport(task=task,
                                 outcome=TaskOutcome.SKIP,
                                 exc_info=None)
        live.update_reports(report)
    live_manager.stop()

    # Test final table with reported outcome.
    captured = capsys.readouterr()
    assert "Task" in captured.out
    assert "Outcome" in captured.out
    assert "task_module.py::task_example" in captured.out
    assert "task_module.py::task_success" in captured.out
    assert "running" in captured.out
    assert TaskOutcome.SUCCESS.symbol in captured.out
    assert "task_skip" not in captured.out