import ast
import json
import time

import pandas as pd

# init_gear, run_gear_w_config, purge_reader_group, and DATA_ROOT are assumed
# to be provided by this suite's shared fixtures/utilities.


def test_all_errors():
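    """Provision readers, then run assign-batch-cases with a config intended
    to trigger every validation error; the batch job is expected to fail."""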
    fw_client, assign_readers_gear = init_gear("assign-readers")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config.json",
        clear_config=True,
    )

    assert job.state == "complete"

    fw_client, assign_cases_gear = init_gear("assign-batch-cases")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_cases_gear,
        DATA_ROOT / "assign_batch_cases/all_errs_config.json",
        clear_config=True,
    )

    assert job.state == "failed"

    # Cleanup
    purge_reader_group(fw_client)


def test_valid_config(tmpdir):
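    """Run assign-readers and assign-cases with valid configs, then verify the
    exported CSVs against the reader projects and master-project sessions."""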
    fw_client, assign_readers_gear = init_gear("assign-readers")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config.json",
        clear_config=True,
    )

    assert job.state == "complete"

    fw_client, assign_cases_gear = init_gear("assign-cases")

    job, session, _, _ = run_gear_w_config(
        fw_client,
        assign_cases_gear,
        DATA_ROOT / "assign_cases/config.json",
        clear_input=True,
    )

    assert job.state == "complete"

    session = session.reload()
    analysis = [
        analysis for analysis in session.analyses if analysis.job.id == job.id
    ].pop()

    for an_file in analysis.files:
        analysis.download_file(an_file.name, tmpdir / an_file.name)

    # Test exported_data.csv
    exported_df = pd.read_csv(tmpdir / "exported_data.csv")
    for i in exported_df.index:
        assert fw_client.lookup(exported_df.loc[i, "export_path"])

    # Test reader_project_case_data.csv
    reader_df = pd.read_csv(tmpdir / "reader_project_case_data.csv")
    for i in reader_df.index:
        reader_project = fw_client.get(reader_df.id[i]).reload()
        assert reader_project.info["project_features"][
            "assignments"
        ] == ast.literal_eval(reader_df.assignments[i])
        assert reader_df.max_cases[i] >= reader_df.num_assignments[i]

    # Test master_project_case_data.csv
    cases_df = pd.read_csv(tmpdir / "master_project_case_data.csv")
    for i in cases_df.index:
        case_session = fw_client.get(cases_df.id[i]).reload()
        assert case_session.info["session_features"]["assignments"] == ast.literal_eval(
            cases_df.assignments[i]
        )
        assert cases_df.case_coverage[i] >= cases_df.assigned_count[i]

    assert cases_df.assigned_count.sum() == reader_df.num_assignments.sum()

    # Cleanup
    purge_reader_group(fw_client)


def test_indiv_assignment(tmpdir):
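    """Exercise assign-single-case: a first assignment succeeds, while
    re-assigning the same session, assigning a case already at case_coverage,
    and assigning to a reader already at max_cases are each expected to fail."""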
    assign_readers_max_cases(tmpdir)

    # assign single reader
    fw_client, assign_readers_gear = init_gear("assign-readers")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config.json",
        clear_input=True,
    )
    assert job.state == "complete"

    # Assign an individual case to that reader
    fw_client, assign_single_case_gear = init_gear("assign-single-case")
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config.json",
        clear_input=True,
    )

    assert job.state == "complete"

    # attempt to re-assign the same session
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config.json",
        clear_input=True,
    )

    assert job.state == "failed"

    # Attempt to assign a case already at case_coverage to a new reader
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config_full_case.json",
        clear_input=True,
    )
    assert job.state == "failed"

    # Attempt to assign a case to a reader already at max_cases
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config_full_reader.json",
        clear_input=True,
    )

    assert job.state == "failed"

    # Cleanup
    purge_reader_group(fw_client)


def test_resolve_tie(tmpdir):
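    """Exercise tie-breaking in assign-single-case with readers at max_cases:
    the first tie-breaker config completes, while the later configs are
    expected to fail."""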
    assign_readers_max_cases(tmpdir)

    # assign single reader
    fw_client, assign_readers_gear = init_gear("assign-readers")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config.json",
        clear_input=True,
    )

    fw_client, assign_single_case_gear = init_gear("assign-single-case")
    # Assign assigned_cases=3=case_coverage to a new reader.
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config_tie_breaker_1.json",
        clear_input=True,
    )

    assert job.state == "complete"

    # Assign assigned_cases=4=case_coverage to a new reader.
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config_nancy.json",
        clear_input=True,
    )

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config_tie_breaker_2.json",
        clear_input=True,
    )

    assert job.state == "failed"

    # Assign assigned_cases < 3 to a new reader
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config_tie_breaker_3.json",
        clear_input=True,
    )

    assert job.state == "failed"

    # Cleanup
    purge_reader_group(fw_client)


def assign_readers_max_cases(tmpdir):
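    """Helper: provision readers, run assign-cases, and inject assessment data
    into the first five cases of each reader project, filling readers to
    max_cases."""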
    fw_client, assign_readers_gear = init_gear("assign-readers")

    _, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config.json",
        clear_config=True,
    )

    fw_client, assign_cases_gear = init_gear("assign-cases")

    job, destination, _, _ = run_gear_w_config(
        fw_client,
        assign_cases_gear,
        DATA_ROOT / "assign_cases/config.json",
        clear_input=True,
    )

    # Wait for the cases to be indexed
    time.sleep(30)

    # Inject assessments into the first five cases of each reader project
    destination = destination.reload()
    analysis = [
        analysis for analysis in destination.analyses
        if analysis.job.id == job.id
    ].pop()

    with open(DATA_ROOT / "gather_cases/measurements.json", "r") as f:
        measurements = json.load(f)
    assessment_keys = [
        "no_tear",
        "low_partial_tear",
        "high_partial_tear",
        "full_tear",
        "full_contig",
    ]

    reader_case_data_csv = "reader_project_case_data.csv"
    analysis.download_file(reader_case_data_csv, tmpdir / reader_case_data_csv)

    reader_df = pd.read_csv(tmpdir / reader_case_data_csv)
    for i in reader_df.index:
        reader_project = fw_client.get(reader_df.id[i]).reload()
        project_features = reader_project.info["project_features"]
        for j in range(5):
            assignment = project_features["assignments"][j]
            dest_session = fw_client.get(assignment["dest_session"])
            dest_session.update_info(measurements[assessment_keys[j]])


def test_apply_consensus_assignment(tmpdir):
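    """Apply a consensus assessment to a reader: a valid application
    completes, while a non-existent assessment or a reader without the
    indicated case is expected to fail."""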
    assign_readers_max_cases(tmpdir)

    fw_client, assign_single_case_gear = init_gear("assign-single-case")
    # test valid and invalid application of consensus assignment

    # Apply existing consensus assessment to particular reader
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config_consensus_1.json",
        clear_input=True,
    )

    assert job.state == "complete"

    # Attempt to apply non-existent consensus assessment to particular reader
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config_consensus_2.json",
        clear_input=True,
    )

    assert job.state == "failed"

    # Attempt to apply consensus assessment to a reader without the indicated case
    fw_client, assign_readers_gear = init_gear("assign-readers")

    _, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config.json",
        clear_input=True,
    )

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config_consensus_3.json",
        clear_input=True,
    )

    assert job.state == "failed"

    # Cleanup
    purge_reader_group(fw_client)


def test_no_readers():
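    """assign-batch-cases is expected to fail when no reader projects exist."""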
    fw_client, assign_cases_gear = init_gear("assign-batch-cases")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_cases_gear,
        DATA_ROOT / "assign_batch_cases/no_errors_config.json",
        clear_config=True,
    )

    assert job.state == "failed"


def test_no_readers_single_case():
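    """assign-single-case is expected to fail when no reader projects exist."""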
    fw_client, assign_cases_gear = init_gear("assign-single-case")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_cases_gear,
        DATA_ROOT / "assign_single_case/config.json",
        clear_input=True,
    )

    assert job.state == "failed"


def test_valid_reader():
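    """Assigning a case to an established reader completes; assigning to an
    unknown reader email is expected to fail."""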

    # assign reader
    fw_client, assign_readers_gear = init_gear("assign-readers")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config.json",
        clear_input=True,
    )

    # Assign to valid reader
    fw_client, assign_single_case_gear = init_gear("assign-single-case")
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config.json",
        clear_input=True,
    )

    assert job.state == "complete"

    # Assign to invalid reader
    config = {
        "reader_email": "*****@*****.**",
        "assignment_reason": "Individual Assignment",
    }

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_single_case_gear,
        DATA_ROOT / "assign_single_case/config.json",
        clear_input=True,
        replace_config=config,
    )

    assert job.state == "failed"

    # Cleanup
    purge_reader_group(fw_client)


def test_gather_cases_no_readers():
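    """gather-cases is expected to fail when no reader projects exist."""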
    fw_client, gather_cases_gear = init_gear("gather-cases")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        gather_cases_gear,
        DATA_ROOT / "assign_readers/config.json",
        clear_config=True,
        clear_input=True,
    )
    # NOTE: this is not failing with respect to non-reader projects...
    assert job.state == "failed"


def test_each_error_w_one_success(tmpdir):
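    """Run assign-batch-cases with a config in which one assignment succeeds
    and the remaining rows each trip a distinct error; verify the per-row
    messages recorded in batch_results.csv."""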
    fw_client, assign_readers_gear = init_gear("assign-readers")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config.json",
        clear_config=True,
    )

    assert job.state == "complete"

    # Assign one more reader for testing case_coverage
    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config_nancy.json",
        clear_input=True,
    )

    assert job.state == "complete"

    fw_client, assign_cases_gear = init_gear("assign-batch-cases")

    job, container, _, _ = run_gear_w_config(
        fw_client,
        assign_cases_gear,
        DATA_ROOT / "assign_batch_cases/one_success_else_err_config.json",
        clear_config=True,
    )

    assert job.state == "complete"

    # Grab results file
    container = container.reload()

    analysis = [
        analysis for analysis in container.analyses
        if analysis.job.id == job.id
    ].pop()

    batch_results_csv = "batch_results.csv"
    analysis.download_file(batch_results_csv, tmpdir / batch_results_csv)
    batch_results_df = pd.read_csv(tmpdir / batch_results_csv)

    reader_case_data_csv = "reader_project_case_data.csv"
    analysis.download_file(reader_case_data_csv, tmpdir / reader_case_data_csv)
    reader_case_data_df = pd.read_csv(tmpdir / reader_case_data_csv)

    # Valid Session
    indx = 15
    session_id = batch_results_df.loc[indx, "session_id"]
    reader_email = batch_results_df.loc[indx, "reader_email"]

    message = (
        f"Session with id ({session_id}) is not found in this Master Project. "
        f"Proceeding without making this assignment to reader ({reader_email})."
    )

    assert bool(batch_results_df.loc[indx, "passed"]) is False
    assert batch_results_df.loc[indx, "message"] == message

    # Valid Reader Project
    indx = 16
    reader_email = batch_results_df.loc[indx, "reader_email"]
    message = (
        f"The reader ({reader_email}) has not been established. "
        "Please run `assign-readers` to establish a project for this reader")
    assert bool(batch_results_df.loc[indx, "passed"]) is False
    assert batch_results_df.loc[indx, "message"] == message

    # Existing Session in Reader Project
    indx = 17
    session_id = batch_results_df.loc[indx, "session_id"]
    session = fw_client.get(session_id)
    session_label = session.label
    reader_email = batch_results_df.loc[indx, "reader_email"]
    message = (
        f"Selected session ({session_label}) has already been assigned to "
        f"reader ({reader_email}).")
    assert bool(batch_results_df.loc[indx, "passed"]) is False
    assert batch_results_df.loc[indx, "message"] == message

    # Reader is at capacity (num_assignments == max_cases)
    indx = 18

    reader_email = batch_results_df.loc[indx, "reader_email"]
    max_cases = reader_case_data_df[reader_case_data_df.reader_id ==
                                    reader_email].max_cases.iloc[0]
    message = (f"Cannot assign more than {max_cases} cases to "
               f"reader ({reader_email}). "
               "Consider increasing max_cases for this reader "
               "or choosing another reader.")
    assert bool(batch_results_df.loc[indx, "passed"]) is False
    assert batch_results_df.loc[indx, "message"] == message

    # Session at case_coverage
    indx = 19

    session_id = batch_results_df.loc[indx, "session_id"]
    session = fw_client.get(session_id)
    session_label = session.label
    case_coverage = 3
    message = (f"Assigning this case ({session_label}) exceeds "
               f"case_coverage ({case_coverage}) for this case."
               "Assignment will not proceed.")

    assert bool(batch_results_df.loc[indx, "passed"]) is False
    assert batch_results_df.loc[indx, "message"] == message

    # Cleanup
    purge_reader_group(fw_client)


def test_pipeline_injecting_assessment(tmpdir):
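    """End-to-end pipeline: assign readers and cases, inject assessments
    (including an imagePath-error variant), run gather-cases, and verify the
    summary CSV plus the migrated ohifViewer data."""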
    fw_client, assign_readers_gear = init_gear("assign-readers")

    job, _, _, _ = run_gear_w_config(
        fw_client,
        assign_readers_gear,
        DATA_ROOT / "assign_readers/config.json",
        clear_config=True,
    )

    assert job.state == "complete"

    fw_client, assign_cases_gear = init_gear("assign-cases")

    job, session, _, _ = run_gear_w_config(
        fw_client,
        assign_cases_gear,
        DATA_ROOT / "assign_cases/config.json",
        clear_input=True,
    )

    assert job.state == "complete"

    # Wait for the cases to be indexed
    time.sleep(30)

    # Inject assessments into the first five cases of each reader project
    session = session.reload()
    analysis = [
        analysis for analysis in session.analyses if analysis.job.id == job.id
    ].pop()

    with open(DATA_ROOT / "gather_cases/measurements.json", "r") as f:
        measurements = json.load(f)
    assessment_keys = [
        "no_tear",
        "low_partial_tear",
        "high_partial_tear",
        "full_tear",
        "full_contig",
        "full_tear_imagePath_error",
    ]

    reader_case_data_csv = "reader_project_case_data.csv"
    analysis.download_file(reader_case_data_csv, tmpdir / reader_case_data_csv)

    reader_df = pd.read_csv(tmpdir / reader_case_data_csv)
    for i in reader_df.index:
        reader_project = fw_client.get(reader_df.id[i]).reload()
        project_features = reader_project.info["project_features"]
        for j in range(5):
            assignment = project_features["assignments"][j]
            dest_session = fw_client.get(assignment["dest_session"])
            dest_session.update_info(measurements[assessment_keys[j]])

    # Inject the imagePath-error variant into the last case visited above
    # (dest_session still points at the final reader's fifth case)
    dest_session.update_info(measurements[assessment_keys[5]])

    # Run the gather-cases gear
    fw_client, gather_cases_gear = init_gear("gather-cases")

    job, session, _, _ = run_gear_w_config(
        fw_client,
        gather_cases_gear,
        DATA_ROOT / "gather_cases/config.json",
        clear_config=True,
        clear_input=True,
    )

    assert job.state == "complete"

    # check the results
    session = session.reload()
    analysis = [
        analysis for analysis in session.analyses if analysis.job.id == job.id
    ].pop()

    summary_data_csv = "master_project_summary_data.csv"
    analysis.download_file(summary_data_csv, tmpdir / summary_data_csv)

    summary_data_df = pd.read_csv(tmpdir / summary_data_csv)
    # Resolve the master project from the first source session
    source_session = fw_client.get(summary_data_df.id[0])
    master_project = fw_client.get(source_session.parents.project).reload()
    project_features = master_project.info["project_features"]
    for i in summary_data_df.index:
        source_session = fw_client.get(summary_data_df.id[i])
        session_features = source_session.info["session_features"]
        case_state = project_features["case_states"][i]
        # check master project features
        assert summary_data_df.case_coverage[i] == case_state["case_coverage"]
        assert summary_data_df.assigned[i] == case_state["assigned"]
        assert summary_data_df.unassigned[i] == case_state["unassigned"]
        assert summary_data_df.classified[i] == case_state["classified"]
        assert summary_data_df.measured[i] == case_state["measured"]
        assert summary_data_df.completed[i] == case_state["completed"]
        # check source session features
        assert summary_data_df.assigned[i] == session_features[
            "assigned_count"]
        assert summary_data_df.case_coverage[i] == session_features[
            "case_coverage"]

    # Ensure ohifViewer data was migrated to the correct session
    for i in reader_df.index:
        csv_assignments = ast.literal_eval(reader_df.assignments[i])
        for j in range(len(csv_assignments)):
            print(reader_df.reader_id[i], assessment_keys[j])
            source_session = fw_client.get(
                csv_assignments[j]["source_session"]).reload()
            assignments = source_session.info["session_features"][
                "assignments"]
            reader_project_id = reader_df.id[i]
            assignment = [
                assignment for assignment in assignments
                if assignment["project_id"] == reader_project_id
            ].pop()
            if assignment.get("read"):
                read = measurements[assessment_keys[j]]["ohifViewer"]["read"]
                assert assignment["read"] == read
            if assignment.get("measurements"):
                # The final reader's fifth case (j == 4) received the
                # imagePath-error variant, so compare against key 5 instead
                if reader_project_id == dest_session.parents[
                        "project"] and j == 4:
                    k = 5
                else:
                    k = j
                measurement = measurements[
                    assessment_keys[k]]["ohifViewer"]["measurements"]
                assert assignment["measurements"] == measurement

    # Cleanup
    purge_reader_group(fw_client)