import json
import os

import pytest
from PIL import Image
from sklearn import calibration, metrics

# NOTE: the dvclive-internal import paths below are assumed from how the
# names are used in these tests; adjust them to the actual project layout.
from dvclive import Live
from dvclive.data import Image as LiveImage, Scalar
from dvclive.data.plot import ConfusionMatrix, Plot
from dvclive.report import (
    get_image_renderers,
    get_plot_renderers,
    get_scalar_renderers,
)


def test_dump_kwargs(tmp_dir, y_true_y_pred_y_score, mocker):
    live = Live()
    y_true, _, y_score = y_true_y_pred_y_score
    spy = mocker.spy(metrics, "roc_curve")

    live.log_plot("roc", y_true, y_score, drop_intermediate=True)

    # Extra keyword arguments are forwarded to the underlying sklearn call
    spy.assert_called_once_with(y_true, y_score, drop_intermediate=True)


def test_step_exception(tmp_dir, y_true_y_pred_y_score):
    live = Live()
    out = tmp_dir / live.dir / Plot.subfolder
    y_true, y_pred, _ = y_true_y_pred_y_score

    live.log_plot("confusion_matrix", y_true, y_pred)
    assert (out / "confusion_matrix.json").exists()

    # Plots are logged once per run, so advancing the step must fail
    with pytest.raises(NotImplementedError):
        live.next_step()


def test_log_prc_curve(tmp_dir, y_true_y_pred_y_score, mocker):
    live = Live()
    out = tmp_dir / live.dir / Plot.subfolder
    y_true, _, y_score = y_true_y_pred_y_score
    spy = mocker.spy(metrics, "precision_recall_curve")

    live.log_plot("precision_recall", y_true, y_score)

    spy.assert_called_once_with(y_true, y_score)
    assert (out / "precision_recall.json").exists()


def test_log_calibration_curve(tmp_dir, y_true_y_pred_y_score, mocker):
    live = Live()
    out = tmp_dir / live.dir / Plot.subfolder
    y_true, _, y_score = y_true_y_pred_y_score
    spy = mocker.spy(calibration, "calibration_curve")

    live.log_plot("calibration", y_true, y_score)

    spy.assert_called_once_with(y_true, y_score)
    assert (out / "calibration.json").exists()


def test_cleanup(tmp_dir, y_true_y_pred_y_score):
    live = Live()
    out = tmp_dir / live.dir / Plot.subfolder
    y_true, y_pred, _ = y_true_y_pred_y_score

    live.log_plot("confusion_matrix", y_true, y_pred)
    assert (out / "confusion_matrix.json").exists()

    # Starting a new run removes the plot outputs of the previous one
    Live()
    assert not (tmp_dir / live.dir / Plot.subfolder).exists()


def test_log_confusion_matrix(tmp_dir, y_true_y_pred_y_score):
    live = Live()
    out = tmp_dir / live.dir / Plot.subfolder
    y_true, y_pred, _ = y_true_y_pred_y_score

    live.log_plot("confusion_matrix", y_true, y_pred)

    # The confusion matrix is dumped as a list of {actual, predicted} rows
    cm = json.loads((out / "confusion_matrix.json").read_text())
    assert isinstance(cm, list)
    assert isinstance(cm[0], dict)
    assert cm[0]["actual"] == str(y_true[0])
    assert cm[0]["predicted"] == str(y_pred[0])


def test_make_report_open(tmp_dir, mocker):
    # By default the report is opened in the browser, but only once per run
    mocked_open = mocker.patch("webbrowser.open")
    live = Live()
    live.log_plot("confusion_matrix", [0, 0, 1, 1], [1, 0, 0, 1])
    live.make_report()
    live.make_report()
    mocked_open.assert_called_once()

    # auto_open=False disables opening the browser
    mocked_open = mocker.patch("webbrowser.open")
    live = Live(auto_open=False)
    live.log_plot("confusion_matrix", [0, 0, 1, 1], [1, 0, 0, 1])
    live.make_report()
    assert not mocked_open.called

    # report=None disables report generation altogether
    mocked_open = mocker.patch("webbrowser.open")
    live = Live(report=None)
    live.log("foo", 1)
    live.next_step()
    assert not mocked_open.called


def test_get_renderers(tmp_dir, mocker):
    live = Live()

    for i in range(2):
        live.log("foo", i)
        img = Image.new("RGB", (10, 10), (i, i, i))
        live.log_image("image.png", img)
        live.next_step()

    live.set_step(None)
    live.log_plot("confusion_matrix", [0, 0, 1, 1], [1, 0, 0, 1])

    image_renderers = get_image_renderers(
        tmp_dir / live.dir / LiveImage.subfolder
    )
    assert len(image_renderers) == 2
    image_renderers = sorted(
        image_renderers, key=lambda x: x.datapoints[0]["rev"]
    )
    for n, renderer in enumerate(image_renderers):
        assert renderer.datapoints == [
            {"src": mocker.ANY, "rev": os.path.join(str(n), "image.png")}
        ]

    scalar_renderers = get_scalar_renderers(
        tmp_dir / live.dir / Scalar.subfolder
    )
    assert len(scalar_renderers) == 1
    assert scalar_renderers[0].datapoints == [
        {"foo": "0", "rev": "workspace", "step": "0", "timestamp": mocker.ANY},
        {"foo": "1", "rev": "workspace", "step": "1", "timestamp": mocker.ANY},
    ]

    plot_renderers = get_plot_renderers(tmp_dir / live.dir / Plot.subfolder)
    assert len(plot_renderers) == 1
    assert plot_renderers[0].datapoints == [
        {"actual": "0", "rev": "workspace", "predicted": "1"},
        {"actual": "0", "rev": "workspace", "predicted": "0"},
        {"actual": "1", "rev": "workspace", "predicted": "0"},
        {"actual": "1", "rev": "workspace", "predicted": "1"},
    ]
    assert plot_renderers[0].properties == ConfusionMatrix.get_properties()
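

# The tests above rely on two fixtures that normally live in conftest.py.
# The definitions below are a minimal sketch, assuming `tmp_dir` only needs
# to chdir into a fresh temporary directory and `y_true_y_pred_y_score` only
# needs to return (labels, predictions, scores) for a binary classification
# problem; the project's real conftest implementation may differ.
@pytest.fixture
def tmp_dir(tmp_path, monkeypatch):
    # Run each test inside an isolated working directory
    monkeypatch.chdir(tmp_path)
    return tmp_path


@pytest.fixture
def y_true_y_pred_y_score():
    y_true = [0, 0, 0, 0, 1, 1, 1, 1]
    y_score = [0.1, 0.4, 0.35, 0.8, 0.2, 0.7, 0.6, 0.9]
    # Threshold the scores at 0.5 to derive hard predictions
    y_pred = [int(s > 0.5) for s in y_score]
    return y_true, y_pred, y_score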