Example 1
def _init_endpoint_record(graph_server, model: V2ModelServer):
    logger.info("Initializing endpoint records")
    try:
        project, uri, tag, hash_key = parse_versioned_object_uri(
            graph_server.function_uri)

        if model.version:
            versioned_model_name = f"{model.name}:{model.version}"
        else:
            versioned_model_name = f"{model.name}:latest"

        model_endpoint = ModelEndpoint(
            metadata=ModelEndpointMetadata(project=project,
                                           labels=model.labels),
            spec=ModelEndpointSpec(
                function_uri=graph_server.function_uri,
                model=versioned_model_name,
                model_class=model.__class__.__name__,
                model_uri=model.model_path,
                stream_path=config.model_endpoint_monitoring.store_prefixes.default.format(
                    project=project, kind="stream"
                ),
                active=True,
            ),
            status=ModelEndpointStatus(),
        )

        db = mlrun.get_run_db()

        db.create_or_patch_model_endpoint(
            project=project,
            endpoint_id=model_endpoint.metadata.uid,
            model_endpoint=model_endpoint,
        )
    except Exception as e:
        logger.error("Failed to create endpoint record", exc=e)
Example 2
def test_list_endpoints(db: Session, client: TestClient):
    endpoints_in = [_mock_random_endpoint("active") for _ in range(5)]

    for endpoint in endpoints_in:
        _write_endpoint_to_kv(endpoint)

    response = client.get(
        url="/api/projects/test/model-endpoints",
        headers={"X-V3io-Session-Key": _get_access_key()},
    )

    endpoints_out = [
        ModelEndpoint(**e["endpoint"]) for e in response.json()["endpoints"]
    ]

    endpoints_in_set = {
        json.dumps(e.dict(), sort_keys=True)
        for e in endpoints_in
    }
    endpoints_out_set = {
        json.dumps(e.dict(), sort_keys=True)
        for e in endpoints_out
    }
    endpoints_intersect = endpoints_in_set.intersection(endpoints_out_set)

    assert len(endpoints_intersect) == 5
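
The comparison works because json.dumps(..., sort_keys=True) produces a canonical string per endpoint, so structurally equal dicts map to the same set member regardless of key order. A minimal sketch:

import json

a = {"model": "m:v1", "labels": {"x": 1}}
b = {"labels": {"x": 1}, "model": "m:v1"}
assert json.dumps(a, sort_keys=True) == json.dumps(b, sort_keys=True)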
Example 3
def test_get_endpoint_metrics(db: Session, client: TestClient):
    path = config.model_endpoint_monitoring.store_prefixes.default.format(
        project=TEST_PROJECT, kind=EVENTS
    )
    _, container, path = parse_model_endpoint_store_prefix(path)

    frames = get_frames_client(
        token=_get_access_key(), container=container, address=config.v3io_framesd,
    )

    start = datetime.utcnow()

    for i in range(5):
        endpoint = _mock_random_endpoint()
        write_endpoint_to_kv(_get_access_key(), endpoint)
        frames.create(backend="tsdb", table=path, rate="10/m", if_exists=1)

        total = 0

        dfs = []

        for i in range(10):
            count = randint(1, 10)
            total += count
            data = {
                "predictions_per_second_count_1s": count,
                "endpoint_id": endpoint.metadata.uid,
                "timestamp": start - timedelta(minutes=10 - i),
            }
            df = pd.DataFrame(data=[data])
            dfs.append(df)

        frames.write(
            backend="tsdb",
            table=path,
            dfs=dfs,
            index_cols=["timestamp", "endpoint_id"],
        )

        response = client.get(
            url=f"/api/projects/{TEST_PROJECT}/model-endpoints/{endpoint.metadata.uid}?metric=predictions_per_second_count_1s",  # noqa
            headers={"X-V3io-Session-Key": _get_access_key()},
        )

        endpoint = ModelEndpoint(**response.json())

        assert len(endpoint.status.metrics) > 0

        predictions_per_second = endpoint.status.metrics[
            "predictions_per_second_count_1s"
        ]

        assert predictions_per_second.name == "predictions_per_second_count_1s"

        response_total = sum(m[1] for m in predictions_per_second.values)

        assert total == response_total
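
The final assertion relies on each metric exposing its samples as (timestamp, value) pairs, which is why m[1] is summed. A hedged sketch of that assumed shape (the values below are illustrative, not taken from a real response):

# Assumed structure of endpoint.status.metrics["predictions_per_second_count_1s"].values
values = [("2021-01-01T00:00:00", 7), ("2021-01-01T00:00:10", 3)]
assert sum(v for _, v in values) == 10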
Example 4
def _mock_random_endpoint(state: Optional[str] = None) -> ModelEndpoint:
    def random_labels():
        return {f"{choice(string.ascii_letters)}": randint(0, 100) for _ in range(1, 5)}

    return ModelEndpoint(
        metadata=ModelEndpointMetadata(project=TEST_PROJECT, labels=random_labels()),
        spec=ModelEndpointSpec(
            function_uri=f"test/function_{randint(0, 100)}:v{randint(0, 100)}",
            model=f"model_{randint(0, 100)}:v{randint(0, 100)}",
            model_class="classifier",
        ),
        status=ModelEndpointStatus(state=state),
    )
Example 5
def _mock_random_endpoint(state: str = "") -> ModelEndpoint:
    return ModelEndpoint(
        metadata=ModelEndpointMetadata(
            project="test",
            tag=f"v{randint(0, 100)}",
            labels={
                f"{choice(string.ascii_letters)}": randint(0, 100) for _ in range(1, 5)
            },
        ),
        spec=ModelEndpointSpec(
            model=f"model_{randint(0, 100)}",
            function=f"function_{randint(0, 100)}",
            model_class="classifier",
        ),
        status=ObjectStatus(state=state),
    )
Example 6
def test_list_endpoints(db: Session, client: TestClient):
    endpoints_in = [_mock_random_endpoint("testing") for _ in range(5)]

    for endpoint in endpoints_in:
        write_endpoint_to_kv(_get_access_key(), endpoint)

    response = client.get(
        url=f"/api/projects/{TEST_PROJECT}/model-endpoints",
        headers={"X-V3io-Session-Key": _get_access_key()},
    )

    endpoints_out = [ModelEndpoint(**e) for e in response.json()["endpoints"]]

    in_endpoint_ids = set(map(lambda e: e.metadata.uid, endpoints_in))
    out_endpoint_ids = set(map(lambda e: e.metadata.uid, endpoints_out))

    endpoints_intersect = in_endpoint_ids.intersection(out_endpoint_ids)
    assert len(endpoints_intersect) == 5
Example 7
def _init_endpoint_record(context, model_logger: Optional[_ModelLogPusher]):
    if model_logger is None or isinstance(model_logger.output_stream,
                                          _DummyStream):
        return

    try:
        project, uri, tag, hash_key = parse_versioned_object_uri(
            model_logger.function_uri)

        if model_logger.model.version:
            model = f"{model_logger.model.name}:{model_logger.model.version}"
        else:
            model = model_logger.model.name

        model_endpoint = ModelEndpoint(
            metadata=ModelEndpointMetadata(project=project,
                                           labels=model_logger.model.labels),
            spec=ModelEndpointSpec(
                function_uri=model_logger.function_uri,
                model=model,
                model_class=model_logger.model.__class__.__name__,
                model_uri=model_logger.model.model_path,
                stream_path=model_logger.stream_path,
                active=True,
            ),
            status=ModelEndpointStatus(),
        )

        db = mlrun.get_run_db()

        db.create_or_patch(
            project=project,
            endpoint_id=model_endpoint.metadata.uid,
            model_endpoint=model_endpoint,
        )
    except Exception as e:
        logger.error("Failed to create endpoint record", exc=e)
Example 8
    def get_endpoint(
        access_key: str,
        project: str,
        endpoint_id: str,
        metrics: Optional[List[str]] = None,
        start: str = "now-1h",
        end: str = "now",
        feature_analysis: bool = False,
    ) -> ModelEndpoint:
        """
        Returns a ModelEndpoint object with additional metrics and feature-related data.

        :param access_key: V3IO access key for managing user permissions
        :param project: The name of the project
        :param endpoint_id: The id of the model endpoint
        :param metrics: A list of metrics to return for each endpoint, read more in 'TimeMetric'
        :param start: The start time of the metrics
        :param end: The end time of the metrics
        :param feature_analysis: When True, the base feature statistics and current feature statistics will be added to
        the output of the resulting object
        """

        logger.info(
            "Getting model endpoint record from kv",
            endpoint_id=endpoint_id,
        )

        client = get_v3io_client(endpoint=config.v3io_api)

        path = config.model_endpoint_monitoring.store_prefixes.default.format(
            project=project, kind=ENDPOINTS)
        _, container, path = parse_model_endpoint_store_prefix(path)

        endpoint = client.kv.get(
            container=container,
            table_path=path,
            key=endpoint_id,
            access_key=access_key,
            raise_for_status=RaiseForStatus.never,
        )
        endpoint = endpoint.output.item

        if not endpoint:
            raise MLRunNotFoundError(f"Endpoint {endpoint_id} not found")

        labels = endpoint.get("labels")

        feature_names = endpoint.get("feature_names")
        feature_names = _json_loads_if_not_none(feature_names)

        label_names = endpoint.get("label_names")
        label_names = _json_loads_if_not_none(label_names)

        feature_stats = endpoint.get("feature_stats")
        feature_stats = _json_loads_if_not_none(feature_stats)

        current_stats = endpoint.get("current_stats")
        current_stats = _json_loads_if_not_none(current_stats)

        drift_measures = endpoint.get("drift_measures")
        drift_measures = _json_loads_if_not_none(drift_measures)

        monitor_configuration = endpoint.get("monitor_configuration")
        monitor_configuration = _json_loads_if_not_none(monitor_configuration)

        endpoint = ModelEndpoint(
            metadata=ModelEndpointMetadata(
                project=endpoint.get("project"),
                labels=_json_loads_if_not_none(labels),
                uid=endpoint_id,
            ),
            spec=ModelEndpointSpec(
                function_uri=endpoint.get("function_uri"),
                model=endpoint.get("model"),
                model_class=endpoint.get("model_class") or None,
                model_uri=endpoint.get("model_uri") or None,
                feature_names=feature_names or None,
                label_names=label_names or None,
                stream_path=endpoint.get("stream_path") or None,
                algorithm=endpoint.get("algorithm") or None,
                monitor_configuration=monitor_configuration or None,
                active=endpoint.get("active") or None,
            ),
            status=ModelEndpointStatus(
                state=endpoint.get("state") or None,
                feature_stats=feature_stats or None,
                current_stats=current_stats or None,
                first_request=endpoint.get("first_request") or None,
                last_request=endpoint.get("last_request") or None,
                accuracy=endpoint.get("accuracy") or None,
                error_count=endpoint.get("error_count") or None,
                drift_status=endpoint.get("drift_status") or None,
            ),
        )

        if feature_analysis and feature_names:
            endpoint_features = get_endpoint_features(
                feature_names=feature_names,
                feature_stats=feature_stats,
                current_stats=current_stats,
            )
            if endpoint_features:
                endpoint.status.features = endpoint_features
                endpoint.status.drift_measures = drift_measures

        if metrics:
            endpoint_metrics = get_endpoint_metrics(
                access_key=access_key,
                project=project,
                endpoint_id=endpoint_id,
                start=start,
                end=end,
                metrics=metrics,
            )
            if endpoint_metrics:
                endpoint.status.metrics = endpoint_metrics

        return endpoint
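
For reference, a hedged call sketch based only on the signature above; the access key, project name, and endpoint id are placeholders, and how the enclosing class is obtained is not shown here:

endpoint = get_endpoint(
    access_key="<v3io-access-key>",              # placeholder credential
    project="my-project",                        # placeholder project
    endpoint_id="0123456789abcdef",              # placeholder endpoint uid
    metrics=["predictions_per_second_count_1s"],
    start="now-1h",
    end="now",
    feature_analysis=True,
)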
Example 9
    def get_endpoint(
        access_key: str,
        project: str,
        endpoint_id: str,
        metrics: Optional[List[str]] = None,
        start: str = "now-1h",
        end: str = "now",
        features: bool = False,
    ) -> ModelEndpointState:
        """
        Returns the current state of an endpoint.

        :param access_key: V3IO access key for managing user permissions
        :param project: The name of the project
        :param endpoint_id: The id of the model endpoint
        :param metrics: A list of metrics to return for each endpoint, read more in 'TimeMetric'
        :param start: The start time of the metrics
        :param end: The end time of the metrics
        """
        verify_endpoint(project, endpoint_id)

        endpoint = get_endpoint_kv_record_by_id(
            access_key, project, endpoint_id, ENDPOINT_TABLE_ATTRIBUTES,
        )

        if not endpoint:
            url = f"/projects/{project}/model-endpoints/{endpoint_id}"
            raise MLRunNotFoundError(f"Endpoint {endpoint_id} not found - {url}")

        endpoint_metrics = None
        if metrics:
            endpoint_metrics = _get_endpoint_metrics(
                access_key=access_key,
                project=project,
                endpoint_id=endpoint_id,
                start=start,
                end=end,
                name=metrics,
            )

        return ModelEndpointState(
            endpoint=ModelEndpoint(
                metadata=ModelEndpointMetadata(
                    project=endpoint.get("project"),
                    tag=endpoint.get("tag"),
                    labels=json.loads(endpoint.get("labels", "")),
                ),
                spec=ModelEndpointSpec(
                    model=endpoint.get("model"),
                    function=endpoint.get("function"),
                    model_class=endpoint.get("model_class"),
                ),
                status=ObjectStatus(state="active"),
            ),
            first_request=endpoint.get("first_request"),
            last_request=endpoint.get("last_request"),
            error_count=endpoint.get("error_count"),
            drift_status=endpoint.get("drift_status"),
            metrics=endpoint_metrics,
        )
Example 10
    def list_endpoints(
        access_key: str,
        project: str,
        model: Optional[str] = None,
        function: Optional[str] = None,
        tag: Optional[str] = None,
        labels: Optional[List[str]] = None,
        metrics: Optional[List[str]] = None,
        start: str = "now-1h",
        end: str = "now",
    ) -> List[ModelEndpointState]:
        """
        Returns a list of `ModelEndpointState` objects. Each object represents the current state of a model endpoint.
        This function supports filtering by the following parameters:
        1) model
        2) function
        3) tag
        4) labels
        By default, when no filters are applied, all available endpoints for the given project will be listed.

        In addition, this function provides a facade for listing endpoint-related metrics. This facade is time-based
        and depends on the 'start' and 'end' parameters. By default, when the metrics parameter is None, no metrics are
        added to the output of this function.

        :param access_key: V3IO access key for managing user permissions
        :param project: The name of the project
        :param model: The name of the model to filter by
        :param function: The name of the function to filter by
        :param tag: A tag to filter by
        :param labels: A list of labels to filter by. Label filters work by either filtering a specific value of a label
        (e.g. ["key==value"]) or by looking for the existence of a given key (e.g. "key")
        :param metrics: A list of metrics to return for each endpoint, read more in 'TimeMetric'
        :param start: The start time of the metrics
        :param end: The end time of the metrics
        """

        client = get_v3io_client(endpoint=config.v3io_api)
        cursor = client.kv.new_cursor(
            container=config.model_endpoint_monitoring.container,
            table_path=f"{project}/{ENDPOINTS_TABLE_PATH}",
            access_key=access_key,
            attribute_names=ENDPOINT_TABLE_ATTRIBUTES,
            filter_expression=_build_kv_cursor_filter_expression(
                project, function, model, tag, labels
            ),
        )
        endpoints = cursor.all()

        endpoint_state_list = []
        for endpoint in endpoints:
            endpoint_metrics = {}
            if metrics:
                endpoint_metrics = _get_endpoint_metrics(
                    access_key=access_key,
                    project=project,
                    endpoint_id=endpoint.get("id"),
                    name=metrics,
                    start=start,
                    end=end,
                )

            # Collect labels (by convention labels are prefixed with underscore '_'), ignore the builtin '__name' field
            state = ModelEndpointState(
                endpoint=ModelEndpoint(
                    metadata=ModelEndpointMetadata(
                        project=endpoint.get("project"),
                        tag=endpoint.get("tag"),
                        labels=json.loads(endpoint.get("labels")),
                    ),
                    spec=ModelEndpointSpec(
                        model=endpoint.get("model"),
                        function=endpoint.get("function"),
                        model_class=endpoint.get("model_class"),
                    ),
                    status=ObjectStatus(state="active"),
                ),
                first_request=endpoint.get("first_request"),
                last_request=endpoint.get("last_request"),
                error_count=endpoint.get("error_count"),
                drift_status=endpoint.get("drift_status"),
                metrics=endpoint_metrics,
            )
            endpoint_state_list.append(state)

        return endpoint_state_list
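
A hedged usage sketch of the filtering described in the docstring; names are placeholders, and the enclosing class is not shown. "env==prod" filters on a label value while "owner" filters on key existence:

states = list_endpoints(
    access_key="<v3io-access-key>",   # placeholder credential
    project="my-project",             # placeholder project
    model="churn-model",              # placeholder model name
    labels=["env==prod", "owner"],    # value filter and key-existence filter
    metrics=["predictions_per_second_count_1s"],
)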
Example 11
async def test_get_endpoint_metrics(db: Session, client: TestClient):
    frames = get_frames_client(
        token=_get_access_key(),
        container="projects",
        address=config.v3io_framesd,
    )

    start = datetime.utcnow()

    for i in range(5):
        endpoint = _mock_random_endpoint()
        await write_endpoint_to_kv(_get_access_key(), endpoint)
        await run_in_threadpool(
            frames.create,
            backend="tsdb",
            table=f"test/{ENDPOINT_EVENTS_TABLE_PATH}",
            rate="10/m",
            if_exists=1,
        )

        total = 0

        dfs = []

        for i in range(10):
            count = randint(1, 10)
            total += count
            data = {
                "predictions_per_second_count_1s": count,
                "endpoint_id": endpoint.metadata.uid,
                "timestamp": start - timedelta(minutes=10 - i),
            }
            df = pd.DataFrame(data=[data])
            dfs.append(df)

        await run_in_threadpool(
            frames.write,
            backend="tsdb",
            table=f"test/{ENDPOINT_EVENTS_TABLE_PATH}",
            dfs=dfs,
            index_cols=["timestamp", "endpoint_id"],
        )

        response = await run_in_threadpool(
            client.get,
            url=f"/api/projects/test/model-endpoints/{endpoint.metadata.uid}"
            "?metric=predictions_per_second_count_1s",
            headers={"X-V3io-Session-Key": _get_access_key()},
        )

        endpoint = ModelEndpoint(**response.json())

        assert len(endpoint.status.metrics) > 0

        predictions_per_second = endpoint.status.metrics[
            "predictions_per_second_count_1s"]

        assert predictions_per_second.name == "predictions_per_second_count_1s"

        response_total = sum(m[1] for m in predictions_per_second.values)

        assert total == response_total
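
run_in_threadpool (from starlette.concurrency) is what lets this async test call the blocking frames and HTTP clients without blocking the event loop; it forwards positional and keyword arguments to the wrapped callable. A minimal, self-contained sketch:

import time
from starlette.concurrency import run_in_threadpool

def blocking_call(x: int) -> int:
    time.sleep(0.01)  # stands in for a blocking I/O call
    return x * 2

async def example() -> int:
    # The blocking function runs in a worker thread; args/kwargs are forwarded as-is.
    return await run_in_threadpool(blocking_call, 21)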
Example 12
def get_endpoint(
        project: str,
        endpoint_id: str,
        start: str = Query(default="now-1h"),
        end: str = Query(default="now"),
        metrics: bool = Query(default=False),
        features: bool = Query(default=False),
):
    """
    Return the current state of an endpoint, meaning all additional data that is relevant to a specified endpoint.
    This function also takes into account the start and end times and uses the same time-querying as v3io-frames.
    """

    _verify_endpoint(project, endpoint_id)

    endpoint = _get_endpoint_kv_record_by_id(
        endpoint_id,
        ENDPOINT_TABLE_ATTRIBUTES_WITH_FEATURES,
    )

    if not endpoint:
        url = f"/projects/{project}/model-endpoints/{endpoint_id}"
        raise MLRunNotFoundError(f"Endpoint {endpoint_id} not found - {url}")

    endpoint_metrics = None
    if metrics:
        endpoint_metrics = _get_endpoint_metrics(
            endpoint_id=endpoint_id,
            start=start,
            end=end,
            name=["predictions", "latency"],
        )

    endpoint_features = None
    if features:
        endpoint_features = _get_endpoint_features(
            project=project,
            endpoint_id=endpoint_id,
            features=endpoint.get("features"))

    return ModelEndpointState(
        endpoint=ModelEndpoint(
            metadata=ModelEndpointMetadata(
                project=endpoint.get("project"),
                tag=endpoint.get("tag"),
                labels=json.loads(endpoint.get("labels", "{}")),
            ),
            spec=ModelEndpointSpec(
                model=endpoint.get("model"),
                function=endpoint.get("function"),
                model_class=endpoint.get("model_class"),
            ),
            status=ObjectStatus(state="active"),
        ),
        first_request=endpoint.get("first_request"),
        last_request=endpoint.get("last_request"),
        error_count=endpoint.get("error_count"),
        alert_count=endpoint.get("alert_count"),
        drift_status=endpoint.get("drift_status"),
        metrics=endpoint_metrics,
        features=endpoint_features,
    )
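
Because metrics and features are declared as query parameters, a matching request in a test might look like the sketch below; the route prefix and auth header follow the other examples here, and project, endpoint_id, and client are placeholders:

response = client.get(
    f"/api/projects/{project}/model-endpoints/{endpoint_id}"
    "?start=now-1h&end=now&metrics=true&features=true",
    headers={"X-V3io-Session-Key": _get_access_key()},
)
state = ModelEndpointState(**response.json())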
Example 13
def list_endpoints(
        project: str,
        model: Optional[str] = Query(None),
        function: Optional[str] = Query(None),
        tag: Optional[str] = Query(None),
        labels: List[str] = Query([], alias="label"),
        start: str = Query(default="now-1h"),
        end: str = Query(default="now"),
        metrics: bool = Query(default=False),
):
    """
    Returns a list of endpoints of type 'ModelEndpoint', supports filtering by model, function, tag and labels.
    Labels can be used to filter on the existence of a label:
    `api/projects/{project}/model-endpoints/?label=mylabel`

    Or on the value of a given label:
    `api/projects/{project}/model-endpoints/?label=mylabel=1`

    Multiple labels can be queried in a single request by either using the `&` separator:
    `api/projects/{project}/model-endpoints/?label=mylabel=1&label=myotherlabel=2`

    Or by using a `,` (comma) separator:
    `api/projects/{project}/model-endpoints/?label=mylabel=1,myotherlabel=2`
    """

    client = get_v3io_client()
    cursor = client.kv.new_cursor(
        container=config.httpdb.model_endpoint_monitoring.container,
        table_path=ENDPOINTS_TABLE_PATH,
        attribute_names=ENDPOINT_TABLE_ATTRIBUTES,
        filter_expression=_build_kv_cursor_filter_expression(
            project, function, model, tag, labels),
    )
    endpoints = cursor.all()

    endpoint_state_list = []
    for endpoint in endpoints:

        endpoint_metrics = None
        if metrics:
            endpoint_metrics = _get_endpoint_metrics(
                endpoint_id=get_endpoint_id(ModelEndpoint(**endpoint)),
                name=["predictions", "latency"],
                start=start,
                end=end,
            )

        # Collect labels (by convention labels are prefixed with underscore '_'), ignore the builtin '__name' field
        state = ModelEndpointState(
            endpoint=ModelEndpoint(
                metadata=ModelEndpointMetadata(
                    project=endpoint.get("project"),
                    tag=endpoint.get("tag"),
                    labels=json.loads(endpoint.get("labels")),
                ),
                spec=ModelEndpointSpec(
                    model=endpoint.get("model"),
                    function=endpoint.get("function"),
                    model_class=endpoint.get("model_class"),
                ),
                status=ObjectStatus(state="active"),
            ),
            first_request=endpoint.get("first_request"),
            last_request=endpoint.get("last_request"),
            error_count=endpoint.get("error_count"),
            alert_count=endpoint.get("alert_count"),
            drift_status=endpoint.get("drift_status"),
            metrics=endpoint_metrics,
        )
        endpoint_state_list.append(state)

    return ModelEndpointStateList(endpoints=endpoint_state_list)
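
The label queries described in the docstring translate directly into requests such as the following sketch (the project name and the test client are placeholders):

# Existence filter on a single label key:
client.get(f"/api/projects/{project}/model-endpoints/?label=mylabel")

# Value filters on multiple labels, combined with '&':
client.get(
    f"/api/projects/{project}/model-endpoints/?label=mylabel=1&label=myotherlabel=2"
)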