Example #1
def predict(complete_conf, arg_dict=None, cache=True):
    """Carry out prediction for the model specified in the configuration

    Params:
        complete_conf: configuration dict
        arg_dict:      arguments dict for the prediction (analogous to what it would get from a web API)
        cache:         (default: True) whether to cache the data sources/sinks and helper objects (cache lookup is done by model name and model version). If in doubt, leave at default.

    Returns:
        model's prediction output
    """
    logger.debug("Applying model for prediction...")

    dso, dsi = _get_data_sources_and_sinks(complete_conf,
                                           tags="predict",
                                           cache=cache)

    model_wrapper, metadata = _get_model(complete_conf, cache=cache)
    inner_model = model_wrapper.contents

    model_conf = complete_conf["model"]
    output = model_wrapper.predict(
        model_conf, dso, dsi, inner_model, arg_dict or {}
    )
    output = resource.to_plain_python_obj(output)

    return output
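
For orientation, a minimal usage sketch of the function above. The configuration keys and argument values below are hypothetical placeholders (only the "model" key is read directly in this snippet), not a documented config schema.

# Hypothetical call; adapt the config to whatever
# _get_data_sources_and_sinks() and _get_model() expect.
complete_conf = {
    "model": {"name": "iris", "version": "1.0.0"},
    # ... data source/sink sections go here ...
}
result = predict(complete_conf, arg_dict={"sepal_length": 5.1})
print(result)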
Example #2
    def predict_using_model(self, args_dict):
        logger.debug("Prediction input %s", dict(args_dict))
        logger.info("Starting prediction")
        args_ordered_dict = OrderedDict(sorted(args_dict.items()))
        inner_model = self.model_wrapper.contents
        predict_args = [
            self.model_config,
            self.datasources,
            self.datasinks,
            inner_model,
            args_ordered_dict,
        ]
        # getattr with a literal name is used on purpose: writing
        # self.model_wrapper.__graph inside a class body would be name-mangled
        # to _<ClassName>__graph and never find the attribute.
        graph = getattr(self.model_wrapper, "__graph", None)
        if graph is not None:
            with graph.as_default():
                logger.info("Restored tensorflow model's graph")
                raw_output = self.model_wrapper.predict(*predict_args)
        else:
            raw_output = self.model_wrapper.predict(*predict_args)

        if (
            self.model_wrapper.have_columns_been_ordered
            and not resource._order_columns_called
        ):
            logger.warning(
                "Model has been trained on ordered columns, but "
                "prediction does not call function order_columns."
            )

        output = resource.to_plain_python_obj(raw_output)
        logger.debug("Prediction output %s", output)
        return output
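
For context, a sketch of how a "__graph" attribute like the one checked above might be attached when loading a TensorFlow 1.x model. The wrapper object and loader function are assumptions for illustration, not part of the example.

import tensorflow as tf  # assumes the TF 1.x graph/session API

graph = tf.Graph()
with graph.as_default():
    inner_model = load_trained_model()  # hypothetical loader
# setattr with a literal name sidesteps name mangling, matching the
# getattr lookup in predict_using_model() above.
setattr(model_wrapper, "__graph", graph)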
Example #3
def test_to_plain_python_obj_error():
    """Test the error case."""
    class FailingObject:
        pass

    output = r.to_plain_python_obj(FailingObject())
    with pytest.raises(TypeError):
        json.dumps(output)
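
For orientation, a minimal sketch of what a recursive converter along the lines of to_plain_python_obj might do. This illustrates the behavior the tests rely on (unknown objects pass through untouched); it is not the library's actual implementation.

import numpy as np
import pandas as pd

def to_plain_python_obj_sketch(obj):
    """Recursively convert numpy/pandas containers to plain Python types."""
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, np.generic):  # numpy scalars such as np.float64
        return obj.item()
    if isinstance(obj, pd.DataFrame):
        return obj.to_dict(orient="list")
    if isinstance(obj, pd.Series):
        return obj.tolist()
    if isinstance(obj, dict):
        return {k: to_plain_python_obj_sketch(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [to_plain_python_obj_sketch(v) for v in obj]
    return obj  # unknown objects pass through; json.dumps() may then raise TypeError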
Example #4
def predict(
    complete_conf: Dict,
    arg_dict: Optional[Dict] = None,
    cache: bool = True,
    model: Optional[ModelInterface] = None,
    use_live_code: bool = False,
):
    """Carry out prediction for the model specified in the configuration.

    :param complete_conf: configuration dict
    :type complete_conf: dict
    :param arg_dict: Arguments dict for the prediction (analogous to what it would get from a web API)
    :type arg_dict: optional Dict, default: None
    :param cache: Whether to cache the data sources/sinks and helper objects (cache lookup is done by model name and model version). If in doubt, leave at default.
    :type cache: optional bool, default: True
    :param model: Test this model instead of loading it from `model_store`. This parameter exists mainly for making debugging and unit testing your model's code easier.
    :type model: optional object implementing ModelInterface, default: None
    :param use_live_code: Use the current `predict` function instead of the one persisted with the model in the `model_store`. This parameter exists mainly for making debugging and unit testing your model's code easier.
    :type use_live_code: optional bool, default: False

    :return: model's prediction output
    """
    logger.debug("Applying model for prediction...")

    dso, dsi = _get_data_sources_and_sinks(complete_conf,
                                           tags=["predict"],
                                           cache=cache)
    model_conf = complete_conf["model"]

    if model:
        model_wrapper = model
    else:
        model_wrapper, _ = _get_model(complete_conf, cache=cache)

    inner_model = model_wrapper.contents

    if use_live_code:
        # Create a fresh model object from current code and transplant existing contents
        m_cls = _get_model_class(complete_conf, cache=cache)
        curr_model_wrapper: ModelInterface = m_cls(contents=inner_model)
        model_wrapper = curr_model_wrapper

    output = model_wrapper.predict(
        model_conf, dso, dsi, inner_model, arg_dict or {}
    )
    _check_ordered_columns(complete_conf, model_wrapper, "prediction code")

    output = resource.to_plain_python_obj(output)

    return output
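
A hedged sketch of the debugging workflow that the model and use_live_code parameters enable. MyModel and trained_inner_model are hypothetical stand-ins for your own ModelInterface implementation and its fitted contents.

# Hypothetical debugging call: test an in-memory model object and the
# predict() code currently on disk, bypassing the persisted model_store copy.
debug_model = MyModel(contents=trained_inner_model)
output = predict(
    complete_conf,
    arg_dict={"sepal_length": 5.1},
    model=debug_model,    # skip loading from model_store
    use_live_code=True,   # use the predict code as it is on disk now
)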
Example #5
def test_to_plain_python_obj_mixed(test_input):
    """Test to convert mixed arrays to json-compatible object."""
    # It's enough that we don't get an exception here
    output = r.to_plain_python_obj(test_input)
    # We should not get a json conversion error
    json.dumps(output)
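
The test_input argument comes from a pytest parametrize decorator that is not shown on this page. A plausible sketch; the concrete inputs are assumptions:

import numpy as np
import pytest

@pytest.mark.parametrize(
    "test_input",
    [
        {"a": np.array([1, 2, 3]), "b": [np.float64(1.5), "text"]},
        [np.int32(7), {"nested": np.array([[1.0, 2.0]])}],
    ],
)
def test_to_plain_python_obj_mixed(test_input):
    ...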
Example #6
def test_to_plain_python_obj_pandas(test_input, expected):
    """Test to convert pandas arrays to json-compatible object."""
    output = r.to_plain_python_obj(test_input)
    assert output == expected
    # We should not get a json conversion error
    json.dumps(output)
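
Likewise, test_input and expected would come from a parametrize decorator. The case below assumes a pandas Series converts to a plain list; check the converter's actual contract before relying on it:

import pandas as pd
import pytest

@pytest.mark.parametrize(
    "test_input, expected",
    [
        (pd.Series([1, 2, 3]), [1, 2, 3]),  # assumed conversion result
    ],
)
def test_to_plain_python_obj_pandas(test_input, expected):
    ...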