Example #1
import numpy as np
import pandas as pd

from regression_model.processing.data_management import load_pipeline
from regression_model.config import config
from regression_model.processing.validation import validate_inputs
from regression_model import __version__ as _version

import logging
import typing as t

_logger = logging.getLogger(__name__)

pipeline_file_name = f"{config.PIPELINE_SAVE_FILE}{_version}.pkl"
_price_pipe = load_pipeline(file_name=pipeline_file_name)


def make_prediction(*, input_data: t.Union[pd.DataFrame, dict]) -> dict:
    """Make a prediction using a saved model pipeline.

    Args:
        input_data: DataFrame or dict of model prediction inputs.

    Returns:
        Predictions for each input row, as well as the model version.
    """

    data = pd.DataFrame(input_data)
    validated_data = validate_inputs(input_data=data)

    prediction = _price_pipe.predict(validated_data[config.FEATURES])

    # The original example is cut off here; the completion below follows the
    # pattern of Examples #3 and #4, with np.exp assumed to undo a
    # log-transformed target (as in Example #6).
    output = np.exp(prediction)

    results = {"predictions": output, "version": _version}

    _logger.info(
        f"Making predictions with model version: {_version} "
        f"Inputs: {validated_data} "
        f"Predictions: {results}"
    )

    return results
Example #2
import logging
import typing as t

import pandas as pd

# Assumed import paths, mirroring the other examples in this collection; the
# original snippet omits them.
from regression_model.processing.data_management import load_pipeline
from regression_model.config import config
from regression_model import __version__ as _version


def convert_input(jsonData) -> pd.DataFrame:
    # res = pd.read_json(jsonData, orient='records')
    res = pd.DataFrame(jsonData)
    print(res.shape)
    return res


_logger = logging.getLogger(__name__)

pipeline_file_name = f'{config.PIPELINE_SAVE_FILE}{_version}.pkl'
_basket_pipe = load_pipeline(file_name=pipeline_file_name)

BASKET_FEATURES = [
    100010, 100015, 100016, 100017, 100018, 300057, 300058, 300060, 300061,
    300062, 300064, 300065, 300570, 300640, 500811, 500812, 500813, 500814,
    500815, 500816, 500818, 500819, 500821, 500822, 500823, 500825, 500827
]


def make_predict2(input_data: t.Union[pd.DataFrame, dict]) -> dict:
    """Make predictions with the saved basket pipeline."""

    x_raw = pd.DataFrame(input_data)

    # Pivot the transaction lines into a basket matrix: one row per
    # (TransactionId, StoreId), one column per MerchandiseId.
    xx = x_raw.pivot_table('Quantity', ['TransactionId', 'StoreId'],
                           'MerchandiseId')
    xx_index = xx.index

    # Hedged completion -- the original example is truncated at this point.
    # Assumption: the basket matrix is aligned to the merchandise columns the
    # pipeline was trained on, missing columns are filled with zeros, and the
    # predictions are returned together with the index and model version.
    xx = xx.reindex(columns=BASKET_FEATURES, fill_value=0).fillna(0)
    prediction = _basket_pipe.predict(xx)

    results = {
        "predictions": prediction,
        "index": xx_index,
        "version": _version,
    }

    _logger.info(
        f"Making predictions with model version: {_version} "
        f"Predictions: {results}"
    )

    return results
Example #3
import numpy as np
import pandas as pd

from regression_model.processing.data_management import load_pipeline
from regression_model.config import config
from regression_model.processing.validation import validate_inputs
from regression_model import __version__ as _version
import logging

_logger = logging.getLogger(__name__)

pipeline_file_name = f"{config.PIPELINE_SAVE_FILE}{_version}.pkl"
_energy_pipe = load_pipeline(file_name=pipeline_file_name)

def make_prediction(*, input_data) -> dict:
    """Make predictions using the saved model pipeline."""

    data = pd.DataFrame(input_data)
    validated_data = validate_inputs(input_data=data)
    prediction = _energy_pipe.predict(validated_data[config.FEATURES])

    results = {"predictions": prediction, "version": _version}

    _logger.info(
        f"Making predictions with model version: {_version} "
        f"Inputs: {validated_data} "
        f"Predictions: {results}"
    )

    return results
Example #4
import pandas as pd

from regression_model.processing.data_management import load_pipeline
from regression_model.config import config
from regression_model.processing.validation import validate_inputs
from regression_model import __version__ as _version

import logging

_logger = logging.getLogger(__name__)

pipeline_file_name = f"{config.PIPELINE_SAVE_FILE}{_version}.pkl"
_titanic_pipe = load_pipeline(file_name=pipeline_file_name)


def make_prediction(*, input_data) -> dict:
    """Make a prediction using the saved model pipeline."""

    data = pd.DataFrame(input_data)
    validated_data = validate_inputs(input_data=data)
    prediction = _titanic_pipe.predict(validated_data[config.FEATURES])

    results = {"predictions": prediction, "version": _version}

    _logger.info(f"Making predictions with model version: {_version} "
                 f"Inputs: {validated_data} "
                 f"Predictions: {results}")

    return results
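
# Usage sketch (not part of the original example): a minimal call to the
# make_prediction function above. It assumes the trained pipeline file exists
# on disk; the feature names in sample_input are placeholders for whatever
# config.FEATURES actually contains.
if __name__ == "__main__":
    sample_input = [{"Pclass": 3, "Sex": "male", "Age": 22.0, "Fare": 7.25}]
    result = make_prediction(input_data=sample_input)
    print(result["predictions"], result["version"])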
Example #5
"""

import numpy as np
import pandas as pd

from regression_model.processing.data_management import load_pipeline
from regression_model.config import config
from regression_model import __version__ as _version

import logging
import typing as t

_logger = logging.getLogger(__name__)

pipeline_file_name = f"{config.PIPELINE_SAVE_FILE}{_version}.pkl"
pipeline = load_pipeline(file_name=pipeline_file_name)


def make_prediction(
    *,
    input_data: t.Union[pd.DataFrame, dict],
    form_input: bool = False,
) -> dict:
    """Make a prediction using a saved model pipeline.

    Args:
        input_data: DataFrame or dict of model prediction inputs.

    Returns:
        Predictions for each input row, as well as the model version.
    """
Example #6
import numpy as np
import pandas as pd

from regression_model.processing.data_management import load_pipeline
from regression_model.config import config
from regression_model.processing.validation import validate_inputs
from sklearn.pipeline import Pipeline

pipeline_filename = "regression_model.pkl"
_price_pipeline: Pipeline = load_pipeline(pipeline_filename)


def make_prediction(data: str) -> dict:
    """Make a prediction from a JSON string of input records."""
    # The payload arrives as a JSON string of records, hence read_json rather
    # than the DataFrame constructor used in the other examples.
    df = pd.read_json(data)
    validated_data = validate_inputs(input_data=df[config.FEATURES])
    prediction = _price_pipeline.predict(validated_data)
    # Bring the predictions back from log scale.
    output = np.exp(prediction)
    response = {"predictions": output}
    return response