def init():
    global model

    model_path = Model.get_model_path(
        os.getenv("AZUREML_MODEL_DIR").split('/')[-2])

    model = joblib.load(model_path)


input_sample = numpy.array(
    [numpy.arange(1.0, 129.0).tolist(),
     numpy.arange(128.0, 0, -1).tolist()])
output_sample = numpy.array([2, 2])


# Inference_schema generates a schema for your web service
# It then creates an OpenAPI (Swagger) specification for the web service
# at http://<scoring_base_url>/swagger.json
@input_schema('data', NumpyParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data, request_headers):
    result = model.predict(data)

    # Demonstrate how we can log custom data into the Application Insights
    # traces collection.
    # The 'X-Ms-Request-id' value is generated internally and can be used to
    # correlate a log entry with the Application Insights requests collection.
    # The HTTP 'traceparent' header may be set by the caller to implement
    # distributed tracing (per the W3C Trace Context proposed specification)
    # and can be used to correlate the request to external systems.
    print(('{{"RequestId":"{0}", '
           '"TraceParent":"{1}", '
           '"NumberOfPredictions":{2}}}').format(
               request_headers.get("X-Ms-Request-Id", ""),
Example #2
    "height": 48.8,
    "curb-weight": 2548,
    "engine-type": "dohc",
    "num-of-cylinders": "four",
    "engine-size": 130,
    "fuel-system": "mpfi",
    "bore": 3.47,
    "stroke": 2.68,
    "compression-ratio": 9,
    "horsepower": 111,
    "peak-rpm": 5000,
    "city-mpg": 21,
    "highway-mpg": 27
}])

output_sample = np.array(
    [0]
)  # This is an integer sample; use the data type that reflects the expected result


@input_schema('data', PandasParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
    try:
        result = model.predict(data)
        # you can return any datatype as long as it is JSON-serializable
        return result.tolist()
    except Exception as e:
        error = str(e)
        return error
Example #3
data_sample = PandasParameterType(pd.DataFrame({
        "age": pd.Series([0.0], dtype="float64"),
        "anaemia": pd.Series([0], dtype="int64"),
        "creatinine_phosphokinase": pd.Series([0], dtype="int64"),
        "diabetes": pd.Series([0], dtype="int64"),
        "ejection_fraction": pd.Series([0], dtype="int64"),
        "high_blood_pressure": pd.Series([0], dtype="int64"),
        "platelets": pd.Series([0.0], dtype="float64"),
        "serum_creatinine": pd.Series([0.0], dtype="float64"),
        "serum_sodium": pd.Series([0], dtype="int64"),
        "sex": pd.Series([0], dtype="int64"),
        "smoking": pd.Series([0], dtype="int64"),
        "time": pd.Series([0], dtype="int64")
    }))
input_sample = StandardPythonParameterType({'data': data_sample})

result_sample = NumpyParameterType(np.array([0]))
output_sample = StandardPythonParameterType({'Results': result_sample})
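
# A minimal sketch of the run() these nested samples pair with; the parameter
# name 'Inputs' below is an assumption, not taken from the original script:
@input_schema('Inputs', input_sample)
@output_schema(output_sample)
def run(Inputs):
    data = Inputs['data']               # pandas DataFrame matching data_sample
    result = model.predict(data)
    return {'Results': result.tolist()}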

try:
    log_server.enable_telemetry(INSTRUMENTATION_KEY)
    log_server.set_verbosity('INFO')
    logger = logging.getLogger('azureml.automl.core.scoring_script_v2')
except Exception:
    pass


def init():
    global model
    # Locate the registered model file (AZUREML_MODEL_DIR points at the
    # model folder) and deserialize it back into a model object
    model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model.onnx')
Example #4
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType


def init():
    global model

    # Print statement for Application Insights custom traces:
    print("model initialized " + time.strftime("%H:%M:%S"))

    model_path = Model.get_model_path(model_name='diabetes_model')
    model = load(model_path)


input_sample = NumpyParameterType(
    np.array([[1, 2, 3, 4, 54, 6, 7, 8, 88, 10],
              [10, 9, 8, 37, 36, 45, 4, 33, 2, 1]]))
output_sample = PandasParameterType(
    pd.DataFrame({"result": [27791.59951581891, 10958.615160340678]}))


@input_schema('data', input_sample)
@output_schema(output_sample)
def run(data):
    try:
        result = model.predict(data)
        return json.dumps({"result": result.tolist()})
    except Exception as e:
        error = str(e)
        print(error + time.strftime("%H:%M:%S"))
        return json.dumps({"error": error})
Example #5
def init():
    global series_model
    series_model_root = Model.get_model_path('EngineFailurePrediction')
    print('Series Model root:', series_model_root)
    #series_model_file = os.path.join(series_model_root, 'model')
    #print('Series Model file:', series_model_file)
    series_model = tf.keras.models.load_model(series_model_root)
    series_model.compile(loss=('binary_crossentropy'),
                         optimizer='adam',
                         metrics=['acc'])
    series_model.summary()  # summary() prints to stdout and returns None


input_sample = np.random.rand(2, 1, 30, 24)
output_sample = np.array([[0], [1]])


@input_schema('data', NumpyParameterType(input_sample, enforce_shape=False))
@output_schema(NumpyParameterType(output_sample))
def run(data):
    print(type(data))

    data = np.array(data)

    log_data({"data shape": str(data.shape)})

    # If a single (2D) sample is given, add a batch dimension
    if len(data.shape) == 2:
        log_data({"message": "Reshaping to 3D array"})
        data = data.reshape(1, data.shape[0], data.shape[1])
    if len(data.shape) == 4:
        log_data({"message": "Reshaping 4D array to 3D array"})
        data = data.reshape(data.shape[0], data.shape[2], data.shape[3])
Example #6
# The init() method is called once, when the web service starts up.
# Typically you would deserialize the model file, as shown here using joblib,
# and store it in a global variable so your run() method can access it later.
def init():
    global model

    # The AZUREML_MODEL_DIR environment variable indicates
    # a directory containing the model file you registered.
    model_filename = 'diabetes_reg_remote_model.pkl'
    model_path = os.path.join(os.environ['AZUREML_MODEL_DIR'], model_filename)

    model = joblib.load(model_path)


# The run() method is called each time a request is made to the scoring API.
#
# Shown here are the optional input_schema and output_schema decorators
# from the inference-schema pip package. Using these decorators on your
# run() method parses and validates the incoming payload against
# the example input you provide here. This will also generate a Swagger
# API document for your web service.
@input_schema(
    'data',
    NumpyParameterType(
        np.array([[59, 2, 32.1, 101.0, 157, 93.2, 38.0, 4.0, 4.8598, 87]])))
@output_schema(NumpyParameterType(np.array([151.000])))
def run(data):
    # Use the model object loaded by init().
    result = model.predict(data)

    # You can return any JSON-serializable object.
    return result.tolist()
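
# A sketch of invoking the deployed endpoint through the Azure ML SDK, assuming
# `service` is the Webservice object returned when the model was deployed:
import json

sample = json.dumps(
    {"data": [[59, 2, 32.1, 101.0, 157, 93.2, 38.0, 4.0, 4.8598, 87]]})
prediction = service.run(input_data=sample)
print(prediction)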
Example #7
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
from inference_schema.parameter_types.spark_parameter_type import SparkParameterType
from inference_schema.parameter_types.standard_py_parameter_type import StandardPythonParameterType
from inference_schema.schema_decorators import input_schema, output_schema
from pyspark.sql.session import SparkSession

numpy_input_data = [('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))]
numpy_sample_input = np.array(numpy_input_data,
                              dtype=np.dtype([('name', np.unicode_, 16),
                                              ('grades', np.float64, (2, ))]))
numpy_output_data = [(8.0, 7.0), (6.0, 7.0)]
numpy_sample_output = np.array(numpy_output_data, dtype='float64, float64')


@input_schema('param', NumpyParameterType(numpy_sample_input))
@output_schema(NumpyParameterType(numpy_sample_output))
def numpy_func(param):
    """

    :param param:
    :type param: np.ndarray
    :return:
    :rtype: np.ndarray
    """
    assert type(param) is np.ndarray
    return param['grades']


pandas_input_data = {'name': ['Sarah', 'John'], 'age': [25, 26]}
pandas_sample_input = pd.DataFrame(data=pandas_input_data)
Example #8
# The init() method is called once, when the web service starts up.
# Typically you would deserialize the model file, as shown here using joblib,
# and store it in a global variable so your run() method can access it later.
def init():
    global model
    global inputs_dc, prediction_dc
    # The AZUREML_MODEL_DIR environment variable indicates a directory containing the model file you registered.
    model_file_name = "model.pkl"
    model_path = os.path.join(os.environ.get("AZUREML_MODEL_DIR"), model_file_name)
    model = joblib.load(model_path)
    inputs_dc = ModelDataCollector("sample-model", designation="inputs", feature_names=["feat1", "feat2", "feat3", "feat4"])
    prediction_dc = ModelDataCollector("sample-model", designation="predictions", feature_names=["prediction"])


# The run() method is called each time a request is made to the scoring API.
# Shown here are the optional input_schema and output_schema decorators
# from the inference-schema pip package. Using these decorators on your
# run() method parses and validates the incoming payload against
# the example input you provide here. This will also generate a Swagger
# API document for your web service.
@input_schema('data', NumpyParameterType(np.array([[0.1, 1.2, 2.3, 3.4]])))
@output_schema(StandardPythonParameterType({'predict': [['Iris-virginica']]}))
def run(data):
    # Use the model object loaded by init().
    result = model.predict(data)
    inputs_dc.collect(data)  # this call saves the input data to Azure Blob storage
    prediction_dc.collect(result)  # this call saves the prediction data to Azure Blob storage

    # You can return any JSON-serializable object.
    return { "predict": result.tolist() }
Example #9
# The init() method is called once, when the web service starts up.
#
# Typically you would deserialize the model file, as shown here using joblib,
# and store it in a global variable so your run() method can access it later.
def init():
    global model

    # The AZUREML_MODEL_DIR environment variable indicates
    # a directory containing the model file you registered.
    model_path = os.path.join(os.environ['AZUREML_MODEL_DIR'], 'model.pkl')

    model = joblib.load(model_path)


# The run() method is called each time a request is made to the scoring API.
#
# Shown here are the optional input_schema and output_schema decorators
# from the inference-schema pip package. Using these decorators on your
# run() method parses and validates the incoming payload against
# the example input you provide here. This will also generate a Swagger
# API document for your web service.
@input_schema('data', NumpyParameterType(np.array([[0, 0, 0]])))
@output_schema(NumpyParameterType(np.array([0])))
def run(data):
    # Use the model object loaded by init().
    result = model.predict(data)

    # You can return any JSON-serializable object.
    return result.tolist()
Example #10
    inputs_dc = ModelDataCollector("Support vector classifier model",
                                   designation="inputs",
                                   feature_names=[
                                       "Temperature_C", "Humidity",
                                       "Wind_speed_kmph",
                                       "Wind_bearing_degrees", "Visibility_km",
                                       "Pressure_millibars",
                                       "Current_weather_condition"
                                   ])
    prediction_dc = ModelDataCollector(
        "Support vector classifier model",
        designation="predictions",
        feature_names=["Future_weather_condition"])


@input_schema('data',
              NumpyParameterType(
                  np.array([[34.927778, 0.24, 7.3899, 83, 16.1000, 1016.51,
                             1]])))
@output_schema(NumpyParameterType(np.array([0])))
def run(data):

    try:
        inputs_dc.collect(data)
    except Exception as e:
        tc.track_event('ValueNotFoundException', {'error_message': str(e)},
                       {'ValueError': 201})
        tc.flush()

    try:
        # scale data
        data = scaler.transform(data)
    except Exception as e:
Example #11
# The init() method is called once, when the web service starts up.
# Typically you would deserialize the model file, as shown here using joblib,
# and store it in a global variable so your run() method can access it later.
def init():
    global model

    # The AZUREML_MODEL_DIR environment variable indicates
    # a directory containing the model file you registered.
    model_filename = 'sklearn_regression_model.pkl'
    model_path = os.path.join(os.environ['AZUREML_MODEL_DIR'], model_filename)

    model = joblib.load(model_path)


# The run() method is called each time a request is made to the scoring API.
#
# Shown here are the optional input_schema and output_schema decorators
# from the inference-schema pip package. Using these decorators on your
# run() method parses and validates the incoming payload against
# the example input you provide here. This will also generate a Swagger
# API document for your web service.
@input_schema('data',
              NumpyParameterType(
                  np.array([[0.1, 1.2, 2.3, 3.4, 4.5, 5.6, 6.7, 7.8, 8.9,
                             9.0]])))
@output_schema(NumpyParameterType(np.array([4429.929236457418])))
def run(data):
    # Use the model object loaded by init().
    result = model.predict(data)

    # You can return any JSON-serializable object.
    return result.tolist()
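
# The schema decorators also publish an OpenAPI (Swagger) document for the
# service. A sketch of fetching it, assuming `service` is the deployed
# Webservice object:
import requests

swagger = requests.get(service.swagger_uri).json()
print(swagger["info"]["title"])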