def output_fn(prediction, accept):
    """Format prediction output.

    The default accept/content-type between containers for serial inference is JSON.
    We also want to set the ContentType or mimetype as the same value as accept so the next
    container can read the response payload correctly.

    Parameters
    ----------
    prediction : array-like
        result of predict_fn; must support ``.tolist()`` for the JSON branch

    accept : str
        MIME type requested by the caller ("application/json" or "text/csv")

    Returns
    -------
    worker.Response
        the serialized prediction, with mimetype set to ``accept``

    Raises
    ------
    RuntimeError
        if ``accept`` is neither "application/json" nor "text/csv"
    """
    if accept == "application/json":
        # Wrap each row as {"features": [...]} per the SageMaker "instances"
        # convention so the next container in the pipeline can consume it.
        instances = [{"features": row} for row in prediction.tolist()]

        json_output = {"instances": instances}

        return worker.Response(json.dumps(json_output), mimetype=accept)
    elif accept == 'text/csv':
        return worker.Response(encoders.encode(prediction, accept), mimetype=accept)
    else:
        # Fixed: `RuntimeException` is not a Python builtin — the original
        # line raised NameError instead of the intended error. Use
        # RuntimeError, matching the other handlers in this file.
        raise RuntimeError("{} accept type is not supported by this script.".format(accept))
Example #2
0
def output_fn(prediction, accept):
    """Format prediction output.

    The default accept/content-type between containers for serial inference is JSON.
    We also want to set the ContentType or mimetype as the same value as accept so the next
    container can read the response payload correctly.

    Parameters
    ----------
    prediction : array-like
        result of predict_fn; must support ``.tolist()`` for the JSON branch

    accept : str
        MIME type requested by the caller ("application/json" or "text/csv")

    Returns
    -------
    worker.Response
        the serialized prediction, with mimetype set to ``accept``

    Raises
    ------
    RuntimeError
        if ``accept`` is neither "application/json" nor "text/csv"
    """
    if accept == "application/json":
        # Wrap each row as {"features": [...]} per the SageMaker "instances"
        # convention so the next container in the pipeline can consume it.
        instances = [{"features": row} for row in prediction.tolist()]

        json_output = {"instances": instances}

        return worker.Response(json.dumps(json_output),
                               accept,
                               mimetype=accept)
    elif accept == 'text/csv':
        return worker.Response(encoders.encode(prediction, accept),
                               accept,
                               mimetype=accept)
    else:
        # Fixed: `RuntimeException` is not a Python builtin — the original
        # line raised NameError instead of the intended error. Use
        # RuntimeError, matching the other handlers in this file.
        raise RuntimeError(
            "{} accept type is not supported by this script.".format(accept))
def transformation():
    """Flask-style /invocations handler: vectorize CSV input and return features.

    Reads CSV rows from the request body, detects whether a label column
    (and optionally a header row) is present, collapses the remaining
    columns of each row into a set, transforms that set column with the
    module-level ``preprocessor``, and returns the resulting dense matrix
    (label-prefixed when labels were supplied) serialized per the request's
    Accept header.

    NOTE(review): depends on module-level names not visible in this chunk —
    ``request``, ``load_model``, ``preprocessor``, ``le``, ``label_column``,
    ``feature_column``, ``Response`` — confirm their definitions elsewhere
    in the file.
    """

    # Debug logging of the incoming request (body truncated to 100 bytes).
    print("data: ", request.data[:100])
    print("cookies: ", request.cookies)
    print("headers: ", dict(request.headers))
    print("args: ", request.args)

    # Lazily load model artifacts (presumably idempotent — TODO confirm).
    load_model()

    content_type = request.headers["Content-Type"]
    print("Content type", content_type)
    accept = request.headers["Accept"]
    print("Accept", accept)

    input_data = request.data.decode()

    # Peek at the very first CSV field to decide the input layout:
    # "label" => header row present; "category_*" => labelled rows, no header;
    # anything else => unlabelled feature rows.
    first_entry = input_data.split("\n", 1)[0].split(",", 1)[0]
    print("First entry is: ", first_entry)
    df = None

    if first_entry == "label" or first_entry.startswith("category_"):
        # Labelled layout: column 0 is the label, remaining columns are
        # collapsed into a set of feature tokens.
        recs = [(row[0], set(row[1:]))
                for row in csv.reader(StringIO(input_data))]
        if first_entry == "label":
            # First row is a header — skip it.
            df = pd.DataFrame.from_records(
                recs[1:], columns=[label_column, feature_column])
        else:
            df = pd.DataFrame.from_records(
                recs, columns=[label_column, feature_column])
        # This is a labelled example, includes the ring label
        print("Length indicates that label is included")
    else:
        print("Length indicates that label is not included.")
        # This is an unlabelled example.
        recs = [(set(row), ) for row in csv.reader(StringIO(input_data))]
        df = pd.DataFrame.from_records(recs, columns=[feature_column])

    print("merged df", df.head())
    # NOTE(review): hard-codes "words" rather than feature_column — assumes
    # feature_column == "words"; verify, otherwise this raises KeyError.
    features = preprocessor.transform(df["words"])
    prediction = None

    if label_column in df:
        print("label_column in input_data")
        labels = le.transform(df[label_column])
        # Return the label (as the first column) and the set of features.
        prediction = np.insert(features.todense(), 0, labels, axis=1)
    else:
        print("label_column not in input_data")
        # Return only the set of features
        prediction = features.todense()

    if accept == "application/json":
        # Serialize rows as {"features": [...]} objects under "instances",
        # mirroring the output_fn handlers above.
        instances = []
        for row in prediction.tolist():
            instances.append({"features": row})

        json_output = {"instances": instances}

        return Response(json.dumps(json_output), mimetype=accept)
    # TODO: use custom flag to indicate that this is in a pipeline rather than relying on the '*/*'
    elif accept == "text/csv" or accept == "*/*":
        return Response(encoders.encode(prediction, "text/csv"),
                        mimetype="text/csv")
    else:
        raise RuntimeError(
            "{} accept type is not supported by this script.".format(accept))
Example #4
0
def output_fn(prediction, accept):
    """Encode *prediction* in the requested MIME type and return it.

    Delegates serialization to ``encoders.encode`` (which handles the
    torch-tensor-to-numpy conversion) and echoes ``accept`` back as the
    response content type.
    """
    payload = encoders.encode(prediction, accept)
    return worker.Response(payload, accept)
def post(client, payload, content_type, accept):
    """POST *payload* to /invocations, encoded as *content_type*, requesting *accept*."""
    headers = {'accept': accept}
    body = encoders.encode(payload, content_type)
    return client.post(path='/invocations',
                       headers=headers,
                       data=body,
                       content_type=content_type)
def output_fn(prediction, accept_type):
    """Encodes prediction to accept type.

    The SageMaker Scikit-learn model server invokes this method with the result of prediction and
    serializes this according to the response MIME type. It expects the input to be numpy array and encodes
    to requested response MIME type.

    Parameters
    ----------
    prediction : array-like
        the object returned from predict_fn

    accept_type : str
        the expected MIME type of the response

    Returns
    -------
    : Response obj
        serialized predictions in accept type

    """
    # Pass-through: predict_fn may already have produced a fully-formed
    # Response (e.g. an error response) — don't re-encode it.
    if isinstance(prediction, worker.Response):
        return prediction

    if _is_inverse_label_transform():
        try:
            # Selectable-inference path: encode the requested output keys
            # via the per-MIME-type encoder registry.
            output_keys = _get_selected_output_keys()
            return worker.Response(response=encoder_factory[accept_type](
                prediction, output_keys),
                                   status=http_client.OK,
                                   mimetype=accept_type)
        except KeyError:
            # Selectable inference is not turned on
            # (accept_type missing from encoder_factory) — fall back to
            # plain CSV encoding, or reject any other MIME type.
            if accept_type == 'text/csv':
                return worker.Response(response=encoders.encode(
                    prediction, accept_type),
                                       status=http_client.OK,
                                       mimetype=accept_type)
            return worker.Response(
                response=f"Accept type '{accept_type}' is not supported "
                f"during inverse label transformation.",
                status=http_client.NOT_ACCEPTABLE)

    # Normalize prediction into a (features, target) pair; target may be None.
    if isinstance(prediction, tuple):
        X, y = prediction
    else:
        X, y = _split_features_target(prediction)

    if accept_type == 'application/x-recordio-protobuf':
        # Recordio-protobuf requires float32; keep X sparse when possible.
        return worker.Response(response=encoders.array_to_recordio_protobuf(
            _sparsify_if_needed(X).astype('float32'),
            y.astype('float32') if y is not None else y),
                               status=http_client.OK,
                               mimetype=accept_type)

    if accept_type == 'text/csv':
        if y is not None:
            # Prepend the target as the first CSV column; densify X first
            # since column_stack cannot mix sparse and dense.
            X = np.column_stack(
                (np.ravel(y), X.todense() if sparse.issparse(X) else X))

        return worker.Response(response=encoders.encode(X, accept_type),
                               status=http_client.OK,
                               mimetype=accept_type)
    # Any other MIME type is rejected with 406 Not Acceptable.
    return worker.Response(
        response=f"Accept type '{accept_type}' is not supported.",
        status=http_client.NOT_ACCEPTABLE)
Example #7
0
def default_output_fn(prediction, accept):
    """Serialize *prediction* in the *accept* MIME type and wrap it in a Response."""
    encoded = encoders.encode(prediction, accept)
    return worker.Response(response=encoded, mimetype=accept)