Example #1
def invoke(request):
    """Where the magic happens..."""

    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="invoke"):
        # TODO: Handle any failure responses such as Fallback/Circuit-Breaker, etc

        timeout_seconds = 1200

        # TODO: Can we use internal dns name (predict-mnist)
        # TODO: Pass along the request-tracing headers
        url_model_a = 'http://predict-83f05e58transfer-v1pythoncpu1b79207e:8080/invoke'
        response_a = requests.post(url=url_model_a,
                                   data=transformed_request,
                                   timeout=timeout_seconds)

        url_model_b = 'http://predict-83f05e58transfer-v1pythoncpu40c1d1f5:8080/invoke'
        response_b = requests.post(url=url_model_b,
                                   data=transformed_request,
                                   timeout=timeout_seconds)

    # TODO: Aggregate the responses into a single response
    #       * Classification:  Return the majority class from all predicted classes
    #       * Regression:  Average the result
    # TODO: Include all models that participated in the response (including confidences, timings, etc.); see the aggregation sketch after this example

    response = [response_a.json(), response_b.json()]

    with monitor(labels=_labels, name="transform_response"):
        transformed_response = _transform_response(response)

    return transformed_response
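The aggregation TODO above is left open. A minimal sketch of the classification case, assuming each model answers with a JSON body shaped like the MNIST transform in Example #14 ({"classes": [...], "probabilities": [...]}); _aggregate_responses is a hypothetical helper, not part of the original pipeline:

from collections import Counter

def _aggregate_responses(responses):
    # Hypothetical helper: majority vote across the per-model JSON bodies.
    # For regression, replacing the vote with an average of the predicted values
    # would cover the second bullet of the TODO.
    top_classes = [r['classes'][0] for r in responses]
    majority_class, votes = Counter(top_classes).most_common(1)[0]
    return {'class': majority_class,
            'votes': votes,
            # keep every participating model's raw output for auditing
            'model_responses': responses}

With a helper like this, the response list above would become response = _aggregate_responses([response_a.json(), response_b.json()]).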
Example #2
def invoke(request):
    """
    Transform bytes posted to the api into a Tensor.
    Classify the image
    Transform the model prediction output from a 1D array to a list of classes and probabilities

    :param bytes request:   byte array containing the content required by the predict method

    :return:                Response obj serialized to a JSON formatted str
                                containing a list of classes and a list of probabilities
    """
    try:

        with monitor(labels=_labels, name="transform_request"):
            transformed_request = _transform_request(request)

        with monitor(labels=_labels, name="invoke"):
            response = _model(transformed_request)

        with monitor(labels=_labels, name="transform_response"):
            transformed_response = _transform_response(response)

        return transformed_response

    except Exception:
        _logger.error('pipeline_invoke_python.invoke.Exception:',
                      exc_info=True)
Example #3
def invoke(request):
    """Where the magic happens..."""
    transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="invoke"):
        data_iter = mx.io.NDArrayIter(transformed_request, None, 1)
        response = mod.predict(data_iter)
    with monitor(labels=_labels, name="transform_response"):
        transformed_response = _transform_response(response)

    return transformed_response
Example #4
def invoke(request: bytes) -> str:
    with monitor(labels=_labels, name='transform_request'):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name='invoke'):
        response = _summary(transformed_request)

    with monitor(labels=_labels, name='transform_response'):
        transformed_response = _transform_response(response)

    return transformed_response
Example #5
def predict(request: bytes) -> bytes:
    '''Where the magic happens...'''

    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="predict"):
        predictions = _model.predict(transformed_request)

    with monitor(labels=_labels, name="transform_response"):
        return _transform_response(predictions)
Example #6
def invoke(request):
    '''Where the magic happens...'''
    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="invoke"):
        response = _model.predict(transformed_request)

    with monitor(labels=_labels, name="transform_response"):
        transformed_response = _transform_response(response)

    return transformed_response
Example #7
def invoke(request):                                           #<== Required.  Called on every prediction
    '''Where the magic happens...'''

    with monitor(labels=_labels, name="transform_request"):    #<== Optional.  Expose fine-grained metrics
        transformed_request = _transform_request(request)      #<== Optional.  Transform input (json) into TensorFlow (tensor)

    with monitor(labels=_labels, name="invoke"):               #<== Optional.  Calls _model.predict()
        response = _model.predict(transformed_request)

    with monitor(labels=_labels, name="transform_response"):   #<== Optional.  Transform TensorFlow (tensor) into output (json)
        transformed_response = _transform_response(response)

    return transformed_response                                #<== Required.  Returns the predicted value(s)
Example #8
def predict(request: bytes) -> bytes:
    '''Where the magic happens...'''
# TODO:  Check this out:  https://github.com/MtDersvan/tf_playground/blob/master/wide_and_deep_tutorial/wide_and_deep_basic_serving.md
# TODO:  Check this out, as well:  https://www.tensorflow.org/programmers_guide/saved_model
    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="predict"):
        predictions = _model.predict(transformed_request)

    with monitor(labels=_labels, name="transform_response"):
        transformed_response = _transform_response(predictions)

    return transformed_response
Example #9
def invoke(request):
    """Where the magic happens..."""

    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="invoke"):
        cmd = "pipeline_invoke.sh %s" % transformed_request
        response_bytes = _subprocess.check_output(cmd, shell=True)
        response = response_bytes.decode('utf-8')

    with monitor(labels=_labels, name="transform_response"):
        transformed_response = _transform_response(response)

    return transformed_response
Example #10
def invoke(request):
    with monitor(labels=_labels, name="invoke"):
        request_str = request.decode('utf-8')
        avatar_url = json.loads(request_str)['sender']['avatar_url']
        github_login = json.loads(request_str)['sender']['login']
        classification_response = inception.invoke(avatar_url)

        classification_response_json = json.loads(classification_response)

        classification_response_formatted = '\\n '.join(
            "%s%%\t%s" % (str(100 * item['score'])[0:4], item['name'])
            for item in classification_response_json)

        cmd = 'curl -X POST --data-urlencode "payload={\\"unfurl_links\\": true, \\"channel\\": \\"#community\\", \\"username\\": \\"pipelineai_bot\\", \\"text\\": \\"%s has starred the PipelineAI GitHub Repo!\n%s\nTheir avatar picture is classified as follows:\n%s\nTo classify your avatar picture, star the PipelineAI GitHub Repo @ https://github.com/PipelineAI/pipeline\\"}" https://hooks.slack.com/services/T/B/o' % (github_login, avatar_url, (classification_response_formatted or ''))
        response = subprocess.check_output(cmd, shell=True).decode('utf-8')

# https://github.com/alexellis/faas-twitter-fanclub/blob/master/tweet-stargazer/handler.py
#
#        auth = tweepy.OAuthHandler(os.environ["consumer_key"], os.environ["consumer_secret"])
#        auth.set_access_token(os.environ["access_token"], os.environ["access_token_secret"])
#        github_login = json.loads(request_str)['sender']['login']
#        api = tweepy.API(auth)
#        api.update_with_media('%s' % filename, '%s' % github_login)

        filename = avatar_url.split('/')
        if filename:
            idx = len(filename) - 1
            filename = filename[idx]
            if os.path.exists('inception/%s' % filename):
                os.remove('inception/%s' % filename)

        return {'response': response}
Example #11
def invoke(request):
    '''Where the magic happens...'''

    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="invoke"):
        input_details = _model.get_input_details()
        _model.set_tensor(input_details[0]['index'], transformed_request)
        _model.invoke()
        output_details = _model.get_output_details()
        response = _model.get_tensor(output_details[0]['index'])

    with monitor(labels=_labels, name="transform_response"):
        transformed_response = _transform_response(response)

    return transformed_response
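Example #11 drives what appears to be a TensorFlow Lite Interpreter. A self-contained sketch of that lifecycle, assuming _model is a tf.lite.Interpreter (the model path and the input array are placeholders, not from the original example):

import numpy as np
import tensorflow as tf

_model = tf.lite.Interpreter(model_path='model.tflite')  # placeholder path
_model.allocate_tensors()  # required once before set_tensor()/invoke()

def _run(transformed_request: np.ndarray) -> np.ndarray:
    input_details = _model.get_input_details()
    output_details = _model.get_output_details()
    _model.set_tensor(input_details[0]['index'], transformed_request)
    _model.invoke()
    # get_output_details() only describes the output tensors; the values come from get_tensor()
    return _model.get_tensor(output_details[0]['index'])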
Example #12
def predict(request: bytes) -> bytes:
    with monitor(labels=_labels, name="predict"):

        request_str = request.decode('utf-8')
        print(request_str)

        avatar_url = json.loads(request_str)['sender']['avatar_url']
        print(avatar_url)

        #        stream_body = '{"records": [{"value":%s}]}' % request_str
        #        response = requests.post(url=_stream_endpoint_url,
        #                                 headers=_stream_accept_and_content_type_headers,
        #                                 data=stream_body.encode('utf-8'),
        #                                 timeout=30)

        #import urllib
        #avatar_url = urllib.parse.quote(avatar_url)

        cmd = 'curl -X POST --data "token=xoxa-228608739446-303548610531-303548610803-376b8dcda37e59fc571c660eb0fb9c1d&channel=demo-community&text=%s" http://slack.com:443/api/chat.postMessage' % avatar_url

        #        cmd = 'curl -X POST --data-urlencode "payload={\\"unfurl_links\\": true, \\"channel\\": \\"#demo-community\\", \\"username\\": \\"pipelineai_bot\\", \\"text\\": \\"%s\\"}" http://hooks.slack.com:443/services/T6QHWMRD4/B9KNAA0BS/dsglc5SFARz3hISU4pDlAms3' % avatar_url
        print(cmd)

        import subprocess
        subprocess.call(cmd, shell=True)

        #        if slack_response.status_code != 200:
        #            raise ValueError(
        #                'Request to slack returned an error %s, the response is:\n%s'
        #                % (slack_response.status_code, slack_response.text)
        #        )

        return {'response': 'OK'}
Example #13
def predict(request: bytes) -> bytes:
    '''Where the magic happens...'''
    transformed_request = _json_to_pd_df(request)
    data = transformed_request.drop('Time', axis=1)
    data['Amount'] = StandardScaler().fit_transform(
        data['Amount'].values.reshape(-1, 1))
    y_test = data['Class']
    X_test = data.drop(['Class'], axis=1)

    X_values = X_test.values
    #autoencoder = load_model(_model)

    with monitor(labels=_labels, name="predict"):
        predictions = _model.predict(X_test)
    mse = np.mean(np.power(X_test - predictions, 2), axis=1)
    error_df = pd.DataFrame({
        'reconstruction_error': mse,
        'true_class': y_test
    })
    # to build the confusion matrix
    threshold = 2.9

    y_pred = [
        1 if e > threshold else 0 for e in error_df.reconstruction_error.values
    ]

    conf_matrix = confusion_matrix(error_df.true_class, y_pred)

    return _numpy_to_json(conf_matrix)
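The helpers _json_to_pd_df and _numpy_to_json are referenced but never shown. A plausible sketch, assuming the request body is a JSON array of transaction records carrying the same columns used above (Time, Amount, Class, ...):

import json
import numpy as np
import pandas as pd

def _json_to_pd_df(request: bytes) -> pd.DataFrame:
    # Assumption: the payload is a JSON list of row objects
    return pd.DataFrame(json.loads(request.decode('utf-8')))

def _numpy_to_json(arr: np.ndarray) -> bytes:
    return json.dumps(arr.tolist()).encode('utf-8')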
Example #14
def invoke(request):
    '''Where the magic happens...'''

    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="invoke"):
        # TODO:  Using python requests, implement a call to 1 other model (for now)
        #        using a model that has been deployed to dev or prod.
        #        For now, just use the full external URL displayed in the http snippet
        #        in the dev or prod UI.
        timeout_seconds = 1200

        url_model_a = 'https://dev.cloud.pipeline.ai/01234567mnist/invoke'
        response = _requests.post(url=url_model_a,
                                  data=request,
                                  timeout=timeout_seconds)


    with monitor(labels=_labels, name="transform_response"):
        transformed_response = _transform_response(response)

    return transformed_response

# Note:  Don't change this...
def _transform_request(request):
    request_str = request.decode('utf-8')
    request_json = json.loads(request_str)
    request_np = ((255 - np.array(request_json['image'], dtype=np.uint8)) / 255.0).reshape(1, 28, 28)
    return {"image": request_np}

# Note:  Don't change this...
def _transform_response(response):
    return json.dumps({"classes": response['classes'].tolist(),
                       "probabilities": response['probabilities'].tolist(),
                      })

# Note:  This is a mini test
if __name__ == '__main__':
    with open('../input/predict/test_request.json', 'rb') as fb:
        request_bytes = fb.read()
        response_bytes = invoke(request_bytes)
        print(response_bytes)
Example #15
def invoke(request):
    """Where the magic happens..."""
    transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="invoke"):
        response = _predict(transformed_request)

    return _transform_response(response)
Example #16
def predict(request: bytes) -> bytes:
    '''Where the magic happens...'''
    transformed_request = _json_to_numpy(request)

    with monitor(labels=_labels, name="predict"):
        predictions = _model.predict(transformed_request)

    return _numpy_to_json(predictions)
Example #17
def invoke(request):
    '''Where the magic happens...'''
    transformed_request = _json_to_numpy(request)

    with monitor(labels=_labels, name="invoke"):
        response = _model.predict(transformed_request)

    return _numpy_to_json(response)
Example #18
def invoke(request):
    '''Where the magic happens...'''

    _logger.debug('invoke: raw request: %s' % request)
    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)
    _logger.debug('invoke: transformed request: %s' % transformed_request)

    with monitor(labels=_labels, name="invoke"):
        response = _model(transformed_request)
    _logger.debug('invoke: raw response: %s' % response)

    with monitor(labels=_labels, name="transform_response"):
        transformed_response = _transform_response(response)
    _logger.debug('invoke: transformed response: %s' % transformed_response)

    return transformed_response
Example #19
def invoke(request):
    with monitor(labels=_labels, name="invoke"):

        request_str = request.decode('utf-8')
        print(request_str)

        avatar_url = json.loads(request_str)['sender']['avatar_url']
        print(avatar_url)
        
#        stream_body = '{"records": [{"value":%s}]}' % request_str
#        response = requests.post(url=_stream_endpoint_url,
#                                 headers=_stream_accept_and_content_type_headers,
#                                 data=stream_body.encode('utf-8'),
#                                 timeout=30)

        #import urllib
        #avatar_url = urllib.parse.quote(avatar_url)

        # Note:  https:// doesn't work through istio; it appears we need to use http://...:443/, but that doesn't work well with the Slack API (it shows a CloudFront issue when we try).

#        headers = {'Content-Type': 'text/plain'}
#        response = requests.post(headers=headers,
#                                 url='https://slack.com/api/chat.postMessage?token=[INSERT-TOKEN-HERE]&channel=demo-community&text=%s' % avatar_url)

#        response = response.text

        cmd = 'curl -X POST --data-urlencode "payload={\\"unfurl_links\\": true, \\"channel\\": \\"#demo-community\\", \\"username\\": \\"pipelineai_bot\\", \\"text\\": \\"%s\\"}" https://hooks.slack.com/services/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' % avatar_url 
        print(cmd)
        import subprocess
        response = subprocess.check_output(cmd, shell=True).decode('utf-8')

#        payload = {
#                   "unfurl_links": "true", 
#                   "channel": "#demo-community", 
#                   "username": "******",
#                   "text": "%s" % avatar_url
#                  }

#        response = requests.post(
#                                 url='https://hooks.slack.com/services/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',
#                                 data=payload
#                                )
#        response = response.text

#        import subprocess
#        response = subprocess.check_output(cmd, shell=True).decode('utf-8')

#        if slack_response.status_code != 200:
#            raise ValueError(
#                'Request to slack returned an error %s, the response is:\n%s'
#                % (slack_response.status_code, slack_response.text)
#        )

        return {'response': response}
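The commented-out blocks above already hint at posting to Slack with requests instead of shelling out to curl. A cleaned-up sketch of that idea, assuming outbound HTTPS to hooks.slack.com works in the target environment (the webhook URL would be the one used above):

import json
import requests

def _post_avatar_to_slack(avatar_url: str, webhook_url: str) -> str:
    # Mirrors the curl --data-urlencode "payload=..." call above
    payload = {'unfurl_links': True,
               'channel': '#demo-community',
               'username': 'pipelineai_bot',
               'text': avatar_url}
    resp = requests.post(url=webhook_url,
                         data={'payload': json.dumps(payload)},
                         timeout=30)
    return resp.text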
Example #20
def invoke(request):
    """Where the magic happens..."""

    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="invoke"):
        # TODO: Handle any failure responses such as Fallback/Circuit-Breaker, etc.

        # TODO: Can we use the internal DNS name (predict-mnist)?
        # TODO: Pass along the request-tracing headers
        timeout_seconds = 1200

        url_model_a = 'https://community.cloud.pipeline.ai/predict/83f05e58/mnista/invoke'
        response_a = _requests.post(url=url_model_a,
                                    data=request,
                                    timeout=timeout_seconds)

        url_model_b = 'https://community.cloud.pipeline.ai/predict/83f05e58/mnistb/invoke'
        response_b = _requests.post(url=url_model_b,
                                    data=request,
                                    timeout=timeout_seconds)

        url_model_c = 'https://community.cloud.pipeline.ai/predict/83f05e58/mnistc/invoke'
        response_c = _requests.post(url=url_model_c,
                                    data=request,
                                    timeout=timeout_seconds)

        # TODO: Aggregate the responses into a single response
        #       * Classification:  Return the majority class from all predicted classes
        #       * Regression:  Average the result
        # TODO: Include all models that participated in the response (including confidences,
        #       timings, etc.); see the metadata sketch after this example

        response = response_c

    with monitor(labels=_labels, name="transform_response"):
        transformed_response = _transform_response(response)

    return transformed_response
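The metadata TODO above asks for every participating model in the final response. One hedged way to shape that, assuming _requests is the module-level requests alias already used in this example and that each JSON body carries a classes list (both _timed_post and _aggregate are hypothetical helpers):

import time
from collections import Counter

def _timed_post(name, url, data, timeout):
    # Wrap each model call to capture per-model latency alongside its JSON body
    start = time.time()
    resp = _requests.post(url=url, data=data, timeout=timeout)
    return {'model': name,
            'elapsed_seconds': time.time() - start,
            'body': resp.json()}

def _aggregate(results):
    votes = Counter(r['body']['classes'][0] for r in results)
    majority_class, _ = votes.most_common(1)[0]
    return {'class': majority_class, 'models': results}

The three posts above would then become _timed_post('mnista', url_model_a, request, timeout_seconds) and so on, with response = _aggregate([...]).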
Example #21
def invoke(request: bytes) -> str:
    """
    Transform bytes posted to the api into a python dictionary containing the
    existing resource routes by tag and weight.
    Predict least expensive routes and adjust higher weights to lower cost routes.
    Transform the model prediction output from python dictionary to a JSON formatted str
    containing the new resource routes by tag and weight

    :param bytes request:   bytes containing the payload to supply to the predict method

    :return:                Response obj serialized to a JSON formatted str
                                containing the new resource routes by tag and weight
    """
    with monitor(labels=_labels, name='transform_request'):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name='invoke'):
        response = _echo(transformed_request)

    with monitor(labels=_labels, name='transform_response'):
        transformed_response = _transform_response(response)

    return transformed_response
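The docstring above describes the payload as resource routes keyed by tag with traffic weights, but the transform helpers are not shown. A minimal sketch of the shapes involved, assuming a hypothetical {"routes": {tag: weight}} layout:

import json

def _transform_request(request: bytes) -> dict:
    # e.g. b'{"routes": {"v1": 90, "v2": 10}}' -> {'routes': {'v1': 90, 'v2': 10}}
    return json.loads(request.decode('utf-8'))

def _transform_response(response: dict) -> str:
    # Serialize the (possibly re-weighted) routes back to a JSON string
    return json.dumps(response)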
Example #22
def invoke(request: bytes) -> str:
    """
    Transform bytes posted to the api into an XGBoost DMatrix which is an
    internal data structure that is used by XGBoost which is optimized for
    both memory efficiency and training speed.
    Classify the image
    Transform the model prediction output from a 1D array to a list of classes and probabilities

    :param bytes request:   bytes containing the payload to supply to the predict method

    :return:                Response obj serialized to a JSON formatted str
                            containing a list of classes and a list of probabilities
    """
    with monitor(labels=_labels, name='transform_request'):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name='invoke'):
        response = _model.predict(transformed_request)

    with monitor(labels=_labels, name='transform_response'):
        transformed_response = _transform_response(response)

    return transformed_response
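The docstring above says the request is turned into an XGBoost DMatrix, but _transform_request is not shown here. A hedged sketch, assuming the payload is a JSON object with a 2-D 'data' field of feature rows (the field name is a placeholder):

import json
import numpy as np
import xgboost as xgb

def _transform_request(request: bytes) -> xgb.DMatrix:
    request_json = json.loads(request.decode('utf-8'))
    features = np.asarray(request_json['data'], dtype=np.float32)
    # DMatrix is XGBoost's memory- and speed-optimized internal format
    return xgb.DMatrix(features)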
Example #23
def predict(request: bytes) -> bytes:
    with monitor(labels=_labels, name="predict"):

        request_str = request.decode('utf-8')

        body = '{"records": [{"value":%s}]}' % request_str

        response = requests.post(url=_endpoint_url,
                                 headers=_accept_and_content_type_headers,
                                 data=body.encode('utf-8'),
                                 timeout=30)

        _sc.api_call("chat.postMessage",
                     channel="G9L5CFPHD",
                     text=response.text)

        return {'response': response.text}