Example #1
0
    def test_predict(self):
        """Verify predict() forwards the request and returns the stubbed response."""
        # Canned response the stubbed channel will hand back.
        stub_response = prediction_service_pb2.PredictResponse()

        # Route client traffic through an in-memory channel stub.
        channel = ChannelStub(responses=[stub_response])
        with mock.patch("google.api_core.grpc_helpers.create_channel") as create_channel:
            create_channel.return_value = channel
            client = automl_v1.PredictionServiceClient()

        # Build request arguments.
        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
        payload = {}

        # Call under test: the canned response must come back unchanged.
        assert client.predict(name, payload) == stub_response

        # Exactly one RPC was issued, carrying the expected request proto.
        assert len(channel.requests) == 1
        expected_request = prediction_service_pb2.PredictRequest(
            name=name, payload=payload)
        assert channel.requests[0][1] == expected_request
Example #2
0
    def get_prediction(self, sent):
        """Run the AutoML model on *sent* and return the prediction response.

        Args:
            sent (str): input sentence to normalize.

        Returns:
            The PredictResponse returned by the AutoML prediction API.
        """
        # Point the client at the public AutoML endpoint.
        endpoint = ClientOptions(api_endpoint='automl.googleapis.com')
        predictor = automl_v1.PredictionServiceClient(client_options=endpoint)

        # Wrap the raw sentence in the payload format the API expects.
        formatted = self.inline_text_payload(sent)

        # Synchronous call; blocks until the API responds. No extra params.
        return predictor.predict(self.model_name, formatted, {})
def classify_doc(bucket, filename):
    """Classify a GCS document with the AutoML sort model.

    Args:
        bucket (str): GCS bucket holding the document.
        filename (str): object name within the bucket.

    Returns:
        str | None: the winning label's display name when its score clears
        the threshold; None for unsupported/missing input or low confidence.
    """
    options = ClientOptions(api_endpoint='automl.googleapis.com')
    prediction_client = automl_v1.PredictionServiceClient(
        client_options=options)

    _, ext = os.path.splitext(filename)
    # BUG FIX: ".txt"/".html" were listed without the leading dot, so those
    # files never matched (os.path.splitext keeps the dot in ext).
    if ext in ['.pdf', '.txt', '.html']:
        payload = _gcs_payload(bucket, filename)
    elif ext in ['.tif', '.tiff', '.png', '.jpeg', '.jpg']:
        payload = _img_payload(bucket, filename)
    else:
        # BUG FIX: messages printed a literal "(unknown)" where the object
        # name belongs.
        print(
            f"Could not sort document gs://{bucket}/{filename}, unsupported file type {ext}")
        return None
    if not payload:
        print(
            f"Missing document gs://{bucket}/{filename} payload, cannot sort")
        return None
    request = prediction_client.predict(
        os.environ["SORT_MODEL_NAME"], payload, {})
    label = max(request.payload, key=lambda x: x.classification.score)
    # BUG FIX: float(os.environ.get(...)) raised TypeError when
    # SORT_MODEL_THRESHOLD was unset; fall back to 0.7 via the get() default.
    threshold = float(os.environ.get('SORT_MODEL_THRESHOLD', 0.7))
    displayName = label.display_name if label.classification.score > threshold else None
    print(f"Labeled document gs://{bucket}/{filename} as {displayName}")
    return displayName
def get_prediction(content, project_id, model_id):
    """Send image bytes to an AutoML image model and return the raw response."""
    client = automl_v1.PredictionServiceClient()

    # Fully-qualified model resource name; these models live in us-central1.
    model_name = 'projects/{}/locations/us-central1/models/{}'.format(
        project_id, model_id)
    image_payload = {'image': {'image_bytes': content}}
    # Blocking call; returns once the API responds.
    return client.predict(name=model_name, payload=image_payload)
Example #5
0
    def test_batch_predict(self):
        """Verify batch_predict() resolves its operation to the stubbed result."""
        # Canned BatchPredictResult wrapped in a completed long-running op.
        stub_result = prediction_service_pb2.BatchPredictResult()
        operation = operations_pb2.Operation(
            name="operations/test_batch_predict", done=True)
        operation.response.Pack(stub_result)

        # Serve the operation through an in-memory channel stub.
        channel = ChannelStub(responses=[operation])
        with mock.patch("google.api_core.grpc_helpers.create_channel") as create_channel:
            create_channel.return_value = channel
            client = automl_v1.PredictionServiceClient()

        # Build request arguments.
        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
        input_config = {}
        output_config = {}

        # The future's result must unpack to the canned proto.
        response = client.batch_predict(name, input_config, output_config)
        assert response.result() == stub_result

        # Exactly one RPC was issued, carrying the expected request proto.
        assert len(channel.requests) == 1
        expected_request = prediction_service_pb2.BatchPredictRequest(
            name=name, input_config=input_config, output_config=output_config)
        assert channel.requests[0][1] == expected_request
Example #6
0
def stock_tweet_classifier(tweet_string):
    """Classify a tweet with the trained AutoML model.

    Args:
        tweet_string (str): raw tweet text.

    Returns:
        bool: True when the model's top label is 'stock', else False.
    """
    options = ClientOptions(api_endpoint='automl.googleapis.com')
    model_name = 'projects/313817029040/locations/us-central1/models/TCN8645127876691099648'
    # NOTE(review): loads a service-account key from a bundled JSON file —
    # keep that key out of version control.
    credentials = service_account.Credentials.from_service_account_file(
        'AutoMLAuth.json')
    prediction_client = automl_v1.PredictionServiceClient(
        client_options=options, credentials=credentials)

    # Plain-text snippet payload for the text-classification model.
    text_snip = {
        'text_snippet': {
            'content': tweet_string,
            'mime_type': 'text/plain'
        }
    }
    payload = automl_v1.ExamplePayload(text_snip)
    request = prediction_client.predict(name=model_name, payload=payload)

    # Top annotation's label decides the classification.
    classification = request.payload[0].display_name

    # Idiom fix: return the comparison directly instead of if/else True/False.
    return classification == 'stock'
Example #7
0
def get_prediction(content, model_name):
    """Run an AutoML text model on *content* and return the raw response."""
    # Public AutoML endpoint.
    endpoint = ClientOptions(api_endpoint='automl.googleapis.com')
    client = automl_v1.PredictionServiceClient(client_options=endpoint)

    # Wrap the text in the structure the API expects; no extra params.
    text_payload = format_text_payload(content)
    return client.predict(model_name, text_payload, {})  # blocks until done
Example #8
0
def predict(input, model_name):
    """Predict on *input* text using an EU-hosted AutoML model.

    NOTE(review): the parameter name shadows the builtin `input`; kept
    unchanged for caller compatibility.
    """
    # EU regional endpoint — the model is hosted in the eu location.
    options = ClientOptions(api_endpoint='eu-automl.googleapis.com')
    client = automl_v1.PredictionServiceClient(client_options=options)

    # Build an explicit PredictRequest with a plain-text snippet payload.
    snippet = {'text_snippet': {'content': input, 'mime_type': 'text/plain'}}
    automl_request = automl_v1.PredictRequest(
        name=model_name, payload=snippet, params={})
    return client.predict(automl_request)
Example #9
0
def get_prediction(model_name, content):
    """Run an AutoML text model on *content*; returns the PredictResponse."""
    client = automl_v1.PredictionServiceClient(
        client_options=ClientOptions(api_endpoint='automl.googleapis.com'))

    # Plain-text snippet payload; no extra prediction params.
    snippet = {'text_snippet': {'content': content, 'mime_type': 'text/plain'}}

    return client.predict(model_name, snippet, {})  # waits for the API
def get_prediction(file_path, model_name):
    """Predict on the PDF at *file_path* with an AutoML model."""
    endpoint = ClientOptions(api_endpoint='automl.googleapis.com')
    client = automl_v1.PredictionServiceClient(client_options=endpoint)

    # PDF input; swap in inline_text_payload(file_path) for plain text.
    document = pdf_payload(file_path)

    # Blocking call with no extra prediction params.
    return client.predict(model_name, document, {})
Example #11
0
def get_prediction(file_path, model_name, column_name=None):
  """Predict on the text file at *file_path* with an AutoML model.

  `column_name` is accepted for caller compatibility but unused here.
  """
  client = automl_v1.PredictionServiceClient(
      client_options=ClientOptions(api_endpoint='automl.googleapis.com'))

  # Inline text payload; switch to pdf_payload(file_path) for PDFs.
  text = inline_text_payload(file_path)

  return client.predict(model_name, text, {})  # blocks until done
Example #12
0
def get_prediction_text(content, model_name):
    """Run an AutoML text model on *content* and return the response."""
    endpoint = ClientOptions(api_endpoint='automl.googleapis.com')
    client = automl_v1.PredictionServiceClient(client_options=endpoint)

    # Plain-text snippet; use pdf_payload(file_path) instead for PDFs.
    snippet = {'text_snippet': {'content': content, 'mime_type': 'text/plain'}}

    return client.predict(model_name, snippet, {})  # waits for the API
Example #13
0
def get_prediction(
    text,
    model_name="projects/635112130949/locations/us-central1/models/TCN3179860195295625216"
):
    """Predict on *text* with an AutoML model (defaults to a fixed TCN model)."""
    client = automl_v1.PredictionServiceClient(
        client_options=ClientOptions(api_endpoint='automl.googleapis.com'))

    # Wrap the raw text in the API's payload format; no extra params.
    snippet = inline_text_payload(text)

    return client.predict(model_name, snippet, {})  # blocks until done
def main(input_file, model_name):
    """Classify *input_file*'s contents and print each label with its score."""
    text = input_file.read()
    client = automl_v1.PredictionServiceClient(
        client_options=ClientOptions(api_endpoint='automl.googleapis.com'))
    snippet = {'text_snippet': {'content': text, 'mime_type': 'text/plain'}}
    response = client.predict(model_name, snippet, {})
    # One line per predicted label, score shown to five decimal places.
    for result in response.payload:
        label = result.display_name
        match = result.classification.score
        print(f'Label: {label} : {match:.5f}')
Example #15
0
def get_prediction(content, project_id, model_id):
    """Classify image bytes with an AutoML model; returns the top label name.

    Args:
        content (bytes): raw image bytes.
        project_id (str): GCP project id.
        model_id (str): AutoML model id.

    Returns:
        str: display name of the first (top-ranked) prediction.
    """
    # NOTE(review): mutates the process environment to point at a bundled
    # key file; prefer passing credentials= to the client explicitly.
    os.environ[
        'GOOGLE_APPLICATION_CREDENTIALS'] = 'CodeChella-315303cc6526.json'
    prediction_client = automl_v1.PredictionServiceClient()

    name = 'projects/{}/locations/us-central1/models/{}'.format(
        project_id, model_id)
    payload = {'image': {'image_bytes': content}}
    # Fix: dropped the unused `params` local — it was never passed to predict().
    request = prediction_client.predict(name=name, payload=payload)
    # Round-trip the proto through JSON to read the first label's display name.
    json_string = type(request).to_json(request)
    jsn = json.loads(json_string)
    return jsn['payload'][0]['displayName']  # waits till request is returned
Example #16
0
    def test_predict_exception(self):
        """predict() must surface an exception raised by the transport."""
        # Stub channel that raises instead of responding.
        channel = ChannelStub(responses=[CustomException()])
        with mock.patch("google.api_core.grpc_helpers.create_channel") as create_channel:
            create_channel.return_value = channel
            client = automl_v1.PredictionServiceClient()

        # Build request arguments.
        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
        payload = {}

        # The stubbed error propagates to the caller.
        with pytest.raises(CustomException):
            client.predict(name, payload)
Example #17
0
    def test_batch_predict_exception(self):
        """batch_predict()'s future must expose an operation error."""
        # Completed operation carrying an error status instead of a response.
        error = status_pb2.Status()
        operation = operations_pb2.Operation(
            name="operations/test_batch_predict_exception", done=True)
        operation.error.CopyFrom(error)

        # Serve the operation through an in-memory channel stub.
        channel = ChannelStub(responses=[operation])
        with mock.patch("google.api_core.grpc_helpers.create_channel") as create_channel:
            create_channel.return_value = channel
            client = automl_v1.PredictionServiceClient()

        # Build request arguments.
        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
        input_config = {}
        output_config = {}

        # The future resolves to an exception wrapping the operation error.
        response = client.batch_predict(name, input_config, output_config)
        assert response.exception().errors[0] == error
Example #18
0
def get_prediction(file_path, model_name):
    """Classify the text file at *file_path* and return the top label name.

    Args:
        file_path (str): path to a plain-text file to classify.
        model_name (str): full AutoML model resource name.

    Returns:
        str: display name of the top-ranked prediction.
    """
    options = ClientOptions(api_endpoint='automl.googleapis.com')
    # NOTE(review): keep the service-account key file out of version control.
    credentials = service_account.Credentials.from_service_account_file(
        'rapid-hall-302622-7d92a5d1344e.json')
    prediction_client = automl_v1.PredictionServiceClient(
        client_options=options, credentials=credentials)

    # BUG FIX: the payload previously hard-coded "this is some test string"
    # (a debugging leftover) and ignored file_path entirely; read the file.
    with open(file_path, 'r') as f:
        content = f.read()
    text_snip = {
        'text_snippet': {
            'content': content,
            'mime_type': 'text/plain'
        }
    }
    payload = automl_v1.ExamplePayload(text_snip)
    request = prediction_client.predict(name=model_name, payload=payload)

    return request.payload[0].display_name  # waits until request is returned
Example #19
0
def diagnose(path):
    """Classify the image at *path* with the telemed AutoML image model.

    Args:
        path (str): local path to an image file.

    Returns:
        str | None: display name of the first predicted label, or None if
        the response carries no predictions.
    """
    project_id = "telemed-300210"
    model_id = "ICN2967249830855835648"

    with open(path, 'rb') as ff:
        content = ff.read()

    prediction_client = automl_v1.PredictionServiceClient()

    model_full_id = automl.AutoMlClient.model_path(
        project_id, "us-central1", model_id)

    payload = {'image': {'image_bytes': content}}
    response = prediction_client.predict(
        name=model_full_id, payload=payload, params={})
    print("Prediction results:")
    # Fix: the loop variable previously shadowed the response object. As
    # before, only the first label is printed and returned.
    for annotation in response.payload:
        print("Predicted entity label: {}".format(annotation.display_name))
        print("\n")
        return annotation.display_name
    return None
Example #20
0
from flask import Flask, render_template, request, url_for
#from google.cloud import automl
import sys
from google.api_core.client_options import ClientOptions
from google.cloud import automl_v1

# Project / model identifiers for the deployed AutoML text model.
project_id = 'nu-msds434'
model_id = 'TST4666162421537177600'

# Module-level prediction client pointed at the public AutoML endpoint,
# shared by all request handlers.
options = ClientOptions(api_endpoint='automl.googleapis.com')
prediction_client = automl_v1.PredictionServiceClient(client_options=options)

# Fully-qualified model resource name (uses the numeric project number).
model_full_id = 'projects/755666330619/locations/us-central1/models/TST4666162421537177600'

app = Flask(__name__)


@app.route("/")
def index():
    # Landing page: renders the static input form template.
    return render_template("index.html")


@app.route("/results", methods=['POST', 'GET'])
def predict():
    if request.method == 'POST':
        comment = request.form['comment']
        #data = [comment]
        text_snippet = automl_v1.TextSnippet(content=comment,
                                             mime_type="text/plain")
        payload = automl_v1.ExamplePayload(text_snippet=text_snippet)
        my_prediction = prediction_client.predict(name=model_full_id,