Code example #1
def predict(project_id, model_id, content):
    """Predict."""
    # [START automl_language_entity_extraction_predict]
    from google.cloud import automl

    # TODO(developer): Uncomment and set the following variables
    # project_id = "YOUR_PROJECT_ID"
    # model_id = "YOUR_MODEL_ID"
    # content = "text to predict"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                                   model_id)

    # Supported mime_types: 'text/plain', 'text/html'
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
    text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
    payload = automl.ExamplePayload(text_snippet=text_snippet)

    response = prediction_client.predict(name=model_full_id, payload=payload)

    for annotation_payload in response.payload:
        print("Text Extract Entity Types: {}".format(
            annotation_payload.display_name))
        print("Text Score: {}".format(
            annotation_payload.text_extraction.score))
        text_segment = annotation_payload.text_extraction.text_segment
        print("Text Extract Entity Content: {}".format(text_segment.content))
        print("Text Start Offset: {}".format(text_segment.start_offset))
        print("Text End Offset: {}".format(text_segment.end_offset))
Code example #2
def predict(project_id, model_id, file_path):
    """Predict."""
    # [START automl_translate_predict]
    from google.cloud import automl

    # TODO(developer): Uncomment and set the following variables
    # project_id = "YOUR_PROJECT_ID"
    # model_id = "YOUR_MODEL_ID"
    # file_path = "path_to_local_file.txt"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                                   model_id)

    # Read the file content for translation.
    with open(file_path, "rb") as content_file:
        content = content_file.read()
    # Decode the raw bytes so the payload is sent as UTF-8 text.
    content = content.decode("utf-8")

    text_snippet = automl.TextSnippet(content=content)
    payload = automl.ExamplePayload(text_snippet=text_snippet)

    response = prediction_client.predict(name=model_full_id, payload=payload)
    translated_content = response.payload[0].translation.translated_content

    print(u"Translated content: {}".format(translated_content.content))
Code example #3
File: main.py  Project: akrantz01/skew
async def process(req: models.Request):
    """
    Analyze a given piece of text for bias. The text is first checked against the database to see whether it
    has already been processed; if not, it is sent for inference to a trained GCP NLP model.
    """
    # Either fetch the content from the site or use the provided text
    if req.url != "":
        try:
            article = Article(req.url)
            article.download()
            article.parse()

            if article.text:
                text = article.text
            else:
                text = req.text
        except ArticleException:
            text = req.text
    else:
        text = req.text

    # Calculate hash of id and text
    job_hash = compute_job_hash(req.id, text)

    # Ensure job has not been computed or is being computed
    job_ref = db.collection("text").document(job_hash)
    job = job_ref.get()
    if job.exists:
        job_data = job.to_dict()
        return {
            "success": True,
            "bias": job_data.get("bias"),
            "extent": job_data.get("extent")
        }

    # Process the data
    snippet = automl.TextSnippet(content=text, mime_type="text/plain")
    payload = automl.ExamplePayload(text_snippet=snippet)
    response = predictor.predict(name=MODEL_PATH, payload=payload)
    bias, extent = extract_from_categories(response.payload)

    # Save the processed results to the database
    job_ref.set({
        "hash": job_hash,
        "bias": bias,
        "extent": extent
    })

    return {
        "success": True,
        "bias": bias,
        "extent": extent
    }
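The handler above also depends on module-level objects that are not part of this excerpt (`db`, `predictor`, `MODEL_PATH`, `compute_job_hash`, `extract_from_categories`). A rough sketch of what that setup could look like, with the helper logic simplified and every name treated as an assumption rather than the project's actual code:

# Sketch of the module-level setup assumed by process(); names and logic are
# illustrative, not taken from the original project.
import hashlib

from google.cloud import automl, firestore

db = firestore.Client()
predictor = automl.PredictionServiceClient()
MODEL_PATH = automl.AutoMlClient.model_path("YOUR_PROJECT_ID", "us-central1",
                                            "YOUR_MODEL_ID")


def compute_job_hash(job_id, text):
    # Stable key for the Firestore document: hash of the request id and text.
    return hashlib.sha256(f"{job_id}:{text}".encode("utf-8")).hexdigest()


def extract_from_categories(payload):
    # Take the highest-scoring classification label as the bias, its score as the extent.
    top = max(payload, key=lambda p: p.classification.score)
    return top.display_name, top.classification.score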
Code example #4
File: main_copy.py  Project: aaisha-s/NU-MSDS434
def predict():
    if request.method == 'POST':
        comment = request.form['comment']
        #data = [comment]
        text_snippet = automl.TextSnippet(content=comment,
                                          mime_type="text/plain")
        payload = automl.ExamplePayload(text_snippet=text_snippet)
        my_prediction = prediction_client.predict(name=model_full_id,
                                                  payload=payload)

        return render_template('results.html',
                               prediction=my_prediction,
                               comment=comment)
Code example #5
def prediction():
    if request.method == 'POST':
        commentInput = request.form
        for key, item in commentInput.items():
            # Keep the submitted text so it can be echoed back in the template.
            content = item
            text_snippet = automl.TextSnippet(content=item,
                                              mime_type="text/plain")
        payload = automl.ExamplePayload(text_snippet=text_snippet)
        response = prediction_client.predict(name=model_full_id,
                                             payload=payload)

        for annotation_payload in response.payload:
            sentPred = annotation_payload.display_name
            break

        return render_template('results.html',
                               sentPred=sentPred,
                               content=content)
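Both Flask handlers (code examples #4 and #5) read `prediction_client` and `model_full_id` from module scope and rely on the standard Flask imports. A minimal sketch of that assumed shared setup, with placeholder IDs:

# Sketch of the module-level setup assumed by the Flask handlers above;
# project and model IDs are placeholders.
from flask import Flask, request, render_template
from google.cloud import automl

app = Flask(__name__)

project_id = "YOUR_PROJECT_ID"
model_id = "YOUR_MODEL_ID"

prediction_client = automl.PredictionServiceClient()
model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                               model_id)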
Code example #6
def predict(content):

    # Supported mime_types: 'text/plain', 'text/html'
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
    text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
    payload = automl.ExamplePayload(text_snippet=text_snippet)

    response = prediction_client.predict(name=model_full_id, payload=payload)

    for annotation_payload in response.payload:
        # print(
        #     u"Predicted class name: {}".format(annotation_payload.display_name)
        # )
        # print(
        #     u"Predicted class score: {}".format(
        #         annotation_payload.classification.score
        #     )
        # )
        # Return the label of the first returned prediction.
        return annotation_payload.display_name
Code example #7
def predict(content):
    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                                   model_id)

    # Supported mime_types: 'text/plain', 'text/html'
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
    text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
    payload = automl.ExamplePayload(text_snippet=text_snippet)

    response = prediction_client.predict(name=model_full_id, payload=payload)

    for annotation_payload in response.payload:
        print("Predicted class name: {}".format(
            annotation_payload.display_name))
        print("Predicted sentiment score: {}".format(
            annotation_payload.text_sentiment.sentiment))
        return annotation_payload.text_sentiment.sentiment
Code example #8
File: predictors.py  Project: s-fellner/BSA_Rolex
def predict(content):

    from google.cloud import automl

    # You must first create a dataset, using the `eu` endpoint, before you can
    # call other operations such as: list, get, import, delete, etc.
    client_options = {'api_endpoint': 'eu-automl.googleapis.com:443'}
    project_id = '685330484131'
    # (model AI Crowd) model_id = 'TCN3300918624537018368'
    model_id = 'TCN4629621252099670016'

    prediction_client = automl.PredictionServiceClient(
        client_options=client_options)

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "eu", model_id)
    text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
    payload = automl.ExamplePayload(text_snippet=text_snippet)

    response = prediction_client.predict(name=model_full_id, payload=payload)
    return response
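The function returns the raw prediction response, so a caller would iterate `response.payload` to read labels and scores, much like code example #10 below. A hypothetical caller:

# Hypothetical caller of the EU-endpoint classification sample above.
response = predict("Text to classify")
for annotation_payload in response.payload:
    print("Predicted class name: {}".format(annotation_payload.display_name))
    print("Predicted class score: {}".format(
        annotation_payload.classification.score))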
Code example #9
    def nlp_predict(self, model_id: (str, 'the id of the deployed nlp model'),
                    content: (str,
                              'the text to submit to the prediction model')):

        response_payload = None
        try:
            model_full_id = self.automl_client.model_path(
                self.project_id, self.region, model_id)

            text_snippet = automl.TextSnippet(content=content,
                                              mime_type="text/plain")
            payload = automl.ExamplePayload(text_snippet=text_snippet)
            #payload = {'text_snippet': {'content': content, 'mime_type': 'text/plain' }}

            response = self.prediction_client.predict(name=model_full_id,
                                                      payload=payload)
            response_payload = response.payload

        except Exception:
            logger.exception("")

        return response_payload
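This method belongs to a wrapper class whose constructor is not shown; it assumes `self.automl_client`, `self.prediction_client`, `self.project_id`, and `self.region` are already set. A minimal sketch of such an initializer (the class name and defaults are assumptions):

# Sketch of the wrapper class assumed by nlp_predict(); attribute names mirror
# those used in the method, everything else is illustrative.
import logging

from google.cloud import automl

logger = logging.getLogger(__name__)


class AutoMlNlpClient:
    def __init__(self, project_id, region="us-central1"):
        self.project_id = project_id
        self.region = region
        self.automl_client = automl.AutoMlClient()
        self.prediction_client = automl.PredictionServiceClient()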
Code example #10
import os

from google.cloud import automl
from dotenv import load_dotenv

load_dotenv()

project_id = os.getenv("PROJECT_ID")
model_id = os.getenv("MODEL_ID")
content = "How does the len() function work?"

prediction_client = automl.PredictionServiceClient()

# Get the full path of the model.
model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                               model_id)

text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
payload = automl.ExamplePayload(text_snippet=text_snippet)

response = prediction_client.predict(name=model_full_id, payload=payload)

for annotation_payload in response.payload:
    print(u"Predicted question tag: {}".format(
        annotation_payload.display_name))
    print(u"Predicted score: {}".format(
        annotation_payload.classification.score))