def predict(file_path):
    project_id = "762814515048"
    model_id = "ICN244084984296505344"
    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                                   model_id)

    # Read the file.
    with open(file_path, "rb") as content_file:
        content = content_file.read()

    image = automl.Image(image_bytes=content)
    payload = automl.ExamplePayload(image=image)

    # params is additional domain-specific parameters.
    # score_threshold is used to filter the result
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
    params = {"score_threshold": "0.7"}

    request = automl.PredictRequest(name=model_full_id,
                                    payload=payload,
                                    params=params)
    response = prediction_client.predict(request=request)

    print("Prediction results:")
    results = []
    for result in response.payload:
        results.append(result.display_name)
        results.append(result.classification.score)
        print("Predicted class name: {}".format(result.display_name))
        print("Predicted class score: {}".format(result.classification.score))
    return results
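A minimal usage sketch for the snippet above, assuming the module-level import and credentials that the example omits (the service-account file and image path below are placeholders):

import os
from google.cloud import automl

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "service-account.json"  # placeholder path

results = predict("local_image.jpg")  # placeholder image path
print(results)  # e.g. ["some_label", 0.97]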
Example #2
def get_results_from_automl(filepath):

    project_id = "numeric-polygon-283403"
    model_id = "ICN2007288218877165568"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                                   model_id)

    # Read the file.
    with open(filepath, "rb") as content_file:
        content = content_file.read()

    image = automl.Image(image_bytes=content)
    payload = automl.ExamplePayload(image=image)

    # params is additional domain-specific parameters, score_threshold is used to filter the result
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
    params = {"score_threshold": "0.5"}

    request = automl.PredictRequest(name=model_full_id,
                                    payload=payload,
                                    params=params)
    response = prediction_client.predict(request=request)

    results = []
    for result in response.payload:
        results.append([result.display_name, result.classification.score])

    return results
Example #3
def isWearingMask(file_path):
    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                                   model_id)

    # Read the file.
    with open(file_path, "rb") as content_file:
        content = content_file.read()

    image = automl.Image(image_bytes=content)
    payload = automl.ExamplePayload(image=image)

    # params is additional domain-specific parameters.
    # score_threshold is used to filter the result
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
    params = {"score_threshold": "0.1"}

    request = automl.PredictRequest(name=model_full_id,
                                    payload=payload,
                                    params=params)
    response = prediction_client.predict(request=request)

    #print("Prediction results:")
    print(len(response.payload))
    for result in response.payload:
        print("Predicted class name: {}".format(result.display_name))
        print("Predicted class score: {}".format(result.classification.score))
        return (result.display_name == "with_mask"
                and result.classification.score > 0.9999)

    return False
Example #4
def gesture_detection(file_path):
    project_id = 'gesture-detection-265519'
    model_id = 'ICN7380709295957999616'

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = prediction_client.model_path(project_id, 'us-central1',
                                                 model_id)

    # Read the file.
    with open(file_path, 'rb') as content_file:
        content = content_file.read()

    image = automl.types.Image(image_bytes=content)
    payload = automl.types.ExamplePayload(image=image)

    # params is additional domain-specific parameters.
    # score_threshold is used to filter the result
    params = {'score_threshold': '0.5'}

    response = prediction_client.predict(model_full_id, payload, params)

    display_dict = {
        "ONE": 1,
        "TWO": 2,
        "THREE": 3,
        "FOUR": 4,
        "FIVE": 5,
        "NONE": 0
    }
    for result in response.payload:
        return display_dict[result.display_name]
    return 0
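The example above targets the pre-2.0 google-cloud-automl client (automl.types.* message classes and positional predict() arguments). A rough sketch of the same call against the 2.x-style API used elsewhere on this page, keeping the original project and model IDs:

def gesture_detection_v2(file_path):
    from google.cloud import automl

    project_id = 'gesture-detection-265519'
    model_id = 'ICN7380709295957999616'

    prediction_client = automl.PredictionServiceClient()
    # Build the full model resource path.
    model_full_id = automl.AutoMlClient.model_path(project_id, 'us-central1', model_id)

    # Read the local image file.
    with open(file_path, 'rb') as content_file:
        content = content_file.read()

    payload = automl.ExamplePayload(image=automl.Image(image_bytes=content))
    request = automl.PredictRequest(name=model_full_id,
                                    payload=payload,
                                    params={'score_threshold': '0.5'})
    response = prediction_client.predict(request=request)

    display_dict = {"ONE": 1, "TWO": 2, "THREE": 3, "FOUR": 4, "FIVE": 5, "NONE": 0}
    for result in response.payload:
        return display_dict[result.display_name]
    return 0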
def predict(project_id, model_id, file_path):
    """Predict."""
    # [START automl_vision_classification_predict]
    from google.cloud import automl

    # TODO(developer): Uncomment and set the following variables
    # project_id = "YOUR_PROJECT_ID"
    # model_id = "YOUR_MODEL_ID"
    # file_path = "path_to_local_file.jpg"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = prediction_client.model_path(project_id, "us-central1",
                                                 model_id)

    # Read the file.
    with open(file_path, "rb") as content_file:
        content = content_file.read()

    image = automl.types.Image(image_bytes=content)
    payload = automl.types.ExamplePayload(image=image)

    # params is additional domain-specific parameters.
    # score_threshold is used to filter the result
    params = {"score_threshold": "0.8"}

    response = prediction_client.predict(model_full_id, payload, params)
    print("Prediction results:")
    for result in response.payload:
        print("Predicted class name: {}".format(result.display_name))
        print("Predicted class score: {}".format(result.classification.score))
Example #6
def predict(project_id, model_id, file_path):
    """Predict."""
    # [START automl_translate_predict]
    from google.cloud import automl

    # TODO(developer): Uncomment and set the following variables
    # project_id = "YOUR_PROJECT_ID"
    # model_id = "YOUR_MODEL_ID"
    # file_path = "path_to_local_file.txt"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = prediction_client.model_path(project_id, "us-central1",
                                                 model_id)

    # Read the file content for translation.
    with open(file_path, "rb") as content_file:
        content = content_file.read()
    content = content.decode("utf-8")

    text_snippet = automl.types.TextSnippet(content=content)
    payload = automl.types.ExamplePayload(text_snippet=text_snippet)

    response = prediction_client.predict(model_full_id, payload)
    translated_content = response.payload[0].translation.translated_content

    print(u"Translated content: {}".format(translated_content.content))
Example #7
def get_Google_Prediction():
    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = prediction_client.model_path(project_id, "us-central1",
                                                 model_id)

    # Read the file.
    with open(file_path, "rb") as content_file:
        content = content_file.read()

    image = automl.types.Image(image_bytes=content)
    payload = automl.types.ExamplePayload(image=image)

    # params is additional domain-specific parameters.
    # score_threshold is used to filter the result
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
    params = {"score_threshold": "0.1"}

    response = prediction_client.predict(model_full_id, payload, params)

    # Now analyze the results.
    google_result = response.payload

    #google_response = getResponse(google_result.display_name, google_result.classification.score)

    return google_result
def batch_predict(project_id, model_id, input_uri, output_uri):
    """Batch predict"""
    # [START automl_batch_predict]
    from google.cloud import automl

    # TODO(developer): Uncomment and set the following variables
    # project_id = "YOUR_PROJECT_ID"
    # model_id = "YOUR_MODEL_ID"
    # input_uri = "gs://YOUR_BUCKET_ID/path/to/your/input/csv_or_jsonl"
    # output_uri = "gs://YOUR_BUCKET_ID/path/to/save/results/"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = f"projects/{project_id}/locations/us-central1/models/{model_id}"

    gcs_source = automl.GcsSource(input_uris=[input_uri])

    input_config = automl.BatchPredictInputConfig(gcs_source=gcs_source)
    gcs_destination = automl.GcsDestination(output_uri_prefix=output_uri)
    output_config = automl.BatchPredictOutputConfig(
        gcs_destination=gcs_destination)

    response = prediction_client.batch_predict(name=model_full_id,
                                               input_config=input_config,
                                               output_config=output_config)

    print("Waiting for operation to complete...")
    print(
        f"Batch Prediction results saved to Cloud Storage bucket. {response.result()}"
    )
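A hedged usage sketch for the batch-prediction helper above; the project, model, and Cloud Storage paths are placeholders:

batch_predict(
    project_id="YOUR_PROJECT_ID",
    model_id="YOUR_MODEL_ID",
    input_uri="gs://YOUR_BUCKET_ID/path/to/input.csv",
    output_uri="gs://YOUR_BUCKET_ID/path/to/save/results/",
)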
Example #9
 def __init__(self, MODEL_CONFIG):
     # The projectID in Google Cloud
     self.project_id = MODEL_CONFIG['project_id']
     # The modelID of the model in the natural language section of GCP
     self.model_id = MODEL_CONFIG['model_id']
     # Creating a client to receive predictions
     self.client = automl.PredictionServiceClient()
Example #10
def predict(project_id, model_id, content):
    """Predict."""
    # [START automl_language_text_classification_predict]
    from google.cloud import automl

    # TODO(developer): Uncomment and set the following variables
    # project_id = "YOUR_PROJECT_ID"
    # model_id = "YOUR_MODEL_ID"
    # content = "text to predict"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = prediction_client.model_path(
        project_id, "us-central1", model_id
    )

    # Supported mime_types: 'text/plain', 'text/html'
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
    text_snippet = automl.types.TextSnippet(
        content=content, mime_type="text/plain"
    )
    payload = automl.types.ExamplePayload(text_snippet=text_snippet)

    response = prediction_client.predict(model_full_id, payload)

    for annotation_payload in response.payload:
        print(
            u"Predicted class name: {}".format(annotation_payload.display_name)
        )
        print(
            u"Predicted class score: {}".format(
                annotation_payload.classification.score
            )
        )
Example #11
def get_prediction(content, project_id, model_id):
    prediction_client = automl.PredictionServiceClient()

    name = 'projects/{}/locations/us-central1/models/{}'.format(
        project_id, model_id)
    payload = {'image': {'image_bytes': content}}
    params = {}
    response = prediction_client.predict(name, payload, params)
    return response  # blocks until the prediction response is returned
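A minimal calling sketch for the function above, assuming the pre-2.0 client that accepts plain dicts for the payload; the file path and IDs are placeholders:

with open("path_to_local_file.jpg", "rb") as image_file:  # placeholder path
    content = image_file.read()

response = get_prediction(content, "YOUR_PROJECT_ID", "YOUR_MODEL_ID")
for result in response.payload:
    print(result.display_name, result.classification.score)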
Example #12
 def test_default_credentials_prediction_client(self):
     env = EnvironmentVarGuard()
     env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
     env.set('KAGGLE_KERNEL_INTEGRATIONS', 'CLOUDAI')
     with env:
         prediction_client = automl.PredictionServiceClient()
         self.assertIsNotNone(prediction_client.credentials)
         self.assertIsInstance(prediction_client.credentials, KaggleKernelCredentials)
         self.assertTrue(prediction_client._connection.user_agent.startswith("kaggle-gcp-client/1.0"))
Example #13
def automl_prediction_service_batch_predict(
    model_path,
    gcs_input_uris: str = None,
    gcs_output_uri_prefix: str = None,
    bq_input_uri: str = None,
    bq_output_uri: str = None,
    params=None,
    retry=None,  #google.api_core.gapic_v1.method.DEFAULT,
    timeout=None,  #google.api_core.gapic_v1.method.DEFAULT,
    metadata: dict = None,
) -> NamedTuple('Outputs', [('gcs_output_directory', str),
                            ('bigquery_output_dataset', str)]):
    import sys
    import subprocess
    subprocess.run([
        sys.executable, '-m', 'pip', 'install', 'google-cloud-automl==0.4.0',
        '--quiet', '--no-warn-script-location'
    ],
                   env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'},
                   check=True)

    input_config = {}
    if gcs_input_uris:
        input_config['gcs_source'] = {'input_uris': gcs_input_uris}
    if bq_input_uri:
        input_config['bigquery_source'] = {'input_uri': bq_input_uri}

    output_config = {}
    if gcs_output_uri_prefix:
        output_config['gcs_destination'] = {
            'output_uri_prefix': gcs_output_uri_prefix
        }
    if bq_output_uri:
        output_config['bigquery_destination'] = {'output_uri': bq_output_uri}

    from google.cloud import automl
    client = automl.PredictionServiceClient()
    response = client.batch_predict(
        model_path,
        input_config,
        output_config,
        params,
        retry,
        timeout,
        metadata,
    )
    print('Operation started:')
    print(response.operation)
    result = response.result()
    metadata = response.metadata
    print('Operation finished:')
    print(metadata)
    output_info = metadata.batch_predict_details.output_info
    # Workaround for Argo issue - it fails when output is empty: https://github.com/argoproj/argo/pull/1277/files#r326028422
    return (output_info.gcs_output_directory
            or '-', output_info.bigquery_output_dataset or '-')
Example #14
    def __init__(self, model_name=None, prediction_client=None):
        self.config = None
        # The model name
        # e.g. a value like value like
        # "projects/976279526634/locations/us-central1/models/TCN654213816573231104'"
        self.model_name = model_name

        if not prediction_client:
            prediction_client = automl.PredictionServiceClient()

        self._prediction_client = prediction_client
Example #15
def predict(content):
    options = ClientOptions(api_endpoint="automl.googleapis.com")
    prediction_client = automl.PredictionServiceClient(client_options=options)
    payload = {
        "text_snippet": {
            "content": content,
            "mime_type": "text/plain"
        }
    }
    response = prediction_client.predict(name=model_name, payload=payload)

    data = {}
    for item in response.payload:
        data[item.display_name] = item.classification.score

    return data
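This snippet depends on a module-level model_name holding the full model resource name. A sketch of the assumed setup (the IDs are placeholders):

from google.api_core.client_options import ClientOptions
from google.cloud import automl

model_name = "projects/YOUR_PROJECT_ID/locations/us-central1/models/YOUR_MODEL_ID"

scores = predict("text to classify")
print(scores)  # e.g. {"positive": 0.91, "negative": 0.09}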
Example #16
def func():

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = prediction_client.model_path(project_id, "us-central1",
                                                 model_id)

    # Read the file.
    with open(file_path, "rb") as content_file:
        content = content_file.read()

    image = automl.types.Image(image_bytes=content)
    payload = automl.types.ExamplePayload(image=image)

    return get_prediction(content, project_id, model_id)
def predict(project_id, model_id, file_path):
    """Predict."""
    # [START automl_vision_object_detection_predict]
    from google.cloud import automl

    # TODO(developer): Uncomment and set the following variables
    # project_id = "YOUR_PROJECT_ID"
    # model_id = "YOUR_MODEL_ID"
    # file_path = "path_to_local_file.jpg"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = prediction_client.model_path(
        project_id, "us-central1", model_id
    )

    # Read the file.
    with open(file_path, "rb") as content_file:
        content = content_file.read()

    image = automl.types.Image(image_bytes=content)
    payload = automl.types.ExamplePayload(image=image)

    # params is additional domain-specific parameters.
    # score_threshold is used to filter the result
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
    params = {"score_threshold": "0.8"}

    response = prediction_client.predict(model_full_id, payload, params)
    print("Prediction results:")
    for result in response.payload:
        print("Predicted class name: {}".format(result.display_name))
        print(
            "Predicted class score: {}".format(
                result.image_object_detection.score
            )
        )
        bounding_box = result.image_object_detection.bounding_box
        print("Normalized Vertices:")
        for vertex in bounding_box.normalized_vertices:
            print("\tX: {}, Y: {}".format(vertex.x, vertex.y))
Example #18
def predict(content):
    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                                   model_id)

    # Supported mime_types: 'text/plain', 'text/html'
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
    text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
    payload = automl.ExamplePayload(text_snippet=text_snippet)

    response = prediction_client.predict(name=model_full_id, payload=payload)

    for annotation_payload in response.payload:
        print("Predicted class name: {}".format(
            annotation_payload.display_name))
        print("Predicted sentiment score: {}".format(
            annotation_payload.text_sentiment.sentiment))
        return annotation_payload.text_sentiment.sentiment
Example #19
    def predict(self, text):
        """
        Receives a text input
        Outputs a payload with the prediction of the text sentiment
        """
        try:
            # All below is given code from GCP
            prediction_client = automl.PredictionServiceClient()
            model_full_id = prediction_client.model_path(
                self.project_id, 'us-central1', self.model_id)

            text_snippet = automl.types.TextSnippet(
                content=text,
                mime_type='text/plain')  # Types: 'text/plain', 'text/html'
            payload = automl.types.ExamplePayload(text_snippet=text_snippet)

            response = prediction_client.predict(model_full_id, payload)
            return response.payload
        except Exception as error:
            print(error)
Example #20
def predict(content):

    from google.cloud import automl

    # You must first create a dataset, using the `eu` endpoint, before you can
    # call other operations such as: list, get, import, delete, etc.
    client_options = {'api_endpoint': 'eu-automl.googleapis.com:443'}
    project_id = '685330484131'
    # (model AI Crowd) model_id = 'TCN3300918624537018368'
    model_id = 'TCN4629621252099670016'

    prediction_client = automl.PredictionServiceClient(
        client_options=client_options)

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "eu", model_id)
    text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
    payload = automl.ExamplePayload(text_snippet=text_snippet)

    response = prediction_client.predict(name=model_full_id, payload=payload)
    return response
Example #21
def get_prediction(file_path):

    project_id = "msds-434-final"
    model_id = "ICN7344581542892535808"
    # file_path = "uploads/city.png" ## for local image testing

    credentials = service_account.Credentials.from_service_account_file("/home/jesse_lybianto/msds-434-final/msds-434-final-f32a0ccc78d5.json")
    prediction_client = automl.PredictionServiceClient(credentials=credentials)

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(
        project_id, "us-central1", model_id
    )

    # Read the file.
    with open(file_path, "rb") as content_file:
        content = content_file.read()

    image = automl.Image(image_bytes=content)
    payload = automl.ExamplePayload(image=image)

    # params is additional domain-specific parameters.
    # score_threshold is used to filter the result
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
    params = {"score_threshold": "0.0"}

    request = automl.PredictRequest(
        name=model_full_id,
        payload=payload,
        params=params
    )
    response = prediction_client.predict(request=request)
    print(response.payload)
    return response.payload

    # For non-modular console output
    # print("Prediction results:")
    # for result in response.payload:
    #     print("Predicted class name: {}".format(result.display_name))
    #     print("Predicted class score: {}".format(result.classification.score))
Example #22
def get_disaster(bucket, filename):
    project_id = "swamphacksvi-266915"
    model_id = "ICN6150171066523189248"

    prediction_client = automl.PredictionServiceClient()

    model_full_id = prediction_client.model_path(project_id, "us-central1",
                                                 model_id)
    bucket = storage_client.bucket(bucket)
    blob = bucket.blob(filename)
    image = automl.types.Image(image_bytes=blob.download_as_string())
    payload = automl.types.ExamplePayload(image=image)
    params = {"score_threshold": "0.8"}
    response = prediction_client.predict(model_full_id, payload, params)

    disasters = [result.display_name for result in response.payload]
    print("Prediction Results: ", disasters)

    doc_id = filename.split('.')[0]
    doc = disaster_docs.document(doc_id)
    doc.update({'disaster': disasters})
    print(f'After update: {doc.get().to_dict()}')
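The function above relies on module-level storage_client and disaster_docs objects that are not shown. A sketch of the assumed setup ("disasters" is a hypothetical collection name, not taken from the original code):

from google.cloud import firestore, storage

storage_client = storage.Client()
disaster_docs = firestore.Client().collection("disasters")  # hypothetical collection name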
Example #23
def home_page():
    # from flask import request
    if request.method == 'GET':
        context = {"done": False}
        return render_template('site.html', **context)
    else:
        # get image content from POST request inputs
        file = request.files["myFile"]
        image_content = b''
        for data in file.stream:  # read image as a stream
            image_content += data

        # google api variables
        project_id = "alpine-infinity-290102"

        prediction_client = automl.PredictionServiceClient()

        # Get the full path of the model.
        model_full_id = automl.AutoMlClient.model_path(project_id,
                                                       "us-central1", model_id)

        image = automl.Image(image_bytes=image_content)
        payload = automl.ExamplePayload(image=image)

        req = automl.PredictRequest(
            name=model_full_id,
            payload=payload,
        )
        response = prediction_client.predict(request=req)
        class_ = response.payload[0].display_name
        measure = response.payload[0].classification.score

        context = {
            "done": True,
            "guess_mal": class_ == "Malignant",
            "class_": class_,
            "measure": '{:.2%}'.format(measure)
        }
        return render_template('site.html', **context)
Example #24
def produce_automl_results(dir_img_path: str = "",
                           dir_destination_path: str = ""):
    from google.cloud import automl

    project_id = "hidden"
    model_id = "hidden"

    imgs = os.listdir(dir_img_path)
    relevant_images = [
        os.path.join(dir_img_path, i) for i in imgs if "png" in i
    ]

    prediction_client = automl.PredictionServiceClient()
    project_id = "hidden"
    model_id = "hidden"

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                                   model_id)

    file_path = "../../data/005.png"
    with open(file_path, "rb") as content_file:
        content = content_file.read()

    image = automl.Image(image_bytes=content)
    payload = automl.ExamplePayload(image=image)

    params = {}

    request = automl.PredictRequest(name=model_full_id,
                                    payload=payload,
                                    params=params)
    response = prediction_client.predict(request=request)

    print("Prediction results:")
    for result in response.payload:
        print("Predicted class name: {}".format(result.display_name))
        print("Predicted class score: {}".format(result.classification.score))
Example #25
    def __init__(self, prediction_threshold: float = 0.8):
        """
        Wrapper class for asynchronous, batch-oriented image classification with Google Vision AutoML.
        :param prediction_threshold: Score threshold for predictions (default: 0.8)
        :raises ValueError for invalid prediction_threshold values
        """

        if not (0 <= prediction_threshold <= 1):
            raise ValueError(
                "Prediction threshold must be within the range [0,1].")

        # Let Google Cloud libraries pick up credentials from environment variables
        config, _ = read_config()
        os.environ["PROJECT_ID"] = config["project_id"]
        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = config["credentials"]

        # Google Cloud Vision AutoML
        self.__gvision_client = automl.PredictionServiceClient()
        self.__full_model_id = self.__gvision_client.model_path(
            project=config["project_id"],
            location=config["location"],
            model=config["model_id"])
        self.__prediction_threshold = prediction_threshold
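A hedged sketch of how this wrapper might expose the batch classification it is described as handling, using the stored client, model path, and threshold (the classify_batch method name and its parameters are hypothetical, and the method is assumed to live inside the same class so the name-mangled attributes resolve):

    def classify_batch(self, input_uri: str, output_uri: str):
        # Hypothetical helper: start an asynchronous batch prediction over a CSV/JSONL
        # manifest in Cloud Storage and return the long-running operation.
        gcs_source = automl.GcsSource(input_uris=[input_uri])
        input_config = automl.BatchPredictInputConfig(gcs_source=gcs_source)
        gcs_destination = automl.GcsDestination(output_uri_prefix=output_uri)
        output_config = automl.BatchPredictOutputConfig(gcs_destination=gcs_destination)

        return self.__gvision_client.batch_predict(
            name=self.__full_model_id,
            input_config=input_config,
            output_config=output_config,
            params={"score_threshold": str(self.__prediction_threshold)},
        )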
Example #26
def shape():
    project_id = "quixotic-sol-281406"
    model_id = "ICN8423774195787235328"
    file_path = "images/result.jpg"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = prediction_client.model_path(project_id, "us-central1",
                                                 model_id)

    # Read the file.
    with open(file_path, "rb") as content_file:
        content = content_file.read()

    image = automl.types.Image(image_bytes=content)
    payload = automl.types.ExamplePayload(image=image)

    # params is additional domain-specific parameters.
    # score_threshold is used to filter the result
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
    params = {"score_threshold": "0.8"}

    response = prediction_client.predict(model_full_id, payload, params)
    # print("Prediction results:")
    for result in response.payload:
        # print("Predicted class name: {}".format(result.display_name))
        # print(
        #"Predicted class score: {}".format(
        #      result.image_object_detection.score
        #)
        #)
        bounding_box = result.image_object_detection.bounding_box
        # print("Normalized Vertices:")
        #for vertex in bounding_box.normalized_vertices:
        #   print("\tX: {}, Y: {}".format(vertex.x, vertex.y))
        return result.display_name
Example #27
def label(img):

    project_id = "salary-dost"
    model_id = "ICN4622096194519171072"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = prediction_client.model_path(project_id, "us-central1",
                                                 model_id)

    content = img
    imgByte = io.BytesIO()

    content.save(imgByte, format='jpeg')
    imgByte = imgByte.getvalue()

    image = automl.types.Image(image_bytes=imgByte)
    payload = automl.types.ExamplePayload(image=image)

    # params is additional domain-specific parameters.
    # score_threshold is used to filter the result
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
    params = {"score_threshold": "0.8"}

    response = prediction_client.predict(model_full_id,
                                         payload,
                                         params,
                                         timeout=30.0)
    #print("Prediction results:")
    for result in response.payload:
        #print("Predicted class name: {}".format(result.display_name))
        #print("Predicted class score: {}".format(result.classification.score))
        res = result.display_name

    return res
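The img argument above appears to be a PIL image (it exposes save(..., format='jpeg')). A hedged calling sketch under that assumption, with a placeholder file name:

import io
from PIL import Image

label_name = label(Image.open("photo.jpg"))  # placeholder image path
print(label_name)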
Example #28
    print("Training operation name: {}".format(response.operation.name))
    print("Training started...")

    model_id = response.result().name.split('/')[-1]
    
    start_deploying_time = time.time()
    
    model_full_id = client.model_path(project_id, "us-central1", model_id)

    response = client.deploy_model(name=model_full_id)
    response.result()

    start_prediction_time = time.time()
    
    prediction_client = automl.PredictionServiceClient()

    preds = []
    for img in tqdm.tqdm(data['test_images']):
        score = None
        while True:
            try:
                score = get_prediction(img)
                break
            except Exception:
                time.sleep(5)
        preds.append(score)
    preds = np.array(preds)

    metrics = get_metrics(data['test_labels'], preds)
Example #29
def automl_vision(file_path):
    """Predict."""
    # [START automl_vision_classification_predict]
    #print(file_path)
    # Set the AutoML JSON key (service account credentials)
    #credential_path = r"키.json"
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path
    project_id = "수정"
    model_id = "수정"

    # Original image
    img = cv2.imread(file_path)
    # Original image dimensions
    img_height = img.shape[0]
    img_width = img.shape[1]
    # Convert to RGB color order
    orig = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1",
                                                   model_id)

    # Read the file.
    with open(file_path, "rb") as content_file:
        content = content_file.read()

    image = automl.Image(image_bytes=content)
    payload = automl.ExamplePayload(image=image)

    # params is additional domain-specific parameters.
    # score_threshold is used to filter the result
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
    params = {"score_threshold": "0.8"}

    request = automl.PredictRequest(name=model_full_id,
                                    payload=payload,
                                    params=params)
    response = prediction_client.predict(request=request)

    tags = []
    for result in response.payload:
        # For each bounding box detected as a tag
        bounding_box = result.image_object_detection.bounding_box

        # Convert the normalized-vertex coordinates to actual pixel sizes and
        # derive the rectangle's corner coordinates.
        lu = bounding_box.normalized_vertices[0]  # upper-left x, y coordinates
        rd = bounding_box.normalized_vertices[1]  # lower-right x, y coordinates
        w = rd.x - lu.x
        h = rd.y - lu.y

        # cropping
        cropped_img = orig[int(lu.y * img_height):int((lu.y + h) * img_height),
                           int(lu.x * img_width):int((lu.x + w) * img_width)]

        # Call my_detect_text_mat() to extract the text inside the cropped image.
        texts = my_detect_text_mat(cropped_img)
        tags.append(texts[0].description)

    return tags
Example #30
def predict(model_id, project_id, csvpath_test, resultpath_cloud):

    # project_id = "YOUR_PROJECT_ID"
    # model_id = "YOUR_MODEL_ID"
    input_uri = csvpath_test
    output_uri = resultpath_cloud

    prediction_client = automl.PredictionServiceClient()


    # Get the full path of the model.
    model_full_id = prediction_client.model_path(
        project_id, "us-central1", model_id
    )

    gcs_source = automl.types.GcsSource(input_uris=[input_uri])

    input_config = automl.types.BatchPredictInputConfig(gcs_source=gcs_source)
    gcs_destination = automl.types.GcsDestination(output_uri_prefix=output_uri)
    output_config = automl.types.BatchPredictOutputConfig(
        gcs_destination=gcs_destination
    )

    # Print model stats
    # client = automl.AutoMlClient()
    # print("List of model evaluations:")
    # for evaluation in client.list_model_evaluations(model_full_id, ""):
    #     print("Model evaluation name: {}".format(evaluation.name))
    #     print(
    #         "Model annotation spec id: {}".format(
    #             evaluation.annotation_spec_id
    #         )
    #     )
    #     print("Create Time:")
    #     print("\tseconds: {}".format(evaluation.create_time.seconds))
    #     print("\tnanos: {}".format(evaluation.create_time.nanos / 1e9))
    #     print(
    #         "Evaluation example count: {}".format(
    #             evaluation.evaluated_example_count
    #         )
    #     )
    #     print(
    #         "Classification model evaluation metrics: {}".format(
    #             evaluation.classification_evaluation_metrics
    #         )
    #     )

    print('batch prediction beginning...')
    while True:
        try:
            response = prediction_client.batch_predict(model_full_id, input_config, output_config, params={'score_threshold': '0'})
            break
        except google.api_core.exceptions.ResourceExhausted:
            print('Concurrent batch prediction quota exhausted. This is a common error. Waiting 4 sec...')
            sleep(4)
        except google.api_core.exceptions.DeadlineExceeded:
            print('Deadline exceeded; retrying in 4 sec...')
            sleep(4)
        except google.api_core.exceptions.ServiceUnavailable:
            print('Service unavailable; retrying in 4 sec...')
            sleep(4)
        except google.api_core.exceptions.NotFound:
            print(f'Model {model_full_id} not found; training has probably not finished yet. Waiting 30 sec...')
            sleep(30)
        except Exception:
            raise  # any other error is unexpected; re-raise it

    print(f"Batch prediction operation id {response.operation.name.split('/')[-1]} has started \n\n\n")
    print(f"Waiting for batch prediction operation id {response.operation.name.split('/')[-1]} to complete...")

    # def callback(operation_future):
    #     # Handle result.
    #     print("Batch Prediction results saved to Cloud Storage bucket. {}".format(operation_future.result()))
    #
    # response.add_done_callback(callback)
    while True:
        if response.done():
            break
        else:
            sleep(300)
            print('Model still predicting ...')
    print("Batch Prediction results saved to Cloud Storage bucket. {}".format(response.result()))