Example no. 1
def predict_face(image: list):
    """
        permet de predire lidentité de l'etudiant
    """
    # Byte
    image_contents = cv2.imencode('.jpg', image)[1].tostring()

    # Now there is a trained endpoint that can be used to make a prediction
    predictor = CustomVisionPredictionClient(config.prediction_key,
                                             endpoint=config.ENDPOINT)

    #with open(base_image_url + "images/Test/test_image.jpg", "rb") as image_contents:
    results = predictor.classify_image(config.projet_id,
                                       config.publish_iteration_name,
                                       image_contents)

    # Highest-probability prediction (predictions are returned sorted by probability, descending)
    prediction = results.predictions[0]
    """for prediction in results.predictions:
        if prediction.probability > prob:
            tag_name = prediction.tag_name
            prob = prediction.probability

        print ("\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100))
    """
    return prediction
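A minimal usage sketch for the function above (an assumption, not from the original source): it loads a frame with OpenCV and prints the top prediction; the file path is illustrative only.

import cv2

# Hypothetical caller for predict_face: read an image from disk and classify it.
frame = cv2.imread("students/test_face.jpg")
prediction = predict_face(frame)
print(prediction.tag_name, "{0:.2f}%".format(prediction.probability * 100))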
Example no. 2
def main():
    """
    Image classification
    """
    args = parse_args()
    config = json.load(open(args.config, "r"))

    # Get the predictor
    prediction_credentials = ApiKeyCredentials(
        in_headers={"Prediction-key": config["prediction_key"]}
    )
    predictor = CustomVisionPredictionClient(config["ENDPOINT"], prediction_credentials)

    # ======================================================================================
    # Open the sample image and get back the prediction results.
    project_id = get_project_id(config)
    with open(args.image, "rb") as image_contents:
        results = predictor.classify_image(
            project_id,
            config["publish_iteration_name"],
            image_contents.read(),
        )

        # Display the results.
        for prediction in results.predictions:
            print(
                "{0}: {1:.2f}%".format(
                    prediction.tag_name, prediction.probability * 100
                )
            )
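Examples 2 and 29 both read their settings from a JSON config file passed on the command line. A rough sketch of what that file is assumed to contain, based only on the keys referenced in the snippets (values are placeholders, and get_project_id may rely on additional keys not shown here):

import json

# Hypothetical config; only the keys used above are included.
config = {
    "ENDPOINT": "https://<your-resource>.cognitiveservices.azure.com/",
    "prediction_key": "<prediction-key>",
    "publish_iteration_name": "<published-iteration-name>",
}
with open("config.json", "w") as f:
    json.dump(config, f, indent=2)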
Example no. 3
def main():
    from dotenv import load_dotenv

    try:
        # Get Configuration Settings
        load_dotenv()
        prediction_endpoint = os.getenv('PredictionEndpoint')
        prediction_key = os.getenv('PredictionKey')
        project_id = os.getenv('ProjectID')
        model_name = os.getenv('ModelName')

        # Authenticate a client for the prediction API
        credentials = ApiKeyCredentials(
            in_headers={"Prediction-key": prediction_key})
        prediction_client = CustomVisionPredictionClient(
            endpoint=prediction_endpoint, credentials=credentials)

        # Classify test images
        for image in os.listdir('test-images'):
            with open(os.path.join('test-images', image), "rb") as image_file:
                image_data = image_file.read()
            results = prediction_client.classify_image(project_id, model_name,
                                                       image_data)

            # Loop over each label prediction and print any with probability > 50%
            for prediction in results.predictions:
                if prediction.probability > 0.5:
                    print(
                        image, ': {} ({:.0%})'.format(prediction.tag_name,
                                                      prediction.probability))
    except Exception as ex:
        print(ex)
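The script above pulls its settings from environment variables through python-dotenv. A sketch of how those variables could be supplied when experimenting without a .env file (the variable names come from the snippet; the values are placeholders):

import os

# Hypothetical values; normally these live in a .env file read by load_dotenv().
os.environ["PredictionEndpoint"] = "https://<your-resource>.cognitiveservices.azure.com/"
os.environ["PredictionKey"] = "<prediction-key>"
os.environ["ProjectID"] = "<project-guid>"
os.environ["ModelName"] = "<published-iteration-name>"

main()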
Example no. 4
def success():
    image = request.args.get('name', None)
    ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com/"
    prediction_key = "4c99a663c6984df6b912025e2c7e1dee"
    my_project_id = "c9f9158e-e82d-4541-926a-15699c59a4b4"
    publish_iteration_name = "Iteration3"

    test_data = urlopen(image).read()
    prediction_credentials = ApiKeyCredentials(
        in_headers={"Prediction-key": prediction_key})
    predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
    results = predictor.detect_image(project_id=my_project_id,
                                     published_name=publish_iteration_name,
                                     image_data=test_data)
    maximum = 0
    tag = None

    for prediction in results.predictions:
        next_value = int(prediction.probability * 100)
        if maximum <= next_value:
            maximum = next_value
            tag = prediction.tag_name
        print(int(prediction.probability * 100))
        print(maximum)
        print(tag)
        print(
            "\t" + prediction.tag_name +
            ": {0:.2f}% bbox.left = {1:.2f}, bbox.top = {2:.2f}, bbox.width = {3:.2f}, bbox.height = {4:.2f}"
            .format(prediction.probability * 100, prediction.bounding_box.left,
                    prediction.bounding_box.top, prediction.bounding_box.width,
                    prediction.bounding_box.height))
    return tag
Example no. 5
    def localAnalysis(self, location):
        # Initialize Object Predictor
        predictor = CustomVisionPredictionClient(self.prediction_key,
                                                 endpoint=self.ENDPOINT)

        # Open the sample image and get back the prediction results.
        with open(location, mode="rb") as test_data:
            results = predictor.detect_image(self.project_id,
                                             self.publish_iteration_name,
                                             test_data)

        # Display the results.
        notes = []
        for prediction in results.predictions:
            if prediction.probability > self.min_confidence:
                notes.append([
                    prediction.tag_name, prediction.bounding_box.left,
                    prediction.bounding_box.top, prediction.probability
                ])

        # Sort the data.
        self.__noteSort(notes)

        # print("Note, Left, Top, Prob")
        # for note in notes:
        #     print(note)

        output = []
        for note in notes:
            output.append(note[0])

        return output
Example no. 6
def azure_request(img_by):
    project_ID = "xxx"
    iteration_name = "xxx"
    key = "xxx"
    endpointurl = "xxx"

    prediction_credentials = ApiKeyCredentials(
        in_headers={"Prediction-key": key}
        )

    predictor = CustomVisionPredictionClient(
        endpointurl,
        prediction_credentials
        )

    results = predictor.classify_image(
        project_ID, 
        iteration_name, 
        img_by
        )
    predict = {}
    for prediction in results.predictions:
        predict[prediction.tag_name] = prediction.probability 

    return predict  # return the predictions as a dictionary
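A possible caller for azure_request (an assumption, not part of the original source): it reads raw JPEG bytes and picks the highest-scoring tag from the returned dictionary; the file name is illustrative.

# Hypothetical usage of azure_request with image bytes read from disk.
with open("sample.jpg", "rb") as f:
    scores = azure_request(f.read())
best_tag = max(scores, key=scores.get)
print(best_tag, "{0:.2f}%".format(scores[best_tag] * 100))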
Example no. 7
def find_seal(image_path, angle):
    predictor = CustomVisionPredictionClient(prediction_key, endpoint=endpoint)

    project_id = None
    if angle == 'wet-head-right':
        project_id = config.HEAD_RIGHT
    if angle == 'wet-head-left':
        project_id = config.HEAD_LEFT
    if angle == 'bottling-left':
        project_id = config.BOTTLING_LEFT
    if angle == 'bottling-straight':
        project_id = config.BOTTLING_STRAIGHT
    if angle == 'bottling-right':
        project_id = config.BOTTLING_RIGHT

    # Open the image and get back the prediction results.
    with open(image_path, mode="rb") as image_contents:
        results = predictor.classify_image(project_id, iteration_name,
                                           image_contents.read())

    image_predictions = {}

    for prediction in results.predictions:
        image_predictions[prediction.tag_name] = prediction.probability
        print("\t" + prediction.tag_name +
              ": {0:.2f}%".format(prediction.probability * 100))

    return image_predictions
Example no. 8
def resultado(request):
    font                   = cv2.FONT_HERSHEY_SIMPLEX
    fontScale              = 0.7
    fontColor              = (0,0,255)
    lineType               = 2
    name=request.GET["namefile"]
    credentials = ApiKeyCredentials(in_headers={"Prediction-key": "<Prediction key here>"})
    predictor = CustomVisionPredictionClient("<Regional endpoint here>", credentials)
    blob = BlobClient.from_connection_string(conn_str=connection_string, container_name="images", blob_name=f"{name} training.png")
    url = request.GET["link"] 
    urllib.request.urlretrieve(url, "python.png")
    imagen=cv2.imread("python.png")
    height, width, channels = imagen.shape
    Resultado = predictor.detect_image_url("<Project ID>", "<Iteration>", url)
    for prediction in Resultado.predictions:
        if prediction.probability > 0.4:
            bbox = prediction.bounding_box
            tag = prediction.tag_name
            probabilidad= int(prediction.probability * 100)
            result_image = cv2.rectangle(imagen, (int(bbox.left * width), int(bbox.top * height)), (int((bbox.left + bbox.width) * width), int((bbox.top + bbox.height) * height)), (0, 255, 0), 3)
            bottomLeftCornerOfText = (int(bbox.left*width),int(((bbox.top*height)+(bbox.height*height))))
            cv2.putText(result_image, str(probabilidad) + "% " + tag,
                        bottomLeftCornerOfText,
                        font,
                        fontScale,
                        fontColor,
                        lineType)
            cv2.imwrite('result.png', result_image)
    with open("result.png","rb") as data:
        blob.upload_blob(data)
    
    return render(request,"resultado.html",{"imagen":blob.url,"namefile":name})
Example no. 9
    def __init__(
        self,
        endpoint,
        training_key,
        prediction_key,
        prediction_ressource_id,
        project_id,
        iteration_id,
        iteration_name,
        training_images,
    ):

        self.endpoint = endpoint
        self.training_key = training_key
        self.prediction_key = prediction_key
        self.prediction_ressource_id = prediction_ressource_id
        self.project_id = project_id
        self.iteration_id = iteration_id
        self.iteration_name = iteration_name
        self.training_images = training_images

        #Initializing Prediction Client
        self.predictor = CustomVisionPredictionClient(self.prediction_key,
                                                      self.endpoint)

        #Initializing Training Client
        self.trainer = CustomVisionTrainingClient(self.training_key,
                                                  self.endpoint)
Example no. 10
def customVisionDetectObjects(containerName, blobName):
    """
        Description: This function detects selected workstation objects, using a custom vision algorithm, in an image retrieved from an Azure Blob Storage, via its URL. 
        Input: (containerName -> string), (blobName -> string)
        Output: List -> [[prediction.tag_name, prediction.probability, prediction.bounding_box.left, prediction.bounding_box.top, prediction.bounding_box.width, prediction.bounding_box.height], ...]
    """
    print("\n\n===== Custom Vision Object Detection=====\n")
    # Retrieving the probability threshold used to recognise an object
    result_probThreshold = dbc.retrieve_probThreshold("customVisionObjects")
    probThreshold = float(result_probThreshold[0])

    # Client Authentication
    predictor = CustomVisionPredictionClient(CCV_endpoint, CCV_credentials)

    # Get URL image with different objects
    remote_image_url_objects = hbl.getBlobURI(containerName, blobName)

    # Call API with URL
    custom_vision_prediction = predictor.detect_image_url_with_no_store(project_id, published_name, remote_image_url_objects)

    # Detect objects in an image and store results in nested resultList of form: [[prediction.tag_name, prediction.probability], [prediction.tag_name, prediction.probability], ...]
    resultList = []
    
    #print("Detecting objects in remote image:")
    if len(custom_vision_prediction.predictions) == 0:
        pass
    else:
        for prediction in custom_vision_prediction.predictions:
            if prediction.probability >= probThreshold:
                resultList.append([prediction.tag_name, prediction.probability, prediction.bounding_box.left, prediction.bounding_box.top, prediction.bounding_box.width, prediction.bounding_box.height])
    return resultList
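A hedged usage sketch for customVisionDetectObjects (the container and blob names are made up): it unpacks the nested result list described in the docstring.

# Hypothetical call; containerName and blobName are illustrative only.
detections = customVisionDetectObjects("workstation-images", "frame_001.png")
for tag, probability, left, top, width, height in detections:
    print("{0}: {1:.2f}% at left={2:.2f}, top={3:.2f}".format(tag, probability * 100, left, top))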
Example no. 11
def detect_weight_region(image):
    # top_left_corner = (21,120)
    # bottom_right_corner = (217, 210)
    # cv2.imshow("image",image)
    image_shape = image.shape
    print(image_shape)
    credentials = ApiKeyCredentials(in_headers={"Prediction-Key": "bf595a2cb1854d988a1f9d26834cd4e2"})
    predictor = CustomVisionPredictionClient("https://pankaj.cognitiveservices.azure.com/", credentials)
    cv2.imwrite('detect_weight_region.png', image)
    digit = ""
    with open("detect_weight_region.png", mode ='rb') as captured_image:
        # print("load digit image... and predict ")
        results = predictor.detect_image("de960dda-1a51-444e-9fe9-84f8fbc4eff1", "Iteration2", captured_image)
        maxm_percentage = 0.0
        ans = []
        for prediction in results.predictions:
            if prediction.probability > maxm_percentage:
                maxm_percentage = prediction.probability
                # image.shape is (height, width, channels): scale x-values by width, y-values by height
                ans = [image_shape[1]*prediction.bounding_box.left, image_shape[0]*prediction.bounding_box.top, image_shape[1]*prediction.bounding_box.width, image_shape[0]*prediction.bounding_box.height]
            print("\t" + prediction.tag_name + ": {0:.2f}% bbox.left = {1:.2f}, bbox.top = {2:.2f}, bbox.width = {3:.2f}, bbox.height = {4:.2f}".format(prediction.probability * 100, prediction.bounding_box.left, prediction.bounding_box.top, prediction.bounding_box.width, prediction.bounding_box.height))
        # for prediction in results.predictions:
        #     if(prediction.probability> maxm_percentage):
        #         digit = prediction.tag_name
        #         maxm_percentage = prediction.probability
    return ans
Example no. 12
def azure_detect_object(img_path):
    """
    detect gloves and screwdriver in given image
    :param img_path: Image path
    :return: list of (tag_name, probability, left, top, width, height) tuples
    """
    res = []
    template_prediction_key = "54c71598e9434d5fa7853360c4a9e4ce"
    template_project_id = "54e7f828-d0c8-49d3-8802-9b402612b7c7"
    template_iteration_name = "Iteration2"
    template_prediction_endpoint = "https://southeastasia.api.cognitive.microsoft.com"
    predictor = CustomVisionPredictionClient(
        template_prediction_key, endpoint=template_prediction_endpoint)

    with open(img_path, "rb") as image_contents:
        results = predictor.detect_image(
            template_project_id,
            template_iteration_name,
            image_contents.read(),
            custom_headers={'Content-Type': 'application/octet-stream'})
        for prediction in results.predictions:

            res.append(
                (prediction.tag_name, prediction.probability,
                 prediction.bounding_box.left, prediction.bounding_box.top,
                 prediction.bounding_box.width,
                 prediction.bounding_box.height))
    return res
Example no. 13
def customVisionDetectObjectsLocalDisplay(imageFileName):
    """
        Description: This function detects selected workstation objects, using a custom vision algorithm, in an image read from the local Images directory. It then prints out the results to the console.
        Source: The Azure Custom Vision SDK Sample was used as guidance and for finding an elegant way to print the results to the console (https://github.com/Azure-Samples/cognitive-services-python-sdk-samples/blob/master/samples/vision/custom_vision_prediction_samples.py)
        Input: (imageFileName -> string)
        Output: No direct output, but it writes the results of the object detection to the console.
    """
    print("\n\n===== Custom Vision Object Detection=====\n")
    # Retrieving the probability threshold used to recognise an object
    result_probThreshold = dbc.retrieve_probThreshold("customVisionObjects")
    probThreshold = float(result_probThreshold[0])

    # Client Authentication
    predictor = CustomVisionPredictionClient(CCV_endpoint, CCV_credentials)

    # Get URL image with different objects
    #remote_image_url_objects = hbl.getBlobURI(containerName, blobName)
    with open(dm.createTargetDirectory("Images") + imageFileName, "rb") as image_contents:
        # Call API with URL
        custom_vision_prediction = predictor.detect_image_with_no_store(project_id, published_name, image_contents.read())
    
    print("Objects Detected with Custom Vision and a Probability Threshold >= 0.2:")
    if len(custom_vision_prediction.predictions) == 0:
        print("No objects detected.")
    else:
         for prediction in custom_vision_prediction.predictions:
             if (prediction.probability >= probThreshold):
                print("->\t" + prediction.tag_name +
                ": {0:.2f}%".format(prediction.probability * 100))
Example no. 14
def get_pattern_predictions(image_path):
    predictor = CustomVisionPredictionClient(prediction_key, endpoint=endpoint)

    # Open the image and get back the prediction results.
    with open(image_path, mode="rb") as test_data:
        results = predictor.detect_image(project_id, iteration_name, test_data)

    return {"predictions": results.predictions}
Example no. 15
def call_custom_vision(image_url):
    prediction_credentials = ApiKeyCredentials(
        in_headers={"Prediction-key": custom_vision_prediction_key})
    predictor = CustomVisionPredictionClient(custom_vision_endpoint,
                                             prediction_credentials)
    results = predictor.detect_image_url(project_id, publish_iteration_name,
                                         image_url)
    return results
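A short, assumed usage sketch for call_custom_vision: it filters the detections by probability, mirroring the thresholding used in several of the other examples; the URL and the 0.5 threshold are illustrative.

# Hypothetical caller: keep only reasonably confident detections.
results = call_custom_vision("https://example.com/photo.jpg")
confident = [(p.tag_name, p.probability, p.bounding_box)
             for p in results.predictions if p.probability > 0.5]
for tag, prob, box in confident:
    print("{0}: {1:.2f}% (left={2:.2f}, top={3:.2f})".format(tag, prob * 100, box.left, box.top))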
Example no. 16
 def __init__(self, prediction_key, endpoint, iteration_id, iteration_name):
     """Makes a call to custom vision api and use trained model to detect people
     """
     credentials = ApiKeyCredentials(
         in_headers={"Prediction-key": prediction_key})
     self._predictor = CustomVisionPredictionClient(endpoint, credentials)
     self._iteration_id = iteration_id
     self._iteration_name = iteration_name
Example no. 17
def process_image():
    file = request.files['file']
    file.save("predict")
    # Read the image via file.stream
    #img = Image.open(file.stream)
    #test_img_file = os.path.join('data', 'object-detection', 'ofr.jpg')
    test_img_file = 'predict'
    test_img = Image.open(test_img_file)
    imgbin = open(test_img_file,mode="rb")
    test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
    print('Ready to predict using model {} in project {}'.format(model_name, project_id))

    # Get a prediction client for the object detection model
    credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
    predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)

    
    print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
    # Detect objects in the test image
    with open(test_img_file, mode="rb") as test_data:
        results = predictor.detect_image(project_id, model_name, test_data)
    # Create a figure to display the results
    fig = plt.figure(figsize=(10, 12))
    #fig = plt.figure(figsize=(test_img_h,test_img_w))
    plt.axis('off')

    # Display the image with boxes around each detected object
    draw = ImageDraw.Draw(test_img)
    lineWidth = int(np.array(test_img).shape[1]/100)
    object_colors = {
        "bebida": "lightgreen",
        "calavera_completa": "yellow",
        "calavera_de_dulce": "yellow",
        "cempasuchil": "orange",
        "comida": "blue",
        "cruz": "gold",
        "fruta": "magenta",
        "pan_de_muerto": "darkcyan",
        "papel_picado": "red",
        "retrato": "cyan"
    }
    found = []
    for prediction in results.predictions:
        color = 'white' # default for 'other' object tags
        if (prediction.probability*100) > 50:
            if prediction.tag_name in object_colors:
                color = object_colors[prediction.tag_name]
                found.append(prediction.tag_name)
            left = prediction.bounding_box.left * test_img_w 
            top = prediction.bounding_box.top * test_img_h 
            height = prediction.bounding_box.height * test_img_h
            width =  prediction.bounding_box.width * test_img_w
            points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
            draw.line(points, fill=color, width=lineWidth)
            #plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
            plt.annotate(prediction.tag_name,(left,top), backgroundcolor=color)
    test_img.save("./static/Imagenes/out.jpg")
    return jsonify(found)
Example no. 18
def run_sample():
    try:
        # Create the BlockBlobService that is used to call the Blob service for the storage account
        block_blob_service = BlockBlobService(
            account_name='functionsimgpro9756',
            account_key='****************************')

        # Create a container called 'quickstartblobs'.
        container_name = 'samples-workitems'
        block_blob_service.create_container(container_name)

        # Set the permission so the blobs are public.
        block_blob_service.set_container_acl(
            container_name, public_access=PublicAccess.Container)
        local_file_name = "Mobile_camera_Feed.jpg"
        full_path_to_file = "./Mobile_camera_Feed.jpg"

        predictor = CustomVisionPredictionClient(prediction_key,
                                                 endpoint=ENDPOINT)
        project = find_project()

        while True:
            print("HI!!!")
            img_resp = requests.get(url)
            img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)
            time.sleep(0.1)
            try:
                img = cv2.imdecode(img_arr, -1)
            except:
                print("An exception occurred")

            cv2.imwrite("Mobile_camera_Feed.jpg", img)
            block_blob_service.create_blob_from_path(container_name,
                                                     local_file_name,
                                                     full_path_to_file)

            #Make Prediction
            print("Make prediction")
            with open("Mobile_camera_Feed.jpg", mode="rb") as test_data:
                results = predictor.predict_image(project.id, test_data.read())

            waste_type = results.predictions[0].tag_name
            # Display the results.
            print(waste_type)

            sendUpdateToArduino(waste_type)
            # for prediction in results.predictions:
            #     # print(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100))
            #     print(prediction.tag_name)

            if cv2.waitKey(1) == 27:
                break
            sleep(10)

    except Exception as e:
        os.remove(full_path_to_file)
        print(e)
Example no. 19
 def __init__(self, training_key, prediction_key, endpoint, project_name,\
      publish_iteration_name):
     # Define the constructor
     self.predictor = CustomVisionPredictionClient(prediction_key,
                                                   endpoint=endpoint)
     self.trainer = CustomVisionTrainingClient(training_key,
                                               endpoint=endpoint)
     self.project_name = project_name
     self.publish_iteration_name = publish_iteration_name
Example no. 20
def find_seal(image_path):
    predictor = CustomVisionPredictionClient(prediction_key, endpoint=endpoint)

    # Open the image and get back the prediction results.
    with open(image_path, mode="rb") as image_contents:
        results = predictor.classify_image(project_id, iteration_name,
                                           image_contents.read())

    return results
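A hedged usage note for find_seal above (the file path is assumed): since classification predictions come back sorted by probability, the top match can be read directly.

# Hypothetical caller: pick the most probable tag for a seal photo.
results = find_seal("seals/photo_001.jpg")
top = results.predictions[0]
print(top.tag_name, "{0:.2f}%".format(top.probability * 100))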
Example no. 21
def predict_project(prediction_key, project, iteration):
    predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)

    # Open the sample image and get back the prediction results.
    with open(os.path.join(IMAGES_FOLDER, "Test", "test_od_image.jpg"), mode="rb") as test_data:
        results = predictor.predict_image(project.id, test_data, iteration.id)

    # Display the results.
    for prediction in results.predictions:
        print ("\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100), prediction.bounding_box.left, prediction.bounding_box.top, prediction.bounding_box.width, prediction.bounding_box.height)
Example no. 22
def predict_project(prediction_key, file_path):
    predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)

    with open(os.path.join(IMAGES_FOLDER, file_path), mode="rb") as test_data:
        results = predictor.predict_image(PROJECT_ID, test_data.read())

    # Display the results.
    for prediction in results.predictions:
        print("\t" + prediction.tag_name +
              ": {0:.2f}%".format(prediction.probability * 100))
Example no. 23
    def predictWithID(self, database, modelID, inputDataIDs, onFinished):

        # load model record
        session = database.cursor()
        session.execute(
            "SELECT remote_id FROM " + self._datatableName + " WHERE id = %s",
            (modelID, ))
        result = session.fetchone()
        session.close()

        if result:
            projectID = result[0]

            predictOK = True
            resultMap = {}

            if len(inputDataIDs) > 0:
                try:
                    # Now there is a trained endpoint that can be used to make a prediction
                    trainer = CustomVisionTrainingClient(
                        self._trainingKey, endpoint=self._endPoint)
                    predictor = CustomVisionPredictionClient(
                        self._predictionKey, endpoint=self._endPoint)
                    project = trainer.get_project(projectID)

                    # load photos
                    for photoID in inputDataIDs:

                        image, _, err = self._serverAPI.getResource(
                            database, photoID)
                        if err is None:
                            isOK, encodedImage = cv2.imencode('.png', image)
                            predictResponse = predictor.classify_image(
                                project.id, projectID, encodedImage)
                            predictResult = predictResponse.predictions
                            if len(predictResult) > 0:
                                resultMap[photoID] = {
                                    "CLASS": predictResult[0].tag_name,
                                    "SCORE": predictResult[0].probability
                                }

                except Exception as err:
                    predictOK = False
                    resultMap = {}
                    print("ERROR (", self._datatableName, ") Model", modelID,
                          "failed to predict: " + str(err))

                    #raise err

            onFinished({"ISOK": predictOK, "RESULT": resultMap})

        else:
            print("ERROR (", self._datatableName, ") Model", modelID,
                  "failed to predict, model record not found by plugin.")
            onFinished({"ISOK": False})
Example no. 24
def getPredictionBatch(ENDPOINT, publish_iteration_name, prediction_key,
                       prediction_resource_id, file, training_key,
                       project_name):
    # Now there is a trained endpoint that can be used to make a prediction
    prediction_credentials = ApiKeyCredentials(
        in_headers={"Prediction-key": prediction_key})
    training_credentials = ApiKeyCredentials(
        in_headers={"Training-key": training_key})
    predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
    trainer = CustomVisionTrainingClient(ENDPOINT, training_credentials)
    projects = trainer.get_projects()

    res_batch = {}
    js_res = {}

    #Retrieve the object dection project and its tags
    #Current assumes one tag
    for p in projects:
        if p.name == project_name:
            project = trainer.get_project(p.id)
            tags = trainer.get_tags(project.id)
            print('Project Found')

    for url in file:
        info = url.split()

        name = info[0]
        url = info[1]
        try:
            response = requests.get(url)
        except:
            print("error retrieving image: " + url)
            exit(-1)
        # Open the sample image and get back the prediction results.
        results = predictor.detect_image(project.id, publish_iteration_name,
                                         response.content)

        # Collect the results for this image, keyed by tag name.
        js_res = {"vehicle": []}
        for prediction in results.predictions:

            x = {
                "confidence": "{0:.2f}%".format(prediction.probability * 100),
                "bbox_left": "{0:.2f}".format(prediction.bounding_box.left),
                "bbox_right": "{0:.2f}".format(prediction.bounding_box.top),
                "bbox_width": "{0:.2f}".format(prediction.bounding_box.width),
                "bbox_height": "{0:.2f}".format(prediction.bounding_box.height)
            }

            x = json.dumps(x)
            js_res.setdefault(prediction.tag_name, []).append(x)
        res_batch[name] = js_res

    return res_batch
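The file argument above is iterated line by line, each line expected to hold a name and an image URL separated by whitespace. A minimal sketch of how the function might be driven (all names, keys and URLs below are placeholders, not values from the original source):

# Hypothetical batch call; each entry is "<name> <image-url>".
lines = [
    "car1 https://example.com/images/car1.jpg",
    "car2 https://example.com/images/car2.jpg",
]
batch = getPredictionBatch("<endpoint>", "<published-iteration-name>", "<prediction-key>",
                           "<prediction-resource-id>", lines, "<training-key>",
                           "<project-name>")
print(batch)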
Example no. 25
def main():
    from dotenv import load_dotenv

    try:
        # Get Configuration Settings
        load_dotenv()
        prediction_endpoint = os.getenv('PredictionEndpoint')
        prediction_key = os.getenv('PredictionKey')
        project_id = os.getenv('ProjectID')
        model_name = os.getenv('ModelName')

        # Authenticate a client for the prediction API
        credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
        prediction_client = CustomVisionPredictionClient(endpoint=prediction_endpoint, credentials=credentials)

        # Load image and get height, width and channels
        image_file = 'produce.jpg'
        print('Detecting objects in', image_file)
        image = Image.open(image_file)
        h, w, ch = np.array(image).shape

        # Detect objects in the test image
        with open(image_file, mode="rb") as image_data:
            results = prediction_client.detect_image(project_id, model_name, image_data)

        # Create a figure for the results
        fig = plt.figure(figsize=(8, 8))
        plt.axis('off')

        # Display the image with boxes around each detected object
        draw = ImageDraw.Draw(image)
        lineWidth = int(w/100)
        color = 'magenta'
        for prediction in results.predictions:
            # Only show objects with a > 50% probability
            if (prediction.probability*100) > 50:
                # Box coordinates and dimensions are proportional - convert to absolutes
                left = prediction.bounding_box.left * w 
                top = prediction.bounding_box.top * h 
                height = prediction.bounding_box.height * h
                width =  prediction.bounding_box.width * w
                # Draw the box
                points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
                draw.line(points, fill=color, width=lineWidth)
                # Add the tag name and probability
                plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
        plt.imshow(image)
        outputfile = 'output.jpg'
        fig.savefig(outputfile)
        print('Results saved in ', outputfile)
    except Exception as ex:
        print(ex)
Example no. 26
def string_from_image(url_input):
    #print("here")
    predictor = CustomVisionPredictionClient(
        config["cv_id"],
        endpoint="https://eastus.api.cognitive.microsoft.com/")

    results = predictor.classify_image_url(config["project_id"], "version0.2",
                                           url_input)

    prediction = results.predictions[0]
    print(prediction)

    return prediction.tag_name.replace(' ', '+')
Example no. 27
def get_pattern_predictions(image_path):
    predictor = CustomVisionPredictionClient(prediction_key, endpoint=endpoint)

    print('About to OD the image ' + image_path)
    # Open the image and get back the prediction results.
    with open(image_path, mode="rb") as test_data:
        print('Opened file')
        results = predictor.detect_image(project_id, iteration_name, test_data)

    print(results)
    # TODO: order the predictions in order to aid readability

    return {"predictions": results.predictions}
Example no. 28
def predict_project(subscription_key):
    predictor = CustomVisionPredictionClient(
        subscription_key, "https://westeurope.api.cognitive.microsoft.com")

    # Find or train a new project to use for prediction.
    project = find_or_train_project()

    with open(os.path.join(IMAGES_FOLDER, "Test", "test_image.jpg"), mode="rb") as test_data:
         results = predictor.classify_image(project.id, PUBLISH_ITERATION_NAME, test_data.read())

    # Display the results.
    for prediction in results.predictions:
        print("\t" + prediction.tag_name +
              ": {0:.2f}%".format(prediction.probability * 100))
Example no. 29
def main():
    """
    Object Detection with Azure Custom Vision
    """
    args = parse_args()
    config = json.load(open(args.config, "r"))

    # Get the predictor
    prediction_credentials = ApiKeyCredentials(
        in_headers={"Prediction-key": config["prediction_key"]})
    predictor = CustomVisionPredictionClient(config["ENDPOINT"],
                                             prediction_credentials)

    # ======================================================================================
    # Open the sample image and get back the prediction results.
    project_id = get_project_id(config)
    with open(args.image, "rb") as test_data:
        results = predictor.detect_image(
            project_id,
            config["publish_iteration_name"],
            test_data,
        )

    # ======================================================================================
    # Draw the bounding boxes on the image
    img = Image.open(args.image)
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype("../static/TaipeiSansTCBeta-Regular.ttf",
                              size=int(5e-2 * img.size[1]))
    for prediction in results.predictions:
        if prediction.probability > 0.5:
            bbox = prediction.bounding_box.as_dict()
            left = bbox['left'] * img.size[0]
            top = bbox['top'] * img.size[1]
            right = left + bbox['width'] * img.size[0]
            bot = top + bbox['height'] * img.size[1]
            draw.rectangle([left, top, right, bot],
                           outline=(255, 0, 0),
                           width=3)
            draw.text(
                [left, abs(top - 5e-2 * img.size[1])],
                "{0} {1:0.2f}".format(prediction.tag_name,
                                      prediction.probability * 100),
                fill=(255, 0, 0),
                font=font,
            )

    img.save("output.png")
    print("Done!")
    print("Please check ouptut.png")
Example no. 30
    def __init__(self):
        self.ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com"

        # Project keys
        self.training_key = "dae03cb013f840658708cd62781d90c1"
        self.prediction_key = "6212b24516c6492190c63d2b32084079"
        self.project_id = "a97fb679-77e7-4e07-b946-81c752ee3112"

        self.probability_min = 60

        # Now there is a trained endpoint that can be used to make a prediction

        self.predictor = CustomVisionPredictionClient(self.prediction_key,
                                                      endpoint=self.ENDPOINT)

from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient

import requests
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
get_ipython().magic(u'matplotlib inline')

# Use two test images
test_img1_url = 'http://www.pachd.com/free-images/food-images/apple-01.jpg'
test_img2_url = 'http://www.pachd.com/free-images/food-images/carrot-01.jpg'

# Create an instance of prediction service
predictor = CustomVisionPredictionClient(PREDICTION_KEY, endpoint=ENDPOINT)

# Get prediction for image 1
result1 = predictor.predict_image_url(PROJECT_ID, url=test_img1_url)
# The results include a prediction for each tag, in descending order of probability - so take the first one
prediction1 = result1.predictions[0].tag_name + ": {0:.2f}%".format(result1.predictions[0].probability * 100)

# Get prediction for image 2
result2 = predictor.predict_image_url(PROJECT_ID, url=test_img2_url)
# The results include a prediction for each tag, in descending order of probability - so take the first one
prediction2 = result2.predictions[0].tag_name + ": {0:.2f}%".format(result2.predictions[0].probability * 100)

# Download images and show them
response = requests.get(test_img1_url)
img1 = Image.open(BytesIO(response.content))