Example #1
    def __init__(self):
        self.classifier = edgeiq.Classification("alwaysai/googlenet")
        self.classifier.load(engine=edgeiq.Engine.DNN)
        self.imgs_path = "./images/"

        print("Engine: {}".format(self.classifier.engine))
        print("Accelerator: {}\n".format(self.classifier.accelerator))
        print("Model:\n{}\n".format(self.classifier.model_id))
        print("Labels:\n{}\n".format(self.classifier.labels))
Example #2
import cv2
import edgeiq


def main():
    classifier = edgeiq.Classification("alwaysai/googlenet")
    classifier.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(classifier.engine))
    print("Accelerator: {}\n".format(classifier.accelerator))
    print("Model:\n{}\n".format(classifier.model_id))
    print("Labels:\n{}\n".format(classifier.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(
            queue_depth=len(image_paths), inter_msg_time=3) as streamer:
        black_img = cv2.imread('black.jpg')
        for image_path in image_paths:
            image_display = cv2.imread(image_path)
            image = image_display.copy()

            results = classifier.classify_image(image)

            # Generate text to display on streamer
            text = ["Model: {}".format(classifier.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))

            if results.predictions:
                image_text = "Label: {}, {:.2f}".format(
                        results.predictions[0].label,
                        results.predictions[0].confidence)
                cv2.putText(
                        image_display, image_text, (5, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

                for idx, prediction in enumerate(results.predictions[:5]):
                    text.append("{}. label: {}, confidence: {:.5}".format(
                        idx + 1, prediction.label, prediction.confidence))
                    if prediction.label == "Not Safe For Work":
                        resized_black_image = edge_tools.resize(black_img, image.shape[1], image.shape[0], keep_scale=False)
                        image_display = edge_tools.blend_images(resized_black_image, image, 0.1)
                
            else:
                text.append("No classification for this image.")

            streamer.send_data(image_display, text)
        streamer.wait()

    print("Program Ending")
Example #3
import edgeiq


def classifiers_from(array_of_objects, engine=edgeiq.Engine.DNN):
    '''
    Take the configuration array of parsed JSON objects and initialize a
    classifier for each entry. Returns a list of tuples
    (classifier, array_of_target_labels, confidence_level_threshold_for_detections).
    '''
    result = []
    for config in array_of_objects:
        model_id = config[MODEL_ID]
        confidence_level = config[THRESHOLD]
        targets = config[TARGETS]
        print(
            'app.py: classifiers_from: initializing classifier with model id: {}'
            .format(model_id))
        classifier = edgeiq.Classification(model_id)
        classifier.load(engine=engine)
        result.append((classifier, targets, confidence_level))
    return result
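MODEL_ID, THRESHOLD and TARGETS are key constants defined elsewhere in that project. A minimal sketch of the assumed configuration shape and a call (the key names and the sample entry are hypothetical):

import edgeiq

# Assumed JSON keys for each configuration entry
MODEL_ID = 'model_id'
THRESHOLD = 'threshold'
TARGETS = 'targets'

config = [{
    MODEL_ID: 'alwaysai/gendernet',
    THRESHOLD: 0.9,
    TARGETS: ['male', 'female'],
}]

for classifier, targets, threshold in classifiers_from(config):
    print(classifier.model_id, targets, threshold)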
Example #4
    def classification_base(model, confidence, image_array):
        classifier = edgeiq.Classification(model)
        classifier.load(engine=edgeiq.Engine.DNN)

        results = classifier.classify_image(image_array,
                                            confidence_level=confidence)
        if results.predictions:
            image_text = "{}, {}%".format(
                results.predictions[0].label.title().strip(),
                round(results.predictions[0].confidence * 100, 2))
            # The original used cv2.QT_FONT_NORMAL, a Qt font weight meant
            # for cv2.addText; putText/getTextSize need a FONT_HERSHEY_* face,
            # so one is substituted here.
            label_width, label_height = cv2.getTextSize(
                image_text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0]
            # Scale the text so the label spans the full image width
            scale = image_array.shape[1] / label_width

            new_label_width, new_label_height = cv2.getTextSize(
                image_text, cv2.FONT_HERSHEY_SIMPLEX, scale, 2)[0]
            cv2.putText(image_array, image_text, (0, new_label_height + 5),
                        cv2.FONT_HERSHEY_SIMPLEX, scale, (0, 0, 255), 1)

            return image_array, results, image_text
        return image_array, results, None
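A minimal usage sketch for classification_base, assuming it is reachable at module level and that a test image exists (the path is hypothetical):

import cv2

image = cv2.imread('images/example.jpg')  # hypothetical path
labeled, results, caption = classification_base(
    'alwaysai/googlenet', 0.5, image)
print(caption if caption else "No prediction")
cv2.imwrite('labeled.jpg', labeled)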
Example #5
import cv2
import edgeiq
import numpy as np

# describe_model, blur_detections and imwrite (a project-local helper that
# draws text onto an image) are assumed to be defined elsewhere in the app.


def gen():
    # Load our machine learning models
    detector_config = {
        "engine": edgeiq.Engine.DNN_OPENVINO,
        "accelerator": edgeiq.Accelerator.MYRIAD
    }

    # Get the face detector:
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(**detector_config)
    describe_model(facial_detector, "Face")

    # Get the gender detector
    gender_detector = edgeiq.Classification("alwaysai/gendernet")
    gender_detector.load(**detector_config)
    describe_model(gender_detector, "Gender")

    # Get the age detector
    age_detector = edgeiq.Classification("alwaysai/agenet")
    age_detector.load(**detector_config)
    describe_model(age_detector, "Age")

    texts = ["No patient detected!"]

    with edgeiq.WebcamVideoStream(cam=0) as webcam:
        # loop detection
        while True:
            frame = webcam.read()

            # Flip the image upside down because of how the camera is
            # oriented; cv2.flip returns a contiguous array, unlike the
            # np.flipud view, which some cv2 calls reject
            frame = cv2.flip(frame, 0)

            # detect human faces
            face_results = facial_detector.detect_objects(frame,
                                                          confidence_level=.5)

            if len(face_results.predictions) > 0:
                # Crop the top face detection out of the frame
                box = face_results.predictions[0].box
                face = frame[box.start_y:box.end_y, box.start_x:box.end_x]

                # Detect gender and age
                gender_results = gender_detector.classify_image(
                    face, confidence_level=.9)
                age_results = age_detector.classify_image(face)

                frame = blur_detections(frame, face_results.predictions)

                # Use the top (highest-confidence) prediction from each model
                if len(gender_results.predictions) > 0 and len(
                        age_results.predictions) > 0:
                    top_prediction1 = gender_results.predictions[0]
                    top_prediction2 = age_results.predictions[0]
                    texts = []
                    texts.append("Gender Classification:")
                    texts.append("{}, {:.1f}%".format(
                        top_prediction1.label,
                        top_prediction1.confidence * 100))
                    texts.append("Age Classification:")
                    texts.append("{}, {:.1f}%".format(
                        top_prediction2.label,
                        top_prediction2.confidence * 100))
            else:
                texts = ["No patient detected!"]

            # HACK: add a white label panel to the right of the frame;
            # uint8 keeps the concatenated image encodable by cv2.imencode
            label_panel = np.full(
                (frame.shape[0], frame.shape[1] // 2, frame.shape[2]),
                255, dtype=np.uint8)
            org_coords = [(frame.shape[0] // 15, i * frame.shape[1] // 10)
                          for i in range(1, 5)]
            for i, text in enumerate(texts):
                label_panel = imwrite(label_panel,
                                      text,
                                      org_coords[i],
                                      thickness=1 + ((i % 2) == 0))

            frame = np.concatenate((frame, label_panel), axis=1)

            # Encode the frame as JPEG and yield it as one MJPEG part
            ret, jpeg = cv2.imencode('.jpg', frame)
            frame = jpeg.tobytes()
            yield (b'\r\n--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
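The multipart --frame framing matches the common MJPEG streaming pattern. A minimal sketch of a web route that could serve gen(), assuming the project uses Flask (the route and app names are assumptions):

from flask import Flask, Response

app = Flask(__name__)


@app.route('/video_feed')
def video_feed():
    # Each chunk yielded by gen() is one JPEG part of the
    # multipart/x-mixed-replace stream
    return Response(
        gen(), mimetype='multipart/x-mixed-replace; boundary=frame')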
Example #6
import time

import cv2
import edgeiq


def main():
    classifier = edgeiq.Classification("alwaysai/googlenet")
    classifier.load(engine=edgeiq.Engine.DNN_CUDA,
                    accelerator=edgeiq.Accelerator.NVIDIA)

    print("Engine: {}".format(classifier.engine))
    print("Accelerator: {}\n".format(classifier.accelerator))
    print("Model:\n{}\n".format(classifier.model_id))
    print("Labels:\n{}\n".format(classifier.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=224)
                results = classifier.classify_image(frame)

                # Generate text to display on streamer
                text = ["Model: {}".format(classifier.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                if results.predictions:
                    print(results.predictions[0].label)
                    if results.predictions[0].label == 'water bottle':
                        # label the frame
                        image_text = "Label: {}, {:.2f}".format(
                            results.predictions[0].label,
                            results.predictions[0].confidence)
                        cv2.putText(frame, image_text, (5, 25),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255),
                                    2)

                        for idx, prediction in enumerate(
                                results.predictions[:5]):
                            text.append(
                                "{}. label: {}, confidence: {:.5}".format(
                                    idx + 1, prediction.label,
                                    prediction.confidence))
                    else:
                        text.append("No water bottles detected.")

                else:
                    text.append("No water bottles detected.")

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #7
import time

import edgeiq


def main():

    # Step 1b: first make a detector to detect facial objects
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    # Step 2a: then make a classifier to classify the age of the image
    classifier = edgeiq.Classification("alwaysai/agenet")
    classifier.load(engine=edgeiq.Engine.DNN)

    # Step 2b: descriptions printed to console
    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    print("Engine: {}".format(classifier.engine))
    print("Accelerator: {}\n".format(classifier.accelerator))
    print("Model:\n{}\n".format(classifier.model_id))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:

                # Step 3a: track how many faces are detected in a frame
                count = 1

                # read in the video stream
                frame = video_stream.read()

                # detect human faces
                results = facial_detector.detect_objects(frame,
                                                         confidence_level=.5)

                # Step 3b: altering the labels to show which face was detected
                for p in results.predictions:
                    p.label = "Face " + str(count)
                    count = count + 1

                # Step 3c: alter the original frame mark up to just show labels
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            show_labels=True,
                                            show_confidences=False)

                # generate labels to display the face detections on the streamer
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                # Step 3d: add a header line for the list of faces
                text.append("Faces:")

                # Step 4a: add a counter for the face detection label
                age_label = 1

                # append each prediction to the text output
                for prediction in results.predictions:

                    # Step 4b: append labels for face detection & classification
                    text.append("Face {} ".format(age_label))

                    age_label = age_label + 1

                    ## to show confidence, use the following instead of above:
                    # text.append("Face {}: detected with {:2.2f}% confidence".format(
                    #     age_label, prediction.confidence * 100))

                    # Step 4c: cut out the face and use for the classification
                    face_image = edgeiq.cutout_image(frame, prediction.box)

                    # Step 4d: attempt to classify the image in terms of age
                    age_results = classifier.classify_image(face_image)

                    # Step 4e: if there are predictions for age classification,
                    # generate these labels for the output stream
                    if age_results.predictions:
                        text.append("is {}".format(
                            age_results.predictions[0].label, ))
                    else:
                        text.append("No age prediction")

                    ## to append classification confidence, use the following
                    ## instead of the above if/else:

                    # if age_results.predictions:
                    #     text.append("age: {}, confidence: {:.2f}\n".format(
                    #         age_results.predictions[0].label,
                    #         age_results.predictions[0].confidence))
                    # else:
                    #     text.append("No age prediction")

                # send the image frame and the predictions to the output stream
                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #8
import time

import edgeiq


def main():

    # first make a detector to detect facial objects
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    # then make a classifier to classify the age of the image
    classifier = edgeiq.Classification("alwaysai/agenet")
    classifier.load(engine=edgeiq.Engine.DNN)

    # descriptions printed to console
    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    print("Engine: {}".format(classifier.engine))
    print("Accelerator: {}\n".format(classifier.accelerator))
    print("Model:\n{}\n".format(classifier.model_id))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()

                # detect human faces
                results = facial_detector.detect_objects(frame,
                                                         confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            show_labels=False)

                # generate labels to display the facial detections on the streamer output
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                # append each prediction to the text output
                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                # attempt to classify the image in terms of age
                age_results = classifier.classify_image(frame)

                # if there are predictions for the age classification,
                # generate these labels for the output stream
                if age_results.predictions:
                    text.append("Label: {}, {:.2f}".format(
                        age_results.predictions[0].label,
                        age_results.predictions[0].confidence))
                else:
                    text.append("No age predication")

                # send the image frame and the predictions to the output stream
                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #9
import cv2
import edgeiq


def main():
    try:
        classifier1 = edgeiq.Classification("alwaysai/gendernet")
        classifier2 = edgeiq.Classification("alwaysai/agenet")

        classifier1.load(edgeiq.Engine.DNN)
        print("Engine 1: {}".format(classifier1.engine))
        print("Accelerator 1: {}\n".format(classifier1.accelerator))
        print("Model 1:\n{}\n".format(classifier1.model_id))
        print("Labels:\n{}\n".format(classifier1.labels))

        classifier2.load(edgeiq.Engine.DNN)
        print("Engine 2: {}".format(classifier2.engine))
        print("Accelerator 2: {}\n".format(classifier2.accelerator))
        print("Model 2:\n{}\n".format(classifier2.model_id))
        print("Labels:\n{}\n".format(classifier2.labels))

        image_paths = sorted(list(edgeiq.list_images("images/")))
        print("Images:\n{}\n".format(image_paths))

        with edgeiq.Streamer(queue_depth=len(image_paths),
                             inter_msg_time=3) as streamer:
            for image_path in image_paths:
                image_display = cv2.imread(image_path)
                image = image_display.copy()

                results1 = classifier1.classify_image(image,
                                                      confidence_level=.95)
                results2 = classifier2.classify_image(image)

                # Generate text to display on streamer
                text = ["Model 1: {}".format(classifier1.model_id)]
                text.append("Model 2: {}".format(classifier2.model_id))
                text.append(
                    "Inference time: {:1.3f} s".format(results1.duration +
                                                       results2.duration))

                # Use the top (highest-confidence) prediction from each model
                if (len(results1.predictions) > 0
                        and len(results2.predictions) > 0):
                    top_prediction1 = results1.predictions[0]
                    top_prediction2 = results2.predictions[0]
                    text1 = "Classification: {}, {:.2f}%".format(
                        top_prediction1.label,
                        top_prediction1.confidence * 100)
                    text2 = "Classification: {}, {:.2f}%".format(
                        top_prediction2.label,
                        top_prediction2.confidence * 100)
                else:
                    text1 = "Can not classify this image, confidence under " \
                            "95 percent for Gender Identification"
                    text2 = None
                # Show the image on which inference was performed with text
                cv2.putText(image_display, text1, (5, 25),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
                text.append(text1)
                if text2 is not None:
                    cv2.putText(image_display, text2, (5, 45),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
                    text.append(text2)

                streamer.send_data(image_display, text)
            streamer.wait()

    finally:
        print("Program Ending")
Example #10
import time

import cv2
import edgeiq

# describe_model and blur_detections are assumed to be defined elsewhere
# in the app.


def gen():
    # Load our machine learning models
    detector_config = {
        "engine": edgeiq.Engine.DNN_OPENVINO,
        "accelerator": edgeiq.Accelerator.MYRIAD
    }

    # Get the face detector:
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(**detector_config)
    describe_model(facial_detector, "Face")

    # Get the gender detector
    gender_detector = edgeiq.Classification("alwaysai/gendernet")
    gender_detector.load(**detector_config)
    describe_model(gender_detector, "Gender")

    # Get the age detector
    age_detector = edgeiq.Classification("alwaysai/agenet")
    age_detector.load(**detector_config)
    describe_model(age_detector, "Age")

    with edgeiq.WebcamVideoStream(cam=0) as webcam:
        # Allow webcam to warm up
        time.sleep(2.0)

        # loop detection
        while True:
            frame = webcam.read()
            # detect human faces
            face_results = facial_detector.detect_objects(frame,
                                                          confidence_level=.5)

            # Detect gender and age
            gender_results = gender_detector.classify_image(
                frame, confidence_level=.9)
            age_results = age_detector.classify_image(frame)

            frame = blur_detections(frame, face_results.predictions)

            # Use the top (highest-confidence) prediction from each model
            if len(gender_results.predictions) > 0 and len(
                    age_results.predictions) > 0:
                top_prediction1 = gender_results.predictions[0]
                top_prediction2 = age_results.predictions[0]
                text1 = "Classification: {}, {:.2f}%".format(
                    top_prediction1.label, top_prediction1.confidence * 100)
                text2 = "Classification: {}, {:.2f}%".format(
                    top_prediction2.label, top_prediction2.confidence * 100)
            else:
                text1 = "Can not classify this image, confidence under " \
                        "90 percent for Gender Identification"
                text2 = None

            # Generate text to display on streamer
            text = ["Gender Model: {}".format(gender_detector.model_id)]
            text.append("Age Model: {}".format(age_detector.model_id))
            text.append("Face Model: {}".format(facial_detector.model_id))
            text.append(
                "Inference time: {:1.3f} s".format(gender_results.duration +
                                                   age_results.duration +
                                                   face_results.duration))
            text.append("Faces:")

            for prediction in face_results.predictions:
                text.append("{}: {:2.2f}%".format(prediction.label,
                                                  prediction.confidence * 100))

            text.append(text1)
            if text2 is not None:
                text.append(text2)

            # Encode the frame as JPEG and yield it as one MJPEG part
            ret, jpeg = cv2.imencode('.jpg', frame)
            frame = jpeg.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
Example #11
File: app.py Project: johancc/AromaV2
import edgeiq


def generateAgeModel():
    age_model = edgeiq.Classification("alwaysai/agenet")
    age_model.load(engine=edgeiq.Engine.DNN)
    return age_model
Example #12
File: app.py Project: johancc/AromaV2
import edgeiq


def generateGenderModel():
    classification = edgeiq.Classification("alwaysai/gendernet")
    classification.load(engine=edgeiq.Engine.DNN)
    return classification
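A minimal sketch of using the two factory functions together on a single image (the image path is hypothetical; cropping a detected face first, as in the examples above, would likely improve accuracy):

import cv2

age_model = generateAgeModel()
gender_model = generateGenderModel()

image = cv2.imread('images/face.jpg')  # hypothetical path
age_results = age_model.classify_image(image)
gender_results = gender_model.classify_image(image)

if age_results.predictions and gender_results.predictions:
    print("Age: {}".format(age_results.predictions[0].label))
    print("Gender: {}".format(gender_results.predictions[0].label))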