Example #1
    def get_mask_results(self, predictions, image):
        """Searches each prediction box section of the input image for a mask,
        and generates a new prediction in the overall image based on the prediction.

        Args:
            predictions (list): List of ObjectDetectionPredictions
            image (numpy.ndarray): The image to run inference on

        Returns:
            list: A list of ObjectDetectionPrediction elements, one per input prediction
        """
        mask_results = []
        for prediction in deepcopy(predictions):

            # use the person prediction to narrow the focus and search for masks
            new_image = edgeiq.cutout_image(image, prediction.box)
            mask_predictions = self.mask_detector.detect_objects(new_image, confidence_level=0.2).predictions

            if len(mask_predictions) > 0:
                pred = mask_predictions[0]

                # update the label with the mask model's label if it is found
                prediction.label = pred.label

                # make the new box in the original frame
                new_start_x = prediction.box.start_x + pred.box.start_x
                new_end_x = new_start_x + pred.box.width
                new_start_y = prediction.box.start_y + pred.box.start_y
                new_end_y = new_start_y + pred.box.height

                # send back this new box to be marked up in the frame
                new_box = edgeiq.BoundingBox(new_start_x, new_start_y, new_end_x, new_end_y)
                prediction.box = new_box
            else:
                prediction.label = "no-mask-detected"

            mask_results.append(prediction)
        return mask_results
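
This method depends on a self.mask_detector attribute, a person detector, and deepcopy, none of which the snippet sets up. A minimal sketch of a host class (the class name and the mask model id are assumptions for illustration, not from the source):

from copy import deepcopy

import edgeiq


class MaskApp:
    def __init__(self):
        # "alwaysai/mobilenet_ssd" appears elsewhere in these examples;
        # the mask model id below is a placeholder, not a real model id
        self.person_detector = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
        self.person_detector.load(engine=edgeiq.Engine.DNN)
        self.mask_detector = edgeiq.ObjectDetection("<your-mask-model-id>")
        self.mask_detector.load(engine=edgeiq.Engine.DNN)

    # get_mask_results(self, predictions, image) as defined above

    def process_frame(self, frame):
        # detect people first, then search each person box for a mask
        people = self.person_detector.detect_objects(
            frame, confidence_level=0.5).predictions
        return self.get_mask_results(people, frame)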
Example #2
def main():

    # Step 1b: first make a detector to detect faces
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    # Step 2a: then make a classifier to classify the age of the detected face
    classifier = edgeiq.Classification("alwaysai/agenet")
    classifier.load(engine=edgeiq.Engine.DNN)

    # Step 2b: descriptions printed to console
    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    print("Engine: {}".format(classifier.engine))
    print("Accelerator: {}\n".format(classifier.accelerator))
    print("Model:\n{}\n".format(classifier.model_id))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:

                # Step 3a: track how many faces are detected in a frame
                count = 1

                # read in the video stream
                frame = video_stream.read()

                # detect human faces
                results = facial_detector.detect_objects(frame,
                                                         confidence_level=.5)

                # Step 3b: altering the labels to show which face was detected
                for p in results.predictions:
                    p.label = "Face " + str(count)
                    count += 1

                # Step 3c: alter the original frame mark up to just show labels
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            show_labels=True,
                                            show_confidences=False)

                # generate labels to display the face detections on the streamer
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                # Step 3d: add a header line for the face labels
                text.append("Faces:")

                # Step 4a: add a counter for the face detection label
                age_label = 1

                # append each prediction to the text output
                for prediction in results.predictions:

                    # Step 4b: append labels for face detection & classification
                    text.append("Face {} ".format(age_label))

                    ## to show confidence, use the following instead of the
                    ## line above:
                    # text.append("Face {}: detected with {:2.2f}% confidence,".format(
                    #     age_label, prediction.confidence * 100))

                    age_label += 1

                    # Step 4c: cut out the face and use for the classification
                    face_image = edgeiq.cutout_image(frame, prediction.box)

                    # Step 4d: attempt to classify the image in terms of age
                    age_results = classifier.classify_image(face_image)

                    # Step 4e: if there are predictions for age classification,
                    # generate these labels for the output stream
                    if age_results.predictions:
                        text.append("is {}".format(
                            age_results.predictions[0].label, ))
                    else:
                        text.append("No age prediction")

                    ## to append classification confidence, use the following
                    ## instead of the above if/else:

                    # if age_results.predictions:
                    #     text.append("age: {}, confidence: {:.2f}\n".format(
                    #         age_results.predictions[0].label,
                    #         age_results.predictions[0].confidence))
                    # else:
                    #     text.append("No age prediction")

                # send the image frame and the predictions to the output stream
                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #3
def main():
    facial_detector = edgeiq.ObjectDetection(
            "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    fps = edgeiq.FPS()
    event_video_writer = edgeiq.EventVideoWriter(pre_roll=3, post_roll=3, fps=1)

    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            i = 0
            face_dict = {0: "Vignesh", 1: "Parth", 2: "Rosna", 3: "Vipul"}

            fireb = firebase.FirebaseApplication('https://soteria-hacksc.firebaseio.com/', None)
            fire_res = fireb.get('/faces_detected', None)
            print(fire_res)
            tflag = False

            # loop detection
            while True:
                frame = webcam.read()
                event_video_writer.update(frame)
                # detect human faces
                results = facial_detector.detect_objects(
                        frame, confidence_level=.5)
                frame = edgeiq.markup_image(
                        frame, results.predictions, show_labels=False)

                # Generate text to display on streamer
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Faces:")

                bb_img_list = []

                for prediction in results.predictions:
                    text.append("{:2.2f}%".format(prediction.confidence * 100))
                    bbox = prediction.box
                    bb_image = edgeiq.cutout_image(frame,bbox)
                    bb_img_list.append(bb_image)

                try:
                    cv2.imwrite("./unknown_{}.jpg".format(i), bb_img_list[0])
                    unknown_image = face_recognition.load_image_file("./unknown_{}.jpg".format(i))
                    unknown_encoding = face_recognition.face_encodings(unknown_image)[0]
                except Exception:
                    # no face cutout this frame (or no encoding found);
                    # fall back to a stock unknown image
                    unknown_image = face_recognition.load_image_file("./unknown.jpg")
                    unknown_encoding = face_recognition.face_encodings(unknown_image)[0]
                    print("Unknown face!!")
                    
                for j in range(4):
                    known_image = face_recognition.load_image_file("./{}.jpg".format(j))
                    known_encoding = face_recognition.face_encodings(known_image)[0]

                    if face_recognition.compare_faces([known_encoding], unknown_encoding)[0]:
                        print("Face recognised: ", j)
                        text.append("Recognised: ")
                        text.append(face_dict[j])
                        fire_data = {i: {'id': face_dict[j], 'location': '34.020091, -118.286119'}}
                        fire_result = fireb.post('/faces_detected', fire_data)
                        print(fire_result)


                alert_res = fireb.get('/alert', None)
                if not tflag and alert_res['bool'] == 1:
                    print("Alert!!")
                    text.append("Alert!!! Potential Imminent Danger around you")
                    output_path="video_clip.avi"
                    event_video_writer.start_event(output_path=output_path, callback_function=cb)
                    timeout = time.time() + 30
                    tflag = True

                if tflag and time.time() > timeout:
                    print(fps.compute_fps())
                    print("Video complete!")
                    event_video_writer.finish_event()
                    timeout = time.time() + 60*5


                streamer.send_data(frame, text)
                # streamer.send_data(bb_img_list[0], text)

                fps.update()
                i += 1

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #4
def main():

    # First make a detector to detect hands
    hand_detector = edgeiq.ObjectDetection("alwaysai/hand_detection")
    hand_detector.load(engine=edgeiq.Engine.DNN)

    # Then make a detector to detect the sign of the hand
    sign_detector = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    sign_detector.load(engine=edgeiq.Engine.DNN)

    # Descriptions printed to console
    print("Engine: {}".format(hand_detector.engine))
    print("Accelerator: {}\n".format(hand_detector.accelerator))
    print("Model:\n{}\n".format(hand_detector.model_id))
    print("Labels:\n{}\n".format(hand_detector.labels))

    print("Engine: {}".format(sign_detector.engine))
    print("Accelerator: {}\n".format(sign_detector.accelerator))
    print("Model:\n{}\n".format(sign_detector.model_id))
    print("Labels:\n{}\n".format(sign_detector.labels))

    fps = edgeiq.FPS()

    # Variables to limit inference
    counter = 0
    DETECT_RATE = 10
    sign_monitor = SignMonitor()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # Loop detection
            while True:
                counter += 1
                if counter % DETECT_RATE == 0:

                    # Read in the video stream
                    frame = video_stream.read()

                    # Detect hands
                    results = hand_detector.detect_objects(frame,
                                                           confidence_level=.5)

                    # Alter the original frame mark up to just show labels
                    frame = edgeiq.markup_image(frame,
                                                results.predictions,
                                                show_labels=True,
                                                show_confidences=False)

                    # Generate labels to display the hand detections on the streamer
                    text = ["Model: {}".format(hand_detector.model_id)]
                    text.append("Inference time: {:1.3f} s".format(
                        results.duration))

                    text.append("Signs:")

                    # Add a counter for the sign label
                    sign_label = 1

                    # Append each prediction to the text output
                    for prediction in results.predictions:

                        # Append labels for hand detection & sign detection
                        text.append("Sign {} ".format(sign_label))

                        sign_label += 1

                        # Cut out the hand and use for the sign detection
                        hand_image = edgeiq.cutout_image(frame, prediction.box)

                        # Attempt to identify the sign object
                        sign_results = sign_detector.detect_objects(
                            hand_image, confidence_level=.9)

                        sign = None

                        # If a sign was detected, append the label
                        if sign_results.predictions:
                            sign = sign_results.predictions[0]
                            text.append(
                                "sign: {}, confidence: {:.2f}\n".format(
                                    sign.label, sign.confidence))

                        if sign is not None:
                            sign_monitor.update(sign.label)

                    # Send the image frame and the predictions to the output stream
                    streamer.send_data(frame, text)

                    fps.update()

                    if streamer.check_exit():
                        break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #5
def start_file_detection_and_tracking(delivery_object,
                                      filter_for,
                                      model_name,
                                      filename,
                                      detection_confidence=.5,
                                      enable_streamer=True,
                                      streamer_show_labels=True,
                                      tracker_deregister_frames=20,
                                      tracker_max_distance=50,
                                      should_log=False):
    """Starts a detection loop"""
    obj_detect = object_detector(model_name)
    tracker = edgeiq.CentroidTracker(
        deregister_frames=tracker_deregister_frames,
        max_distance=tracker_max_distance)
    fps = edgeiq.FPS()

    try:
        # Enables video camera and streamer

        # TODO: add streamer disable feature here

        with edgeiq.FileVideoStream(
                filename) as video_stream, edgeiq.Streamer() as streamer:

            # Start tracking of frames per second
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                text = []
                # Run object detection on the frame
                results = obj_detect.detect_objects(
                    frame, confidence_level=detection_confidence)

                # TODO: Add filter option here

                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            show_labels=streamer_show_labels)

                # Generate text to display on streamer
                text.append("Model: {}".format(obj_detect.model_id))
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                predictions = []
                objects = tracker.update(results.predictions)
                # predictions = results.predictions
                for (object_id, prediction) in objects.items():
                    # print(vars(prediction))
                    text.append("{}: {}: {:2.2f}%".format(
                        object_id, prediction.label,
                        prediction.confidence * 100))
                    predictions.append(prediction)

                    if delivery_object.should_send_image(object_id):
                        # Extract the detected object's image
                        object_image = edgeiq.cutout_image(frame, prediction.box)
                        # Send image and label to the server
                        delivery_object.send_image(object_id, prediction.label,
                                                   object_image)
                    elif delivery_object.should_send_data(object_id):
                        delivery_object.send_data(object_id, prediction.label)

                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()
                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        if should_log:
            print("[INFO] elapsed time: {:.2f}".format(
                fps.get_elapsed_seconds()))
            print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))
            print("Program Ending")