Example #1
def main():
    text = "Facial Landmarks with Dlib"
    try:
        shape_predictor = "shape_predictor_68_face_landmarks.dat"
        dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)

        image_paths = sorted(list(edgeiq.list_images("images/")))
        print("Images:\n{}\n".format(image_paths))

        with edgeiq.Streamer(queue_depth=len(image_paths),
                             inter_msg_time=3) as streamer:
            for image_path in image_paths:
                image = cv2.imread(image_path)
                image, gray_image = dlib_flm.image_preprocessor(image)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(
                    gray_image)

                # Draw a bounding box and label for each detected face
                for (i, rectangle) in enumerate(rectangles):
                    (x, y, w,
                     h) = dlib_flm.dlib_rectangle_to_cv_bondingbox(rectangle)
                    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)
                    cv2.putText(image, "Face #{}".format(i + 1),
                                (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 255, 0), 2)

                for facial_coordinate in facial_coordinates:
                    for (x, y) in facial_coordinate:
                        cv2.circle(image, (x, y), 3, (255, 0, 0), -1)

                streamer.send_data(image, text)
            streamer.wait()
    finally:
        print("Program Ending")
Example #2
def main():
    text = "Facial Part Detection with Dlib"
    try:
        shape_predictor = "shape_predictor_68_face_landmarks.dat"
        dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)

        image_paths = sorted(list(edgeiq.list_images("images/")))
        print("Images:\n{}\n".format(image_paths))

        with edgeiq.Streamer(queue_depth=len(image_paths), inter_msg_time=3) as streamer:
            for image_path in image_paths:
                image = cv2.imread(image_path)
                resized_image, gray_image = dlib_flm.image_preprocessor(image)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(gray_image)

                for facial_coordinate in facial_coordinates:
                    for (name, (i, j)) in FACIAL_LANDMARKS_IDXS.items():
                        print(name)
                        clone = resized_image.copy()
                        cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            1.0, (255, 0, 0), 2)
                        for (x, y) in facial_coordinate[i:j]:
                            cv2.circle(clone, (x, y), 3, (255, 0, 0), -1)
                        # Send one frame per facial part, not per landmark point
                        streamer.send_data(clone, text)
            streamer.wait()
    finally:
         print("Program Ending")
Example #3
def main():
    semantic_segmentation = edgeiq.SemanticSegmentation("alwaysai/enet")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Model:\n{}\n".format(semantic_segmentation.model_id))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=3) as streamer:
        for image_path in image_paths:
            image = cv2.imread(image_path)

            results = semantic_segmentation.segment_image(image)

            # Generate text to display on streamer
            text = ["Model: {}".format(semantic_segmentation.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))
            text.append("Legend:")
            text.append(semantic_segmentation.build_legend())

            mask = semantic_segmentation.build_image_mask(results.class_map)
            blended = edgeiq.blend_images(image, mask, alpha=0.5)

            streamer.send_data(blended, text)
            streamer.wait()

        print("Program Ending")
Example #4
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=4) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)

            results = obj_detect.detect_objects(image, confidence_level=.5)
            image = edgeiq.markup_image(image,
                                        results.predictions,
                                        colors=[(255, 255, 255)])

            # Generate text to display on streamer
            text = ["<b>Model:</b> {}".format(obj_detect.model_id)]
            text.append("<b>Inference time:</b> {:1.3f} s".format(
                results.duration))
            text.append("<b>Objects:</b>")

            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(prediction.label,
                                                  prediction.confidence * 100))
            if image_path == 'images/example_08.jpg':
                text.append("<br><br><b><em>Hello, World!</em></b>")

            streamer.send_data(image, text)
        streamer.wait()

    print("Program Ending")
Example #5
def main():
    text = "Facial Recognition"
    try:
        print("status", "loading encodings + face detector...")
        data = pickle.loads(open("encodings.pickle", "rb").read())

        image_paths = sorted(list(edgeiq.list_images("images/")))
        print("Images:\n{}\n".format(image_paths))

        with edgeiq.Streamer(queue_depth=len(image_paths),
                             inter_msg_time=3) as streamer:
            for image_path in image_paths:
                image = cv2.imread(image_path)
                rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                face_locations = face_recognition.face_locations(rgb)
                face_encodings = face_recognition.face_encodings(
                    rgb, face_locations)

                face_names = []
                for face_encoding in face_encodings:
                    # See if the face is a match for the known face(s)
                    matches = face_recognition.compare_faces(
                        data["encodings"], face_encoding)
                    name = "Unknown"

                    face_distances = face_recognition.face_distance(
                        data["encodings"], face_encoding)

                    # the smallest distance is the closest to the encoding
                    minDistance = min(face_distances)

                    # save the name if the distance is below the tolerance
                    if minDistance < 0.6:
                        idx = np.where(face_distances == minDistance)[0][0]
                        name = data["names"][idx]
                    else:
                        name = "Unknown"

                    face_names.append(name)

                # Display the results
                for (top, right, bottom,
                     left), name in zip(face_locations, face_names):
                    # Draw a box around the face
                    cv2.rectangle(image, (left, top), (right, bottom),
                                  (0, 0, 255), 2)

                    # Draw a label with a name below the face
                    cv2.rectangle(image, (left, bottom - 20), (right, bottom),
                                  (0, 0, 255), cv2.FILLED)
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(image, name, (left + 6, bottom - 3), font,
                                0.75, (0, 255, 0), 2)

                streamer.send_data(image, text)
            streamer.wait()
    finally:
        print("Program Ending")
Example #6
def main():

    # Load configuration data from the alwaysai.app.json file
    config = load_json(CONFIG_FILE)
    classifiers_config = config[CLASSIFIERS]
    classifiers_needed_to_agree = config[CLASSIFIERS_NEEDED_TO_AGREE]
    found_folder = config[FOUND_FOLDER]
    empty_folder = config[EMPTY_FOLDER]

    # Spin up all the classifiers listed in the configuration file into an array
    classifiers = classifiers_from(classifiers_config, auto_detected_engine())

    # Get all paths for images in the designated source folder
    image_paths = sorted(list(edgeiq.list_images(SOURCE_FOLDER + '/')))

    # Info for console output
    starting_image_count = len(image_paths)
    print("app.py: main: Checking {} images from '{}' folder ...".format(
        starting_image_count, SOURCE_FOLDER))
    print(
        'app.py: main: {:.1f}% or more of {} classifiers must be in agreement before a target object is considered found.'
        .format(classifiers_needed_to_agree * 100, len(classifiers)))

    for image_path in image_paths:
        image_display = cv2.imread(image_path)
        image = image_display.copy()

        # Run through all classifiers looking for target labels
        found = targets_detected_among(classifiers, image_path, image,
                                       classifiers_needed_to_agree)

        # Sort image to appropriate folder if target label detected
        sort_image_by_detection(found, image_path, empty_folder, found_folder)

    # Print info to console upon completion
    print("app.py: main: Completed sorting of {} images".format(
        starting_image_count))
    found_images_count = len(list(edgeiq.list_images(found_folder)))
    print("app.py: main: {} images in the output folder".format(
        found_images_count))
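The helpers used above (load_json, classifiers_from, targets_detected_among, sort_image_by_detection) live elsewhere in the project and are not shown. As one plausible sketch, the sorting helper simply moves each file into the matching folder; the implementation below is an assumption for illustration:

import os
import shutil

def sort_image_by_detection(found, image_path, empty_folder, found_folder):
    # Move the image into the "found" folder when a target object was
    # detected, otherwise into the "empty" folder
    destination = found_folder if found else empty_folder
    os.makedirs(destination, exist_ok=True)
    shutil.move(image_path,
                os.path.join(destination, os.path.basename(image_path)))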
Example #7
def main():
    obj_det = edgeiq.ObjectDetection("tester2204/CE-Recog")
    if edgeiq.is_jetson():
        obj_det.load(engine=edgeiq.Engine.DNN_CUDA)
        print("Nvidia Jetson Detected\n")
    else:
        obj_det.load(engine=edgeiq.Engine.DNN)
        print("Device is not a Nvidia Jetson Board\n")
    print("Initializing Application...\n")
    print("Model:\n{}\n".format(obj_det.model_id))
    print("Engine:\n{}\n".format(obj_det.engine))
    print("Labels:\n{}\n".format(obj_det.labels))

    #imgURL = "https://specials-images.forbesimg.com/imageserve/5e88b867e2bb040006427704/0x0.jpg"
    #urllib.request.urlretrieve(imgURL, "this.jpg") #Change based on OS and User

    #image = "Images/this.jpg"

    image_lists = sorted(list(edgeiq.list_images("Images/")))

    with edgeiq.Streamer(queue_depth=len(image_lists),
                         inter_msg_time=7) as streamer:
        # Cycle through the image set three times
        for _ in range(3):
            for image_list in image_lists:
                show_image = cv2.imread(image_list)
                image = show_image.copy()

                results = obj_det.detect_objects(image, confidence_level=.5)

                image = edgeiq.markup_image(image,
                                            results.predictions,
                                            colors=obj_det.colors)

                shown = ["Model: {}".format(obj_det.model_id)]
                shown.append("Inference time: {:1.3f} s".format(
                    results.duration))
                shown.append("Objects:")

                for prediction in results.predictions:
                    shown.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))
                streamer.send_data(image, shown)
            streamer.wait()

    print("That's it folks!")
    print("Thanks for using Ben's Object Recognition Model & Software")
    print("Sponsored by: Darien's Face")
Example #8
def main():
    classifier = edgeiq.Classification("alwaysai/googlenet")
    classifier.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(classifier.engine))
    print("Accelerator: {}\n".format(classifier.accelerator))
    print("Model:\n{}\n".format(classifier.model_id))
    print("Labels:\n{}\n".format(classifier.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(
            queue_depth=len(image_paths), inter_msg_time=3) as streamer:
        black_img = cv2.imread('black.jpg')
        for image_path in image_paths:
            image_display = cv2.imread(image_path)
            image = image_display.copy()

            results = classifier.classify_image(image)

            # Generate text to display on streamer
            text = ["Model: {}".format(classifier.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))

            if results.predictions:
                image_text = "Label: {}, {:.2f}".format(
                        results.predictions[0].label,
                        results.predictions[0].confidence)
                cv2.putText(
                        image_display, image_text, (5, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

                for idx, prediction in enumerate(results.predictions[:5]):
                    text.append("{}. label: {}, confidence: {:.5}".format(
                        idx + 1, prediction.label, prediction.confidence))
                    if prediction.label == "Not Safe For Work":
                        # Dim NSFW results by blending a black frame over the image
                        resized_black_image = edge_tools.resize(
                            black_img, image.shape[1], image.shape[0],
                            keep_scale=False)
                        image_display = edge_tools.blend_images(
                            resized_black_image, image, 0.1)

            else:
                text.append("No classification for this image.")

            streamer.send_data(image_display, text)
        streamer.wait()

    print("Program Ending")
Example #9
def main():
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    key_points = [
            'Neck', 'Right Shoulder', 'Right Elbow', 'Right Wrist',
            'Left Shoulder', 'Left Elbow', 'Left Wrist', 'Right Hip', 'Right Knee',
            'Right Ankle', 'Left Hip', 'Left Knee', 'Left Ankle']

    header = {}
    for key_point in key_points:
        header['{} x'.format(key_point)] = []
        header['{} y'.format(key_point)] = []

    for pose in POSES:
        df = pd.DataFrame(header)
        print('Generating results for {}'.format(pose))
        image_paths = edgeiq.list_images(os.path.join('images', 'downloads', pose))

        for image_path in image_paths:
            try:
                image = cv2.imread(image_path)
                results = pose_estimator.estimate(image)
                if len(results.poses) > 0:
                    results = results.poses[0].key_points
                    # Filter only desired key points
                    results = {key: value for key, value in results.items()
                               if key in key_points}
                    row = {}
                    for key, value in results.items():
                        row['{} x'.format(key)] = value[0]
                        row['{} y'.format(key)] = value[1]

                    # DataFrame.append was removed in pandas 2.0; build a
                    # one-row frame and concatenate it instead
                    df = pd.concat([df, pd.DataFrame([row])],
                                   ignore_index=True)
                else:
                    print('Skipping {}, no pose detected!'.format(image_path))
            except Exception as e:
                print('Exception on {}! {}'.format(image_path, e))

        df.to_csv('{}.csv'.format(pose))
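Each pose gets its own CSV of key-point coordinates. As a small usage sketch, the files can later be reloaded for analysis with pandas (POSES here is whatever list of pose names the script was configured with):

import pandas as pd

for pose in POSES:
    df = pd.read_csv('{}.csv'.format(pose), index_col=0)
    print(pose, df.shape)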
Example #10
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=3) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)

            results = obj_detect.detect_objects(image, confidence_level=.5)
            image = edgeiq.markup_image(image,
                                        results.predictions,
                                        colors=obj_detect.colors)

            # Generate text to display on streamer
            text = ["Model: {}".format(obj_detect.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))
            text.append("Objects:")

            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(prediction.label,
                                                  prediction.confidence * 100))

            streamer.send_data(image, text)
        streamer.wait()

    print("Program Ending")
Example #11
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=3) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            r, g, b = cv2.split(image)
            fig = plt.figure()
            axis = fig.add_subplot(1, 1, 1, projection="3d")

            pixel_colors = image.reshape(
                (np.shape(image)[0] * np.shape(image)[1], 3))
            norm = colors.Normalize(vmin=-1., vmax=1.)
            norm.autoscale(pixel_colors)
            pixel_colors = norm(pixel_colors).tolist()

            axis.scatter(r.flatten(),
                         g.flatten(),
                         b.flatten(),
                         facecolors=pixel_colors,
                         marker=".")
            axis.set_xlabel("Red")
            axis.set_ylabel("Green")
            axis.set_zlabel("Blue")
            plt.show()

            #convert from rgb to hsv and pick out 2 shades
            hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            hsv_drot = (18, 24, 61)
            hsv_lrot = (13, 203, 55)

            #build the color mask
            mask = cv2.inRange(hsv_image, hsv_lrot, hsv_drot)
            res = cv2.bitwise_and(image, image, mask=mask)
            plt.subplot(1, 2, 1)
            plt.imshow(mask, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(res)
            plt.show()

            #2nd layer mask, did not display
            hsv_olive = (34, 32, 120)
            hsv_dolive = (37, 240, 27)
            mask_ol = cv2.inRange(hsv_image, hsv_olive, hsv_dolive)
            res_w = cv2.bitwise_and(image, image, mask=mask_ol)
            plt.subplot(1, 2, 1)
            plt.imshow(mask_ol, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(res_w)
            plt.show()

            #final mask
            final_mask = mask + mask_ol
            final_result = cv2.bitwise_and(image, image, mask=final_mask)
            plt.subplot(1, 2, 1)
            plt.imshow(final_mask, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(final_result)
            plt.show()

            #testing .shape and typecast image
            print("The type of this input is {}".format(type(image)))
            print("Shape: {}".format(image.shape))

            ##text.append(get_colors(get_image(image_path), 4, True))

            # Run detection before building the streamer text so that
            # results.duration is defined when it is used below
            results = obj_detect.detect_objects(image, confidence_level=.5)

            # Generate text to display on streamer
            text = ["Model: {}".format(obj_detect.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))

            #need to convert from bgr to rgb
            swapped_colors = swap(obj_detect.colors)
            text.append("Colors printed!")
            # text.append(swapped_colors)

            print(swapped_colors)

            # print(obj_detect.colors)

            # converted = np.array([np.array(rgb) for rgb in swapped_colors]) // numpy arrays with lists (like numpy contained within itself, list of lists)

            # print(converted.shape)

            image = edgeiq.markup_image(image,
                                        results.predictions,
                                        colors=obj_detect.colors)
            # print(rgb2hex(swapped_colors))

            # print(converted)

            # iterate through tuple list and convert
            # for x in obj_detect.colors:
            #     text.append(rgb2hex(swapped_colors))
            #     text.append(format(x))

            text.append("Objects:")

            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(prediction.label,
                                                  prediction.confidence * 100))

            streamer.send_data(image, text)

        streamer.wait()

    print("Program Ending")
Example #12
def main():
    try:
        classifier1 = edgeiq.Classification("alwaysai/gendernet")
        classifier2 = edgeiq.Classification("alwaysai/agenet")

        classifier1.load(edgeiq.Engine.DNN)
        print("Engine 1: {}".format(classifier1.engine))
        print("Accelerator 1: {}\n".format(classifier1.accelerator))
        print("Model 1:\n{}\n".format(classifier1.model_id))
        print("Labels:\n{}\n".format(classifier1.labels))

        classifier2.load(edgeiq.Engine.DNN)
        print("Engine 2: {}".format(classifier2.engine))
        print("Accelerator 2: {}\n".format(classifier2.accelerator))
        print("Model 2:\n{}\n".format(classifier2.model_id))
        print("Labels:\n{}\n".format(classifier2.labels))

        image_paths = sorted(list(edgeiq.list_images("images/")))
        print("Images:\n{}\n".format(image_paths))

        with edgeiq.Streamer(queue_depth=len(image_paths),
                             inter_msg_time=3) as streamer:
            for image_path in image_paths:
                image_display = cv2.imread(image_path)
                image = image_display.copy()

                results1 = classifier1.classify_image(image,
                                                      confidence_level=.95)
                results2 = classifier2.classify_image(image)

                # Generate text to display on streamer
                text = ["Model 1: {}".format(classifier1.model_id)]
                text.append("Model 2: {}".format(classifier2.model_id))
                text.append(
                    "Inference time: {:1.3f} s".format(results1.duration +
                                                       results2.duration))

                # Find the index of highest confidence
                if len(results1.predictions) > 0:
                    top_prediction1 = results1.predictions[0]
                    top_prediction2 = results2.predictions[0]
                    text1 = "Classification: {}, {:.2f}%".format(
                        top_prediction1.label,
                        top_prediction1.confidence * 100)
                    text2 = "Classification: {}, {:.2f}%".format(
                        top_prediction2.label,
                        top_prediction2.confidence * 100)
                else:
                    text1 = "Can not classify this image, confidence under " \
                            "95 percent for Gender Identification"
                    text2 = None
                # Show the image on which inference was performed with text
                cv2.putText(image_display, text1, (5, 25),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
                text.append(text1)
                if text2 is not None:
                    cv2.putText(image_display, text2, (5, 45),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
                    text.append(text2)

                streamer.send_data(image_display, text)
            streamer.wait()

    finally:
        print("Program Ending")