Example #1
0
            # NOTE(review): fragment of a per-frame processing loop; the matching
            # `if` for this `else`, and the enclosing loop/function, are outside
            # this view — presumably the `if` branch runs the action classifier.
            else:
                # NOTE(review): "prediced_label" is a typo for "predicted_label";
                # kept as-is because it is read again in drawActionResult below.
                prediced_label = action_type
                print("Ground_truth label is :", prediced_label)

            if 1:  # always-on: draw detection overlays onto the display frame
                # Draw skeleton
                if ith_skel == 0:
                    # draw the full pose overlay once, on the first skeleton only
                    my_detector.draw(image_disp, humans)
                
                # Draw bounding box and action type
                myfunc.drawActionResult(image_disp, skeleton, prediced_label)

        # Write result to txt/png
        if SAVE_RESULTANT_SKELETON_TO_TXT_AND_IMAGE:
            # Persist skeleton data and the annotated frame, both named by a
            # zero-padded 5-digit frame index.
            myio.save_skeletons(SAVE_DETECTED_SKELETON_TO 
                + myfunc.int2str(ith_img, 5)+".txt", skelsInfo)
            cv2.imwrite(SAVE_DETECTED_SKELETON_IMAGES_TO 
                + myfunc.int2str(ith_img, 5)+".png", image_disp)

        if 1: # Display
            # show the frame enlarged 1.5x; poll keyboard and quit on 'q'
            cv2.imshow("action_recognition", 
                cv2.resize(image_disp,(0,0),fx=1.5,fy=1.5))
            q = cv2.waitKey(1)
            if q!=-1 and chr(q) == 'q':
                break

        # Loop
        print("\n")
        ith_img += 1

        # NOTE(review): the code below looks like a second, glued-in variant of
        # the same save/display logic — it reuses ith_img AFTER the increment
        # above and references different globals (dict_id2skeleton,
        # multipeople_classifier). Confirm against the original file; frames
        # saved here would be numbered one higher than those saved above.
        # Draw predicting score for only 1 person (not using for)
        if DO_INFER_ACTIONS and len(dict_id2skeleton):
            # draw classifier scores for the tracked person with the minimum id
            multipeople_classifier.get(
                id='min').draw_scores_onto_image(image_disp)

        # -- Write skeleton.txt and image.png
        if SAVE_RESULTANT_SKELETON_TO_TXT_AND_IMAGE:

            # One row per tracked person: frame info followed by the person's
            # flattened skeleton coordinates, ordered by ascending id.
            # NOTE(review): `id` shadows the builtin — rename when refactoring.
            ids = sorted(dict_id2skeleton.keys())
            skel_to_save = [
                img_info + dict_id2skeleton[id].tolist() for id in ids
            ]

            myio.save_skeletons(
                SAVE_DETECTED_SKELETON_TO + myfunc.int2str(ith_img, 5) +
                ".txt", skel_to_save)
            cv2.imwrite(
                SAVE_DETECTED_SKELETON_IMAGES_TO + myfunc.int2str(ith_img, 5) +
                ".png", image_disp)

            if FROM_TXTSCRIPT or FROM_WEBCAM:  # Save source image
                # also keep the raw, unannotated source frame alongside
                cv2.imwrite(
                    SAVE_DETECTED_SKELETON_IMAGES_TO +
                    myfunc.int2str(ith_img, 5) + "_src.png", img)

        # -- Display
        if 1:
            if ith_img == 1:
                # create the display window once, on the first frame
                # (fragment is cut off here; the imshow call is out of view)
                window_name = "action_recognition"
                cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
Example #3
0
        # NOTE(review): fragment of a per-frame loop; the enclosing loop and the
        # detection call that produces `humans` are outside this view.
        # Hand keypoint positions (pixel x, y) for every detected person.
        hands = model_openpose.get_hands_in_xy(humans)

        if 1:  # Draw skeleton
            model_openpose.draw(image_disp, humans)
            model_openpose.draw_fps(image_disp)
            # mark each detected hand with a blue outline circle
            # (OpenCV uses BGR, so [255, 0, 0] is blue)
            for hand in hands:
                cv2.circle(image_disp,
                           center=(hand[0], hand[1]),
                           radius=10,
                           color=[255, 0, 0],
                           thickness=2,
                           lineType=cv2.LINE_AA)

        # Write result to png
        if 1:
            # save the annotated frame, named by a zero-padded 5-digit index
            cv2.imwrite(
                CURR_PATH + "result_images/" + myfunc.int2str(ith_img, 5) +
                ".png", image_disp)

        if 1:  # Display
            image_disp = cv2.resize(image_disp, (0, 0), fx=1.5,
                                    fy=1.5)  # resize to make picture bigger
            cv2.imshow("action_recognition", image_disp)
            # poll keyboard once per frame; quit the loop on 'q'
            q = cv2.waitKey(1)
            if q != -1 and chr(q) == 'q':
                break

        # Loop
        print("\n")
        ith_img += 1