# -- Draw predicted action scores for a single person (not looping over all).
# NOTE(review): `multipeople_classifier.get(id='min')` presumably selects the
# person with the smallest tracked id — confirm against the classifier API.
if DO_INFER_ACTIONS and len(dict_id2skeleton):
    multipeople_classifier.get(id='min').draw_scores_onto_image(image_disp)

# -- Write skeleton .txt and the annotated frame .png for this image.
if SAVE_RESULTANT_SKELETON_TO_TXT_AND_IMAGE:
    ids = sorted(dict_id2skeleton.keys())
    # Fix: loop variable renamed from `id`, which shadowed the builtin id().
    skel_to_save = [img_info + dict_id2skeleton[human_id].tolist()
                    for human_id in ids]
    myio.save_skeletons(
        SAVE_DETECTED_SKELETON_TO + myfunc.int2str(ith_img, 5) + ".txt",
        skel_to_save)
    cv2.imwrite(
        SAVE_DETECTED_SKELETON_IMAGES_TO + myfunc.int2str(ith_img, 5) + ".png",
        image_disp)
    if FROM_TXTSCRIPT or FROM_WEBCAM:
        # Also keep the unannotated source frame for later inspection.
        cv2.imwrite(
            SAVE_DETECTED_SKELETON_IMAGES_TO
            + myfunc.int2str(ith_img, 5) + "_src.png",
            img)

# -- Display: create the (fullscreen-capable) window once, on the first frame.
if 1:
    if ith_img == 1:
        window_name = "action_recognition"
        cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
print("prediced label is :", prediced_label) else: prediced_label = action_type print("Ground_truth label is :", prediced_label) if 1: # Draw skeleton if ith_skel == 0: my_detector.draw(image_disp, humans) # Draw bounding box and action type drawActionResult(image_disp, skeleton, prediced_label) # Write result to txt/png if SAVE_RESULTANT_SKELETON_TO_TXT_AND_IMAGE: myio.save_skeletons(SAVE_DETECTED_SKELETON_TO + int2str(ith_img, 5)+".txt", skelsInfo) cv2.imwrite(SAVE_DETECTED_SKELETON_IMAGES_TO + int2str(ith_img, 5)+".png", image_disp) if 1: # Display cv2.imshow("action_recognition", cv2.resize(image_disp,(0,0),fx=1.5,fy=1.5)) q = cv2.waitKey(1) if q!=-1 and chr(q) == 'q': break # Loop print("\n") ith_img += 1