def main(argv, common_opts):
    """Run single-image 3D pose inference and show the input beside the predicted skeleton."""
    args = parse_args(argv)
    seed_all(12345)
    init_algorithms(deterministic=True)
    torch.set_grad_enabled(False)
    device = common_opts['device']

    assert not args.multicrop, 'TODO: Implement multi-crop for single image inference.'

    model = load_model(args.model).to(device).eval()
    input_specs: ImageSpecs = model.data_specs.input_specs

    # Shrink the image in place to fit the model's expected input size
    # (thumbnail preserves the aspect ratio).
    image: PIL.Image.Image = PIL.Image.open(args.image, 'r')
    image.thumbnail((input_specs.width, input_specs.height))
    inp = input_specs.convert(image).to(device, torch.float32)

    # Add a batch dimension, run the model, and take the first (only) prediction.
    output = model(inp[None, ...])[0]
    norm_skel3d = ensure_cartesian(output.to(CPU, torch.float64), d=3)

    fig = plt.figure(figsize=(16, 8))
    ax1 = fig.add_subplot(1, 2, 1)
    ax2: Axes3D = fig.add_subplot(1, 2, 2, projection='3d')
    ax1.imshow(input_specs.unconvert(inp.to(CPU)))
    plot_skeleton_on_axes3d(norm_skel3d, CanonicalSkeletonDesc, ax2, invert=True)
    plt.show()
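# Every entry point above and below calls seed_all / init_algorithms so that inference
# is reproducible. A minimal sketch of what such helpers typically do, assuming only
# the standard `random`, `numpy`, and `torch` libraries (the project's actual
# implementations may differ):

import random

import numpy as np
import torch


def seed_all(seed: int) -> None:
    """Seed the Python, NumPy, and PyTorch RNGs with the same value."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)  # also seeds all CUDA devices in recent PyTorch


def init_algorithms(deterministic: bool = False) -> None:
    """Ask cuDNN to pick deterministic kernels, at the cost of some speed."""
    torch.backends.cudnn.deterministic = deterministic
    torch.backends.cudnn.benchmark = not deterministic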
def main(argv, common_opts):
    args = parse_args(argv)
    seed_all(12345)
    init_algorithms(deterministic=True)
    torch.set_grad_enabled(False)
    device = common_opts['device']

    if args.model:
        # model = load_model(args.model).to(device).eval()
        model = load_model(args.model).eval()
        data_specs = model.data_specs
    else:
        model = None
        data_specs = DataSpecs(
            ImageSpecs(224, mean=ImageSpecs.IMAGENET_MEAN, stddev=ImageSpecs.IMAGENET_STDDEV),
            JointsSpecs(CanonicalSkeletonDesc, n_dims=3),
        )

    dataset = get_dataset(args.dataset, data_specs, use_aug=False)

    app = MainGUIApp(dataset, device, model)
    app.mainloop()
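# The GUI entry point only needs a dataset name and an optional model checkpoint.
# A minimal parse_args sketch, assuming argparse and hypothetical flag names
# (--model, --dataset); the project's real parser may define more options:

import argparse


def parse_args(argv):
    parser = argparse.ArgumentParser(description='3D pose estimation dataset browser')
    parser.add_argument('--model', type=str, default=None,
                        help='path to a saved model checkpoint (optional)')
    parser.add_argument('--dataset', type=str, required=True,
                        help='name of the dataset to browse')
    return parser.parse_args(argv)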
def main(argv, common_opts):
    args = parse_args(argv)
    seed_all(12345)
    init_algorithms(deterministic=True)
    torch.set_grad_enabled(False)
    device = common_opts['device']

    model = load_model(args.model).to(device).eval()
    dataset = get_dataset(args.dataset, model.data_specs, use_aug=False)

    if args.multicrop:
        dataset.multicrop = True
        loader = make_unbatched_dataloader(dataset)
    else:
        loader = make_dataloader(dataset, batch_size=1)

    if args.dataset.startswith('h36m-'):
        # Human3.6M evaluation uses the ground truth root joint depth and all
        # canonical joints; other datasets are scored on the common VNect subset.
        known_depth = True
        included_joints = list(range(CanonicalSkeletonDesc.n_joints))
    else:
        known_depth = False
        included_joints = [
            CanonicalSkeletonDesc.joint_names.index(joint_name)
            for joint_name in VNect_Common_Skeleton
        ]
    print('Use ground truth root joint depth? {}'.format(known_depth))
    print('Number of joints in evaluation: {}'.format(len(included_joints)))

    df = run_evaluation_3d(model, device, loader, included_joints,
                           known_depth=known_depth, print_progress=True)

    print('### By sequence')
    print()
    print(tabulate(df.drop(columns=['activity_id']).groupby('seq_id').mean(),
                   headers='keys', tablefmt='pipe'))
    print()
    print('### By activity')
    print()
    print(tabulate(df.drop(columns=['seq_id']).groupby('activity_id').mean(),
                   headers='keys', tablefmt='pipe'))
    print()
    print('### Overall')
    print()
    print(tabulate(df.drop(columns=['activity_id', 'seq_id']).mean().to_frame().T,
                   headers='keys', tablefmt='pipe'))
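# run_evaluation_3d returns a pandas DataFrame with one row per evaluated example and
# columns for seq_id, activity_id, and the error metrics, which the groupby/tabulate
# calls above reduce to Markdown pipe tables. A toy illustration with a made-up
# metric column (the real metric names come from run_evaluation_3d):

import pandas as pd
from tabulate import tabulate

df = pd.DataFrame({
    'seq_id': ['S9-1', 'S9-1', 'S11-2'],
    'activity_id': [2, 2, 5],
    'mpjpe': [62.1, 58.3, 71.4],  # hypothetical per-example joint errors (mm)
})

# Average the metric within each sequence and render as a Markdown pipe table.
print(tabulate(df.drop(columns=['activity_id']).groupby('seq_id').mean(),
               headers='keys', tablefmt='pipe'))
# Prints something like:
# | seq_id   |   mpjpe |
# |:---------|--------:|
# | S11-2    |    71.4 |
# | S9-1     |    60.2 |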
def main():
    args = parse_args()

    if args.mode in ('I', 'i'):
        # Single-image mode.
        filename = os.path.basename(args.path)
        filename_noext = os.path.splitext(filename)[0]

        model = load_model(args.model).to(CPU).eval()
        coords_img, coords_raw, img_input, img_skele3d = infer_joints(model, args.path)
        img_skele3d = PIL.Image.fromarray(img_skele3d)

        # Calculate the step angle: the angle between each hip-to-ankle vector and
        # the downward vertical, converted from radians to degrees.
        vector1 = coords_img[8, :2] - coords_img[10, :2]
        vector2 = (0, -1)
        print(angleBetween(vector1, vector2) * 180 / np.pi, 'right')
        vector1 = coords_img[11, :2] - coords_img[13, :2]
        print(angleBetween(vector1, vector2) * 180 / np.pi, 'left')

        draw_skele_on_image(img_input, coords_img[:, :2])
        # Used for qualitative evaluation:
        # draw_color_joints(img_input, coords_img[:, :2])

        joints_loc = output_to_JSON(coords_raw, filename_noext)

        img_skele3d.save('./outputs/3d/' + filename_noext + '.png')
        img_input.save('./outputs/' + filename)
        # with open('./outputs/joint_loc.json', 'w') as fp:
        #     json.dump(joints_loc, fp, indent=4)

        MayaExporter.WriteToMayaAscii('./outputs/3d/' + filename_noext + '.ma', joints_loc)
        MayaExporter.WriteAtomFile(
            './outputs/3d/' + filename_noext + '_anim.atom',
            'C:/Users/imagi/Documents/maya/projects/default/scenes/sk_mannikin_margipose.0007.ma',
            1, 1, coords_raw * 100)

    elif args.mode in ('D', 'd'):
        # Directory mode: run inference on every image in the directory.
        files = os.listdir(args.path)

        start = time.time()
        model = load_model(args.model).to(CPU).eval()
        end = time.time()
        print(end - start, '(s) to load model')

        count = 0
        joints_loc_list = []
        for image in files:
            start = time.time()
            filename_noext = os.path.splitext(image)[0]
            coords_img, coords_raw, img_input, img_skele3d = infer_joints(
                model, os.path.join(args.path, image))
            img_skele3d = PIL.Image.fromarray(img_skele3d)

            draw_skele_on_image(img_input, coords_img[:, :2])
            # Used for qualitative evaluation:
            # draw_color_joints(img_input, coords_img[:, :2])

            joints_loc = output_to_JSON(coords_raw, filename_noext)
            img_skele3d.save('./outputs/3d/' + filename_noext + '.png')
            img_input.save('./outputs/' + image)
            joints_loc_list.append(joints_loc)

            count += 1
            print(time.time() - start, '(s)', 'completed ' + str(count) + '/' + str(len(files)))

            MayaExporter.WriteToMayaAscii('./outputs/3d/' + filename_noext + '.ma', joints_loc)
            MayaExporter.WriteAtomFile(
                './outputs/3d/' + filename_noext + '_anim.atom',
                'C:/Users/imagi/Documents/maya/projects/default/scenes/sk_mannikin_margipose.0007.ma',
                1, 1, coords_raw * 100)

    elif args.mode in ('V', 'v'):
        # Video mode: extract frames, run per-frame inference, and track stride angles.
        filename = os.path.basename(args.path)
        filename_noext = os.path.splitext(filename)[0]

        frameArray, fps = VideoFrames.ExtractFrames(args.path)
        frameArray = np.asarray(frameArray, dtype=np.uint8)
        n_frames = frameArray.shape[3]

        skel3DArray = np.zeros((480, 640, 3, n_frames), dtype=np.uint8)
        finalFrameArray = np.zeros((256, 256, 3, n_frames), dtype=np.uint8)
        strideAngles = np.zeros((n_frames, 3))

        start = time.time()
        model = load_model(args.model).to(CPU).eval()
        end = time.time()
        print(end - start, '(s) to load model')

        for i in range(n_frames):
            start = time.time()
            # Frames arrive in BGR channel order; reverse the channel axis to get RGB.
            img = PIL.Image.fromarray(frameArray[:, :, :, i][..., ::-1])
            coords_img, coords_raw, img_scaled, skel3DArray[:, :, :, i] = infer_joints(model, img)

            # Calculate the step angles relative to the downward vertical.
            vector1 = coords_img[8, :2] - coords_img[10, :2]
            vector2 = (0, -1)
            rightStride = angleBetween(vector1, vector2) * 180 / np.pi
            vector1 = coords_img[11, :2] - coords_img[13, :2]
            leftStride = angleBetween(vector1, vector2) * 180 / np.pi

            strideAngles[i, 0] = (i + 1) / fps
            strideAngles[i, 1] = rightStride
            strideAngles[i, 2] = leftStride

            draw_skele_on_image(img_scaled, coords_img[:, :2])
            # draw_evaluation_image(img_scaled, coords_img[:, :2], rightStride, leftStride)
            finalFrameArray[:, :, :, i] = np.array(img_scaled, dtype=np.uint8)

            print(time.time() - start, '(s)',
                  'frames completed ' + str(i + 1) + '/' + str(n_frames))

        VideoFrames.FrametoVid(finalFrameArray, skel3DArray, fps, filename_noext)

        # Plot the right/left leg swing angles over time.
        plt.subplot(2, 1, 1)
        plt.plot(strideAngles[:, 0], strideAngles[:, 1], color='red')
        plt.title('Right Leg')
        plt.ylim(0, 30)
        plt.ylabel('Leg Swing Angle (deg)')
        plt.xlim(0, 6)
        plt.xlabel('Time (s)')
        plt.subplot(2, 1, 2)
        plt.plot(strideAngles[:, 0], strideAngles[:, 2], color='blue')
        plt.title('Left Leg')
        plt.ylim(0, 30)
        plt.ylabel('Leg Swing Angle (deg)')
        plt.xlim(0, 6)
        plt.xlabel('Time (s)')
        plt.tight_layout()
        plt.show()
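# angleBetween is used above to measure leg swing: joint pairs 8/10 and 11/13 are
# taken to be the right and left hip-to-ankle vectors, each compared against the
# downward vertical (0, -1). A minimal sketch of such a helper, assuming it returns
# the unsigned angle in radians (the project's own version may differ):

import numpy as np


def angleBetween(v1, v2) -> float:
    """Unsigned angle in radians between two 2D vectors, via the normalised dot product."""
    u1 = np.asarray(v1, dtype=np.float64)
    u2 = np.asarray(v2, dtype=np.float64)
    u1 = u1 / np.linalg.norm(u1)
    u2 = u2 / np.linalg.norm(u2)
    # Clip to guard against floating-point values slightly outside [-1, 1].
    return float(np.arccos(np.clip(np.dot(u1, u2), -1.0, 1.0)))


# Example: a vector pointing straight down is at 0 rad from (0, -1),
# and one pointing right is at pi/2.
assert np.isclose(angleBetween((0, -1), (0, -1)), 0.0)
assert np.isclose(angleBetween((1, 0), (0, -1)), np.pi / 2)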