def create_video_with_all_detections(
    config, videos, DLCscorername, displayedbodyparts="all", destfolder=None
):
    """
    Create a video labeled with all the detections stored in a '*_full.pickle' file.

    Parameters
    ----------
    config : str
        Absolute path to the config.yaml file

    videos : list of str
        A list of strings containing the full paths to videos for analysis or a
        path to the directory, where all the videos with same extension are stored.

    DLCscorername: str
        Name of network. E.g. 'DLC_resnet50_project_userMar23shuffle1_50000'

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``,
        then all body parts from config.yaml are used, or a list of strings that
        are a subset of the full list. E.g. ['hand','Joystick'] for the demo
        Reaching-Mackenzie-2018-08-30/config.yaml to select only these two
        body parts.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data
        (default is the path of the video).
    """
    from deeplabcut.pose_estimation_tensorflow.lib.inferenceutils import (
        convertdetectiondict2listoflist,
    )
    import pickle, re

    cfg = auxiliaryfunctions.read_config(config)
    for video in videos:
        videofolder = os.path.splitext(video)[0]
        if destfolder is None:
            outputname = "{}_full.mp4".format(videofolder + DLCscorername)
            # os.path.join on a single pre-concatenated path was a no-op; keep
            # the plain concatenation.
            full_pickle = videofolder + DLCscorername + "_full.pickle"
        else:
            auxiliaryfunctions.attempttomakefolder(destfolder)
            outputname = os.path.join(
                destfolder, str(Path(video).stem) + DLCscorername + "_full.mp4"
            )
            full_pickle = os.path.join(
                destfolder, str(Path(video).stem) + DLCscorername + "_full.pickle"
            )

        if os.path.isfile(outputname):
            print("Detections already plotted, ", outputname)
            continue

        print("Creating labeled video for ", str(Path(video).stem))
        with open(full_pickle, "rb") as file:
            data = pickle.load(file)
        header = data.pop("metadata")
        all_jointnames = header["all_joints_names"]

        if displayedbodyparts == "all":
            numjoints = len(all_jointnames)
            bpts = range(numjoints)
        else:
            # Keep only the indices of the requested body parts.
            bpts = [
                ind
                for ind, bp in enumerate(all_jointnames)
                if bp in displayedbodyparts
            ]
            numjoints = len(bpts)

        # Map frame number -> pickle key once, instead of a linear
        # frames.index(n) search per video frame (was O(n^2) overall).
        # setdefault keeps the FIRST occurrence, matching list.index semantics.
        frame2name = {}
        for name in data:
            frame2name.setdefault(int(re.findall(r"\d+", name)[0]), name)

        colorclass = plt.cm.ScalarMappable(cmap=cfg["colormap"])
        C = colorclass.to_rgba(np.linspace(0, 1, numjoints))
        colors = (C[:, :3] * 255).astype(np.uint8)
        pcutoff = cfg["pcutoff"]
        dotsize = cfg["dotsize"]

        clip = vp(fname=video, sname=outputname, codec="mp4v")
        ny, nx = clip.height(), clip.width()
        for n in trange(clip.nframes):
            frame = clip.load_frame()
            name = frame2name.get(n)
            if name is None:
                # No detections were stored for this particular frame.
                print(n, "no data")
            else:
                dets = convertdetectiondict2listoflist(data[name], bpts)
                for i, det in enumerate(dets):
                    color = colors[i]
                    for x, y, p, _ in det:
                        if p > pcutoff:
                            rr, cc = circle(y, x, dotsize, shape=(ny, nx))
                            frame[rr, cc] = color
            try:
                clip.save_frame(frame)
            except Exception as e:
                # Best-effort: skip an unwritable frame but report the cause
                # (previously a bare `except:` that also swallowed
                # KeyboardInterrupt and hid the error).
                print(n, "frame writing error:", e)
        clip.close()
header = data.pop("metadata") all_jointnames = header["all_joints_names"] numjoints = len(all_jointnames) bpts = range(numjoints) frame_names = list(data) frames = [int(re.findall(r"\d+", name)[0]) for name in frame_names] #n = 0 #ind = frames.index(n) df = pd.DataFrame(index=frames, columns=col) for n in tqdm(frames): #for n in range(2): dets = convertdetectiondict2listoflist(data[frame_names[n]], bpts) # print(n) for m in range(11): for p in range(3): # print(n, m - 11, p) if m < 7: try: df.iloc[n, 3 * m + p] = dets[m][0][p] df.iloc[n, (3 * m + p) + 33] = dets[m][1][p] # print('succes') except: # print('fail') pass else: try: df.iloc[n, 3 * m + p] = dets[m][0][p]