Based on the performance of DeepLabCut, four scenarios are possible:
● A visible body part with an accurate DeepLabCut prediction. These labels do not need any modification.
● A visible body part, but the wrong DeepLabCut prediction. Move the label’s location to the actual position of the body part.
● An invisible, occluded body part. Remove the predicted label by DeepLabCut with a right click. Every predicted label is shown, even when DeepLabCut is uncertain. This is necessary, so that the user can potentially move the predicted label. However, to help the user to remove all invisible body parts, the low-likelihood predictions are shown as open circles (rather than disks).
● Invalid image. In the unlikely event that there are any invalid images, the user should remove such images and their corresponding predictions, if any. Here, the GUI will prompt the user to remove an image identified as invalid.
"""
if "step_15" in stages_to_run or "refine_labels" in stages_to_run:
    # open a gui
    deeplabcut.refine_labels(config_path, multianimal=False)
"""
This will launch a GUI with which you can refine the labels (Fig. 6). Use the ‘Load Labels’ button to select one of the subdirectories where the extracted frames are stored. Each label will be identified by a unique color. To identify low-confidence labels, specify the threshold of the likelihood. This causes the body parts with likelihood below this threshold to appear as circles and the ones above the threshold to appear as solid disks while retaining the same color scheme. Next, to adjust the position of the label, hover the mouse over the label to identify the specific body part, then left-click it and drag it to a different location. To delete a label, right-click on the label (once a label is deleted, it cannot be retrieved).
"""
"""
After correcting the labels for all the frames in each of the subdirectories, merge the dataset to create a new dataset. To do this, type the following:
"""
def refine_labels(self, event):
    """Event handler: launch the DeepLabCut label-refinement GUI.

    Enables the 'merge' control (refinement must precede merging) and then
    opens deeplabcut's refine_labels GUI for this project's config.
    """
    self.merge.Enable(True)
    deeplabcut.refine_labels(self.config)
# evaluate the trained network (optional sanity check)
# dlc.evaluate_network(config_path, plotting=True)

# Build absolute paths to every video to be analyzed.
root = 'add your path'  # root directory containing the videos
# idiom fix: list comprehension instead of a manual append loop
full_path_to_videos = [root + '/' + path for path in videos]

# video analysis and plotting results
dlc.analyze_videos(config_path, full_path_to_videos, shuffle=1,
                   save_as_csv=False, videotype='.mp4')
dlc.filterpredictions(config_path, full_path_to_videos, shuffle=1,
                      save_as_csv=True, videotype='.mp4')
videoCreate = ["add your videos"]
dlc.create_labeled_video(config_path, videoCreate, filtered=True)

# refine videos: extract outliers, correct them in the GUI, then merge
refineVideos = ["add your videos"]
dlc.extract_outlier_frames(config_path, refineVideos, outlieralgorithm='jump')
dlc.refine_labels(config_path)
dlc.merge_datasets(config_path)
# then, back to "create_training_dataset()"
import deeplabcut
import tensorflow as tf

# --- one-time project creation (disabled; kept for reference) ---
'''deeplabcut.create_new_project('1_2_4_chamber','MC',['C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3477.2b\\05_08_2019\\BRAC34772b top_left 05_08_2019 12_40_54 1_trimmed.mp4'], working_directory='C:\\Users\\analysis\\Desktop')'''

# Videos and project config used by this pipeline run.
videopath = [
    'C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3477.2b\\13_08_2019\\BRAC34772b top_right 13_08_2019 14_39_52 2_trimmed.mp4',
    'C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3583.3e\\23_07_2019\\BRAC35833e 23_07_2019 13_35_06 4_trimmed.mp4',
    'C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3616.3c\\23_07_2019\\BRAC36163c 23_07_2019 12_41_11 3_trimmed.mp4',
]
config = 'C:\\Users\\analysis\\Desktop\\1_2_4_chamber-MC-2019-08-23\\config.yaml'

# --- frame extraction steps (disabled; already performed) ---
'''deeplabcut.add_new_videos(config,videopath) deeplabcut.extract_frames(config,'automatic','kmeans')'''

# Label frames, verify labels, build the dataset, and train.
deeplabcut.label_frames(config)
deeplabcut.check_labels(config)
deeplabcut.create_training_dataset(config)
deeplabcut.train_network(config)

# Refinement iteration: extract outliers, relabel, merge, and retrain.
deeplabcut.extract_outlier_frames(config, videopath)
deeplabcut.refine_labels(config)
deeplabcut.merge_datasets(config)
deeplabcut.create_training_dataset(config)
deeplabcut.train_network(config)
def dlc2nems(siteid=None, vids=None, suffix=".lick.avi",
             site_path="/auto/data/daq/Clathrus/CLT011/",
             path_config='/auto/data/dlc/multivid-CLL-2022-01-14/config.yaml'):
    """Run DeepLabCut pose estimation on a recording site's videos.

    Results are written to a ``sorted`` subfolder of the site path, and each
    DLC output .h5 is aliased (symlinked) to a stable ``*.dlc.h5`` name.

    Parameters
    ----------
    siteid : str or None
        Site/cell id; when given, the video folder is looked up in gDataRaw.
    vids : list of str or None
        Video file names to analyze; when None, every file in ``site_path``
        ending with ``suffix`` is used.
    suffix : str
        File-name suffix used to discover videos (non-compressed videos
        end in ``.lick.original.avi``).
    site_path : str
        Folder containing the videos (overridden when ``siteid`` is given).
    path_config : str
        Path to the DLC model's config.yaml; change if using a different
        DLC model.

    Raises
    ------
    ValueError
        If neither ``siteid`` nor ``site_path`` is provided.
    """
    if siteid is not None:
        dfiles = db.pd_query(
            f"SELECT * FROM gDataRaw WHERE not(bad) AND cellid='{siteid}'")
        site_path = dfiles.at[0, 'resppath']
    elif site_path is None:
        raise ValueError("siteid or site_path required.")

    if vids is None:
        # get list of all videos in site folder if no vids given as input
        vids = [f for f in os.listdir(site_path) if f.endswith(suffix)]

    path_sorted = os.path.join(site_path, 'sorted')
    if not os.path.exists(path_sorted):
        os.makedirs(path_sorted)
        print('Created sorted directory in', site_path)

    print('Number of videos to be analyzed:', len(vids))
    print('Results will be saved in:', path_sorted)

    vid_paths = [os.path.join(site_path, v) for v in vids]
    output_aliased = [
        os.path.join(path_sorted, v.replace(".avi", ".dlc.h5")) for v in vids
    ]

    if 1:  # toggle: run pose estimation and alias the outputs
        dlc.analyze_videos(path_config, vid_paths, videotype='avi',
                           destfolder=path_sorted)
        for v, a in zip(vids, output_aliased):
            # DLC appends a model-dependent suffix to its .h5 output; find
            # the most recently modified match and alias it to a stable name.
            candidates = [
                f for f in glob.glob(
                    os.path.join(path_sorted, v.replace(".avi", "")) + '*.h5')
                if os.path.isfile(f)
            ]
            if candidates and not os.path.exists(a):
                newest = max(candidates, key=os.path.getmtime)
                # BUG FIX: os.symlink instead of os.system("ln -s ...") --
                # the shell form breaks on paths containing spaces (common
                # in these video names) and is injection-prone.
                os.symlink(newest, a)

    if 0:  # toggle: render labeled videos
        dlc.create_labeled_video(path_config, vid_paths, videotype='avi',
                                 destfolder=path_sorted)

    if 0:  # toggle: identify "bad" frames and save in training set
        dlc.extract_outlier_frames(path_config, vid_paths,
                                   destfolder=path_sorted, automatic=True)
        # gui to relabel the bad frames
        dlc.refine_labels(path_config)

    if 0:  # toggle: merge refined labels and rebuild the training set
        dlc.merge_datasets(path_config)
        dlc.create_training_dataset(path_config, net_type='resnet_50',
                                    augmenter_type='imgaug')

    if 0:
        # before running, update pose_cfg.yaml to use last snapshot from
        # previous iteration as initial condition (rather than starting
        # over from visnet)
        dlc.train_network(path_config, shuffle=1, displayiters=100)
# Alias the newest DLC output to a stable name (link name `a`).
# NOTE(review): paths with spaces will break this shell command; consider
# os.symlink(list_of_files[-1], a) as in dlc2nems.
os.system(f"ln -s {list_of_files[-1]} {a}")

if 'video' in action_list:
    log.info('Creating labeled video')
    dlc.create_labeled_video(path_config, vid_paths, videotype='avi',
                             destfolder=path_sorted)

if 'summary' in action_list:
    face_tools.summary_plot(vid_paths)

if 'refine' in action_list:
    # identify "bad" frames and save in training set
    dlc.extract_outlier_frames(path_config, vid_paths,
                               destfolder=path_sorted, automatic=True)
    # gui to relabel the bad frames
    dlc.refine_labels(path_config)

if 0:  # toggle: merge refined labels and rebuild the training set
    dlc.merge_datasets(path_config)
    dlc.create_training_dataset(path_config, net_type='resnet_50',
                                augmenter_type='imgaug')

if 0:
    # before running, update pose_cfg.yaml to use last snapshot from previous
    # iteration as initial condition (rather than starting over from visnet)
    dlc.train_network(path_config, shuffle=1, displayiters=100)

log.info("face_fit_script complete")

# BUG FIX: the original `db_exists & queueid > 0` parses as
# `(db_exists & queueid) > 0` because bitwise & binds tighter than
# comparison; e.g. db_exists=True, queueid=2 gives 1 & 2 == 0, so the
# job was never marked complete for even queue ids. Use logical `and`.
if db_exists and queueid > 0:
    nd.update_job_complete(queueid)
## opt: check annotated frames
deeplabcut.check_labels(config_path)

# create training dataset
deeplabcut.create_training_dataset(config_path, num_shuffles=1)

# train the network --> additional parameters
# BUG FIX: gputouse expects a GPU *index* (e.g. 0) or None; the original
# passed 390.87, which is an NVIDIA driver version, not a device id.
deeplabcut.train_network(config_path, shuffle=1, trainingsetindex=0,
                         gputouse=0, max_snapshots_to_keep=5,
                         autotune=False, displayiters=None, saveiters=None)

# evaluate the trained network
deeplabcut.evaluate_network(config_path, shuffle=[1], plotting=True)

# analyze new video
deeplabcut.analyze_videos(config_path, ['path of test video'], shuffle=1,
                          save_as_csv=True)

# create labeled video --> optional parameters
deeplabcut.create_labeled_video(config_path, ['path of test video'])

# plot trajectory of the extracted poses across the analyzed video
# BUG FIX: the original wrapped config_path in Unicode curly quotes
# (a SyntaxError) or passed the literal string 'config_path' instead of
# the config-path variable; all calls now use the variable consistently.
deeplabcut.plot_trajectories(config_path, ['path of test video'])

# extract outlier frames
deeplabcut.extract_outlier_frames(config_path, ['path of test video'])

# refine labels in training set for outlier condition
deeplabcut.refine_labels(config_path)

# merge corrected frames dataset into the existing dataset
deeplabcut.merge_datasets(config_path)