Code Example #1
def merge_dataset(self, event):
    # GUI callback: ask the user for confirmation, jump to notebook tab 4,
    # and merge the refined labels into the dataset.
    dlg = wx.MessageDialog(
        None,
        "1. Make sure that you have refined all the labels before merging the dataset.\n\n2. If you merge the dataset, you need to re-create the training dataset before you start the training.\n\n3. Are you ready to merge the dataset?",
        "Warning",
        wx.YES_NO | wx.ICON_WARNING,
    )
    result = dlg.ShowModal()
    if result == wx.ID_YES:
        notebook = self.GetParent()
        notebook.SetSelection(4)
        deeplabcut.merge_datasets(self.config, forceiterate=None)
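
The dialog above spells out the refine → merge → re-create → retrain workflow that this GUI callback drives. A minimal script-level sketch of the same steps (the config_path name is an assumption, not taken from the snippet above):

deeplabcut.refine_labels(config_path)            # correct the machine-generated labels
deeplabcut.merge_datasets(config_path)           # updates the iteration in config.yaml
deeplabcut.create_training_dataset(config_path)  # re-create the training set
deeplabcut.train_network(config_path)            # retrain on the merged data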
Code Example #2
File: testscript.py Project: zanecn/DeepLabCut
print("EXTRACT OUTLIERS")
deeplabcut.extract_outlier_frames(path_config_file,[newvideo],outlieralgorithm='jump',epsilon=0,automatic=True, destfolder=dfolder)


file=os.path.join(cfg['project_path'],'labeled-data',vname,"machinelabels-iter"+ str(cfg['iteration']) + '.h5')

print("RELABELING")
DF=pd.read_hdf(file,'df_with_missing')
DLCscorer=np.unique(DF.columns.get_level_values(0))[0]
DF.columns.set_levels([scorer.replace(DLCscorer,scorer)],level=0,inplace=True)
DF =DF.drop('likelihood',axis=1,level=2)
DF.to_csv(os.path.join(cfg['project_path'],'labeled-data',vname,"CollectedData_" + scorer + ".csv"))
DF.to_hdf(os.path.join(cfg['project_path'],'labeled-data',vname,"CollectedData_" + scorer + '.h5'),'df_with_missing',format='table', mode='w')

print("MERGING")
deeplabcut.merge_datasets(path_config_file)

print("CREATING TRAININGSET")
deeplabcut.create_training_dataset(path_config_file)

cfg=deeplabcut.auxiliaryfunctions.read_config(path_config_file)
posefile=os.path.join(cfg['project_path'],'dlc-models/iteration-'+str(cfg['iteration'])+'/'+ cfg['Task'] + cfg['date'] + '-trainset' + str(int(cfg['TrainingFraction'][0] * 100)) + 'shuffle' + str(1),'train/pose_cfg.yaml')
DLC_config=deeplabcut.auxiliaryfunctions.read_plainconfig(posefile)
DLC_config['save_iters']=5
DLC_config['display_iters']=1
DLC_config['multi_step']=[[0.001,5]]

print("CHANGING training parameters to end quickly!")
deeplabcut.auxiliaryfunctions.write_config(posefile,DLC_config)

print("TRAIN")
Code Example #3
    DF.to_csv(
        os.path.join(
            cfg["project_path"],
            "labeled-data",
            vname,
            "CollectedData_" + scorer + ".csv",
        )
    )
    DF.to_hdf(
        os.path.join(
            cfg["project_path"],
            "labeled-data",
            vname,
            "CollectedData_" + scorer + ".h5",
        ),
        "df_with_missing",
        format="table",
        mode="w",
    )

    print("MERGING")
    deeplabcut.merge_datasets(config_path)  # iteration + 1

    print("CREATING TRAININGSET updated training set")
    deeplabcut.create_training_dataset(config_path, net_type=NET)

    print("Training network...")
    deeplabcut.train_network(config_path, maxiters=N_ITER)
    print("Network trained.")

    print("Evaluating network...")
    deeplabcut.evaluate_network(config_path, plotting=True)

    print("Network evaluated....")

    print("Analyzing video with auto_track....")
    deeplabcut.analyze_videos(
Code Example #4
DF.to_csv(
    os.path.join(
        cfg["project_path"], "labeled-data", vname, "CollectedData_" + scorer + ".csv"
    )
)
DF.to_hdf(
    os.path.join(
        cfg["project_path"], "labeled-data", vname, "CollectedData_" + scorer + ".h5"
    ),
    "df_with_missing",
    format="table",
    mode="w",
)

print("MERGING")
deeplabcut.merge_datasets(path_config_file)  # iteration + 1

print("CREATING TRAININGSET")
deeplabcut.create_training_dataset(
    path_config_file, net_type=net_type, augmenter_type=augmenter_type2
)

cfg = deeplabcut.auxiliaryfunctions.read_config(path_config_file)
posefile = os.path.join(
    cfg["project_path"],
    "dlc-models/iteration-"
    + str(cfg["iteration"])
    + "/"
    + cfg["Task"]
    + cfg["date"]
    + "-trainset"
Code Example #5
File: template.py Project: HeathRossie/dlc_template
# evaluate the trained network
#dlc.evaluate_network(config_path, plotting=True)

full_path_to_videos = []
root = 'add your path'
for path in videos:
    full_path_to_videos.append(root + '/' + path)

# video analysis and plotting results
dlc.analyze_videos(config_path,
                   full_path_to_videos,
                   shuffle=1,
                   save_as_csv=False,
                   videotype='.mp4')
dlc.filterpredictions(config_path,
                      full_path_to_videos,
                      shuffle=1,
                      save_as_csv=True,
                      videotype='.mp4')

videoCreate = ["add your videos"]
dlc.create_labeled_video(config_path, videoCreate, filtered=True)

# refine videos
refineVideos = ["add your videos"]
dlc.extract_outlier_frames(config_path, refineVideos, outlieralgorithm='jump')
dlc.refine_labels(config_path)
dlc.merge_datasets(config_path)
# then, back to "create_training_dataset()"
Code Example #6
        This will launch a GUI with which you can refine the labels (Fig. 6). Use the ‘Load Labels’ button to
        select one of the subdirectories where the extracted frames are stored. Each label will be identified
        by a unique color. To identify low-confidence labels, specify the threshold of the likelihood. This
        causes the body parts with likelihood below this threshold to appear as circles and the ones above
        the threshold to appear as solid disks while retaining the same color scheme. Next, to adjust the
        position of the label, hover the mouse over the label to identify the specific body part, then left-click
        it and drag it to a different location. To delete a label, right-click on the label (once a label is deleted,
        it cannot be retrieved).
        """

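    # (Sketch, not part of the original script) The refinement GUI described above
    # is the one opened by deeplabcut.refine_labels; the stage name below simply
    # mirrors this script's stages_to_run convention and is an assumption:
    if "refine_labels" in stages_to_run:
        deeplabcut.refine_labels(config_path)
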
    """
    After correcting the labels for all the frames in each of the subdirectories, merge the dataset to
    create a new dataset. To do this, type the following:
    """
    if "step_16" in stages_to_run or "merge_datasets" in stages_to_run:
        deeplabcut.merge_datasets(config_path, forceiterate=None)
    """
    The iteration parameter in the config.yaml file will be automatically updated.
    Once the datasets are merged, you can test if the merging process was successful by
    plotting all the labels (Step 7). Next, with this expanded image set, you can now create a
    novel training set and train the network as described in Steps 8 and 9. The training dataset will be
    stored in the same place as before but under a different ‘iteration-#’ subdirectory, where ‘#’ is
    the new value of the iteration variable stored in the project’s configuration file (this update is
    done automatically).
    If, after training, the network generalizes well to the data (i.e., run evaluate_network in
    Step 10), proceed to Step 11 to analyze new videos. Otherwise, consider labeling more data
    (optional Stage X).
    """

    # --------------------------------------------------------------------
    # Stage XI: working with the output files of DeepLabCut
Code Example #7
        os.path.join(
            proj_path, 'labeled_videos',
            'cam{}DLC_resnet50_CMGPretrainedNetworkDec3shuffle1_250000_labeled.mp4'
            .format(serial)))
analyzed_training_videos_dir = [os.path.join(proj_path, 'labeled_videos')]

# %% Refinement?
deeplabcut.extract_frames(config_path,
                          mode='automatic',
                          algo='uniform',
                          crop=False,
                          userfeedback=False)

deeplabcut.label_frames(config_path)

deeplabcut.merge_datasets(config_path)

deeplabcut.create_training_dataset(config_path)

deeplabcut.train_network(config_path,
                         gputouse=0,
                         saveiters=25000,
                         maxiters=250000)

# %% 3 Triangulation from multiple cameras
method = 'full_rank'
threshold = 0.9
triangulated_path = os.path.join(
    proj_path, 'triangulated_{}_{}'.format(method, threshold))
if not os.path.exists(triangulated_path):
    os.mkdir(triangulated_path)
Code Example #8
File: random.py Project: tochenan/data_analyais_DLC
import deeplabcut
import tensorflow as tf

'''deeplabcut.create_new_project('1_2_4_chamber','MC',['C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3477.2b\\05_08_2019\\BRAC34772b top_left 05_08_2019 12_40_54 1_trimmed.mp4'],
working_directory='C:\\Users\\analysis\\Desktop')'''

videopath = [
    'C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3477.2b\\13_08_2019\\BRAC34772b top_right 13_08_2019 14_39_52 2_trimmed.mp4',
    'C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3583.3e\\23_07_2019\\BRAC35833e 23_07_2019 13_35_06 4_trimmed.mp4',
    'C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3616.3c\\23_07_2019\\BRAC36163c 23_07_2019 12_41_11 3_trimmed.mp4']
config = 'C:\\Users\\analysis\\Desktop\\1_2_4_chamber-MC-2019-08-23\\config.yaml'


'''deeplabcut.add_new_videos(config,videopath)
deeplabcut.extract_frames(config,'automatic','kmeans')'''
deeplabcut.label_frames(config)

deeplabcut.check_labels(config)
deeplabcut.create_training_dataset(config)
deeplabcut.train_network(config)


deeplabcut.extract_outlier_frames(config,videopath)
deeplabcut.refine_labels(config)
deeplabcut.merge_datasets(config)
deeplabcut.create_training_dataset(config)
deeplabcut.train_network(config)
Code Example #9
def dlc2nems(siteid=None,
             vids=None,
             suffix=".lick.avi",
             site_path="/auto/data/daq/Clathrus/CLT011/",
             path_config='/auto/data/dlc/multivid-CLL-2022-01-14/config.yaml'):

    #change path_config if using a different DLC model
    if siteid is not None:
        dfiles = db.pd_query(
            f"SELECT * FROM gDataRaw WHERE not(bad) AND cellid='{siteid}'")
        site_path = dfiles.at[0, 'resppath']
    elif site_path is None:
        raise ValueError("siteid or site_path required.")

    if vids is None:
        vids = []
        # get list of all videos in site folder if no vids given as input
        for f in os.listdir(site_path):
            # non-compressed vids end in lick.original.avi
            if f.endswith(suffix):
                vids.append(f)

    path_sorted = os.path.join(site_path, 'sorted')

    if not os.path.exists(path_sorted):
        os.makedirs(path_sorted)
        print('Created sorted directory in', site_path)

    print('Number of videos to be analyzed:', len(vids))
    print('Results will be saved in:', path_sorted)

    vid_paths = [os.path.join(site_path, v) for v in vids]
    output_aliased = [
        os.path.join(path_sorted, v.replace(".avi", ".dlc.h5")) for v in vids
    ]
    if 1:
        dlc.analyze_videos(path_config,
                           vid_paths,
                           videotype='avi',
                           destfolder=path_sorted)

        for v, a in zip(vids, output_aliased):
            # Get list of all files only in the given directory
            list_of_files = filter(
                os.path.isfile,
                glob.glob(
                    os.path.join(path_sorted, v.replace(".avi", "")) + '*.h5'))
            # Sort list of files based on last modification time in ascending order
            list_of_files = sorted(list_of_files, key=os.path.getmtime)
            if not os.path.exists(a):
                os.system(f"ln -s {list_of_files[-1]} {a}")

    if 0:
        dlc.create_labeled_video(path_config,
                                 vid_paths,
                                 videotype='avi',
                                 destfolder=path_sorted)

    if 0:
        # identify "bad" frames and save in training set
        dlc.extract_outlier_frames(path_config,
                                   vid_paths,
                                   destfolder=path_sorted,
                                   automatic=True)

        # gui to relabel the bad frames
        dlc.refine_labels(path_config)

    if 0:
        dlc.merge_datasets(path_config)
        dlc.create_training_dataset(path_config,
                                    net_type='resnet_50',
                                    augmenter_type='imgaug')

    if 0:
        # before running, update pose_cfg.yaml to use last snapshot from previous iteration as initial condition
        # (rather than starting over from visnet)
        dlc.train_network(path_config, shuffle=1, displayiters=100)
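    if 0:
        # (Sketch, not in the original) One way to make the pose_cfg.yaml edit that
        # the comment above describes: point 'init_weights' at the last snapshot of
        # the previous iteration so training resumes from it. Both paths below are
        # placeholders, and the read/write helpers mirror the usage shown in
        # Code Example #2.
        posefile = 'dlc-models/iteration-1/.../train/pose_cfg.yaml'  # placeholder
        train_cfg = dlc.auxiliaryfunctions.read_plainconfig(posefile)
        train_cfg['init_weights'] = 'dlc-models/iteration-0/.../train/snapshot-250000'  # placeholder
        dlc.auxiliaryfunctions.write_config(posefile, train_cfg)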
Code Example #10
    if 'video' in action_list:
        log.info('Creating labeled video')
        dlc.create_labeled_video(path_config, vid_paths, videotype='avi', destfolder=path_sorted)

    if 'summary' in action_list:
        face_tools.summary_plot(vid_paths)

    if 'refine' in action_list:
        # identify "bad" frames and save in training set
        dlc.extract_outlier_frames(path_config, vid_paths, destfolder=path_sorted, automatic=True)

        # gui to relabel the bad frames
        dlc.refine_labels(path_config)

    if 0:
        dlc.merge_datasets(path_config)
        dlc.create_training_dataset(path_config, net_type='resnet_50', augmenter_type='imgaug')

    if 0:
        # before running, update pose_cfg.yaml to use last snapshot from previous iteration as initial condition
        # (rather than starting over from visnet)
        dlc.train_network(path_config, shuffle=1, displayiters=100)

    log.info("face_fit_script complete")

    if db_exists and queueid > 0:
        nd.update_job_complete(queueid)


Code Example #11
File: DLC.py Project: justkittenaround/Working-Parts
# (optional) check the annotated frames
deeplabcut.check_labels(config_path)

# create training dataset
deeplabcut.create_training_dataset(config_path, num_shuffles=1)

# train the network --> additional parameters (gputouse expects a GPU index)
deeplabcut.train_network(config_path, shuffle=1, trainingsetindex=0, gputouse=0,
                         max_snapshots_to_keep=5, autotune=False,
                         displayiters=None, saveiters=None)

# evaluate the trained network
deeplabcut.evaluate_network(config_path, Shuffles=[1], plotting=True)

# analyze new video
deeplabcut.analyze_videos(config_path, ['path of test video'], shuffle=1, save_as_csv=True)

# create labeled video --> optional parameters
deeplabcut.create_labeled_video(config_path, ['path of test video'])

# plot trajectories of the extracted poses across the analyzed video
deeplabcut.plot_trajectories(config_path, ['path of test video'])

# extract outlier frames
deeplabcut.extract_outlier_frames(config_path, ['path of test video'])

# refine labels in the training set for the outlier frames
deeplabcut.refine_labels(config_path)

# merge the corrected frames into the existing dataset
deeplabcut.merge_datasets(config_path)