Example no. 1
    def process_motion_tracking(self, config, create_labelled_video=False):
        """
        Run DeepLabCut motion tracking on behavioural videos.
        """
        # deeplabcut is a heavy import, so it is loaded lazily, only when needed
        import deeplabcut  # pylint: disable=import-error

        self.extract_videos()

        config = Path(config).expanduser()
        if not config.exists():
            raise PixelsError(f"Config at {config} not found.")

        for recording in self.files:
            if 'camera_data' in recording:
                video = self.interim / recording['camera_data'].with_suffix(
                    '.avi')
                if not video.exists():
                    raise PixelsError(
                        f"Path {video} should exist but doesn't... discuss.")

                deeplabcut.analyze_videos(config, [video])
                deeplabcut.plot_trajectories(config, [video])
                if create_labelled_video:
                    deeplabcut.create_labeled_video(config, [video])
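Example no. 1 assumes class attributes (self.files, self.interim) and a PixelsError type from its surrounding codebase. A minimal standalone sketch of the same analyze / plot / label pipeline, with hypothetical paths, could be:

from pathlib import Path

import deeplabcut

# hypothetical paths, for illustration only
config = str(Path('~/dlc_project/config.yaml').expanduser())
videos = [str(Path('~/data/session1/camera.avi').expanduser())]

deeplabcut.analyze_videos(config, videos)
deeplabcut.plot_trajectories(config, videos)
deeplabcut.create_labeled_video(config, videos)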
Example no. 2
import os
import platform


def generate_track_h5(config_path, untracked_videos, suffix=".mp4"):
    # child processes spawned from here inherit this env var; the parent shell does not
    os.environ["DLClight"] = "True"
    if platform.system() == "Linux":
        import deeplabcut  # imported after DLClight is set, so DLC loads without GUI dependencies
        deeplabcut.analyze_videos(config_path, untracked_videos, shuffle=1,
                                  save_as_csv=True, videotype=suffix)
        deeplabcut.plot_trajectories(config_path, untracked_videos)
        deeplabcut.create_labeled_video(config_path, untracked_videos)
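Because generate_track_h5 mutates os.environ, one way to keep the DLClight flag contained is to run it in a worker process. A sketch, with hypothetical config and video paths:

import multiprocessing

if __name__ == "__main__":
    # the env var set inside generate_track_h5 stays in the child process
    worker = multiprocessing.Process(
        target=generate_track_h5,
        args=("/data/dlc_project/config.yaml", ["/data/videos/session1.mp4"]),
    )
    worker.start()
    worker.join()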
Example no. 3
    def analyze_videos(self,event):

        shuffle = self.shuffle.GetValue()
        trainingsetindex = self.trainingset.GetValue()

        save_as_csv = self.csv.GetStringSelection() == "Yes"

        if self.cfg['cropping']:
            crop = self.cfg['x1'], self.cfg['x2'], self.cfg['y1'], self.cfg['y2']
        else:
            crop = None

        # dynamic cropping: (enabled, detection threshold, margin)
        dynamic = (self.dynamic.GetStringSelection() != "No", .5, 10)

        # the filter choice is re-read from its widget below, where
        # filterpredictions is called, so no flag is kept here

        deeplabcut.analyze_videos(self.config, self.filelist, videotype=self.videotype.GetValue(), shuffle=shuffle,
                                  trainingsetindex=trainingsetindex, gputouse=None, save_as_csv=save_as_csv,
                                  destfolder=self.destfolder, crop=crop, dynamic=dynamic)
        if self.filter.GetStringSelection() == "Yes":
            deeplabcut.filterpredictions(
                self.config, self.filelist, videotype=self.videotype.GetValue(),
                shuffle=shuffle, trainingsetindex=trainingsetindex,
                filtertype='median', windowlength=5, p_bound=0.001,
                ARdegree=3, MAdegree=1, alpha=0.01, save_as_csv=True,
                destfolder=self.destfolder)

        if self.create_labeled_videos.GetStringSelection() == "Yes":
            deeplabcut.create_labeled_video(
                self.config, self.filelist, self.videotype.GetValue(),
                shuffle=shuffle, trainingsetindex=trainingsetindex,
                draw_skeleton=self.draw,
                trailpoints=self.trail_points.GetValue(), filtered=True)

        if self.trajectory.GetStringSelection() == "Yes":
            deeplabcut.plot_trajectories(self.config, self.filelist, displayedbodyparts=self.bodyparts,
                                         videotype=self.videotype.GetValue(), shuffle=shuffle, trainingsetindex=trainingsetindex, filtered=True, showfigures=False, destfolder=self.destfolder)
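Example no. 4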
def runDeepLabCut(Inputfilepath,OutputFilepath):
    '''Function inputs are the filepath to the videos to be tracked by DLC and the folder to save the output to.
    Videos are copied to the output folder, then processed by DLC based on the DLC config path.
    DLC output is saved in OutputFilepath; the output is also converted to npy and saved as well.
    '''
    
    #####################Copy Videos to DLC Folder############
    for folder in [Inputfilepath]:  #Iterates through the input folder
        for video in os.listdir(folder):  #Iterates through each video in the folder
            #ffmpeg call to copy videos to the dlc folder
            subprocess.call(['ffmpeg', '-i', Inputfilepath + '/' + video, OutputFilepath + '/' + video])


    #################### DeepLabCut ############################
    for folder in [OutputFilepath]:  #Loop through dlc folder
        for video in os.listdir(folder):
            #Analyze the videos through deeplabcut
            deeplabcut.analyze_videos(baseProjectPath + '/' + DLCconfigPath, [OutputFilepath + '/' + video], save_as_csv=True)
            deeplabcut.plot_trajectories(baseProjectPath + '/' + DLCconfigPath, [OutputFilepath + '/' + video])

    #Create DLC labeled videos for every mp4 in the dlc folder
    deeplabcut.create_labeled_video(baseProjectPath + '/' + DLCconfigPath,
                                    glob.glob(os.path.join(OutputFilepath, '*mp4')))

    #If there is not a folder for dlc npy output, create one
    if not os.path.exists(OutputFilepath + '/DLCnpy'):
        os.mkdir(OutputFilepath + '/DLCnpy')
    
    #Load all dlc csv output files
    csvfiles = glob.glob(OutputFilepath + '/*csv')
    #For loop gets csv data from all cameras
    for j, data in enumerate(csvfiles):
        datapoints = pd.read_csv(data)  #read in the csv data

        #columns 7:10 hold x, y and the likelihood; the last element is the P value,
        #and rows 3: skip the DLC header rows
        parsedDlcData = datapoints.iloc[3:, 7:10].values
        print(parsedDlcData)
        np.save(OutputFilepath + '/DLCnpy/dlc_' + cam_names[j] + '.npy', parsedDlcData)  #Save data
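The iloc[3:, 7:10] slice above hard-codes the header rows and the column offsets of one body part. DLC CSVs carry a three-row column header (scorer / bodyparts / coords), so a sketch that selects a body part by name could replace the positional slicing; the file path and the 'paw' body part here are hypothetical:

import numpy as np
import pandas as pd

# read the three DLC header rows as a column MultiIndex
df = pd.read_csv('videoDLC_output.csv', header=[0, 1, 2], index_col=0)
scorer = df.columns.get_level_values(0)[0]
# x, y and likelihood for one named body part
paw = df[scorer]['paw'][['x', 'y', 'likelihood']].to_numpy()
np.save('dlc_paw.npy', paw)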
Example no. 5
print("<<<<<<<")
import concurrent.futures
#videolists = glob.glob("/gpfsdata/home/sqiu/data/video/novel_context/20180704/asf/*asf")
#videolists = glob.glob("/gpfsdata/home/sqiu/data/video/novel_context/20180706_chs/asf/*asf")
videolists = glob.glob(
    "/gpfsdata/home/sqiu/data/video/stage_1_training/*/*.mp4")
if len(videolists) == 0:
    print("no videos were selected")
    sys.exit()
else:
    print(videolists)

config_path = r'/gpfsdata/home/sqiu/job/dlc/linear_track_40cm-QS-2019-09-06/config.yaml'


def task(gpuNo):
    deeplabcut.analyze_videos(config_path,
                              videolists,
                              shuffle=1,
                              save_as_csv=True,
                              videotype=".mp4",
                              gputouse=gpuNo)


with concurrent.futures.ProcessPoolExecutor() as executor:
    for i, _ in enumerate(executor.map(task, [0, 1, 2]), 1):
        print(i, len(videolists))
#deeplabcut.analyze_videos(config_path,videolists,shuffle=1,save_as_csv=True,videotype=".mp4",gputouse=0)
deeplabcut.plot_trajectories(config_path, videolists)
deeplabcut.create_labeled_video(config_path, videolists)
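Note that every worker above receives the full videolists, so all three GPUs analyze the same files. If the intent is to divide the work, a sketch along these lines hands each GPU its own chunk:

def task_chunk(args):
    gpu_no, videos = args
    deeplabcut.analyze_videos(config_path,
                              videos,
                              shuffle=1,
                              save_as_csv=True,
                              videotype=".mp4",
                              gputouse=gpu_no)


gpus = [0, 1, 2]
# round-robin split of the video list, one chunk per GPU
chunks = [videolists[i::len(gpus)] for i in range(len(gpus))]
with concurrent.futures.ProcessPoolExecutor() as executor:
    list(executor.map(task_chunk, zip(gpus, chunks)))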
Example no. 6
        DLC_config = deeplabcut.auxiliaryfunctions.edit_config(posefile, edits)

    print("TRAIN NETWORK", shuffle)
    deeplabcut.train_network(
        path_config_file,
        shuffle=shuffle,
        saveiters=10000,
        displayiters=200,
        maxiters=maxiters,
        max_snapshots_to_keep=11,
    )

    print("EVALUATE")
    deeplabcut.evaluate_network(path_config_file,
                                Shuffles=[shuffle],
                                plotting=True)

    print("Analyze Video")

    videofile_path = os.path.join(os.getcwd(), "openfield-Pranav-2018-10-30",
                                  "videos", "m3v1mp4.mp4")

    deeplabcut.analyze_videos(path_config_file, [videofile_path],
                              shuffle=shuffle)

    print("Create Labeled Video and plot")
    deeplabcut.create_labeled_video(path_config_file, [videofile_path],
                                    shuffle=shuffle)
    deeplabcut.plot_trajectories(path_config_file, [videofile_path],
                                 shuffle=shuffle)
    # (excerpt truncated: the call that produced `newvideo`, ending with
    # outpath=os.path.join(cfg['project_path'], 'videos'), is missing here)
    vname = Path(newvideo).stem
    deeplabcut.analyze_videos(path_config_file, [newvideo],
                              shuffle=shuffle,
                              save_as_csv=True,
                              destfolder=dfolder)

    print("CREATE VIDEO")
    deeplabcut.create_labeled_video(path_config_file, [newvideo],
                                    shuffle=shuffle,
                                    destfolder=dfolder)

    print("Making plots")
    deeplabcut.plot_trajectories(path_config_file, [newvideo],
                                 shuffle=shuffle,
                                 destfolder=dfolder)

    print("EXTRACT OUTLIERS")
    deeplabcut.extract_outlier_frames(path_config_file, [newvideo],
                                      shuffle=shuffle,
                                      outlieralgorithm='jump',
                                      epsilon=0,
                                      automatic=True,
                                      destfolder=dfolder)
    file = os.path.join(cfg['project_path'], 'labeled-data', vname,
                        "machinelabels-iter" + str(cfg['iteration']) + '.h5')

    print("RELABELING")
    DF = pd.read_hdf(file, 'df_with_missing')
    DLCscorer = np.unique(DF.columns.get_level_values(0))[0]
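Example no. 7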
    def analyze_videos(self, event):

        shuffle = self.shuffle.GetValue()
        trainingsetindex = self.trainingset.GetValue()

        if self.cfg.get("multianimalproject", False):
            print("Analyzing ... ")
        else:
            save_as_csv = self.csv.GetStringSelection() == "Yes"
            # dynamic cropping: (enabled, detection threshold, margin)
            dynamic = (self.dynamic.GetStringSelection() != "No", 0.5, 10)
            # the filter choice is re-read from its widget below, so no flag is kept here

        if self.cfg["cropping"] == "True":
            crop = self.cfg["x1"], self.cfg["x2"], self.cfg["y1"], self.cfg[
                "y2"]
        else:
            crop = None

        if self.cfg.get("multianimalproject", False):
            robust = self.robust.GetStringSelection() != "No"
            scorername = deeplabcut.analyze_videos(
                self.config,
                self.filelist,
                videotype=self.videotype.GetValue(),
                shuffle=shuffle,
                trainingsetindex=trainingsetindex,
                gputouse=None,
                cropping=crop,
                robust_nframes=robust,
            )
            if self.create_video_with_all_detections.GetStringSelection() == "Yes":
                trainFrac = self.cfg["TrainingFraction"][trainingsetindex]
                scorername, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                    self.cfg, shuffle, trainFraction=trainFrac)
                print(scorername)
                deeplabcut.create_video_with_all_detections(
                    self.config, self.filelist, DLCscorername=scorername)

        else:
            scorername = deeplabcut.analyze_videos(
                self.config,
                self.filelist,
                videotype=self.videotype.GetValue(),
                shuffle=shuffle,
                trainingsetindex=trainingsetindex,
                gputouse=None,
                save_as_csv=save_as_csv,
                cropping=crop,
                dynamic=dynamic,
            )
            if self.filter.GetStringSelection() == "Yes":
                deeplabcut.filterpredictions(
                    self.config,
                    self.filelist,
                    videotype=self.videotype.GetValue(),
                    shuffle=shuffle,
                    trainingsetindex=trainingsetindex,
                    filtertype="median",
                    windowlength=5,
                    save_as_csv=True,
                )

            if self.trajectory.GetStringSelection() == "Yes":
                showfig = self.showfigs.GetStringSelection() != "No"
                deeplabcut.plot_trajectories(
                    self.config,
                    self.filelist,
                    displayedbodyparts=self.bodyparts,
                    videotype=self.videotype.GetValue(),
                    shuffle=shuffle,
                    trainingsetindex=trainingsetindex,
                    filtered=True,
                    showfigures=showfig,
                )
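The Yes/No string checks recur throughout these wxPython handlers; a tiny helper (hypothetical name yes_no) would condense them:

def yes_no(widget):
    """Map a wx Yes/No choice widget to a boolean."""
    return widget.GetStringSelection() == "Yes"

# e.g.: save_as_csv = yes_no(self.csv); robust = yes_no(self.robust)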
Example no. 9
def runOPandDLC():
    #Set directory, and original filepath
    os.chdir("/Windows/system32")

    origfilepath = baseProjectPath + '/' + subject + '/' + sessionID
    rawfilepath = baseProjectPath + '/' + subject + '/' + sessionID + '/Raw'

    #Create main filepath for Intermediate processed
    if not os.path.exists(origfilepath + '/Intermediate'):
        os.mkdir(origfilepath + '/Intermediate')
    interfilepath = origfilepath + '/Intermediate'

    if not os.path.exists(origfilepath + '/Processed'):
        os.mkdir(origfilepath + '/Processed')

    datadir1 = [rawfilepath + '/' + rawVideoFolder]

    #Create a folder for the resized videos
    if not os.path.exists(interfilepath + '/Resized'):
        os.mkdir(interfilepath + '/Resized')
    filepath1 = interfilepath + '/Resized'
    datadir2 = [filepath1]

    #Create a folder for the undistorted videos
    if not os.path.exists(interfilepath + '/Undistorted'):
        os.mkdir(interfilepath + '/Undistorted')
    filepath2 = interfilepath + '/Undistorted'
    datadir3 = [filepath2]

    #Create a folder for the deeplabcut output
    if not os.path.exists(interfilepath + '/DeepLabCut'):
        os.mkdir(interfilepath + '/DeepLabCut')
    filepath3 = interfilepath + '/DeepLabCut'
    datadir4 = [filepath3]

    #Create a folder for the openpose output
    if not os.path.exists(interfilepath + '/OpenPose'):
        os.mkdir(interfilepath + '/OpenPose')
    filepath4 = interfilepath + '/OpenPose'

    #Create a folder for videos
    if not os.path.exists(interfilepath + '/VideoOutput'):
        os.mkdir(interfilepath + '/VideoOutput')
    filepath5 = interfilepath + '/VideoOutput'

    #Use ffmpeg to resize videos and save them in the just created resized video folder
    for dir in datadir1:
        for video in os.listdir(dir):
            subprocess.call([
                'ffmpeg', '-i',
                rawfilepath + '/' + rawVideoFolder + '/' + video, '-vf',
                'scale=1280:960', filepath1 + '/' + video
            ])

    #camBvids = open(filepath1+'/camBvids.txt','a')
    #camCvids = open(filepath1+'/camCvids.txt','a')
    #camDvids = open(filepath1+'/camDvids.txt','a')
    #if num_of_Video_parts >1 :
    #   for dir in datadir1:
    #       for video in os.listdir(dir):
    #          if video[:4] == 'CamA':
    #              camAvids = open(filepath1+'/camAvids.txt','a')
    #               camAvids.write('CamA')
    #          if video[:4] == 'CamB':
    #             camBvids.write(filepath1+'/'+video)
    #         if video[:4] == 'CamC':
    #             camCvids.write(filepath1+'/'+video)
    #        if video[:4] == 'CamD':
    #           camDvids.write(filepath1+'/'+video)

    # subprocess.call(['ffmpeg', '-f', 'concat', '-safe', '0', '-i', filepath1+'/camAvids.txt', '-c' ,'copy' ,filepath1+'/CamA.mp4'])
    #subprocess.call(['ffmpeg', '-f', 'concat', '-safe', '0', '-i', filepath1+'/camBvids.txt', '-c' ,'copy' ,filepath1+'/CamB.mp4'])
    #subprocess.call(['ffmpeg', '-f', 'concat', '-safe', '0', '-i', filepath1+'/camCvids.txt', '-c' ,'copy' ,filepath1+'/CamC.mp4'])
    #subprocess.call(['ffmpeg', '-f', 'concat', '-safe', '0', '-i', filepath1+'/camDvids.txt', '-c' ,'copy' ,filepath1+'/CamD.mp4'])

    #Use ffmpeg to Undistort videos
    for dir in datadir2:
        for video in os.listdir(dir):
            subprocess.call([
                'ffmpeg', '-i', filepath1 + '/' + video, '-vf',
                "lenscorrection=cx=0.5:cy=0.5:k1=-.115:k2=-0.022",
                filepath2 + '/' + video
            ])

    #Use deeplabcut to analyze videos and save the results to the folder for processed videos

    for dir in datadir3:
        for video in os.listdir(dir):

            deeplabcut.analyze_videos(baseProjectPath + '/' + DLCconfigPath,
                                      [filepath2 + '/' + video],
                                      videotype='mp4',
                                      destfolder=filepath3,
                                      save_as_csv=True)
            deeplabcut.plot_trajectories(baseProjectPath + '/' + DLCconfigPath,
                                         [filepath2 + '/' + video],
                                         videotype='mp4',
                                         destfolder=filepath3)
        #  deeplabcut.create_labeled_video(DLCconfigPath,[filepath2 +'/'+ video],videotype = 'mp4', destfolder = filepath5)

    #Change directory to openpose and run openpose on the videos then save the results to the processed video folder

    os.chdir("/Users/MatthisLab/openpose")
    for dir in datadir3:
        for video in os.listdir(dir):
            videoName = video[:4]
            subprocess.call([
                'bin/OpenPoseDemo.exe', '--video', filepath2 + '/' + video,
                '--hand', '--face', '--write_video',
                filepath5 + '/OpenPose' + videoName + '.avi', '--write_json',
                filepath4 + '/' + videoName
            ])
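Example no. 9 repeats the same exists/mkdir pattern for every folder; a small helper (hypothetical name ensure_dir) would shorten it:

import os

def ensure_dir(path):
    """Create `path` if it does not exist yet and return it."""
    os.makedirs(path, exist_ok=True)
    return path

# e.g. replaces the Resized / Undistorted / DeepLabCut blocks above:
# filepath1 = ensure_dir(interfilepath + '/Resized')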
Example no. 10
# In[14]:


deeplabcut.evaluate_network(path_config)


# In[15]:


deeplabcut.analyze_videos(path_config, ['/home/amr/Trial_DeepLabCut/mouse.avi'], save_as_csv=True)


# In[26]:


deeplabcut.create_labeled_video(path_config, ['/home/amr/Trial_DeepLabCut/mouse.avi'])


# In[27]:


deeplabcut.create_labeled_video(path_config, ['/home/amr/Trial_DeepLabCut/mouse.avi'], save_frames=True)


# In[29]:


deeplabcut.plot_trajectories(path_config, ['/home/amr/Trial_DeepLabCut/mouse.avi'])

Example no. 11
def plot_trajectories(projectId):
    config_path = projectRepository.find_one({'_id': ObjectId(projectId)
                                              })['config_path']
    deeplabcut.plot_trajectories(config_path, [], filtered=True)
    return "Not Implemented", 501
Example no. 12
        destfolder=DESTFOLDER,
        dynamic=(True, 0.1, 5),
    )

    print("analyze again...")
    deeplabcut.analyze_videos(path_config_file, [newvideo],
                              save_as_csv=True,
                              destfolder=DESTFOLDER)

    print("CREATE VIDEO")
    deeplabcut.create_labeled_video(path_config_file, [newvideo],
                                    destfolder=DESTFOLDER,
                                    save_frames=True)

    print("Making plots")
    deeplabcut.plot_trajectories(path_config_file, [newvideo],
                                 destfolder=DESTFOLDER)

    print("EXTRACT OUTLIERS")
    deeplabcut.extract_outlier_frames(
        path_config_file,
        [newvideo],
        outlieralgorithm="jump",
        epsilon=0,
        automatic=True,
        destfolder=DESTFOLDER,
    )

    deeplabcut.extract_outlier_frames(
        path_config_file,
        [newvideo],
        outlieralgorithm="fitting",
Example no. 13
print("\n")
print("\n")
print("'config_path' is:", config_path)
print("'dlc.__version__' is:'", dlc.__version__)
print("\n")
print("This is the name of the program:", sys.argv[0])
print("str(sys.argv):", str(sys.argv), "\n")
print("\n")
print("\n")

edits = {'snapshotindex': snapshotindex}

print('\nediting the config file... ')
for item in edits.items():
    print(item)

dlc.auxiliaryfunctions.edit_config(config_path, edits)

print('edit completed!')

dlc.plot_trajectories(config=config_path, videos=videos_path_list, videotype='.mp4', shuffle=shuffleindex,
                      trainingsetindex=0, filtered=False, showfigures=False, destfolder=None)

print("dlc_plot_trajectories.py with the call", str(sys.argv), "is done!")

print("returning snapshotindex back to 'all'...!")
edits = {'snapshotindex': 'all'}
dlc.auxiliaryfunctions.edit_config(config_path, edits)
print("snapshotindex is set back to 'all'")
Example no. 14
    /February/February1
    /February/February2

    etc.
'''
'''
subfolders=getsubfolders(basepath)
for subfolder in subfolders: #this would be January, February etc. in the upper example
    print("Starting analyze data in:", subfolder)
    subsubfolders=getsubfolders(subfolder)
    for subsubfolder in subsubfolders: #this would be February1, etc. in the upper example...'''
subfolders = getsubfolders(basepath)
'''for subfolder in subfolders:
    print("Starting analyze data in:", subfolder)
    subsubfolders=getsubfolders(subfolder)
    for subsubfolder in subsubfolders:
        print("Starting analyze data in:", subsubfolder)'''
for path in videopath:
    for vtype in ['.mp4']:
        deeplabcut.analyze_videos(config, [path],
                                  shuffle=shuffle,
                                  videotype=vtype,
                                  save_as_csv=True)
        deeplabcut.filterpredictions(config, [path],
                                     videotype=vtype,
                                     shuffle=shuffle)
        deeplabcut.plot_trajectories(config, [path], videotype=vtype)
        deeplabcut.create_labeled_video(config, [path],
                                        videotype=vtype,
                                        filtered=True)
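This script relies on a getsubfolders helper that is not shown; presumably it is something like this sketch:

import os

def getsubfolders(folder):
    """Return the full paths of the immediate subdirectories of `folder`."""
    return [os.path.join(folder, name)
            for name in sorted(os.listdir(folder))
            if os.path.isdir(os.path.join(folder, name))]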
Example no. 15
    def create_videos(self, event):

        shuffle = self.shuffle.GetValue()
        trainingsetindex = self.trainingset.GetValue()
        # self.filelist = self.filelist + self.vids

        filtered = self.filter.GetStringSelection() != "No"
        self.slow = self.video_slow.GetStringSelection() == "Yes"

        if len(self.bodyparts) == 0:
            self.bodyparts = "all"

        config_file = auxiliaryfunctions.read_config(self.config)
        if config_file.get("multianimalproject", False):
            print("Creating a video with the " + self.trackertypes.GetValue() +
                  " tracker method!")
            if self.plot_idv.GetStringSelection() == "Yes":
                color_by = "individual"
            else:
                color_by = "bodypart"

            deeplabcut.create_labeled_video(
                self.config,
                self.filelist,
                self.videotype.GetValue(),
                shuffle=shuffle,
                trainingsetindex=trainingsetindex,
                save_frames=self.slow,
                draw_skeleton=self.draw,
                displayedbodyparts=self.bodyparts,
                trailpoints=self.trail_points.GetValue(),
                filtered=filtered,
                color_by=color_by,
                track_method=self.trackertypes.GetValue(),
            )

            if self.trajectory.GetStringSelection() == "Yes":
                deeplabcut.plot_trajectories(
                    self.config,
                    self.filelist,
                    displayedbodyparts=self.bodyparts,
                    videotype=self.videotype.GetValue(),
                    shuffle=shuffle,
                    trainingsetindex=trainingsetindex,
                    filtered=filtered,
                    showfigures=False,
                    track_method=self.trackertypes.GetValue(),
                )
        else:
            deeplabcut.create_labeled_video(
                self.config,
                self.filelist,
                self.videotype.GetValue(),
                shuffle=shuffle,
                trainingsetindex=trainingsetindex,
                save_frames=self.slow,
                draw_skeleton=self.draw,
                displayedbodyparts=self.bodyparts,
                trailpoints=self.trail_points.GetValue(),
                filtered=filtered,
            )
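Example no. 16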
        df.to_hdf(
            picklefile.replace("pickle", "h5"),
            "df_with_missing",
            format="table",
            mode="w",
        )
        df.to_hdf(
            picklefile.replace("sk", "bx").replace("pickle", "h5"),
            "df_with_missing",
            format="table",
            mode="w",
        )

    print("Plotting trajectories...")
    deeplabcut.plot_trajectories(config_path, [new_video_path],
                                 "mp4",
                                 track_method="box")
    deeplabcut.plot_trajectories(config_path, [new_video_path],
                                 "mp4",
                                 track_method="skeleton")
    print("Trajectory plotted.")

    print("Creating labeled video...")
    deeplabcut.create_labeled_video(
        config_path,
        [new_video_path],
        "mp4",
        save_frames=False,
        color_by="individual",
        track_method="box",
    )
Example no. 17
def create_pretrained_project(
    project,
    experimenter,
    videos,
    model="full_human",
    working_directory=None,
    copy_videos=False,
    videotype=None,
    analyzevideo=True,
    filtered=True,
    createlabeledvideo=True,
    trainFraction=None,
):
    """
    Creates a new project directory, sub-directories and a basic configuration file.
    Change its parameters to your projects need.

    The project will also be initialized with a pre-trained model from the DeepLabCut model zoo!

    http://www.mousemotorlab.org/dlc-modelzoo

    Parameters
    ----------
    project : string
        String containing the name of the project.

    experimenter : string
        String containing the name of the experimenter.

    model: string, options see  http://www.mousemotorlab.org/dlc-modelzoo
        Current option and default: 'full_human'  Creates a demo human project and analyzes a video with ResNet 101 weights pretrained on MPII Human Pose. This is from the DeeperCut paper
        by Insafutdinov et al. https://arxiv.org/abs/1605.03170 Please make sure to cite it too if you use this code!

    videos : list
        A list of string containing the full paths of the videos to include in the project.

    working_directory : string, optional
        The directory where the project will be created. The default is the ``current working directory``; if provided, it must be a string.

    copy_videos : bool, optional  ON WINDOWS: TRUE is often necessary!
        If this is set to True, the videos are copied to the ``videos`` directory. If it is False,symlink of the videos are copied to the project/videos directory. The default is ``False``; if provided it must be either
        ``True`` or ``False``.

    analyzevideo " bool, optional
        If true, then the video is analzyed and a labeled video is created. If false, then only the project will be created and the weights downloaded. You can then access them

    filtered: bool, default false
        Boolean variable indicating if filtered pose data output should be plotted rather than frame-by-frame predictions.
        Filtered version can be calculated with deeplabcut.filterpredictions

    trainFraction: float, optional
        Fraction used in the dlc-models/trainingset folder name. Defaults to the value for *new* projects (0.95).

    Example
    --------
    Linux/MacOS: loading the full_human model and analyzing the video /homosapiens1.avi
    >>> deeplabcut.create_pretrained_project('humanstrokestudy','Linus',['/data/videos/homosapiens1.avi'], copy_videos=False)

    Loading the full_cat model and analyzing the video "felixfeliscatus3.avi"
    >>> deeplabcut.create_pretrained_project('humanstrokestudy','Linus',['/data/videos/felixfeliscatus3.avi'], model='full_cat')

    Windows:
    >>> deeplabcut.create_pretrained_project('humanstrokestudy','Bill',[r'C:\yourusername\rig-95\Videos\reachingvideo1.avi'],r'C:\yourusername\analysis\project', copy_videos=True)
    Users must format Windows paths as either a raw string (r'C:\...') or with double backslashes ('C:\\...').

    """
    if model in globals()["Modeloptions"]:
        cwd = os.getcwd()

        cfg = deeplabcut.create_new_project(project, experimenter, videos,
                                            working_directory, copy_videos,
                                            videotype)
        if trainFraction is not None:
            auxiliaryfunctions.edit_config(
                cfg, {"TrainingFraction": [trainFraction]})

        config = auxiliaryfunctions.read_config(cfg)
        if model == "full_human":
            config["bodyparts"] = [
                "ankle1",
                "knee1",
                "hip1",
                "hip2",
                "knee2",
                "ankle2",
                "wrist1",
                "elbow1",
                "shoulder1",
                "shoulder2",
                "elbow2",
                "wrist2",
                "chin",
                "forehead",
            ]
            config["skeleton"] = [
                ["ankle1", "knee1"],
                ["ankle2", "knee2"],
                ["knee1", "hip1"],
                ["knee2", "hip2"],
                ["hip1", "hip2"],
                ["shoulder1", "shoulder2"],
                ["shoulder1", "hip1"],
                ["shoulder2", "hip2"],
                ["shoulder1", "elbow1"],
                ["shoulder2", "elbow2"],
                ["chin", "forehead"],
                ["elbow1", "wrist1"],
                ["elbow2", "wrist2"],
            ]
            config["default_net_type"] = "resnet_101"
        else:  # just make a case and put the stuff you want.
            # TBD: 'partaffinityfield_graph' >> use to set skeleton!
            pass

        auxiliaryfunctions.write_config(cfg, config)
        config = auxiliaryfunctions.read_config(cfg)

        train_dir = Path(
            os.path.join(
                config["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction=config["TrainingFraction"][0],
                        shuffle=1,
                        cfg=config,
                    )),
                "train",
            ))
        test_dir = Path(
            os.path.join(
                config["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction=config["TrainingFraction"][0],
                        shuffle=1,
                        cfg=config,
                    )),
                "test",
            ))

        # Create the model directory
        train_dir.mkdir(parents=True, exist_ok=True)
        test_dir.mkdir(parents=True, exist_ok=True)

        modelfoldername = auxiliaryfunctions.GetModelFolder(
            trainFraction=config["TrainingFraction"][0], shuffle=1, cfg=config)
        path_train_config = str(
            os.path.join(config["project_path"], Path(modelfoldername),
                         "train", "pose_cfg.yaml"))
        path_test_config = str(
            os.path.join(config["project_path"], Path(modelfoldername), "test",
                         "pose_cfg.yaml"))

        # Download the weights and put them in the appropriate directory
        print("Downloading weights...")
        auxfun_models.DownloadModel(model, train_dir)

        pose_cfg = deeplabcut.auxiliaryfunctions.read_plainconfig(
            path_train_config)
        print(path_train_config)
        # Updating config file:
        config_edits = {
            "default_net_type": pose_cfg["net_type"],
            "default_augmenter": pose_cfg["dataset_type"],
            "bodyparts": pose_cfg["all_joints_names"],
            "skeleton": [],  # TODO: update with paf_graph
            "dotsize": 6,
        }
        auxiliaryfunctions.edit_config(cfg, config_edits)

        # Create the pose_config.yaml files
        parent_path = Path(os.path.dirname(deeplabcut.__file__))
        defaultconfigfile = str(parent_path / "pose_cfg.yaml")
        trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(config)
        datafilename, metadatafilename = auxiliaryfunctions.GetDataandMetaDataFilenames(
            trainingsetfolder,
            trainFraction=config["TrainingFraction"][0],
            shuffle=1,
            cfg=config,
        )

        # downloading base encoder / not required unless one re-trains (but when a training set is created this happens anyway)
        # model_path, num_shuffles=auxfun_models.Check4weights(pose_cfg['net_type'], parent_path, num_shuffles= 1)

        # Updating training and test pose_cfg:
        snapshotname = [fn for fn in os.listdir(train_dir)
                        if ".meta" in fn][0].split(".meta")[0]
        dict2change = {
            "init_weights": str(os.path.join(train_dir, snapshotname)),
            "project_path": str(config["project_path"]),
        }

        UpdateTrain_pose_yaml(pose_cfg, dict2change, path_train_config)
        keys2save = [
            "dataset",
            "dataset_type",
            "num_joints",
            "all_joints",
            "all_joints_names",
            "net_type",
            "init_weights",
            "global_scale",
            "location_refinement",
            "locref_stdev",
        ]

        MakeTest_pose_yaml(pose_cfg, keys2save, path_test_config)

        video_dir = os.path.join(config["project_path"], "videos")
        if analyzevideo:
            print("Analyzing video...")
            deeplabcut.analyze_videos(cfg, [video_dir],
                                      videotype,
                                      save_as_csv=True)

        if createlabeledvideo:
            if filtered:
                deeplabcut.filterpredictions(cfg, [video_dir], videotype)

            print("Plotting results...")
            deeplabcut.create_labeled_video(cfg, [video_dir],
                                            videotype,
                                            draw_skeleton=True,
                                            filtered=filtered)
            deeplabcut.plot_trajectories(cfg, [video_dir],
                                         videotype,
                                         filtered=filtered)

        os.chdir(cwd)
        return cfg, path_train_config

    else:
        return "N/A", "N/A"
Example no. 18
def usingDeep(path_config, project):
    while True:
        video_path = path_config.split("/")
        video_path = '/' + video_path[1] + '/' + video_path[
            2] + '/' + video_path[3] + '/' + video_path[4] + '/videos/'
        print_usage("project")
        action = input()
        while action not in range(13):
            try:
                action = int(action)
                if action not in range(13):
                    print("please type a number between 0 and 12")
                    action = input()
            except ValueError:
                print("Please enter a number")
                action = input()
        if action == 0:
            return False
        elif action == 1:
            print("do you want to crop the video? yes/no")
            if input() == "yes" or "y":
                print(
                    "how many videos you you want to crop? (use number: 1,2,3 etc)"
                )
                crops = int(input())
                print("only crop all the video's and than exit")
                for loop in range(0, crops):
                    deeplabcut.extract_frames(path_config,
                                              'manual',
                                              'kmeans',
                                              crop=True)
            deeplabcut.extract_frames(path_config,
                                      'automatic',
                                      'kmeans',
                                      crop=True)
        elif action == 2:
            deeplabcut.label_frames(path_config)
        elif action == 3:
            deeplabcut.check_labels(path_config)
        elif action == 4:
            deeplabcut.create_training_dataset(path_config)
        elif action == 5:
            with open("training_network.py") as fp:
                lines = fp.readlines()
                lines[3] = lines[3].split("=")
                lines[3] = lines[3][0] + "= '" + path_config + "'\n"

            with open("training_network.py", "w") as fp:
                for line in lines:
                    fp.writelines(line)

            print("run: sbatch slurm.sh")
            return True
        elif action == 6:
            try:
                deeplabcut.evaluate_network(path_config,
                                            Shuffles=[1],
                                            trainingsetindex=0,
                                            plotting=None,
                                            show_errors=True,
                                            comparisonbodyparts='all',
                                            gputouse=None,
                                            rescale=False)
            except OSError:
                print("file does not exist")
        elif action == 7:
            print("\nType video name in project/videos you want to analyze")
            video_path = video_path + create_dict_list(
                path_config[:-11] + "videos/", 1)[0]
            with open("training_network.py") as fp:
                lines = fp.readlines()
                lines[3] = lines[3].split("=")
                lines[3] = lines[3][0] + "= '" + path_config + "'\n"
                lines[4] = lines[4].split("=")
                lines[4] = lines[4][0] + "= '" + video_path + "'\n"

            with open("training_network.py", "w") as fp:
                for line in lines:
                    fp.writelines(line)
            print(
                "run: sbatch slurm.sh after changing the command in training_network.py"
            )
            return True
        elif action == 8:
            print(
                "\nChoose the video in project/videos you want to plot trajectories from"
            )
            video_path = video_path + create_dict_list(
                path_config[:-11] + "videos/", 1)[0]
            print(video_path)
            deeplabcut.plot_trajectories(path_config, [video_path],
                                         filtered=True)
        elif action == 9:
            print(
                "\nChoose the video in project/videos you want to make a labeled video from"
            )
            video_path = video_path + create_dict_list(
                path_config[:-11] + "videos/", 1)[0]
            deeplabcut.create_labeled_video(path_config, [video_path],
                                            videotype='.mp4',
                                            draw_skeleton=True)
        elif action == 10:
            print("\nChoose where to upload the video from")
            video_path = '/data/11012579/videos/' + create_dict_list(
                '/data/11012579/videos/', 0)[0]
            print("\nChoose which video to upload")
            video_path_list = [
                video_path + "/" + create_dict_list(video_path, 1)[0]
            ]
            while True:
                print("\nDo you want to add more videos?\nType yes or no")
                if input() == 'yes':
                    video_path_list.append(video_path + "/" +
                                           create_dict_list(video_path, 1)[0])
                else:
                    deeplabcut.add_new_videos(path_config,
                                              video_path_list,
                                              copy_videos=False)
                    break
        elif action == 11:
            print("also here")
            Dlc_results2 = pd.read_hdf(
                '/data/11012579/videos/vidDLC_resnet50_demo_project_grab2Feb7shuffle1_11500.h5'
            )
            Dlc_results2.plot()
        else:
            print_usage("error")

        print("klaar")
Example no. 19
def analyze_and_clean_arm_dat(dat_mantle_dlc, adir, pcutoff,
                              body_part_line_list, play_back_speed):

    #gets a path to the raw video that the previous script created when making the above csv
    vid = glob.glob(adir + '\\*.mp4')[0]

    #gets the crop parameters from mantle dlc as a string list
    print('getting crop parameters')
    crop_parameters = dat_mantle_dlc['crop_parameters']
    #gets the (x1,y1) crop parameters as a list of 1x2 floats
    crop_x1y1_parameters = [
        from_crop_parameters_get_x1y1(s) for s in crop_parameters
    ]
    #gets a list of the paths to the original images cropped from
    #img_paths_list = dat_mantle_dlc['img_paths'].to_list()

    #path to were the next step of dlc analysis will go
    path_arm_dir = adir + '\\arm_dlc_data'
    print('checking if: ' + path_arm_dir + ' exists')
    #if the directory hasn't been made before, make it and then run the dlc analysis
    if not os.path.exists(path_arm_dir):
        print('making: ' + path_arm_dir)
        os.mkdir(path_arm_dir)
        path_config = 'C:\\Users\\wlwee\\Documents\\python\\fhl_three_target_experiment\\MODEL\\arms-weert-2020-10-18\\config.yaml'

        deeplabcut.analyze_videos(path_config, [vid],
                                  videotype='mp4',
                                  save_as_csv=True,
                                  destfolder=path_arm_dir)
        deeplabcut.create_labeled_video(path_config, [vid],
                                        videotype='mp4',
                                        draw_skeleton='True',
                                        destfolder=path_arm_dir)
        deeplabcut.plot_trajectories(path_config, [vid],
                                     videotype='mp4',
                                     destfolder=path_arm_dir)
    else:
        print(path_arm_dir +
              ' already exists, skipping dlc arm model analysis')

    #opens the dlc arm model data
    print('getting dlc data')
    path_arm_dlc = glob.glob(path_arm_dir + '\\*.h5')[0]
    print('opening: ' + path_arm_dlc)
    dat_arm_dlc = pd.read_hdf(path_arm_dlc)
    #converts the arm model data frame into a numpy array which is easier to work with
    arm_dlc_numpy = dat_arm_dlc.to_numpy()

    #makes a path to were we will be storing the cleaned arm model data
    path_clean_arm_data_dir = path_arm_dir + '//clean_arm_data'
    #if the directory hasn't been made before, this makes it
    print('checking if: ' + path_clean_arm_data_dir + ' exists')
    if not os.path.exists(path_clean_arm_data_dir):
        os.mkdir(path_clean_arm_data_dir)
    else:
        print(path_clean_arm_data_dir + ' already exists, not making it again')

    #makes four empty dataframes which we will use to store the results from the below script as csvs
    dat_arm_dlc_clean = pd.DataFrame()
    dat_arm_dlc_clean_real = pd.DataFrame()
    dat_arm_speeds = pd.DataFrame()
    dat_arm_speeds_real = pd.DataFrame()

    print()
    for ii in range(
            0,
            len(arm_dlc_numpy[0]) - 2,
            3):  #steps left to right through the dataframe by body part

        body_part = dat_arm_dlc.columns[ii][1]
        print(body_part)

        print(ii, ii + 1, ii + 2)

        x = []
        y = []
        p = []
        s = 0

        for iii in range(
                0, len(arm_dlc_numpy)
        ):  #steps top to bottom through the dataframe for a body part
            x_a = arm_dlc_numpy[iii][ii]  #grab the dlc x value
            y_a = arm_dlc_numpy[iii][ii + 1]  #grab the dlc y value
            p_a = arm_dlc_numpy[iii][ii + 2]  #grab the probability value

            if p_a < pcutoff:  #if p_a is below the cutoff, fall back to the last confident detection
                x_a = arm_dlc_numpy[s][ii]
                y_a = arm_dlc_numpy[s][ii + 1]
                p_a = arm_dlc_numpy[s][ii + 2]

            else:  #p_a >= pcutoff; remember this index in s as the latest confident detection
                s = iii

            p.append(p_a)
            x.append(x_a)
            y.append(y_a)

        #speed lists, filled by the loop below (initialized once, not per frame)
        d_x = []
        d_y = []
        abs_d_x = []
        abs_d_y = []

        for n in range(
                1,
                len(x) - 1
        ):  #time steps through the extracted values from the dataframe, drops 1st and last values
            speed_x = (x[n] - x[n - 1])
            speed_y = (y[n] - y[n - 1])
            '''
            #doing a speed replacement to the last known location ruins it
            #this is because if it jumps a short distance it will get stuck
            if speed_x > 120:
                x[n] = x[n-1]
                speed_x = abs(x[n] - x[n-1])
            if speed_y > 120:
                y[n] = y[n-1]
                speed_y = abs(y[n] - y[n-1])'''

            d_x.append(speed_x)
            d_y.append(speed_y)
            abs_d_x.append(abs(speed_x))
            abs_d_y.append(abs(speed_y))

        #checking what the speeds are like before filtering
        mean_abs_dx, st_abs_x = st.mean(abs_d_x), st.stdev(abs_d_x)
        mean_abs_dy, st_abs_y = st.mean(abs_d_y), st.stdev(abs_d_y)

        mean_dx, st_x = st.mean(d_x), st.stdev(d_x)
        mean_dy, st_y = st.mean(d_y), st.stdev(d_y)

        ## some speed filtering step should be here
        ## pure linear interpolation does not fix jumps it 'smoothes' them

        # finds indexes where the speeds are outside of mean + 2*stdev d_x
        # does not filter slow speeds only fast speeds
        index_to_interpolate = []
        for i in range(0, len(d_x)):
            '''
            #mantle
            if ii <= 6:
                if d_x[i] > mean_dx + 2 * st_x or d_y[i] > mean_dy + 2 * st_y:
                    index_to_interpolate.append(i + 1) '''
            #arms
            if ii > 6:
                #if abs(d_x[i]) > mean_abs_dx + 1.5 * st_abs_x or abs(d_y[i]) > mean_abs_dy + 1.5 * st_abs_y or abs(d_x[i]) > 80 or abs(d_y[i]) > 80:
                if abs(d_x[i]) > 50 or abs(d_y[i]) > 50:
                    index_to_interpolate.append(i + 1)

        interpolate_index_tuple = []
        x_i, y_i = x[:], y[:]  #copy the lists so we can manipulate them w/o changing the originals
        for i in range(0, len(index_to_interpolate)):

            more, less = False, False  #two booleans so we can loop until they are both True

            #grabs the first before and after indexs
            x1 = x_i[index_to_interpolate[i] - 1]
            y1 = y_i[index_to_interpolate[i] - 1]
            x2 = x_i[index_to_interpolate[i] + 1]
            y2 = y_i[index_to_interpolate[i] + 1]

            itt_1 = index_to_interpolate[i] - 1
            itt_2 = index_to_interpolate[i] + 1

            n, m = 1, 1  #grabs starting number of steps and step count
            while more == False and less == False:
                if i + n < len(
                        index_to_interpolate
                ):  #we iterate n forward until we get to a frame that does not need to be iterated
                    if index_to_interpolate[i] + n == index_to_interpolate[i +
                                                                           n]:
                        #if we have a list iterate 16,17,18,20 and we are at index 0
                        #we step till index 2 then stop there
                        #we interpolate till the x2 index 18 + 1
                        x2 = x_i[index_to_interpolate[i + n] + 1]
                        y2 = y_i[index_to_interpolate[i + n] + 1]
                        itt_2 = index_to_interpolate[i + n] + 1
                        n = n + 1
                    else:
                        more = True
                else:
                    more = True

                if i - m > 0:  #same as above but going backwards
                    if index_to_interpolate[i] - m == index_to_interpolate[i -
                                                                           m]:
                        x1 = x_i[index_to_interpolate[i - m] - 1]
                        y1 = y_i[index_to_interpolate[i - m] - 1]
                        itt_1 = index_to_interpolate[i - m] - 1
                        m = m + 1
                    else:
                        less = True
                else:
                    less = True

            interpolate_index_tuple.append([itt_1, itt_2, i - m, i + n])
            #n equals steps forward from the bad x value to the x value we want to iterate to
            #m equals steps backwards from the bad x value to the x value we want to iterate to
            #steps = n + m
            at = m / (n + m)
            '''
            #incase of errors uncomment this section
            print()
            print(ii, iii)
            print(x1,y1)
            print(x2,y2)
            print(n+m,m,n,at)
            '''
            #see interpolate def above for behavior
            x_inter, y_inter = interpolate(float(x1), float(x2), float(y1),
                                           float(y2), at)
            #rewrite x_i for the interpolate index to x_inter
            x_i[index_to_interpolate[i]] = x_inter
            y_i[index_to_interpolate[i]] = y_inter
            '''
            print(x_inter, y_inter)
            print()        
            '''

        #new lists to hold interpolation speeds so we can investigate them
        d_xi, d_yi = [], []

        for n in range(1, len(x_i) - 1):

            speed_xi = (x_i[n] - x_i[n - 1])
            speed_yi = (y_i[n] - y_i[n - 1])
            d_xi.append(speed_xi)
            d_yi.append(speed_yi)

        x_ri, y_ri = [], []

        for i in range(
                0, len(x_i)
        ):  #get the 'real' x, y values by shifting relative to the x1, y1 crop parameters
            x_ri_a = x_i[i] + crop_x1y1_parameters[i][0]
            y_ri_a = y_i[i] + crop_x1y1_parameters[i][1]

            x_ri.append(x_ri_a)
            y_ri.append(y_ri_a)

        d_xi_r, d_yi_r = [], []

        for n in range(1, len(x_ri) - 1):
            speed_xi_r = (x_ri[n] - x_ri[n - 1])
            speed_yi_r = (y_ri[n] - y_ri[n - 1])
            d_xi_r.append(speed_xi_r)
            d_yi_r.append(speed_yi_r)

        #here we calculate speed means and standard deviations for the filtered data before and after it is returned to the 'real' reference frame
        mean_dxi, st_xi = st.mean(d_xi), st.stdev(d_xi)
        mean_dyi, st_yi = st.mean(d_yi), st.stdev(d_yi)

        mean_dxi_r, st_xi_r = st.mean(d_xi_r), st.stdev(d_xi_r)
        mean_dyi_r, st_yi_r = st.mean(d_yi_r), st.stdev(d_yi_r)

        print('number interpolated frames ' + str(len(index_to_interpolate)))
        print('unfiltered speed ' + body_part + '_x: ' +
              str(round(mean_dx, 3)) + ' +- ' + str(round(st_x, 3)))
        print('filtered speed ' + body_part + '_x: ' +
              str(round(mean_dxi, 3)) + ' +- ' + str(round(st_xi, 3)))
        print('filtered real speed ' + body_part + '_x: ' +
              str(round(mean_dxi_r, 3)) + ' +- ' + str(round(st_xi_r, 3)))
        print('unfiltered speed ' + body_part + '_y: ' +
              str(round(mean_dy, 3)) + ' +- ' + str(round(st_y, 3)))
        print('filtered speed ' + body_part + '_y: ' +
              str(round(mean_dyi, 3)) + ' +- ' + str(round(st_yi, 3)))
        print('filtered real speed ' + body_part + '_y: ' +
              str(round(mean_dyi_r, 3)) + ' +- ' + str(round(st_yi_r, 3)))
        p_test = p[2:]
        print('p min ' + str(round(min(p_test), 5)))
        print()

        #creates the dataframes that will store our filtered positional data
        #octopus reference frame and real reference frame
        #column names are tuples
        dat_arm_dlc_clean[dat_arm_dlc.columns[ii]] = x_i
        dat_arm_dlc_clean[dat_arm_dlc.columns[ii + 1]] = y_i
        dat_arm_dlc_clean[dat_arm_dlc.columns[ii + 2]] = p

        dat_arm_dlc_clean_real[dat_arm_dlc.columns[ii]] = x_ri
        dat_arm_dlc_clean_real[dat_arm_dlc.columns[ii + 1]] = y_ri
        dat_arm_dlc_clean_real[dat_arm_dlc.columns[ii + 2]] = p

        #creates the dataframes that will store our filtered speed data
        #octopus reference frame and real reference frame
        #column names are tuples
        dat_arm_speeds[dat_arm_dlc.columns[ii]] = d_xi
        dat_arm_speeds[dat_arm_dlc.columns[ii + 1]] = d_yi

        dat_arm_speeds_real[dat_arm_dlc.columns[ii]] = d_xi_r
        dat_arm_speeds_real[dat_arm_dlc.columns[ii + 1]] = d_yi_r

    path_dat_arm_dlc_clean_csv = path_clean_arm_data_dir + '\\octo_ref_clean_arm_data.csv'
    dat_arm_dlc_clean.to_csv(path_dat_arm_dlc_clean_csv)

    path_dat_arm_dlc_clean_real_csv = path_clean_arm_data_dir + '\\octo_real_ref_clean_arm_data.csv'
    dat_arm_dlc_clean_real.to_csv(path_dat_arm_dlc_clean_real_csv)

    path_dat_arm_speeds_csv = path_clean_arm_data_dir + '\\octo_ref_speed.csv'
    dat_arm_speeds.to_csv(path_dat_arm_speeds_csv)

    path_dat_arm_speeds_real_csv = path_clean_arm_data_dir + '\\octo_ref_speed_real.csv'
    dat_arm_speeds_real.to_csv(path_dat_arm_speeds_real_csv)

    ## below we are going to make a video using our filtered data to draw points and lines upon it
    ## we do this to investigate what our filtered predictions look like
    numpy_arm_dlc_clean = dat_arm_dlc_clean.to_numpy()
    numpy_arm_dlc_clean_real = dat_arm_dlc_clean_real.to_numpy()
    numpy_arm_speed = dat_arm_speeds.to_numpy()
    numpy_arm_speed_real = dat_arm_speeds_real.to_numpy()
    vid_dlc_labeled = glob.glob(path_arm_dir + '\\*.mp4')[0]
    video_clean_labels_name = path_clean_arm_data_dir + '\\clean_labels_vid.mp4'
    video_clean_labels_hull_name = path_clean_arm_data_dir + '\\clean_labels_hull_vid.mp4'

    #open the three target location .csv
    target_location_path = os.path.dirname(
        os.path.dirname(adir)) + '\\three_target_location.csv'
    if not os.path.exists(target_location_path):
        print(target_location_path + ' does not exist!')
    else:
        print('opening ' + target_location_path)
        dat_target = pd.read_csv(target_location_path)

    #turn the dat_target dataframe into a numpy array
    numpy_target = dat_target.to_numpy()
    food_location_xy = []
    #loop left to right through the first row of the array and find which target has food
    for i in range(0, len(numpy_target[0]), 3):
        print(numpy_target[0][i], numpy_target[0][i + 1],
              numpy_target[0][i + 2])
        if numpy_target[0][i + 2] == 1:
            food_location_xy.append(
                [numpy_target[0][i], numpy_target[0][i + 1]])
    food_location_xy = food_location_xy[0]

    cap = cv2.VideoCapture(vid)
    cap_dlc = cv2.VideoCapture(vid_dlc_labeled)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fs = 0

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    video = cv2.VideoWriter(video_clean_labels_name,
                            cv2.VideoWriter_fourcc(*'mp4v'), 7.0,
                            (width, height))

    video_hull = cv2.VideoWriter(video_clean_labels_hull_name,
                                 cv2.VideoWriter_fourcc(*'mp4v'), 7.0,
                                 (width, height))

    font = cv2.FONT_HERSHEY_SIMPLEX

    hull_list = []
    hull_list_rotated = []
    l_eye_list, r_eye_list, mean_eye_list = [], [], []

    while (cap.isOpened()):
        ret, frame = cap.read()
        ret_dlc, frame_dlc = cap_dlc.read()
        frame_edit = frame

        xy_lst = numpy_arm_dlc_clean[fs].tolist()  #grab list of coordinates
        del xy_lst[2::3]  #reshape the list to remove p-values
        hull_points = []
        for i in range(0, len(xy_lst), 2):
            hull_points.append([
                xy_lst[i], xy_lst[i + 1]
            ])  #turn the list of body parts into a coordinate list

        #give the coordinate list to the convexHull function, saves a list of coordinates
        hull = convexHull(hull_points, len(hull_points))
        #make new pts that is in the format opencv fillPoly wants
        pts = np.array(hull, np.int32)
        pts = pts.reshape((-1, 1, 2))

        #create a copy of frame_edit to overlay on frame edit
        overlay = frame_edit.copy()
        alpha = 0.3  #transparency for the frame_edit overlay
        cv2.fillPoly(overlay, [pts],
                     (100, 255, 255))  #put a polygon on the overlay
        frame_edit = cv2.addWeighted(overlay, alpha, frame_edit, 1 - alpha,
                                     0)  #put the overlay on frame_edit

        #save the hull
        hull_list.append(hull)

        #convex hull octopus frame of view re-orientation
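        #translate so the eye midpoint is at the origin, rotate so the eyes lie
        #on the x-axis, then shift the result to the centre of the output frame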

        #save the eye coordinates
        le_xy = [numpy_arm_dlc_clean[fs][0], numpy_arm_dlc_clean[fs][1]]
        l_eye_list.append(le_xy)
        re_xy = [numpy_arm_dlc_clean[fs][3], numpy_arm_dlc_clean[fs][4]]
        r_eye_list.append(re_xy)

        #get mean eye coordinates
        mean_e_xy = [
            st.mean([le_xy[0], re_xy[0]]),
            st.mean([le_xy[1], re_xy[1]])
        ]
        mean_eye_list.append(mean_e_xy)

        #move left eye and right eye to the origin as we can get angle to rotate the hull
        l_xy_o = [le_xy[0] - mean_e_xy[0], le_xy[1] - mean_e_xy[1]]
        r_xy_o = [re_xy[0] - mean_e_xy[0], re_xy[1] - mean_e_xy[1]]

        #atan2 gives negative and positive radian values relative to the positive x-axis
        theta = math.atan2(l_xy_o[1], l_xy_o[0])
        #theta_r = math.atan2(r_xy_o[1],r_xy_o[0])

        #we will rotate the right eye to the positive x-axis and left eye to the negative x-axis
        le_re_reorientated_list = []
        le_xy_r = rotate([0, 0], l_xy_o, (math.pi - theta))
        re_xy_r = rotate([0, 0], r_xy_o, (math.pi - theta))
        le_re_reorientated_list.append([le_xy_r, re_xy_r])

        #after the rotation both eye y-values should be ~0; snap them to zero
        #to absorb floating-point error, and warn if the rotation went wrong
        if round(le_xy_r[1], 5) > 0 or round(re_xy_r[1], 5) > 0:
            print('frame ' + str(fs) + ': eye rotate error, y-values ' +
                  str(le_xy_r[1]) + ' and ' + str(re_xy_r[1]))
        else:
            le_xy_r[1] = 0.0
            re_xy_r[1] = 0.0

        #here we rotate the hull
        rotated_hull = []
        for hull_cord in hull:
            ho_xy = [hull_cord[0] - mean_e_xy[0], hull_cord[1] - mean_e_xy[1]]
            ho_xy_r = rotate([0, 0], ho_xy, (math.pi - theta))
            ho_xy_r = [ho_xy_r[0] + width / 2, ho_xy_r[1] + height / 2]
            rotated_hull.append(ho_xy_r)

        hull_list_rotated.append(rotated_hull)
        #frame_hull_unrotated = np.zeros((height, width, 3), np.uint8)
        #pts = np.array(hull, np.int32)
        #pts = pts.reshape((-1, 1, 2))
        #cv2.fillPoly(frame_hull_unrotated, [pts], (255, 255, 255))

        #numpy image shape is (rows, cols) = (height, width)
        frame_hull_rotated = np.zeros((height, width, 3), np.uint8)
        pts = np.array(rotated_hull, np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv2.fillPoly(frame_hull_rotated, [pts], (255, 255, 255))

        cv2.circle(frame_hull_rotated, (int(width / 2), int(height / 2)), 1,
                   (0, 0, 0))

        ##get the real eye coordinates so we can point at the food
        #save the eye coordinates
        le_xy_real = [
            numpy_arm_dlc_clean_real[fs][0], numpy_arm_dlc_clean_real[fs][1]
        ]
        re_xy_real = [
            numpy_arm_dlc_clean_real[fs][3], numpy_arm_dlc_clean_real[fs][4]
        ]
        #get real mean eye coordinates
        mean_e_xy_r = [
            st.mean([le_xy_real[0], re_xy_real[0]]),
            st.mean([le_xy_real[1], re_xy_real[1]])
        ]

        food_location_xy_r = [
            food_location_xy[0] - mean_e_xy_r[0],
            food_location_xy[1] - mean_e_xy_r[1]
        ]

        food_location_xy_r_r = rotate([0, 0], food_location_xy_r,
                                      (math.pi - theta))
        food_location_xy_r_r_o = [
            food_location_xy_r_r[0] + width / 2,
            food_location_xy_r_r[1] + height / 2
        ]

        #get the mean speed of the eyes
        if fs > 0 and fs < frame_count - 2:
            speed_le_xy = [
                numpy_arm_speed_real[fs][0], numpy_arm_speed_real[fs][1]
            ]
            speed_re_xy = [
                numpy_arm_speed_real[fs][2], numpy_arm_speed_real[fs][3]
            ]

            speed_em_xy = [
                st.mean([speed_le_xy[0], speed_re_xy[0]]),
                st.mean([speed_le_xy[1], speed_re_xy[1]])
            ]
            speed_em_xy_rotated = rotate([0, 0], speed_em_xy,
                                         (math.pi - theta))

            sxy = [
                int(10 * speed_em_xy_rotated[0] + width / 2),
                int(10 * speed_em_xy_rotated[1] + height / 2)
            ]

            print(food_location_xy_r_r)
            cv2.arrowedLine(frame_hull_rotated,
                            (int(width / 2), int(height / 2)),
                            (int(food_location_xy_r_r_o[0]),
                             int(food_location_xy_r_r_o[1])), (200, 100, 0),
                            thickness=2)

            cv2.arrowedLine(frame_hull_rotated,
                            (int(width / 2), int(height / 2)),
                            (sxy[0], sxy[1]), (100, 200, 0),
                            thickness=3)

            speed_text = '(' + str(round(
                speed_em_xy_rotated[0], 2)) + ',' + str(
                    round(speed_em_xy_rotated[1], 2)) + ')'
            cv2.putText(frame_hull_rotated, speed_text, (sxy[0], sxy[1]), font,
                        0.25, (100, 200, 0))

        #draw whitish grey lines using body_part_line_list
        for part in body_part_line_list:
            xy_l0 = int(numpy_arm_dlc_clean[fs][part[0] * 3]), int(
                numpy_arm_dlc_clean[fs][part[0] * 3 + 1])
            xy_l1 = int(numpy_arm_dlc_clean[fs][part[1] * 3]), int(
                numpy_arm_dlc_clean[fs][part[1] * 3 + 1])
            cv2.line(frame_edit, xy_l0, xy_l1, (255 / 2, 255 / 2, 255 / 2), 1)

        #draw color coded body parts
        #goes left to right through the numpy array by steps of 3
        ss = 0
        for i in range(0, len(numpy_arm_dlc_clean[fs]), 3):
            xy_c = int(numpy_arm_dlc_clean[fs][i]), int(
                numpy_arm_dlc_clean[fs][i + 1])

            col1 = int(255 - 255 / len(numpy_arm_dlc_clean[fs]) * i)
            col2 = int(255 / len(numpy_arm_dlc_clean[fs]) * i)

            cv2.circle(frame_edit, xy_c, 3, (col1, 0, col2), -1)
            if fs > 0 and fs < frame_count - 2:
                speed_xt = numpy_arm_speed[fs][i - ss]
                speed_yt = numpy_arm_speed[fs][i - ss + 1]

                speed_xy = str(round(math.sqrt(speed_xt**2 + speed_yt**2), 1))

                cv2.putText(frame_edit, speed_xy, xy_c, font, 0.25,
                            (255 / 2, 255 / 2, 255 / 2))

                ss = ss + 1
        #cv2.putText(frame_edit, 'speed ')  #disabled: overall speed label

        #cv2.imshow('FrameClear',frame)
        cv2.imshow('FrameEdit', frame_edit)
        cv2.imshow('FrameDLC', frame_dlc)
        #cv2.imshow('FrameUnrotatedHull',frame_hull_unrotated)
        cv2.imshow('FrameRotatedHull', frame_hull_rotated)

        video.write(frame_edit)
        video_hull.write(frame_hull_rotated)

        cv2.waitKey(25)
        fs = fs + 1
        if fs == frame_count:
            break

    video_hull.release()
    video.release()
    cap_dlc.release()
    cap.release()
    cv2.destroyAllWindows()
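The snippet above relies on two helpers defined earlier in the source script: convexHull, which returns the hull vertices of a point list (cv2.convexHull could serve the same purpose), and rotate. A minimal sketch of a compatible rotate, assuming a standard counter-clockwise 2D rotation of point about origin by angle radians, might look like this; the original definition may differ:

import math

def rotate(origin, point, angle):
    #rotate `point` about `origin` by `angle` radians, counter-clockwise
    ox, oy = origin
    px, py = point
    qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
    qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
    #return a list so callers can overwrite components (e.g. le_xy_r[1] = 0.0)
    return [qx, qy]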
Esempio n. 20
0
def create_pretrained_human_project(project,
                                    experimenter,
                                    videos,
                                    working_directory=None,
                                    copy_videos=False,
                                    videotype='.avi',
                                    createlabeledvideo=True,
                                    analyzevideo=True):
    """
    Creates a demo human project and analyzes a video with ResNet 101 weights pretrained on MPII Human Pose. This is from the DeeperCut paper by Insafutdinov et al. https://arxiv.org/abs/1605.03170 Please make sure to cite it too if you use this code!

    Parameters
    ----------
    project : string
        String containing the name of the project.

    experimenter : string
        String containing the name of the experimenter.

    videos : list
        A list of string containing the full paths of the videos to include in the project.

    working_directory : string, optional
        The directory where the project will be created. The default is the ``current working directory``; if provided, it must be a string.

    copy_videos : bool, optional
        If this is set to True, the videos are copied to the ``videos`` directory. If it is False, symlinks of the videos are created in the project/videos directory. The default is ``False``; if provided it must be either
        ``True`` or ``False``.

    analyzevideo : bool, optional
        If True, the video is analyzed and a labeled video is created. If False, only the project is created and the weights are downloaded; you can then use them later.

    Example
    --------
    Linux/MacOs
    >>> deeplabcut.create_pretrained_human_project('human','Linus',['/data/videos/mouse1.avi'],'/analysis/project/',copy_videos=False)

    Windows:
    >>> deeplabcut.create_pretrained_human_project('human','Bill',[r'C:\yourusername\rig-95\Videos\reachingvideo1.avi'],r'C:\yourusername\analysis\project',copy_videos=False)
    Users must format Windows paths either as raw strings (r'C:\...') or with double backslashes ('C:\\...').
    --------
    """

    cfg = deeplabcut.create_new_project(project, experimenter, videos,
                                        working_directory, copy_videos,
                                        videotype)

    config = auxiliaryfunctions.read_config(cfg)
    config['bodyparts'] = [
        'ankle1', 'knee1', 'hip1', 'hip2', 'knee2', 'ankle2', 'wrist1',
        'elbow1', 'shoulder1', 'shoulder2', 'elbow2', 'wrist2', 'chin',
        'forehead'
    ]
    config['skeleton'] = [['ankle1', 'knee1'], ['ankle2', 'knee2'],
                          ['knee1', 'hip1'], ['knee2', 'hip2'],
                          ['hip1', 'hip2'], ['shoulder1', 'shoulder2'],
                          ['shoulder1', 'hip1'], ['shoulder2', 'hip2'],
                          ['shoulder1', 'elbow1'], ['shoulder2', 'elbow2'],
                          ['chin', 'forehead'], ['elbow1', 'wrist1'],
                          ['elbow2', 'wrist2']]
    config['default_net_type'] = 'resnet_101'
    auxiliaryfunctions.write_config(cfg, config)
    config = auxiliaryfunctions.read_config(cfg)

    train_dir = Path(
        os.path.join(
            config['project_path'],
            str(
                auxiliaryfunctions.GetModelFolder(
                    trainFraction=config['TrainingFraction'][0],
                    shuffle=1,
                    cfg=config)), 'train'))
    test_dir = Path(
        os.path.join(
            config['project_path'],
            str(
                auxiliaryfunctions.GetModelFolder(
                    trainFraction=config['TrainingFraction'][0],
                    shuffle=1,
                    cfg=config)), 'test'))

    # Create the model directory
    train_dir.mkdir(parents=True, exist_ok=True)
    test_dir.mkdir(parents=True, exist_ok=True)

    modelfoldername = auxiliaryfunctions.GetModelFolder(
        trainFraction=config['TrainingFraction'][0], shuffle=1, cfg=config)

    path_train_config = str(
        os.path.join(config['project_path'], Path(modelfoldername), 'train',
                     'pose_cfg.yaml'))
    path_test_config = str(
        os.path.join(config['project_path'], Path(modelfoldername), 'test',
                     'pose_cfg.yaml'))

    # Download the weights and put them in the appropriate directory
    cwd = os.getcwd()
    os.chdir(train_dir)
    print(
        "Checking if the weights are already available, otherwise I will download them!"
    )
    weightfilename = auxfun_models.download_mpii_weigths(train_dir)
    os.chdir(cwd)

    # Create the pose_config.yaml files
    parent_path = Path(os.path.dirname(deeplabcut.__file__))
    defaultconfigfile = str(parent_path / 'pose_cfg.yaml')
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(config)
    datafilename, metadatafilename = auxiliaryfunctions.GetDataandMetaDataFilenames(
        trainingsetfolder,
        trainFraction=config['TrainingFraction'][0],
        shuffle=1,
        cfg=config)
    bodyparts = config['bodyparts']
    net_type = 'resnet_101'
    num_shuffles = 1
    model_path, num_shuffles = auxfun_models.Check4weights(
        net_type, parent_path, num_shuffles)
    items2change = {
        "dataset": 'dataset-test.mat',  #datafilename,
        "metadataset": metadatafilename,
        "num_joints": len(bodyparts),
        "all_joints": [[i] for i in range(len(bodyparts))],
        "all_joints_names": [str(bpt) for bpt in bodyparts],
        "init_weights":
        weightfilename.split('.index')[0],  #'models/mpii/snapshot-1030000',
        "project_path": str(config['project_path']),
        "net_type": net_type,
        "dataset_type": "default"
    }
    trainingdata = MakeTrain_pose_yaml(items2change, path_train_config,
                                       defaultconfigfile)

    keys2save = [
        "dataset", "dataset_type", "num_joints", "all_joints",
        "all_joints_names", "net_type", 'init_weights', 'global_scale',
        'location_refinement', 'locref_stdev'
    ]
    MakeTest_pose_yaml(trainingdata, keys2save, path_test_config)
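    #MakeTrain_pose_yaml loads the default pose_cfg.yaml template, applies
    #items2change and writes path_train_config; MakeTest_pose_yaml then copies
    #only keys2save (plus scoring defaults) into the test pose_cfg.yaml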

    video_dir = os.path.join(config['project_path'], 'videos')

    if analyzevideo:
        # Analyze the videos
        deeplabcut.analyze_videos(cfg, [video_dir],
                                  videotype,
                                  save_as_csv=True)
    if createlabeledvideo:
        deeplabcut.create_labeled_video(cfg, [video_dir],
                                        videotype,
                                        draw_skeleton=True)
        deeplabcut.plot_trajectories(cfg, [video_dir], videotype)
    return cfg, path_train_config
    We also provide a function to plot the data over time and pixels in frames. The plotting
    function provided in this toolbox utilizes matplotlib; therefore, these plots can easily be customized. To call
    this function, type the following:
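    >>> deeplabcut.plot_trajectories(config_path, ['Full path of video 1', 'Full path of video 2'])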

    The output files can also be easily imported into many programs for further behavioral analysis
    (see Stage XI and 'Anticipated results').
    """

    if "filterpredictions" in stages_to_run:
        deeplabcut.filterpredictions(config_path, videos_to_filter_predictions, videotype='avi', shuffle=1, trainingsetindex=0,
                                     filtertype='median', windowlength=5,
                                     p_bound=.001, ARdegree=3, MAdegree=1, alpha=.01,
                                     save_as_csv=True, destfolder=None)

    if "step_12" in stages_to_run or "plot_trajectories" in stages_to_run:
        deeplabcut.plot_trajectories(config_path, videos_to_plot_trajectories, videotype='.avi', shuffle=1,
                                     trainingsetindex=0, filtered=False, showfigures=False, destfolder=None)

    if "step_12_bis" in stages_to_run or "analyzeskeleton" in stages_to_run:
        deeplabcut.analyzeskeleton(config_path, videos_to_plot_trajectories, videotype='avi', shuffle=1,
                                   trainingsetindex=0,
                                   save_as_csv=True,
                                   destfolder=None)

    """
    In addition, the toolbox provides a function to create labeled videos based on the extracted poses by
    plotting the labels on top of the frame and creating a video. To use it to create multiple labeled
    videos (provided either as each video path or as a folder path), type the following:
    >> deeplabcut.create_labeled_video(config_path,[‘Full path of video 1’, ‘Full path of video 2’])
    This function has various parameters; in particular, the user can set the colormap, the
    dotsize, and the alphavalue of the labels in the config.yaml file, and can pass a variable called
    displayedbodyparts to select only a subset of parts to be plotted. The user can also save
        track_method=TESTTRACKER
    )
    print("Tracklets created...")

    deeplabcut.stitch_tracklets(
        config_path,
        [new_video_path],
        "mp4",
        output_name=os.path.splitext(new_video_path)[0] + scorer + "_el.h5",
        track_method=TESTTRACKER
    )

    print("Plotting trajectories...")
    deeplabcut.plot_trajectories(
        config_path,
        [new_video_path],
        "mp4",
        track_method=TESTTRACKER
    )
    print("Trajectory plotted.")

    print("Creating labeled video...")
    deeplabcut.create_labeled_video(
        config_path,
        [new_video_path],
        "mp4",
        save_frames=False,
        color_by="individual",
        track_method=TESTTRACKER
    )
    print("Labeled video created.")
    deeplabcut.convert_detections2tracklets(
        config_path, [new_video_path], "mp4", track_method="ellipse"
    )
    print("Tracklets created...")

    deeplabcut.stitch_tracklets(
        config_path,
        [new_video_path],
        "mp4",
        output_name=os.path.splitext(new_video_path)[0] + scorer + "_el.h5",
    )

    print("Plotting trajectories...")
    deeplabcut.plot_trajectories(
        config_path, [new_video_path], "mp4", track_method="ellipse"
    )
    print("Trajectory plotted.")

    print("Creating labeled video...")
    deeplabcut.create_labeled_video(
        config_path,
        [new_video_path],
        "mp4",
        save_frames=False,
        color_by="individual",
        track_method="ellipse",
    )
    print("Labeled video created.")

    print("Filtering predictions...")
Esempio n. 24
0
    from moviepy.editor import VideoFileClip,VideoClip
    clip = VideoFileClip(video[0])
    clip.reader.initialize()
    def make_frame(t):
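        # ignore t: every frame of the generated clip is the source frame at t = 1 s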
        return clip.get_frame(1)

    newclip = VideoClip(make_frame, duration=1)
    newclip.write_videofile(newvideo,fps=30)

deeplabcut.analyze_videos(path_config_file,[newvideo],save_as_csv=True, destfolder=dfolder)

print("CREATE VIDEO")
deeplabcut.create_labeled_video(path_config_file,[newvideo], destfolder=dfolder)

print("Making plots")
deeplabcut.plot_trajectories(path_config_file,[newvideo], destfolder=dfolder)


print("EXTRACT OUTLIERS")
deeplabcut.extract_outlier_frames(path_config_file,[newvideo],outlieralgorithm='jump',epsilon=0,automatic=True, destfolder=dfolder)


file=os.path.join(cfg['project_path'],'labeled-data',vname,"machinelabels-iter"+ str(cfg['iteration']) + '.h5')

print("RELABELING")
DF=pd.read_hdf(file,'df_with_missing')
DLCscorer=np.unique(DF.columns.get_level_values(0))[0]
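#DF.columns is a MultiIndex of (scorer, bodyparts, coords); swap the network
#scorer name at level 0 for the human scorer and drop the likelihood columns
#so the file matches the CollectedData_<scorer> ground-truth format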
DF.columns.set_levels([scorer.replace(DLCscorer,scorer)],level=0,inplace=True)
DF = DF.drop('likelihood', axis=1, level=2)
DF.to_csv(os.path.join(cfg['project_path'],'labeled-data',vname,"CollectedData_" + scorer + ".csv"))
DF.to_hdf(os.path.join(cfg['project_path'],'labeled-data',vname,"CollectedData_" + scorer + '.h5'),'df_with_missing',format='table', mode='w')
Esempio n. 25
0
    def analyze_videos(self, event):

        shuffle = self.shuffle.GetValue()
        videotype = self.videotype.GetValue()

        if self.cfg.get("multianimalproject", False):
            print("DLC network loading and video analysis starting ... ")
            auto_track = True
        else:
            if self.csv.GetStringSelection() == "Yes":
                save_as_csv = True
            else:
                save_as_csv = False
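            # analyze_videos expects dynamic = (state, detection threshold, margin)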
            if self.dynamic.GetStringSelection() == "No":
                dynamic = (False, 0.5, 10)
            else:
                dynamic = (True, 0.5, 10)
            if self.filter.GetStringSelection() == "No":
                _filter = False
            else:
                _filter = True

        if self.cfg["cropping"] == "True":
            crop = self.cfg["x1"], self.cfg["x2"], self.cfg["y1"], self.cfg[
                "y2"]
        else:
            crop = None

        if self.cfg.get("multianimalproject", False):
            if self.robust.GetStringSelection() == "No":
                robust = False
            else:
                robust = True
            scorername = deeplabcut.analyze_videos(
                self.config,
                self.filelist,
                self.videotype.GetValue(),
                shuffle=shuffle,
                gputouse=None,
                cropping=crop,
                robust_nframes=robust,
                auto_track=True,
                n_tracks=self.ntracks.GetValue(),
                calibrate=self.calibrate.GetStringSelection() == "Yes",
                identity_only=self.identity_toggle.GetStringSelection() ==
                "Yes",
            )

            if self.create_video_with_all_detections.GetStringSelection() == "Yes":
                deeplabcut.create_video_with_all_detections(
                    self.config,
                    self.filelist,
                    self.videotype.GetValue(),
                    shuffle=shuffle,
                )
            if self.filter.GetStringSelection() == "Yes":
                deeplabcut.filterpredictions(
                    self.config,
                    self.filelist,
                    self.videotype.GetValue(),
                )

            if self.csv.GetStringSelection() == "Yes":
                deeplabcut.analyze_videos_converth5_to_csv(self.filelist,
                                                           listofvideos=True)

            if self.nwb.GetStringSelection() == "Yes":
                deeplabcut.analyze_videos_converth5_to_nwb(
                    self.config,
                    self.filelist,
                    listofvideos=True,
                )

            if self.trajectory.GetStringSelection() == "Yes":
                if self.showfigs.GetStringSelection() == "No":
                    showfig = False
                else:
                    showfig = True
                deeplabcut.plot_trajectories(self.config,
                                             self.filelist,
                                             self.videotype.GetValue(),
                                             showfigures=showfig)
        else:
            scorername = deeplabcut.analyze_videos(
                self.config,
                self.filelist,
                self.videotype.GetValue(),
                shuffle=shuffle,
                gputouse=None,
                save_as_csv=save_as_csv,
                cropping=crop,
                dynamic=dynamic,
            )
            if _filter:
                deeplabcut.filterpredictions(
                    self.config,
                    self.filelist,
                    self.videotype.GetValue(),
                    shuffle=shuffle,
                    filtertype="median",
                    windowlength=5,
                    save_as_csv=save_as_csv,
                )

            if self.trajectory.GetStringSelection() == "Yes":
                if self.showfigs.GetStringSelection() == "No":
                    showfig = False
                else:
                    showfig = True
                deeplabcut.plot_trajectories(
                    self.config,
                    self.filelist,
                    self.videotype.GetValue(),
                    displayedbodyparts=self.bodyparts,
                    shuffle=shuffle,
                    filtered=_filter,
                    showfigures=showfig,
                )
Esempio n. 26
0
deeplabcut.check_labels(path_config_file) #this creates a subdirectory with the frames + your labels
# Reviewed the labels, they seem to be ok

# Creating the training dataset (this also fetches the pretrained ResNet weights):
deeplabcut.create_training_dataset(path_config_file)

# Training the dataset
deeplabcut.train_network(path_config_file)

# Evaluating the results
deeplabcut.evaluate_network(path_config_file)

# Analyzing video
videofile_path = ['dlc-blinking/an3_vid2_full/eyes_only-Guy-2019-01-25/videos/animal_3_video_2_150fps_correct.mp4',
                  'dlc-blinking/an3_vid2_full/eyes_only-Guy-2019-01-25/videos/march_8_animal_1_video_150fps_correct.mp4'] #Enter the list of videos to analyze.
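# note: the list below overrides the one above, so only the whisk videos are analyzed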
videofile_path = ['dlc-blinking/whisk/whisk_only-Guy-2019-02-01/videos/crush_19_01_07_animal_3.mp4',
                  'dlc-blinking/whisk/whisk_only-Guy-2019-02-01/videos/animal_3_video_2_150fps_correct.mp4']
deeplabcut.analyze_videos(path_config_file,videofile_path,save_as_csv=True)


deeplabcut.create_labeled_video(path_config_file, ['D:\\dlc-blinking\\an3_vid2_full\\whisk_only-Guy-2019-02-01\\videos\\crush_19_01_07_animal_3.mp4'], save_frames=True)
deeplabcut.create_labeled_video(path_config_file, ['D:\\dlc-blinking\\whisk\\whisk_only-Guy-2019-02-01\\videos\\animal_3_video_2_150fps_correct.mp4'], save_frames=True)

%matplotlib notebook #for making interactive plots.
deeplabcut.plot_trajectories(path_config_file,videofile_path)


# TICTOCS:
# training - up to 72/96 hours
# analyzing - 45 minutes and 1.5 hours
# labeling - 25 minutes and 50 minutes
Esempio n. 27
0
    for video in os.listdir(dir):
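        #ffmpeg lenscorrection: cx,cy set the optical centre as fractions of
        #the frame; k1,k2 are the radial distortion coefficients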
        subprocess.call([
            'ffmpeg', '-i', filepath1 + '/' + video, '-vf',
            "lenscorrection=cx=0.5:cy=0.5:k1=-.115:k2=-0.022",
            filepath2 + '/' + video
        ])

#Use deeplabcut to analyze videos and save the results to the folder for processed videos

for dir in datadir3:
    for video in os.listdir(dir):
        print(video)
        deeplabcut.analyze_videos(configPath, [filepath2 + '/' + video],
                                  destfolder=filepath3,
                                  save_as_csv=True)
        deeplabcut.plot_trajectories(configPath, [filepath2 + '/' + video],
                                     destfolder=filepath3)
        #deeplabcut.create_labeled_video(configPath,[filepath2 +'/'+ video],videotype = 'mp4', destfolder = filepath3)

#Change directory to openpose and run openpose on the videos then save the results to the processed video folder

os.chdir("/Users/MatthisLab/openpose")
for dir in datadir3:
    for video in os.listdir(dir):
        videoName = video[:-4]
        subprocess.call([
            'bin/OpenPoseDemo.exe', '--video', filepath2 + '/' + video,
            '--hand', '--face', '--write_video', filepath4 + '/' + videoName,
            '--write_json', filepath4 + '/' + videoName
        ])