# Example #1 (score: 0)
def extract(path_config_file, mode='automatic', algo="kmeans"):
    """Run DeepLabCut frame extraction for the given project config.

    Set userfeedback to True if hoping to decide whether to extract
    frames on a video-by-video basis.
    """
    dlc.extract_frames(
        path_config_file, mode=mode, algo=algo, userfeedback=False, crop=False
    )
    def extract_frames(self, event):
        """Read extraction settings from the GUI widgets and run
        ``deeplabcut.extract_frames`` with them.

        Bug fix: the original set ``crop`` in two *consecutive* if/else
        blocks, so the second block (the 'GUI' check) always overwrote the
        first and ``crop`` could never end up True.  The checks are now a
        single if/elif/else chain.
        """
        mode = self.method
        algo = self.algo_choice.GetValue()

        crop_selection = self.crop_choice.GetStringSelection()
        if crop_selection == 'True':
            crop = True
        elif crop_selection == 'GUI':
            crop = 'GUI'
        else:
            crop = False

        userfeedback = self.feedback_choice.GetStringSelection() == 'Yes'
        opencv = self.opencv_choice.GetStringSelection() == 'Yes'
        slider_width = self.slider_width.GetValue()

        deeplabcut.extract_frames(self.config,
                                  mode,
                                  algo,
                                  crop=crop,
                                  userfeedback=userfeedback,
                                  cluster_step=self.cluster_step.GetValue(),
                                  cluster_resizewidth=30,
                                  cluster_color=False,
                                  opencv=opencv,
                                  slider_width=slider_width)
# Example #3 (score: 0)
def DLCLabel(config_path):
    """
    Utility to facilitate labelling DeepLabCut frames from the MesoNet GUI.

    :param config_path: (required) The path to a DeepLabCut configuration file (.yaml).
    """
    # Standard DLC labelling pipeline, in order: pull frames from the
    # project's videos (no cropping), open the labelling GUI, then verify
    # the resulting annotations.
    deeplabcut.extract_frames(config_path, crop=False)
    deeplabcut.label_frames(config_path)
    deeplabcut.check_labels(config_path)
# Example #4 (score: 0)
def create_dlc_project(name, author, files, props):
    """Create a new DeepLabCut project, apply ``props`` to its config,
    then extract, label and sanity-check frames.

    Returns the project directory (the config path minus '/config.yaml').
    """
    cfg_path = deeplabcut.create_new_project(name, author, files)
    update_dlc_config(cfg_path, props)
    deeplabcut.extract_frames(cfg_path, 'automatic', 'kmeans', userfeedback=False)
    deeplabcut.label_frames(cfg_path)
    deeplabcut.check_labels(cfg_path)
    # The project root is the config path without the trailing file name.
    return cfg_path.replace('/config.yaml', '')
# Example #5 (score: 0)
def extract_frames(projectId):
    """Flask endpoint: run DeepLabCut frame extraction for a stored project.

    Looks up the project's config path by id; when the request carries a
    JSON body its fields override the extraction defaults, otherwise a
    fixed default parameter set is used.
    """
    project = projectRepository.find_one({'_id': ObjectId(projectId)})
    config_path = project['config_path']
    if request.is_json:
        body = request.json
        deeplabcut.extract_frames(
            config_path, 'automatic', body['algo'], body['crop'], False,
            body['cluster_step'], body['cluster_resizewidth'],
            body['cluster_color'], body['opencv'], body['slider_width'], True)
    else:
        deeplabcut.extract_frames(config_path, "automatic", "kmeans", False,
                                  False, 1, 30, False, True, 25, True)
    return "Done"
        def clicked_add():
            """Handle the 'add to existing project' button: load a project,
            collect body parts and new videos, then register the videos
            with DeepLabCut and extract frames from them."""

            self.load_an_exsiting_project(window)
            window.destroy()
            self.select_body_parts()
            self.add_new_videos()
            # for item in self.video_list:
            #     if len(item) < 2:
            #         self.video_list.remove(item)
            print(self.video_list)
            if len(self.video_list) > 0:
                # Only touch DeepLabCut when the user actually picked videos.
                deeplabcut.add_new_videos(self.path_config_file, self.video_list, copy_videos=True)
                deeplabcut.extract_frames(self.path_config_file,'automatic', 'kmeans', checkcropping=False, crop=True)

            self.working_window_for_deeplabcut()
        def clicked_create():
            """Handle the 'create new project' button: ask for a working
            directory and project name, create the DeepLabCut project, then
            collect body parts / videos and extract frames."""
            self.select_working_directory(window)
            window.destroy()
            self.get_window_for_project_name()
            self.path_config_file = deeplabcut.create_new_project(self.name_of_project, self.experimenter, self.video_list,
                                                             working_directory=self.working_directory,
                                                             copy_videos=True)
            self.select_body_parts()
            self.add_new_videos()
            print(self.video_list)
            if len(self.video_list) > 0:
                # Only register/extract when at least one video was selected.
                deeplabcut.add_new_videos(self.path_config_file, self.video_list, copy_videos=True)
                deeplabcut.extract_frames(self.path_config_file, 'automatic', 'kmeans', checkcropping=False, crop=True)

            self.working_window_for_deeplabcut()
# Example #8 (score: 0)
print("Imported DLC!")
# NOTE: `task` and `scorer` are defined earlier in the original test script.
basepath = os.path.dirname(os.path.abspath('testscript.py'))
videoname = 'reachingvideo1'
video = [os.path.join(basepath, 'Reaching-Mackenzie-2018-08-30/videos/' + videoname + '.avi')]

print("CREATING PROJECT")
path_config_file = deeplabcut.create_new_project(task, scorer, video, copy_videos=True)
cfg = read_config(path_config_file)
# Keep the test project tiny and permissive so the smoke test runs fast.
cfg['numframes2pick'] = 5
cfg['pcutoff'] = 0.01
cfg['TrainingFraction'] = [.8]

write_config(path_config_file, cfg)

print("EXTRACTING FRAMES")
deeplabcut.extract_frames(path_config_file, mode='automatic', algo='kmeans')

print("CREATING-SOME LABELS FOR THE FRAMES")
frames = os.listdir(os.path.join(cfg['project_path'], 'labeled-data', videoname))
# As this next step is manual, we fabricate labels on the diagonal (fixed for
# all frames).  Improvement: collect one sub-frame per bodypart and concat
# once at the end -- calling pd.concat inside the loop is quadratic.
parts = []
for index, bodypart in enumerate(cfg['bodyparts']):
    columnindex = pd.MultiIndex.from_product(
        [[scorer], [bodypart], ['x', 'y']],
        names=['scorer', 'bodyparts', 'coords'])
    parts.append(pd.DataFrame(
        np.ones((len(frames), 2)) * 50 * index,
        columns=columnindex,
        index=[os.path.join('labeled-data', videoname, fn) for fn in frames]))
dataFrame = pd.concat(parts, axis=1)

dataFrame.to_csv(os.path.join(cfg['project_path'], 'labeled-data', videoname, "CollectedData_" + scorer + ".csv"))
dataFrame.to_hdf(os.path.join(cfg['project_path'], 'labeled-data', videoname, "CollectedData_" + scorer + '.h5'), 'df_with_missing', format='table', mode='w')
# Example #9 (score: 0)
]

print("CREATING PROJECT")
path_config_file = deeplabcut.create_new_project(task,
                                                 scorer,
                                                 video,
                                                 copy_videos=True)
cfg = read_config(path_config_file)
# Shrink the project so this smoke test stays fast and permissive.
cfg['numframes2pick'] = 5
cfg['pcutoff'] = 0.01
cfg['TrainingFraction'] = [.8]

write_config(path_config_file, cfg)

print("EXTRACTING FRAMES")
deeplabcut.extract_frames(path_config_file, mode='automatic')

print("CREATING-SOME LABELS FOR THE FRAMES")
frames = os.listdir(
    os.path.join(cfg['project_path'], 'labeled-data', videoname))
#As this next step is manual, we update the labels by putting them on the diagonal (fixed for all frames)
for index, bodypart in enumerate(cfg['bodyparts']):
    columnindex = pd.MultiIndex.from_product(
        [[scorer], [bodypart], ['x', 'y']],
        names=['scorer', 'bodyparts', 'coords'])
    frame = pd.DataFrame(
        100 + np.ones((len(frames), 2)) * 50 * index,
        columns=columnindex,
        index=[os.path.join('labeled-data', videoname, fn) for fn in frames])
    if index == 0:
        dataFrame = frame
        # NOTE(review): excerpt truncated here -- the non-zero-index branch
        # (concatenating `frame` into `dataFrame`) is missing.
# Example #10 (score: 0)
    def __importFromYAMLFile(self):
        """Import a project definition from a DeepLabCut config YAML.

        Reads ``scorer``, ``video_sets`` and ``bodyparts`` from the selected
        YAML file, validates them, creates one project video (with one
        tracked object per bodypart) plus a timeline track per entry, and
        runs DeepLabCut frame extraction.
        """

        config_path = self._file.value

        if config_path == "":
            QMessageBox.warning(self, "Import error",
                                "No YAML file was selected. Aborted import!")
            return

        with open(config_path, 'r') as f:
            try:
                # FullLoader avoids arbitrary object construction from YAML.
                dict_yaml = yaml.load(f, Loader=yaml.FullLoader)
            except yaml.YAMLError as exc:
                QMessageBox.warning(
                    self, "Import error",
                    "Could not import the chosen YAML file. Aborted import!")
                return

        try:
            self.scorer = dict_yaml.get("scorer")
            self.videos = dict_yaml.get("video_sets")
            self.bodyparts = dict_yaml.get("bodyparts")

        except AttributeError as err:
            QMessageBox.warning(
                self, "Import error",
                "The YAML is not configured correctly. It needs to have a scorer, a videos and a bodyparts attribute. Each video also needs to have a crop attribute."
            )
            return

        # Reject duplicate video or bodypart names up front.
        if len(self.videos) != len(set(self.videos)):
            QMessageBox.warning(
                self, "Import error",
                "Two videos can't have the same name! Aborted import!")
            return

        if len(self.bodyparts) != len(set(self.bodyparts)):
            QMessageBox.warning(
                self, "Import error",
                "Two bodyparts can't have the same name! Aborted import!")
            return

        for video in self.videos.keys():

            # One project video object per YAML entry, with one tracked
            # object (and path) per bodypart.
            v = self.mainwindow.project.create_video()
            v.filepath = abspath(video)

            for part in self.bodyparts:
                obj = v.create_object()
                obj.name = part
                obj.create_path()

            #adds the pair video and track to the videos dictionary
            track = self.mainwindow.timeline.add_track(title=v.name)

            # NOTE(review): extract_frames operates on the whole config but
            # is invoked once per video -- confirm it belongs in this loop.
            deeplabcut.extract_frames(config_path, userfeedback=False)

            frames_directory = join(abspath(dirname(config_path)),
                                    "labeled-data", v.name)

            # Mirror each extracted frame as a one-frame event on the track.
            frames = self.get_frames_from_directory_with_images(
                frames_directory)
            for frame in frames:
                self.mainwindow.timeline.add_event(begin=frame,
                                                   end=frame + 1,
                                                   track=track)

        QMessageBox.information(self, "Import finished",
                                "Completed import from YAML file")
# Create a project pre-initialised with pretrained human-pose weights.
configfile, path_train_config = deeplabcut.create_pretrained_human_project(
    Task,
    YourName,
    video,
    videotype='avi',
    analyzevideo=False,
    createlabeledvideo=False,
    copy_videos=False)  #must leave copy_videos=True

lastvalue = 5
# Read the generated training config to grab the pretrained weights path.
DLC_config = deeplabcut.auxiliaryfunctions.read_plainconfig(path_train_config)
pretrainedDeeperCutweights = DLC_config['init_weights']

print("EXTRACTING FRAMES")
deeplabcut.extract_frames(configfile, mode='automatic', userfeedback=False)

print("CREATING-SOME LABELS FOR THE FRAMES")
cfg = deeplabcut.auxiliaryfunctions.read_config(configfile)
frames = os.listdir(
    os.path.join(cfg['project_path'], 'labeled-data', videoname))
#As this next step is manual, we update the labels by putting them on the diagonal (fixed for all frames)
for index, bodypart in enumerate(cfg['bodyparts']):
    columnindex = pd.MultiIndex.from_product(
        [[cfg['scorer']], [bodypart], ['x', 'y']],
        names=['scorer', 'bodyparts', 'coords'])
    # NOTE(review): the loop body is truncated in this excerpt.
    frame = pd.DataFrame(
        100 + np.ones((len(frames), 2)) * 50 * index,
        columns=columnindex,
        index=[os.path.join('labeled-data', videoname, fn) for fn in frames])
    # NOTE(review): this fragment is garbled by the scrape -- the `if` below
    # lost its loop context and the following `print` belongs to a different
    # snippet that starts mid-function.
    if index == 0:
    print("Project created.")

    print("Editing config...")
    cfg = auxiliaryfunctions.edit_config(
        config_path,
        {
            "numframes2pick": NUM_FRAMES,
            "TrainingFraction": [TRAIN_SIZE],
            "identity": True,
            "uniquebodyparts": ["corner1", "corner2"],
        },
    )
    print("Config edited.")

    print("Extracting frames...")
    deeplabcut.extract_frames(config_path, mode="automatic", userfeedback=False)
    print("Frames extracted.")

    print("Creating artificial data...")
    rel_folder = os.path.join("labeled-data", os.path.splitext(video)[0])
    image_folder = os.path.join(cfg["project_path"], rel_folder)
    n_animals = len(cfg["individuals"])
    (
        animals,
        bodyparts_single,
        bodyparts_multi,
    ) = auxfun_multianimal.extractindividualsandbodyparts(cfg)
    # Per-keypoint animal ids: one id per multi-animal bodypart per animal,
    # plus a shared trailing id for the single/unique bodyparts.
    animals_id = [i for i in range(n_animals) for _ in bodyparts_multi] + [
        n_animals
    ] * len(bodyparts_single)
    map_ = dict(zip(range(len(animals)), animals))
# Example #13 (score: 0)
import deeplabcut
import os, yaml
from pathlib import Path

##once#create the project, set working directory for videos, and find videos
deeplabcut.create_new_project('name_of_project','author', ['path_to_video.avi' ], working_directory='path_to_folder', copy_videos=False) 

#specify path to config.yaml
####change yaml for the project
config_path = 'copy the path to the created yaml file here.yaml'

##opt# add more videos
deeplabcut.add_new_videos(config_path, [video_directory], copy_videos=False)

#data selection (auto)
deeplabcut.extract_frames(config_path,'automatic','uniform', crop=False, checkcropping=False)

##opt#extract data frames by hand
deeplabcut.extract_frames(config_path,'manual')

#label frames
deeplabcut.label_frames(config_path)

##opt#check annotated frames
deeplabcut.check_labels(config_path)

#create training dataset
deeplabcut.create_training_dataset(config_path,num_shuffles=1)

#train the network --> additional parameters
# Bug fix: `gputouse` expects a GPU *index* (or None for the default device);
# the original passed 390.87, which is an NVIDIA driver version, not a GPU id.
deeplabcut.train_network(config_path, shuffle=1, trainingsetindex=0, gputouse=None, max_snapshots_to_keep=5, autotune=False, displayiters=None, saveiters=None)
# Example #14 (score: 0)
def extract_frames(path_config, mode):
    """Thin wrapper forwarding to ``deeplabcut.extract_frames``."""
    return deeplabcut.extract_frames(path_config, mode)
# Example #15 (score: 0)
# Day index into the experiment's const.date_folders list.
date = 8
base_dlc_folder = join(const.base_save_folder, const.rat_folder, const.date_folders[date],
                       'Analysis', 'Deeplabcut')
base_projects_folder = join(base_dlc_folder, 'projects')

cropped_video_filename = join(base_dlc_folder, 'BonsaiCroping', 'Croped_video.avi')

# ----------------------------------------------------------------------------------------------------------------------
config_path = deeplabcut.create_new_project(project='V1', experimenter='', videos=[cropped_video_filename],
                                            working_directory=base_projects_folder, copy_videos=True)

# Use the line below to 'reload the existing project
# (overwrites the config_path returned by create_new_project above)
config_path = join(base_projects_folder, 'V1--2019-06-30', 'config.yaml')

# Edit the config.yaml file
deeplabcut.extract_frames(config_path, 'manual')

deeplabcut.label_frames(config_path)

deeplabcut.check_labels(config_path)

deeplabcut.create_training_dataset(config_path)

# Train/evaluate on GPU 1, then analyse the cropped video and render labels.
deeplabcut.train_network(config_path, gputouse=1)

deeplabcut.evaluate_network(config_path, plotting=True)

deeplabcut.analyze_videos(config_path, [cropped_video_filename], gputouse=1,
                          shuffle=1, save_as_csv=False, videotype='avi')

deeplabcut.create_labeled_video(config_path, [cropped_video_filename])
def cli_create_project(project_path,
                       video_paths,
                       name='eye_video_demo',
                       experimenter='experimenter',
                       recursive=False,
                       format_='mp4',
                       exclude=None,
                       num_frames=20,
                       train=False,
                       analyze=False,
                       create_videos=False):
    """Run all steps to create a DLC eye-tracking project.

    :param project_path: working directory for the new project
    :param video_paths: video files/directories; falls back to the bundled
        example eye-camera video when empty
    :param name: project name
    :param experimenter: experimenter name recorded in the config
    :param recursive: search directories recursively for videos
    :param format_: video file extension to look for
    :param exclude: file names to skip when collecting videos (default none)
    :param num_frames: frames to pick per video (config ``numframes2pick``)
    :param train: also train and evaluate the network
    :param analyze: additionally analyze the videos (requires ``train``)
    :param create_videos: additionally render labeled videos (requires ``analyze``)
    """
    # Bug fix: the default for `exclude` was a shared mutable list ([]).
    if exclude is None:
        exclude = []

    if len(video_paths) == 0:
        # use provided example
        video_files = [op.join(op.split(op.realpath(__file__))[0], 'data', 'example_eye_camera_video.mp4')]
    else:
        # get video files
        video_files = []
        for vp in list(video_paths):
            video_files.extend(get_video_files(vp, recursive, exclude,
                                               file_format=format_.lower()))

    # list all video files (and convert to mp4 if required)
    for i, vf in enumerate(video_files):
        print("found video file: %s" % vf)

        if op.splitext(vf)[1] == '.h264':
            # Bug fix: the original wrote `vide_files[i]` (NameError), so the
            # converted file was never recorded in the list.
            video_files[i] = convert_h264_to_mp4(vf)

    # create a new project
    config_path = deeplabcut.create_new_project(name, experimenter, video_files,
                                                working_directory=project_path,
                                                copy_videos=False)

    # restrict tracking to the three eye landmarks and cap the frame count
    config = deeplabcut.utils.read_config(config_path)
    config['bodyparts'] = ['pupil_center', 'nasal_corner', 'temporal_corner']
    config['numframes2pick'] = num_frames
    deeplabcut.utils.write_config(config_path, config)

    # extract and label frames
    deeplabcut.extract_frames(config_path,
                              mode='automatic',
                              algo='kmeans',
                              crop=True)
    deeplabcut.label_frames(config_path)
    deeplabcut.check_labels(config_path)

    # create training dataset
    deeplabcut.create_training_dataset(config_path)

    if train:
        # train and evaluate the network
        deeplabcut.train_network(config_path)
        deeplabcut.evaluate_network(config_path)

        if analyze:
            deeplabcut.analyze_videos(config_path, video_files)

            if create_videos:
                # create a video
                deeplabcut.create_labeled_video(config_path, video_files)
# Example #17 (score: 0)
import deeplabcut
import tensorflow as tf

'''deeplabcut.create_new_project('1_2_4_chamber','MC',['C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3477.2b\\05_08_2019\\BRAC34772b top_left 05_08_2019 12_40_54 1_trimmed.mp4'],
working_directory='C:\\Users\\analysis\\Desktop')'''

# Trimmed behaviour videos belonging to the existing 1_2_4_chamber project.
videopath =['C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3477.2b\\13_08_2019\\BRAC34772b top_right 13_08_2019 14_39_52 2_trimmed.mp4',
'C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3583.3e\\23_07_2019\\BRAC35833e 23_07_2019 13_35_06 4_trimmed.mp4',
'C:\\Users\\analysis\\Desktop\\Mingran\\analysed_trimmed_data\\BRAC3616.3c\\23_07_2019\\BRAC36163c 23_07_2019 12_41_11 3_trimmed.mp4']
config = 'C:\\Users\\analysis\\Desktop\\1_2_4_chamber-MC-2019-08-23\\config.yaml'


'''deeplabcut.add_new_videos(config,videopath)
deeplabcut.extract_frames(config,'automatic','kmeans')'''
# First pass: label, verify, build the dataset and train.
deeplabcut.label_frames(config)

deeplabcut.check_labels(config)
deeplabcut.create_training_dataset(config)
deeplabcut.train_network(config)


# Refinement pass: pull outlier frames, re-label, merge and retrain.
deeplabcut.extract_outlier_frames(config,videopath)
deeplabcut.refine_labels(config)
deeplabcut.merge_datasets(config)
deeplabcut.create_training_dataset(config)
deeplabcut.train_network(config)
# Create a fresh single-animal project for the trial mouse video.
deeplabcut.create_new_project('Mouse', 'Amr', ['/home/amr/Trial_DeepLabCut/mouse.avi'])


# # <font color=red>Now, You open the config.yaml file to modify the body part you want to track and if you want, you can modify the number of frames to pick</font>

# In[3]:


# Path to the config.yaml generated by create_new_project above.
path_config = '/home/amr/Trial_DeepLabCut/Mouse-Amr-2018-12-03/config.yaml'


# In[4]:


# Automatically pick frames via k-means clustering over the video.
deeplabcut.extract_frames(path_config, 'automatic', 'kmeans')


# In[9]:


deeplabcut.label_frames(path_config, Screens=2)


# Now, a GUI will open for you to label the body part you want

# In[10]:


# Visual sanity check of the annotations made above.
deeplabcut.check_labels(path_config)
# Example #19 (score: 0)
# NOTE(review): orphan `else` -- the matching `if` is above this excerpt.
else:
    video = os.path.join(os.getcwd(), 'OurPrj',
                         task + '-' + experimenter + '-2019-04-16', 'videos',
                         'Az_Neural.avi')
    path_config_file = deeplabcut.create_new_project(
        task,
        experimenter,
        video,
        working_directory=working_dir,
        copy_videos=True)
    path_prj, config_file_name = os.path.split(path_config_file)

# Each stage below is guarded by a flag so completed steps are skipped on re-runs.
if not framesextracted:
    print("Extracting frame from videos")
    deeplabcut.extract_frames(path_config_file,
                              'automatic',
                              'uniform',
                              crop=False)
else:
    print("frames have been extracted already!!")

if not frameslabeled:
    print("Need labeled frames")
    deeplabcut.label_frames(path_config_file)  # label frames
else:
    print("Frames have been labeled already!!!!!")

if not traindatasetexist:  # create training dataset if not exist
    deeplabcut.check_labels(path_config_file)
    deeplabcut.create_training_dataset(path_config_file, num_shuffles=1)
else:
    print("Training dataset exists")
# Example #20 (score: 0)
def usingDeep(path_config, project):
    """Interactive menu driving a DeepLabCut project end to end.

    Repeatedly prompts the user for an action number (0-12) and runs the
    matching DeepLabCut step (extract/label/check frames, build datasets,
    evaluate, plot, render labeled videos, add videos, ...).

    :param path_config: absolute path to the project's config.yaml.
    :param project: project name (not used by the menu itself).
    :return: False when the user quits (action 0); True when a batch
        training/analysis script was prepared for submission.

    Bug fix: the crop prompt used ``input() == "yes" or "y"`` which is
    always truthy (the literal "y" is a non-empty string), so the crop
    branch ran unconditionally.  The answer is now compared properly.
    """
    # The original used `exit = False; while exit == False:` but never set
    # the flag -- the loop only ends via the return statements below.
    while True:
        # Derive ".../<project>/videos/" from the config.yaml path.
        video_path = path_config.split("/")
        video_path = '/' + video_path[1] + '/' + video_path[
            2] + '/' + video_path[3] + '/' + video_path[4] + '/videos/'
        print_usage("project")
        action = input()
        # Keep prompting until the input parses as an int in 0..12.
        while action not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]:
            try:
                action = int(action)
                if action not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]:
                    print("please type number between 0 and 12")
                    action = input()
            except ValueError:
                print("Please enter number")
                action = input()
            print("been here")
        if action == 0:
            return False
        elif action == 1:
            print("do you want to crop the video? yes/no")
            if input() in ("yes", "y"):
                print(
                    "how many videos you you want to crop? (use number: 1,2,3 etc)"
                )
                crops = int(input())
                print("only crop all the video's and than exit")
                for loop in range(0, crops):
                    deeplabcut.extract_frames(path_config,
                                              'manual',
                                              'kmeans',
                                              crop=True)
            deeplabcut.extract_frames(path_config,
                                      'automatic',
                                      'kmeans',
                                      crop=True)
        elif action == 2:
            deeplabcut.label_frames(path_config)
        elif action == 3:
            deeplabcut.check_labels(path_config)
        elif action == 4:
            deeplabcut.create_training_dataset(path_config)
        elif action == 5:
            # Patch the config path into line 4 of training_network.py, then
            # tell the user to submit the batch job.
            with open("training_network.py") as fp:
                lines = fp.readlines()
                lines[3] = lines[3].split("=")
                lines[3] = lines[3][0] + "= '" + path_config + "'\n"

            with open("training_network.py", "w") as fp:
                for line in lines:
                    fp.writelines(line)

            print("run: sbatch slurm.sh")
            return True
        elif action == 6:
            try:
                deeplabcut.evaluate_network(path_config,
                                            Shuffles=[1],
                                            trainingsetindex=0,
                                            plotting=None,
                                            show_errors=True,
                                            comparisonbodyparts='all',
                                            gputouse=None,
                                            rescale=False)
            except OSError as e:
                print("file does not exist")
        elif action == 7:
            # Patch both the config path and the chosen video path into
            # training_network.py for batch analysis.
            print("\nType video name in project/videos you want to analyze")
            video_path = video_path + create_dict_list(
                path_config[:-11] + "videos/", 1)[0]
            with open("training_network.py") as fp:
                lines = fp.readlines()
                lines[3] = lines[3].split("=")
                lines[3] = lines[3][0] + "= '" + path_config + "'\n"
                lines[4] = lines[4].split("=")
                lines[4] = lines[4][0] + "= '" + video_path + "'\n"

            with open("training_network.py", "w") as fp:
                for line in lines:
                    fp.writelines(line)
            print(
                "run: sbatch slurm.sh after changing the command in training_network.py"
            )
            return True
        elif action == 8:
            print(
                "\nChoose the video in project/videos you want to plot trajectories from"
            )
            video_path = video_path + create_dict_list(
                path_config[:-11] + "videos/", 1)[0]
            print(video_path)
            deeplabcut.plot_trajectories(path_config, [video_path],
                                         filtered=True)
        elif action == 9:
            print(
                "\nChoose the video in project/videos you want to make a labeled video from"
            )
            video_path = video_path + create_dict_list(
                path_config[:-11] + "videos/", 1)[0]
            deeplabcut.create_labeled_video(path_config, [video_path],
                                            videotype='.mp4',
                                            draw_skeleton=True)
        elif action == 10:
            print("\nChoose where to upload the video from")
            video_path = '/data/11012579/videos/' + create_dict_list(
                '/data/11012579/videos/', 0)[0]
            print("\nChoose which video to upload")
            video_path_list = [
                video_path + "/" + create_dict_list(video_path, 1)[0]
            ]
            # Collect additional videos until the user says no, then register
            # them all with the project in one call.
            while True:
                print("\nDo you want to add more videos?\nType yes or no")
                if input() == 'yes':
                    video_path_list.append(video_path + "/" +
                                           create_dict_list(video_path, 1)[0])
                else:
                    deeplabcut.add_new_videos(path_config,
                                              video_path_list,
                                              copy_videos=False)
                    break
        elif action == 11:
            print("also here")
            Dlc_results2 = pd.read_hdf(
                '/data/11012579/videos/vidDLC_resnet50_demo_project_grab2Feb7shuffle1_11500.h5'
            )
            Dlc_results2.plot()
        else:
            print_usage("error")

        print("klaar")
# Example #21 (score: 0)
# Project layout: <BASE_DIR>/<dlc_proj_name>/{config.yaml, labeled_videos/}
proj_path = os.path.join(BASE_DIR, dlc_proj_name)
if config_path is None:
    config_path = os.path.join(proj_path, 'config.yaml')
labeled_csv_path = os.path.join(proj_path, 'labeled_videos')
if not os.path.isdir(labeled_csv_path):
    os.mkdir(labeled_csv_path)

print('New config_path: "{}"'.format(config_path))

# Edit the config file to represent your tracking

# DLC Cropping
deeplabcut.extract_frames(config_path,
                          mode='automatic',
                          algo='uniform',
                          crop=False,
                          userfeedback=False)

deeplabcut.label_frames(config_path)

deeplabcut.check_labels(config_path)

deeplabcut.create_training_dataset(config_path)

# 0 is the GPU number, see in nvidia-smi
deeplabcut.train_network(config_path,
                         gputouse=0,
                         saveiters=25000,
                         maxiters=250000)
# Still frames used as training material for the Armina model.
videos = glob.glob(
    '/home/weertman/Documents/Theresa_FHL/Armina_DeepLabCut/random_train_images/*.png'
)

path_config = '/home/weertman/Documents/Theresa_FHL/Armina_DeepLabCut/Arimina_model_1/'
if os.path.exists(path_config) != True:
    os.mkdir(path_config)

path_config = deeplabcut.create_new_project(project,
                                            experimenter,
                                            videos,
                                            copy_videos=True,
                                            working_directory=path_config)

# Re-point at the already-created project's config on later runs.
path_config = '/home/weertman/Documents/Theresa_FHL/Armina_DeepLabCut/Arimina_model_1/Armina_model_1-Theresa-2020-07-01/config.yaml'
'''
deeplabcut.extract_frames(path_config,
                          mode = 'automatic',   
                          algo = 'uniform',
                          userfeedback = False,
                          crop = False)
'''
deeplabcut.label_frames(path_config)

deeplabcut.create_training_dataset(path_config)
deeplabcut.train_network(path_config, maxiters=200000, displayiters=10)
deeplabcut.evaluate_network(path_config)

# Score each still test image with the trained network.
deeplabcut.analyze_time_lapse_frames(
    path_config,
    '/home/weertman/Documents/Theresa_FHL/Armina_DeepLabCut/left_test_images/',
    # NOTE(review): call truncated in this excerpt -- the remaining arguments
    # and closing parenthesis are missing.
# Example #23 (score: 0)
print("CREATING PROJECT")
path_config_file = dlc.create_new_project(task,
                                          scorer,
                                          video,
                                          copy_videos=True)

cfg = dlc.auxiliaryfunctions.read_config(path_config_file)
# Small, permissive settings so this smoke test runs quickly.
cfg['numframes2pick'] = 5
cfg['pcutoff'] = 0.01
cfg['TrainingFraction'] = [.8]
cfg['skeleton'] = [['bodypart1', 'bodypart2'], ['bodypart1', 'bodypart3']]

dlc.auxiliaryfunctions.write_config(path_config_file, cfg)

print("EXTRACTING FRAMES")
dlc.extract_frames(path_config_file, mode='automatic', userfeedback=False)

print("CREATING SOME LABELS FOR THE FRAMES")
frames = os.listdir(
    os.path.join(cfg['project_path'], 'labeled-data', videoname))
#As this next step is manual, we update the labels by putting them on the diagonal (fixed for all frames)
for index, bodypart in enumerate(cfg['bodyparts']):
    columnindex = pd.MultiIndex.from_product(
        [[scorer], [bodypart], ['x', 'y']],
        names=['scorer', 'bodyparts', 'coords'])
    frame = pd.DataFrame(
        100 + np.ones((len(frames), 2)) * 50 * index,
        columns=columnindex,
        index=[os.path.join('labeled-data', videoname, fn) for fn in frames])
    if index == 0:
        dataFrame = frame
        # NOTE(review): excerpt truncated -- the branch concatenating later
        # bodyparts into `dataFrame` is missing.
# Example #24 (score: 0)
 def extract_frames(self):
     """Launch DeepLabCut's frame extraction for this project's config."""
     # Keeping the plain call preserves the ability to run in an
     # interactive session (e.g. IPython with inline matplotlib).
     cfg_path = self.full_config_path()
     deeplabcut.extract_frames(cfg_path)
# Example #25 (score: 0)
import deeplabcut
import os
import sys  # bug fix: sys.exit() was called below without importing sys

config_path = r'C:\gpfsdata\home\sqiu\job\dlc\ephy_bi_avi-Qs-2019-05-09\config.yaml'

# Bug fix: the original tested the string's truthiness (`if config_path:`),
# which is always True for a non-empty literal; the error message says the
# path "does not exist", so check the filesystem instead.
if os.path.exists(config_path):
    deeplabcut.extract_frames(config_path, 'automatic', 'uniform')
else:
    print('config_path does not exist.')
    sys.exit()

deeplabcut.label_frames(config_path)

deeplabcut.check_labels(config_path)

#then transfer to cluster
print(">>>>>>>")
print(
    "1-2 remember change the path way of 'config_path' of dlc-cluster-03-create_dataset-and-training.py in cluster"
)
print("2-2 remember to change the 'projection_path' in config.yaml of clucter")
print("<<<<<<<")
class tutorial():
    def __init__(self, algo, crop, userfeedback, shuffle, saveiters, displayiters, angle, center, scale, filenames):
        # Fixed tutorial configuration for extraction, training and rotation.
        # NOTE(review): every constructor argument is ignored — each attribute
        # below is hardcoded, so callers cannot actually configure this class.
        # Either assign from the parameters or drop them from the signature.
        self.algo = "automatic"
        self.crop = False
        self.userfeedback = False
        self.shuffle = 1
        self.saveiters = 200
        self.displayiters = 100
        self.angle = -4.5  # rotation angle (degrees) applied to the videos
        self.center = None  # None -> rotate() defaults to the image centre
        self.scale = 1.0  # 1.0 keeps the original frame size
        # Generate all rotating videos
        self.filenames = glob.glob('*.mp4')  # all .mp4 files in the current directory (the `filenames` parameter is ignored)

    def rotate(self, image, angle, center=None, scale=1):
        """Return *image* rotated by *angle* degrees about *center*.

        Parameters
        ----------
        image : numpy array
            Frame as produced by OpenCV, shape (rows, cols, channels).
        angle : float
            Rotation angle in degrees.
        center : tuple or None
            Pivot point (x, y); defaults to the image centre when None.
        scale : float
            Isotropic scale factor; 1 keeps the original size.
        """
        rows, cols, ch = image.shape
        # BUG FIX: was `if center == None`; identity comparison (`is None`) is
        # the correct way to test for the None default.
        if center is None:
            center = (cols / 2, rows / 2)
        # Build the 2x3 rotation matrix about `center`, then warp the frame.
        M = cv2.getRotationMatrix2D(center, angle, scale)
        dst = cv2.warpAffine(image, M, (cols, rows))
        # BUG FIX: the original `return dst` was dedented out of the method
        # body (an IndentationError); it now correctly returns the result.
        return dst


    def videorotate(self, filenames, output_name, display_video=False):
        """Rotate one input video by -4.5 degrees and save it under ./rotated/.

        Parameters
        ----------
        filenames : str
            Path of the input video (despite the plural name, this is used as
            a single path — kept for interface compatibility).
        output_name : str
            File name for the rotated video, written under ./rotated/.
        display_video : bool
            If True, preview each rotated frame in an OpenCV window.
        """
        # BUG FIX: the original body referenced an undefined name `filename`;
        # the parameter is spelled `filenames`.
        cap = cv2.VideoCapture(filenames)

        # Original frame size, codec tag and frame rate of the source video.
        sz = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

        fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))  # read but unused below (writer uses MJPG)

        fps = int(cap.get(cv2.CAP_PROP_FPS))

        # Make a directory to store the rotated videos; ignore "already exists".
        path = "./rotated"
        try:
            os.mkdir(path)
        except OSError:
            pass
        else:
            print ("Successfully created the directory %s " % path)

        # Writer that assembles the rotated frames into an MJPG-encoded video.
        file = "./rotated/" + output_name
        out = cv2.VideoWriter(file, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, sz)

        # Read the source frame by frame, rotating and writing each one.
        while cap.isOpened():
            ret, img = cap.read()
            # BUG FIX: end-of-stream used to be detected by letting rotate()
            # raise on a None frame inside a bare `except:` — which also hid
            # any real error. Test the read flag explicitly instead.
            if not ret:
                print (filenames, 'successfully rotated!!!')
                break
            # BUG FIX: `rotate` is a method; the original called it as a bare
            # (undefined) global `rotate(...)`.
            img2 = self.rotate(img, -4.5)
            out.write(img2)
            if display_video == True:
                cv2.imshow('rotated video', img2)

            # Stop early if the user presses Esc in the preview window.
            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break
        cap.release()
        out.release()
        cv2.destroyAllWindows()

    for i in filenames:
        output_name = os.path.splitext(i)[0] + " rotated.mp4"


    cwd = os.chdir("./rotated")
    #we are using rotated videos
    cwd = os.getcwd()
    mp4files = [f for f in listdir(cwd) if isfile(join(cwd, f)) and os.path.splitext(f)[1] == ".mp4"]
    #Get all mp4 files

    task='Reaching' # Enter the name of your experiment Task
    experimenter='Donghan' # Enter the name of the experimenter
    video=mp4files # Enter the paths of your videos you want to grab frames from.
    now = datetime.datetime.now()

    try:
        path_config_file=deeplabcut.create_new_project(task,experimenter,video, working_directory='/home/donghan/DeepLabCut/data/rotated',copy_videos=True)
    #change the working directory to where you want the folders created.
    except:
        overwrite = input("Do you want to overwrite the folder since it already exists? y/n:")
        if overwrite == 'y':
            os.rmdir(task + '-' + experimenter + '-' + str(now.year) + '-' + str(now.month) + '-' + str(now.day))
            path_config_file=deeplabcut.create_new_project(task,experimenter,video, working_directory='/home/donghan/DeepLabCut/data/rotated',copy_videos=True)
        else:
            continue

# The function returns the path, where your project is.
# You could also enter this manually (e.g. if the project is already created and you want to pick up, where you stopped...)
#path_config_file = '/home/Mackenzie/Reaching/config.yaml' # Enter the path of the config file that was just created from the above step (check the folder)

    %matplotlib inline
    deeplabcut.extract_frames(path_config_file,'automatic',crop=False, userfeedback=False) #there are other ways to grab frames, such as by clustering 'kmeans'; please see the paper.
    #You can change the cropping to false, then delete the checkcropping part!
    #userfeedback: ask if users would like to continue or stop

    %gui wx
    deeplabcut.label_frames(path_config_file)

    deeplabcut.check_labels(path_config_file) #this creates a subdirectory with the frames + your labels

    deeplabcut.create_training_dataset(path_config_file)

    deeplabcut.train_network(path_config_file, shuffle=1, saveiters=200, displayiters=10)
#Other parameters include trainingsetindex=0,gputouse=None,max_snapshots_to_keep=5,autotune=False,maxiters=None
#Detailed function explanation can be found here https://github.com/AlexEMG/DeepLabCut/blob/efa95129061b1ba1535f7361fe76e9267568a156/deeplabcut/pose_estimation_tensorflow/training.py

    deeplabcut.evaluate_network(path_config_file)

    videofile_path = ['1035 SI_A, Aug 15, 13 17 7 rotated.mp4'] #Enter the list of videos to analyze.
    deeplabcut.analyze_videos(path_config_file,videofile_path)

    deeplabcut.create_labeled_video(path_config_file,videofile_path)
    Select videos from which to grab frames:
        Use videos with images from
        -Different sessions reflecting (if the case) varying light conditions, backgrounds, setups, and camera angles
        -Different individuals, especially if they look different (i.e., brown and black mice)
    
    In our case: different ages !
    
    The toolbox contains three methods for extracting frames, namely, by clustering based on visual content, by
    randomly sampling in a uniform way across time, or by manually grabbing frames of interest using a custom GUI.
    
    For the behaviors we have tested so far, a dataset of 50–200 frames gave good results
    """

    if "step_4" in stages_to_run or "data_selection" in stages_to_run:
        deeplabcut.extract_frames(config=config_path,
                                  mode='automatic', algo='kmeans', crop=False, userfeedback=False, cluster_step=1,
                                  cluster_resizewidth=30, cluster_color=False, opencv=True, slider_width=25)
        """
        The extracted frames from all the videos are stored in a separate subdirectory named after the
        video file’s name under the ‘labeled-data’ directory.
        
        When running the function extract_frames, if the parameter crop=True, then
        frames will be cropped to the interactive user feedback provided (which is then written to the
        config.yaml file). Upon calling extract_frames, it will ask the user to draw a boundingbox
        in the GUI
        
    The provided function selects frames from the videos in a temporally uniformly distributed way
        (uniform), by clustering based on visual appearance (kmeans), or by manual selection (Fig. 3).
        Uniform selection of frames works best for behaviors in which the postures vary in a temporally
        independent way across the video. However, some behaviors might be sparse, as in a case of
        reaching in which the reach and pull are very fast and the mouse is not moving much between
Exemple #28
0

# DLC Set up on blinking
# NOTE(review): `%matplotlib inline` and `%gui wx` below are IPython magics —
# this snippet only runs inside a Jupyter/IPython session, not as plain Python.

import deeplabcut
import matplotlib

task='whisk_only' # Enter the name of your experiment Task
experimenter='Guy' # Enter the name of the experimenter
video=['videos/animal_3_video_2_150fps_correct.mp4', 'videos/crush_19_01_07_animal_3.mp4'] # Enter the paths of your videos you want to grab frames from.

# Creates the project folder structure and copies the videos into it.
deeplabcut.create_new_project(task,experimenter,video, working_directory='dlc-blinking/whisk',copy_videos=True) #change the working directory to where you want the folders created.

%matplotlib inline
path_config_file = '/dlc-blinking/whisk/whisk_only-Guy-2019-02-01/config.yaml' # Enter the path of the config file that was just created from the above step (check the folder)
# Uniform-in-time automatic extraction; crop=True + checkcropping=True ask for
# interactive cropping confirmation before frames are grabbed.
deeplabcut.extract_frames(path_config_file,'automatic','uniform',crop=True, checkcropping=True, opencv=False) #there are other ways to grab frames, such as by clustering 'kmeans'; please see the paper. 


# changed the cropping dimensions in the config.yaml file
%gui wx
deeplabcut.label_frames(path_config_file)

# Labels have now been created

deeplabcut.check_labels(path_config_file) #this creates a subdirectory with the frames + your labels
# Reviewed the labels, they seem to be ok

# Downloads the pretrained ResNet weights and builds the training dataset:
deeplabcut.create_training_dataset(path_config_file)

# Training the dataset
Exemple #29
0
# Template DeepLabCut workflow — replace the quoted placeholders with real values.
os.chdir('change to your path')  # BUG FIX: typo 'chage' in the placeholder text
videos = glob.glob('*.mp4')  # all .mp4 files in the working directory (currently unused below)
learningVideo = ["add video name for learning"]  # video(s) the project is created from

# Create a fresh project; returns the path to the new project's config.yaml.
config_path = dlc.create_new_project('PJ name',
                                     'my name',
                                     learningVideo,
                                     working_directory='add your directory',
                                     copy_videos=False)

# If the project was already created, uncomment and set this instead.
# BUG FIX: this assignment was unconditional, so it always clobbered the real
# config path returned by create_new_project above with a placeholder string,
# breaking every step below. It is an *alternative*, per the comment.
#config_path = 'path to yaml file'

# Extract frames for labeling (automatic selection via k-means clustering).
dlc.extract_frames(config_path, 'automatic', 'kmeans')

# Label the extracted frames (opens the labeling GUI).
dlc.label_frames(config_path)

# check labels
#dlc.check_labels(config_path)

# Build the training dataset from the labeled frames.
dlc.create_training_dataset(config_path, num_shuffles=1)

# Start training the network.
dlc.train_network(config_path)

# evaluate the trained network
#dlc.evaluate_network(config_path, plotting=True)