Example #1
# Imports assumed for this snippet (DeepLabCut 2.x module layout):
import os

import numpy as np
import tensorflow as tf

from deeplabcut.pose_estimation_tensorflow.config import load_config
from deeplabcut.pose_estimation_tensorflow.nnet import predict
from deeplabcut.utils import auxiliaryfunctions


def load_model(cfg, shuffle=1, trainingsetindex=0, TFGPUinference=True, modelprefix=""):
    """

    Loads a tensorflow session with a DLC model from the associated configuration
    Return a tensorflow session with DLC model given cfg and shuffle

    Parameters:
    -----------
    cfg : dict
        Configuration read from the project's main config.yaml file

    shuffle : int, optional
        which shuffle to use

    trainingsetindex : int. optional
        which training fraction to use, identified by its index

    TFGPUinference : bool, optional
        use tensorflow inference model? default = True

    Returns:
    --------
    sess : tensorflow session
        tensorflow session with DLC model from the provided configuration, shuffle, and trainingsetindex

    checkpoint file path : string
        the path to the checkpoint file associated with the loaded model
    """

    ########################
    ### find snapshot to use
    ########################

    train_fraction = cfg["TrainingFraction"][trainingsetindex]
    model_folder = os.path.join(
        cfg["project_path"],
        str(
            auxiliaryfunctions.GetModelFolder(
                train_fraction, shuffle, cfg, modelprefix=modelprefix
            )
        ),
    )
    path_test_config = os.path.join(model_folder, "test", "pose_cfg.yaml")
    path_train_config = os.path.join(model_folder, "train", "pose_cfg.yaml")

    try:
        dlc_cfg = load_config(str(path_train_config))
    except FileNotFoundError:
        raise FileNotFoundError(
            "It seems the model for shuffle %s and trainFraction %s does not exist."
            % (shuffle, train_fraction)
        )

    # Check which snapshots are available and sort them by # iterations
    try:
        Snapshots = np.array(
            [
                fn.split(".")[0]
                for fn in os.listdir(os.path.join(model_folder, "train"))
                if "index" in fn
            ]
        )
    except FileNotFoundError:
        raise FileNotFoundError(
            "Snapshots not found! It seems the dataset for shuffle %s has not been trained/does not exist.\n Please train it before trying to export.\n Use the function 'train_network' to train the network for shuffle %s."
            % (shuffle, shuffle)
        )

    if len(Snapshots) == 0:
        raise FileNotFoundError(
            "The train folder for iteration %s and shuffle %s exists, but no snapshots were found.\n Please train this model before trying to export.\n Use the function 'train_network' to train the network for iteration %s shuffle %s."
            % (cfg["iteration"], shuffle, cfg["iteration"], shuffle)
        )

    if cfg["snapshotindex"] == "all":
        print(
            "Snapshotindex is set to 'all' in the config.yaml file. Changing snapshot index to -1!"
        )
        snapshotindex = -1
    else:
        snapshotindex = cfg["snapshotindex"]

    increasing_indices = np.argsort([int(m.split("-")[1]) for m in Snapshots])
    Snapshots = Snapshots[increasing_indices]

    ####################################
    ### Load and setup CNN part detector
    ####################################

    # Check if data already was generated:
    dlc_cfg["init_weights"] = os.path.join(
        model_folder, "train", Snapshots[snapshotindex]
    )
    trainingsiterations = (dlc_cfg["init_weights"].split(os.sep)[-1]).split("-")[-1]
    dlc_cfg["num_outputs"] = cfg.get("num_outputs", dlc_cfg.get("num_outputs", 1))
    dlc_cfg["batch_size"] = None

    # load network
    if TFGPUinference:
        sess, _, _ = predict.setup_GPUpose_prediction(dlc_cfg)
        output = ["concat_1"]
    else:
        sess, _, _ = predict.setup_pose_prediction(dlc_cfg)
        if dlc_cfg["location_refinement"]:
            output = ["Sigmoid", "pose/locref_pred/block4/BiasAdd"]
        else:
            output = ["Sigmoid", "pose/part_pred/block4/BiasAdd"]

    # name of the first operation in the default graph, i.e. the graph's input
    input = tf.get_default_graph().get_operations()[0].name

    return sess, input, output, dlc_cfg
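
A minimal usage sketch (the project path is hypothetical, reusing the imports at the top of this example):

cfg = auxiliaryfunctions.read_config("/analysis/project/reaching-task/config.yaml")
sess, input_name, output_names, dlc_cfg = load_model(cfg, shuffle=1, TFGPUinference=True)
print("Graph input:", input_name, "-> outputs:", output_names)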
Example #2
# Fragment: assumes that Snapshots, snapshotindex, modelfolder, dlc_config, cfg,
# predict, and an open cv2.VideoCapture `cap_C` are defined by the surrounding code.
increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
Snapshots = Snapshots[increasing_indices]
print("Using %s" % Snapshots[snapshotindex], "for model", modelfolder)
nframes = 300
tf.reset_default_graph()
# Check if data already was generated:
dlc_config['init_weights'] = os.path.join(modelfolder, 'train',
                                          Snapshots[snapshotindex])
trainingsiterations = (dlc_config['init_weights'].split(
    os.sep)[-1]).split('-')[-1]
# Update number of outputs and batch size
dlc_config['num_outputs'] = cfg.get('num_outputs',
                                    dlc_config.get('num_outputs', 1))
batchsize = 1
dlc_config['batch_size'] = cfg['batch_size']  # note: the frame buffer below is sized with the local `batchsize`, not this value
sess, inputs, outputs = predict.setup_GPUpose_prediction(dlc_config)
pose_tensor = predict.extract_GPUprediction(outputs, dlc_config)
PredictedData = np.zeros(
    (nframes,
     dlc_config['num_outputs'] * 3 * len(dlc_config['all_joints_names'])))
counter = 0
step = max(10, int(nframes / 100))
x_range = list(range(0, (3 * len(dlc_config['all_joints_names'])), 3))
y_range = list(range(1, (3 * len(dlc_config['all_joints_names'])), 3))
batch_ind = 0
batch_num = 0
ny, nx = int(cap_C.get(4)), int(cap_C.get(3))  # 4 = CAP_PROP_FRAME_HEIGHT, 3 = CAP_PROP_FRAME_WIDTH
frames = np.empty((batchsize, ny, nx, 3),
                  dtype='ubyte')  # this keeps all frames in a batch
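
The fragment stops after allocating the batch buffer. A minimal sketch of the per-frame loop that would typically follow (an assumption, not part of the original snippet; `sess`, `inputs`, and `pose_tensor` come from the setup above):

while counter < nframes:
    ret, frame = cap_C.read()
    if not ret:  # end of video reached early
        break
    frames[batch_ind] = frame[..., ::-1]  # OpenCV delivers BGR; flip to RGB
    batch_ind += 1
    counter += 1
    if batch_ind == batchsize:
        # run the pose tensor on the filled batch and store the flattened result
        pose = sess.run(pose_tensor, feed_dict={inputs: frames})
        PredictedData[counter - batchsize:counter] = pose.reshape(batchsize, -1)
        batch_ind = 0
        batch_num += 1
    if counter % step == 0:
        print("Processed %d of %d frames" % (counter, nframes))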
Example #3
def analyze_videos(config, videos, videotype='avi', shuffle=1, trainingsetindex=0,
                   gputouse=None, save_as_csv=False, destfolder=None, batchsize=None,
                   cropping=None, get_nframesfrommetadata=True, TFGPUinference=True,
                   dynamic=(False, .5, 10)):
    """
    Makes predictions based on a trained network. The index of the trained network is specified by parameters in the config file (in particular the variable 'snapshotindex').

    You can crop the video (before analysis) by setting 'cropping': True and 'x1', 'x2', 'y1', 'y2' in the config file. The same cropping parameters will then be used for creating the video.

    Output: The labels are stored as a MultiIndex Pandas Array, which contains the name of the network, body part name, (x, y) label position
            in pixels, and the likelihood for each frame per body part. These arrays are stored in an efficient Hierarchical Data Format (HDF)
            in the same directory where the video is stored. However, if the flag save_as_csv is set to True, the data can also be exported in
            comma-separated values format (.csv), which in turn can be imported in many programs, such as MATLAB, R, Prism, etc.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a path to the directory, where all the videos with same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory. Only videos with this extension are analyzed. The default is ``avi``.

    shuffle: int, optional
        An integer specifying the shuffle index of the training dataset used for training the network. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    gputouse: int, optional
        Natural number indicating the number of your GPU (see number in nvidia-smi). If you do not have a GPU put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``; if provided it must be either ``True`` or ``False``

    destfolder: string, optional
        Specifies the destination folder for analysis data (default is the path of the video). Note that for subsequent analysis this
        folder also needs to be passed.

    batchsize: int, default from pose_cfg.yaml
        Change batch size for inference; if given overwrites value in pose_cfg.yaml

    cropping: list, optional (default = None)
        List of cropping coordinates [x1, x2, y1, y2] that, if given, are applied to all videos
        and overwrite the cropping parameters from the config file (they are not saved back to it).

    TFGPUinference: bool, default: True
        Perform inference on GPU with Tensorflow code. Introduced in "Pretraining boosts out-of-domain robustness for pose estimation" by
        Alexander Mathis, Mert Yüksekgönül, Byron Rogers, Matthias Bethge, Mackenzie W. Mathis Source: https://arxiv.org/abs/1909.11229

    dynamic: triple containing (state, detectionthreshold, margin)
        If the state is true, then dynamic cropping will be performed. That means that if an object is detected (i.e. any body part > detectionthreshold),
        then object boundaries are computed according to the smallest/largest x position and smallest/largest y position of all body parts. This window is
        expanded by the margin and from then on only the posture within this crop is analyzed (until the object is lost, i.e. all body parts < detectionthreshold). The
        current position is utilized for updating the crop window for the next frame (this is why the margin is important and should be set large
        enough given the movement of the animal).

    Examples
    --------

    Windows example for analyzing 1 video
    >>> deeplabcut.analyze_videos('C:\\myproject\\reaching-task\\config.yaml',['C:\\yourusername\\rig-95\\Videos\\reachingvideo1.avi'])
    --------

    If you want to analyze only 1 video
    >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])
    --------

    If you want to analyze all videos of type avi in a folder:
    >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos'],videotype='.avi')
    --------

    If you want to analyze multiple videos
    >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
    --------

    If you want to analyze multiple videos with shuffle = 2
    >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'], shuffle=2)
    --------

    If you want to analyze multiple videos with shuffle = 2 and save results as an additional csv file too
    >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'], shuffle=2,save_as_csv=True)
    --------

    """
    if 'TF_CUDNN_USE_AUTOTUNE' in os.environ:
        del os.environ['TF_CUDNN_USE_AUTOTUNE']  # was potentially set during training

    if gputouse is not None:  # gpu selection
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)

    tf.reset_default_graph()
    start_path = os.getcwd()  # record cwd to return to this directory in the end

    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]

    if cropping is not None:
        cfg['cropping'] = True
        cfg['x1'], cfg['x2'], cfg['y1'], cfg['y2'] = cropping
        print("Overwriting cropping parameters:", cropping)
        print("These are used for all videos, but won't be saved to the cfg file.")

    modelfolder = os.path.join(cfg["project_path"], str(auxiliaryfunctions.GetModelFolder(trainFraction, shuffle, cfg)))
    path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
    try:
        dlc_cfg = load_config(str(path_test_config))
    except FileNotFoundError:
        raise FileNotFoundError("It seems the model for shuffle %s and trainFraction %s does not exist." % (shuffle, trainFraction))

    # Check which snapshots are available and sort them by # iterations
    try:
        Snapshots = np.array([fn.split('.')[0] for fn in os.listdir(os.path.join(modelfolder, 'train')) if "index" in fn])
    except FileNotFoundError:
        raise FileNotFoundError("Snapshots not found! It seems the dataset for shuffle %s has not been trained/does not exist.\n Please train it before using it to analyze videos.\n Use the function 'train_network' to train the network for shuffle %s." % (shuffle, shuffle))

    if cfg['snapshotindex'] == 'all':
        print("Snapshotindex is set to 'all' in the config.yaml file. Running video analysis with all snapshots is very costly! Use the function 'evaluate_network' to choose the best snapshot. For now, changing snapshot index to -1!")
        snapshotindex = -1
    else:
        snapshotindex = cfg['snapshotindex']

    increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
    Snapshots = Snapshots[increasing_indices]

    print("Using %s" % Snapshots[snapshotindex], "for model", modelfolder)

    ##################################################
    # Load and setup CNN part detector
    ##################################################

    # Check if data already was generated:
    dlc_cfg['init_weights'] = os.path.join(modelfolder, 'train', Snapshots[snapshotindex])
    trainingsiterations = (dlc_cfg['init_weights'].split(os.sep)[-1]).split('-')[-1]
    # Update number of outputs and batch size
    dlc_cfg['num_outputs'] = cfg.get('num_outputs', dlc_cfg.get('num_outputs', 1))

    if batchsize is None:
        # update batchsize (based on parameters in config.yaml)
        dlc_cfg['batch_size'] = cfg['batch_size']
    else:
        dlc_cfg['batch_size'] = batchsize
        cfg['batch_size'] = batchsize

    if dynamic[0]:  # state is True
        # (state, detectionthreshold, margin) = dynamic
        print("Starting analysis in dynamic cropping mode with parameters:", dynamic)
        dlc_cfg['num_outputs'] = 1
        TFGPUinference = False
        dlc_cfg['batch_size'] = 1
        print("Switching batchsize to 1, num_outputs (per animal) to 1 and TFGPUinference to False (all these features are not supported in this mode).")

    # Name for scorer:
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg, shuffle, trainFraction, trainingsiterations=trainingsiterations)
    if dlc_cfg['num_outputs'] > 1:
        if TFGPUinference:
            print("Switching to numpy-based keypoint extraction code, as multiple point extraction is not supported by TF code currently.")
            TFGPUinference = False
        print("Extracting ", dlc_cfg['num_outputs'], "instances per bodypart")
        xyz_labs_orig = ['x', 'y', 'likelihood']
        suffix = [str(s + 1) for s in range(dlc_cfg['num_outputs'])]
        suffix[0] = ''  # first one has empty suffix for backwards compatibility
        xyz_labs = [x + s for s in suffix for x in xyz_labs_orig]
    else:
        xyz_labs = ['x', 'y', 'likelihood']

    if TFGPUinference:
        sess, inputs, outputs = predict.setup_GPUpose_prediction(dlc_cfg)
    else:
        sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)

    pdindex = pd.MultiIndex.from_product([[DLCscorer],
                                          dlc_cfg['all_joints_names'],
                                          xyz_labs],
                                         names=['scorer', 'bodyparts', 'coords'])

    ##################################################
    # Datafolder
    ##################################################
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    if len(Videos) > 0:
        # looping over videos
        for video in Videos:
            DLCscorer = AnalyzeVideo(video, DLCscorer, DLCscorerlegacy, trainFraction,
                                     cfg, dlc_cfg, sess, inputs, outputs, pdindex,
                                     save_as_csv, destfolder, TFGPUinference, dynamic)

        os.chdir(str(start_path))
        print("The videos are analyzed. Now your research can truly start!\n You can create labeled videos with 'create_labeled_video'.")
        print("If the tracking is not satisfactory for some videos, consider expanding the training set. You can use the function 'extract_outlier_frames' to extract any outlier frames!")
        return DLCscorer  # note: this is either DLCscorer or DLCscorerlegacy depending on what was used!
    else:
        print("No videos found. Please check your path!")
        return DLCscorer
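
The constructor below appears to be excerpted from a wrapper class that loads a DLC model once and keeps the TensorFlow session for repeated single-frame inference. The enclosing class is not part of the original snippet, so a minimal stub is assumed here:

class PoseEstimator:  # hypothetical name; only the constructor below was part of the snippet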
  def __init__(self, config, shuffle=1, trainingsetindex=0,
               gputouse=None, rgb=True):
    '''
    The constructor loads the deep learning model from the specified path.
    config - path where the model config.yaml file is stored
    shuffle - shuffle index of the training dataset to use. It was set to 1
              inside the core library, so keep it at 1 unless you have a
              strong need to change it.
    trainingsetindex - index into the TrainingFraction list in config.yaml;
                       always set it to 0 here. Used deep inside the core library.
    gputouse - GPU card number to use. The algorithm uses the GPU by default
               when the tensorflow GPU version is installed. If the GPU is not
               getting utilised, this is a good parameter to check; but setting
               this to None on our present system still utilises the GPU.
    rgb - set to True if we use an RGB image. For grayscale images, which is
          our case, this is always False.
    '''

    if 'TF_CUDNN_USE_AUTOTUNE' in os.environ:
        del os.environ['TF_CUDNN_USE_AUTOTUNE']  # was potentially set during training

    if gputouse is not None:  # gpu selection
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)

    vers = (tf.__version__).split('.')

    if int(vers[0]) == 1 and int(vers[1]) > 12:
        TF = tf.compat.v1
    else:
        TF = tf

    TF.reset_default_graph()

    cfg = auxiliaryfunctions.read_config(config)
    cfg['batch_size'] = 1
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    modelfolder = os.path.join(cfg["project_path"],
                               str(auxiliaryfunctions.GetModelFolder(
                                   trainFraction, shuffle, cfg)))
    path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
    try:
        dlc_cfg = load_config(str(path_test_config))
    except FileNotFoundError:
        raise FileNotFoundError("It seems the model for shuffle %s and "
                                "trainFraction %s does not exist." % (shuffle,
                                                                      trainFraction))
    # Check which snapshots are available and sort them by # iterations
    try:
        Snapshots = np.array([fn.split('.')[0] for fn in
                              os.listdir(os.path.join(modelfolder,
                                                      'train')) if "index" in fn])
    except FileNotFoundError:
        raise FileNotFoundError("Snapshots not found! It seems the dataset "
                                "for shuffle %s has not been trained/does not "
                                "exist.\n Please train it before using it to "
                                "analyze videos.\n Use the "
                                "function 'train_network' to train the "
                                "network for shuffle %s." % (shuffle, shuffle))

    if cfg['snapshotindex'] == 'all':
        print("Snapshotindex is set to 'all' in the config.yaml file. "
              "Running video analysis with all snapshots is very costly! "
              "Use the function 'evaluate_network' to choose the best "
              "snapshot. For now, changing snapshot index to -1!")
        snapshotindex = -1
    else:
        snapshotindex = cfg['snapshotindex']

    increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
    Snapshots = Snapshots[increasing_indices]

    print("Using %s" % Snapshots[snapshotindex], "for model", modelfolder)

    ##################################################
    # Load and setup CNN part detector
    ##################################################

    # Check if data already was generated:
    dlc_cfg['init_weights'] = os.path.join(modelfolder, 'train', Snapshots[snapshotindex])
    trainingsiterations = (dlc_cfg['init_weights'].split(os.sep)[-1]).split('-')[-1]

    #update batchsize (based on parameters in config.yaml)
    dlc_cfg['batch_size'] = cfg['batch_size']

    # update number of outputs and adjust pandas indices
    dlc_cfg['num_outputs'] = cfg.get('num_outputs', 1)

    # Load and set up the network for GPU inference:
    self.sess, self.inputs, self.outputs = predict.setup_GPUpose_prediction(dlc_cfg)

    self.rgb = rgb
    self.cfg = cfg
    self.dlc_cfg = dlc_cfg
    self.pose_tensor = predict.extract_GPUprediction(self.outputs, self.dlc_cfg)

    if self.cfg['cropping']:
        # `checkcropping` and the capture object `cap` are assumed to be
        # provided by the surrounding module; neither is defined in this snippet.
        self.ny, self.nx = checkcropping(self.cfg, cap)
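
The snippet ends before any inference method; a minimal sketch of how the stored session and pose tensor would typically be used (`get_pose` is an assumed name, not part of the original class):

  def get_pose(self, frame):
    # Hypothetical helper: run the loaded network on a single frame and
    # return the flattened predictions.
    if not self.rgb:
      frame = np.repeat(frame[..., None], 3, axis=2)  # grayscale -> 3 channels
    batch = np.expand_dims(frame, axis=0).astype('ubyte')  # batch of one
    pose = self.sess.run(self.pose_tensor, feed_dict={self.inputs: batch})
    return pose.flatten()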