def AnalyzeVideo(video,DLCscorer,trainFraction,cfg,dlc_cfg,sess,inputs, outputs,pdindex,save_as_csv, destfolder=None):
    ''' Helper function for analyzing a video '''
    print("Starting to analyze % ", video)
    vname = Path(video).stem
    if destfolder is None:
        destfolder = str(Path(video).parents[0])
    dataname = os.path.join(destfolder,vname + DLCscorer + '.h5')
    try:
        # Attempt to load data...
        pd.read_hdf(dataname)
        print("Video already analyzed!", dataname)
    except FileNotFoundError:
        print("Loading ", video)
        cap = cv2.VideoCapture(video)

        # Use the named OpenCV property constants instead of magic indices; see
        # https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
        fps = cap.get(cv2.CAP_PROP_FPS)
        nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        duration = nframes * 1. / fps
        size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))

        ny,nx=size
        print("Duration of video [s]: ", round(duration,2), ", recorded with ", round(fps,2),"fps!")
        print("Overall # of frames: ", nframes," found with (before cropping) frame dimensions: ", nx,ny)
        start = time.time()

        print("Starting to extract posture")
        if int(dlc_cfg["batch_size"])>1:
            PredicteData,nframes=GetPoseF(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes,int(dlc_cfg["batch_size"]))
        else:
            PredicteData,nframes=GetPoseS(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes)

        stop = time.time()

        if cfg['cropping']:
            coords=[cfg['x1'],cfg['x2'],cfg['y1'],cfg['y2']]
        else:
            coords=[0, nx, 0, ny]

        dictionary = {
            "start": start,
            "stop": stop,
            "run_duration": stop - start,
            "Scorer": DLCscorer,
            "DLC-model-config file": dlc_cfg,
            "fps": fps,
            "batch_size": dlc_cfg["batch_size"],
            "num_outputs": dlc_cfg["num_outputs"],
            "frame_dimensions": (ny, nx),
            "nframes": nframes,
            "iteration (active-learning)": cfg["iteration"],
            "training set fraction": trainFraction,
            "cropping": cfg['cropping'],
            "cropping_parameters": coords
        }
        metadata = {'data': dictionary}

        print("Saving results in %s..." %(Path(video).parents[0]))
        auxiliaryfunctions.SaveData(PredicteData[:nframes,:], metadata, dataname, pdindex, range(nframes),save_as_csv)
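A minimal sketch of how this helper might be driven, mirroring the setup calls that appear in the other examples on this page (load_config, predict.setup_pose_prediction, GetScorerName). The import paths assume the DeepLabCut 2.x TensorFlow layout, and all paths and values are illustrative assumptions, not part of this snippet:

import pandas as pd
from deeplabcut.utils import auxiliaryfunctions
from deeplabcut.pose_estimation_tensorflow.config import load_config
from deeplabcut.pose_estimation_tensorflow.nnet import predict

# Hypothetical project paths/settings, for illustration only.
cfg = auxiliaryfunctions.read_config('/analysis/project/reaching-task/config.yaml')
trainFraction = cfg['TrainingFraction'][0]
dlc_cfg = load_config('/path/to/model/test/pose_cfg.yaml')  # the model's test pose_cfg.yaml
sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
DLCscorer = auxiliaryfunctions.GetScorerName(cfg, 1, trainFraction)  # shuffle=1
pdindex = pd.MultiIndex.from_product(
    [[DLCscorer], dlc_cfg['all_joints_names'], ['x', 'y', 'likelihood']],
    names=['scorer', 'bodyparts', 'coords'])
AnalyzeVideo('/analysis/project/videos/trial.avi', DLCscorer, trainFraction, cfg,
             dlc_cfg, sess, inputs, outputs, pdindex, save_as_csv=True)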
Example #2
def AnalyzeVideo(video,DLCscorer,cfg,dlc_cfg,sess,inputs, outputs,pdindex,save_as_csv):
    #from moviepy.editor import VideoFileClip
    
    print(video)
    #videotype = Path(video).suffix
    print("Starting % ", video)
    vname = Path(video).stem
    dataname = os.path.join(str(Path(video).parents[0]),vname + DLCscorer + '.h5')
    try:
        # Attempt to load data...
        pd.read_hdf(dataname)
        print("Video already analyzed!", dataname)
    except FileNotFoundError:
        print("Loading ", video)
        cap = cv2.VideoCapture(video)

        # Use the named OpenCV property constants instead of magic indices; see
        # https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
        fps = cap.get(cv2.CAP_PROP_FPS)
        nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        duration = nframes * 1. / fps
        size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
        
        ny,nx=size
        print("Duration of video [s]: ", round(duration,2), ", recorded with ", round(fps,2),"fps!")
        print("Overall # of frames: ", nframes,"without cropped frame dimensions: ", nx,ny)

        start = time.time()

        print("Starting to extract posture")
        if int(dlc_cfg["batch_size"])>1:
            PredicteData,nframes=GetPoseF(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes,int(dlc_cfg["batch_size"]))
        else:
            PredicteData,nframes=GetPoseS(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes)

        stop = time.time()
        
        if cfg['cropping']:
            coords=[cfg['x1'],cfg['x2'],cfg['y1'],cfg['y2']]
        else:
            coords=[0, nx, 0, ny] 
            
        dictionary = {
            "start": start,
            "stop": stop,
            "run_duration": stop - start,
            "Scorer": DLCscorer,
            "config file": dlc_cfg,
            "fps": fps,
            "batch_size": dlc_cfg["batch_size"],
            "frame_dimensions": (ny, nx),
            "nframes": nframes,
            "cropping": cfg['cropping'],
            "cropping_parameters": coords
        }
        metadata = {'data': dictionary}

        print("Saving results in %s..." %(Path(video).parents[0]))
        auxiliaryfunctions.SaveData(PredicteData[:nframes,:], metadata, dataname, pdindex, range(nframes),save_as_csv)
Example #3
                df_3d.to_csv(str(output_filename + ".csv"))

            print("Triangulated data for video", video_list[i])
            print("Results are saved under: ", destfolder)
            # Reset destfolder to None so it can be updated for the next pair of videos
            if destfolder == str(Path(video).parents[0]):
                destfolder = None

    if len(video_list) > 0:
        print("All videos were analyzed...")
        print(
            "Now you can create 3D video(s) using deeplabcut.create_labeled_video_3d"
        )


"""
ToDo: speed up func. below and check only for one cam individually
PredicteData = np.zeros((nframes, 3 * len(dlc_cfg['all_joints_names'])))
PredicteData[batch_num*batchsize:batch_num*batchsize+batch_ind, :] = pose[:batch_ind,:]
pdindex = pd.MultiIndex.from_product([[DLCscorer], dlc_cfg['all_joints_names'], ['x', 'y', 'likelihood']],names=['scorer', 'bodyparts', 'coords'])
auxiliaryfunctions.SaveData(PredicteData[:nframes,:], metadata, dataname, pdindex, framelist,save_as_csv)
"""


def undistort_points(config, dataframe, camera_pair):
    cfg_3d = auxiliaryfunctions.read_config(config)
    (
        img_path,
        path_corners,
        path_camera_matrix,
        path_undistort,
Example #4
def analyze_time_lapse_frames(config,directory,frametype='.png',shuffle=1,trainingsetindex=0,gputouse=None,save_as_csv=False):
    """
    Analyzes all images (of type = frametype) in a folder and stores the output in one file.
    
    You can crop the frames (before analysis), by changing 'cropping'=True and setting 'x1','x2','y1','y2' in the config file. 
    
    Output: The labels are stored as MultiIndex Pandas Array, which contains the name of the network, body part name, (x, y) label position
            in pixels, and the likelihood for each frame per body part. These arrays are stored in an efficient Hierarchical Data Format (HDF)
            in the same directory where the video is stored. However, if the flag save_as_csv is set to True, the data can also be exported in
            comma-separated values format (.csv), which in turn can be imported in many programs, such as MATLAB, R, Prism, etc.
    
    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    directory: string
        Full path to directory containing the frames that shall be analyzed

    frametype: string, optional
        Checks for the file extension of the frames. Only images with this extension are analyzed. The default is ``.png``

    shuffle: int, optional
        An integer specifying the shuffle index of the training dataset used for training the network. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).
    
    gputouse: int, optional
        Device number of your GPU (see the numbers in nvidia-smi). If you do not have a GPU, put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``; if provided it must be either ``True`` or ``False``

    Examples
    --------
    If you want to analyze all frames in /analysis/project/timelapseexperiment1
    >>> deeplabcut.analyze_time_lapse_frames('/analysis/project/reaching-task/config.yaml','/analysis/project/timelapseexperiment1')
    --------

    If the frames are of type .bmp instead of the default .png
    >>> deeplabcut.analyze_time_lapse_frames('/analysis/project/reaching-task/config.yaml','/analysis/project/timelapseexperiment1', frametype='.bmp')
    --------
    
    Note: for test purposes one can extract all frames from a video with ffmpeg, e.g. ffmpeg -i testvideo.avi thumb%04d.png
    """
    if 'TF_CUDNN_USE_AUTOTUNE' in os.environ:
        del os.environ['TF_CUDNN_USE_AUTOTUNE'] #was potentially set during training
    
    tf.reset_default_graph()
    start_path=os.getcwd() #record cwd to return to this directory in the end
    
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    modelfolder=os.path.join(cfg["project_path"],str(auxiliaryfunctions.GetModelFolder(trainFraction,shuffle,cfg)))
    path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
    try:
        dlc_cfg = load_config(str(path_test_config))
    except FileNotFoundError:
        raise FileNotFoundError("It seems the model for shuffle %s and trainFraction %s does not exist."%(shuffle,trainFraction))

    # Check which snapshots are available and sort them by # iterations
    try:
        Snapshots = np.array([fn.split('.')[0] for fn in os.listdir(os.path.join(modelfolder, 'train')) if "index" in fn])
    except FileNotFoundError:
        raise FileNotFoundError("Snapshots not found! It seems the dataset for shuffle %s has not been trained/does not exist.\n Please train it before using it to analyze videos.\n Use the function 'train_network' to train the network for shuffle %s." % (shuffle, shuffle))

    if cfg['snapshotindex'] == 'all':
        print("Snapshotindex is set to 'all' in the config.yaml file. Running video analysis with all snapshots is very costly! Use the function 'evaluate_network' to choose the best the snapshot. For now, changing snapshot index to -1!")
        snapshotindex = -1
    else:
        snapshotindex=cfg['snapshotindex']
        
    increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
    Snapshots = Snapshots[increasing_indices]
    
    print("Using %s" % Snapshots[snapshotindex], "for model", modelfolder)

    ##################################################
    # Load and setup CNN part detector
    ##################################################

    # Check if data already was generated:
    dlc_cfg['init_weights'] = os.path.join(modelfolder , 'train', Snapshots[snapshotindex])
    trainingsiterations = (dlc_cfg['init_weights'].split(os.sep)[-1]).split('-')[-1]
    
    #update batchsize (based on parameters in config.yaml)
    dlc_cfg['batch_size']=cfg['batch_size'] 
    
    # Name for scorer:
    DLCscorer = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction,trainingsiterations=trainingsiterations)
    if gputouse is not None:  # GPU selection; must happen before the TF session is created
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)

    sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
    pdindex = pd.MultiIndex.from_product([[DLCscorer], dlc_cfg['all_joints_names'], ['x', 'y', 'likelihood']], names=['scorer', 'bodyparts', 'coords'])
    
    ##################################################
    # Loading the images
    ##################################################
    # Check if the input is a directory
    if os.path.isdir(directory):
        # Analyze all the frames in the directory.
        print("Analyzing all frames in the directory: ", directory)
        os.chdir(directory)
        framelist=np.sort([fn for fn in os.listdir(os.curdir) if (frametype in fn)])

        vname = Path(directory).stem
        dataname = os.path.join(directory,vname + DLCscorer + '.h5')
        try:
            # Attempt to load data...
            pd.read_hdf(dataname)
            print("Frames already analyzed!", dataname)
        except FileNotFoundError:
            nframes = len(framelist)
            if nframes>1:
                start = time.time()
                
                PredicteData,nframes,nx,ny=GetPosesofFrames(cfg,dlc_cfg, sess, inputs, outputs,directory,framelist,nframes,dlc_cfg['batch_size'])
                stop = time.time()
                
                if cfg['cropping']:
                    coords=[cfg['x1'],cfg['x2'],cfg['y1'],cfg['y2']]
                else:
                    coords=[0, nx, 0, ny] 
                    
                dictionary = {
                    "start": start,
                    "stop": stop,
                    "run_duration": stop - start,
                    "Scorer": DLCscorer,
                    "config file": dlc_cfg,
                    "batch_size": dlc_cfg["batch_size"],
                    "frame_dimensions": (ny, nx),
                    "nframes": nframes,
                    "cropping": cfg['cropping'],
                    "cropping_parameters": coords
                }
                metadata = {'data': dictionary}
        
                print("Saving results in %s..." %(directory))
                
                auxiliaryfunctions.SaveData(PredicteData[:nframes,:], metadata, dataname, pdindex, framelist,save_as_csv)
                print("The folder was analyzed. Now your research can truly start!")
                print("If the tracking is not satisfactory for some frome, consider expanding the training set.")
            else:
                print("No frames were found. Consider changing the path or the frametype.")
    
    os.chdir(str(start_path))
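The cropping branch seen in every helper above simply slices each frame with the four config values before inference. A minimal sketch of that mapping, assuming a frame held as a NumPy array (the config values are made up):

import numpy as np

cfg = {'cropping': True, 'x1': 100, 'x2': 500, 'y1': 50, 'y2': 350}  # hypothetical config.yaml values
frame = np.zeros((480, 640, 3), dtype=np.uint8)  # ny x nx x channels

if cfg['cropping']:
    frame = frame[cfg['y1']:cfg['y2'], cfg['x1']:cfg['x2']]  # rows are y, columns are x
print(frame.shape)  # (300, 400, 3)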
Example #5
def AnalyzeVideo(video,DLCscorer,DLCscorerlegacy,trainFraction,cfg,dlc_cfg,sess,inputs, outputs,pdindex,save_as_csv, destfolder=None,TFGPUinference=True,dynamic=(False,.5,10)):
    ''' Helper function for analyzing a video. '''
    print("Starting to analyze % ", video)
    vname = Path(video).stem
    if destfolder is None:
        destfolder = str(Path(video).parents[0])

    notanalyzed,dataname, DLCscorer=auxiliaryfunctions.CheckifNotAnalyzed(destfolder,vname,DLCscorer,DLCscorerlegacy)
    if notanalyzed:
        print("Loading ", video)
        cap = cv2.VideoCapture(video)
        if not cap.isOpened():
            raise IOError('Video could not be opened. Please check the file path and integrity.')
        # Use the named OpenCV property constants instead of magic indices; see
        # https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
        fps = cap.get(cv2.CAP_PROP_FPS)
        nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        duration = nframes * 1. / fps
        size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))

        ny,nx=size
        print("Duration of video [s]: ", round(duration,2), ", recorded with ", round(fps,2),"fps!")
        print("Overall # of frames: ", nframes," found with (before cropping) frame dimensions: ", nx,ny)

        dynamic_analysis_state, detectionthreshold, margin = dynamic
        start = time.time()
        print("Starting to extract posture")
        if dynamic_analysis_state:
            PredictedData,nframes=GetPoseDynamic(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes,detectionthreshold,margin)
            #GetPoseF_GTF(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes,int(dlc_cfg["batch_size"]))
        else:
            if int(dlc_cfg["batch_size"])>1:
                if TFGPUinference:
                    PredictedData,nframes=GetPoseF_GTF(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes,int(dlc_cfg["batch_size"]))
                else:
                    PredictedData,nframes=GetPoseF(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes,int(dlc_cfg["batch_size"]))
            else:
                if TFGPUinference:
                    PredictedData,nframes=GetPoseS_GTF(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes)
                else:
                    PredictedData,nframes=GetPoseS(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes)

        stop = time.time()
        if cfg['cropping']:
            coords=[cfg['x1'],cfg['x2'],cfg['y1'],cfg['y2']]
        else:
            coords=[0, nx, 0, ny]

        dictionary = {
            "start": start,
            "stop": stop,
            "run_duration": stop - start,
            "Scorer": DLCscorer,
            "DLC-model-config file": dlc_cfg,
            "fps": fps,
            "batch_size": dlc_cfg["batch_size"],
            "frame_dimensions": (ny, nx),
            "nframes": nframes,
            "iteration (active-learning)": cfg["iteration"],
            "training set fraction": trainFraction,
            "cropping": cfg['cropping'],
            "cropping_parameters": coords
            #"gpu_info": device_lib.list_local_devices()
        }
        metadata = {'data': dictionary}

        print("Saving results in %s..." %(Path(video).parents[0]))
        auxiliaryfunctions.SaveData(PredictedData[:nframes,:], metadata, dataname, pdindex, range(nframes),save_as_csv)
        return DLCscorer
    else:
        return DLCscorer
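The dynamic=(state, detectionthreshold, margin) triple drives GetPoseDynamic: with dynamic cropping, once the animal is detected (body-part likelihoods above the threshold), subsequent frames are cropped to a bounding box around the previous detections, expanded by margin pixels. A rough sketch of that bounding-box step (an illustration, not the actual GetPoseDynamic implementation):

import numpy as np

def dynamic_crop_box(pose, detectionthreshold, margin, nx, ny):
    # pose: flat array [x0, y0, p0, x1, y1, p1, ...] for one frame
    x, y, p = pose[0::3], pose[1::3], pose[2::3]
    if np.any(p > detectionthreshold):  # animal detected: crop around it
        x1 = max(int(np.min(x)) - margin, 0)
        x2 = min(int(np.max(x)) + margin, nx)
        y1 = max(int(np.min(y)) - margin, 0)
        y2 = min(int(np.max(y)) + margin, ny)
        return x1, x2, y1, y2
    return 0, nx, 0, ny  # otherwise fall back to the full frame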
Example #6
                df_3d.to_csv(str(output_filename + '.csv'))

            print("Triangulated data for video", vname)
            print("Results are saved under: ", destfolder)

    if len(video_list) > 0:
        print("All videos were analyzed...")
        print(
            "Now you can create 3D video(s) using deeplabcut.create_labeled_video_3d"
        )


'''
ToDo: speed up func. below and check only for one cam individually
PredicteData = np.zeros((nframes, 3 * len(dlc_cfg['all_joints_names'])))
PredicteData[batch_num*batchsize:batch_num*batchsize+batch_ind, :] = pose[:batch_ind,:]
pdindex = pd.MultiIndex.from_product([[DLCscorer], dlc_cfg['all_joints_names'], ['x', 'y', 'likelihood']],names=['scorer', 'bodyparts', 'coords'])
auxiliaryfunctions.SaveData(PredicteData[:nframes,:], metadata, dataname, pdindex, framelist,save_as_csv)
'''


def undistort_points(config, dataframe, camera_pair, destfolder):
    cfg_3d = auxiliaryfunctions.read_config(config)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(
        cfg_3d)
    ''' 
    path_undistort = destfolder
    filename_cam1 = Path(dataframe[0]).stem
Example #7
def _analyze_frame_store(
    cfg: dict,
    frame_store_path: Path,
    video_name: Optional[str],
    dlc_scorer: str,
    dlc_scorer_legacy: str,
    predictor_cls: Type[Predictor],
    multi_output_format: str,
    num_outputs: int,
    train_frac: str,
    save_as_csv: bool,
    predictor_settings: Optional[Dict[str, Any]],
) -> str:
    # Check if the data was analyzed yet...
    v_name_sanitized = (Path(video_name).resolve().stem
                        if video_name is not None else "unknownVideo")
    print(v_name_sanitized)
    not_analyzed, data_name, dlc_scorer = auxiliaryfunctions.CheckifNotAnalyzed(
        str(frame_store_path.parent), v_name_sanitized, dlc_scorer,
        dlc_scorer_legacy)

    if not_analyzed:
        # Read the frame store into memory:
        with frame_store_path.open("rb") as fb:
            print(f"Processing '{frame_store_path.name}'")
            start = time.time()

            # Read in the header, setup the settings.
            try:
                frame_reader = frame_store_fmt.DLCFSReader(fb)
            except ValueError:
                frame_reader = h5_frame_store_fmt.DLCH5FSReader(fb)

            (
                num_f,
                f_h,
                f_w,
                f_rate,
                stride,
                vid_h,
                vid_w,
                off_y,
                off_x,
                bp_lst,
            ) = frame_reader.get_header().to_list()

            pd_index = GetPandasHeader(bp_lst, num_outputs,
                                       multi_output_format, dlc_scorer)

            predictor_settings = GetPredictorSettings(cfg, predictor_cls,
                                                      predictor_settings)

            video_metadata = {
                "fps": f_rate,
                "duration": float(num_f) / f_rate,
                "size": (vid_h, vid_w),
                "h5-file-name": data_name,
                # This may be None if we were unable to find the video...
                "orig-video-path": str(video_name) if (video_name is not None) else None,
                "cropping-offset": None if (off_x is None or off_y is None) else (off_y, off_x),
                "dotsize": cfg["dotsize"],
                "colormap": cfg["colormap"],
                "alphavalue": cfg["alphavalue"],
                "pcutoff": cfg["pcutoff"],
            }

            # Create the plugin instance...
            print(
                f"Plugin {predictor_cls.get_name()} Settings: {predictor_settings}"
            )
            predictor_inst = predictor_cls(bp_lst, num_outputs, num_f,
                                           predictor_settings, video_metadata)

            # The pose prediction final output array...
            pose_prediction_data = np.zeros(
                (num_f, 3 * len(bp_lst) * num_outputs))

            # Begin running through frames...
            p_bar = tqdm.tqdm(total=num_f)
            frames_done = 0

            while frame_reader.has_next():
                frame = frame_reader.read_frames()
                pose = predictor_inst.on_frames(frame)
                if pose is not None:
                    # If the predictor returned a pose, add it to the final data.
                    pose_prediction_data[
                        frames_done:frames_done + pose.get_frame_count()
                    ] = pose.get_all()
                    frames_done += pose.get_frame_count()

                p_bar.update()

            p_bar.close()

            # Post-processing phase:

            # Get all of the final poses that are still held by the predictor
            post_pbar = tqdm.tqdm(total=num_f - frames_done)
            final_poses = predictor_inst.on_end(post_pbar)
            post_pbar.close()

            # Add any post-processed frames
            if final_poses is not None:
                pose_prediction_data[
                    frames_done:frames_done + final_poses.get_frame_count()
                ] = final_poses.get_all()
                frames_done += final_poses.get_frame_count()

            # Check and make sure the predictor returned all frames, otherwise throw an error.
            if frames_done != num_f:
                raise ValueError(
                    f"The predictor algorithm did not return the same amount of frames as are in the frame store.\n"
                    f"Expected Amount: {num_f}, Actual Amount Returned: {frames_done}"
                )

            stop = time.time()
            frame_reader.close()

            if cfg["cropping"]:
                coords = [cfg["x1"], cfg["x2"], cfg["y1"], cfg["y2"]]
            else:
                coords = [0, vid_w, 0, vid_h]

            sub_meta = {
                "start": start,
                "stop": stop,
                "run_duration": stop - start,
                "Scorer": dlc_scorer,
                "DLC-model-config file":
                None,  # We don't have access to this, so don't even try....
                "fps": f_rate,
                "num_outputs": num_outputs,
                "batch_size": 1,
                "multi_output_format": multi_output_format,
                "frame_dimensions": (f_h * stride, f_w * stride),
                "nframes": num_f,
                "iteration (active-learning)": cfg["iteration"],
                "training set fraction": train_frac,
                "cropping": cfg["cropping"],
                "cropping_parameters": coords,
            }
            metadata = {"data": sub_meta}

            # We are Done!!! Save data and return...
            auxiliaryfunctions.SaveData(
                pose_prediction_data,
                metadata,
                data_name,
                pd_index,
                range(num_f),
                save_as_csv,
            )

    return dlc_scorer
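From the calls made above, the Predictor plugin interface evidently takes (bodyparts, num_outputs, num_frames, settings, video_metadata) at construction, returns pose batches (or None) from on_frames(), and flushes anything still buffered in on_end(). A hedged, self-contained sketch of that shape; SimplePose and PassThroughPredictor are illustrative stand-ins, not the framework's real classes:

import numpy as np

class SimplePose:
    # Hypothetical stand-in for the framework's pose container.
    def __init__(self, data):
        self._data = np.atleast_2d(data)  # (frames, 3 * bodyparts * num_outputs)

    def get_frame_count(self):
        return self._data.shape[0]

    def get_all(self):
        return self._data

class PassThroughPredictor:
    # Hypothetical minimal predictor matching the calls in _analyze_frame_store.
    def __init__(self, bodyparts, num_outputs, num_frames, settings, video_metadata):
        self._width = 3 * len(bodyparts) * num_outputs

    @classmethod
    def get_name(cls):
        return "pass_through"

    def on_frames(self, frame):
        # A real predictor would decode the probability frame(s) here; this stub
        # emits zeros for one frame. Returning None instead defers work to on_end.
        return SimplePose(np.zeros(self._width))

    def on_end(self, progress_bar):
        return None  # nothing buffered, so no extra poses to emit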
Example #8
def AnalyzeStream(DLCscorer, trainFraction, cfg, dlc_cfg, sess, inputs,
                  outputs, pdindex, save_as_csv, save_frames, destfolder, name,
                  baseline):
    """Sets up camera connection for pose estimation, and handles data output."""
    # Setup camera connection
    # REPLACE WITH PATH TO YOUR SENTECH CAMERA SDK!
    sdk_location = r"C:\Users\TM_Lab\Desktop\Greg_desktop\StCamUSBPack_EN_190207\3_SDK\StandardSDK(v3.14)"
    system = SentechSystem(sdk_location)
    cam = system.get_camera(0)
    print("Camera connected! The camera model is " +
          str(cam.model.decode("utf-8")))

    print("Starting to analyze stream")
    # Accept a single connection and make a file-like object out of it
    cap = cam
    dataname = os.path.join(destfolder, DLCscorer + '_' + name + '.h5')
    dataname_led = os.path.join(destfolder, DLCscorer + '_' + name + '_LED.h5')
    led_data_cols = [
        'FrameTime', 'MovementDiffLeft', 'MovementDiffRight', 'ThresholdTime',
        'Delay', 'FlashTime', 'WaterTime'
    ]
    size = (int(cap.image_shape[0]), int(cap.image_shape[1]))
    # size = (int(cap.get(3)), int(cap.get(4)))
    w, h = size
    shutter = 1 / 500
    brightness = 68
    v_blanking = 982
    acc_tolerance = 0.20
    missing_count = 0
    nframes = 0

    print("Starting to extract posture")
    start = time.time()
    nframes = GetPoseS(cfg, dlc_cfg, sess, inputs, outputs, cap, w, h, nframes,
                       save_frames, destfolder, baseline)

    # stop the timer and display FPS information
    stop = time.time()
    fps = nframes / (stop - start)
    print("\n")
    print("[INFO] elasped time: {:.2f}".format(stop - start))
    print("[INFO] approx. FPS: {:.2f}".format(fps))

    # If there are rows with blank data at the end of the trial, record them as missing/dropped frames.
    # (PredicteData, led_arr and acc_range are assumed to be module-level objects populated
    # by this custom GetPoseS while the stream runs.)
    for row in PredicteData[int(np.around(fps * 10)):nframes, :]:
        if 0 in row:
            missing_count += 1

    time.sleep(10)
    avg_array = led_arr[:, 4]
    avg_delay = avg_array[avg_array != 0].mean()
    sd_delay = np.std(avg_array[avg_array != 0])
    avg_acc = PredicteData[:nframes, acc_range].mean()

    # Prints out results of trial
    print("Empty values: {}, {} per second".format(
        str(missing_count), str(missing_count / (stop - start))))
    print("Adjusted frame rate: {}".format(
        str((nframes - missing_count) / (stop - start))))
    print("Average delay: {} s".format(str(avg_delay)))
    print("Standard dev. of delay: {} s".format((str(sd_delay))))
    print("Average tracking accuracy: {}".format((str(avg_acc))))

    # Save metadata with trial information and camera information
    dictionary = {
        "name": name,
        "start": start,
        "stop": stop,
        "run_duration": stop - start,
        "Scorer": DLCscorer,
        "DLC-model-config file": dlc_cfg,
        "fps": fps,
        "fps_adjusted": ((nframes - missing_count) / (stop - start)),
        "avg_delay": avg_delay,
        "sd_delay": sd_delay,
        "v_blanking": v_blanking,
        "shutter": shutter,
        "brightness": brightness,
        "batch_size": dlc_cfg["batch_size"],
        "frame_dimensions": (h, w),
        "nframes": nframes,
        "acc_tolerance": acc_tolerance,
        "avg_acc": avg_acc,
        "iteration (active-learning)": cfg["iteration"],
        "training set fraction": trainFraction,
        "cropping": cfg['cropping'],
        "LED_time": 0.2,
        "water_time": 0.15,
        "refractory_period": 0.3
    }
    metadata = {'data': dictionary}

    print("Saving results in {} and {}".format(dataname, dataname_led))
    auxiliaryfunctions.SaveData(PredicteData[:nframes, :], metadata, dataname,
                                pdindex, range(nframes), save_as_csv)
    auxiliaryfunctions.SaveData(led_arr[:nframes, :], metadata, dataname_led,
                                led_data_cols, range(nframes), save_as_csv)
    cam.release()
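Downstream, the .h5 files saved by any of these helpers can be read back with pandas and thresholded by likelihood; a short sketch (the file name and cutoff are illustrative):

import pandas as pd

df = pd.read_hdf('trialDLC_resnet50_demo.h5')  # hypothetical output file
scorer = df.columns.get_level_values('scorer')[0]
likelihood = df[scorer].xs('likelihood', level='coords', axis=1)
reliable = likelihood > 0.9  # e.g. cfg['pcutoff']
print(reliable.mean())       # fraction of confident detections per body part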