예제 #1
0
def walk_current_folder(root_path, mars_opts):
    """Walk ``root_path`` and collect top/front video pairs to process.

    Only top-view movies ('Top' in the name, or a '_t.seq' suffix) are
    accepted; anything under a 'skipped' directory is ignored.

    Parameters
    ----------
    root_path : str
        Root of the directory tree to search.
    mars_opts : dict
        Option flags; this reads 'doFront', 'doTop' and 'doToppcf'.

    Returns
    -------
    tuple(list, list)
        ``trials_to_run`` is a list of {'top': ..., 'front': ...} path
        dicts; ``fullpaths`` holds just the top-view paths, in order.
    """
    trials_to_run = []
    fullpaths = []
    for path, subdirs, filenames in os.walk(root_path):
        for fname in filenames:
            # Skip anything that is not a movie, lives in a 'skipped'
            # directory, or is not a top-view recording.  Short-circuiting
            # 'or' replaces the original bitwise '|', which evaluated
            # every operand unconditionally.
            not_a_movie = all(ext not in fname
                              for ext in ['.seq', '.avi', '.mpg', '.mp4'])
            in_skipped = 'skipped' in path
            not_top_view = 'Top' not in fname and '_t.seq' not in fname
            if not_a_movie or in_skipped or not_top_view:
                continue

            if 'Top' in fname or '_t.seq' in fname:
                front_fname, top_fname, mouse_name = mof.get_names(fname)
                print(front_fname + ' ' + top_fname + ' ' + mouse_name)
                # Only act once, on the canonical top-view filename.
                if top_fname != fname:
                    continue
                fullpath_to_front = os.path.join(path, front_fname)
                fullpath_to_top = os.path.join(path, top_fname)
                # If the front video doesn't exist and we need it, skip.
                if not os.path.exists(fullpath_to_front) and mars_opts['doFront']:
                    continue
            elif 'Front' in fname:
                # NOTE(review): this branch is unreachable -- the filter
                # above already discards names without 'Top'/'_t.seq'.
                # Kept for behavioral parity with the original.
                front_fname, top_fname, mouse_name = mof.get_names(fname)
                fullpath_to_top = os.path.join(path, top_fname)
                fullpath_to_front = os.path.join(path, front_fname)
                top_missing = not os.path.exists(fullpath_to_top)
                if ((top_missing and mars_opts['doTop']
                     and not mars_opts['doToppcf'])
                        or not mars_opts['doFront']):
                    continue
            else:
                # A movie without "Top" or "Front" in the name; skip it.
                continue

            # Save the paths we want to use.
            mouse_trial = {'top': fullpath_to_top,
                           'front': fullpath_to_front}
            trials_to_run.append(mouse_trial)
            fullpaths.append(fullpath_to_top)

    return trials_to_run, fullpaths
예제 #2
0
def extract_pose_wrapper(video_fullpath,
                         view,
                         doOverwrite,
                         progress_bar_signal='',
                         verbose=0,
                         output_suffix='',
                         mars_opts=None,
                         max_frames=999999):
    """Resolve the output folder for a video and run pose extraction.

    Thin wrapper around :func:`extract_pose`; see that function for the
    full semantics of the arguments.

    Parameters
    ----------
    video_fullpath : str
        Full path to the video file.
    view : str
        Camera view ('top' or 'front').
    doOverwrite : bool
        Re-extract even when pose output already exists.
    progress_bar_signal : object, optional
        GUI progress signal, or '' for none.
    verbose : int, optional
        Print progress messages when non-zero.
    output_suffix : str, optional
        Suffix appended to the output folder name.
    mars_opts : dict, optional
        MARS option flags; defaults to an empty dict.
    max_frames : int, optional
        Upper bound on the number of frames to process.
    """
    # A fresh dict per call avoids the shared-mutable-default pitfall of
    # the original 'mars_opts={}' signature; behavior is unchanged.
    if mars_opts is None:
        mars_opts = {}
    video_path = os.path.dirname(video_fullpath)
    video_name = os.path.basename(video_fullpath)
    output_folder = mof.get_mouse_output_dir(
        dir_output_should_be_in=video_path,
        video_name=video_name,
        output_suffix=output_suffix)

    extract_pose(video_fullpath=video_fullpath,
                 output_folder=output_folder,
                 output_suffix=output_suffix,
                 view=view,
                 doOverwrite=doOverwrite,
                 progress_bar_signal=progress_bar_signal,
                 mars_opts=mars_opts,
                 verbose=verbose,
                 max_frames=max_frames)
    return
예제 #3
0
def rename(path, ver_old):
    """Rename dirs and files under ``path`` from an old version suffix to
    the current one.

    Parameters
    ----------
    path : str
        Root of the tree to rewrite.
    ver_old : str
        Old version number WITHOUT the leading 'v' (prepended here).
    """
    ver = mof.get_version_suffix()
    ver_old = 'v' + ver_old

    # NOTE(review): renaming directories during a top-down os.walk means
    # the walk keeps descending using the names it already listed; the
    # file renames below therefore re-join against new_dir explicitly.
    # 'dirpath' avoids shadowing the builtin 'dir'.
    for dirpath, subdirs, filenames in os.walk(path):

        if ver_old in dirpath:
            old_dir = dirpath
            new_dir = dirpath.replace(ver_old, ver)
            if not os.path.exists(new_dir):
                os.rename(old_dir, new_dir)
        else:
            new_dir = dirpath

        for fname in filenames:
            if ver_old in fname:
                newf = fname.replace(ver_old, ver)
                # Only rename when the source actually exists at its
                # (possibly just-renamed) new_dir location.
                if os.path.exists(os.path.join(new_dir, fname)):
                    os.rename(os.path.join(new_dir, fname),
                              os.path.join(new_dir, newf))
예제 #4
0
    def __init__(self, parent=None):
        """Build the 'bento dumper' main window: geometry, title, icon,
        centering, menu, and central layout."""
        super(MainWindow, self).__init__(parent)

        # window setup: size relative to the current screen resolution
        resolution = QDesktopWidget().screenGeometry()
        self.screen_w = resolution.width()
        self.screen_h = resolution.height()
        self.setGeometry(0, 0, 650, 200)
        self.setWindowTitle('bento dumper' + mof.get_version_suffix())
        self.setWindowIcon(QIcon('icons/run.png'))

        # center window on the available desktop area
        # NOTE(review): QDesktopWidget is referenced bare above but as
        # QtGui.QDesktopWidget here -- confirm both names resolve under
        # the Qt binding in use.
        qr = self.frameGeometry()
        cp = QtGui.QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
        # adjust size
        # NOTE(review): '/' yields a float; PyQt5's resize() expects ints
        # -- confirm this runs under the intended Python/Qt version.
        self.resize(self.screen_w / 2, self.screen_h / 16)
        self.Menu()
        self.Layout()

        # Menu()/Layout() must have populated self.main_layout by now.
        central_widget = QtGui.QWidget()
        central_widget.setLayout(self.main_layout)
        self.setCentralWidget(central_widget)
def dump_bento(video_fullpath, output_suffix='', pose_file='', basepath=''):
    """Write a bento .xls for one video into its MARS output folder.

    The sheet carries bento's expected two header rows plus a single data
    row pointing at the annotation files, the behavior movie, and the
    top-view tracking (.mat) file, all relative to ``basepath``.

    Parameters
    ----------
    video_fullpath : str
        Full path to the (top-view) behavior movie.
    output_suffix : str, optional
        Output-folder suffix; defaults to the MARS version suffix.
    pose_file : str, optional
        Explicit path to the pose file; when empty it is derived from the
        video name and output folder.
    basepath : str, optional
        Paths written into the sheet are made relative to this directory.

    Returns
    -------
    int
        1 on success.
    """
    if not output_suffix:
        # Default suffix is just the version number.
        output_suffix = mof.get_version_suffix()
    video_path = os.path.dirname(video_fullpath)
    video_name = os.path.basename(video_fullpath)

    # Get the output folder for this specific mouse.
    output_folder = mof.get_mouse_output_dir(
        dir_output_should_be_in=video_path,
        video_name=video_name,
        output_suffix=output_suffix)

    if not pose_file:
        pose_basename = mof.get_pose_no_ext(video_fullpath=video_fullpath,
                                            output_folder=output_folder,
                                            view='top',
                                            output_suffix=output_suffix)
        top_pose_fullpath = pose_basename + '.mat'
    else:
        # Bug fix: the original never set top_pose_fullpath on this path,
        # so passing a pose_file raised NameError at the P3 write below.
        top_pose_fullpath = pose_file

    wb = xlwt.Workbook(encoding='utf-8')
    ws1 = wb.add_sheet('Sheet1', cell_overwrite_ok=True)

    # Row 0: global bento settings.
    ws1.write(0, 0, basepath)  # A1
    ws1.write(0, 1, 'Ca framerate:')  # B1
    ws1.write(0, 2, 0)  # C1
    ws1.write(0, 3, 'Annot framerate:')  # D1
    ws1.write(0, 4, 30)  # E1
    ws1.write(0, 5, 'Multiple trials/Ca file:')  # F1
    ws1.write(0, 6, 0)  # G1
    ws1.write(0, 7, 'Multiple trails/annot file')  # H1
    ws1.write(0, 8, 0)  # I1
    ws1.write(0, 9, 'Includes behavior movies:')  # J1
    ws1.write(0, 10, 1)  # K1
    ws1.write(
        0, 11,
        'Offset (in seconds; positive values = annot starts before Ca):')  # L1
    ws1.write(0, 12, 0)  # M1

    # Row 1: column headers.
    ws1.write(1, 0, 'Mouse')  # A2
    ws1.write(1, 1, 'Sessn')  # B2
    ws1.write(1, 2, 'Trial')  # C2
    ws1.write(1, 3, 'Stim')  # D2
    ws1.write(1, 4, 'Calcium imaging file')  # E2
    ws1.write(1, 5, 'Start Ca')  # F2
    ws1.write(1, 6, 'Stop Ca')  # G2
    ws1.write(1, 7, 'FR Ca')  # H2
    ws1.write(1, 8, 'Alignments')  # I2
    ws1.write(1, 9, 'Annotation file')  # J2
    ws1.write(1, 10, 'Start Anno')  # K2
    ws1.write(1, 11, 'Stop Anno')  # L2
    ws1.write(1, 12, 'FR Anno')  # M2
    ws1.write(1, 13, 'Offset')  # N2
    ws1.write(1, 14, 'Behavior movie')  # O2
    ws1.write(1, 15, 'Tracking')  # P2

    # Row 2: the single data row for this video.
    ws1.write(2, 0, 1)  # A3
    ws1.write(2, 1, 1)  # B3
    ws1.write(2, 2, 1)  # C3
    ws1.write(2, 3, '')  # D3
    ws1.write(2, 4, '')  # E3
    ws1.write(2, 5, '')  # F3
    ws1.write(2, 6, '')  # G3
    ws1.write(2, 7, '')  # H3
    ws1.write(2, 8, '')  # I3

    # Ground-truth and predicted annotation files in the output folder
    # ('or' replaces the original bitwise '|').
    ann = sorted(
        os.path.join(output_folder, f) for f in os.listdir(output_folder)
        if is_gt_annotation(f) or ('pred' in f))
    ann = [get_rel_path(annot_path, basepath) for annot_path in ann]
    ws1.write(2, 9, ';'.join(ann))  # J3
    ws1.write(2, 10, '')  # K3
    ws1.write(2, 11, '')  # L3
    ws1.write(2, 12, '')  # M3
    ws1.write(2, 13, '')  # N3
    ws1.write(2, 14, get_rel_path(video_fullpath, basepath))  # O3
    ws1.write(2, 15, get_rel_path(top_pose_fullpath, basepath))  # P3

    bento_name = 'bento_' + output_suffix + '.xls'
    wb.save(os.path.join(output_folder, bento_name))
    return 1
예제 #6
0
def dump_bento_across_dir(root_path):
    '''Make a bento .xls indexing every processed video under root_path.

    Walks the tree once, splitting .wav files from everything else, then
    writes one row per top-view .seq that has both the video and its
    extracted pose (.json).  Audio files are appended afterwards.  The
    workbook is saved as bento_<version>.xls in root_path.
    '''
    wb = xlwt.Workbook(encoding='utf-8')
    ws1 = wb.add_sheet('Sheet1', cell_overwrite_ok=True)

    # Row 0: global bento settings.
    ws1.write(0, 0, os.path.abspath(root_path))  # A1
    ws1.write(0, 1, 'Ca framerate:')  # B1
    ws1.write(0, 2, 0)  # C1
    ws1.write(0, 3, 'Annot framerate:')  # D1
    ws1.write(0, 4, 30)  # E1
    ws1.write(0, 5, 'Multiple trials/Ca file:')  # F1
    ws1.write(0, 6, 0)  # G1
    ws1.write(0, 7, 'Multiple trails/annot file')  # H1
    ws1.write(0, 8, 0)  # I1
    ws1.write(0, 9, 'Includes behavior movies:')  # J1
    ws1.write(0, 10, 1)  # K1
    ws1.write(
        0, 11,
        'Offset (in seconds; positive values = annot starts before Ca):')  # L1
    ws1.write(0, 12, 0)  # M1
    ws1.write(0, 13, 'Includes tracking data:')
    ws1.write(0, 14, 0)
    ws1.write(0, 15, 'Includes audio files:')
    ws1.write(0, 16, 0)

    # Row 1: column headers, with the column indices used below.
    mouse_col_num = 0
    session_col_num = 1
    trial_col_num = 2
    annot_file_col_num = 9
    behavior_movie_col_num = 14
    tracking_file_col_num = 15
    audio_file_col_num = 16
    ws1.write(1, 0, 'Mouse')  # A2
    ws1.write(1, 1, 'Sessn')  # B2
    ws1.write(1, 2, 'Trial')  # C2
    ws1.write(1, 3, 'Stim')  # D2
    ws1.write(1, 4, 'Calcium imaging file')  # E2
    ws1.write(1, 5, 'Start Ca')  # F2
    ws1.write(1, 6, 'Stop Ca')  # G2
    ws1.write(1, 7, 'FR Ca')  # H2
    ws1.write(1, 8, 'Alignments')  # I2
    ws1.write(1, 9, 'Annotation file')  # J2
    ws1.write(1, 10, 'Start Anno')  # K2
    ws1.write(1, 11, 'Stop Anno')  # L2
    ws1.write(1, 12, 'FR Anno')  # M2
    ws1.write(1, 13, 'Offset')  # N2
    ws1.write(1, 14, 'Behavior movie')  # O2
    ws1.write(1, 15, 'Tracking')  # P2
    ws1.write(1, 16, 'Audio file')
    ws1.write(1, 17, 'tSNE')

    mouse_number = 0
    # Bug fix: trial_count was previously unbound if the very first data
    # row matched the initial mouse_number.
    trial_count = 0
    row_num = 2

    # Single walk over the tree, separating audio from everything else.
    audio_filenames = []
    nonaudio_filenames = []
    for path, subdirs, filenames in os.walk(root_path):
        for fname in sorted(filenames):
            fname = os.path.join(path, fname)
            if fname.endswith('.wav'):
                audio_filenames.append(fname)
            else:
                nonaudio_filenames.append(fname)
    audio_filenames = sorted(audio_filenames)
    nonaudio_filenames = sorted(nonaudio_filenames)

    for fname in nonaudio_filenames:
        try:
            # fname is a full path here; recover its directory and base
            # name explicitly.  (Bug fix: the original reused the stale
            # 'path' variable left over from the os.walk loop above.)
            dir_path, base_name = os.path.split(fname)

            # Only top-view .seq files outside 'skipped' folders qualify
            # ('or' replaces the original bitwise '|').
            if ('.seq' not in base_name) or ('skipped' in dir_path):
                continue
            if 'Top' not in base_name:
                # A seq file without "Top" in the name; skip it.
                continue

            front_fname, top_fname, mouse_name = mof.get_names(base_name)
            fullpath_to_front = os.path.join(dir_path, front_fname)
            fullpath_to_top = os.path.join(dir_path, top_fname)

            # Add their info to the bento file at the appropriate level.
            video_fullpath = fullpath_to_top
            output_suffix = ''
            video_path = os.path.dirname(video_fullpath)
            video_name = os.path.basename(video_fullpath)

            # Get the output folder for this specific mouse.
            output_folder = mof.get_mouse_output_dir(
                dir_output_should_be_in=video_path,
                video_name=video_name,
                output_suffix=output_suffix)
            _, _, mouse_name = mof.get_names(video_name=video_name)

            pose_basename = mof.get_pose_no_ext(video_fullpath=video_fullpath,
                                                output_folder=output_folder,
                                                view='top',
                                                output_suffix=output_suffix)
            top_pose_fullpath = pose_basename + '.json'

            # Annotation files living alongside the pose output.
            ann = sorted(
                os.path.join(output_folder, f)
                for f in os.listdir(output_folder)
                if is_annotation_file(f, mouse_name))
            ann = [get_normrel_path(f, root_path) for f in ann]

            # Only write a row when both the pose and the video exist.
            if os.path.exists(top_pose_fullpath) and os.path.exists(video_fullpath):
                old_mouse_number = mouse_number
                mouse_number = get_mouse_number(video_fullpath)

                # Consecutive rows of the same mouse count up the trial.
                # TODO: session condition (sessions are all written as 1).
                if old_mouse_number == mouse_number:
                    trial_count += 1
                else:
                    trial_count = 1

                ws1.write(row_num, mouse_col_num, mouse_number)
                ws1.write(row_num, session_col_num, 1)
                ws1.write(row_num, trial_col_num, trial_count)

                ws1.write(row_num, annot_file_col_num, ';'.join(ann))
                ws1.write(row_num, 10, '')  # Start Anno
                ws1.write(row_num, 11, '')  # Stop Anno
                ws1.write(row_num, 12, '')  # FR Anno
                ws1.write(row_num, 13, '')  # Offset

                track_file = get_normrel_path(top_pose_fullpath, root_path)
                ws1.write(row_num, behavior_movie_col_num,
                          get_normrel_path(fullpath_to_top, root_path))
                ws1.write(row_num, tracking_file_col_num, track_file)
                row_num += 1
        except Exception as e:
            # Best-effort: report and keep indexing the remaining files.
            print('ERROR: ' + fname + ' has failed. ' + str(e))
            continue

    # Append audio files in order, starting back at the first data row.
    # NOTE(review): writing at audio_file_col_num + 2 skips the 'tSNE'
    # column -- confirm this offset is intentional.
    for audio_file_count, audio_file in enumerate(audio_filenames):
        ws1.write(2 + audio_file_count, audio_file_col_num + 2,
                  get_normrel_path(audio_file, root_path))

    bento_name = 'bento_' + mof.get_version_suffix() + '.xls'
    wb.save(os.path.join(root_path, bento_name))
    return
예제 #7
0
def create_video_results_wrapper(top_video_fullpath, classifier_path,
                                 progress_bar_signal,
                                 view='top',
                                 doOverwrite=0, output_suffix=''):
    """Render an .mp4 of classified behaviors for a processed video.

    Requires that pose extraction and behavior classification have already
    produced their output files; the rendering itself runs in a worker
    process and streams progress back through ``progress_bar_signal``.

    Parameters
    ----------
    top_video_fullpath : str
        Full path to the top-view video.
    classifier_path : str
        Path to the classifier used (determines output file names).
    progress_bar_signal : object
        Signal with an ``emit(done, total)`` method for GUI progress.
    view : str, optional
        Camera view; defaults to 'top'.
    doOverwrite : int, optional
        Re-render even when the output video already exists.
    output_suffix : str, optional
        Suffix of the MARS output folder.

    Raises
    ------
    ValueError
        If no pose or no classification output exists for this video.
    """
    try:
        video_fullpath = top_video_fullpath
        video_path = os.path.dirname(video_fullpath)
        video_name = os.path.basename(video_fullpath)

        model_type = mof.get_clf_type(classifier_path=classifier_path)

        # Get the output folder for this specific mouse.
        output_folder = mof.get_mouse_output_dir(
            dir_output_should_be_in=video_path,
            video_name=video_name,
            output_suffix=output_suffix)

        # Pose file produced by the extraction step.
        pose_basename = mof.get_pose_no_ext(video_fullpath=top_video_fullpath,
                                            output_folder=output_folder,
                                            view='top',
                                            output_suffix=output_suffix)
        top_pose_fullpath = pose_basename + '.json'

        # Classifier-output file we expect to read predictions from.
        classifier_savename = mof.get_classifier_savename(
            video_fullpath=top_video_fullpath,
            output_folder=output_folder,
            view=view,
            classifier_path=classifier_path,
            output_suffix=output_suffix,
            model_type=model_type)

        predictions_exist = os.path.exists(classifier_savename)
        top_pose_exists = os.path.exists(top_pose_fullpath)
        # Swap the 3-char extension for 'mp4'.
        video_savename = classifier_savename[:-3] + 'mp4'
        video_exists = os.path.exists(video_savename)

        if not top_pose_exists:
            raise ValueError("No pose has been extracted for this video!")
        if not predictions_exist:
            raise ValueError("No behavior classified for this video!")

        # Short-circuiting 'or' replaces the original bitwise '|'.
        if (not video_exists) or doOverwrite:
            # Render in a single worker process; the managed queue feeds
            # (done, total) progress tuples back to the GUI.
            pool = mp.Pool(1)
            manager = mp.Manager()
            queue_for_progress_bar = manager.Queue(20)
            result = pool.apply_async(
                create_mp4_prediction,
                (video_fullpath, top_pose_fullpath, classifier_savename,
                 video_savename, queue_for_progress_bar))
            while True:
                progress_bar_input = queue_for_progress_bar.get()
                if not progress_bar_input:
                    # Falsy sentinel: the worker has finished.
                    break
                progress_bar_signal.emit(progress_bar_input[0],
                                         progress_bar_input[1])

            pool.close()
            pool.join()
            result.get()  # re-raise any exception from the worker
        else:
            print("4 - Video already exists")
            return

    except Exception as e:
        print(e)
        # Bare raise preserves the original traceback ('raise (e)' did not).
        raise
    return
예제 #8
0
def extract_pose(video_fullpath,
                 output_folder,
                 output_suffix,
                 view,
                 doOverwrite,
                 progress_bar_signal,
                 mars_opts,
                 verbose=1,
                 max_frames=999999):
    """Run detection + pose estimation over a video and save a .mat file.

    Two execution paths: a multi-process pipeline of queue-linked stages
    (pre-det -> det -> post-det -> pre-hm -> hm -> post-hm), or -- on
    macOS 10.15+ where coremltools conflicts with multiprocessing -- a
    sequential batched loop over the same stage functions.

    Parameters
    ----------
    video_fullpath : str
        Full path to the video to process.
    output_folder : str
        Directory for the pose output (created if missing).
    output_suffix : str
        Suffix used to build the pose file name.
    view : str
        Camera view; must be 'top' or 'front'.
    doOverwrite : bool
        Re-extract even if the pose .mat already exists.
    progress_bar_signal : object
        GUI signal with emit(done, total), or falsy for none.
    mars_opts : dict
        Option flags; reads 'bgSubtract' and 'useExistingBBoxes'.
    verbose : int, optional
        Print progress messages when non-zero.
    max_frames : int, optional
        Cap on the number of frames processed.

    Raises
    ------
    ValueError
        If ``view`` is neither 'top' nor 'front'.
    """
    pose_basename = mof.get_pose_no_ext(video_fullpath=video_fullpath,
                                        output_folder=output_folder,
                                        view=view,
                                        output_suffix=output_suffix)
    video_name = os.path.basename(video_fullpath)

    pose_mat_name = pose_basename + '.mat'

    # Makes the output directory, if it doesn't exist.
    mof.getdir(output_folder)

    _, ext = os.path.splitext(video_fullpath)
    ext = ext[1:]  # get rid of the dot.

    already_extracted_msg = (
        '1 - Pose already extracted. Change your settings to override, if you still want to extract the pose.'
    )

    # NOTE(review): this substring test of the extension against the base
    # name is always true for any file that has an extension -- confirm
    # what unsupported types it was meant to reject.
    if not (ext in video_name):
        print("File type unsupported! Aborted.")
        return

    try:
        # coremltools on MACs doesn't interact well with multiprocessing, but access to the system GPU, even if it's not
        # from NVidia, more than makes up for sequential processing.  coremltools only works on MacOS Catalina and up.
        major, minor, _ = get_macOS_version_info()
        use_multiprocessing = not (major == 10 and minor >= 15)

        if verbose:
            print('1 - Extracting pose')

        # Bitwise '|' between bools; both operands are always evaluated.
        if (not os.path.exists(pose_mat_name)) | (doOverwrite):

            if not (view == 'front') and not (view == 'top'):
                raise ValueError(
                    'Invalid view type, please specify top or front.')
                # NOTE(review): this return is unreachable (after raise).
                return

            if verbose:
                print('    creating the movie reader...')
            reader = vidReader(video_fullpath)
            NUM_FRAMES = reader.NUM_FRAMES
            IM_H = reader.IM_H
            IM_W = reader.IM_W
            fps = reader.fps
            medianFrame = []
            if mars_opts['bgSubtract']:
                if verbose:
                    print('    calculating background...')
                # NOTE(review): 'vc' is not defined anywhere in this
                # function -- this line raises NameError when
                # mars_opts['bgSubtract'] is set; probably should be
                # 'reader' or the video path.  Confirm and fix.
                medianFrame = get_median_frame(vc, 'cv2')

            # Honor the caller-imposed frame cap.
            NUM_FRAMES = min(NUM_FRAMES, max_frames)

            # unpack user-provided bounding boxes if they exist:
            bboxes = [None] * NUM_FRAMES
            if mars_opts['useExistingBBoxes']:
                print('   Unpacking user-provided bounding boxes...')
                bboxes = unpack_bbox_wrapper(mars_opts, video_fullpath, IM_W,
                                             IM_H, NUM_FRAMES)

            if verbose:
                print('   Processing video for detection and pose ...')
            DET_IM_SIZE = 299
            POSE_IM_SIZE = 256

            if use_multiprocessing:
                # --- Multi-process pipeline: six stages linked by
                # bounded managed queues; this process feeds frames in.
                if verbose:
                    print("      Creating pool...")

                workers_to_use = 8
                pool = mp.Pool(workers_to_use)
                manager = mp.Manager()
                maxsize = 5

                if verbose:
                    print(
                        "      Pool created with %d workers. \n      Creating queues"
                        % workers_to_use)

                # create managed queues
                q_start_to_predet = manager.Queue(maxsize)
                q_predet_to_det = manager.Queue(maxsize)
                q_predet_to_prehm = manager.Queue(maxsize)
                q_det_to_postdet = manager.Queue(maxsize)
                q_postdet_to_prehm = manager.Queue(maxsize)
                q_prehm_to_hm_IMG = manager.Queue(maxsize)
                q_prehm_to_posthm_BBOX = manager.Queue(maxsize)
                q_hm_to_posthm_HM = manager.Queue(maxsize)
                q_posthm_to_end = manager.Queue(maxsize)

                if verbose:
                    print("      Queues created. \n      Linking pools")

                try:

                    results_predet = pool.apply_async(
                        pre_det, (q_start_to_predet, q_predet_to_det,
                                  q_predet_to_prehm, medianFrame, IM_H, IM_W))
                    results_det = pool.apply_async(
                        run_det,
                        (q_predet_to_det, q_det_to_postdet, view, mars_opts))
                    results_postdet = pool.apply_async(
                        post_det, (q_det_to_postdet, q_postdet_to_prehm))
                    results_prehm = pool.apply_async(
                        pre_hm, (q_postdet_to_prehm, q_predet_to_prehm,
                                 q_prehm_to_hm_IMG, q_prehm_to_posthm_BBOX,
                                 IM_W, IM_H))
                    results_hm = pool.apply_async(
                        run_hm, (q_prehm_to_hm_IMG, q_hm_to_posthm_HM, view,
                                 mars_opts))
                    results_posthm = pool.apply_async(
                        post_hm,
                        (q_hm_to_posthm_HM, q_prehm_to_posthm_BBOX, IM_W, IM_H,
                         POSE_IM_SIZE, NUM_FRAMES, pose_basename))
                except Exception as e:
                    print("Error starting Pools:")
                    print(e)
                    raise (e)

                if verbose:
                    print('      Pools linked.\n      Feeding data...')
                if progress_bar_signal:
                    # Update the progress bar with the number of total frames it will be processing.
                    progress_bar_signal.emit(0, NUM_FRAMES)

                # Feed every frame (plus any user bbox) into the pipeline.
                for f in range(NUM_FRAMES):
                    img = reader.getFrame(f)
                    q_start_to_predet.put([img, bboxes[f]])

                # Push through the poison pill.
                q_start_to_predet.put(get_poison_pill())

                if verbose:
                    print("      Pools Started...")
                pool.close()
                pool.join()

                if verbose:
                    print("      Pools Finished. \n      Saving...")
                top_pose_frames = results_posthm.get()

            else:  # don't use multiprocessing, but process frames in batches
                # --- Sequential path: same stage functions, run inline
                # in batches of 16, with optional per-stage timing.
                time_steps = True

                if progress_bar_signal:
                    # Update the progress bar with the number of total frames it will be processing.
                    progress_bar_signal.emit(0, NUM_FRAMES)

                if time_steps:
                    # process_time buckets: 0 setup, 1 file read, 9 pre-det,
                    # 2/3 det black/white, 4 post-det, 5 pre-hm, 6 hm,
                    # 7 post-hm, 8 whole-batch total.
                    process_time = [0.] * 10
                    process_time_start = time.perf_counter()

                det_black, det_white = run_det_setup(view, mars_opts)
                det_prev_ok_loc, det_prev_ok_conf = post_det_setup()
                pose_model = run_hm_setup(view, mars_opts)
                top_pose_frames, bar = post_hm_setup(NUM_FRAMES)
                current_frame_num = 0

                if time_steps:
                    process_time[0] += time.perf_counter() - process_time_start

                BATCH_SIZE = 16
                in_q = [None] * BATCH_SIZE
                det_b_q = [None] * BATCH_SIZE
                det_w_q = [None] * BATCH_SIZE
                pose_image_q = [None] * BATCH_SIZE
                # """

                # """
                # Ceiling division: process NUM_FRAMES in BATCH_SIZE chunks.
                for batch in range(
                    (NUM_FRAMES + BATCH_SIZE - 1) // BATCH_SIZE):
                    batch_start = batch * BATCH_SIZE
                    batch_end = min(batch_start + BATCH_SIZE, NUM_FRAMES)

                    if time_steps:
                        process_time_start = time.perf_counter()

                    # Stage 1: read + pre-detection for the whole batch.
                    for f in range(batch_start, batch_end):
                        if time_steps:
                            process_time_start_0 = time.perf_counter()

                        ix = f - batch_start
                        img = reader.getFrame(f)

                        if time_steps:
                            process_time_1a = time.perf_counter()
                            process_time[
                                1] += process_time_1a - process_time_start_0

                        in_q[ix], pose_image_q[ix] = pre_det_inner(
                            [img, bboxes[f]], medianFrame, IM_H, IM_W)

                        if time_steps:
                            process_time_1 = time.perf_counter()
                            process_time[9] += process_time_1 - process_time_1a

                    # Stage 2: detector for the black mouse.
                    for ix in range(batch_end - batch_start):
                        det_b_q[ix] = run_det_inner(in_q[ix], det_black,
                                                    mars_opts)

                    if time_steps:
                        process_time_2 = time.perf_counter()
                        process_time[2] += process_time_2 - process_time_1

                    # Stage 3: detector for the white mouse.
                    for ix in range(batch_end - batch_start):
                        det_w_q[ix] = run_det_inner(in_q[ix], det_white,
                                                    mars_opts)

                    if time_steps:
                        process_time_3 = time.perf_counter()
                        process_time[3] += process_time_3 - process_time_2

                    # Stage 4: per-frame post-det -> pre-hm -> hm -> post-hm.
                    for ix in range(batch_end - batch_start):
                        if time_steps:
                            process_time_start_1 = time.perf_counter()

                        det_out = post_det_inner([det_b_q[ix], det_w_q[ix]],
                                                 det_prev_ok_loc,
                                                 det_prev_ok_conf)

                        if time_steps:
                            process_time_4 = time.perf_counter()
                            process_time[
                                4] += process_time_4 - process_time_start_1

                        prepped_images, bboxes_confs = pre_hm_inner(
                            det_out, pose_image_q[ix], IM_W, IM_H)

                        if time_steps:
                            process_time_5 = time.perf_counter()
                            process_time[5] += process_time_5 - process_time_4

                        predicted_heatmaps = run_hm_inner(
                            prepped_images, pose_model)

                        if time_steps:
                            process_time_6 = time.perf_counter()
                            process_time[6] += process_time_6 - process_time_5

                        post_hm_inner(predicted_heatmaps, bboxes_confs, IM_W,
                                      IM_H, POSE_IM_SIZE, NUM_FRAMES,
                                      pose_basename, top_pose_frames, bar,
                                      current_frame_num)

                        if time_steps:
                            process_time_7 = time.perf_counter()
                            process_time[7] += process_time_7 - process_time_6

                        # Increment the frame_number.
                        current_frame_num += 1

                    if time_steps:
                        process_time[8] += process_time_7 - process_time_start

                    if progress_bar_signal:
                        progress_bar_signal.emit(f, 0)

                if time_steps:
                    # Per-stage timing report, normalized per frame.
                    NS_PER_SECOND = 1000000000
                    print("Process Times")
                    print("-----------------------------")
                    print(f"Setup             : {process_time[0]} sec\n")
                    print(
                        f"File Read         : {process_time[1] / NUM_FRAMES} sec / frame"
                    )
                    print(
                        f"Pre Detection     : {process_time[9] / NUM_FRAMES} sec / frame"
                    )
                    print(
                        f"Detection (black) : {process_time[2] / NUM_FRAMES} sec / frame"
                    )
                    print(
                        f"Detection (white) : {process_time[3] / NUM_FRAMES} sec / frame"
                    )
                    print(
                        f"Post Detection    : {process_time[4] / NUM_FRAMES} sec / frame"
                    )
                    print(
                        f"Pre Heatmap       : {process_time[5] / NUM_FRAMES} sec / frame"
                    )
                    print(
                        f"Heatmap (pose)    : {process_time[6] / NUM_FRAMES} sec / frame"
                    )
                    print(
                        f"Post Heatmap      : {process_time[7] / NUM_FRAMES} sec / frame"
                    )
                    print(
                        f"Total processing  : {process_time[8] / NUM_FRAMES} sec / frame"
                    )

            # Convert the accumulated lists to arrays before saving.
            top_pose_frames['keypoints'] = np.array(
                top_pose_frames['keypoints'])
            top_pose_frames['scores'] = np.array(top_pose_frames['scores'])

            top_pose_frames['bbox'] = np.array(top_pose_frames['bbox'])
            top_pose_frames['bscores'] = np.array(top_pose_frames['bscores'])

            sp.savemat(pose_mat_name, top_pose_frames)

            if verbose:
                print("Saved.\nPose Extracted")
            reader.close()
            return
        else:
            if verbose:
                print(already_extracted_msg)
            return
    except Exception as e:
        print(e)
        raise (e)
    return
def classify_actions_wrapper(top_video_fullpath,
                             front_video_fullpath,
                             doOverwrite,
                             view,
                             classifier_path='',
                             output_suffix=''):
    """Run the behavior classifier on previously-extracted windowed features.

    Loads the '*_wnd.npz' feature file(s) for the requested view, predicts
    behavior labels with the classifier at ``classifier_path``, and dumps the
    predictions in Caltech Behavior Annotator (CBA) format.

    Args:
        top_video_fullpath: Path to the top-view video; its directory and
            basename determine where features and outputs live.
        front_video_fullpath: Path to the front-view video.
            NOTE(review): currently unused -- the front feature name is also
            derived from the top video path below; confirm this is intentional.
        doOverwrite: If truthy, re-run classification even when a saved
            prediction file already exists.
        view: One of 'top', 'toppcf', or 'topfront'; anything else raises.
        classifier_path: Path to the trained classifier; also determines the
            model type and the output filename.
        output_suffix: Optional suffix selecting the output folder variant.

    Raises:
        ValueError: If a required feature file is missing or ``view`` is not
            a supported value.
    """
    try:
        video_fullpath = top_video_fullpath
        video_path = os.path.dirname(video_fullpath)
        video_name = os.path.basename(video_fullpath)

        model_type = mof.get_clf_type(classifier_path=classifier_path)

        # Get the output folder for this specific mouse.
        output_folder = mof.get_mouse_output_dir(
            dir_output_should_be_in=video_path,
            video_name=video_name,
            output_suffix=output_suffix)

        # Get the basenames of the features we should be loading.
        # NOTE(review): both views derive their name from the TOP video path.
        front_feat_basename = mof.get_feat_no_ext(
            video_fullpath=top_video_fullpath,
            output_folder=output_folder,
            view='front',
            output_suffix=output_suffix)

        top_feat_basename = mof.get_feat_no_ext(
            video_fullpath=top_video_fullpath,
            output_folder=output_folder,
            view='top',
            output_suffix=output_suffix)

        # Get the name of the text file we're going to save to.
        classifier_savename = mof.get_classifier_savename(
            video_fullpath=top_video_fullpath,
            output_folder=output_folder,
            view=view,
            classifier_path=classifier_path,
            output_suffix=output_suffix,
            model_type=model_type)

        # Windowed-feature file names. The 'pcf'/non-'pcf' branches that used
        # to live here had become byte-identical ("hiding pcf features
        # shenanigans" now resolved upstream), so the duplicate if/else was
        # collapsed into one assignment pair.
        front_feat_name = front_feat_basename + '_wnd.npz'
        top_feat_name = top_feat_basename + '_wnd.npz'

        # Check that features (and any previous predictions) exist.
        top_feats_exist = os.path.exists(top_feat_name)
        front_feats_exist = os.path.exists(front_feat_name)
        classifier_savename_exist = os.path.exists(classifier_savename)

        if (not classifier_savename_exist) or doOverwrite:
            # If the proper features don't exist, raise an exception.
            # Otherwise, load them.
            if view in ('top', 'toppcf'):
                if not top_feats_exist:
                    print(
                        "Top features don't exist in the proper format/location. Aborting..."
                    )
                    raise ValueError(
                        os.path.basename(top_feat_name) + " doesn't exist.")
                print("loading top features")
                features = mcm.load_features_from_filename(
                    top_feat_name=top_feat_name)

            elif view == 'topfront':
                # Both featuresets are required; check each with a guard.
                if not top_feats_exist:
                    print(
                        "Top features don't exist in the proper format/location. Aborting..."
                    )
                    raise ValueError(
                        os.path.basename(top_feat_name) + " doesn't exist.")
                if not front_feats_exist:
                    print(
                        "Front features don't exist in the proper format/location. Aborting..."
                    )
                    raise ValueError(
                        os.path.basename(front_feat_name) + " doesn't exist.")
                print("loading top and front features")
                features = mcm.load_features_from_filename(
                    top_feat_name=top_feat_name,
                    front_feat_name=front_feat_name)
            else:
                print("Classifier available for top or top and front view")
                raise ValueError(
                    'Classifier not available for only front view')

            # Classify the actions (get the labels back).
            print("predicting labels")
            predicted_labels, predicted_labels_interaction = mcm.predict_labels(
                features, classifier_path)

            # Dump the labels into the Caltech Behavior Annotator format.
            mcm.dump_labels_CBA(predicted_labels, predicted_labels_interaction,
                                classifier_savename)
        else:
            print("3 - Predictions already exist")
            return

    except Exception as e:
        print(e)
        # Bare `raise` re-raises the active exception with its original
        # traceback intact (was `raise (e)`).
        raise
    return