示例#1
0
    def _make_tuples(self, key):
        """
        Locate each experiment's Fiji-tagged HDF5 / video pair for one mouse
        and insert one table entry per experiment.

        :param key: dict with at least a 'mouse' entry, used to find the
            mouse's subfolder inside the module-level `fld` directory.
        """
        # Subfolder for this mouse; assumes exactly one directory under `fld`
        # matches the mouse name — TODO confirm uniqueness.
        sub = [
            f for f in fld.glob('*') if f.is_dir() and key['mouse'] in str(f)
        ][0]

        # Fiji-tagged analog-input files; experiment name is everything
        # before '_Fiji' in the file name.
        hdfs = sorted([f for f in sub.glob('*.hdf5') if 'Fiji-tag' in f.name])
        exp_names = [f.name.split('_Fiji')[0] for f in hdfs]

        # Keep only experiments with exactly one hdf and exactly one video.
        exp_data = {}
        for exp in exp_names:
            h = [h for h in hdfs if exp in str(h)]
            v = [f for f in sub.glob('*.mp4') if exp in str(f)]

            if len(h) != 1 or len(v) != 1:
                continue

            exp_data[exp] = dict(hdf=h[0], video=v[0])

        for name, data in exp_data.items():
            # Recording number follows '_t' or '_v' in the experiment name.
            if '_t' in name:
                splitter = '_t'
            else:
                splitter = '_v'

            _, _, _, fps, _ = get_video_params(
                get_cap_from_file(str(data['video'])))

            try:
                f, keys, subkeys, allkeys = open_hdf(str(data['hdf']))
            except Exception as e:
                print(f'Failed to open AI file: {data["hdf"].name}:\n{e}')
                # BUGFIX: was `return`, which silently aborted processing of
                # all remaining experiments; skip only this experiment.
                continue

            # Binarize the Fiji ROI signal: 1 while the Ca recording is on.
            roi = [k for k in keys if 'Fiji_ROI' in k][0]
            sig = f[roi][()]
            is_rec = np.zeros_like(sig)
            is_rec[sig > 0] = 1

            # Recording on/off edges from the derivative of the binary trace.
            rec_starts = np.where(derivative(is_rec) > 0)[0]
            rec_ends = np.where(derivative(is_rec) < 0)[0]

            ekey = key.copy()
            ekey['date'] = name.split('_')[0]
            ekey['rec'] = int(name.split(splitter)[1][0])
            ekey['name'] = name
            ekey['hdf_path'] = str(data['hdf'])
            ekey['video_path'] = str(data['video'])
            ekey['video_fps'] = fps
            ekey['is_ca_rec'] = is_rec
            ekey['ca_rec_starts'] = rec_starts
            ekey['ca_rec_ends'] = rec_ends
            manual_insert_skip_duplicate(self, ekey)
示例#2
0
    def _make_tuples(self, key):
        """
        For every recording listed in the session metadata, locate the
        behaviour video and the Fiji-tag analog-input file, read their
        basic parameters and insert one table row per recording.
        """
        session_fld = get_session_folder(**key)
        recs = load_yaml(metadatafile)['sessions'][key['mouse']][
            key['sess_name']]

        for n, rec in enumerate(sorted(recs)):
            # Files in the session folder belonging to this recording
            rec_files = [f for f in os.listdir(session_fld) if rec in f]

            # Video: expect a single .mp4/.avi file; when several files share
            # one name, fall back to the .mp4 version.
            videos = [f for f in rec_files if f.endswith((".mp4", ".avi"))]
            if len(videos) == 1:
                video = videos[0]
            elif len(set(get_file_name(f) for f in videos)) == 1:
                video = get_file_name(videos[0]) + ".mp4"
            else:
                raise ValueError

            # Analog-input file is searched for in the mouse's temp folder.
            temp_fld = os.path.join(self.temp_files_fld, key['mouse'])
            ais = [
                f for f in os.listdir(temp_fld)
                if rec in f and f == f"{rec}_Fiji-tag.hdf5"
            ]

            if not ais:
                continue
            if len(ais) != 1:
                raise ValueError(f'Found ais: {ais}')
            ai = ais[0]

            # Open video and get number of frames
            nframes, width, height, fps, _ = get_video_params(
                get_cap_from_file(os.path.join(session_fld, video)))

            # Open AI file and get number of samples.
            # NOTE(review): the AI file was located in temp_fld but is opened
            # from session_fld — confirm the file exists in both places.
            f, keys, subkeys, allkeys = open_hdf(os.path.join(session_fld, ai))
            n_samples = len(f['AI']['0'][()])

            rkey = dict(
                key,
                rec_name=rec,
                rec_n=n,
                videofile=video,
                aifile=ai,
                n_frames=nframes,
                fps_behav=fps,
                n_samples=n_samples,
            )
            manual_insert_skip_duplicate(self, rkey)
示例#3
0
    def make_behav_class_clip(self, args):
        """
        Save an example clip containing only the frames assigned to one
        behaviour class, with the class label burned into each frame.

        :param args: tuple (behav_label, labels, savepath) where `labels` is a
            per-frame sequence of class labels and `savepath` is the output
            video file path.
        """
        behav_label, labels, savepath = args

        cprint(f"Saving example clip for beahviour class {behav_label}",
               "green",
               attrs=['bold'])

        # Get video file for this recording
        recdata = (Recording & f"rec_name='{self.rec}'").fetch(as_dict=True)[0]
        videopath = os.path.join(main_data_folder, recdata['mouse'],
                                 recdata['sess_name'], recdata['videofile'])

        # Prep CV2 writer
        cap = get_cap_from_file(videopath)
        nframes, width, height, fps, iscolor = get_video_params(cap)
        writer = open_cvwriter(savepath,
                               w=width,
                               h=height,
                               framerate=fps,
                               iscolor=True)

        # Text overlay settings
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (50, 50)
        fontScale = 1
        lineType = 2
        text_color = (30, 220, 30)

        # Make frames.
        # BUGFIX: bound the loop by len(labels) so a video with more frames
        # than labels no longer raises IndexError; 30000-frame cap kept.
        for i in range(min(30000, len(labels))):
            ret, frame = cap.read()
            if not ret:
                break

            if labels[i] != behav_label:
                continue
            # NOTE(review): the positional `lineType` argument lands in
            # putText's `thickness` parameter — confirm intended.
            cv2.putText(frame, f'Behaviour label {behav_label}',
                        bottomLeftCornerOfText, font, fontScale,
                        text_color, lineType)
            writer.write(frame)
        writer.release()
        cprint(
            f"    finished saving example clip for beahviour class {behav_label}",
            "green",
            attrs=['bold'])
示例#4
0
def get_background_from_video(videopath, start_frame=0, avg_over=10):
    """
        Extracts background by averaging across video frames

        :param videopath: str, path to video to analyse
        :param start_frame: int, frame to start at
        :param avg_over: int, number of evenly spaced frames averaged to
            build the background (a frame every nframes//avg_over is used)
    """

    check_file_exists(videopath, raise_error=True)

    # Open video and get params.
    # BUGFIX: get_video_params returns 5 values (see its other call sites);
    # the original 4-name unpacking raised ValueError.
    cap = get_cap_from_file(videopath)
    nframes, width, height, fps, _ = get_video_params(cap)

    # Start at selected frame
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

    # Get background by averaging every N frames.
    # BUGFIX: `num_frames` and `vid` were undefined names (NameError);
    # use `nframes` and `cap`. Guard against every_other == 0 when the
    # video has fewer frames than avg_over.
    background = np.zeros((height, width))
    every_other = max(1, nframes // avg_over)
    j = 0

    # BUGFIX: honour start_frame — the original loop restarted at frame 0.
    for i in tqdm(range(start_frame, nframes)):

        if i % every_other == 0:
            cap.set(cv2.CAP_PROP_POS_FRAMES, i)
            ret, frame = cap.read()  # get the frame

            if ret:
                # store the current frame in as a numpy array
                background += frame[:, :, 0]
                j += 1
            else:
                break

    # Guard against division by zero when no frame could be read
    background = (background / max(j, 1)).astype(np.uint8)
    cap.release()

    return background
import sys
sys.path.append("./")
from forceplate_config import Config
from fcutils.video.utils import manual_video_inspect

from fcutils.video.utils import get_video_params, get_cap_from_file

# ? To manually inspect a video frame by frame:
# 1) Specify the path to the video you want to analyse
# 2) Run this script


class Inspector(Config):
    """Launches a manual frame-by-frame inspection of a video on creation."""

    def __init__(self, video_to_inspect):
        # Initialise the shared experiment configuration first
        Config.__init__(self)
        # Then open the interactive inspection window
        manual_video_inspect(video_to_inspect)


if __name__ == "__main__":
    # * <--- path to the video to analyse
    videofile = "D:\\Egzona\\Forceplate\\2021\\250121_DREADDs_EXP\\M_1L_cam0.avi"

    # Print basic video parameters before opening the inspector
    cap = get_cap_from_file(videofile)
    nframes, width, height, fps, is_color = get_video_params(cap)
    print(
        f'Video has: {nframes} (wxh: {width} x {height}) at {round(fps, 2)}fps'
    )
    inspector = Inspector(videofile)
示例#6
0
def make_trials_videos(session_video,
                       stimuli,
                       save_folder=None,
                       overwrite=False,
                       n_sec_pre=5,
                       n_sec_pos=15,
                       stim_duration_sec=9):
    """
        Creates a video with the trials for one session.
        It adds some text to say which trial it is and a circle to signal when the stimulus is on.

        :param session_video: str, path to video to take the frames from
        :param stimuli: list or 1d numpy array with stimuli onset times (in number of frames from start of vid)
        :param save_folder: str, optional. Path to folder where the video will be saved
        :param overwrite: bool, if False and the output file exists, do nothing
        :param n_sec_pre: number of seconds before each stimulus to keep in each trial's clip
        :param n_sec_pos: number of seconds after each stimulus to keep in each trial's clip
        :param stim_duration_sec: duration of the stimulus in seconds.

    """
    if save_folder is None:
        save_folder = os.path.split(session_video)[0]
    videoname = get_file_name(session_video)
    save_path = os.path.join(save_folder, videoname + '_trials.mp4')

    if not overwrite and os.path.isfile(save_path):
        print(f'Video exists alerady at {save_path}')
        return

    # Open video
    videocap = get_cap_from_file(session_video)
    nframes, width, height, fps, _ = get_video_params(videocap)
    writer = open_cvwriter(save_path,
                           w=width,
                           h=height,
                           framerate=fps,
                           iscolor=True)

    # Prep some vars.
    # BUGFIX: cast to int so the per-trial frame range below is integral
    # even when fps is a float.
    n_frames_pre = int(round(n_sec_pre * fps))
    n_frames_pos = int(round(n_sec_pos * fps))

    # Vars for text
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (50, 50)
    fontScale = 1
    lineType = 2
    text_color = (30, 220, 30)

    # Vars for circle that signals when the stim is on
    circle_pos = (700, 75)
    circle_radius = 50
    circle_color = (0, 255, 0)

    # Loop over stimuli
    for stimn, stim in enumerate(stimuli):
        # BUGFIX: clamp the clip start at 0 so stimuli occurring earlier than
        # n_sec_pre into the video don't request negative frame indices.
        start = max(0, int(stim) - n_frames_pre)
        for framen in range(start, int(stim) + n_frames_pos):
            frame = get_cap_selected_frame(videocap, framen)

            if frame is None:
                break

            # Draw the stimulus-on indicator circle while the stim is active
            if stim <= framen <= stim + stim_duration_sec * fps:
                cv2.circle(frame, circle_pos, circle_radius, circle_color, -1)

            # NOTE(review): the positional `lineType` argument lands in
            # putText's `thickness` parameter — confirm intended.
            cv2.putText(frame, f'Stim {stimn} of {len(stimuli)-1}',
                        bottomLeftCornerOfText, font, fontScale, text_color,
                        lineType)

            writer.write(frame.astype(np.uint8))
    writer.release()