Example #1
    def _make_tuples(self, key):
        fld = self.main_fld / key["mouse"]
        tracking_file = [f for f in fld.glob('*.h5') if key['name'] in f.name]
        if len(tracking_file) != 1:
            raise ValueError(
                f'Expected exactly one tracking file, found {len(tracking_file)}: {tracking_file}')

        # Insert entry into main class
        self.insert1(key)

        # Load and clean tracking data
        bp_tracking = prepare_tracking_data(str(tracking_file[0]),
                                            median_filter=True,
                                            likelihood_th=0.9,
                                            fisheye=False,
                                            common_coord=False,
                                            compute=True)

        # Insert into the bodyparts tracking
        for bp, tracking in bp_tracking.items():
            bp_key = key.copy()
            bp_key['bp'] = bp

            bp_key['x'] = self.smooth_scale(tracking.x.values, is_speed=False)
            bp_key['y'] = self.smooth_scale(tracking.y.values, is_speed=False)
            bp_key['speed'] = self.smooth_scale(tracking.speed.values,
                                                is_speed=True)
            bp_key['dir_of_mvmt'] = rolling_mean(
                tracking.direction_of_movement.values, 3)
            bp_key['angular_velocity'] = self.smooth_scale(
                tracking.angular_velocity.values, is_deg=True)

            self.BodyPartTracking.insert1(bp_key)
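
Both this example and the ones below rely on a rolling_mean helper that is defined elsewhere in the codebase. A minimal sketch of what such a helper could look like, assuming a plain centered moving average (the real implementation may differ):

import pandas as pd


def rolling_mean(signal, window):
    # hypothetical stand-in for the rolling_mean helper used throughout these examples:
    # centered moving average that keeps the output the same length as the input
    return (pd.Series(signal)
            .rolling(window, min_periods=1, center=True)
            .mean()
            .to_numpy())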
Example #2
    def preprocess_rec(self, rec):
        fps = (Recording & f"rec_name='{rec}'").fetch1("fps_behav")

        # Fetch data
        bp_tracking = pd.DataFrame(
            (Trackings * Trackings.BodyPartTracking & f"rec_name='{rec}'"
             & "bp!='left_ear'" & "bp!='right_ear'").fetch())
        bs_tracking = pd.DataFrame((Trackings * Trackings.BodySegmentTracking
                                    & f"rec_name='{rec}'").fetch())

        snout = bp_tracking.loc[bp_tracking.bp == "snout"].iloc[0]
        body = bp_tracking.loc[bp_tracking.bp == "body"].iloc[0]
        neck = bp_tracking.loc[bp_tracking.bp == "neck"].iloc[0]
        tail_base = bp_tracking.loc[bp_tracking.bp == "tail_base"].iloc[0]

        whole_body_segment = bs_tracking.loc[(bs_tracking.bp1 == "snout") & (
            bs_tracking.bp2 == "tail_base")].iloc[0]
        head_segment = bs_tracking.loc[(bs_tracking.bp1 == "snout")
                                       & (bs_tracking.bp2 == "neck")].iloc[0]
        upper_torso = bs_tracking.loc[(bs_tracking.bp1 == "neck")
                                      & (bs_tracking.bp2 == "body")].iloc[0]
        lower_torso = bs_tracking.loc[(bs_tracking.bp1 == "body") &
                                      (bs_tracking.bp2 == "tail_base")].iloc[0]

        # Create features from tracking
        rec_features = []
        rec_features.append(upper_torso.bone_length + lower_torso.bone_length +
                            head_segment.bone_length)
        rec_features.append(snout.speed)
        rec_features.append(tail_base.speed)
        rec_features.append(body.angular_velocity)
        rec_features.append(upper_torso.angular_velocity)
        rec_features.append(head_segment.orientation - lower_torso.orientation)

        rec_features = np.vstack(rec_features).T

        # remove nans
        rec_features = pd.DataFrame(rec_features).interpolate().values

        # smooth data with a ~60 ms window (2 frames at 30 fps)
        window_size = int(np.ceil(60 / (1000 / fps)))
        smoothed = np.zeros_like(rec_features)
        for i in range(rec_features.shape[1]):
            smoothed[:, i] = rolling_mean(rec_features[:, i], window_size)

        # Downsample to 10 fps
        n_samples = smoothed.shape[0]
        n_samples_at_10_fps = int(np.ceil((n_samples / fps) * 10))
        resampled = resample(smoothed, n_samples_at_10_fps)

        # Visualise features
        # self.plot_features(resampled)

        return resampled
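
preprocess_rec downsamples the smoothed features to 10 fps with scipy's resample. A standalone sketch of that step, using an illustrative frame rate and array shape rather than values from a real recording:

import numpy as np
from scipy.signal import resample

fps = 30                              # illustrative acquisition frame rate
features = np.random.randn(3000, 6)   # (n_frames, n_features), illustrative

# number of samples the same duration spans at 10 fps
n_samples_at_10_fps = int(np.ceil((features.shape[0] / fps) * 10))

# Fourier-domain resampling along the time axis (axis=0 is scipy's default)
resampled = resample(features, n_samples_at_10_fps)
print(resampled.shape)                # (1000, 6)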
Example #3
    def smooth_scale(self, data, is_speed=False, is_deg=False):
        # Smooth with rolling mean
        data = rolling_mean(data, 3)

        if not is_deg:
            if is_speed:
                # px/frame -> cm/s
                data = (data / 13.9) * 30  # x / px per cm * fps
            else:
                # px -> cm
                data = data / 13.9
        else:
            # deg/frame -> deg/s
            data = data * 30
        return data
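
smooth_scale hard-codes 13.9 px per cm and 30 fps as conversion factors. A quick check of the arithmetic on made-up numbers (illustrative values only):

import numpy as np

speed_px_per_frame = np.array([5.0, 10.0, 20.0])   # illustrative speeds

# px/frame -> cm/s: divide by px-per-cm, multiply by frames-per-second
speed_cm_per_s = (speed_px_per_frame / 13.9) * 30
print(np.round(speed_cm_per_s, 2))                 # [10.79 21.58 43.17]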
Example #4
def get_onset(sig):
    '''
        Gets onset by looking at the last
        time the signal ramped up before the max
    '''
    smoothed = rolling_mean(sig, 20)
    atmax = np.argmax(smoothed)

    deriv = derivative(smoothed)
    onset = None
    for th in (0.001, 0.01, 0.1):
        try:
            onset = np.where(np.abs(deriv[:atmax]) <= th)[0][-1]
        except IndexError:
            pass
        else:
            break

    return onset
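
get_onset also depends on a derivative helper that is not shown here. A minimal sketch, assuming it is a first-order numerical derivative of the same length as the input (the real helper may differ):

import numpy as np


def derivative(signal):
    # hypothetical stand-in for the derivative helper used by get_onset
    return np.gradient(np.asarray(signal, dtype=float))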
Example #5
def dff(sig):
    # baseline: 0.3rd percentile of the pre-stimulus frames
    th = np.nanpercentile(sig[:n_frames_pre], .3)
    return rolling_mean((sig - th) / th, 3)
Example #6
def dff(sig):
    # baseline: 30th percentile of the first 3 s (at 30 fps)
    th = np.nanpercentile(sig[:(3 * 30)], 30)
    return rolling_mean((sig - th) / th, 3)
Example #7
def run():

    for fpath in (frames_file, calibration_file):
        if not Path(fpath).exists():
            raise FileNotFoundError(f'Could not find file: {fpath}')

    # load frames data and match experiments to subfolders
    frames_data = pd.read_csv(frames_file)

    # ----------------------------------- Data  ---------------------------------- #
    def clean(string):
        return string.split('_M')[0].split('_F')[0]

    frames_data['subfolder'] = frames_data.Video.apply(clean)

    # check that all experiment subfolders are found
    subfolds_names = [fld.name for fld in sub_flds]
    if not np.all(frames_data.subfolder.isin(subfolds_names)):
        errors = frames_data.loc[~frames_data.subfolder.isin(subfolds_names)]
        raise ValueError(
            f'At least one subfolder from the frames spreadsheet was not found in the subfolders of {main_fld}:\n{errors}'
        )

    # filter data by condition
    frames_data = frames_data.loc[frames_data.Condition.isin(CONDITIONS)]
    logger.debug(f'Got {len(frames_data)} trials data:\n{frames_data.head()}')

    # load calibration data
    calibration_data = load_csv_file(calibration_file)

    logger.debug('All checks passed and all files found')
    # ---------------------------------------------------------------------------- #
    #                                 PROCESS DATA                                 #
    # ---------------------------------------------------------------------------- #

    # Load data for each video
    data = {
        "name": [],
        "fr": [],
        "fl": [],
        "hr": [],
        "hl": [],
        "CoG": [],
        "centered_CoG": [],
        "start": [],
        "end": [],
        'condition': [],
        'fps': []
    }
    for i, trial in track(frames_data.iterrows(), total=len(frames_data)):
        keep = True  # to be changed if trial is BAD
        # --------------------------- Fetch files for trial -------------------------- #
        csv_file, video_files = parse_folder_files(main_fld / trial.subfolder,
                                                   trial.Video)
        logger.info(f'Found csv file: {csv_file}')

        # Load and trim sensors data
        sensors_data = load_csv_file(csv_file)

        sensors_data = {
            ch: rolling_mean(sensors_data[ch], 60)
            for ch in sensors
        }

        # debug plots: RAW data
        # if DEBUG:
        #     f, ax = plt.subplots(figsize=(16, 9))
        #     ax.set(title=trial.Video)

        #     colors = 'rgbm'
        #     for sens, col in zip(sensors, colors):
        #         ax.plot(sensors_data[sens], label=sens, color=col)
        #     ax.axvline(trial.Start, lw=3, color='k', label='start')

        #     baselines = dict(
        #         fr = trial.baselineFR,
        #         fl = trial.baselineFL,
        #         hr = trial.baselineHR,
        #         hl = trial.baselineHL,
        #     )
        #     for col, (ch, bl) in zip(colors, baselines.items()):
        #         ax.axhline(bl, label=f'Sensors {ch}', color=col)

        #     ax.legend()
        #     plt.show()

        # Get baselined and calibrated sensors data
        if calibrate_sensors:
            sensors_data = calibrate_sensors_data(
                sensors_data,
                sensors,
                calibration_data=calibration_data,
                weight_percent=weight_percent,
                mouse_weight=trial.Weight,
                direction=trial.Direction,
                paw=trial.Paw,
                base_voltageFR=trial.baselineFR,
                base_voltageFL=trial.baselineFL,
                base_voltageHR=trial.baselineHR,
                base_voltageHL=trial.baselineHL)

        # Check paw used or skip wrong paw trials
        if correct_for_paw:
            sensors_data = correct_paw_used(sensors_data, trial.Paw)
        elif trial.Paw.upper() != use_paw:
            continue

        # check when all paws are on sensors
        paws_on_sensors = {
            f'{paw}_on_sensor': (sensors_data[paw] > 6).astype(int)
            for paw in sensors
        }
        all_on_sensors = np.sum(np.vstack(list(paws_on_sensors.values())), 0)
        all_on_sensors[all_on_sensors < 4] = 0
        all_on_sensors[all_on_sensors == 4] = 1
        paws_on_sensors['all_paws_on_sensors'] = all_on_sensors
        sensors_data.update(paws_on_sensors)

        # get cumulative weight on sensors
        sensors_data['tot_weight'] = np.sum(
            np.vstack([sensors_data[p] for p in sensors]), 0)
        sensors_data['weight_on_sensors'] = (sensors_data['tot_weight'] >
                                             80).astype(int)
        sensors_data['on_sensors'] = (
            sensors_data['weight_on_sensors']
            & sensors_data['all_paws_on_sensors']).astype(int)

        # get trial start (last time on_sensors == 1 before trial.Start)
        start = get_onset_offset(sensors_data['on_sensors'][:trial.Start],
                                 .5)[0][-1]
        end_frame = trial.Start + n_frames

        # remove trials where conditions are wrong
        baseline_duration = np.abs((trial.Start - start) / trial.fps)
        if baseline_duration > 5 or baseline_duration < .2:
            logger.warning(
                f'Excluding trial: {trial.Video} because the baseline was either too long or too short: {round(baseline_duration, 3)}s'
            )
            keep = False
        if not sensors_data['on_sensors'][trial.Start - int(.2 * trial.fps)]:
            logger.warning(
                f'Excluding trial: {trial.Video} because the on-sensors condition was not met 0.2s before trial.Start'
            )
            keep = False

        # debug plots: CALIBRATE data
        if DEBUG:
            f, ax = plt.subplots(figsize=(16, 9))
            ax.set(title=trial.Video + f'   Is it good: {keep}')

            colors = 'rgbm'
            for sens, col in zip(sensors, colors):
                ax.plot(sensors_data[sens], label=sens, color=col)

            ax.plot(sensors_data['tot_weight'],
                    label='tot',
                    color='k',
                    lw=3,
                    zorder=-1)
            ax.plot((sensors_data['on_sensors'] * 10) + 100,
                    lw=8,
                    color=[.4, .4, .4])

            ax.axvline(trial.Start, lw=3, color='k', label='manual start')
            ax.axvline(end_frame, lw=2, color='k', label='manual end')
            ax.axvline(start, lw=3, color='r', label='trial start')

            ax.set(xlim=[start - 2000, start + 2000])
            ax.legend()
            plt.show()
        if not keep: continue

        # cut trial
        sensors_data = {
            ch: v[start:end_frame]
            for ch, v in sensors_data.items()
        }

        # compute center of gravity
        CoG, centered_CoG = compute_center_of_gravity(sensors_data)
        logger.debug('Finished "correcting" sensors data')

        # Organise data
        data["name"].append(trial.Video)
        for ch, vals in sensors_data.items():
            if ch not in data.keys():
                data[ch] = []
            data[ch].append(vals)

        data["CoG"].append(CoG)
        data["centered_CoG"].append(centered_CoG)
        data["start"].append(start)
        data["end"].append(end_frame)
        data['condition'].append(trial.Condition)
        data['fps'].append(trial.fps)

    data = pd.DataFrame.from_dict(data)
    if not len(data):
        raise ValueError("No data was loaded, something went wrong!")
    else:
        logger.info(
            f"\n\n\n=========\nLoaded data for {len(data)} trials, yay!\n=========\nCount:\n"
        )
        logger.info(data.groupby('condition')['name'].count())

    logger.info("Saving data to: {}".format(savepath))
    data.to_hdf(savepath, key='hdf')

    # ------------------------------- Save metadata ------------------------------ #
    metadata = dict(
        date=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        fps=fps,
        n_frames=n_frames,
        calibrate_sensors=calibrate_sensors,
        weight_percent=weight_percent,
        correct_for_paw=correct_for_paw,
        use_paw=use_paw,
        frames_file=frames_file,
        sensors=sensors,
    )
    logger.info("Saving metadata to: {}".format(metadata_savepath))
    save_json(metadata_savepath, metadata)
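
The per-trial table written by run can be read back with pandas. A minimal sketch, assuming the same module-level savepath and the 'hdf' key used by data.to_hdf above:

import pandas as pd

# reload the per-trial DataFrame saved by run()
data = pd.read_hdf(savepath, key='hdf')
print(data.groupby('condition')['name'].count())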
Example #8
        # notch filter noise
        b_notch, a_notch = iirnotch(2.2, 3, trial.fps)
        sig = filtfilt(b_notch, a_notch, raw)
        SIGS.append(sig)

        # plot raw and altered signals
        lw = 4 if ch != 'fr' else 5

        axarr[0].plot(time, raw, lw=lw - 3, color=color)
        axarr[0].plot(time, sig, lw=lw, color=color, label=ch)

        # plot channels derivatives
        if 'tot' not in ch:
            axarr[1].plot(time,
                          rolling_mean(derivative(sig), 30),
                          lw=3,
                          color=color,
                          label=ch)

        # plot also in the main figure
        if 'tot' not in ch:
            main_axes[n].plot(time[:start],
                              sig[:start],
                              lw=2,
                              color=color,
                              alpha=1)
            main_axes[n].plot(time[start:],
                              sig[start:],
                              lw=2,
                              color=color,
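
The iirnotch/filtfilt pair above suppresses a narrow noise band around 2.2 Hz. A standalone sketch of the same scipy calls on a synthetic trace, with an illustrative sampling rate standing in for trial.fps:

import numpy as np
from scipy.signal import filtfilt, iirnotch

fs = 600                 # illustrative sampling rate (trial.fps in the snippet above)
t = np.arange(0, 5, 1 / fs)
raw = np.sin(2 * np.pi * 0.5 * t) + 0.3 * np.sin(2 * np.pi * 2.2 * t)

# 2.2 Hz notch with quality factor 3, applied forwards and backwards (zero phase)
b_notch, a_notch = iirnotch(2.2, 3, fs)
sig = filtfilt(b_notch, a_notch, raw)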
Example #9
def dff(sig):
    # baseline: 30th percentile of everything before the last N_SEC_PRE seconds (at 30 fps)
    th = np.nanpercentile(sig[:-N_SEC_PRE * 30], 30)
    return rolling_mean((sig - th) / th, 3)