Example 1
def plot_model(ax, behav_traces, model, label, color):
    # Make predictions for each trial
    predictions = []
    for vals in zip(*behav_traces.values()):
        exog = pd.DataFrame({k: v
                             for k, v in zip(behav_traces.keys(), vals)
                             }).interpolate()

        exog['shelt_speed'] = -derivative(exog['shelter_distance'].values)
        exog['shelt_accel'] = derivative(exog['shelt_speed'].values)
        exog['speedxsdist'] = (exog['shelt_speed'] * exog['shelter_distance'])

        exog = pd.DataFrame(preprocessing.scale(exog.values, axis=0),
                            columns=exog.columns,
                            index=exog.index)

        exog = sm.add_constant(exog, prepend=False)
        predictions.append(model.predict(exog.reset_index(drop=True)).values)
    if not predictions:
        raise ValueError('no trial predictions were computed')

    # plot mean and error
    mean, err = np.nanmean(predictions, axis=0), sem(predictions, axis=0)
    x = np.arange(frames_pre - frames_pre_fit, frames_pre + frames_post_fit, 1)
    ax.fill_between(x, mean - err, mean + err, color=color, alpha=.3)
    ax.plot(x, mean, lw=3.5, alpha=0.7, color=white)
    ax.plot(x, mean, lw=3, alpha=0.8, color=color, label=label)
    ax.legend(ncol=2, fontsize="x-small", labelspacing=.3, columnspacing=1.2)

    return model, mean
Example 2
    def _make_tuples(self, key):
        session_fld = get_session_folder(**key)
        fps = (Recording & key).fetch1('fps_behav')

        # Load recordings AI file
        aifile = (Recording & key).fetch1("aifile")
        f, keys, subkeys, allkeys = open_hdf(os.path.join(session_fld, aifile))

        # Load the trigger signals and find their start/end times (in samples)
        camera_triggers = f['AI']['0'][()]
        microscope_triggers = f['AI']['1'][()]

        starts, ends = get_times_signal_high_and_low(microscope_triggers)
        cam_starts, cam_ends = get_times_signal_high_and_low(camera_triggers)

        # Get the start/end times of each recording, in sample numbers
        print('\nExtracting start/end of individual recordings')
        rec_starts = [starts[0]] + [
            starts[s] for s in list(
                np.where(derivative(starts) > self.sampling_frequency)[0])
        ]
        rec_ends = []

        for rs in rec_starts:
            _ends = np.array([e for e in ends if e > rs])
            try:
                nxt = np.where(
                    derivative(_ends) > self.sampling_frequency)[0][0]
                rec_ends.append(_ends[nxt - 1])
            except IndexError:
                rec_ends.append(ends[-1])

        if len(rec_ends) != len(rec_starts):
            raise ValueError('Something went wrong')
        startends = [(s, e) for s, e in zip(rec_starts, rec_ends)]

        # Go from sample number to frame numbers
        def smpl2frm(val):
            return np.int32(round((val / self.sampling_frequency) * fps))

        startends = [(smpl2frm(s), smpl2frm(e)) for s, e in startends]
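        # e.g. (illustrative, assumed numbers): with a 25 kHz analog sampling
        # rate and a 40 fps behaviour camera, sample 50000 maps to
        # round((50000 / 25000) * 40) = frame 80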

        # Build a binary 'is recording' mask over frames
        roi_data = f['Fiji_ROI_1'][()]
        is_recording = np.zeros_like(roi_data)

        for start, end in startends:
            is_recording[start:end - 20] = 1

        key['is_ca_recording'] = is_recording
        key['starts'] = np.array([s for s, e in startends])
        key['ends'] = np.array([e for s, e in startends])
        key['camera_frames'] = camera_triggers
        key['microscope_frames'] = microscope_triggers
        key['n_frames'] = len(is_recording)

        manual_insert_skip_duplicate(self, key)
Example 3
    def _make_tuples(self, key):
        sub = [
            f for f in fld.glob('*') if f.is_dir() and key['mouse'] in str(f)
        ][0]

        hdfs = sorted([f for f in sub.glob('*.hdf5') if 'Fiji-tag' in f.name])
        exp_names = [f.name.split('_Fiji')[0] for f in hdfs]

        exp_data = {}
        for exp in exp_names:
            h = [h for h in hdfs if exp in str(h)]
            v = [f for f in sub.glob('*.mp4') if exp in str(f)]

            if len(h) != 1 or len(v) != 1:
                continue
                # raise ValueError(f'h:{h}\nv:{v}')

            exp_data[exp] = dict(hdf=h[0], video=v[0])

        for name, data in exp_data.items():
            if '_t' in name:
                splitter = '_t'
            else:
                splitter = '_v'

            _, _, _, fps, _ = get_video_params(
                get_cap_from_file(str(data['video'])))

            try:
                f, keys, subkeys, allkeys = open_hdf(str(data['hdf']))
            except Exception as e:
                print(f'Failed to open AI file: {data["hdf"].name}:\n{e}')
                return

            roi = [k for k in keys if 'Fiji_ROI' in k][0]
            sig = f[roi][()]
            is_rec = np.zeros_like(sig)
            is_rec[sig > 0] = 1

            rec_starts = np.where(derivative(is_rec) > 0)[0]
            rec_ends = np.where(derivative(is_rec) < 0)[0]

            ekey = key.copy()
            ekey['date'] = name.split('_')[0]
            ekey['rec'] = int(name.split(splitter)[1][0])
            ekey['name'] = name
            ekey['hdf_path'] = str(data['hdf'])
            ekey['video_path'] = str(data['video'])
            ekey['video_fps'] = fps
            ekey['is_ca_rec'] = is_rec
            ekey['ca_rec_starts'] = rec_starts
            ekey['ca_rec_ends'] = rec_ends
            manual_insert_skip_duplicate(self, ekey)
Example 4
def get_onset_offset(signal, th, clean=True):
    """
        Get onset/offset times at which a signal crosses from below to above,
        and from above to below, a given threshold.
        Arguments:
            signal: 1d numpy array
            th: float, threshold
            clean: bool. If True, ends before the first start and
                starts after the last end are removed.
    """
    above = np.zeros_like(signal)
    above[signal >= th] = 1

    der = derivative(above)
    starts = np.where(der > 0)[0]
    ends = np.where(der < 0)[0]

    if above[0] > 0:
        starts = np.concatenate([[0], starts])
    if above[-1] > 0:
        ends = np.concatenate([ends, [len(signal)]])

    if clean:
        ends = np.array([e for e in ends if e > starts[0]])

        if np.any(ends):
            starts = np.array([s for s in starts if s < ends[-1]])

    if not np.any(starts):
        starts = np.array([0])
    if not np.any(ends):
        ends = np.array([len(signal)])

    return starts, ends
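
A minimal usage sketch (not part of the original source): it assumes the definition above plus a stand-in for the project's `derivative` helper, here taken to be a first difference padded back to the original length; with a different `derivative` the reported indices may shift by a sample.

import numpy as np

# Stand-in for the project's `derivative` helper (assumption only)
def derivative(x):
    return np.concatenate([[0], np.diff(x)])

# Toy signal with two bouts above a threshold of 0.5
signal = np.array([0, 0, 1, 1, 0, 0, 1, 1, 1, 0], dtype=float)
starts, ends = get_onset_offset(signal, th=0.5)
print(starts, ends)  # with this stand-in: onsets [2 6], offsets [4 9]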
Example 5
def get_onset(sig):
    '''
        Gets the onset by looking for the last
        time the signal ramped up before its maximum
    '''
    smoothed = rolling_mean(sig, 20)
    atmax = np.argmax(smoothed)

    deriv = derivative(smoothed)
    onset = None
    for th in (0.001, 0.01, 0.1):
        try:
            onset = np.where(np.abs(deriv[:atmax]) <= th)[0][-1]
        except IndexError:
            pass
        else:
            break

    return onset
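
A quick sanity check along the same lines (again an illustration, not original code), with stand-in implementations of `rolling_mean` and `derivative` assumed to be a simple moving average and a padded first difference:

import numpy as np

def rolling_mean(x, window):
    # Assumption: plain moving average via convolution
    return np.convolve(x, np.ones(window) / window, mode='same')

def derivative(x):
    return np.concatenate([[0], np.diff(x)])

# Flat baseline followed by a ramp to a peak: the detected onset should
# fall near the end of the flat portion (the exact index depends on the
# smoothing window and on the real helpers' behaviour).
sig = np.concatenate([np.zeros(100), np.linspace(0, 1, 100)])
print(get_onset(sig))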
Example 6
                # Get only time points when Calcium recording is on
                rsig = Roi().get_roi_signal_clean(rec, roi_ids[n])

                signal = (TiffTimes
                          & f"rec_name='{rec}'").fetch1("is_ca_recording")
                rec_on = np.where(signal)[0]
                speed = speed[rec_on]
                ang_vel = ang_vel[rec_on]
                shelter_distance = shelter_distance[rec_on]

                # Append this recording's data to the pooled traces
                calcium_trace.extend(list(rsig))
                behaviour_traces['speed'].extend(list(speed))
                behaviour_traces['acceleration'].extend(list(
                    derivative(speed)))
                behaviour_traces['ang_vel'].extend(list(ang_vel))
                behaviour_traces['shelter_distance'].extend(
                    list(shelter_distance))

            # Keep only times where mouse is out of shelter
            th = 2
            keep_idx = np.where(np.array(behaviour_traces['speed']) > th)[0]

            btraces = {
                k: np.array(v)[keep_idx]
                for k, v in behaviour_traces.items()
            }
            ctrace = np.array(calcium_trace)[keep_idx]

            # Fit robust linear regression model
Example 7
                                                             frames_post_fit)
                        traces['escape_peak_speed_ols'].append(rsig[pre:post])
                    elif ev.type == "stim_onset":
                        container = ols_behav_traces_escape_stim
                    elif ev.type == "shelter_arrival":
                        container = ols_behav_traces_shelter_arrival
                    else:
                        container = None

                    if container is not None:
                        pre, post = int(ev.frame -
                                        frames_pre_fit), int(ev.frame +
                                                             frames_post_fit)
                        container['speed'].append(speed[pre:post])
                        container['acceleration'].append(
                            derivative(speed[pre:post]))
                        container['shelter_distance'].append(
                            shelter_distance[pre:post])
                        container['ang_vel'].append(ang_vel[pre:post])
                        # container['shelter_acceleration'].append(derivative(shelter_distance[pre:post]))

                # Plot spont events
                for i, ev in spnt.iterrows():
                    # Check if there was recording going on at this time
                    if not TiffTimes().is_recording_on_in_interval(
                            ev.frame - frames_pre,
                            ev.frame + frames_post,
                            sess_name=sess,
                            rec_name=rec):
                        continue
Example 8
for mouse, sess, sessname in track(mouse_sessions,
                                   description='Computing pearson corr'):
    if not DO['pearson_corr']:
        break
    # Get data
    tracking, ang_vel, speed, shelter_distance, dffs, signals, nrois, is_rec, roi_ids = \
                        get_mouse_session_data(mouse, sess, sessions, hanning_window=hanning)

    # Get chunks start end times
    tiff_starts, tiff_ends = get_tiff_starts_ends(is_rec)

    # Get times running in direction
    for n in range(2):
        run_out = np.zeros_like(is_rec)
        if n == 0:
            run_out[derivative(tracking.x) < -speed_th] = 1
        else:
            run_out[derivative(tracking.x) > speed_th] = 1

        keep_frames = run_out * is_rec

        # Slow behav
        _, smooth_speed = get_chunk_rolling_mean_subracted_signal(
            speed, tiff_starts, tiff_ends, window=window)
        _, smooth_shelter_distance = get_chunk_rolling_mean_subracted_signal(
            shelter_distance, tiff_starts, tiff_ends, window=window)

        smooth_speed = smooth_speed[keep_frames == 1]
        smooth_shelter_distance = smooth_shelter_distance[keep_frames == 1]
        # speed_dist_coeffs[n].append((ytrain.sig, model.predict(xtrain)(smooth_speed, smooth_shelter_distance)[0])
Example 9
        # Notch filter to remove narrow-band noise around 2.2 Hz (Q = 3)
        b_notch, a_notch = iirnotch(2.2, 3, trial.fps)
        sig = filtfilt(b_notch, a_notch, raw)
        SIGS.append(sig)

        # plot raw and altered signals
        lw = 4 if ch not in ('fr',) else 5

        axarr[0].plot(time, raw, lw=lw - 3, color=color)
        axarr[0].plot(time, sig, lw=lw, color=color, label=ch)

        # plot channels derivatives
        if 'tot' not in ch:
            axarr[1].plot(time,
                          rolling_mean(derivative(sig), 30),
                          lw=3,
                          color=color,
                          label=ch)

        # plot also in the main figure
        if 'tot' not in ch:
            main_axes[n].plot(time[:start],
                              sig[:start],
                              lw=2,
                              color=color,
                              alpha=1)
            main_axes[n].plot(time[start:],
                              sig[start:],
                              lw=2,
                              color=color,