Code Example #1
import numpy as np
import matplotlib.pyplot as plt
from mne.time_frequency import psd_array_multitaper

import standard_pipeline.place_cell_pipeline as pipe


def get_psd(data, rate, win_size):
    """Demo: plot spatial activity maps of two example cells next to their multitaper power spectra.
    Note that the arguments are currently overridden by the demo values below."""

    ### Demo cells: 583 (strong periodicity) and 49 (low periodicity)
    pcf = pipe.load_pcf(r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200625')

    neurons = [583, 49]
    rate = 1/5  # samples per centimeter

    fig, ax = plt.subplots(len(neurons), 3, sharex='col')
    for i, neuron in enumerate(neurons):
        data = pcf.bin_avg_activity[neuron]
        psd, freqs = psd_array_multitaper(data, rate, adaptive=True, normalization='full')

        # Plot activity
        ax[i, 0].plot(np.arange(0, 400, 5), data)
        ax[i, 0].set_ylabel('mean dF/F')

        # Plot frequencies
        ax[i, 1].plot(freqs, psd)
        ax[i, 1].set_ylabel('PSD')

        # Plot periods (skip the DC bin to avoid division by zero)
        ax[i, 2].plot(1 / freqs[1:], psd[1:])
        ax[i, 2].set_ylabel('PSD')


    ax[0, 0].set_title('Spatial activity map')
    ax[0, 1].set_title('Frequency power density')
    ax[0, 2].set_title('Period power density')
    ax[1, 0].set_xlabel('VR position [cm]')
    ax[1, 1].set_xlabel('Frequency [1/cm]')
    ax[1, 2].set_xlabel('Period [cm]')
    plt.tight_layout()
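
    # Hedged follow-up (not in the original): read the dominant spatial period of the last
    # plotted cell off its PSD, ignoring the DC bin.
    peak_idx = np.argmax(psd[1:]) + 1
    print(f'Dominant period: {1 / freqs[peak_idx]:.1f} cm (frequency: {freqs[peak_idx]:.3f} 1/cm)')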
Code Example #2
import os
from copy import deepcopy
from glob import glob

import numpy as np

import standard_pipeline.place_cell_pipeline as pipe


def load_all_pc_data(root):
    """
    Loads spatial activity map (pcf.bin_avg_activity) and place cell data (pcf.place_cells) of all PCF objects
    in the root directory tree.
    :param root: str, directory that holds all PCF objects to be loaded
    :return: bin_avg_act; np.array with shape (n_neurons, n_bins) holding spatial activity maps of all neurons
             pc; list with length n_place_cells holding data (indices, place fields) of all place cells
    """
    file_list = []
    for step in os.walk(root):
        pcf_files = glob(step[0]+r'\pcf_results*')
        if len(pcf_files) > 0:
            pcf_file = os.path.splitext(os.path.basename(max(pcf_files, key=os.path.getmtime)))[0]
            file_list.append((step[0], pcf_file))
    file_list.reverse()
    print(f'Found {len(file_list)} PCF files. Start loading...')

    bin_avg_act = None
    pc = None

    for file in file_list:

        curr_pcf = pipe.load_pcf(file[0], file[1])

        if bin_avg_act is None:
            bin_avg_act = deepcopy(curr_pcf.bin_avg_activity)
        else:
            idx_offset = bin_avg_act.shape[0]
            try:
                bin_avg_act = np.vstack((bin_avg_act, curr_pcf.bin_avg_activity))
            except ValueError:
                print(f'Could not add place cells from {file[0]} because the bin number did not match.')
                continue  # skip the rest of the loop

        if pc is None:
            pc = deepcopy(curr_pcf.place_cells)
        else:
            # Place cell index has to be offset by the amount of cells already in the array
            curr_pc_offset = []
            for i in curr_pcf.place_cells:
                i = list(i)
                i[0] = i[0] + idx_offset
                curr_pc_offset.append(tuple(i))

            pc.extend(curr_pc_offset)

    return bin_avg_act, pc
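
# A minimal usage sketch (mouse directory assumed for illustration): pool all sessions found
# under one mouse folder and report how much data was collected.
if __name__ == '__main__':
    all_act, all_pc = load_all_pc_data(r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41')
    print(f'{all_act.shape[0]} neurons with {all_act.shape[1]} position bins each, {len(all_pc)} place cells.')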
Code Example #3
#%% simple data
import numpy as np
import standard_pipeline.place_cell_pipeline as pipe

#%%
#r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch2\M19\20191121b\N2',
roots = [
    r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200318',
    r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200319',
    r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200320',
    r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200321',
    r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200322',
    r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200323'
]

pcf_list = [None] * len(roots)
for idx, root in enumerate(roots):
    pcf_list[idx] = pipe.load_pcf(root)

#%% combine all pcf objects into one big one and plot place cells
root = r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data'

#%% spatial information functions

all_sess_si = []
# for root in roots:
#     if root == r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch2\M25\20191204\N1\pcf_results_nobadtrials.pickle':
#         with open(root, 'rb') as file:
#             pcf = pickle.load(file)
#     else:
#         pcf = pipe.load_pcf(root)

for pcf in pcf_list:
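    # Hedged sketch (not in the original): Skaggs spatial information per cell from the binned
    # activity map, assuming uniform occupancy across position bins.
    act = np.clip(pcf.bin_avg_activity, a_min=1e-12, a_max=None)  # (n_cells, n_bins), avoid log(0)
    p_occ = 1.0 / act.shape[1]                                    # uniform occupancy assumption
    mean_rate = act.mean(axis=1, keepdims=True)
    si = np.sum(p_occ * (act / mean_rate) * np.log2(act / mean_rate), axis=1)
    all_sess_si.append(si)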
Code Example #4
np.savetxt(
    r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\batch_processing\cell_alignments\spikerate_pc.txt',
    spikerate_pc,
    fmt='%.4f',
    delimiter='\t')
np.savetxt(
    r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\batch_processing\cell_alignments\spikerate_non_pc.txt',
    spikerate_non_pc,
    fmt='%.4f',
    delimiter='\t')

#%% Load pcf files into a dict for better indexing

pcf_dict = {}
for root in roots:
    pcf = pipe.load_pcf(root)
    pcf_dict[pcf.params['session']] = pcf

with open(
        r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch2\M19\20191204\N2\pcf_results.pickle',
        'rb') as file:
    pcf_dict['20191204'] = pickle.load(file)

#%% load alignment files and store data in a DataFrame
alignment_root = r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch2\batch_analysis'

alignment_paths = glob(alignment_root + r'\pc_alignment*.txt')
sess_list = [path.split(os.path.sep)[-2] for path in roots]

# load alignment files and enter data into a data frame with columns (data, glob_id, session, sess_id, pc_sess)
single_rows = []
Code Example #5
import standard_pipeline.place_cell_pipeline as pipe
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

session = r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200627'
window_size = (2, 4)

# Load PCF
pcf = pipe.load_pcf(session)

# Set RZ borders
if pcf.params['novel']:
    zone_borders = np.array([[9, 19], [34, 44], [59, 69], [84, 94]])
else:
    zone_borders = np.array([[-6, 4], [26, 36], [58, 68], [90, 100]])

# Find frame count of valve openings
df_hit_list = []
df_miss_list = []
for trial_idx, trial in enumerate(pcf.behavior):
    # Recover frames where the mouse was not moving and get frame indices
    frame_idx = np.where(np.nan_to_num(trial[:, 3], nan=1) == 1)[0]
    valve_idx = np.where(trial[:, 6] == 1)[0]
    # Check if any reward zone was without reward
    for idx, zone in enumerate(zone_borders):
        zone_data = trial[np.logical_and(zone[0] < trial[:, 1], trial[:, 1] < zone[1]), :]
        if sum(zone_data[:, 6]) == 0:
            df_miss_list.append(pd.DataFrame({'trial': [trial_idx], 'zone':[idx]}))

    # For every valve opening, get the index of the next frame and save it for later alignment
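    # Hedged completion of the step described above (DataFrame columns assumed, mirroring
    # df_miss_list): for each valve opening, count the imaging frames recorded up to that
    # point, i.e. the index of the next frame the reward can later be aligned to.
    for v_idx in valve_idx:
        frame_count = int(np.searchsorted(frame_idx, v_idx))
        df_hit_list.append(pd.DataFrame({'trial': [trial_idx], 'frame': [frame_count]}))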
Code Example #6
# Imports inferred from the function body (the GUI is adapted from CaImAn's component inspector)
import os

import cv2
import numpy as np
import scipy.ndimage
import pyqtgraph as pg
from pyqtgraph import FileDialog
from pyqtgraph.Qt import QtGui
from pyqtgraph.parametertree import Parameter, ParameterTree
import caiman as cm

import standard_pipeline.place_cell_pipeline as pipe


def run_gui(path=None, data=None):
    """
    Loads PCF or CNMF data object from a provided path and loads the CaImAn GUI for component inspection.
    This GUI was tweaked to not require any storage-intensive mmap files, but can therefore not show individual frames.
    :param path: optional str, directory of the PCF or CNMF object from which component data should be loaded. If None
                and data=None, a file dialog will open to select the directory in which to look for a CNMF/PCF file.
    :param data: optional, data in form of an already loaded cnm object can be provided directly
    :return:
    """

    try:
        cv2.setNumThreads(1)
    except Exception:
        print('Open CV is naturally single threaded')

    try:
        if __IPYTHON__:
            # print(1)
            # this is used for debugging purposes only. allows to reload classes
            # when changed
            get_ipython().run_line_magic('load_ext', 'autoreload')
            get_ipython().run_line_magic('autoreload', '2')
    except NameError:
        print('Not launched under iPython')

    def make_color_img(img, gain=255, min_max=None, out_type=np.uint8):
        if min_max is None:
            min_ = img.min()
            max_ = img.max()
        else:
            min_, max_ = min_max

        img = (img - min_) / (max_ - min_) * gain
        img = img.astype(out_type)
        img = np.dstack([img] * 3)
        return img

    ### FIND DATA ###
    if data is None:  # different conditions on file loading (not necessary if data was already provided)
        if path is None:  # if no path has been given, open a window prompt to select a directory
            F = FileDialog()

            # load object saved by CNMF
            path = F.getExistingDirectory(
                caption='Select folder from which to load a PCF or CNMF file')

        try:  # first try to get CNMF data from a PCF object (should be most up-to-date)
            cnm_obj = pipe.load_pcf(path).cnmf
        except FileNotFoundError:
            try:
                cnm_obj = pipe.load_cnmf(path)
            except FileNotFoundError:
                raise FileNotFoundError(
                    f'Could not find data to load in {path}!')
    else:
        cnm_obj = data

    # movie NOT NEEDED IN VERSION WITHOUT MMAP FILE
    # if not os.path.exists(cnm_obj.mmap_file):
    #     M = FileDialog()
    #     cnm_obj.mmap_file = M.getOpenFileName(caption='Load memory mapped file', filter='*.mmap')[0]
    #
    # if fpath[-3:] == 'nwb':
    #     mov = cm.load(cnm_obj.mmap_file, var_name_hdf5='acquisition/TwoPhotonSeries')
    # else:
    #     mov = cm.load(cnm_obj.mmap_file)

    estimates = cnm_obj.estimates
    params_obj = cnm_obj.params

    # min_mov = np.min(mov)
    # max_mov = np.max(mov)

    if not hasattr(estimates, 'Cn'):
        if not os.path.exists(cnm_obj.mmap_file):
            M = FileDialog()
            cnm_obj.mmap_file = M.getOpenFileName(
                caption='Load memory mapped file', filter='*.mmap')[0]
        mov = cm.load(cnm_obj.mmap_file)

        estimates.Cn = cm.local_correlations(mov, swap_dim=False)
    Cn = estimates.Cn

    # min_mov_denoise = np.min(estimates.A)*estimates.C.min()
    # max_mov_denoise = np.max(estimates.A)*estimates.C.max()
    background_num = -1
    neuron_selected = False
    nr_index = 0

    min_background = np.min(estimates.b, axis=0) * np.min(estimates.f, axis=1)
    max_background = np.max(estimates.b, axis=0) * np.max(estimates.f, axis=1)

    if not hasattr(estimates, 'accepted_list'):
        # if estimates.discarded_components.A.shape[-1] > 0:
        #     estimates.restore_discarded_components()
        estimates.accepted_list = np.array([], dtype=int)
        estimates.rejected_list = np.array([], dtype=int)
        estimates.img_components = estimates.A.toarray().reshape(
            (estimates.dims[0], estimates.dims[1], -1),
            order='F').transpose([2, 0, 1])
        estimates.cms = np.array([
            scipy.ndimage.center_of_mass(comp)
            for comp in estimates.img_components
        ])
        estimates.idx_components = np.arange(estimates.nr)
        estimates.idx_components_bad = np.array([])
        estimates.background_image = make_color_img(estimates.Cn)
        # Generate image data
        estimates.img_components /= estimates.img_components.max(
            axis=(1, 2))[:, None, None]
        estimates.img_components *= 255
        estimates.img_components = estimates.img_components.astype(np.uint8)

    def draw_contours_overall(md):
        if md == "reset":
            draw_contours()
        elif md == "neurons":
            if neuron_selected:
                # if a specific neuron has been selected, only one contour
                # should be changed while thrshcomp_line is changing
                if nr_index == 0:
                    # if the user does not start to move through the frames
                    draw_contours_update(estimates.background_image, img)
                    draw_contours_update(comp2_scaled, img2)
                else:
                    # NEVER CALLED IN THIS VERSION WITHOUT MMAP SINCE NR_INDEX NEVER CHANGES (NO NR_VLINE)
                    draw_contours_update(raw_mov_scaled, img)
                    draw_contours_update(frame_denoise_scaled, img2)
            else:
                # if no specific neuron has been selected, all the contours are changing
                draw_contours()
        else:
            # md == "background": nothing needs to be redrawn here
            return

    def draw_contours():
        # 'thrshcomp_line', 'estimates' and 'img' are read from the enclosing run_gui scope
        bkgr_contours = estimates.background_image.copy()

        if len(estimates.idx_components) > 0:
            contours = [
                cv2.findContours(
                    cv2.threshold(img, int(thrshcomp_line.value()), 255,
                                  0)[1], cv2.RETR_TREE,
                    cv2.CHAIN_APPROX_SIMPLE)[0]
                for img in estimates.img_components[estimates.idx_components]
            ]
            SNRs = np.array(estimates.r_values)
            iidd = np.array(estimates.idx_components)

            idx1 = np.where(SNRs[iidd] < 0.1)[0]
            idx2 = np.where((SNRs[iidd] >= 0.1) & (SNRs[iidd] < 0.25))[0]
            idx3 = np.where((SNRs[iidd] >= 0.25) & (SNRs[iidd] < 0.5))[0]
            idx4 = np.where((SNRs[iidd] >= 0.5) & (SNRs[iidd] < 0.75))[0]
            idx5 = np.where((SNRs[iidd] >= 0.75) & (SNRs[iidd] < 0.9))[0]
            idx6 = np.where(SNRs[iidd] >= 0.9)[0]

            cv2.drawContours(bkgr_contours,
                             sum([contours[jj] for jj in idx1], []), -1,
                             (255, 0, 0), 1)
            cv2.drawContours(bkgr_contours,
                             sum([contours[jj] for jj in idx2], []), -1,
                             (0, 255, 0), 1)
            cv2.drawContours(bkgr_contours,
                             sum([contours[jj] for jj in idx3], []), -1,
                             (0, 0, 255), 1)
            cv2.drawContours(bkgr_contours,
                             sum([contours[jj] for jj in idx4], []), -1,
                             (255, 255, 0), 1)
            cv2.drawContours(bkgr_contours,
                             sum([contours[jj] for jj in idx5], []), -1,
                             (255, 0, 255), 1)
            cv2.drawContours(bkgr_contours,
                             sum([contours[jj] for jj in idx6], []), -1,
                             (0, 255, 255), 1)

        img.setImage(bkgr_contours, autoLevels=False)

    # pg.setConfigOptions(imageAxisOrder='row-major')

    def draw_contours_update(cf, im):
        # 'thrshcomp_line' and 'estimates' are read from the enclosing run_gui scope
        curFrame = cf.copy()

        if len(estimates.idx_components) > 0:
            contours = [
                cv2.findContours(
                    cv2.threshold(img, int(thrshcomp_line.value()), 255,
                                  0)[1], cv2.RETR_TREE,
                    cv2.CHAIN_APPROX_SIMPLE)[0]
                for img in estimates.img_components[estimates.idx_components]
            ]
            SNRs = np.array(estimates.r_values)
            iidd = np.array(estimates.idx_components)

            idx1 = np.where(SNRs[iidd] < 0.1)[0]
            idx2 = np.where((SNRs[iidd] >= 0.1) & (SNRs[iidd] < 0.25))[0]
            idx3 = np.where((SNRs[iidd] >= 0.25) & (SNRs[iidd] < 0.5))[0]
            idx4 = np.where((SNRs[iidd] >= 0.5) & (SNRs[iidd] < 0.75))[0]
            idx5 = np.where((SNRs[iidd] >= 0.75) & (SNRs[iidd] < 0.9))[0]
            idx6 = np.where(SNRs[iidd] >= 0.9)[0]

            if min_dist_comp in idx1:
                cv2.drawContours(curFrame, contours[min_dist_comp], -1,
                                 (255, 0, 0), 1)
            if min_dist_comp in idx2:
                cv2.drawContours(curFrame, contours[min_dist_comp], -1,
                                 (0, 255, 0), 1)
            if min_dist_comp in idx3:
                cv2.drawContours(curFrame, contours[min_dist_comp], -1,
                                 (0, 0, 255), 1)
            if min_dist_comp in idx4:
                cv2.drawContours(curFrame, contours[min_dist_comp], -1,
                                 (255, 255, 0), 1)
            if min_dist_comp in idx5:
                cv2.drawContours(curFrame, contours[min_dist_comp], -1,
                                 (255, 0, 255), 1)
            if min_dist_comp in idx6:
                cv2.drawContours(curFrame, contours[min_dist_comp], -1,
                                 (0, 255, 255), 1)

        im.setImage(curFrame, autoLevels=False)

#%% START BUILDING THE APPLICATION WINDOW

# Always start by initializing Qt (only once per application)

    app = QtGui.QApplication([])

    # Define a top-level widget to hold everything
    w = QtGui.QWidget()

    # Create some widgets to be placed inside
    btn = QtGui.QPushButton('press me')
    text = QtGui.QLineEdit('enter text')

    # Histogram controller (win)
    win = pg.GraphicsLayoutWidget()
    win.setMaximumWidth(300)
    win.setMinimumWidth(200)
    hist = pg.HistogramLUTItem()  # Contrast/color control
    win.addItem(hist)

    # Plotting windows
    p1 = pg.PlotWidget(
    )  # raw movie window (top-mid), all contours are drawn here
    p2 = pg.PlotWidget(
    )  # trace window (bottom-mid), calcium trace of the selected component
    p3 = pg.PlotWidget(
    )  # denoised movie window (top-right), only selected contour is drawn here

    # parameter table for online evaluation and mode change
    t = ParameterTree()

    # parameter table for neuron selection/sorting
    t_action = ParameterTree()
    action_layout = QtGui.QGridLayout()

    ## Create a grid layout to manage the widgets size and position
    layout = QtGui.QGridLayout()
    w.setLayout(layout)

    # A plot area (ViewBox + axes) for displaying the image
    #p1 = win.addPlot(title="Image here")
    # Item for displaying image data
    img = pg.ImageItem()
    p1.addItem(img)

    img2 = pg.ImageItem()
    p3.addItem(img2)

    hist.setImageItem(img)

    # Draggable line for setting isocurve level
    thrshcomp_line = pg.InfiniteLine(angle=0, movable=True, pen='g')
    hist.vb.addItem(thrshcomp_line)
    hist.vb.setMouseEnabled(y=False)  # makes user interaction a little easier
    thrshcomp_line.setValue(100)
    thrshcomp_line.setZValue(1000)  # bring iso line above contrast controls

    ## Add widgets to the layout in their proper positions
    layout.addWidget(win, 1, 0)  # histogram
    layout.addWidget(p3, 0, 2)  # denoised movie

    layout.addWidget(t, 0, 0)  # upper-right table
    layout.addWidget(t_action, 1, 2)  # bottom-right table
    layout.addWidget(p1, 0, 1)  # raw movie
    layout.addWidget(p2, 1, 1)  # calcium trace window

    #enable only horizontal zoom for the traces component
    p2.setMouseEnabled(x=True, y=False)

    draw_contours()

    hist.setLevels(estimates.background_image.min(),
                   estimates.background_image.max())

    # Another plot area for displaying ROI data
    #win.nextRow()
    #p2 = win.addPlot(colspan=2)
    p2.setMaximumHeight(250)
    #win.resize(800, 800)
    #win.show()

    # set position and scale of image
    img.scale(1, 1)
    # img.translate(-50, 0)

    # zoom to fit image
    p1.autoRange()

    mode = "reset"
    p2.setTitle("mode: %s" % (mode))

    thrshcomp_line.sigDragged.connect(lambda: draw_contours_overall(mode))

    def imageHoverEvent(event):
        #Show the position, pixel, and value under the mouse cursor.
        global x, y, i, j, val
        pos = event.pos()
        i, j = pos.y(), pos.x()
        i = int(np.clip(i, 0, estimates.background_image.shape[0] - 1))
        j = int(np.clip(j, 0, estimates.background_image.shape[1] - 1))
        val = estimates.background_image[i, j, 0]
        ppos = img.mapToParent(pos)
        x, y = ppos.x(), ppos.y()

    # Monkey-patch the image to use our custom hover function.
    # This is generally discouraged (you should subclass ImageItem instead),
    # but it works for a very simple use like this.
    img.hoverEvent = imageHoverEvent

    def mouseClickEvent(event):
        global x, y, i, j, val  # 'mode' is read from the enclosing run_gui scope

        pos = img.mapFromScene(event.pos())
        x = int(pos.x())
        y = int(pos.y())

        # 'mov' is not available in this version without the mmap file, so use the
        # component dimensions (d1, d2) to check the click position instead
        if x < 0 or x > estimates.dims[0] or y < 0 or y > estimates.dims[1]:
            # if the user clicks outside of the field of view, do nothing and leave the function
            return

        i, j = pos.y(), pos.x()
        i = int(np.clip(i, 0, estimates.background_image.shape[0] - 1))
        j = int(np.clip(j, 0, estimates.background_image.shape[1] - 1))
        val = estimates.background_image[i, j, 0]

        if mode is "neurons":
            show_neurons_clicked()

    p1.mousePressEvent = mouseClickEvent

    #A general rule in Qt is that if you override one mouse event handler, you must override all of them.
    def release(event):
        pass

    p1.mouseReleaseEvent = release

    def move(event):
        pass

    p1.mouseMoveEvent = move

    ## PARAMS
    params = [{
        'name': 'min_cnn_thr',
        'type': 'float',
        'value': 0.99,
        'limits': (0, 1),
        'step': 0.01
    }, {
        'name': 'cnn_lowest',
        'type': 'float',
        'value': 0.1,
        'limits': (0, 1),
        'step': 0.01
    }, {
        'name': 'rval_thr',
        'type': 'float',
        'value': 0.85,
        'limits': (-1, 1),
        'step': 0.01
    }, {
        'name': 'rval_lowest',
        'type': 'float',
        'value': -1,
        'limits': (-1, 1),
        'step': 0.01
    }, {
        'name': 'min_SNR',
        'type': 'float',
        'value': 2,
        'limits': (0, 20),
        'step': 0.1
    }, {
        'name': 'SNR_lowest',
        'type': 'float',
        'value': 0,
        'limits': (0, 20),
        'step': 0.1
    }, {
        'name': 'RESET',
        'type': 'action'
    }, {
        'name': 'SHOW BACKGROUND',
        'type': 'action'
    }, {
        'name': 'SHOW NEURONS',
        'type': 'action'
    }]

    ## Create tree of Parameter objects
    pars = Parameter.create(name='params', type='group', children=params)

    params_action = [{
        'name': 'Filter components',
        'type': 'bool',
        'value': True,
        'tip': "Filter components"
    }, {
        'name': 'View components',
        'type': 'list',
        'values': ['All', 'Accepted', 'Rejected', 'Unassigned'],
        'value': 'All'
    }, {
        'name': 'ADD GROUP',
        'type': 'action'
    }, {
        'name': 'REMOVE GROUP',
        'type': 'action'
    }, {
        'name': 'ADD SINGLE',
        'type': 'action'
    }, {
        'name': 'REMOVE SINGLE',
        'type': 'action'
    }, {
        'name': 'SAVE OBJECT',
        'type': 'action'
    }]

    pars_action = Parameter.create(name='params_action',
                                   type='group',
                                   children=params_action)

    t_action.setParameters(pars_action, showTop=False)
    t_action.setWindowTitle('Parameter Action')

    def reset_button():
        nonlocal mode
        mode = "reset"
        p2.setTitle("mode: %s" % (mode))
        #clear the upper right image
        zeros = np.asarray([[0] * 80 for _ in range(60)])
        img2.setImage(make_color_img(zeros), autoLevels=False)
        draw_contours()

    pars.param('RESET').sigActivated.connect(reset_button)

    def show_background_button():
        nonlocal mode, background_num
        global bg_vline, background_first_frame_scaled
        #clear thhe upper right image
        zeros = np.asarray([[0] * 80 for _ in range(60)])
        img2.setImage(make_color_img(zeros), autoLevels=False)

        background_num = (background_num + 1) % estimates.f.shape[0]
        mode = "background"
        p2.setTitle("mode: %s %d" % (mode, background_num))

        # display the first frame of the background
        background_first_frame = estimates.b[:, background_num].reshape(
            estimates.dims, order='F')
        min_background_first_frame = np.min(background_first_frame)
        max_background_first_frame = np.max(background_first_frame)
        background_first_frame_scaled = make_color_img(
            background_first_frame,
            min_max=(min_background_first_frame, max_background_first_frame))
        img.setImage(background_first_frame_scaled, autoLevels=False)

        # draw the trace and the infinite line
        trace_background = estimates.f[background_num]
        p2.plot(trace_background, clear=True)
        bg_vline = pg.InfiniteLine(angle=90, movable=True)
        p2.addItem(bg_vline, ignoreBounds=True)
        bg_vline.setValue(0)
        bg_vline.sigPositionChanged.connect(show_background_update)

    def show_background_update():
        global bg_index, background_scaled  # min/max_background are read from the enclosing scope
        bg_index = int(bg_vline.value())
        if bg_index > -1 and bg_index < estimates.f.shape[-1]:
            # upper left component scrolls through the frames of the background
            background = estimates.b[:, background_num].dot(
                estimates.f[background_num, bg_index]).reshape(estimates.dims,
                                                               order='F')
            background_scaled = make_color_img(
                background,
                min_max=(min_background[background_num],
                         max_background[background_num]))
            img.setImage(background_scaled, autoLevels=False)

    pars.param('SHOW BACKGROUND').sigActivated.connect(show_background_button)

    def show_neurons_button():
        nonlocal mode, neuron_selected
        mode = "neurons"
        neuron_selected = False
        p2.setTitle("mode: %s" % (mode))
        #clear the upper right image
        zeros = np.asarray([[0] * 80 for _ in range(60)])
        img2.setImage(make_color_img(zeros), autoLevels=False)

    def show_neurons_clicked():
        nonlocal nr_index, neuron_selected
        global x, y, i, j, val, min_dist_comp, contour_single, comp2_scaled
        neuron_selected = True
        distances = np.sum(
            ((x, y) - estimates.cms[estimates.idx_components])**2, axis=1)**0.5
        min_dist_comp = np.argmin(distances)
        contour_all = [
            cv2.threshold(img, int(thrshcomp_line.value()), 255, 0)[1]
            for img in estimates.img_components[estimates.idx_components]
        ]
        contour_single = contour_all[min_dist_comp]

        # draw the traces (lower left component)
        estimates.components_to_plot = estimates.idx_components[min_dist_comp]
        p2.plot(estimates.C[estimates.components_to_plot] +
                estimates.YrA[estimates.components_to_plot],
                clear=True)

        # plot img (upper left component)
        img.setImage(estimates.background_image, autoLevels=False)
        draw_contours_update(estimates.background_image, img)
        # plot img2 (upper right component)
        comp2 = np.multiply(estimates.Cn, contour_single > 0)
        comp2_scaled = make_color_img(comp2,
                                      min_max=(np.min(comp2), np.max(comp2)))
        img2.setImage(comp2_scaled, autoLevels=False)
        draw_contours_update(comp2_scaled, img2)
        # set title for the upper two components
        p3.setTitle("pos: (%0.1f, %0.1f)  component: %d  value: %g dist:%f" %
                    (x, y, estimates.components_to_plot, val,
                     distances[min_dist_comp]))
        p1.setTitle("pos: (%0.1f, %0.1f)  component: %d  value: %g dist:%f" %
                    (x, y, estimates.components_to_plot, val,
                     distances[min_dist_comp]))

        # draw the infinite line (INACTIVE IN THIS VERSION WITHOUT MMAP FILES)
        # nr_vline = pg.InfiniteLine(angle=90, movable=True)
        # p2.addItem(nr_vline, ignoreBounds=True)
        # nr_vline.setValue(0)
        # nr_vline.sigPositionChanged.connect(show_neurons_update)
        nr_index = 0

    def show_neurons_update():  # NOT CALLED IN THIS VERSION
        global nr_index, frame_denoise_scaled, estimates, raw_mov_scaled
        global min_mov, max_mov, min_mov_denoise, max_mov_denoise
        if neuron_selected is False:
            return
        nr_index = int(nr_vline.value())
        if nr_index > 0 and nr_index < mov[:, 0, 0].shape[0]:
            # upper left component scrolls through the raw movie
            raw_mov = mov[nr_index, :, :]
            raw_mov_scaled = make_color_img(raw_mov,
                                            min_max=(min_mov, max_mov))
            img.setImage(raw_mov_scaled, autoLevels=False)
            draw_contours_update(raw_mov_scaled, img)
            # upper right component scrolls through the denoised movie
            frame_denoise = estimates.A[:, estimates.idx_components].dot(
                estimates.C[estimates.idx_components,
                            nr_index]).reshape(estimates.dims, order='F')
            frame_denoise_scaled = make_color_img(frame_denoise,
                                                  min_max=(min_mov_denoise,
                                                           max_mov_denoise))
            img2.setImage(frame_denoise_scaled, autoLevels=False)
            draw_contours_update(frame_denoise_scaled, img2)

    pars.param('SHOW NEURONS').sigActivated.connect(show_neurons_button)

    def add_group():
        estimates.accepted_list = np.union1d(estimates.accepted_list,
                                             estimates.idx_components)
        estimates.rejected_list = np.setdiff1d(estimates.rejected_list,
                                               estimates.idx_components)
        change(None, None)

    pars_action.param('ADD GROUP').sigActivated.connect(add_group)

    def remove_group():
        estimates.rejected_list = np.union1d(estimates.rejected_list,
                                             estimates.idx_components)
        estimates.accepted_list = np.setdiff1d(estimates.accepted_list,
                                               estimates.idx_components)
        change(None, None)

    pars_action.param('REMOVE GROUP').sigActivated.connect(remove_group)

    def add_single():
        estimates.accepted_list = np.union1d(estimates.accepted_list,
                                             estimates.components_to_plot)
        estimates.rejected_list = np.setdiff1d(estimates.rejected_list,
                                               estimates.components_to_plot)
        change(None, None)

    pars_action.param('ADD SINGLE').sigActivated.connect(add_single)

    def remove_single():
        estimates.rejected_list = np.union1d(estimates.rejected_list,
                                             estimates.components_to_plot)
        estimates.accepted_list = np.setdiff1d(estimates.accepted_list,
                                               estimates.components_to_plot)
        change(None, None)

    pars_action.param('REMOVE SINGLE').sigActivated.connect(remove_single)

    def save_object():
        print('Saving')

        # open a fresh dialog here, since 'F' only exists when no path/data was provided
        ffll = FileDialog().getSaveFileName(filter='*.hdf5')
        print(ffll[0])
        cnm_obj.estimates = estimates
        cnm_obj.save(ffll[0])

    pars_action.param('SAVE OBJECT').sigActivated.connect(save_object)

    def action_pars_activated(param, changes):
        change(None, None)

    pars_action.sigTreeStateChanged.connect(action_pars_activated)

    ## If anything in the parameter tree changes, update the quality thresholds and redraw
    def change(param, changes):
        # 'estimates', 'pars' and 'pars_action' are read from the enclosing run_gui scope
        set_par = pars.getValues()
        if pars_action.param('Filter components').value():
            for keyy in set_par.keys():
                params_obj.quality.update({keyy: set_par[keyy][0]})
        else:
            params_obj.quality.update({
                'cnn_lowest': .1,
                'min_cnn_thr': 0.99,
                'rval_thr': 0.85,
                'rval_lowest': -1,
                'min_SNR': 2,
                'SNR_lowest': 0
            })
        estimates.filter_components(
            mov,
            params_obj,
            dview=None,
            select_mode=pars_action.param('View components').value())
        if mode is "background":
            return
        else:
            draw_contours()

    pars.sigTreeStateChanged.connect(change)

    change(None, None)  # set params to default
    t.setParameters(pars, showTop=False)
    t.setWindowTitle('Parameter Quality')

    ## END PARAMS

    ## Display the widget as a new window
    w.show()

    ## Start the Qt event loop
    app.exit(app.exec_())
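
# A minimal usage sketch (session path taken from the other examples): open the GUI for one
# session; components can then be sorted into accepted/rejected lists and saved as .hdf5.
if __name__ == '__main__':
    run_gui(path=r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200627')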
Code Example #7
                  'place_thresh': 0.25,  # threshold of being considered for place fields, calculated
                  #     from difference between max and baseline dF/F
                  'min_pf_size': 15,  # minimum size in cm for a place field (should be 15-20 cm)
                  'fluo_infield': 7,
                  # factor above which the mean DF/F in the place field should lie vs. outside the field
                  'trans_time': 0.2,  # fraction of the (unbinned!) signal while the mouse is located in
                  # the place field that should consist of significant transients
                  'track_length': 400,  # length in cm of the virtual reality corridor
                  'split_size': 50}  # size in frames of bootstrapping segments

    # Load CNM object
    cnm = pipe.load_cnmf(root)

    # Initialize PCF object with the raw data (CNM object) and the parameter dict
    pcf = pc.PlaceCellFinder(cnm, pcf_params)
    old_pcf = pipe.load_pcf(root, 'pcf_results_save.pickle')

    # If necessary, perform Peters spike prediction: either reuse the spikes from the old
    # PCF object or recompute them (the second assignment overrides the first)
    pcf.cnmf.estimates.spikes = old_pcf.cnmf.estimates.spikes
    pcf.cnmf.estimates.spikes = predict_spikes(pcf.cnmf.estimates.F_dff)

    # split traces into trials
    pcf.split_traces_into_trials()

    # Import behavior and align traces to it, while removing resting frames
    pcf.import_behavior_and_align_traces()
    pcf.params['resting_removed'] = True
    pcf.bin_activity_to_vr(remove_resting=pcf.params['resting_removed'])

    # create significant-transient-only traces
    pcf.create_transient_only_traces()
Code Example #8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.io as pio
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

import standard_pipeline.place_cell_pipeline as pipe
# Helper functions used below (pca, plot_eigenvalues, plot_variance_explained, plot_pc_with_hist)
# are project-specific and assumed to be importable from the surrounding analysis module.


def random():
    # Load data
    pcf = pipe.load_pcf(r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200511')

    # Neurons as samples, position bins as features
    raw_data = pcf.bin_avg_activity
    pc_idx = [x[0] for x in pcf.place_cells]
    labels = np.zeros(len(raw_data))
    labels[pc_idx] = 1

    # Standardize (z-score) data
    data = (raw_data - np.mean(raw_data, axis=0)) / np.std(raw_data, axis=0)

    # perform PCA (input as shape (n_samples, n_features))
    score, evectors, evals = pca(data)

    # plot the eigenvalues
    plot_eigenvalues(evals, limit=False)

    # plot variance explained
    plot_variance_explained(np.cumsum(evals)/np.sum(evals), cutoff=0.95)

    # visualize weights of the n-th principal component
    # (note: 'weights' is only defined by the sklearn PCA fit further below, so run that cell first)
    n_comp = 1
    plt.figure()
    for i in range(n_comp):
        plt.plot(weights[i], label=f'Comp {i+1}', linewidth=2)
        for zone in pcf.params['zone_borders']:
            plt.axvspan(zone[0], zone[1], color='red', alpha=0.1)
    plt.legend()

    perform_PCA(data, labels, 2, plot=True)

    # built-in PCA
    pca_model = PCA(n_components=80)  # Initializes PCA
    out = pca_model.fit(data)  # Performs PCA
    scores = pca_model.transform(data)
    weights = pca_model.components_

    # Plot first three components
    df = pd.DataFrame(np.vstack((scores.T, labels)).T)
    df.rename(columns=str, inplace=True)
    df.rename(columns={'80': 'labels'}, inplace=True)
    pio.renderers.default = 'browser'
    fig = px.scatter_3d(df, x='0', y='1', z='2', color='labels')
    fig.show()

    def perform_PCA(data, labels, n_comp, plot=False):
        pca_model = PCA(n_components=80)  # Initializes PCA
        pca_model.fit(data)  # Performs PCA
        scores = pca_model.transform(data)
        nrows = 3
        ncols = 3
        if plot:
            fig, ax= plt.subplots(nrows, ncols)
            i = 0
            for row in range(nrows):
                for col in range(ncols):
                    ax[row, col].scatter(x=scores[:, i], y=scores[:, i+1], s=10, c=labels)
                    ax[row, col].set_xlabel(f'Component {i+1}')
                    ax[row, col].set_ylabel(f'Component {i+2}')
                    i += 1

    # Plot PCA component with overlaying histogram
    plot_pc_with_hist(-score, evectors, (0, 1), labels, pcf.params)


    # t-SNE
    fig, ax = plt.subplots(2, 3)
    perplexities = [5, 30, 50, 75, 100, 500]
    count = 0
    for row in range(2):
        for col in range(3):
            pca_mod = PCA(n_components=50)
            pca_results = pca_mod.fit_transform(data)
            tsne_mod = TSNE(n_components=2, perplexity=perplexities[count], n_iter=5000)
            embed = tsne_mod.fit_transform(pca_results)
            ax[row, col].scatter(x=embed[:, 0], y=embed[:, 1], c=labels)
            ax[row, col].set_xlabel('Component 1')
            ax[row, col].set_ylabel('Component 2')
            ax[row, col].set_title(f'Perplexity {perplexities[count]}')
            count += 1

    # 3D
    for perp in perplexities:
        tsne_mod = TSNE(n_components=3, perplexity=perp, n_iter=5000)
        embed = tsne_mod.fit_transform(data)
        df = pd.DataFrame(np.vstack((embed.T, labels)).T)
        df.rename(columns=str, inplace=True)
        df.rename(columns={'3': 'labels'}, inplace=True)
        pio.renderers.default = 'browser'
        fig = px.scatter_3d(df, x='0', y='1', z='2', color='labels')
        fig.show()
Code Example #9
plt.yticks(fontsize=16)
plt.ylim(0,105)
plt.xlabel('VR position', fontsize=22)
plt.ylabel('Licked in bin [%]', fontsize=22)
plt.tight_layout()

ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)

#%% normalize data
import standard_pipeline.place_cell_pipeline as pipe
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt

# Load example session
pcf = pipe.load_pcf(r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\M41\20200627')

# Get the spike prediction from mobile frames for an example neuron
neuron = 369
all_mask = np.concatenate(pcf.params["resting_mask"]) # merge all trials
all_act = np.concatenate(pcf.session_spikes[neuron]) # merge all trials
trace = all_act[all_mask]

# plotting
plt.figure()
ax = plt.subplot(2,3,1)
sm.qqplot(trace, ax=ax, line="s")
ax.set_title("Peters spike prediction")
ax = plt.subplot(2,3,4)
sm.qqplot(trace, ax=ax, line="45")
Code Example #10
import os
from glob import glob

import numpy as np
import pandas as pd
from scipy.signal import argrelextrema

import standard_pipeline.place_cell_pipeline as pipe
# 'pvc' (population vector correlation) and 'performance' (behavioral performance) are
# project-specific modules that are assumed to be importable alongside this script.


def get_simple_data(root, filepath=r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3\batch_processing\simple_data.pickle',
                    overwrite=False, session_range=None, norm_range=None, norm_fields=None):
    """
    Calculates simple data points (meaning one data point per session/mouse, like place cell ratio, avg spike rate,
    max PVC slope) for all PCF objects in the root tree. Results are saved as a pickle file at filepath.
    :param root: str, directory that holds the PCF objects to be analysed
    :param filepath: str, file path of the results pickle object (must include extension)
    :param overwrite: bool, whether an existing results file should be extended (False) or overwritten (True)
    :param session_range: optional tuple (first, last); only sessions within this date range are processed
    :param session_range: optional tuple (first, last); only sessions within this date range are processed
    :param norm_range: optional tuple (first, last); sessions used as the baseline when normalizing data
    :param norm_fields: optional list of column names to normalize; defaults to all numeric metrics
    :return: pd.DataFrame with one row per session and mouse, also saved to filepath
    """

    file_list = []
    for step in os.walk(root):
        pcf_file = glob(step[0] + '\\pcf_result*')
        if len(pcf_file) > 0:
            file_list.append(max(pcf_file, key=os.path.getmtime))
    print(f'Found {len(file_list)} PCF files. Starting to load data...')

    if os.path.isfile(filepath) and overwrite is False:
        df = pd.read_pickle(filepath)  # results are stored as a pickled DataFrame (see df.to_pickle below)
        extend_df = True
        print('Extending existing file...')
        row_list = []
    else:
        df = pd.DataFrame(index=np.arange(0, len(file_list)),
                          columns=('mouse', 'session', 'n_cells', 'n_place_cells', 'ratio', 'mean_spikerate',
                                   'median_spikerate', 'pvc_slope', 'min_pvc', 'sec_peak_ratio', 'spikerate_dist',
                                   'pvc_curve'))
        extend_df = False
        print('Creating new file...')

    date_format = '%Y%m%d'
    for idx, file in enumerate(file_list):
        # check if the current session is already in the DataFrame
        if extend_df:
            curr_mouse = file.split(sep=os.sep)[-3]
            curr_session = file.split(sep=os.sep)[-2]
            if len(df.loc[(df['mouse'] == curr_mouse) & (df['session'].astype(str) == curr_session)]) > 0:
                continue
        if session_range is not None:
            sess = int(file.split(sep=os.sep)[-2])
            if sess < session_range[0] or sess > session_range[1]:
                continue

        pcf = pipe.load_pcf(os.path.dirname(file), os.path.basename(file))

        pcf.params['mouse'] = pcf.params['root'].split(os.sep)[-2]
        if len(pcf.params['mouse']) > 3:
            pcf.params['mouse'] = pcf.params['mouse'][-3:]
        pcf.params['session'] = pcf.params['root'].split(os.sep)[-1]

        # Get average spike rate in Hz of all neurons
        spike_dist = np.nansum(pcf.cnmf.estimates.spikes, axis=1) / (pcf.cnmf.estimates.spikes.shape[1]/
                                                                     pcf.cnmf.params.data['fr'])
        avg_spike_rate = np.mean(spike_dist)
        median_spike_rate = np.median(spike_dist)

        # Analyse PVC curve of that session (minimum slope and height of second peak)
        try:
            # pvc_curve = pvc.pvc_curve(pcf.bin_avg_activity, max_delta_bins=150)
            curve = np.load(os.path.join(os.path.dirname(file), 'pvc.npy'))
        except FileNotFoundError:
            curve = pvc.pvc_curve(np.transpose(pcf.bin_avg_activity, (1, 0)), plot=False)[0]
        min_slope = -min(np.diff(curve[:20]))
        try:
            second_peak = curve[argrelextrema(curve, np.greater)[0][0]]/curve[argrelextrema(curve, np.less)[0][0]]
        except IndexError:
            second_peak = np.nan

        if extend_df:
            # wrap values in lists so that each DataFrame holds exactly one row
            row_list.append(pd.DataFrame({'mouse': [pcf.params['mouse']],
                                          'session': [pcf.params['session']],
                                          'n_cells': [pcf.cnmf.estimates.F_dff.shape[0]],
                                          'n_place_cells': [len(pcf.place_cells)],
                                          'ratio': [(len(pcf.place_cells)/pcf.cnmf.estimates.F_dff.shape[0])*100],
                                          'mean_spikerate': [avg_spike_rate],
                                          'median_spikerate': [median_spike_rate],
                                          'spikerate_dist': [spike_dist],
                                          'pvc_curve': [curve],
                                          'pvc_slope': [min_slope],
                                          'min_pvc': [min(curve[:20])],
                                          'sec_peak_ratio': [second_peak]}))
        else:
            # Parse data of this session into the dataframe (use .at to set single cells
            # directly and avoid chained-assignment pitfalls)
            df.at[idx, 'mouse'] = pcf.params['mouse']
            df.at[idx, 'session'] = pcf.params['session']
            df.at[idx, 'n_cells'] = pcf.cnmf.estimates.F_dff.shape[0]
            df.at[idx, 'n_place_cells'] = len(pcf.place_cells)
            df.at[idx, 'ratio'] = (len(pcf.place_cells) / pcf.cnmf.estimates.F_dff.shape[0]) * 100
            df.at[idx, 'spikerate_dist'] = spike_dist
            df.at[idx, 'pvc_curve'] = curve
            df.at[idx, 'mean_spikerate'] = avg_spike_rate
            df.at[idx, 'median_spikerate'] = median_spike_rate
            df.at[idx, 'pvc_slope'] = min_slope
            df.at[idx, 'min_pvc'] = min(curve[:20])
            df.at[idx, 'sec_peak_ratio'] = second_peak

    # Append the newly collected rows when extending an existing file
    if extend_df and len(row_list) > 0:
        df = pd.concat([df, *row_list], ignore_index=True)

    df.dropna(subset=['mouse'], inplace=True)

    # Set correct datatypes for columns
    df.session = df.session.astype(np.int64)
    df.n_cells = df.n_cells.astype(np.int64)
    df.n_place_cells = df.n_place_cells.astype(np.int64)
    df.ratio = df.ratio.astype(np.float64)
    df.mean_spikerate = df.mean_spikerate.astype(np.float64)
    df.median_spikerate = df.median_spikerate.astype(np.float64)
    df.pvc_slope = df.pvc_slope.astype(np.float64)
    df.min_pvc = df.min_pvc.astype(np.float64)
    df.sec_peak_ratio = df.sec_peak_ratio.astype(np.float64)

    # give sessions a continuous id for plotting
    df['session_id'] = -1
    for id, session in enumerate(sorted(df['session'].unique())):
        df.loc[df['session'] == session, 'session_id'] = id
    df['sess_norm'] = df['session'].astype(int) - min(df['session'].astype(int))

    # assign groups to mice
    stroke = ['M32', 'M40', 'M41']
    df['group'] = np.nan
    for mouse in df.mouse.unique():
        if mouse in stroke:
            df.loc[df.mouse == mouse, 'group'] = 'lesion'
        else:
            df.loc[df.mouse == mouse, 'group'] = 'control'

    # normalize data
    if norm_fields is None:
        norm_fields = ['n_cells', 'n_place_cells', 'ratio', 'mean_spikerate', 'median_spikerate', 'pvc_slope',
                       'min_pvc', 'sec_peak_ratio']

    if norm_range is not None:
        for field in norm_fields:
            # Find indices of correct rows (pre-stroke sessions for the specific animal)
            df[field + '_norm'] = -1.0
            for mouse in df.mouse.unique():
                norm_factor = df.loc[(df.mouse == mouse) &
                                     ((df.session >= norm_range[0]) & (df.session <= norm_range[1])), field].mean()
                df.loc[df.mouse == mouse, field+'_norm'] = df.loc[df.mouse == mouse, field] / norm_factor

    # add behavioral performance data
    behav_data = performance.load_performance_data(roots=[root], norm_date='20200824', stroke=df['mouse'][0])
    df = combine_simple_with_behav(df, behav_data)

    df.sort_values(by=['mouse', 'session'], inplace=True)  # order rows for mice and session dates
    df.to_pickle(filepath)  # save dataframe as pickle

    return df
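
# A minimal usage sketch (root path and session range assumed for illustration): build or extend
# the summary DataFrame for one batch and normalize metrics to the pre-stroke baseline sessions.
if __name__ == '__main__':
    simple_df = get_simple_data(r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch3',
                                overwrite=False, norm_range=(20200318, 20200322))
    print(simple_df.groupby('mouse')['ratio'].mean())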