# ---- Code example #1 ----
def run_gui(path=None, data=None):
    """
    Loads PCF or CNMF data object from a provided path and loads the CaImAn GUI for component inspection.
    This GUI was tweaked to not require any storage-intensive mmap files, but can therefore not show individual frames.
    :param path: optional str, directory of the PCF or CNMF object from which component data should be loaded. If None
                and data=None, a window prompt will open to select a directory where to look for a CNMF/PCF file.
    :param data: optional, data in form of an already loaded cnm object can be provided directly
    :return:
    """

    # Restrict OpenCV to a single thread; some builds reject this call.
    try:
        cv2.setNumThreads(1)
    except Exception:  # was a bare except; never swallow SystemExit/KeyboardInterrupt
        print('Open CV is naturally single threaded')

    try:
        if __IPYTHON__:
            # this is used for debugging purposes only. allows to reload classes
            # when changed
            get_ipython().magic('load_ext autoreload')
            get_ipython().magic('autoreload 2')
    except NameError:
        print('Not launched under iPython')
    def make_color_img(img, gain=255, min_max=None, out_type=np.uint8):
        """Scale a 2D array into [0, gain] and stack it into a 3-channel image.

        :param img: 2D np.array (grayscale image)
        :param gain: upper bound of the scaled intensity range
        :param min_max: optional (min, max) tuple used for scaling; computed
                        from img itself if None
        :param out_type: dtype of the returned image
        :return: (h, w, 3) array of out_type
        """
        if min_max is None:
            min_ = img.min()
            max_ = img.max()
        else:
            min_, max_ = min_max

        # guard against a zero dynamic range (constant image), which would
        # otherwise divide by zero and cast NaNs to the output dtype
        span = max_ - min_
        if span == 0:
            span = 1
        img = (img - min_) / span * gain
        img = img.astype(out_type)
        img = np.dstack([img] * 3)
        return img

    ### FIND DATA ###
    if data is None:  # different conditions on file loading (not necessary if data was already provided)
        if path is None:  # if no path has been given, open a window prompt to select a directory
            F = FileDialog()

            # load object saved by CNMF
            path = F.getExistingDirectory(
                caption='Select folder from which to load a PCF or CNMF file')

        try:  # first try to get CNMF data from a PCF object (should be most up-to-date)
            cnm_obj = pipe.load_pcf(path).cnmf
        except FileNotFoundError:
            try:
                cnm_obj = pipe.load_cnmf(path)
            except FileNotFoundError:
                raise FileNotFoundError(
                    f'Could not find data to load in {path}!')
    else:
        cnm_obj = data

    # NOTE: the full movie is not needed in this mmap-free version of the GUI;
    # the mmap file is only read below as a fallback when no local correlation
    # image has been computed yet.

    estimates = cnm_obj.estimates
    params_obj = cnm_obj.params

    # fall back to computing the local correlation image from the memory-mapped
    # movie if the estimates object does not provide one yet
    if not hasattr(estimates, 'Cn'):
        if not os.path.exists(cnm_obj.mmap_file):
            M = FileDialog()
            cnm_obj.mmap_file = M.getOpenFileName(
                caption='Load memory mapped file', filter='*.mmap')[0]
        mov = cm.load(cnm_obj.mmap_file)

        estimates.Cn = cm.local_correlations(mov, swap_dim=False)

    # GUI state: which background component is shown, whether a neuron is
    # currently selected, and the (static) frame index
    background_num = -1
    neuron_selected = False
    nr_index = 0

    # per-component extrema of the background (rank-1 product of spatial b and
    # temporal f), used for display scaling in background mode
    min_background = np.min(estimates.b, axis=0) * np.min(estimates.f, axis=1)
    max_background = np.max(estimates.b, axis=0) * np.max(estimates.f, axis=1)

    # initialize sorting-related attributes on first use
    if not hasattr(estimates, 'accepted_list'):
        # np.int was removed in NumPy 1.24; the builtin int is equivalent here
        estimates.accepted_list = np.array([], dtype=int)
        estimates.rejected_list = np.array([], dtype=int)
        estimates.img_components = estimates.A.toarray().reshape(
            (estimates.dims[0], estimates.dims[1], -1),
            order='F').transpose([2, 0, 1])
        # center of mass of every spatial footprint
        # (scipy.ndimage.measurements was removed in SciPy 1.10; the same
        # function lives at the top level of scipy.ndimage)
        estimates.cms = np.array([
            scipy.ndimage.center_of_mass(comp)
            for comp in estimates.img_components
        ])
        estimates.idx_components = np.arange(estimates.nr)
        estimates.idx_components_bad = np.array([])
        estimates.background_image = make_color_img(estimates.Cn)
        # Generate image data: normalize each component footprint to [0, 255]
        estimates.img_components /= estimates.img_components.max(
            axis=(1, 2))[:, None, None]
        estimates.img_components *= 255
        estimates.img_components = estimates.img_components.astype(np.uint8)

    def draw_contours_overall(md):
        """Redraw contours according to the current display mode.

        :param md: current mode string ("reset", "neurons" or "background")
        """
        # value comparison with `==` (the original used `is`, which relies on
        # CPython string interning and is a SyntaxWarning in Python >= 3.8)
        if md == "reset":
            draw_contours()
        elif md == "neurons":
            if neuron_selected:
                # if a specific neuron has been selected, only one contour
                # should be changed while thrshcomp_line is changing
                if nr_index == 0:
                    # if user does not start to move through the frames
                    draw_contours_update(estimates.background_image, img)
                    draw_contours_update(comp2_scaled, img2)
                else:
                    # NEVER CALLED IN THIS VERSION WITHOUT MMAP SINCE NR_INDEX NEVER CHANGES (NO NR_VLINE)
                    draw_contours_update(raw_mov_scaled, img)
                    draw_contours_update(frame_denoise_scaled, img2)
            else:
                # if no specific neuron has been selected, all the contours are changing
                draw_contours()
        else:
            # md == "background": nothing to redraw here
            return

    def draw_contours():
        """Draw the contours of all currently selected components onto the
        background image, color-coded by their r-value band."""
        # NOTE(review): these names are locals of run_gui, not module globals;
        # the `global` declaration stems from the original script version --
        # confirm the intended scoping before changing it
        global thrshcomp_line, estimates, img
        bkgr_contours = estimates.background_image.copy()

        if len(estimates.idx_components) > 0:
            # binarize every footprint at the threshold set by the draggable
            # line and extract its contours (np.int was removed in NumPy 1.24,
            # so use the builtin int; `comp` avoids shadowing the outer `img`)
            contours = [
                cv2.findContours(
                    cv2.threshold(comp, int(thrshcomp_line.value()), 255,
                                  0)[1], cv2.RETR_TREE,
                    cv2.CHAIN_APPROX_SIMPLE)[0]
                for comp in estimates.img_components[estimates.idx_components]
            ]
            SNRs = np.array(estimates.r_values)
            iidd = np.array(estimates.idx_components)
            vals = SNRs[iidd]

            # one color per r-value band, from low (red) to high (cyan);
            # None marks an open-ended bound
            bands = [(None, 0.1, (255, 0, 0)),
                     (0.1, 0.25, (0, 255, 0)),
                     (0.25, 0.5, (0, 0, 255)),
                     (0.5, 0.75, (255, 255, 0)),
                     (0.75, 0.9, (255, 0, 255)),
                     (0.9, None, (0, 255, 255))]
            for lo, hi, color in bands:
                if lo is None:
                    sel = vals < hi
                elif hi is None:
                    sel = vals >= lo
                else:
                    sel = (vals >= lo) & (vals < hi)
                idx = np.where(sel)[0]
                cv2.drawContours(bkgr_contours,
                                 sum([contours[jj] for jj in idx], []), -1,
                                 color, 1)

        img.setImage(bkgr_contours, autoLevels=False)

    # pg.setConfigOptions(imageAxisOrder='row-major')

    def draw_contours_update(cf, im):
        """Redraw only the contour of the currently selected component on top
        of the frame `cf` and display the result in the ImageItem `im`.

        :param cf: (h, w, 3) color frame to draw onto (copied, not modified)
        :param im: pyqtgraph ImageItem that receives the annotated frame
        """
        global thrshcomp_line, estimates
        curFrame = cf.copy()

        if len(estimates.idx_components) > 0:
            # binarize every footprint at the current threshold and extract its
            # contours (np.int was removed in NumPy 1.24 -> builtin int)
            contours = [
                cv2.findContours(
                    cv2.threshold(comp, int(thrshcomp_line.value()), 255,
                                  0)[1], cv2.RETR_TREE,
                    cv2.CHAIN_APPROX_SIMPLE)[0]
                for comp in estimates.img_components[estimates.idx_components]
            ]
            SNRs = np.array(estimates.r_values)
            iidd = np.array(estimates.idx_components)
            vals = SNRs[iidd]

            # same r-value color bands as in draw_contours(); only the selected
            # component's contour is drawn, in the color of its band
            bands = [(None, 0.1, (255, 0, 0)),
                     (0.1, 0.25, (0, 255, 0)),
                     (0.25, 0.5, (0, 0, 255)),
                     (0.5, 0.75, (255, 255, 0)),
                     (0.75, 0.9, (255, 0, 255)),
                     (0.9, None, (0, 255, 255))]
            for lo, hi, color in bands:
                if lo is None:
                    sel = vals < hi
                elif hi is None:
                    sel = vals >= lo
                else:
                    sel = (vals >= lo) & (vals < hi)
                if min_dist_comp in np.where(sel)[0]:
                    cv2.drawContours(curFrame, contours[min_dist_comp], -1,
                                     color, 1)

        im.setImage(curFrame, autoLevels=False)

    #%% START BUILDING THE APPLICATION WINDOW

    # Always start by initializing Qt (only once per application)

    app = QtGui.QApplication([])

    # Define a top-level widget to hold everything
    w = QtGui.QWidget()

    # Create some widgets to be placed inside (currently unused placeholders)
    btn = QtGui.QPushButton('press me')
    text = QtGui.QLineEdit('enter text')

    # Histogram controller (win); also hosts the draggable contour threshold line
    win = pg.GraphicsLayoutWidget()
    win.setMaximumWidth(300)
    win.setMinimumWidth(200)
    hist = pg.HistogramLUTItem()  # Contrast/color control
    win.addItem(hist)

    # Plotting windows
    p1 = pg.PlotWidget(
    )  # raw movie window (top-mid), all contours are drawn here
    p2 = pg.PlotWidget(
    )  # trace window (bottom-mid), calcium trace of the selected component
    p3 = pg.PlotWidget(
    )  # denoised movie window (top-right), only selected contour is drawn here

    # parameter table for online evaluation and mode change
    t = ParameterTree()

    # parameter table for neuron selection/sorting
    t_action = ParameterTree()
    action_layout = QtGui.QGridLayout()

    ## Create a grid layout to manage the widgets size and position
    layout = QtGui.QGridLayout()
    w.setLayout(layout)

    # Items for displaying image data (raw/background on img, selected
    # component on img2)
    img = pg.ImageItem()
    p1.addItem(img)

    img2 = pg.ImageItem()
    p3.addItem(img2)

    hist.setImageItem(img)

    # Draggable line for setting the component binarization threshold
    thrshcomp_line = pg.InfiniteLine(angle=0, movable=True, pen='g')
    hist.vb.addItem(thrshcomp_line)
    hist.vb.setMouseEnabled(y=False)  # makes user interaction a little easier
    thrshcomp_line.setValue(100)
    thrshcomp_line.setZValue(1000)  # bring iso line above contrast controls

    ## Add widgets to the layout in their proper positions
    layout.addWidget(win, 1, 0)  # histogram
    layout.addWidget(p3, 0, 2)  # denoised movie

    layout.addWidget(t, 0, 0)  # upper-right table
    layout.addWidget(t_action, 1, 2)  # bottom-right table
    layout.addWidget(p1, 0, 1)  # raw movie
    layout.addWidget(p2, 1, 1)  # calcium trace window

    # enable only horizontal zoom for the traces component
    p2.setMouseEnabled(x=True, y=False)
    ## Display the widget as a new window
    w.show()

    ## Start the Qt event loop
    # NOTE(review): exec_() blocks until the window is closed, so the handlers
    # and parameter trees below are only wired up afterwards -- confirm this
    # early event-loop start is intentional
    app.exec_()

    draw_contours()

    hist.setLevels(estimates.background_image.min(),
                   estimates.background_image.max())

    # Another plot area for displaying ROI data
    p2.setMaximumHeight(250)

    # set position and scale of image
    # NOTE(review): ImageItem.scale(sx, sy) was removed in newer pyqtgraph
    # releases -- confirm the pinned pyqtgraph version still supports it
    img.scale(1, 1)

    # zoom to fit image
    p1.autoRange()

    # start in "reset" mode (all contours shown)
    mode = "reset"
    p2.setTitle("mode: %s" % (mode))

    # redraw contours whenever the threshold line is dragged
    thrshcomp_line.sigDragged.connect(lambda: draw_contours_overall(mode))

    def imageHoverEvent(event):
        """Record the position, pixel indices, and value under the mouse cursor."""
        global x, y, i, j, val
        pos = event.pos()
        # i = row (y coordinate), j = column (x coordinate) of the hovered pixel
        i, j = pos.y(), pos.x()
        i = int(np.clip(i, 0, estimates.background_image.shape[0] - 1))
        j = int(np.clip(j, 0, estimates.background_image.shape[1] - 1))
        # value of the first channel of the background image under the cursor
        val = estimates.background_image[i, j, 0]
        ppos = img.mapToParent(pos)
        x, y = ppos.x(), ppos.y()

    # Monkey-patch the image to use our custom hover function.
    # This is generally discouraged (you should subclass ImageItem instead),
    # but it works for a very simple use like this.
    img.hoverEvent = imageHoverEvent

    def mouseClickEvent(event):
        """Handle clicks on the raw-movie plot: record the click position and,
        in "neurons" mode, select the nearest component."""
        global mode
        global x, y, i, j, val

        pos = img.mapFromScene(event.pos())
        x = int(pos.x())
        y = int(pos.y())

        # ignore clicks outside the field of view; the original code checked
        # against `mov`, which is undefined in this mmap-free version and would
        # raise a NameError -- estimates.dims gives the same bounds
        if x < 0 or x > estimates.dims[0] or y < 0 or y > estimates.dims[1]:
            return

        i, j = pos.y(), pos.x()
        i = int(np.clip(i, 0, estimates.background_image.shape[0] - 1))
        j = int(np.clip(j, 0, estimates.background_image.shape[1] - 1))
        val = estimates.background_image[i, j, 0]

        # value comparison with `==` (the original used `is`)
        if mode == "neurons":
            show_neurons_clicked()

    p1.mousePressEvent = mouseClickEvent

    # A general rule in Qt is that if you override one mouse event handler,
    # you must override all of them.
    def release(event):
        pass

    p1.mouseReleaseEvent = release

    def move(event):
        pass

    p1.mouseMoveEvent = move

    ## PARAMS
    # Quality thresholds shown in the upper-left tree. The *_thr / min_* values
    # are acceptance thresholds, the *_lowest values are hard rejection floors;
    # the three actions switch the display mode.
    params = [{
        'name': 'min_cnn_thr',
        'type': 'float',
        'value': 0.99,
        'limits': (0, 1),
        'step': 0.01
    }, {
        'name': 'cnn_lowest',
        'type': 'float',
        'value': 0.1,
        'limits': (0, 1),
        'step': 0.01
    }, {
        'name': 'rval_thr',
        'type': 'float',
        'value': 0.85,
        'limits': (-1, 1),
        'step': 0.01
    }, {
        'name': 'rval_lowest',
        'type': 'float',
        'value': -1,
        'limits': (-1, 1),
        'step': 0.01
    }, {
        'name': 'min_SNR',
        'type': 'float',
        'value': 2,
        'limits': (0, 20),
        'step': 0.1
    }, {
        'name': 'SNR_lowest',
        'type': 'float',
        'value': 0,
        'limits': (0, 20),
        'step': 0.1
    }, {
        'name': 'RESET',
        'type': 'action'
    }, {
        'name': 'SHOW BACKGROUND',
        'type': 'action'
    }, {
        'name': 'SHOW NEURONS',
        'type': 'action'
    }]

    ## Create tree of Parameter objects
    pars = Parameter.create(name='params', type='group', children=params)

    # Sorting actions shown in the bottom-right tree: filter toggle, view
    # selection, and the accept/reject/save actions.
    params_action = [{
        'name': 'Filter components',
        'type': 'bool',
        'value': True,
        'tip': "Filter components"
    }, {
        'name': 'View components',
        'type': 'list',
        'values': ['All', 'Accepted', 'Rejected', 'Unassigned'],
        'value': 'All'
    }, {
        'name': 'ADD GROUP',
        'type': 'action'
    }, {
        'name': 'REMOVE GROUP',
        'type': 'action'
    }, {
        'name': 'ADD SINGLE',
        'type': 'action'
    }, {
        'name': 'REMOVE SINGLE',
        'type': 'action'
    }, {
        'name': 'SAVE OBJECT',
        'type': 'action'
    }]

    pars_action = Parameter.create(name='params_action',
                                   type='group',
                                   children=params_action)

    t_action.setParameters(pars_action, showTop=False)
    t_action.setWindowTitle('Parameter Action')

    def reset_button():
        """Switch back to "reset" mode and redraw all component contours."""
        global mode
        mode = "reset"
        p2.setTitle("mode: %s" % (mode))
        # blank out the upper-right image
        blank = np.zeros((60, 80), dtype=int)
        img2.setImage(make_color_img(blank), autoLevels=False)
        draw_contours()

    pars.param('RESET').sigActivated.connect(reset_button)

    def show_background_button():
        """Switch to "background" mode and display the next background
        component (spatial map plus temporal trace)."""
        global bg_vline, min_background, max_background, background_num
        global mode, background_first_frame_scaled
        # clear the upper right image
        zeros = np.asarray([[0] * 80 for _ in range(60)])
        img2.setImage(make_color_img(zeros), autoLevels=False)

        # cycle through the background components on repeated button presses
        background_num = (background_num + 1) % estimates.f.shape[0]
        mode = "background"
        p2.setTitle("mode: %s %d" % (mode, background_num))

        # display the first frame of the background
        background_first_frame = estimates.b[:, background_num].reshape(
            estimates.dims, order='F')
        min_background_first_frame = np.min(background_first_frame)
        max_background_first_frame = np.max(background_first_frame)
        background_first_frame_scaled = make_color_img(
            background_first_frame,
            min_max=(min_background_first_frame, max_background_first_frame))
        img.setImage(background_first_frame_scaled, autoLevels=False)

        # draw the trace and the draggable line used to scroll through frames
        trace_background = estimates.f[background_num]
        p2.plot(trace_background, clear=True)
        bg_vline = pg.InfiniteLine(angle=90, movable=True)
        p2.addItem(bg_vline, ignoreBounds=True)
        bg_vline.setValue(0)
        bg_vline.sigPositionChanged.connect(show_background_update)

    def show_background_update():
        """Refresh the background image when the vertical line is dragged to a
        new frame index."""
        global bg_index, min_background, max_background, background_scaled
        bg_index = int(bg_vline.value())
        if bg_index > -1 and bg_index < estimates.f.shape[-1]:
            # upper left component scrolls through the frames of the background
            # (outer product of spatial b and the temporal value at bg_index)
            background = estimates.b[:, background_num].dot(
                estimates.f[background_num, bg_index]).reshape(estimates.dims,
                                                               order='F')
            background_scaled = make_color_img(
                background,
                min_max=(min_background[background_num],
                         max_background[background_num]))
            img.setImage(background_scaled, autoLevels=False)

    pars.param('SHOW BACKGROUND').sigActivated.connect(show_background_button)

    def show_neurons_button():
        """Enter "neurons" mode and wait for the user to click a component."""
        global mode, neuron_selected
        mode = "neurons"
        neuron_selected = False
        p2.setTitle("mode: %s" % (mode))
        # blank out the upper-right image until a neuron is picked
        blank = np.zeros((60, 80), dtype=int)
        img2.setImage(make_color_img(blank), autoLevels=False)

    def show_neurons_clicked():
        """Select the component closest to the last click position and display
        its calcium trace, its contour, and the masked correlation image."""
        global nr_index
        global x, y, i, j, val, min_dist_comp, contour_single, neuron_selected, comp2_scaled
        neuron_selected = True
        # euclidean distance from the click to every component's center of mass
        distances = np.sum(
            ((x, y) - estimates.cms[estimates.idx_components])**2, axis=1)**0.5
        min_dist_comp = np.argmin(distances)
        # binarized footprints at the current threshold (np.int was removed in
        # NumPy 1.24 -> builtin int; `comp` avoids shadowing the outer `img`)
        contour_all = [
            cv2.threshold(comp, int(thrshcomp_line.value()), 255, 0)[1]
            for comp in estimates.img_components[estimates.idx_components]
        ]
        contour_single = contour_all[min_dist_comp]

        # draw the traces (lower left component)
        estimates.components_to_plot = estimates.idx_components[min_dist_comp]
        p2.plot(estimates.C[estimates.components_to_plot] +
                estimates.YrA[estimates.components_to_plot],
                clear=True)

        # plot img (upper left component)
        img.setImage(estimates.background_image, autoLevels=False)
        draw_contours_update(estimates.background_image, img)
        # plot img2 (upper right component): correlation image masked by the
        # selected component's binarized footprint
        comp2 = np.multiply(estimates.Cn, contour_single > 0)
        comp2_scaled = make_color_img(comp2,
                                      min_max=(np.min(comp2), np.max(comp2)))
        img2.setImage(comp2_scaled, autoLevels=False)
        draw_contours_update(comp2_scaled, img2)
        # set title for the upper two components
        p3.setTitle("pos: (%0.1f, %0.1f)  component: %d  value: %g dist:%f" %
                    (x, y, estimates.components_to_plot, val,
                     distances[min_dist_comp]))
        p1.setTitle("pos: (%0.1f, %0.1f)  component: %d  value: %g dist:%f" %
                    (x, y, estimates.components_to_plot, val,
                     distances[min_dist_comp]))

        # the infinite line for frame scrolling is inactive in this version
        # without mmap files, so the frame index always stays at 0
        nr_index = 0

    def show_neurons_update():  # NOT CALLED IN THIS VERSION
        """Scroll the raw and denoised movies to the selected frame.

        NOTE(review): dead code in this mmap-free build -- it references
        `nr_vline`, `mov`, `min_mov`, `max_mov`, `min_mov_denoise` and
        `max_mov_denoise`, none of which are defined here; it would raise a
        NameError if it were ever connected to a signal.
        """
        global nr_index, frame_denoise_scaled, estimates, raw_mov_scaled
        global min_mov, max_mov, min_mov_denoise, max_mov_denoise
        if neuron_selected is False:
            return
        nr_index = int(nr_vline.value())
        if nr_index > 0 and nr_index < mov[:, 0, 0].shape[0]:
            # upper left component scrolls through the raw movie
            raw_mov = mov[nr_index, :, :]
            raw_mov_scaled = make_color_img(raw_mov,
                                            min_max=(min_mov, max_mov))
            img.setImage(raw_mov_scaled, autoLevels=False)
            draw_contours_update(raw_mov_scaled, img)
            # upper right component scrolls through the denoised movie
            frame_denoise = estimates.A[:, estimates.idx_components].dot(
                estimates.C[estimates.idx_components,
                            nr_index]).reshape(estimates.dims, order='F')
            frame_denoise_scaled = make_color_img(frame_denoise,
                                                  min_max=(min_mov_denoise,
                                                           max_mov_denoise))
            img2.setImage(frame_denoise_scaled, autoLevels=False)
            draw_contours_update(frame_denoise_scaled, img2)

    pars.param('SHOW NEURONS').sigActivated.connect(show_neurons_button)

    def _sort_components(component_ids, accept):
        """Move `component_ids` into the accepted (accept=True) or rejected
        (accept=False) list, drop them from the other list, and refresh."""
        if accept:
            estimates.accepted_list = np.union1d(estimates.accepted_list,
                                                 component_ids)
            estimates.rejected_list = np.setdiff1d(estimates.rejected_list,
                                                   component_ids)
        else:
            estimates.rejected_list = np.union1d(estimates.rejected_list,
                                                 component_ids)
            estimates.accepted_list = np.setdiff1d(estimates.accepted_list,
                                                   component_ids)
        change(None, None)

    def add_group():
        """Accept every currently displayed component."""
        _sort_components(estimates.idx_components, True)

    pars_action.param('ADD GROUP').sigActivated.connect(add_group)

    def remove_group():
        """Reject every currently displayed component."""
        _sort_components(estimates.idx_components, False)

    pars_action.param('REMOVE GROUP').sigActivated.connect(remove_group)

    def add_single():
        """Accept only the currently selected component."""
        _sort_components(estimates.components_to_plot, True)

    pars_action.param('ADD SINGLE').sigActivated.connect(add_single)

    def remove_single():
        """Reject only the currently selected component."""
        _sort_components(estimates.components_to_plot, False)

    pars_action.param('REMOVE SINGLE').sigActivated.connect(remove_single)

    def save_object():
        """Write the (possibly re-sorted) estimates back into the cnm object
        and save it to a user-chosen HDF5 file."""
        print('Saving')

        # create a fresh dialog here: the original reused `F`, which only
        # exists when run_gui() was called without a path, and would raise a
        # NameError otherwise
        save_dialog = FileDialog()
        ffll = save_dialog.getSaveFileName(filter='*.hdf5')
        print(ffll[0])
        cnm_obj.estimates = estimates
        cnm_obj.save(ffll[0])

    pars_action.param('SAVE OBJECT').sigActivated.connect(save_object)

    def action_pars_activated(param, changes):
        # any change in the action tree re-filters the components
        change(None, None)

    pars_action.sigTreeStateChanged.connect(action_pars_activated)

    ## If anything changes in the tree, re-filter the components and redraw
    def change(param, changes):
        """Apply the current quality parameters and refresh the contour display.

        :param param: unused (signature imposed by pyqtgraph's signal)
        :param changes: unused
        """
        global estimates, pars, pars_action
        set_par = pars.getValues()
        if pars_action.param('Filter components').value():
            # push every quality parameter from the GUI into the params object
            for keyy in set_par.keys():
                params_obj.quality.update({keyy: set_par[keyy][0]})
        else:
            # filtering disabled: fall back to the default thresholds
            params_obj.quality.update({
                'cnn_lowest': .1,
                'min_cnn_thr': 0.99,
                'rval_thr': 0.85,
                'rval_lowest': -1,
                'min_SNR': 2,
                'SNR_lowest': 0
            })
        # NOTE(review): `mov` is only bound when the correlation image had to
        # be recomputed from the mmap file; otherwise this raises a NameError
        # -- confirm what filter_components should receive in the mmap-free
        # version
        estimates.filter_components(
            mov,
            params_obj,
            dview=None,
            select_mode=pars_action.param('View components').value())
        # value comparison with `==` (the original used `is`); nothing to
        # redraw in background mode
        if mode != "background":
            draw_contours()

    pars.sigTreeStateChanged.connect(change)

    change(None, None)  # set params to default
    t.setParameters(pars, showTop=False)
    t.setWindowTitle('Parameter Quality')

    ## END PARAMS

    ## Display the widget as a new window
    w.show()

    ## Run the Qt event loop and forward its exit code to the application
    app.exit(app.exec_())
# ---- Code example #2 ----
def main():

    # Prompt user for directory containing files to be analyzed
    F           = FileDialog()  # Calls Qt backend script to create a file dialog object
    mcdir       = F.getExistingDirectory(caption='Select Motion Corrected Video Directory')
    fvids=[]
    for file in os.listdir(mcdir):
        if file.endswith("_mc.tif"):
            fvids.append(os.path.join(mcdir, file))

    # Set up a variable to determine which sections are lightsheet and which
    # are epi. This is a horrible way to handle it - need to write new code to
    # either automatically determine or prompt user for input.
    # Use 1 for lightsheet and 0 for epi.
    lsepi       = [1, 0, 1, 0, 1, 0, 0, 1, 0]

	# Grab the first two masks and assume that they are representative of every
	# lightsheet and epi neuron. Also generate an overlap mask by logical ANDing
    # the two masks together.
    maskLSin    = np.transpose(np.load(fvids[0]+'.npy'), axes=(2,1,0))
    maskEpiin   = np.transpose(np.load(fvids[1]+'.npy'), axes=(2,1,0))
    maskLSt     = []
    maskEpit    = []
    for mask in maskLSin:
        if (np.argwhere(mask).size > 0):
            maskLSt.append(mask)
    for mask in maskEpiin:
        if (np.argwhere(mask).size > 0):
            maskEpit.append(mask)
    maskLS      = np.asarray(maskLSt)
    maskEpi     = np.asarray(maskEpit)

    # Take the first lightsheet vid, find the top N neurons, do the same for
    # the first epi vid. Take the 2N masks corresponding to this, find the set
    # of unique ones, and then use the remaining masks to go through all of the
    # other vids.
    N       = 14
    # Lightsheet:
    vid     = cm.load(fvids[0])
    LSdff   = calculate_dff_set(vid, maskLS)
    # Epi:
    vid     = cm.load(fvids[1])
    EPdff   = calculate_dff_set(vid, maskEpi)

    # Sort by top, get the overlap:
    threshold   = 10
    topsLS      = argsort_traces(LSdff)
    topsEpi     = argsort_traces(EPdff)
    masks       = mask_union(maskLS[topsLS[-N:]], maskEpi[topsEpi[-N:]], threshold)
    masksTopLS  = mask_disjoint(maskLS[topsLS[-N:]], masks, threshold)
    masksTopEpi = mask_disjoint(maskEpi[topsEpi[-N:]], masks, threshold)
    maskov      = mask_joint(maskLS[topsLS[-N:]], maskEpi[topsEpi[-N:]], threshold)
    print(masksTopLS.shape)
    print(masksTopEpi.shape)
    print(maskov.shape)

    # The variable tops now contains the non-overlapping union of the top N
    # neurons from epi and from light sheet. Now run through the rest of the
    # analyses using only these masks.
    # Can grab the top epi and top lightsheet values for each neuron. Can also
    # grab on a neuron-by-neuron basis whether the peak dF/F was lightsheet or
    # epi.
    dff         = np.empty((masks.shape[0], 0))
    divs        = np.zeros(len(fvids))
    max_idx     = np.zeros((masks.shape[0], 1))
    max_val     = np.zeros((masks.shape[0], 1))
    maxepi_idx  = np.zeros((masks.shape[0], 1))
    maxepi_val  = np.zeros((masks.shape[0], 1))
    maxls_idx   = np.zeros((masks.shape[0], 1))
    maxls_val   = np.zeros((masks.shape[0], 1))
    flatmask    = flatten_masks(masks)
    lspeaks     = [[] for k in range(masks.shape[0])]
    lspeakvals  = np.empty((0))
    epipeaks    = [[] for k in range(masks.shape[0])]
    epipeakvals = np.empty((0))
    rawtraces   = np.empty((masks.shape[0], 0))
    rawbckgnd   = np.empty((masks.shape[0], 0))
    for i, fvid in enumerate(fvids):
        vid     = cm.load(fvid)
        traces      = np.empty((masks.shape[0], vid.shape[0]))
        bval        = np.empty((masks.shape[0], vid.shape[0]))
        dff_i       = np.empty((masks.shape[0], vid.shape[0]))
        for j, mask in enumerate(masks):
            traces[j], bval[j]  = extract_neuron_trace_uniform(vid, mask, flatmask, 2)
            dff_i[j]            = trace_df_f(traces[j], bval[j])
            peaks, props        = scipy.signal.find_peaks(dff_i[j], distance=10, prominence=(0.1, None))
            if (peaks.size > 0):
                if lsepi[i]:
                    lspeaks[j].append(peaks)
                    lspeakvals      = np.append(lspeakvals, dff_i[j][peaks])
                else:
                    epipeaks[j].append(peaks)
                    epipeakvals     = np.append(epipeakvals, dff_i[j][peaks])
            if (max(dff_i[j]) > max_val[j]):
                max_idx[j]  = i
                max_val[j]  = max(dff_i[j])
                if lsepi[i]:
                    maxls_idx[j]    = i
                    maxls_val[j]    = max_val[j]
                else:
                    maxepi_idx[j]   = i
                    maxepi_val[j]   = max_val[j]

        dff     = np.concatenate([dff, dff_i], axis = 1)
        rawtraces = np.concatenate([rawtraces, traces], axis = 1)
        rawbckgnd = np.concatenate([rawbckgnd, bval], axis = 1)
        divs[i] = dff.shape[1]

    # Save generated values for post-post-processing
    masksout    = np.transpose(masks, axes=(2,1,0))
    np.save(os.path.join(mcdir, 'top_masks_out.npy'), masksout)
    np.save(os.path.join(mcdir, 'top_epi_sections.npy'), maxepi_idx)
    np.save(os.path.join(mcdir, 'top_epi_values.npy'), maxepi_val)
    np.save(os.path.join(mcdir, 'top_ls_sections.npy'), maxls_idx)
    np.save(os.path.join(mcdir, 'top_ls_values.npy'), maxls_val)
    np.save(os.path.join(mcdir, 'ls_peak_vals.npy'), lspeakvals)
    np.save(os.path.join(mcdir, 'epi_peak_vals.npy'), epipeakvals)
    np.save(os.path.join(mcdir, 'dff_traces.npy'), dff)
    np.save(os.path.join(mcdir, 'dff_div_points.npy'), divs)

    # Plot out the dF/F traces, and put vertical markers at the dividers
    # between video segments. User would have to manually label them as there's
    # no real way to determine what segments are what.
    nrow = 6
    ncol = 1
    dffig, ax    = plt.subplots(nrow, ncol, sharex = True, sharey = True)
    ll   = dff[0].shape[0]
    axrange = np.linspace(0, (ll-1)/10, num=ll)
    for i, dfft in enumerate(dff):
        if i == nrow:
            ax[i-1].set_xlabel('Time (seconds)')
            break
        ax[i].plot(axrange, dfft)
        ax[i].set_ylabel(str(i+1))
        for div in divs:
            ax[i].axvline(div/10, color='r', linestyle='--')
    dffig.suptitle('Neuron dF/F Curves')
    plt.show()

    tfig, ax    = plt.subplots(nrow, ncol, sharex = True, sharey = True)
    ll   = rawtraces[0].shape[0]
    axrange = np.linspace(0, (ll-1)/10, num=ll)
    for i, tr in enumerate(rawtraces):
        if i >= nrow:
            ax[i-1].set_xlabel('Time (seconds)')
            break
        ax[i].plot(axrange, np.add(tr, rawbckgnd[i]))
        ax[i].plot(axrange, rawbckgnd[i])
        ax[i].set_ylabel(str(i+1))
        for div in divs:
            ax[i].axvline(div/10, color='r', linestyle='--')
    tfig.suptitle('Neuron Raw Traces + Background')
    plt.show()

    """
    # Next do line plot with averages + error bars.
    #   Set up lines for a given neuron, showing increase or decrease of max
    #   intensity on that neuron between lightsheet and epi.
    #   
    intensity_lineplot  = np.concatenate([maxepi_val, maxls_val], axis=1).T
    avg_ls  = np.mean(maxls_val)
    std_ls  = np.std(maxls_val)/math.sqrt(maxls_val.shape[0])
    avg_epi = np.mean(maxepi_val)
    std_epi = np.std(maxepi_val)/math.sqrt(maxepi_val.shape[0])
    binplot, ax = plt.subplots()
    plt.plot(intensity_lineplot)
    ax.bar([0, 1], [avg_epi, avg_ls], yerr=[std_epi, std_ls], align='center', capsize=10, alpha=0.5)
    ax.set_ylabel('Peak dF/F')
    ax.set_xticks([0, 1])
    ax.set_xticklabels(['Epi', 'Light-sheet'])
    ax.set_title('Contrast change, Epi vs. LS')
    plt.show()
    """
    # Histogram of spike intensities.
    histfig, ax = plt.subplots()
    if not (lspeakvals.size>0):
        lspeakvals = np.zeros(1)
    if not (epipeakvals.size>0):
        epipeakvals = np.zeros(1)
    binrange    = np.amax(np.concatenate([lspeakvals, epipeakvals]))
    binrange = 1.5 if binrange >1.5 else math.ceil(binrange*10)/10
    binset  = np.linspace(0, binrange, num=int(binrange*10+1))
    nls     = lspeakvals.shape[0]
    nepi    = epipeakvals.shape[0]
    epi_n, epi_bins, epi_patches    = ax.hist(epipeakvals, bins=binset, alpha=0.5, label='Epi-illumination', histtype='barstacked', ec='black', lw=0, color='#7f86c1')
    ls_n, ls_bins, ls_patches       = ax.hist(lspeakvals, bins=binset, alpha=0.5, label='Light-sheet', histtype='barstacked', ec='black', lw=0, color='#f48466')
    plt.legend(loc='upper right')
    histfig.suptitle('Lightsheet vs. Epi-illumination dF/F')
    plt.xlabel('dF/F')
    plt.ylabel('Spike Count')
    plt.show()

    # Plot the image with the contours (outlines of neurons), labeled
    Asparse     = scipy.sparse.csc_matrix(masksout.reshape((masksout.shape[1]*masksout.shape[0], masksout.shape[2])))
    lstop       = np.transpose(masksTopLS, axes=(2,1,0))
    epitop      = np.transpose(masksTopEpi, axes=(2,1,0))
    ovtop       = np.transpose(maskov, axes=(2,1,0))
    AsparseLS   = scipy.sparse.csc_matrix(lstop.reshape((lstop.shape[1]*lstop.shape[0], lstop.shape[2])))
    AsparseEpi  = scipy.sparse.csc_matrix(epitop.reshape((epitop.shape[1]*epitop.shape[0], epitop.shape[2])))
    AsparseOv   = scipy.sparse.csc_matrix(ovtop.reshape((ovtop.shape[1]*ovtop.shape[0], ovtop.shape[2])))
    vid         = cm.load(fvids[0])
    #Cn          = cm.local_correlations(vid.transpose(1,2,0))
    Cn          = np.zeros((vid.shape[1], vid.shape[2]))
    Cn[np.isnan(Cn)] = 0
    out=plt.figure()
    cm.utils.visualization.plot_contours(Asparse, Cn)
    out=plt.figure()
    cm.utils.visualization.plot_contours(AsparseLS, Cn)
    out=plt.figure()
    cm.utils.visualization.plot_contours(AsparseEpi, Cn)
    out=plt.figure()
    cm.utils.visualization.plot_contours(AsparseOv, Cn)

    scipy.io.savemat(os.path.join(mcdir, 'epi_histogram.mat'), {'n':epi_n, 'bins':epi_bins, 'patches':epi_patches})
    scipy.io.savemat(os.path.join(mcdir, 'ls_histogram.mat'), {'n':ls_n, 'bins':ls_bins, 'patches':ls_patches})
    scipy.io.savemat(os.path.join(mcdir, 'epi_spike_values.mat'), {'epispikes':epipeakvals})
    scipy.io.savemat(os.path.join(mcdir, 'ls_spike_values.mat'), {'lsspikes':lspeakvals})
    scipy.io.savemat(os.path.join(mcdir, 'df_over_f.mat'), {'data':dff, 'indices_between_ls_or_epi':divs})
    scipy.io.savemat(os.path.join(mcdir, 'rawtraces.mat'), {'data':rawtraces, 'indices_between_ls_or_epi':divs})
    scipy.io.savemat(os.path.join(mcdir, 'rawbackground.mat'), {'data':rawbckgnd, 'indices_between_ls_or_epi':divs})
コード例 #3
0
def test(path=r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch2\M18\20191121b\N4',
         data=None):
    """
    Test harness for the mmap-free CaImAn component-inspection GUI.

    Loads a PCF or CNMF object and builds the Qt/pyqtgraph widget layout,
    displaying a static local-correlation image instead of movie frames.

    :param path: str, directory from which to load a PCF or CNMF file. If None
                 (and data is None too), a window prompt asks for a directory.
                 Defaults to a hard-coded test data set (previous behavior).
    :param data: optional, an already loaded cnm object; if provided, no file
                 loading is performed.
    """

    def make_color_img(img, gain=255, min_max=None, out_type=np.uint8):
        """Scale a 2D image into [0, gain], cast to out_type and stack to RGB."""
        if min_max is None:
            min_ = img.min()
            max_ = img.max()
        else:
            min_, max_ = min_max

        img = (img - min_) / (max_ - min_) * gain
        img = img.astype(out_type)
        img = np.dstack([img] * 3)
        return img

    ### FIND DATA ###
    if data is None:    # different conditions on file loading (not necessary if data was already provided)
        if path is None:    # if no path has been given, open a window prompt to select a directory
            F = FileDialog()

            # load object saved by CNMF
            path = F.getExistingDirectory(caption='Select folder from which to load a PCF or CNMF file')

        try:    # first try to get CNMF data from a PCF object (should be most up-to-date)
            cnm_obj = pipe.load_pcf(path).cnmf
        except FileNotFoundError:
            try:
                cnm_obj = pipe.load_cnmf(path)
            except FileNotFoundError:
                raise FileNotFoundError(f'Could not find data to load in {path}!')
    else:
        cnm_obj = data

    # Contour colors for the six r-value bins (ascending component quality).
    contour_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255),
                      (255, 255, 0), (255, 0, 255), (0, 255, 255)]

    def component_contours_and_bins():
        """Compute contours of all accepted components and bin them by r-value.

        Returns (contours, bins) where bins is a list of six index arrays
        partitioning the accepted components at r-value thresholds
        0.1 / 0.25 / 0.5 / 0.75 / 0.9.
        """
        # np.int was removed from NumPy (deprecated 1.20, removed 1.24);
        # the builtin int is the drop-in replacement.
        thr = int(thrshcomp_line.value())
        contours = [cv2.findContours(cv2.threshold(comp, thr, 255, 0)[1], cv2.RETR_TREE,
                                     cv2.CHAIN_APPROX_SIMPLE)[0]
                    for comp in estimates.img_components[estimates.idx_components]]
        SNRs = np.array(estimates.r_values)
        iidd = np.array(estimates.idx_components)

        bins = [np.where(SNRs[iidd] < 0.1)[0],
                np.where((SNRs[iidd] >= 0.1) & (SNRs[iidd] < 0.25))[0],
                np.where((SNRs[iidd] >= 0.25) & (SNRs[iidd] < 0.5))[0],
                np.where((SNRs[iidd] >= 0.5) & (SNRs[iidd] < 0.75))[0],
                np.where((SNRs[iidd] >= 0.75) & (SNRs[iidd] < 0.9))[0],
                np.where(SNRs[iidd] >= 0.9)[0]]
        return contours, bins

    def draw_contours_overall(md):
        """Dispatch a redraw depending on the current display mode."""
        # Literal comparisons use == (the originals used `is`, which only
        # worked by accident through CPython string/small-int interning).
        if md == "reset":
            draw_contours()
        elif md == "neurons":
            if neuron_selected:
                # if a specific neuron has been selected, only one contour
                # should be changed while thrshcomp_line is changing
                if nr_index == 0:
                    # if user does not start to move through the frames
                    draw_contours_update(estimates.background_image, img)
                    draw_contours_update(comp2_scaled, img2)
                else:
                    # NEVER CALLED IN THIS VERSION WITHOUT MMAP SINCE NR_INDEX NEVER CHANGES (NO NR_VLINE)
                    draw_contours_update(raw_mov_scaled, img)
                    draw_contours_update(frame_denoise_scaled, img2)
            else:
                # if no specific neuron has been selected, all the contours are changing
                draw_contours()
        else:
            # md is "background": nothing to redraw
            return

    def draw_contours():
        """Redraw the contours of all accepted components on the background image."""
        global thrshcomp_line, estimates, img
        bkgr_contours = estimates.background_image.copy()

        if len(estimates.idx_components) > 0:
            contours, bins = component_contours_and_bins()
            for idxs, color in zip(bins, contour_colors):
                cv2.drawContours(bkgr_contours, sum([contours[jj] for jj in idxs], []), -1, color, 1)

        img.setImage(bkgr_contours, autoLevels=False)
    # pg.setConfigOptions(imageAxisOrder='row-major')

    def draw_contours_update(cf, im):
        """Redraw only the currently selected component's contour on frame cf."""
        global thrshcomp_line, estimates
        curFrame = cf.copy()

        if len(estimates.idx_components) > 0:
            contours, bins = component_contours_and_bins()
            for idxs, color in zip(bins, contour_colors):
                if min_dist_comp in idxs:
                    cv2.drawContours(curFrame, contours[min_dist_comp], -1, color, 1)

        im.setImage(curFrame, autoLevels=False)

    # Always start by initializing Qt (only once per application)
    app = QtGui.QApplication([])

    try:
        cv2.setNumThreads(1)
    except Exception:  # narrowed from a bare except; still best-effort
        print('Open CV is naturally single threaded')

    ## Define a top-level widget to hold everything
    w = QtGui.QWidget()

    ## Create some widgets to be placed inside
    btn = QtGui.QPushButton('press me')
    text = QtGui.QLineEdit('enter text')
    win = pg.GraphicsLayoutWidget()
    win.setMaximumWidth(300)
    win.setMinimumWidth(200)
    hist = pg.HistogramLUTItem()  # Contrast/color control
    win.addItem(hist)
    p1 = pg.PlotWidget()
    p2 = pg.PlotWidget()
    p3 = pg.PlotWidget()
    t = ParameterTree()
    t_action = ParameterTree()
    action_layout = QtGui.QGridLayout()

    ## Create a grid layout to manage the widgets size and position
    layout = QtGui.QGridLayout()
    w.setLayout(layout)

    # A plot area (ViewBox + axes) for displaying the image
    # Item for displaying image data
    img = pg.ImageItem()
    p1.addItem(img)

    img2 = pg.ImageItem()
    p3.addItem(img2)

    hist.setImageItem(img)

    # Draggable line for setting isocurve level (for setting contour threshold)
    thrshcomp_line = pg.InfiniteLine(angle=0, movable=True, pen='g')
    hist.vb.addItem(thrshcomp_line)
    hist.vb.setMouseEnabled(y=False)  # makes user interaction a little easier
    thrshcomp_line.setValue(100)
    thrshcomp_line.setZValue(1000)  # bring iso line above contrast controls

    ## Add widgets to the layout in their proper positions
    layout.addWidget(win, 1, 0)   # histogram
    layout.addWidget(p3, 0, 2)   # denoised movie

    layout.addWidget(t, 0, 0)   # upper-right table
    layout.addWidget(t_action, 1, 2)  # bottom-right table
    layout.addWidget(p1, 0, 1)  # raw movie
    layout.addWidget(p2, 1, 1)  # calcium trace window

    # enable only horizontal zoom for the traces component
    p2.setMouseEnabled(x=True, y=False)

    # draw something in the raw-movie field and set the histogram borders correspondingly
    test_img_file = r'W:\Neurophysiology-Storage1\Wahl\Hendrik\PhD\Data\Batch2\M18\20191121b\N4\local_correlation_image.png'
    test_img = plt.imread(test_img_file)

    img.setImage(np.rot90(test_img[:, :, 0], 3))
    hist.setLevels(test_img[:, :, 0].min(), test_img[:, :, 0].max())

    p2.setMouseEnabled(x=True, y=False)

    # Another plot area for displaying ROI data
    p2.setMaximumHeight(250)

    # set position and scale of image
    img.scale(1, 1)

    # zoom to fit image
    p1.autoRange()

    mode = "reset"
    p2.setTitle("mode: %s" % (mode))

    ## Display the widget as a new window
    w.show()

    ## Start the Qt event loop
    app.exec_()
コード例 #4
0
def main():
    """
    Compare light-sheet (LS) and epi-illumination calcium imaging recordings.

    Prompts for a directory of motion-corrected videos (``*_mc.tif``), builds
    per-modality neuron-mask unions, computes dF/F traces per video segment,
    detects spikes, renders heatmaps and trace plots, and exports the results
    as .mat files into the same directory.

    Relies on module-level helpers defined elsewhere in this file:
    FileDialog, load_masks, mask_union, mask_joint, mask_disjoint and
    calculate_dff_set.
    """

    # Prompt user for directory containing files to be analyzed
    F = FileDialog()  # Calls Qt backend script to create a file dialog object
    mcdir = F.getExistingDirectory(
        caption='Select Motion Corrected Video Directory')
    fvids = []
    for file in os.listdir(mcdir):
        if file.endswith("_mc.tif"):
            fvids.append(os.path.join(mcdir, file))

    # Set up a variable to determine which sections are lightsheet and which
    # are epi. This is a horrible way to handle it - need to write new code to
    # either automatically determine or prompt user for input.
    # Use 1 for lightsheet and 0 for epi.
    # NOTE(review): os.listdir() returns entries in arbitrary order, so this
    # flag list only lines up with fvids if the filenames happen to come back
    # in the expected order -- confirm, or sort fvids explicitly.
    lsepi = [1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0]

    # Threshold for non-binary masks to convert to binary
    th = 0.05

    # Load masks, videos, and dF/F curves.
    # Per modality we accumulate: all raw masks, a de-duplicated mask union,
    # the concatenated video, and the frame indices where each segment ends.
    lsmasks = None
    epimasks = None
    lsunion = None
    epiunion = None
    lsvidconcat = None
    epividconcat = None
    lsvidconcattimes = [0]
    epividconcattimes = [0]
    for i, file in enumerate(fvids):
        vid = cm.load(file)
        mask = load_masks(file)
        if lsepi[i]:
            # Lazily initialize the LS accumulators on the first LS video.
            if lsmasks is None:
                lsmasks = np.empty((0, mask.shape[1], mask.shape[2]))
                lsunion = mask > th
                lsvidconcat = np.empty((0, vid.shape[1], vid.shape[2]))

            lsvidconcat = cm.concatenate([lsvidconcat, vid], axis=0)
            lsmasks = np.concatenate((lsmasks, mask))
            # mask_union merges masks closer than 10 px into one neuron entry
            # (presumably a pixel distance -- see mask_union's definition).
            lsunion = mask_union(lsunion, mask > th, 10)
            lsvidconcattimes.append(lsvidconcat.shape[0])
        else:
            # Same accumulation for the epi-illumination videos.
            if epimasks is None:
                epimasks = np.empty((0, mask.shape[1], mask.shape[2]))
                epiunion = mask > th
                epividconcat = np.empty((0, vid.shape[1], vid.shape[2]))

            epividconcat = cm.concatenate([epividconcat, vid], axis=0)
            epimasks = np.concatenate((epimasks, mask))
            epiunion = mask_union(epiunion, mask > th, 10)
            epividconcattimes.append(epividconcat.shape[0])

    print(epividconcattimes)
    print(epividconcat.shape)

    # Plot out the flattened masks for light-sheet and epi, and count the
    # number of unique detected neurons
    flFig, flAx = plt.subplots(1, 2)
    unFig, unAx = plt.subplots(1, 2)
    flsunion = np.zeros((lsmasks.shape[1], lsmasks.shape[2]))
    ff = np.zeros((lsmasks.shape[1], lsmasks.shape[2]))
    for lsmask in lsmasks:
        ff = np.add(ff, lsmask > th)
    for unionmask in lsunion:
        flsunion = np.add(flsunion, unionmask)
    flAx[0].imshow(ff)
    unAx[0].imshow(flsunion)
    print('Number of ls neurons: ' + str(lsunion.shape[0]))

    # NOTE(review): the epi accumulators below are sized from lsmasks.shape,
    # not epimasks.shape -- presumably both modalities share one frame size;
    # verify, otherwise this breaks for differing resolutions.
    ff = np.zeros((lsmasks.shape[1], lsmasks.shape[2]))
    fepiunion = np.zeros((lsmasks.shape[1], lsmasks.shape[2]))
    for epimask in epimasks:
        ff = np.add(ff, epimask > th)
    for unionmask in epiunion:
        fepiunion = np.add(fepiunion, unionmask)
    flAx[1].imshow(ff)
    unAx[1].imshow(fepiunion)
    print('Number of epi neurons: ' + str(epiunion.shape[0]))

    # Mask operations to create the various sets and then plot them all out:
    # neurons seen in both modalities, and those unique to each.
    sharedneurons = mask_joint(lsunion, epiunion, 10)
    lsunique = mask_disjoint(sharedneurons, lsunion, 10)
    epunique = mask_disjoint(sharedneurons, epiunion, 10)
    allFig, allAx = plt.subplots(1, 3)
    allAx[0].imshow(np.sum(lsunique, axis=0))
    allAx[1].imshow(np.sum(sharedneurons, axis=0))
    allAx[2].imshow(np.sum(epunique, axis=0))
    print('Number of unique-to-ls neurons: ' + str(lsunique.shape[0]))
    print('Number of unique-to-epi neurons: ' + str(epunique.shape[0]))
    print('Number of shared neurons: ' + str(sharedneurons.shape[0]))

    lsallmasks = mask_union(sharedneurons, lsunique, 10)
    epallmasks = mask_union(sharedneurons, epunique, 10)
    # Plot out df/F traces, custom calculated, for 'zz' number of elements
    #zz=-1
    #lslsdff = calculate_dff_set(lsvidconcat, lsallmasks)
    #lsepdff = calculate_dff_set(lsvidconcat, epunique[0:zz])
    #epepdff = calculate_dff_set(epvidconcat, epallmasks)
    #eplsdff = calculate_dff_set(epvidconcat, lsunique[0:zz])

    # dF/F is computed per video segment (using the recorded segment
    # boundaries) so each segment gets its own baseline, then concatenated
    # along time into one (n_neurons, n_frames) array.
    lslsdff = np.empty((lsallmasks.shape[0], 0))
    for i, el in enumerate(lsvidconcattimes):
        if not i == 0:
            # NOTE(review): the +1 here skips the first frame of every
            # segment (segment i spans [times[i-1], times[i])) -- looks like
            # an off-by-one; confirm intent before changing.
            start = lsvidconcattimes[i - 1] + 1
            end = lsvidconcattimes[i]
            lslsdff = np.concatenate(
                (lslsdff, calculate_dff_set(lsvidconcat[start:end],
                                            lsallmasks)),
                axis=1)
            print(lslsdff.shape)
    # Clamp negative dF/F excursions to zero.
    lslsdff = np.clip(lslsdff, 0, None)

    epepdff = np.empty((epallmasks.shape[0], 0))
    for i, el in enumerate(epividconcattimes):
        if not i == 0:
            start = epividconcattimes[i - 1] + 1
            end = epividconcattimes[i]
            epepdff = np.concatenate(
                (epepdff, calculate_dff_set(epividconcat[start:end],
                                            epallmasks)),
                axis=1)
            print(epepdff.shape)
    epepdff = np.clip(epepdff, 0, None)

    # Spike detection per neuron: record the peak dF/F value and the number
    # of detected events for each LS trace.
    lspeakmax = np.zeros(lslsdff.shape[0])
    lspeakcount = np.zeros(lslsdff.shape[0])
    for i, lsdff in enumerate(lslsdff):
        peaks, props = scipy.signal.find_peaks(lsdff,
                                               distance=10,
                                               prominence=(0.05, None),
                                               width=(3, None),
                                               height=(0.1, None))
        lspeakmax[i] = max(lsdff)
        if peaks.size > 0:
            lspeakcount[i] = peaks.size

    # Same spike detection for the epi traces.
    eppeakmax = np.zeros(epepdff.shape[0])
    eppeakcount = np.zeros(epepdff.shape[0])
    for i, epdff in enumerate(epepdff):
        peaks, props = scipy.signal.find_peaks(epdff,
                                               distance=10,
                                               prominence=(0.05, None),
                                               width=(3, None),
                                               height=(0.1, None))
        eppeakmax[i] = max(epdff)
        if peaks.size > 0:
            eppeakcount[i] = peaks.size

    # "Top six" = the 4 most active neurons by spike count plus the 2 with
    # the highest peak dF/F.
    # NOTE(review): the two index sets are not de-duplicated, so a neuron in
    # both groups appears twice -- confirm whether that is intended.
    toplscount_idxs = np.argsort(lspeakcount)
    toplspeak_idxs = np.argsort(lspeakmax)
    topls_idxs = np.concatenate((toplscount_idxs[-4:], toplspeak_idxs[-2:]))
    topsixlscount = lslsdff[topls_idxs]

    topepcount_idxs = np.argsort(eppeakcount)
    topeppeak_idxs = np.argsort(eppeakmax)
    topep_idxs = np.concatenate((topepcount_idxs[-4:], topeppeak_idxs[-2:]))
    topsixepcount = epepdff[topep_idxs]

    # Spatial heatmaps: each neuron's mask weighted by its peak dF/F and by
    # its spike count.
    lsmaxheatmap = np.zeros(lsallmasks[0].shape)
    lscountheatmap = np.zeros(lsallmasks[0].shape)
    for i, mask in enumerate(lsallmasks):
        lsmaxheatmap = np.add(lsmaxheatmap, mask * lspeakmax[i])
        lscountheatmap = np.add(lscountheatmap, mask * lspeakcount[i])

    epmaxheatmap = np.zeros(epallmasks[0].shape)
    epcountheatmap = np.zeros(epallmasks[0].shape)
    for i, mask in enumerate(epallmasks):
        epmaxheatmap = np.add(epmaxheatmap, mask * eppeakmax[i])
        epcountheatmap = np.add(epcountheatmap, mask * eppeakcount[i])

    # Binary-ish maps highlighting only the top-ranked neurons (value 2 where
    # a top mask is present), saved as PNGs in the working directory.
    lsrankedheatmap = np.zeros(lsallmasks[0].shape)
    for i, idx in enumerate(topls_idxs):
        lsrankedheatmap = np.add(lsrankedheatmap, lsallmasks[idx] * 2)
    eprankedheatmap = np.zeros(epallmasks[0].shape)
    for i, idx in enumerate(topep_idxs):
        eprankedheatmap = np.add(eprankedheatmap, epallmasks[idx] * 2)
    imageio.imwrite('ls_topmasks.png', lsrankedheatmap)
    imageio.imwrite('epi_topmasks.png', eprankedheatmap)
    ff, ax = plt.subplots()
    ax.imshow(lsrankedheatmap)
    ff, ax = plt.subplots()
    ax.imshow(eprankedheatmap)

    # Setting up plot information
    cmap = plt.get_cmap('jet')
    # Light-sheet, maximum dF/F figure.
    # The colorbar is built manually so its range can be cut to the nonzero
    # span of the heatmap (vmin/vmax rounded to 0.01).
    ff, ax = plt.subplots()
    ax.imshow(lsmaxheatmap, cmap='jet')
    vmin = math.floor(
        np.min(lsmaxheatmap[np.nonzero(lsmaxheatmap)]) * 100) / 100
    vmax = math.ceil(
        np.max(lsmaxheatmap[np.nonzero(lsmaxheatmap)]) * 100) / 100
    norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    colors = cmap(np.linspace(1. - (vmax - vmin) / float(vmax), 1, cmap.N))
    color_map = matplotlib.colors.LinearSegmentedColormap.from_list(
        'cut_jet', colors)
    cax, _ = matplotlib.colorbar.make_axes(plt.gca())
    cbar = matplotlib.colorbar.ColorbarBase(
        cax,
        cmap=color_map,
        norm=norm,
    )
    cbar.set_ticks([vmin, (vmax + vmin) / 2, vmax])
    cbar.set_ticklabels([vmin, (vmax + vmin) / 2, vmax])
    #cax.setlabel('Max. dF/F')
    ax.axis('off')
    ff.suptitle('Heatmap of light-sheet neurons by maximum dF/F')
    plt.show()
    # Light-sheet, spike count figure
    ff, ax = plt.subplots()
    ax.imshow(lscountheatmap, cmap='jet')
    vmin = np.min(lscountheatmap[np.nonzero(lscountheatmap)])
    vmax = np.max(lscountheatmap[np.nonzero(lscountheatmap)])
    norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    colors = cmap(np.linspace(1. - (vmax - vmin) / float(vmax), 1, cmap.N))
    color_map = matplotlib.colors.LinearSegmentedColormap.from_list(
        'cut_jet', colors)
    cax, _ = matplotlib.colorbar.make_axes(plt.gca())
    cbar = matplotlib.colorbar.ColorbarBase(
        cax,
        cmap=color_map,
        norm=norm,
    )
    cbar.set_ticks([vmin, math.floor((vmax + vmin) / 2), vmax])
    cbar.set_ticklabels([vmin, math.floor((vmax + vmin) / 2), vmax])
    #cax.setlabel('Event Count')
    ax.axis('off')
    ff.suptitle('Heatmap of light-sheet neurons by spike count')
    ff.show()
    # Epi-illumination, maximum dF/F figure
    ff, ax = plt.subplots()
    ax.imshow(epmaxheatmap, cmap='jet')
    vmin = math.floor(
        np.min(epmaxheatmap[np.nonzero(epmaxheatmap)]) * 100) / 100
    vmax = math.ceil(
        np.max(epmaxheatmap[np.nonzero(epmaxheatmap)]) * 100) / 100
    norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    colors = cmap(np.linspace(1. - (vmax - vmin) / float(vmax), 1, cmap.N))
    color_map = matplotlib.colors.LinearSegmentedColormap.from_list(
        'cut_jet', colors)
    cax, _ = matplotlib.colorbar.make_axes(plt.gca())
    cbar = matplotlib.colorbar.ColorbarBase(
        cax,
        cmap=color_map,
        norm=norm,
    )
    cbar.set_ticks([vmin, (vmax + vmin) / 2, vmax])
    cbar.set_ticklabels([vmin, (vmax + vmin) / 2, vmax])
    #cbar.ax.setlabel('Max. dF/F')
    ax.axis('off')
    ff.suptitle('Heatmap of epi-illuminated neurons by maximum dF/F')
    ff.show()
    # Epi-illumination, spike count figure
    ff, ax = plt.subplots()
    ax.imshow(epcountheatmap, cmap='jet')
    vmin = np.min(epcountheatmap[np.nonzero(epcountheatmap)])
    vmax = np.max(epcountheatmap[np.nonzero(epcountheatmap)])
    norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    colors = cmap(np.linspace(1. - (vmax - vmin) / float(vmax), 1, cmap.N))
    color_map = matplotlib.colors.LinearSegmentedColormap.from_list(
        'cut_jet', colors)
    cax, _ = matplotlib.colorbar.make_axes(plt.gca())
    cbar = matplotlib.colorbar.ColorbarBase(
        cax,
        cmap=color_map,
        norm=norm,
    )
    cbar.set_ticks([vmin, math.floor((vmax + vmin) / 2), vmax])
    cbar.set_ticklabels([vmin, math.floor((vmax + vmin) / 2), vmax])
    #cax.setlabel('Event Count')
    ax.axis('off')
    ff.suptitle('Heatmap of epi-illuminated neurons by spike count')
    ff.show()

    # Stacked trace plots for the six top-ranked neurons of each modality.
    ffls, axls = plt.subplots(6, 1)
    ffep, axep = plt.subplots(6, 1)
    for i in range(6):
        axls[i].plot(topsixlscount[i])
        axep[i].plot(topsixepcount[i])
    ffls.suptitle('Top 6 Lightsheet neurons')
    ffep.suptitle('Top 6 Epi neurons')

    # export data (df/f traces and concat times) to be plotted in matlab
    scipy.io.savemat(os.path.join(mcdir, 'lightsheet_dff_data.mat'), {
        'trace': lslsdff,
        'timebreaks': lsvidconcattimes
    })
    scipy.io.savemat(os.path.join(mcdir, 'epi_dff_data.mat'), {
        'trace': epepdff,
        'timebreaks': epividconcattimes
    })
    scipy.io.savemat(os.path.join(mcdir, 'lightsheet_dff_datatops.mat'), {
        'trace': topsixlscount,
        'timebreaks': lsvidconcattimes,
        'idxs': topls_idxs
    })
    scipy.io.savemat(
        os.path.join(mcdir, 'epi_dff_datatops.mat'), {
            'trace': topsixepcount,
            'timebreaks': epividconcattimes,
            'idxs': topep_idxs
        })
    scipy.io.savemat(os.path.join(mcdir, 'epi_masks.mat'),
                     {'mask': epallmasks})
    scipy.io.savemat(os.path.join(mcdir, 'ls_masks.mat'), {'mask': lsallmasks})