Example No. 1
    def test_mergeROIs(self):
        roi1 = ia.WeightedROI(np.arange(9).reshape((3, 3)))
        roi2 = ia.WeightedROI(np.arange(1, 10).reshape((3, 3)))

        merged_ROI = sca.merge_weighted_rois(roi1, roi2)
        merged_ROI2 = sca.merge_binary_rois(roi1, roi2)

        assert (np.array_equal(merged_ROI.get_weighted_mask(),
                               np.arange(1, 18, 2).reshape((3, 3))))
        assert (np.array_equal(merged_ROI2.get_binary_mask(), np.ones((3, 3))))
def downsample_for_multiprocessing(params):
    """Load a chunk of frames from an nwb dataset and downsample it in time.

    Designed to be mapped over chunk parameter tuples by a multiprocessing pool.
    """
    nwb_path, dset_path, frame_start_i, frame_end_i, dr = params

    print('\tdownsampling frame {} - {}'.format(frame_start_i, frame_end_i))

    ff = h5py.File(nwb_path, 'r')
    chunk = ff[dset_path][frame_start_i:frame_end_i, :, :]
    ff.close()
    chunk_d = ia.z_downsample(chunk, downSampleRate=dr, is_verbose=False)
    return chunk_d
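A minimal usage sketch for the helper above, mapping it over frame chunks with a multiprocessing pool. The nwb path, dataset path, and chunk size are hypothetical placeholders, and the helper itself also needs h5py and NeuroAnalysisTools.core.ImageAnalysis (ia) imported, as in the other examples on this page.

import multiprocessing as mp
import numpy as np

if __name__ == '__main__':
    nwb_path = 'example.nwb'  # hypothetical nwb file
    dset_path = '/acquisition/timeseries/2p_movie/data'  # hypothetical dataset path
    total_frame_num = 10000
    chunk_size = 1000
    dr = 5  # temporal downsample rate

    params_list = [(nwb_path, dset_path, start,
                    min(start + chunk_size, total_frame_num), dr)
                   for start in range(0, total_frame_num, chunk_size)]

    with mp.Pool(4) as p:
        chunks_d = p.map(downsample_for_multiprocessing, params_list)

    mov_d = np.concatenate(chunks_d, axis=0)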
Example No. 3
def get_traces(params):
    t0 = time.time()

    chunk_ind, chunk_start, chunk_end, nwb_path, data_path, curr_folder, center_array, surround_array = params

    nwb_f = h5py.File(nwb_path, 'r')
    print('\nstart analyzing chunk: {}'.format(chunk_ind))
    curr_mov = nwb_f[data_path][chunk_start:chunk_end]
    nwb_f.close()

    # print 'extracting traces'
    curr_traces_center = np.empty((center_array.shape[0], curr_mov.shape[0]),
                                  dtype=np.float32)
    curr_traces_surround = np.empty((center_array.shape[0], curr_mov.shape[0]),
                                    dtype=np.float32)
    for i in range(center_array.shape[0]):
        curr_center = ia.WeightedROI(center_array[i])
        curr_surround = ia.ROI(surround_array[i])
        curr_traces_center[i, :] = curr_center.get_weighted_trace_pixelwise(
            curr_mov)

        # scale surround trace to be similar as center trace
        mean_center_weight = curr_center.get_mean_weight()
        curr_traces_surround[i, :] = curr_surround.get_binary_trace_pixelwise(
            curr_mov) * mean_center_weight

    # print('saving chunk {} ...'.format(chunk_ind))
    chunk_folder = os.path.join(curr_folder, 'chunks')
    if not os.path.isdir(chunk_folder):
        os.mkdir(chunk_folder)
    chunk_f = h5py.File(
        os.path.join(chunk_folder,
                     'chunk_temp_' + ft.int2str(chunk_ind, 4) + '.hdf5'), 'a')
    chunk_f['traces_center'] = curr_traces_center
    chunk_f['traces_surround'] = curr_traces_surround
    chunk_f.close()

    print('\n\t{:06d} seconds: chunk: {}; trace extraction finished.'.format(
        int(time.time() - t0), chunk_ind))

    return None
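A minimal sketch of how get_traces might be driven in parallel. It assumes the center and surround mask arrays were already written to 'rois_and_traces.hdf5' (as in the surround-mask example further down this page); the nwb path, dataset path, and chunk size are hypothetical, and the worker itself also needs h5py, numpy, time, os, ia and ft imported as in the other examples.

import multiprocessing as mp
import h5py
import numpy as np

if __name__ == '__main__':
    nwb_path = 'example.nwb'  # hypothetical nwb file
    data_path = '/acquisition/timeseries/2p_movie/data'  # hypothetical dataset path
    curr_folder = '.'
    total_frame_num = 50000
    chunk_size = 5000

    roi_f = h5py.File('rois_and_traces.hdf5', 'r')
    center_array = roi_f['masks_center'][()]
    surround_array = roi_f['masks_surround'][()]
    roi_f.close()

    chunk_starts = np.arange(0, total_frame_num, chunk_size)
    params_list = [(ci, int(cs), int(min(cs + chunk_size, total_frame_num)),
                    nwb_path, data_path, curr_folder, center_array, surround_array)
                   for ci, cs in enumerate(chunk_starts)]

    with mp.Pool(6) as p:
        p.map(get_traces, params_list)
    # per-chunk traces end up in '<curr_folder>/chunks/chunk_temp_XXXX.hdf5'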
Example No. 5
def run():
    # masks whose center falls within this pixel margin at the image border will be discarded
    # [top margin, bottom margin, left margin, right margin]
    center_margin = [10, 30, 35, 10]

    # area range: [min, max] number of pixels for a valid roi
    area_range = [5, 500]  # [10, 100] for bouton, [150, 1000] for soma

    # if two masks overlap and the ratio of the overlap area to the area of the smaller mask
    # exceeds this value, the smaller mask will be discarded.
    overlap_thr = 0  # 0.2

    save_folder = 'figures'

    data_file_name = 'cells.hdf5'
    save_file_name = 'cells_refined.hdf5'
    background_file_name = "corrected_mean_projections.tif"

    curr_folder = os.path.dirname(os.path.realpath(__file__))
    os.chdir(curr_folder)

    if not os.path.isdir(save_folder):
        os.makedirs(save_folder)

    # read cells
    dfile = h5py.File(data_file_name, 'r')
    cells = {}
    for cellname in dfile.keys():
        cells.update({cellname: ia.WeightedROI.from_h5_group(dfile[cellname])})

    print('total number of cells:', len(cells))

    # get the names of cells which are on the edge
    edge_cells = []
    for cellname, cellmask in cells.items():
        dimension = cellmask.dimension
        center = cellmask.get_center()
        if center[0] < center_margin[0] or \
           center[0] > dimension[0] - center_margin[1] or \
           center[1] < center_margin[2] or \
           center[1] > dimension[1] - center_margin[3]:

            # cellmask.plot_binary_mask_border(color='#ff0000', borderWidth=1)
            # plt.title(cellname)
            # plt.show()

            edge_cells.append(cellname)

    print('\ncells to be removed because they are on the edges:')
    print('\n'.join(edge_cells))

    # remove edge cells
    for edge_cell in edge_cells:
        _ = cells.pop(edge_cell)

    # get dictionary of cell areas
    cell_areas = {}
    for cellname, cellmask in cells.items():
        cell_areas.update({cellname: cellmask.get_binary_area()})

    # remove cellnames that have area outside of the area_range
    invalid_cell_ns = []
    for cellname, cellarea in cell_areas.items():
        if cellarea < area_range[0] or cellarea > area_range[1]:
            invalid_cell_ns.append(cellname)
    print("cells to be removed because they do not meet area criterion:")
    print("\n".join(invalid_cell_ns))
    for invalid_cell_n in invalid_cell_ns:
        cell_areas.pop(invalid_cell_n)

    # sort cells with their binary area
    cell_areas_sorted = sorted(cell_areas.items(), key=operator.itemgetter(1))
    cell_areas_sorted.reverse()
    cell_names_sorted = [c[0] for c in cell_areas_sorted]
    # print '\n'.join([str(c) for c in cell_areas_sorted])

    # get the names of cells that need to be removed because of overlap
    retain_cells = []
    remove_cells = []
    for cell1_name in cell_names_sorted:
        cell1_mask = cells[cell1_name]
        is_remove = 0
        cell1_area = cell1_mask.get_binary_area()
        for cell2_name in retain_cells:
            cell2_mask = cells[cell2_name]
            cell2_area = cell2_mask.get_binary_area()
            curr_overlap = cell1_mask.binary_overlap(cell2_mask)

            if float(curr_overlap) / cell1_area > overlap_thr:
                remove_cells.append(cell1_name)
                is_remove = 1
                print(cell1_name, ':', cell1_mask.get_binary_area(),
                      ': removed')

                # f = plt.figure(figsize=(10,10))
                # ax = f.add_subplot(111)
                # cell1_mask.plot_binary_mask_border(plotAxis=ax, color='#ff0000', borderWidth=1)
                # cell2_mask.plot_binary_mask_border(plotAxis=ax, color='#0000ff', borderWidth=1)
                # ax.set_title('red:'+cell1_name+'; blue:'+cell2_name)
                # plt.show()
                break

        if is_remove == 0:
            retain_cells.append(cell1_name)
            print(cell1_name, ':', cell1_mask.get_binary_area(), ': retained')

    print('\ncells to be removed because of overlapping:')
    print('\n'.join(remove_cells))
    print('\ntotal number of retained cells:', len(retain_cells))

    # plotting
    colors = pt.random_color(len(cells.keys()))
    bgImg = ia.array_nor(np.max(tf.imread(background_file_name), axis=0))

    f = plt.figure(figsize=(10, 10))
    ax = f.add_subplot(111)
    ax.imshow(ia.array_nor(bgImg),
              cmap='gray',
              vmin=0,
              vmax=0.5,
              interpolation='nearest')

    f2 = plt.figure(figsize=(10, 10))
    ax2 = f2.add_subplot(111)
    ax2.imshow(np.zeros(bgImg.shape, dtype=np.uint8),
               vmin=0,
               vmax=1,
               cmap='gray',
               interpolation='nearest')

    i = 0
    for retain_cell in retain_cells:
        cells[retain_cell].plot_binary_mask_border(plotAxis=ax,
                                                   color=colors[i],
                                                   borderWidth=1)
        cells[retain_cell].plot_binary_mask_border(plotAxis=ax2,
                                                   color=colors[i],
                                                   borderWidth=1)
        i += 1
    # plt.show()

    # save figures
    pt.save_figure_without_borders(f,
                                   os.path.join(
                                       save_folder,
                                       '2P_refined_ROIs_with_background.png'),
                                   dpi=300)
    pt.save_figure_without_borders(
        f2,
        os.path.join(save_folder, '2P_refined_ROIs_without_background.png'),
        dpi=300)

    # save h5 file
    save_file = h5py.File(save_file_name, 'x')
    i = 0
    for retain_cell in retain_cells:
        print(retain_cell, ':', cells[retain_cell].get_binary_area())

        currGroup = save_file.create_group('cell' + ft.int2str(i, 4))
        currGroup.attrs['name'] = retain_cell
        roiGroup = currGroup.create_group('roi')
        cells[retain_cell].to_h5_group(roiGroup)
        i += 1

    for attr, value in dfile.attrs.items():
        save_file.attrs[attr] = value

    save_file.close()
    dfile.close()
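A minimal entry-point sketch, assuming this refinement step is saved as a standalone script that only defines run():

if __name__ == '__main__':
    run()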
Example No. 6
print('planes:')
print('\n'.join(plane_ns))

for plane_n in plane_ns:
    print('\nprocessing plane: {}'.format(plane_n))

    save_folder = os.path.join(curr_folder, plane_n)
    if not os.path.isdir(save_folder):
        os.makedirs(save_folder)

    for ch_n in ch_ns:
        print('\n\tprocessing channel: {}'.format(ch_n))
        plane_folder = os.path.join(data_folder, plane_n, ch_n, 'corrected')

        # f_ns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif']
        f_ns = [f for f in os.listdir(plane_folder)
                if f[-4:] == '.tif' and identifier in f]
        f_ns.sort()
        print('\t\t'+'\n\t\t'.join(f_ns) + '\n')

        mov_d = []

        for f_n in f_ns:
            print('\t\tprocessing {} ...'.format(f_n))
            curr_mov = tf.imread(os.path.join(plane_folder, f_n))
            curr_mov_d = ia.rigid_transform_cv2(img=curr_mov, zoom=(1. / xy_downsample_rate))
            curr_mov_d = ia.z_downsample(curr_mov_d, downSampleRate=t_downsample_rate, is_verbose=False)
            mov_d.append(curr_mov_d)

        mov_d = np.concatenate(mov_d, axis=0)
        save_n = '{}_{}_{}_downsampled.tif'.format(os.path.split(data_folder)[1], plane_n, ch_n)
        tf.imsave(os.path.join(save_folder, save_n), mov_d)
def run():
    data_file_name = 'cells_refined.hdf5'
    background_file_name = "corrected_mean_projections.tif"
    save_folder = 'figures'

    overlap_threshold = 0.9
    surround_limit = [1, 8]

    curr_folder = os.path.dirname(os.path.realpath(__file__))
    os.chdir(curr_folder)

    if not os.path.isdir(save_folder):
        os.makedirs(save_folder)

    print('reading cells file ...')
    data_f = h5py.File(data_file_name, 'r')

    cell_ns = sorted(data_f.keys())

    binary_mask_array = []
    weight_mask_array = []

    for cell_n in cell_ns:
        curr_roi = ia.ROI.from_h5_group(data_f[cell_n]['roi'])
        binary_mask_array.append(curr_roi.get_binary_mask())
        weight_mask_array.append(curr_roi.get_weighted_mask())

    data_f.close()
    binary_mask_array = np.array(binary_mask_array)
    weight_mask_array = np.array(weight_mask_array)
    print('starting mask_array shape:', weight_mask_array.shape)

    print('getting total mask ...')
    total_mask = np.zeros((binary_mask_array.shape[1], binary_mask_array.shape[2]), dtype=np.uint8)
    for curr_mask in binary_mask_array:
        total_mask = np.logical_or(total_mask, curr_mask)
    total_mask = np.logical_not(total_mask)

    plt.imshow(total_mask, interpolation='nearest')
    plt.title('total_mask')
    # plt.show()

    print('getting surround masks ...')
    binary_surround_array = []
    for binary_center in binary_mask_array:
        curr_surround = np.logical_xor(ni.binary_dilation(binary_center, iterations=surround_limit[1]),
                                       ni.binary_dilation(binary_center, iterations=surround_limit[0]))
        curr_surround = np.logical_and(curr_surround, total_mask).astype(np.uint8)
        binary_surround_array.append(curr_surround)
        # plt.imshow(curr_surround)
        # plt.show()
    binary_surround_array = np.array(binary_surround_array)

    print("saving rois ...")
    center_areas = []
    surround_areas = []
    for mask_ind in range(binary_mask_array.shape[0]):
        center_areas.append(np.sum(binary_mask_array[mask_ind].flat))
        surround_areas.append(np.sum(binary_surround_array[mask_ind].flat))
    roi_f = h5py.File('rois_and_traces.hdf5', 'a')
    roi_f['masks_center'] = weight_mask_array
    roi_f['masks_surround'] = binary_surround_array

    roi_f.close()
    print('minimum surround area:', min(surround_areas), 'pixels.')

    f = plt.figure(figsize=(10, 10))
    ax_center = f.add_subplot(211)
    ax_center.hist(center_areas, bins=30)
    ax_center.set_title('roi center area distribution')
    ax_surround = f.add_subplot(212)
    ax_surround.hist(surround_areas, bins=30)
    ax_surround.set_title('roi surround area distribution')
    # plt.show()

    print('plotting ...')
    colors = pt.random_color(weight_mask_array.shape[0])
    bg = ia.array_nor(np.max(tf.imread(background_file_name), axis=0))

    f_c_bg = plt.figure(figsize=(10, 10))
    ax_c_bg = f_c_bg.add_subplot(111)
    ax_c_bg.imshow(bg, cmap='gray', vmin=0, vmax=0.5, interpolation='nearest')
    f_c_nbg = plt.figure(figsize=(10, 10))
    ax_c_nbg = f_c_nbg.add_subplot(111)
    ax_c_nbg.imshow(np.zeros(bg.shape, dtype=np.uint8), vmin=0, vmax=1,
                    cmap='gray', interpolation='nearest')
    f_s_nbg = plt.figure(figsize=(10, 10))
    ax_s_nbg = f_s_nbg.add_subplot(111)
    ax_s_nbg.imshow(np.zeros(bg.shape, dtype=np.uint8), vmin=0, vmax=1,
                    cmap='gray', interpolation='nearest')

    i = 0
    for mask_ind in range(binary_mask_array.shape[0]):
        pt.plot_mask_borders(binary_mask_array[mask_ind], plotAxis=ax_c_bg, color=colors[i], borderWidth=1)
        pt.plot_mask_borders(binary_mask_array[mask_ind], plotAxis=ax_c_nbg, color=colors[i], borderWidth=1)
        pt.plot_mask_borders(binary_surround_array[mask_ind], plotAxis=ax_s_nbg, color=colors[i], borderWidth=1)
        i += 1

    # plt.show()

    print('saving figures ...')
    pt.save_figure_without_borders(f_c_bg, os.path.join(save_folder, '2P_ROIs_with_background.png'), dpi=300)
    pt.save_figure_without_borders(f_c_nbg, os.path.join(save_folder, '2P_ROIs_without_background.png'), dpi=300)
    pt.save_figure_without_borders(f_s_nbg, os.path.join(save_folder, '2P_ROI_surrounds_background.png'), dpi=300)
    f.savefig(os.path.join(save_folder, 'roi_area_distribution.pdf'), dpi=300)
Example No. 8
def transform_for_deepscope(arr):
    # flip and rotate a channels-last (H x W x C) image into the DeepScope orientation
    arr_r = arr.transpose(2, 0, 1)
    arr_r = ia.rigid_transform(arr_r[:, :, ::-1], rotation=-145).astype(np.uint8)
    arr_r = arr_r.transpose(1, 2, 0)
    return arr_r
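A hedged usage sketch; the input is assumed to be a channels-last (H x W x C) image, for example an RGB camera snapshot, and the file names below are placeholders.

import tifffile as tf

img = tf.imread('snapshot_rgb.tif')  # hypothetical H x W x C image
img_deepscope = transform_for_deepscope(img)
tf.imsave('snapshot_rgb_deepscope.tif', img_deepscope)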
Example No. 9
import os
import numpy as np
import tifffile as tf
import matplotlib.pyplot as plt
import NeuroAnalysisTools.core.ImageAnalysis as ia

data_fn = 'zstack_2p_zoom2_red_aligned.tif'
save_fn = '2018-08-16-M376019-depth-profile-red.png'
start_depth = 50  # micron
step_depth = 2  # micron
pix_size = 0.7  # micron per pixel; sutter scope, zoom2, 512 x 512
resolution = 512

curr_folder = os.path.dirname(os.path.abspath(__file__))
os.chdir(curr_folder)

data = tf.imread(data_fn)
dp = ia.array_nor(np.mean(data, axis=1))

depth_i = np.array(range(0, dp.shape[0], 50))
depth_l = depth_i * step_depth + start_depth

f = plt.figure(figsize=(8, 8))
ax = f.add_subplot(111)
ax.imshow(dp, vmin=0, vmax=1, cmap='magma', aspect=step_depth / pix_size)
ax.set_xticks([0, resolution-1])
ax.set_xticklabels(['0', '{:7.2f}'.format(resolution*pix_size)])
ax.set_yticks(depth_i)
ax.set_yticklabels(depth_l)
ax.set_xlabel('horizontal distance (um)')
ax.set_ylabel('depth (um)')

f.savefig(save_fn, dpi=300)
plt.show()
Example No. 10
import os
import numpy as np
import tifffile as tf
import NeuroAnalysisTools.core.ImageAnalysis as ia

channels = ['DAPI', 'GCaMP', 'mRuby', 'NeuN']
downsample_rate = 0.1

curr_folder = os.path.realpath(os.path.dirname(__file__))
os.chdir(curr_folder)

fns = [f for f in os.listdir(curr_folder) if f[-4:] == '.btf']
fns.sort()
print('\n'.join(fns))

for fn in fns:
    print('\nprocessing {} ...'.format(fn))
    big_img = tf.imread(fn)
    fname = os.path.splitext(fn)[0]
    print('shape: {}'.format(big_img.shape))
    print('dtype: {}'.format(big_img.dtype))

    # comb_img = []

    for chi, chn in enumerate(channels):
        print('\tchannel: {}'.format(chn))
        down_img_ch = ia.rigid_transform_cv2(big_img[chi],
                                             zoom=downsample_rate).astype(
                                                 np.uint16)[::-1, :]
        tf.imsave('thumbnail_{}_{:02d}_{}.tif'.format(fname, chi, chn),
                  down_img_ch)
        # comb_img.append(down_img_ch)

    # comb_img = np.array(comb_img)
    # tf.imsave('{}_downsampled.tif'.format(fname), comb_img)
Example No. 11
def run():
    isSave = True

    filter_sigma = 2.  # 2. for soma, 1. for bouton
    thr_high = 0.0
    thr_low = 0.1

    bg_fn = "corrected_mean_projections.tif"
    save_folder = 'figures'

    curr_folder = os.path.dirname(os.path.realpath(__file__))
    os.chdir(curr_folder)

    data_f = h5py.File('caiman_segmentation_results.hdf5', 'r')
    masks = data_f['masks'][()]
    data_f.close()

    bg = ia.array_nor(np.max(tf.imread(bg_fn), axis=0))

    final_roi_dict = {}

    roi_ind = 0
    for i, mask in enumerate(masks):
        mask_dict = hl.threshold_mask_by_energy(mask,
                                                sigma=filter_sigma,
                                                thr_high=thr_high,
                                                thr_low=thr_low)
        for mask_roi in mask_dict.values():
            final_roi_dict.update({'roi_{:04d}'.format(roi_ind): mask_roi})
            roi_ind += 1

    print('Total number of ROIs:', len(final_roi_dict))

    f = plt.figure(figsize=(15, 8))
    ax1 = f.add_subplot(121)
    ax1.imshow(bg, vmin=0, vmax=0.5, cmap='gray', interpolation='nearest')
    colors1 = pt.random_color(masks.shape[0])
    for i, mask in enumerate(masks):
        pt.plot_mask_borders(mask, plotAxis=ax1, color=colors1[i])
    ax1.set_title('original ROIs')
    ax1.set_axis_off()
    ax2 = f.add_subplot(122)
    ax2.imshow(ia.array_nor(bg),
               vmin=0,
               vmax=0.5,
               cmap='gray',
               interpolation='nearest')
    colors2 = pt.random_color(len(final_roi_dict))
    i = 0
    for roi in final_roi_dict.values():
        pt.plot_mask_borders(roi.get_binary_mask(),
                             plotAxis=ax2,
                             color=colors2[i])
        i = i + 1
    ax2.set_title('filtered ROIs')
    ax2.set_axis_off()
    # plt.show()

    if isSave:

        if not os.path.isdir(save_folder):
            os.makedirs(save_folder)

        f.savefig(os.path.join(save_folder,
                               'caiman_segmentation_filtering.pdf'),
                  dpi=300)

        cell_file = h5py.File('cells.hdf5', 'w')

        i = 0
        for key, value in sorted(final_roi_dict.items()):
            curr_grp = cell_file.create_group('cell{:04d}'.format(i))
            curr_grp.attrs['name'] = key
            value.to_h5_group(curr_grp)
            i += 1

        cell_file.close()
            num_file_to_save = total_movs[ch_i].shape[0] // (frames_per_file *
                                                             td_rate)

            for save_file_id in range(num_file_to_save):
                save_chunk = total_movs[ch_i][save_file_id *
                                              (frames_per_file *
                                               td_rate):(save_file_id + 1) *
                                              (frames_per_file * td_rate)]
                save_path = os.path.join(
                    save_folders[ch_i],
                    '{}_{:05d}_reorged.tif'.format(file_identifier,
                                                   save_ids[ch_i]))
                if td_rate != 1:
                    print('\tdown sampling for {} ...'.format(
                        os.path.split(save_path)[1]))
                    save_chunk = ia.z_downsample(save_chunk,
                                                 downSampleRate=td_rate,
                                                 is_verbose=False)

                print('\tsaving {} ...'.format(os.path.split(save_path)[1]))
                tf.imsave(save_path, save_chunk)
                save_ids[ch_i] = save_ids[ch_i] + 1

            if total_movs[ch_i].shape[0] % (frames_per_file * td_rate) == 0:
                total_movs[ch_i] = None
            else:
                frame_num_left = total_movs[ch_i].shape[0] % (frames_per_file *
                                                              td_rate)
                total_movs[ch_i] = total_movs[ch_i][-frame_num_left:]

print('\nprocessing residual frames ...')
import os
import numpy as np
import tifffile as tf
import NeuroAnalysisTools.MotionCorrection as mc
import NeuroAnalysisTools.core.ImageAnalysis as ia

fn = 'zstack_green_aligned.tif'
scope = 'sutter' # 'sutter' or 'deepscope' or 'scientifica'

curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)

stack = tf.imread(fn)

if scope == 'sutter':
    stack_r = stack.transpose((0, 2, 1))[:, ::-1, :]

elif scope == 'deepscope':
    h_new = int(stack.shape[1] * np.sqrt(2))
    w_new = int(stack.shape[2] * np.sqrt(2))
    stack_r = ia.rigid_transform_cv2(stack, rotation=140, outputShape=(h_new, w_new))[:, :, ::-1]

elif scope == 'scientifica':
    h_new = int(stack.shape[1] * np.sqrt(2))
    w_new = int(stack.shape[2] * np.sqrt(2))
    stack_r = ia.rigid_transform_cv2(stack[:, ::-1, :], rotation=135, outputShape=(h_new, w_new))

else:
    raise LookupError("Do not understand scope type. Should be 'sutter' or 'deepscope' or 'scientifica'.")

tf.imsave(os.path.splitext(fn)[0] + '_rotated.tif', stack_r.astype(stack.dtype))
def downsample_folder(working_folder,
                      td_rate,
                      file_identifier,
                      frames_per_file=500):
    """Temporally downsample all '*_corrected.tif' movies matching file_identifier.

    The original files are moved into a 'not_downsampled' subfolder, the
    concatenated movie is downsampled in time by td_rate, and the result is
    saved back in chunks of frames_per_file frames.
    """
    file_list = [f for f in os.listdir(working_folder) if file_identifier in f and f[-14:] == '_corrected.tif']
    file_list.sort()
    print('\t\tall files:')
    print('\n'.join(['\t\t' + f for f in file_list]))

    print('\n\t\tmoving files to "not_downsampled" folder:')
    file_paths = [os.path.join(working_folder, f) for f in file_list]
    print()

    not_downsampled_folder = os.path.join(working_folder, 'not_downsampled')
    os.mkdir(not_downsampled_folder)
    for file_path in file_paths:
        fn = os.path.split(file_path)[1]
        shutil.move(file_path, os.path.join(not_downsampled_folder, fn))

    file_paths_original = [os.path.join(not_downsampled_folder, fn) for fn in file_list]
    file_paths_original.sort()


    save_id = 0
    total_mov = None
    for file_path_o in file_paths_original:
        print('\t\tprocessing {} ...'.format(os.path.split(file_path_o)[1]))
        curr_mov = tf.imread(file_path_o)

        if total_mov is None:
            total_mov = curr_mov
        else:
            total_mov = np.concatenate((total_mov, curr_mov), axis=0)

        while total_mov is not None and \
                (total_mov.shape[0] >= frames_per_file * td_rate):

            num_file_to_save = total_mov.shape[0] // (frames_per_file * td_rate)

            for save_file_id in range(num_file_to_save):
                save_chunk = total_mov[save_file_id * (frames_per_file * td_rate) :
                                       (save_file_id + 1) * (frames_per_file * td_rate)]
                save_path = os.path.join(working_folder, '{}_{:05d}_corrected_downsampled.tif'.format(file_identifier,
                                                                                                      save_id))
                save_chunk = ia.z_downsample(save_chunk, downSampleRate=td_rate, is_verbose=False)

                print('\t\t\tsaving {} ...'.format(os.path.split(save_path)[1]))
                tf.imsave(save_path, save_chunk)
                save_id = save_id + 1

            if total_mov.shape[0] % (frames_per_file * td_rate) == 0:
                total_mov = None
            else:
                frame_num_left = total_mov.shape[0] % (frames_per_file * td_rate)
                total_mov = total_mov[-frame_num_left:]

    if total_mov is not None:
        save_path = os.path.join(working_folder, '{}_{:05d}_corrected_downsampled.tif'.format(file_identifier, save_id))
        save_chunk = ia.z_downsample(total_mov, downSampleRate=td_rate, is_verbose=False)
        print('\t\t\tsaving {} ...'.format(os.path.split(save_path)[1]))
        tf.imsave(save_path, save_chunk)

    return
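A minimal call sketch for the function above; the folder layout and downsample rate are assumptions, and the file identifier is borrowed from another example on this page.

if __name__ == '__main__':
    downsample_folder(working_folder='plane0/green/corrected',  # assumed plane/channel/corrected layout
                      td_rate=5,  # assumed temporal downsample rate
                      file_identifier='110_LSNDGCUC',
                      frames_per_file=500)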
Example No. 15
                'vasmap_wf_rotated': 'wide field surface vasculature map through cranial window rotated',
                'vasmap_2p_green': '2p surface vasculature map through cranial window green original, zoom1',
                'vasmap_2p_green_rotated': '2p surface vasculature map through cranial window green rotated, zoom1',
                'vasmap_2p_red': '2p surface vasculature map through cranial window red original, zoom1',
                'vasmap_2p_red_rotated': '2p surface vasculature map through cranial window red rotated, zoom1'
                }

curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)

nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0]
nwb_f = nt.RecordedFile(nwb_fn)

for mn, des in vasmap_dict.items():
    try:
        curr_m = ia.array_nor(tf.imread(mn + '.tif'))

        if is_plot:
            f = plt.figure(figsize=(10, 10))
            ax = f.add_subplot(111)
            ax.imshow(curr_m, vmin=0., vmax=1., cmap='gray', interpolation='nearest')
            ax.set_axis_off()
            ax.set_title(mn)
            plt.show()

        print('adding {} to nwb file.'.format(mn))
        nwb_f.add_acquisition_image(mn, curr_m, description=des)

    except Exception as e:
        print(e)
    f_ns.sort()
    print('\n'.join(f_ns))

    mov_join = []
    for f_n in f_ns:
        print('processing plane: {}; file: {} ...'.format(plane_n, f_n))

        curr_mov = tf.imread(os.path.join(plane_folder, f_n))

        if curr_mov.shape[0] % t_downsample_rate != 0:
            print(
                'the frame number of {} ({}) is not divisible by t_downsample_rate ({}).'
                .format(f_n, curr_mov.shape[0], t_downsample_rate))

        curr_mov_d = ia.z_downsample(curr_mov,
                                     downSampleRate=t_downsample_rate,
                                     is_verbose=False)
        mov_join.append(curr_mov_d)

    mov_join = np.concatenate(mov_join, axis=0)

    save_name = '{}_{}_{}_{}_downsampled_for_caiman.hdf5'.format(
        date_recorded, mouse_id, sess_id, plane_n)
    save_f = h5py.File(os.path.join(plane_folder, save_name), 'a')
    save_f.create_dataset('mov', data=mov_join)
    save_f.close()

    # save_name = '{}_d1_{}_d2_{}_d3_1_order_C_frames_{}_.mmap'\
    #     .format(base_name, mov_join.shape[2], mov_join.shape[1], mov_join.shape[0])
    #
    # mov_join = mov_join.reshape((mov_join.shape[0], mov_join.shape[1] * mov_join.shape[2]), order='F').transpose()
    if 'path_list' in offsets_keys:
        offsets_keys.remove('path_list')

    offsets_keys.sort()
    offsets = []
    for offsets_key in offsets_keys:
        offsets.append(offsets_f[offsets_key][()])
    offsets = np.concatenate(offsets, axis=0)
    offsets = np.array(list(zip(offsets[:, 1], offsets[:, 0])))
    offsets_f.close()

    mean_projection = tf.imread(
        os.path.join(plane_n, 'corrected_mean_projection.tif'))
    max_projection = tf.imread(
        os.path.join(plane_n, 'corrected_max_projections.tif'))
    max_projection = ia.array_nor(np.max(max_projection, axis=0))

    input_dict = {
        'field_name': plane_n,
        'original_timeseries_path': '/acquisition/timeseries/2p_movie_plane' + str(i),
        'corrected_file_path': movie_2p_fn,
        'corrected_dataset_path': plane_n,
        'xy_translation_offsets': offsets,
        'mean_projection': mean_projection,
        'max_projection':
Example No. 18
        ax_and_scatter = f.add_subplot(4, 5, 16)
        ax_and_scatter.plot(df_and['rf_{}_on_center_azi'.format(response_dir)],
                            df_and['rf_{}_on_center_alt'.format(response_dir)],
                            '.',
                            color='#ff0000')
        ax_and_scatter.plot(
            df_and['rf_{}_off_center_azi'.format(response_dir)],
            df_and['rf_{}_off_center_alt'.format(response_dir)],
            '.',
            color='#0000ff')
        ax_and_scatter.set_xlim([azi_min, azi_max])
        ax_and_scatter.set_ylim([alt_min, alt_max])

        # =============================pairwise distance=============================================
        dis_or = ia.pairwise_distance(df_or[[
            'rf_{}_onoff_center_azi'.format(response_dir),
            'rf_{}_onoff_center_alt'.format(response_dir)
        ]].values)
        ax_or_pd = f.add_subplot(4, 5, 2)
        if len(dis_or) > 0:
            ax_or_pd.hist(dis_or,
                          range=[0, 80],
                          bins=20,
                          facecolor='#aaaaaa',
                          edgecolor='none')
        ax_or_pd.get_yaxis().set_ticks([])
        ax_or_pd.set_title(
            'pw RF dis')  # pairwise receptive field center distance

        dis_on = ia.pairwise_distance(df_on[[
            'rf_{}_on_center_azi'.format(response_dir),
            'rf_{}_on_center_alt'.format(response_dir)
Example No. 19
big_region = (np.array(thumbnail_region) / thumbnail_d_rate).astype(np.uint64)
print('region in big image: {}'.format(big_region))

thumbnail_fns = [
    f for f in os.listdir(curr_folder) if base_name in f and f[-4:] == '.tif'
]
ch_lst = []
for chn in channels:
    curr_chi = [int(f.split('_')[-2]) for f in thumbnail_fns if chn in f]
    if len(curr_chi) != 1:
        raise LookupError('expected exactly one thumbnail file for channel: {}'.format(chn))
    ch_lst.append(curr_chi[0])

print('channel index list: {}'.format(ch_lst))

print('reading the big image: {}.btf ...'.format(base_name))
big_img = tf.imread(base_name + '.btf')

section_img = []

for ch_i in ch_lst:
    curr_img = big_img[ch_i][::-1, :][big_region[0]:big_region[1],
                                      big_region[2]:big_region[3]]
    curr_img = ia.rigid_transform_cv2(curr_img, zoom=d_rate).astype(np.uint16)
    section_img.append(curr_img)

section_img = np.array(section_img)

print('saving {} ...'.format(save_name))
tf.imsave(save_name, section_img)
Example No. 20
final_offsets_x = np.cumsum(step_offsets[:, 1])
final_offsets = np.array([final_offsets_x, final_offsets_y],
                         dtype=np.float32).transpose()

middle_frame_ind = stack_ref.shape[0] // 2
middle_offsets = final_offsets[middle_frame_ind:middle_frame_ind + 1]
final_offsets = final_offsets - middle_offsets
print('\nfinal offsets:')
print(final_offsets)

print('applying final offsets ...')

for ch in ch_app:

    stack_app = tf.imread('{}_{}.tif'.format(identifier, ch))
    stack_aligned = []

    for step_i in range(stack_app.shape[0]):
        curr_offset = final_offsets[step_i]
        frame = stack_app[step_i]
        frame_aligned = ia.rigid_transform_cv2_2d(frame,
                                                  offset=curr_offset,
                                                  fill_value=0).astype(
                                                      np.float32)
        stack_aligned.append(frame_aligned)

    stack_aligned = np.array(stack_aligned, dtype=np.float32)

    tf.imsave('{}_{}_aligned.tif'.format(identifier, ch), stack_aligned)
    # tf.imsave('{}_{}_max_projection.tif'.format(identifier, ch), np.max(stack_aligned, axis=0))
Example No. 21
def add_rois_and_traces(
        data_folder,
        nwb_f,
        plane_n,
        imaging_depth,
        mov_path='/processing/motion_correction/MotionCorrection'):

    mov_grp = nwb_f.file_pointer[mov_path + '/' + plane_n + '/corrected']

    data_f = h5py.File(os.path.join(data_folder, 'rois_and_traces.hdf5'), 'r')
    mask_arr_c = data_f['masks_center'][()]
    mask_arr_s = data_f['masks_surround'][()]
    traces_center_raw = data_f['traces_center_raw'][()]
    # traces_center_demixed = data_f['traces_center_demixed'][()]
    traces_center_subtracted = data_f['traces_center_subtracted'][()]
    # traces_center_dff = data_f['traces_center_dff'][()]
    traces_surround_raw = data_f['traces_surround_raw'][()]
    neuropil_r = data_f['neuropil_r'][()]
    neuropil_err = data_f['neuropil_err'][()]
    data_f.close()

    if traces_center_raw.shape[1] != mov_grp['num_samples'][()]:
        raise ValueError(
            'number of trace time points ({}) does not match frame number of '
            'corresponding movie ({}).'.format(traces_center_raw.shape[1],
                                               mov_grp['num_samples'][()]))

    # traces_center_raw = traces_center_raw[:, :mov_grp['num_samples'].value]
    # traces_center_subtracted = traces_center_subtracted[:, :mov_grp['num_samples'].value]
    # traces_surround_raw = traces_surround_raw[:, :mov_grp['num_samples'].value]

    rf_img_max = tf.imread(
        os.path.join(data_folder, 'corrected_max_projection.tif'))
    rf_img_mean = tf.imread(
        os.path.join(data_folder, 'corrected_mean_projection.tif'))

    print('adding segmentation results ...')
    rt_mo = nwb_f.create_module('rois_and_traces_' + plane_n)
    rt_mo.set_value('imaging_depth_micron', imaging_depth)
    is_if = rt_mo.create_interface('ImageSegmentation')
    is_if.create_imaging_plane('imaging_plane', description='')
    is_if.add_reference_image('imaging_plane', 'max_projection', rf_img_max)
    is_if.add_reference_image('imaging_plane', 'mean_projection', rf_img_mean)

    for i in range(mask_arr_c.shape[0]):
        curr_cen = mask_arr_c[i]
        curr_cen_n = 'roi_' + ft.int2str(i, 4)
        curr_cen_roi = ia.WeightedROI(curr_cen)
        curr_cen_pixels_yx = curr_cen_roi.get_pixel_array()
        curr_cen_pixels_xy = np.array(
            [curr_cen_pixels_yx[:, 1], curr_cen_pixels_yx[:, 0]]).transpose()
        is_if.add_roi_mask_pixels(image_plane='imaging_plane',
                                  roi_name=curr_cen_n,
                                  desc='',
                                  pixel_list=curr_cen_pixels_xy,
                                  weights=curr_cen_roi.weights,
                                  width=512,
                                  height=512)

        curr_sur = mask_arr_s[i]
        curr_sur_n = 'surround_' + ft.int2str(i, 4)
        curr_sur_roi = ia.ROI(curr_sur)
        curr_sur_pixels_yx = curr_sur_roi.get_pixel_array()
        curr_sur_pixels_xy = np.array(
            [curr_sur_pixels_yx[:, 1], curr_sur_pixels_yx[:, 0]]).transpose()
        is_if.add_roi_mask_pixels(image_plane='imaging_plane',
                                  roi_name=curr_sur_n,
                                  desc='',
                                  pixel_list=curr_sur_pixels_xy,
                                  weights=None,
                                  width=512,
                                  height=512)
    is_if.finalize()

    trace_f_if = rt_mo.create_interface('Fluorescence')
    seg_if_path = '/processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane'
    # print seg_if_path
    ts_path = mov_path + '/' + plane_n + '/corrected'

    print('adding center fluorescence raw')
    trace_raw_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_raw')
    trace_raw_ts.set_data(traces_center_raw,
                          unit='au',
                          conversion=np.nan,
                          resolution=np.nan)
    trace_raw_ts.set_value('data_format', 'roi (row) x time (column)')
    trace_raw_ts.set_value('data_range', '[-8192, 8191]')
    trace_raw_ts.set_description(
        'fluorescence traces extracted from the center region of each roi')
    trace_raw_ts.set_time_as_link(ts_path)
    trace_raw_ts.set_value_as_link('segmentation_interface', seg_if_path)
    roi_names = [
        'roi_' + ft.int2str(ind, 4)
        for ind in range(traces_center_raw.shape[0])
    ]
    trace_raw_ts.set_value('roi_names', roi_names)
    trace_raw_ts.set_value('num_samples', traces_center_raw.shape[1])
    trace_f_if.add_timeseries(trace_raw_ts)
    trace_raw_ts.finalize()

    print('adding neuropil fluorescence raw')
    trace_sur_ts = nwb_f.create_timeseries('RoiResponseSeries',
                                           'f_surround_raw')
    trace_sur_ts.set_data(traces_surround_raw,
                          unit='au',
                          conversion=np.nan,
                          resolution=np.nan)
    trace_sur_ts.set_value('data_format', 'roi (row) x time (column)')
    trace_sur_ts.set_value('data_range', '[-8192, 8191]')
    trace_sur_ts.set_description(
        'neuropil traces extracted from the surround region of each roi')
    trace_sur_ts.set_time_as_link(ts_path)
    trace_sur_ts.set_value_as_link('segmentation_interface', seg_if_path)
    sur_names = [
        'surround_' + ft.int2str(ind, 4)
        for ind in range(traces_center_raw.shape[0])
    ]
    trace_sur_ts.set_value('roi_names', sur_names)
    trace_sur_ts.set_value('num_samples', traces_surround_raw.shape[1])
    trace_f_if.add_timeseries(trace_sur_ts)
    trace_sur_ts.finalize()

    roi_center_n_path = '/processing/rois_and_traces_' + plane_n + '/Fluorescence/f_center_raw/roi_names'
    # print 'adding center fluorescence demixed'
    # trace_demix_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_demixed')
    # trace_demix_ts.set_data(traces_center_demixed, unit='au', conversion=np.nan, resolution=np.nan)
    # trace_demix_ts.set_value('data_format', 'roi (row) x time (column)')
    # trace_demix_ts.set_description('center traces after overlapping demixing for each roi')
    # trace_demix_ts.set_time_as_link(mov_path + '/' + plane_n + '/corrected')
    # trace_demix_ts.set_value_as_link('segmentation_interface', seg_if_path)
    # trace_demix_ts.set_value('roi_names', roi_names)
    # trace_demix_ts.set_value('num_samples', traces_center_demixed.shape[1])
    # trace_f_if.add_timeseries(trace_demix_ts)
    # trace_demix_ts.finalize()

    print('adding center fluorescence after neuropil subtraction')
    trace_sub_ts = nwb_f.create_timeseries('RoiResponseSeries',
                                           'f_center_subtracted')
    trace_sub_ts.set_data(traces_center_subtracted,
                          unit='au',
                          conversion=np.nan,
                          resolution=np.nan)
    trace_sub_ts.set_value('data_format', 'roi (row) x time (column)')
    trace_sub_ts.set_description(
        'center traces after overlap demixing and neuropil subtraction for each roi'
    )
    trace_sub_ts.set_time_as_link(mov_path + '/' + plane_n + '/corrected')
    trace_sub_ts.set_value_as_link('segmentation_interface', seg_if_path)
    trace_sub_ts.set_value_as_link('roi_names', roi_center_n_path)
    trace_sub_ts.set_value('num_samples', traces_center_subtracted.shape[1])
    trace_sub_ts.set_value('r', neuropil_r, dtype='float32')
    trace_sub_ts.set_value('rmse', neuropil_err, dtype='float32')
    trace_sub_ts.set_comments(
        'value "r": neuropil contribution ratio for each roi. '
        'value "rmse": RMS error of neuropil subtraction for each roi')
    trace_f_if.add_timeseries(trace_sub_ts)
    trace_sub_ts.finalize()

    trace_f_if.finalize()

    # print 'adding global dF/F traces for each roi'
    # trace_dff_if = rt_mo.create_interface('DfOverF')
    #
    # trace_dff_ts = nwb_f.create_timeseries('RoiResponseSeries', 'dff_center')
    # trace_dff_ts.set_data(traces_center_dff, unit='au', conversion=np.nan, resolution=np.nan)
    # trace_dff_ts.set_value('data_format', 'roi (row) x time (column)')
    # trace_dff_ts.set_description('global df/f traces for each roi center, input fluorescence is the trace after demixing'
    #                              ' and neuropil subtraction. global df/f is calculated by '
    #                              'allensdk.brain_observatory.dff.compute_dff() function.')
    # trace_dff_ts.set_time_as_link(ts_path)
    # trace_dff_ts.set_value_as_link('segmentation_interface', seg_if_path)
    # trace_dff_ts.set_value('roi_names', roi_names)
    # trace_dff_ts.set_value('num_samples', traces_center_dff.shape[1])
    # trace_dff_if.add_timeseries(trace_dff_ts)
    # trace_dff_ts.finalize()
    # trace_dff_if.finalize()

    rt_mo.finalize()
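A heavily hedged call sketch for add_rois_and_traces. It assumes nwb_f is the same kind of NeuroAnalysisTools nwb wrapper created with nt.RecordedFile in the vasculature-map example above, that the data folder contains 'rois_and_traces.hdf5' plus the projection images, and that the file, folder, and depth values below are placeholders.

nwb_f = nt.RecordedFile('example.nwb')  # hypothetical nwb file; nt as imported elsewhere on this page
add_rois_and_traces(data_folder='plane0',  # hypothetical folder with rois_and_traces.hdf5
                    nwb_f=nwb_f,
                    plane_n='plane0',
                    imaging_depth=150)  # micron, placeholder value
nwb_f.close()  # assumed close method on the wrapper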
filter_sigma = 0.  # parameter only used if filtering the rois
# dilation_iterations = 1. # parameter only used if filtering the rois
cut_thr = 2.5  # lower for more rois, higher for fewer rois

bg_fn = "corrected_mean_projections.tif"
save_folder = 'figures'

curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)

data_f = h5py.File('caiman_segmentation_results.hdf5', 'r')
masks = data_f['masks'][()]
data_f.close()

bg = ia.array_nor(np.max(tf.imread(bg_fn), axis=0))

final_roi_dict = {}

for i, mask in enumerate(masks):

    if is_filter:
        mask_nor = (mask - np.mean(mask.flatten())) / np.abs(
            np.std(mask.flatten()))
        mask_nor_f = ni.filters.gaussian_filter(mask_nor, filter_sigma)
        mask_bin = np.zeros(mask_nor_f.shape, dtype=np.uint8)
        mask_bin[mask_nor_f > cut_thr] = 1

    else:
        mask_bin = np.zeros(mask.shape, dtype=np.uint8)
        mask_bin[mask > 0] = 1
import os
import numpy as np
import tifffile as tf
import matplotlib.pyplot as plt
import NeuroAnalysisTools.core.ImageAnalysis as ia

data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\190822-M471944-deepscope\movie"
identifier = '110_LSNDGCUC'
start_ind = 121228
frame_num = 3

fns = []

for ind in np.arange(frame_num, dtype=int) + start_ind:

    if ind < 100000:
        fns.append('{}_{:05d}_00001.tif'.format(identifier, ind))
    elif ind < 1000000:
        fns.append('{}_{:06d}_00001.tif'.format(identifier, ind))
    elif ind < 10000000:
        fns.append('{}_{:07d}_00001.tif'.format(identifier, ind))

f = plt.figure(figsize=(5, 12))
for frame_i in range(frame_num):
    ax = f.add_subplot(frame_num, 1, frame_i+1)
    ax.imshow(ia.array_nor(tf.imread(os.path.join(data_folder, fns[frame_i]))), cmap='gray',
              vmin=0, vmax=0.5, interpolation='nearest')
    ax.set_title(fns[frame_i])
    ax.set_axis_off()

plt.tight_layout()
plt.show()