Example #1
    def _segment(self, dataset):
        def set_z(roi, z):
            old_mask = roi.mask
            roi.mask = [
                sparse.lil_matrix(old_mask[0].shape, dtype=old_mask[0].dtype)
                for _ in range(z)
            ] + [old_mask[0]]

        rois = ROIList([])
        if isinstance(self.strategy, list):
            if len(self.strategy) != dataset.frame_shape[0]:
                raise Exception('There is not exactly one strategy per plane.')
            iterator = list(
                zip(self.strategy, list(range(dataset.frame_shape[0]))))
        elif isinstance(self.strategy, SegmentationStrategy):
            iterator = list(
                zip(it.repeat(self.strategy),
                    list(range(dataset.frame_shape[0]))))

        for strategy, plane_idx in iterator:
            plane_rois = strategy.segment(dataset[:, :, plane_idx])
            for roi in plane_rois:
                set_z(roi, plane_idx)
            rois.extend(plane_rois)
        return rois
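
A minimal usage sketch for the plane-wise wrapper above, assuming the SIMA 1.x API (PlaneWiseSegmentation and STICA in sima.segment); verify the names against your installed version before relying on this.

import sima
import sima.segment

dataset = sima.ImagingDataset.load('example.sima')  # hypothetical path

# One strategy reused across every plane:
segmenter = sima.segment.PlaneWiseSegmentation(
    sima.segment.STICA(components=5))

# Or one strategy per plane; the list length must equal the number of
# planes, dataset.frame_shape[0], or _segment raises an Exception.
per_plane = [sima.segment.STICA(components=5)
             for _ in range(dataset.frame_shape[0])]
segmenter = sima.segment.PlaneWiseSegmentation(per_plane)

rois = dataset.segment(segmenter, label='plane_wise')
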
Example #2
    def _segment(self, dataset):
        def set_z(roi, z):
            old_mask = roi.mask
            # mutate the ROI in place (the caller below discards any return
            # value) and pass dtype by keyword, since lil_matrix's second
            # positional argument is shape, not dtype
            roi.mask = [
                sparse.lil_matrix(old_mask[0].shape, dtype=old_mask[0].dtype)
                for _ in range(z)
            ] + [old_mask[0]]

        rois = ROIList([])
        for plane in range(dataset.frame_shape[0]):
            plane_rois = self.strategy.segment(dataset[:, :, plane])
            for roi in plane_rois:
                set_z(roi, plane)
            rois.extend(plane_rois)
        return rois
Example #3
    def _segment(self, dataset):

        channel = sima.misc.resolve_channels(self._params['channel'],
                                             dataset.channel_names)
        if dataset.savedir is not None:
            pca_path = os.path.join(dataset.savedir,
                                    'opca_' + str(channel) + '.npz')
        else:
            pca_path = None

        if dataset.savedir is not None:
            ica_path = os.path.join(dataset.savedir,
                                    'ica_' + str(channel) + '.npz')
        else:
            ica_path = None

        if self._params['verbose']:
            print('performing PCA...')
        components = self._params['components']
        if isinstance(components, int):
            components = list(range(components))
        _, space_pcs, time_pcs = oPCA.dataset_opca(
            dataset, channel, components[-1] + 1, path=pca_path)
        space_pcs = np.real(space_pcs)

        if self._params['verbose']:
            print('performing ICA...')
        st_components = _stica(
            space_pcs, time_pcs, mu=self._params['mu'], path=ica_path,
            n_components=space_pcs.shape[-1])

        return ROIList([ROI(st_components[..., i]) for i in
                        range(st_components.shape[-1])])
Example #4
def getRoi():
    ds_path = request.form.get('path')
    label = request.form.get('label')
    roi_id = request.form.get('id')

    dataset = ImagingDataset.load(ds_path)
    try:
        rois = ROIList.load(os.path.join(dataset.savedir, 'rois.pkl'),
                            label=label)
    except Exception:
        return jsonify({})

    for i, roi in enumerate(rois):
        if roi.id == roi_id:
            break

    roi_points = []
    try:
        for i in range(roi.im_shape[0]):
            roi_points.append([])
    except Exception:
        for i in range(np.max(np.array(roi.coords)[:, :, 2])):
            roi_points.append([])
    for poly in roi.polygons:
        coords = np.array(poly.exterior.coords)
        if np.all(coords[-1] == coords[0]):
            coords = coords[:-1]
        plane = int(coords[0, -1])
        coords = coords[:, :2].astype(int).tolist()
        roi_points[plane].append(coords)

    return jsonify({roi.id: {'label': roi.label, 'points': roi_points}})
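
A hypothetical client-side call for the endpoint above; the '/getRoi' route and server address are illustrative assumptions, since the route decorator is not shown.

import requests

resp = requests.post('http://localhost:5000/getRoi',
                     data={'path': '/data/example.sima',
                           'label': 'rois',
                           'id': 'roi_01'})
print(resp.json())  # {roi_id: {'label': ..., 'points': [...per plane...]}}
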
Example #5
def extract(fpath):

    fdir = os.path.split(fpath)[0]
    fname = os.path.splitext(os.path.split(fpath)[1])[0]

    sima_mc_path = os.path.join(fdir, fname + '_mc.sima')

    if not os.path.exists(sima_mc_path):
        raise Exception(
            'Data not motion corrected yet; can\'t extract ROI data')

    rois = ROIList.load(
        os.path.join(fdir, fname + '_RoiSet.zip'),
        fmt='ImageJ')  # load ROIs as sima polygon objects (list)
    dataset = sima.ImagingDataset.load(
        sima_mc_path)  # reload the motion-corrected dataset (path built above)
    dataset.add_ROIs(rois, 'from_ImageJ')
    print('Extracting roi signals from %s' % fdir)
    signals = dataset.extract(rois)
    extracted_signals = np.asarray(
        signals['raw'])  # turn signals list into an np array
    np.save(os.path.join(fdir, fname + '_extractedsignals.npy'),
            extracted_signals)

    print('Done with extracting roi signals')
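
A minimal sketch of driving extract(); the session path is hypothetical. The function expects '<name>_mc.sima' and '<name>_RoiSet.zip' beside the input file and writes '<name>_extractedsignals.npy' next to them.

import numpy as np

extract('/data/session1/movie.tif')
signals = np.load('/data/session1/movie_extractedsignals.npy')
# SIMA's raw signals stack as (cycles, ROIs, frames)
print(signals.shape)
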
Example #6
    def apply(self, rois, dataset=None):
        """ Remove overlapping ROIs

        Parameters
        ----------
        rois : list
            list of sima.ROI ROIs
        percent_overlap : float
            percent of the smaller ROIs total area which must be covered in
            order for the ROIs to be evaluated as overlapping

        Returns
        -------
        rois : list
            A list of sima.ROI ROI objects with the overlapping ROIs combined
        """

        for roi in rois:
            # looks like a no-op, but the assignment goes through ROI.mask's
            # setter, converting polygon-defined ROIs to mask representation
            roi.mask = roi.mask

        for i in range(len(rois)):  # TODO: more efficient strategy
            for j in [j for j in range(len(rois)) if j != i]:
                if rois[i] is not None and rois[j] is not None:
                    overlap = np.logical_and(rois[i].mask.toarray(),
                                             rois[j].mask.toarray())
                    # for a sparse mask, .size is the number of stored
                    # entries, i.e. the ROI's area in pixels
                    small_area = min(rois[i].mask.size, rois[j].mask.size)

                    if len(np.where(overlap)[0]) > \
                            self.percent_overlap * small_area:
                        new_shape = np.logical_or(rois[i].mask.toarray(),
                                                  rois[j].mask.toarray())

                        rois[i] = ROI(mask=new_shape.astype('bool'))
                        rois[j] = None
        return ROIList(roi for roi in rois if roi is not None)
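
The overlap test above, restated on plain boolean masks so the criterion is easy to check by hand (values chosen for illustration):

import numpy as np

a = np.zeros((5, 5), dtype=bool)
a[1:4, 1:4] = True                      # 9-pixel square
b = np.zeros((5, 5), dtype=bool)
b[2:5, 2:5] = True                      # 9-pixel square, shifted diagonally

overlap = np.logical_and(a, b)          # 4 shared pixels
small_area = min(a.sum(), b.sum())      # area of the smaller ROI (9)

percent_overlap = 0.25
if overlap.sum() > percent_overlap * small_area:   # 4 > 2.25 -> merge
    merged = np.logical_or(a, b)
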
Example #7
    def apply(self, rois, dataset=None):
        smoothed, _, _ = SparseROIsFromMasks._find_and_smooth(
            rois, self.static_threshold, self.smooth_size, self.sign_split,
            self.n_processes)

        return ROIList(
            SparseROIsFromMasks._extract_st_rois(smoothed, self.min_size))
Example #8
File: sara.py Project: nk53/SARA
    def apply(self, rois, dataset=None):
        rois_with_ids = []
        for index, roi in enumerate(rois):
            newroi = roi.todict()
            newroi['id'] = index
            rois_with_ids.append(newroi)
        return ROIList(rois_with_ids)
Example #9
File: imaging.py Project: hywujiang/sima
    def ROIs(self):
        try:
            with open(join(self.savedir, 'rois.pkl'), 'rb') as f:
                return {label: ROIList(**v)
                        for label, v in pickle.load(f).items()}
        except (IOError, pickle.UnpicklingError):
            return {}
Example #10
    def test_extract_roi(self):
        # make sure the number of ROIs loaded from the zip matches the
        # number of extracted signals
        rois = ROIList.load(
            os.path.join(self.fdir, self.fname + '_RoiSet.zip'),
            fmt='ImageJ')  # load ROIs as sima polygon objects (list)

        assert self.sig.shape[1] == 11  # CZ manually drew 11 ROIs
        assert len(rois) == 11
Example #11
    def apply(self, rois, dataset):
        channel = sima.misc.resolve_channels(self._channel,
                                             dataset.channel_names)
        processed_im = _processed_image_ca1pc(dataset, channel,
                                              self._x_diameter,
                                              self._y_diameter)[0]
        shape = processed_im.shape[:2]
        ROIs = ROIList([])
        for roi in rois:
            roi_indices = np.nonzero(roi.mask[0])
            roi_indices = np.ravel_multi_index(roi_indices, shape)

            # pixel values in the cut
            vals = processed_im.flat[roi_indices]

            # indices of those values below the otsu threshold
            # if all values are identical, continue without adding an ROI
            try:
                roi_indices = roi_indices[vals < threshold_otsu(vals)]
            except ValueError:
                continue

            # apply binary opening and closing to the surviving pixels
            # expand the shape by 1 in all directions to correct for edge
            # effects of binary opening/closing
            twoD_indices = [np.unravel_index(x, shape) for x in roi_indices]
            mask = np.zeros([x + 2 for x in shape])
            for indices in twoD_indices:
                mask[indices[0] + 1, indices[1] + 1] = 1
            mask = ndimage.binary_closing(ndimage.binary_opening(mask))
            mask = mask[1:-1, 1:-1]
            roi_indices = np.where(mask.flat)[0]

            # label blobs in each cut
            labeled_array, num_features = measurements.label(mask)
            for feat in range(num_features):
                blob_inds = np.where(labeled_array.flat == feat + 1)[0]

                twoD_indices = [np.unravel_index(x, shape) for x in blob_inds]
                mask = np.zeros(shape)
                for x in twoD_indices:
                    mask[x] = 1

                ROIs.append(ROI(mask=mask))

        return ROIs
Example #14
    def apply(self, rois, dataset=None):
        SmoothFunc = _SmoothBoundariesParallel(self.tolerance, self.min_verts)
        if self.n_processes > 1:
            pool = Pool(processes=self.n_processes)
            smooth_rois = pool.map(SmoothFunc, rois)
            pool.close()
        else:
            smooth_rois = list(map(SmoothFunc, rois))

        return ROIList(smooth_rois)
Example #15
    def _rois_from_cuts(cls, cuts):
        """Return ROI structures each containing the full extent of a cut.

        Parameters
        ----------
        cuts : list of sima.normcut.CutRegion
            The segmented regions identified by normalized cuts.

        Returns
        -------
        sima.ROI.ROIList
            ROI structures corresponding to each cut.
        """
        ROIs = ROIList([])
        for cut in cuts:
            if len(cut.indices):
                mask = np.zeros(cut.shape)
                for x in cut.indices:
                    mask[np.unravel_index(x, cut.shape)] = 1
                ROIs.append(ROI(mask=mask))
        return ROIs
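
The flat-index bookkeeping above in isolation; np.unravel_index accepts an array of indices, so the per-pixel loop can also be written vectorized:

import numpy as np

shape = (4, 5)
indices = np.array([0, 6, 12, 18])          # flat pixel indices of one cut
mask = np.zeros(shape)
mask[np.unravel_index(indices, shape)] = 1  # same result as the loop above
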
Example #16
File: segment.py Project: j3tsai/sima
def _rois_from_cuts_full(cuts):
    """Return ROI structures each containing the full extent of a cut.

    Parameters
    ----------
    cuts : list of sima.normcut.CutRegion
        The segmented regions identified by normalized cuts.

    Returns
    -------
    sima.ROI.ROIList
        ROI structures corresponding to each cut.
    """
    ROIs = ROIList([])
    for cut in cuts:
        if len(cut.indices):
            mask = np.zeros(cut.shape)
            for x in cut.indices:
                mask[np.unravel_index(x, cut.shape)] = 1
            ROIs.append(ROI(mask=mask))
    return ROIs
Example #17
    def apply(self, rois, dataset=None):
        if not all(len(r.mask) == 1 for r in rois):
            raise ValueError('SmoothROIBoundaries applies only to 2D ROIs.')

        SmoothFunc = _SmoothBoundariesParallel(self.radius)
        if self.n_processes > 1:
            pool = Pool(processes=self.n_processes)
            smooth_rois = pool.map(SmoothFunc, rois)
            pool.close()
        else:
            smooth_rois = map(SmoothFunc, rois)

        return ROIList([roi[0] for roi in smooth_rois])
Example #18
def extract_rois(args):
    full_path_to_file, individualframename = args
    # Create SIMA Sequence & ImagingDataset objects from image file(s) or
    # motion correction if action == 'extract', assume that motion
    # correction was done on EC2 previously
    if action == 'both':
        try:
            motion_correction((full_path_to_file, individualframename))
        except Exception as e:
            print('Motion correction failed')
            print(e)
            logging.exception('Motion correction failed')

    filename = os.path.splitext(os.path.basename(full_path_to_file))[0]
    dataset = sima.ImagingDataset.load(filename + '_mc.sima')

    # Obtain ROIs
    if to_segment:
        logging.info("Segmenting images for %s..." % filename)

        # Automated segmentation
        # Define segmentation method and post processing.
        segment_approach = sima.segment.PlaneNormalizedCuts()
        segment_approach.append(sima.segment.SparseROIsFromMasks())
        segment_approach.append(sima.segment.SmoothROIBoundaries())
        segment_approach.append(sima.segment.MergeOverlapping(threshold=0.5))

        # Apply segmentation to dataset
        rois = dataset.segment(segment_approach)

        logging.info("Done segmenting images for %s" % filename)

        print("Done segmenting images for %s" % filename)
    else:
        logging.info("Importing ROIs from ImageJ for %s..." % filename)
        print("Importing ROIs from ImageJ for %s..." % filename)

        # Load ROIs from ImageJ
        rois = ROIList.load(filename + '_mc_' + roi_filename, fmt='ImageJ')
        dataset.add_ROIs(rois, 'from_ImageJ')

        logging.info("Done importing ROIs for %s" % filename)
        print("Done importing ROIs for %s" % filename)

    # Extract signals from ROIs into numpy file
    signals = dataset.extract(rois)
    extracted_signals = np.asarray(signals['raw'])
    np.save(filename + '_extractedsignals', extracted_signals)

    logging.info("Done extracting signals")
    print("Done extracting signals")
Example #20
def deleteRoi():
    ds_path = request.form.get('path')
    label = request.form.get('label')
    roi_id = request.form.get('roiId')

    dataset = ImagingDataset.load(ds_path)
    try:
        rois = dataset.ROIs[label]
    except KeyError:
        return jsonify(result='failed to locate ROI list')

    rois = [r for r in rois if r.id != roi_id]
    dataset.add_ROIs(ROIList(rois), label=label)

    return jsonify(result='success')
Example #21
def extract_dff(directory, plot=False, load_signal=False):
	hd, tl = os.path.split(directory)
	if load_signal:
		dff = np.load(os.path.join(hd, 'manual', 'DFF.npy'))
	else:
		dataset = sima.ImagingDataset.load(directory)
		rois = ROIList.load(os.path.join(hd, 'manual', 'RoiSet.zip'), fmt='ImageJ')
		dataset.add_ROIs(rois, 'from_ImageJ')  # this immediately saves the ROIs
		dataset.extract(rois, signal_channel='0', label='0')  # saves to dataset
		signals = dataset.signals(channel='0')['0']

		# signals are in signals['raw']
		# first list index is for each cycle
		# second index is for each roi within a cycle (third is signal at time t)
		## convert each signal into a df/f trace
		k = 30
		baseline_ix = np.array([[0, 90], [240, 320]])
		traces = np.stack(signals['raw'], axis=1)
		dff = np.zeros_like(traces)
		nroi, ncycles, t = traces.shape

		# normalize the signal
		for i, roi in enumerate(traces):
			for j, cycle in enumerate(roi):
				cur_mins = []
				for b in baseline_ix:
					ITI = cycle[b[0]:b[1]]
					sroi = movmean(ITI, k)  # k-sample moving mean
					cur_mins.append(np.min(sroi))
				bsl = np.min(cur_mins)
				roi_df = (cycle - bsl) / bsl
				dff[i,j,:] = roi_df  # rois x cycles x time

				# smooth the dff
				# sroi_df = sp.signal.savgol_filter(roi_df, 29, 3)
				# sroi_df = sp.ndimage.filters.gaussian_filter1d(roi_df,3)
				# dff[i,j,:] = sroi_df  # rois x cycles x time

		np.save(os.path.join(hd, 'manual', 'DFF'), dff)

	# check by plotting (raw traces only exist when freshly extracted)
	if plot:
		plt.subplots()
		if not load_signal:
			plt.plot(traces[0, 0, :], label='raw signal')
		plt.plot(dff[0, 0, :], label='dF/F')
		plt.legend()
		plt.show()
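
The dF/F computation above is (F - F0) / F0, with F0 the minimum of moving-mean-smoothed baseline windows. A self-contained sketch; this movmean is a stand-in, since the helper used above is not shown:

import numpy as np

def movmean(x, k):
    # assumed definition: simple k-sample moving average
    return np.convolve(x, np.ones(k) / k, mode='valid')

cycle = np.random.rand(400) + 1.0             # one ROI, one cycle of F
baseline_ix = np.array([[0, 90], [240, 320]])
k = 30

mins = [np.min(movmean(cycle[b[0]:b[1]], k)) for b in baseline_ix]
bsl = np.min(mins)                            # F0: smoothed baseline minimum
dff = (cycle - bsl) / bsl                     # dF/F
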
Example #22
def selectRoi():
    ds_path = request.form.get('path')
    label = request.form.get('label')
    plane = float(request.form.get('z'))

    point = Point(float(request.form.get('x')), float(request.form.get('y')))

    dataset = ImagingDataset.load(ds_path)
    rois = ROIList.load(os.path.join(dataset.savedir, 'rois.pkl'), label=label)

    for roi in rois:
        for poly in roi.polygons:
            z_coord = np.array(poly.exterior.coords)[0, 2]
            if z_coord == plane or plane == -1:
                if poly.contains(point):
                    return jsonify(label=roi.label, id=roi.id)

    return jsonify({'error': 'roi not found'})
Example #23
File: stica.py Project: zhounapeuw/sima
    def _segment(self, dataset):

        channel = sima.misc.resolve_channels(self._params['channel'],
                                             dataset.channel_names)
        if dataset.savedir is not None:
            pca_path = os.path.join(dataset.savedir,
                                    'opca_' + str(channel) + '.npz')
        else:
            pca_path = None

        if dataset.savedir is not None:
            ica_path = os.path.join(dataset.savedir,
                                    'ica_' + str(channel) + '.npz')
        else:
            ica_path = None

        if self._params['verbose']:
            print('performing PCA...')
        components = self._params['components']
        if isinstance(components, int):
            components = list(range(components))
        _, space_pcs, time_pcs = oPCA.dataset_opca(
            dataset, channel, components[-1] + 1, path=pca_path)
        space_pcs = np.real(space_pcs)

        # Remove components greater than the number of PCs returned
        # in case more components were asked for than the number of
        # independent dimensions in the dataset.
        components = [c for c in components if c < time_pcs.shape[1]]

        if self._params['verbose']:
            print('performing ICA...')
        st_components = _stica(
            space_pcs, time_pcs, mu=self._params['mu'], path=ica_path,
            n_components=space_pcs.shape[-1])

        return ROIList([ROI(st_components[..., i]) for i in
                        range(st_components.shape[-1])])
Example #24
def getRois():
    ds_path = request.form.get('path')
    label = request.form.get('label')

    dataset = ImagingDataset.load(ds_path)
    convertedRois = {}
    rois = ROIList.load(os.path.join(dataset.savedir, 'rois.pkl'), label=label)

    for i, roi in enumerate(rois):
        if roi.label is None:
            roi.label = i
        convertedRois[roi.label] = {}
        for poly in roi.polygons:
            coords = np.array(poly.exterior.coords)
            plane = int(coords[0, -1])
            #coords = list(coords[:,:2].ravel())
            coords = coords[:, :2].tolist()
            try:
                convertedRois[roi.label][plane].append(coords)
            except KeyError:
                convertedRois[roi.label][plane] = [coords]

    return jsonify(**convertedRois)
Example #26
def _remove_overlapping(rois, percent_overlap=0.9):
    """ Remove overlapping ROIs

    Parameters
    ----------
    rois : list
        list of sima.ROI ROIs
    percent_overlap : float
        percent of the smaller ROIs total area which must be covered in order
        for the ROIs to be evaluated as overlapping

    Returns
    -------
    rois : list
        A list of sima.ROI ROI objects with the overlapping ROIs combined
    """

    if 0 < percent_overlap <= 1:
        for roi in rois:
            # assignment goes through ROI.mask's setter, converting
            # polygon-defined ROIs to their mask representation
            roi.mask = roi.mask

        for i in range(len(rois)):
            for j in [j for j in range(len(rois)) if j != i]:
                if rois[i] is not None and rois[j] is not None:
                    overlap = np.logical_and(rois[i].mask.toarray(),
                                             rois[j].mask.toarray())
                    small_area = np.min((rois[i].mask.size, rois[j].mask.size))

                    if len(np.where(overlap)[0]) > \
                            percent_overlap * small_area:
                        new_shape = np.logical_or(rois[i].mask.toarray(),
                                                  rois[j].mask.toarray())

                        rois[i] = ROI(mask=new_shape.astype('bool'),
                                      im_shape=rois[i].mask.shape)
                        rois[j] = None
    return ROIList(roi for roi in rois if roi is not None)
Example #27
def updateRoi():
    ds_path = request.form.get('path')
    label = request.form.get('label')
    points = json.loads(request.form.get('points'))
    roi_label = request.form.get('roiLabel')
    roi_id = request.form.get('roiId')

    dataset = ImagingDataset.load(ds_path)
    roi_data = []
    for i, plane in enumerate(points):
        if plane is None or not len(plane):
            continue
        array_dat = np.array(plane)
        z_dims = i * np.ones((array_dat.shape[:2] + (1, )))
        plane_data = np.concatenate((array_dat, z_dims), axis=2)
        roi_data.extend(list(plane_data))

    if len(roi_data) == 0:
        return jsonify(result="no polygons to save")

    for poly in roi_data:
        if poly.shape[0] < 3:
            raise Exception("unable to store a polygon with fewer than 3 points")
    roi = ROI(polygons=roi_data, im_shape=dataset.frame_shape[:3])

    roi.label = roi_label
    roi.id = roi_id
    try:
        rois = dataset.ROIs[label]
    except KeyError:
        rois = []

    rois = [r for r in rois if r.id != roi_id]
    rois.append(roi)
    dataset.add_ROIs(ROIList(rois), label=label)

    return jsonify(result='success')
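
The 'points' payload consumed above, as inferred from the loop: a JSON array indexed by z-plane, each entry a list of polygons, each polygon a list of [x, y] pairs (all values illustrative):

import json

points = [
    [[[10, 10], [40, 10], [40, 40], [10, 40]]],  # plane 0: one square
    None,                                        # plane 1: nothing to save
    [[[5, 5], [15, 5], [10, 20]]],               # plane 2: one triangle
]
payload = {'path': '/data/example.sima', 'label': 'rois',
           'roiLabel': 'cell_3', 'roiId': 'roi_03',
           'points': json.dumps(points)}
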
Example #28
def setRoiLabel():
    ds_path = request.form.get('path')
    #old_label = request.form.get('oldLabel')
    old_label = ''
    new_label = request.form.get('newLabel')

    if new_label == '':
        new_label = 'rois'

    dataset = ImagingDataset.load(ds_path)
    if (old_label != ''):
        rois = dataset.ROIs[old_label]
    else:
        rois = ROIList([])
    dataset.add_ROIs(rois, label=new_label)

    labels = list(dataset.ROIs.keys())

    labels.extend(
        map(os.path.basename, glob.glob(os.path.join(ds_path, 'ica*.npz'))))
    labels.extend(
        map(os.path.basename, glob.glob(os.path.join(ds_path, 'opca*.npz'))))

    return jsonify({'labels': labels})
Example #29
def ROILoadFromDir(_path, _dir):
    fullRAP_roi_list = []
    for fname in _dir:
        fullRAP_roi_list.append(
            ROIList.load(os.path.join(_path, fname), fmt='ImageJ'))
    return fullRAP_roi_list
Example #30
def process_data(haussio_data, mask=None, p=2, nrois_init=200):
    fn_cnmf = haussio_data.dirname_comp + '_cnmf.mat'

    tiffs_to_cnmf(haussio_data, mask)
    sys.stdout.write('Loading from {0}... '.format(
        haussio_data.dirname_comp + '_Y*.npy'))
    Y = np.load(haussio_data.dirname_comp + '_Y.npy', mmap_mode='r')
    d1, d2, T = Y.shape

    if not os.path.exists(fn_cnmf):

        cse.utilities.stop_server()

        sys.stdout.flush()
        t0 = time.time()
        Yr = np.load(haussio_data.dirname_comp + '_Yr.npy', mmap_mode='r')
        sys.stdout.write('took {0:.2f} s\n'.format(time.time()-t0))

        # how to subdivide the work among processes
        n_pixels_per_process = d1 * d2 // NCPUS

        options = cse.utilities.CNMFSetParms(Y, K=nrois_init, p=p, gSig=[9, 9])
        options['preprocess_params']['n_processes'] = NCPUS
        options['preprocess_params'][
            'n_pixels_per_process'] = n_pixels_per_process
        options['init_params']['nIter'] = 10
        options['init_params']['maxIter'] = 10
        options['init_params']['use_hals'] = False
        options['spatial_params']['n_processes'] = NCPUS
        options['spatial_params'][
            'n_pixels_per_process'] = n_pixels_per_process
        options['temporal_params']['n_processes'] = NCPUS
        options['temporal_params'][
            'n_pixels_per_process'] = n_pixels_per_process

        cse.utilities.start_server(NCPUS)

        t0 = time.time()
        sys.stdout.write("Preprocessing... ")
        sys.stdout.flush()
        Yr, sn, g = cse.preprocess_data(Yr, **options['preprocess_params'])
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))
        # 224.94s

        t0 = time.time()
        sys.stdout.write("Initializing components... ")
        sys.stdout.flush()
        Ain, Cin, b_in, f_in, center = cse.initialize_components(
            Y, **options['init_params'])
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))
        # 2281.37s

        t0 = time.time()
        sys.stdout.write("Updating spatial components... ")
        sys.stdout.flush()
        A, b, Cin = cse.update_spatial_components(
            Yr, Cin, f_in, Ain, sn=sn, **options['spatial_params'])
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))
        # 252.57s

        t0 = time.time()
        sys.stdout.write("Updating temporal components... ")
        sys.stdout.flush()
        C, f, S, bl, c1, neurons_sn, g, YrA = \
            cse.update_temporal_components(
                Yr, A, b, Cin, f_in, bl=None, c1=None, sn=None, g=None,
                **options['temporal_params'])
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))
        # 455.14s

        t0 = time.time()
        sys.stdout.write("Merging ROIs... ")
        sys.stdout.flush()
        A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = \
            cse.merge_components(
                Yr, A, b, C, f, S, sn, options['temporal_params'],
                options['spatial_params'], bl=bl, c1=c1, sn=neurons_sn, g=g,
                thr=0.7, mx=100, fast_merge=True)
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))
        # 702.55s

        t0 = time.time()
        sys.stdout.write("Updating spatial components... ")
        sys.stdout.flush()
        A2, b2, C2 = cse.update_spatial_components(
            Yr, C_m, f, A_m, sn=sn, **options['spatial_params'])
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))
        # 77.16s

        t0 = time.time()
        sys.stdout.write("Updating temporal components... ")
        sys.stdout.flush()
        C2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = \
            cse.update_temporal_components(
                Yr, A2, b2, C2, f, bl=None, c1=None, sn=None, g=None,
                **options['temporal_params'])
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))
        # 483.41s

        # A: spatial components (ROIs)
        # C: denoised [Ca2+]
        # YrA: residuals ("noise")
        # S: Spikes
        savemat(fn_cnmf, {"A": A2, "C": C2, "YrA": YrA, "S": S2, "bl": bl2})
    else:
        resdict = loadmat(fn_cnmf)
        A2 = resdict["A"]
        C2 = resdict["C"]
        YrA = resdict["YrA"]
        S2 = resdict["S"]
        bl2 = resdict["bl"]

    proj_fn = haussio_data.dirname_comp + "_proj.npy"
    if not os.path.exists(proj_fn):
        zproj = utils.zproject(np.transpose(Y, (2, 0, 1)))
        np.save(proj_fn, zproj)
    else:
        zproj = np.load(proj_fn)

    # DF_F, DF = cse.extract_DF_F(Y.reshape(d1*d2, T), A2, C2)

    t0 = time.time()
    sys.stdout.write("Ordering components... ")
    sys.stdout.flush()
    A_or, C_or, srt = cse.order_components(A2, C2)
    sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))

    cse.utilities.stop_server()

    polygons = contour(A2, d1, d2, thr=0.9)
    rois = ROIList([sima.ROI.ROI(polygons=poly) for poly in polygons])

    return rois, C2, haussio_data, zproj, S2, Y, YrA
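
The .mat file written above can be consumed without re-running the pipeline; a minimal sketch (file-name pattern and variable meanings taken from the code and comments above):

from scipy.io import loadmat

res = loadmat('session_cnmf.mat')   # hypothetical '<dirname>_cnmf.mat'
A = res['A']                        # spatial components (ROIs)
C = res['C']                        # denoised [Ca2+] traces
traces = C + res['YrA']             # adding residuals gives raw-like traces
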
Example #31
File: convert.py Project: j3tsai/sima
def _load_version0(path):
    """Load a SIMA 0.x dataset

    Parameters
    ----------
    path : str
        The path to the original saved dataset, ending in .sima

    Examples
    --------

    >>> from sima.misc import example_data
    >>> from sima.misc.convert import _load_version0
    >>> ds = _load_version0(example_data())

    """

    def parse_channel(channel):
        """Parse an old format channel stored a dictionary

        Parameters
        ----------
        channel : dict

        Returns
        -------
        result : sima.Sequence
            A sequence equivalent to the old format channel.
        """
        _resolve_paths(channel, path)
        klass = channel.pop('__class__')
        if klass == 'sima.iterables.MultiPageTIFF':
            result = Sequence.create('TIFF', channel['path'])
            try:
                clip = channel['clip']
            except KeyError:
                pass
            else:
                if clip is not None:
                    s = (slice(None), slice(None)) + tuple(
                        slice(*[None if x == 0 else x for x in dim])
                        for dim in clip)
                    result = result[s]
            return result

        elif klass == 'sima.iterables.HDF5':
            raise Exception('TODO')
        else:
            raise Exception('Format not recognized.')

    def parse_sequence(sequence):
        channels = [parse_channel(c) for c in sequence]
        return Sequence.join(channels)

    with open(os.path.join(path, 'dataset.pkl'), 'rb') as f:
        unpickler = Unpickler(f)
        dataset_dict = unpickler.load()
    iterables = dataset_dict.pop('iterables')
    sequences = [parse_sequence(seq) for seq in iterables]

    # Apply displacements if they exist
    try:
        with open(os.path.join(path, 'displacements.pkl'), 'rb') as f:
            displacements = pkl.load(f)
    except IOError:
        pass
    else:
        assert all(np.all(d >= 0) for d in displacements)
        max_disp = np.max(list(chain(*displacements)), axis=0)
        frame_shape = np.array(sequences[0].shape)[1:]
        frame_shape[1:3] += max_disp
        sequences = [
            s.apply_displacements(d.reshape(s.shape[:3] + (2,)), frame_shape)
            for s, d in zip(sequences, displacements)]
        try:
            trim_coords = dataset_dict.pop('_lazy__trim_coords')
        except KeyError:
            try:
                trim_criterion = dataset_dict.pop('trim_criterion')
            except KeyError:
                pass
            else:
                raise Exception(
                    'Parsing of trim_criterion ' + str(trim_criterion) +
                    ' not yet implemented')
        else:
            sequences = [s[:, :, trim_coords[0][0]:trim_coords[1][0],
                           trim_coords[0][1]:trim_coords[1][1]]
                         for s in sequences]
    ds = ImagingDataset(sequences, None)
    ds.savedir = path

    # Add ROIs if they exist
    try:
        with open(os.path.join(path, 'rois.pkl'), 'rb') as f:
            rois = pkl.load(f)
    except IOError:
        pass
    else:
        roi_lists = {}
        for label, roi_list_dict in rois.items():
            roi_list = []
            for roi in roi_list_dict['rois']:
                mask = roi['mask']
                polygons = roi['polygons']
                if mask is not None:
                    new_roi = ROI(mask=mask)
                else:
                    new_roi = ROI(polygons=polygons)
                new_roi.id = roi['id']
                new_roi.label = roi['label']
                new_roi.tags = roi['tags']
                new_roi.im_shape = roi['im_shape']

                roi_list.append(new_roi)
            roi_lists[label] = ROIList(roi_list)
            roi_lists[label].timestamp = roi_list_dict['timestamp']

        for label, roi_list in roi_lists.items():
            ds.add_ROIs(roi_list, label=label)
    return ds
Example #32
    def apply(self, rois, dataset=None):
        return ROIList([r for r in rois if self._valid(r)])
Example #33
File: stica.py Project: csn92/sima
    def _segment(self, dataset):

        channel = sima.misc.resolve_channels(self._params.channel,
                                             dataset.channel_names)
        if dataset.savedir is not None:
            pca_path = os.path.join(dataset.savedir,
                                    'opca_' + str(channel) + '.npz')
        else:
            pca_path = None

        if dataset.savedir is not None:
            ica_path = os.path.join(dataset.savedir,
                                    'ica_' + str(channel) + '.npz')
        else:
            ica_path = None

        if self._params.verbose:
            print('performing PCA...')
        if isinstance(self._params.components, int):
            self._params.components = range(self._params.components)
        _, space_pcs, time_pcs = _OPCA(dataset,
                                       channel,
                                       self._params.components[-1] + 1,
                                       path=pca_path)
        space_pcs = np.real(
            space_pcs.reshape(dataset.frame_shape[1:3] +
                              (space_pcs.shape[2], )))
        space_pcs = np.array(
            [space_pcs[:, :, i] for i in self._params.components]).transpose(
                (1, 2, 0))
        time_pcs = np.array([time_pcs[:, i]
                             for i in self._params.components]).transpose(
                                 (1, 0))

        if self._params.verbose:
            print('performing ICA...')
        st_components = _stica(space_pcs,
                               time_pcs,
                               mu=self._params.mu,
                               path=ica_path,
                               n_components=space_pcs.shape[2])

        if self._params.x_smoothing > 0 or self._params.static_threshold > 0:
            accepted, _, _ = _find_useful_components(
                st_components,
                self._params.static_threshold,
                x_smoothing=self._params.x_smoothing)

            if self._params.min_area > 0 or self._params.spatial_sep:
                rois = _extract_st_rois(accepted,
                                        min_area=self._params.min_area,
                                        spatial_sep=self._params.spatial_sep)

            if self._params.smooth_rois:
                if self._params.verbose:
                    print('smoothing ROIs...')
                rois = [_smooth_roi(roi)[0] for roi in rois]

            if self._params.verbose:
                print('removing overlapping ROIs...')
            rois = _remove_overlapping(
                rois, percent_overlap=self._params.overlap_per)
        else:
            rois = [
                ROI(st_components[:, :, i])
                for i in range(st_components.shape[2])
            ]

        return ROIList(rois)
Example #34
def process_data_patches(haussio_data,
                         mask=None,
                         p=2,
                         nrois_init=400,
                         roi_iceberg=0.9):
    fn_cnmf = haussio_data.dirname_comp + '_cnmf.mat'

    tiffs_to_cnmf(haussio_data, mask)
    tmpdirname_comp = os.path.join(tempfile.gettempdir(),
                                   haussio_data.dirname_comp)
    try:
        os.makedirs(os.path.dirname(tmpdirname_comp))
    except OSError:
        pass
    sys.stdout.write('Loading from {0}... '.format(tmpdirname_comp +
                                                   '_Y*.npy'))
    Y = np.load(tmpdirname_comp + '_Y.npy', mmap_mode='r')
    d1, d2, T = Y.shape

    if not os.path.exists(fn_cnmf):

        cse.utilities.stop_server()

        sys.stdout.flush()
        t0 = time.time()
        fname_new = get_mmap_name(tmpdirname_comp, d1, d2, T)
        Yr, _, _ = cse.utilities.load_memmap(fname_new)
        sys.stdout.write('took {0:.2f} s\n'.format(time.time() - t0))

        # how to subdivide the work among processes
        n_pixels_per_process = d1 * d2 // NCPUS_PATCHES

        sys.stdout.flush()
        cse.utilities.stop_server()
        cse.utilities.start_server()
        cl = Client()
        dview = cl[:NCPUS_PATCHES]

        rf = int(
            np.ceil(np.sqrt(d1 * d2 / 4 / NCPUS_PATCHES))
        )  # half-size of the patches in pixels. rf=25, patches are 50x50
        sys.stdout.write("Patch size: {0} * {0} = {1}\n".format(
            rf * 2, rf * rf * 4))
        stride = int(rf /
                     5)  # amounpl of overlap between the patches in pixels

        t0 = time.time()
        sys.stdout.write("CNMF patches... ")
        sys.stdout.flush()
        options_patch = cse.utilities.CNMFSetParms(Y,
                                                   NCPUS_PATCHES,
                                                   p=0,
                                                   gSig=[16, 16],
                                                   K=nrois_init //
                                                   NCPUS_PATCHES,
                                                   ssub=1,
                                                   tsub=8,
                                                   thr=0.8)
        A_tot, C_tot, YrA_tot, b, f, sn_tot, opt_out = cse.map_reduce.run_CNMF_patches(
            fname_new, (d1, d2, T),
            options_patch,
            rf=rf,
            stride=stride,
            dview=dview,
            memory_fact=4.0)
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time() - t0))

        options = cse.utilities.CNMFSetParms(Y,
                                             NCPUS_PATCHES,
                                             K=A_tot.shape[-1],
                                             p=p,
                                             gSig=[16, 16],
                                             ssub=1,
                                             tsub=1)
        pix_proc = np.minimum(
            int((d1 * d2) / NCPUS_PATCHES / (T / 2000.)),
            int((d1 * d2) /
                NCPUS_PATCHES))  # regulates the amount of memory used
        options['spatial_params']['n_pixels_per_process'] = pix_proc
        options['temporal_params']['n_pixels_per_process'] = pix_proc

        t0 = time.time()
        sys.stdout.write("Merging ROIs... ")
        sys.stdout.flush()
        A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = \
            cse.merge_components(
                Yr, A_tot, [], np.array(C_tot), [], np.array(C_tot), [],
                options['temporal_params'],
                options['spatial_params'], dview=dview,
                thr=options['merging']['thr'], mx=np.Inf)
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time() - t0))

        options['temporal_params']['p'] = 0
        options['temporal_params'][
            'fudge_factor'] = 0.96  # change if denoised traces' time constant is wrong
        options['temporal_params']['backend'] = 'ipyparallel'

        t0 = time.time()
        sys.stdout.write("Updating temporal components... ")
        sys.stdout.flush()
        C_m, f_m, S_m, bl_m, c1_m, neurons_sn_m, g2_m, YrA_m = \
            cse.temporal.update_temporal_components(
                Yr, A_m, np.atleast_2d(b).T, C_m, f, dview=dview,
                bl=None, c1=None, sn=None, g=None, **options['temporal_params'])
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time() - t0))
        # 483.41s
        # 2016-05-24: 74.81s

        t0 = time.time()
        sys.stdout.write("Evaluating components... ")
        sys.stdout.flush()
        traces = C_m + YrA_m

        Npeaks = 10
        final_frate = 1.0 / haussio_data.dt
        tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
        tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
        fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, sign_sam =\
            cse.utilities.evaluate_components(
                Y, traces, A_m, C_m, b, f_m,
                remove_baseline=True, N=5, robust_std=False,
                Athresh=0.1, Npeaks=Npeaks, tB=tB, tA=tA, thresh_C=0.3)

        idx_components_r = np.where(r_values >= .5)[0]
        idx_components_raw = np.where(fitness_raw < -20)[0]
        idx_components_delta = np.where(fitness_delta < -10)[0]

        idx_components = np.union1d(idx_components_r, idx_components_raw)
        idx_components = np.union1d(idx_components, idx_components_delta)

        A_m = A_m[:, idx_components]
        C_m = C_m[idx_components, :]
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time() - t0))

        t0 = time.time()
        sys.stdout.write("Updating spatial components... ")
        sys.stdout.flush()
        A2, b2, C2 = cse.spatial.update_spatial_components(
            Yr,
            C_m,
            f,
            A_m,
            sn=sn_tot,
            dview=dview,
            **options['spatial_params'])
        sys.stdout.write(' took {0:.2f} s\n'.format(time.time() - t0))
        # 77.16s
        # 2016-05-24: 99.22s

        options['temporal_params']['p'] = p
        options['temporal_params'][
            'fudge_factor'] = 0.96  # change if denoised traces' time constant is wrong
        C2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = \
            cse.temporal.update_temporal_components(
                Yr, A2, b2, C2, f, dview=dview, bl=None, c1=None, sn=None,
                g=None, **options['temporal_params'])

        traces = C2 + YrA
        fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, sign_sam =\
            cse.utilities.evaluate_components(
                Y, traces, A2, C2, b2, f2, remove_baseline=True, N=5,
                robust_std=False, Athresh=0.1, Npeaks=Npeaks, tB=tB,
                tA=tA, thresh_C=0.3)
        idx_components_r = np.where(r_values >= .6)[0]
        idx_components_raw = np.where(fitness_raw < -60)[0]
        idx_components_delta = np.where(fitness_delta < -20)[0]
        idx_components = np.union1d(idx_components_r, idx_components_raw)
        idx_components = np.union1d(idx_components, idx_components_delta)

        A2 = A2.tocsc()[:, idx_components]
        C2 = C2[idx_components, :]
        YrA = YrA[idx_components, :]
        S2 = S2[idx_components, :]

        # A: spatial components (ROIs)
        # C: denoised [Ca2+]
        # YrA: residuals ("noise")
        # S: Spikes
        savemat(fn_cnmf, {"A": A2, "C": C2, "YrA": YrA, "S": S2, "bl": bl2})
    else:
        resdict = loadmat(fn_cnmf)
        A2 = resdict["A"]
        C2 = resdict["C"]
        YrA = resdict["YrA"]
        S2 = resdict["S"]
        bl2 = resdict["bl"]

    proj_fn = haussio_data.dirname_comp + "_proj.npy"
    if not os.path.exists(proj_fn):
        zproj = utils.zproject(np.transpose(Y, (2, 0, 1)))
        np.save(proj_fn, zproj)
    else:
        zproj = np.load(proj_fn)

    # DF_F, DF = cse.extract_DF_F(Y.reshape(d1*d2, T), A2, C2)

    # t0 = time.time()
    # sys.stdout.write("Ordering components... ")
    # sys.stdout.flush()
    # A_or, C_or, srt = cse.order_components(A2, C2)
    # sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))

    cse.utilities.stop_server()

    polygons = contour(A2, d1, d2, thr=roi_iceberg)
    rois = ROIList([sima.ROI.ROI(polygons=poly) for poly in polygons])

    return rois, C2, zproj, S2, Y, YrA
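
The patch geometry above, worked through with illustrative numbers (a 512x512 field and NCPUS_PATCHES = 16, both assumptions for this sketch):

import numpy as np

d1 = d2 = 512
NCPUS_PATCHES = 16   # illustrative; defined elsewhere in the module

rf = int(np.ceil(np.sqrt(d1 * d2 / 4 / NCPUS_PATCHES)))  # half-size -> 64
stride = int(rf / 5)                                     # overlap   -> 12
print(rf * 2, stride)   # 128x128 patches overlapping by 12 pixels
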
Example #35
def process_data(haussio_data,
                 mask=None,
                 p=2,
                 nrois_init=400,
                 roi_iceberg=0.9,
                 merge_unconnected=None):
    if mask is not None:
        raise RuntimeError("mask not supported in cnmf.process_data")

    fn_cnmf = haussio_data.dirname_comp + '_cnmf.mat'
    shapefn = os.path.join(haussio_data.dirname_comp,
                           haussio.THOR_RAW_FN[:-3] + "shape.npy")
    shape = np.load(shapefn)
    if len(shape) == 5:
        d1, d2 = shape[2], shape[3]
    else:
        d1, d2 = shape[1], shape[2]
    fn_mmap = get_mmap_name(haussio_data.dirname_comp + os.path.sep + 'Yr', d1,
                            d2, shape[0])

    tiffs_to_cnmf(haussio_data)
    if os.path.exists(fn_cnmf):
        resdict = loadmat(fn_cnmf)
        if "dFoF" in resdict.keys():
            A2 = resdict["A"]
            C2 = resdict["C"]
            YrA = resdict["YrA"]
            S2 = resdict["S"]
            dFoF = resdict["dFoF"]
            bl2 = resdict["bl"]
            f = resdict["f"]
            images = haussio_data.read_raw().squeeze()
        else:
            dFoF = None
    if not os.path.exists(fn_cnmf) or dFoF is None:
        c, dview, n_processes = cm.cluster.setup_cluster(
            backend='multiprocessing', n_processes=NCPUS, single_thread=False)

        Yr, dims, T = cm.load_memmap(fn_mmap, 'r+')
        d1, d2 = dims
        images = np.reshape(Yr.T, [T] + list(dims), order='F')

        fr = 1.0 / haussio_data.dt  # imaging rate in frames per second
        decay_time = 0.4  # length of a typical transient in seconds

        # parameters for source extraction and deconvolution
        bord_px_els = 32  # maximum shift to be used for trimming against NaNs
        p = 1  # order of the autoregressive system
        gnb = 2  # number of global background components
        merge_thresh = 0.8  # merging threshold, max correlation allowed
        rf = int(
            np.round(np.sqrt(d1 * d2) / nrois_init)
        )  # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
        if rf < 16:
            rf = 16
        stride_cnmf = 6  # amount of overlap between the patches in pixels
        npatches = np.round(d1 / (rf * 2) * d2 / (rf * 2))
        K = nrois_init / npatches  # number of components per patch
        if K < 2:
            K = 2
        print(rf, npatches, K)
        gSig = [8, 8]  # expected half size of neurons
        init_method = 'greedy_roi'  # initialization method (use 'sparse_nmf' for dendritic data)
        is_dendrites = False  # flag for analyzing dendritic data
        alpha_snmf = None  # sparsity penalty for dendritic data analysis through sparse NMF

        # parameters for component evaluation
        min_SNR = 2.5  # signal to noise ratio for accepting a component
        rval_thr = 0.8  # space correlation threshold for accepting a component
        cnn_thr = 0.8  # threshold for CNN based classifier

        cnm = caiman_cnmf.CNMF(n_processes=1,
                               k=K,
                               gSig=gSig,
                               merge_thresh=merge_thresh,
                               p=0,
                               dview=dview,
                               rf=rf,
                               stride=stride_cnmf,
                               memory_fact=1,
                               method_init=init_method,
                               alpha_snmf=alpha_snmf,
                               only_init_patch=False,
                               gnb=gnb,
                               border_pix=bord_px_els)
        cnm = cnm.fit(images)

        idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
            estimate_components_quality_auto(images, cnm.A, cnm.C, cnm.b, cnm.f,
                                             cnm.YrA, fr, decay_time, gSig, dims,
                                             dview = dview, min_SNR=min_SNR,
                                             r_values_min = rval_thr, use_cnn = False,
                                             thresh_cnn_lowest = cnn_thr)
        A_in, C_in, b_in, f_in = cnm.A[:, idx_components], cnm.C[
            idx_components], cnm.b, cnm.f
        cnm2 = caiman_cnmf.CNMF(n_processes=1,
                                k=A_in.shape[-1],
                                gSig=gSig,
                                p=p,
                                dview=dview,
                                merge_thresh=merge_thresh,
                                Ain=A_in,
                                Cin=C_in,
                                b_in=b_in,
                                f_in=f_in,
                                rf=None,
                                stride=None,
                                gnb=gnb,
                                method_deconvolution='oasis',
                                check_nan=True)
        cnm2 = cnm2.fit(images)

        if merge_unconnected is not None:
            idx_merge = []
            for nroi, ca_roi in enumerate(cnm2.C):
                for nroi_compare_counter, ca_roi_compare in enumerate(
                        cnm2.C[nroi + 1:]):
                    nroi_compare = nroi_compare_counter + nroi + 1
                    if nroi_compare not in idx_merge:
                        correls = np.correlate(ca_roi,
                                               ca_roi_compare,
                                               mode='same')
                        correls /= np.sqrt(
                            np.dot(ca_roi, ca_roi) *
                            np.dot(ca_roi_compare, ca_roi_compare))
                        if correls.max() > merge_unconnected:
                            print("Merging ", nroi_compare)
                            idx_merge.append(nroi_compare)
            idx_no_merge = [
                idx for idx in range(cnm2.C.shape[0]) if idx not in idx_merge
            ]
        else:
            idx_no_merge = list(range(cnm2.C.shape[0]))
        A2 = cnm2.A[:, idx_no_merge].tocsc()
        C2 = cnm2.C[idx_no_merge]
        YrA = cnm2.YrA[idx_no_merge]
        S2 = cnm2.S[idx_no_merge]
        dFoF = cnm2.detrend_df_f(frames_window=300)[idx_no_merge]
        # A: spatial components (ROIs)
        # C: denoised [Ca2+]
        # YrA: residuals ("noise", i.e. traces = C+YrA)
        # S: Spikes
        # f: temporal background
        savemat(
            fn_cnmf, {
                "A": A2,
                "C": C2,
                "YrA": YrA,
                "S": S2,
                "dFoF": dFoF,
                "bl": cnm2.b,
                "f": cnm2.f
            })
        dview.terminate()
        cm.stop_server()

    proj_fn = haussio_data.dirname_comp + "_proj.npy"
    if not os.path.exists(proj_fn):
        zproj = utils.zproject(images)
        np.save(proj_fn, zproj)
    else:
        zproj = np.load(proj_fn)

    logfiles = glob.glob("*LOG*")
    for logfile in logfiles:
        try:
            os.unlink(logfile)
        except OSError:
            pass

    polygons = contour(A2, images.shape[1], images.shape[2], thr=roi_iceberg)
    rois = ROIList([sima.ROI.ROI(polygons=poly) for poly in polygons])

    return rois, C2, zproj, S2, images, YrA
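
The merge_unconnected test above is a normalized cross-correlation between calcium traces; the same computation in isolation (traces synthetic, for illustration):

import numpy as np

ca_roi = np.sin(np.linspace(0, 10, 500)) + 1.0
ca_roi_compare = np.roll(ca_roi, 3)      # near-duplicate, slightly shifted

correls = np.correlate(ca_roi, ca_roi_compare, mode='same')
correls /= np.sqrt(np.dot(ca_roi, ca_roi) *
                   np.dot(ca_roi_compare, ca_roi_compare))
print(correls.max())   # ~1.0 -> merged for any threshold below that
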
Example #36
File: segment.py Project: j3tsai/sima
def _rois_from_cuts_ca1pc(cuts, im_set, circularity_threhold=0.5,
                          min_roi_size=20, min_cut_size=30,
                          channel=0, x_diameter=8, y_diameter=8):
    """Return ROI structures containing CA1 pyramidal cell somata.

    Parameters
    ----------
    cuts : list of sima.normcut.CutRegion
        The segmented regions identified by normalized cuts.
    circularity_threhold : float
        ROIs with circularity below threshold are discarded. Default: 0.5.
    min_roi_size : int, optional
        ROIs with fewer than min_roi_size pixels are discarded. Default: 20.
    min_cut_size : int, optional
        No ROIs are made from cuts with fewer than min_cut_size pixels.
        Default: 30.
    channel : int, optional
        The index of the channel to be used.
    x_diameter : int, optional
        The estimated x-diameter of the nuclei in pixels
    y_diameter : int, optional
        The estimated y_diameter of the nuclei in pixels

    Returns
    -------
    sima.ROI.ROIList
        ROI structures each corresponding to a CA1 pyramidal cell soma.
    """

    processed_im = _processed_image_ca1pc(im_set, channel, x_diameter,
                                          y_diameter)
    shape = processed_im.shape[:2]
    ROIs = ROIList([])
    for cut in cuts:
        if len(cut.indices) > min_cut_size:
            # pixel values in the cut
            vals = processed_im.flat[cut.indices]

            # indices of those values below the otsu threshold
            # if all values are identical, continue without adding an ROI
            try:
                roi_indices = cut.indices[vals < threshold_otsu(vals)]
            except ValueError:
                continue

            # apply binary opening and closing to the surviving pixels
            # expand the shape by 1 in all directions to correct for edge
            # effects of binary opening/closing
            twoD_indices = [np.unravel_index(x, shape) for x in roi_indices]
            mask = np.zeros([x + 2 for x in shape])
            for indices in twoD_indices:
                mask[indices[0] + 1, indices[1] + 1] = 1
            mask = ndimage.binary_closing(ndimage.binary_opening(mask))
            mask = mask[1:-1, 1:-1]
            roi_indices = np.where(mask.flat)[0]

            # label blobs in each cut
            labeled_array, num_features = label(mask)
            for feat in range(num_features):
                blob_inds = np.where(labeled_array.flat == feat + 1)[0]

                # Apply min ROI size threshold
                if len(blob_inds) > min_roi_size:
                    twoD_indices = [np.unravel_index(x, shape)
                                    for x in blob_inds]
                    mask = np.zeros(shape)
                    for x in twoD_indices:
                        mask[x] = 1

                    # apply circularity threshold
                    poly_pts = np.array(mask2poly(mask)[0].exterior.coords)
                    p = 0
                    for x in range(len(poly_pts) - 1):
                        p += np.linalg.norm(poly_pts[x] - poly_pts[x + 1])

                    shape_area = len(blob_inds)
                    circle_area = np.square(p) / (4 * np.pi)
                    if shape_area / circle_area > circularity_threshold:
                        ROIs.append(ROI(mask=mask))

    return ROIs
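The circularity test above is the isoperimetric quotient: a blob with area
A and perimeter p is compared against the area p**2 / (4*pi) of a circle
with the same perimeter, so the ratio 4*pi*A / p**2 equals 1 for a perfect
disk and falls toward 0 for elongated shapes. A self-contained sketch of
the same measure (the helper name is illustrative):

import numpy as np

def circularity(area, perimeter):
    # Isoperimetric quotient: area divided by the area of a circle
    # with the same perimeter; 1.0 for a perfect circle.
    circle_area = np.square(perimeter) / (4 * np.pi)
    return area / circle_area

r = 10.0
print(circularity(np.pi * r ** 2, 2 * np.pi * r))  # 1.0 for a disk
print(circularity(100.0, 202.0))                   # ~0.03 for a 1x100 bar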
Example #37
def process_data(haussio_data,
                 mask=None,
                 p=2,
                 nrois_init=400,
                 roi_iceberg=0.9):
    if mask is not None:
        raise RuntimeError("mask not supported in cnmf.process_data")

    fn_cnmf = haussio_data.dirname_comp + '_cnmf.mat'
    shapefn = os.path.join(haussio_data.dirname_comp,
                           haussio.THOR_RAW_FN[:-3] + "shape.npy")
    shape = np.load(shapefn)
    if len(shape) == 5:
        d1, d2 = shape[2], shape[3]
        fn_mmap = get_mmap_name('Yr', shape[2], shape[3], shape[0])
    else:
        d1, d2 = shape[1], shape[2]
        fn_mmap = get_mmap_name('Yr', shape[1], shape[2], shape[0])
    fn_mmap = os.path.join(haussio_data.dirname_comp, fn_mmap)
    print(fn_mmap, os.path.exists(fn_mmap), d1, d2)

    if not os.path.exists(fn_cnmf):
        # fn_raw = os.path.join(haussio_data.dirname_comp, haussio.THOR_RAW_FN)
        fn_sima = haussio_data.dirname_comp + '.sima'
        fnames = [
            fn_sima,
        ]
        fnames.sort()
        print(fnames)

        final_frate = 1.0 / haussio_data.dt
        downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
        final_frate *= downsample_factor

        c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                         n_processes=None,
                                                         single_thread=False)

        idx_xy = None
        base_name = 'Yr'
        name_new = cm.save_memmap_each(fnames,
                                       dview=dview,
                                       base_name=base_name,
                                       resize_fact=(1, 1, downsample_factor),
                                       remove_init=0,
                                       idx_xy=idx_xy)
        name_new.sort()
        print(name_new)

        if len(name_new) > 1:
            fname_new = cm.save_memmap_join(name_new,
                                            base_name='Yr',
                                            n_chunks=12,
                                            dview=dview)
        else:
            sys.stdout.write('One file only, not saving\n')
            fname_new = name_new[0]

        print("fname_new: " + fname_new)

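        # Yr is pixels x time; Y restores the (d1, d2, T) movie in Fortran order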
        Yr, dims, T = cm.load_memmap(fname_new)
        Y = np.reshape(Yr, dims + (T, ), order='F')
        Cn = cm.local_correlations(Y)

        K = nrois_init  # number of neurons expected per patch
        gSig = [15, 15]  # expected half size of neurons
        merge_thresh = 0.8  # merging threshold, max correlation allowed
        # p (order of the autoregressive system) comes from the function argument
        options = caiman_cnmf.utilities.CNMFSetParms(Y,
                                                     NCPUS,
                                                     p=p,
                                                     gSig=gSig,
                                                     K=K,
                                                     ssub=2,
                                                     tsub=2)

        Yr, sn, g, psx = caiman_cnmf.pre_processing.preprocess_data(
            Yr, dview=dview, **options['preprocess_params'])
        Atmp, Ctmp, b_in, f_in, center = caiman_cnmf.initialization.initialize_components(
            Y, **options['init_params'])

        Ain, Cin = Atmp, Ctmp
        A, b, Cin, f_in = caiman_cnmf.spatial.update_spatial_components(
            Yr,
            Cin,
            f_in,
            Ain,
            sn=sn,
            dview=dview,
            **options['spatial_params'])

        options['temporal_params'][
            'p'] = 0  # set this to zero for fast updating without deconvolution
        C, A, b, f, S, bl, c1, neurons_sn, g, YrA = caiman_cnmf.temporal.update_temporal_components(
            Yr,
            A,
            b,
            Cin,
            f_in,
            bl=None,
            c1=None,
            sn=None,
            g=None,
            **options['temporal_params'])

        A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = caiman_cnmf.merging.merge_components(
            Yr,
            A,
            b,
            C,
            f,
            S,
            sn,
            options['temporal_params'],
            options['spatial_params'],
            dview=dview,
            bl=bl,
            c1=c1,
            sn=neurons_sn,
            g=g,
            thr=merge_thresh,
            mx=50,
            fast_merge=True)

        A2, b2, C2, f = caiman_cnmf.spatial.update_spatial_components(
            Yr, C_m, f, A_m, sn=sn, dview=dview, **options['spatial_params'])
        options['temporal_params'][
            'p'] = p  # set it back to original value to perform full deconvolution
        C2, A2, b2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = caiman_cnmf.temporal.update_temporal_components(
            Yr,
            A2,
            b2,
            C2,
            f,
            dview=dview,
            bl=None,
            c1=None,
            sn=None,
            g=None,
            **options['temporal_params'])

        tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
        tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
        Npeaks = 10
        traces = C2 + YrA
        fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = \
            evaluate_components(
                Y, traces, A2, C2, b2, f2, final_frate, remove_baseline=True, N=5,
                robust_std=False, Athresh=0.1, Npeaks=Npeaks, thresh_C=0.3)

        idx_components_r = np.where(r_values >= .6)[0]
        idx_components_raw = np.where(fitness_raw < -60)[0]
        idx_components_delta = np.where(fitness_delta < -20)[0]

        min_radius = gSig[0] - 2
        masks_ws, idx_blobs, idx_non_blobs = extract_binary_masks_blob(
            A2.tocsc(),
            min_radius,
            dims,
            num_std_threshold=1,
            minCircularity=0.6,
            minInertiaRatio=0.2,
            minConvexity=.8)

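        # keep components passing any of the three trace-based tests, then
        # restrict to those that also look like blobs spatially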
        idx_components = np.union1d(idx_components_r, idx_components_raw)
        idx_components = np.union1d(idx_components, idx_components_delta)
        idx_blobs = np.intersect1d(idx_components, idx_blobs)
        idx_components_bad = np.setdiff1d(range(len(traces)), idx_components)

        A2 = A2.tocsc()[:, idx_components]
        C2 = C2[idx_components, :]
        YrA = YrA[idx_components, :]
        S2 = S2[idx_components, :]

        # A: spatial components (ROIs)
        # C: denoised [Ca2+]
        # YrA: residuals ("noise", i.e. traces = C+YrA)
        # S: Spikes
        savemat(fn_cnmf, {"A": A2, "C": C2, "YrA": YrA, "S": S2, "bl": bl2})

    else:
        resdict = loadmat(fn_cnmf)
        A2 = resdict["A"]
        C2 = resdict["C"]
        YrA = resdict["YrA"]
        S2 = resdict["S"]
        bl2 = resdict["bl"]
        Yr, dims, T = cm.load_memmap(fn_mmap)
        dims = dims[1:]
        Y = np.reshape(Yr, dims + (T, ), order='F')

    proj_fn = haussio_data.dirname_comp + "_proj.npy"
    if not os.path.exists(proj_fn):
        zproj = utils.zproject(np.transpose(Y, (2, 0, 1)))
        np.save(proj_fn, zproj)
    else:
        zproj = np.load(proj_fn)

    # DF_F, DF = cse.extract_DF_F(Y.reshape(d1*d2, T), A2, C2)

    # t0 = time.time()
    # sys.stdout.write("Ordering components... ")
    # sys.stdout.flush()
    # A_or, C_or, srt = cse.order_components(A2, C2)
    # sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))

    cm.stop_server()

    polygons = contour(A2, Y.shape[0], Y.shape[1], thr=roi_iceberg)
    rois = ROIList([sima.ROI.ROI(polygons=poly) for poly in polygons])

    return rois, C2, zproj, S2, Y, YrA
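A hedged usage sketch for process_data: it assumes a haussio recording
object exposing dirname_comp (the output file stem) and dt (the frame
interval in seconds), as used in the body above; loading that object is
outside the scope of this example.

# haussio_data is assumed to have been loaded elsewhere (e.g. with the
# haussio package) and to provide .dirname_comp and .dt
rois, C, zproj, S, Y, YrA = process_data(haussio_data,
                                         p=2,
                                         nrois_init=400,
                                         roi_iceberg=0.9)
print("Found {0} ROIs".format(len(rois)))
traces = C + YrA  # raw traces = denoised Ca2+ plus residuals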