Example No. 1
    def export_signals(self, path, fmt='csv', channel=0, signals_label=None):
        """Export extracted signals to a file.

        Parameters
        ----------
        path : str
            The name of the file that will store the exported data.
        fmt : {'csv'}, optional
            The export format. Currently, only 'csv' export is available.
        channel : string or int, optional
            The channel from which to export signals, either an integer
            index or a string in self.channel_names.
        signals_label : str, optional
            The label of the extracted signal set to use. By default,
            the most recently extracted signals are used.
        """
        with open(path, 'wb') as csvfile:
            writer = csv.writer(csvfile, delimiter='\t')

            signals = self.signals(channel)
            if signals_label is None:
                signals_label = most_recent_key(signals)
            rois = signals[signals_label]['rois']

            writer.writerow(['sequence', 'frame'] + [r['id'] for r in rois])
            writer.writerow(['', 'label'] + [r['label'] for r in rois])
            writer.writerow(
                ['', 'tags'] +
                [''.join(t + ',' for t in sorted(r['tags']))[:-1]
                 for r in rois]
            )
            for sequence_idx, sequence in enumerate(
                    signals[signals_label]['raw']):
                for frame_idx, frame in enumerate(sequence.T):
                    writer.writerow([sequence_idx, frame_idx] + frame.tolist())
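A minimal usage sketch for the method above, assuming a dataset has already been saved to disk and its signals have already been extracted; the file names are hypothetical.

import sima

# Hypothetical path: 'example.sima' must be a saved dataset whose signals
# have already been extracted for channel 0.
dataset = sima.ImagingDataset.load('example.sima')

# Writes a tab-delimited file with three header rows (ROI ids, labels, tags)
# followed by one row of raw signal values per frame.
dataset.export_signals('signals.tsv', fmt='csv', channel=0)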
Example No. 2
    def extract(self, rois=None, signal_channel=0, label=None,
                remove_overlap=True, n_processes=None, demix_channel=None,
                save_summary=True):
        """Extracts imaging data from the current dataset using the
        supplied ROIs.

        Parameters
        ----------
        rois : sima.ROI.ROIList, optional
            ROIList of rois to extract
        signal_channel : string or int, optional
            Channel containing the signal to be extracted, either an integer
            index or a name in self.channel_names
        label : string or None, optional
            Text label to describe this extraction, if None defaults to a
            timestamp.
        remove_overlap : bool, optional
            If True, remove any pixels that overlap between masks.
        n_processes : int, optional
            Number of processes to farm out the extraction across. Should be
            at least 1 and at most one less than the number of CPUs in the
            computer. If None, uses half the CPUs.
        demix_channel : string or int, optional
            Channel to demix from the signal channel, either an integer index
            or a name in self.channel_names. If None, do not demix signals.
        save_summary : bool, optional
            If True, additionally save a summary of the extracted ROIs.

        Returns
        -------
        dict of arrays
            Keys: raw, demixed_raw, mean_frame, overlap, signal_channel, rois,
            timestamp

        See also
        --------
        sima.ROI.ROIList

        """

        signal_channel = self._resolve_channel(signal_channel)
        demix_channel = self._resolve_channel(demix_channel)

        if rois is None:
            rois = self.ROIs[most_recent_key(self.ROIs)]
        if rois is None or not len(rois):
            raise Exception('Cannot extract dataset with no ROIs.')
        if self.savedir:
            return save_extracted_signals(
                self, rois, self.savedir, label, signal_channel=signal_channel,
                remove_overlap=remove_overlap, n_processes=n_processes,
                demix_channel=demix_channel, save_summary=save_summary
            )
        else:
            return extract_rois(self, rois, signal_channel, remove_overlap,
                                n_processes, demix_channel)
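A hedged usage sketch for extract, assuming ROIs were previously added to the dataset; the path and the ROI label 'somas' are assumptions, not part of the source.

import sima

dataset = sima.ImagingDataset.load('example.sima')   # hypothetical path
rois = dataset.ROIs['somas']                         # previously added ROIList (assumed label)
signals = dataset.extract(rois, signal_channel=0, label='somas_extraction')
print(signals['raw'][0].shape)                       # (num_ROIs, num_frames) for the first sequence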
Example No. 3
    def extract(self, rois=None, signal_channel=0, label=None,
                remove_overlap=True, n_processes=1, demix_channel=None,
                save_summary=True):
        """Extracts imaging data from the current dataset using the
        supplied ROIs.

        Parameters
        ----------
        rois : sima.ROI.ROIList, optional
            ROIList of rois to extract
        signal_channel : string or int, optional
            Channel containing the signal to be extracted, either an integer
            index or a name in self.channel_names
        label : string or None, optional
            Text label to describe this extraction, if None defaults to a
            timestamp.
        remove_overlap : bool, optional
            If True, remove any pixels that overlap between masks.
        n_processes : int, optional
            Number of processes to farm out the extraction across. Should be
            at least 1 and at most one less than the number of CPUs in the
            computer. Defaults to 1.
        demix_channel : string or int, optional
            Channel to demix from the signal channel, either an integer index
            or a name in self.channel_names. If None, do not demix signals.
        save_summary : bool, optional
            If True, additionally save a summary of the extracted ROIs.

        Returns
        -------
        dict of arrays
            Keys: raw, demixed_raw, mean_frame, overlap, signal_channel, rois,
            timestamp

        See also
        --------
        sima.ROI.ROIList

        """

        signal_channel = self._resolve_channel(signal_channel)
        demix_channel = self._resolve_channel(demix_channel)

        if rois is None:
            rois = self.ROIs[most_recent_key(self.ROIs)]
        if rois is None or not len(rois):
            raise Exception('Cannot extract dataset with no ROIs.')
        if self.savedir:
            return save_extracted_signals(
                self, rois, self.savedir, label, signal_channel=signal_channel,
                remove_overlap=remove_overlap, n_processes=n_processes,
                demix_channel=demix_channel, save_summary=save_summary
            )
        else:
            return extract_rois(self, rois, signal_channel, remove_overlap,
                                n_processes, demix_channel)
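This variant defaults n_processes to 1 rather than half the CPUs, so parallelism must be requested explicitly. A sketch under the assumption that the dataset has named channels 'GCaMP' and 'tdTomato'; integer indices work equally well.

import sima

dataset = sima.ImagingDataset.load('example.sima')       # hypothetical path
signals = dataset.extract(signal_channel='GCaMP',        # assumed channel names
                          demix_channel='tdTomato',
                          n_processes=4,                  # explicit, since the default is 1
                          label='demixed_extraction')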
Example No. 4
    def import_transformed_ROIs(self, source_dataset, source_channel=0,
                                target_channel=0, source_label=None,
                                target_label=None, copy_properties=True):
        """Calculate an affine transformation that maps the source
        ImagingDataset onto this ImagingDataset, transform the source ROIs
        by this mapping, and then import them into this ImagingDataset.

        Parameters
        ----------
        source_dataset : ImagingDataset
            The ImagingDataset object from which ROIs are to be imported.  This
            dataset must be roughly of the same field-of-view as self in order
            to calculate an affine transformation.

        source_channel : string or int, optional
            The channel of the source image from which to calculate an affine
            transformation, either an integer index or a string in
            source_dataset.channel_names.

        target_channel : string or int, optional
            The channel of the target image from which to calculate an affine
            transformation, either an integer index or a string in
            self.channel_names.

        source_label : string, optional
            The label of the ROIList to transform

        target_label : string, optional
            The label to assign the transformed ROIList

        copy_properties : bool, optional
            Copy the label, id, tags, and im_shape properties from the source
            ROIs to the transformed ROIs.
        """

        source_channel = source_dataset._resolve_channel(source_channel)
        target_channel = self._resolve_channel(target_channel)
        source = source_dataset.time_averages[source_channel]
        target = self.time_averages[target_channel]

        transform = affine_transform(source, target)

        src_rois = source_dataset.ROIs
        if source_label is None:
            source_label = most_recent_key(src_rois)
        src_rois = src_rois[source_label]
        transformed_ROIs = src_rois.transform(
            transform, copy_properties=copy_properties)
        self.add_ROIs(transformed_ROIs, label=target_label)
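A usage sketch for importing ROIs between two sessions covering roughly the same field of view; the dataset paths and ROI labels are assumptions.

import sima

day1 = sima.ImagingDataset.load('day1.sima')   # source dataset with an ROIList labeled 'cells' (assumed)
day2 = sima.ImagingDataset.load('day2.sima')   # target dataset (assumed path)

# Estimate an affine transform from day1's time average to day2's, warp the
# 'cells' ROIs, and store them on day2 under a new label.
day2.import_transformed_ROIs(day1, source_channel=0, target_channel=0,
                             source_label='cells', target_label='cells_day1',
                             copy_properties=True)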
Example No. 5
    def export_signals(self, path, fmt="csv", channel=0, signals_label=None):
        """Export extracted signals to a file.

        Parameters
        ----------
        path : str
            The name of the file that will store the exported data.
        fmt : {'csv'}, optional
            The export format. Currently, only 'csv' export is available.
        channel : string or int, optional
            The channel from which to export signals, either an integer
            index or a string in self.channel_names.
        signals_label : str, optional
            The label of the extracted signal set to use. By default,
            the most recently extracted signals are used.

        """

        try:
            csvfile = open(path, "w", newline="")
        except TypeError:  # Python 2
            csvfile = open(path, "wb")
        try:
            try:
                writer = csv.writer(csvfile, delimiter="\t")
            except TypeError:  # Python 2
                writer = csv.writer(csvfile, delimiter=b"\t")
            signals = self.signals(channel)
            if signals_label is None:
                signals_label = most_recent_key(signals)
            rois = signals[signals_label]["rois"]

            writer.writerow(["sequence", "frame"] + [r["id"] for r in rois])
            writer.writerow(["", "label"] + [r["label"] for r in rois])
            writer.writerow(["", "tags"] + ["".join(t + "," for t in sorted(r["tags"]))[:-1] for r in rois])
            for sequence_idx, sequence in enumerate(signals[signals_label]["raw"]):
                for frame_idx, frame in enumerate(sequence.T):
                    writer.writerow([sequence_idx, frame_idx] + frame.tolist())
        finally:
            csvfile.close()
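To make the file layout written above concrete, here is a sketch of reading the export back with the csv module: three header rows (ROI ids, labels, comma-joined tags) followed by one row per frame whose first two columns are the sequence and frame indices. The file name is the hypothetical one used in the export sketch earlier.

import csv

with open('signals.tsv', newline='') as f:       # hypothetical path from the export call
    reader = csv.reader(f, delimiter='\t')
    id_row = next(reader)        # ['sequence', 'frame', <ROI id>, ...]
    label_row = next(reader)     # ['', 'label', <ROI label>, ...]
    tags_row = next(reader)      # ['', 'tags', <comma-joined tags>, ...]
    for row in reader:
        sequence_idx, frame_idx = int(row[0]), int(row[1])
        values = [float(v) for v in row[2:]]     # one value per ROI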
Example No. 6
    def infer_spikes(self, channel=0, label=None, gamma=None,
                     share_gamma=True, mode='correct', verbose=False):
        """Infer the most likely discretized spike train underlying a
        fluorescence trace.

        Parameters
        ----------
        channel : string or int, optional
            The channel to be used for spike inference.
        label : string or None, optional
            Text string indicating the signals from which spikes should be
            inferred. Defaults to the most recently extracted signals.
        gamma : float, optional
            Gamma is 1 - timestep/tau, where tau is the time constant of the
            AR(1) process.  If no value is given, then gamma is estimated from
            the data.
        share_gamma : bool, optional
            Whether to apply the same gamma estimate to all ROIs. Defaults to
            True.
        mode : {'correct', 'robust', 'psd'}, optional
            The method for estimating sigma. The 'robust' method overestimates
            the noise by assuming that gamma = 1. The 'psd' method estimates
            sigma from the PSD of the fluorescence data. Default: 'correct'.
        verbose : bool, optional
            Whether to print status updates. Default: False.

        Returns
        -------
        spikes : ndarray of float
            The inferred normalized spike count at each time-bin.  Values are
            normalized to the maximum value over all time-bins.
            Shape: (num_rois, num_timebins).
        fits : ndarray of float
            The inferred denoised fluorescence signal at each time-bin.
            Shape: (num_rois, num_timebins).
        parameters : dict of (str, ndarray of float)
            Dictionary with values for 'sigma', 'gamma', and 'baseline'.

        Notes
        -----
        We strongly recommend installing MOSEK (www.mosek.com; free for
        academic use) which greatly speeds up the inference.

        References
        ----------
        * Pnevmatikakis et al. 2015. Submitted (arXiv:1409.2903).
        * Machado et al. 2015. Submitted.
        * Vogelstein et al. 2010. Journal of Neurophysiology. 104(6):
          3691-3704.

        """

        channel = self._resolve_channel(channel)

        import sima.spikes
        all_signals = self.signals(channel)
        if label is None:
            label = most_recent_key(all_signals)
        signals = all_signals[label]

        # estimate gamma for all cells
        if mode == "psd":
            if share_gamma:
                mega_trace = np.concatenate(
                    [sigs for sigs in signals['raw'][0]])
                sigma = sima.spikes.estimate_sigma(mega_trace)
                gamma = sima.spikes.estimate_gamma(mega_trace, sigma)
                sigma = [sigma for _ in signals['raw'][0]]
                gamma = [gamma for _ in signals['raw'][0]]
            else:
                sigma = [sima.spikes.estimate_sigma(sigs[0])
                         for sigs in signals['raw'][0]]
                gamma = [sima.spikes.estimate_gamma(sigs[0], sigm)
                         for sigm, sigs in zip(sigma, signals['raw'][0])]
        else:
            gamma = [sima.spikes.estimate_parameters(sigs, gamma, sigma=0)[0]
                     for sigs in zip(*signals['raw'])]
            if share_gamma:
                gamma = np.median(gamma)

            # ensure that gamma is a list, one value per ROI
            if isinstance(gamma, float):
                gamma = [gamma for _ in signals['raw'][0]]

            # estimate sigma values
            sigma = [sima.spikes.estimate_parameters(sigs, g)[1]
                     for g, sigs in zip(gamma, zip(*signals['raw']))]

        # perform spike inference
        spikes, fits, parameters = [], [], []
        for seq_idx, seq_signals in enumerate(signals['raw']):
            spikes.append(np.zeros_like(seq_signals))
            fits.append(np.zeros_like(seq_signals))
            parameters.append(collections.defaultdict(list))
            for i, trace in enumerate(seq_signals):
                spikes[-1][i], fits[-1][i], p = sima.spikes.spike_inference(
                    trace, sigma[i], gamma[i], mode, verbose)
                for k, v in iteritems(p):
                    parameters[-1][k].append(v)
            for v in itervalues(parameters[-1]):
                assert len(v) == len(spikes[-1])
            parameters[-1] = dict(parameters[-1])

        if self.savedir:
            signals['spikes'] = spikes
            signals['spikes_fits'] = fits
            signals['spikes_params'] = parameters
            all_signals[label] = signals

            signals_filename = os.path.join(
                self.savedir,
                'signals_{}.pkl'.format(signals['signal_channel']))

            pickle.dump(all_signals,
                        open(signals_filename, 'wb'), pickle.HIGHEST_PROTOCOL)

        return spikes, fits, parameters
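A sketch of calling infer_spikes on previously extracted signals; the dataset path is an assumption, and the per-sequence indexing below follows the lists built in the code above rather than the flat arrays described in the Returns section.

import sima

dataset = sima.ImagingDataset.load('example.sima')   # hypothetical saved dataset with extracted signals
spikes, fits, parameters = dataset.infer_spikes(channel=0, mode='correct')

print(spikes[0].shape)        # (num_rois, num_timebins) for the first sequence
print(fits[0].shape)          # denoised traces, same shape
print(sorted(parameters[0]))  # expected keys per the docstring: 'baseline', 'gamma', 'sigma'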
Example No. 7
    def extract(self, rois=None, signal_channel=0, label=None,
                remove_overlap=True, n_processes=1, demix_channel=None,
                save_summary=True):
        """Extracts imaging data from the current dataset using the
        supplied ROIs.

        Parameters
        ----------
        rois : sima.ROI.ROIList, optional
            ROIList of rois to extract
        signal_channel : string or int, optional
            Channel containing the signal to be extracted, either an integer
            index or a name in self.channel_names.
        label : string or None, optional
            Text label to describe this extraction, if None defaults to a
            timestamp.
        remove_overlap : bool, optional
            If True, remove any pixels that overlap between masks.
        n_processes : int, optional
            Number of processes to farm out the extraction across. Should be
            at least 1 and at most one less than the number of CPUs in the
            computer. Defaults to 1.
        demix_channel : string or int, optional
            Channel to demix from the signal channel, either an integer index
            or a name in self.channel_names. If None, do not demix signals.
        save_summary : bool, optional
            If True, additionally save a summary of the extracted ROIs.

        Returns
        -------
        signals: dict
            The extracted signals along with parameters and values calculated
            during extraction.
            Contains the following keys:
                raw, demixed_raw : list of arrays
                    The raw/demixed extracted signal. List of length
                    num_sequences, each element is an array of shape
                    (num_ROIs, num_frames).
                mean_frame : array
                    Time-averaged mean frame of entire dataset.
                overlap : tuple of arrays
                    Tuple of (rows, cols) such that zip(*overlap) returns
                    row, col pairs of pixel coordinates that are in more than
                    one mask. Note: coordinates are for the **flattened**
                    image, so 'rows' is always 0s.
                signal_channel : int
                    The index of the channel that was extracted.
                rois : list of dict
                    All the ROIs used for the extraction with the order matched
                    to the order of the rows in 'raw'.
                    See sima.ROI.todict for details of dictionary format.
                timestamp : string
                    Date and time of extraction in '%Y-%m-%d-%Hh%Mm%Ss' format

        See also
        --------
        sima.ROI.ROI
        sima.ROI.ROIList

        """

        signal_channel = self._resolve_channel(signal_channel)
        demix_channel = self._resolve_channel(demix_channel)

        if rois is None:
            rois = self.ROIs[most_recent_key(self.ROIs)]
        if rois is None or not len(rois):
            raise Exception('Cannot extract dataset with no ROIs.')
        if self.savedir:
            return save_extracted_signals(
                self, rois, self.savedir, label, signal_channel=signal_channel,
                remove_overlap=remove_overlap, n_processes=n_processes,
                demix_channel=demix_channel, save_summary=save_summary
            )
        else:
            return extract_rois(self, rois, signal_channel, remove_overlap,
                                n_processes, demix_channel)
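A sketch of inspecting the dictionary documented in the Returns section above; it assumes ROIs have already been added to the dataset and that 'example.sima' is a real saved dataset.

import sima

dataset = sima.ImagingDataset.load('example.sima')   # hypothetical path
signals = dataset.extract(label='demo')              # uses the most recently added ROIs

raw = signals['raw']                                  # list with one (num_ROIs, num_frames) array per sequence
overlap_pixels = list(zip(*signals['overlap']))       # (row, col) pairs in the flattened image
print(signals['timestamp'], len(raw), len(signals['rois']), len(overlap_pixels))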
Example No. 8
    def import_transformed_ROIs(
            self, source_dataset, method='affine', source_channel=0,
            target_channel=0, source_label=None, target_label=None,
            anchor_label=None, copy_properties=True, **method_kwargs):
        """Calculate a transformation that maps the source ImagingDataset onto
        this ImagingDataset, transforms the source ROIs by this mapping,
        and then imports them into this ImagingDataset.

        Parameters
        ----------
        source_dataset : ImagingDataset
            The ImagingDataset object from which ROIs are to be imported.  This
            dataset must be roughly of the same field-of-view as self in order
            to calculate an affine transformation.

        method : string, optional
            Method to use for transform calculation.

        source_channel : string or int, optional
            The channel of the source image from which to calculate an affine
            transformation, either an integer index or a string in
            source_dataset.channel_names.

        target_channel : string or int, optional
            The channel of the target image from which to calculate an affine
            transformation, either an integer index or a string in
            self.channel_names.

        source_label : string, optional
            The label of the ROIList to transform

        target_label : string, optional
            The label to assign the transformed ROIList

        anchor_label : string, optional
            If None, use automatic dataset registration.
            Otherwise, the label of the ROIList that contains a single ROI
            with vertices defining anchor points common to both datasets.

        copy_properties : bool, optional
            Copy the label, id, tags, and im_shape properties from the source
            ROIs to the transformed ROIs.

        **method_kwargs : optional
            Additional arguments can be passed in specific to the particular
            method. For example, 'order' for a polynomial transform estimation.

        """

        source_channel = source_dataset._resolve_channel(source_channel)
        target_channel = self._resolve_channel(target_channel)
        source = source_dataset.time_averages[..., source_channel]
        target = self.time_averages[..., target_channel]

        if anchor_label is None:
            try:
                transforms = [estimate_array_transform(s, t, method=method)
                              for s, t in zip(source, target)]
            except ValueError:
                print('Auto transform not implemented for this method')
                return
        else:
            # Assume one ROI per plane
            transforms = []
            for plane_idx in range(self.frame_shape[0]):
                trg_coords = None
                for roi in self.ROIs[anchor_label]:
                    if roi.coords[0][0, 2] == plane_idx:
                        # Coords is a closed polygon, so the last coord and the
                        # first coord are identical, remove one copy
                        trg_coords = roi.coords[0][:-1, :2]
                        break
                if trg_coords is None:
                    transforms.append(None)
                    continue

                src_coords = None
                for roi in source_dataset.ROIs[anchor_label]:
                    if roi.coords[0][0, 2] == plane_idx:
                        src_coords = roi.coords[0][:-1, :2]
                        break
                if src_coords is None:
                    transforms.append(None)
                    continue

                assert len(src_coords) == len(trg_coords)

                mean_dists = []
                for shift in range(len(src_coords)):
                    points1 = src_coords
                    points2 = np.roll(trg_coords, shift, axis=0)
                    mean_dists.append(
                        np.sum([np.sqrt(np.sum((p1 - p2) ** 2))
                                for p1, p2 in zip(points1, points2)]))
                trg_coords = np.roll(
                    trg_coords, np.argmin(mean_dists), axis=0)

                if method == 'piecewise-affine':

                    whole_frame_transform = estimate_coordinate_transform(
                        src_coords, trg_coords, 'affine')

                    src_additional_coords = [
                        [-50, -50],
                        [-50, source_dataset.frame_shape[1] + 50],
                        [source_dataset.frame_shape[2] + 50, -50],
                        [source_dataset.frame_shape[2] + 50,
                         source_dataset.frame_shape[1] + 50]]
                    trg_additional_coords = whole_frame_transform(
                        src_additional_coords)

                    src_coords = np.vstack((src_coords, src_additional_coords))
                    trg_coords = np.vstack((trg_coords, trg_additional_coords))

                transforms.append(estimate_coordinate_transform(
                    src_coords, trg_coords, method, **method_kwargs))

            transform_check = [t is None for t in transforms]
            assert not all(transform_check)

            if any(transform_check):
                warnings.warn("Z-plane missing transform. Copying from " +
                              "adjacent plane, accuracy not guaranteed")
                # If any planes were missing an anchor set, copy transforms
                # from adjacent planes
                for idx in range(len(transforms) - 1):
                    if transforms[idx + 1] is None:
                        transforms[idx + 1] = transforms[idx]
                for idx in reversed(range(len(transforms) - 1)):
                    if transforms[idx] is None:
                        transforms[idx] = transforms[idx + 1]

        src_rois = source_dataset.ROIs
        if source_label is None:
            source_label = most_recent_key(src_rois)
        src_rois = src_rois[source_label]

        transformed_ROIs = src_rois.transform(
            transforms, im_shape=self.frame_shape[:3],
            copy_properties=copy_properties)
        self.add_ROIs(transformed_ROIs, label=target_label)
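A sketch of the anchor-based path through this method, assuming each dataset contains an ROIList labeled 'anchors' with one anchor polygon per plane marking corresponding landmarks; the labels and dataset paths are assumptions.

import sima

day1 = sima.ImagingDataset.load('day1.sima')    # source dataset (assumed path)
day2 = sima.ImagingDataset.load('day2.sima')    # target dataset (assumed path)

# The anchor points in the 'anchors' ROILists drive a per-plane transform;
# the 'cells' ROIs are then warped and imported under a new label on day2.
day2.import_transformed_ROIs(
    day1, method='affine', source_label='cells', target_label='cells_day1',
    anchor_label='anchors', copy_properties=True)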