Example #1
    def setup(self):
        global tmp_dir

        path = example_hdf5()
        seq = Sequence.create('HDF5', path, 'yxt')
        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.ds = ImagingDataset([seq, seq], self.filepath)

        self.filepath_tiffs = os.path.join(tmp_dir, "test_dataset_tiffs.sima")
        seq = Sequence.create(
            'TIFFs', [[example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()]])
        self.ds_tiffs = ImagingDataset([seq, seq], self.filepath_tiffs)
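
For orientation, here is a minimal, self-contained sketch of building the same kind of dataset outside the test fixture. It assumes the example_hdf5 helper is importable from sima.misc (as example_data is in Example #5's doctest below); the temporary directory and dataset name are placeholders.

import os
import tempfile

from sima import ImagingDataset, Sequence
from sima.misc import example_hdf5  # assumed location of the helper used above

tmp_dir = tempfile.mkdtemp()
seq = Sequence.create('HDF5', example_hdf5(), 'yxt')  # y, x, time axes
ds = ImagingDataset([seq], os.path.join(tmp_dir, 'demo.sima'))
print(ds.num_sequences, ds.num_frames, ds.frame_shape)
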
Example #2
File: test_imaging.py  Project: csn92/sima
    def setup(self):
        global tmp_dir

        path = example_hdf5()
        seq = Sequence.create('HDF5', path, 'yxt')
        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.ds = ImagingDataset([seq, seq], self.filepath)
Example #3
File: test_imaging.py  Project: csn92/sima
def test_imaging_dataset_3d():
    global tmp_dir

    path = example_hdf5()
    seq = Sequence.create('HDF5', path, 'yxt')

    filepath = os.path.join(tmp_dir, "test_imaging_dataset_3d.sima")
    ds = ImagingDataset([seq, seq], filepath)
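    # The dataset has 2 sequences of 40 frames; frame_shape is (planes, rows, columns, channels).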
    assert_equal((ds.num_sequences, ) + (ds.num_frames, ) + ds.frame_shape,
                 (2, 40, 1, 128, 256, 1))
Example #4
File: sara.py  Project: nk53/SARA
    def segment(self, use_settings=False):
        """Performs Spatiotemporal Independent Component Analysis.
    
    Currently only has options to use :class:`sima.segment.STICA`. User is
    prompted for parameters necessary to perform stICA. If *use_settings*
    is True, the settings from :data:`settings_file` are used instead.
    
    Args:
      use_settings (bool, optional): Whether to use the settings stored in
        :data:`settings_file`. If False, user is prompted for settings.
    
    """
        if use_settings:
            components = int(self.settings['components'])
            mu = float(self.settings['mu'])
            overlap_per = float(self.settings['overlap_per'])
        else:
            if self.sequence is None:
                prompt = "File path to the image you want to segment (TIFF only): "
                input_path = self.getTIFF(prompt)
                self.sequence = Sequence.create('TIFF', input_path)
                self.dataset = ImagingDataset([self.sequence], self.sima_dir)
            prompt = "Number of PCA components (default 50): "
            components = self.getNatural(prompt, default=50)
            prompt = "mu (default 0.5): "
            mu = -1.0
            while mu < 0 or mu > 1:
                mu = self.getFloat(prompt, default=0.5)
            prompt = "Minimum overlap " + \
                     "(default 20%; enter 0 to skip): "
            overlap_per = self.getPercent(prompt, default=0.2)
        segment_settings = {
            'components': components,
            'mu': mu,
            'overlap_per': overlap_per,
        }
        print "Performing Spatiotemporal Independent Component Analysis..."
        stdout.flush()
        stica = STICA(**segment_settings)
        stica.append(IdROIs())
        if self.dataset is None:
            self.dataset = ImagingDataset.load(self.sima_dir)
        self.rois = self.dataset.segment(stica, label="stICA ROIs")
        print(len(self.dataset.ROIs['stICA ROIs']), "ROIs found")

        if not use_settings:
            segment_settings['segmentation_strategy'] = 'stICA'
            self._updateSettingsFile(segment_settings)
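
The same flow, stripped of the interactive prompts, reduces to a short use of sima's segmentation API. The sketch below uses only calls shown above; the dataset path and parameter values are illustrative, and the project-specific IdROIs post-processing step is omitted.

from sima import ImagingDataset
from sima.segment import STICA

# Illustrative path and parameters; adjust to your data.
dataset = ImagingDataset.load('example.sima')
stica = STICA(components=50, mu=0.5, overlap_per=0.2)
rois = dataset.segment(stica, label='stICA ROIs')
print(len(dataset.ROIs['stICA ROIs']), 'ROIs found')
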
Example #5
File: convert.py  Project: zhounapeuw/sima
def _load_version0(path):
    """Returns a v1 dataset converted from a v0 dataset

    Parameters
    ----------
    path : str
        The path (ending in .sima) of the version 0.x dataset.

    Examples
    --------

    >>> from sima.misc import example_data
    >>> from sima.misc.convert import _load_version0
    >>> ds = _load_version0(example_data())
    """
    def parse_channel(channel):
        """Parse an old format channel stored a dictionary

        Parameters
        ----------
        channel : dict

        Returns
        -------
        result : sima.Sequence
            A sequence equivalent to the old format channel.
        """
        _resolve_paths(channel, path)
        klass = channel.pop('__class__')
        if klass == 'sima.iterables.MultiPageTIFF':
            result = Sequence.create('TIFF', channel['path'])
            try:
                clip = channel['clip']
            except KeyError:
                clip = None
            if clip is not None:
                s = (slice(None), slice(None)) + tuple(
                    slice(*[None if x == 0 else x for x in dim])
                    for dim in clip)
                result = result[s]
        elif klass == 'sima.iterables.HDF5':
            result = Sequence.create('HDF5', channel['path'],
                                     channel['dim_order'], channel['group'],
                                     channel['key'])
            c = channel['dim_order'].index('c')
            chan = channel['channel']
            s = tuple([
                slice(None) if x != c else slice(chan, chan + 1)
                for x in range(len(channel['dim_order']))
            ])
            result = result[s]
            try:
                clip = channel['clip']
            except KeyError:
                clip = None
            if clip is not None:
                s = (slice(None), slice(None)) + tuple(
                    slice(*[None if x == 0 else x for x in dim])
                    for dim in clip) + (slice(None), )
                result = result[s]
        else:
            raise Exception('Format not recognized.')
        return result

    def parse_sequence(sequence):
        channels = [parse_channel(c) for c in sequence]
        return Sequence.join(*channels)

    with open(os.path.join(path, 'dataset.pkl'), 'rb') as f:
        unpickler = Unpickler(f)
        dataset_dict = unpickler.load()
    iterables = dataset_dict.pop('iterables')
    sequences = [parse_sequence(seq) for seq in iterables]

    # Apply displacements if they exist
    try:
        with open(os.path.join(path, 'displacements.pkl'), 'rb') as f:
            displacements = pkl.load(f)
    except IOError:
        pass
    else:
        assert all(np.all(d >= 0) for d in displacements)
        max_disp = np.nanmax(
            [np.nanmax(d.reshape(-1, d.shape[-1]), 0) for d in displacements],
            0)
        frame_shape = np.array(sequences[0].shape)[1:-1]  # z, y, x
        frame_shape[1:3] += max_disp
        sequences = [
            s.apply_displacements(d.reshape(s.shape[:3] + (2, )), frame_shape)
            for s, d in zip(sequences, displacements)
        ]
        try:
            trim_coords = dataset_dict.pop('_lazy__trim_coords')
        except KeyError:
            try:
                trim_criterion = dataset_dict.pop('trim_criterion')
            except KeyError:
                pass
            else:
                raise Exception('Parsing of trim_criterion ' +
                                str(trim_criterion) + ' not yet implemented')
        else:
            sequences = [
                s[:, :, trim_coords[0][0]:trim_coords[1][0],
                  trim_coords[0][1]:trim_coords[1][1]] for s in sequences
            ]
    ds = ImagingDataset(sequences, None)
    # Not making it read-only. If you set a savedir, you'll be asked about
    # overwriting it then
    ds._channel_names = [str(n) for n in dataset_dict.pop('channel_names')]
    ds._savedir = path
    return ds
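
To make the clip handling above concrete, here is a small, hypothetical illustration of the clip-to-slice conversion used for MultiPageTIFF channels; the clip value itself is made up.

# Hypothetical v0 clip: trim 10 rows from each edge, keep all columns.
clip = ((10, -10), (0, 0))
s = (slice(None), slice(None)) + tuple(
    slice(*[None if x == 0 else x for x in dim]) for dim in clip)
# s leaves the frame and plane axes untouched, slices rows 10:-10, and keeps
# every column (and, implicitly, every channel); result[s] applies the trim.
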
Example #6
File: resonant.py  Project: csn92/sima
def resonant_motion_correction(sequences,
                               save_dir,
                               channel_names,
                               correction_channels,
                               num_states_retained,
                               max_displacement,
                               trim_criterion,
                               dims=2):
    """HMM motion correction of data acquired with resonant scanning.

    Parameters
    ----------
    sequences, save_dir, channel_names, correction_channels,
    num_states_retained, max_displacement, trim_criterion
        See __init__() and correct() methods of HiddenMarkov2D.
    dims : (2, 3), optional
        Whether to correct for 2- or 3-dimensional displacements.
        Default: 2.

    Returns
    -------
    dataset : sima.ImagingDataset
        The motion corrected dataset.
    """

    tmp_savedir = save_dir + '.tmp.sima'

    if dims == 2:
        hmm = HiddenMarkov2D(n_processes=4,
                             verbose=False,
                             num_states_retained=num_states_retained,
                             max_displacement=(max_displacement[0] / 2,
                                               max_displacement[1]))
    elif dims == 3:
        hmm = HiddenMarkov3D(
            granularity=(3, 8),
            n_processes=4,
            verbose=False,
            num_states_retained=num_states_retained,
            max_displacement=((max_displacement[0] / 2, ) +
                              max_displacement[1:]),
        )

    # Motion correction of the even rows
    sliced_set = hmm.correct([seq[:, :, ::2] for seq in sequences],
                             tmp_savedir, channel_names, correction_channels)

    # corrected_sequences = []
    displacements = []
    for seq_idx, sequence in enumerate(sequences):
        # Repeat the displacements for all rows and multiply y-shifts by 2
        disps = sliced_set.sequences[seq_idx]._base.displacements
        disps = np.repeat(disps, 2, axis=2)
        disps[:, :, :, 0] *= 2

        # Subtract off the phase offset from every other line
        displacements.append(disps)

    displacements = MotionEstimationStrategy._make_nonnegative(displacements)

    disp_dim = displacements[0].shape[-1]
    max_disp = np.max(list(
        it.chain.from_iterable(d.reshape(-1, disp_dim)
                               for d in displacements)),
                      axis=0)
    raw_shape = np.array(sequences[0].shape)[1:-1]  # (z, y, x)

    if len(max_disp) == 2:  # if 2D displacements
        max_disp = np.array([0, max_disp[0], max_disp[1]])

    corrected_shape = raw_shape + max_disp

    corrected_sequences = [
        s.apply_displacements(d, corrected_shape)
        for s, d in zip(sequences, displacements)
    ]

    planes, rows, columns = _trim_coords(trim_criterion, displacements,
                                         raw_shape, corrected_shape)

    corrected_sequences = [
        sequence[:, planes, rows, columns] for sequence in corrected_sequences
    ]

    # Save full corrected dataset and remove tempdir
    imSet = ImagingDataset(corrected_sequences,
                           save_dir,
                           channel_names=channel_names)
    shutil.rmtree(tmp_savedir)

    return imSet
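
A hedged sketch of how this helper might be invoked; every argument value below is a placeholder, and the sequence would come from Sequence.create(...) as in the earlier examples.

# Illustrative call with placeholder values; see the docstring above and the
# HiddenMarkov2D documentation for the meaning of each argument.
dataset = resonant_motion_correction(
    sequences=[seq],
    save_dir='corrected.sima',
    channel_names=['GCaMP'],
    correction_channels=['GCaMP'],
    num_states_retained=50,
    max_displacement=[20, 30],
    trim_criterion=0.95,
    dims=2,
)
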
Example #7
    def setup(self):
        global tmp_dir

        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.tiff_ds = ImagingDataset(
            [Sequence.create('TIFF', example_tiff(), 1, 1)], self.filepath)
Example #8
    argParser.add_argument(
        "directory",
        action="store",
        type=str,
        help="Target directory to search for 0.x SIMA folders")
    argParser.add_argument(
        "-b",
        "--backup",
        action="store_true",
        help="Old dataset.pkl will be backed-up before conversion")
    args = argParser.parse_args()

    for directory, folders, files in os.walk(args.directory):
        if directory.endswith('.sima'):
            try:
                dataset = ImagingDataset(None, directory)
            except ImportError as error:
                if error.args[0] != 'No module named iterables':
                    raise error
                # Possibly old SIMA directory, attempt convert
                if args.backup:
                    date_stamp = datetime.date.today().strftime('%Y%m%d')
                    shutil.copyfile(
                        os.path.join(directory, 'dataset.pkl'),
                        os.path.join(directory,
                                     'dataset.pkl.{}.bak'.format(date_stamp)))
                try:
                    # Convert and overwrite the current dataset
                    _0_to_1(directory, directory)
                except:
                    # Convert failed, possibly bad SIMA directory or path