Example #1
    def setup(self):
        global tmp_dir

        path = example_hdf5()
        seq = Sequence.create('HDF5', path, 'yxt')
        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.ds = ImagingDataset([seq, seq], self.filepath)

        self.filepath_tiffs = os.path.join(tmp_dir, "test_dataset_tiffs.sima")
        seq = Sequence.create(
            'TIFFs', [[example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()]])
        self.ds_tiffs = ImagingDataset([seq, seq], self.filepath_tiffs)
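The `.sima` directory written by this setup can be reopened later; a minimal sketch, assuming the dataset was saved as above:

    loaded = ImagingDataset.load(self.filepath)   # same .sima path as above
    assert loaded.num_sequences == 2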
Example #2
    def setup(self):
        for frame in Sequence.create('HDF5', example_hdf5(), 'yxt'):
            break
        frame_shifts = [np.array([[[0, 0]], [[-5, -10]]])]
        self.frame_shifts = [np.array([[[5, 10]], [[0, 0]]])]
        self.correlations = [np.array([[1], [0.9301478]])]

        shifted = frame.copy()
        shifted = np.roll(shifted, -frame_shifts[0][1, 0, 1], axis=2)
        shifted = np.roll(shifted, -frame_shifts[0][1, 0, 0], axis=1)
        frames = np.array([frame, shifted])

        self.hm2d = hmm.HiddenMarkov2D(n_processes=1, verbose=False)
        self.dataset = sima.ImagingDataset(
            [Sequence.create('ndarray', frames)], None)
Example #3
File: test__hmm.py Project: asaich/sima
    def setup(self):
        for frame in Sequence.create('HDF5', example_hdf5(), 'yxt'):
            break
        frame_shifts = [np.array([[[0, 0]], [[-5, -10]]])]
        self.frame_shifts = [np.array([[[5, 10]], [[0, 0]]])]
        self.correlations = [np.array([[1], [0.9301478]])]

        shifted = frame.copy()
        shifted = np.roll(shifted, -frame_shifts[0][1, 0, 1], axis=2)
        shifted = np.roll(shifted, -frame_shifts[0][1, 0, 0], axis=1)
        frames = np.array([frame, shifted])

        self.hm2d = hmm.HiddenMarkov2D(n_processes=1, verbose=False)
        self.dataset = sima.ImagingDataset(
            [Sequence.create('ndarray', frames)], None)
Example #4
File: convert.py Project: zhounapeuw/sima
    def parse_channel(channel):
        """Parse an old format channel stored a dictionary

        Parameters
        ----------
        channel : dict

        Returns
        -------
        result : sima.Sequence
            A sequence equivalent to the old format channel.
        """
        _resolve_paths(channel, path)
        klass = channel.pop('__class__')
        if klass == 'sima.iterables.MultiPageTIFF':
            result = Sequence.create('TIFF', channel['path'])
            try:
                clip = channel['clip']
            except KeyError:
                clip = None
            if clip is not None:
                s = (slice(None), slice(None)) + tuple(
                    slice(*[None if x == 0 else x for x in dim])
                    for dim in clip)
                result = result[s]
        elif klass == 'sima.iterables.HDF5':
            result = Sequence.create('HDF5', channel['path'],
                                     channel['dim_order'], channel['group'],
                                     channel['key'])
            c = channel['dim_order'].index('c')
            chan = channel['channel']
            s = tuple([
                slice(None) if x != c else slice(chan, chan + 1)
                for x in range(len(channel['dim_order']))
            ])
            result = result[s]
            try:
                clip = channel['clip']
            except KeyError:
                clip = None
            if clip is not None:
                s = (slice(None), slice(None)) + tuple(
                    slice(*[None if x == 0 else x for x in dim])
                    for dim in clip) + (slice(None), )
                result = result[s]
        else:
            raise Exception('Format not recognized.')
        return result
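For reference, a minimal sketch of how the `clip` entry becomes a slice tuple, assuming a clip of the form `[[y_start, y_stop], [x_start, x_stop]]` as the code above implies (the values are hypothetical):

    clip = [[10, -10], [5, -5]]
    s = (slice(None), slice(None)) + tuple(
        slice(*[None if x == 0 else x for x in dim]) for dim in clip)
    # s == (slice(None), slice(None), slice(10, -10), slice(5, -5));
    # indexing result[s] trims the y and x borders of every frame and plane.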
Example #5
File: convert.py Project: j3tsai/sima
    def parse_channel(channel):
        """Parse an old format channel stored a dictionary

        Parameters
        ----------
        channel : dict

        Returns
        -------
        result : sima.Sequence
            A sequence equivalent to the old format channel.
        """
        _resolve_paths(channel, path)
        klass = channel.pop('__class__')
        if klass == 'sima.iterables.MultiPageTIFF':
            result = Sequence.create('TIFF', channel['path'])
            try:
                clip = channel['clip']
            except KeyError:
                pass
            else:
                if clip is not None:
                    s = (slice(None), slice(None)) + tuple(
                        slice(*[None if x == 0 else x for x in dim])
                        for dim in clip)
                    result = result[s]
            return result

        elif klass == 'sima.iterables.HDF5':
            raise Exception('TODO')
        else:
            raise Exception('Format not recognized.')
Example #6
    def setup(self):
        global tmp_dir

        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.tiff_ds = ImagingDataset(
            [Sequence.create('TIFF', example_tiff(), 1, 1)],
            self.filepath)
Example #7
File: test_imaging.py Project: csn92/sima
    def setup(self):
        global tmp_dir

        path = example_hdf5()
        seq = Sequence.create('HDF5', path, 'yxt')
        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.ds = ImagingDataset([seq, seq], self.filepath)
Example #8
    def parse_channel(channel):
        """Parse an old format channel stored a dictionary

        Parameters
        ----------
        channel : dict

        Returns
        -------
        result : sima.Sequence
            A sequence equivalent to the old format channel.
        """
        _resolve_paths(channel, path)
        klass = channel.pop('__class__')
        if klass == 'sima.iterables.MultiPageTIFF':
            result = Sequence.create('TIFF', channel['path'])
            try:
                clip = channel['clip']
            except KeyError:
                clip = None
            if clip is not None:
                s = (slice(None), slice(None)) + tuple(
                    slice(*[None if x == 0 else x for x in dim])
                    for dim in clip)
                result = result[s]
        elif klass == 'sima.iterables.HDF5':
            result = Sequence.create(
                'HDF5', channel['path'], channel['dim_order'],
                channel['group'], channel['key'])
            c = channel['dim_order'].index('c')
            chan = channel['channel']
            s = tuple([slice(None) if x != c else slice(chan, chan + 1)
                       for x in range(len(channel['dim_order']))])
            result = result[s]
            try:
                clip = channel['clip']
            except KeyError:
                clip = None
            if clip is not None:
                s = (slice(None), slice(None)) + tuple(
                    slice(*[None if x == 0 else x for x in dim])
                    for dim in clip) + (slice(None),)
                result = result[s]
        else:
            raise Exception('Format not recognized.')
        return result
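A similar minimal sketch for the HDF5 branch's channel selection, assuming `dim_order = 'tzyxc'` and `chan = 1` (hypothetical values):

    dim_order = 'tzyxc'
    chan = 1
    c = dim_order.index('c')
    s = tuple(slice(None) if x != c else slice(chan, chan + 1)
              for x in range(len(dim_order)))
    # s == (slice(None),) * 4 + (slice(1, 2),): keeps only the requested channel.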
Example #9
File: test__hmm.py Project: asaich/sima
 def test_hmm_missing_frame(self):
     global tmp_dir
     frames = Sequence.create('TIFF', example_tiff())
     masked_seq = frames.mask([(5, None, None)])
     corrected = self.hm2d.correct(
         [masked_seq], os.path.join(tmp_dir, 'test_hmm_3.sima'))
     assert_(all(np.all(np.isfinite(seq.displacements))
                 for seq in corrected))
Example #10
 def test_hmm_missing_frame(self):
     global tmp_dir
     frames = Sequence.create('TIFF', example_tiff())
     masked_seq = frames.mask([(5, None, None)])
     corrected = self.hm2d.correct([masked_seq],
                                   os.path.join(tmp_dir, 'test_hmm_3.sima'))
     assert_(
         all(np.all(np.isfinite(seq.displacements)) for seq in corrected))
     assert_(np.prod(corrected.frame_shape) > 0)
Example #11
File: test_imaging.py Project: csn92/sima
def test_imaging_dataset_3d():
    global tmp_dir

    path = example_hdf5()
    seq = Sequence.create('HDF5', path, 'yxt')

    filepath = os.path.join(tmp_dir, "test_imaging_dataset_3d.sima")
    ds = ImagingDataset([seq, seq], filepath)
    assert_equal((ds.num_sequences, ) + (ds.num_frames, ) + ds.frame_shape,
                 (2, 40, 1, 128, 256, 1))
Example #12
def test_imaging_dataset_3d():
    global tmp_dir

    path = example_hdf5()
    seq = Sequence.create('HDF5', path, 'yxt')

    filepath = os.path.join(tmp_dir, "test_imaging_dataset_3d.sima")
    ds = ImagingDataset([seq, seq], filepath)
    assert_equal((ds.num_sequences,) + (ds.num_frames,) + ds.frame_shape,
                 (2, 40, 1, 128, 256, 1))
Example #13
File: test__hmm.py Project: asaich/sima
 def test_hmm_missing_column(self):
     global tmp_dir
     frames = Sequence.create('TIFF', example_tiff())
     mask = np.zeros(frames.shape[1:-1], dtype=bool)
     mask[:, :, 30] = True
     masked_seq = frames.mask([(None, mask, None)])
     corrected = self.hm2d.correct(
         [masked_seq], os.path.join(tmp_dir, 'test_hmm_3.sima'))
     assert_(all(np.all(np.isfinite(seq.displacements))
                 for seq in corrected))
Example #14
 def test_hmm_missing_column(self):
     global tmp_dir
     frames = Sequence.create('TIFF', example_tiff())
     mask = np.zeros(frames.shape[1:-1], dtype=bool)
     mask[:, :, 30] = True
     masked_seq = frames.mask([(None, mask, None)])
     corrected = self.hm2d.correct(
         [masked_seq],
         os.path.join(tmp_dir, 'test_hmm_missing_column.sima'))
     assert_(all(np.all(np.isfinite(seq.displacements))
                 for seq in corrected))
Example #15
    def __init__(self, path, channel=0, start=0):
        app.Canvas.__init__(self, position=(300, 100),
                            size=(800, 800), keys='interactive')

        self.program = gloo.Program(vertex, fragment)
        self.program['a_position'] = [(-1., -.5, 0.), (-1., +1., 0.),
                                      (+0.5, -.5, 0.), (+0.5, +1, 0.)]
        self.program['a_texcoord'] = [(0., 0.), (0., +1),
                                      (+1., 0.), (+1, +1)]

        self.program2 = gloo.Program(vertex, fragment)
        self.program2['a_position'] = [(-1., -1., 0.), (-1., -0.55, 0.),
                                       (+0.5, -1., 0.), (+0.5, -0.55, 0.)]
        self.program2['a_texcoord'] = [(0., 0.), (0., +1.),
                                       (+1., 0.), (+1., +1.)]

        self.program3 = gloo.Program(vertex, fragment)
        self.program3['a_position'] = [(0.55, -0.5, 0.), (0.55, +1., 0.),
                                       (+1., -0.5, 0.), (+1., +1., 0.)]
        self.program3['a_texcoord'] = [(0., 0.), (0., +1.),
                                       (+1., 0.), (+1., +1.)]

        if os.path.splitext(path)[-1] == '.sima':
            ds = ImagingDataset.load(path)
            self.sequence = ds.__iter__().next()
        else:
            self.sequence = Sequence.create('HDF5', path, 'tzyxc')

        self.frame_counter = start
        self.step_size = 1
        self.channel = channel
        self.length = len(self.sequence)

        vol = self.sequence._get_frame(self.frame_counter).astype('float32')
        vol /= NORMING_VAL
        vol = np.clip(vol, 0, 1)

        #surf = np.sum(vol,axis=0)[:,:,channel]/vol.shape[0]
        surf = np.nanmean(vol, axis=0)[:, :, channel]
        self.program['u_texture'] = surf

        #surf2 = np.sum(vol,axis=1)[:,:,channel]/vol.shape[1]
        surf2 = np.nanmean(vol, axis=1)[:, :, channel]
        self.program2['u_texture'] = surf2

        #surf3 = np.fliplr((np.sum(vol,axis=2)[:,:,channel]).T)/vol.shape[2]
        surf3 = np.fliplr((np.nanmean(vol, axis=2)[:, :, channel]).T)
        self.program3['u_texture'] = surf3
        self.text = visuals.TextVisual('', font_size=14, color='r',
                                       pos=(700, 700))
        self.text.text = "{} / {}".format(self.frame_counter, self.length)
        self.steptext = visuals.TextVisual('step_size: 1', font_size=10,
                                           color='r', pos=(700, 725))
        self.tr_sys = visuals.transforms.TransformSystem(self)

        self.timer = app.Timer(0.25, connect=self.on_timer, start=True)
Example #16
 def test_hmm_missing_frame(self):
     global tmp_dir
     frames = Sequence.create('TIFF', example_tiff())
     masked_seq = frames.mask([(5, None, None)])
     with warnings.catch_warnings():
         warnings.simplefilter('ignore')
         corrected = self.hm2d.correct(
             [masked_seq], os.path.join(
                 tmp_dir, 'test_hmm_missing_frame.sima'))
     assert_(all(np.all(np.isfinite(seq.displacements))
                 for seq in corrected))
     assert_(np.prod(corrected.frame_shape) > 0)
Example #17
File: test__hmm.py Project: asaich/sima
 def test_hmm_tmp(self):  # TODO: remove when displacements.pkl is updated
     global tmp_dir
     frames = Sequence.create('TIFF', example_tiff())
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=DeprecationWarning)
         corrected = self.hm2d.correct(
             [frames], os.path.join(tmp_dir, 'test_hmm_2.sima'))
     with open(misc.example_data() + '/displacements.pkl', 'rb') as fh:
         displacements = [d.reshape((20, 1, 128, 2))
                          for d in pickle.load(fh)]
     displacements_ = [seq.displacements for seq in corrected]
     assert_(abs(displacements_[0] - displacements[0]).max() <= 1)
Example #18
File: test__hmm.py Project: asaich/sima
    def test_hmm(self):
        global tmp_dir

        frames = Sequence.create('TIFF', example_tiff())
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            corrected = self.hm2d.correct(
                [frames], os.path.join(tmp_dir, 'test_hmm.sima'))

        with open(misc.example_data() + '/displacements.pkl', 'rb') as fh:
            displacements = [d.reshape((20, 1, 128, 2))
                             for d in pickle.load(fh)]

        displacements_ = [seq.displacements for seq in corrected]
        assert_almost_equal(displacements_, displacements)
Example #19
    def test_hmm(self):
        global tmp_dir

        frames = Sequence.create('TIFF', example_tiff())
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            corrected = self.hm2d.correct(
                [frames], os.path.join(tmp_dir, 'test_hmm.sima'))

        with open(misc.example_data() + '/displacements.pkl', 'rb') as fh:
            displacements = [d.reshape((20, 1, 128, 2))
                             for d in pickle.load(fh)]

        displacements_ = [seq.displacements for seq in corrected]
        assert_almost_equal(displacements_, displacements)
Example #20
 def test_hmm_tmp(self):  # TODO: remove when displacements.pkl is updated
     global tmp_dir
     frames = Sequence.create('TIFF', example_tiff())
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=DeprecationWarning)
         corrected = self.hm2d.correct(
             [frames], os.path.join(tmp_dir, 'test_hmm_2.sima'))
     with open(misc.example_data() + '/displacements.pkl', 'rb') as fh:
         displacements = [d.reshape((20, 1, 128, 2))
                          for d in pickle.load(fh)]
     displacements_ = [seq.displacements for seq in corrected]
     diffs = displacements_[0] - displacements[0]
     assert_(
         ((diffs - diffs.mean(axis=2).mean(axis=1).mean(axis=0)) > 1).mean()
         <= 0.001)
Example #21
File: sara.py Project: nk53/SARA
    def segment(self, use_settings=False):
        """Performs Spatiotemporal Independent Component Analysis.
    
    Currently only has options to use :class:`sima.segment.STICA`. User is
    prompted for parameters necessary to perform stICA. If *use_settings*
    is True, the settings from :data:`settings_file` are used instead.
    
    Args:
      use_settings (bool, optional): Whether to use the settings stored in
        :data:`settings_file`. If False, user is prompted for settings.
    
    """
        if use_settings:
            components = int(self.settings['components'])
            mu = float(self.settings['mu'])
            overlap_per = float(self.settings['overlap_per'])
        else:
            if self.sequence is None:
                prompt = "File path to the image you want to segment (TIFF only): "
                input_path = self.getTIFF(prompt)
                self.sequence = Sequence.create('TIFF', input_path)
                self.dataset = ImagingDataset([self.sequence], self.sima_dir)
            prompt = "Number of PCA components (default 50): "
            components = self.getNatural(prompt, default=50)
            prompt = "mu (default 0.5): "
            mu = -1.0
            while mu < 0 or mu > 1:
                mu = self.getFloat(prompt, default=0.5)
            prompt = "Minimum overlap " + \
                     "(default 20%; enter 0 to skip): "
            overlap_per = self.getPercent(prompt, default=0.2)
        segment_settings = {
            'components': components,
            'mu': mu,
            'overlap_per': overlap_per,
        }
        print "Performing Spatiotemporal Independent Component Analysis..."
        stdout.flush()
        stica = STICA(**segment_settings)
        stica.append(IdROIs())
        if self.dataset is None:
            self.dataset = ImagingDataset.load(self.sima_dir)
        self.rois = self.dataset.segment(stica, label="stICA ROIs")
        print len(self.dataset.ROIs['stICA ROIs']), "ROIs found"

        if not use_settings:
            segment_settings['segmentation_strategy'] = 'stICA'
            self._updateSettingsFile(segment_settings)
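For comparison, a minimal non-interactive sketch of the same stICA workflow; the paths are hypothetical and the parameter values mirror the defaults prompted for above:

    from sima import ImagingDataset, Sequence
    from sima.segment import STICA

    seq = Sequence.create('TIFF', 'recording.tif')          # hypothetical input
    dataset = ImagingDataset([seq], 'recording.sima')       # hypothetical save dir
    stica = STICA(components=50, mu=0.5, overlap_per=0.2)
    rois = dataset.segment(stica, label='stICA ROIs')       # returns the detected ROIs
    n_rois = len(rois)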
Example #22
def getChannels(directory):
    ds_path = directory.replace(':!', '/')

    if (os.path.splitext(ds_path)[-1] == '.sima'):
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return ''
        channels = ds.channel_names
    else:
        try:
            seq = Sequence.create('HDF5', ds_path, 'tzyxc')
        except IOError:
            return ''
        channels = ['channel_' + str(idx) for idx in range(seq.shape[4])]

    if (len(channels) > 1):
        channels += ['overlay']
    return render_template('select_list.html', options=channels)
Example #23
def getChannels(directory):
    ds_path = directory.replace(":!", "/")

    if os.path.splitext(ds_path)[-1] == ".sima":
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return ""
        channels = ds.channel_names
    else:
        try:
            seq = Sequence.create("HDF5", ds_path, "tzyxc")
        except IOError:
            return ""
        channels = ["channel_" + str(idx) for idx in range(seq.shape[4])]

    if len(channels) > 1:
        channels += ["overlay"]
    return render_template("select_list.html", options=channels)
Example #24
def getInfo():
    ds_path = request.form.get('path')

    if (os.path.splitext(ds_path)[-1] == '.sima'):
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return jsonify(error='dataset not found')

        seq = ds.__iter__().next()
    else:
        try:
            seq = Sequence.create('HDF5', ds_path, 'tzyxc')
        except IOError:
            return jsonify(error='dataset not found')

    length = len(seq)
    norm_factors = {}
    for channel in xrange(seq.shape[4]):
        norm_factors['channel_' + str(channel)] = []

    for frame_index in [0, int(length / 2), -1]:
        frame = seq._get_frame(frame_index)
        for channel in xrange(seq.shape[4]):
            subframe = frame[:, :, :, channel]
            if np.any(np.isfinite(subframe)):
                factor = np.percentile(
                    subframe[np.where(np.isfinite(subframe))], 98)
                if np.isfinite(factor):
                    norm_factors['channel_' + str(channel)] += [factor]

    json = {
        'planes': range(int(seq.shape[1] + 1)),
        'height': int(seq.shape[2]),
        'width': int(seq.shape[3]),
        'length': length
    }

    for channel in norm_factors.keys():
        json[channel] = int(max(1, int(np.nanmean(norm_factors[channel]))))

    return jsonify(**json)
Example #25
def getInfo():
    ds_path = request.form.get("path")

    if os.path.splitext(ds_path)[-1] == ".sima":
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return jsonify(error="dataset not found")

        seq = ds.__iter__().next()
    else:
        try:
            seq = Sequence.create("HDF5", ds_path, "tzyxc")
        except IOError:
            return jsonify(error="dataset not found")

    length = len(seq)
    norm_factors = {}
    for channel in xrange(seq.shape[4]):
        norm_factors["channel_" + str(channel)] = []

    for frame_index in [0, int(length / 2), -1]:
        frame = seq._get_frame(frame_index)
        for channel in xrange(seq.shape[4]):
            subframe = frame[:, :, :, channel]
            if np.any(np.isfinite(subframe)):
                factor = np.percentile(subframe[np.where(np.isfinite(subframe))], 98)
                if np.isfinite(factor):
                    norm_factors["channel_" + str(channel)] += [factor]

    json = {"planes": range(seq.shape[1] + 1), "height": seq.shape[2], "width": seq.shape[3], "max": length}

    for channel in norm_factors.keys():
        json[channel] = max(1, int(np.nanmean(norm_factors[channel])))

    return jsonify(**json)
Example #26
def getFrames():
    ds_path = request.form.get('path')
    requestFrames = request.form.getlist('frames[]', type=int)
    normingVal = request.form.getlist('normingVal[]', type=float)
    sequenceId = request.form.get('sequenceId')
    channel = request.form.get('channel')
    planes = request.form.getlist('planes[]', type=int)
    cycle = request.form.get('cycle', type=int)

    if planes is None:
        planes = [0]

    quality = 40
    if channel == 'overlay':
        channel = None

    ds = None
    if (os.path.splitext(ds_path)[-1] == '.sima'):
        ds = ImagingDataset.load(ds_path)
        seq = ds.sequences[cycle]
        channel = ds._resolve_channel(channel)
    else:
        seq = Sequence.create('HDF5', ds_path, 'tzyxc')
        if channel:
            channel = int(channel.split('_')[-1])

    end = False
    frames = {}
    for frame_number in requestFrames:
        norming_val = normingVal[:]
        if frame_number > len(seq) - 1 or frame_number < -1:
            end = True
            continue
        elif frame_number == -1 and ds is not None:
            try:
                time_averages = pickle.load(
                    open(os.path.join(ds.savedir, 'time_averages.pkl')))
                if not isinstance(time_averages, np.ndarray):
                    raise Exception('no time average')
            except:
                vol = seq._get_frame(0)
            else:
                vol = ds.time_averages
                for ch in xrange(vol.shape[3]):
                    subframe = vol[:, :, :, ch]
                    factor = np.percentile(
                        subframe[np.where(np.isfinite(subframe))], 99)
                    if np.isfinite(factor):
                        norming_val[ch] = factor
        else:
            vol = seq._get_frame(frame_number)

        if channel is not None:
            vol = vol[:, :, :, channel]
            vol /= ((norming_val[channel]) / 255)
            vol = np.clip(vol, 0, 255)
        else:
            vol = np.hstack((vol[:, :, :, 0] / norming_val[0],
                             vol[:, :, :, 1] / norming_val[1]))
            vol *= 255
        frames['frame_' + str(frame_number)] = {}

        for plane in planes:
            if plane == 0:
                zsurf = np.nanmean(vol, axis=0)
            else:
                zsurf = vol[plane - 1, :, :]

            if plane == 0:
                ysurf = np.nanmean(vol, axis=1)
            else:
                ysurf = np.zeros((vol.shape[0], vol.shape[2]))
                ysurf[plane - 1, :] = np.nanmean(zsurf, axis=0)

            if plane == 0:
                xsurf = np.nanmean(vol, axis=2).T
            else:
                xsurf = np.zeros((vol.shape[1], vol.shape[0]))
                xsurf[:, plane - 1] = np.nanmean(zsurf, axis=1).T

            frames['frame_' + str(frame_number)][plane] = {
                'z': convertToB64Jpeg(zsurf.astype('uint8'), quality=quality),
                'y': convertToB64Jpeg(ysurf.astype('uint8'), quality=quality),
                'x': convertToB64Jpeg(xsurf.astype('uint8'), quality=quality)
            }

    return jsonify(end=end, sequenceId=sequenceId, **frames)
Example #27
File: sara.py Project: nk53/SARA
    def motionCorrect(self,
                      input_path=None,
                      output_path=None,
                      use_settings=False):
        """Perform motion correction on a recording and export frames.
    
    Uses settings from :data:`mc_radio` or (if *use_settings* is True)
    :data:`settings_file`. 
    
    Args:
      input_path (str, optional) : File path to the image to be corrected.
        If None, user is prompted for location.
      output_path (str, optional): File path to export corrected frames. If
        None, user is prompted for location.
      use_settings (bool, optional): Whether to use the settings stored in
        :data:`settings_file`. If False, user is prompted for settings.
    
    """
        # sima uses the builtin input() function, which is not compatible with IPython
        if isdir(self.sima_dir):
            if ipython_loaded():
                msg = "You cannot perform motion correction using an" + \
                      " existing SIMA analysis directory"
                warnings.warn(msg)
                return  # exit before we screw something up

        if input_path is None:
            # currently only TIFF is supported by SARA
            prompt = "File path to the image you want corrected (TIFF only): "
            input_path = self.getTIFF(prompt)

        if output_path is None:
            prompt = "Where would you like to save the corrected frames? "
            self.corrected_frames = self.reserveFilePath(prompt)
        else:
            self.corrected_frames = output_path

        self.sequence = Sequence.create('TIFF', input_path)
        if use_settings:
            md_x = int(self.settings['max_displacement_x'])
            md_y = int(self.settings['max_displacement_y'])
        else:
            prompt = ["Maximum %s displacement (in pixels; default 100): " \
                       % ax for ax in ['X', 'Y']]
            md_x = self.getNatural(prompt[0], default=100)
            md_y = self.getNatural(prompt[1], default=100)
        mc_settings = {"max_displacement": [md_x, md_y]}

        if use_settings:
            strategy = self.settings['correction_strategy']
            self._motion_correction_map[strategy](mc_settings)
        else:
            # By this time, the user should have selected a strategy
            self.strategy_radio.close()
            self._motion_correction_map[self.strategy_radio.value](mc_settings)

            # export settings we used to settings file
            mc_settings = {
                'uncorrected_image': abspath(input_path),
                'corrected_image': abspath(self.corrected_frames),
                'max_displacement_x': md_x,
                'max_displacement_y': md_y,
                'correction_strategy': self.strategy_radio.value,
            }
            self._updateSettingsFile(mc_settings)
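For comparison, a minimal sketch of the underlying SIMA motion-correction call without the interactive prompts; the paths are hypothetical, and `max_displacement` corresponds to the `[md_x, md_y]` setting assembled above:

    import sima.motion
    from sima import Sequence

    seq = Sequence.create('TIFF', 'uncorrected.tif')        # hypothetical input
    mc = sima.motion.HiddenMarkov2D(max_displacement=[100, 100],
                                    n_processes=1, verbose=False)
    corrected = mc.correct([seq], 'corrected.sima')         # returns an ImagingDataset
    displacements = [s.displacements for s in corrected]    # estimated per-frame shifts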
Example #28
File: convert.py Project: zhounapeuw/sima
 def parse_sequence(sequence):
     channels = [parse_channel(c) for c in sequence]
     return Sequence.join(*channels)
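`Sequence.join` combines the per-channel sequences along the channel axis; a minimal sketch with hypothetical single-channel inputs:

    green = Sequence.create('HDF5', 'green.h5', 'yxt')   # hypothetical paths
    red = Sequence.create('HDF5', 'red.h5', 'yxt')
    both = Sequence.join(green, red)                     # frames gain a 2-entry channel axis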
Example #29
def getFrames():
    ds_path = request.form.get("path")
    requestFrames = request.form.getlist("frames[]", type=int)
    normingVal = request.form.getlist("normingVal[]", type=float)
    sequenceId = request.form.get("sequenceId")
    channel = request.form.get("channel")
    planes = request.form.getlist("planes[]", type=int)
    cycle = request.form.get("cycle", type=int)

    if planes is None:
        planes = [0]

    quality = 40
    if channel == "overlay":
        channel = None

    ds = None
    if os.path.splitext(ds_path)[-1] == ".sima":
        ds = ImagingDataset.load(ds_path)
        seq = ds.sequences[cycle]
        channel = ds._resolve_channel(channel)
    else:
        seq = Sequence.create("HDF5", ds_path, "tzyxc")
        if channel:
            channel = int(channel.split("_")[-1])

    end = False
    frames = {}
    for frame_number in requestFrames:
        norming_val = normingVal[:]
        if frame_number > len(seq) - 1 or frame_number < -1:
            end = True
            continue
        elif frame_number == -1 and ds is not None:
            try:
                time_averages = pickle.load(open(os.path.join(ds.savedir, "time_averages.pkl")))
                if not isinstance(time_averages, np.ndarray):
                    raise Exception("no time average")
            except:
                vol = seq._get_frame(0)
            else:
                vol = ds.time_averages
                for ch in xrange(vol.shape[3]):
                    subframe = vol[:, :, :, ch]
                    factor = np.percentile(subframe[np.where(np.isfinite(subframe))], 99)
                    if np.isfinite(factor):
                        norming_val[ch] = factor
        else:
            vol = seq._get_frame(frame_number)

        if channel is not None:
            vol = vol[:, :, :, channel]
            vol /= (norming_val[channel]) / 255
            vol = np.clip(vol, 0, 255)
        else:
            vol = np.hstack((vol[:, :, :, 0] / norming_val[0], vol[:, :, :, 1] / norming_val[1]))
            vol *= 255
        frames["frame_" + str(frame_number)] = {}

        for plane in planes:
            if plane == 0:
                zsurf = np.nanmean(vol, axis=0)
            else:
                zsurf = vol[plane - 1, :, :]

            if plane == 0:
                ysurf = np.nanmean(vol, axis=1)
            else:
                ysurf = np.zeros((vol.shape[0], vol.shape[2]))
                ysurf[plane - 1, :] = np.nanmean(zsurf, axis=0)

            if plane == 0:
                xsurf = np.nanmean(vol, axis=2).T
            else:
                xsurf = np.zeros((vol.shape[1], vol.shape[0]))
                xsurf[:, plane - 1] = np.nanmean(zsurf, axis=1).T

            frames["frame_" + str(frame_number)][plane] = {
                "z": convertToB64Jpeg(zsurf.astype("uint8"), quality=quality),
                "y": convertToB64Jpeg(ysurf.astype("uint8"), quality=quality),
                "x": convertToB64Jpeg(xsurf.astype("uint8"), quality=quality),
            }

    return jsonify(end=end, sequenceId=sequenceId, **frames)
Example #30
 def setup(self):
     self.strategy = sima.motion.ResonantCorrection(
         sima.motion.HiddenMarkov2D(n_processes=1, verbose=False), 5)
     self.dataset = sima.ImagingDataset(
         [Sequence.create('HDF5', example_volume(), 'tzyxc')], None)
Example #31
 def parse_sequence(sequence):
     channels = [parse_channel(c) for c in sequence]
     return Sequence.join(*channels)
Example #32
import matplotlib.pyplot as plt
import nimfa
import scipy
import scipy.sparse
from nitime import utils 
from nitime import algorithms as alg
from nitime.timeseries import TimeSeries
#
from PIL import Image

# path ='/Users/Chithra/Documents/Columbia/Semester2/Test/z-series-004/z-series-004_Cycle00001_Element00001.h5'
# path ='/Users/Chithra/Documents/Columbia/Semester2/Test/TSeries-08242014-Day1-SessionA-000/TSeries-08242014-Day1-SessionA-000_Cycle00001_Element00001.h5'
path = '/Users/Chithra/Documents/Columbia/Semester2/NMF/ctxA-002/ctxA-002_Cycle00001_Element00001.h5'
# NORMING_VAL = 2194
options={}
sequence = Sequence.create('HDF5', path, 'tzxyc')
print(sequence.shape)
(T, P, R, C, channel) = sequence.shape
# For testing, only use a subset of the data
P = 1
# R = 150
# C = 150
T = 800
d = R * C  # number of pixels per frame
Y = np.zeros((R, C, T), dtype='float64')  # observations: one R x C image per frame
for i in range(0, T):
    vol = sequence._get_frame(i).astype('float32')
    vol = np.nanmean(vol, axis=3)[0, :, :]
    print(i)
    Y[:, :, i] = vol
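The (R, C, T) array built above would typically be flattened to a pixels-by-time matrix before factorization; a minimal sketch in which the nimfa call, rank, and iteration count are assumptions rather than part of this script:

    V = Y.reshape(d, T)                          # d = R*C pixel rows, T frame columns
    nmf = nimfa.Nmf(V, rank=20, max_iter=100)    # hypothetical rank and iterations
    fit = nmf()                                  # run the factorization
    W, H = fit.basis(), fit.coef()               # W: spatial components, H: temporal traces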
Example #33
    def setup(self):
        global tmp_dir

        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.tiff_ds = ImagingDataset(
            [Sequence.create('TIFF', example_tiff(), 1, 1)], self.filepath)
Example #34
    def __init__(self, path, channel=0, start=0):
        app.Canvas.__init__(self,
                            position=(300, 100),
                            size=(800, 800),
                            keys='interactive')

        self.program = gloo.Program(vertex, fragment)
        self.program['a_position'] = [(-1., -.5, 0.), (-1., +1., 0.),
                                      (+0.5, -.5, 0.), (+0.5, +1, 0.)]
        self.program['a_texcoord'] = [(0., 0.), (0., +1), (+1., 0.), (+1, +1)]

        self.program2 = gloo.Program(vertex, fragment)
        self.program2['a_position'] = [(-1., -1., 0.), (-1., -0.55, 0.),
                                       (+0.5, -1., 0.), (+0.5, -0.55, 0.)]
        self.program2['a_texcoord'] = [(0., 0.), (0., +1.), (+1., 0.),
                                       (+1., +1.)]

        self.program3 = gloo.Program(vertex, fragment)
        self.program3['a_position'] = [(0.55, -0.5, 0.), (0.55, +1., 0.),
                                       (+1., -0.5, 0.), (+1., +1., 0.)]
        self.program3['a_texcoord'] = [(0., 0.), (0., +1.), (+1., 0.),
                                       (+1., +1.)]

        if os.path.splitext(path)[-1] == '.sima':
            ds = ImagingDataset.load(path)
            self.sequence = ds.__iter__().next()
        else:
            self.sequence = Sequence.create('HDF5', path, 'tzyxc')

        self.frame_counter = start
        self.step_size = 1
        self.channel = channel
        self.length = len(self.sequence)

        vol = self.sequence._get_frame(self.frame_counter).astype('float32')
        vol /= NORMING_VAL
        vol = np.clip(vol, 0, 1)

        #surf = np.sum(vol,axis=0)[:,:,channel]/vol.shape[0]
        surf = np.nanmean(vol, axis=0)[:, :, channel]
        self.program['u_texture'] = surf

        #surf2 = np.sum(vol,axis=1)[:,:,channel]/vol.shape[1]
        surf2 = np.nanmean(vol, axis=1)[:, :, channel]
        self.program2['u_texture'] = surf2

        #surf3 = np.fliplr((np.sum(vol,axis=2)[:,:,channel]).T)/vol.shape[2]
        surf3 = np.fliplr((np.nanmean(vol, axis=2)[:, :, channel]).T)
        self.program3['u_texture'] = surf3
        self.text = visuals.TextVisual('',
                                       font_size=14,
                                       color='r',
                                       pos=(700, 700))
        self.text.text = "{} / {}".format(self.frame_counter, self.length)
        self.steptext = visuals.TextVisual('step_size: 1',
                                           font_size=10,
                                           color='r',
                                           pos=(700, 725))
        self.tr_sys = visuals.transforms.TransformSystem(self)

        self.timer = app.Timer(0.25, connect=self.on_timer, start=True)