Example #1
File: test_imaging.py  Project: csn92/sima
    def setup(self):
        global tmp_dir

        path = example_hdf5()
        seq = Sequence.create('HDF5', path, 'yxt')
        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.ds = ImagingDataset([seq, seq], self.filepath)
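These test snippets assume SIMA's own test imports and a module-level temporary directory. A minimal sketch of the setup they rely on (the exact import location of example_hdf5 is an assumption, mirroring example_data, which Example #33 imports from sima.misc):

import os
import tempfile

from sima import ImagingDataset, Sequence
from sima.misc import example_hdf5  # assumed location, as with example_data

tmp_dir = tempfile.mkdtemp()  # stands in for the test module's global tmp_dir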
Example #2
def deleteRoiSet():
    ds_path = request.form.get('path')
    label = request.form.get('label')

    dataset = ImagingDataset.load(ds_path)
    dataset.delete_ROIs(label)

    return jsonify(result='success')
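The view functions in these web examples use Flask's request and jsonify, so each is presumably registered as a route on a Flask app elsewhere in the project. A hypothetical sketch of that wiring (the app object and the '/deleteRoiSet' path are assumptions, not from the source):

from flask import Flask, request, jsonify
from sima import ImagingDataset

app = Flask(__name__)

@app.route('/deleteRoiSet', methods=['POST'])
def deleteRoiSet():
    ds_path = request.form.get('path')
    label = request.form.get('label')
    ImagingDataset.load(ds_path).delete_ROIs(label)
    return jsonify(result='success')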
Example #3
    def setup(self):
        global tmp_dir

        path = example_hdf5()
        seq = Sequence.create('HDF5', path, 'yxt')
        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.ds = ImagingDataset([seq, seq], self.filepath)

        self.filepath_tiffs = os.path.join(tmp_dir, "test_dataset_tiffs.sima")
        seq = Sequence.create(
            'TIFFs', [[example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()]])
        self.ds_tiffs = ImagingDataset([seq, seq], self.filepath_tiffs)
Example #4
File: sara.py  Project: nk53/SARA
    def segment(self, use_settings=False):
        """Performs Spatiotemporal Independent Component Analysis.
    
    Currently the only supported strategy is :class:`sima.segment.STICA`. The
    user is prompted for the parameters needed to perform stICA; if
    *use_settings* is True, the settings from :data:`settings_file` are used
    instead.
    
    Args:
      use_settings (bool, optional): Whether to use the settings stored in
        :data:`settings_file`. If False, user is prompted for settings.
    
    """
        if use_settings:
            components = int(self.settings['components'])
            mu = float(self.settings['mu'])
            overlap_per = float(self.settings['overlap_per'])
        else:
            if self.sequence is None:
                prompt = "File path to the image you want to segment (TIFF only): "
                input_path = self.getTIFF(prompt)
                self.sequence = Sequence.create('TIFF', input_path)
                self.dataset = ImagingDataset([self.sequence], self.sima_dir)
            prompt = "Number of PCA components (default 50): "
            components = self.getNatural(prompt, default=50)
            prompt = "mu (default 0.5): "
            mu = -1.0
            while mu < 0 or mu > 1:
                mu = self.getFloat(prompt, default=0.5)
            prompt = "Minimum overlap " + \
                     "(default 20%; enter 0 to skip): "
            overlap_per = self.getPercent(prompt, default=0.2)
        segment_settings = {
            'components': components,
            'mu': mu,
            'overlap_per': overlap_per,
        }
        print "Performing Spatiotemporal Independent Component Analysis..."
        stdout.flush()
        stica = STICA(**segment_settings)
        stica.append(IdROIs())
        if self.dataset is None:
            self.dataset = ImagingDataset.load(self.sima_dir)
        self.rois = self.dataset.segment(stica, label="stICA ROIs")
        print len(self.dataset.ROIs['stICA ROIs']), "ROIs found"

        if not use_settings:
            segment_settings['segmentation_strategy'] = 'stICA'
            self._updateSettingsFile(segment_settings)
Example #5
def test_STICA():
    ds = ImagingDataset.load(example_data())
    method = segment.STICA(components=5)
    method.append(segment.SparseROIsFromMasks(min_size=50))
    method.append(segment.SmoothROIBoundaries(tolerance=1, min_verts=8))
    method.append(segment.MergeOverlapping(0.5))
    ds.segment(method)
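STICA here is the segmentation strategy proper, and each append() attaches a post-processing step that runs on its output (mask-to-ROI conversion, boundary smoothing, overlap merging). ImagingDataset.segment returns the resulting ROIList, and when a label is given the list is also stored on the dataset, as Example #4 shows:

rois = ds.segment(method, label='stICA ROIs')  # returned and saved under the label
print len(rois), "ROIs found"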
Example #6
def getRoi():
    ds_path = request.form.get('path')
    label = request.form.get('label')
    roi_id = request.form.get('id')

    dataset = ImagingDataset.load(ds_path)
    try:
        rois = ROIList.load(os.path.join(dataset.savedir, 'rois.pkl'),
                            label=label)
    except Exception:
        return jsonify({})

    # find the requested ROI by id; bail out if no match exists
    for roi in rois:
        if roi.id == roi_id:
            break
    else:
        return jsonify({})

    roi_points = []
    try:
        for i in xrange(roi.im_shape[0]):
            roi_points.append([])
    except Exception:
        for i in xrange(np.max(np.array(roi.coords)[:, :, 2])):
            roi_points.append([])
    for poly in roi.polygons:
        coords = np.array(poly.exterior.coords)
        if np.all(coords[-1] == coords[0]):
            coords = coords[:-1]
        plane = int(coords[0, -1])
        coords = coords[:, :2].astype(int).tolist()
        roi_points[plane].append(coords)

    return jsonify({roi.id: {'label': roi.label, 'points': roi_points}})
Example #7
def test_STICA():
    ds = ImagingDataset.load(example_data())
    method = segment.STICA(components=5)
    method.append(segment.SparseROIsFromMasks(min_size=50))
    method.append(segment.SmoothROIBoundaries(radius=3))
    method.append(segment.MergeOverlapping(0.5))
    ds.segment(method)
Example #8
def deleteRoiSet():
    ds_path = request.form.get("path")
    label = request.form.get("label")

    dataset = ImagingDataset.load(ds_path)
    dataset.delete_ROIs(label)

    return jsonify(result="success")
Example #9
File: sara.py  Project: nk53/SARA
    def exportSignal(self, outfile=None, use_settings=False):
        """Write ROI signals to a file.
    
    Uses settings from :data:`signal_radio` or (if *use_settings* is True)
    :data:`settings_file`.
    
    Args:
      outfile (str, optional): where to store signal; if None or omitted,
        :meth:`.exportSignal` will prompt the user for a location
      use_settings (bool, optional): Whether to use the settings stored in
        :data:`settings_file`. If False, user is prompted for settings.
    
    """

        frames_per_second = None
        # initialize dataset and rois
        if self.rois is None:
            if self.dataset is None:
                self.dataset = ImagingDataset.load(self.sima_dir)
            self.rois = self.dataset.ROIs['stICA ROIs']
        # prompt user for export path if it hasn't already been provided
        if outfile is None:
            prompt = "File path to export to: "
            outfile = self.reserveFilePath(prompt)
        # get the frames-per-second conversion factor
        if use_settings and self.settings['signals_format'] == 'time':
            frames_per_second = float(self.settings['frames_per_second'])
        elif self.signal_radio.value == 'time':
            prompt = "Please input the recording's capture rate " + \
                     "(frames per second): "
            while frames_per_second <= 0:
                frames_per_second = self.getFloat(prompt)
                prompt = "The number you entered is not a valid capture rate" + \
                         ", please try again: "
            self.signal_radio.close()
        # check if we've already extracted a signal
        if self.dataset.signals() == {}:
            print "Extracting signals from ROIs..."
            stdout.flush()  # force print statement to output to IPython
            self.signal = self.dataset.extract(rois=self.rois, label='signal')
            print "Signals extracted"
        else:
            self.signal = self.dataset.signals()['signal']
        self.dataset.export_signals(outfile)
        # do we need to post-process the CSV?
        if frames_per_second is not None:
            self._postProcessSignal(outfile, frames_per_second)

        # update settings file unless it's unnecessary
        if not use_settings:
            signal_settings = {
                'signals_file': abspath(outfile),
                'signals_format': self.signal_radio.value,
                'frames_per_second': frames_per_second,
            }
            self._updateSettingsFile(signal_settings)
        print "Signals Exported to", outfile
Example #10
def locate_datasets(search_directory):
    """Locates all SIMA directories below 'search_directory'"""
    for directory, folders, files in os.walk(search_directory):
        if directory.endswith('.sima'):
            try:
                dataset = ImagingDataset.load(directory)
            except IOError:
                continue
            else:
                yield dataset
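Since locate_datasets is a generator, datasets are loaded lazily as the directory tree is walked. A short usage sketch:

for dataset in locate_datasets('/data/experiments'):
    print dataset.savedir, dataset.num_sequences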
Example #11
def locate_datasets(search_directory):
    """Locates all SIMA directories below 'search_directory'"""
    for directory, folders, files in os.walk(search_directory):
        if directory.endswith('.sima'):
            try:
                dataset = ImagingDataset.load(directory)
            except IOError:
                continue
            else:
                yield dataset
Example #12
File: test_imaging.py  Project: csn92/sima
def test_imaging_dataset_3d():
    global tmp_dir

    path = example_hdf5()
    seq = Sequence.create('HDF5', path, 'yxt')

    filepath = os.path.join(tmp_dir, "test_imaging_dataset_3d.sima")
    ds = ImagingDataset([seq, seq], filepath)
    assert_equal((ds.num_sequences, ) + (ds.num_frames, ) + ds.frame_shape,
                 (2, 40, 1, 128, 256, 1))
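The asserted tuple concatenates num_sequences, num_frames, and frame_shape; frame_shape is ordered (z, y, x, c), matching the dimension labels the HDF5 export checks in Example #34:

ds.num_sequences  # 2 -- the same sequence was passed in twice
ds.num_frames     # 40
ds.frame_shape    # (1, 128, 256, 1), i.e. (z, y, x, c)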
Example #13
    def __init__(self, path, channel=0, start=0):
        app.Canvas.__init__(self, position=(300, 100),
                            size=(800, 800), keys='interactive')

        self.program = gloo.Program(vertex, fragment)
        self.program['a_position'] = [(-1., -.5, 0.), (-1., +1.,0.),
                                      (+0.5, -.5, 0.), (+0.5, +1,0.)]
        self.program['a_texcoord'] = [(0., 0.), (0., +1),
                                      (+1., 0.), (+1, +1)]

        self.program2 = gloo.Program(vertex, fragment)
        self.program2['a_position'] = [(-1., -1., 0.), (-1., -0.55,0.),
                                      (+0.5, -1., 0.), (+0.5, -0.55,0.)]
        self.program2['a_texcoord'] = [(0., 0.), (0., +1.),
                                      (+1., 0.), (+1., +1.)]

        self.program3 = gloo.Program(vertex, fragment)
        self.program3['a_position'] = [(0.55, -0.5, 0.), (0.55, +1.,0.),
                                      (+1., -0.5, 0.), (+1., +1.,0.)]
        self.program3['a_texcoord'] = [(0., 0.), (0., +1.),
                                      (+1., 0.), (+1., +1.)]
       
        if os.path.splitext(path)[-1] == '.sima':
            ds = ImagingDataset.load(path)
            self.sequence = next(iter(ds))
        else:
            self.sequence = Sequence.create('HDF5',path,'tzyxc')

        self.frame_counter = start
        self.step_size = 1
        self.channel = channel
        self.length = len(self.sequence)

        vol = self.sequence._get_frame(self.frame_counter).astype('float32')
        vol /= NORMING_VAL
        vol = np.clip(vol, 0, 1)

        #surf = np.sum(vol,axis=0)[:,:,channel]/vol.shape[0]
        surf = np.nanmean(vol,axis=0)[:,:,channel]
        self.program['u_texture'] = surf
        
        #surf2 = np.sum(vol,axis=1)[:,:,channel]/vol.shape[1]
        surf2 = np.nanmean(vol,axis=1)[:,:,channel]
        self.program2['u_texture'] = surf2

        #surf3 = np.fliplr((np.sum(vol,axis=2)[:,:,channel]).T)/vol.shape[2]
        surf3 = np.fliplr((np.nanmean(vol,axis=2)[:,:,channel]).T)
        self.program3['u_texture'] = surf3
        self.text = visuals.TextVisual('',font_size=14,color='r',pos=(700, 700))
        self.text.text = "{} / {}".format(self.frame_counter, self.length)
        self.steptext = visuals.TextVisual('step_size: 1',font_size=10,color='r',pos=(700, 725))
        self.tr_sys = visuals.transforms.TransformSystem(self)

        self.timer = app.Timer(0.25, connect=self.on_timer, start=True)
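This __init__ belongs to a vispy app.Canvas subclass. A hypothetical launch sketch (the class name Canvas and the input path are assumptions, not from the source):

from vispy import app

if __name__ == '__main__':
    canvas = Canvas('experiment.sima', channel=0)
    canvas.show()
    app.run()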
Example #14
def getCycles(directory):
    ds_path = directory.replace(":!", "/")

    if os.path.splitext(ds_path)[-1] == ".sima":
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return ""
        return render_template("select_list.html", options=range(ds.num_sequences))

    return ""
Example #15
def getCycles(directory):
    ds_path = directory.replace(':!', '/')

    if os.path.splitext(ds_path)[-1] == '.sima':
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return ''
        return render_template('select_list.html',
                               options=range(ds.num_sequences))

    return ''
Example #16
def setRoiLabel():
    ds_path = request.form.get("path")
    old_label = request.form.get("oldLabel")
    new_label = request.form.get("newLabel")

    dataset = ImagingDataset.load(ds_path)
    dataset.add_ROIs(dataset.ROIs[old_label], label=new_label)

    labels = dataset.ROIs.keys()

    labels.extend(map(os.path.basename, glob.glob(os.path.join(ds_path, "ica*.npz"))))
    labels.extend(map(os.path.basename, glob.glob(os.path.join(ds_path, "opca*.npz"))))

    return render_template("select_list.html", options=[""] + labels)
Example #17
def deleteRoi():
    ds_path = request.form.get('path')
    label = request.form.get('label')
    roi_id = request.form.get('roiId')

    dataset = ImagingDataset.load(ds_path)
    try:
        rois = dataset.ROIs[label]
    except KeyError:
        return jsonify(result='failed to locate ROI list')

    rois = filter(lambda r: r.id != roi_id, rois)
    dataset.add_ROIs(ROIList(rois), label=label)

    return jsonify(result='success')
Example #18
def setRoiLabel():
    ds_path = request.form.get('path')
    old_label = request.form.get('oldLabel')
    new_label = request.form.get('newLabel')

    dataset = ImagingDataset.load(ds_path)
    dataset.add_ROIs(dataset.ROIs[old_label], label=new_label)

    labels = dataset.ROIs.keys()

    labels.extend(
        map(os.path.basename, glob.glob(os.path.join(ds_path, 'ica*.npz'))))
    labels.extend(
        map(os.path.basename, glob.glob(os.path.join(ds_path, 'opca*.npz'))))

    return render_template('select_list.html', options=[''] + labels)
Example #19
def getLabels():
    ds_path = request.form.get("path")
    try:
        dataset = ImagingDataset.load(ds_path)
    except Exception:
        return ""

    try:
        with open(os.path.join(dataset.savedir, "rois.pkl"), "rb") as f:
            labels = pickle.load(f).keys()
    except Exception:
        return ""

    labels.extend(map(os.path.basename, glob.glob(os.path.join(ds_path, "ica*.npz"))))
    labels.extend(map(os.path.basename, glob.glob(os.path.join(ds_path, "opca*.npz"))))

    return render_template("select_list.html", options=[""] + labels)
Example #20
def selectRoi():
    ds_path = request.form.get('path')
    label = request.form.get('label')
    plane = float(request.form.get('z'))

    point = Point(float(request.form.get('x')), float(request.form.get('y')))

    dataset = ImagingDataset.load(ds_path)
    rois = ROIList.load(os.path.join(dataset.savedir, 'rois.pkl'), label=label)

    for roi in rois:
        for poly in roi.polygons:
            z_coord = np.array(poly.exterior.coords)[0, 2]
            if z_coord == plane or plane == -1:
                if poly.contains(point):
                    return jsonify(label=roi.label, id=roi.id)

    return jsonify({'error': 'roi not found'})
Example #21
def getChannels(directory):
    ds_path = directory.replace(":!", "/")

    if os.path.splitext(ds_path)[-1] == ".sima":
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return ""
        channels = ds.channel_names
    else:
        try:
            seq = Sequence.create("HDF5", ds_path, "tzyxc")
        except IOError:
            return ""
        channels = ["channel_" + str(idx) for idx in range(seq.shape[4])]

    if len(channels) > 1:
        channels += ["overlay"]
    return render_template("select_list.html", options=channels)
Example #22
def getChannels(directory):
    ds_path = directory.replace(':!', '/')

    if os.path.splitext(ds_path)[-1] == '.sima':
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return ''
        channels = ds.channel_names
    else:
        try:
            seq = Sequence.create('HDF5', ds_path, 'tzyxc')
        except IOError:
            return ''
        channels = ['channel_' + str(idx) for idx in range(seq.shape[4])]

    if len(channels) > 1:
        channels += ['overlay']
    return render_template('select_list.html', options=channels)
Example #23
def getLabels():
    ds_path = request.form.get('path')
    try:
        dataset = ImagingDataset.load(ds_path)
    except Exception:
        return jsonify({'labels': []})

    try:
        with open(os.path.join(dataset.savedir, 'rois.pkl'), 'rb') as f:
            labels = pickle.load(f).keys()
    except Exception:
        return jsonify({'labels': []})

    labels.extend(
        map(os.path.basename, glob.glob(os.path.join(ds_path, 'opca*.npz'))))

    return jsonify({'labels': labels})
Example #24
def getInfo():
    ds_path = request.form.get('path')

    if os.path.splitext(ds_path)[-1] == '.sima':
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return jsonify(error='dataset not found')

        seq = next(iter(ds))
    else:
        try:
            seq = Sequence.create('HDF5', ds_path, 'tzyxc')
        except IOError:
            return jsonify(error='dataset not found')

    length = len(seq)
    norm_factors = {}
    for channel in xrange(seq.shape[4]):
        norm_factors['channel_' + str(channel)] = []

    for frame_index in [0, int(length / 2), -1]:
        frame = seq._get_frame(frame_index)
        for channel in xrange(seq.shape[4]):
            subframe = frame[:, :, :, channel]
            if np.any(np.isfinite(subframe)):
                factor = np.percentile(
                    subframe[np.where(np.isfinite(subframe))], 98)
                if np.isfinite(factor):
                    norm_factors['channel_' + str(channel)] += [factor]

    json = {
        'planes': range(int(seq.shape[1] + 1)),
        'height': int(seq.shape[2]),
        'width': int(seq.shape[3]),
        'length': length
    }

    for channel in norm_factors.keys():
        json[channel] = max(1, int(np.nanmean(norm_factors[channel])))

    return jsonify(**json)
Example #25
def getRois():
    ds_path = request.form.get('path')
    label = request.form.get('label')

    dataset = ImagingDataset.load(ds_path)
    convertedRois = {}
    rois = ROIList.load(os.path.join(dataset.savedir, 'rois.pkl'), label=label)

    for i, roi in enumerate(rois):
        if roi.label is None:
            roi.label = i
        convertedRois[roi.label] = {}
        for poly in roi.polygons:
            coords = np.array(poly.exterior.coords)
            plane = int(coords[0, -1])
            #coords = list(coords[:,:2].ravel())
            coords = coords[:, :2].tolist()
            try:
                convertedRois[roi.label][plane].append(coords)
            except KeyError:
                convertedRois[roi.label][plane] = [coords]

    return jsonify(**convertedRois)
Example #26
def getRois():
    ds_path = request.form.get("path")
    label = request.form.get("label")

    dataset = ImagingDataset.load(ds_path)
    convertedRois = {}
    rois = ROIList.load(os.path.join(dataset.savedir, "rois.pkl"), label=label)

    for i, roi in enumerate(rois):
        if roi.label is None:
            roi.label = i
        convertedRois[roi.label] = {}
        for poly in roi.polygons:
            coords = np.array(poly.exterior.coords)
            plane = int(coords[0, -1])
            # coords = list(coords[:,:2].ravel())
            coords = coords[:, :2].tolist()
            try:
                convertedRois[roi.label][plane].append(coords)
            except KeyError:
                convertedRois[roi.label][plane] = [coords]

    return jsonify(**convertedRois)
Example #27
def updateRoi():
    ds_path = request.form.get('path')
    label = request.form.get('label')
    points = json.loads(request.form.get('points'))
    roi_label = request.form.get('roiLabel')
    roi_id = request.form.get('roiId')

    dataset = ImagingDataset.load(ds_path)
    roi_data = []
    for i, plane in enumerate(points):
        if plane is None or not len(plane):
            continue
        array_dat = np.array(plane)
        z_dims = i * np.ones((array_dat.shape[:2] + (1, )))
        plane_data = np.concatenate((array_dat, z_dims), axis=2)
        roi_data.extend(list(plane_data))

    if len(roi_data) == 0:
        return jsonify(result="no polygons to save")

    for poly in roi_data:
        if poly.shape[0] < 3:
            raise Exception("unable to store polygon with less then 3 points")
    roi = ROI(polygons=roi_data, im_shape=dataset.frame_shape[:3])

    roi.label = roi_label
    roi.id = roi_id
    try:
        rois = dataset.ROIs[label]
    except KeyError:
        rois = []

    rois = filter(lambda r: r.id != roi_id, rois)
    rois.append(roi)
    dataset.add_ROIs(ROIList(rois), label=label)

    return jsonify(result='success')
Example #28
def getInfo():
    ds_path = request.form.get("path")

    if os.path.splitext(ds_path)[-1] == ".sima":
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return jsonify(error="dataset not found")

        seq = next(iter(ds))
    else:
        try:
            seq = Sequence.create("HDF5", ds_path, "tzyxc")
        except IOError:
            return jsonify(error="dataset not found")

    length = len(seq)
    norm_factors = {}
    for channel in xrange(seq.shape[4]):
        norm_factors["channel_" + str(channel)] = []

    for frame_index in [0, int(length / 2), -1]:
        frame = seq._get_frame(frame_index)
        for channel in xrange(seq.shape[4]):
            subframe = frame[:, :, :, channel]
            if np.any(np.isfinite(subframe)):
                factor = np.percentile(subframe[np.where(np.isfinite(subframe))], 98)
                if np.isfinite(factor):
                    norm_factors["channel_" + str(channel)] += [factor]

    json = {"planes": range(seq.shape[1] + 1), "height": seq.shape[2], "width": seq.shape[3], "max": length}

    for channel in norm_factors.keys():
        json[channel] = max(1, int(np.nanmean(norm_factors[channel])))

    return jsonify(**json)
Example #29
def setRoiLabel():
    ds_path = request.form.get('path')
    old_label = ''  # the oldLabel form field is not used in this variant
    new_label = request.form.get('newLabel')

    if new_label == '':
        new_label = 'rois'

    dataset = ImagingDataset.load(ds_path)
    if old_label != '':
        rois = dataset.ROIs[old_label]
    else:
        rois = ROIList([])
    dataset.add_ROIs(rois, label=new_label)

    labels = dataset.ROIs.keys()

    labels.extend(
        map(os.path.basename, glob.glob(os.path.join(ds_path, 'ica*.npz'))))
    labels.extend(
        map(os.path.basename, glob.glob(os.path.join(ds_path, 'opca*.npz'))))

    return jsonify({'labels': labels})
Example #30
def test_PlaneNormalizedCuts():
    ds = ImagingDataset.load(example_data())[:, :, :, :50, :50]
    affinity_method = segment.BasicAffinityMatrix(num_pcs=5)
    method = segment.PlaneWiseSegmentation(
        segment.PlaneNormalizedCuts(affinity_method))
    ds.segment(method)
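The subscript on the loaded dataset uses ImagingDataset's numpy-style slicing over the axes (sequence, frame, z, y, x); here it crops each frame to a 50x50 corner so the test runs quickly. The same pattern in isolation:

ds = ImagingDataset.load(example_data())
small = ds[:, :, :, :50, :50]  # all sequences, frames, and planes; first 50 rows/cols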
Example #31
def getRoiList():
    ds_path = request.form.get("path")
    label = request.form.get("label")

    dataset = ImagingDataset.load(ds_path)
    rois = dataset.ROIs[label]
Example #32
File: sara.py  Project: nk53/SARA
    def _plotROIs(self,
                  save_to=None,
                  warn=False,
                  draw=False,
                  fig=None,
                  ax=None,
                  lines=None,
                  ax_image=None,
                  bleft=None,
                  bright=None):
        """Plots ROIs against a background image with an applied rotation/flip
    
    Flipping is always performed first, then rotation. Rotation is in
    degrees clockwise, and must be a multiple of 90.
    
    """
        def transform_generator(t, args):
            """Returns a callback function to perform a transformation"""
            def transform(event):
                if t == 'left':
                    self._rotation -= 90
                elif t == 'right':
                    self._rotation += 90
                elif t == 'hflip':
                    self._hflip = not self._hflip
                elif t == 'vflip':
                    self._vflip = not self._vflip
                else:
                    assert False, "Incorrect transformation: {0}".format(t)
                self._plotROIs(**args)

            return transform

        if not draw:
            fig, ax = plt.subplots()
            plt.subplots_adjust(bottom=0.2)

        # make rotation fall in the range: [0, 360)
        while self._rotation < 0:
            self._rotation += 360
        while self._rotation >= 360:
            self._rotation -= 360

        # get list of ROIs
        if self.dataset is None:
            self.dataset = ImagingDataset.load(self.sima_dir)
        if self.rois is None:
            self.rois = self.dataset.ROIs['stICA ROIs']

        # prepare background image
        # TODO: does this step work for multi-channel inputs?
        imdata = self.dataset.time_averages[0, ..., -1]
        # Perform flips
        if self._hflip:
            imdata = fliplr(imdata)
        if self._vflip:
            imdata = flipud(imdata)
        # Perform rotation
        if self._rotation:
            k = self._rotation / 90
            imdata = rot90(imdata, k)

        #image_width, image_height = image.size
        image_width, image_height = imdata.shape
        ax.set_xlim(xmin=0, xmax=image_width)
        ax.set_ylim(ymin=0, ymax=image_height)
        if draw:
            ax_image.set_data(imdata)
            ax_image.set_cmap('gray')

        else:
            ax_image = ax.imshow(imdata, cmap='gray')

        # plot all of the ROIs, warn user if an ROI has internal loops
        for roi in self.rois:
            coords = roi.coords
            rid = roi.id
            if warn and len(coords) > 1:
                print "Warning: Roi%s has >1 coordinate set" % rid
            x = coords[0][:, 0]
            y = coords[0][:, 1]
            # transform x and y
            x, y = self._rotateFlipXY(x, y, image_height, image_width,
                                      self._rotation, self._hflip, self._vflip)
            if save_to is None:
                if draw:
                    lines[rid].set_data(x, y)
                else:
                    lines[rid], = plt.plot(x,
                                           y,
                                           picker=line_picker_generator(rid))
            else:
                plt.plot(x, y)

        # build options for callback
        args = {
            'save_to': save_to,
            'warn': warn,
            'draw': True,
            'fig': fig,
            'ax': ax,
            'lines': lines,
            'ax_image': ax_image,
            'bleft': bleft,
            'bright': bright,
        }

        # create buttons
        if not draw:
            axhflip = plt.axes([0.15, 0.05, 0.17, 0.075])
            axvflip = plt.axes([0.33, 0.05, 0.17, 0.075])
            axleft = plt.axes([0.51, 0.05, 0.17, 0.075])
            axright = plt.axes([0.69, 0.05, 0.17, 0.075])
            bhflip = Button(axhflip, 'Flip Horizontally')
            bvflip = Button(axvflip, 'Flip Vertically')
            bleft = Button(axleft, 'Rotate Left')
            bright = Button(axright, 'Rotate Right')
            # click handlers
            bhflip.on_clicked(transform_generator('hflip', args))
            bvflip.on_clicked(transform_generator('vflip', args))
            bleft.on_clicked(transform_generator('left', args))
            bright.on_clicked(transform_generator('right', args))

        if save_to is not None:
            plt.savefig(save_to)
        else:
            if draw:
                plt.draw()
            else:
                plt.gcf().canvas.mpl_connect('pick_event', onpick)
                plt.show()
Example #33
File: convert.py  Project: zhounapeuw/sima
def _load_version0(path):
    """Returns a v1 dataset converted from a v0 dataset

    Parameters
    ----------
    path : str
        The path (ending in .sima) of the version 0.x dataset.

    Examples
    --------

    >>> from sima.misc import example_data
    >>> from sima.misc.convert import _load_version0
    >>> ds = _load_version0(example_data())
    """
    def parse_channel(channel):
        """Parse an old format channel stored a dictionary

        Parameters
        ----------
        channel : dict

        Returns
        -------
        result : sima.Sequence
            A sequence equivalent to the old format channel.
        """
        _resolve_paths(channel, path)
        klass = channel.pop('__class__')
        if klass == 'sima.iterables.MultiPageTIFF':
            result = Sequence.create('TIFF', channel['path'])
            try:
                clip = channel['clip']
            except KeyError:
                clip = None
            if clip is not None:
                s = (slice(None), slice(None)) + tuple(
                    slice(*[None if x == 0 else x for x in dim])
                    for dim in clip)
                result = result[s]
        elif klass == 'sima.iterables.HDF5':
            result = Sequence.create('HDF5', channel['path'],
                                     channel['dim_order'], channel['group'],
                                     channel['key'])
            c = channel['dim_order'].index('c')
            chan = channel['channel']
            s = tuple([
                slice(None) if x != c else slice(chan, chan + 1)
                for x in range(len(channel['dim_order']))
            ])
            result = result[s]
            try:
                clip = channel['clip']
            except KeyError:
                clip = None
            if clip is not None:
                s = (slice(None), slice(None)) + tuple(
                    slice(*[None if x == 0 else x for x in dim])
                    for dim in clip) + (slice(None), )
                result = result[s]
        else:
            raise Exception('Format not recognized.')
        return result

    def parse_sequence(sequence):
        channels = [parse_channel(c) for c in sequence]
        return Sequence.join(*channels)

    with open(os.path.join(path, 'dataset.pkl'), 'rb') as f:
        unpickler = Unpickler(f)
        dataset_dict = unpickler.load()
    iterables = dataset_dict.pop('iterables')
    sequences = [parse_sequence(seq) for seq in iterables]

    # Apply displacements if they exist
    try:
        with open(os.path.join(path, 'displacements.pkl'), 'rb') as f:
            displacements = pkl.load(f)
    except IOError:
        pass
    else:
        assert all(np.all(d >= 0) for d in displacements)
        max_disp = np.nanmax(
            [np.nanmax(d.reshape(-1, d.shape[-1]), 0) for d in displacements],
            0)
        frame_shape = np.array(sequences[0].shape)[1:-1]  # z, y, x
        frame_shape[1:3] += max_disp
        sequences = [
            s.apply_displacements(d.reshape(s.shape[:3] + (2, )), frame_shape)
            for s, d in zip(sequences, displacements)
        ]
        try:
            trim_coords = dataset_dict.pop('_lazy__trim_coords')
        except KeyError:
            try:
                trim_criterion = dataset_dict.pop('trim_criterion')
            except KeyError:
                pass
            else:
                raise Exception('Parsing of trim_criterion ' +
                                str(trim_criterion) + ' not yet implemented')
        else:
            sequences = [
                s[:, :, trim_coords[0][0]:trim_coords[1][0],
                  trim_coords[0][1]:trim_coords[1][1]] for s in sequences
            ]
    ds = ImagingDataset(sequences, None)
    # Not making it read-only. If you set a savedir, you'll be asked about
    # overwriting it then
    ds._channel_names = [str(n) for n in dataset_dict.pop('channel_names')]
    ds._savedir = path
    return ds
Example #34
File: test_imaging.py  Project: csn92/sima
class TestImagingDataset(object):
    def setup(self):
        global tmp_dir

        path = example_hdf5()
        seq = Sequence.create('HDF5', path, 'yxt')
        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.ds = ImagingDataset([seq, seq], self.filepath)

    def teardown(self):
        shutil.rmtree(self.filepath)

    def test_time_averages(self):
        averages = self.ds.time_averages
        assert_equal(self.ds.frame_shape, averages.shape)
        # Check it twice, since second time should load from a saved pkl
        averages2 = self.ds.time_averages
        assert_equal(self.ds.frame_shape, averages2.shape)

    def test_export_averages_tiff16(self):
        time_avg_path = os.path.join(self.filepath, 'time_avg_Ch2.tif')
        self.ds.export_averages([time_avg_path],
                                fmt='TIFF16',
                                scale_values=False)
        assert_equal(self.ds.time_averages[0, ..., 0].astype('uint16'),
                     np.array(Image.open(time_avg_path)))

    def test_export_averages_tiff8(self):
        time_avg_path = os.path.join(self.filepath, 'time_avg_Ch2.tif')
        self.ds.export_averages([time_avg_path],
                                fmt='TIFF8',
                                scale_values=False)
        assert_equal(self.ds.time_averages[0, ..., 0].astype('uint8'),
                     np.array(Image.open(time_avg_path)))

    def test_export_averages_hdf5(self):
        time_avg_path = os.path.join(self.filepath, 'time_avg.h5')
        self.ds.export_averages(time_avg_path, fmt='HDF5', scale_values=False)

        h5_time_avg = h5py.File(time_avg_path, 'r')['time_average']
        assert_equal(self.ds.time_averages.astype('uint16'), h5_time_avg)
        assert_equal(self.ds.channel_names, h5_time_avg.attrs['channel_names'])
        dim_labels = [dim.label for dim in h5_time_avg.dims]
        assert_equal(['z', 'y', 'x', 'c'], dim_labels)

    def test_add_and_delete_rois(self):
        rois = ROI.ROIList.load(example_imagej_rois(), fmt='ImageJ')
        self.ds.add_ROIs(rois, 'rois')
        assert_equal(len(self.ds.ROIs), 1)

        self.ds.add_ROIs(rois, 'rois2')
        assert_equal(len(self.ds.ROIs), 2)

        assert_equal(sorted(self.ds.ROIs.keys()), ['rois', 'rois2'])
        assert_equal(len(self.ds.ROIs['rois']), 2)

        # This should quietly do nothing
        self.ds.delete_ROIs('foo')

        self.ds.delete_ROIs('rois')
        assert_equal(len(self.ds.ROIs), 1)
        self.ds.delete_ROIs('rois2')
        assert_equal(len(self.ds.ROIs), 0)

        # This should quietly do nothing
        self.ds.delete_ROIs('foo')

    def test_rois(self):
        assert_equal(len(self.ds.ROIs), 0)
Example #35
    def load_saved_tiffs_dataset(self):
        tiff_ds = ImagingDataset.load(self.filepath_tiffs)
        assert_equal(tiff_ds.sequences[0].shape, (3, 4, 173, 173, 2))
Example #36
def getFrames():
    ds_path = request.form.get('path')
    requestFrames = request.form.getlist('frames[]', type=int)
    normingVal = request.form.getlist('normingVal[]', type=float)
    sequenceId = request.form.get('sequenceId')
    channel = request.form.get('channel')
    planes = request.form.getlist('planes[]', type=int)
    cycle = request.form.get('cycle', type=int)

    if planes is None:
        planes = [0]

    quality = 40
    if channel == 'overlay':
        channel = None

    ds = None
    if os.path.splitext(ds_path)[-1] == '.sima':
        ds = ImagingDataset.load(ds_path)
        seq = ds.sequences[cycle]
        channel = ds._resolve_channel(channel)
    else:
        seq = Sequence.create('HDF5', ds_path, 'tzyxc')
        if channel:
            channel = int(channel.split('_')[-1])

    end = False
    frames = {}
    for frame_number in requestFrames:
        norming_val = normingVal[:]
        if frame_number > len(seq) - 1 or frame_number < -1:
            end = True
            continue
        elif frame_number == -1 and ds is not None:
            try:
                with open(os.path.join(ds.savedir,
                                       'time_averages.pkl'), 'rb') as f:
                    time_averages = pickle.load(f)
                if not isinstance(time_averages, np.ndarray):
                    raise Exception('no time average')
            except Exception:
                vol = seq._get_frame(0)
            else:
                vol = ds.time_averages
                for ch in xrange(vol.shape[3]):
                    subframe = vol[:, :, :, ch]
                    factor = np.percentile(
                        subframe[np.where(np.isfinite(subframe))], 99)
                    if np.isfinite(factor):
                        norming_val[ch] = factor
        else:
            vol = seq._get_frame(frame_number)

        if channel is not None:
            vol = vol[:, :, :, channel]
            vol /= ((norming_val[channel]) / 255)
            vol = np.clip(vol, 0, 255)
        else:
            vol = np.hstack((vol[:, :, :, 0] / norming_val[0],
                             vol[:, :, :, 1] / norming_val[1]))
            vol *= 255
        frames['frame_' + str(frame_number)] = {}

        for plane in planes:
            if plane == 0:
                zsurf = np.nanmean(vol, axis=0)
            else:
                zsurf = vol[plane - 1, :, :]

            if plane == 0:
                ysurf = np.nanmean(vol, axis=1)
            else:
                ysurf = np.zeros((vol.shape[0], vol.shape[2]))
                ysurf[plane - 1, :] = np.nanmean(zsurf, axis=0)

            if plane == 0:
                xsurf = np.nanmean(vol, axis=2).T
            else:
                xsurf = np.zeros((vol.shape[1], vol.shape[0]))
                xsurf[:, plane - 1] = np.nanmean(zsurf, axis=1).T

            frames['frame_' + str(frame_number)][plane] = {
                'z': convertToB64Jpeg(zsurf.astype('uint8'), quality=quality),
                'y': convertToB64Jpeg(ysurf.astype('uint8'), quality=quality),
                'x': convertToB64Jpeg(xsurf.astype('uint8'), quality=quality)
            }

    return jsonify(end=end, sequenceId=sequenceId, **frames)
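The per-plane branches above reduce the (z, y, x) volume to three orthogonal mean projections for the viewer's panels. The core of that, as a standalone numpy sketch with a stand-in volume:

import numpy as np

vol = np.random.rand(4, 128, 256)  # (z, y, x) stand-in volume
zsurf = np.nanmean(vol, axis=0)    # top-down view, shape (y, x)
ysurf = np.nanmean(vol, axis=1)    # front view, shape (z, x)
xsurf = np.nanmean(vol, axis=2).T  # side view, shape (y, z) after transpose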
Example #37
def test_PlaneCA1PC():
    ds = ImagingDataset.load(example_data())[:, :, :, :50, :50]
    method = segment.PlaneCA1PC(num_pcs=5)
    ds.segment(method)
Example #38
def _load_version0(path):
    """Returns a v1 dataset converted from a v0 dataset

    Parameters
    ----------
    path : str
        The path (ending in .sima) of the version 0.x dataset.

    Examples
    --------

    >>> from sima.misc import example_data
    >>> from sima.misc.convert import _load_version0
    >>> ds = _load_version0(example_data())
    """

    def parse_channel(channel):
        """Parse an old format channel stored a dictionary

        Parameters
        ----------
        channel : dict

        Returns
        -------
        result : sima.Sequence
            A sequence equivalent to the old format channel.
        """
        _resolve_paths(channel, path)
        klass = channel.pop('__class__')
        if klass == 'sima.iterables.MultiPageTIFF':
            result = Sequence.create('TIFF', channel['path'])
            try:
                clip = channel['clip']
            except KeyError:
                clip = None
            if clip is not None:
                s = (slice(None), slice(None)) + tuple(
                    slice(*[None if x == 0 else x for x in dim])
                    for dim in clip)
                result = result[s]
        elif klass == 'sima.iterables.HDF5':
            result = Sequence.create(
                'HDF5', channel['path'], channel['dim_order'],
                channel['group'], channel['key'])
            c = channel['dim_order'].index('c')
            chan = channel['channel']
            s = tuple([slice(None) if x != c else slice(chan, chan + 1)
                       for x in range(len(channel['dim_order']))])
            result = result[s]
            try:
                clip = channel['clip']
            except KeyError:
                clip = None
            if clip is not None:
                s = (slice(None), slice(None)) + tuple(
                    slice(*[None if x == 0 else x for x in dim])
                    for dim in clip) + (slice(None),)
                result = result[s]
        else:
            raise Exception('Format not recognized.')
        return result

    def parse_sequence(sequence):
        channels = [parse_channel(c) for c in sequence]
        return Sequence.join(*channels)

    with open(os.path.join(path, 'dataset.pkl'), 'rb') as f:
        unpickler = Unpickler(f)
        dataset_dict = unpickler.load()
    iterables = dataset_dict.pop('iterables')
    sequences = [parse_sequence(seq) for seq in iterables]

    # Apply displacements if they exist
    try:
        with open(os.path.join(path, 'displacements.pkl'), 'rb') as f:
            displacements = pkl.load(f)
    except IOError:
        pass
    else:
        assert all(np.all(d >= 0) for d in displacements)
        max_disp = np.nanmax([np.nanmax(d.reshape(-1, d.shape[-1]), 0)
                              for d in displacements], 0)
        frame_shape = np.array(sequences[0].shape)[1:-1]  # z, y, x
        frame_shape[1:3] += max_disp
        sequences = [
            s.apply_displacements(d.reshape(s.shape[:3] + (2,)), frame_shape)
            for s, d in zip(sequences, displacements)]
        try:
            trim_coords = dataset_dict.pop('_lazy__trim_coords')
        except KeyError:
            try:
                trim_criterion = dataset_dict.pop('trim_criterion')
            except KeyError:
                pass
            else:
                raise Exception(
                    'Parsing of trim_criterion ' + str(trim_criterion) +
                    ' not yet implemented')
        else:
            sequences = [s[:, :, trim_coords[0][0]:trim_coords[1][0],
                           trim_coords[0][1]:trim_coords[1][1]]
                         for s in sequences]
    ds = ImagingDataset(sequences, None)
    # Not making it read-only. If you set a savedir, you'll be asked about
    # overwriting it then
    ds._channel_names = [str(n) for n in dataset_dict.pop('channel_names')]
    ds._savedir = path
    return ds
Example #39
    def setup(self):
        global tmp_dir

        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.tiff_ds = ImagingDataset(
            [Sequence.create('TIFF', example_tiff(), 1, 1)], self.filepath)
Example #40
class TestImagingDataset(object):

    def setup(self):
        global tmp_dir

        path = example_hdf5()
        seq = Sequence.create('HDF5', path, 'yxt')
        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.ds = ImagingDataset([seq, seq], self.filepath)
        self.rois = ROI.ROIList.load(example_imagej_rois(), fmt='ImageJ')

        self.filepath_tiffs = os.path.join(tmp_dir, "test_dataset_tiffs.sima")
        seq = Sequence.create(
            'TIFFs', [[example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()]])
        self.ds_tiffs = ImagingDataset([seq, seq], self.filepath_tiffs)

    def teardown(self):
        shutil.rmtree(self.filepath)
        shutil.rmtree(self.filepath_tiffs)

    def load_saved_tiffs_dataset(self):
        tiff_ds = ImagingDataset.load(self.filepath_tiffs)
        assert_equal(tiff_ds.sequences[0].shape, (3, 4, 173, 173, 2))

    def test_time_averages(self):
        averages = self.ds.time_averages
        assert_equal(self.ds.frame_shape, averages.shape)
        # Check it twice, since second time should load from a saved pkl
        averages2 = self.ds.time_averages
        assert_equal(self.ds.frame_shape, averages2.shape)

    def test_export_averages_tiff16(self):
        time_avg_path = os.path.join(self.filepath, 'time_avg_Ch2.tif')
        self.ds.export_averages(
            [time_avg_path], fmt='TIFF16', scale_values=False)
        assert_equal(self.ds.time_averages[0, ..., 0].astype('uint16'),
                     np.array(Image.open(time_avg_path)))

    def test_export_averages_tiff8(self):
        time_avg_path = os.path.join(self.filepath, 'time_avg_Ch2.tif')
        self.ds.export_averages(
            [time_avg_path], fmt='TIFF8', scale_values=False)
        assert_equal(self.ds.time_averages[0, ..., 0].astype('uint8'),
                     np.array(Image.open(time_avg_path)))

    def test_export_averages_hdf5(self):
        time_avg_path = os.path.join(self.filepath, 'time_avg.h5')
        self.ds.export_averages(time_avg_path, fmt='HDF5', scale_values=False)

        h5_time_avg = h5py.File(time_avg_path, 'r')['time_average']
        assert_equal(self.ds.time_averages.astype('uint16'), h5_time_avg)
        assert_equal(np.string_(self.ds.channel_names),
                     np.string_(h5_time_avg.attrs['channel_names']))
        dim_labels = [dim.label for dim in h5_time_avg.dims]
        assert_equal(['z', 'y', 'x', 'c'], dim_labels)

    def test_add_and_delete_rois(self):
        self.ds.add_ROIs(self.rois, 'rois')
        assert_equal(len(self.ds.ROIs), 1)

        self.ds.add_ROIs(self.rois, 'rois2')
        assert_equal(len(self.ds.ROIs), 2)

        assert_equal(sorted(self.ds.ROIs.keys()), ['rois', 'rois2'])
        assert_equal(len(self.ds.ROIs['rois']), 2)

        # This should quietly do nothing
        self.ds.delete_ROIs('foo')

        self.ds.delete_ROIs('rois')
        assert_equal(len(self.ds.ROIs), 1)
        self.ds.delete_ROIs('rois2')
        assert_equal(len(self.ds.ROIs), 0)

        # This should quietly do nothing
        self.ds.delete_ROIs('foo')

    def test_rois(self):
        assert_equal(len(self.ds.ROIs), 0)

    def test_extract(self):
        extracted = self.ds.extract(self.rois, label='rois')

        assert_equal(len(self.ds.signals()), 1)
        assert_equal(extracted['raw'], self.ds.signals()['rois']['raw'])
        assert_equal(len(extracted['raw']), 2)
        assert_equal(len(extracted['raw'][0]), 2)

    @dec.skipif(not _has_picos)
    def test_infer_spikes(self):
        self.ds.extract(self.rois, label='rois')
        spikes, fits, parameters = self.ds.infer_spikes()
        signals = self.ds.signals()['rois']

        assert_equal(signals['spikes'], spikes)
        assert_equal(signals['spikes_fits'], fits)
        # assert_equal(signals['spikes_params'], parameters)

        assert_equal(len(spikes), 2)
        assert_equal(len(fits), 2)
        assert_equal(len(parameters), 2)

        assert_equal(spikes[0].shape, (2, 20))
        assert_equal(fits[0].shape, (2, 20))
Example #41
def test_PlaneCA1PC():
    ds = ImagingDataset.load(example_data())[:, :, :, :50, :50]
    method = segment.PlaneCA1PC(num_pcs=5)
    ds.segment(method)
Example #42
class TestImagingDataset(object):
    def setup(self):
        global tmp_dir

        path = example_hdf5()
        seq = Sequence.create('HDF5', path, 'yxt')
        self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
        self.ds = ImagingDataset([seq, seq], self.filepath)
        self.rois = ROI.ROIList.load(example_imagej_rois(), fmt='ImageJ')

        self.filepath_tiffs = os.path.join(tmp_dir, "test_dataset_tiffs.sima")
        seq = Sequence.create(
            'TIFFs', [[example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()],
                      [example_tiffs(), example_tiffs()]])
        self.ds_tiffs = ImagingDataset([seq, seq], self.filepath_tiffs)

    def teardown(self):
        shutil.rmtree(self.filepath)
        shutil.rmtree(self.filepath_tiffs)

    def load_saved_tiffs_dataset(self):
        tiff_ds = ImagingDataset.load(self.filepath_tiffs)
        assert_equal(tiff_ds.sequences[0].shape, (3, 4, 173, 173, 2))

    def test_time_averages(self):
        averages = self.ds.time_averages
        assert_equal(self.ds.frame_shape, averages.shape)
        # Check it twice, since second time should load from a saved pkl
        averages2 = self.ds.time_averages
        assert_equal(self.ds.frame_shape, averages2.shape)

    def test_time_std(self):
        std = self.ds.time_std
        assert_equal(self.ds.frame_shape, std.shape)
        # Check it twice, since second time should load from a saved pkl
        std2 = self.ds.time_std
        assert_equal(self.ds.frame_shape, std2.shape)

    def test_time_kurtosis(self):
        kurtosis = self.ds.time_kurtosis
        assert_equal(self.ds.frame_shape, kurtosis.shape)
        # Check it twice, since second time should load from a saved pkl
        kurtosis2 = self.ds.time_kurtosis
        assert_equal(self.ds.frame_shape, kurtosis2.shape)

    def test_export_averages_tiff16(self):
        time_avg_path = os.path.join(self.filepath, 'time_avg_Ch2.tif')
        self.ds.export_averages([time_avg_path],
                                fmt='TIFF16',
                                scale_values=False)
        assert_equal(self.ds.time_averages[0, ..., 0].astype('uint16'),
                     np.array(Image.open(time_avg_path)))

    def test_export_averages_tiff8(self):
        time_avg_path = os.path.join(self.filepath, 'time_avg_Ch2.tif')
        self.ds.export_averages([time_avg_path],
                                fmt='TIFF8',
                                scale_values=False)
        assert_equal(self.ds.time_averages[0, ..., 0].astype('uint8'),
                     np.array(Image.open(time_avg_path)))

    def test_export_averages_hdf5(self):
        time_avg_path = os.path.join(self.filepath, 'time_avg.h5')
        self.ds.export_averages(time_avg_path, fmt='HDF5', scale_values=False)

        h5_time_avg = h5py.File(time_avg_path, 'r')['time_average']
        assert_equal(self.ds.time_averages.astype('uint16'), h5_time_avg)
        assert_equal(np.string_(self.ds.channel_names),
                     np.string_(h5_time_avg.attrs['channel_names']))
        dim_labels = [dim.label for dim in h5_time_avg.dims]
        assert_equal(['z', 'y', 'x', 'c'], dim_labels)

    def test_add_and_delete_rois(self):
        self.ds.add_ROIs(self.rois, 'rois')
        assert_equal(len(self.ds.ROIs), 1)

        self.ds.add_ROIs(self.rois, 'rois2')
        assert_equal(len(self.ds.ROIs), 2)

        assert_equal(sorted(self.ds.ROIs.keys()), ['rois', 'rois2'])
        assert_equal(len(self.ds.ROIs['rois']), 2)

        # This should quietly do nothing
        self.ds.delete_ROIs('foo')

        self.ds.delete_ROIs('rois')
        assert_equal(len(self.ds.ROIs), 1)
        self.ds.delete_ROIs('rois2')
        assert_equal(len(self.ds.ROIs), 0)

        # This should quietly do nothing
        self.ds.delete_ROIs('foo')

    def test_rois(self):
        assert_equal(len(self.ds.ROIs), 0)

    def test_extract(self):
        extracted = self.ds.extract(self.rois, label='rois')

        assert_equal(len(self.ds.signals()), 1)
        assert_equal(extracted['raw'], self.ds.signals()['rois']['raw'])
        assert_equal(len(extracted['raw']), 2)
        assert_equal(len(extracted['raw'][0]), 2)

    # @dec.skipif(not _has_picos)
    @dec.knownfailureif(True)  # infer_spikes is crashing w/o mosek
    def test_infer_spikes(self):
        self.ds.extract(self.rois, label='rois')
        spikes, fits, parameters = self.ds.infer_spikes()
        signals = self.ds.signals()['rois']

        assert_equal(signals['spikes'], spikes)
        assert_equal(signals['spikes_fits'], fits)
        # assert_equal(signals['spikes_params'], parameters)

        assert_equal(len(spikes), 2)
        assert_equal(len(fits), 2)
        assert_equal(len(parameters), 2)

        assert_equal(spikes[0].shape, (2, 20))
        assert_equal(fits[0].shape, (2, 20))
Example #43
def test_PlaneSTICA():
    ds = ImagingDataset.load(example_data())
    method = segment.PlaneSTICA(components=5)
    ds.segment(method)
Example #44
def getFrames():
    ds_path = request.form.get("path")
    requestFrames = request.form.getlist("frames[]", type=int)
    normingVal = request.form.getlist("normingVal[]", type=float)
    sequenceId = request.form.get("sequenceId")
    channel = request.form.get("channel")
    planes = request.form.getlist("planes[]", type=int)
    cycle = request.form.get("cycle", type=int)

    if planes is None:
        planes = [0]

    quality = 40
    if channel == "overlay":
        channel = None

    ds = None
    if os.path.splitext(ds_path)[-1] == ".sima":
        ds = ImagingDataset.load(ds_path)
        seq = ds.sequences[cycle]
        channel = ds._resolve_channel(channel)
    else:
        seq = Sequence.create("HDF5", ds_path, "tzyxc")
        if channel:
            channel = int(channel.split("_")[-1])

    end = False
    frames = {}
    for frame_number in requestFrames:
        norming_val = normingVal[:]
        if frame_number > len(seq) - 1 or frame_number < -1:
            end = True
            continue
        elif frame_number == -1 and ds is not None:
            try:
                with open(os.path.join(ds.savedir, "time_averages.pkl"),
                          "rb") as f:
                    time_averages = pickle.load(f)
                if not isinstance(time_averages, np.ndarray):
                    raise Exception("no time average")
            except Exception:
                vol = seq._get_frame(0)
            else:
                vol = ds.time_averages
                for ch in xrange(vol.shape[3]):
                    subframe = vol[:, :, :, ch]
                    factor = np.percentile(subframe[np.where(np.isfinite(subframe))], 99)
                    if np.isfinite(factor):
                        norming_val[ch] = factor
        else:
            vol = seq._get_frame(frame_number)

        if channel is not None:
            vol = vol[:, :, :, channel]
            vol /= (norming_val[channel]) / 255
            vol = np.clip(vol, 0, 255)
        else:
            vol = np.hstack((vol[:, :, :, 0] / norming_val[0], vol[:, :, :, 1] / norming_val[1]))
            vol *= 255
        frames["frame_" + str(frame_number)] = {}

        for plane in planes:
            if plane == 0:
                zsurf = np.nanmean(vol, axis=0)
            else:
                zsurf = vol[plane - 1, :, :]

            if plane == 0:
                ysurf = np.nanmean(vol, axis=1)
            else:
                ysurf = np.zeros((vol.shape[0], vol.shape[2]))
                ysurf[plane - 1, :] = np.nanmean(zsurf, axis=0)

            if plane == 0:
                xsurf = np.nanmean(vol, axis=2).T
            else:
                xsurf = np.zeros((vol.shape[1], vol.shape[0]))
                xsurf[:, plane - 1] = np.nanmean(zsurf, axis=1).T

            frames["frame_" + str(frame_number)][plane] = {
                "z": convertToB64Jpeg(zsurf.astype("uint8"), quality=quality),
                "y": convertToB64Jpeg(ysurf.astype("uint8"), quality=quality),
                "x": convertToB64Jpeg(xsurf.astype("uint8"), quality=quality),
            }

    return jsonify(end=end, sequenceId=sequenceId, **frames)
Example #45
def test_PlaneNormalizedCuts():
    ds = ImagingDataset.load(example_data())[:, :, :, :50, :50]
    affinity_method = segment.BasicAffinityMatrix(num_pcs=5)
    method = segment.PlaneWiseSegmentation(
        segment.PlaneNormalizedCuts(affinity_method))
    ds.segment(method)
Example #46
def getRoiMasks():
    ds_path = request.form.get('path')
    label = request.form.get('label')
    index = request.form.get('index', type=int)
    overlay = True
    quality = 100

    dataset = ImagingDataset.load(ds_path)
    rois = dataset.ROIs[label]
    num_rois = len(rois)
    if index is not None:
        indices = [index]
    else:
        indices = range(num_rois)
    projectedRois = {}

    if overlay is True:
        vol = np.zeros(list(dataset.frame_shape[:3]) + [3])
        cmap = matplotlib.cm.jet
        norm = matplotlib.colors.Normalize(vmin=0, vmax=num_rois)
        m = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)

        for index in indices:
            color = np.array(m.to_rgba(index))[:-1]
            color /= np.sum(color)
            roiVol = np.array(
                [plane.todense().astype(float) for plane in rois[index].mask])
            mask2 = ma.masked_where(
                np.logical_and(np.sum(vol, axis=-1) > 0, roiVol > 0),
                roiVol).mask
            mask1 = ma.masked_where(
                np.logical_and(np.logical_not(mask2), roiVol > 0), roiVol).mask

            if np.any(mask1):
                vol[mask1] = color

            if np.any(mask2):
                vol[mask2] = vol[mask2] / 2 + color / 2

        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 25)
        vol -= cutoff
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 99)
        vol = vol * 255 / cutoff
        vol = np.clip(vol, 0, 255)

        zsurf = np.nanmean(vol, axis=0)
        ysurf = np.nanmean(vol, axis=1)
        xsurf = np.swapaxes(np.nanmean(vol, axis=2), 0, 1)

        projectedRois['rois'] = {
            'z': convertToColorB64Jpeg(zsurf.astype('uint8'), quality=quality),
            'y': convertToColorB64Jpeg(ysurf.astype('uint8'), quality=quality),
            'x': convertToColorB64Jpeg(xsurf.astype('uint8'), quality=quality)
        }
        return jsonify(num_rois=num_rois, **projectedRois)

    for i, roi in enumerate(rois):
        mask = roi.mask
        vol = np.array([plane.todense().astype(float) for plane in mask])
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 25)
        vol -= cutoff
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 99)
        vol = vol * 255 / cutoff
        vol = np.clip(vol, 0, 255)

        zsurf = np.nanmean(vol, axis=0)
        ysurf = np.nanmean(vol, axis=1)
        xsurf = np.nanmean(vol, axis=2).T

        if roi.label is None:
            roi.label = 'roi_' + str(i)

        projectedRois[roi.label] = {
            'z': convertToB64Jpeg(zsurf.astype('uint8'), quality=quality),
            'y': convertToB64Jpeg(ysurf.astype('uint8'), quality=quality),
            'x': convertToB64Jpeg(xsurf.astype('uint8'), quality=quality)
        }

    return jsonify(**projectedRois)
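The overlay branch above gives each ROI a distinct colormap color, paints voxels unique to one ROI directly, and averages colors where ROIs overlap. A toy sketch of that blend rule (shapes and values here are illustrative only):

import numpy as np
import numpy.ma as ma

vol = np.zeros((2, 2, 3))                 # tiny RGB canvas
vol[0, 0] = [1.0, 0.0, 0.0]               # one pixel already painted red
roi = np.array([[1.0, 1.0], [0.0, 0.0]])  # toy ROI covering the top row
color = np.array([0.0, 1.0, 0.0])         # this ROI's color (green)

overlap = ma.masked_where(np.logical_and(vol.sum(-1) > 0, roi > 0), roi).mask
fresh = ma.masked_where(np.logical_and(np.logical_not(overlap), roi > 0), roi).mask

vol[fresh] = color                           # unique pixels take the ROI color
vol[overlap] = vol[overlap] / 2 + color / 2  # overlapping pixels are averaged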
Example #47
0
File: convert.py Project: j3tsai/sima
def _load_version0(path):
    """Load a SIMA 0.x dataset

    Parameters
    ----------
    path : str
        The path to the original saved dataset, ending in .sima

    Examples
    --------

    >>> from sima.misc import example_data
    >>> from sima.misc.convert import _load_version0
    >>> ds = _load_version0(example_data())

    """

    def parse_channel(channel):
        """Parse an old format channel stored a dictionary

        Parameters
        ----------
        channel : dict

        Returns
        -------
        result : sima.Sequence
            A sequence equivalent to the old format channel.
        """
        _resolve_paths(channel, path)
        klass = channel.pop('__class__')
        if klass == 'sima.iterables.MultiPageTIFF':
            result = Sequence.create('TIFF', channel['path'])
            try:
                clip = channel['clip']
            except KeyError:
                pass
            else:
                if clip is not None:
                    s = (slice(None), slice(None)) + tuple(
                        slice(*[None if x == 0 else x for x in dim])
                        for dim in clip)
                    result = result[s]
            return result

        elif klass == 'sima.iterables.HDF5':
            raise Exception('Conversion of HDF5 iterables is not yet implemented.')
        else:
            raise Exception('Format not recognized.')

    def parse_sequence(sequence):
        channels = [parse_channel(c) for c in sequence]
        return Sequence.join(channels)

    with open(os.path.join(path, 'dataset.pkl'), 'rb') as f:
        unpickler = Unpickler(f)
        dataset_dict = unpickler.load()
    iterables = dataset_dict.pop('iterables')
    sequences = [parse_sequence(seq) for seq in iterables]

    # Apply displacements if they exist
    try:
        with open(os.path.join(path, 'displacements.pkl'), 'rb') as f:
            displacements = pkl.load(f)
    except IOError:
        pass
    else:
        assert all(np.all(d >= 0) for d in displacements)
        max_disp = np.max(list(chain(*displacements)), axis=0)
        frame_shape = np.array(sequences[0].shape)[1:]
        frame_shape[1:3] += max_disp
        sequences = [
            s.apply_displacements(d.reshape(s.shape[:3] + (2,)), frame_shape)
            for s, d in zip(sequences, displacements)]
        try:
            trim_coords = dataset_dict.pop('_lazy__trim_coords')
        except KeyError:
            try:
                trim_criterion = dataset_dict.pop('trim_criterion')
            except KeyError:
                pass
            else:
                raise Exception(
                    'Parsing of trim_criterion ' + str(trim_criterion) +
                    ' not yet implemented')
        else:
            sequences = [s[:, :, trim_coords[0][0]:trim_coords[1][0],
                           trim_coords[0][1]:trim_coords[1][1]]
                         for s in sequences]
    ds = ImagingDataset(sequences, None)
    ds.savedir = path

    # Add ROIs if they exist
    try:
        with open(os.path.join(path, 'rois.pkl'), 'rb') as f:
            rois = pkl.load(f)
    except IOError:
        pass
    else:
        roi_lists = {}
        for label, roi_list_dict in rois.iteritems():
            roi_list = []
            for roi in roi_list_dict['rois']:
                mask = roi['mask']
                polygons = roi['polygons']
                if mask is not None:
                    new_roi = ROI(mask=mask)
                else:
                    new_roi = ROI(polygons=polygons)
                new_roi.id = roi['id']
                new_roi.label = roi['label']
                new_roi.tags = roi['tags']
                new_roi.im_shape = roi['im_shape']

                roi_list.append(new_roi)
            roi_lists[label] = ROIList(roi_list)
            roi_lists[label].timestamp = roi_list_dict['timestamp']

        for label, roi_list in roi_lists.iteritems():
            ds.add_ROIs(roi_list, label=label)
    return ds
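For reference, the `clip` handling in `parse_channel` above maps an old-format pair of (start, stop) bounds per spatial dimension onto slices over the sequence's trailing axes, with 0 becoming an open bound. A minimal sketch with an illustrative clip value:

clip = ((10, 0), (5, -5))  # illustrative ((y_start, y_stop), (x_start, x_stop))
s = (slice(None), slice(None)) + tuple(
    slice(*[None if x == 0 else x for x in dim]) for dim in clip)
# s == (slice(None), slice(None), slice(10, None), slice(5, -5))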
Example #48
0
        "-l", "--target_label", action="store", type=str,
        default="auto_transformed",
        help="Label to give the new transformed ROIs "
        + "(default: auto_transformed)")
    argParser.add_argument(
        "-c", "--channel", action="store", type=str, default="0",
        help="Channel of the datasets used to calculate the affine transform")
    argParser.add_argument(
        "-C", "--copy_properties", action="store_true",
        help="Copy ROI properties ")
    argParser.add_argument(
        "-o", "--overwrite", action="store_true",
        help="If target_label already exists, overwrite")
    args = argParser.parse_args()

    source_dataset = ImagingDataset.load(args.source)
    print "Beginning ROI transforms, source dataset: ", args.source
    print "-----------------------------------------"

    for directory, folders, files in os.walk(args.target):
        if directory.endswith('.sima'):
            try:
                target_dataset = ImagingDataset.load(directory)
            except IOError:
                continue

            if os.path.samefile(args.source, directory):
                continue

            if args.target_label in target_dataset.ROIs and not args.overwrite:
                print "Label already exists, skipping: ", directory
Example #50
0
    def load_saved_tiffs_dataset(self):
        tiff_ds = ImagingDataset.load(self.filepath_tiffs)
        assert_equal(tiff_ds.sequences[0].shape, (3, 4, 173, 173, 2))
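The five elements of the asserted shape follow sima's sequence axis order: (frames, planes, rows, columns, channels).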
Example #51
0
File: resonant.py Project: csn92/sima
def resonant_motion_correction(sequences,
                               save_dir,
                               channel_names,
                               correction_channels,
                               num_states_retained,
                               max_displacement,
                               trim_criterion,
                               dims=2):
    """HMM motion correction of data acquired with resonant scanning.

    Parameters
    ----------
    sequences, save_dir, channel_names, correction_channels,
    num_states_retained, max_displacement, trim_criterion
        See __init__() and correct() methods of HiddenMarkov2D.
    dims : {2, 3}, optional
        Whether to correct for 2- or 3-dimensional displacements.
        Default: 2.

    Returns
    -------
    dataset : sima.ImagingDataset
        The motion-corrected dataset.
    """

    tmp_savedir = save_dir + '.tmp.sima'

    if dims == 2:
        hmm = HiddenMarkov2D(n_processes=4,
                             verbose=False,
                             num_states_retained=num_states_retained,
                             max_displacement=(max_displacement[0] / 2,
                                               max_displacement[1]))
    elif dims == 3:
        hmm = HiddenMarkov3D(
            granularity=(3, 8),
            n_processes=4,
            verbose=False,
            num_states_retained=num_states_retained,
            max_displacement=((max_displacement[0] / 2, ) +
                              max_displacement[1:]),
        )
    else:
        raise ValueError("dims must be 2 or 3")

    # Motion correction of the even rows
    sliced_set = hmm.correct([seq[:, :, ::2] for seq in sequences],
                             tmp_savedir, channel_names, correction_channels)

    displacements = []
    for seq_idx, sequence in enumerate(sequences):
        # Repeat the displacements for all rows and multiply the y-shifts by 2
        disps = sliced_set.sequences[seq_idx]._base.displacements
        disps = np.repeat(disps, 2, axis=2)
        disps[:, :, :, 0] *= 2

        displacements.append(disps)

    displacements = MotionEstimationStrategy._make_nonnegative(displacements)

    disp_dim = displacements[0].shape[-1]
    max_disp = np.max(list(
        it.chain.from_iterable(d.reshape(-1, disp_dim)
                               for d in displacements)),
                      axis=0)
    raw_shape = np.array(sequences[0].shape)[1:-1]  # (z, y, x)

    if len(max_disp) == 2:  # if 2D displacements
        max_disp = np.array([0, max_disp[0], max_disp[1]])

    corrected_shape = raw_shape + max_disp

    corrected_sequences = [
        s.apply_displacements(d, corrected_shape)
        for s, d in zip(sequences, displacements)
    ]

    planes, rows, columns = _trim_coords(trim_criterion, displacements,
                                         raw_shape, corrected_shape)

    corrected_sequences = [
        sequence[:, planes, rows, columns] for sequence in corrected_sequences
    ]

    # Save full corrected dataset and remove tempdir
    imSet = ImagingDataset(corrected_sequences,
                           save_dir,
                           channel_names=channel_names)
    shutil.rmtree(tmp_savedir)

    return imSet
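A hedged usage sketch; the path, dimension string, channel names, and parameter values below are placeholders rather than values from the original project:

seq = Sequence.create('HDF5', '/data/resonant_scan.h5', 'tzyxc')  # illustrative
corrected = resonant_motion_correction(
    [seq], '/data/resonant_corrected.sima',
    channel_names=['green', 'red'],
    correction_channels=['green'],
    num_states_retained=50,
    max_displacement=(20, 30),
    trim_criterion=0.95)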