def deleteRoiSet():
    """Delete an ROI set from a SIMA imaging dataset.

    Form args:
        path: path to the '.sima' dataset directory.
        label: label of the ROI set to remove.

    Returns:
        JSON ``{"result": "success"}``.
    """
    ds_path = request.form.get('path')
    label = request.form.get('label')
    # Load once -- the original loaded the dataset twice redundantly.
    dataset = ImagingDataset.load(ds_path)
    dataset.delete_ROIs(label)
    return jsonify(result='success')
def getRoi():
    """Return the polygon vertices of a single ROI as JSON.

    Form args:
        path: path to the '.sima' dataset directory.
        label: label of the ROI set inside rois.pkl.
        id: id of the ROI to fetch.

    Returns:
        JSON mapping the ROI id to its label and a per-plane list of
        [x, y] integer vertex lists; ``{}`` on any failure.
    """
    ds_path = request.form.get('path')
    label = request.form.get('label')
    roi_id = request.form.get('id')
    dataset = ImagingDataset.load(ds_path)
    try:
        rois = ROIList.load(os.path.join(dataset.savedir, 'rois.pkl'),
                            label=label)
    except Exception:  # was a bare except; missing/corrupt rois.pkl
        return jsonify({})
    for roi in rois:
        if roi.id == roi_id:
            break
    else:
        # BUG FIX: previously fell through and served the *last* ROI in
        # the list whenever no id matched; report an empty result instead.
        return jsonify({})
    roi_points = []
    try:
        # Preferred: one bucket per z-plane from the recorded im_shape.
        for _ in xrange(roi.im_shape[0]):
            roi_points.append([])
    except Exception:
        # Fallback when im_shape is unavailable: infer the plane count
        # from the maximum z coordinate present in the ROI's coords.
        for _ in xrange(np.max(np.array(roi.coords)[:, :, 2])):
            roi_points.append([])
    for poly in roi.polygons:
        coords = np.array(poly.exterior.coords)
        # Drop the closing vertex if the polygon is explicitly closed.
        if np.all(coords[-1] == coords[0]):
            coords = coords[:-1]
        plane = int(coords[0, -1])
        coords = coords[:, :2].astype(int).tolist()
        roi_points[plane].append(coords)
    return jsonify({roi.id: {'label': roi.label, 'points': roi_points}})
def test_STICA():
    """Smoke-test the stICA pipeline with radius-based boundary smoothing."""
    dataset = ImagingDataset.load(example_data())
    strategy = segment.STICA(components=5)
    post_steps = [
        segment.SparseROIsFromMasks(min_size=50),
        segment.SmoothROIBoundaries(radius=3),
        segment.MergeOverlapping(0.5),
    ]
    for step in post_steps:
        strategy.append(step)
    dataset.segment(strategy)
def test_STICA():
    """Smoke-test the stICA pipeline with tolerance-based boundary smoothing."""
    dataset = ImagingDataset.load(example_data())
    strategy = segment.STICA(components=5)
    for step in (segment.SparseROIsFromMasks(min_size=50),
                 segment.SmoothROIBoundaries(tolerance=1, min_verts=8),
                 segment.MergeOverlapping(0.5)):
        strategy.append(step)
    dataset.segment(strategy)
def deleteRoiSet():
    """Remove the ROI set named by the posted 'label' from the dataset."""
    form = request.form
    dataset = ImagingDataset.load(form.get("path"))
    dataset.delete_ROIs(form.get("label"))
    return jsonify(result="success")
def exportSignal(self, outfile=None, use_settings=False):
    """Write ROI signals to a file.

    Uses settings from :data:`signal_radio` or (if *use_settings* is
    True) :data:`settings_file`.

    Args:
        outfile (str, optional): where to store signal; if None or
            omitted, :meth:`.exportSignal` will prompt the user for a
            location
        use_settings (bool, optional): Whether to use the settings
            stored in :data:`settings_file`. If False, user is
            prompted for settings.
    """
    # None means "no frames->time conversion requested" until set below.
    frames_per_second = None
    # initialize dataset and rois
    if self.rois == None:
        if self.dataset == None:
            self.dataset = ImagingDataset.load(self.sima_dir)
        self.rois = self.dataset.ROIs['stICA ROIs']
    # prompt user for export path if it hasn't already been provided
    if outfile == None:
        prompt = "File path to export to: "
        outfile = self.reserveFilePath(prompt)
    # get the frames-per-second conversion factor
    if use_settings and self.settings['signals_format'] == 'time':
        frames_per_second = float(self.settings['frames_per_second'])
    elif self.signal_radio.value == 'time':
        prompt = "Please input the recording's capture rate " + \
            "(frames per second): "
        # NOTE: relies on Python 2 ordering where None <= 0 is True, so
        # the loop is always entered on the first pass.
        while frames_per_second <= 0:
            frames_per_second = self.getFloat(prompt)
            prompt = "The number you entered is not a valid capture rate" + \
                ", please try again: "
    # NOTE(review): placement inferred from flattened source -- the radio
    # widget appears to be closed regardless of branch; confirm upstream.
    self.signal_radio.close()
    # check if we've already extracted a signal
    if self.dataset.signals() == {}:
        print "Extracting signals from ROIs..."
        stdout.flush()  # force print statement to output to IPython
        self.signal = self.dataset.extract(rois=self.rois, label='signal')
        print "Signals extracted"
    else:
        self.signal = self.dataset.signals()['signal']
    self.dataset.export_signals(outfile)
    # do we need to post-process the CSV?
    if frames_per_second != None:
        self._postProcessSignal(outfile, frames_per_second)
    # update settings file unless it's unnecessary
    if not use_settings:
        signal_settings = {
            'signals_file': abspath(outfile),
            'signals_format': self.signal_radio.value,
            'frames_per_second': frames_per_second,
        }
        self._updateSettingsFile(signal_settings)
    print "Signals Exported to", outfile
def locate_datasets(search_directory): """Locates all SIMA directories below 'search_directory'""" for directory, folders, files in os.walk(search_directory): if directory.endswith('.sima'): try: dataset = ImagingDataset.load(directory) except IOError: continue else: yield dataset
def locate_datasets(search_directory): """Locates all SIMA directories below 'search_directory'""" for directory, folders, files in os.walk(search_directory): if directory.endswith('.sima'): try: dataset = ImagingDataset.load(directory) except IOError: continue else: yield dataset
def getCycles(directory):
    """Render a select list with one entry per sequence in a .sima dataset."""
    ds_path = directory.replace(":!", "/")
    if os.path.splitext(ds_path)[-1] != ".sima":
        return ""
    try:
        ds = ImagingDataset.load(ds_path)
    except IOError:
        return ""
    return render_template("select_list.html",
                           options=range(ds.num_sequences))
def __init__(self, path, channel=0, start=0):
    """Set up a vispy canvas showing three orthogonal projections of a volume.

    Args:
        path: '.sima' dataset directory or an HDF5 file ('tzyxc' order).
        channel (int): channel index to display.
        start (int): initial frame index.
    """
    app.Canvas.__init__(self, position=(300, 100), size=(800, 800),
                        keys='interactive')
    # Main (z-projection) quad, upper-left region of clip space.
    self.program = gloo.Program(vertex, fragment)
    self.program['a_position'] = [(-1., -.5, 0.), (-1., +1., 0.),
                                  (+0.5, -.5, 0.), (+0.5, +1, 0.)]
    self.program['a_texcoord'] = [(0., 0.), (0., +1), (+1., 0.), (+1, +1)]
    # Bottom strip: y-projection.
    self.program2 = gloo.Program(vertex, fragment)
    self.program2['a_position'] = [(-1., -1., 0.), (-1., -0.55, 0.),
                                   (+0.5, -1., 0.), (+0.5, -0.55, 0.)]
    self.program2['a_texcoord'] = [(0., 0.), (0., +1.), (+1., 0.), (+1., +1.)]
    # Right strip: x-projection.
    self.program3 = gloo.Program(vertex, fragment)
    self.program3['a_position'] = [(0.55, -0.5, 0.), (0.55, +1., 0.),
                                   (+1., -0.5, 0.), (+1., +1., 0.)]
    self.program3['a_texcoord'] = [(0., 0.), (0., +1.), (+1., 0.), (+1., +1.)]
    if os.path.splitext(path)[-1] == '.sima':
        ds = ImagingDataset.load(path)
        self.sequence = ds.__iter__().next()  # first sequence only
    else:
        self.sequence = Sequence.create('HDF5', path, 'tzyxc')
    self.frame_counter = start
    self.step_size = 1
    self.channel = channel
    self.length = len(self.sequence)
    # Normalize the first frame into [0, 1] for texture upload.
    vol = self.sequence._get_frame(self.frame_counter).astype('float32')
    vol /= NORMING_VAL
    vol = np.clip(vol, 0, 1)
    #surf = np.sum(vol,axis=0)[:,:,channel]/vol.shape[0]
    surf = np.nanmean(vol, axis=0)[:, :, channel]
    self.program['u_texture'] = surf
    #surf2 = np.sum(vol,axis=1)[:,:,channel]/vol.shape[1]
    surf2 = np.nanmean(vol, axis=1)[:, :, channel]
    self.program2['u_texture'] = surf2
    #surf3 = np.fliplr((np.sum(vol,axis=2)[:,:,channel]).T)/vol.shape[2]
    surf3 = np.fliplr((np.nanmean(vol, axis=2)[:, :, channel]).T)
    self.program3['u_texture'] = surf3
    # On-canvas frame counter and step-size readouts.
    self.text = visuals.TextVisual('', font_size=14, color='r',
                                   pos=(700, 700))
    self.text.text = "{} / {}".format(self.frame_counter, self.length)
    self.steptext = visuals.TextVisual('step_size: 1', font_size=10,
                                       color='r', pos=(700, 725))
    self.tr_sys = visuals.transforms.TransformSystem(self)
    # Advance frames every 250 ms.
    self.timer = app.Timer(0.25, connect=self.on_timer, start=True)
def getCycles(directory):
    """Return a rendered sequence-index list for the given .sima path."""
    ds_path = directory.replace(':!', '/')
    is_sima = os.path.splitext(ds_path)[-1] == '.sima'
    if is_sima:
        try:
            options = range(ImagingDataset.load(ds_path).num_sequences)
        except IOError:
            return ''
        return render_template('select_list.html', options=options)
    return ''
def setRoiLabel():
    """Copy an ROI set to a new label and return the updated label list."""
    form = request.form
    ds_path = form.get("path")
    dataset = ImagingDataset.load(ds_path)
    dataset.add_ROIs(dataset.ROIs[form.get("oldLabel")],
                     label=form.get("newLabel"))
    labels = dataset.ROIs.keys()
    for pattern in ("ica*.npz", "opca*.npz"):
        labels.extend(map(os.path.basename,
                          glob.glob(os.path.join(ds_path, pattern))))
    return render_template("select_list.html", options=[""] + labels)
def deleteRoi():
    """Delete a single ROI (by id) from an ROI set and re-save the set.

    Form args:
        path: path to the '.sima' dataset directory.
        label: label of the ROI set.
        roiId: id of the ROI to remove.

    Returns:
        JSON status; a failure message when the label does not exist.
    """
    ds_path = request.form.get('path')
    label = request.form.get('label')
    roi_id = request.form.get('roiId')
    dataset = ImagingDataset.load(ds_path)
    try:
        rois = dataset.ROIs[label]
    except KeyError:
        # Typo fixed: "failed to located" -> "failed to locate".
        return jsonify(result='failed to locate ROI List')
    # Keep every ROI except the one being deleted.
    rois = [r for r in rois if r.id != roi_id]
    dataset.add_ROIs(ROIList(rois), label=label)
    return jsonify(result='success')
def segment(self, use_settings=False):
    """Performs Spatiotemporal Independent Component Analysis.

    Currently only has options to use :class:`sima.segment.STICA`.
    User is prompted for parameters necessary to perform stICA. If
    *use_settings* is True, the settings from :data:`settings_file`
    are used instead.

    Args:
        use_settings (bool, optional): Whether to use the settings
            stored in :data:`settings_file`. If False, user is
            prompted for settings.
    """
    if use_settings:
        # Reuse previously saved parameters verbatim.
        components = int(self.settings['components'])
        mu = float(self.settings['mu'])
        overlap_per = float(self.settings['overlap_per'])
    else:
        # No sequence yet: ask for a TIFF and build a fresh dataset.
        if self.sequence == None:
            prompt = "File path to the image you want to segment (TIFF only): "
            input_path = self.getTIFF(prompt)
            self.sequence = Sequence.create('TIFF', input_path)
            self.dataset = ImagingDataset([self.sequence], self.sima_dir)
        prompt = "Number of PCA components (default 50): "
        components = self.getNatural(prompt, default=50)
        prompt = "mu (default 0.5): "
        # Loop until mu lies in [0, 1]; -1 forces at least one prompt.
        mu = -1.0
        while mu < 0 or mu > 1:
            mu = self.getFloat(prompt, default=0.5)
        prompt = "Minimum overlap " + \
            "(default 20%; enter 0 to skip): "
        overlap_per = self.getPercent(prompt, default=0.2)
    segment_settings = {
        'components': components,
        'mu': mu,
        'overlap_per': overlap_per,
    }
    print "Performing Spatiotemporal Independent Component Analysis..."
    stdout.flush()  # force print statement to output to IPython
    stica = STICA(**segment_settings)
    stica.append(IdROIs())
    if self.dataset == None:
        self.dataset = ImagingDataset.load(self.sima_dir)
    self.rois = self.dataset.segment(stica, label="stICA ROIs")
    print len(self.dataset.ROIs['stICA ROIs']), "ROIs found"
    # Persist the chosen parameters unless they came from the settings file.
    if not use_settings:
        segment_settings['segmentation_strategy'] = 'stICA'
        self._updateSettingsFile(segment_settings)
def setRoiLabel():
    """Duplicate an existing ROI set under a new label; list all labels."""
    ds_path = request.form.get('path')
    old_label = request.form.get('oldLabel')
    new_label = request.form.get('newLabel')
    dataset = ImagingDataset.load(ds_path)
    dataset.add_ROIs(dataset.ROIs[old_label], label=new_label)
    npz_names = glob.glob(os.path.join(ds_path, 'ica*.npz'))
    npz_names += glob.glob(os.path.join(ds_path, 'opca*.npz'))
    labels = dataset.ROIs.keys()
    labels.extend(os.path.basename(name) for name in npz_names)
    return render_template('select_list.html', options=[''] + labels)
def getLabels():
    """Return a rendered list of ROI labels plus any (o)PCA result files.

    Form args:
        path: path to the '.sima' dataset directory.

    Returns:
        Rendered select list, or '' when the dataset or its rois.pkl
        cannot be read (best-effort endpoint).
    """
    ds_path = request.form.get("path")
    try:
        dataset = ImagingDataset.load(ds_path)
    except Exception:  # was a bare except; keep best-effort behaviour
        return ""
    try:
        with open(os.path.join(dataset.savedir, "rois.pkl"), "rb") as f:
            labels = pickle.load(f).keys()
    except Exception:  # missing or unreadable rois.pkl
        return ""
    labels.extend(map(os.path.basename,
                      glob.glob(os.path.join(ds_path, "ica*.npz"))))
    labels.extend(map(os.path.basename,
                      glob.glob(os.path.join(ds_path, "opca*.npz"))))
    return render_template("select_list.html", options=[""] + labels)
def selectRoi():
    """Find the ROI whose polygon contains the clicked point."""
    form = request.form
    ds_path = form.get('path')
    label = form.get('label')
    plane = float(form.get('z'))
    point = Point(float(form.get('x')), float(form.get('y')))
    dataset = ImagingDataset.load(ds_path)
    rois = ROIList.load(os.path.join(dataset.savedir, 'rois.pkl'),
                        label=label)
    for roi in rois:
        for poly in roi.polygons:
            z_coord = np.array(poly.exterior.coords)[0, 2]
            on_plane = (z_coord == plane) or (plane == -1)
            if on_plane and poly.contains(point):
                return jsonify(label=roi.label, id=roi.id)
    return jsonify({'error': 'roi not found'})
def getChannels(directory):
    """List channel names for a dataset or HDF5 sequence, plus 'overlay'."""
    ds_path = directory.replace(':!', '/')
    if os.path.splitext(ds_path)[-1] == '.sima':
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return ''
        channels = ds.channel_names
    else:
        try:
            seq = Sequence.create('HDF5', ds_path, 'tzyxc')
        except IOError:
            return ''
        channels = ['channel_' + str(idx) for idx in range(seq.shape[4])]
    if len(channels) > 1:
        channels += ['overlay']
    return render_template('select_list.html', options=channels)
def getChannels(directory):
    """Return a rendered channel list for the given dataset path."""
    ds_path = directory.replace(":!", "/")
    is_sima = os.path.splitext(ds_path)[-1] == ".sima"
    if is_sima:
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return ""
        channels = ds.channel_names
    else:
        try:
            seq = Sequence.create("HDF5", ds_path, "tzyxc")
        except IOError:
            return ""
        channels = ["channel_" + str(idx)
                    for idx in range(seq.shape[4])]
    if len(channels) > 1:
        channels += ["overlay"]
    return render_template("select_list.html", options=channels)
def getLabels():
    """Return the available ROI labels for a dataset as JSON.

    Form args:
        path: path to the '.sima' dataset directory.

    Returns:
        JSON ``{"labels": [...]}``; an empty list on any failure to
        load the dataset or its rois.pkl (best-effort endpoint).
    """
    ds_path = request.form.get('path')
    try:
        dataset = ImagingDataset.load(ds_path)
    except Exception:  # was a bare except; keep best-effort behaviour
        return jsonify({'labels': []})
    try:
        with open(os.path.join(dataset.savedir, 'rois.pkl'), 'rb') as f:
            labels = pickle.load(f).keys()
    except Exception:  # missing or unreadable rois.pkl
        return jsonify({'labels': []})
    # NOTE: unlike the other label listings in this file, only opca*.npz
    # files are appended here (no ica*.npz) -- preserved as-is.
    labels.extend(
        map(os.path.basename, glob.glob(os.path.join(ds_path,
                                                     'opca*.npz'))))
    return jsonify({'labels': labels})
def getInfo():
    """Return dataset geometry and per-channel display-normalization factors.

    Form args: 'path' -- either a '.sima' dataset directory or an HDF5
    file readable as a 'tzyxc' sequence. The JSON reply holds plane
    indices, frame height/width, sequence length, and per channel a
    factor derived from the 98th percentile of three sampled frames.
    """
    ds_path = request.form.get('path')
    if (os.path.splitext(ds_path)[-1] == '.sima'):
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return jsonify(error='dataset not found')
        seq = ds.__iter__().next()  # first sequence of the dataset
    else:
        try:
            seq = Sequence.create('HDF5', ds_path, 'tzyxc')
        except IOError:
            return jsonify(error='dataset not found')
    length = len(seq)
    norm_factors = {}
    # One factor list per channel, filled from the sampled frames below.
    for channel in xrange(seq.shape[4]):
        norm_factors['channel_' + str(channel)] = []
    # Sample only the first, middle, and last frames (cheap estimate).
    for frame_index in [0, int(length / 2), -1]:
        frame = seq._get_frame(frame_index)
        for channel in xrange(seq.shape[4]):
            subframe = frame[:, :, :, channel]
            if np.any(np.isfinite(subframe)):
                # 98th percentile of finite pixels as a display ceiling.
                factor = np.percentile(
                    subframe[np.where(np.isfinite(subframe))], 98)
                if np.isfinite(factor):
                    norm_factors['channel_' + str(channel)] += [factor]
    json = {
        'planes': range(int(seq.shape[1] + 1)),
        'height': int(seq.shape[2]),
        'width': int(seq.shape[3]),
        'length': length
    }
    for channel in norm_factors.keys():
        # Mean of the sampled factors, floored at 1 to avoid divide-by-zero.
        json[channel] = int(max(1, int(np.nanmean(norm_factors[channel]))))
    return jsonify(**json)
def getRois():
    """Return every ROI in a set as {label: {plane: [polygon, ...]}} JSON."""
    ds_path = request.form.get("path")
    label = request.form.get("label")
    dataset = ImagingDataset.load(ds_path)
    convertedRois = {}
    rois = ROIList.load(os.path.join(dataset.savedir, "rois.pkl"),
                        label=label)
    for index, roi in enumerate(rois):
        if roi.label is None:
            roi.label = index
        planes = {}
        for poly in roi.polygons:
            coords = np.array(poly.exterior.coords)
            plane = int(coords[0, -1])
            verts = coords[:, :2].tolist()
            planes.setdefault(plane, []).append(verts)
        convertedRois[roi.label] = planes
    return jsonify(**convertedRois)
def getRois():
    """Serialize all ROIs in the requested set, grouped by z-plane."""
    ds_path = request.form.get('path')
    label = request.form.get('label')
    dataset = ImagingDataset.load(ds_path)
    rois = ROIList.load(os.path.join(dataset.savedir, 'rois.pkl'),
                        label=label)
    converted = {}
    for idx, roi in enumerate(rois):
        if roi.label is None:
            roi.label = idx
        converted[roi.label] = {}
        for polygon in roi.polygons:
            pts = np.array(polygon.exterior.coords)
            z = int(pts[0, -1])
            xy = pts[:, :2].tolist()
            if z in converted[roi.label]:
                converted[roi.label][z].append(xy)
            else:
                converted[roi.label][z] = [xy]
    return jsonify(**converted)
def updateRoi():
    """Create or replace a single ROI from posted polygon points.

    Form args:
        path: path to the '.sima' dataset directory.
        label: label of the ROI set to update.
        points: JSON list indexed by plane; each entry a list of polygons.
        roiLabel / roiId: label and id for the stored ROI; any existing
            ROI with the same id is replaced.

    Returns:
        JSON status.

    Raises:
        Exception: if any posted polygon has fewer than 3 vertices.
    """
    ds_path = request.form.get('path')
    label = request.form.get('label')
    points = json.loads(request.form.get('points'))
    roi_label = request.form.get('roiLabel')
    roi_id = request.form.get('roiId')
    dataset = ImagingDataset.load(ds_path)
    roi_data = []
    for plane_index, plane in enumerate(points):
        if plane is None or not len(plane):
            continue
        array_dat = np.array(plane)
        # Append the plane index as a constant z coordinate per vertex.
        z_dims = plane_index * np.ones((array_dat.shape[:2] + (1, )))
        plane_data = np.concatenate((array_dat, z_dims), axis=2)
        roi_data.extend(list(plane_data))
    if len(roi_data) == 0:
        return jsonify(result="no polygons to save")
    for poly in roi_data:
        if poly.shape[0] < 3:
            # Message grammar fixed ("less then" -> "fewer than").
            raise Exception(
                "unable to store polygon with fewer than 3 points")
    roi = ROI(polygons=roi_data, im_shape=dataset.frame_shape[:3])
    roi.label = roi_label
    roi.id = roi_id
    try:
        rois = dataset.ROIs[label]
    except KeyError:
        rois = []
    # Drop any existing ROI with this id, then append the new one.
    rois = [r for r in rois if r.id != roi_id]
    rois.append(roi)
    dataset.add_ROIs(ROIList(rois), label=label)
    return jsonify(result='success')
def getInfo():
    """Return dataset geometry and per-channel normalization info as JSON.

    Variant of getInfo that reports the sequence length under the key
    "max" (not "length") and returns raw (uncast) shape values.
    """
    ds_path = request.form.get("path")
    if os.path.splitext(ds_path)[-1] == ".sima":
        try:
            ds = ImagingDataset.load(ds_path)
        except IOError:
            return jsonify(error="dataset not found")
        seq = ds.__iter__().next()  # first sequence of the dataset
    else:
        try:
            seq = Sequence.create("HDF5", ds_path, "tzyxc")
        except IOError:
            return jsonify(error="dataset not found")
    length = len(seq)
    norm_factors = {}
    # One factor list per channel, filled from the sampled frames below.
    for channel in xrange(seq.shape[4]):
        norm_factors["channel_" + str(channel)] = []
    # Sample only the first, middle, and last frames (cheap estimate).
    for frame_index in [0, int(length / 2), -1]:
        frame = seq._get_frame(frame_index)
        for channel in xrange(seq.shape[4]):
            subframe = frame[:, :, :, channel]
            if np.any(np.isfinite(subframe)):
                # 98th percentile of finite pixels as a display ceiling.
                factor = np.percentile(subframe[np.where(np.isfinite(subframe))], 98)
                if np.isfinite(factor):
                    norm_factors["channel_" + str(channel)] += [factor]
    json = {"planes": range(seq.shape[1] + 1),
            "height": seq.shape[2],
            "width": seq.shape[3],
            "max": length}
    for channel in norm_factors.keys():
        # Mean of the sampled factors, floored at 1 to avoid divide-by-zero.
        json[channel] = max(1, int(np.nanmean(norm_factors[channel])))
    return jsonify(**json)
def setRoiLabel():
    """Register a new, empty ROI set under the posted label.

    The old-label lookup is currently disabled (old_label pinned to ''),
    so a fresh empty ROIList is always added. An empty 'newLabel' falls
    back to 'rois'. Responds with the full label list as JSON.
    """
    ds_path = request.form.get('path')
    old_label = ''  # lookup of an existing set is deliberately disabled
    new_label = request.form.get('newLabel')
    if new_label == '':
        new_label = 'rois'
    dataset = ImagingDataset.load(ds_path)
    rois = dataset.ROIs[old_label] if old_label != '' else ROIList([])
    dataset.add_ROIs(rois, label=new_label)
    labels = dataset.ROIs.keys()
    for pattern in ('ica*.npz', 'opca*.npz'):
        labels.extend(map(os.path.basename,
                          glob.glob(os.path.join(ds_path, pattern))))
    return jsonify({'labels': labels})
def getRoiMasks():
    """Render ROI masks as base64 JPEG projections (z, y, x views).

    Form args: 'path', 'label', optional integer 'index' (restricts the
    overlay to one ROI). With `overlay` hard-coded True, all selected
    ROIs are blended into one color-mapped volume; the per-ROI grayscale
    branch below the early return is currently unreachable.
    """
    ds_path = request.form.get("path")
    label = request.form.get("label")
    index = request.form.get("index", type=int)
    overlay = True   # hard-coded: always take the color-overlay path
    quality = 100    # JPEG quality for the encoded projections
    dataset = ImagingDataset.load(ds_path)
    rois = dataset.ROIs[label]
    num_rois = len(rois)
    if index is not None:
        indicies = [index]
    else:
        indicies = range(num_rois)
    projectedRois = {}
    if overlay == True:
        # RGB volume accumulating one color per ROI.
        vol = np.zeros(list(dataset.frame_shape[:3]) + [3])
        cmap = matplotlib.cm.jet
        norm = matplotlib.colors.Normalize(vmin=0, vmax=num_rois)
        m = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
        for index in indicies:
            # Drop alpha, then normalize so channel weights sum to 1.
            color = np.array(m.to_rgba(index))[:-1]
            color /= np.sum(color)
            roiVol = np.array([plane.todense().astype(float)
                               for plane in rois[index].mask])
            # mask2: voxels already colored by a previous ROI (blend);
            # mask1: voxels newly covered by this ROI only (assign).
            mask2 = ma.masked_where(
                np.logical_and(np.sum(vol, axis=-1) > 0, roiVol > 0),
                roiVol).mask
            mask1 = ma.masked_where(
                np.logical_and(np.logical_not(mask2), roiVol > 0),
                roiVol).mask
            if np.any(mask1):
                vol[mask1] = color
            if np.any(mask2):
                vol[mask2] = vol[mask2] / 2 + color / 2
        # Contrast-stretch between the 25th and 99th percentiles.
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 25)
        vol -= cutoff
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 99)
        vol = vol * 255 / cutoff
        vol = np.clip(vol, 0, 255)
        zsurf = np.nanmean(vol, axis=0)
        ysurf = np.nanmean(vol, axis=1)
        xsurf = np.swapaxes(np.nanmean(vol, axis=2), 0, 1)
        projectedRois["rois"] = {
            "z": convertToColorB64Jpeg(zsurf.astype("uint8"),
                                       quality=quality),
            "y": convertToColorB64Jpeg(ysurf.astype("uint8"),
                                       quality=quality),
            "x": convertToColorB64Jpeg(xsurf.astype("uint8"),
                                       quality=quality),
        }
        return jsonify(num_rois=num_rois, **projectedRois)
    # NOTE: unreachable while overlay is True -- per-ROI grayscale path.
    for i, roi in enumerate(rois):
        mask = roi.mask
        vol = np.array([plane.todense().astype(float) for plane in mask])
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 25)
        vol -= cutoff
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 99)
        vol = vol * 255 / cutoff
        vol = np.clip(vol, 0, 255)
        zsurf = np.nanmean(vol, axis=0)
        ysurf = np.nanmean(vol, axis=1)
        xsurf = np.nanmean(vol, axis=2).T
        if roi.label is None:
            roi.label = "roi_" + str(i)
        projectedRois[roi.label] = {
            "z": convertToB64Jpeg(zsurf.astype("uint8"), quality=quality),
            "y": convertToB64Jpeg(ysurf.astype("uint8"), quality=quality),
            "x": convertToB64Jpeg(xsurf.astype("uint8"), quality=quality),
        }
    return jsonify(**projectedRois)
def getRoiList():
    """Load the ROI set named by the posted form fields.

    NOTE(review): this view fetches the ROIs but never returns them --
    it responds with None. Possibly an unfinished endpoint; behaviour
    preserved as-is.
    """
    form = request.form
    dataset = ImagingDataset.load(form.get("path"))
    rois = dataset.ROIs[form.get("label")]
def test_PlaneNormalizedCuts():
    """Smoke-test plane-wise normalized cuts on a 50x50 crop."""
    cropped = ImagingDataset.load(example_data())[:, :, :, :50, :50]
    affinty_method = segment.BasicAffinityMatrix(num_pcs=5)
    plane_strategy = segment.PlaneNormalizedCuts(affinty_method)
    cropped.segment(segment.PlaneWiseSegmentation(plane_strategy))
def test_PlaneCA1PC():
    """Smoke-test the plane-wise CA1PC segmentation strategy."""
    cropped = ImagingDataset.load(example_data())[:, :, :, :50, :50]
    cropped.segment(segment.PlaneCA1PC(num_pcs=5))
def test_PlaneSTICA():
    """Smoke-test the plane-wise stICA segmentation strategy."""
    dataset = ImagingDataset.load(example_data())
    dataset.segment(segment.PlaneSTICA(components=5))
def getRoiMasks():
    """Render ROI masks as base64 JPEG projections (z, y, x views).

    Form args: 'path', 'label', optional integer 'index' (restricts the
    overlay to one ROI). With `overlay` hard-coded True, all selected
    ROIs are blended into one color-mapped volume; the per-ROI grayscale
    branch below the early return is currently unreachable.
    """
    ds_path = request.form.get('path')
    label = request.form.get('label')
    index = request.form.get('index', type=int)
    overlay = True   # hard-coded: always take the color-overlay path
    quality = 100    # JPEG quality for the encoded projections
    dataset = ImagingDataset.load(ds_path)
    rois = dataset.ROIs[label]
    num_rois = len(rois)
    if index is not None:
        indicies = [index]
    else:
        indicies = range(num_rois)
    projectedRois = {}
    if overlay is True:
        # RGB volume accumulating one color per ROI.
        vol = np.zeros(list(dataset.frame_shape[:3]) + [3])
        cmap = matplotlib.cm.jet
        norm = matplotlib.colors.Normalize(vmin=0, vmax=num_rois)
        m = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
        for index in indicies:
            # Drop alpha, then normalize so channel weights sum to 1.
            color = np.array(m.to_rgba(index))[:-1]
            color /= np.sum(color)
            roiVol = np.array(
                [plane.todense().astype(float) for plane in
                 rois[index].mask])
            # mask2: voxels already colored by a previous ROI (blend);
            # mask1: voxels newly covered by this ROI only (assign).
            mask2 = ma.masked_where(
                np.logical_and(np.sum(vol, axis=-1) > 0, roiVol > 0),
                roiVol).mask
            mask1 = ma.masked_where(
                np.logical_and(np.logical_not(mask2), roiVol > 0),
                roiVol).mask
            if np.any(mask1):
                vol[mask1] = color
            if np.any(mask2):
                vol[mask2] = vol[mask2] / 2 + color / 2
        # Contrast-stretch between the 25th and 99th percentiles.
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 25)
        vol -= cutoff
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 99)
        vol = vol * 255 / cutoff
        vol = np.clip(vol, 0, 255)
        zsurf = np.nanmean(vol, axis=0)
        ysurf = np.nanmean(vol, axis=1)
        xsurf = np.swapaxes(np.nanmean(vol, axis=2), 0, 1)
        projectedRois['rois'] = {
            'z': convertToColorB64Jpeg(zsurf.astype('uint8'),
                                       quality=quality),
            'y': convertToColorB64Jpeg(ysurf.astype('uint8'),
                                       quality=quality),
            'x': convertToColorB64Jpeg(xsurf.astype('uint8'),
                                       quality=quality)
        }
        return jsonify(num_rois=num_rois, **projectedRois)
    # NOTE: unreachable while overlay is True -- per-ROI grayscale path.
    for i, roi in enumerate(rois):
        mask = roi.mask
        vol = np.array([plane.todense().astype(float) for plane in mask])
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 25)
        vol -= cutoff
        cutoff = np.percentile(vol[np.where(np.isfinite(vol))], 99)
        vol = vol * 255 / cutoff
        vol = np.clip(vol, 0, 255)
        zsurf = np.nanmean(vol, axis=0)
        ysurf = np.nanmean(vol, axis=1)
        xsurf = np.nanmean(vol, axis=2).T
        if roi.label is None:
            roi.label = 'roi_' + str(i)
        projectedRois[roi.label] = {
            'z': convertToB64Jpeg(zsurf.astype('uint8'), quality=quality),
            'y': convertToB64Jpeg(ysurf.astype('uint8'), quality=quality),
            'x': convertToB64Jpeg(xsurf.astype('uint8'), quality=quality)
        }
    return jsonify(**projectedRois)
def getFrames():
    """Serve requested frames as base64 JPEG projections (z, y, x views).

    Form args: 'path' ('.sima' dir or HDF5 file), 'frames[]' (frame
    indices; -1 requests the time average), 'normingVal[]' (per-channel
    display ceilings), 'sequenceId', 'channel' ('overlay' blends the
    first two channels), 'planes[]' (0 = mean projection), 'cycle'
    (sequence index within a dataset).
    """
    ds_path = request.form.get("path")
    requestFrames = request.form.getlist("frames[]", type=int)
    normingVal = request.form.getlist("normingVal[]", type=float)
    sequenceId = request.form.get("sequenceId")
    channel = request.form.get("channel")
    planes = request.form.getlist("planes[]", type=int)
    cycle = request.form.get("cycle", type=int)
    if planes is None:
        planes = [0]
    quality = 40  # JPEG quality for the encoded projections
    if channel == "overlay":
        channel = None  # None signals the two-channel overlay path below
    ds = None
    if os.path.splitext(ds_path)[-1] == ".sima":
        ds = ImagingDataset.load(ds_path)
        seq = ds.sequences[cycle]
        channel = ds._resolve_channel(channel)
    else:
        seq = Sequence.create("HDF5", ds_path, "tzyxc")
        if channel:
            # Channel names are "channel_<idx>"; recover the index.
            channel = int(channel.split("_")[-1])
    end = False
    frames = {}
    for frame_number in requestFrames:
        # Copy so per-frame renormalization never mutates the request list.
        norming_val = normingVal[:]
        if frame_number > len(seq) - 1 or frame_number < -1:
            end = True  # client scrolled past the end of the sequence
            continue
        elif frame_number == -1 and ds is not None:
            # -1 requests the cached time average; fall back to frame 0.
            try:
                time_averages = pickle.load(
                    open(os.path.join(ds.savedir, "time_averages.pkl")))
                if not isinstance(time_averages, np.ndarray):
                    raise Exception("no time average")
            except:
                vol = seq._get_frame(0)
            else:
                vol = ds.time_averages
            # Recompute per-channel ceilings for the averaged view.
            # NOTE(review): loop nesting inferred from flattened source;
            # it may belong inside the try/else branch only -- confirm.
            for ch in xrange(vol.shape[3]):
                subframe = vol[:, :, :, ch]
                factor = np.percentile(
                    subframe[np.where(np.isfinite(subframe))], 99)
                if np.isfinite(factor):
                    norming_val[ch] = factor
        else:
            vol = seq._get_frame(frame_number)
        if channel is not None:
            # Single channel: scale into [0, 255].
            vol = vol[:, :, :, channel]
            vol /= (norming_val[channel]) / 255
            vol = np.clip(vol, 0, 255)
        else:
            # Overlay: stack the first two normalized channels side by side.
            vol = np.hstack((vol[:, :, :, 0] / norming_val[0],
                             vol[:, :, :, 1] / norming_val[1]))
            vol *= 255
        frames["frame_" + str(frame_number)] = {}
        for plane in planes:
            # plane 0 = mean projection; plane k>0 = single z-slice k-1.
            if plane == 0:
                zsurf = np.nanmean(vol, axis=0)
            else:
                zsurf = vol[plane - 1, :, :]
            if plane == 0:
                ysurf = np.nanmean(vol, axis=1)
            else:
                ysurf = np.zeros((vol.shape[0], vol.shape[2]))
                ysurf[plane - 1, :] = np.nanmean(zsurf, axis=0)
            if plane == 0:
                xsurf = np.nanmean(vol, axis=2).T
            else:
                xsurf = np.zeros((vol.shape[1], vol.shape[0]))
                xsurf[:, plane - 1] = np.nanmean(zsurf, axis=1).T
            frames["frame_" + str(frame_number)][plane] = {
                "z": convertToB64Jpeg(zsurf.astype("uint8"),
                                      quality=quality),
                "y": convertToB64Jpeg(ysurf.astype("uint8"),
                                      quality=quality),
                "x": convertToB64Jpeg(xsurf.astype("uint8"),
                                      quality=quality),
            }
    return jsonify(end=end, sequenceId=sequenceId, **frames)
def test_STICA():
    """Smoke-test the basic stICA segmentation strategy."""
    dataset = ImagingDataset.load(example_data())
    dataset.segment(segment.STICA(components=5))
def _plotROIs(self, save_to=None, warn=False, draw=False, fig=None, ax=None,
              lines={}, ax_image=None, bleft=None, bright=None):
    """Plots ROIs against a background image with an applied rotation/flip

    Flipping is always performed first, then rotation. Rotation is in
    degrees clockwise, and must be a multiple of 90.

    NOTE(review): `lines={}` is a mutable default argument that is
    shared across calls; here it appears intentional (the redraw
    callbacks reuse it), but confirm before refactoring.

    Args:
        save_to (str, optional): if given, save the figure instead of
            showing it interactively.
        warn (bool): print a warning for ROIs with >1 coordinate set.
        draw (bool): True when re-rendering into existing axes (called
            from a button callback) rather than creating a new figure.
        fig, ax, lines, ax_image, bleft, bright: plotting state threaded
            through recursive redraw calls.
    """
    def transform_generator(t, args):
        """Returns a callback function to perform a transformation"""
        def transform(event):
            if t == 'left':
                self._rotation -= 90
            elif t == 'right':
                self._rotation += 90
            elif t == 'hflip':
                self._hflip = not self._hflip
            elif t == 'vflip':
                self._vflip = not self._vflip
            else:
                assert None, "Incorrect transformation: {0}".format(t)
            # Re-render with the updated transform state.
            self._plotROIs(**args)
        return transform
    if not draw:
        fig, ax = plt.subplots()
        plt.subplots_adjust(bottom=0.2)  # leave room for the buttons
    # make rotation fall in the range: [0, 360)
    while self._rotation < 0:
        self._rotation += 360
    while self._rotation >= 360:
        self._rotation -= 360
    # get list of ROIs
    if self.dataset == None:
        self.dataset = ImagingDataset.load(self.sima_dir)
    if self.rois == None:
        self.rois = self.dataset.ROIs['stICA ROIs']
    # prepare background image
    # TODO: does this step work for multi-channel inputs?
    imdata = self.dataset.time_averages[0, ..., -1]
    # Perform flips
    if self._hflip:
        imdata = fliplr(imdata)
    if self._vflip:
        imdata = flipud(imdata)
    # Perform rotation
    if self._rotation:
        k = self._rotation / 90
        imdata = rot90(imdata, k)
    #image_width, image_height = image.size
    image_width, image_height = imdata.shape
    ax.set_xlim(xmin=0, xmax=image_width)
    ax.set_ylim(ymin=0, ymax=image_height)
    if draw:
        # Reuse the existing image artist on redraw.
        ax_image.set_data(imdata)
        ax_image.set_cmap('gray')
    else:
        ax_image = ax.imshow(imdata, cmap='gray')
    # plot all of the ROIs, warn user if an ROI has internal loops
    for roi in self.rois:
        coords = roi.coords
        rid = roi.id
        if warn and len(coords) > 1:
            print "Warning: Roi%s has >1 coordinate set" % rid
        x = coords[0][:, 0]
        y = coords[0][:, 1]
        # transform x and y
        x, y = self._rotateFlipXY(x, y, image_height, image_width,
                                  self._rotation, self._hflip, self._vflip)
        if save_to == None:
            if draw:
                lines[rid].set_data(x, y)
            else:
                lines[rid], = plt.plot(x, y,
                                       picker=line_picker_generator(rid))
        else:
            plt.plot(x, y)
    # build options for callback
    args = {
        'save_to': save_to,
        'warn': warn,
        'draw': True,
        'fig': fig,
        'ax': ax,
        'lines': lines,
        'ax_image': ax_image,
        'bleft': bleft,
        'bright': bright,
    }
    # create buttons
    if not draw:
        axhflip = plt.axes([0.15, 0.05, 0.17, 0.075])
        axvflip = plt.axes([0.33, 0.05, 0.17, 0.075])
        axleft = plt.axes([0.51, 0.05, 0.17, 0.075])
        axright = plt.axes([0.69, 0.05, 0.17, 0.075])
        bhflip = Button(axhflip, 'Flip Horizontally')
        bvflip = Button(axvflip, 'Flip Vertically')
        bleft = Button(axleft, 'Rotate Left')
        bright = Button(axright, 'Rotate Right')
        # click handlers
        bhflip.on_clicked(transform_generator('hflip', args))
        bvflip.on_clicked(transform_generator('vflip', args))
        bleft.on_clicked(transform_generator('left', args))
        bright.on_clicked(transform_generator('right', args))
    if save_to != None:
        plt.savefig(save_to)
    else:
        if draw:
            plt.draw()
        else:
            plt.gcf().canvas.mpl_connect('pick_event', onpick)
            plt.show()
"-l", "--target_label", action="store", type=str, default="auto_transformed", help="Label to give the new transformed ROIs " + "(default: auto_transformed)") argParser.add_argument( "-c", "--channel", action="store", type=str, default="0", help="Channel of the datasets used to calculate the affine transform") argParser.add_argument( "-C", "--copy_properties", action="store_true", help="Copy ROI properties ") argParser.add_argument( "-o", "--overwrite", action="store_true", help="If target_label already exists, overwrite") args = argParser.parse_args() source_dataset = ImagingDataset.load(args.source) print "Beginning ROI transforms, source dataset: ", args.source print "-----------------------------------------" for directory, folders, files in os.walk(args.target): if directory.endswith('.sima'): try: target_dataset = ImagingDataset.load(directory) except IOError: continue if os.path.samefile(args.source, directory): continue if args.target_label in target_dataset.ROIs and not args.overwrite: print "Label already exists, skipping: ", directory
def test_PlaneCA1PC():
    """Run PlaneCA1PC segmentation over a 50x50 crop of the example data."""
    ds = ImagingDataset.load(example_data())
    small = ds[:, :, :, :50, :50]
    strategy = segment.PlaneCA1PC(num_pcs=5)
    small.segment(strategy)
def getRoiList():
    """Fetch the requested ROI set.

    NOTE(review): the loaded ROIs are never returned -- the view
    responds with None. Possibly unfinished; behaviour preserved.
    """
    ds_path = request.form.get('path')
    label = request.form.get('label')
    rois = ImagingDataset.load(ds_path).ROIs[label]
def test_PlaneNormalizedCuts():
    """Exercise normalized-cuts segmentation plane by plane on a crop."""
    ds = ImagingDataset.load(example_data())
    small = ds[:, :, :, :50, :50]
    affinity = segment.BasicAffinityMatrix(num_pcs=5)
    strategy = segment.PlaneWiseSegmentation(
        segment.PlaneNormalizedCuts(affinity))
    small.segment(strategy)
def getFrames():
    """Serve requested frames as base64 JPEG projections (z, y, x views).

    Form args: 'path' ('.sima' dir or HDF5 file), 'frames[]' (frame
    indices; -1 requests the time average), 'normingVal[]' (per-channel
    display ceilings), 'sequenceId', 'channel' ('overlay' blends the
    first two channels), 'planes[]' (0 = mean projection), 'cycle'
    (sequence index within a dataset).
    """
    ds_path = request.form.get('path')
    requestFrames = request.form.getlist('frames[]', type=int)
    normingVal = request.form.getlist('normingVal[]', type=float)
    sequenceId = request.form.get('sequenceId')
    channel = request.form.get('channel')
    planes = request.form.getlist('planes[]', type=int)
    cycle = request.form.get('cycle', type=int)
    if planes is None:
        planes = [0]
    quality = 40  # JPEG quality for the encoded projections
    if channel == 'overlay':
        channel = None  # None signals the two-channel overlay path below
    ds = None
    if (os.path.splitext(ds_path)[-1] == '.sima'):
        ds = ImagingDataset.load(ds_path)
        seq = ds.sequences[cycle]
        channel = ds._resolve_channel(channel)
    else:
        seq = Sequence.create('HDF5', ds_path, 'tzyxc')
        if channel:
            # Channel names are 'channel_<idx>'; recover the index.
            channel = int(channel.split('_')[-1])
    end = False
    frames = {}
    for frame_number in requestFrames:
        # Copy so per-frame renormalization never mutates the request list.
        norming_val = normingVal[:]
        if frame_number > len(seq) - 1 or frame_number < -1:
            end = True  # client scrolled past the end of the sequence
            continue
        elif frame_number == -1 and ds is not None:
            # -1 requests the cached time average; fall back to frame 0.
            try:
                time_averages = pickle.load(
                    open(os.path.join(ds.savedir, 'time_averages.pkl')))
                if not isinstance(time_averages, np.ndarray):
                    raise Exception('no time average')
            except:
                vol = seq._get_frame(0)
            else:
                vol = ds.time_averages
            # Recompute per-channel ceilings for the averaged view.
            # NOTE(review): loop nesting inferred from flattened source;
            # it may belong inside the try/else branch only -- confirm.
            for ch in xrange(vol.shape[3]):
                subframe = vol[:, :, :, ch]
                factor = np.percentile(
                    subframe[np.where(np.isfinite(subframe))], 99)
                if np.isfinite(factor):
                    norming_val[ch] = factor
        else:
            vol = seq._get_frame(frame_number)
        if channel is not None:
            # Single channel: scale into [0, 255].
            vol = vol[:, :, :, channel]
            vol /= ((norming_val[channel]) / 255)
            vol = np.clip(vol, 0, 255)
        else:
            # Overlay: stack the first two normalized channels side by side.
            vol = np.hstack((vol[:, :, :, 0] / norming_val[0],
                             vol[:, :, :, 1] / norming_val[1]))
            vol *= 255
        frames['frame_' + str(frame_number)] = {}
        for plane in planes:
            # plane 0 = mean projection; plane k>0 = single z-slice k-1.
            if plane == 0:
                zsurf = np.nanmean(vol, axis=0)
            else:
                zsurf = vol[plane - 1, :, :]
            if plane == 0:
                ysurf = np.nanmean(vol, axis=1)
            else:
                ysurf = np.zeros((vol.shape[0], vol.shape[2]))
                ysurf[plane - 1, :] = np.nanmean(zsurf, axis=0)
            if plane == 0:
                xsurf = np.nanmean(vol, axis=2).T
            else:
                xsurf = np.zeros((vol.shape[1], vol.shape[0]))
                xsurf[:, plane - 1] = np.nanmean(zsurf, axis=1).T
            frames['frame_' + str(frame_number)][plane] = {
                'z': convertToB64Jpeg(zsurf.astype('uint8'),
                                      quality=quality),
                'y': convertToB64Jpeg(ysurf.astype('uint8'),
                                      quality=quality),
                'x': convertToB64Jpeg(xsurf.astype('uint8'),
                                      quality=quality)
            }
    return jsonify(end=end, sequenceId=sequenceId, **frames)
def load_saved_tiffs_dataset(self):
    """Reload the TIFF-backed dataset and verify its first sequence's shape."""
    dataset = ImagingDataset.load(self.filepath_tiffs)
    expected_shape = (3, 4, 173, 173, 2)
    assert_equal(dataset.sequences[0].shape, expected_shape)
"--channel", action="store", type=str, default="0", help="Channel of the datasets used to calculate the affine transform") argParser.add_argument("-C", "--copy_properties", action="store_true", help="Copy ROI properties ") argParser.add_argument("-o", "--overwrite", action="store_true", help="If target_label already exists, overwrite") args = argParser.parse_args() source_dataset = ImagingDataset.load(args.source) print "Beginning ROI transforms, source dataset: ", args.source print "-----------------------------------------" for directory, folders, files in os.walk(args.target): if directory.endswith('.sima'): try: target_dataset = ImagingDataset.load(directory) except IOError: continue if os.path.samefile(args.source, directory): continue if args.target_label in target_dataset.ROIs and not args.overwrite: print "Label already exists, skipping: ", directory
def load_saved_tiffs_dataset(self):
    """Check the saved-TIFF dataset round-trips with the expected geometry."""
    reloaded = ImagingDataset.load(self.filepath_tiffs)
    first_sequence = reloaded.sequences[0]
    assert_equal(first_sequence.shape, (3, 4, 173, 173, 2))
def __init__(self, path, channel=0, start=0):
    """Set up a vispy canvas showing three orthogonal projections of a volume.

    Args:
        path: '.sima' dataset directory or an HDF5 file ('tzyxc' order).
        channel (int): channel index to display.
        start (int): initial frame index.
    """
    app.Canvas.__init__(self, position=(300, 100), size=(800, 800),
                        keys='interactive')
    # Main (z-projection) quad, upper-left region of clip space.
    self.program = gloo.Program(vertex, fragment)
    self.program['a_position'] = [(-1., -.5, 0.), (-1., +1., 0.),
                                  (+0.5, -.5, 0.), (+0.5, +1, 0.)]
    self.program['a_texcoord'] = [(0., 0.), (0., +1), (+1., 0.), (+1, +1)]
    # Bottom strip: y-projection.
    self.program2 = gloo.Program(vertex, fragment)
    self.program2['a_position'] = [(-1., -1., 0.), (-1., -0.55, 0.),
                                   (+0.5, -1., 0.), (+0.5, -0.55, 0.)]
    self.program2['a_texcoord'] = [(0., 0.), (0., +1.), (+1., 0.),
                                   (+1., +1.)]
    # Right strip: x-projection.
    self.program3 = gloo.Program(vertex, fragment)
    self.program3['a_position'] = [(0.55, -0.5, 0.), (0.55, +1., 0.),
                                   (+1., -0.5, 0.), (+1., +1., 0.)]
    self.program3['a_texcoord'] = [(0., 0.), (0., +1.), (+1., 0.),
                                   (+1., +1.)]
    if os.path.splitext(path)[-1] == '.sima':
        ds = ImagingDataset.load(path)
        self.sequence = ds.__iter__().next()  # first sequence only
    else:
        self.sequence = Sequence.create('HDF5', path, 'tzyxc')
    self.frame_counter = start
    self.step_size = 1
    self.channel = channel
    self.length = len(self.sequence)
    # Normalize the first frame into [0, 1] for texture upload.
    vol = self.sequence._get_frame(self.frame_counter).astype('float32')
    vol /= NORMING_VAL
    vol = np.clip(vol, 0, 1)
    #surf = np.sum(vol,axis=0)[:,:,channel]/vol.shape[0]
    surf = np.nanmean(vol, axis=0)[:, :, channel]
    self.program['u_texture'] = surf
    #surf2 = np.sum(vol,axis=1)[:,:,channel]/vol.shape[1]
    surf2 = np.nanmean(vol, axis=1)[:, :, channel]
    self.program2['u_texture'] = surf2
    #surf3 = np.fliplr((np.sum(vol,axis=2)[:,:,channel]).T)/vol.shape[2]
    surf3 = np.fliplr((np.nanmean(vol, axis=2)[:, :, channel]).T)
    self.program3['u_texture'] = surf3
    # On-canvas frame counter and step-size readouts.
    self.text = visuals.TextVisual('', font_size=14, color='r',
                                   pos=(700, 700))
    self.text.text = "{} / {}".format(self.frame_counter, self.length)
    self.steptext = visuals.TextVisual('step_size: 1', font_size=10,
                                       color='r', pos=(700, 725))
    self.tr_sys = visuals.transforms.TransformSystem(self)
    # Advance frames every 250 ms.
    self.timer = app.Timer(0.25, connect=self.on_timer, start=True)