def execute(self, namespace):
    series = namespace[self.input_name]
    # squeeze down from 4D
    data = series.data[:, :, :].squeeze()

    if self.mask == '':  # not the most memory efficient, but make a mask
        logger.debug('No mask provided to ClusteringByLabel, analyzing full image')
        mask = np.ones((data.shape[0], data.shape[1]), int)
    else:
        mask = namespace[self.mask].data[:, :, :].squeeze()

    # toss any negative labels, as well as the zero label (per PYME clustering schema)
    labels = sorted(list(set(np.clip(np.unique(mask), 0, None)) - {0}))
    logger.debug('labels: %s' % (labels,))
    n_labels = len(labels)

    # calculate the Variance_t over Mean_t
    var = np.var(data[:, :, self.excitation_start_frame:], axis=2)
    mean = np.mean(data[:, :, self.excitation_start_frame:], axis=2)

    variance_over_mean = var / mean
    if np.isnan(variance_over_mean).any():
        logger.error('Variance over mean contains NaN, see %s' % series.filename)

    mean_pre_excitation = np.mean(data[:, :, :self.excitation_start_frame], axis=2)

    cluster_metric_mean = np.zeros(n_labels)
    mean_before_excitation = np.zeros(n_labels)

    for li in range(n_labels):
        # everything is 2D at this point
        label_mask = mask == labels[li]
        cluster_metric_mean[li] = np.mean(variance_over_mean[label_mask])
        mean_before_excitation[li] = np.mean(mean_pre_excitation[label_mask])

    res = tabular.DictSource({
        'variance_over_mean': cluster_metric_mean,
        'mean_intensity_over_first_10_frames': mean_before_excitation,
        'labels': np.array(labels)
    })
    try:
        res.mdh = series.mdh
    except AttributeError:
        res.mdh = None

    namespace[self.output_name] = res

    if self.output_vom != '':
        namespace[self.output_vom] = image.ImageStack(data=variance_over_mean, mdh=res.mdh)

    if self.output_mean_pre_excitation != '':
        namespace[self.output_mean_pre_excitation] = image.ImageStack(data=mean_pre_excitation, mdh=res.mdh)
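# A small illustration of the per-label metric computed above (a sketch, not part of the
# original module): for each non-zero label in a 2D label image, average the temporal
# variance/mean map over the pixels carrying that label. The helper name is hypothetical.
def _example_variance_over_mean_per_label(data, mask):
    import numpy as np
    # data: (x, y, t) stack; mask: (x, y) integer label image, 0 = background
    vom = np.var(data, axis=2) / np.mean(data, axis=2)
    labels = [l for l in np.unique(mask) if l > 0]
    return {l: vom[mask == l].mean() for l in labels}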
def test_h5_export_uint16_multicolour():
    ''' Saves and re-loads an image using the hdf exporter '''
    from PYME.IO import dataExporter
    from PYME.IO import events

    data = (1e3 * np.random.rand(100, 100, 50, 2)).astype('uint16')
    evts = np.zeros(3, dtype=events.EVENTS_DTYPE)

    tempdir = tempfile.mkdtemp()
    filename = os.path.join(tempdir, 'test_h5.h5')
    try:
        dataExporter.ExportData(data, mdh=MetaData.TIRFDefault, events=evts, filename=filename)

        im = image.ImageStack(filename=filename)
        assert np.allclose(im.data[:, :, :, :, :].squeeze(), data)
        im.dataSource.release()
    finally:
        shutil.rmtree(tempdir)
def OnPlot(self, event):
    import PYMEcs.Analysis.offlineTracker as otrack

    ds = im.ImageStack(filename=self.Zfactorfilename)
    dataset = ds.data[:, :, :].squeeze()
    refim0 = dataset[:, :, 10:91:4]
    calImages0, calFTs0, dz0, dzn0, mask0, X0, Y0 = otrack.genRef(refim0, normalised=False)

    # empty all three result lists every time before a new plot
    del resx[:]
    del resy[:]
    del resz[:]

    for i in range(dataset.shape[2]):
        image = dataset[:, :, i]
        driftx, drifty, driftz, cm, d = otrack.compare(calImages0, calFTs0, dz0, dzn0, 10,
                                                       image, mask0, X0, Y0, deltaZ=0.2)
        resx.append(driftx)
        resy.append(drifty)
        resz.append(driftz)

    self.plotPan.draw()
    self.plotPan.Refresh()
def Start(self):
    self.image = np.zeros((self.shape_x, self.shape_y, 2), 'uint16')

    mdh = MetaDataHandler.NestedClassMDHandler()
    mdh.setEntry('StartTime', time.time())
    mdh.setEntry('AcquisitionType', 'Stack')

    # loop over all providers of metadata
    for mdgen in MetaDataHandler.provideStartMetadata:
        mdgen(mdh)

    self.img = image.ImageStack(data=self.image, mdh=mdh)
    #self.view = View3D(self.image, 'Z Stack', mdh=mdh)
    #self.view = ViewIm3D(self.img, 'Z Stack')

    self.running = True

    self.zPoss = np.arange(self.stackSettings.GetStartPos(),
                           self.stackSettings.GetEndPos() + .95 * self.stackSettings.GetStepSize(),
                           self.stackSettings.GetStepSize() * self.stackSettings.GetDirection())

    #piezo = self.scope.positioning[self.stackSettings.GetScanChannel()]
    #self.piezo = piezo[0]
    #self.piezoChan = piezo[1]
    self.posChan = self.stackSettings.GetScanChannel()
    #self.startPos = self.piezo.GetPos(self.piezoChan)
    self.startPos = self.scope.GetPos()[self.posChan]

    self.scope.frameWrangler.stop()
    self.scope.frameWrangler.onFrame.connect(self.OnCameraFrame)
    self.OnAqStart()
    self.scope.frameWrangler.start()
def OnOpenRaw(self, event):
    from PYME.IO import image
    from PYME.DSView import ViewIm3D
    try:
        ViewIm3D(image.ImageStack(haveGUI=True), mode='visGUI', glCanvas=self.glCanvas)
    except image.FileSelectionError:
        # the user cancelled the open dialog
        pass
def __init__(self, size_x, size_y, n_frames, dtype='uint16', dim_order='XYCZT', shape=[-1, -1, 1, 1, 1]):
    self.data = np.empty([size_x, size_y, n_frames], dtype=dtype)
    self.mdh = MetaDataHandler.DictMDHandler()

    # once we have proper xyztc support in the image viewer
    ds = XYZTCWrapper(ArrayDataSource(self.data), dim_order, shape[2], shape[3], shape[4])
    #self.image = image.ImageStack(data=ds, mdh=self.mdh)

    # in the meantime - note that this will flatten the ctz dimensions
    self.image = image.ImageStack(data=ds, mdh=self.mdh)
def create_pyramid_from_dataset(filename, outdir, tile_size=128, **kwargs):
    from PYME.IO import image
    dataset = image.ImageStack(filename=filename)
    xm, ym = get_position_from_events(dataset.events, dataset.mdh)

    #print(xm(np.arange(dataset.data.shape[2])))
    #print(ym(np.arange(dataset.data.shape[2])))

    p = tile_pyramid(outdir, dataset.data, xm, ym, dataset.mdh, pyramid_tile_size=tile_size)

    with open(os.path.join(outdir, 'metadata.json'), 'w') as f:
        f.write(p.mdh.to_JSON())
def main():
    import sys
    from PYME.LMVis import pipeline
    from PYME.IO.image import ImageStack

    resultFile, imageFile, speckles = sys.argv[1:]

    pipe = pipeline.Pipeline()
    pipe.OpenFile(resultFile)
    prepPipeline(pipe)

    img = ImageStack(filename=imageFile, mdh='/Users/david/Downloads/JN150629c3_4_MMStack_Pos0.ome.md')

    selectAndPlotEvents(pipe, speckleFile=speckles)
def create_pyramid_from_dataset(filename, outdir, tile_size=128, **kwargs):
    from PYME.IO import image
    dataset = image.ImageStack(filename=filename)
    xm, ym = get_position_from_events(dataset.events, dataset.mdh)

    #print(xm(np.arange(dataset.data.shape[2])))
    #print(ym(np.arange(dataset.data.shape[2])))

    p = tile_pyramid(outdir, dataset.data, xm, ym, dataset.mdh, pyramid_tile_size=tile_size)

    return p
def create_distributed_pyramid_from_dataset(filename, outdir, tile_size=128, **kwargs):
    from PYME.IO import image
    dataset = image.ImageStack(filename=filename)
    xm, ym = get_position_from_events(dataset.events, dataset.mdh)

    p = distributed_pyramid(outdir, dataset.data, xm, ym, dataset.mdh, pyramid_tile_size=tile_size)

    return p
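# A minimal usage sketch for the pyramid builders above (an assumption, not part of the
# original module): the input series and output directory are hypothetical paths, and the
# series is assumed to carry the stage-position events that get_position_from_events() needs.
def _example_build_pyramid():
    p = create_pyramid_from_dataset('/path/to/tiled_series.h5', '/path/to/pyramid_out', tile_size=256)
    print(p.mdh.to_JSON())  # the returned pyramid object exposes its metadata handler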
def test_hdf_spooler(nFrames=50):
    from PYME.IO import testClusterSpooling
    tempdir = tempfile.mkdtemp()
    filename = os.path.join(tempdir, 'test_spool.h5')
    try:
        ts = testClusterSpooling.TestSpooler(testFrameSize=[512, 512], serverfilter='TEST')
        ts.run(nFrames=nFrames, filename=filename, hdf_spooler=True, frameShape=[512, 512])

        im = image.ImageStack(filename=filename)
        # check image dimensions are as expected
        assert np.allclose(im.data.shape[:3], [512, 512, nFrames])
        im.dataSource.release()
    finally:
        shutil.rmtree(tempdir)
def file(request, filename):
    type = request.GET.get('type', 'raw')

    if type == 'raw':
        return HttpResponse(clusterIO.get_file(filename, use_file_cache=False), content_type='')
    elif type in ['tiff', 'h5']:
        from PYME.IO import image
        import tempfile

        img = image.ImageStack(filename='pyme-cluster://%s/%s' % (clusterIO.local_serverfilter, filename.rstrip('/')),
                               haveGUI=False)

        if type == 'tiff':
            ext = '.tif'
        else:
            ext = '.' + type

        fn = os.path.splitext(os.path.split(filename.rstrip('/'))[-1])[0] + ext

        # note we are being a bit tricky here to ensure our temporary file gets deleted when we are done
        # 1) We create the temporary file using the tempfile module. This gets automagically deleted when we close
        #    the file.
        # 2) We pass the filename of the temporary file to img.Save. This means that a second file object / file
        #    handle gets created, the contents get written, and the file gets closed.

        #with tempfile.NamedTemporaryFile(mode='w+b', suffix=ext) as outf:
        # don't use a context manager as this closes our file prematurely - rely on a cascading close through
        # HTTPResponse and FileWrapper instead
        outf = tempfile.NamedTemporaryFile(mode='w+b', suffix=ext)

        img.Save(outf.name)

        # seek to update the temporary file (so that it knows the new length)
        outf.seek(0)

        wrapper = FileWrapper(outf)
        response = StreamingHttpResponse(wrapper, content_type='image/%s' % ext.lstrip('.'))
        response['Content-Disposition'] = 'attachment; filename=%s' % fn
        response['Content-Length'] = os.path.getsize(outf.name)
        return response
def execute(self, namespace):
    from scipy.stats import median_absolute_deviation
    series = namespace[self.input]

    steps = range(series.data.shape[2] - self.time_window_size)
    output = np.empty((series.data.shape[0], series.data.shape[1], len(steps)),
                      dtype=series.data[:, :, 0, 0].dtype)  # only 1 color for now

    for ti in steps:
        output[:, :, ti] = median_absolute_deviation(series.data[:, :, ti:ti + self.time_window_size],
                                                     scale=1, axis=2)

    out = image.ImageStack(data=output)
    out.mdh = MetaDataHandler.NestedClassMDHandler()
    try:
        out.mdh.copyEntriesFrom(series.mdh)
    except AttributeError:
        pass
    out.mdh['Analysis.FilterSpikes.TimeWindowSize'] = self.time_window_size

    namespace[self.output] = out
def OnLabelLookupByID(self, event):
    from PYME.DSView import dsviewer, ViewIm3D
    import PYME.IO.image as im

    if 'Everything' in self.qpMeasurements:
        meas = self.qpMeasurements['Everything']
        selection = selectWithDialog(dsviewer.openViewers.keys())
        if selection is None:
            return

        keyChoice = KeyChoice()
        keyChoice.add_keys(sorted(meas.keys()))
        if not keyChoice.configure_traits(kind='modal'):
            return

        labelimg = dsviewer.openViewers[selection].image
        labels = labelimg.data[:, :, :].squeeze()
        measures = meas.lookupByID(labels, keyChoice.Key)

        newimg = im.ImageStack(measures, titleStub='Measure %s' % (keyChoice.Key))
        newimg.mdh.copyEntriesFrom(labelimg.mdh)
        newimg.mdh['Parent'] = labelimg.filename
        newimg.mdh['Processing.qpMeasure'] = keyChoice.Key

        ViewIm3D(newimg, mode='visGUI', title='Measure %s' % (keyChoice.Key),
                 glCanvas=self.visFr.glCanvas, parent=self.visFr)
def execute(self, namespace):
    from scipy.stats import median_absolute_deviation
    series = namespace[self.input]

    diff = np.diff(series.data[:, :, :, 0]).squeeze()
    over_jump_threshold = np.zeros(series.data.shape[:-1], dtype=bool)
    over_jump_threshold[:, :, 1:] = diff > self.threshold_change

    output = np.copy(series.data[:, :, :, 0].squeeze())  # only 1 color for now

    for ti in range(series.data.shape[2] - self.time_window_size):
        data = output[:, :, ti:ti + self.time_window_size]
        median = np.median(data, axis=2)
        spikes = np.logical_and(
            data > (self.threshold_factor * median_absolute_deviation(data, scale=1, axis=2) + median)[:, :, None],
            over_jump_threshold[:, :, ti:ti + self.time_window_size])
        spike_locs = np.nonzero(spikes)
        output[spike_locs[0], spike_locs[1], spike_locs[2] + ti] = median[spike_locs[0], spike_locs[1]]

    out = image.ImageStack(data=output)
    out.mdh = MetaDataHandler.NestedClassMDHandler()
    try:
        out.mdh.copyEntriesFrom(series.mdh)
    except AttributeError:
        pass
    out.mdh['Analysis.FilterSpikes.ThresholdFactor'] = self.threshold_factor
    out.mdh['Analysis.FilterSpikes.ThresholdChange'] = self.threshold_change
    out.mdh['Analysis.FilterSpikes.TimeWindowSize'] = self.time_window_size

    namespace[self.output] = out
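# A toy sketch of the spike criterion used above (illustration only, not part of the
# original module). It computes the scale=1 MAD with plain numpy rather than
# scipy.stats.median_absolute_deviation, which is deprecated in recent SciPy releases;
# the helper name and default threshold are assumptions.
def _example_spike_mask(window, threshold_factor=5.0):
    import numpy as np
    # window: (x, y, t) block of frames; a pixel-frame is flagged when it exceeds
    # median + threshold_factor * MAD along the time axis
    med = np.median(window, axis=2)
    mad = np.median(np.abs(window - med[:, :, None]), axis=2)
    return window > (med + threshold_factor * mad)[:, :, None]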
def spoolImageFromFile(self, filename):
    """Load an image file and then spool it"""
    from PYME.IO import image

    self.spoolData(image.ImageStack(filename=filename).data)
        widths=(x_width_px * pixel_size[0], z_width_px * pixel_size[2]),
        identifier=pi)
    return_dictionary[pi] = profile


if __name__ == '__main__':
    handler = LineProfileHandler()
    base_dir = '/home/smeagol/code/invivo-sted/2019-9-17/quantifying-3d-resolution-er-atto590/twophoton'
    root_dir = os.path.join(base_dir, '')
    filename = os.path.join(root_dir, 'twophoton-0051_Seq1_Ch1.tif')

    im = image.ImageStack(filename=filename, haveGUI=False)
    data = im.data[:, :, :, 0].squeeze()

    # note that at the moment anisotropic xy pixels aren't compatible, need x and y to be the same unless we
    # extract/pass a more sophisticated xy position array to the multiaxis profile init
    pixel_size = (39.062500, 39.062500, 50.0)

    profile_width = 400  # nm
    x_width_px = round(profile_width / pixel_size[0])
    y_width_px = round(profile_width / pixel_size[1])
    z_width_px = round(profile_width / pixel_size[2])
    widths_px = (x_width_px, y_width_px, z_width_px)
    # profile_length = 2000

    roi_info = np.genfromtxt(os.path.join(root_dir, 'centerpoints-and-angles.csv'), delimiter=',')
def main():
    chipsize = (2048, 2048)  # we currently assume this is correct but could be chosen based
                             # on camera model in metadata
    darkthreshold = 1e4  # this really should depend on the gain mode (12 bit vs 16 bit etc)
    variancethreshold = 300**2  # again this is currently picked fairly arbitrarily
    blemishvariance = 1e8

    # options parsing
    op = argparse.ArgumentParser(description='generate offset and variance maps from darkseries.')
    op.add_argument('filename', metavar='filename', nargs='?', default=None,
                    help='filename of the darkframe series')
    op.add_argument('-s', '--start', type=int, default=0, help='start frame to use')
    op.add_argument('-e', '--end', type=int, default=-1, help='end frame to use')
    op.add_argument('-u', '--uniform', action='store_true',
                    help='make uniform map using metadata info')
    op.add_argument('-i', '--install', action='store_true',
                    help='install map in default location - the filename argument is a map')
    op.add_argument('-d', '--dir', metavar='destdir', default=None,
                    help='destination directory (default is PYME calibration path)')
    op.add_argument('-l', '--list', action='store_true',
                    help='list all maps in default location')
    args = op.parse_args()

    if args.list:
        listCalibrationDirs()
        sys.exit(0)

    # body of script
    filename = args.filename
    if filename is None:
        op.error('need a file name if -l/--list is not requested')

    print('Opening image series...', file=sys.stderr)
    source = im.ImageStack(filename=filename)

    if args.install:
        if source.mdh.getOrDefault('Analysis.name', '') != 'mean-variance':
            print('Analysis.name is not equal to "mean-variance" - probably not a map', file=sys.stderr)
            sys.exit('aborting...')
        if source.mdh['Analysis.resultname'] == 'mean':
            maptype = 'dark'
        else:
            maptype = 'variance'
        mapname = mkDefaultPath(maptype, source.mdh)
        saveasmap(source.dataSource.getSlice(0), mapname, mdh=source.mdh)
        sys.exit(0)

    start = args.start
    end = args.end
    if end < 0:
        end = int(source.dataSource.getNumSlices() + end)

    print('Calculating mean and variance...', file=sys.stderr)

    m, ve = (None, None)
    nbad = 0
    if not args.uniform:
        m, v = meanvards(source.dataSource, start=start, end=end)
        eperADU = source.mdh['Camera.ElectronsPerCount']
        ve = v * eperADU * eperADU

        # occasionally the cameras seem to have completely unusable pixels
        # one example was dark being 65535 (i.e. max value for 16 bit)
        if m.max() > darkthreshold:
            ve[m > darkthreshold] = blemishvariance
        if ve.max() > variancethreshold:
            ve[ve > variancethreshold] = blemishvariance

        nbad = np.sum((m > darkthreshold) * (ve > variancethreshold))

    # if the uniform flag is set, then m and ve are passed as None,
    # which makes sure that just the uniform defaults from metadata are used
    mfull, vefull, basemdh = insertIntoFullMap(m, ve, source.mdh, chipsize=chipsize)
    #mfull, vefull, basemdh = (m, ve, source.mdh)

    print('Saving results...', file=sys.stderr)

    if args.dir is None:
        print('installing in standard location...', file=sys.stderr)
        mname = mkDefaultPath('dark', source.mdh)
        vname = mkDefaultPath('variance', source.mdh)
    else:
        mname = mkDestPath(args.dir, 'dark', source.mdh)
        vname = mkDestPath(args.dir, 'variance', source.mdh)

    print('dark map -> %s...' % mname, file=sys.stderr)
    print('var map -> %s...' % vname, file=sys.stderr)

    commonMD = NestedClassMDHandler()
    commonMD.setEntry('Analysis.name', 'mean-variance')
    commonMD.setEntry('Analysis.start', start)
    commonMD.setEntry('Analysis.end', end)
    commonMD.setEntry('Analysis.SourceFilename', filename)
    commonMD.setEntry('Analysis.darkThreshold', darkthreshold)
    commonMD.setEntry('Analysis.varianceThreshold', variancethreshold)
    commonMD.setEntry('Analysis.blemishVariance', blemishvariance)
    commonMD.setEntry('Analysis.NBadPixels', nbad)
    if args.uniform:
        commonMD.setEntry('Analysis.isuniform', True)

    mmd = NestedClassMDHandler(basemdh)
    mmd.copyEntriesFrom(commonMD)
    mmd.setEntry('Analysis.resultname', 'mean')
    mmd.setEntry('Analysis.units', 'ADU')

    vmd = NestedClassMDHandler(basemdh)
    vmd.copyEntriesFrom(commonMD)
    vmd.setEntry('Analysis.resultname', 'variance')
    vmd.setEntry('Analysis.units', 'electrons^2')

    saveasmap(mfull, mname, mdh=mmd)
    saveasmap(vefull, vname, mdh=vmd)