Example #1
    def execute(self, namespace):
        from PYME.Analysis.points import spherical_harmonics
        from PYME.IO import MetaDataHandler

        inp = namespace[self.input_localizations]
        points = tabular.MappingFilter(inp)
        shell = namespace[self.input_shell]
        if isinstance(shell, tabular.TabularBase):
            shell = spherical_harmonics.ScaledShell.from_tabular(shell)

        # map points to scaled spherical coordinates
        azimuth, zenith, r = shell.shell_coordinates(
            (points['x'], points['y'], points['z']))
        # lookup shell radius at those angles
        r_shell = spherical_harmonics.reconstruct_shell(
            shell.modes, shell.coefficients, azimuth, zenith)

        points.addColumn(self.name_scaled_azimuth, azimuth)
        points.addColumn(self.name_scaled_zenith, zenith)
        points.addColumn(self.name_scaled_radius, r)
        points.addColumn(self.name_normalized_radius, r / r_shell)

        try:
            points.mdh = MetaDataHandler.DictMDHandler(inp.mdh)
        except AttributeError:
            pass
        namespace[self.output_mapped] = points
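A minimal numpy-only sketch of the normalization step above: points with normalized radius r / r_shell < 1 lie inside the shell surface. The shell radii here are made up for illustration; in the module they come from spherical_harmonics.reconstruct_shell().

import numpy as np

r = np.array([50.0, 120.0, 200.0])         # point radii (nm)
r_shell = np.array([150.0, 150.0, 150.0])  # shell radius at the same angles
r_norm = r / r_shell                       # the name_normalized_radius column
inside = r_norm < 1                        # [True, True, False]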
Example #2
    def execute(self, namespace):
        data = namespace[self.input]

        n_rows = len(data)

        if n_rows < self.num_to_select:
            if self.strict:
                raise IndexError(
                    'Trying to select %d from data with only %d rows. To allow truncation, use strict=False'
                    % (self.num_to_select, n_rows))
            else:
                logger.info(
                    'RandomSubset: Truncating selection from %d to %d rows as the data only has %d rows. To make this an error, use strict=True'
                    % (self.num_to_select, n_rows, n_rows))

        if self.strict and (self.num_to_select > 0.5 * n_rows):
            logger.warning(
                'RandomSubset: Selecting %d from %d rows will not be very random'
                % (self.num_to_select, n_rows))

        out = tabular.RandomSelectionFilter(data,
                                            num_Samples=min(
                                                n_rows, self.num_to_select))

        try:
            out.mdh = MetaDataHandler.DictMDHandler(data.mdh)
        except AttributeError:
            pass

        namespace[self.output] = out
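A standalone sketch of the strict/truncation behaviour above (plain Python, no PYME): with strict=True a too-large request raises IndexError, otherwise the selection is silently truncated to the available rows.

def select_count(num_to_select, n_rows, strict=True):
    # mirrors the branch logic in the execute() method above
    if n_rows < num_to_select and strict:
        raise IndexError('Trying to select %d from data with only %d rows'
                         % (num_to_select, n_rows))
    return min(n_rows, num_to_select)

assert select_count(5, 10) == 5
assert select_count(20, 10, strict=False) == 10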
Example #3
    def __init__(self,
                 spooler,
                 seriesName,
                 analysisMetadata,
                 resultsFilename=None,
                 startAt=0,
                 serverfilter=clusterIO.local_serverfilter,
                 **kwargs):
        # TODO - reduce duplication of `LocalisationRule.__init__()` and `LocalisationRule._setup()`
        from PYME.IO import MetaDataHandler
        from PYME.Analysis import MetaData
        from PYME.IO.FileUtils.nameUtils import genClusterResultFileName
        from PYME.IO import unifiedIO

        self.spooler = spooler

        if resultsFilename is None:
            resultsFilename = genClusterResultFileName(seriesName)

        resultsFilename = verify_cluster_results_filename(resultsFilename)
        logger.info('Results file: ' + resultsFilename)

        resultsMdh = MetaDataHandler.DictMDHandler()
        # NB - anything passed in analysis MDH will wipe out corresponding entries in the series metadata
        resultsMdh.update(self.spooler.md)
        resultsMdh.update(analysisMetadata)
        resultsMdh['EstimatedLaserOnFrameNo'] = resultsMdh.getOrDefault(
            'EstimatedLaserOnFrameNo',
            resultsMdh.getOrDefault('Analysis.StartAt', 0))
        MetaData.fixEMGain(resultsMdh)

        self._setup(seriesName, resultsMdh, resultsFilename, startAt,
                    serverfilter)

        Rule.__init__(self, **kwargs)
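The two update() calls above give analysisMetadata precedence over the series metadata: later updates win, exactly as with plain dict.update(). A tiny illustration, with dicts standing in for DictMDHandler:

series_md = {'Camera.ReadNoise': 1.1, 'Analysis.StartAt': 30}
analysis_md = {'Analysis.StartAt': 10}

results_md = {}
results_md.update(series_md)
results_md.update(analysis_md)  # wipes out the corresponding series entry
assert results_md['Analysis.StartAt'] == 10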
Example #4
    def __init__(self, size_x, size_y, n_frames, dtype='uint16', dim_order='XYCZT', shape=[-1, -1, 1, 1, 1]):
        self.data = np.empty([size_x, size_y, n_frames], dtype=dtype)
        self.mdh = MetaDataHandler.DictMDHandler()

        # wrap the array as an XYZTC data source; once the image viewer has
        # proper xyztc support this can be used directly. In the meantime,
        # note that ImageStack will flatten the ctz dimensions.
        ds = XYZTCWrapper(ArrayDataSource(self.data), dim_order, shape[2], shape[3], shape[4])
        self.image = image.ImageStack(data=ds, mdh=self.mdh)
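The shape list follows dim_order, so for 'XYCZT' the last three entries are the C, Z and T sizes handed to XYZTCWrapper, with -1 marking the X/Y entries that come from size_x/size_y instead. A plain-Python sketch of that bookkeeping:

dim_order = 'XYCZT'
shape = [-1, -1, 1, 1, 100]  # X and Y (-1) are taken from size_x/size_y
sizes = dict(zip(dim_order, shape))
assert (sizes['C'], sizes['Z'], sizes['T']) == (1, 1, 100)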
Example #5
    def execute(self, namespace):
        from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
        from PYME.IO import MetaDataHandler

        points = namespace[self.input_points]
        X = np.stack([points['x'], points['y'], points['z']], axis=1)

        if self.mode == 'n':
            gmm = GaussianMixture(n_components=self.n,
                                  covariance_type=self.covariance)
            predictions = gmm.fit_predict(X)

        elif self.mode == 'bic':
            n_components = range(1, self.n + 1)
            bic = np.zeros(len(n_components))
            for ind in range(len(n_components)):
                gmm = GaussianMixture(n_components=n_components[ind],
                                      covariance_type=self.covariance)
                gmm.fit(X)
                bic[ind] = gmm.bic(X)
                logger.debug('%d BIC: %f' % (n_components[ind], bic[ind]))

            best = n_components[np.argmin(bic)]
            if best == self.n or (self.n > 10 and best > 0.9 * self.n):
                logger.warning(
                    'BIC optimization selected n components near n max')

            gmm = GaussianMixture(n_components=best,
                                  covariance_type=self.covariance)
            predictions = gmm.fit_predict(X)

        elif self.mode == 'bayesian':
            bgm = BayesianGaussianMixture(n_components=self.n,
                                          covariance_type=self.covariance)
            predictions = bgm.fit_predict(X)

        out = tabular.MappingFilter(points)
        try:
            out.mdh = MetaDataHandler.DictMDHandler(points.mdh)
        except AttributeError:
            pass

        out.addColumn(self.label_key, predictions)
        namespace[self.output_labeled] = out
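A self-contained sketch of the 'bic' branch using synthetic data: fit a GaussianMixture for each candidate component count and keep the one that minimizes the Bayesian information criterion (sklearn only; parameters simplified relative to the module above).

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, (100, 3)), rng.normal(5, 1, (100, 3))])

bic = [GaussianMixture(n_components=n, random_state=0).fit(X).bic(X)
       for n in range(1, 5)]
best = int(np.argmin(bic)) + 1  # expect 2 for this two-blob data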
Example #6
    def execute(self, namespace):
        from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
        from PYME.IO import MetaDataHandler

        points = namespace[self.input_points]
        X = np.stack([points['x'], points['y'], points['z']], axis=1)

        if self.mode == 'n':
            gmm = GaussianMixture(n_components=self.n,
                                  covariance_type=self.covariance,
                                  max_iter=self.max_iter,
                                  init_params=self.init_params)
            predictions = gmm.fit_predict(X) + 1  # PYME labeling scheme
            log_prob = gmm.score_samples(X)
            if not gmm.converged_:
                logger.error('GMM fitting did not converge')
                predictions = np.zeros(len(points), int)
                log_prob = -np.inf * np.ones(len(points))

        elif self.mode == 'bic':
            n_components = range(1, self.n + 1)
            bic = np.zeros(len(n_components))
            for ind in range(len(n_components)):
                gmm = GaussianMixture(n_components=n_components[ind],
                                      covariance_type=self.covariance,
                                      max_iter=self.max_iter,
                                      init_params=self.init_params)
                gmm.fit(X)
                bic[ind] = gmm.bic(X)
                logger.debug('%d BIC: %f' % (n_components[ind], bic[ind]))

            best = n_components[np.argmin(bic)]
            if best == self.n or (self.n > 10 and best > 0.9 * self.n):
                logger.warning(
                    'BIC optimization selected n components near n max')

            gmm = GaussianMixture(n_components=best,
                                  covariance_type=self.covariance,
                                  max_iter=self.max_iter,
                                  init_params=self.init_params)
            predictions = gmm.fit_predict(X) + 1  # PYME labeling scheme
            log_prob = gmm.score_samples(X)
            if not gmm.converged_:
                logger.error('GMM fitting did not converge')
                predictions = np.zeros(len(points), int)
                log_prob = -np.inf * np.ones(len(points))

        elif self.mode == 'bayesian':
            bgm = BayesianGaussianMixture(n_components=self.n,
                                          covariance_type=self.covariance,
                                          max_iter=self.max_iter,
                                          init_params=self.init_params)
            predictions = bgm.fit_predict(X) + 1  # PYME labeling scheme
            log_prob = bgm.score_samples(X)
            if not bgm.converged_:
                logger.error('GMM fitting did not converge')
                predictions = np.zeros(len(points), int)
                log_prob = -np.inf * np.ones(len(points))

        out = tabular.MappingFilter(points)
        try:
            out.mdh = MetaDataHandler.DictMDHandler(points.mdh)
        except AttributeError:
            pass

        out.addColumn(self.label_key, predictions)
        out.addColumn(self.label_key + '_log_prob', log_prob)
        avg_log_prob = np.empty_like(log_prob)
        for label in np.unique(predictions):
            mask = label == predictions
            avg_log_prob[mask] = np.mean(log_prob[mask])
        out.addColumn(self.label_key + '_avg_log_prob', avg_log_prob)
        namespace[self.output_labeled] = out
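The per-label averaging at the end of this module gives every point the mean log-probability of the cluster it was assigned to; a minimal numpy illustration:

import numpy as np

predictions = np.array([1, 1, 2, 2, 2])
log_prob = np.array([-1.0, -3.0, -2.0, -2.0, -5.0])
avg_log_prob = np.empty_like(log_prob)
for label in np.unique(predictions):
    mask = label == predictions
    avg_log_prob[mask] = np.mean(log_prob[mask])
# avg_log_prob -> [-2., -2., -3., -3., -3.]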
Example #7
    def start_spooling(self, fn=None, settings={}, preflight_mode='interactive'):
        """

        Parameters
        ----------
        fn : str, optional
            fn can be hardcoded here; otherwise it defers to the seriesName
            property, which will create one if need be.
        settings : dict
            keys should be `SpoolController` attributes or properties with
            setters. No key is required; example keys include:
                method : str
                    One of 'File', 'Cluster', or 'Queue' (py2 only)
                hdf_compression_level: int
                    zlib compression level that pytables should use (spool to
                    file and queue)
                z_stepped : bool
                    toggle z-stepping during acquisition
                z_dwell : int
                    number of frames to acquire at each z level (predicated on
                    `SpoolController.z_stepped` being True)
                cluster_h5 : bool
                    Toggle spooling to single h5 file on cluster rather than pzf
                    file per frame. Only applicable to 'Cluster' `method` and
                    preferred for PYMEClusterOfOne.
                pzf_compression_settings : dict
                    Compression settings relevant for 'Cluster' `method` if
                    `cluster_h5` is False. See HTTPSpooler.defaultCompSettings.
                protocol_name : str
                    Note that passing the protocol name will force a (re)load of
                    the protocol file (even if it is already selected).
                max_frames : int, optional
                    point at which to end the series automatically, by default
                    sys.maxsize
                subdirectory : str, optional
                    Directory within current set directory to spool this series. The
                    directory will be created if it doesn't already exist.
                extra_metadata : dict, optional
                    metadata to supplement this series with entries known prior to
                    acquisition which do not have handlers hooked into the start metadata
        preflight_mode : str (default='interactive')
            What to do when the preflight check fails. Options are 'interactive'
            (display a dialog and prompt the user), 'warn' (log a warning and
            continue), 'abort' (log an error and abort), and 'skip' (skip the
            check entirely). The first is suitable for interactive acquisition,
            whereas one of the latter modes is likely better for automated
            spooling via the action manager.
        """
        # these settings were managed by the GUI but are now managed by the
        # controller; still allow them to be passed in, but default to internals

        fn = self.seriesName if fn in ['', None] else fn
        stack = settings.get('z_stepped', self.z_stepped)
        compLevel = settings.get('hdf_compression_level', self.hdf_compression_level)
        pzf_compression_settings = settings.get('pzf_compression_settings', self.pzf_compression_settings)
        cluster_h5 = settings.get('cluster_h5', self.cluster_h5)
        maxFrames = settings.get('max_frames', sys.maxsize)
        stack_settings = settings.get('stack_settings', None)

        # try stack settings for z_dwell, then aq settings.
        # precedence is settings > stack_settings > self.z_dwell
        # The reasoning for allowing the dwell time to be set in either the spooling or stack settings is to allow
        # API users to choose which is most coherent for their use case (it would seem logical to put dwell time with
        # the other stack settings, but this becomes problematic when sharing stack settings across modalities - e.g.
        # PALM/STORM and widefield stacks which are likely to share most of the stack settings but have greatly different
        # z dwell times). PYMEAcquire specifies it in the spooling/series settings by default to allow shared usage
        # between modalities.
        if stack_settings:
            if isinstance(stack_settings, dict):
                z_dwell = stack_settings.get('DwellFrames', self.z_dwell)
            else:
                # have a StackSettings object
                # TODO - fix this to be a bit more sane and not use private attributes etc ...
                z_dwell = stack_settings._dwell_frames
                # z_dwell defaults to -1 (meaning 'ignore') in StackSettings objects if a value is not
                # explicitly provided. In this case, use our internal value instead. The reason for the 'ignore'
                # special value is to allow the same StackSettings object to be used for widefield stacks and
                # localization series (where sharing everything except dwell time makes sense).
                if z_dwell < 1:
                    z_dwell = self.z_dwell
        else:
            z_dwell = self.z_dwell
        
        z_dwell = settings.get('z_dwell', z_dwell)
        
        if (stack_settings is not None) and (not isinstance(stack_settings, stackSettings.StackSettings)):
            # let us pass stack settings as a dict, constructing a StackSettings instance as needed
            stack_settings = stackSettings.StackSettings(**dict(stack_settings))
        
        protocol_name = settings.get('protocol_name')
        if protocol_name is None:
            protocol, protocol_z = self.protocol, self.protocolZ
        else:
            pmod = prot.get_protocol(protocol_name)
            protocol, protocol_z = pmod.PROTOCOL, pmod.PROTOCOL_STACK

        subdirectory = settings.get('subdirectory', None)
        # make directories as needed, makedirs(dir, exist_ok=True) once py2 support is dropped
        if (self.spoolType != 'Cluster') and (not os.path.exists(self.get_dirname(subdirectory))):
            os.makedirs(self.get_dirname(subdirectory))

        if self._checkOutputExists(fn): #check to see if data with the same name exists
            self.seriesCounter +=1
            self.seriesName = self._GenSeriesName()
            
            raise IOError('A series with the same name already exists')

        if stack:
            protocol = protocol_z
            protocol.dwellTime = z_dwell

        if (preflight_mode != 'skip') and not preflight.ShowPreflightResults(protocol.PreflightCheck(), preflight_mode):
            return  # bail if the preflight check failed and the user didn't choose to continue

        #fix timing when using fake camera
        if self.scope.cam.__class__.__name__ == 'FakeCamera':
            fakeCycleTime = self.scope.cam.GetIntegTime()
        else:
            fakeCycleTime = None
            
        frameShape = (self.scope.cam.GetPicWidth(), self.scope.cam.GetPicHeight())
        
        if self.spoolType == 'Queue':
            from PYME.Acquire import QueueSpooler
            self.queueName = getRelFilename(self._get_queue_name(fn, subdirectory=subdirectory))
            self.spooler = QueueSpooler.Spooler(self.queueName, self.scope.frameWrangler.onFrame, 
                                                frameShape = frameShape, protocol=protocol, 
                                                guiUpdateCallback=self._ProgressUpate, complevel=compLevel, 
                                                fakeCamCycleTime=fakeCycleTime, maxFrames=maxFrames, stack_settings=stack_settings)
        elif self.spoolType == 'Cluster':
            from PYME.Acquire import HTTPSpooler
            self.queueName = self._get_queue_name(fn, pcs=(not cluster_h5), 
                                                  subdirectory=subdirectory)
            self.spooler = HTTPSpooler.Spooler(self.queueName, self.scope.frameWrangler.onFrame,
                                               frameShape = frameShape, protocol=protocol,
                                               guiUpdateCallback=self._ProgressUpate,
                                               fakeCamCycleTime=fakeCycleTime, maxFrames=maxFrames,
                                               compressionSettings=pzf_compression_settings, aggregate_h5=cluster_h5, stack_settings=stack_settings)
           
        else:
            from PYME.Acquire import HDFSpooler
            self.spooler = HDFSpooler.Spooler(self._get_queue_name(fn, subdirectory=subdirectory),
                                              self.scope.frameWrangler.onFrame,
                                              frameShape = frameShape, protocol=protocol, 
                                              guiUpdateCallback=self._ProgressUpate, complevel=compLevel, 
                                              fakeCamCycleTime=fakeCycleTime, maxFrames=maxFrames, stack_settings=stack_settings)

        #TODO - sample info is probably better handled with a metadata hook
        #if sampInf:
        #    try:
        #        sampleInformation.getSampleData(self, self.spooler.md)
        #    except:
        #        #the connection to the database will timeout if not present
        #        #FIXME: catch the right exception (or delegate handling to sampleInformation module)
        #        pass
        extra_metadata = settings.get('extra_metadata')
        if extra_metadata is not None:
            self.spooler.md.mergeEntriesFrom(MetaDataHandler.DictMDHandler(extra_metadata))

        # stop the frameWrangler before we start spooling
        # this serves to ensure that a) we don't accidentally spool frames which were in the camera buffer when we hit start
        # and b) we get a nice clean timestamp for when the actual frames start (after any protocol init tasks)
        # it might also slightly improve performance.
        self.scope.frameWrangler.stop()
        
        try:
            self.spooler.onSpoolStop.connect(self.SpoolStopped)
            self.spooler.StartSpool()
        except:
            self.spooler.abort()
            raise

        # restart frame wrangler
        self.scope.frameWrangler.Prepare()
        self.scope.frameWrangler.start()
        
        self.onSpoolStart.send(self)
        
        #return a function which can be called to indicate if we are done
        return lambda : self.spooler.spool_complete
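A hypothetical call sketch (spool_controller stands in for a SpoolController instance; the settings keys follow the docstring above):

# done = spool_controller.start_spooling(
#     settings={'method': 'Cluster',
#               'cluster_h5': True,
#               'z_stepped': False,
#               'max_frames': 10000},
#     preflight_mode='abort')
# ... later: done() evaluates truthy once spooling is complete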
Example #8
    def execute(self, namespace):
        from PYME.Analysis.points import spherical_harmonics
        from PYME.IO import MetaDataHandler

        shell = namespace[self.input_shell]
        if isinstance(shell, tabular.TabularBase):
            shell = spherical_harmonics.ScaledShell.from_tabular(shell)

        bin_edges = np.arange(0, 1.0 + self.r_bin_spacing, self.r_bin_spacing)
        bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])
        out_hist = np.zeros(len(bin_centers), float)

        # get shell bounds, make grid within
        shell_bounds = shell.approximate_image_bounds()
        xv = np.arange(shell_bounds.x0, shell_bounds.x1 + self.sampling_nm[0],
                       self.sampling_nm[0])
        yv = np.arange(shell_bounds.y0, shell_bounds.y1 + self.sampling_nm[1],
                       self.sampling_nm[1])
        zv = np.arange(shell_bounds.z0, shell_bounds.z1 + self.sampling_nm[2],
                       self.sampling_nm[2])
        x, y, z = np.meshgrid(xv, yv, zv, indexing='ij')

        v_estimates = []
        sdev_estimates = []
        n_choose = 10000

        for _ in range(self.jitter_iterations):
            xr = np.random.rand(len(xv), len(yv), len(zv))
            yr = np.random.rand(len(xv), len(yv), len(zv))
            zr = np.random.rand(len(xv), len(yv), len(zv))
            xr = (xr - 0.5) * self.sampling_nm[0] + x
            yr = (yr - 0.5) * self.sampling_nm[1] + y
            zr = (zr - 0.5) * self.sampling_nm[2] + z
            azi, zen, r = shell.shell_coordinates((xr, yr, zr))
            r_shell = spherical_harmonics.reconstruct_shell(
                shell.modes, shell.coefficients, azi, zen)
            inside = r < r_shell
            N = np.sum(inside)
            r_norm = r[inside] / r_shell[inside]
            # sum-normalize this iteration and add to output
            out_hist += np.histogram(r_norm, bins=bin_edges)[0] / N

            # record volume estimate
            v_estimates.append(N)
            # estimate spread along principal axes of the shell
            X = np.vstack([xr[inside], yr[inside], zr[inside]])
            if N > n_choose:
                # downsample to avoid memory error
                X = X[:, np.random.choice(N, n_choose, replace=False)]
            # TODO - do we need to be mean-centered?
            X = X - X.mean(axis=1)[:, None]
            _, s, _ = np.linalg.svd(X.T)
            # SVD singular values are not normalized by sample size; divide by
            # sqrt(N - 1) to get standard deviations (with Bessel's correction)
            sdev_estimates.append(s / np.sqrt(X.shape[1] - 1))

        # finish the average
        out_hist = out_hist / self.jitter_iterations
        # finish the volume calculation, convert from nm^3 to um^3
        volume = np.mean(v_estimates) * (np.prod(self.sampling_nm) / (1e9))
        # average the standard deviation estimates
        standard_deviations = np.mean(np.stack(sdev_estimates), axis=0)
        # similar to Basser, P. J., et al. doi.org/10.1006/jmrb.1996.0086
        # note that singular values are square roots of the eigenvalues; use
        # the sample standard deviation rather than the population one.
        anisotropy = np.sqrt(np.var(standard_deviations**2, ddof=1)) / (
            np.sqrt(3) * np.mean(standard_deviations**2))

        res = tabular.DictSource({
            'bin_centers': bin_centers,
            'counts': out_hist
        })
        try:
            res.mdh = MetaDataHandler.DictMDHandler(shell.mdh)
        except AttributeError:
            res.mdh = MetaDataHandler.DictMDHandler()

        res.mdh['SHShellRadiusDensityEstimate.Volume'] = float(volume)
        res.mdh['SHShellRadiusDensityEstimate.StdDeviations'] = standard_deviations.tolist()
        res.mdh['SHShellRadiusDensityEstimate.Anisotropy'] = float(anisotropy)

        namespace[self.output] = res
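A standalone check of the anisotropy formula above: the singular values s are square roots of eigenvalues, so s**2 enters the Basser-style fractional anisotropy with the sample (ddof=1) variance. Isotropic spread gives 0; elongated spread gives a positive value.

import numpy as np

def fa(s):
    return np.sqrt(np.var(s**2, ddof=1)) / (np.sqrt(3) * np.mean(s**2))

assert np.isclose(fa(np.array([100.0, 100.0, 100.0])), 0.0)
assert fa(np.array([200.0, 100.0, 50.0])) > 0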
Example #9
    def __init__(self, parent=None):                
        wx.Frame.__init__(self, parent, wx.ID_ANY, 'The PYME Bakery')
        
        logger.debug('BatchFrame.__init__ start')
        self.rm = RecipeManager()
        #self.inputFiles = []
        #self.inputFiles2 = []
        self._default_md = MetaDataHandler.DictMDHandler(MetaData.ConfocDefault)
        
        self._file_lists = []
        
        vsizer1=wx.BoxSizer(wx.VERTICAL)
        hsizer = wx.StaticBoxSizer(wx.StaticBox(self, -1, "Recipe:"), wx.HORIZONTAL)
        self.recipeView = RecipeView(self, self.rm)
        
        hsizer.Add(self.recipeView, 1, wx.ALL|wx.EXPAND, 2)
        
        vsizer1.Add(hsizer, 1, wx.ALL|wx.EXPAND, 2)
        
        hsizer1 = wx.BoxSizer(wx.HORIZONTAL)

        sbsizer = wx.StaticBoxSizer(wx.StaticBox(self, -1, 'Input files:'), wx.VERTICAL)
        self._file_lists.append(FileListPanel(self, -1))
        sbsizer.Add(self._file_lists[-1], 1, wx.EXPAND, 0)
        hsizer1.Add(sbsizer, 1, wx.EXPAND, 10)

        sbsizer = wx.StaticBoxSizer(wx.StaticBox(self, -1, 'Input files (input2) [optional]:'), wx.VERTICAL)
        self._file_lists.append(FileListPanel(self, -1))
        sbsizer.Add(self._file_lists[-1], 1, wx.EXPAND, 0)
        hsizer1.Add(sbsizer, 1, wx.EXPAND, 10)

        self._sb_metadata = wx.StaticBox(self, -1, 'Metadata defaults')
        sbsizer = wx.StaticBoxSizer(self._sb_metadata, wx.VERTICAL)
        sbsizer.Add(wx.StaticText(self, -1, 'If metadata is not found in input images,\nthe following defaults will be used:'), 0, wx.EXPAND,0)
        self._mdpan = MetadataTree.MetadataPanel(self, self._default_md, refreshable=False)
        sbsizer.Add(self._mdpan, 1, wx.EXPAND, 0)
        hsizer1.Add(sbsizer, 0, wx.EXPAND, 10)
        
        
        vsizer1.Add(hsizer1, 0, wx.EXPAND|wx.TOP, 10)
        
        hsizer2 = wx.StaticBoxSizer(wx.StaticBox(self, -1, 'Output Directory:'), wx.HORIZONTAL)
        
        self.dcOutput = wx.DirPickerCtrl(self, -1, style=wx.DIRP_USE_TEXTCTRL)
        hsizer2.Add(self.dcOutput, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 2)
        
        vsizer1.Add(hsizer2, 0, wx.EXPAND|wx.TOP, 10)
        
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        hsizer.AddStretchSpacer()

        self.cbSpawnWorkerProcs = wx.CheckBox(self, -1, 'spawn worker processes for each core')
        self.cbSpawnWorkerProcs.SetValue(True)
        hsizer.Add(self.cbSpawnWorkerProcs, 0, wx.ALL, 5)

        self.bBake = wx.Button(self, -1, 'Bake') 
        hsizer.Add(self.bBake, 0, wx.ALL, 5)
        self.bBake.Bind(wx.EVT_BUTTON, self.OnBake)
        
        vsizer1.Add(hsizer, 0, wx.EXPAND|wx.TOP, 10)
                
        self.SetSizerAndFit(vsizer1)

        logger.debug('BatchFrame.__init__ done')
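A hypothetical launch sketch (standard wxPython boilerplate; assumes the module-level imports BatchFrame relies on are available):

# import wx
# app = wx.App()
# frame = BatchFrame()
# frame.Show()
# app.MainLoop()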
Example #10
from PYME.IO.DataSources.RandomDataSource import DataSource
from PYME.IO.buffers import dataBuffer
from warpdrive.buffers import Buffer
import numpy as np

try:
    from PYME.localization.remFitBuf import CameraInfoManager
    from PYME.IO import MetaDataHandler
    mdh = MetaDataHandler.DictMDHandler()
    mdh['Camera.ReadNoise'] = 1.0
    mdh['Camera.NoiseFactor'] = 1.0
    mdh['Camera.ElectronsPerCount'] = 1.0
    mdh['Camera.TrueEMGain'] = 1.0
    mdh['voxelsize.x'] = 0.7
    mdh['voxelsize.y'] = 0.7
    mdh['Analysis.DetectionFilterSize'] = 3
    mdh['Analysis.ROISize'] = 4.5
    mdh['Analysis.GPUPCTBackground'] = False

    camera_info_manager = CameraInfoManager()
except ImportError:
    print("PYME not installed")
    import pytest
    pytest.skip('python-microscopy environment (PYME) not installed', allow_module_level=True)


def gen_image(p=.95, disp=False):
    try:
        from PYME.simulation import wormlike2
    except ImportError:  # legacy PYME, pre March 2021
        from PYME.Acquire.Hardware.Simulator import wormlike2