Example #1
    def OnApplyThreshold(self, event):
        import numpy as np
        import wx
        from PYME.IO.image import ImageStack
        from PYME.DSView import ViewIm3D

        filt_ims = [
            np.atleast_3d(self.image.data[:, :, :, chanNum].squeeze() >
                          self.dsviewer.do.thresholds[chanNum])
            for chanNum in range(self.image.data.shape[3])
        ]

        if self.dsviewer.mode == 'visGUI':
            mode = 'visGUI'
        elif self.dsviewer.mode == 'graph':
            mode = 'graph'
            filt_ims = [fi.squeeze() for fi in filt_ims]
        else:
            mode = 'lite'

        im = ImageStack(sum(filt_ims) > 0.5, titleStub='Thresholded Image')
        im.mdh.copyEntriesFrom(self.image.mdh)
        im.mdh['Parent'] = self.image.filename

        dv = ViewIm3D(im,
                      mode=mode,
                      glCanvas=self.dsviewer.glCanvas,
                      parent=wx.GetTopLevelParent(self.dsviewer))

        #set scaling to (0,1)
        for i in range(im.data.shape[3]):
            dv.do.Gains[i] = 1.0
Example #2
    def OnPSFFromZernikeModes(self, event):
        import numpy as np
        import wx
        #import pylab
        from PYME.Analysis.PSFGen import fourierHNA

        from PYME.IO.image import ImageStack
        from PYME.DSView import ViewIm3D
        
        self.configure_traits(kind='modal')

        z_ = np.arange(self.sizeZ)*float(self.zSpacing)
        z_ -= z_.mean()        
        
        #if self.vectorial:
        #    ps = fourierHNA.PsfFromPupilVect(self.image.data[:,:], z_, self.image.mdh['voxelsize.x']*1e3, self.wavelength, apodization=self.apodization, NA=self.NA)#, shape = [self.sizeX, self.sizeX])
        #    #ps = abs(ps*np.conj(ps))
        #else:
        #    ps = fourierHNA.PsfFromPupil(self.image.data[:,:], z_, self.image.mdh['voxelsize.x']*1e3, self.wavelength, apodization=self.apodization, NA=self.NA)#, shape = [self.sizeX, self.sizeX])
        
        ps = fourierHNA.GenZernikeDPSF(z_, dx = self.image.voxelsize_nm.x,
                                       zernikeCoeffs = self.dsviewer.zernModes, lamb=self.wavelength, 
                                       n=1.51, NA = self.NA, ns=1.51, beadsize=0, 
                                       vect=self.vectorial, apodization=self.apodization)
        #ps = ps/ps[:,:,self.sizeZ/2].sum()
        
        ps = ps/ps.max()
        
        im = ImageStack(ps, titleStub = 'Generated PSF')
        im.mdh.copyEntriesFrom(self.image.mdh)
        im.mdh['Parent'] = self.image.filename
        #im.mdh['Processing.CropROI'] = roi
        mode = 'psf'

        dv = ViewIm3D(im, mode=mode, glCanvas=self.dsviewer.glCanvas, parent=wx.GetTopLevelParent(self.dsviewer))     
Example #3
def generate_maps(source,
                  start_frame,
                  end_frame,
                  darkthreshold=1e4,
                  variancethreshold=300**2,
                  blemishvariance=1e8):
    if end_frame < 0:
        end_frame = int(source.dataSource.getNumSlices() + end_frame)

    # pre-checks before calculations to minimise the pain
    sensorSize = get_sensor_size(source.mdh)

    if not ((source.mdh['Camera.ROIWidth'] == sensorSize[0]) and
            (source.mdh['Camera.ROIHeight'] == sensorSize[1])):
        logger.warning(
            'Generating a map from data with ROI set. Use with EXTREME caution.\nMaps should be calculated from the whole chip.'
        )

    logger.info('Calculating mean and variance...')

    m, v = _meanvards(source.dataSource, start=start_frame, end=end_frame)
    eperADU = source.mdh['Camera.ElectronsPerCount']
    ve = v * eperADU * eperADU

    # occasionally the cameras seem to have completely unusable pixels
    # one example was dark being 65535 (i.e. max value for 16 bit)
    if m.max() > darkthreshold:
        ve[m > darkthreshold] = blemishvariance
    if ve.max() > variancethreshold:
        ve[ve > variancethreshold] = blemishvariance

    nbad = np.sum((m > darkthreshold) * (ve > variancethreshold))

    # if the uniform flag is set, then m and ve are passed as None
    # which makes sure that just the uniform defaults from meta data are used
    mfull, vefull, mapmdh = insert_into_full_map(m,
                                                 ve,
                                                 source.mdh,
                                                 sensor_size=sensorSize)

    mapmdh['CameraMap.StartFrame'] = start_frame
    mapmdh['CameraMap.EndFrame'] = end_frame
    mapmdh['CameraMap.SourceFilename'] = source.filename
    mapmdh['CameraMap.DarkThreshold'] = darkthreshold
    mapmdh['CameraMap.VarianceThreshold'] = variancethreshold
    mapmdh['CameraMap.BlemishVariance'] = blemishvariance
    mapmdh['CameraMap.NBadPixels'] = nbad

    mmd = NestedClassMDHandler(mapmdh)
    mmd['CameraMap.Type'] = 'mean'
    mmd['CameraMap.Units'] = 'ADU'

    vmd = NestedClassMDHandler(mapmdh)
    vmd['CameraMap.Type'] = 'variance'
    vmd['CameraMap.Units'] = 'electrons^2'

    im_dark = ImageStack(mfull, mdh=mmd)
    im_variance = ImageStack(vefull, mdh=vmd)

    return im_dark, im_variance
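A minimal usage sketch for generate_maps (the filenames are hypothetical; assumes a full-chip dark/offset series opened as an ImageStack carrying the Camera.* metadata referenced above):

    from PYME.IO.image import ImageStack

    source = ImageStack(filename='dark_series.h5')  # hypothetical dark acquisition
    im_dark, im_variance = generate_maps(source, start_frame=0, end_frame=-1)
    # ImageStack.Save is the same method install_map uses further below
    im_dark.Save(filename='dark_map.tif')
    im_variance.Save(filename='variance_map.tif')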
Example #4
    def execute(self, namespace):
        from quant_condensate import FlatfieldDarkCorrectedDataSource
        from PYME.IO.image import ImageStack
        image = namespace[self.input_image]

        if self.flatfield_filename == '':
            flat = None
        else:
            flat = ImageStack(
                filename=self.flatfield_filename).data[:, :, 0].squeeze()

        if self.darkmap_filename == '':
            dark = None
        else:
            dark = ImageStack(
                filename=self.darkmap_filename).data[:, :, 0].squeeze()

        ffd = FlatfieldDarkCorrectedDataSource.DataSource(image.data,
                                                          image.mdh,
                                                          flatfield=flat,
                                                          dark=dark)

        im = ImageStack(ffd, titleStub=self.output_name)
        im.mdh.copyEntriesFrom(image.mdh)
        im.mdh['Parent'] = image.filename
        if self.darkmap_filename:
            im.mdh['FlatAndDarkCorrect.Darkmap'] = self.darkmap_filename
        if self.flatfield_filename:
            im.mdh['FlatAndDarkCorrect.Flatmap'] = self.flatfield_filename
        namespace[self.output_name] = im
Example #5
def install_map(filename):
    """Installs a map file to its default location"""

    source = ImageStack(filename=filename)
    if source.mdh.getOrDefault('Analysis.name', '') != 'mean-variance':
        logger.error(
            'Analysis.name is not equal to "mean-variance" - probably not a map'
        )
        sys.exit('aborting...')

    if not (source.mdh['Analysis.valid.ROIHeight']
            == source.mdh['Camera.ROIHeight']
            and source.mdh['Analysis.valid.ROIWidth']
            == source.mdh['Camera.ROIWidth']):
        logger.error(
            'Partial (ROI based) maps cannot be installed to the default location'
        )
        sys.exit(-1)

    if source.mdh.getOrDefault('Analysis.isuniform', False):
        logger.error(
            'Uniform maps cannot be installed to the default location')
        sys.exit(-1)

    if source.mdh['Analysis.resultname'] == 'mean':
        maptype = 'dark'
    else:
        maptype = 'variance'

    mapname = mkDefaultPath(maptype, source.mdh)

    source.Save(filename=mapname)
Example #6
def combine_maps(maps, return_validMap=False):
    destarr = None
    mapimgs = []
    for map_filename in maps:
        mapimg = ImageStack(filename=map_filename)
        mapimgs.append(mapimg)
        if destarr is None:
            mdh = NestedClassMDHandler(mapimg.mdh)
            destarr = mkdestarr(mapimg)
            validMap = np.zeros_like(destarr, dtype='int')
        else:
            checkMapCompat(mapimg, mdh)
        insertvmap(mapimg, destarr, validMap)

    mdh.setEntry('CameraMap.combinedFromMaps', maps)
    mdh.setEntry('CameraMap.ValidROI.ROIHeight',
                 mapimgs[0].mdh['Camera.SensorHeight'])
    mdh.setEntry('CameraMap.ValidROI.ROIWidth',
                 mapimgs[0].mdh['Camera.SensorWidth'])
    mdh.setEntry('CameraMap.ValidROI.ROIOriginX', 0)
    mdh.setEntry('CameraMap.ValidROI.ROIOriginY', 0)

    combinedMap = ImageStack(destarr, mdh=mdh)
    if return_validMap:
        vmdh = NestedClassMDHandler(mdh)
        vmdh.setEntry('CameraMap.ValidMask', True)
        return (combinedMap, ImageStack(validMap, mdh=vmdh))
    else:
        return combinedMap
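A hedged usage sketch for combine_maps, assuming the arguments are filenames of partial (ROI based) maps of the same sensor (the filenames are hypothetical):

    combined = combine_maps(['roi_upper_dark.tif', 'roi_lower_dark.tif'])
    combined, valid_mask = combine_maps(['roi_upper_dark.tif', 'roi_lower_dark.tif'],
                                        return_validMap=True)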
Example #7
    def OnLabel(self, event):
        import numpy as np
        import wx
        from scipy import ndimage
        from PYME.IO.image import ImageStack
        from PYME.DSView import ViewIm3D

        filt_ims = [
            np.atleast_3d(self.image.data[:, :, :, chanNum].squeeze() >
                          self.dsviewer.do.thresholds[chanNum])
            for chanNum in range(self.image.data.shape[3])
        ]

        #print sum(filt_ims).shape
        mask = sum(filt_ims) > 0.5
        labs, nlabs = ndimage.label(mask)

        im = ImageStack(labs, titleStub='Thresholded Image')
        im.mdh.copyEntriesFrom(self.image.mdh)
        im.mdh['Parent'] = self.image.filename
        #im.mdh['Processing.CropROI'] = roi

        if self.dsviewer.mode == 'visGUI':
            mode = 'visGUI'
        else:
            mode = 'lite'

        dv = ViewIm3D(im,
                      mode=mode,
                      glCanvas=self.dsviewer.glCanvas,
                      parent=wx.GetTopLevelParent(self.dsviewer))

        #set scaling to (0,1)
        for i in range(im.data.shape[3]):
            dv.do.Gains[i] = 1.0
Example #8
    def OnSegmentAnneal(self, event):
        newImages = [
            np.zeros(self.image.data.shape[:3], 'b')
            for i in range(self.image.data.shape[3])
        ]

        im = ImageStack(newImages)
        im.mdh.copyEntriesFrom(self.image.mdh)
        im.mdh['Parent'] = self.image.filename

        if self.dsviewer.mode == 'visGUI':
            mode = 'visGUI'
        else:
            mode = 'lite'

        self.res = ViewIm3D(im,
                            parent=wx.GetTopLevelParent(self.dsviewer),
                            mode=mode,
                            glCanvas=self.dsviewer.glCanvas)

        self.panAnneal = SegmentationPanel(self.res, self.image, newImages)

        self.pinfo1 = (aui.AuiPaneInfo().Name("annealPanel").Left()
                       .Caption('Segmentation').DestroyOnClose(True)
                       .CloseButton(False).MinimizeButton(True)
                       .MinimizeMode(aui.AUI_MINIMIZE_CAPT_SMART |
                                     aui.AUI_MINIMIZE_POS_RIGHT))  #.CaptionVisible(False)
        self.res._mgr.AddPane(self.panAnneal, self.pinfo1)
        self.res._mgr.Update()
Example #9
    def Project(self, projType):
        import numpy as np
        from PYME.IO.image import ImageStack
        from PYME.DSView import ViewIm3D
        import os

        if projType == 'mean':
            filt_ims = [
                np.atleast_3d(self.image.data[:, :, :, chanNum].mean(2))
                for chanNum in range(self.image.data.shape[3])
            ]
        elif projType == 'max':
            filt_ims = [
                np.atleast_3d(self.image.data[:, :, :, chanNum].max(2))
                for chanNum in range(self.image.data.shape[3])
            ]

        fns = os.path.split(self.image.filename)[1]

        im = ImageStack(filt_ims, titleStub='%s - %s' % (fns, projType))
        im.mdh.copyEntriesFrom(self.image.mdh)
        im.mdh['Parent'] = self.image.filename
        im.mdh['Processing.Projection'] = projType

        if self.dsviewer.mode == 'visGUI':
            mode = 'visGUI'
        else:
            mode = 'lite'

        dv = ViewIm3D(im, mode=mode, glCanvas=self.dsviewer.glCanvas)

        #set scaling to (0,1)
        for i in range(im.data.shape[3]):
            dv.do.Gains[i] = 1.0
Example #10
def redimension(parent, img):
    with RedimensionDialog(parent, img) as dlg:
        if dlg.ShowModal() == wx.ID_OK:
            from PYME.IO.image import ImageStack
            from PYME.DSView import ViewIm3D
            from PYME.IO.DataSources.BaseDataSource import XYZTCWrapper

            
            
            d = XYZTCWrapper(img.data_xyztc)
            d.set_dim_order_and_size(dlg.cOrder.GetStringSelection(), size_z=int(dlg.tSizeZ.GetValue()),
                                     size_t=int(dlg.tSizeT.GetValue()), size_c=int(dlg.tSizeC.GetValue()))
            im = ImageStack(data=d, titleStub='Redimensioned')
            
            im.mdh.copyEntriesFrom(img.mdh)
            im.mdh['Parent'] = img.filename
            #im.mdh['Processing.CropROI'] = roi

            # if self.dsviewer.mode == 'visGUI':
            #     mode = 'visGUI'
            # else:
            #     mode = 'lite'

            dv = ViewIm3D(im, mode=parent.mode, glCanvas=parent.glCanvas, parent=wx.GetTopLevelParent(parent))

            #set scaling to (0,1)
            for i in range(im.data.shape[3]):
                dv.do.Gains[i] = 1.0
Example #11
    def filter(self, image):
        #from PYME.util.shmarray import shmarray
        #import multiprocessing

        if self.processFramesIndividually:
            filt_ims = []
            for chanNum in range(image.data.shape[3]):
                filt_ims.append(
                    np.concatenate([
                        np.atleast_3d(
                            self.applyFilter(
                                image.data[:, :, i,
                                           chanNum].squeeze().astype('f'),
                                chanNum, i, image))
                        for i in range(image.data.shape[2])
                    ], 2))
        else:
            filt_ims = [
                np.atleast_3d(
                    self.applyFilter(
                        image.data[:, :, :, chanNum].squeeze().astype('f'),
                        chanNum, 0, image))
                for chanNum in range(image.data.shape[3])
            ]

        im = ImageStack(filt_ims, titleStub=self.outputName)
        im.mdh.copyEntriesFrom(image.mdh)
        im.mdh['Parent'] = image.filename

        self.completeMetadata(im)

        return im
Example #12
    def filter(self, image0, image1):
        if self.processFramesIndividually:
            filt_ims = []
            for chanNum in range(image0.data.shape[3]):
                out = []
                for i in range(image0.data.shape[2]):
                    d0 = image0.data[:, :, i, chanNum].squeeze().astype('f')
                    d1 = image1.data[:, :, i, chanNum].squeeze().astype('f')
                    out.append(
                        np.atleast_3d(
                            self.applyFilter(d0, d1, chanNum, i, image0)))
                filt_ims.append(np.concatenate(out, 2))
        else:
            filt_ims = []
            for chanNum in range(image0.data.shape[3]):
                d0 = image0.data[:, :, :, chanNum].squeeze().astype('f')
                d1 = image1.data[:, :, :, chanNum].squeeze().astype('f')
                filt_ims.append(
                    np.atleast_3d(self.applyFilter(d0, d1, chanNum, 0,
                                                   image0)))

        im = ImageStack(filt_ims, titleStub=self.outputName)
        im.mdh.copyEntriesFrom(image0.mdh)
        im.mdh['Parents'] = '%s, %s' % (image0.filename, image1.filename)

        self.completeMetadata(im)

        return im
Example #13
    def _joinChannels(self, namespace):
        chans = []

        image = namespace[self.inputChan0]

        chans.append(np.atleast_3d(image.data[:, :, :, 0]))

        channel_names = [
            self.inputChan0,
        ]

        if self.inputChan1 != '':
            chans.append(namespace[self.inputChan1].data[:, :, :, 0])
            channel_names.append(self.inputChan1)
        if self.inputChan2 != '':
            chans.append(namespace[self.inputChan2].data[:, :, :, 0])
            channel_names.append(self.inputChan2)
        if self.inputChan3 != '':
            chans.append(namespace[self.inputChan3].data[:, :, :, 0])
            channel_names.append(self.inputChan3)

        im = ImageStack(chans, titleStub='Composite Image')
        im.mdh.copyEntriesFrom(image.mdh)
        im.names = channel_names
        im.mdh['Parent'] = image.filename

        return im
Example #14
def crop_3D(image, roi):
    # TODO - make or refactor into recipe module
    from PYME.IO.image import ImageStack
    import numpy as np
    filt_ims = [
        np.atleast_3d(image.data_xyztc[roi[0][0]:roi[0][1],
                                       roi[1][0]:roi[1][1],
                                       roi[2][0]:roi[2][1], 0,
                                       chanNum].squeeze())
        for chanNum in range(image.data_xyztc.shape[4])
    ]

    im = ImageStack(filt_ims, titleStub='Cropped Image')
    im.mdh.copyEntriesFrom(image.mdh)
    im.mdh['Parent'] = image.filename
    im.mdh['Processing.CropROI'] = roi

    vx, vy, vz = image.voxelsize
    ox, oy, oz = image.origin

    im.mdh['Origin.x'] = ox + roi[0][0] * vx
    im.mdh['Origin.y'] = oy + roi[1][0] * vy
    im.mdh['Origin.z'] = oz + roi[2][0] * vz

    return im
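A minimal usage sketch for crop_3D; the ROI is given as [[x0, x1], [y0, y1], [z0, z1]] in pixel/slice indices, matching the slicing above (the numbers are arbitrary):

    cropped = crop_3D(image, [[10, 110], [20, 120], [0, 5]])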
Example #15
    def OnLabelWatershed(self, event):
        import numpy as np
        import wx
        from PYME.contrib.cpmath import watershed
        from PYME.IO.image import ImageStack
        from PYME.DSView import ViewIm3D

        nChans = self.image.data.shape[3]

        filt_ims = [
            np.atleast_3d(self.image.data[:, :, :, chanNum].squeeze())
            for chanNum in range(nChans)
        ]

        img = (-sum([im / im.max()
                     for im in filt_ims]) * (2**15) / nChans).astype('int16')

        mask = (sum([
            filt_ims[chanNum] > self.do.thresholds[chanNum]
            for chanNum in range(nChans)
        ]) > .5).astype('int16')

        #self.image.labelThresholds = [(self.dsviewer.do.Offs[chanNum] + 0.5/self.dsviewer.do.Gains[chanNum]) for chanNum in range(self.image.data.shape[3])]

        #print sum(filt_ims).shape

        labs = watershed.fast_watershed(img,
                                        self.image.labels.astype('int16'),
                                        mask=mask)

        #store a copy in the image for measurements etc ...
        self.image.labels = labs

        im = ImageStack(labs, titleStub='Labelled Image')
        im.mdh.copyEntriesFrom(self.image.mdh)
        im.mdh['Parent'] = self.image.filename

        im.mdh['Labelling.WatershedThresholds'] = self.do.thresholds

        #im.mdh['Labelling.MinSize'] = rSize
        #im.mdh['Labelling.Thresholds'] = self.image.labelThresholds
        #im.mdh['Processing.CropROI'] = roi

        if self.dsviewer.mode == 'visGUI':
            mode = 'visGUI'
        else:
            mode = 'lite'

        dv = ViewIm3D(im,
                      mode=mode,
                      glCanvas=self.dsviewer.glCanvas,
                      parent=wx.GetTopLevelParent(self.dsviewer))

        #set scaling to (0,1)
        for i in range(im.data.shape[3]):
            dv.do.Gains[i] = 1.0
Example #16
    def OnDiagSplit(self, event):
        import numpy as np
        import wx
        #from scipy.ndimage import gaussian_filter
        from PYME.IO.image import ImageStack
        from PYME.DSView import ViewIm3D

        #dlg = wx.TextEntryDialog(self.dsviewer, 'Blur size [pixels]:', 'Gaussian Blur', '[1,1,1]')

        #if dlg.ShowModal() == wx.ID_OK:
        #sigmas = eval(dlg.GetValue())
        #print sigmas
        #print self.images[0].img.shape

        x0, x1, y0, y1 = [
            self.do.selection_begin_x, self.do.selection_end_x,
            self.do.selection_begin_y, self.do.selection_end_y
        ]

        dx = x1 - x0
        dy = y1 - y0

        m = dy / dx
        c = y0 - m * x0

        d = self.image.data

        X, Y = np.ogrid[:d.shape[0], :d.shape[1]]

        msk = Y > (m * X + c)

        #filt_ims = [np.atleast_3d(self.image.data[roi[0][0]:roi[0][1],roi[1][0]:roi[1][1],:,chanNum].squeeze()) for chanNum in range(self.image.data.shape[3])]
        imn = (d[:, :, :, 0] -
               self.do.Offs[0]) * self.do.Gains[0] * msk[:, :, None] + (
                   d[:, :, :, 1] -
                   self.do.Offs[1]) * self.do.Gains[1] * (1 - msk)[:, :, None]

        im = ImageStack(imn, titleStub='Cropped Image')
        im.mdh.copyEntriesFrom(self.image.mdh)
        im.mdh['Parent'] = self.image.filename
        #im.mdh['Processing.CropROI'] = roi

        if self.dsviewer.mode == 'visGUI':
            mode = 'visGUI'
        else:
            mode = 'lite'

        dv = ViewIm3D(im,
                      mode=mode,
                      glCanvas=self.dsviewer.glCanvas,
                      parent=wx.GetTopLevelParent(self.dsviewer))

        #set scaling to (0,1)
        for i in range(im.data.shape[3]):
            dv.do.Gains[i] = 1.0
Example #17
    def execute(self, namespace):
        from PYME.IO.image import ImageStack
        from quant_condensate import MeanNormalizedDataSource
        image = namespace[self.input_image]

        mnd = MeanNormalizedDataSource.DataSource(image.data, image.mdh)

        im = ImageStack(mnd, titleStub=self.output_name)
        im.mdh.copyEntriesFrom(image.mdh)
        im.mdh['Parent'] = image.filename
        namespace[self.output_name] = im
Example #18
    def OnSubtractBackground(self, event):
        from PYME.IO.image import ImageStack
        from PYME.DSView import ViewIm3D
        from PYME.Analysis.PSFEst import extractImages

        d_bg = extractImages.backgroundCorrectPSFWF(self.image.data[:, :, :])

        im = ImageStack(d_bg, titleStub='Filtered Image')
        im.mdh.copyEntriesFrom(self.image.mdh)
        im.mdh['Parent'] = self.image.filename

        dv = ViewIm3D(im, mode='psf', glCanvas=self.dsviewer.glCanvas)
Example #19
    def _pickChannel(self, image):
        chan = image.data[:, :, :, self.channelToExtract]

        im = ImageStack(chan, titleStub='Filtered Image')
        im.mdh.copyEntriesFrom(image.mdh)
        try:
            im.mdh['ChannelNames'] = [
                image.names[self.channelToExtract],
            ]
        except (KeyError, AttributeError):
            logger.warning("Error setting channel name")

        im.mdh['Parent'] = image.filename

        return im
Example #20
    def _execute(self, namespace):
        self._start_time = time.time()
        ims = namespace[self.input_name]
        
        dtype = ims.data[:,:,0].dtype
        
        # Somewhat arbitrary way to decide on chunk size (integer division so the
        # result can be used directly in the slicing below)
        chunk_size = 100000000 // (ims.data.shape[0] * ims.data.shape[1] * dtype.itemsize)
        chunk_size = max(1, chunk_size)
#        print chunk_size
        
        tukey_mask_x = signal.tukey(ims.data.shape[0], self.tukey_size)
        tukey_mask_y = signal.tukey(ims.data.shape[1], self.tukey_size)
        self._tukey_mask_2d = np.multiply(*np.meshgrid(tukey_mask_x, tukey_mask_y, indexing='ij'))[:,:,None]

        
        if self.cache_clip == "":
            raw_data = np.empty(tuple(np.asarray(ims.data.shape[:3], dtype=np.int64)), dtype=dtype)
        else:
            raw_data = np.memmap(self.cache_clip, dtype=dtype, mode='w+', shape=tuple(np.asarray(ims.data.shape[:3], dtype=np.int64)))
        
        progress = 0.2 * ims.data.shape[2]
        for f in np.arange(0, ims.data.shape[2], chunk_size):
            raw_data[:,:,f:f+chunk_size] = self.applyFilter(ims.data[:,:,f:f+chunk_size])            
            
            if (f+chunk_size >= progress):
                if isinstance(raw_data, np.memmap):
                    raw_data.flush()
                progress += 0.2 * ims.data.shape[2]
                print("{:.2f} s. Completed clipping {} of {} total images.".format(time.time() - self._start_time, min(f+chunk_size, ims.data.shape[2]), ims.data.shape[2]))
        
        clipped_images = ImageStack(raw_data, mdh=ims.mdh)
        self.completeMetadata(clipped_images)
        
        namespace[self.output_name] = clipped_images
Example #21
def test_labels_from_image():
    from PYME.IO.image import ImageStack
    from PYME.IO.tabular import DictSource
    im_size = 10
    im = np.zeros((im_size, im_size, im_size), dtype=int)
    im[-3:, -3:, -3:] = 1
    im[:5, :5, :5] = 2

    image_stack = ImageStack(im)
    assert(image_stack.origin == (0, 0, 0) and image_stack.pixelSize == 1)
    image_stack.mdh['voxelsize.x'], image_stack.mdh['voxelsize.y'], image_stack.mdh['voxelsize.z'] = 0.001, 0.001, 0.001

    xx, yy, zz = np.meshgrid(np.arange(im_size), np.arange(im_size), np.arange(im_size))
    points = DictSource({
        'x': xx.ravel(), 'y': yy.ravel(), 'z': zz.ravel()
    })
    points.mdh = image_stack.mdh

    ids, counts_per_label = cluster_morphology.get_labels_from_image(image_stack, points)
    np.testing.assert_array_equal(ids, im.ravel())
    assert counts_per_label[0] == (im == 1).sum()
    assert counts_per_label[1] == (im == 2).sum()

    # now test minimum counts, throwing out the smaller label
    ids, counts_per_label = cluster_morphology.get_labels_from_image(image_stack, points, (im == 1).sum() + 1)
    assert not np.any(ids == 1)
    assert (ids == 2).sum() == (im==2).sum()
Example #22
    def OnOpenOMERO(self, wx_event=None):
        # from pyme_omero.core import localization_files_from_image_url
        from PYME.IO.image import ImageStack
        from PYME.DSView import ViewIm3D
        import wx
        from pyme_omero.core import download_image

        dlg = wx.TextEntryDialog(self.dsviewer, 'OMERO URL',
                                 'URL to OMERO image', '')

        if dlg.ShowModal() == wx.ID_OK:
            image_url = dlg.GetValue()
        else:
            dlg.Destroy()
            return

        dlg.Destroy()

        path = download_image(image_url, self._tempdir.name)
        logger.debug('temporary file path: %s' % path)

        im = ImageStack(filename=path)

        dv = ViewIm3D(im,
                      glCanvas=self.dsviewer.glCanvas,
                      parent=wx.GetTopLevelParent(self.dsviewer))

        #set scaling to (0,1)
        for i in range(im.data.shape[3]):
            dv.do.Gains[i] = 1.0
Example #23
    def _execute(self, namespace):
        self._start_time = time.time()
        #        try:
        ##            del self._ft_images
        #            del self.image_cache
        #        except:
        #            pass

        ims = namespace[self.input_image]

        t_out = np.arange(ims.data.shape[2], dtype=float)

        if 'recipe.binning' in ims.mdh.keys():
            t_out *= ims.mdh['recipe.binning'][2]
            t_out += 0.5 * ims.mdh['recipe.binning'][2]
#        print t_out

        dx = namespace[self.input_drift_interpolator][0](t_out)
        dy = namespace[self.input_drift_interpolator][1](t_out)

        shifted_images = self.shift_images(ims, np.stack([dx, dy], 1), ims.mdh)

        namespace[self.outputName] = ImageStack(shifted_images,
                                                titleStub=self.outputName,
                                                mdh=ims.mdh)
Example #24
    def OnExtractPupil(self, event):
        import numpy as np
        import wx
        # import pylab
        import matplotlib.pyplot as plt
        from PYME.Analysis.PSFGen import fourierHNA

        from PYME.IO.image import ImageStack
        from PYME.DSView import ViewIm3D

        vs = self.image.voxelsize_nm
        z_ = np.arange(self.image.data.shape[2]) * vs.z
        z_ -= z_.mean()

        self.configure_traits(kind='modal')

        #pupil = fourierHNA.ExtractPupil(np.maximum(self.image.data[:,:,:] - .001, 0), z_, self.image.mdh['voxelsize.x']*1e3, self.wavelength, self.NA, nIters=self.iterations, size=self.pupilSize)

        pupil = fourierHNA.ExtractPupil(
            self.image.data[:, :, :],
            z_,
            vs.x,
            self.wavelength,
            self.NA,
            nIters=self.iterations,
            size=self.pupilSize,
            intermediateUpdates=self.intermediateUpdates)

        plt.figure()
        plt.subplot(121)
        plt.imshow(np.abs(pupil), interpolation='nearest')
        plt.subplot(122)
        plt.imshow(np.angle(pupil) * (np.abs(pupil) > 0),
                   interpolation='nearest')

        pupil = pupil * (np.abs(pupil) > 0)

        #im = ImageStack([np.abs(pupil), np.angle(pupil)*(np.abs(pupil) > 0)], titleStub = 'Extracted Pupil')
        im = ImageStack(pupil, titleStub='Extracted Pupil')
        im.mdh.copyEntriesFrom(self.image.mdh)
        im.mdh['Parent'] = self.image.filename
        #im.mdh['Processing.CropROI'] = roi
        mode = 'pupil'

        dv = ViewIm3D(im,
                      mode=mode,
                      glCanvas=self.dsviewer.glCanvas,
                      parent=wx.GetTopLevelParent(self.dsviewer))
Example #25
    def OnGaussianFilter(self, event):
        import numpy as np
        from scipy.ndimage import gaussian_filter1d, convolve1d
        from PYME.IO.image import ImageStack
        from PYME.DSView import ViewIm3D

        #dlg = wx.TextEntryDialog(self.dsviewer, 'Blur size [pixels]:', 'Gaussian Blur', '[1,1,1]')

        if True:  #dlg.ShowModal() == wx.ID_OK:
            #sigmas = eval(dlg.GetValue())
            #print sigmas
            #print self.images[0].img.shape
            #filt_ims = [np.atleast_3d(gaussian_filter(self.image.data[:,:,:,chanNum].squeeze(), sigmas)) for chanNum in range(self.image.data.shape[3])]
            ims = self.image.data[:, :, 0, 0].astype('f')

            ims = ims - np.median(ims, 0)

            ims = gaussian_filter1d(ims, 2, 1)
            ims = convolve1d(ims, np.ones(10), 0)

            ims = np.rollaxis(ims, 0, 3)

            #ims = (ims - ims.min()).astype('uint16')

            im = ImageStack(ims, titleStub='Filtered Image')
            im.mdh.copyEntriesFrom(self.image.mdh)
            im.mdh['Parent'] = self.image.filename
            #im.mdh['Processing.'] = sigmas

            if self.dsviewer.mode == 'visGUI':
                mode = 'visGUI'
            else:
                mode = 'lite'

            dv = ViewIm3D(im, mode=mode, glCanvas=self.dsviewer.glCanvas)

            #set scaling to (0,1)
            for i in range(im.data.shape[3]):
                dv.do.Gains[i] = 1.0

            #imfc = MultiChannelImageViewFrame(self.parent, self.parent.glCanvas, filt_ims, self.image.names, title='Filtered Image - %3.1fnm bins' % self.image.pixelSize)

            #self.parent.generatedImages.append(imfc)
            #imfc.Show()

        #dlg.Destroy()  # the dialog creation above is commented out, so there is no dlg to destroy
Example #26
def View3D(data, titleStub='Untitled Image', mdh = None, mode='lite', 
           parent=None, glCanvas=None):
    im = ImageStack(data = data, mdh = mdh, titleStub=titleStub)
    dvf = DSViewFrame(im, mode=mode, size=(500, 500), 
                      parent=parent, glCanvas=glCanvas)
    dvf.SetSize((500,500))
    dvf.Show()
    return dvf
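A hedged example of calling View3D with a synthetic volume (assumes a running wx App to host the DSViewFrame):

    import numpy as np

    dvf = View3D(np.random.rand(64, 64, 32).astype('f'), titleStub='Random test volume')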
Example #27
def test_recipe_1():
    rec = Recipe.fromYAML(recipe_1)
    im = ImageStack(
        filename=os.path.join(resources.get_test_data_dir(), 't_im.tif'))

    rec.execute(input=im)
    assert (np.allclose(rec.namespace['zoomed'].data_xyztc.shape,
                        (88, 80, 241, 1, 2)))
Example #28
    def execute(self, namespace):
        from quant_condensate.SwapColorAndSliceDataSource import DataSource
        from PYME.IO.MetaDataHandler import DictMDHandler
        from PYME.IO.image import ImageStack
        im = namespace[self.input_name]
        mdh = DictMDHandler()
        mdh.copyEntriesFrom(im.mdh)
        mdh['SwapColorAndSlice'] = True
        namespace[self.output_name] = ImageStack(DataSource(im.data), mdh=mdh)
Example #29
    def calculate_FRC_from_images(self, image_pair, mdh):
        ft_images = list()
        if self.multiprocessing:
            results = list()
            for im in image_pair:
                results.append(self._pool.apply_async(np.fft.fftn, (im,)))            
            for res in results:
                ft_images.append(res.get())
            del results
        else:
            for im in image_pair:
                ft_images.append(np.fft.fftn(im))
        
#        im_fft_freq = np.fft.fftfreq(image_pair[0].shape[0], self._pixel_size_in_nm)
#        im_R = np.sqrt(im_fft_freq[:, None]**2 + im_fft_freq[None, :]**2)
        im_fft_freqs = [np.fft.fftfreq(image_pair[0].shape[i], self._pixel_size_in_nm[i]) for i in range(image_pair[0].ndim)]
        im_R = np.linalg.norm(np.stack(np.meshgrid(*im_fft_freqs, indexing='ij')), axis=0)

        im1_fft_power = np.multiply(ft_images[0], np.conj(ft_images[0]))
        im2_fft_power = np.multiply(ft_images[1], np.conj(ft_images[1]))        
        im12_fft_power = np.multiply(ft_images[0], np.conj(ft_images[1]))                
        
##        fft_ims = ImageStack(data=np.stack([np.fft.fftshift(im1_fft_power),
##                                            np.fft.fftshift(im2_fft_power),
##                                            np.fft.fftshift(im12_fft_power)], axis=-1), mdh=mdh)
##        self._namespace[self.output_fft_images] = fft_ims
#        self._namespace[self.output_fft_image_a] = ImageStack(data=np.fft.fftshift(im1_fft_power), titleStub="ImageA_FFT")
#        self._namespace[self.output_fft_image_b] = ImageStack(data=np.fft.fftshift(im2_fft_power), titleStub="ImageB_FFT")
#        self._namespace[self.output_fft_images_cc] = ImageStack(data=np.fft.fftshift(im12_fft_power), titleStub="ImageA_Image_B_FFT_CC")
        
        try:
            self._namespace[self.output_fft_images_cc] = ImageStack(data=np.stack([np.atleast_3d(np.fft.fftshift(im1_fft_power)),
                   np.atleast_3d(np.fft.fftshift(im2_fft_power)),
                   np.atleast_3d(np.fft.fftshift(im12_fft_power))], 3), titleStub="ImageA_Image_FFT_CC")
            
#            if self.plot_graphs:
#                from PYME.DSView.dsviewer import ViewIm3D, View3D
#    #            ViewIm3D(self._namespace[self.output_fft_image_a])
#    #            ViewIm3D(self._namespace[self.output_fft_image_b])
#                ViewIm3D(self._namespace[self.output_fft_images_cc])
#    #            View3D(np.fft.fftshift(im_R))
            
        except Exception as e:
            print(e)
            
        
        im1_fft_flat_res = CalculateFRCBase.BinData(im_R.flatten(), im1_fft_power.flatten(), statistic='mean', bins=201)
        im2_fft_flat_res = CalculateFRCBase.BinData(im_R.flatten(), im2_fft_power.flatten(), statistic='mean', bins=201)
        im12_fft_flat_res = CalculateFRCBase.BinData(im_R.flatten(), im12_fft_power.flatten(), statistic='mean', bins=201)
        
        corr = np.real(im12_fft_flat_res.statistic) / np.sqrt(np.abs(im1_fft_flat_res.statistic*im2_fft_flat_res.statistic))
        
        smoothed_frc = self.smooth_frc(im12_fft_flat_res.bin_edges[:-1], corr, self.cubic_smoothing)
        
        res, rawdata = self.calculate_threshold(im12_fft_flat_res.bin_edges[:-1], corr, smoothed_frc, im12_fft_flat_res.counts)
        
        return res, rawdata
Example #30
    def execute(self, namespace):
        self._namespace = namespace
        import multiprocessing
#        from PYME.util import mProfile        
#        mProfile.profileOn(["frc.py"])
        
        if self.multiprocessing:
            process_count = np.clip(2, 1, multiprocessing.cpu_count()-1)
            self._pool = multiprocessing.Pool(processes=process_count)
        
        pipeline = namespace[self.inputName]
        mapped_pipeline = tabular.mappingFilter(pipeline)
        self._pixel_size_in_nm = self.pixel_size_in_nm * np.ones(3, dtype=float)
        
        image_pair = self.generate_image_pair(mapped_pipeline)
        
        image_pair = self.preprocess_images(image_pair)
            
        # Should use DensityMapping recipe eventually when it is ready.
        mdh = MetaDataHandler.NestedClassMDHandler()
        mdh['Rendering.Method'] = "np.histogramdd"
        if 'imageID' in pipeline.mdh.getEntryNames():
            mdh['Rendering.SourceImageID'] = pipeline.mdh['imageID']
        try:
            mdh['Rendering.SourceFilename'] = pipeline.resultsSource.h5f.filename
        except:
            pass        
        mdh.Source = MetaDataHandler.NestedClassMDHandler(pipeline.mdh)        
        mdh['Rendering.NEventsRendered'] = [image_pair[0].sum(), image_pair[1].sum()]
        mdh['voxelsize.units'] = 'um'
        mdh['voxelsize.x'] = self.pixel_size_in_nm * 1E-3
        mdh['voxelsize.y'] = self.pixel_size_in_nm * 1E-3
        
        ims = ImageStack(data=np.stack(image_pair, axis=-1), mdh=mdh)
        namespace[self.output_images] = ims
        
#        if self.plot_graphs:
#            from PYME.DSView.dsviewer import ViewIm3D
#            ViewIm3D(ims)
        
        frc_res, rawdata = self.calculate_FRC_from_images(image_pair, pipeline.mdh)
        
#        smoothed_frc = self.SmoothFRC(frc_freq, frc_corr)
#        
#        self.CalculateThreshold(frc_freq, frc_corr, smoothed_frc)
        
        namespace[self.output_frc_dict] = frc_res
        namespace[self.output_frc_raw] = rawdata
        
        if self.multiprocessing:
            self._pool.close()
            self._pool.join()
        
#        mProfile.profileOff()
#        mProfile.report()
        
        self.save_to_file(namespace)