Example #1
def get_shifts(md, data_shape, x, y):
    x = round(x)
    y = round(y)

    # pixel size in nm
    vx, vy, _ = md.voxelsize_nm

    # position in nm from camera origin
    roi_x0, roi_y0 = get_camera_roi_origin(md)
    x_ = (x + roi_x0) * vx
    y_ = (y + roi_y0) * vy

    # look up shifts
    if not md.getOrDefault('Analysis.FitShifts', False):
        DeltaX = md.chroma.dx.ev(x_, y_)
        DeltaY = md.chroma.dy.ev(x_, y_)
    else:
        DeltaX = 0
        DeltaY = 0

    # find shift in whole pixels
    dxp = int(DeltaX / vx)
    dyp = int(DeltaY / vy)

    return DeltaX, DeltaY, dxp, dyp
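
Note: all of these examples use get_camera_roi_origin to turn ROI-relative pixel coordinates into absolute chip coordinates. A minimal sketch of the behaviour the examples imply (0-based Camera.ROIOriginX/Y keys with the legacy 1-based Camera.ROIPosX/Y as a fallback; this is an assumption, see PYME.IO.MetaDataHandler for the authoritative implementation):

def get_camera_roi_origin_sketch(md):
    # hypothetical re-implementation, for illustration only
    names = md.getEntryNames()
    if 'Camera.ROIOriginX' in names:
        # 0-based origin keys
        return md['Camera.ROIOriginX'], md['Camera.ROIOriginY']
    elif 'Camera.ROIPosX' in names:
        # legacy 1-based position keys (hence the +1 offsets in later examples)
        return md['Camera.ROIPosX'] - 1, md['Camera.ROIPosY'] - 1
    return 0, 0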
Example #2
    def OnGenShiftmapQuad(self, event):
        from PYME.Analysis.points import twoColour, twoColourPlot
        from PYME.IO.MetaDataHandler import get_camera_roi_origin

        pipeline = self.visFr.pipeline

        vs = pipeline.mdh.voxelsize_nm
        
        roi_x0, roi_y0 = get_camera_roi_origin(pipeline.mdh)
        
        x0 = (roi_x0)*vs[0]
        y0 = (roi_y0)*vs[1]
        
        lx = len(pipeline.filter['x'])
        bbox = None  #[0,(pipeline.mdh['Camera.ROIWidth'] + 1)*vs[0], 0,(pipeline.mdh['Camera.ROIHeight'] + 1)*vs[1]]
        dx, dy, spx, spy, good = twoColour.genShiftVectorFieldQ(
            pipeline.filter['x'] + .1*np.random.randn(lx) + x0,
            pipeline.filter['y'] + .1*np.random.randn(lx) + y0,
            pipeline.filter['fitResults_dx'], pipeline.filter['fitResults_dy'],
            pipeline.filter['fitError_dx'], pipeline.filter['fitError_dy'],
            bbox=bbox)
        #twoColourPlot.PlotShiftField(dx, dy, spx, spy)
        twoColourPlot.PlotShiftField2(spx, spy, pipeline.mdh['Splitter.Channel0ROI'][2:], voxelsize=vs)
        twoColourPlot.PlotShiftResiduals(pipeline['x'][good] + x0, pipeline['y'][good] + y0, pipeline['fitResults_dx'][good], pipeline['fitResults_dy'][good], spx, spy)

        from six.moves import cPickle

        defFile = os.path.splitext(os.path.split(self.visFr.GetTitle())[-1])[0] + '.sf'

        fdialog = wx.FileDialog(None, 'Save shift field as ...',
            wildcard='Shift Field file (*.sf)|*.sf', style=wx.FD_SAVE, defaultDir = nameUtils.genShiftFieldDirectoryPath(), defaultFile=defFile)
        succ = fdialog.ShowModal()
        if (succ == wx.ID_OK):
            fpath = fdialog.GetPath()
            #save as a pickle containing the data and voxelsize

            fid = open(fpath, 'wb')
            cPickle.dump((spx, spy), fid, 2)
            fid.close()
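
The .sf file written above is simply a pickle of the (spx, spy) pair, so reading it back is symmetric. A sketch, assuming the loaded objects expose the same .ev(x_nm, y_nm) interface used throughout these examples (the file name is hypothetical):

from six.moves import cPickle

with open('shiftfield.sf', 'rb') as fid:  # hypothetical path
    spx, spy = cPickle.load(fid)

# shift in nm at a position given in nm from the camera origin
dx_nm = spx.ev(1000., 2000.)
dy_nm = spy.ev(1000., 2000.)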
Example #3
def remap_splitter_coords(md, data_shape, x, y):
    vx, vy, _ = md.voxelsize_nm

    x0, y0 = get_camera_roi_origin(md)

    if False:  #'Splitter.Channel0ROI' in md.getEntryNames():
        xg, yg, w, h = md['Splitter.Channel0ROI']
        xr, yr, w, h = md['Splitter.Channel1ROI']
    else:
        xg, yg, w, h = 0, 0, data_shape[0], data_shape[1]
        xr, yr = w, h

    xn = x + (xr - xg)
    yn = y + (yr - yg)

    if md.get('Splitter.Flip', True):  #not (('Splitter.Flip' in md.getEntryNames() and not md.getEntry('Splitter.Flip'))):
        yn = (h - y0 - y) + yr - yg

    #chromatic shift
    if 'chroma.dx' in md.getEntryNames():
        dx = md['chroma.dx'].ev((x + x0) * vx, (y + y0) * vy) / vx
        dy = md['chroma.dy'].ev((x + x0) * vx, (y + y0) * vy) / vy

        xn -= dx
        yn -= dy

    return xn, yn
Example #4
    def _parseROI(md):
        """
        Extract ROI coordinates from metadata

        TODO - refactor out of here as it is being used in non-fitting code

        Parameters
        ----------
        md: dict-like
            Metadata containing Camera.ROIWidth, Camera.ROIHeight or multiview information

        Returns
        -------
        roi_slices: list
            list of (x, y) slices to extract ROI from camera maps

        """

        if any(md.get('Multiview.ActiveViews', [])):
            #special case handling for multiview ROIs
            origins = [
                md['Multiview.ROI%dOrigin' % ind]
                for ind in md['Multiview.ActiveViews']
            ]
            size_x, size_y = md['Multiview.ROISize']
            return [(slice(int(ox),
                           int(ox + size_x)), slice(int(oy), int(oy + size_y)))
                    for ox, oy in origins]

        x0, y0 = get_camera_roi_origin(md)

        x1 = x0 + md['Camera.ROIWidth']
        y1 = y0 + md['Camera.ROIHeight']

        return [(slice(int(x0), int(x1)), slice(int(y0), int(y1)))]
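
The returned (x, y) slice pairs index directly into a full-chip camera map. A self-contained illustration with made-up sizes:

import numpy as np

camera_map = np.zeros((2048, 2048), dtype='f')    # full-chip map
xslice, yslice = slice(96, 352), slice(200, 456)  # as _parseROI might return
roi_map = camera_map[xslice, yslice]              # (256, 256) crop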
Example #5
    def __getitem__(self, key):
        #print key
        xslice, yslice, zslice = key

        #cut region out of data stack
        dataROI = self.data[xslice, yslice, zslice]

        #average in z
        #dataMean = dataROI.mean(2) - self.metadata.CCD.ADOffset

        #generate grid to evaluate function on
        Xg, Yg = numpy.mgrid[xslice, yslice]
        vs = self.metadata.voxelsize_nm
        Xg = vs.x*Xg
        Yg = vs.y*Yg

        #generate a corrected grid for the red channel
        #note that we're cheating a little here - for shifts which are slowly
        #varying we should be able to set Xr = Xg + delta_x(\bar{Xr}) and
        #similarly for y. For slowly varying shifts the following should be
        #equivalent to this. For rapidly varying shifts all bets are off ...

        #DeltaX, DeltaY = twoColour.getCorrection(Xg.mean(), Yg.mean(), self.metadata.chroma.dx,self.metadata.chroma.dy)
        roi_x0, roi_y0 = get_camera_roi_origin(self.metadata)

        x_ = Xg.mean() + roi_x0*vs.x
        y_ = Yg.mean() + roi_y0*vs.y
        DeltaX = self.metadata.chroma.dx.ev(x_, y_)
        DeltaY = self.metadata.chroma.dy.ev(x_, y_)

        Xr = Xg + DeltaX
        Yr = Yg + DeltaY


        if (not self.background is None and len(numpy.shape(self.background)) > 1
                and not ('Analysis.subtractBackground' in self.metadata.getEntryNames()
                         and self.metadata.Analysis.subtractBackground == False)):
            bgROI = self.background[xslice, yslice, zslice]

            dataROI = dataROI - bgROI

        Ag = dataROI[:,:,0]
        Ar = dataROI[:,:,1]

        #print Xg.shape, Ag.shape
        x0 =  (Xg*Ag + Xr*Ar).sum()/(Ag.sum() + Ar.sum())
        y0 =  (Yg*Ag + Yr*Ar).sum()/(Ag.sum() + Ar.sum())

        sig_xl = (numpy.maximum(0, x0 - Xg)*Ag + numpy.maximum(0, x0 - Xr)*Ar).sum()/(Ag.sum() + Ar.sum())
        sig_xr = (numpy.maximum(0, Xg - x0)*Ag + numpy.maximum(0, Xr - x0)*Ar).sum()/(Ag.sum() + Ar.sum())

        sig_yu = (numpy.maximum(0, y0 - Yg)*Ag + numpy.maximum(0, y0 - Yr)*Ar).sum()/(Ag.sum() + Ar.sum())
        sig_yd = (numpy.maximum(0, Yg - y0)*Ag + numpy.maximum(0, Yr - y0)*Ar).sum()/(Ag.sum() + Ar.sum())

        Ag = Ag.sum()  #amplitude
        Ar = Ar.sum()  #amplitude


        res = numpy.array([Ag, Ar, x0, y0, sig_xl, sig_xr, sig_yu, sig_yd])
        
        return COIFitResultR(res, self.metadata, (xslice, yslice, zslice))
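
The position estimate above is an intensity-weighted centroid pooled over both channels. The same computation on a single toy channel, for clarity:

import numpy as np

Xg, Yg = 70. * np.mgrid[0:5, 0:5]  # pixel grids in nm, assuming 70 nm pixels
Ag = np.random.rand(5, 5)          # channel intensities
x0 = (Xg * Ag).sum() / Ag.sum()    # weighted centroid; the two-channel case
y0 = (Yg * Ag).sum() / Ag.sum()    # above just adds the red terms to both sums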
Example #6
 def OnApplyShiftmap(self, event):
     """apply a vectorial correction for chromatic shift to an image - this
     is a generic vectorial shift compensation, rather than the special case
     correction used with the splitter."""
     from scipy import ndimage
     import numpy as np
     from PYME.DSView import ImageStack, ViewIm3D
     from PYME.IO.MetaDataHandler import get_camera_roi_origin
     
     dlg = ShiftmapSelectionDialog(self.dsviewer, self.image)
     succ = dlg.ShowModal()
     if (succ == wx.ID_OK):
         #self.ds = example.CDataStack(fdialog.GetPath().encode())
         #self.ds =
         ds = []
         shiftFiles = {}
         X, Y, Z = np.mgrid[0:self.image.data.shape[0], 0:self.image.data.shape[1], 0:self.image.data.shape[2]]
         
         vx, vy, vz = self.image.voxelsize_nm
         
         roi_x0, roi_y0 = get_camera_roi_origin(self.image.mdh)
         
         for ch in range(self.image.data.shape[3]):
             sfFilename = dlg.GetChanFilename(ch)
             shiftFiles[ch] = sfFilename
             
             data = self.image.data[:,:,:, ch]
             
             if os.path.exists(sfFilename):
                 spx, spy, dz = np.load(sfFilename)
                 
                 dx = spx.ev(vx*(X+roi_x0), vy*(Y+roi_y0))/vx
                 dy = spy.ev(vx*(X+roi_x0), vy*(Y+roi_y0))/vy
                 dz = dz/vz
                 
                 ds.append(ndimage.map_coordinates(data, [X+dx, Y+dy, Z+dz], mode='nearest'))
             else:
                 ds.append(data)
             
         
         fns = os.path.split(self.image.filename)[1]
         im = ImageStack(ds, titleStub = '%s - corrected' % fns)
         im.mdh.copyEntriesFrom(self.image.mdh)
         im.mdh['Parent'] = self.image.filename
         im.mdh.setEntry('ChromaCorrection.ShiftFilenames', shiftFiles)
         
         if 'fitResults' in dir(self.image):
             im.fitResults = self.image.fitResults
         #im.mdh['Processing.GaussianFilter'] = sigmas
 
         if self.dsviewer.mode == 'visGUI':
             mode = 'visGUI'
         else:
             mode = 'lite'
 
         dv = ViewIm3D(im, mode=mode, glCanvas=self.dsviewer.glCanvas, parent=wx.GetTopLevelParent(self.dsviewer))
         
     dlg.Destroy()
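
The heavy lifting here is ndimage.map_coordinates, which resamples the stack at the shifted coordinate grid. The same pattern on a toy volume:

import numpy as np
from scipy import ndimage

data = np.random.rand(32, 32, 4)
X, Y, Z = np.mgrid[0:32, 0:32, 0:4].astype('f')
dx, dy = 1.5, -0.75  # sub-pixel shifts, in pixels
shifted = ndimage.map_coordinates(data, [X + dx, Y + dy, Z], mode='nearest')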
Example #7
def insert_into_full_map(dark, variance, metadata, sensor_size=(2048, 2048)):
    """

    Embeds partial-sensor camera maps into full-sized camera map by padding with basic values in metadata. Alternatively
    can be used to create boring maps to use in place of metadata scalars.

    Parameters
    ----------
    dark: ndarray or None
        darkmap for valid ROI, or None to generate a uniform, ~useless metadata map
    variance: ndarray
        variance for valid ROI, or None to generate a uniform, ~useless metadata map
    metadata: dict-like
        ROI information and camera noise parameters to use when padding maps
    sensor_size: 2-int tuple
        x and y camera sensor size

    Returns
    -------
    full_dark: ndarray
        padded dark map
    full_var: ndarray
        padded variance map
    mdh: PYME.IO.MetadataHandler.NestedClassMDHandler
        metadata handler to be associated with full maps while maintaining information about the original/valid ROI.
    """

    mdh = NestedClassMDHandler()
    mdh.copyEntriesFrom(metadata)
    mdh.setEntry('Analysis.name', 'mean-variance')
    x_origin, y_origin = get_camera_roi_origin(mdh)
    mdh.setEntry('Analysis.valid.ROIOriginX', x_origin)
    mdh.setEntry('Analysis.valid.ROIOriginY', y_origin)
    mdh.setEntry('Analysis.valid.ROIWidth', mdh['Camera.ROIWidth'])
    mdh.setEntry('Analysis.valid.ROIHeight', mdh['Camera.ROIHeight'])
    mdh['Camera.ROIOriginX'], mdh['Camera.ROIOriginY'] = 0, 0
    mdh['Camera.ROIWidth'], mdh['Camera.ROIHeight'] = sensor_size
    mdh['Camera.ROI'] = (0, 0, sensor_size[0], sensor_size[1])

    if dark is not None and variance is not None:
        full_dark = mdh['Camera.ADOffset'] * np.ones(sensor_size,
                                                     dtype=dark.dtype)
        full_var = (mdh['Camera.ReadNoise']**2) * np.ones(sensor_size,
                                                          dtype=variance.dtype)

        xslice = slice(x_origin, x_origin + metadata['Camera.ROIWidth'])
        yslice = slice(y_origin, y_origin + metadata['Camera.ROIHeight'])

        full_dark[xslice, yslice] = dark
        full_var[xslice, yslice] = variance
    else:
        logger.warning('Generating uniform maps')
        full_dark = mdh['Camera.ADOffset'] * np.ones(sensor_size)
        full_var = (mdh['Camera.ReadNoise']**2) * np.ones(sensor_size)

    return full_dark, full_var, mdh
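
The padding itself is plain slice assignment into a pre-filled full-chip array; in isolation, with toy sizes and noise values:

import numpy as np

read_noise, roi_origin, roi_size = 1.1, (96, 200), (256, 256)  # assumed values
full_var = (read_noise ** 2) * np.ones((2048, 2048))
var_roi = np.random.rand(*roi_size)  # measured ROI variance
x0, y0 = roi_origin
full_var[x0:x0 + roi_size[0], y0:y0 + roi_size[1]] = var_roi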
Example #8
    def __init__(self, parentSource, mdh, flatfield=None, dark=None):
        self.source = parentSource
        self.mdh = mdh
        self.mdh['IntensityUnits'] = 'ADU'

        x0, y0 = get_camera_roi_origin(mdh)
        x1 = x0 + mdh.getOrDefault('Camera.ROIWidth', self.source.shape[0]) + 1
        y1 = y0 + mdh.getOrDefault('Camera.ROIHeight',
                                   self.source.shape[1]) + 1

        self.flat = flatfield[x0:x1, y0:y1] if flatfield is not None else 1.
        if dark is None:
            self.dark = float(self.mdh.getEntry('Camera.ADOffset'))
        else:
            self.dark = dark[x0:x1, y0:y1].astype(float)
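
Presumably this datasource is consumed as (frame - dark) * flat; a hedged sketch of that correction with stand-in arrays:

import numpy as np

raw = np.random.poisson(120., size=(256, 256)).astype('f')
dark = 100. * np.ones((256, 256))  # stand-in dark map (or a scalar ADOffset)
flat = np.ones((256, 256))         # stand-in flatfield
corrected = (raw - dark) * flat    # ADU above offset, flatfield-corrected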
Example #9
def get_labels_from_image(inp, img):
    """

    Parameters
    ----------
    inp: PYME.IO.tabular
        localizations to query membership in labels from corresponding image
    img: image.ImageStack
        image containing labels to apply to localizations

    Returns
    -------
    ids: ndarray
        Label number from image, mapped to each localization within that label
    numPerObject: ndarray
        Number of localizations within the label that a given localization belongs to

    """
    im_ox, im_oy, im_oz = img.origin

    # account for ROIs
    try:
        p_ox, p_oy = np.array(get_camera_roi_origin(inp.mdh)) * np.array(
            inp.mdh.voxelsize_nm[:2])
    except AttributeError:
        raise UserWarning('get_labels_from_image requires metadata')

    pixX = np.round((inp['x'] + p_ox - im_ox) / img.pixelSize).astype('i')
    pixY = np.round((inp['y'] + p_oy - im_oy) / img.pixelSize).astype('i')
    pixZ = np.round((inp['z'] - im_oz) / img.sliceSize).astype('i')

    if img.data.shape[2] == 1:
        # disregard z for 2D images
        pixZ = np.zeros_like(pixX)

    ind = (pixX < img.data.shape[0]) * (pixY < img.data.shape[1]) * (
        pixX >= 0) * (pixY >= 0) * (pixZ >= 0) * (pixZ < img.data.shape[2])

    ids = np.zeros_like(pixX)

    # assume there is only one channel
    ids[ind] = np.atleast_3d(img.data[:, :, :,
                                      0].squeeze())[pixX[ind], pixY[ind],
                                                    pixZ[ind]].astype('i')

    numPerObject, b = np.histogram(ids, np.arange(ids.max() + 1.5) + .5)

    return ids, numPerObject
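
The closing histogram counts localizations per label; the half-integer bin edges are easier to see on toy ids:

import numpy as np

ids = np.array([0, 1, 1, 3, 3, 3])
counts, _ = np.histogram(ids, np.arange(ids.max() + 1.5) + .5)
# bins are (0.5, 1.5], (1.5, 2.5], ... so counts = [2, 0, 3, 0] for labels 1-4;
# zero (unlabelled) entries fall below the first edge and are never counted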
Example #10
def genFitImage(fitResults, metadata, fitfcn=f_Interp3d):
    from PYME.IO.MetaDataHandler import get_camera_roi_origin

    xslice = slice(*fitResults['slicesUsed']['x'])
    yslice = slice(*fitResults['slicesUsed']['y'])

    vx, vy, _ = metadata.voxelsize_nm

    #position in nm from camera origin
    roi_x0, roi_y0 = get_camera_roi_origin(metadata)
    x_ = (xslice.start + roi_x0) * vx
    y_ = (yslice.start + roi_y0) * vy

    im = PSFFitFactory._evalModel(fitResults['fitResults'], metadata, xslice,
                                  yslice, x_, y_)

    return im[0].squeeze()
Example #11
def genFitImage(fitResults, metadata):
    xslice = slice(*fitResults['slicesUsed']['x'])
    yslice = slice(*fitResults['slicesUsed']['y'])
    
    vx, vy, _ = metadata.voxelsize_nm
    
    #position in nm from camera origin
    roi_x0, roi_y0 = get_camera_roi_origin(metadata)

    x_ = (xslice.start + roi_x0) * vx
    y_ = (yslice.start + roi_y0) * vy
    
    ratio = fitResults['ratio']
    
    im = InterpFitFactory._evalModel(np.array(list(fitResults['fitResults'])), metadata, xslice, yslice, ratio, x_, y_)[0]
    #print im.shape

    return np.hstack([im[:,:,0], im[:,:,1]]).squeeze()
Example #12
def map_splitter_coords(md, data_shape, x, y):
    vx = md['voxelsize.x'] * 1e3
    vy = md['voxelsize.y'] * 1e3

    x0, y0 = get_camera_roi_origin(md)

    if False:  #'Splitter.Channel0ROI' in md.getEntryNames():
        xg, yg, wg, hg = md['Splitter.Channel0ROI']
        xr, yr, wr, hr = md['Splitter.Channel1ROI']

        #w2 = w - x0
        #h2 = h - y0
    else:
        xg, yg, wg, hg = 0, 0, data_shape[0], data_shape[1]
        xr, yr, wr, hr = wg, hg, wg, hg

    #xg, wg = _bdsClip(xg, wg, x0, data_shape[0])
    #xr, wr = _bdsClip(xr, wr, x0, data_shape[0])
    #yg, hg = _bdsClip(yg, hg, y0, data_shape[1])
    #yr, hr = _bdsClip(yr, hr, y0, data_shape[1])

    w = min(wg, wr)
    h = min(hg, hr)

    ch1 = (x >= xr) & (y >= yr)

    xn = x - ch1 * xr
    yn = y - ch1 * yr

    if md.get('Splitter.Flip', True):  #not (('Splitter.Flip' in md.getEntryNames() and not md.getEntry('Splitter.Flip'))):
        #yn = y - ch1*yr
        yn += ch1 * (h - 2 * yn)

    #chromatic shift
    if 'chroma.dx' in md.getEntryNames():
        dx = md['chroma.dx'].ev((xn + x0) * vx, (yn + y0) * vy) / vx
        dy = md['chroma.dy'].ev((xn + x0) * vx, (yn + y0) * vy) / vy

        xn += dx * ch1
        yn += dy * ch1

    return np.clip(xn, 0, w - 1), np.clip(yn, 0, h - 1)
Example #13
File: genmaps.py  Project: carandraug/PyME
def insertIntoFullMap(m, ve, smdh, chipsize=(2048, 2048)):
    x0, y0 = get_camera_roi_origin(smdh)

    validROI = {
        'PosX': x0 + 1,
        'PosY': y0 + 1,
        'Width': smdh['Camera.ROIWidth'],
        'Height': smdh['Camera.ROIHeight']
    }

    bmdh = NestedClassMDHandler()
    bmdh.copyEntriesFrom(smdh)
    bmdh.setEntry('Analysis.name', 'mean-variance')
    bmdh.setEntry('Analysis.valid.ROIPosX', validROI['PosX'])
    bmdh.setEntry('Analysis.valid.ROIPosY', validROI['PosY'])
    bmdh.setEntry('Analysis.valid.ROIWidth', validROI['Width'])
    bmdh.setEntry('Analysis.valid.ROIHeight', validROI['Height'])

    bmdh['Camera.ROIOriginX'] = 0
    bmdh['Camera.ROIOriginY'] = 0
    bmdh['Camera.ROIWidth'] = chipsize[0]
    bmdh['Camera.ROIHeight'] = chipsize[1]
    bmdh['Camera.ROI'] = (0, 0, chipsize[0], chipsize[1])

    if m is None:
        mfull = np.zeros(chipsize, dtype='float64')
        vefull = np.zeros(chipsize, dtype='float64')
    else:
        mfull = np.zeros(chipsize, dtype=m.dtype)
        vefull = np.zeros(chipsize, dtype=ve.dtype)
    mfull.fill(smdh['Camera.ADOffset'])
    vefull.fill(smdh['Camera.ReadNoise']**2)

    if m is not None:
        mfull[validROI['PosX'] - 1:validROI['PosX'] - 1 + validROI['Width'],
              validROI['PosY'] - 1:validROI['PosY'] - 1 +
              validROI['Height']] = m
        vefull[validROI['PosX'] - 1:validROI['PosX'] - 1 + validROI['Width'],
               validROI['PosY'] - 1:validROI['PosY'] - 1 +
               validROI['Height']] = ve

    return mfull, vefull, bmdh
Example #14
def genFitImage(fitResults, metadata):
    from PYME.IO.MetaDataHandler import get_camera_roi_origin
    xslice = slice(*fitResults['slicesUsed']['x'])
    yslice = slice(*fitResults['slicesUsed']['y'])
    
    vx = 1e3*metadata.voxelsize.x
    vy = 1e3*metadata.voxelsize.y
    
    #position in nm from camera origin
    roi_x0, roi_y0 = get_camera_roi_origin(metadata)

    x_ = (xslice.start + roi_x0)*vx
    y_ = (yslice.start + roi_y0)*vy
    
    #ratio = fitResults['ratio']
    
    im = InterpFitFactory._evalModel(np.array(list(fitResults['fitResults'])), metadata, xslice, yslice, x_, y_)[0]
    #print im.shape

    return np.hstack([im[:,:,0], im[:,:,1]]).squeeze()
Example #15
def get_splitter_rois(md, data_shape):
    x0, y0 = get_camera_roi_origin(md)

    if 'Splitter.Channel0ROI' in md.getEntryNames():
        xg, yg, wg, hg = md['Splitter.Channel0ROI']
        xr, yr, wr, hr = md['Splitter.Channel1ROI']
        #print 'Have splitter ROIs'
    else:
        xg = 0
        yg = 0
        wg = data_shape[0]
        hg = data_shape[1] // 2

        xr = 0
        yr = hg
        wr = data_shape[0]
        hr = data_shape[1] // 2

    #print yr, hr

    xg, wg = _bdsClip(xg, wg, x0, data_shape[0])
    xr, wr = _bdsClip(xr, wr, x0, data_shape[0])
    yg, hg = _bdsClip(yg, hg, y0, data_shape[1])
    yr, hr = _bdsClip(yr, hr, y0, data_shape[1])

    w = min(wg, wr)
    h = min(hg, hr)

    #print yg, hg, yr, hr

    if ('Splitter.Flip' in md.getEntryNames()
            and not md.getEntry('Splitter.Flip')):
        step = 1
        return (slice(xg, xg + w,
                      1), slice(xr, xr + w,
                                1), slice(yg, yg + h,
                                          1), slice(yr, yr + h, step))
    else:
        step = -1
        return (slice(xg, xg + w, 1), slice(xr, xr + w, 1),
                slice(yg + hg - h, yg + hg, 1), slice(yr + h, yr - 1, step))
Example #16
    def __init__(self, parentSource, mdh, flatfield, dark=None):
        #self.h5Filename = getFullFilename(h5Filename)#convert relative path to full path
        #self.h5File = tables.openFile(self.h5Filename)
        self.source = parentSource
        self.mdh = mdh
        #self.flat = flatfield

        x0, y0 = get_camera_roi_origin(mdh)
        x1 = x0 + mdh.getOrDefault('Camera.ROIWidth', self.source.shape[0]) + 1
        y1 = y0 + mdh.getOrDefault('Camera.ROIHeight',
                                   self.source.shape[1]) + 1

        #print((x0, x1, y0, y1))

        #self.offset = mdh.getEntry()

        self.flat = flatfield[x0:x1, y0:y1]
        if dark is None:
            self.dark = self.mdh.getEntry('Camera.ADOffset')
        else:
            self.dark = dark[x0:x1, y0:y1]
Example #17
    def OnExtract(self, event):
        from PYME.DSView import View3D
        from PYME.IO.MetaDataHandler import get_camera_roi_origin
        #print 'extracting ...'

        mdh = self.image.mdh

        #dark = deTile.genDark(self.vp.do.ds, self.image.mdh)
        dark = mdh.getEntry('Camera.ADOffset')

        #split = False

        frames = mdh.getEntry('Protocol.PrebleachFrames')

        dt = self.image.data[:, :,
                             frames[0]:frames[1]].astype('f').mean(2) - dark

        roi_x0, roi_y0 = get_camera_roi_origin(mdh)

        ROIX1 = roi_x0 + 1
        ROIY1 = roi_y0 + 1

        ROIX2 = ROIX1 + mdh.getEntry('Camera.ROIWidth')
        ROIY2 = ROIY1 + mdh.getEntry('Camera.ROIHeight')

        if self.split:
            from PYME.Acquire.Hardware import splitter
            unmux = splitter.Unmixer(
                [mdh.getEntry('chroma.dx'),
                 mdh.getEntry('chroma.dy')], 1e3 * mdh.getEntry('voxelsize.x'))

            dt = unmux.Unmix(dt, self.mixmatrix, 0,
                             [ROIX1, ROIY1, ROIX2, ROIY2])

            View3D(dt, 'Prebleach Image')
        else:
            View3D(dt, 'Prebleach Image')
Example #18
    def getSplitROIAtPoint(self, x, y, z=None, roiHalfSize=5, axialHalfSize=15):
        """Helper fcn to extract ROI from frame at given x,y, point from a multi-channel image.
        
        Returns:
            Xg - x coordinates of pixels in ROI in nm (channel 1)
            Yg - y coordinates of pixels in ROI (channel 1)
            Xr - x coordinates of pixels in ROI in nm (channel 2)
            Yr - y coordinates of pixels in ROI (channel 2)
            data - raw pixel data of ROI
            background - estimated background for ROI
            sigma - estimated error (std. dev) of pixel values
            xslice - x slice into original data array used to get ROI (channel 1)
            yslice - y slice into original data array (channel 1)
            xslice2 - x slice into original data array used to get ROI (channel 2)
            yslice2 - y slice into original data array (channel 2)
        """
        
        x = round(x)
        y = round(y)
        
        roiHalfSize = int(roiHalfSize)
        
        #pixel size in nm
        vx, vy, _ = self.metadata.voxelsize_nm
        
        #position in nm from camera origin
        roi_x0, roi_y0 = get_camera_roi_origin(self.metadata)
        x_ = (x + roi_x0)*vx
        y_ = (y + roi_y0)*vy
        
        
        #look up shifts
        if not self.metadata.getOrDefault('Analysis.FitShifts', False):
            DeltaX = self.shift_x.ev(x_, y_)
            DeltaY = self.shift_y.ev(x_, y_)
        else:
            DeltaX = 0
            DeltaY = 0
        
        #find shift in whole pixels
        dxp = int(DeltaX/vx)
        dyp = int(DeltaY/vy)
        
        #find ROI which works in both channels
        x01 = max(x - roiHalfSize, max(0, dxp))
        x11 = min(max(x01, x + roiHalfSize + 1), self.data.shape[0] + min(0, dxp))
        x02 = x01 - dxp
        x12 = x11 - dxp
        
        y01 = max(y - roiHalfSize, max(0, dyp))
        y11 = min(max(y + roiHalfSize + 1,  y01), self.data.shape[1] + min(0, dyp))
        y02 = y01 - dyp
        y12 = y11 - dyp
        
        xslice = slice(int(x01), int(x11))
        xslice2 = slice(int(x02), int(x12))
        
        yslice = slice(int(y01), int(y11))
        yslice2 = slice(int(y02), int(y12))
        

        #cut region out of data stack
        dataROI = np.copy(self.data[xslice, yslice, 0:2])
        dataROI[:,:,1] = self.data[xslice2, yslice2, 1]
        
        if self.noiseSigma is None:
            sigma = self._calc_sigma(dataROI)
        else:
            sigma = self.noiseSigma[xslice, yslice, 0:2]
            sigma[:,:,1] = self.noiseSigma[xslice2, yslice2, 1]
            
        sigma = ndimage.maximum_filter(sigma, [3,3,0])


        if self.metadata.getOrDefault('Analysis.subtractBackground', True) :
            #print 'bgs'
            if not self.background is None and len(np.shape(self.background)) > 1:
                bgROI = self.background[xslice, yslice, 0:2]
                bgROI[:,:,1] = self.background[xslice2, yslice2, 1]
            else:
                bgROI = np.zeros_like(dataROI) + (self.background if self.background else 0)
        else:
            bgROI = np.zeros_like(dataROI)

 

        #generate grid to evaluate function on        
        Xg = vx*(np.mgrid[xslice] + self.roi_offset[0])
        Yg = vy*(np.mgrid[yslice] + self.roi_offset[1])

        #generate a corrected grid for the red channel
        #note that we're cheating a little here - for shifts which are slowly
        #varying we should be able to set Xr = Xg + delta_x(\bar{Xr}) and
        #similarly for y. For slowly varying shifts the following should be
        #equivalent to this. For rapidly varying shifts all bets are off ...

        #DeltaX, DeltaY = twoColour.getCorrection(Xg.mean(), Yg.mean(), self.metadata['chroma.dx'],self.metadata['chroma.dy'])
        

        Xr = Xg + DeltaX - vx*dxp
        Yr = Yg + DeltaY - vy*dyp
        
            
        return Xg, Yg, Xr, Yr, dataROI, bgROI, sigma, xslice, yslice, xslice2, yslice2
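
The x01/x11 bookkeeping above clamps the ROI so that both the requested region and its pixel-shifted counterpart stay inside the frame. The same arithmetic on plain numbers:

x, roiHalfSize, dxp, width = 10, 5, 3, 256  # centre, half-size, shift (px), frame width

x01 = max(x - roiHalfSize, max(0, dxp))                        # channel 1 start
x11 = min(max(x01, x + roiHalfSize + 1), width + min(0, dxp))  # channel 1 stop
x02, x12 = x01 - dxp, x11 - dxp                                # channel 2, shifted
assert 0 <= x02 and x12 <= width  # the shifted ROI still lies within the frame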
Example #19
def tile(ds,
         xm,
         ym,
         mdh,
         split=True,
         skipMoveFrames=True,
         shiftfield=None,
         mixmatrix=[[1., 0.], [0., 1.]],
         correlate=False,
         dark=None,
         flat=None):
    frameSizeX, frameSizeY, numFrames = ds.shape[:3]

    if split:
        frameSizeY //= 2
        nchans = 2
        unmux = splitter.Unmixer(shiftfield, mdh.voxelsize_nm.x)
    else:
        nchans = 1

    #x & y positions of each frame
    xps = xm(np.arange(numFrames))
    yps = ym(np.arange(numFrames))

    if mdh.getOrDefault('CameraOrientation.FlipX', False):
        xps = -xps

    if mdh.getOrDefault('CameraOrientation.FlipY', False):
        yps = -yps

    #give some room at the edges
    bufSize = 0
    if correlate:
        bufSize = 300

    #convert to pixels
    xdp = (bufSize + ((xps - xps.min()) /
                      (mdh.getEntry('voxelsize.x'))).round()).astype('i')
    ydp = (bufSize + ((yps - yps.min()) /
                      (mdh.getEntry('voxelsize.y'))).round()).astype('i')

    #print (xps - xps.min()), mdh.getEntry('voxelsize.x')

    #work out how big our tiled image is going to be
    imageSizeX = int(np.ceil(xdp.max() + frameSizeX + bufSize))
    imageSizeY = int(np.ceil(ydp.max() + frameSizeY + bufSize))

    #print imageSizeX, imageSizeY

    #allocate an empty array for the image
    im = np.zeros([imageSizeX, imageSizeY, nchans])

    # and to record occupancy (to normalise overlapping tiles)
    occupancy = np.zeros([imageSizeX, imageSizeY, nchans])

    #calculate a weighting matrix (to allow feathering at the edges - TODO)
    weights = np.ones((frameSizeX, frameSizeY, nchans))
    #weights[:, :10, :] = 0 #avoid splitter edge artefacts
    #weights[:, -10:, :] = 0

    #print weights[:20, :].shape
    edgeRamp = min(100, int(.5 * ds.shape[0]))
    weights[:edgeRamp, :, :] *= np.linspace(0, 1, edgeRamp)[:, None, None]
    weights[-edgeRamp:, :, :] *= np.linspace(1, 0, edgeRamp)[:, None, None]
    weights[:, :edgeRamp, :] *= np.linspace(0, 1, edgeRamp)[None, :, None]
    weights[:, -edgeRamp:, :] *= np.linspace(1, 0, edgeRamp)[None, :, None]

    roi_x0, roi_y0 = get_camera_roi_origin(mdh)

    ROIX1 = roi_x0 + 1
    ROIY1 = roi_y0 + 1

    ROIX2 = ROIX1 + mdh.getEntry('Camera.ROIWidth')
    ROIY2 = ROIY1 + mdh.getEntry('Camera.ROIHeight')

    if dark is None:
        offset = float(mdh.getEntry('Camera.ADOffset'))
    else:
        offset = 0.

#    #get a sorted list of x and y values
#    xvs = list(set(xdp))
#    xvs.sort()
#
#    yvs = list(set(ydp))
#    yvs.sort()

    for i in range(mdh.getEntry('Protocol.DataStartsAt'), numFrames):
        if xdp[i - 1] == xdp[i] or not skipMoveFrames:
            d = ds[:, :, i].astype('f')
            if not dark is None:
                d = d - dark
            if not flat is None:
                d = d * flat

            if split:
                d = np.concatenate(
                    unmux.Unmix(d, mixmatrix, offset,
                                [ROIX1, ROIY1, ROIX2, ROIY2]), 2)
            #else:
            #d = d.reshape(list(d.shape) + [1])

            imr = (im[xdp[i]:(xdp[i] + frameSizeX),
                      ydp[i]:(ydp[i] + frameSizeY), :] /
                   occupancy[xdp[i]:(xdp[i] + frameSizeX),
                             ydp[i]:(ydp[i] + frameSizeY), :])
            alreadyThere = (weights *
                            occupancy[xdp[i]:(xdp[i] + frameSizeX), ydp[i]:
                                      (ydp[i] + frameSizeY), :]).sum(2) > 0

            #d_ = d.sum(2)

            if split:
                r0 = imr[:, :, 0][alreadyThere].sum()
                r1 = imr[:, :, 1][alreadyThere].sum()

                if r0 == 0:
                    r0 = 1
                else:
                    r0 = r0 / (d[:, :, 0][alreadyThere]).sum()

                if r1 == 0:
                    r1 = 1
                else:
                    r1 = r1 / (d[:, :, 1][alreadyThere]).sum()

                rt = np.array([r0, r1])

                imr = imr.sum(2)
            else:
                rt = imr[:, :, 0][alreadyThere].sum()
                if rt == 0:
                    rt = 1
                else:
                    rt = rt / (d[:, :, 0][alreadyThere]).sum()

                rt = np.array([rt])

            #print rt

            if correlate:
                if (alreadyThere.sum() > 50):
                    dx = 0
                    dy = 0
                    rois = findRectangularROIs(alreadyThere)

                    for r in rois:
                        x0, y0, x1, y1 = r
                        #print r
                        dx_, dy_ = calcCorrShift(
                            d.sum(2)[x0:x1, y0:y1], imr[x0:x1, y0:y1])
                        print(('d_', dx_, dy_))
                        dx += dx_
                        dy += dy_

                    dx = int(np.round(dx / len(rois)))
                    dy = int(np.round(dy / len(rois)))

                    print((dx, dy))

                    #dx, dy = (0,0)
                else:
                    dx, dy = (0, 0)

                im[(xdp[i] + dx):(xdp[i] + frameSizeX + dx),
                   (ydp[i] + dy):(ydp[i] + frameSizeY +
                                  dy), :] += weights * d * rt[None, None, :]
                occupancy[(xdp[i] + dx):(xdp[i] + frameSizeX + dx),
                          (ydp[i] + dy):(ydp[i] + frameSizeY +
                                         dy), :] += weights

            else:
                #print weights.shape, rt.shape, d.shape
                im[xdp[i]:(xdp[i] + frameSizeX),
                   ydp[i]:(ydp[i] +
                           frameSizeY), :] += weights * d  #*rt[None, None, :]
                occupancy[xdp[i]:(xdp[i] + frameSizeX),
                          ydp[i]:(ydp[i] + frameSizeY), :] += weights

    ret = (im / occupancy).squeeze()
    #print ret.shape, occupancy.shape
    ret[occupancy.squeeze() == 0] = 0  #fix up /0s

    return ret
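
Dividing the accumulated image by the occupancy map averages overlapping tiles; pixels no frame touched divide 0/0 and are zeroed afterwards. In isolation:

import numpy as np

im = np.array([[2., 4.], [0., 6.]])
occupancy = np.array([[1., 2.], [0., 3.]])
ret = im / occupancy     # emits a divide warning where occupancy == 0
ret[occupancy == 0] = 0  # fix up /0s, exactly as above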
Example #20
    def _extractROI_1(self, fri):
        from PYME.IO.MetaDataHandler import get_camera_roi_origin
        roi_x0, roi_y0 = get_camera_roi_origin(self.mdh)

        if 'Splitter' in self.mdh['Analysis.FitModule']:
            # is a splitter fit
            if 'Splitter.Channel0ROI' in self.mdh.getEntryNames():
                x0, y0, w, h = self.mdh['Splitter.Channel0ROI']

                x0 -= roi_x0
                y0 -= roi_y0
                # g = self.data[x0:(x0+w), y0:(y0+h)]
                x1, y1, w, h = self.mdh['Splitter.Channel1ROI']
                x1 -= roi_x0
                y1 -= roi_y0
                # r = self.data[x0:(x0+w), y0:(y0+h)]
            else:
                x0, y0 = 0, 0
                x1, y1 = 0, (self.mdh['Camera.ROIHeight'] + 1) // 2
                h = y1

            slux = fri['slicesUsed']['x']
            sluy = fri['slicesUsed']['y']

            slx = slice(slux[0], slux[1])
            sly = slice(sluy[0], sluy[1])

            # sx0 = slice(x0+ slux[0], x0+slux[1])
            # sy0 = slice(y0+ sluy[0], y0+sluy[1])

            if 'NR' in self.mdh['Analysis.FitModule']:
                # for fits which take chromatic shift into account when selecting ROIs
                # pixel size in nm
                vx, vy, _ = self.mdh.voxelsize_nm

                # position in nm from camera origin
                x_ = ((slux[0] + slux[1]) / 2. + roi_x0) * vx
                y_ = ((sluy[0] + sluy[1]) / 2. + roi_y0) * vy

                # look up shifts
                if not self.mdh.getOrDefault('Analysis.FitShifts', False):
                    DeltaX = self.mdh.chroma.dx.ev(x_, y_)
                    DeltaY = self.mdh.chroma.dy.ev(x_, y_)
                else:
                    DeltaX = 0
                    DeltaY = 0

                # find shift in whole pixels
                dxp = int(DeltaX / vx)
                dyp = int(DeltaY / vy)

                print((DeltaX, DeltaY, dxp, dyp))

                x1 -= dxp
                y1 -= dyp

            sx1 = slice(x1 - x0 + slux[0], x1 - x0 + slux[1])

            if ('Splitter.Flip' in self.mdh.getEntryNames()
                    and not self.mdh.getEntry('Splitter.Flip')):
                sy1 = slice(y1 - y0 + sluy[0], y1 - y0 + sluy[1])
            else:
                sy1 = slice(y1 + h + y0 - sluy[0], y1 + h + y0 - sluy[1],
                            -1)  # FIXME

            print((slx, sx1, sly, sy1))
            print(h, y0, y1, sluy)

            g = self.ds[slx, sly, int(fri['tIndex'])].squeeze()
            r = self.ds[sx1, sy1, int(fri['tIndex'])].squeeze()

            return np.hstack([g, r])
        else:
            return self.ds[slice(*fri['slicesUsed']['x']),
                           slice(*fri['slicesUsed']['y']),
                           int(fri['tIndex'])].squeeze()
Example #21
def get_labels_from_image(label_image, points, minimum_localizations=1):
    """
    Function to extract labels from a segmented image (2D or 3D) at given locations. 

    Parameters
    ----------
    label_image: PYME.IO.image.ImageStack instance
        an image containing object labels
    points: tabular-like (PYME.IO.tabular, np.recarray, pandas DataFrame) containing 'x', 'y' & 'z' columns
        locations at which to extract labels
    minimum_localizations: int
        labels with fewer than this many localizations are discarded (their ids set to zero)

    Returns
    -------
    ids: Label number from image, mapped to each localization within that label
    numPerObject: Number of localizations within the label that a given localization belongs to

    """
    from PYME.IO.MetaDataHandler import get_camera_roi_origin

    im_ox, im_oy, im_oz = label_image.origin

    # account for ROIs
    try:
        roi_x0, roi_y0 = get_camera_roi_origin(points.mdh)

        vs = points.mdh.voxelsize_nm
        p_ox = roi_x0 * vs.x
        p_oy = roi_y0 * vs.y
    except AttributeError:
        raise RuntimeError(
            'label image requires metadata specifying ROI position and voxelsize'
        )

    # Image origin is referenced to top-left corner of pixelated image.
    # FIXME - localisations are currently referenced to centre of raw pixels
    pixX = np.floor(
        (points['x'] + p_ox - im_ox) / label_image.pixelSize).astype('i')
    pixY = np.floor(
        (points['y'] + p_oy - im_oy) / label_image.pixelSize).astype('i')
    pixZ = np.floor((points['z'] - im_oz) / label_image.sliceSize).astype('i')

    label_data = label_image.data

    if label_data.shape[2] == 1:
        # disregard z for 2D images
        pixZ = np.zeros_like(pixX)

    ind = (pixX < label_data.shape[0]) * (pixY < label_data.shape[1]) * (
        pixX >= 0) * (pixY >= 0) * (pixZ >= 0) * (pixZ < label_data.shape[2])

    ids = np.zeros_like(pixX)

    # assume there is only one channel
    ids[ind] = np.atleast_3d(label_data[:, :, :,
                                        0].squeeze())[pixX[ind], pixY[ind],
                                                      pixZ[ind]].astype('i')

    # check if we keep all labels
    if minimum_localizations > 1:  # skip if we don't need this
        labels, counts = np.unique(ids, return_counts=True)
        labels, counts = labels[1:], counts[
            1:]  # ignore unlabeled points, or zero-label
        for label, count in zip(labels, counts):
            if count < minimum_localizations:
                ids[ids == label] = 0

    numPerObject, b = np.histogram(ids, np.arange(ids.max() + 1.5) + .5)

    return ids, numPerObject
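
The minimum_localizations pruning is a small np.unique pass; on toy ids with a threshold of 2:

import numpy as np

ids = np.array([0, 1, 2, 2, 2])
labels, counts = np.unique(ids, return_counts=True)
labels, counts = labels[1:], counts[1:]  # drop the zero (unlabelled) entry
for label, count in zip(labels, counts):
    if count < 2:
        ids[ids == label] = 0
# ids -> [0, 0, 2, 2, 2]: the singleton label 1 has been discarded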
Example #22
    def _getUSDataSources(self):
        mdh = self.image.mdh
        if 'chroma.dx' in mdh.getEntryNames():
            sf = (mdh['chroma.dx'], mdh['chroma.dy'])
        elif global_shiftfield:
            sf = global_shiftfield
        else:
            sf = None

        flip = True
        if 'Splitter.Flip' in mdh.getEntryNames() and not mdh['Splitter.Flip']:
            flip = False

        chanROIs = None
        if 'Splitter.Channel0ROI' in mdh.getEntryNames():
            chanROIs = [
                mdh['Splitter.Channel0ROI'], mdh['Splitter.Channel1ROI']
            ]

        if 'Multiview.NumROIs' in mdh.getEntryNames():
            # we have more than 2 ROIs
            numROIs = mdh['Multiview.NumROIs']
            w, h = mdh['Multiview.ROISize']

            #print self.image.data.shape, w, h, numROIs
            flip = False

            if self.image.data.shape[0] == numROIs * w:
                #we are extracted as expected.
                h_ = min(h, int(self.image.data.shape[1]))

                chanROIs = []
                for i in range(numROIs):
                    x0, y0 = (i * w, 0)
                    chanROIs.append((x0, y0, w, h_))

                #TODO - Fix me to use proper coordinates
                ROIX1, ROIY1 = (1, 1)
                ROIX2, ROIY2 = (w * numROIs, h_)

            else:
                #raw data - do the extraction ourselves
                raise RuntimeError(
                    "data has not been unsplit, we can't handle this at present"
                )
                chanROIs = []
                for i in range(numROIs):
                    x0, y0 = mdh['Multiview.ROISize']
                    chanROIs.append((x0, y0, w, h))
        else:
            #default to old splitter code
            from PYME.IO.MetaDataHandler import get_camera_roi_origin

            roi_x0, roi_y0 = get_camera_roi_origin(mdh)

            ROIX1 = roi_x0 + 1
            ROIY1 = roi_y0 + 1

            ROIX2 = ROIX1 + mdh.getEntry('Camera.ROIWidth')
            ROIY2 = ROIY1 + mdh.getEntry('Camera.ROIHeight')

            numROIs = 2

        usds = [
            UnsplitDataSource.DataSource(self.image.data,
                                         [ROIX1, ROIY1, ROIX2, ROIY2],
                                         i,
                                         flip,
                                         sf,
                                         chanROIs=chanROIs,
                                         voxelsize=self.image.voxelsize)
            for i in range(numROIs)
        ]

        return usds
Example #23
    def getMultiviewROIAtPoint(self, x, y, z=None, roiHalfSize=5, axialHalfSize=15):
        """Helper fcn to extract ROI from frame at given x,y, point from a multi-channel image.
        
        WARNING: EXPERIMENTAL WORK IN PROGRESS!!! This will eventually replace getSplitROIAtPoint and generalise to higher
        dimensional splitting (e.g. 4-quadrant systems such as the 4Pi-SMS) but is not useful in its current form.

        Returns:
            Xg - x coordinates of pixels in ROI in nm (channel 1)
            Yg - y coordinates of pixels in ROI (channel 1)
            Xr - x coordinates of pixels in ROI in nm (channel 2)
            Yr - y coordinates of pixels in ROI (channel 2)
            data - raw pixel data of ROI
            background - estimated background for ROI
            sigma - estimated error (std. dev) of pixel values
            xslice - x slice into original data array used to get ROI (channel 1)
            yslice - y slice into original data array (channel 1)
            xslice2 - x slice into original data array used to get ROI (channel 2)
            yslice2 - y slice into original data array (channel 2)
        """
    
        x = round(x)
        y = round(y)
    
        #pixel size in nm
        vx, vy, _ = self.metadata.voxelsize_nm
    
        #position in nm from camera origin
        roi_x0, roi_y0 = get_camera_roi_origin(self.metadata)
        x_ = (x + roi_x0) * vx
        y_ = (y + roi_y0) * vy
    
        #look up shifts
        if not self.metadata.getOrDefault('Analysis.FitShifts', False):
            DeltaX = self.shift_x.ev(x_, y_)
            DeltaY = self.shift_y.ev(x_, y_)
        else:
            DeltaX = 0
            DeltaY = 0
    
        #find shift in whole pixels
        dxp = int(DeltaX / vx)
        dyp = int(DeltaY / vy)
    
        #find ROI which works in both channels
        #if dxp < 0:
        x01 = max(x - roiHalfSize, max(0, dxp))
        x11 = min(max(x01, x + roiHalfSize + 1), self.data.shape[0] + min(0, dxp))
        x02 = x01 - dxp
        x12 = x11 - dxp
    
        y01 = max(y - roiHalfSize, max(0, dyp))
        y11 = min(max(y + roiHalfSize + 1, y01), self.data.shape[1] + min(0, dyp))
        y02 = y01 - dyp
        y12 = y11 - dyp
    
        xslice = slice(int(x01), int(x11))
        xslice2 = slice(int(x02), int(x12))
    
        yslice = slice(int(y01), int(y11))
        yslice2 = slice(int(y02), int(y12))
    
        #print xslice2, yslice2
    
    
        #cut region out of data stack
        dataROI = self.data[xslice, yslice, 0:2]
        #print dataROI.shape
        dataROI[:, :, 1] = self.data[xslice2, yslice2, 1]
    
        nSlices = 1
        #sigma = np.sqrt(self.metadata.Camera.ReadNoise**2 + (self.metadata.Camera.NoiseFactor**2)*self.metadata.Camera.ElectronsPerCount*self.metadata.Camera.TrueEMGain*np.maximum(dataROI, 1)/nSlices)/self.metadata.Camera.ElectronsPerCount
        #phConv = self.metadata.Camera.ElectronsPerCount/self.metadata.Camera.TrueEMGain
        #nPhot = dataROI*phConv
    
        if self.noiseSigma is None:
            sigma = self._calc_sigma(dataROI)
        else:
            sigma = self.noiseSigma[xslice, yslice, 0:2]
            sigma[:, :, 1] = self.noiseSigma[xslice2, yslice2, 1]
    
        sigma = ndimage.maximum_filter(sigma, [3, 3, 0])
    
        if self.metadata.getOrDefault('Analysis.subtractBackground', True):
            #print 'bgs'
            if not self.background is None and len(np.shape(self.background)) > 1:
                bgROI = self.background[xslice, yslice, 0:2]
                bgROI[:, :, 1] = self.background[xslice2, yslice2, 1]
            else:
                bgROI = np.zeros_like(dataROI) + self.background
        else:
            bgROI = np.zeros_like(dataROI)
    
        #generate grid to evaluate function on
        Xg = vx * (np.mgrid[xslice] + self.roi_offset[0])
        Yg = vy * (np.mgrid[yslice] + self.roi_offset[1])
    
        #generate a corrected grid for the red channel
        #note that we're cheating a little here - for shifts which are slowly
        #varying we should be able to set Xr = Xg + delta_x(\bar{Xr}) and
        #similarly for y. For slowly varying shifts the following should be
        #equivalent to this. For rapidly varying shifts all bets are off ...
    
        #DeltaX, DeltaY = twoColour.getCorrection(Xg.mean(), Yg.mean(), self.metadata['chroma.dx'],self.metadata['chroma.dy'])
    
    
        Xr = Xg + DeltaX - vx * dxp
        Yr = Yg + DeltaY - vy * dyp
    
        return Xg, Yg, Xr, Yr, dataROI, bgROI, sigma, xslice, yslice, xslice2, yslice2
Example #24
def tile_pyramid(out_folder, ds, xm, ym, mdh, split=False, skipMoveFrames=False, shiftfield=None,
                 mixmatrix=[[1., 0.], [0., 1.]],
                 correlate=False, dark=None, flat=None, pyramid_tile_size=256):
    frameSizeX, frameSizeY, numFrames = ds.shape[:3]
    
    if split:
        from PYME.Acquire.Hardware import splitter
        frameSizeY //= 2
        nchans = 2
        unmux = splitter.Unmixer(shiftfield, 1e3 * mdh.getEntry('voxelsize.x'))
    else:
        nchans = 1
    
    #x & y positions of each frame
    xps = xm(np.arange(numFrames))
    yps = ym(np.arange(numFrames))

    if mdh.getOrDefault('CameraOrientation.FlipX', False):
        xps = -xps
    
    if mdh.getOrDefault('CameraOrientation.FlipY', False):
        yps = -yps

    rotate_cam = mdh.getOrDefault('CameraOrientation.Rotate', False)

    #give some room at the edges
    bufSize = 0
    if correlate:
        bufSize = 300
    

    x0 = xps.min()
    y0 = yps.min()
    xps -= x0
    yps -= y0

    #convert to pixels
    xdp = (bufSize + (xps / (mdh.getEntry('voxelsize.x'))).round()).astype('i')
    ydp = (bufSize + (yps / (mdh.getEntry('voxelsize.y'))).round()).astype('i')
    
    #calculate a weighting matrix (to allow feathering at the edges - TODO)
    weights = np.ones((frameSizeX, frameSizeY, nchans))
    #weights[:, :10, :] = 0 #avoid splitter edge artefacts
    #weights[:, -10:, :] = 0
    
    #print weights[:20, :].shape
    edgeRamp = min(100, int(.25 * ds.shape[0]))
    weights[:edgeRamp, :, :] *= np.linspace(0, 1, edgeRamp)[:, None, None]
    weights[-edgeRamp:, :, :] *= np.linspace(1, 0, edgeRamp)[:, None, None]
    weights[:, :edgeRamp, :] *= np.linspace(0, 1, edgeRamp)[None, :, None]
    weights[:, -edgeRamp:, :] *= np.linspace(1, 0, edgeRamp)[None, :, None]
    
    roi_x0, roi_y0 = get_camera_roi_origin(mdh)
    
    ROIX1 = roi_x0 + 1
    ROIY1 = roi_y0 + 1
    
    ROIX2 = ROIX1 + mdh.getEntry('Camera.ROIWidth')
    ROIY2 = ROIY1 + mdh.getEntry('Camera.ROIHeight')
    
    if dark is None:
        offset = float(mdh.getEntry('Camera.ADOffset'))
    else:
        offset = 0.

    P = ImagePyramid(out_folder, pyramid_tile_size, x0=x0, y0=y0, pixel_size=mdh.getEntry('voxelsize.x'))

    logger.debug('Adding base tiles ...')
    
    t1 = time.time()
    for i in range(int(mdh.getEntry('Protocol.DataStartsAt')), numFrames):
        if xdp[i - 1] == xdp[i] or not skipMoveFrames:
            x_i = xdp[i]
            y_i = ydp[i]
            d = ds[:, :, i].astype('f')
            if not dark is None:
                d = d - dark
            if not flat is None:
                d = d * flat
            
            if split:
                d = np.concatenate(unmux.Unmix(d, mixmatrix, offset, [ROIX1, ROIY1, ROIX2, ROIY2]), 2)

            d_weighted = weights * d


            # orient frame - TODO - check if we need to flip x and y?!
            if rotate_cam:
                #print('adding base tile from frame %d [transposed]' % i)
                P.add_base_tile(x_i, y_i, d_weighted.T.squeeze(), weights.T.squeeze())
            else:
                #print('adding base tile from frame %d' % i)
                P.add_base_tile(x_i, y_i, d_weighted.squeeze(), weights.squeeze())
                
    
    t2 = time.time()
    logger.debug('Added base tiles in %fs' % (t2 - t1))
    #P._occ.flush()
    logger.debug(time.time() - t2)
    logger.debug('Updating pyramid ...')
    P.update_pyramid()
    logger.debug(time.time() - t2)
    logger.debug('Done')
    return P