Example #1
    def newImage(self, new_image):
        """
        This is called once at the start of the analysis of a new image.
        
        new_image - A 2D numpy array.
        """
        super(PeakFinderArbitraryPSF, self).newImage(new_image)

        #
        # If they do not already exist, create filter objects from
        # the PSF at different z values.
        #
        if (len(self.fg_mfilter) == 0):
            for zval in self.fg_mfilter_zval:
                psf = self.psf_object.getPSF(zval,
                                             shape=new_image.shape,
                                             normalize=False)
                psf_norm = psf / numpy.sum(psf)
                fg_mfilter = matchedFilterC.MatchedFilter(psf_norm,
                                                          memoize=True,
                                                          max_diff=1.0e-3)
                self.fg_mfilter.append(fg_mfilter)
                self.fg_vfilter.append(
                    matchedFilterC.MatchedFilter(psf_norm * psf_norm,
                                                 memoize=True,
                                                 max_diff=1.0e-3))

                # Save a picture of the PSF for debugging purposes.
                if self.check_mode:
                    print("psf max", numpy.max(psf))
                    filename = "psf_{0:.3f}.tif".format(zval)
                    tifffile.imsave(filename, psf.astype(numpy.float32))
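
A note on the psf_norm / psf_norm * psf_norm pairing above: for independent pixel noise, a linear filter with weights w maps per-pixel variances to w * w filtered variances, since Var(sum_i w_i x_i) = sum_i w_i^2 Var(x_i). That is why the variance filter (fg_vfilter) is built from the squared unit-sum PSF. A minimal numpy-only sketch of the identity (illustrative, not part of the original code):

import numpy

rng = numpy.random.default_rng(0)
w = rng.random((5, 5))
w = w / numpy.sum(w)        # Unit-sum filter, analogous to psf_norm.
pixel_var = 2.0             # Variance of each independent pixel.

# Monte Carlo estimate of the variance of a single filtered pixel.
samples = [numpy.sum(w * rng.normal(0.0, numpy.sqrt(pixel_var), w.shape))
           for _ in range(100000)]
print(numpy.var(samples))              # Empirical output variance.
print(pixel_var * numpy.sum(w * w))    # Predicted: variance filtered with w * w.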
Example #2
def test_matched_filter4():
    """
    Verify that fftw_estimate has no effect on the results.
    """
    x_size = 80
    y_size = 90

    objects = numpy.zeros((1, 5))

    # Make filter with unit sum.
    objects[0,:] = [x_size/2, y_size/2, 1.0, 1.0, 1.0]
    psf = dg.drawGaussians((x_size, y_size), objects)
    psf = psf/numpy.sum(psf)
    flt1 = matchedFilterC.MatchedFilter(psf, fftw_estimate = False)
    flt2 = matchedFilterC.MatchedFilter(psf, fftw_estimate = True)

    for i in range(1,5):
        image = numpy.zeros((x_size, y_size))
        image[int(x_size/2), int(y_size/2)] = float(i)
        conv1 = flt1.convolve(image)
        conv2 = flt2.convolve(image)
        
        assert(numpy.allclose(conv1, conv2))

    flt1.cleanup()
    flt2.cleanup()
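
The fftw_estimate flag presumably maps to FFTW's FFTW_ESTIMATE planner mode. FFTW planner flags only change how the library searches for a fast execution plan, not which transform is computed, so plans built with different flags agree up to floating-point round-off; that is why the test uses numpy.allclose() rather than exact equality.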
Example #3
    def newImage(self, new_image):
        """
        This is called once at the start of the analysis of a new image.
        
        new_image - A 2D numpy array.
        """
        super(PeakFinderArbitraryPSF, self).newImage(new_image)
    
        #
        # If they do not already exist, create filter objects from
        # the PSF at different z values.
        #
        # As not all PSFs will be maximal in the center, we can't just
        # use the image intensity at the center as the starting
        # height value. Instead we will use the intensity at the
        # peak center of the convolved image, then adjust this
        # value by the height_rescale parameter.
        #
        if (len(self.fg_mfilter) == 0):
            for zval in self.fg_mfilter_zval:
                psf = self.psf_object.getPSF(zval,
                                             shape = new_image.shape,
                                             normalize = False)
                psf_norm = psf/numpy.sum(psf)
                self.fg_mfilter.append(matchedFilterC.MatchedFilter(psf_norm, memoize = True, max_diff = 1.0e-3))
                self.fg_vfilter.append(matchedFilterC.MatchedFilter(psf_norm * psf_norm, memoize = True, max_diff = 1.0e-3))

                # Save a picture of the PSF for debugging purposes.
                if self.check_mode:
                    print("psf max", numpy.max(psf))
                    filename = "psf_{0:.3f}.tif".format(zval)
                    tifffile.imsave(filename, psf.astype(numpy.float32))
Example #4
    def setVariances(self, variances):
        """
        setVariances() customized for arbitrary PSFs.
        """

        # Make sure that the number of (sCMOS) variance arrays
        # matches the number of image planes.
        #
        assert (len(variances) == self.n_channels)

        # Pad variances to correct size.
        #
        temp = []
        for variance in variances:
            temp.append(fitting.padArray(variance, self.margin))
        variances = temp

        # Create "foreground" and "variance" filters.
        #
        # These are stored in a list indexed by z value, then by
        # channel / plane. So self.mfilters[1][2] is the filter
        # for z value 1, plane 2.
        #
        for i, mfilter_z in enumerate(self.mfilters_z):
            self.mfilters.append([])
            self.vfilters.append([])

            for j, psf_object in enumerate(self.psf_objects):
                psf = psf_object.getPSF(mfilter_z,
                                        shape=variances[0].shape,
                                        normalize=False)

                #
                # We are assuming that the psf has no negative values,
                # or if it does that they are very small.
                #
                psf_norm = psf / numpy.sum(psf)
                self.mfilters[i].append(
                    matchedFilterC.MatchedFilter(psf_norm,
                                                 memoize=True,
                                                 max_diff=1.0e-3))
                self.vfilters[i].append(
                    matchedFilterC.MatchedFilter(psf_norm * psf_norm,
                                                 memoize=True,
                                                 max_diff=1.0e-3))

                # Save a picture of the PSF for debugging purposes.
                if self.check_mode:
                    print("psf max", numpy.max(psf))
                    filename = "psf_z{0:.3f}_c{1:d}.tif".format(mfilter_z, j)
                    tifffile.imsave(filename, psf.astype(numpy.float32))

        # This handles the rest of the initialization.
        #
        super(MPPeakFinderArb, self).setVariances(variances)

        return variances
Example #5
    def setVariances(self, variances):
        """
        setVariances() customized for gaussian PSFs.
        """

        # Make sure that the number of (sCMOS) variance arrays
        # matches the number of image planes.
        #
        assert (len(variances) == self.n_channels)

        # Pad variances to correct size.
        #
        temp = []
        for variance in variances:
            temp.append(fitting.padArray(variance, self.margin))
        variances = temp

        # Create "foreground" and "variance" filters. There is
        # only one z value here.
        #
        # These are stored in a list indexed by z value, then by
        # channel / plane. So self.mfilters[1][2] is the filter
        # for z value 1, plane 2.
        #
        self.mfilters.append([])
        self.vfilters.append([])

        psf_norm = fitting.gaussianPSF(
            variances[0].shape, self.parameters.getAttr("foreground_sigma"))
        var_norm = psf_norm * psf_norm

        for i in range(self.n_channels):
            self.mfilters[0].append(
                matchedFilterC.MatchedFilter(
                    psf_norm,
                    fftw_estimate=self.parameters.getAttr("fftw_estimate"),
                    memoize=True,
                    max_diff=1.0e-3))
            self.vfilters[0].append(
                matchedFilterC.MatchedFilter(
                    var_norm,
                    fftw_estimate=self.parameters.getAttr("fftw_estimate"),
                    memoize=True,
                    max_diff=1.0e-3))

            # Save a picture of the PSF for debugging purposes.
            if self.check_mode:
                print("psf max", numpy.max(psf_norm))
                filename = "psf_z0.0_c{0:d}.tif".format(i)
                tifffile.imsave(filename, psf_norm.astype(numpy.float32))

        # This handles the rest of the initialization.
        #
        super(MPPeakFinderDao, self).setVariances(variances)

        return variances
Example #6
    def newImage(self, new_image):
        """
        This is called once at the start of the analysis of a new image.
        
        new_image - A 2D numpy array.
        """
        # Make a copy of the starting image.
        self.image = numpy.copy(new_image)
        
        # Initialize new peak minimum threshold. If we are doing more
        # than one iteration we start a bit higher and come down to
        # the specified threshold.
        if (self.iterations > 4):
            self.cur_threshold = self.threshold + 4.0
        else:
            self.cur_threshold = self.threshold + float(self.iterations)

        # Create mask to limit peak finding to a user-defined sub-region of the image.
        if self.peak_mask is None:
            self.peak_mask = numpy.ones(new_image.shape)
            if self.parameters.hasAttr("x_start"):
                self.peak_mask[0:self.parameters.getAttr("x_start")+self.margin,:] = 0.0
            if self.parameters.hasAttr("x_stop"):
                self.peak_mask[self.parameters.getAttr("x_stop")+self.margin:-1,:] = 0.0
            if self.parameters.hasAttr("y_start"):
                self.peak_mask[:,0:self.parameters.getAttr("y_start")+self.margin] = 0.0
            if self.parameters.hasAttr("y_stop"):
                self.peak_mask[:,self.parameters.getAttr("y_stop")+self.margin:-1] = 0.0

        # Create filter objects if necessary.
        if self.bg_filter is None:

            # Create matched filter for background.
            bg_psf = gaussianPSF(new_image.shape, self.parameters.getAttr("background_sigma"))
            self.bg_filter = matchedFilterC.MatchedFilter(bg_psf, memoize = True, max_diff = 1.0e-3)

            #
            # Create matched filter for foreground as well as a matched filter
            # for calculating the expected variance of the background if it was
            # smoothed on the same scale as the foreground.
            #
            if self.parameters.hasAttr("foreground_sigma"):
                if (self.parameters.getAttr("foreground_sigma") > 0.0):
                    fg_psf = gaussianPSF(new_image.shape, self.parameters.getAttr("foreground_sigma"))
                    self.fg_mfilter = matchedFilterC.MatchedFilter(fg_psf, memoize = True, max_diff = 1.0e-3)
                    self.fg_vfilter = matchedFilterC.MatchedFilter(fg_psf * fg_psf, memoize = True, max_diff = 1.0e-3)

        # Reset maxima finder.
        self.mfinder.resetTaken()
Example #7
def test_matched_filter2():
    """
    Test recovering the original height from the convolved image.

    FIXME: Not sure this test is relevant anymore as we no longer use
           this approach for initializing peak heights.
    """
    x_size = 80
    y_size = 90

    objects = numpy.zeros((1, 5))

    # Make filter with unit sum.    
    objects[0,:] = [x_size/2, y_size/2, 1.0, 2.0, 2.0]
    psf = dg.drawGaussians((x_size, y_size), objects)
    psf_norm = psf/numpy.sum(psf)
    flt = matchedFilterC.MatchedFilter(psf_norm)

    rescale = 1.0/numpy.sum(psf * psf_norm)

    # Create object with height 10 and the same shape as the filter.
    height = 10.0
    objects[0,:] = [x_size/2, y_size/2, height, 2.0, 2.0]
    image = dg.drawGaussians((x_size, y_size), objects)

    # Apply filter.
    conv = flt.convolve(image)

    # Verify that final height is 'close enough'.
    assert (abs(numpy.amax(conv) * rescale - height)/height < 1.0e-2)

    flt.cleanup()
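
Why the rescale factor recovers the height: if the image is an isolated object with the same shape as the filter PSF, image = height * psf, the convolution with the unit-sum filter peaks at height * sum(psf * psf_norm), so multiplying by 1 / sum(psf * psf_norm) returns the height. A self-contained numpy check of the identity (synthetic PSF in place of drawGaussians):

import numpy

rng = numpy.random.default_rng(1)
psf = rng.random((21, 21))
psf_norm = psf / numpy.sum(psf)

height = 10.0
image = height * psf                       # Isolated object, same shape as the filter.
peak_value = numpy.sum(image * psf_norm)   # Convolution value at the peak.
rescale = 1.0 / numpy.sum(psf * psf_norm)
print(peak_value * rescale)                # 10.0, up to round-off.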
Example #8
def test_matched_filter5():
    """
    Verify that the filter results are correct.
    """
    x_size = 80
    y_size = 90

    objects = numpy.zeros((1, 5))

    # Make filter with unit sum.
    objects[0,:] = [x_size/2, y_size/2, 1.0, 1.0, 1.0]
    psf = dg.drawGaussians((x_size, y_size), objects)
    psf = psf/numpy.sum(psf)
    flt = matchedFilterC.MatchedFilter(psf)

    # Make test image.
    image = numpy.zeros((x_size, y_size))
    image[int(x_size/2), int(y_size/2)] = float(100)

    mf_conv = flt.convolve(image)

    t1 = numpy.fft.fft2(recenterPSF.recenterPSF(psf))
    t2 = numpy.fft.fft2(image)
    np_conv = numpy.real(numpy.fft.ifft2(t1*t2))

    assert(numpy.allclose(mf_conv, np_conv))

    flt.cleanup()
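
The recenterPSF step matters because FFT convolution is circular and treats index (0, 0) as the kernel origin, while the PSF is drawn centered in the array; without moving the PSF center to the corner the output would be displaced by half the array size. A small numpy illustration of the concept (numpy.fft.ifftshift performs the corner-centering for even-sized arrays; the actual recenterPSF implementation may differ in detail):

import numpy

psf = numpy.zeros((8, 8))
psf[4, 4] = 1.0                # Delta "PSF" centered in the array.
image = numpy.zeros((8, 8))
image[2, 3] = 1.0

# Without recentering, the circular convolution shifts the image by (4, 4).
shifted = numpy.real(numpy.fft.ifft2(numpy.fft.fft2(psf) * numpy.fft.fft2(image)))
print(numpy.unravel_index(numpy.argmax(shifted), shifted.shape))    # (6, 7)

# With the PSF center moved to (0, 0), the image stays in place.
centered = numpy.real(numpy.fft.ifft2(
    numpy.fft.fft2(numpy.fft.ifftshift(psf)) * numpy.fft.fft2(image)))
print(numpy.unravel_index(numpy.argmax(centered), centered.shape))  # (2, 3)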
Example #9
    def newImage(self, new_image):
        fitting.PeakFinder.newImage(self, new_image)
        
        #
        # If they do not already exist, create filter objects from
        # the best-fit spline at different z values of the PSF.
        #
        # As not all PSFs will be maximal in the center, we can't just
        # use the image intensity at the center as the starting
        # height value. Instead we will use the intensity at the
        # peak center of the convolved image, then adjust this
        # value by the height_rescale parameter.
        #
        if (len(self.mfilter) == 0):
            for mfilter_z in self.mfilter_z:
                psf = self.s_to_psf.getPSF(mfilter_z,
                                           shape = new_image.shape,
                                           normalize = False)
                psf_norm = psf/numpy.sum(psf)
                self.height_rescale.append(1.0/numpy.sum(psf * psf_norm))
                self.mfilter.append(matchedFilterC.MatchedFilter(psf_norm))

                # Save a picture of the PSF for debugging purposes.
                if False:
                    print("psf max", numpy.max(psf))
                    temp = 10000.0 * psf + 100.0
                    filename = "psf_{0:.3f}.tif".format(mfilter_z)
                    tifffile.imsave(filename, temp.astype(numpy.uint16))

        self.taken = []
        for i in range(len(self.mfilter)):
            self.taken.append(numpy.zeros(new_image.shape, dtype=numpy.int32))
Example #10
    def findROI(self, image):
        """
        Finds the ROI.
        """
        assert (image.flags['C_CONTIGUOUS']), "Image is not C contiguous!"
        assert (
            image.dtype == numpy.float64), "Image is not numpy.float64 type."

        self.mxf.resetTaken()

        # Create convolution object, if we have not already done this.
        if self.fg_filter is None:
            fg_psf = fitting.gaussianPSF(image.shape, self.sigma)
            self.fg_filter = matchedFilterC.MatchedFilter(fg_psf)

        # Find peaks.
        smoothed_image = self.fg_filter.convolve(image)
        [x, y, z, h] = self.mxf.findMaxima([smoothed_image], want_height=True)

        # Check whether any peaks were found.
        if (x.size == 0):
            return [0, 0, False]

        # Slice out ROI.
        max_index = numpy.argmax(h)
        mx = int(round(x[max_index])) + 1
        my = int(round(y[max_index])) + 1
        rs = self.roi_size
        roi = image[my - rs:my + rs, mx - rs:mx + rs]
        roi -= numpy.min(roi)

        return [mx, my, roi]
Example #11
    def findFitPeak(self, image):
        """
        Returns the 2D gaussian fit to the brightest peak in the image.
        """
        # Convert the image and add an offset; this keeps the MLE fitter from
        # overfitting the background and/or taking logs of zero.
        #
        image = numpy.ascontiguousarray(image, dtype = numpy.float64)
        image += self.offset
        
        self.mxf.resetTaken()

        # Create the peak fitter object, if we have not already done this.
        #
        if self.mfit is None:
            background = numpy.zeros(image.shape)

            # Create convolution filter.
            fg_psf = fitting.gaussianPSF(image.shape, self.sigma)
            self.fg_filter = matchedFilterC.MatchedFilter(fg_psf)

            # Create fitter.
            self.mfit = daoFitC.MultiFitter2DFixed(roi_size = self.roi_size)
            #self.mfit = daoFitC.MultiFitter2D()
            #self.mfit = daoFitC.MultiFitter3D()
            #self.mfit.default_tol = 1.0e-3
            self.mfit.initializeC(image)
            self.mfit.newImage(image)
            self.mfit.newBackground(background)
        else:
            self.mfit.newImage(image)

        # Find peaks.
        smoothed_image = self.fg_filter.convolve(image)
        [x, y, z, h] = self.mxf.findMaxima([smoothed_image], want_height = True)

        # Check whether any peaks were found.
        if(x.size == 0):
            return [0, 0, False]

        max_index = numpy.argmax(h)

        peaks = {"x" : numpy.array([x[max_index]]),
                 "y" : numpy.array([y[max_index]]),
                 "z" : numpy.array([z[max_index]]),
                 "sigma" : numpy.array([self.sigma])}

        # Pass peaks to fitter & fit.
        self.mfit.newPeaks(peaks, "finder")
        self.mfit.doFit(max_iterations = 50)

        # Check for fit convergence.
        if (self.mfit.getUnconverged() == 0):
            # Return peak location.
            x = self.mfit.getPeakProperty("x")
            y = self.mfit.getPeakProperty("y")
            return [y[0], x[0], True]
        else:
            return [0, 0, False]
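
On the offset added at the top of findFitPeak: a Poisson maximum-likelihood fit evaluates terms of the form n * log(mu) at every pixel, so a model value mu = 0 under a pixel with nonzero counts makes the log-likelihood undefined, and a near-zero background makes those terms numerically unstable. Raising the whole image by a constant floor avoids both failure modes, which is what the comment about overfitting the background and taking logs of zero refers to.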
Example #12
    def newImage(self, new_image):
        super(PeakFinderGaussian, self).newImage(new_image)

        #
        # Create matched filter for foreground as well as a matched filter
        # for calculating the expected variance of the background if it was
        # smoothed on the same scale as the foreground.
        #
        if (self.fg_mfilter is None) and self.parameters.hasAttr("foreground_sigma"):
            if (self.parameters.getAttr("foreground_sigma") > 0.0):
                fg_psf = gaussianPSF(
                    new_image.shape,
                    self.parameters.getAttr("foreground_sigma"))
                self.fg_mfilter = matchedFilterC.MatchedFilter(fg_psf,
                                                               memoize=True,
                                                               max_diff=1.0e-3)
                self.fg_vfilter = matchedFilterC.MatchedFilter(fg_psf * fg_psf,
                                                               memoize=True,
                                                               max_diff=1.0e-3)
Example #13
    def newImage(self, new_image):
        fitting.PeakFinder.newImage(self, new_image)

        # If it does not already exist, create a Gaussian filter object.
        if self.mfilter is None:
            psf = dg.drawGaussiansXY(new_image.shape,
                                     numpy.array([0.5 * new_image.shape[0]]),
                                     numpy.array([0.5 * new_image.shape[1]]),
                                     sigma=self.filter_sigma)
            psf = psf / numpy.sum(psf)
            self.mfilter = matchedFilterC.MatchedFilter(psf)
Example #14
def test_matched_filter3():
    """
    Test memoization.
    """
    x_size = 40
    y_size = 50

    objects = numpy.zeros((1, 5))

    # Make filter with unit sum.    
    objects[0,:] = [x_size/2, y_size/2, 1.0, 2.0, 2.0]
    psf = dg.drawGaussians((x_size, y_size), objects)
    psf_norm = psf/numpy.sum(psf)
    flt = matchedFilterC.MatchedFilter(psf_norm, memoize = True, max_diff = 0.1)

    # Convolve first image.
    image = numpy.zeros((x_size, y_size))
    image[int(x_size/2),int(y_size/2)] = 1.0

    res1 = flt.convolve(image)

    # Repeat with approximately the same image.
    image = numpy.zeros((x_size, y_size))
    image[int(x_size/2),int(y_size/2)] = 1.05

    res2 = flt.convolve(image)
    assert(abs(numpy.max(res2 - res1)) < 1.0e-12)

    # Repeat with a different image.
    image = numpy.zeros((x_size, y_size))
    image[int(x_size/2),int(y_size/2)] = 1.1

    res2 = flt.convolve(image)
    assert(abs(numpy.max(res2 - res1)) > 1.0e-12)

    # Repeat with original image to verify update.
    image = numpy.zeros((x_size, y_size))
    image[int(x_size/2),int(y_size/2)] = 1.0

    res1 = flt.convolve(image)
    assert(abs(numpy.max(res2 - res1)) > 1.0e-12)

    # Repeat with exactly the same image.
    image = numpy.zeros((x_size, y_size))
    image[int(x_size/2),int(y_size/2)] = 1.0

    res2 = flt.convolve(image)
    assert(abs(numpy.max(res2 - res1)) < 1.0e-12)

    flt.cleanup()
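
A minimal sketch of the memoization contract this test exercises, written with plain numpy (a hypothetical stand-in, not the matchedFilterC implementation): cache the last input and result, return the cached result when the new input is within max_diff everywhere, and refresh the cache only on an actual recompute. Not refreshing on a cache hit is what makes the third convolve() in the test recompute: 1.1 is compared against the cached 1.0, not against 1.05.

import numpy

class MemoizedFilter(object):
    # Hypothetical stand-in for MatchedFilter(psf, memoize = True, max_diff = d).

    def __init__(self, psf, max_diff):
        self.max_diff = max_diff
        self.psf_fft = numpy.fft.fft2(numpy.fft.ifftshift(psf))
        self.last_image = None
        self.last_result = None

    def convolve(self, image):
        if ((self.last_image is not None) and
                (numpy.max(numpy.abs(image - self.last_image)) < self.max_diff)):
            return self.last_result        # Cache hit; cache is not refreshed.
        result = numpy.real(numpy.fft.ifft2(self.psf_fft * numpy.fft.fft2(image)))
        self.last_image = numpy.copy(image)
        self.last_result = result
        return result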
Example #15
def test_matched_filter1():
    """
    Verify that the filter results are normalized correctly.
    """
    x_size = 80
    y_size = 90

    objects = numpy.zeros((1, 5))

    # Make filter with unit sum.
    objects[0, :] = [x_size / 2, y_size / 2, 1.0, 1.0, 1.0]
    psf = dg.drawGaussians((x_size, y_size), objects)
    psf = psf / numpy.sum(psf)
    flt = matchedFilterC.MatchedFilter(psf)

    for i in range(1, 5):
        image = numpy.zeros((x_size, y_size))
        image[int(x_size / 2), int(y_size / 2)] = float(i)
        conv = flt.convolve(image)
        assert (abs(numpy.sum(image) - numpy.sum(conv)) < 1.0e-6)

    flt.cleanup()
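
Why this holds: by linearity, the sum of a circular convolution factorizes as numpy.sum(conv) = numpy.sum(psf) * numpy.sum(image), so a unit-sum filter preserves the total intensity of the image no matter what value is placed at the center pixel.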
Example #16
    def newImage(self, new_image):
        """
        This is called once at the start of the analysis of a new image.
        
        new_image - A 2D numpy array.
        """
        # Make a copy of the starting image.
        #
        # FIXME: Is this copy necessary? We're not doing this in multiplane.
        #
        self.image = numpy.copy(new_image)

        # Initialize new peak minimum threshold. If we are doing more
        # than one iteration we start a bit higher and come down to
        # the specified threshold.
        if (self.iterations > 4):
            self.cur_threshold = self.threshold + 4.0
        else:
            self.cur_threshold = self.threshold + float(self.iterations)

        # Create mask to limit peak finding to a user-defined sub-region of the image.
        if self.peak_mask is None:
            self.peak_mask = peakMask(new_image.shape, self.parameters,
                                      self.margin)

        # Create filter objects if necessary.
        if self.bg_filter is None:

            # Create matched filter for background.
            bg_psf = gaussianPSF(new_image.shape,
                                 self.parameters.getAttr("background_sigma"))
            self.bg_filter = matchedFilterC.MatchedFilter(
                bg_psf,
                fftw_estimate=self.parameters.getAttr("fftw_estimate"),
                memoize=True,
                max_diff=1.0e-3)

        # Reset maxima finder.
        self.mfinder.resetTaken()
Example #17
    def findFitPeak(self, image):
        """
        Returns the optimal alignment (based on the correlation score) between
        a Gaussian and the brightest peak in the image.
        """
        image = numpy.ascontiguousarray(image, dtype=numpy.float64)

        self.mxf.resetTaken()

        # Create convolution object, if we have not already done this.
        if self.fg_filter is None:
            fg_psf = fitting.gaussianPSF(image.shape, self.sigma)
            self.fg_filter = matchedFilterC.MatchedFilter(fg_psf)

        # Find peaks.
        smoothed_image = self.fg_filter.convolve(image)
        [x, y, z, h] = self.mxf.findMaxima([smoothed_image], want_height=True)

        # Check whether any peaks were found.
        if (x.size == 0):
            return [0, 0, False]

        # Slice out ROI.
        max_index = numpy.argmax(h)
        mx = int(round(x[max_index])) + 1
        my = int(round(y[max_index])) + 1
        rs = self.roi_size
        roi = image[my - rs:my + rs, mx - rs:mx + rs]
        roi -= numpy.min(roi)

        # Pass to aligner and find optimal offset.
        self.c2dg.setImage(roi)
        [disp, success, fun, status] = self.c2dg.maximize()
        if (success) or (status == 2):
            return [my + disp[0] - 0.5, mx + disp[1] - 0.5, True]
        else:
            return [0, 0, False]
Example #18
    def setVariances(self, variances):
        """
        We initialize the following here because at __init__ we
        don't know how big the images are. This is called after
        the analysis specific version pads the variances and
        creates the mfilters[] and the vfilters[] class members.
        
        Note the assumption that every frame in all the movies
        is the same size.
        """

        # Create mask to limit peak finding to a user-defined sub-region of the image.
        #
        self.peak_mask = fitting.peakMask(variances[0].shape, self.parameters,
                                          self.margin)

        # Create matched filter for background. There is one of these for
        # each imaging plane for the benefit of memoization.
        #
        for i in range(self.n_channels):
            bg_psf = fitting.gaussianPSF(
                variances[0].shape,
                self.parameters.getAttr("background_sigma"))
            self.bg_filters.append(
                matchedFilterC.MatchedFilter(bg_psf,
                                             memoize=True,
                                             max_diff=1.0e-3))

        # Process variance arrays now as they don't change from frame
        # to frame.
        #
        # This initializes the self.variances array with a list
        # of lists with the same organization as foreground and
        # psf / variance filters.
        #
        # Use variance filter. I now think this is correct as this is
        # also what we are doing with the image background term. In
        # the case of the image background we are estimating the
        # variance under the assumption that it is Poisson so the
        # mean of the background is the variance. With the cameras
        # we know what the variance is because we measured it. Now
        # we need to weight it properly given the PSF filter that
        # we are applying to the foreground.
        #

        # Iterate over z values.
        #
        for i in range(len(self.mfilters)):
            variance = numpy.zeros(variances[0].shape)

            # Iterate over channels / planes.
            for j in range(len(self.mfilters[i])):

                # Convolve variance with the appropriate variance filter.
                conv_var = self.vfilters[i][j].convolve(variances[j])

                # Transform variance to the channel 0 frame.
                if self.atrans[j] is None:
                    variance += conv_var
                else:
                    variance += self.atrans[j].transform(conv_var)

            self.variances.append(variance)

        # Save results if needed for debugging purposes.
        if self.check_mode:
            with tifffile.TiffWriter("camera_variances.tif") as tf:
                for var in self.variances:
                    tf.save(var.astype(numpy.float32))
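
The long comment above leans on two standard facts: for a Poisson background the per-pixel variance equals the per-pixel mean, so a smoothed background image doubles as a background-variance estimate; and pushing the measured camera variances through the vfilters (the squared-PSF filters) applies the same w * w error propagation sketched after Example #1, making the camera term directly comparable to the filtered foreground. Example #20 below makes the opposite choice (PSF filter rather than variance filter) for the same quantity; the "I now think this is correct" here apparently records the author revising that earlier approach.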
Example #19
def findOffsets(base_name,
                params_file,
                background_scale=4.0,
                foreground_scale=1.0):
    """
    The 'main' function of this module.

    base_name - The basename for the group of movies.
    params_file - An analysis XML file containing the details for this experiment.
    background_scale - Features in the background change on this scale (in pixels)
                       or more slowly.
    foreground_scale - Features that change on this scale are likely foreground.

    Notes: 
      1. This only checks a limited range of offsets between the two channels.
      2. This assumes that the movies are longer than just a few frames.
    """
    n_tests = 10
    search_range = 5

    # Load parameters.
    parameters = params.ParametersMultiplane().initFromFile(params_file)

    # Load the movies from each camera.
    n_channels = 0
    movies = []
    for ext in mpUtil.getExtAttrs(parameters):
        movie_name = base_name + parameters.getAttr(ext)
        movies.append(datareader.inferReader(movie_name))
        n_channels += 1

    print("Found", n_channels, "movies.")

    # Load sCMOS calibration data.
    offsets = []
    gains = []
    for calib_name in mpUtil.getCalibrationAttrs(parameters):
        [offset, variance, gain,
         rqe] = analysisIO.loadCMOSCalibration(parameters.getAttr(calib_name))
        offsets.append(offset)
        gains.append(1.0 / gain)

    assert (len(offsets) == n_channels)

    # Load the plane to plane mapping data & create affine transform objects.
    mappings = {}
    with open(parameters.getAttr("mapping"), 'rb') as fp:
        mappings = pickle.load(fp)

    atrans = []
    for i in range(n_channels - 1):
        xt = mappings["0_" + str(i + 1) + "_x"]
        yt = mappings["0_" + str(i + 1) + "_y"]
        atrans.append(affineTransformC.AffineTransform(xt=xt, yt=yt))

    # Create background and foreground variance filters.
    #
    # FIXME: Is this right for movies that are not square?
    #
    [y_size, x_size] = movies[0].filmSize()[:2]

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5 * x_size]),
                             numpy.array([0.5 * y_size]),
                             sigma=background_scale)
    psf = psf / numpy.sum(psf)
    bg_filter = matchedFilterC.MatchedFilter(psf)

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5 * x_size]),
                             numpy.array([0.5 * y_size]),
                             sigma=foreground_scale)
    psf = psf / numpy.sum(psf)
    fg_filter = matchedFilterC.MatchedFilter(psf)
    var_filter = matchedFilterC.MatchedFilter(psf * psf)

    # Check background estimation.
    if False:
        frame = loadImage(movies[0], 0, offsets[0], gains[0])
        frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
        with tifffile.TiffWriter("bg_estimate.tif") as tif:
            tif.save(frame.astype(numpy.float32))
            tif.save(frame_bg.astype(numpy.float32))
            tif.save((frame - frame_bg).astype(numpy.float32))

    votes = numpy.zeros((n_channels - 1, 2 * search_range + 1))
    for i in range(n_tests):
        print("Test", i)

        # Load reference frame.
        ref_frame = loadImage(movies[0], search_range + i, offsets[0], gains[0])
        ref_frame_bg = estimateBackground(ref_frame, bg_filter, fg_filter,
                                          var_filter)
        ref_frame -= ref_frame_bg

        # Load test frames and measure correlation.
        for j in range(n_channels - 1):
            best_corr = 0.0
            best_offset = 0
            for k in range(-search_range, search_range + 1):
                test_frame = loadImage(movies[j + 1],
                                       search_range + i + k,
                                       offsets[j + 1],
                                       gains[j + 1],
                                       transform=atrans[j])
                test_frame_bg = estimateBackground(test_frame, bg_filter,
                                                   fg_filter, var_filter)
                test_frame -= test_frame_bg
                test_frame_corr = numpy.sum(
                    ref_frame * test_frame) / numpy.sum(test_frame)
                if (test_frame_corr > best_corr):
                    best_corr = test_frame_corr
                    best_offset = k + search_range

            votes[j, best_offset] += 1

    # Print results.
    print("Offset votes:")
    print(votes)

    frame_offsets = [0]
    frame_offsets += list(numpy.argmax(votes, axis=1) - search_range)
    print("Best offsets:")
    for i in range(n_channels):
        print(str(i) + ": " + str(frame_offsets[i]))

    # Create stacks with optimal offsets.
    print("Saving image stacks.")
    for i in range(n_channels):
        with tifffile.TiffWriter("find_offsets_ch" + str(i) + ".tif") as tif:
            for j in range(5):
                if (i == 0):
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i], gains[i])
                else:
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i],
                                      transform=atrans[i - 1])
                frame_bg = estimateBackground(frame, bg_filter, fg_filter,
                                              var_filter)
                frame -= frame_bg
                tif.save(frame.astype(numpy.float32))
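
A hypothetical invocation of findOffsets (the base name and XML file below are placeholders):

findOffsets("movie_01_", "multiplane.xml",
            background_scale = 8.0,
            foreground_scale = 1.0)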
Example #20
    def setVariances(self, variances):

        #
        # Make sure that the number of (sCMOS) variance arrays
        # matches the number of image planes.
        #
        assert (len(variances) == self.n_channels)

        #
        # We initialize the following here because at __init__ we
        # don't know how big the images are.
        #
        # Note the assumption that every frame in all the movies
        # is the same size.
        #

        # Create mask to limit peak finding to a user-defined sub-region of the image.
        self.peak_mask = numpy.ones(variances[0].shape)
        if self.parameters.hasAttr("x_start"):
            self.peak_mask[0:self.parameters.getAttr("x_start") +
                           self.margin, :] = 0.0
        if self.parameters.hasAttr("x_stop"):
            self.peak_mask[self.parameters.getAttr("x_stop") +
                           self.margin:-1, :] = 0.0
        if self.parameters.hasAttr("y_start"):
            self.peak_mask[:, 0:self.parameters.getAttr("y_start") +
                           self.margin] = 0.0
        if self.parameters.hasAttr("y_stop"):
            self.peak_mask[:,
                           self.parameters.getAttr("y_stop") +
                           self.margin:-1] = 0.0

        #
        # Create mpUtilC.MpUtil object that is used to do a lot of the
        # peak list manipulations.
        #
        self.mpu = mpUtilC.MpUtil(radius=self.new_peak_radius,
                                  neighborhood=self.neighborhood,
                                  im_size_x=variances[0].shape[1],
                                  im_size_y=variances[0].shape[0],
                                  n_channels=self.n_channels,
                                  n_zplanes=len(self.z_values),
                                  margin=self.margin)

        #
        # Load mappings file again so that we can set the transforms for
        # the MpUtil object.
        #
        # Use self.margin - 1, because we added 1 to the x,y coordinates
        # when we saved them, see sa_library.i3dtype.createFromMultiFit().
        #
        [xt, yt] = mpUtilC.loadMappings(self.mapping_filename,
                                        self.margin - 1)[:2]
        self.mpu.setTransforms(xt, yt)

        #
        # Now that we have the MpUtil object we can split the input peak
        # locations to create a list for each channel.
        #
        if self.peak_locations is not None:
            self.peak_locations = self.mpu.splitPeaks(self.peak_locations)

        #
        # Create "foreground" and "variance" filters, as well as the
        # height rescaling array.
        #
        # These are stored in a list indexed by z value, then by
        # channel / plane. So self.mfilters[1][2] is the filter
        # for z value 1, plane 2.
        #
        for i, mfilter_z in enumerate(self.mfilters_z):
            self.height_rescale.append([])
            self.mfilters.append([])
            self.vfilters.append([])

            for j, s_to_psf in enumerate(self.s_to_psfs):
                psf = s_to_psf.getPSF(mfilter_z,
                                      shape=variances[0].shape,
                                      normalize=False)

                #
                # We are assuming that the psf has no negative values,
                # or if it does that they are very small.
                #
                psf_norm = psf / numpy.sum(psf)
                self.mfilters[i].append(matchedFilterC.MatchedFilter(psf_norm))
                self.vfilters[i].append(
                    matchedFilterC.MatchedFilter(psf_norm * psf_norm))

                self.height_rescale[i].append(1.0 / numpy.sum(psf * psf_norm))

                # Save a picture of the PSF for debugging purposes.
                if False:
                    print("psf max", numpy.max(psf))
                    filename = "psf_z{0:.3f}_c{1:d}.tif".format(mfilter_z, j)
                    tifffile.imsave(filename, psf.astype(numpy.float32))

        # "background" filter.
        psf = dg.drawGaussiansXY(variances[0].shape,
                                 numpy.array([0.5 * variances[0].shape[0]]),
                                 numpy.array([0.5 * variances[0].shape[1]]),
                                 sigma=self.bg_filter_sigma)
        psf = psf / numpy.sum(psf)
        self.bg_filter = matchedFilterC.MatchedFilter(psf)

        #
        # Process variance arrays now as they don't change from frame
        # to frame.
        #
        # This initializes the self.variances array with a list
        # of lists with the same organization as foreground and
        # psf / variance filters.
        #
        # Use PSF filter and not variance filter here as this is the
        # measured camera variance.
        #

        # Iterate over z values.
        for i in range(len(self.mfilters)):
            variance = numpy.zeros(variances[0].shape)

            # Iterate over channels / planes.
            for j in range(len(self.mfilters[i])):

                # Convolve variance with the appropriate PSF filter.
                conv_var = self.mfilters[i][j].convolve(variances[j])

                # Transform variance to the channel 0 frame.
                if self.atrans[j] is None:
                    variance += conv_var
                else:
                    variance += self.atrans[j].transform(conv_var)

            self.variances.append(variance)