Example #1
def initFindAndFit(parameters):
    """
    Create and return a fittingMp.MPFinderFitter object.
    """
    # Create PSF objects.
    psf_objects = initPSFObjects(parameters)

    # Create peak finder.
    finder = fittingMp.MPPeakFinderArb(parameters=parameters,
                                       psf_objects=psf_objects)

    # Load sCMOS calibration data.
    #
    # Note: Gain is expected to be in units of ADU per photo-electron.
    #
    rqes = []
    variances = []
    for calib_name in mpUtil.getCalibrationAttrs(parameters):
        [offset, variance, gain,
         rqe] = analysisIO.loadCMOSCalibration(parameters.getAttr(calib_name))
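        # The calibration variance is in ADU^2; dividing by gain^2 (ADU per
        # photo-electron, squared) converts it to photo-electrons^2.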
        variances.append(variance / (gain * gain))
        rqes.append(rqe)

    # Set variance in the peak finder. This method also pads the variance
    # to the correct size and performs additional initializations.
    #
    variances = finder.setVariances(variances)

    # Pad the rqes to the correct size.
    rqes = list(map(lambda x: finder.padArray(x), rqes))

    # Create mpFitC.MPFit object.
    #
    mfitter = initFitter(finder.margin, parameters, psf_objects, rqes,
                         variances)

    # Create peak fitter.
    #
    fitter = fittingMp.MPPeakFitterArb(mfitter=mfitter, parameters=parameters)

    # Specify which properties we want (for each channel) from the
    # analysis. Note that some of these may be duplicates of each
    # other, for example if the heights are not independent.
    #
    properties = [
        "background", "error", "height", "iterations", "significance", "sum",
        "x", "y", "z"
    ]

    return fittingMp.MPFinderFitter(peak_finder=finder,
                                    peak_fitter=fitter,
                                    properties=properties)
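
A minimal usage sketch follows; the settings file name is illustrative, and the parameters class simply reuses the loading convention shown by the findOffsets example below.

import storm_analysis.sa_library.parameters as params

# Sketch only -- "multiplane.xml" is a hypothetical settings file name; substitute
# the parameters class appropriate for the analysis being run.
m_params = params.ParametersMultiplane().initFromFile("multiplane.xml")
finder_fitter = initFindAndFit(m_params)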
Example #3
    def __init__(self, base_name=None, parameters=None, **kwds):
        super(MPMovieReader, self).__init__(**kwds)

        self.backgrounds = []
        self.bg_estimators = []
        self.cur_frame = 0
        self.frames = []
        self.max_frame = 0
        self.offsets = []
        self.parameters = parameters
        self.planes = []

        #
        # Load the movies and offsets for each plane/channel. At present
        # multiplane expects the sCMOS camera calibration data.
        #
        calib_names = mpUtil.getCalibrationAttrs(parameters)
        for i, ext in enumerate(mpUtil.getExtAttrs(parameters)):
            movie_name = base_name + parameters.getAttr(ext)
            self.planes.append(
                analysisIO.FrameReaderSCMOS(
                    parameters=parameters,
                    movie_file=movie_name,
                    calibration_file=parameters.getAttr(calib_names[i])))

        for offset in mpUtil.getOffsetAttrs(parameters):
            self.offsets.append(parameters.getAttr(offset))

        print("Found data for", len(self.planes), "planes.")

        [self.movie_x, self.movie_y, self.movie_l] = self.planes[0].filmSize()
        self.movie_l -= self.offsets[0]

        # Check if the movies for the other channels (adjusted for their offsets)
        # are shorter than the movie for channel 0.
        #
        for i in range(1, len(self.planes)):
            [px, py, pl] = self.planes[i].filmSize()
            pl -= self.offsets[i]
            if (pl < self.movie_l):
                self.movie_l = pl

        # Assert that all the movies are the same size, at least in x,y.
        for i in range(1, len(self.planes)):
            assert (self.movie_x == self.planes[i].filmSize()[0])
            assert (self.movie_y == self.planes[i].filmSize()[1])
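
A construction sketch; the per-channel movie extensions, frame offsets and sCMOS calibration files are all taken from the parameters object.

# Sketch only -- "movie_01_" is a hypothetical base name, and 'parameters' is
# assumed to be an already loaded multiplane parameters object (see the sketch
# after Example #1).
movie_reader = MPMovieReader(base_name="movie_01_", parameters=parameters)
print(movie_reader.movie_x, movie_reader.movie_y, movie_reader.movie_l)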
Example #5
def findOffsets(base_name,
                params_file,
                background_scale=4.0,
                foreground_scale=1.0):
    """
    The 'main' function of this module.

    base_name - The basename for the group of movies.
    params_file - An analysis XML file containing the details for this experiment.
    background_scale - Features in the background change on this scale (in pixels)
                       or more slowly.
    foreground_scale - Features that change on this scale are likely foreground.

    Notes: 
      1. This only checks a limited range of frame offsets between channel 0
         and the other channels.
      2. This assumes that the movies are longer than just a few frames.
    """
    n_tests = 10
    search_range = 5

    # Load parameters.
    parameters = params.ParametersMultiplane().initFromFile(params_file)

    # Load the movies from each camera.
    n_channels = 0
    movies = []
    for ext in mpUtil.getExtAttrs(parameters):
        movie_name = base_name + parameters.getAttr(ext)
        movies.append(datareader.inferReader(movie_name))
        n_channels += 1

    print("Found", n_channels, "movies.")

    # Load sCMOS calibration data.
    offsets = []
    gains = []
    for calib_name in mpUtil.getCalibrationAttrs(parameters):
        [offset, variance, gain,
         rqe] = analysisIO.loadCMOSCalibration(parameters.getAttr(calib_name))
        offsets.append(offset)
        gains.append(1.0 / gain)

    assert (len(offsets) == n_channels)

    # Load the plane to plane mapping data & create affine transform objects.
    mappings = {}
    with open(parameters.getAttr("mapping"), 'rb') as fp:
        mappings = pickle.load(fp)

    atrans = []
    for i in range(n_channels - 1):
        xt = mappings["0_" + str(i + 1) + "_x"]
        yt = mappings["0_" + str(i + 1) + "_y"]
        atrans.append(affineTransformC.AffineTransform(xt=xt, yt=yt))

    # Create background and foreground variance filters.
    #
    # FIXME: Is this right for movies that are not square?
    #
    [y_size, x_size] = movies[0].filmSize()[:2]

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5 * x_size]),
                             numpy.array([0.5 * y_size]),
                             sigma=background_scale)
    psf = psf / numpy.sum(psf)
    bg_filter = matchedFilterC.MatchedFilter(psf)

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5 * x_size]),
                             numpy.array([0.5 * y_size]),
                             sigma=foreground_scale)
    psf = psf / numpy.sum(psf)
    fg_filter = matchedFilterC.MatchedFilter(psf)
    var_filter = matchedFilterC.MatchedFilter(psf * psf)
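    # Note: filtering a per-pixel variance image with the squared kernel gives
    # the variance of the correspondingly smoothed image, since
    # Var(sum_i w_i * x_i) = sum_i w_i^2 * Var(x_i) for independent pixels;
    # that is presumably what var_filter is used for in estimateBackground().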

    # Check background estimation.
    if False:
        frame = loadImage(movies[0], 0, offsets[0], gains[0])
        frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
        with tifffile.TiffWriter("bg_estimate.tif") as tif:
            tif.save(frame.astype(numpy.float32))
            tif.save(frame_bg.astype(numpy.float32))
            tif.save((frame - frame_bg).astype(numpy.float32))

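    # For each non-reference channel, vote over the candidate frame offsets
    # (-search_range .. +search_range) relative to channel 0; after n_tests
    # reference frames the offset with the most votes wins.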
    votes = numpy.zeros((n_channels - 1, 2 * search_range + 1))
    for i in range(n_tests):
        print("Test", i)

        # Load reference frame.
        ref_frame = loadImage(movies[0], search_range + i, offsets[0], gains[0])
        ref_frame_bg = estimateBackground(ref_frame, bg_filter, fg_filter,
                                          var_filter)
        ref_frame -= ref_frame_bg

        # Load test frames and measure correlation.
        for j in range(n_channels - 1):
            best_corr = 0.0
            best_offset = 0
            for k in range(-search_range, search_range + 1):
                test_frame = loadImage(movies[j + 1],
                                       search_range + i + k,
                                       offsets[j + 1],
                                       gains[j + 1],
                                       transform=atrans[j])
                test_frame_bg = estimateBackground(test_frame, bg_filter,
                                                   fg_filter, var_filter)
                test_frame -= test_frame_bg
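                # Score this candidate offset by the overlap of the two
                # background-subtracted frames, normalized by the total
                # intensity of the test frame.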
                test_frame_corr = numpy.sum(
                    ref_frame * test_frame) / numpy.sum(test_frame)
                if (test_frame_corr > best_corr):
                    best_corr = test_frame_corr
                    best_offset = k + search_range

            votes[j, best_offset] += 1

    # Print results.
    print("Offset votes:")
    print(votes)

    frame_offsets = [0]
    frame_offsets += list(numpy.argmax(votes, axis=1) - search_range)
    print("Best offsets:")
    for i in range(n_channels):
        print(str(i) + ": " + str(frame_offsets[i]))

    # Create stacks with optimal offsets.
    print("Saving image stacks.")
    for i in range(n_channels):
        with tifffile.TiffWriter("find_offsets_ch" + str(i) + ".tif") as tif:
            for j in range(5):
                if (i == 0):
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i], gains[i])
                else:
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i],
                                      transform=atrans[i - 1])
                frame_bg = estimateBackground(frame, bg_filter, fg_filter,
                                              var_filter)
                frame -= frame_bg
                tif.save(frame.astype(numpy.float32))
Example #6
def findOffsets(base_name,
                params_file,
                background_scale = 4.0,
                foreground_scale = 1.0,
                im_slice = None):
    """
    The 'main' function of this module.

    base_name - The basename for the group of movies.
    params_file - An analysis XML file containing the details for this experiment.
    background_scale - Features in the background change on this scale (in pixels)
                       or more slowly.
    foreground_scale - Features that change on this scale are likely foreground.
    im_slice - A slice object created for example with numpy.s_ to limit the analysis
               to a smaller AOI.

    Notes: 
      1. This only checks a limited range of frame offsets between channel 0
         and the other channels.
      2. This assumes that the movies are longer than just a few frames.
    """
    n_tests = 10
    search_range = 5
    
    # Load parameters.
    parameters = params.ParametersMultiplane().initFromFile(params_file)

    # Load the movies from each camera.
    n_channels = 0
    movies = []
    for ext in mpUtil.getExtAttrs(parameters):
        movie_name = base_name + parameters.getAttr(ext)
        movies.append(datareader.inferReader(movie_name))
        n_channels += 1

    print("Found", n_channels, "movies.")

    # Load sCMOS calibration data.
    offsets = []
    gains = []
    for calib_name in mpUtil.getCalibrationAttrs(parameters):
        [offset, variance, gain, rqe] = analysisIO.loadCMOSCalibration(parameters.getAttr(calib_name))
        offsets.append(offset)
        gains.append(1.0/gain)

    assert(len(offsets) == n_channels)
    
    # Load the plane to plane mapping data & create affine transform objects.
    mappings = {}
    with open(parameters.getAttr("mapping"), 'rb') as fp:
        mappings = pickle.load(fp)

    atrans = []
    for i in range(n_channels-1):
        xt = mappings["0_" + str(i+1) + "_x"]
        yt = mappings["0_" + str(i+1) + "_y"]
        atrans.append(affineTransformC.AffineTransform(xt = xt, yt = yt))

    # Create background and foreground variance filters.
    #
    # FIXME: Is this right for movies that are not square?
    #
    [y_size, x_size] = movies[0].filmSize()[:2]

    if im_slice is not None:
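        # Note: the slices need explicit start and stop values, e.g.
        # numpy.s_[100:300, 100:300]; an open-ended slice such as
        # numpy.s_[:256, :256] has start = None and would break the size
        # calculation below.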
        y_size = im_slice[0].stop - im_slice[0].start
        x_size = im_slice[1].stop - im_slice[1].start
        
    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5*x_size]),
                             numpy.array([0.5*y_size]),
                             sigma = background_scale)
    psf = psf/numpy.sum(psf)
    bg_filter = matchedFilterC.MatchedFilter(psf)

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5*x_size]),
                             numpy.array([0.5*y_size]),
                             sigma = foreground_scale)
    psf = psf/numpy.sum(psf)
    fg_filter = matchedFilterC.MatchedFilter(psf)
    var_filter = matchedFilterC.MatchedFilter(psf*psf)

    # Check background estimation.
    if False:
        frame = loadImage(movies[0], 0, offsets[0], gains[0])
        frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
        with tifffile.TiffWriter("bg_estimate.tif") as tif:
            tif.save(frame.astype(numpy.float32))
            tif.save(frame_bg.astype(numpy.float32))
            tif.save((frame - frame_bg).astype(numpy.float32))

    votes = numpy.zeros((n_channels - 1, 2*search_range+1))
    for i in range(n_tests):
        print("Test", i)
        
        # Load reference frame.
        ref_frame = loadImage(movies[0], search_range + i, offsets[0], gains[0])
        if im_slice is not None:
            ref_frame = ref_frame[im_slice]
        ref_frame_bg = estimateBackground(ref_frame, bg_filter, fg_filter, var_filter)
        ref_frame -= ref_frame_bg

        # Load test frames and measure correlation.
        for j in range(n_channels - 1):
            best_corr = 0.0
            best_offset = 0
            for k in range(-search_range, search_range + 1):
                test_frame = loadImage(movies[j+1], search_range + i + k, offsets[j+1], gains[j+1], transform = atrans[j])
                if im_slice is not None:
                    test_frame = test_frame[im_slice]
                test_frame_bg = estimateBackground(test_frame, bg_filter, fg_filter, var_filter)
                test_frame -= test_frame_bg
                test_frame_corr = numpy.sum(ref_frame*test_frame)/numpy.sum(test_frame)
                if (test_frame_corr > best_corr):
                    best_corr = test_frame_corr
                    best_offset = k + search_range

            votes[j, best_offset] += 1

    # Print results.
    print("Offset votes:")
    print(votes)

    frame_offsets = [0]
    frame_offsets += list(numpy.argmax(votes, axis = 1) - search_range)
    print("Best offsets:")
    for i in range(n_channels):
        print(str(i) + ": " + str(frame_offsets[i]))

    # Create stacks with optimal offsets.
    print("Saving image stacks.")
    for i in range(n_channels):
        with tifffile.TiffWriter(base_name + "_offsets_ch" + str(i) + ".tif") as tif:
            for j in range(5):
                if (i == 0):
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i])
                else:
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i],
                                      transform = atrans[i-1])
                if im_slice is not None:
                    frame = frame[im_slice]
                    
                frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
                frame -= frame_bg
                tif.save(frame.astype(numpy.float32))

    return frame_offsets
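
A usage sketch (hypothetical file names), restricting the analysis to a 200 x 200 pixel AOI with numpy.s_ as described in the docstring above:

# Sketch only -- file names are hypothetical.
frame_offsets = findOffsets("movie_01_", "multiplane.xml",
                            im_slice = numpy.s_[100:300, 100:300])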