Example #1
    def __init__(self, parameters=None, sa_type=None, **kwds):
        super(MPDataWriter, self).__init__(**kwds)

        self.movie_info_set = False
        self.offsets = []

        # Figure out how many planes there are.
        self.n_planes = len(mpUtil.getExtAttrs(parameters))

        # Save frame offsets for each plane.
        for offset in mpUtil.getOffsetAttrs(parameters):
            self.offsets.append(parameters.getAttr(offset))

        # Figure out where to start if the analysis file already exists.
        if os.path.exists(self.filename):
            print(
                "Existing analysis file found. Restarting from last analyzed frame."
            )
            self.h5 = saH5Py.SAH5Py(filename=self.filename)

            self.movie_info_set = True

            # Find the last frame that we analyzed.
            i = self.h5.getMovieLength()
            while (i > 0):
                if self.h5.isAnalyzed(i):
                    break
                i -= 1
            self.start_frame = i

        # Otherwise start from the beginning.
        else:
            self.h5 = saH5Py.SAH5Py(filename=self.filename,
                                    is_existing=False,
                                    sa_type=sa_type)

            # Save analysis parameters.
            etree = parameters.toXMLElementTree(False)
            if (sys.version_info > (3, 0)):
                self.h5.addMetadata(ElementTree.tostring(etree, 'unicode'))
            else:
                self.h5.addMetadata(ElementTree.tostring(etree, 'ISO-8859-1'))

            # Save pixel size.
            self.h5.setPixelSize(parameters.getAttr("pixel_size"))

            # Adjust starting frame based on channel 0 offset.
            if (self.offsets[0] != 0):
                assert (self.offsets[0] >
                        0), "Channel 0 offset cannot be negative."
                self.start_frame = self.offsets[0]
                print("Adjusted start frame to", self.start_frame,
                      "based on channel 0 offset.")

        self.h5.setAnalysisFinished(False)
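
A minimal usage sketch for this writer, assuming MPDataWriter is importable from the multiplane analysis module, that the base class accepts the filename keyword referenced as self.filename above, and that the parameters object is a ParametersMultiplane as in the findOffsets examples below; all file names are placeholders.

# Hypothetical usage sketch; the import path for MPDataWriter, the keyword
# names and the file names are assumptions, not part of the example above.
import storm_analysis.sa_library.parameters as params

parameters = params.ParametersMultiplane().initFromFile("multiplane.xml")
writer = MPDataWriter(parameters=parameters,
                      sa_type="Multiplane",
                      filename="movie_01.hdf5")
# If movie_01.hdf5 already exists, the writer resumes from the last
# analyzed frame instead of overwriting the file.
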
Example #2
    def __init__(self, parameters = None, sa_type = None, **kwds):
        super(MPDataWriter, self).__init__(**kwds)

        self.movie_info_set = False
        self.offsets = []

        # Figure out how many planes there are.
        self.n_planes = len(mpUtil.getExtAttrs(parameters))

        # Save frame offsets for each plane.
        for offset in mpUtil.getOffsetAttrs(parameters):
            self.offsets.append(parameters.getAttr(offset))        

        # Figure out where to start if the analysis file already exists.
        if os.path.exists(self.filename):
            self.h5 = saH5Py.SAH5Py(filename = self.filename)

            self.movie_info_set = True
            
            # Find the last frame that we analyzed.
            i = self.h5.getMovieLength()
            while (i > 0):
                if self.h5.isAnalyzed(i):
                    break
                i -= 1
            self.start_frame = i

        # Otherwise start from the beginning.
        else:
            self.h5 = saH5Py.SAH5Py(filename = self.filename,
                                    is_existing = False,
                                    sa_type = sa_type)
            
            # Save analysis parameters.
            etree = parameters.toXMLElementTree(False)
            if (sys.version_info > (3, 0)):
                self.h5.addMetadata(ElementTree.tostring(etree, 'unicode'))
            else:
                self.h5.addMetadata(ElementTree.tostring(etree, 'ISO-8859-1'))

            # Save pixel size.
            self.h5.setPixelSize(parameters.getAttr("pixel_size"))
            
            # Adjust starting frame based on channel 0 offset.
            if (self.offsets[0] != 0):
                assert(self.offsets[0] > 0), "Channel 0 offset cannot be negative."
                self.start_frame = self.offsets[0]
                print("Adjusted start frame to", self.start_frame, "based on channel 0 offset.")

        self.h5.setAnalysisFinished(False)
Example #3
    def __init__(self, base_name=None, parameters=None, **kwds):
        super(MPMovieReader, self).__init__(**kwds)

        self.backgrounds = []
        self.bg_estimators = []
        self.cur_frame = 0
        self.frames = []
        self.max_frame = 0
        self.offsets = []
        self.parameters = parameters
        self.planes = []

        #
        # Load the movies and offsets for each plane/channel. At present
        # multiplane expects the sCMOS camera calibration data.
        #
        calib_name = mpUtil.getCalibrationAttrs(parameters)
        for i, ext in enumerate(mpUtil.getExtAttrs(parameters)):
            movie_name = base_name + parameters.getAttr(ext)
            self.planes.append(
                analysisIO.FrameReaderSCMOS(
                    parameters=parameters,
                    movie_file=movie_name,
                    calibration_file=parameters.getAttr(calib_name[i])))

        for offset in mpUtil.getOffsetAttrs(parameters):
            self.offsets.append(parameters.getAttr(offset))

        print("Found data for", len(self.planes), "planes.")

        [self.movie_x, self.movie_y, self.movie_l] = self.planes[0].filmSize()
        self.movie_l -= self.offsets[0]

        # Check if the movies for the other channels (adjusted for their offsets)
        # are shorter than the movie for channel 0.
        #
        for i in range(1, len(self.planes)):
            [px, py, pl] = self.planes[i].filmSize()
            pl -= self.offsets[i]
            if (pl < self.movie_l):
                self.movie_l = pl

        # Assert that all the movies are the same size, at least in x,y.
        for i in range(1, len(self.planes)):
            assert (self.movie_x == self.planes[i].filmSize()[0])
            assert (self.movie_y == self.planes[i].filmSize()[1])
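
A usage sketch for the reader under the same assumptions; base_name is the prefix to which each channel's extension attribute is appended, and the file names are placeholders.

# Hypothetical usage sketch; the import path for MPMovieReader and the
# file names are assumptions.
import storm_analysis.sa_library.parameters as params

parameters = params.ParametersMultiplane().initFromFile("multiplane.xml")
reader = MPMovieReader(base_name="movie_01", parameters=parameters)
# Frame size and usable movie length (channel 0 offset and the shortest
# channel already taken into account).
print(reader.movie_x, reader.movie_y, reader.movie_l)
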
Example #4
    def __init__(self, base_name = None, parameters = None, **kwds):
        super(MPMovieReader, self).__init__(**kwds)

        self.backgrounds = []
        self.bg_estimators = []
        self.cur_frame = 0
        self.frames = []
        self.max_frame = 0
        self.offsets = []
        self.parameters = parameters
        self.planes = []

        #
        # Load the movies and offsets for each plane/channel. At present
        # multiplane expects the sCMOS camera calibration data.
        #
        calib_name = mpUtil.getCalibrationAttrs(parameters)
        for i, ext in enumerate(mpUtil.getExtAttrs(parameters)):
            movie_name = base_name + parameters.getAttr(ext)
            self.planes.append(analysisIO.FrameReaderSCMOS(parameters = parameters,
                                                           movie_file = movie_name,
                                                           calibration_file = parameters.getAttr(calib_name[i])))

        for offset in mpUtil.getOffsetAttrs(parameters):
            self.offsets.append(parameters.getAttr(offset))

        print("Found data for", len(self.planes), "planes.")

        [self.movie_x, self.movie_y, self.movie_l] = self.planes[0].filmSize()
        self.movie_l -= self.offsets[0]

        # Check if the movies for the other channels (adjusted for their offsets)
        # are shorter than the movie for channel 0.
        #
        for i in range(1, len(self.planes)):
            [px, py, pl] = self.planes[i].filmSize()
            pl -= self.offsets[i]
            if (pl < self.movie_l):
                self.movie_l = pl

        # Assert that all the movies are the same size, at least in x,y.
        for i in range(1, len(self.planes)):
            assert(self.movie_x == self.planes[i].filmSize()[0])
            assert(self.movie_y == self.planes[i].filmSize()[1])
Example #5
def findOffsets(base_name,
                params_file,
                background_scale=4.0,
                foreground_scale=1.0):
    """
    The 'main' function of this module.

    base_name - The basename for the group of movies.
    params_file - An analysis XML file containing the details for this experiment.
    background_scale - Features in the background change on this scale (in pixels)
                       or more slowly.
    foreground_scale - Features that change on this scale are likely foreground.

    Notes: 
      1. This only checks a limited range of offsets between the two channels.
      2. This assumes that the movies are longer than just a few frames.
    """
    n_tests = 10
    search_range = 5

    # Load parameters.
    parameters = params.ParametersMultiplane().initFromFile(params_file)

    # Load the movies from each camera.
    n_channels = 0
    movies = []
    for ext in mpUtil.getExtAttrs(parameters):
        movie_name = base_name + parameters.getAttr(ext)
        movies.append(datareader.inferReader(movie_name))
        n_channels += 1

    print("Found", n_channels, "movies.")

    # Load sCMOS calibration data.
    offsets = []
    gains = []
    for calib_name in mpUtil.getCalibrationAttrs(parameters):
        [offset, variance, gain,
         rqe] = analysisIO.loadCMOSCalibration(parameters.getAttr(calib_name))
        offsets.append(offset)
        gains.append(1.0 / gain)

    assert (len(offsets) == n_channels)

    # Load the plane to plane mapping data & create affine transform objects.
    mappings = {}
    with open(parameters.getAttr("mapping"), 'rb') as fp:
        mappings = pickle.load(fp)

    atrans = []
    for i in range(n_channels - 1):
        xt = mappings["0_" + str(i + 1) + "_x"]
        yt = mappings["0_" + str(i + 1) + "_y"]
        atrans.append(affineTransformC.AffineTransform(xt=xt, yt=yt))

    # Create background and foreground variance filters.
    #
    # FIXME: Is this right for movies that are not square?
    #
    [y_size, x_size] = movies[0].filmSize()[:2]

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5 * x_size]),
                             numpy.array([0.5 * y_size]),
                             sigma=background_scale)
    psf = psf / numpy.sum(psf)
    bg_filter = matchedFilterC.MatchedFilter(psf)

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5 * x_size]),
                             numpy.array([0.5 * y_size]),
                             sigma=foreground_scale)
    psf = psf / numpy.sum(psf)
    fg_filter = matchedFilterC.MatchedFilter(psf)
    var_filter = matchedFilterC.MatchedFilter(psf * psf)

    # Check background estimation.
    if False:
        frame = loadImage(movies[0], 0, offsets[0], gains[0])
        frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
        with tifffile.TiffWriter("bg_estimate.tif") as tif:
            tif.save(frame.astype(numpy.float32))
            tif.save(frame_bg.astype(numpy.float32))
            tif.save((frame - frame_bg).astype(numpy.float32))

    votes = numpy.zeros((n_channels - 1, 2 * search_range + 1))
    for i in range(n_tests):
        print("Test", i)

        # Load reference frame.
        ref_frame = loadImage(movies[0], search_range + i, offsets[0], gains[0])
        ref_frame_bg = estimateBackground(ref_frame, bg_filter, fg_filter,
                                          var_filter)
        ref_frame -= ref_frame_bg

        # Load test frames and measure correlation.
        for j in range(n_channels - 1):
            best_corr = 0.0
            best_offset = 0
            for k in range(-search_range, search_range + 1):
                test_frame = loadImage(movies[j + 1],
                                       search_range + i + k,
                                       offsets[j + 1],
                                       gains[j + 1],
                                       transform=atrans[j])
                test_frame_bg = estimateBackground(test_frame, bg_filter,
                                                   fg_filter, var_filter)
                test_frame -= test_frame_bg
                test_frame_corr = numpy.sum(
                    ref_frame * test_frame) / numpy.sum(test_frame)
                if (test_frame_corr > best_corr):
                    best_corr = test_frame_corr
                    best_offset = k + search_range

            votes[j, best_offset] += 1

    # Print results.
    print("Offset votes:")
    print(votes)

    frame_offsets = [0]
    frame_offsets += list(numpy.argmax(votes, axis=1) - search_range)
    print("Best offsets:")
    for i in range(n_channels):
        print(str(i) + ": " + str(frame_offsets[i]))

    # Create stacks with optimal offsets.
    print("Saving image stacks.")
    for i in range(n_channels):
        with tifffile.TiffWriter("find_offsets_ch" + str(i) + ".tif") as tif:
            for j in range(5):
                if (i == 0):
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i], gains[i])
                else:
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i],
                                      transform=atrans[i - 1])
                frame_bg = estimateBackground(frame, bg_filter, fg_filter,
                                              var_filter)
                frame -= frame_bg
                tif.save(frame.astype(numpy.float32))
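
A sketch of how this version might be invoked, with placeholder names for the movie basename and the analysis XML file; the scale arguments are just the defaults made explicit.

# Hypothetical call; "movie_01" and "multiplane.xml" are placeholders.
findOffsets("movie_01", "multiplane.xml",
            background_scale=4.0,
            foreground_scale=1.0)
# Prints the offset votes and writes find_offsets_ch<N>.tif stacks
# rendered with the winning per-channel offsets.
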
Example #6
def findOffsets(base_name, params_file, background_scale = 4.0, foreground_scale = 1.0, im_slice = None):
    """
    The 'main' function of this module.

    base_name - The basename for the group of movies.
    params_file - An analysis XML file containing the details for this experiment.
    background_scale - Features in the background change on this scale (in pixels)
                       or more slowly.
    foreground_scale - Features that change on this scale are likely foreground.
    im_slice - A slice object created for example with numpy.s_ to limit the analysis
               to a smaller AOI.

    Notes: 
      1. This only checks a limited range of offsets between the two channels.
      2. This assumes that the movies are longer than just a few frames.
    """
    n_tests = 10
    search_range = 5
    
    # Load parameters.
    parameters = params.ParametersMultiplane().initFromFile(params_file)

    # Load the movies from each camera.
    n_channels = 0
    movies = []
    for ext in mpUtil.getExtAttrs(parameters):
        movie_name = base_name + parameters.getAttr(ext)
        movies.append(datareader.inferReader(movie_name))
        n_channels += 1

    print("Found", n_channels, "movies.")

    # Load sCMOS calibration data.
    offsets = []
    gains = []
    for calib_name in mpUtil.getCalibrationAttrs(parameters):
        [offset, variance, gain, rqe] = analysisIO.loadCMOSCalibration(parameters.getAttr(calib_name))
        offsets.append(offset)
        gains.append(1.0/gain)

    assert(len(offsets) == n_channels)
    
    # Load the plane to plane mapping data & create affine transform objects.
    mappings = {}
    with open(parameters.getAttr("mapping"), 'rb') as fp:
        mappings = pickle.load(fp)

    atrans = []
    for i in range(n_channels-1):
        xt = mappings["0_" + str(i+1) + "_x"]
        yt = mappings["0_" + str(i+1) + "_y"]
        atrans.append(affineTransformC.AffineTransform(xt = xt, yt = yt))

    # Create background and foreground variance filters.
    #
    # FIXME: Is this right for movies that are not square?
    #
    [y_size, x_size] = movies[0].filmSize()[:2]

    if im_slice is not None:
        y_size = im_slice[0].stop - im_slice[0].start
        x_size = im_slice[1].stop - im_slice[1].start
        
    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5*x_size]),
                             numpy.array([0.5*y_size]),
                             sigma = background_scale)
    psf = psf/numpy.sum(psf)
    bg_filter = matchedFilterC.MatchedFilter(psf)

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5*x_size]),
                             numpy.array([0.5*y_size]),
                             sigma = foreground_scale)
    psf = psf/numpy.sum(psf)
    fg_filter = matchedFilterC.MatchedFilter(psf)
    var_filter = matchedFilterC.MatchedFilter(psf*psf)

    # Check background estimation.
    if False:
        frame = loadImage(movies[0], 0, offsets[0], gains[0])
        frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
        with tifffile.TiffWriter("bg_estimate.tif") as tif:
            tif.save(frame.astype(numpy.float32))
            tif.save(frame_bg.astype(numpy.float32))
            tif.save((frame - frame_bg).astype(numpy.float32))

    votes = numpy.zeros((n_channels - 1, 2*search_range+1))
    for i in range(n_tests):
        print("Test", i)
        
        # Load reference frame.
        ref_frame = loadImage(movies[0], search_range + i, offsets[0], gains[0])
        if im_slice is not None:
            ref_frame = ref_frame[im_slice]
        ref_frame_bg = estimateBackground(ref_frame, bg_filter, fg_filter, var_filter)
        ref_frame -= ref_frame_bg

        # Load test frames and measure correlation.
        for j in range(n_channels - 1):
            best_corr = 0.0
            best_offset = 0
            for k in range(-search_range, search_range + 1):
                test_frame = loadImage(movies[j+1], search_range + i + k, offsets[j+1], gains[j+1], transform = atrans[j])
                if im_slice is not None:
                    test_frame = test_frame[im_slice]
                test_frame_bg = estimateBackground(test_frame, bg_filter, fg_filter, var_filter)
                test_frame -= test_frame_bg
                test_frame_corr = numpy.sum(ref_frame*test_frame)/numpy.sum(test_frame)
                if (test_frame_corr > best_corr):
                    best_corr = test_frame_corr
                    best_offset = k + search_range

            votes[j, best_offset] += 1

    # Print results.
    print("Offset votes:")
    print(votes)

    frame_offsets = [0]
    frame_offsets += list(numpy.argmax(votes, axis = 1) - search_range)
    print("Best offsets:")
    for i in range(n_channels):
        print(str(i) + ": " + str(frame_offsets[i]))

    # Create stacks with optimal offsets.
    print("Saving image stacks.")
    for i in range(n_channels):
        with tifffile.TiffWriter(base_name + "_offsets_ch" + str(i) + ".tif") as tif:
            for j in range(5):
                if (i == 0):
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i])
                else:
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i],
                                      transform = atrans[i-1])
                if im_slice is not None:
                    frame = frame[im_slice]
                    
                frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
                frame -= frame_bg
                tif.save(frame.astype(numpy.float32))

    return frame_offsets
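
This variant additionally accepts an im_slice to restrict the correlation search to a smaller AOI and returns the per-channel frame offsets. A sketch with placeholder file names:

# Hypothetical call restricting the analysis to a 200 x 200 pixel AOI.
import numpy

aoi = numpy.s_[100:300, 100:300]
frame_offsets = findOffsets("movie_01", "multiplane.xml", im_slice=aoi)
print("Per-channel frame offsets:", frame_offsets)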