def __init__(self,
                 color=None,
                 locs_name=None,
                 movie_name=None,
                 number=None,
                 **kwds):
        super().__init__(**kwds)
        self.color = color
        self.cur_frame = 0
        self.flip_lr = False
        self.flip_ud = False
        self.fr_height = None
        self.fr_width = None
        self.image = None
        self.locs = None
        self.locs_name = locs_name
        self.movie_name = movie_name
        self.number = number
        self.offset_x = 0
        self.offset_y = 0
        self.pixmap = None

        self.locs_i3 = readinsight3.I3Reader(locs_name)
        self.movie_fp = datareader.inferReader(movie_name)
        self.movie_len = self.movie_fp.filmSize()[2]
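
# A minimal sketch of the reader interface these snippets rely on, assuming a
# hypothetical movie file "movie_01.dax": inferReader() picks a reader from the
# file extension (.dax, .fits, .spe, .tif in these examples), filmSize() returns
# [width, height, length] and loadAFrame(i) returns one frame as a numpy array
# of shape (height, width).
import storm_analysis.sa_library.datareader as datareader

movie = datareader.inferReader("movie_01.dax")
[movie_w, movie_h, movie_l] = movie.filmSize()
frame = movie.loadAFrame(0)
print(movie_w, movie_h, movie_l, frame.shape)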
Example #2
def rollingBallSub(movie_in, movie_out, radius, sigma, offset = 100):
        
    input_movie = datareader.inferReader(movie_in)
    output_dax = datawriter.inferWriter(movie_out)

    rb = RollingBall(radius, sigma)
        
    for i in range(input_movie.filmSize()[2]):

        if((i%10) == 0):
            print("Processing frame", i)

        image = input_movie.loadAFrame(i) - offset

        if False:
            # Disabled alternative: simple Gaussian low-pass background subtraction.
            image = image.astype(numpy.float64)
            lowpass = scipy.ndimage.filters.gaussian_filter(image, sigma)
            sub = image - lowpass
            
        else:
            sub = rb.removeBG(image)
            
        output_dax.addFrame(sub + offset)

    output_dax.close()
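
# A hedged usage sketch for rollingBallSub() above; the movie names are
# hypothetical and the call assumes the function and its RollingBall
# dependency are importable from this module.
rollingBallSub("raw_movie.dax", "rb_subtracted.dax", 10, 1.0, offset=100)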
Example #3
def movieToCalibration(movie_name):
    """
    Calculate calibration data from a movie. This includes the
    mean intensity of each frame, so that drift in the average
    brightness of the light source over the course of the movie
    can be corrected for.

    movie_name - The name of the movie.
    """

    # Open the movie.
    in_file = datareader.inferReader(movie_name)
    [w, h, l] = in_file.filmSize()

    # Calculate frame mean, x & xx.
    frame_mean = numpy.zeros(l)
    N = numpy.zeros((h,w), dtype = numpy.int64)
    NN = numpy.zeros((h,w), dtype = numpy.int64)

    for i in range(l):
        aframe = in_file.loadAFrame(i)
        frame_mean[i] = numpy.mean(aframe)
        
        aframe = aframe.astype(numpy.int64)
        N += aframe
        NN += aframe * aframe

    return [frame_mean, N, NN]
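
# movieToCalibration() returns raw accumulators rather than finished statistics.
# A sketch, under the assumption that the caller wants per-pixel camera
# statistics, of reducing [frame_mean, N, NN] to a mean and variance image
# (the file name is hypothetical):
import numpy

[frame_mean, N, NN] = movieToCalibration("calibration.dax")
n_frames = frame_mean.size
pixel_mean = N / float(n_frames)
pixel_var = NN / float(n_frames) - pixel_mean * pixel_mean
print(numpy.mean(pixel_mean), numpy.mean(pixel_var))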
Example #4
def overlayImage(movie_name, locs_name, frame_number, sx = 8, sy = 8):
    """
    Create an image of a frame with the localizations overlaid.
    """
    frame = datareader.inferReader(movie_name).loadAFrame(frame_number).astype(numpy.float64)
    with saH5Py.SAH5Py(locs_name) as h5:
        locs = h5.getLocalizationsInFrame(frame_number)

    frame = frame - numpy.min(frame)
    frame = frame/numpy.max(frame)
    
    fig = pyplot.figure(figsize = (sx, sy))
    ax = fig.add_subplot(1,1,1)
    ax.imshow(frame, interpolation = 'nearest', cmap = "gray")
    for i in range(locs["x"].size):
        width = 10
        height = 10
        if "xsigma" in locs:
            width = height = 5.0*locs["xsigma"][i]
        if "ysigma" in locs:
            height = 5.0*locs["ysigma"][i]
        ellipse = patches.Ellipse((locs["x"][i], locs["y"][i]), width, height, facecolor='none', edgecolor='g', linewidth = 2)
        ax.add_artist(ellipse)
        
    #ax.scatter(locs["x"], locs["y"], s = 200,
    ax.set_title("Overlay Image")

    pyplot.show()
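
# A hypothetical call to overlayImage(); the movie and HDF5 localization file
# names are placeholders. This overlays the frame 10 localizations on frame 10
# of the movie.
overlayImage("movie_01.dax", "movie_01.hdf5", 10)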
Example #5
    def __init__(self, color = None,
                 locs_name = None,
                 movie_name = None,
                 number = None,
                 **kwds):
        super().__init__(**kwds)
        self.color = color
        self.cur_frame = 0
        self.flip_lr = False
        self.flip_ud = False
        self.fr_height = None
        self.fr_width = None
        self.image = None
        self.locs = None
        self.locs_name = locs_name
        self.movie_name = movie_name
        self.number = number
        self.offset_x = 0
        self.offset_y = 0
        self.pixmap = None

        if (locs_name.endswith(".bin")):
            self.locs_reader = LocalizationReaderI3(locs_name)
        else:
            self.locs_reader = LocalizationReaderH5(locs_name)

        self.movie_fp = datareader.inferReader(movie_name)
        self.movie_len = self.movie_fp.filmSize()[2]
Example #6
def test_io_3():
    """
    Test FITS movie IO.
    """
    movie_h = 50
    movie_w = 40
    movie_l = 10
    
    data = numpy.random.randint(0, 60000, (movie_h, movie_w)).astype(numpy.uint16)

    movie_name = storm_analysis.getPathOutputTest("test_dataio.fits")

    # Write FITS movie.
    wr = datawriter.inferWriter(movie_name)
    for i in range(movie_l):
        wr.addFrame(data)
    wr.close()
        
    # Read & check.
    rd = datareader.inferReader(movie_name)
    [mw, mh, ml] = rd.filmSize()

    assert(mh == movie_h)
    assert(mw == movie_w)
    assert(ml == movie_l)
    assert(numpy.allclose(data, rd.loadAFrame(0)))
Example #7
def test_io_4():
    """
    Test TIF movie IO (1 page, multiple frames per page).
    """
    movie_h = 50
    movie_w = 40
    movie_l = 10
    
    data = numpy.random.randint(0, 60000, (movie_l, movie_h, movie_w)).astype(numpy.uint16)

    movie_name = storm_analysis.getPathOutputTest("test_dataio.tif")

    # Write tif movie.
    with tifffile.TiffWriter(movie_name, imagej = True) as tf:
        tf.save(data, truncate = True)

    # Read & check.
    rd = datareader.inferReader(movie_name)
    [mw, mh, ml] = rd.filmSize()

    assert(mh == movie_h)
    assert(mw == movie_w)
    assert(ml == movie_l)
    for i in range(movie_l):
        assert(numpy.allclose(data[i,:,:], rd.loadAFrame(i)))
Example #8
def overlayImageBeads(movie_name, beads_locs_name, frame_number, sx = 8, sy = 8):
    """
    Create an image of a frame with the bead locations overlaid.

    movie_name - The name of the movie file.
    beads_locs_name - The name of the text file with the bead locations.
    frame_number - Which frame to examine.
    sx - Figure x size in inches.
    sy - Figure y size in inches.
    """
    
    frame = datareader.inferReader(movie_name).loadAFrame(frame_number).astype(numpy.float64)
    frame = frame - numpy.min(frame)
    frame = frame/numpy.max(frame)

    bead_locs = numpy.loadtxt(beads_locs_name)
    locs = {"x" : bead_locs[:,0],
            "y" : bead_locs[:,1]}
    
    fig = pyplot.figure(figsize = (sx, sy))
    ax = fig.add_subplot(1,1,1)
    ax.imshow(frame, interpolation = 'nearest', cmap = "gray")
    for i in range(locs["x"].size):
        width = 10
        height = 10
        ellipse = patches.Ellipse((locs["x"][i], locs["y"][i]), width, height, facecolor='none', edgecolor='g', linewidth = 2)
        ax.add_artist(ellipse)
        
    #ax.scatter(locs["x"], locs["y"], s = 200,
    ax.set_title("Overlay Image")

    pyplot.show()    
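
# A sketch of preparing the bead-location text file and calling
# overlayImageBeads(); the coordinates and file names are made up. The text
# file is one space separated "x y" pair per bead, in pixels, matching the
# numpy.loadtxt() call above.
import numpy

bead_xy = numpy.array([[20.5, 31.0],
                       [102.3, 64.8],
                       [200.1, 150.2]])
numpy.savetxt("beads.txt", bead_xy)

overlayImageBeads("beads.dax", "beads.txt", 0)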
Example #9
def test_io_4():
    """
    Test TIF movie IO (1 page, multiple frames per page).
    """
    movie_h = 50
    movie_w = 40
    movie_l = 10

    data = numpy.random.randint(0, 60000, (movie_l, movie_h, movie_w)).astype(
        numpy.uint16)

    movie_name = storm_analysis.getPathOutputTest("test_dataio.tif")

    # Write tif movie.
    with tifffile.TiffWriter(movie_name, imagej=True) as tf:
        tf.save(data, truncate=True)

    # Read & check.
    rd = datareader.inferReader(movie_name)
    [mw, mh, ml] = rd.filmSize()

    assert (mh == movie_h)
    assert (mw == movie_w)
    assert (ml == movie_l)
    for i in range(movie_l):
        assert (numpy.allclose(data[i, :, :], rd.loadAFrame(i)))
Example #10
    def handleLoadMovie(self):
        movie_filename = QtWidgets.QFileDialog.getOpenFileName(self,
                                                               "Load Movie",
                                                               self.directory,
                                                               "*.dax *.fits *.spe *.tif")[0]
        if movie_filename:            
            self.directory = os.path.dirname(movie_filename)
            self.movie_file = datareader.inferReader(movie_filename)
            [self.film_x, self.film_y, self.film_l] = self.movie_file.filmSize()
            self.ui.fileLabel.setText(movie_filename)
            self.cur_frame = 0

            # Clear molecule lists.
            for elt in [self.locs1_list, self.locs2_list]:
                if elt is not None:
                    elt.cleanUp()
            self.locs1_list = None
            self.locs2_list = None

            # Hide info displays
            self.locs1_table.hideFields()
            self.locs2_table.hideFields()
            
            # Reset view transform.
            self.movie_view.setTransform(QtGui.QTransform())
            
            self.incCurFrame(0)
Example #11
def test_io_3():
    """
    Test FITS movie IO.
    """
    movie_h = 50
    movie_w = 40
    movie_l = 10

    data = numpy.random.randint(0, 60000,
                                (movie_h, movie_w)).astype(numpy.uint16)

    movie_name = storm_analysis.getPathOutputTest("test_dataio.fits")

    # Write FITS movie.
    wr = datawriter.inferWriter(movie_name)
    for i in range(movie_l):
        wr.addFrame(data)
    wr.close()

    # Read & check.
    rd = datareader.inferReader(movie_name)
    [mw, mh, ml] = rd.filmSize()

    assert (mh == movie_h)
    assert (mw == movie_w)
    assert (ml == movie_l)
    assert (numpy.allclose(data, rd.loadAFrame(0)))
Example #12
    def __init__(self, base_name=None, parameters=None):

        self.backgrounds = []
        self.bg_estimators = []
        self.cur_frame = 0
        self.frames = []
        self.max_frame = 0
        self.offsets = []
        self.parameters = parameters
        self.planes = []

        # Load the movies and offsets for each plane/channel.
        for ext in mpUtilC.getExtAttrs(parameters):
            movie_name = base_name + parameters.getAttr(ext)
            self.planes.append(datareader.inferReader(movie_name))

        for offset in mpUtilC.getOffsetAttrs(parameters):
            self.offsets.append(parameters.getAttr(offset))

        print("Found data for", len(self.planes), "planes.")

        [self.movie_x, self.movie_y, self.movie_l] = self.planes[0].filmSize()

        # Assert that all the movies are the same size, at least in x,y.
        for i in range(1, len(self.planes)):
            assert (self.movie_x == self.planes[i].filmSize()[0])
            assert (self.movie_y == self.planes[i].filmSize()[1])
Example #13
def batchAnalysis(analysis_exe,
                  input_directory,
                  output_directory,
                  multi_xml,
                  max_processes=2):
    minimum_length = 100

    # FIXME: Should also handle .tif movies?
    dax_files = glob.glob(input_directory + "*.dax")

    # Figure out which movies to analyze.
    cmd_lines = []
    for movie_file in dax_files:

        movie_obj = datareader.inferReader(movie_file)
        if (movie_obj.filmSize()[2] > minimum_length):

            print("Analyzing:", movie_file)
            basename = os.path.basename(movie_file)
            mlistname = output_directory + "/" + basename[:-4] + "_mlist.bin"
            cmd_lines.append([
                'python', analysis_exe, "--movie", movie_file, "--bin",
                mlistname, "--xml", multi_xml
            ])
    batchRun.batchRun(cmd_lines, max_processes=max_processes)
Example #14
def getFilmSize(filename, i3_data):
    """
    Determine the (analyzed) film size.
    """

    # First try to load meta data.
    metadata = readinsight3.loadI3Metadata(filename, verbose = False)
    if metadata is not None:
        movie_data = metadata.find("movie")
        movie_x = int(movie_data.find("movie_x").text)
        movie_y = int(movie_data.find("movie_y").text)
        movie_l = int(movie_data.find("movie_l").text)

        # Check if analysis stopped before the end of the movie.
        settings = metadata.find("settings")
        max_frame = int(settings.find("max_frame").text)
        if (max_frame > 0):
            movie_l = max_frame
        return [movie_x, movie_y, movie_l]

    # Next try and load the corresponding movie file.
    names = [filename[:-9], filename[:-10]]
    extensions = [".dax", ".spe", ".tif"]
    for name in names:
        for ext in extensions:
            if os.path.exists(name + ext):
                movie_file = datareader.inferReader(name + ext)
                return movie_file.filmSize()

    # Finally, just hope..
    film_l = int(numpy.max(i3_data['fr']))+1
    print("Could not find movie file for", filename, "assuming 256x256x" + str(film_l))
    return [256, 256, film_l]
Example #15
def movieToCalibration(movie_name):
    """
    Calculate calibration data from a movie. This includes the
    mean intensity of each frame, so that drift in the average
    brightness of the light source over the course of the movie
    can be corrected for.

    movie_name - The name of the movie.
    """

    # Open the movie.
    in_file = datareader.inferReader(movie_name)
    [w, h, l] = in_file.filmSize()

    # Calculate frame mean, x & xx.
    frame_mean = numpy.zeros(l)
    N = numpy.zeros((h, w), dtype=numpy.int64)
    NN = numpy.zeros((h, w), dtype=numpy.int64)

    for i in range(l):
        aframe = in_file.loadAFrame(i)
        frame_mean[i] = numpy.mean(aframe)

        aframe = aframe.astype(numpy.int64)
        N += aframe
        NN += aframe * aframe

    return [frame_mean, N, NN]
Example #16
def overlayImage(movie_name, locs_name, frame_number, sx=8, sy=8):
    """
    Create an image of a frame with the localizations overlaid.
    """
    frame = datareader.inferReader(movie_name).loadAFrame(frame_number).astype(
        numpy.float64)
    with saH5Py.SAH5Py(locs_name) as h5:
        locs = h5.getLocalizationsInFrame(frame_number)

    frame = frame - numpy.min(frame)
    frame = frame / numpy.max(frame)

    fig = pyplot.figure(figsize=(sx, sy))
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(frame, interpolation='nearest', cmap="gray")
    for i in range(locs["x"].size):
        width = 10
        height = 10
        if "xsigma" in locs:
            width = height = 5.0 * locs["xsigma"][i]
        if "ysigma" in locs:
            height = 5.0 * locs["ysigma"][i]
        ellipse = patches.Ellipse((locs["x"][i], locs["y"][i]),
                                  width,
                                  height,
                                  facecolor='none',
                                  edgecolor='g',
                                  linewidth=2)
        ax.add_artist(ellipse)

    #ax.scatter(locs["x"], locs["y"], s = 200,
    ax.set_title("Overlay Image")

    pyplot.show()
Example #17
    def __init__(self,
                 color=None,
                 locs_name=None,
                 movie_name=None,
                 number=None,
                 **kwds):
        super().__init__(**kwds)
        self.color = color
        self.cur_frame = 0
        self.flip_lr = False
        self.flip_ud = False
        self.fr_height = None
        self.fr_width = None
        self.image = None
        self.locs = None
        self.locs_name = locs_name
        self.movie_name = movie_name
        self.number = number
        self.offset_x = 0
        self.offset_y = 0
        self.pixmap = None

        if (locs_name.endswith(".bin")):
            self.locs_reader = LocalizationReaderI3(locs_name)
        else:
            self.locs_reader = LocalizationReaderH5(locs_name)

        self.movie_fp = datareader.inferReader(movie_name)
        self.movie_len = self.movie_fp.filmSize()[2]
Example #18
    def handleLoadMovie(self):
        movie_filename = QtWidgets.QFileDialog.getOpenFileName(
            self, "Load Movie", self.directory, "*.dax *.fits *.spe *.tif")[0]
        if movie_filename:
            self.directory = os.path.dirname(movie_filename)
            self.movie_file = datareader.inferReader(movie_filename)
            [self.film_x, self.film_y,
             self.film_l] = self.movie_file.filmSize()
            self.ui.fileLabel.setText(movie_filename)
            self.cur_frame = 0

            # Clear molecule lists.
            for elt in [self.locs1_list, self.locs2_list]:
                if elt is not None:
                    elt.cleanUp()
            self.locs1_list = None
            self.locs2_list = None

            # Hide info displays
            self.locs1_table.hideFields()
            self.locs2_table.hideFields()

            # Reset view transform.
            self.movie_view.setTransform(QtGui.QTransform())

            self.incCurFrame(0)
Example #19
def rollingBallSub(movie_in, movie_out, radius, sigma, offset=100):

    input_movie = datareader.inferReader(movie_in)
    output_dax = daxwriter.DaxWriter(movie_out, 0, 0)

    rb = RollingBall(radius, sigma)

    for i in range(input_movie.filmSize()[2]):

        if ((i % 10) == 0):
            print("Processing frame", i)

        image = input_movie.loadAFrame(i) - offset

        if False:
            # Disabled alternative: simple Gaussian low-pass background subtraction.
            image = image.astype(numpy.float64)
            lowpass = scipy.ndimage.filters.gaussian_filter(image, sigma)
            sub = image - lowpass

        else:
            sub = rb.removeBG(image)

        output_dax.addFrame(sub + offset)

    output_dax.close()
Example #20
def test_io_5():
    """
    Test TIF movie IO (1 frame, 1 page).
    """
    movie_h = 50
    movie_w = 40
    movie_l = 1

    movie_name = storm_analysis.getPathOutputTest("test_dataio.tif")

    ## Standard Tiff.
    data = numpy.random.randint(0, 60000,
                                (movie_h, movie_w)).astype(numpy.uint16)

    # Write tif movie.
    wr = datawriter.inferWriter(movie_name)
    for i in range(movie_l):
        wr.addFrame(data)
    wr.close()

    # Read & check.
    rd = datareader.inferReader(movie_name)
    [mw, mh, ml] = rd.filmSize()

    assert (mh == movie_h)
    assert (mw == movie_w)
    assert (ml == movie_l)
    assert (numpy.allclose(data, rd.loadAFrame(0)))

    ## 'imagej' Tiff.
    data = numpy.random.randint(0, 60000, (movie_l, movie_h, movie_w)).astype(
        numpy.uint16)

    movie_name = storm_analysis.getPathOutputTest("test_dataio.tif")

    # Write tif movie.
    with tifffile.TiffWriter(movie_name, imagej=True) as tf:
        tf.save(data, truncate=True)

    # Read & check.
    rd = datareader.inferReader(movie_name)
    [mw, mh, ml] = rd.filmSize()

    assert (mh == movie_h)
    assert (mw == movie_w)
    assert (ml == movie_l)
    assert (numpy.allclose(data[0, :, :], rd.loadAFrame(0)))
Example #21
    def __init__(self, movie_file=None, parameters=None, **kwds):
        super(FrameReader, self).__init__(**kwds)

        self.parameters = parameters
        self.verbose = 1
        if self.parameters is not None:
            self.verbose = (self.parameters.getAttr("verbosity") == 1)
        self.movie_data = datareader.inferReader(movie_file)
Example #22
    def __init__(self, movie_file = None, parameters = None, **kwds):
        super(FrameReader, self).__init__(**kwds)

        self.parameters = parameters
        self.verbose = 1
        if self.parameters is not None:
            self.verbose = (self.parameters.getAttr("verbosity") == 1)
        self.movie_data = datareader.inferReader(movie_file)
Example #23
def psfZStack(movie_name, h5_filename, zstack_name, scmos_cal = None, aoi_size = 8, driftx = 0.0, drifty = 0.0):
    """
    movie_name - The movie file containing the z stack.
    h5_filename - The HDF5 file containing the localizations to use for the PSF measurement.
    zstack_name - The name of the file to save the zstack in.
    scmos_cal - The sCMOS calibration file.
    aoi_size - The AOI size in pixels.

    driftx, drifty are in units of pixels per frame, (bead x last frame - bead x first frame)/n_frames.
    """
    # Create appropriate reader.
    if scmos_cal is None:
        fr_reader = datareader.inferReader(movie_name)
    else:
        fr_reader = analysisIO.FrameReaderSCMOS(movie_file = movie_name,
                                                calibration_file = scmos_cal)
        
    [movie_x, movie_y, movie_len] = fr_reader.filmSize()
    
    # Load localizations.
    with saH5Py.SAH5Py(h5_filename) as h5:
        locs = h5.getLocalizations()
        x = locs["y"] + 1
        y = locs["x"] + 1

    # Measure Z stacks.
    z_stacks = []
    for i in range(x.size):
        z_stacks.append(numpy.zeros((4*aoi_size, 4*aoi_size, movie_len)))
        
    for i in range(movie_len):
        if((i%50)==0):
            print("Processing frame {0:0d}".format(i))

        # Load the frame. This also handles gain and offset correction.
        #
        frame = fr_reader.loadAFrame(i)

        # Subtract estimated background. This assumes that the image is
        # mostly background and that the background is uniform.
        #
        frame = frame - numpy.median(frame)
            
        for j in range(x.size):
            xf = x[j] + driftx * float(i)
            yf = y[j] + drifty * float(i)
            z_stacks[j][:,:,i] = measurePSFUtils.extractAOI(frame, aoi_size, xf, yf)

    # Save z_stacks.
    numpy.save(zstack_name + ".npy", z_stacks)

    # Save a (normalized) z_stack as tif for inspection purposes.
    z_stack = z_stacks[0]
    z_stack = z_stack/numpy.amax(z_stack)
    z_stack = z_stack.astype(numpy.float32)
    with tifffile.TiffWriter(zstack_name + ".tif") as tf:
        for i in range(movie_len):
            tf.save(z_stack[:,:,i])
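
# A hypothetical call illustrating the drift convention from the docstring:
# driftx / drifty are in pixels per frame, i.e.
# (bead position in last frame - bead position in first frame) / n_frames.
# File names and bead positions are made up.
n_frames = 200
driftx = (25.4 - 25.1) / n_frames
drifty = (40.0 - 40.6) / n_frames
psfZStack("beads_zstack.dax", "beads_zstack_ref.hdf5", "z_stack",
          scmos_cal=None, aoi_size=8, driftx=driftx, drifty=drifty)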
Example #24
def test_io_5():
    """
    Test TIF movie IO (1 frame, 1 page).
    """
    movie_h = 50
    movie_w = 40
    movie_l = 1

    movie_name = storm_analysis.getPathOutputTest("test_dataio.tif")
    
    ## Standard Tiff.
    data = numpy.random.randint(0, 60000, (movie_h, movie_w)).astype(numpy.uint16)

    # Write tif movie.
    wr = datawriter.inferWriter(movie_name)
    for i in range(movie_l):
        wr.addFrame(data)
    wr.close()
        
    # Read & check.
    rd = datareader.inferReader(movie_name)
    [mw, mh, ml] = rd.filmSize()

    assert(mh == movie_h)
    assert(mw == movie_w)
    assert(ml == movie_l)
    assert(numpy.allclose(data, rd.loadAFrame(0)))

    ## 'imagej' Tiff.
    data = numpy.random.randint(0, 60000, (movie_l, movie_h, movie_w)).astype(numpy.uint16)

    movie_name = storm_analysis.getPathOutputTest("test_dataio.tif")

    # Write tif movie.
    with tifffile.TiffWriter(movie_name, imagej = True) as tf:
        tf.save(data, truncate = True)

    # Read & check.
    rd = datareader.inferReader(movie_name)
    [mw, mh, ml] = rd.filmSize()

    assert(mh == movie_h)
    assert(mw == movie_w)
    assert(ml == movie_l)
    assert(numpy.allclose(data[0,:,:], rd.loadAFrame(0)))
Example #25
def test_hal_film_1():

    # This is expected to record a movie called 'movie_01.dax'
    halTest(config_xml = "none_classic_config.xml",
            class_name = "FilmTest1",
            test_module = "storm_control.test.hal.film_tests")

    # Check that the movie is the right length.
    movie = datareader.inferReader(os.path.join(test.dataDirectory(), "movie_01.dax"))
    assert(movie.filmSize() == [512, 512, 10])
Example #26
def test_hal_film():

    # This is expected to record a movie called 'movie_01.dax'
    halTest(config_xml="none_classic_config.xml",
            class_name="FilmTest1",
            test_module="storm_control.test.hal.film_tests")

    # Check that the movie is the right length.
    movie = datareader.inferReader(
        os.path.join(test.dataDirectory(), "movie_01.dax"))
    assert (movie.filmSize() == [512, 512, 10])
Example #27
def test_hal_film_5():
    """
    Test repeated film acquisition.
    """
    halTest(config_xml = "none_classic_config.xml",
            class_name = "FilmTest4",
            test_module = "storm_control.test.hal.film_tests")

    # Check that the final movie is correct.
    movie = datareader.inferReader(os.path.join(test.dataDirectory(), "movie_04.dax"))
    assert(movie.filmSize() == [512, 512, 1])
Example #28
def getFilmSize(filename, i3_data):
    names = [filename[:-9], filename[:-10]]
    extensions = [".dax", ".spe", ".tif"]
    for name in names:
        for ext in extensions:
            if os.path.exists(name + ext):
                movie_file = datareader.inferReader(name + ext)
                return movie_file.filmSize()

    film_l = int(numpy.max(i3_data['fr']))+1
    print("Could not find movie file for", filename, "assuming 256x256x" + str(film_l))
    return [256, 256, film_l]
Example #29
 def loadMovie(self):
     movie_filename = QtWidgets.QFileDialog.getOpenFileName(
         self, "Load Movie", self.directory, "*.dax *.spe *.tif")[0]
     if movie_filename:
         self.directory = os.path.dirname(movie_filename)
         self.movie_file = datareader.inferReader(movie_filename)
         [self.film_x, self.film_y,
          self.film_l] = self.movie_file.filmSize()
         self.ui.fileLabel.setText(movie_filename)
         self.cur_frame = 0
         self.multi_list = False
         self.incCurFrame(0)
Example #30
def test_hal_film_5():
    """
    Test repeated film acquisition.
    """
    halTest(config_xml="none_classic_config.xml",
            class_name="FilmTest4",
            test_module="storm_control.test.hal.film_tests")

    # Check that the final movie is correct.
    movie = datareader.inferReader(
        os.path.join(test.dataDirectory(), "movie_04.dax"))
    assert (movie.filmSize() == [512, 512, 1])
Example #31
 def loadMovie(self):
     movie_filename = QtWidgets.QFileDialog.getOpenFileName(self,
                                                            "Load Movie",
                                                            self.directory,
                                                            "*.dax *.spe *.tif")[0]
     if movie_filename:            
         self.directory = os.path.dirname(movie_filename)
         self.movie_file = datareader.inferReader(movie_filename)
         [self.film_x, self.film_y, self.film_l] = self.movie_file.filmSize()
         self.ui.fileLabel.setText(movie_filename)
         self.cur_frame = 0
         self.multi_list = False
         self.incCurFrame(0)
Example #32
def test_hal_film_2():

    # This is expected to record several movies with names starting with 'movie_02'
    halTest(config_xml = "none_classic_config.xml",
            class_name = "FilmTest2",
            test_module = "storm_control.test.hal.film_tests")

    # Check that the movies are the right length.
    for name, size in [["movie_02.dax", [512, 512, 10]],
                       ["movie_02_average.dax", [512, 512, 1]],
                       ["movie_02_interval.dax", [508, 256, 2]],
                       ["movie_02_slice1.dax", [64, 65, 10]]]:
        movie = datareader.inferReader(os.path.join(test.dataDirectory(), name))
        assert(movie.filmSize() == size)
Example #33
def batchAnalysis(analysis_exe, input_directory, output_directory, multi_xml, max_processes = 2):
    minimum_length = 100

    dax_files = glob.glob(input_directory + "*.dax")

    # setup process queue
    process_count = 0
    results = Queue.Queue()

    # start processes
    procs = []
    for i, file in enumerate(dax_files):

        print("Found:", file)

        movie_obj = datareader.inferReader(file)
        if(movie_obj.filmSize()[2] > minimum_length):
            basename = os.path.basename(file)
            mlistname = output_directory + "/" + basename[:-4] + "_mlist.bin"
            print("  ->", mlistname)

            try:
                # Wait for a process to stop before starting
                # the next one if we are at the limit.
                if(process_count >= max_processes):
                    description, rc = results.get()
                    print(description)
                    process_count -= 1
                proc = subprocess.Popen(['python', analysis_exe, file, mlistname, multi_xml])
                procs.append(proc)
                thread.start_new_thread(process_waiter, (proc, "Finished: " + basename, results))
                process_count += 1

            except KeyboardInterrupt:
                for proc in procs:
                    if(not proc.poll()):
                        proc.send_signal(signal.CTRL_C_EVENT)

    # wait until all the processes finish
    try:
        while(process_count>0):
            description, rc = results.get()
            print(description)
            process_count -= 1

    except KeyboardInterrupt:
        for proc in procs:
            if(not proc.poll()):
                proc.send_signal(signal.CTRL_C_EVENT)
Example #34
def test_hal_film_2():

    # This is expected to record several movies with names starting with 'movie_02'
    halTest(config_xml="none_classic_config.xml",
            class_name="FilmTest2",
            test_module="storm_control.test.hal.film_tests")

    # Check that the movies are the right length.
    for name, size in [["movie_02.dax", [512, 512, 10]],
                       ["movie_02_average.dax", [512, 512, 1]],
                       ["movie_02_interval.dax", [508, 256, 2]],
                       ["movie_02_slice1.dax", [64, 65, 10]]]:
        movie = datareader.inferReader(os.path.join(test.dataDirectory(),
                                                    name))
        assert (movie.filmSize() == size)
Example #35
def test_hal_film_7():

    # This is expected to record several movies with names starting
    # with 'movie_01' and 'movie_02'.
    halTest(config_xml = "none_classic_config.xml",
            class_name = "FilmTest6",
            test_module = "storm_control.test.hal.film_tests")

    # Check that the movies are the right length.
    for name, size in [["movie_01.dax", [256, 256, 10]],
                       ["movie_01_slice1.dax", [128, 128, 10]],
                       ["movie_02.dax", [256, 256, 10]],
                       ["movie_02_slice1.dax", [128, 128, 10]]]:
        movie = datareader.inferReader(os.path.join(test.dataDirectory(), name))
        assert(movie.filmSize() == size)
Example #36
def test_hal_film_7():

    # This is expected to record several movies with names starting
    # with 'movie_01' and 'movie_02'.
    halTest(config_xml="none_classic_config.xml",
            class_name="FilmTest6",
            test_module="storm_control.test.hal.film_tests")

    # Check that the movies are the right length.
    for name, size in [["movie_01.dax", [256, 256, 10]],
                       ["movie_01_slice1.dax", [128, 128, 10]],
                       ["movie_02.dax", [256, 256, 10]],
                       ["movie_02_slice1.dax", [128, 128, 10]]]:
        movie = datareader.inferReader(os.path.join(test.dataDirectory(),
                                                    name))
        assert (movie.filmSize() == size)
Example #37
def getFilmSize(filename, i3_data):
    """
    Determine the (analyzed) film size.
    """

    # First try to load meta data.
    metadata = readinsight3.loadI3Metadata(filename, verbose = False)
    if metadata is not None:
        movie_data = metadata.find("movie")
        movie_x = int(movie_data.find("movie_x").text)
        movie_y = int(movie_data.find("movie_y").text)
        movie_l = int(movie_data.find("movie_l").text)

        # Check if analysis stopped before the end of the movie.
        settings = metadata.find("settings")
        max_frame = int(settings.find("max_frame").text)
        if (max_frame > 0):
            movie_l = max_frame
        return [movie_x, movie_y, movie_l]

    # Next try and load the corresponding movie file.
    names = [filename[:-9], filename[:-10]]
    extensions = [".dax", ".spe", ".tif"]
    for name in names:
        for ext in extensions:
            if os.path.exists(name + ext):
                movie_file = datareader.inferReader(name + ext)
                return movie_file.filmSize()

    # Finally, just guess / hope. Assume that
    # the image size is a power of 2..
    film_l = int(numpy.max(i3_data['fr']))+1
    
    max_x = numpy.max(i3_data['x'])
    x_size = 2
    while(x_size < max_x):
        x_size = x_size * 2

    max_y = numpy.max(i3_data['y'])
    y_size = 2
    while(y_size < max_y):
        y_size = y_size * 2
    
    print("Could not find movie file for", filename, "assuming", x_size, "x", y_size, "by", str(film_l))
    return [x_size, y_size, film_l]
Example #38
def getFilmSize(filename, i3_data):
    """
    Determine the (analyzed) film size.
    """

    # First try to load meta data.
    metadata = readinsight3.loadI3Metadata(filename, verbose = False)
    if metadata is not None:
        movie_data = metadata.find("movie")
        movie_x = int(movie_data.find("movie_x").text)
        movie_y = int(movie_data.find("movie_y").text)
        movie_l = int(movie_data.find("movie_l").text)

        # Check if analysis stopped before the end of the movie.
        settings = metadata.find("settings")
        max_frame = int(settings.find("max_frame").text)
        if (max_frame > 0):
            movie_l = max_frame
        return [movie_x, movie_y, movie_l]

    # Next try and load the corresponding movie file.
    names = [filename[:-9], filename[:-10]]
    extensions = [".dax", ".spe", ".tif"]
    for name in names:
        for ext in extensions:
            if os.path.exists(name + ext):
                movie_file = datareader.inferReader(name + ext)
                return movie_file.filmSize()

    # Finally, just guess / hope. Assume that
    # the image size is a power of 2..
    film_l = int(numpy.max(i3_data['fr']))+1
    
    max_x = numpy.max(i3_data['x'])
    x_size = 2
    while(x_size < max_x):
        x_size = x_size * 2

    max_y = numpy.max(i3_data['y'])
    y_size = 2
    while(y_size < max_y):
        y_size = y_size * 2
    
    print("Could not find movie file for", filename, "assuming", x_size, "x", y_size, "by", str(film_l))
    return [x_size, y_size, film_l]
Example #39
def waveletBGRSub(movie_in, movie_out, wavelet_type, wavelet_level, iterations, threshold, offset = 100):

    input_movie = datareader.inferReader(movie_in)
    output_dax = daxwriter.DaxWriter(movie_out, 0, 0)

    wbgr = WaveletBGR(wavelet_type = wavelet_type)

    for i in range(input_movie.filmSize()[2]):

        if((i%10) == 0):
            print("Processing frame", i)

        image = input_movie.loadAFrame(i) - offset
        sub = wbgr.removeBG(image,
                            iterations,
                            threshold,
                            wavelet_level)
        output_dax.addFrame(sub + offset)

    output_dax.close()
Example #40
def batchAnalysis(analysis_exe, input_directory, output_directory, multi_xml, max_processes = 2):
    minimum_length = 100

    # FIXME: Should also handle .tif movies?
    dax_files = glob.glob(input_directory + "*.dax")

    # Figure out which movies to analyze.
    cmd_lines = []
    for movie_file in dax_files:

        movie_obj = datareader.inferReader(movie_file)
        if(movie_obj.filmSize()[2] > minimum_length):

            print("Analyzing:", movie_file)
            basename = os.path.basename(movie_file)
            mlistname = output_directory + "/" + basename[:-4] + ".hdf5"
            cmd_lines.append(['python', analysis_exe,
                              "--movie", movie_file,
                              "--bin", mlistname,
                              "--xml", multi_xml])
    batchRun.batchRun(cmd_lines, max_processes = max_processes)
Example #41
def waveletBGRSub(movie_in,
                  movie_out,
                  wavelet_type,
                  wavelet_level,
                  iterations,
                  threshold,
                  offset=100):

    input_movie = datareader.inferReader(movie_in)
    output_dax = datawriter.inferWriter(movie_out)

    wbgr = WaveletBGR(wavelet_type=wavelet_type)

    for i in range(input_movie.filmSize()[2]):

        if ((i % 10) == 0):
            print("Processing frame", i)

        image = input_movie.loadAFrame(i) - offset
        sub = wbgr.removeBG(image, iterations, threshold, wavelet_level)
        output_dax.addFrame(sub + offset)

    output_dax.close()
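
# A hedged call to waveletBGRSub(); "db4" is a common wavelet family name and
# is only an assumption here, since the WaveletBGR class that interprets
# wavelet_type is not shown. The level, iteration and threshold values are
# illustrative.
waveletBGRSub("raw_movie.dax", "wavelet_subtracted.dax",
              "db4", 2, 2, 10, offset=100)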
Example #42
def overlayImageBeads(movie_name, beads_locs_name, frame_number, sx=8, sy=8):
    """
    Create an image of a frame with the bead locations overlaid.

    movie_name - The name of the movie file.
    beads_locs_name - The name of the text file with the bead locations.
    frame_number - Which frame to examine.
    sx - Figure x size in inches.
    sy - Figure y size in inches.
    """

    frame = datareader.inferReader(movie_name).loadAFrame(frame_number).astype(
        numpy.float64)
    frame = frame - numpy.min(frame)
    frame = frame / numpy.max(frame)

    bead_locs = numpy.loadtxt(beads_locs_name)
    locs = {"x": bead_locs[:, 0], "y": bead_locs[:, 1]}

    fig = pyplot.figure(figsize=(sx, sy))
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(frame, interpolation='nearest', cmap="gray")
    for i in range(locs["x"].size):
        width = 10
        height = 10
        ellipse = patches.Ellipse((locs["x"][i], locs["y"][i]),
                                  width,
                                  height,
                                  facecolor='none',
                                  edgecolor='g',
                                  linewidth=2)
        ax.add_artist(ellipse)

    #ax.scatter(locs["x"], locs["y"], s = 200,
    ax.set_title("Overlay Image")

    pyplot.show()
Example #43
#

import numpy
import sys

import storm_analysis.sa_library.datareader as datareader

if (len(sys.argv) != 3):
    print("usage: <input_dax> <variance>")
    exit()

cam_offset = 100
max_frames = 1000

# Open the input file.
in_file = datareader.inferReader(sys.argv[1])
[w, h, l] = in_file.filmSize()

if (l > max_frames):
    l = max_frames

# Calculate x and xx.
mean = numpy.zeros((w, h), dtype=numpy.int64)
var = numpy.zeros((w, h), dtype=numpy.int64)

for i in range(l):
    if ((i % 10) == 0):
        print("Processing frame", i)

    aframe = in_file.loadAFrame(i)
Example #44
 def checkMessage(self, tcp_message):
     movie = datareader.inferReader(os.path.join(self.directory, self.name + ".dax"))
     assert(movie.filmSize() == [256, 256, self.length])
Example #45
def measurePSF(movie_name, zfile_name, movie_mlist, psf_name, want2d = False, aoi_size = 12, z_range = 750.0, z_step = 50.0):
    """
    The actual z range is 2x z_range (i.e. from -z_range to z_range).
    """
    
    # Load dax file, z offset file and molecule list file.
    dax_data = datareader.inferReader(movie_name)
    z_offsets = None
    if os.path.exists(zfile_name):
        try:
            z_offsets = numpy.loadtxt(zfile_name, ndmin = 2)[:,1]
        except IndexError:
            z_offsets = None
            print("z offsets were not loaded.")
    i3_data = readinsight3.loadI3File(movie_mlist)

    if want2d:
        print("Measuring 2D PSF")
    else:
        print("Measuring 3D PSF")

    #
    # Go through the frames identifying good peaks and adding them
    # to the average psf. For 3D, molecule z positions are rounded to
    # the nearest 50nm.
    #
    z_mid = int(z_range/z_step)
    max_z = 2 * z_mid + 1

    average_psf = numpy.zeros((max_z,4*aoi_size,4*aoi_size))
    peaks_used = 0
    totals = numpy.zeros(max_z)
    [dax_x, dax_y, dax_l] = dax_data.filmSize()
    for curf in range(dax_l):

        # Select localizations in current frame & not near the edges.
        mask = (i3_data['fr'] == curf+1) & (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_x - aoi_size - 1)) & (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_y - aoi_size - 1))
        xr = i3_data['x'][mask]
        yr = i3_data['y'][mask]

        # Use the z offset file if it was specified, otherwise use localization z positions.
        if z_offsets is None:
            if (curf == 0):
                print("Using fit z locations.")
            zr = i3_data['z'][mask]
        else:
            if (curf == 0):
                print("Using z offset file.")
            zr = numpy.ones(xr.size) * z_offsets[curf]

        ht = i3_data['h'][mask]

        # Remove localizations that are too close to each other.
        in_peaks = numpy.zeros((xr.size,util_c.getNPeakPar()))
        in_peaks[:,util_c.getXCenterIndex()] = xr
        in_peaks[:,util_c.getYCenterIndex()] = yr
        in_peaks[:,util_c.getZCenterIndex()] = zr
        in_peaks[:,util_c.getHeightIndex()] = ht

        out_peaks = util_c.removeNeighbors(in_peaks, 2*aoi_size)
        #out_peaks = util_c.removeNeighbors(in_peaks, aoi_size)

        print(curf, "peaks in", in_peaks.shape[0], ", peaks out", out_peaks.shape[0])

        # Use remaining localizations to calculate spline.
        image = dax_data.loadAFrame(curf).astype(numpy.float64)

        xr = out_peaks[:,util_c.getXCenterIndex()]
        yr = out_peaks[:,util_c.getYCenterIndex()]
        zr = out_peaks[:,util_c.getZCenterIndex()]
        ht = out_peaks[:,util_c.getHeightIndex()]

        for i in range(xr.size):
            xf = xr[i]
            yf = yr[i]
            zf = zr[i]
            xi = int(xf)
            yi = int(yf)
            if want2d:
                zi = 0
            else:
                zi = int(round(zf/z_step) + z_mid)

            # check the z is in range
            if (zi > -1) and (zi < max_z):

                # get localization image
                mat = image[xi-aoi_size:xi+aoi_size,
                            yi-aoi_size:yi+aoi_size]

                # zoom in by 2x
                psf = scipy.ndimage.interpolation.zoom(mat, 2.0)

                # re-center image
                psf = scipy.ndimage.interpolation.shift(psf, (-2.0*(xf-xi), -2.0*(yf-yi)), mode='nearest')

                # add to average psf accumulator
                average_psf[zi,:,:] += psf
                totals[zi] += 1

    # Force PSF to be zero (on average) at the boundaries.
    for i in range(max_z):
        edge = numpy.concatenate((average_psf[i,0,:],
                                  average_psf[i,-1,:],
                                  average_psf[i,:,0],
                                  average_psf[i,:,-1]))
        average_psf[i,:,:] -= numpy.mean(edge)

    # Normalize the PSF.
    if want2d:
        max_z = 1

    for i in range(max_z):
        print(i, totals[i])
        if (totals[i] > 0.0):
            average_psf[i,:,:] = average_psf[i,:,:]/numpy.sum(numpy.abs(average_psf[i,:,:]))

    average_psf = average_psf/numpy.max(average_psf)

    # Save PSF (in image form).
    if True:
        import storm_analysis.sa_library.daxwriter as daxwriter
        dxw = daxwriter.DaxWriter(os.path.join(os.path.dirname(psf_name), "psf.dax"),
                                  average_psf.shape[1],
                                  average_psf.shape[2])
        for i in range(max_z):
            dxw.addFrame(1000.0 * average_psf[i,:,:] + 100)
        dxw.close()

    # Save PSF.
    if want2d:
        psf_dict = {"psf" : average_psf[0,:,:],
                    "type" : "2D"}

    else:
        cur_z = -z_range
        z_vals = []
        for i in range(max_z):
            z_vals.append(cur_z)
            cur_z += z_step

        psf_dict = {"psf" : average_psf,
                    "pixel_size" : 0.080, # 1/2 the camera pixel size in nm.
                    "type" : "3D",
                    "zmin" : -z_range,
                    "zmax" : z_range,
                    "zvals" : z_vals}

    pickle.dump(psf_dict, open(psf_name, 'wb'))
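
# A hypothetical invocation of measurePSF(); file names are placeholders and
# the keyword arguments repeat the defaults from the signature for clarity.
# The measured z range runs from -z_range to +z_range (here +/- 750nm) in
# z_step increments.
measurePSF("beads.dax", "beads_z_offsets.txt", "beads_mlist.bin", "psf.psf",
           want2d=False, aoi_size=12, z_range=750.0, z_step=50.0)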
Example #46
    def __init__(self, movie_file = None, **kwds):
        super(FrameReader, self).__init__(**kwds)

        self.movie_data = datareader.inferReader(movie_file)
Example #47
import storm_analysis.sa_library.ia_utilities_c as util_c
import storm_analysis.sa_library.datareader as datareader
import storm_analysis.sa_library.readinsight3 as readinsight3

if (len(sys.argv)!=4):
    print("usage: homotopy_psf <dax_file, input> <bin_file, input> <npy_file, output>")
    exit()

# Minimum number of peaks to calculate the PSF from.
min_peaks = 300

# Half width of the aoi size in pixels.
aoi_size = 8

# Load dax file and corresponding molecule list file.
dax_data = datareader.inferReader(sys.argv[1])
i3_data = readinsight3.loadI3File(sys.argv[2])

# Go through the frames identifying good peaks and adding them
# to the average psf
average_psf = numpy.zeros((4*aoi_size,4*aoi_size))
curf = 1
peaks_used = 0
total = 0.0
[dax_x, dax_y, dax_l] = dax_data.filmSize()
while (curf < dax_l) and (peaks_used < min_peaks):

    # Select localizations in current frame & not near the edges.
    mask = (i3_data['fr'] == curf) & (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_y - aoi_size - 1)) & (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_x - aoi_size - 1))
    xr = i3_data['x'][mask]
    yr = i3_data['y'][mask]
Example #48
def measurePSFBeads(movie_name, zfile_name, beads_file, psf_name, want2d = False, aoi_size = 12, z_range = 600.0, z_step = 50.0):

    # Load movie file.
    movie_data = datareader.inferReader(movie_name)

    #
    # Load the z-offset information for the dax file.
    #
    #   This is a text file with one line per frame that contains the 
    #   z-offset (in nm) for that frame. Each line is a space separated
    #   valid, z_pos pair. If valid is 0 the frame will be ignored,
    #   otherwise it will be used.
    #
    data = numpy.loadtxt(zfile_name)
    valid = data[:,0]
    z_off = data[:,1]

    #
    # Load the locations of the beads.
    #
    #   This is a text file that contains the locations of the beads that
    #   will be used to construct the PSF. Each line is a space separated 
    #   x, y pair of bead locations (in pixels).
    #
    #   One way to create this file is to look at the bead movie with
    #   visualizer.py and record the center positions of several beads.
    #
    data = numpy.loadtxt(beads_file, ndmin = 2)
    bead_x = data[:,0]
    bead_y = data[:,1]

    #
    # Go through the frames and add the bead images to the average psf. Z
    # positions are rounded to the nearest 50nm. You might need to 
    # adjust z_range depending on your experiment.
    #
    z_mid = int(z_range/z_step)
    max_z = 2 * z_mid + 1
    average_psf = numpy.zeros((max_z,4*aoi_size,4*aoi_size))
    totals = numpy.zeros(max_z)
    [dax_x, dax_y, dax_l] = movie_data.filmSize()
    for curf in range(dax_l):

        if ((curf%50)==0):
            print("Processing frame:", curf)

        if (abs(valid[curf]) < 1.0e-6):
            #    print "skipping", valid[curf]
            continue

        # Use bead localization to calculate spline.
        image = movie_data.loadAFrame(curf).astype(numpy.float64)

        # Get frame z and check that it is in range.
        zf = z_off[curf]
        zi = int(round(zf/z_step)) + z_mid
        if (zi > -1) and (zi < max_z):

            for i in range(bead_x.size):

                xf = bead_x[i]
                yf = bead_y[i]
                xi = int(xf)
                yi = int(yf)

                # Get localization image.
                mat = image[xi-aoi_size:xi+aoi_size,
                            yi-aoi_size:yi+aoi_size]
                
                # Zoom in by 2x.
                psf = scipy.ndimage.interpolation.zoom(mat, 2.0)

                # Re-center image.
                psf = scipy.ndimage.interpolation.shift(psf, (-2.0*(xf-xi), -2.0*(yf-yi)), mode='nearest')

                # Add to average psf accumulator.
                average_psf[zi,:,:] += psf
                totals[zi] += 1

    # Force PSF to be zero (on average) at the boundaries.
    for i in range(max_z):
        edge = numpy.concatenate((average_psf[i,0,:],
                                  average_psf[i,-1,:],
                                  average_psf[i,:,0],
                                  average_psf[i,:,-1]))
        average_psf[i,:,:] -= numpy.mean(edge)

    # Normalize PSF.
    for i in range(max_z):
        if (totals[i] > 0.0):
            average_psf[i,:,:] = average_psf[i,:,:]/numpy.sum(numpy.abs(average_psf[i,:,:]))

    average_psf = average_psf/numpy.max(average_psf)
    
    # Save PSF (in image form).
    if True:
        import os
        import storm_analysis.sa_library.daxwriter as daxwriter
        dxw = daxwriter.DaxWriter(os.path.join(os.path.dirname(psf_name), "psf_beads.dax"),
                                  average_psf.shape[1],
                                  average_psf.shape[2])
        for i in range(max_z):
            #print i, numpy.max(average_psf[i,:,:])
            dxw.addFrame(1000.0 * average_psf[i,:,:] + 100)
        dxw.close()

    # Save PSF. 
    cur_z = -z_range
    z_vals = []
    for i in range(max_z):
        z_vals.append(cur_z)
        cur_z += z_step

    dict = {"psf" : average_psf,
            "pixel_size" : 0.080, # 1/2 the camera pixel size in nm.
            "type" : "3D",
            "zmin" : -z_range,
            "zmax" : z_range,
            "zvals" : z_vals}

    pickle.dump(dict, open(psf_name, 'wb'))
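
# The two text files described in the comments above have a simple layout.
# A sketch of writing them with numpy, using made-up values:
import numpy

# z-offset file: one "valid z_pos" pair per frame; frames whose valid flag
# is 0 are skipped by measurePSFBeads().
n_frames = 100
valid = numpy.ones(n_frames)
z_off = numpy.linspace(-600.0, 600.0, n_frames)
numpy.savetxt("beads_z_offsets.txt", numpy.column_stack((valid, z_off)))

# bead file: one "x y" pair per bead, in pixels.
numpy.savetxt("beads.txt", numpy.array([[20.5, 31.0], [102.3, 64.8]]))

measurePSFBeads("beads.dax", "beads_z_offsets.txt", "beads.txt", "psf_beads.psf")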
Example #49
import scipy.ndimage
import sys

import storm_analysis.sa_library.datareader as datareader

if (len(sys.argv) != 5):
    print(
        "usage: measure_psf_beads <movie_file, input> <z_file, input> <bead_file, input> <psf_file output>"
    )
    exit()

# Half width of the aoi size in pixels.
aoi_size = 12

# Load movie file.
movie_data = datareader.inferReader(sys.argv[1])

#
# Load the z-offset information for the dax file.
#
#   This is a text file with one line per frame that contains the
#   z-offset (in nm) for that frame. Each line is a space separated
#   valid, z_pos pair. If valid is 0 the frame will be ignored,
#   otherwise it will be used.
#
data = numpy.loadtxt(sys.argv[2])
valid = data[:, 0]
z_off = data[:, 1]

#
# Load the locations of the beads.
Example #50
    parser.add_argument('--movie',
                        dest='movie',
                        type=str,
                        required=True,
                        help="The name of the movie that will be analyzed.")
    parser.add_argument('--working_dir',
                        dest='wdir',
                        type=str,
                        required=True,
                        help="The directory for intermediate analysis.")
    parser.add_argument('--xml',
                        dest='settings',
                        type=str,
                        required=True,
                        help="The name of the settings xml file.")
    parser.add_argument('--divisions',
                        dest='divisions',
                        type=int,
                        required=True,
                        help="How many sections to break the movie into.")

    args = parser.parse_args()

    # Figure out how many frames are in the movie.
    movie = datareader.inferReader(args.movie)
    movie_length = movie.filmSize()[2]

    print("Movie has", movie_length, "frames")
    splitAnalysisXML(args.wdir, args.settings, movie_length, args.divisions)
Example #51
def peakFinding(find_peaks, movie_file, mlist_file, parameters):

    # open files for input & output
    movie_data = datareader.inferReader(movie_file)
    [movie_x,movie_y,movie_l] = movie_data.filmSize()

    # if the i3 file already exists, read it in,
    # write it out & start the analysis from the
    # end.
    total_peaks = 0
    if(os.path.exists(mlist_file)):
        print("Found", mlist_file)
        i3data_in = readinsight3.loadI3File(mlist_file)
        try:
            curf = int(numpy.max(i3data_in['fr']))
        except ValueError:
            curf = 0
        print(" Starting analysis at frame:", curf)
        i3data = writeinsight3.I3Writer(mlist_file)
        if (curf > 0):
            i3data.addMolecules(i3data_in)
            total_peaks = i3data_in['x'].size
    else:
        curf = 0
        i3data = writeinsight3.I3Writer(mlist_file)

    # process parameters
    if parameters.hasAttr("start_frame"):
        if (parameters.getAttr("start_frame")>=curf) and (parameters.getAttr("start_frame")<movie_l):
            curf = parameters.getAttr("start_frame")

    if parameters.hasAttr("max_frame"):
        if (parameters.getAttr("max_frame")>0) and (parameters.getAttr("max_frame")<movie_l):
            movie_l = parameters.getAttr("max_frame")

    static_bg_estimator = None
    if (parameters.getAttr("static_background_estimate", 0) > 0):
        print("Using static background estimator.")
        static_bg_estimator = static_background.StaticBGEstimator(movie_data,
                                                                  start_frame = curf,
                                                                  sample_size = parameters.getAttr("static_background_estimate"))

    # analyze the movie
    # catch keyboard interrupts & "gracefully" exit.
    try:
        while(curf<movie_l):
            #for j in range(l):

            # Set up the analysis.
            image = movie_data.loadAFrame(curf) - parameters.getAttr("baseline")
            mask = (image < 1.0)
            if (numpy.sum(mask) > 0):
                print(" Removing negative values in frame", curf)
                image[mask] = 1.0

            # Find and fit the peaks.
            if static_bg_estimator is not None:
                bg_estimate = static_bg_estimator.estimateBG(curf) - parameters.getAttr("baseline")
                [peaks, residual] = find_peaks.analyzeImage(image,
                                                            bg_estimate = bg_estimate)
            else:
                [peaks, residual] = find_peaks.analyzeImage(image)

            # Save the peaks.
            if (type(peaks) == type(numpy.array([]))):
                # remove unconverged peaks
                peaks = find_peaks.getConvergedPeaks(peaks)

                # save results
                if(parameters.getAttr("orientation", "normal") == "inverted"):
                    i3data.addMultiFitMolecules(peaks, movie_x, movie_y, curf+1, parameters.getAttr("pixel_size"), inverted = True)
                else:
                    i3data.addMultiFitMolecules(peaks, movie_x, movie_y, curf+1, parameters.getAttr("pixel_size"), inverted = False)

                total_peaks += peaks.shape[0]
                print("Frame:", curf, peaks.shape[0], total_peaks)
            else:
                print("Frame:", curf, 0, total_peaks)
            curf += 1

        print("")
        i3data.close()
        find_peaks.cleanUp()
        return 0

    except KeyboardInterrupt:
        print("Analysis stopped.")
        i3data.close()
        find_peaks.cleanUp()
        return 1
Example #52
def homotopyPSF(dax_file, bin_file, psf_file):
    
    # Minimum number of peaks to calculate the PSF from.
    min_peaks = 300

    # Half width of the aoi size in pixels.
    aoi_size = 8

    # Load dax file and corresponding molecule list file.
    dax_data = datareader.inferReader(dax_file)
    i3_data = readinsight3.loadI3File(bin_file)

    # Go through the frames identifying good peaks and adding them
    # to the average psf
    average_psf = numpy.zeros((4*aoi_size,4*aoi_size))
    curf = 1
    peaks_used = 0
    total = 0.0
    [dax_x, dax_y, dax_l] = dax_data.filmSize()
    while (curf < dax_l) and (peaks_used < min_peaks):

        # Select localizations in current frame & not near the edges.
        mask = (i3_data['fr'] == curf) & (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_y - aoi_size - 1)) & (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_x - aoi_size - 1))
        xr = i3_data['x'][mask]
        yr = i3_data['y'][mask]
        ht = i3_data['h'][mask]

        # Remove localizations that are too close to each other.
        mask = iaUtilsC.removeNeighborsMask(xr, yr, aoi_size)
        print(curf, "peaks in", xr.size, ", peaks out", numpy.count_nonzero(mask))
        
        xr = xr[mask]
        yr = yr[mask]
        ht = ht[mask]
        
        # Use remaining localizations to calculate spline.
        image = dax_data.loadAFrame(curf-1).astype(numpy.float64)

        for i in range(xr.size):
            xf = xr[i]
            yf = yr[i]
            xi = int(xf)
            yi = int(yf)

            # get localization image
            mat = image[xi-aoi_size:xi+aoi_size,
                        yi-aoi_size:yi+aoi_size]

            # re-center image
            psf = scipy.ndimage.interpolation.shift(mat,(-(xf-xi),-(yf-yi)),mode='nearest')

            # zoom in by 2x
            psf = scipy.ndimage.interpolation.zoom(psf,2.0)

            # add to average psf accumulator
            average_psf += psf
            total += ht[i]

            peaks_used += 1
        
        curf += 1

    average_psf = average_psf/total

    average_psf = numpy.transpose(average_psf)

    # force psf to be zero (on average) at the boundaries.
    if True:
        edge = numpy.concatenate((average_psf[0,:],
                                  average_psf[-1,:],
                                  average_psf[:,0],
                                  average_psf[:,-1]))
        average_psf -= numpy.mean(edge)

    # save PSF (in numpy form).
    numpy.save(psf_file, average_psf)
    
    # save PSF (in image form).
    #
    # FIXME: This may be useful but it is annoying for automated testing as this file
    #        is created in which ever directory the tests are run in.
    #
    if True:
        import tifffile
        tifffile.imsave("l1h_psf.tif", average_psf.astype(numpy.float32))
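# A minimal, hypothetical usage sketch for homotopyPSF() above; the file names
# are placeholders and are not part of the original example.
homotopyPSF("movie.dax", "movie_mlist.bin", "movie_psf.npy")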
spot_list.pixel_size = pix_to_nm
spot_list.nr_spots = localization_number

spot_list.nr_channels = len(channels)

# These are always the same.
spot_list.nr_slices = 1
spot_list.nr_pos = 1
spot_list.fit_mode = 1
spot_list.location_units = 0
spot_list.intensity_units = 0
spot_list.is_track = False

# If a dax file is provided, get the film size.
if os.path.exists(sys.argv[1]):
    data_reader = datareader.inferReader(sys.argv[1])
    [x, y, l] = data_reader.filmSize()

    spot_list.nr_pixels_x = x
    spot_list.nr_pixels_y = y
    spot_list.nr_frames = l

spot_list_offset = tsf_file.tell() - 12

out = spot_list.SerializeToString()
out = encoder._VarintBytes(len(out)) + out
tsf_file.write(out)

# Rewind to the beginning and record the offset of the SpotList message.
tsf_file.seek(4)
setV(tsf_file, ">Q", spot_list_offset)
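# A hedged sketch (not part of the original example) of reading back the
# SpotList offset recorded above, assuming setV() packs the value with the
# struct module using the given ">Q" (big-endian uint64) format at byte 4.
import struct

def readSpotListOffset(tsf_name):
    # Return the SpotList offset stored at byte 4 of a .tsf file.
    with open(tsf_name, "rb") as fp:
        fp.seek(4)
        return struct.unpack(">Q", fp.read(8))[0]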
Example #54
def analyze(movie_name, settings_name, hres_name, bin_name):

    movie_data = datareader.inferReader(movie_name)

    #
    # FIXME:
    #
    # This should also start at the same frame as hres in the event of a restart.
    #
    i3_file = writeinsight3.I3Writer(bin_name)
    
    params = parameters.ParametersL1H().initFromFile(settings_name)

    #
    # Load the a matrix and setup the homotopy image analysis class.
    #
    a_mat_file = params.getAttr("a_matrix")

    print("Using A matrix file:", a_mat_file)
    a_mat = setup_A_matrix.loadAMatrix(a_mat_file)

    image = movie_data.loadAFrame(0)
    htia = homotopy_imagea_c.HomotopyIA(a_mat,
                                        params.getAttr("epsilon"),
                                        image.shape)

    #
    # This opens the file. If it already exists, then it sets the file pointer
    # to the end of the file & returns the number of the last frame analyzed.
    #
    curf = htia.openHRDataFile(hres_name)

    #
    # Figure out which frame to start & stop at.
    #
    [dax_x,dax_y,dax_l] = movie_data.filmSize()

    if params.hasAttr("start_frame"):
        if (params.getAttr("start_frame") >= curf) and (params.getAttr("start_frame") < dax_l):
            curf = params.getAttr("start_frame")

    if params.hasAttr("max_frame"):
        if (params.getAttr("max_frame") > 0) and (params.getAttr("max_frame") < dax_l):
            dax_l = params.getAttr("max_frame")

    print("Starting analysis at frame", curf)

    #
    # Analyze the dax data.
    #
    total_peaks = 0
    try:
        while(curf<dax_l):

            # Load image, subtract baseline & remove negative values.
            image = movie_data.loadAFrame(curf).astype(numpy.float64)

            # Convert to photo-electrons.
            image -= params.getAttr("camera_offset")
            image = image/params.getAttr("camera_gain")

            # Remove negative values.
            mask = (image < 0)
            image[mask] = 0

            # Analyze image.
            hres_image = htia.analyzeImage(image)
            htia.saveHRFrame(hres_image, curf + 1)
            [cs_x,cs_y,cs_a,cs_i] = htia.getPeaks(hres_image)
            i3_file.addMoleculesWithXYAItersFrame(cs_x, cs_y, cs_a, cs_i, curf+1)

            peaks = cs_x.size
            total_peaks += peaks
            print("Frame:", curf, peaks, total_peaks)

            curf += 1

    except KeyboardInterrupt:
        print("Analysis stopped.")

    # cleanup
    htia.closeHRDataFile()
    i3_file.close()
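# A minimal, hypothetical usage sketch for analyze() above; the file names are
# placeholders and are not part of the original example.
analyze("movie.dax", "analysis_settings.xml", "movie_hres.hres", "movie_mlist.bin")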
Example #55
def measurePSF(movie_name, zfile_name, movie_h5_name, psf_name, want2d = False, aoi_size = 12, pixel_size = 0.1, z_range = 0.75, z_step = 0.05):
    """
    movie_name - The name of the movie file.
    zfile_name - The name of the text file containing z offset data. If this does not exist
                 then the localizations z value will be used.
    movie_h5_name - The name of the HDF5 file containing the localization information.
    psf_name - The name of the file to save the measured PSF in.
    want2d - Measure a 2D PSF.
    aoi_size - The final AOI size will be 2x this number (in pixels).
    pixel_size - The pixel size in microns.
    z_range - The z range of the PSF (in microns). The actual z range is 2x z_range (i.e. 
                 from -z_range to z_range).
    z_step - The z granularity of the PSF (in microns).
    """
    # Create z scaling object.
    z_sclr = measurePSFUtils.ZScaler(z_range, z_step)
    
    # Load dax file, z offset file and molecule list file.
    dax_data = datareader.inferReader(movie_name)
    z_off = None
    if os.path.exists(zfile_name):
        data = numpy.loadtxt(zfile_name, ndmin = 2)
        valid = data[:,0]
        z_off = data[:,1]

    if want2d:
        print("Measuring 2D PSF")
    else:
        print("Measuring 3D PSF")

    # Go through the frames identifying good peaks and adding them
    # to the average psf.
    #
    max_z = z_sclr.getMaxZ()

    average_psf = numpy.zeros((max_z, 2*aoi_size, 2*aoi_size))
    peaks_used = 0
    totals = numpy.zeros(max_z, dtype = numpy.int64)
    
    with saH5Py.SAH5Py(movie_h5_name) as h5:
        [dax_x, dax_y, dax_l] = dax_data.filmSize()
        for curf, locs in h5.localizationsIterator():

            # Select localizations in current frame & not near the edges.
            mask = (locs['x'] > aoi_size) & (locs['x'] < (dax_x - aoi_size - 1)) & (locs['y'] > aoi_size) & (locs['y'] < (dax_y - aoi_size - 1))
            xr = locs['y'][mask] + 1
            yr = locs['x'][mask] + 1

            # Use the z offset file if it was specified, otherwise use localization z positions.
            if z_off is None:
                if (curf == 0):
                    print("Using fit z locations.")
                zr = locs['z'][mask]
            else:
                if (curf == 0):
                    print("Using z offset file.")
                if (abs(valid[curf]) < 1.0e-6):
                    continue
                zr = numpy.ones(xr.size) * z_off[curf]

            ht = locs['height'][mask]

            # Remove localizations that are too close to each other.
            mask = iaUtilsC.removeNeighborsMask(xr, yr, 2.0 * aoi_size)
            print(curf, "peaks in", xr.size, ", peaks out", numpy.count_nonzero(mask))
        
            xr = xr[mask]
            yr = yr[mask]
            zr = zr[mask]
            ht = ht[mask]

            # Use remaining localizations to calculate spline.
            image = dax_data.loadAFrame(curf).astype(numpy.float64)

            for i in range(xr.size):
                xf = xr[i]
                yf = yr[i]
                zf = zr[i]
                if want2d:
                    zi = 0
                else:
                    zi = z_sclr.convert(zf)

                # Check that the z value is in range
                if z_sclr.inRange(zi):

                    # Extract PSF.
                    psf = measurePSFUtils.extractAOI(image, aoi_size, xf, yf)

                    # Add to average psf accumulator
                    average_psf[zi,:,:] += psf
                    totals[zi] += 1

    # Check that we got at least one valid measurement.
    #
    assert (numpy.max(totals) > 0)
    
    # Set the PSF to have zero average on the X/Y boundaries.
    #
    for i in range(max_z):
        edge = numpy.concatenate((average_psf[i,0,:],
                                  average_psf[i,-1,:],
                                  average_psf[i,:,0],
                                  average_psf[i,:,-1]))
        average_psf[i,:,:] -= numpy.mean(edge)

    # Normalize the PSF.
    #
    if want2d:
        max_z = 1

    # Note: I think it makes sense to normalize to a sum of 1.0 here as the user may
    #       be using the images of single localizations as the inputs. Unlike beads
    #       we can't assume that they are all the same brightness so normalizing by
    #       the number of events would make even less sense.
    #
    for i in range(max_z):
        print("z plane {0:0d} has {1:0d} samples".format(i, totals[i]))
        if (totals[i] > 0.0):
            average_psf[i,:,:] = average_psf[i,:,:]/numpy.sum(numpy.abs(average_psf[i,:,:]))

    # Normalize to unity maximum height.
    if (numpy.max(average_psf) > 0.0):
        average_psf = average_psf/numpy.max(average_psf)
    else:
        print("Warning! Measured PSF maxima is zero or negative!")
        
    # Save PSF (in image form).
    #
    # FIXME: This may be useful but it is annoying for automated testing as this file
    #        is created in which ever directory the tests are run in.
    #
    if True:
        with tifffile.TiffWriter("psf.tif") as tf:
            for i in range(max_z):
                tf.save(average_psf[i,:,:].astype(numpy.float32))
    
    # Save PSF.
    #
    #  At least for now the PSFs use nanometers, not microns.
    #
    z_range = z_range * 1.0e+3
    z_step = z_step * 1.0e+3

    if want2d:
        psf_dict = {"psf" : average_psf[0,:,:],
                    "pixel_size" : pixel_size,
                    "type" : "2D",
                    "version" : 2.0}
        
    else:
        cur_z = -z_range
        z_vals = []
        for i in range(max_z):
            z_vals.append(cur_z)
            cur_z += z_step

        psf_dict = {"psf" : average_psf,
                    "pixel_size" : pixel_size,
                    "type" : "3D",
                    "version" : 2.0,
                    "zmin" : -z_range,
                    "zmax" : z_range,
                    "zvals" : z_vals}

    with open(psf_name, 'wb') as fp:
        pickle.dump(psf_dict, fp)
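# A minimal, hypothetical usage sketch for measurePSF() above; the file names
# are placeholders and are not part of the original example.
measurePSF("movie.dax", "z_offsets.txt", "movie_locs.hdf5", "movie_psf.psf",
           want2d = False, aoi_size = 12, z_range = 0.75, z_step = 0.05)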
Example #56
#

import numpy
import sys

import storm_analysis.sa_library.datareader as datareader

if (len(sys.argv) != 3):
    print("usage: <input_dax> <variance>")
    exit()

cam_offset = 100
max_frames = 1000

# Open the input file.
in_file = datareader.inferReader(sys.argv[1])
[w, h, l] = in_file.filmSize()

if (l > max_frames):
    l = max_frames

# Calculate x and xx.
mean = numpy.zeros((w,h), dtype = numpy.int64)
var = numpy.zeros((w,h), dtype = numpy.int64)

for i in range(l):
    if ((i%10)==0):
        print("Processing frame", i)

    aframe = in_file.loadAFrame(i)
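    # The snippet is truncated here; a hedged sketch (an assumption, not part
    # of the original example) of the usual continuation: accumulate x and x*x.
    aframe = aframe.astype(numpy.int64)
    mean += aframe
    var += aframe * aframe

# Convert the accumulators to a per-pixel mean and variance, assuming the
# frames returned by loadAFrame() match the (w, h) accumulator shape above.
mean_img = mean.astype(numpy.float64) / float(l)
var_img = var.astype(numpy.float64) / float(l) - mean_img * mean_img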
Example #57
def measurePSF(movie_name,
               zfile_name,
               movie_h5_name,
               psf_name,
               want2d=False,
               aoi_size=12,
               pixel_size=0.1,
               z_range=0.75,
               z_step=0.05):
    """
    movie_name - The name of the movie file.
    zfile_name - The name of the text file containing z offset data. If this does not exist
                 then the localizations z value will be used.
    movie_h5_name - The name of the HDF5 file containing the localization information.
    psf_name - The name of the file to save the measured PSF in.
    want2d - Measure a 2D PSF.
    aoi_size - The final AOI size will be 2x this number (in pixels).
    pixel_size - The pixel size in microns.
    z_range - The z range of the PSF (in microns). The actual z range is 2x z_range (i.e. 
                 from -z_range to z_range).
    z_step - The z granularity of the PSF (in microns).
    """
    # Create z scaling object.
    z_sclr = measurePSFUtils.ZScaler(z_range, z_step)

    # Load dax file, z offset file and molecule list file.
    dax_data = datareader.inferReader(movie_name)
    z_off = None
    if os.path.exists(zfile_name):
        data = numpy.loadtxt(zfile_name, ndmin=2)
        valid = data[:, 0]
        z_off = data[:, 1]

    if want2d:
        print("Measuring 2D PSF")
    else:
        print("Measuring 3D PSF")

    # Go through the frames identifying good peaks and adding them
    # to the average psf.
    #
    max_z = z_sclr.getMaxZ()

    average_psf = numpy.zeros((max_z, 2 * aoi_size, 2 * aoi_size))
    peaks_used = 0
    totals = numpy.zeros(max_z, dtype=numpy.int64)

    with saH5Py.SAH5Py(movie_h5_name) as h5:
        [dax_x, dax_y, dax_l] = dax_data.filmSize()
        for curf, locs in h5.localizationsIterator():

            # Select localizations in current frame & not near the edges.
            mask = (locs['x'] >
                    aoi_size) & (locs['x'] < (dax_x - aoi_size - 1)) & (
                        locs['y'] > aoi_size) & (locs['y'] <
                                                 (dax_y - aoi_size - 1))
            xr = locs['y'][mask] + 1
            yr = locs['x'][mask] + 1

            # Use the z offset file if it was specified, otherwise use localization z positions.
            if z_off is None:
                if (curf == 0):
                    print("Using fit z locations.")
                zr = locs['z'][mask]
            else:
                if (curf == 0):
                    print("Using z offset file.")
                if (abs(valid[curf]) < 1.0e-6):
                    continue
                zr = numpy.ones(xr.size) * z_off[curf]

            ht = locs['height'][mask]

            # Remove localizations that are too close to each other.
            mask = iaUtilsC.removeNeighborsMask(xr, yr, 2.0 * aoi_size)
            print(curf, "peaks in", xr.size, ", peaks out",
                  numpy.count_nonzero(mask))

            xr = xr[mask]
            yr = yr[mask]
            zr = zr[mask]
            ht = ht[mask]

            # Use remaining localizations to calculate spline.
            image = dax_data.loadAFrame(curf).astype(numpy.float64)

            for i in range(xr.size):
                xf = xr[i]
                yf = yr[i]
                zf = zr[i]
                if want2d:
                    zi = 0
                else:
                    zi = z_sclr.convert(zf)

                # Check that the z value is in range
                if z_sclr.inRange(zi):

                    # Extract PSF.
                    psf = measurePSFUtils.extractAOI(image, aoi_size, xf, yf)

                    # Add to average psf accumulator
                    average_psf[zi, :, :] += psf
                    totals[zi] += 1

    # Check that we got at least one valid measurement.
    #
    assert (numpy.max(totals) > 0)

    # Set the PSF to have zero average on the X/Y boundaries.
    #
    for i in range(max_z):
        edge = numpy.concatenate((average_psf[i, 0, :], average_psf[i, -1, :],
                                  average_psf[i, :, 0], average_psf[i, :, -1]))
        average_psf[i, :, :] -= numpy.mean(edge)

    # Normalize the PSF.
    #
    if want2d:
        max_z = 1

    # Note: I think it makes sense to normalize to a sum of 1.0 here as the user may
    #       be using the images of single localizations as the inputs. Unlike beads
    #       we can't assume that they are all the same brightness so normalizing by
    #       the number of events would make even less sense.
    #
    for i in range(max_z):
        print("z plane {0:0d} has {1:0d} samples".format(i, totals[i]))
        if (totals[i] > 0.0):
            average_psf[i, :, :] = average_psf[i, :, :] / numpy.sum(
                numpy.abs(average_psf[i, :, :]))

    # Normalize to unity maximum height.
    if (numpy.max(average_psf) > 0.0):
        average_psf = average_psf / numpy.max(average_psf)
    else:
        print("Warning! Measured PSF maxima is zero or negative!")

    # Save PSF (in image form).
    if True:
        with tifffile.TiffWriter("psf.tif") as tf:
            for i in range(max_z):
                tf.save(average_psf[i, :, :].astype(numpy.float32))

    # Save PSF.
    #
    #  At least for now the PSFs use nanometers, not microns.
    #
    z_range = z_range * 1.0e+3
    z_step = z_step * 1.0e+3

    if want2d:
        psf_dict = {
            "psf": average_psf[0, :, :],
            "pixel_size": pixel_size,
            "type": "2D",
            "version": 2.0
        }

    else:
        cur_z = -z_range
        z_vals = []
        for i in range(max_z):
            z_vals.append(cur_z)
            cur_z += z_step

        psf_dict = {
            "psf": average_psf,
            "pixel_size": pixel_size,
            "type": "3D",
            "version": 2.0,
            "zmin": -z_range,
            "zmax": z_range,
            "zvals": z_vals
        }

    with open(psf_name, 'wb') as fp:
        pickle.dump(psf_dict, fp)
def measurePSFBeads(movie_name,
                    zfile_name,
                    beads_file,
                    psf_name,
                    want2d=False,
                    aoi_size=12,
                    z_range=600.0,
                    z_step=50.0):

    # Load movie file.
    movie_data = datareader.inferReader(movie_name)

    #
    # Load the z-offset information for the dax file.
    #
    #   This is a text file with one line per frame that contains the
    #   z-offset (in nm) for that frame. Each line is a space separated
    #   valid, z_pos pair. If valid is 0 the frame will be ignored,
    #   otherwise it will be used.
    #
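    # A hypothetical example of the z offset file contents (not from the
    # original example), one "valid z_pos" pair per frame, z in nm:
    #
    #   1 -250.0
    #   1 -200.0
    #   0 0.0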
    data = numpy.loadtxt(zfile_name)
    valid = data[:, 0]
    z_off = data[:, 1]

    #
    # Load the locations of the beads.
    #
    #   This is a text file the contains the locations of the beads that
    #   will be used to construct the PSF. Each line is a space separated
    #   x, y pair of bead locations (in pixels).
    #
    #   One way to create this file is to look at the bead movie with
    #   visualizer.py and record the center positions of several beads.
    #
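    # A hypothetical example of the beads file contents (not from the original
    # example), one "x y" bead position (in pixels) per line:
    #
    #   45.5 112.0
    #   203.2 87.5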
    data = numpy.loadtxt(beads_file, ndmin=2)
    bead_x = data[:, 0]
    bead_y = data[:, 1]

    #
    # Go through the frames and add the bead images to the average psf. Z
    # positions are rounded to the nearest 50nm. You might need to
    # adjust z_range depending on your experiment.
    #
    z_mid = int(z_range / z_step)
    max_z = 2 * z_mid + 1
    average_psf = numpy.zeros((max_z, 4 * aoi_size, 4 * aoi_size))
    totals = numpy.zeros(max_z)
    [dax_x, dax_y, dax_l] = movie_data.filmSize()
    for curf in range(dax_l):

        if ((curf % 50) == 0):
            print("Processing frame:", curf)

        if (abs(valid[curf]) < 1.0e-6):
            #    print "skipping", valid[curf]
            continue

        # Use bead localization to calculate spline.
        image = movie_data.loadAFrame(curf).astype(numpy.float64)

        # Get frame z and check that it is in range.
        zf = z_off[curf]
        zi = int(round(zf / z_step)) + z_mid
        if (zi > -1) and (zi < max_z):

            for i in range(bead_x.size):

                xf = bead_x[i]
                yf = bead_y[i]
                xi = int(xf)
                yi = int(yf)

                # Get localization image.
                mat = image[xi - aoi_size:xi + aoi_size,
                            yi - aoi_size:yi + aoi_size]

                # Zoom in by 2x (scipy.ndimage.interpolation is deprecated; use scipy.ndimage directly).
                psf = scipy.ndimage.zoom(mat, 2.0)

                # Re-center image.
                psf = scipy.ndimage.shift(
                    psf, (-2.0 * (xf - xi), -2.0 * (yf - yi)), mode='nearest')

                # Add to average psf accumulator.
                average_psf[zi, :, :] += psf
                totals[zi] += 1

    # Force PSF to be zero (on average) at the boundaries.
    for i in range(max_z):
        edge = numpy.concatenate((average_psf[i, 0, :], average_psf[i, -1, :],
                                  average_psf[i, :, 0], average_psf[i, :, -1]))
        average_psf[i, :, :] -= numpy.mean(edge)

    # Normalize PSF.
    for i in range(max_z):
        if (totals[i] > 0.0):
            average_psf[i, :, :] = average_psf[i, :, :] / numpy.sum(
                numpy.abs(average_psf[i, :, :]))

    average_psf = average_psf / numpy.max(average_psf)

    # Save PSF (in image form).
    if True:
        import os
        import storm_analysis.sa_library.daxwriter as daxwriter
        dxw = daxwriter.DaxWriter(
            os.path.join(os.path.dirname(psf_name), "psf_beads.dax"),
            average_psf.shape[1], average_psf.shape[2])
        for i in range(max_z):
            #print i, numpy.max(average_psf[i,:,:])
            dxw.addFrame(1000.0 * average_psf[i, :, :] + 100)
        dxw.close()

    # Save PSF.
    cur_z = -z_range
    z_vals = []
    for i in range(max_z):
        z_vals.append(cur_z)
        cur_z += z_step

    psf_dict = {
        "psf": average_psf,
        "pixel_size": 0.080,  # 1/2 the camera pixel size, in microns.
        "type": "3D",
        "zmin": -z_range,
        "zmax": z_range,
        "zvals": z_vals
    }

    with open(psf_name, 'wb') as fp:
        pickle.dump(psf_dict, fp)
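# A minimal, hypothetical usage sketch for measurePSFBeads() above; the file
# names are placeholders and are not part of the original example.
measurePSFBeads("beads.dax", "beads_zoffsets.txt", "beads_locs.txt", "beads_psf.psf",
                aoi_size = 12, z_range = 600.0, z_step = 50.0)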
Example #59
    def checkMessage(self, tcp_message):
        movie = datareader.inferReader(
            os.path.join(self.directory, self.name + ".dax"))
        assert (movie.filmSize() == [256, 512, self.length])
Example #60
    parser = argparse.ArgumentParser(description = 'FISTA deconvolution - Beck and Teboulle, SIAM J. Imaging Sciences, 2009')

    parser.add_argument('--movie', dest='movie', type=str, required=True,
                        help = "The name of the movie to deconvolve, can be .dax, .tiff or .spe format.")
    parser.add_argument('--xml', dest='settings', type=str, required=True,
                        help = "The name of the settings xml file.")
    parser.add_argument('--output', dest='output', type=str, required=True,
                        help = "The name of the .tif file to save the results in.")

    args = parser.parse_args()

    # Load parameters
    parameters = params.ParametersSplinerFISTA().initFromFile(args.settings)

    # Open movie and load the first frame.
    movie_data = datareader.inferReader(args.movie)
    [x_size, y_size, z_size] = movie_data.filmSize()
    image = (movie_data.loadAFrame(0) - parameters.getAttr("camera_offset"))/parameters.getAttr("camera_gain")
    image = image.astype(numpy.float64)

    # Load spline.
    psf_object = splineToPSF.loadSpline(parameters.getAttr("spline"))
    
    # Do FISTA deconvolution.
    fdecon = FISTADecon(image.shape,
                        psf_object,
                        parameters.getAttr("fista_number_z"),
                        parameters.getAttr("fista_timestep"))

    if False:
        # Wavelet background removal.