Usage examples of the getZCenterIndex() peak-parameter index helper from the storm-analysis package, imported below as utilC or util_c.

Example #1
    def fitPeaks(self, peaks):

        # Adjust to z starting position.
        z_index = utilC.getZCenterIndex()
        peaks[:,z_index] = peaks[:,z_index] * float(self.mfitter.getSplineSize())

        if False:
            print("Before fitting")
            for i in range(5):
                print(" ", peaks[i,0], peaks[i,1], peaks[i,3], peaks[i,5], peaks[i,6], peaks[i,7])
            print("")

        # Fit to update peak locations.
        fit_peaks = self.mfitter.doFit(peaks)
        fit_peaks = self.mfitter.getGoodPeaks(fit_peaks, min_height = 0.9 * self.threshold)

        # Remove peaks that are too close to each other & refit.
        fit_peaks = utilC.removeClosePeaks(fit_peaks, self.sigma, self.fit_neighborhood)

        # Redo the fit for the remaining peaks.
        fit_peaks = self.mfitter.doFit(fit_peaks)
        fit_peaks = self.mfitter.getGoodPeaks(fit_peaks, min_height = 0.9*self.threshold)
        residual = self.mfitter.getResidual()

        if False:
            print("After fitting")
            for i in range(5):
                print(" ", fit_peaks[i,0], fit_peaks[i,1], fit_peaks[i,3], fit_peaks[i,5], fit_peaks[i,6], fit_peaks[i,7])
            print("")
        
        return [fit_peaks, residual]
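
For context, fitPeaks() expects the z column to hold the normalized (0.0 - 1.0) starting value produced by the peak finder; the first step above scales it to spline units. A minimal sketch of the column-index convention, assuming utilC is storm_analysis.sa_library.ia_utilities_c and with the peak count and values invented:

    import numpy
    import storm_analysis.sa_library.ia_utilities_c as utilC

    # One row per peak, one column per fit parameter. Columns are addressed
    # through the index helpers rather than hard-coded integers.
    peaks = numpy.zeros((10, utilC.getNPeakPar()))
    peaks[:, utilC.getZCenterIndex()] = 0.5  # illustrative mid-range z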
Example #2
    def __init__(self, parameters):
        fitting.PeakFinder.__init__(self, parameters)
        self.height_rescale = []
        self.mfilter = []
        self.mfilter_z = []
        self.z_value = []

        # Load the spline.
        self.s_to_psf = splineToPSF.loadSpline(parameters.getAttr("spline"))

        # Update margin based on the spline size.
        old_margin = self.margin
        self.margin = int((self.s_to_psf.getSize() + 1)/4 + 2)

        self.mfilter_z = parameters.getAttr("z_value", [0.0])
        for zval in self.mfilter_z:
            self.z_value.append(self.s_to_psf.getScaledZ(zval))

        if parameters.hasAttr("peak_locations"):

            # Correct for any difference in the margins.
            self.peak_locations[:,utilC.getXCenterIndex()] += self.margin - old_margin
            self.peak_locations[:,utilC.getYCenterIndex()] += self.margin - old_margin

            # Provide the "correct" starting z value.
            self.peak_locations[:,utilC.getZCenterIndex()] = self.z_value[0]
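
The margin update above scales with the spline footprint; a quick check of the formula, spline size invented:

    size = 30                         # illustrative spline size (pixels)
    margin = int((size + 1) / 4 + 2)  # int(9.75) -> 9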
Example #3
def convertToMultiFit(i3data, x_size, y_size, frame, nm_per_pixel, inverted=False):
    """
    Create a 3D-DAOSTORM, sCMOS or Spliner analysis compatible peak array from I3 data.

    Notes:
      (1) This uses the non-drift corrected positions.
      (2) This sets the initial fitting error to zero and the status to RUNNING.
    """
    i3data = maskData(i3data, (i3data['fr'] == frame))

    peaks = numpy.zeros((i3data.size, utilC.getNPeakPar()))
    
    peaks[:,utilC.getBackgroundIndex()] = i3data['bg']
    peaks[:,utilC.getHeightIndex()] = i3data['h']
    peaks[:,utilC.getZCenterIndex()] = i3data['z'] * 0.001

    if inverted:
        peaks[:,utilC.getXCenterIndex()] = y_size - i3data['x']
        peaks[:,utilC.getYCenterIndex()] = x_size - i3data['y']
        ax = i3data['ax']
        ww = i3data['w']
        peaks[:,utilC.getYWidthIndex()] = 0.5*numpy.sqrt(ww*ww/ax)/nm_per_pixel
        peaks[:,utilC.getXWidthIndex()] = 0.5*numpy.sqrt(ww*ww*ax)/nm_per_pixel
    else:
        peaks[:,utilC.getYCenterIndex()] = i3data['x'] - 1
        peaks[:,utilC.getXCenterIndex()] = i3data['y'] - 1
        ax = i3data['ax']
        ww = i3data['w']
        peaks[:,utilC.getXWidthIndex()] = 0.5*numpy.sqrt(ww*ww/ax)/nm_per_pixel
        peaks[:,utilC.getYWidthIndex()] = 0.5*numpy.sqrt(ww*ww*ax)/nm_per_pixel

    return peaks
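
The width conversion above is the inverse of the one in createFromMultiFit() below: Insight3 stores a single width w (in nm) and an aspect ratio ax, while the fitter uses separate x and y widths in pixels. A quick numpy check of the round trip, values invented:

    import numpy

    ww, ax = 300.0, 1.5  # I3 width (nm) and aspect ratio
    nm_per_pixel = 160.0

    wx = 0.5 * numpy.sqrt(ww * ww / ax) / nm_per_pixel  # fitter x width
    wy = 0.5 * numpy.sqrt(ww * ww * ax) / nm_per_pixel  # fitter y width

    # createFromMultiFit() reverses this: ww = 2*sqrt(wx*wy)*nm_per_pixel,
    # ax = wy/wx.
    assert numpy.isclose(2.0 * numpy.sqrt(wx * wy) * nm_per_pixel, ww)
    assert numpy.isclose(wy / wx, ax)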
Example #4
    def rescaleZ(self, peaks, zmin, zmax):
        z_index = utilC.getZCenterIndex()
        spline_range = zmax - zmin
        peaks[:, z_index] = peaks[:, z_index] * self.inv_zscale * spline_range + zmin
        return peaks
Example #5
def createFromMultiFit(molecules,
                       x_size,
                       y_size,
                       frame,
                       nm_per_pixel,
                       inverted=False):
    """
    Create an I3 data from the output of 3D-DAOSTORM, sCMOS or Spliner.
    """
    n_molecules = molecules.shape[0]

    h = molecules[:, 0]
    if inverted:
        xc = y_size - molecules[:, utilC.getXCenterIndex()]
        yc = x_size - molecules[:, utilC.getYCenterIndex()]
        wx = 2.0 * molecules[:, utilC.getXWidthIndex()] * nm_per_pixel
        wy = 2.0 * molecules[:, utilC.getYWidthIndex()] * nm_per_pixel
    else:
        xc = molecules[:, utilC.getYCenterIndex()] + 1
        yc = molecules[:, utilC.getXCenterIndex()] + 1
        wx = 2.0 * molecules[:, utilC.getYWidthIndex()] * nm_per_pixel
        wy = 2.0 * molecules[:, utilC.getXWidthIndex()] * nm_per_pixel

    bg = molecules[:, utilC.getBackgroundIndex()]
    zc = molecules[:, utilC.getZCenterIndex()] * 1000.0  # fitting is done in um, insight works in nm
    st = numpy.round(molecules[:, utilC.getStatusIndex()])
    err = molecules[:, utilC.getErrorIndex()]

    #
    # Calculate peak area, which is saved in the "a" field.
    #
    # Note that this is assuming that the peak is a 2D gaussian. This
    # will not calculate the correct area for a Spline..
    #
    parea = 2.0 * numpy.pi * h * molecules[:, utilC.getXWidthIndex()] * molecules[:, utilC.getYWidthIndex()]

    ax = wy / wx
    ww = numpy.sqrt(wx * wy)

    i3data = createDefaultI3Data(xc.size)
    posSet(i3data, 'x', xc)
    posSet(i3data, 'y', yc)
    posSet(i3data, 'z', zc)
    setI3Field(i3data, 'h', h)
    setI3Field(i3data, 'bg', bg)
    setI3Field(i3data, 'fi', st)
    setI3Field(i3data, 'a', parea)
    setI3Field(i3data, 'w', ww)
    setI3Field(i3data, 'ax', ax)
    setI3Field(i3data, 'fr', frame)
    setI3Field(i3data, 'i', err)

    return i3data
Example #6
    def findPeaks(self, peaks):

        # Adjust x,y for margin.
        peaks[:, utilC.getXCenterIndex()] += float(self.margin)
        peaks[:, utilC.getYCenterIndex()] += float(self.margin)

        # Convert z (in nanometers) to spline z units.
        zi = utilC.getZCenterIndex()
        peaks[:, zi] = self.s_to_psfs[0].getScaledZ(peaks[:, zi])

        # Split peaks into per-channel peaks.
        return self.mpu.splitPeaks(peaks)
Example #7
    def analyzeImage(self,
                     new_image,
                     bg_estimate=None,
                     save_residual=False,
                     verbose=False):

        image = fitting.padArray(new_image, self.peak_finder.margin)
        if bg_estimate is not None:
            bg_estimate = fitting.padArray(bg_estimate,
                                           self.peak_finder.margin)

        self.peak_finder.newImage(image, bg_estimate)
        self.peak_fitter.newImage(image)

        #
        # This is a lot simpler than 3D-DAOSTORM as we only do one pass;
        # hopefully the compressed sensing (FISTA) deconvolution finds all
        # the peaks, and then we do a single pass of fitting.
        #
        if True:
            peaks = self.peak_finder.findPeaks()
            [fit_peaks, residual] = self.peak_fitter.fitPeaks(peaks)

        #
        # This is for testing if just using FISTA followed by the center
        # of mass calculation is basically as good as also doing the
        # additional MLE spline fitting step.
        #
        # The short answer is that it appears that it is not. It is about
        # 1.3x worse in XY and about 4x worse in Z.
        #
        else:
            fit_peaks = self.peak_finder.findPeaks()

            # Adjust z scale.
            z_index = utilC.getZCenterIndex()
            z_size = (self.peak_fitter.spline.shape[2] - 1.0)
            status_index = utilC.getStatusIndex()
            fit_peaks[:, z_index] = z_size * fit_peaks[:, z_index]

            # Mark as converged.
            fit_peaks[:, status_index] = 1.0

            residual = None

        #
        # Subtract margin so that peaks are in the right
        # place with respect to the original image.
        #
        fit_peaks[:, utilC.getXCenterIndex()] -= float(self.peak_finder.margin)
        fit_peaks[:, utilC.getYCenterIndex()] -= float(self.peak_finder.margin)

        return [fit_peaks, residual]
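
A hypothetical driver loop around analyzeImage(), with the analyzer and movie names invented (the reader interface is the one used by measurePSF() below):

    movie = datareader.inferReader("movie.dax")
    [x_size, y_size, length] = movie.filmSize()
    for curf in range(length):
        frame = movie.loadAFrame(curf).astype(numpy.float64)
        [fit_peaks, residual] = analyzer.analyzeImage(frame)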
Example #8
    def getPeaks(self, threshold, margin):
        """
        Extract peaks from the deconvolved image and create
        an array that can be used by a peak fitter.
        
        FIXME: Need to compensate for up-sampling parameter in x,y.
        """

        fx = self.getXVector()

        # Get area, position, height.
        fd_peaks = fdUtil.getPeaks(fx, threshold, margin)
        num_peaks = fd_peaks.shape[0]

        peaks = numpy.zeros((num_peaks, utilC.getNPeakPar()))

        peaks[:, utilC.getXWidthIndex()] = numpy.ones(num_peaks)
        peaks[:, utilC.getYWidthIndex()] = numpy.ones(num_peaks)

        peaks[:, utilC.getXCenterIndex()] = fd_peaks[:, 2]
        peaks[:, utilC.getYCenterIndex()] = fd_peaks[:, 1]

        # Calculate height.
        #
        # FIXME: Typically the starting value for the peak height will be
        #        under-estimated unless a large enough number of FISTA
        #        iterations is performed to completely de-convolve the image.
        #
        h_index = utilC.getHeightIndex()
        #peaks[:,h_index] = fd_peaks[:,0]
        for i in range(num_peaks):
            peaks[i, h_index] = fd_peaks[i, 0] * self.psf_heights[int(round(fd_peaks[i, 3]))]

        # Calculate z (0.0 - 1.0).
        if (fx.shape[2] > 1):
            peaks[:, utilC.getZCenterIndex()] = fd_peaks[:, 3] / (float(fx.shape[2]) - 1.0)

        # Background term calculation.
        bg_index = utilC.getBackgroundIndex()
        for i in range(num_peaks):
            ix = int(round(fd_peaks[i, 1]))
            iy = int(round(fd_peaks[i, 2]))
            peaks[i, bg_index] = self.background[ix, iy]

        return peaks
Example #9
    def getPeaks(self, threshold, margin):

        fx = self.getXVector()

        # Get area, position, height.
        fd_peaks = fdUtil.getPeaks(fx, threshold, margin)
        num_peaks = fd_peaks.shape[0]

        peaks = numpy.zeros((num_peaks, utilC.getNResultsPar()))

        peaks[:, utilC.getXWidthIndex()] = numpy.ones(num_peaks)
        peaks[:, utilC.getYWidthIndex()] = numpy.ones(num_peaks)

        peaks[:, utilC.getXCenterIndex()] = fd_peaks[:, 2]
        peaks[:, utilC.getYCenterIndex()] = fd_peaks[:, 1]

        # Calculate height.
        #
        # FIXME: Typically the starting value for the peak height will be
        #        under-estimated unless a large enough number of FISTA
        #        iterations is performed to completely de-convolve the image.
        #
        h_index = utilC.getHeightIndex()
        #peaks[:,h_index] = fd_peaks[:,0]
        for i in range(num_peaks):
            peaks[i, h_index] = fd_peaks[i, 0] * self.psf_heights[int(round(fd_peaks[i, 3]))]

        # Calculate z (0.0 - 1.0).
        peaks[:, utilC.getZCenterIndex()] = fd_peaks[:, 3] / (float(fx.shape[2]) - 1.0)

        # Background term calculation.
        bg_index = utilC.getBackgroundIndex()
        for i in range(num_peaks):
            ix = int(round(fd_peaks[i, 1]))
            iy = int(round(fd_peaks[i, 2]))
            peaks[i, bg_index] = self.background[ix, iy]

        return peaks
Example #10
    fdecon.decon(parameters.getAttr("fista_iterations"),
                 parameters.getAttr("fista_lambda"),
                 verbose=True)

    # Save results.
    fx = fdecon.getXVector()
    print(numpy.min(fx), numpy.max(fx))
    decon_data = daxwriter.DaxWriter(sys.argv[3], fx.shape[0], fx.shape[1])
    for i in range(fx.shape[2]):
        decon_data.addFrame(fx[:, :, i])
    decon_data.close()

    # Find peaks in the decon data.
    peaks = fdecon.getPeaks(parameters.getAttr("threshold"), 5)

    zci = utilC.getZCenterIndex()
    z_min, z_max = fdecon.getZRange()
    peaks[:, zci] = 1.0e-3 * ((z_max - z_min) * peaks[:, zci] + z_min)

    i3_writer = writeinsight3.I3Writer(sys.argv[3][:-4] + "_flist.bin")
    i3_writer.addMultiFitMolecules(peaks, x_size, y_size, 1,
                                   parameters.getAttr("pixel_size"))
    i3_writer.close()
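
The z rescale above undoes the 0.0 - 1.0 normalization applied by getPeaks(); assuming getZRange() returns nanometers, the 1.0e-3 factor converts the result to microns. A quick numeric check, range values invented:

    z_min, z_max = -500.0, 500.0  # nm, illustrative
    zc = 0.75                     # normalized z from getPeaks()
    print(1.0e-3 * ((z_max - z_min) * zc + z_min))  # 0.25 (um)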

Example #11
def measurePSF(movie_name, zfile_name, movie_mlist, psf_name,
               want2d = False, aoi_size = 12, z_range = 750.0, z_step = 50.0):
    """
    The actual z range is 2x z_range (i.e. from -z_range to z_range).
    """
    
    # Load dax file, z offset file and molecule list file.
    dax_data = datareader.inferReader(movie_name)
    z_offsets = None
    if os.path.exists(zfile_name):
        try:
            z_offsets = numpy.loadtxt(zfile_name, ndmin = 2)[:,1]
        except IndexError:
            z_offsets = None
            print("z offsets were not loaded.")
    i3_data = readinsight3.loadI3File(movie_mlist)

    if want2d:
        print("Measuring 2D PSF")
    else:
        print("Measuring 3D PSF")

    #
    # Go through the frames, identifying good peaks and adding them to
    # the average psf. For 3D, molecule z positions are rounded to the
    # nearest z_step (50nm by default).
    #
    z_mid = int(z_range/z_step)
    max_z = 2 * z_mid + 1

    average_psf = numpy.zeros((max_z,4*aoi_size,4*aoi_size))
    peaks_used = 0
    totals = numpy.zeros(max_z)
    [dax_x, dax_y, dax_l] = dax_data.filmSize()
    for curf in range(dax_l):

        # Select localizations in current frame & not near the edges.
        mask = ((i3_data['fr'] == curf+1) &
                (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_x - aoi_size - 1)) &
                (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_y - aoi_size - 1)))
        xr = i3_data['x'][mask]
        yr = i3_data['y'][mask]

        # Use the z offset file if it was specified, otherwise use localization z positions.
        if z_offsets is None:
            if (curf == 0):
                print("Using fit z locations.")
            zr = i3_data['z'][mask]
        else:
            if (curf == 0):
                print("Using z offset file.")
            zr = numpy.ones(xr.size) * z_offsets[curf]

        ht = i3_data['h'][mask]

        # Remove localizations that are too close to each other.
        in_peaks = numpy.zeros((xr.size,util_c.getNPeakPar()))
        in_peaks[:,util_c.getXCenterIndex()] = xr
        in_peaks[:,util_c.getYCenterIndex()] = yr
        in_peaks[:,util_c.getZCenterIndex()] = zr
        in_peaks[:,util_c.getHeightIndex()] = ht

        out_peaks = util_c.removeNeighbors(in_peaks, 2*aoi_size)
        #out_peaks = util_c.removeNeighbors(in_peaks, aoi_size)

        print(curf, "peaks in", in_peaks.shape[0], ", peaks out", out_peaks.shape[0])

        # Use remaining localizations to calculate spline.
        image = dax_data.loadAFrame(curf).astype(numpy.float64)

        xr = out_peaks[:,util_c.getXCenterIndex()]
        yr = out_peaks[:,util_c.getYCenterIndex()]
        zr = out_peaks[:,util_c.getZCenterIndex()]
        ht = out_peaks[:,util_c.getHeightIndex()]

        for i in range(xr.size):
            xf = xr[i]
            yf = yr[i]
            zf = zr[i]
            xi = int(xf)
            yi = int(yf)
            if want2d:
                zi = 0
            else:
                zi = int(round(zf/z_step) + z_mid)

            # check the z is in range
            if (zi > -1) and (zi < max_z):

                # get localization image
                mat = image[xi-aoi_size:xi+aoi_size,
                            yi-aoi_size:yi+aoi_size]

                # zoom in by 2x
                psf = scipy.ndimage.interpolation.zoom(mat, 2.0)

                # re-center image
                psf = scipy.ndimage.interpolation.shift(psf, (-2.0*(xf-xi), -2.0*(yf-yi)), mode='nearest')

                # add to average psf accumulator
                average_psf[zi,:,:] += psf
                totals[zi] += 1

    # Force PSF to be zero (on average) at the boundaries.
    for i in range(max_z):
        edge = numpy.concatenate((average_psf[i,0,:],
                                  average_psf[i,-1,:],
                                  average_psf[i,:,0],
                                  average_psf[i,:,-1]))
        average_psf[i,:,:] -= numpy.mean(edge)

    # Normalize the PSF.
    if want2d:
        max_z = 1

    for i in range(max_z):
        print(i, totals[i])
        if (totals[i] > 0.0):
            average_psf[i,:,:] = average_psf[i,:,:]/numpy.sum(numpy.abs(average_psf[i,:,:]))

    average_psf = average_psf/numpy.max(average_psf)

    # Save PSF (in image form).
    if True:
        import storm_analysis.sa_library.daxwriter as daxwriter
        dxw = daxwriter.DaxWriter(os.path.join(os.path.dirname(psf_name), "psf.dax"),
                                  average_psf.shape[1],
                                  average_psf.shape[2])
        for i in range(max_z):
            dxw.addFrame(1000.0 * average_psf[i,:,:] + 100)
        dxw.close()

    # Save PSF.
    if want2d:
        psf_dict = {"psf" : average_psf[0,:,:],
                    "type" : "2D"}

    else:
        cur_z = -z_range
        z_vals = []
        for i in range(max_z):
            z_vals.append(cur_z)
            cur_z += z_step

        psf_dict = {"psf" : average_psf,
                    "pixel_size" : 0.080, # 1/2 the camera pixel size in nm.
                    "type" : "3D",
                    "zmin" : -z_range,
                    "zmax" : z_range,
                    "zvals" : z_vals}

    with open(psf_name, 'wb') as fp:
        pickle.dump(psf_dict, fp)
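
A minimal sketch of reading back the pickle that measurePSF() writes; the file name stands in for whatever was passed as psf_name, and the keys are those of psf_dict above:

    import pickle

    with open("psf.pkl", 'rb') as fp:  # hypothetical psf_name
        psf_dict = pickle.load(fp)

    print(psf_dict["type"])  # "2D" or "3D"
    if psf_dict["type"] == "3D":
        print(psf_dict["zmin"], psf_dict["zmax"], psf_dict["zvals"])
    print(psf_dict["psf"].shape)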
Example #12
    def __init__(self, parameters):
        super(MPPeakFinder, self).__init__(parameters)

        self.atrans = [None]
        self.backgrounds = []
        self.bg_filter = None
        self.bg_filter_sigma = parameters.getAttr("bg_filter_sigma")
        self.check_mode = False
        self.height_rescale = []
        self.images = []
        self.mapping_filename = None
        self.mfilters = []
        self.mfilters_z = []
        self.mpu = None
        self.n_channels = 0
        self.s_to_psfs = []
        self.variances = []
        self.vfilters = []
        self.xt = []
        self.yt = []
        self.z_values = []

        # Print warning about check mode
        if self.check_mode:
            print("Warning: Running in check mode.")

        # Load the splines.
        for spline_attr in mpUtilC.getSplineAttrs(parameters):
            self.s_to_psfs.append(
                splineToPSF.loadSpline(parameters.getAttr(spline_attr)))
            self.n_channels += 1

        # Assert that all the splines are the same size.
        for i in range(1, len(self.s_to_psfs)):
            assert (self.s_to_psfs[0].getSize() == self.s_to_psfs[i].getSize())

        #
        # Update margin based on the spline size. Note the assumption
        # that all of the splines are the same size, or at least smaller
        # than the spline for plane 0.
        #
        old_margin = self.margin
        self.margin = int((self.s_to_psfs[0].getSize() + 1) / 4 + 2)

        # Load the plane to plane mapping data & create affine transform objects.
        mappings = {}
        if parameters.hasAttr("mapping"):
            if os.path.exists(parameters.getAttr("mapping")):
                self.mapping_filename = parameters.getAttr("mapping")
                with open(parameters.getAttr("mapping"), 'rb') as fp:
                    mappings = pickle.load(fp)

        # Use self.margin - 1, because we added 1 to the x,y coordinates when we saved them.
        for i in range(self.n_channels - 1):
            self.xt.append(
                mpUtilC.marginCorrect(mappings["0_" + str(i + 1) + "_x"],
                                      self.margin - 1))
            self.yt.append(
                mpUtilC.marginCorrect(mappings["0_" + str(i + 1) + "_y"],
                                      self.margin - 1))
            self.atrans.append(
                affineTransformC.AffineTransform(xt=self.xt[i], yt=self.yt[i]))

        #
        # Note the assumption that the splines for each plane all use
        # the same z scale / have the same z range.
        #
        self.mfilters_z = parameters.getAttr("z_value", [0.0])
        for zval in self.mfilters_z:
            self.z_values.append(self.s_to_psfs[0].getScaledZ(zval))

        if parameters.hasAttr("peak_locations"):

            # Correct for any difference in the margins.
            self.peak_locations[:, utilC.getXCenterIndex()] += self.margin - old_margin
            self.peak_locations[:, utilC.getYCenterIndex()] += self.margin - old_margin

            # Provide the "correct" starting z value.
            #
            # FIXME: Should also allow the user to specify the peak z location
            #        as any fixed value could be so far off for some of the
            #        localizations that the fitting will fail.
            #
            self.peak_locations[:, utilC.getZCenterIndex()] = self.z_values[0]
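
The mapping pickle loaded above is keyed by channel pair and axis ("0_1_x", "0_1_y", ...). A hypothetical sketch of writing one, with the coefficient layout assumed (first-order polynomial: offset, x term, y term) and identity values invented:

    import pickle
    import numpy

    mappings = {"0_1_x": numpy.array([0.0, 1.0, 0.0]),
                "0_1_y": numpy.array([0.0, 0.0, 1.0])}

    with open("map.map", 'wb') as fp:  # hypothetical file name
        pickle.dump(mappings, fp)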
Example #13
cubic_fit.initializeMultiFit.argtypes = [
    ndpointer(dtype=numpy.float64), ctypes.c_double, ctypes.c_int, ctypes.c_int
]

cubic_fit.newImage.argtypes = [ndpointer(dtype=numpy.float64)]

cubic_fit.newPeaks2D.argtypes = [ndpointer(dtype=numpy.float64), ctypes.c_int]

cubic_fit.newPeaks3D.argtypes = [ndpointer(dtype=numpy.float64), ctypes.c_int]

# Globals
default_tol = 1.0e-6
height_index = utilC.getHeightIndex()
n_results_par = utilC.getNResultsPar()
status_index = utilC.getStatusIndex()
z_index = utilC.getZCenterIndex()


#
# Functions
#
def fSpline2D(x, y):
    return cubic_fit.fSpline2D(x, y)


def fSpline3D(x, y, z):
    return cubic_fit.fSpline3D(x, y, z)


#
# Classes.
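
The module-level index globals above let callers address peak-array columns symbolically rather than with hard-coded integers; a minimal sketch, peak count and values invented:

    import numpy

    peaks = numpy.zeros((4, n_results_par))
    peaks[:, z_index] = 0.5        # spline-scaled starting z
    peaks[:, status_index] = 0.0   # running; converged peaks get 1.0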