Code example #1
    def getGoodPeaks(self, peaks, min_height, min_width):
        """
        Create a new list from peaks containing only those peaks that meet 
        the specified criteria for minimum peak height and width.
        """
        if(peaks.shape[0]>0):
            min_width = 0.5 * min_width

            status_index = utilC.getStatusIndex()
            height_index = utilC.getHeightIndex()
            xwidth_index = utilC.getXWidthIndex()
            ywidth_index = utilC.getYWidthIndex()

            if self.verbose:
                tmp = numpy.ones(peaks.shape[0])                
                print("getGoodPeaks")
                for i in range(peaks.shape[0]):
                    print(i, peaks[i,0], peaks[i,1], peaks[i,3], peaks[i,2], peaks[i,4], peaks[i,7])
                print("Total peaks:", numpy.sum(tmp))
                print("  fit error:", numpy.sum(tmp[(peaks[:,status_index] != 2.0)]))
                print("  min height:", numpy.sum(tmp[(peaks[:,height_index] > min_height)]))
                print("  min width:", numpy.sum(tmp[(peaks[:,xwidth_index] > min_width) & (peaks[:,ywidth_index] > min_width)]))
                print("")
            mask = (peaks[:,status_index] != 2.0) & (peaks[:,height_index] > min_height) & (peaks[:,xwidth_index] > min_width) & (peaks[:,ywidth_index] > min_width)
            return peaks[mask,:]
        else:
            return peaks
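A minimal usage sketch of the filter above, using a synthetic peak array. The column positions below are hypothetical stand-ins for whatever the utilC.get*Index() helpers return in a real build; only the masking logic is the point.

import numpy

# Hypothetical column layout; the real indices come from utilC.get*Index().
HEIGHT, STATUS, XWIDTH, YWIDTH = 0, 1, 2, 3

peaks = numpy.array([[150.0, 0.0, 1.4, 1.3],    # kept
                     [300.0, 2.0, 1.5, 1.5],    # status 2.0 (fit ERROR), rejected
                     [ 40.0, 0.0, 1.6, 1.4]])   # below min_height, rejected

min_height = 100.0
min_width = 0.5 * 2.0   # getGoodPeaks() halves min_width before comparing.

mask = ((peaks[:, STATUS] != 2.0) &
        (peaks[:, HEIGHT] > min_height) &
        (peaks[:, XWIDTH] > min_width) &
        (peaks[:, YWIDTH] > min_width))
print(peaks[mask, :])   # only the first row survives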
Code example #2
    def getGoodPeaks(self, peaks, threshold):
        #
        # FIXME: We'd like to have a minimum height threshold, but the
        #        threshold parameter is a relative value not an absolute.
        #        For now we are just rejecting ERROR peaks.
        #
        if (peaks.size > 0):
            #
            # In the C library, the peaks that represent a single object
            # in multiple channels will all have the same status and height
            # values, so we can filter here with some confidence that
            # we'll keep or eliminate all the peaks in a particular group
            # at the same time. We attempt to enforce this with an assertion
            # statement, but it is an imperfect test as we could eliminate
            # 1 peak from each of 4 groups and still pass, even though this
            # would completely mess up the analysis.
            #
            status_index = utilC.getStatusIndex()
            height_index = utilC.getHeightIndex()

            #mask = (peaks[:,status_index] != 2.0) & (peaks[:,height_index] > min_height)
            mask = (peaks[:, status_index] != 2.0)
            if self.verbose:
                print(" ", numpy.sum(mask), "were good out of", peaks.shape[0])

            #
            # For debugging.
            #
            if False:
                xw_index = utilC.getXWidthIndex()
                yw_index = utilC.getYWidthIndex()
                for i in range(peaks.shape[0]):
                    if (peaks[i, xw_index] != peaks[i, yw_index]):
                        print("bad peak width detected", i, peaks[i, xw_index],
                              peaks[i, yw_index])

            #
            # Debugging check that the peak status markings are actually
            # in sync.
            #
            if True:
                n_peaks = int(peaks.shape[0] / self.n_channels)
                not_bad = True
                for i in range(n_peaks):
                    if (mask[i] != mask[i + n_peaks]):
                        print("Problem detected with peak", i)
                        print("  ", peaks[i, :])
                        print("  ", peaks[i + n_peaks, :])
                        not_bad = False
                assert not_bad

            masked_peaks = peaks[mask, :]

            # The number of peaks should always be a multiple of the number of channels.
            assert ((masked_peaks.shape[0] % self.n_channels) == 0)

            return masked_peaks

        else:
            return peaks
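The comment above concedes that the assertions are imperfect tests. If the peaks really are ordered channel-major, as the mask[i + n_peaks] indexing implies, a stricter check is possible. This is a sketch of that idea, not code from the library:

import numpy

def channelsInSync(mask, n_channels):
    # True only if every channel's mask agrees for each peak group;
    # the debugging loop above only compares the first two channels.
    by_channel = mask.reshape(n_channels, mask.size // n_channels)
    return bool(numpy.all(by_channel == by_channel[0, :]))

Because it operates on the mask itself rather than on the masked peak count, assert channelsInSync(mask, self.n_channels) would also catch the "one peak lost from each of four groups" case that slips past the modulo assertion.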
Code example #3
def convertToMultiFit(i3data, x_size, y_size, frame, nm_per_pixel, inverted=False):
    """
    Create a 3D-DAOSTORM, sCMOS or Spliner analysis compatible peak array from I3 data.

    Notes:
      (1) This uses the non-drift corrected positions.
      (2) This sets the initial fitting error to zero and the status to RUNNING.
    """
    i3data = maskData(i3data, (i3data['fr'] == frame))

    peaks = numpy.zeros((i3data.size, utilC.getNPeakPar()))
    
    peaks[:,utilC.getBackgroundIndex()] = i3data['bg']
    peaks[:,utilC.getHeightIndex()] = i3data['h']
    peaks[:,utilC.getZCenterIndex()] = i3data['z'] * 0.001

    if inverted:
        peaks[:,utilC.getXCenterIndex()] = y_size - i3data['x']
        peaks[:,utilC.getYCenterIndex()] = x_size - i3data['y']
        ax = i3data['ax']
        ww = i3data['w']
        peaks[:,utilC.getYWidthIndex()] = 0.5*numpy.sqrt(ww*ww/ax)/nm_per_pixel
        peaks[:,utilC.getXWidthIndex()] = 0.5*numpy.sqrt(ww*ww*ax)/nm_per_pixel
    else:
        peaks[:,utilC.getYCenterIndex()] = i3data['x'] - 1
        peaks[:,utilC.getXCenterIndex()] = i3data['y'] - 1
        ax = i3data['ax']
        ww = i3data['w']
        peaks[:,utilC.getXWidthIndex()] = 0.5*numpy.sqrt(ww*ww/ax)/nm_per_pixel
        peaks[:,utilC.getYWidthIndex()] = 0.5*numpy.sqrt(ww*ww*ax)/nm_per_pixel

    return peaks
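The width expressions are the inversion of the (w, ax) encoding in the I3 data: taking wx and wy in pixels, the formulas above imply w = 2 * nm_per_pixel * sqrt(wx * wy) and ax = wy / wx. A quick numeric round trip (a sketch, not part of the library):

import numpy

nm_per_pixel = 160.0
wx, wy = 1.2, 1.8                              # widths in pixels

ww = 2.0 * nm_per_pixel * numpy.sqrt(wx * wy)  # I3 'w', in nm
ax = wy / wx                                   # I3 'ax', the aspect ratio

# These reproduce the expressions in convertToMultiFit() above.
assert numpy.isclose(0.5 * numpy.sqrt(ww * ww / ax) / nm_per_pixel, wx)
assert numpy.isclose(0.5 * numpy.sqrt(ww * ww * ax) / nm_per_pixel, wy)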
Code example #4
    def getGoodPeaks(self, peaks, min_height, min_width):
        """
        Create a new list from peaks containing only those peaks that meet 
        the specified criteria for minimum peak height and width.
        """
        if(peaks.shape[0]>0):
            min_width = 0.5 * min_width

            status_index = utilC.getStatusIndex()
            height_index = utilC.getHeightIndex()
            xwidth_index = utilC.getXWidthIndex()
            ywidth_index = utilC.getYWidthIndex()

            if self.verbose:
                tmp = numpy.ones(peaks.shape[0])                
                print("getGoodPeaks")
                for i in range(peaks.shape[0]):
                    print(i, peaks[i,0], peaks[i,1], peaks[i,3], peaks[i,2], peaks[i,4])
                print("Total peaks:", numpy.sum(tmp))
                print("  fit error:", numpy.sum(tmp[(peaks[:,status_index] != 2.0)]))
                print("  min height:", numpy.sum(tmp[(peaks[:,height_index] > min_height)]))
                print("  min width:", numpy.sum(tmp[(peaks[:,xwidth_index] > min_width) & (peaks[:,ywidth_index] > min_width)]))
                print("")
            mask = (peaks[:,status_index] != 2.0) & (peaks[:,height_index] > min_height) & (peaks[:,xwidth_index] > min_width) & (peaks[:,ywidth_index] > min_width)
            return peaks[mask,:]
        else:
            return peaks
Code example #5
    def __init__(self, parameters):

        # Initialized from parameters.
        self.find_max_radius = parameters.getAttr("find_max_radius", 5)     # Radius (in pixels) over which the maxima is maximal.
        self.iterations = parameters.getAttr("iterations")                  # Maximum number of cycles of peak finding, fitting and subtraction to perform.
        self.sigma = parameters.getAttr("sigma")                            # Peak sigma (in pixels).
        self.threshold = parameters.getAttr("threshold")                    # Peak minimum threshold (height, in camera units).
        self.z_value = parameters.getAttr("z_value", 0.0)                   # The starting z value to use for peak fitting.

        # Other member variables.
        self.background = None                                              # Current estimate of the image background.
        self.image = None                                                   # The original image.
        self.margin = PeakFinderFitter.margin                               # Size of the unanalyzed "edge" around the image.
        self.neighborhood = PeakFinder.unconverged_dist * self.sigma        # Radius for marking neighbors as unconverged.
        self.new_peak_radius = PeakFinder.new_peak_dist                     # Minimum allowed distance between new peaks and current peaks.
        self.parameters = parameters                                        # Keep access to the parameters object.
        self.peak_locations = None                                          # Initial peak locations, as explained below.
        self.peak_mask = None                                               # Mask for limiting peak identification to a particular AOI.
        self.taken = None                                                   # Spots in the image where a peak has already been added.

        
        #
        # This is for if you already know where you want fitting to happen, for
        # example in a bead calibration movie where you just want to use the
        # approximate locations as inputs for fitting.
        #
        # peak_locations is a text file with the peak x, y, height and background
        # values as whitespace-separated columns (x and y positions are in pixels,
        # as determined using the visualizer).
        #
        # 1.0 2.0 1000.0 100.0
        # 10.0 5.0 2000.0 200.0
        # ...
        #
        if parameters.hasAttr("peak_locations"):
            print("Using peak starting locations specified in", parameters.getAttr("peak_locations"))

            # Only do one cycle of peak finding as we'll always return the same locations.
            if (self.iterations != 1):
                print("WARNING: setting number of iterations to 1!")
                self.iterations = 1

            # Load peak x,y locations.
            peak_locs = numpy.loadtxt(parameters.getAttr("peak_locations"), ndmin = 2)
            print(peak_locs.shape)

            # Create peak array.
            self.peak_locations = numpy.zeros((peak_locs.shape[0],
                                               utilC.getNPeakPar()))
            self.peak_locations[:,utilC.getXCenterIndex()] = peak_locs[:,1] + self.margin
            self.peak_locations[:,utilC.getYCenterIndex()] = peak_locs[:,0] + self.margin
            self.peak_locations[:,utilC.getHeightIndex()] = peak_locs[:,2]
            self.peak_locations[:,utilC.getBackgroundIndex()] = peak_locs[:,3]

            self.peak_locations[:,utilC.getXWidthIndex()] = numpy.ones(peak_locs.shape[0]) * self.sigma
            self.peak_locations[:,utilC.getYWidthIndex()] = numpy.ones(peak_locs.shape[0]) * self.sigma
Code example #6
    def getGoodPeaks(self, peaks, min_height = 0.0):
        if (peaks.size > 0):
            status_index = utilC.getStatusIndex()
            height_index = utilC.getHeightIndex()

            mask = (peaks[:,status_index] != 2.0) & (peaks[:,height_index] > min_height)
            if self.verbose:
                print(" ", numpy.sum(mask), "were good out of", peaks.shape[0])
            return peaks[mask,:]
        else:
            return peaks
Code example #7
    def getGoodPeaks(self, peaks, min_height=0.0):
        if (peaks.size > 0):
            status_index = utilC.getStatusIndex()
            height_index = utilC.getHeightIndex()

            mask = (peaks[:, status_index] != 2.0) & (peaks[:, height_index] >
                                                      min_height)
            if self.verbose:
                print(" ", numpy.sum(mask), "were good out of", peaks.shape[0])
            return peaks[mask, :]
        else:
            return peaks
Code example #8
    def getPeaks(self, threshold, margin):
        """
        Extract peaks from the deconvolved image and create
        an array that can be used by a peak fitter.
        
        FIXME: Need to compensate for up-sampling parameter in x,y.
        """

        fx = self.getXVector()

        # Get area, position, height.
        fd_peaks = fdUtil.getPeaks(fx, threshold, margin)
        num_peaks = fd_peaks.shape[0]

        peaks = numpy.zeros((num_peaks, utilC.getNPeakPar()))

        peaks[:, utilC.getXWidthIndex()] = numpy.ones(num_peaks)
        peaks[:, utilC.getYWidthIndex()] = numpy.ones(num_peaks)

        peaks[:, utilC.getXCenterIndex()] = fd_peaks[:, 2]
        peaks[:, utilC.getYCenterIndex()] = fd_peaks[:, 1]

        # Calculate height.
        #
        # FIXME: Typically the starting value for the peak height will be
        #        under-estimated unless a large enough number of FISTA
        #        iterations is performed to completely de-convolve the image.
        #
        h_index = utilC.getHeightIndex()
        #peaks[:,h_index] = fd_peaks[:,0]
        for i in range(num_peaks):
            peaks[i, h_index] = fd_peaks[i, 0] * self.psf_heights[int(
                round(fd_peaks[i, 3]))]

        # Calculate z (0.0 - 1.0).
        if (fx.shape[2] > 1):
            peaks[:, utilC.getZCenterIndex()] = fd_peaks[:, 3] / (
                float(fx.shape[2]) - 1.0)

        # Background term calculation.
        bg_index = utilC.getBackgroundIndex()
        for i in range(num_peaks):
            ix = int(round(fd_peaks[i, 1]))
            iy = int(round(fd_peaks[i, 2]))
            peaks[i, bg_index] = self.background[ix, iy]

        return peaks
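A note on the z scaling above: fd_peaks[:, 3] is the index of the z plane in the deconvolved stack, so dividing by float(fx.shape[2]) - 1.0 maps plane 0 to z = 0.0 and the last plane to z = 1.0 (with an 8 plane stack, plane index 3 maps to 3 / 7, about 0.43). The (fx.shape[2] > 1) guard avoids a division by zero for a single plane (2D) stack.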
Code example #9
def analyze(base_name, mlist_name, settings_name):
    parameters = params.ParametersMultiplane().initFromFile(settings_name)

    # Set to only analyze 1 frame, 1 iteration of peak finding.
    parameters.setAttr("max_frame", "int", 1)
    parameters.setAttr("iterations", "int", 1)

    # Analyze one frame.
    finder = PFPeakFinder(parameters)
    fitter = PFPeakFitter(parameters)
    finder_fitter = findPeaksStd.MPFinderFitter(parameters, finder, fitter)
    reader = findPeaksStd.MPMovieReader(base_name=base_name,
                                        parameters=parameters)
    stdAnalysis.standardAnalysis(finder_fitter, reader, mlist_name, parameters)

    # Evaluate found vs fit parameters. Note that found_peaks and
    # fitted_peaks are not defined in this excerpt; they are captured
    # elsewhere during the analysis (e.g. by the PFPeakFinder and
    # PFPeakFitter subclasses).
    hi = utilC.getHeightIndex()
    xi = utilC.getXCenterIndex()
    yi = utilC.getYCenterIndex()

    # First identify peak pairs.
    fi_x = fitted_peaks[:, xi]
    fi_y = fitted_peaks[:, yi]
    fo_x = found_peaks[:, xi]
    fo_y = found_peaks[:, yi]

    dd = utilC.peakToPeakDist(fo_x, fo_y, fi_x, fi_y)
    di = utilC.peakToPeakIndex(fo_x, fo_y, fi_x, fi_y)

    print(numpy.mean(dd), fi_x.size, fo_x.size)

    fo_h = []
    fi_h = []
    for i in range(dd.size):
        if (dd[i] < 2.0):
            fo_h.append(found_peaks[i, hi])
            fi_h.append(fitted_peaks[di[i], hi])

    fi_h = numpy.array(fi_h)
    fo_h = numpy.array(fo_h)

    print(numpy.mean(fi_h), fi_h.size)
    print(numpy.mean(fo_h), fo_h.size)
    print(numpy.mean(fi_h) / numpy.mean(fo_h))
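peakToPeakDist() and peakToPeakIndex() are C helpers; the way they are used here implies that, for each found peak, they return the distance to the closest fitted peak and that peak's index. A pure numpy sketch of the assumed semantics:

import numpy

def peakToPeakSketch(fo_x, fo_y, fi_x, fi_y):
    # For each (fo_x, fo_y) peak: distance to, and index of, the
    # nearest (fi_x, fi_y) peak.
    dx = fo_x[:, None] - fi_x[None, :]
    dy = fo_y[:, None] - fi_y[None, :]
    d = numpy.sqrt(dx * dx + dy * dy)
    di = numpy.argmin(d, axis=1)
    dd = d[numpy.arange(fo_x.size), di]
    return [dd, di]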
Code example #10
    def getPeaks(self, threshold, margin):

        fx = self.getXVector()

        # Get area, position, height.
        fd_peaks = fdUtil.getPeaks(fx, threshold, margin)
        num_peaks = fd_peaks.shape[0]

        peaks = numpy.zeros((num_peaks, utilC.getNResultsPar()))

        peaks[:, utilC.getXWidthIndex()] = numpy.ones(num_peaks)
        peaks[:, utilC.getYWidthIndex()] = numpy.ones(num_peaks)

        peaks[:, utilC.getXCenterIndex()] = fd_peaks[:, 2]
        peaks[:, utilC.getYCenterIndex()] = fd_peaks[:, 1]

        # Calculate height.
        #
        # FIXME: Typically the starting value for the peak height will be
        #        under-estimated unless a large enough number of FISTA
        #        iterations is performed to completely de-convolve the image.
        #
        h_index = utilC.getHeightIndex()
        #peaks[:,h_index] = fd_peaks[:,0]
        for i in range(num_peaks):
            peaks[i, h_index] = fd_peaks[i, 0] * self.psf_heights[int(
                round(fd_peaks[i, 3]))]

        # Calculate z (0.0 - 1.0). Guard against division by zero for a
        # single plane (2D) stack.
        if (fx.shape[2] > 1):
            peaks[:, utilC.getZCenterIndex()] = fd_peaks[:, 3] / (float(fx.shape[2]) - 1.0)

        # Background term calculation.
        bg_index = utilC.getBackgroundIndex()
        for i in range(num_peaks):
            ix = int(round(fd_peaks[i, 1]))
            iy = int(round(fd_peaks[i, 2]))
            peaks[i, bg_index] = self.background[ix, iy]

        return peaks
Code example #11
def convertToMultiFit(i3data,
                      x_size,
                      y_size,
                      frame,
                      nm_per_pixel,
                      inverted=False):
    """
    Create a 3D-DAOSTORM, sCMOS or Spliner analysis compatible peak array from I3 data.

    Notes:
      (1) This uses the non-drift corrected positions.
      (2) This sets the initial fitting error to zero and the status to RUNNING.
    """
    i3data = maskData(i3data, (i3data['fr'] == frame))

    peaks = numpy.zeros((i3data.size, utilC.getNPeakPar()))

    peaks[:, utilC.getBackgroundIndex()] = i3data['bg']
    peaks[:, utilC.getHeightIndex()] = i3data['h']
    peaks[:, utilC.getZCenterIndex()] = i3data['z'] * 0.001

    if inverted:
        peaks[:, utilC.getXCenterIndex()] = y_size - i3data['x']
        peaks[:, utilC.getYCenterIndex()] = x_size - i3data['y']
        ax = i3data['ax']
        ww = i3data['w']
        peaks[:, utilC.getYWidthIndex()] = 0.5 * numpy.sqrt(
            ww * ww / ax) / nm_per_pixel
        peaks[:, utilC.getXWidthIndex()] = 0.5 * numpy.sqrt(
            ww * ww * ax) / nm_per_pixel
    else:
        peaks[:, utilC.getYCenterIndex()] = i3data['x'] - 1
        peaks[:, utilC.getXCenterIndex()] = i3data['y'] - 1
        ax = i3data['ax']
        ww = i3data['w']
        peaks[:, utilC.getXWidthIndex()] = 0.5 * numpy.sqrt(
            ww * ww / ax) / nm_per_pixel
        peaks[:, utilC.getYWidthIndex()] = 0.5 * numpy.sqrt(
            ww * ww * ax) / nm_per_pixel

    return peaks
Code example #12
    def peakFinder(self, no_bg_image):

        all_new_peaks = None

        save_convolution = False
        if save_convolution:
            print("making image")
            tif = tifffile.TiffWriter("image.tif")
            tif.save(self.image.astype(numpy.uint16) + 100)
            tif.save(no_bg_image.astype(numpy.uint16) + 100)
            tif.save(self.background.astype(numpy.uint16) + 100)
            
        #
        # Find peaks in image convolved with the PSF at different z values.
        #
        for i in range(len(self.mfilter)):

            height_rescale = self.height_rescale[i]
            mfilter = self.mfilter[i]
            taken = self.taken[i]
            z_value = self.z_value[i]

            # Smooth image with gaussian filter.
            smooth_image = mfilter.convolve(no_bg_image)

            if save_convolution:
                tif.save(smooth_image.astype(numpy.uint16) + 100)

            # Mask the image so that peaks are only found in the AOI.
            masked_image = smooth_image * self.peak_mask
        
            # Identify local maxima in the masked image.
            [new_peaks, taken] = utilC.findLocalMaxima(masked_image,
                                                       taken,
                                                       self.cur_threshold,
                                                       self.find_max_radius,
                                                       self.margin)

            #
            # Fill in initial values for peak height, background and sigma.
            #
            # FIXME: We just add the smoothed image and the background together
            #        as a hack so that we can still use the initializePeaks()
            #        function.
            #
            new_peaks = utilC.initializePeaks(new_peaks,                      # The new peaks.
                                              smooth_image + self.background, # Smooth image + background.
                                              self.background,                # The current estimate of the background.
                                              self.sigma,                     # The starting sigma value.
                                              z_value)                        # The starting z value.

            # Correct initial peak heights.
            h_index = utilC.getHeightIndex()
            new_peaks[:,h_index] = new_peaks[:,h_index] * height_rescale
            
            if all_new_peaks is None:
                all_new_peaks = new_peaks
            else:
                all_new_peaks = numpy.append(all_new_peaks, new_peaks, axis = 0)

        if save_convolution:
            tif.close()
                
        #
        # Remove the dimmer of two peaks with similar x,y values but different z values.
        #
        if (len(self.mfilter) > 1):

            if False:
                print("before", all_new_peaks.shape)
                for i in range(all_new_peaks.shape[0]):
                    print(all_new_peaks[i,:])
                print("")
            
            all_new_peaks = utilC.removeClosePeaks(all_new_peaks,                                               
                                                   self.find_max_radius,
                                                   self.find_max_radius)

            if False:
                print("after", all_new_peaks.shape)
                for i in range(all_new_peaks.shape[0]):
                    print(all_new_peaks[i,:])
                print("")
                
        return all_new_peaks
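removeClosePeaks() is also a C helper; the comment above states the policy it implements for duplicates across z. The idea can be sketched in plain numpy as a greedy brightest-first pass (an illustration of the idea only, not the actual implementation, which takes separate radius arguments):

import numpy

def removeClosePeaksSketch(peaks, x_i, y_i, h_i, radius):
    # Keep the brighter of any two peaks closer than radius in x,y.
    order = numpy.argsort(-peaks[:, h_i])            # brightest first
    kept = []
    for i in order:
        if all((peaks[i, x_i] - peaks[j, x_i]) ** 2 +
               (peaks[i, y_i] - peaks[j, y_i]) ** 2 >= radius * radius
               for j in kept):
            kept.append(i)
    return peaks[sorted(kept), :]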
Code example #14
def measurePSF(movie_name, zfile_name, movie_mlist, psf_name, want2d = False, aoi_size = 12, z_range = 750.0, z_step = 50.0):
    """
    The actual z range is 2x z_range (i.e. from -z_range to z_range).
    """
    
    # Load dax file, z offset file and molecule list file.
    dax_data = datareader.inferReader(movie_name)
    z_offsets = None
    if os.path.exists(zfile_name):
        try:
            z_offsets = numpy.loadtxt(zfile_name, ndmin = 2)[:,1]
        except IndexError:
            z_offsets = None
            print("z offsets were not loaded.")
    i3_data = readinsight3.loadI3File(movie_mlist)

    if want2d:
        print("Measuring 2D PSF")
    else:
        print("Measuring 3D PSF")

    #
    # Go through the frames identifying good peaks and adding them
    # to the average psf. For 3D molecule z positions are rounded to 
    # the nearest 50nm.
    #
    z_mid = int(z_range/z_step)
    max_z = 2 * z_mid + 1

    average_psf = numpy.zeros((max_z,4*aoi_size,4*aoi_size))
    peaks_used = 0
    totals = numpy.zeros(max_z)
    [dax_x, dax_y, dax_l] = dax_data.filmSize()
    for curf in range(dax_l):

        # Select localizations in current frame & not near the edges.
        mask = ((i3_data['fr'] == curf+1) &
                (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_x - aoi_size - 1)) &
                (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_y - aoi_size - 1)))
        xr = i3_data['x'][mask]
        yr = i3_data['y'][mask]

        # Use the z offset file if it was specified, otherwise use localization z positions.
        if z_offsets is None:
            if (curf == 0):
                print("Using fit z locations.")
            zr = i3_data['z'][mask]
        else:
            if (curf == 0):
                print("Using z offset file.")
            zr = numpy.ones(xr.size) * z_offsets[curf]

        ht = i3_data['h'][mask]

        # Remove localizations that are too close to each other.
        in_peaks = numpy.zeros((xr.size,util_c.getNPeakPar()))
        in_peaks[:,util_c.getXCenterIndex()] = xr
        in_peaks[:,util_c.getYCenterIndex()] = yr
        in_peaks[:,util_c.getZCenterIndex()] = zr
        in_peaks[:,util_c.getHeightIndex()] = ht

        out_peaks = util_c.removeNeighbors(in_peaks, 2*aoi_size)
        #out_peaks = util_c.removeNeighbors(in_peaks, aoi_size)

        print(curf, "peaks in", in_peaks.shape[0], ", peaks out", out_peaks.shape[0])

        # Use remaining localizations to calculate spline.
        image = dax_data.loadAFrame(curf).astype(numpy.float64)

        xr = out_peaks[:,util_c.getXCenterIndex()]
        yr = out_peaks[:,util_c.getYCenterIndex()]
        zr = out_peaks[:,util_c.getZCenterIndex()]
        ht = out_peaks[:,util_c.getHeightIndex()]

        for i in range(xr.size):
            xf = xr[i]
            yf = yr[i]
            zf = zr[i]
            xi = int(xf)
            yi = int(yf)
            if want2d:
                zi = 0
            else:
                zi = int(round(zf/z_step) + z_mid)

            # check the z is in range
            if (zi > -1) and (zi < max_z):

                # get localization image
                mat = image[xi-aoi_size:xi+aoi_size,
                            yi-aoi_size:yi+aoi_size]

                # zoom in by 2x
                psf = scipy.ndimage.interpolation.zoom(mat, 2.0)

                # re-center image
                psf = scipy.ndimage.interpolation.shift(psf, (-2.0*(xf-xi), -2.0*(yf-yi)), mode='nearest')

                # add to average psf accumulator
                average_psf[zi,:,:] += psf
                totals[zi] += 1

    # Force PSF to be zero (on average) at the boundaries.
    for i in range(max_z):
        edge = numpy.concatenate((average_psf[i,0,:],
                                  average_psf[i,-1,:],
                                  average_psf[i,:,0],
                                  average_psf[i,:,-1]))
        average_psf[i,:,:] -= numpy.mean(edge)

    # Normalize the PSF.
    if want2d:
        max_z = 1

    for i in range(max_z):
        print(i, totals[i])
        if (totals[i] > 0.0):
            average_psf[i,:,:] = average_psf[i,:,:]/numpy.sum(numpy.abs(average_psf[i,:,:]))

    average_psf = average_psf/numpy.max(average_psf)

    # Save PSF (in image form).
    if True:
        import storm_analysis.sa_library.daxwriter as daxwriter
        dxw = daxwriter.DaxWriter(os.path.join(os.path.dirname(psf_name), "psf.dax"),
                                  average_psf.shape[1],
                                  average_psf.shape[2])
        for i in range(max_z):
            dxw.addFrame(1000.0 * average_psf[i,:,:] + 100)
        dxw.close()

    # Save PSF.
    if want2d:
        psf_dict = {"psf" : average_psf[0,:,:],
                    "type" : "2D"}

    else:
        cur_z = -z_range
        z_vals = []
        for i in range(max_z):
            z_vals.append(cur_z)
            cur_z += z_step

        psf_dict = {"psf" : average_psf,
                    "pixel_size" : 0.080, # 1/2 the camera pixel size in nm.
                    "type" : "3D",
                    "zmin" : -z_range,
                    "zmax" : z_range,
                    "zvals" : z_vals}

    with open(psf_name, 'wb') as fp:
        pickle.dump(psf_dict, fp)
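A worked example of the z binning in measurePSF(), using the default arguments:

# z_range = 750.0, z_step = 50.0:
#   z_mid = int(750.0 / 50.0) = 15
#   max_z = 2 * 15 + 1 = 31 slices covering -750nm .. +750nm in 50nm steps.
# A localization at zf = -120.0nm lands in slice:
#   zi = int(round(-120.0 / 50.0) + 15) = int(-2 + 15) = 13
# and anything with zi < 0 or zi >= max_z is skipped by the range check.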
Code example #15
File: cubic_fit_c.py (project: hadim/storm-analysis)
    ndpointer(dtype=numpy.float64), ctypes.c_int, ctypes.c_int, ctypes.c_int
]

cubic_fit.initializeMultiFit.argtypes = [
    ndpointer(dtype=numpy.float64), ctypes.c_double, ctypes.c_int, ctypes.c_int
]

cubic_fit.newImage.argtypes = [ndpointer(dtype=numpy.float64)]

cubic_fit.newPeaks2D.argtypes = [ndpointer(dtype=numpy.float64), ctypes.c_int]

cubic_fit.newPeaks3D.argtypes = [ndpointer(dtype=numpy.float64), ctypes.c_int]

# Globals
default_tol = 1.0e-6
height_index = utilC.getHeightIndex()
n_results_par = utilC.getNResultsPar()
status_index = utilC.getStatusIndex()
z_index = utilC.getZCenterIndex()


#
# Functions
#
def fSpline2D(x, y):
    return cubic_fit.fSpline2D(x, y)


def fSpline3D(x, y, z):
    return cubic_fit.fSpline3D(x, y, z)
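Because the argtypes above are declared with ndpointer(dtype=numpy.float64), arrays handed to these entry points must be float64 (and, in practice, C-contiguous). A minimal calling sketch, assuming the cubic_fit handle from the listing above and an already initialized fit:

import numpy

image = numpy.zeros((256, 256))   # numpy arrays default to float64, C-contiguous
cubic_fit.newImage(numpy.ascontiguousarray(image, dtype=numpy.float64))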
Code example #16
    def __init__(self, parameters):

        # Member variables.
        self.background = None  # Current estimate of the image background.
        self.image = None  # The original image.
        self.iterations = parameters.iterations  # Maximum number of cycles of peak finding, fitting and subtraction to perform.
        self.margin = PeakFinderFitter.margin  # Size of the unanalyzed "edge" around the image.
        self.neighborhood = PeakFinder.unconverged_dist * parameters.sigma  # Radius for marking neighbors as unconverged.
        self.new_peak_radius = PeakFinder.new_peak_dist  # Minimum allowed distance between new peaks and current peaks.
        self.parameters = parameters  # Keep access to the parameters object.
        self.peak_locations = None  # Initial peak locations, as explained below.
        self.peak_mask = None  # Mask for limiting peak identification to a particular AOI.
        self.sigma = parameters.sigma  # Peak sigma (in pixels).
        self.taken = None  # Spots in the image where a peak has already been added.
        self.threshold = parameters.threshold  # Peak minimum threshold (height, in camera units).
        self.z_value = 0.0  # The starting z value to use for peak fitting.

        # Radius (in pixels) over which the maxima is maximal.
        if hasattr(parameters, "find_max_radius"):
            self.find_max_radius = float(parameters.find_max_radius)
        else:
            self.find_max_radius = 5

        #
        # This is for if you already know where you want fitting to happen, for
        # example in a bead calibration movie where you just want to use the
        # approximate locations as inputs for fitting.
        #
        # peak_locations is a text file with the peak x, y, height and background
        # values as whitespace-separated columns (x and y positions are in pixels,
        # as determined using the visualizer).
        #
        # 1.0 2.0 1000.0 100.0
        # 10.0 5.0 2000.0 200.0
        # ...
        #
        if hasattr(parameters, "peak_locations"):
            print("Using peak starting locations specified in",
                  parameters.peak_locations)

            # Only do one cycle of peak finding as we'll always return the same locations.
            if (self.iterations != 1):
                print("WARNING: setting number of iterations to 1!")
                self.iterations = 1

            # Load peak x,y locations.
            peak_locs = numpy.loadtxt(parameters.peak_locations, ndmin=2)
            print(peak_locs.shape)

            # Create peak array.
            self.peak_locations = numpy.zeros((peak_locs.shape[0],
                                               util_c.getNResultsPar()))
            self.peak_locations[:, util_c.getXCenterIndex()] = peak_locs[:, 1] + self.margin
            self.peak_locations[:, util_c.getYCenterIndex()] = peak_locs[:, 0] + self.margin
            self.peak_locations[:, util_c.getHeightIndex()] = peak_locs[:, 2]
            self.peak_locations[:, util_c.getBackgroundIndex()] = peak_locs[:, 3]

            self.peak_locations[:, util_c.getXWidthIndex()] = numpy.ones(peak_locs.shape[0]) * self.sigma
            self.peak_locations[:, util_c.getYWidthIndex()] = numpy.ones(peak_locs.shape[0]) * self.sigma
Code example #17
def measurePSF(movie_name,
               zfile_name,
               movie_mlist,
               psf_name,
               want2d=False,
               aoi_size=12,
               z_range=750.0,
               z_step=50.0):
    """
    The actual z range is 2x z_range (i.e. from -z_range to z_range).
    """

    # Load dax file, z offset file and molecule list file.
    dax_data = datareader.inferReader(movie_name)
    z_offsets = None
    if os.path.exists(zfile_name):
        try:
            z_offsets = numpy.loadtxt(zfile_name, ndmin=2)[:, 1]
        except IndexError:
            z_offsets = None
            print("z offsets were not loaded.")
    i3_data = readinsight3.loadI3File(movie_mlist)

    if want2d:
        print("Measuring 2D PSF")
    else:
        print("Measuring 3D PSF")

    #
    # Go through the frames identifying good peaks and adding them
    # to the average psf. For 3D molecule z positions are rounded to
    # the nearest 50nm.
    #
    z_mid = int(z_range / z_step)
    max_z = 2 * z_mid + 1

    average_psf = numpy.zeros((max_z, 4 * aoi_size, 4 * aoi_size))
    peaks_used = 0
    totals = numpy.zeros(max_z)
    [dax_x, dax_y, dax_l] = dax_data.filmSize()
    for curf in range(dax_l):

        # Select localizations in current frame & not near the edges.
        mask = ((i3_data['fr'] == curf + 1) &
                (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_x - aoi_size - 1)) &
                (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_y - aoi_size - 1)))
        xr = i3_data['x'][mask]
        yr = i3_data['y'][mask]

        # Use the z offset file if it was specified, otherwise use localization z positions.
        if z_offsets is None:
            if (curf == 0):
                print("Using fit z locations.")
            zr = i3_data['z'][mask]
        else:
            if (curf == 0):
                print("Using z offset file.")
            zr = numpy.ones(xr.size) * z_offsets[curf]

        ht = i3_data['h'][mask]

        # Remove localizations that are too close to each other.
        in_peaks = numpy.zeros((xr.size, util_c.getNPeakPar()))
        in_peaks[:, util_c.getXCenterIndex()] = xr
        in_peaks[:, util_c.getYCenterIndex()] = yr
        in_peaks[:, util_c.getZCenterIndex()] = zr
        in_peaks[:, util_c.getHeightIndex()] = ht

        out_peaks = util_c.removeNeighbors(in_peaks, 2 * aoi_size)
        #out_peaks = util_c.removeNeighbors(in_peaks, aoi_size)

        print(curf, "peaks in", in_peaks.shape[0], ", peaks out",
              out_peaks.shape[0])

        # Use remaining localizations to calculate spline.
        image = dax_data.loadAFrame(curf).astype(numpy.float64)

        xr = out_peaks[:, util_c.getXCenterIndex()]
        yr = out_peaks[:, util_c.getYCenterIndex()]
        zr = out_peaks[:, util_c.getZCenterIndex()]
        ht = out_peaks[:, util_c.getHeightIndex()]

        for i in range(xr.size):
            xf = xr[i]
            yf = yr[i]
            zf = zr[i]
            xi = int(xf)
            yi = int(yf)
            if want2d:
                zi = 0
            else:
                zi = int(round(zf / z_step) + z_mid)

            # check the z is in range
            if (zi > -1) and (zi < max_z):

                # get localization image
                mat = image[xi - aoi_size:xi + aoi_size,
                            yi - aoi_size:yi + aoi_size]

                # zoom in by 2x
                psf = scipy.ndimage.interpolation.zoom(mat, 2.0)

                # re-center image
                psf = scipy.ndimage.interpolation.shift(
                    psf, (-2.0 * (xf - xi), -2.0 * (yf - yi)), mode='nearest')

                # add to average psf accumulator
                average_psf[zi, :, :] += psf
                totals[zi] += 1

    # Force PSF to be zero (on average) at the boundaries.
    for i in range(max_z):
        edge = numpy.concatenate((average_psf[i, 0, :], average_psf[i, -1, :],
                                  average_psf[i, :, 0], average_psf[i, :, -1]))
        average_psf[i, :, :] -= numpy.mean(edge)

    # Normalize the PSF.
    if want2d:
        max_z = 1

    for i in range(max_z):
        print(i, totals[i])
        if (totals[i] > 0.0):
            average_psf[i, :, :] = average_psf[i, :, :] / numpy.sum(
                numpy.abs(average_psf[i, :, :]))

    average_psf = average_psf / numpy.max(average_psf)

    # Save PSF (in image form).
    if True:
        import storm_analysis.sa_library.daxwriter as daxwriter
        dxw = daxwriter.DaxWriter(
            os.path.join(os.path.dirname(psf_name), "psf.dax"),
            average_psf.shape[1], average_psf.shape[2])
        for i in range(max_z):
            dxw.addFrame(1000.0 * average_psf[i, :, :] + 100)
        dxw.close()

    # Save PSF.
    if want2d:
        psf_dict = {"psf": average_psf[0, :, :], "type": "2D"}

    else:
        cur_z = -z_range
        z_vals = []
        for i in range(max_z):
            z_vals.append(cur_z)
            cur_z += z_step

        psf_dict = {
            "psf": average_psf,
            "pixel_size": 0.080,  # 1/2 the camera pixel size in nm.
            "type": "3D",
            "zmin": -z_range,
            "zmax": z_range,
            "zvals": z_vals
        }

    with open(psf_name, 'wb') as fp:
        pickle.dump(psf_dict, fp)
Code example #18
[dax_x, dax_y, dax_l] = dax_data.filmSize()
while (curf < dax_l) and (peaks_used < min_peaks):

    # Select localizations in current frame & not near the edges.
    mask = ((i3_data['fr'] == curf) &
            (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_y - aoi_size - 1)) &
            (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_x - aoi_size - 1)))
    xr = i3_data['x'][mask]
    yr = i3_data['y'][mask]
    ht = i3_data['h'][mask]

    # Remove localizations that are too close to each other.
    in_peaks = numpy.zeros((xr.size, util_c.getNResultsPar()))
    in_peaks[:, util_c.getXCenterIndex()] = xr
    in_peaks[:, util_c.getYCenterIndex()] = yr
    in_peaks[:, util_c.getHeightIndex()] = ht

    out_peaks = util_c.removeNeighbors(in_peaks, aoi_size)

    print(curf, in_peaks.shape, out_peaks.shape)

    # Use remaining localizations to calculate spline.
    image = dax_data.loadAFrame(curf - 1).astype(numpy.float64)

    xr = out_peaks[:, util_c.getXCenterIndex()]
    yr = out_peaks[:, util_c.getYCenterIndex()]
    ht = out_peaks[:, util_c.getHeightIndex()]

    for i in range(xr.size):
        xf = xr[i]
        yf = yr[i]
Code example #19
peaks_used = 0
total = 0.0
[dax_x, dax_y, dax_l] = dax_data.filmSize()
while (curf < dax_l) and (peaks_used < min_peaks):

    # Select localizations in current frame & not near the edges.
    mask = ((i3_data['fr'] == curf) &
            (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_y - aoi_size - 1)) &
            (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_x - aoi_size - 1)))
    xr = i3_data['x'][mask]
    yr = i3_data['y'][mask]
    ht = i3_data['h'][mask]

    # Remove localizations that are too close to each other.
    in_peaks = numpy.zeros((xr.size,util_c.getNResultsPar()))
    in_peaks[:,util_c.getXCenterIndex()] = xr
    in_peaks[:,util_c.getYCenterIndex()] = yr
    in_peaks[:,util_c.getHeightIndex()] = ht

    out_peaks = util_c.removeNeighbors(in_peaks, aoi_size)

    print(curf, in_peaks.shape, out_peaks.shape)

    # Use remaining localizations to calculate spline.
    image = dax_data.loadAFrame(curf-1).astype(numpy.float64)

    xr = out_peaks[:,util_c.getXCenterIndex()]
    yr = out_peaks[:,util_c.getYCenterIndex()]
    ht = out_peaks[:,util_c.getHeightIndex()]

    for i in range(xr.size):
        xf = xr[i]
        yf = yr[i]