Code Example #1
    def peak_finder(self, map_data, mask_pf=None):
        '''
        Generate initial Gaussian parameters to be used as guesses in the fitting algorithm.
        Peak locations are found automatically using astropy and photutils routines.
        Parameters:
        - map_data: map to be searched for Gaussian(s)
        - mask_pf: mask used by the search. The algorithm looks for Gaussians only where
                   the mask is True. In the case of iterative fitting, the mask is updated
                   every time to mask out the Gaussians that have already been found.
        '''

        x_lim = np.size(self.xgrid)
        y_lim = np.size(self.ygrid)
        fact = 20.

        bs = np.array(
            [int(np.floor(y_lim / fact)),
             int(np.floor(x_lim / fact))])

        mean, median, std = sigma_clipped_stats(self.data, sigma=3.0)
        threshold = median + (5. * std)
        if mask_pf is None:
            tbl = find_peaks(map_data, threshold, box_size=bs)
            mask_pf = np.zeros_like(self.xy_mesh[0])
        else:
            self.mask = mask_pf.copy()
            tbl = find_peaks(map_data, threshold, box_size=bs, mask=self.mask)
        tbl['peak_value'].info.format = '%.8g'

        guess = np.array([])

        x_i = np.array([])
        y_i = np.array([])

        for i in range(len(tbl['peak_value'])):
            guess_temp = np.array([tbl['peak_value'][i], self.xgrid[tbl['x_peak'][i]], \
                                  self.ygrid[tbl['y_peak'][i]], 1., 1., 0.])
            guess = np.append(guess, guess_temp)
            index_x = self.xgrid[tbl['x_peak'][i]]
            index_y = self.ygrid[tbl['y_peak'][i]]
            x_i = np.append(x_i, index_x)
            y_i = np.append(y_i, index_y)
            mask_pf[index_y - bs[1]:index_y + bs[1],
                    index_x - bs[0]:index_x + bs[0]] = True

            if self.param is None:
                self.param = guess_temp
                self.mask = mask_pf.copy()

            else:
                self.param = np.append(self.param, guess_temp)
                self.mask = np.logical_or(self.mask, mask_pf)
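
For reference, the thresholding pattern shared by this method and several of the examples below (sigma-clipped statistics, then a threshold of median + N * std passed to find_peaks) can be reduced to a few self-contained lines. The sketch below is illustrative only: the synthetic image, the 5-sigma level, and the import paths are assumptions rather than part of the original class.

import numpy as np
from astropy.stats import sigma_clipped_stats
from photutils.detection import find_peaks

# synthetic image: Gaussian noise plus one bright blob
rng = np.random.default_rng(0)
image = rng.normal(0.0, 1.0, (200, 200))
image[80:85, 120:125] += 50.0

# threshold = clipped median + 5 * clipped std, mirroring peak_finder above
mean, median, std = sigma_clipped_stats(image, sigma=3.0)
tbl = find_peaks(image, median + 5.0 * std, box_size=10)
print(tbl['x_peak', 'y_peak', 'peak_value'])
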
Code Example #2
def center(s_post, member_indices):

    mem = gcat[member_indices]
    """ Member Galaxy Locations """

    mempix = convpix(mem, w)
    x = mempix[:, 0]
    y = mempix[:, 1]
    """ Configuring Pixel Space"""

    init_x, init_y = w.wcs_world2pix(s_post[0], s_post[1],
                                     1)  # initial estimate to draw box
    wid_arcsec = 302  # 5'x 5' area around s-post
    pix_scale = 0.8627  # "/pixel ---> 0.000239 deg/pixel
    wid_pixels = int(wid_arcsec / pix_scale)  # ~350x350 pixels

    xmin = int(init_x - (wid_pixels / 2.0))  # boundaries
    xmax = int(init_x + (wid_pixels / 2.0))
    ymin = int(init_y - (wid_pixels / 2.0))
    ymax = int(init_y + (wid_pixels / 2.0))
    """ 2D-Histogram & Smoothing """

    binstep = 10  # 10 pixel ~ 8.62 "
    wght = np.asarray(
        gcat[member_indices]['fch1'])  # weights: irac ch1[3.6 um] flux
    xedges = np.arange(xmin, xmax + binstep, binstep)
    yedges = np.arange(ymin, ymax + binstep, binstep)
    h, xedges, yedges = np.histogram2d(x,
                                       y,
                                       bins=(xedges, yedges),
                                       weights=wght)

    size = int(round(
        (np.shape(h)[0]) / 4.0))  # kernel size : at least ~ 4 times smaller
    stdev_ker = size / 3.0  # size = 3-sigma
    gauss_kernel = Gaussian2DKernel(x_stddev=stdev_ker, x_size=size,
                                    y_size=size)  # symmetric Gaussian kernel
    smoothed_data_gauss = convolve_fft(h.T,
                                       gauss_kernel)  # convolution/smoothing
    """ Finds Peak """

    x_org, y_org = (xedges[0] + xedges[1]) / 2.0, (
        yedges[0] + yedges[1]) / 2.0  # origin of bin-coordinate
    fp_m = find_peaks(smoothed_data_gauss, 0,
                      npeaks=1)  # 1 peak with max value
    xbin_peak, ybin_peak = fp_m['x_peak'][0], fp_m['y_peak'][
        0]  # x, y positions of the peak
    x_p, y_p = (x_org + xbin_peak * binstep), (
        y_org + ybin_peak * binstep)  # conversion to pixel coordinates
    peak = (x_p, y_p)
    ra_p, dec_p = w.wcs_pix2world(peak[0], peak[1], 1)
    peak_fk5 = (ra_p, dec_p)
    """ Centroid from Image Moments """

    cen_pos = centroid_com(smoothed_data_gauss)
    cenx, ceny = (x_org + cen_pos[0] * binstep), (y_org + cen_pos[1] * binstep)
    ra_c, dec_c = w.wcs_pix2world(cenx, ceny, 1)
    centre = (ra_c, dec_c)

    return centre, peak_fk5
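
As a compact reference for the pipeline used in center() above (flux-weighted 2D histogram, Gaussian smoothing, then peak and centroid), here is a minimal sketch on synthetic member positions; the coordinates, weights, and bin choices are made up for illustration and do not come from the original catalogue.

import numpy as np
from astropy.convolution import Gaussian2DKernel, convolve_fft
from photutils.centroids import centroid_com
from photutils.detection import find_peaks

rng = np.random.default_rng(1)
x = rng.normal(175.0, 15.0, 300)           # member x positions (pixels)
y = rng.normal(175.0, 15.0, 300)           # member y positions (pixels)
wght = rng.uniform(0.5, 2.0, 300)          # stand-in for the flux weights

binstep = 10
edges = np.arange(0, 350 + binstep, binstep)
h, xedges, yedges = np.histogram2d(x, y, bins=(edges, edges), weights=wght)

size = 9                                   # kernel array size (must be odd)
gauss_kernel = Gaussian2DKernel(size / 3.0, x_size=size, y_size=size)
smoothed = convolve_fft(h.T, gauss_kernel) # transpose so rows follow y

fp = find_peaks(smoothed, 0, npeaks=1)     # single highest bin
com = centroid_com(smoothed)               # flux-weighted centroid (x, y)
print(fp['x_peak'][0], fp['y_peak'][0], com)
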
Code Example #3
File: maths.py Project: bj0/astropicam
def measure_hfr(buf):
    """
    measure focus using HFR
    :param buf:
    :return:
    """
    im = np.frombuffer(buf, dtype=np.uint8).reshape((240, 320, 3))

    # sub background
    median = np.median(im[:, :, 1])
    Im = im[:, :, 1] - median

    # find the brightest star (avoid shadowing the built-in max)
    peak = np.max(Im)
    tbl = find_peaks(Im, peak - median, box_size=100, subpixel=False)
    x, y = int(tbl['x_peak'][0]), int(tbl['y_peak'][0])

    # improve center estimate with a flux-weighted centroid over a 2D window
    # (indexing with two 1-D arrays as Im[X, Y] would only sample a diagonal)
    d = 50
    X, Y = np.arange(x - d, x + d), np.arange(y - d, y + d)
    V = Im[Y[:, None], X[None, :]]          # 2D cutout, rows = y, columns = x
    D = V.sum()
    x = int(np.sum(V.sum(axis=0) * X) / D)  # flux-weighted mean x
    y = int(np.sum(V.sum(axis=1) * Y) / D)  # flux-weighted mean y

    # use the new center for the HFR
    X, Y = np.arange(x - d, x + d), np.arange(y - d, y + d)
    V = Im[Y[:, None], X[None, :]]

    # distance of every pixel in the window from the centroid
    XX, YY = np.meshgrid(X - x, Y - y)
    D = np.hypot(XX, YY)
    hfr = np.sum(V * D) / np.sum(V)
    return hfr
Code Example #4
    def locate_peaks(self, sigma_level, kernel=None):
        """
        This function ...
        :param sigma_level:
        :param kernel:
        :return:
        """

        # If a subtracted box is present, use it to locate the peaks
        box = self.subtracted if self.has_background else self.cutout

        # Calculate the sigma-clipped statistics of the box
        mean, median, stddev = statistics.sigma_clipped_statistics(
            box, sigma=3.0, mask=self.background_mask
        )  # Sigma 3.0 for clipping is what photutils uses in detect_threshold
        #sigma_level = 1.5   # I once tried to investigate why some clear peaks were not detected, did not have time ..
        threshold = median + (sigma_level * stddev)

        # Convolve the box with the given kernel, if any
        if kernel is not None:
            box = convolve_fft(box, kernel, normalize_kernel=True)

        # Find peaks
        #threshold = detect_threshold(box, snr=2.0) # other method (snr corresponds to sigma_level as I use it above)
        peaks = find_peaks(box,
                           threshold,
                           box_size=5,
                           mask=self.background_mask)

        # For some reason, once in a while, an ordinary list comes out of the find_peaks routine instead of an
        # Astropy Table instance. We assume we need an empty table in this case
        if type(peaks) is list:
            peaks = Table([[], []], names=('x_peak', 'y_peak'))

        # Initialize a list to contain the peak positions
        positions = []

        # Loop over the peaks
        for peak in peaks:

            # Calculate the absolute x and y coordinate of the peak
            x_rel = peak['x_peak']
            y_rel = peak['y_peak']
            x = x_rel + self.cutout.x_min
            y = y_rel + self.cutout.y_min

            # Check whether the peak position falls in the box and then add it to the list
            peak_position = Position(x, y)
            if self.cutout.contains(peak_position):
                # Add the coordinates to the positions list
                positions.append(peak_position)
            else:
                print("DEBUG: peak position", peak_position,
                      "falls outside of box with shape", box.shape)

        # If exactly one peak was found, set the self.peak attribute accordingly
        if len(positions) == 1: self.peak = positions[0]

        # Return the list of peak positions
        return positions
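
The comment above notes that find_peaks occasionally hands back a plain list instead of an astropy Table; recent photutils releases also return None (with a warning) when nothing is detected. A small defensive wrapper along these lines covers all three cases (a sketch, not part of the original class):

from astropy.table import Table
from photutils.detection import find_peaks

def find_peaks_table(data, threshold, **kwargs):
    """Always return an astropy Table, even when find_peaks finds nothing."""
    peaks = find_peaks(data, threshold, **kwargs)
    if peaks is None or isinstance(peaks, list):
        peaks = Table([[], []], names=('x_peak', 'y_peak'))
    return peaks
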
Code Example #5
def peak_detection(denormed_img, band, shifts, img_size, npeaks,
                   nb_blended_gal, training_or_test, dist_cut):
    '''
    Return the coordinates of the centroid of the galaxy closest to the image center.

    Parameters:
    ----------
    denormed_img: denormalized image of the blended galaxies
    band: filter in which the detection is done
    shifts: initial shifts used for the image generation
    img_size: size of the image
    npeaks: maximum number of peaks to return from the photutils find_peaks function
    nb_blended_gal: number of blended galaxies in the image
    training_or_test: whether a training or a test sample is being generated
    dist_cut: distance cut used to check that the detected galaxy is not too close to its neighbours
    '''
    gal = denormed_img
    df_temp = photutils.find_peaks(gal,
                                   threshold=5 *
                                   np.sqrt(sky_level_pixel[band]),
                                   npeaks=npeaks,
                                   centroid_func=centroid_com)
    if df_temp is not None:
        df_temp['x_peak'] = (df_temp['x_centroid'] -
                             ((img_size / 2.) - 0.5)) * pixel_scale[band]
        df_temp['y_peak'] = (df_temp['y_centroid'] -
                             ((img_size / 2.) - 0.5)) * pixel_scale[band]
        df_temp.sort('peak_value', reverse=True)
        # Distances of true centers to brightest peak
        qq = [
            np.sqrt(
                float((shifts[j, 0] - df_temp['x_peak'][0])**2 +
                      (shifts[j, 1] - df_temp['y_peak'][0])**2))
            for j in range(nb_blended_gal)
        ]
        idx_closest = np.argmin(qq)
        if nb_blended_gal > 1:
            # Distance from peak galaxy to others
            qq_prime = [
                np.sqrt(
                    float((shifts[idx_closest, 0] - shifts[j, 0])**2 +
                          (shifts[idx_closest, 1] - shifts[j, 1])**2))
                if j != idx_closest else np.inf for j in range(nb_blended_gal)
            ]
            idx_closest_to_peak_galaxy = np.argmin(qq_prime)
            if training_or_test != 'test':
                if not np.all(np.array(qq_prime) > dist_cut):
                    print(
                        'TRAINING CUT: closest is not central and others are too close'
                    )
                    return False
        else:
            idx_closest_to_peak_galaxy = np.nan
        return idx_closest, idx_closest_to_peak_galaxy, df_temp[0][
            'x_centroid'], df_temp[0]['y_centroid'], df_temp[0][
                'x_peak'], df_temp[0]['y_peak'], len(df_temp)
    else:
        return False
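
The conversion above from a centroid in pixel coordinates to an offset from the image centre in arcseconds is just a shift by the pixel centre of the image followed by a scale. A tiny worked example, with an assumed pixel scale and centroid value (the real code reads these from the pixel_scale dictionary):

img_size = 64
pixel_scale_band = 0.2      # arcsec / pixel (illustrative value)
x_centroid = 35.7           # e.g. from find_peaks(..., centroid_func=centroid_com)

# the image centre is at pixel (img_size / 2 - 0.5) in a pixel-centre convention
x_peak_arcsec = (x_centroid - (img_size / 2. - 0.5)) * pixel_scale_band
print(x_peak_arcsec)        # (35.7 - 31.5) * 0.2 = 0.84 arcsec from the centre
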
Code Example #6
    def peak_finder(self, map_data, mask_pf=False):

        x_lim = np.size(self.xgrid)
        y_lim = np.size(self.ygrid)
        fact = 20.

        bs = np.array(
            [int(np.floor(y_lim / fact)),
             int(np.floor(x_lim / fact))])

        mean, median, std = sigma_clipped_stats(self.data, sigma=3.0)
        threshold = median + (5. * std)
        if mask_pf is False:
            tbl = find_peaks(map_data, threshold, box_size=bs)
            mask_pf = np.zeros_like(self.xy_mesh[0])
        else:
            self.mask = mask_pf.copy()
            tbl = find_peaks(map_data, threshold, box_size=bs, mask=self.mask)
        tbl['peak_value'].info.format = '%.8g'

        guess = np.array([])

        x_i = np.array([])
        y_i = np.array([])

        for i in range(len(tbl['peak_value'])):
            guess_temp = np.array([tbl['peak_value'][i], self.xgrid[tbl['x_peak'][i]], \
                                  self.ygrid[tbl['y_peak'][i]], 1., 1., 0.])
            guess = np.append(guess, guess_temp)
            index_x = self.xgrid[tbl['x_peak'][i]]
            index_y = self.ygrid[tbl['y_peak'][i]]
            x_i = np.append(x_i, index_x)
            y_i = np.append(y_i, index_y)
            mask_pf[index_y - bs[1]:index_y + bs[1],
                    index_x - bs[0]:index_x + bs[0]] = True

            if self.param is None:
                self.param = guess_temp
                self.mask = mask_pf.copy()

            else:
                self.param = np.append(self.param, guess_temp)
                self.mask = np.logical_or(self.mask, mask_pf)
Code Example #7
    def _find_saturated(self, data, sat_thresh, search_fwhm):
        """Use threshold search to look for regions of saturated pixels
        """

        boxsize = int(4 * search_fwhm)
        self._logger.debug(f'Looking for possibly saturated regions above {sat_thresh} ADU separated by {boxsize} pixels.')
        saturated_positions = find_peaks(data,
                                         threshold=sat_thresh,
                                         box_size=boxsize)
        print('Saturated position table:\n', saturated_positions)
        return saturated_positions
Code Example #8
File: detection.py Project: SKIRT/PTS
    def locate_peaks(self, sigma_level, kernel=None):

        """
        This function ...
        :param sigma_level:
        :param kernel:
        :return:
        """

        # If a subtracted box is present, use it to locate the peaks
        box = self.subtracted if self.has_background else self.cutout

        # Calculate the sigma-clipped statistics of the box
        mean, median, stddev = statistics.sigma_clipped_statistics(box, sigma=3.0, mask=self.background_mask.data) # Sigma 3.0 for clipping is what photutils uses in detect_threshold
        #sigma_level = 1.5   # I once tried to investigate why some clear peaks were not detected, did not have time ..
        threshold = median + (sigma_level * stddev)

        # Convolve the box with the given kernel, if any
        if kernel is not None: box = convolve_fft(box, kernel, normalize_kernel=True)

        # Find peaks
        #threshold = detect_threshold(box, snr=2.0) # other method (snr corresponds to sigma_level as I use it above)
        peaks = find_peaks(box, threshold, box_size=5, mask=self.background_mask)

        # For some reason, once in a while, an ordinary list comes out of the find_peaks routine instead of an
        # Astropy Table instance. We assume we need an empty table in this case
        if type(peaks) is list: peaks = Table([[], []], names=('x_peak', 'y_peak'))

        # Initialize a list to contain the peak positions
        positions = []

        # Loop over the peaks
        for peak in peaks:

            # Calculate the absolute x and y coordinate of the peak
            x_rel = peak['x_peak']
            y_rel = peak['y_peak']
            x = x_rel + self.cutout.x_min
            y = y_rel + self.cutout.y_min

            # Check whether the peak position falls in the box and then add it to the list
            peak_position = PixelCoordinate(x, y)
            if self.cutout.contains(peak_position):
                # Add the coordinates to the positions list
                positions.append(peak_position)
            else: print("DEBUG: peak position", peak_position, "falls outside of box with shape", box.shape)

        # If exactly one peak was found, set the self.peak attribute accordingly
        if len(positions) == 1: self.peak = positions[0]

        # Return the list of peak positions
        return positions
Code Example #9
File: psf_model.py Project: sophiedubber/hubble
def construct_epsf(imdata, mean):

    peaks_tbl = find_peaks(imdata, threshold=20.0)
    peaks_tbl['peak_value'].info.format = '%.8g'
    for i in range(len(peaks_tbl)):
        print(i, peaks_tbl['x_peak'][i], peaks_tbl['y_peak'][i])

    #rem_indA = [0,1,3,4,5,6,8,9,11,12,13,16,84,85,86,87,91,92,99,102]
    #rem_indB = [0,1,3,4,5,6,8,9,11,12,13,15,16,17,24,25,61,62,73,78,84,85,86,87,91,92,99,102]
    #rem_indC = [0,1,2,3,4,5,6,8,9,10,11,12,13,15,16,17,22,23,24,25,61,62,71,73,77,78,84,85,86,87,91,92,94,96,99,102]
    #rem_indVIS = [2,3,4,5,6,7,8,9,10,12,13,14]
    #rem_indD = [3,5]
    #rem_indVIS2 = [0,1,2,7]
    #rem_indE = [22,23,24,25,27,28,29]
    #rem_indF = [11,12,13,15,16,18,19,20,21,22,23,24,25,26]
    #rem_indG = [9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28]
    #rem_indH=[4,5,6,7,8,9,10,11,12,13]
    #rem_indI = [4,5,6,7,8,9,10,11,12,13,14,15,16,17,18]
    #rem_indJ = [0,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,22,25,27,28,29,30,33,34,36,37,38,39]
    #rem_indK = [0,1,2,3,4,5,6,7,8,9,10,11,13,14,15,23,24,25,26,29,30,31,33,34,35]
    #rem_indL = [4,5,6,7,8,10,11,12,13]
    #rem_indM = [5,6,7,8,9,10,11,13,14,15,16,18,27,29]
    #rem_indN = [4,5,6,7,8,9,10,11,13,14,15,16,17,19,20,21]
    #peaks_tbl.remove_rows([rem_indM])

    plt.figure()
    plt.scatter(peaks_tbl['x_peak'], peaks_tbl['y_peak'], c='k')
    plt.imshow(imdata, vmin=-0.2, vmax=2., origin='lower')
    plt.show()

    stars_tbl = Table()
    stars_tbl['x'] = peaks_tbl['x_peak']
    stars_tbl['y'] = peaks_tbl['y_peak']

    mean_val, median_val, std_val = sigma_clipped_stats(imdata, sigma=2.0)
    imdata -= median_val

    nddata = NDData(data=imdata)
    stars = extract_stars(nddata, stars_tbl, size=20)

    epsf_builder = EPSFBuilder(oversampling=4, maxiters=3, progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)

    hdu = fits.PrimaryHDU(epsf.data)
    hdul = fits.HDUList([hdu])
    hdul.writeto('Serpens3/epsf.fits')

    return stars, epsf
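
Unlike the ePSF examples further down (Code Examples #16 and #22), construct_epsf passes every detected peak to extract_stars without discarding peaks whose cutouts would run off the image edge, which is presumably why the long commented-out rem_ind lists are needed. A hedged sketch of the usual edge filter, reusing the same column names (the helper name and default cutout size are illustrative):

import numpy as np
from astropy.table import Table

def edge_filtered_stars(peaks_tbl, shape, size=20):
    """Keep only peaks whose size-pixel cutout fits fully inside an image of the given shape."""
    hsize = (size - 1) / 2
    x = np.asarray(peaks_tbl['x_peak'])
    y = np.asarray(peaks_tbl['y_peak'])
    keep = ((x > hsize) & (x < shape[1] - 1 - hsize) &
            (y > hsize) & (y < shape[0] - 1 - hsize))
    stars_tbl = Table()
    stars_tbl['x'] = x[keep]
    stars_tbl['y'] = y[keep]
    return stars_tbl
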
Code Example #10
    def find_sources_experimental(self):
        """
        This function ...
        :return:
        """

        kernel = self.star_finder.kernel._array

        result = match_template(self.frame, kernel, pad_input=True)

        #plotting.plot_box(result)

        #y, x = np.unravel_index(np.argmax(result), result.shape)
        #source = Source.around_coordinate(self.frame, Coordinate(x,y), radius=5, factor=1.3)
        #source.plot()

        #from ...core.basics.distribution import Distribution
        #distribution = Distribution.from_values(result.flatten())
        #distribution.plot()

        from photutils import find_peaks

        mask = Mask(self.galaxy_finder.segments) + Mask(
            self.star_finder.segments)

        peaks = find_peaks(result, 0.8, box_size=5, mask=mask)

        index = 1

        self.segments = Frame.zeros_like(self.frame)

        # Loop over the peaks
        for peak in peaks:

            # Calculate the absolute x and y coordinate of the peak
            x = peak['x_peak']
            y = peak['y_peak']
            coordinate = Coordinate(x, y)

            source = Source.around_coordinate(self.frame,
                                              coordinate,
                                              radius=5,
                                              factor=1.3)

            self.segments[source.y_slice, source.x_slice][source.mask] = index

            index += 1
Code Example #11
File: other.py Project: SKIRT/PTS
    def find_sources_experimental(self):

        """
        This function ...
        :return:
        """

        # Inform the user
        log.info("Finding sources based on template matching ...")

        kernel = self.kernel._array

        result = match_template(self.frame, kernel, pad_input=True)

        #plotting.plot_box(result)

        #y, x = np.unravel_index(np.argmax(result), result.shape)
        #source = Source.around_coordinate(self.frame, Coordinate(x,y), radius=5, factor=1.3)
        #source.plot()

        #from ...core.basics.distribution import Distribution
        #distribution = Distribution.from_values(result.flatten())
        #distribution.plot()

        from photutils import find_peaks

        mask = Mask(self.galaxy_segments) + Mask(self.star_segments)

        peaks = find_peaks(result, 0.8, box_size=5, mask=mask)

        index = 1

        self.segments = Frame.zeros_like(self.frame)

        # Loop over the peaks
        for peak in peaks:

            # Calculate the absolute x and y coordinate of the peak
            x = peak['x_peak']
            y = peak['y_peak']
            coordinate = Coordinate(x,y)

            source = Detection.around_coordinate(self.frame, coordinate, radius=5, factor=1.3)

            self.segments[source.y_slice, source.x_slice][source.mask] = index

            index += 1
Code Example #12
def get_photometry(image, mask=None, gain=4., pos=(dx_stamp, dx_stamp),
                   radii=(10.,)):

    sigma_clip = SigmaClip(sigma=3., iters=2)
    bkg_estimator = MedianBackground()
    bkg = Background2D(image, (10, 10), sigma_clip=sigma_clip,
                       bkg_estimator=bkg_estimator)
    print('\tBackground stats: %f, %f' % (bkg.background_median,
                                          bkg.background_rms_median))

    data = image - bkg.background
    # bkg_err = np.random.normal(bkg.background_median,
    #                            bkg.background_rms_median, image.shape)
    if False:
        plt.imshow(data, cmap='viridis', interpolation='nearest')
        plt.colorbar()
        plt.show()
    # error = calc_total_error(image, bkg_err, gain)
    back_mean, back_median, back_std = sigma_clipped_stats(data, mask, sigma=3,
                                                           iters=3,
                                                           cenfunc=np.median)
    print('\tBackground stats: %f, %f' % (back_median, back_std))
    tbl = find_peaks(data, np.minimum(back_std, bkg.background_rms_median) * 3,
                     box_size=5, subpixel=True)
    print(tbl)
    tree_XY = cKDTree(np.array([tbl['x_peak'], tbl['y_peak']]).T)
    dist, indx = tree_XY.query(pos, k=1, distance_upper_bound=5)
    if np.isinf(dist):
        print('\tNo source found at the requested position...')
        return None
    position = [tbl[indx]['x_centroid'], tbl[indx]['y_centroid']]
    print('\tObject position: ', position)

    apertures = [CircularAperture(position, r=r) for r in radii]
    phot_table = aperture_photometry(data, apertures, mask=mask,
                                     method='subpixel', subpixels=5)
    for k, r in enumerate(radii):
        area = np.pi * r ** 2
        phot_table['aperture_flx_err_%i' %
                   k] = np.sqrt(area * bkg.background_rms_median ** 2 +
                                phot_table['aperture_sum_%i' % k] / gain)
    phot_table.remove_columns(['xcenter', 'ycenter'])
    phot_table['xcenter'] = position[0]
    phot_table['ycenter'] = position[1]
    return phot_table
Code Example #13
def get_centroid(data):
    mean, median, std = sigma_clipped_stats(data, sigma=3.0)
    threshold = median + (10.0 * std)
    tbl = find_peaks(data, threshold, box_size=5)
    #limit location of x and y based on quick look image
    half = data.shape[1] / 2
    xlim = [i for i, (j, k) in enumerate(zip(tbl['x_peak'], tbl['y_peak']))
            if half - 20 < j < half + 20 and 200 < k < 800]

    x_max, y_max = 100, 100  #fallback if no peak passes the position limits
    try:
        for idx in xlim:
            #get index of brightest source within the limit
            if tbl['peak_value'][idx] == max(tbl[xlim]['peak_value']):
                x_max, y_max = int(tbl['x_peak'][idx]), int(tbl['y_peak'][idx])
    except Exception:
        print('no centroid')
    return (x_max, y_max)  #tuple
Code Example #14
def optimize_m(t_ini, f_ini, alpha_ini, sig_curr):
    #keeping in mind that minimize requires flattened arrays
    grad_fun = lambda tg: -1 * grad_lnpost(tg, f_ini, alpha_ini, sig_curr)
    res = scipy.optimize.minimize(
        lambda tt: -1 * lnpost(tt, f_ini, alpha_ini, sig_curr),
        t_ini,  # theta initial
        jac=grad_fun,
        method='L-BFGS-B',
        bounds=[(1e-5, 10)] * len(t_ini))

    tt_prime = res['x']
    print(res['nit'])
    w_final = tt_prime.reshape((n_grid, n_grid))
    print(w_final)
    #pick out the peaks using photutils
    thresh = detect_threshold(w_final, 3)
    tbl = find_peaks(w_final, thresh)
    positions = np.transpose((tbl['x_peak'], tbl['y_peak']))
    w_peaks = np.zeros((n_grid, n_grid))
    # keep only the peak pixels; index as [row, column] = [y, x]
    w_peaks[positions[:, 1], positions[:, 0]] = w_final[positions[:, 1], positions[:, 0]]
    return w_peaks
Code Example #15
def BuildEPSF(filter='Ks'):
    """Builds the effective PSF used for the photometry.
    Currently uses the SCAO PSF from SimCADO. 
    """
    src = sim.source.star(mag=19, filter_name=filter, spec_type='M0V')
    image = img.MakeImage(src,
                          exposure=1800,
                          NDIT=1,
                          view='wide',
                          chip='centre',
                          filter=filter,
                          ao_mode='scao')
    # PSF_AnisoCADO_SCAO_FVPSF_4mas_EsoMedian_20190328.fits

    peaks_tbl = phu.find_peaks(image, threshold=135000., box_size=11)
    peaks_tbl['peak_value'].info.format = '%.8g'  # for consistent table output
    # make sure the positions are correct (use the exact ones)
    peaks_tbl['x_peak'] = src.x_pix
    peaks_tbl['y_peak'] = src.y_pix
    positions = (peaks_tbl['x_peak'], peaks_tbl['y_peak'])
    apertures = phu.CircularAperture(positions, r=5.)
    # extract cutouts of the stars using the extract_stars() function
    stars_tbl = apta.Table()
    stars_tbl['x'] = peaks_tbl['x_peak']
    stars_tbl['y'] = peaks_tbl['y_peak']
    img_data = image  # use the simulated image as the data array
    mean_val, median_val, std_val = apy.stats.sigma_clipped_stats(img_data,
                                                                  sigma=2.)
    img_data -= median_val  # subtract background
    nddata = apy.nddata.NDData(data=img_data)
    stars = phu.psf.extract_stars(nddata, stars_tbl, size=170)
    # build the epsf
    epsf_builder = phu.EPSFBuilder(oversampling=4,
                                   maxiters=5,
                                   progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)
    # save the epsf
    with open(os.path.join('objects', 'epsf-scao-fv.pkl'), 'wb') as output:
        pickle.dump(epsf, output, -1)
Code Example #16
def build_epsf(image_num):

    size = 7
    hsize = (size - 1) / 2
    peaks_tbl = find_peaks(Reduced_Image_Data[image_num], threshold=750.)
    x = peaks_tbl['x_peak']
    y = peaks_tbl['y_peak']
    mask = ((x > hsize) &
            (x < (Reduced_Image_Data[image_num].shape[1] - 1 - hsize)) &
            (y > hsize) &
            (y < (Reduced_Image_Data[image_num].shape[0] - 1 - hsize)))

    stars_tbl = Table()
    stars_tbl['x'] = x[mask]
    stars_tbl['y'] = y[mask]

    nddata = NDData(data=Reduced_Image_Data[image_num])

    stars = extract_stars(nddata, stars_tbl, size=10)

    #nrows = 5
    #ncols = 5
    #fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize = (15, 15), squeeze = True)
    #ax = ax.ravel()

    #for i in range(nrows*ncols):
    #norm = simple_norm(stars[i], 'log', percent = 99.)
    #ax[i].imshow(stars[i], norm = norm, origin = 'lower', cmap = 'gray')

    epsf_builder = EPSFBuilder(oversampling=8, maxiters=20, progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)

    #norm = simple_norm(epsf.data, 'log', percent = 99.)
    #plt.imshow(epsf.data, norm = norm, origin = 'lower', cmap = 'viridis')
    #plt.colorbar()

    return epsf
Code Example #17
def plot_composites(pdata,outfolder,contours,contour_colors=True,
                    calibration_plot=True,brown_data=False,paperplot=False):

    ### image qualities
    fs = 10 # fontsize
    maxlim = 0.01 # limit of maximum

    ### contour color limits (customized for W1-W2)
    color_limits = [-1.0,2.6]
    kernel = None

    ### output blobs
    gradient, gradient_error, arcsec,objname_out, obj_size_kpc, background_out = [], [], [], [], [], []

    ### begin loop
    fig = None
    for idx in range(len(pdata['objname'])):

        if paperplot:
            if ('NGC 4168' not in pdata['objname'][idx]) & ('NGC 1275' not in pdata['objname'][idx]):
                continue

        ### load object information
        objname = pdata['objname'][idx]
        fagn = pdata['pars']['fagn']['q50'][idx]
        ra, dec = load_coordinates(objname)
        phot_size = load_structure(objname,long_axis=True) # in arcseconds

        ### load image and WCS
        try:
            if brown_data:
                ### convert from DN to flux in Janskies, from this table: 
                # http://wise2.ipac.caltech.edu/docs/release/allsky/expsup/sec2_3f.html
                img1, noise1 = load_image(objname,contours[0]), None
                img1, noise1 = img1*1.9350E-06, noise1*(1.9350e-06)**-2
                img2, noise2 = load_image(objname,contours[1]), None
                img2, noise2 = img2*2.7048E-06, noise2*(2.7048E-06)**-2

                ### translate object location into pixels using WCS coordinates
                wcs = WCS(img1.header)
                pix_center = wcs.all_world2pix([[ra[0],dec[0]]],1)
            else:
                img1, noise1 = load_wise_data(objname,contours[0].split(' ')[1])
                img2, noise2 = load_wise_data(objname,contours[1].split(' ')[1])

                ### translate object location into pixels using WCS coordinates
                wcs = WCS(img1.header)
                pix_center = wcs.all_world2pix([[ra[0],dec[0]]],1)

                if (pix_center.squeeze()[0]-4 > img1.shape[1]) or \
                    (pix_center.squeeze()[1]-4 > img1.shape[0]) or \
                    (np.any(pix_center < 4)):
                    print('object not in image, checking for additional image')
                    print(pix_center, img1.shape)
                    img1, noise1 = load_wise_data(objname,contours[0].split(' ')[1],load_other = True)
                    img2, noise2 = load_wise_data(objname,contours[1].split(' ')[1],load_other = True)

                    wcs = WCS(img1.header)
                    pix_center = wcs.all_world2pix([[ra[0],dec[0]]],1)
                    print(pix_center, img1.shape)
        except:
            gradient.append(None)
            gradient_error.append(None)
            arcsec.append(None)
            obj_size_kpc.append(None)
            objname_out.append(None)
            continue

        size = calc_dist(wcs, pix_center, phot_size, img1.data.shape)

        ### convert inverse variance to noise
        noise1.data = (1./noise1.data)**0.5
        noise2.data = (1./noise2.data)**0.5

        ### build image extents
        extent = image_extent(size,pix_center,wcs)

        ### convolve W1 to W2 resolution
        w1_convolved, kernel = match_resolution(img1.data,contours[0],contours[1],
                                                    kernel=kernel,data1_res=px_scale)
        w1_convolved_noise, kernel = match_resolution(noise1.data,contours[0],contours[1],
                                                      kernel=kernel,data1_res=px_scale)

        #### put onto same scale, and grab slices
        data2, footprint = reproject_exact(img2, img1.header)
        noise2, footprint = reproject_exact(noise2, img1.header)

        img1_slice = w1_convolved[size[2]:size[3],size[0]:size[1]]
        img2_slice = data2[size[2]:size[3],size[0]:size[1]]
        noise1_slice = w1_convolved_noise[size[2]:size[3],size[0]:size[1]]
        noise2_slice = noise2[size[2]:size[3],size[0]:size[1]]

        ### subtract background from both images
        # identify background pixels.
        # background is any pixel consistent within X sigma of background!
        sigma = 3.0
        if paperplot:
            sigma = 5.0
        mean1, median1, std1 = sigma_clipped_stats(w1_convolved, sigma=sigma,iters=10)
        background1 = img1_slice < (median1+std1) 
        img1_slice -= median1
        mean2, median2, std2 = sigma_clipped_stats(data2, sigma=sigma, iters=10)
        background2 = img2_slice < (median2+std2)
        img2_slice -= median2

        #### calculate the color
        flux_color  = convert_to_color(img1_slice, img2_slice,None,None,contours[0],contours[1],
                                       minflux=-np.inf, vega_conversions=brown_data)

        ### don't show any "background" pixels!
        background = background1 | background2
        flux_color[background] = np.nan

        ### plot colormap
        count = 0
        if paperplot:
            if fig is None:
                fig, axall = plt.subplots(1,2, figsize=(12,6))
                fig.subplots_adjust(right=0.8,wspace=0.4,hspace=0.3,left=0.12)
                cb_ax = fig.add_axes([0.83, 0.15, 0.05, 0.7])
                ax = np.ravel(axall[0])
            else:
                ax = np.ravel(axall[1])
                count = 1
            vmin, vmax = -0.4,1.05
        else:
            fig, ax = plt.subplots(1,2, figsize=(12,6))
            vmin, vmax = color_limits[0], color_limits[1]
        ax = np.ravel(ax)
        img = ax[0].imshow(flux_color, origin='lower',extent=extent,vmin=vmin,vmax=vmax,cmap='plasma')

        if not paperplot:
            cbar = fig.colorbar(img, ax=ax[0])
        elif count == 0:
            cbar = fig.colorbar(img, cax=cb_ax)
            cbar.set_label('(W1-W2) [Vega]', fontdict={'fontsize':18})

        ax[0].set_xlabel(r'$\Delta$(arcsec)')
        ax[0].set_ylabel(r'$\Delta$(arcsec)')

        ### plot W1 contours
        if not paperplot:
            plot_contour(ax[0],np.log10(img2_slice),ncontours=20)

        ### find image center in W2 image, and mark it
        # do this by finding the source closest to center
        tbl = []
        nthresh, box_size = 20, 4
        fake_noise2_error = copy.copy(noise2_slice)
        bad = np.logical_or(np.isinf(noise2_slice),np.isnan(noise2_slice))
        fake_noise2_error[bad] = fake_noise2_error[~bad].max()
        while len(tbl) < 1:
            threshold = nthresh * std1 # peak threshold, @ 20 sigma
            tbl = find_peaks(img2_slice, threshold, box_size=box_size, subpixel=True, border_width=3, error = fake_noise2_error)
            nthresh -=2
        
            if nthresh < 2:
                nthresh = 20
                box_size += 1

        '''
        center = np.array(img2_slice.shape)/2.
        idxmax = ((center[0]-tbl['x_peak'])**2 + (center[1]-tbl['y_peak'])**2).argmin()
        fig, ax = plt.subplots(1,1, figsize=(6,6))
        ax.plot(tbl['x_peak'][idxmax],tbl['y_peak'][idxmax],'x',color='red',ms=10)
        ax.imshow(img2_slice,origin='lower')
        plot_contour(ax, np.log10(img2_slice),ncontours=20)
        plt.show()
        '''


        ### find size of biggest one
        imgcenter = np.array(img2_slice.shape)/2.
        idxmax = ((imgcenter[0]-tbl['x_centroid'])**2 + (imgcenter[1]-tbl['y_centroid'])**2).argmin()
        center = [tbl['x_centroid'][idxmax], tbl['y_centroid'][idxmax]]

        ### find center in arcseconds (NEW)
        center_coordinates = SkyCoord.from_pixel(imgcenter[0],imgcenter[1],wcs)
        x_pos_obj = SkyCoord.from_pixel(center[0],imgcenter[1],wcs)
        y_pos_obj = SkyCoord.from_pixel(imgcenter[0],center[1],wcs)
        xarcsec = x_pos_obj.separation(center_coordinates).arcsec
        if center[0] < imgcenter[0]:
            xarcsec = -xarcsec
        yarcsec = y_pos_obj.separation(center_coordinates).arcsec
        if center[1] < imgcenter[1]:
            yarcsec = -yarcsec

        #xarcsec = (extent[1]-extent[0])*center[0]/float(img2_slice.shape[0]) + extent[0]
        #yarcsec = (extent[3]-extent[2])*center[1]/float(img2_slice.shape[1]) + extent[2]
        ax[0].scatter(xarcsec,yarcsec,color='black',marker='x',s=50,linewidth=2)

        ### add in WISE PSF
        wise_psf = 6 # in arcseconds
        start = 0.85*extent[0]

        if not paperplot:
            ax[0].plot([start,start+wise_psf],[start,start],lw=2,color='k')
            ax[0].text(start+wise_psf/2.,start+1, '6"', ha='center')
            ax[0].set_xlim(extent[0],extent[1]) # reset plot limits b/c of text stuff
            ax[0].set_ylim(extent[2],extent[3])
        else:
            ax[0].set_xlim(-65,65)
            ax[0].set_ylim(-65,65)

        ### gradient
        phys_scale = float(1./WMAP9.arcsec_per_kpc_proper(pdata['z'][idx]).value)
        if objname == 'CGCG 436-030':
            center[1] = center[1]+1.5
            yarcsec += px_scale*1.5

        grad, graderr, x_arcsec, back = measure_gradient(img1_slice,img2_slice, 
                                  noise1_slice, noise2_slice, background,
                                  ax, center,
                                  tbl['peak_value'][idxmax], (xarcsec,yarcsec),
                                  phys_scale,paperplot=paperplot)

        obj_size_phys = phot_size*phys_scale
        if not paperplot:
            ax[1].text(0.05,0.06,r'f$_{\mathrm{AGN,MIR}}$='+"{:.2f}".format(pdata['pars']['fagn']['q50'][idx])+\
                                    ' ('+"{:.2f}".format(pdata['pars']['fagn']['q84'][idx]) +
                                    ') ('+"{:.2f}".format(pdata['pars']['fagn']['q16'][idx])+')',
                                    transform=ax[1].transAxes,color='black',fontsize=9)

            ax[1].axvline(phot_size, linestyle='--', color='0.2',lw=2,zorder=-1)

        else:
            ax[0].text(0.98,0.94,objname,transform=ax[0].transAxes,fontsize=14,weight='bold',ha='right')
            ax[0].text(0.98,0.88,r'$\nabla$(2 kpc)='+"{:.2f}".format(grad[1]),fontsize=14,transform=ax[0].transAxes,ha='right')

        gradient.append(grad)
        gradient_error.append(graderr)
        arcsec.append(x_arcsec)
        obj_size_kpc.append(obj_size_phys)
        objname_out.append(objname)
        background_out.append(back)
        print(objname, back)

        # I/O
        outname = outfolder+'/'+objname+'.png'
        if paperplot:
            outname = outfolder+'/sample_wise_gradient.png'

        if (not paperplot) | (count == 1):
            if not paperplot:
                plt.tight_layout()
            plt.savefig(outname,dpi=150)
            plt.close()

    out = {
            'gradient': np.array(gradient),
            'gradient_error': np.array(gradient_error),
            'arcsec': np.array(arcsec),
            'obj_size_brown_kpc': np.array(obj_size_kpc),
            'objname': objname_out,
            'background_fraction': np.array(background_out)
          }
    if not paperplot:
        pickle.dump(out,open(outfile, "wb"))
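
The peak search in the function above keeps lowering the detection threshold, and eventually widens the search box, until find_peaks returns at least one row. A standalone sketch of that loop, with synthetic data and the same 20-sigma starting point (the image, source amplitude, and import paths are assumptions):

import numpy as np
from astropy.stats import sigma_clipped_stats
from photutils.detection import find_peaks

rng = np.random.default_rng(2)
img = rng.normal(0.0, 1.0, (100, 100))
img[50:53, 48:51] += 6.0                  # one faint source, roughly 6 sigma

mean, med, std = sigma_clipped_stats(img, sigma=3.0)

tbl, nthresh, box_size = None, 20, 4
while tbl is None or len(tbl) < 1:
    tbl = find_peaks(img, nthresh * std, box_size=box_size)
    nthresh -= 2
    if nthresh < 2:                       # give up on depth, widen the box instead
        nthresh = 20
        box_size += 1
print(len(tbl), 'peak(s) found')
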
Code Example #18
def get_most_confident_outputs(img_filename, patch_center_row,
                               patch_center_col, confident_th, gpu_id):

    patch_size = 64
    center = (patch_center_col, patch_center_row)

    x_tmp = int(center[0] - patch_size / 2)
    y_tmp = int(center[1] - patch_size / 2)

    confident_connections = {}
    confident_connections['x_peak'] = []
    confident_connections['y_peak'] = []
    confident_connections['peak_value'] = []

    root_dir = './gt_dbs/MassachusettsRoads/test/images/'
    img = Image.open(os.path.join(root_dir, img_filename))
    img = np.array(img, dtype=np.float32)
    h, w = img.shape[:2]

    if x_tmp > 0 and y_tmp > 0 and x_tmp + patch_size < w and y_tmp + patch_size < h:

        img_crop = img[y_tmp:y_tmp + patch_size, x_tmp:x_tmp + patch_size, :]

        img_crop = img_crop.transpose((2, 0, 1))
        img_crop = torch.from_numpy(img_crop)
        img_crop = img_crop.unsqueeze(0)

        inputs = img_crop / 255 - 0.5

        # Forward pass of the mini-batch
        inputs = Variable(inputs)

        if gpu_id >= 0:
            inputs = inputs.cuda()

        p = {}
        p['useRandom'] = 1  # Shuffle Images
        p['useAug'] = 0  # Use Random rotations in [-30, 30] and scaling in [.75, 1.25]
        p['inputRes'] = (64, 64)  # Input Resolution
        p['outputRes'] = (64, 64)  # Output Resolution (same as input)
        p['g_size'] = 64  # Higher means narrower Gaussian
        p['trainBatch'] = 1  # Number of Images in each mini-batch
        p['numHG'] = 2  # Number of Stacked Hourglasses
        p['Block'] = 'ConvBlock'  # Select: 'ConvBlock', 'BasicBlock', 'BottleNeck'
        p['GTmasks'] = 0  # Use GT Vessel Segmentations as input instead of Retinal Images
        model_dir = './results_dir/'
        modelName = tb.construct_name(p, "HourGlass")
        numHGScales = 4  # How many times to downsample inside each HourGlass
        net = nt.Net_SHG(p['numHG'], numHGScales, p['Block'], 128, 1)
        epoch = 130
        net.load_state_dict(
            torch.load(os.path.join(
                model_dir,
                os.path.join(model_dir,
                             modelName + '_epoch-' + str(epoch) + '.pth')),
                       map_location=lambda storage, loc: storage))

        if gpu_id >= 0:
            net = net.cuda()

        output = net.forward(inputs)
        pred = np.squeeze(
            np.transpose(
                output[len(output) - 1].cpu().data.numpy()[0, :, :, :],
                (1, 2, 0)))

        mean, median, std = sigma_clipped_stats(pred, sigma=3.0)
        threshold = median + (10.0 * std)
        sources = find_peaks(pred, threshold, box_size=3)

        if visualize_graph_step_by_step:
            fig, axes = plt.subplots(1, 2)
            axes[0].imshow(img.astype(np.uint8))
            mask_graph_skel = skeletonize(mask_graph > 0)
            indxs = np.argwhere(mask_graph_skel == 1)
            axes[0].scatter(indxs[:, 1], indxs[:, 0], color='red', marker='+')

            axes[0].add_patch(
                patches.Rectangle((x_tmp, y_tmp),
                                  patch_size,
                                  patch_size,
                                  fill=False,
                                  color='cyan',
                                  linewidth=5))
            img_crop_array = img[y_tmp:y_tmp + patch_size,
                                 x_tmp:x_tmp + patch_size, :]
            axes[1].imshow(img_crop_array.astype(np.uint8),
                           interpolation='nearest')
            tmp_vector_x = []
            tmp_vector_y = []
            for ii in range(0, len(sources['peak_value'])):
                if sources['peak_value'][ii] > confident_th:
                    tmp_vector_x.append(sources['x_peak'][ii])
                    tmp_vector_y.append(sources['y_peak'][ii])
            axes[1].plot(tmp_vector_x,
                         tmp_vector_y,
                         ls='none',
                         color='red',
                         marker='+',
                         ms=25,
                         markeredgewidth=10)
            axes[1].plot(32,
                         32,
                         ls='none',
                         color='cyan',
                         marker='+',
                         ms=25,
                         markeredgewidth=10)
            plt.show()

        if visualize_evolution:

            if iter < 20 or (iter < 200 and iter % 20 == 0) or iter % 100 == 0:

                plt.figure(figsize=(12, 12), dpi=60)
                plt.imshow(img.astype(np.uint8))
                mask_graph_skeleton = skeletonize(mask_graph > 0)
                indxs_skel = np.argwhere(mask_graph_skeleton == 1)
                plt.scatter(indxs_skel[:, 1],
                            indxs_skel[:, 0],
                            color='red',
                            marker='+')
                plt.axis('off')
                plt.savefig(directory + 'iter_%05d.png' % iter,
                            bbox_inches='tight')
                plt.close()

        indxs = np.argsort(sources['peak_value'])
        for ii in range(0, len(indxs)):
            idx = indxs[len(indxs) - 1 - ii]
            if sources['peak_value'][idx] > confident_th:
                confident_connections['x_peak'].append(sources['x_peak'][idx])
                confident_connections['y_peak'].append(sources['y_peak'][idx])
                confident_connections['peak_value'].append(
                    sources['peak_value'][idx])
            else:
                break

        confident_connections = Table([
            confident_connections['x_peak'], confident_connections['y_peak'],
            confident_connections['peak_value']
        ],
                                      names=('x_peak', 'y_peak', 'peak_value'))

    return confident_connections
Code Example #19
File: quad_fa.py Project: jwhsueh/SHARP_jw
boxsize = 20

for i in range(n_real):
	realID = i+n_start

	#image_fits=fits.open(imgpath+'image_fsub1_'+str(realID)+'.fits')
	image_fits=fits.open(imgpath+'image_sie_'+str(realID)+'.fits')
	image=image_fits[0].data

	## smoothing
	kernel=convolution.Gaussian2DKernel(beam)
	image=convolution.convolve(image,kernel)

	mean,median,std=sigma_clipped_stats(image,sigma=3.0)
	threshold=median+(20*std)
	peaks=find_peaks(image,threshold,box_size=5) # w/ convolve 

	#print peaks

	x_pix,y_pix = np.array(peaks['x_peak']),np.array(peaks['y_peak'])
	f_img = np.array(peaks['peak_value'])

	if (len(x_pix)==4):
		x_img,y_img = x_pix-x_pix[img_idx],y_pix-y_pix[img_idx]
		

		## ------ Gaussian fit	
		## img cut
		gaus2d = models.Gaussian2D(amplitude=1., x_mean=0, y_mean=0., x_stddev=1.,y_stddev=1.)
		fit_g = fitting.LevMarLSQFitter()
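
The example above is cut off just as the Gaussian fit is being set up. For reference, the generic astropy.modeling pattern for fitting a 2D Gaussian to an image cutout looks roughly like the sketch below; the synthetic cutout and starting values are illustrative and are not the continuation of the original script.

import numpy as np
from astropy.modeling import models, fitting

# illustrative cutout with a single Gaussian blob at (10, 10)
yy, xx = np.mgrid[0:21, 0:21]
cutout = 5.0 * np.exp(-((xx - 10.0)**2 + (yy - 10.0)**2) / (2 * 2.0**2))

gaus2d = models.Gaussian2D(amplitude=cutout.max(), x_mean=10., y_mean=10.,
                           x_stddev=1., y_stddev=1.)
fit_g = fitting.LevMarLSQFitter()
best = fit_g(gaus2d, xx, yy, cutout)       # least-squares fit of the model
print(best.amplitude.value, best.x_mean.value, best.y_mean.value)
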
		
Code Example #20
File: nsc_dwarfs_hpix.py Project: dnidever/nscdwarfs
                                                           bcatall['dec'],
                                                           fwhm_small=small_k,
                                                           fwhm_big=big_k)
    except:
        rootLogger.info('Problems with dwarf filter')
        # Create done file
        f = open(donefile, 'w')
        f.write(host)
        f.close()
        sys.exit()

    # find peaks
    small_k = 2.0
    mn, med, std = stats.sigma_clipped_stats(clipped, sigma=3.0, iters=5)
    nsigma = 2.5
    tbl = find_peaks(clipped, med + nsigma, box_size=small_k * 2)
    rootLogger.info(str(len(tbl)) + ' peaks found')
    if len(tbl) == 0:
        # Create done file
        f = open(donefile, 'w')
        f.write(host)
        f.close()
        sys.exit()

    # add ra & dec positions of peaks found
    a, b = extent[:2]
    xvec = np.arange(a, b, (b - a) / clipped.shape[1])
    a, b = extent[2:]
    yvec = np.arange(a, b, (b - a) / clipped.shape[0])

    # Not enough pixels in density image
Code Example #21
def A3d(data, **kwargs):
    which = kwargs.get('chosen_by', 'prox')
    data_sum = np.nansum(data, axis=0)
    Npixels = kwargs.get('N_pixels_max',
                         data_sum.shape[0] * data_sum.shape[1] / 2.)
    sigma = kwargs.get('sigma', 2.)
    data_sum_noneg = np.copy(data_sum)
    data_sum_noneg[data_sum_noneg <= 0.] = 0.0
    bkg2D = Background2D(data_sum_noneg,
                         data_sum_noneg.shape,
                         filter_size=(2, 2),
                         sigma_clip=SigmaClip(sigma=3., iters=2),
                         bkg_estimator=MedianBackground())
    bkg = np.unique(bkg2D.background)
    tbl = find_peaks(data_sum, threshold=bkg)
    x0 = int(data[int(data.shape[0] * 2 / 3)].shape[1] / 2.)
    y0 = int(data[int(data.shape[0] * 2 / 3)].shape[0] / 2.)
    dist = np.array([])
    for i in range(len(tbl['x_peak'])):
        dist = np.append(
            dist,
            np.sqrt((x0 - tbl['x_peak'][i])**2 + (y0 - tbl['y_peak'][i])**2))
    if which == 'prox':
        chosen = np.where(dist == min(dist))[0]
        if len(chosen) > 1:
            for i in chosen:
                vec_chos = np.array(
                    [tbl['peak_value'][i], tbl['peak_value'][i]])
            chosen = np.where(tbl['peak_value'] == max(vec_chos))[0]
    elif which == 'mxpeak':
        chosen = np.where(tbl['peak_value'] == max(tbl['peak_value']))[0]
    cx = int(tbl['x_peak'][chosen])
    cy = int(tbl['y_peak'][chosen])
    aperture = np.zeros_like(data_sum)
    aperture[cy, cx] = 1.
    c = 1
    x = cy
    y = cx
    for n in count(start=1, step=1):
        if n > data_sum.shape[0] and n > data_sum.shape[1]: break
        if n % 2:
            x += 1
            try:
                if data_sum[x, y] >= sigma * bkg:
                    try:
                        A0 = aperture[x + 1, y] == 1
                    except:
                        A0 = False
                    try:
                        A1 = aperture[x - 1, y] == 1
                    except:
                        A1 = False
                    try:
                        A2 = aperture[x, y + 1] == 1
                    except:
                        A2 = False
                    try:
                        A3 = aperture[x, y - 1] == 1
                    except:
                        A3 = False
                    if A0 or A1 or A2 or A3:
                        aperture[x, y] = 1
                        c += 1
                        if c >= Npixels: break
            except:
                pass
            for i in range(n):
                y -= 1
                try:
                    if data_sum[x, y] >= sigma * bkg:
                        try:
                            B0 = aperture[x + 1, y] == 1
                        except:
                            B0 = False
                        try:
                            B1 = aperture[x - 1, y] == 1
                        except:
                            B1 = False
                        try:
                            B2 = aperture[x, y + 1] == 1
                        except:
                            B2 = False
                        try:
                            B3 = aperture[x, y - 1] == 1
                        except:
                            B3 = False
                        if B0 or B1 or B2 or B3:
                            aperture[x, y] = 1
                            c += 1
                            if c >= Npixels: break
                except:
                    pass
            for i in range(n):
                x -= 1
                try:
                    if data_sum[x, y] >= sigma * bkg:
                        try:
                            C0 = aperture[x + 1, y] == 1
                        except:
                            C0 = False
                        try:
                            C1 = aperture[x - 1, y] == 1
                        except:
                            C1 = False
                        try:
                            C2 = aperture[x, y + 1] == 1
                        except:
                            C2 = False
                        try:
                            C3 = aperture[x, y - 1] == 1
                        except:
                            C3 = False
                        if C0 or C1 or C2 or C3:
                            aperture[x, y] = 1
                            c += 1
                            if c >= Npixels: break
                except:
                    pass
        else:
            x -= 1
            try:
                if data_sum[x, y] >= sigma * bkg:
                    try:
                        D0 = aperture[x + 1, y] == 1
                    except:
                        D0 = False
                    try:
                        D1 = aperture[x - 1, y] == 1
                    except:
                        D1 = False
                    try:
                        D2 = aperture[x, y + 1] == 1
                    except:
                        D2 = False
                    try:
                        D3 = aperture[x, y - 1] == 1
                    except:
                        D3 = False
                    if D0 or D1 or D2 or D3:
                        aperture[x, y] = 1
                        c += 1
                        if c >= Npixels: break
            except:
                pass
            for i in range(n):
                y += 1
                try:
                    if data_sum[x, y] >= sigma * bkg:
                        try:
                            E0 = aperture[x + 1, y] == 1
                        except:
                            E0 = False
                        try:
                            E1 = aperture[x - 1, y] == 1
                        except:
                            E1 = False
                        try:
                            E2 = aperture[x, y + 1] == 1
                        except:
                            E2 = False
                        try:
                            E3 = aperture[x, y - 1] == 1
                        except:
                            E3 = False
                        if E0 or E1 or E2 or E3:
                            aperture[x, y] = 1
                            c += 1
                            if c >= Npixels: break
                except:
                    pass
            for i in range(n):
                x += 1
                try:
                    if data_sum[x, y] >= sigma * bkg:
                        try:
                            F0 = aperture[x + 1, y] == 1
                        except:
                            F0 = False
                        try:
                            F1 = aperture[x - 1, y] == 1
                        except:
                            F1 = False
                        try:
                            F2 = aperture[x, y + 1] == 1
                        except:
                            F2 = False
                        try:
                            F3 = aperture[x, y - 1] == 1
                        except:
                            F3 = False
                        if F0 or F1 or F2 or F3:
                            aperture[x, y] = 1
                            c += 1
                            if c >= Npixels: break
                except:
                    pass
    if which == 'prox':
        no_chosen = np.where(dist != min(dist))[0]
    elif which == 'mxpeak':
        no_chosen = np.where(tbl['peak_value'] != max(tbl['peak_value']))[0]

    for k in no_chosen:
        ncy = int(tbl['x_peak'][k])
        ncx = int(tbl['y_peak'][k])

        aperture[ncx, ncy] = 0
        try:
            aperture[ncx + 1, ncy] = 0
        except:
            pass
        try:
            aperture[ncx, ncy + 1] = 0
        except:
            pass
        try:
            aperture[ncx - 1, ncy] = 0
        except:
            pass
        try:
            aperture[ncx, ncy - 1] = 0
        except:
            pass
        try:
            aperture[ncx + 1, ncy + 1] = 0
        except:
            pass
        try:
            aperture[ncx + 1, ncy - 1] = 0
        except:
            pass
        try:
            aperture[ncx - 1, ncy - 1] = 0
        except:
            pass
        try:
            aperture[ncx - 1, ncy - 1] = 0
        except:
            pass
        try:
            aperture[ncx - 1, ncy + 1] = 0
        except:
            pass
        try:
            aperture[ncx - 1, ncy - 1] = 0
        except:
            pass

    return aperture, c
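
The 'prox' branch above picks the peak closest to a reference pixel by accumulating a distance array in a Python loop; the same selection can be written in vectorized form. A small sketch with made-up peak coordinates:

import numpy as np

x_peak = np.array([12, 30, 45])    # e.g. tbl['x_peak']
y_peak = np.array([40, 28, 10])    # e.g. tbl['y_peak']
x0, y0 = 32, 32                    # reference position (image centre)

dist = np.hypot(x_peak - x0, y_peak - y0)   # distance of each peak from the reference
chosen = int(np.argmin(dist))
print(x_peak[chosen], y_peak[chosen])
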
Code Example #22
def find_pinholes_regular(fname,
                          sname,
                          fdarkff,
                          fdark,
                          fff,
                          files,
                          ref_shape,
                          size,
                          threshold,
                          fwhm,
                          fitshape,
                          range_psf,
                          sigma=2.,
                          oversampling=4,
                          maxiters=3):
    """Finds and fits regullary spread pinhole positions with a ePSF in a FITS image.
    
    Parameters
    ----------
    fname : str
        Folder name of the input fits files.
    sname : str
        Folder name in which the found and matched pinhole positions are saved (txt files).
    fdarkff : string
        Location of the dark images for the flat field images.
    fdark : string
        Location of the dark images for the raw images.
    fff : string
        Location of the flat field images.
    files : (1, 2)-shaped int array
        File range to create a median image
    ref_shape : (1,2)-shaped array
        Number of reference stars in x and y direction [x, y].
    size : int
        Rectangular size of the ePSF. Size must be an odd number.
    threshold : float
        The absolute image value above which to select sources.
    fwhm : float
        The full-width half-maximum (FWHM) of the major axis of the Gaussian kernel in units of pixels.
    fitshape : int or length-2 array-like
        Rectangular shape around the center of a star which will be used to collect the data to do the fitting. 
        Can be an integer to be the same along both axes. E.g., 5 is the same as (5, 5), which means to fit only at the following 
        relative pixel positions: [-2, -1, 0, 1, 2]. Each element of fitshape must be an odd number.
    range_psf : (1, 4)-shaped int array
        Position range to compute epsf [xmin,xmax,ymin,ymax]
    sigma : float
        Number of standard deviations used to perform sigma clipping with an astropy.stats.SigmaClip object.
    oversampling : int or tuple of two int
        The oversampling factor(s) of the ePSF relative to the input stars along the x and y axes. 
        The oversampling can either be a single float or a tuple of two floats of the form (x_oversamp, y_oversamp). 
        If oversampling is a scalar then the oversampling will be the same for both the x and y axes.
    maxiters : int
        The maximum number of iterations to perform.
    Returns
    -------
    positions_sort : (N,2)-shaped array
        Found and matched positions of the pinholes.
    ref_positions : (N,2)-shaped array
        Matched reference grid positions.
    """

    #Load the sample of fits images
    entries = os.listdir(fname)

    data_col = np.array([fits.getdata(fname + '/' + entries[files[0]], ext=0)])
    for k in range(files[0] + 1, files[1] + 1):  # the first frame is already loaded above
        data_col = np.append(data_col,
                             [fits.getdata(fname + '/' + entries[k], ext=0)],
                             axis=0)

    #Data reduction: dark current + flat field + bias
    data_col = data_correction(data_col, fdarkff, fdark, fff)

    #Calculate median image
    data_full = np.median(data_col, axis=0)

    data = data_full[range_psf[2]:range_psf[3], range_psf[0]:range_psf[1]]

    #Find peaks in data
    peaks_tbl = find_peaks(data, threshold=threshold)
    peaks_tbl['peak_value'].info.format = '%.8g'

    #Load data around found peaks
    hsize = (size - 1) / 2
    x = peaks_tbl['x_peak']
    y = peaks_tbl['y_peak']
    mask = ((x > hsize) & (x < (data.shape[1] - 1 - hsize)) & (y > hsize) &
            (y < (data.shape[0] - 1 - hsize)))

    stars_tbl = Table()
    stars_tbl['x'] = x[mask]
    stars_tbl['y'] = y[mask]

    #Calculate mean, median, std
    mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=sigma)
    data = data - median_val

    #Find pinholes and create ePSF
    nddata = NDData(data=data)

    stars = extract_stars(nddata, stars_tbl, size=size)

    epsf_builder = EPSFBuilder(oversampling=oversampling,
                               maxiters=maxiters,
                               progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)

    #Use ePSF to find precise locations of pinholes
    daofind = DAOPhotPSFPhotometry(crit_separation=30,
                                   threshold=threshold,
                                   fwhm=fwhm,
                                   psf_model=epsf,
                                   fitshape=fitshape,
                                   aperture_radius=12,
                                   niters=1)

    #Get positions
    sources = daofind(data_full)

    for col in sources.colnames:
        sources[col].info.format = '%.8g'

    pos_full = np.transpose((sources['x_fit'], sources['y_fit']))

    #Plot found pinholes
    apertures = CircularAperture(pos_full, r=10)

    norm = ImageNormalize(stretch=SqrtStretch())

    fig, ax = plt.subplots()
    ax.set_title('Pinhole Positions')
    ax.set(xlabel='x [pixel]', ylabel='y [pixel]')
    ax.imshow(data_full, cmap='Greys', origin='lower', norm=norm)
    apertures.plot(color='blue', lw=1.5, alpha=0.5)
    ax.legend(['#pinholes = ' + str(len(pos_full[:, 0]))],
              loc='lower left',
              prop={'size': 12})
    plt.show()

    #Find central position
    xcent = (np.max(pos_full[:, 0]) + np.min(pos_full[:, 0])) / 2
    ycent = (np.max(pos_full[:, 1]) + np.min(pos_full[:, 1])) / 2

    #Find positions at the edges to set base positions for the linear transformation matching pinholes with the reference grid
    distance = (pos_full[:, 0] - xcent)**2 + (pos_full[:, 1] - ycent)**2
    pins = len(distance)
    sort_distance = np.partition(distance,
                                 (pins - 4, pins - 3, pins - 2, pins - 1))
    maxpos = pos_full[distance == sort_distance[pins - 1]]
    maxpos = np.append(maxpos,
                       pos_full[distance == sort_distance[pins - 2]],
                       axis=0)
    maxpos = np.append(maxpos,
                       pos_full[distance == sort_distance[pins - 3]],
                       axis=0)
    maxpos = np.append(maxpos,
                       pos_full[distance == sort_distance[pins - 4]],
                       axis=0)

    b01 = maxpos[maxpos[:, 1] < ycent]
    b23 = maxpos[maxpos[:, 1] > ycent]

    posbase = np.array(b01[b01[:, 0] < xcent])

    posbase = np.append(posbase, b01[b01[:, 0] > xcent], axis=0)
    posbase = np.append(posbase, b23[b23[:, 0] < xcent], axis=0)

    print(posbase)

    #Sort positions by matching with reference grid
    positions_sort, ref_positions = sort_positions_regular(
        pos_full, posbase, ref_shape)

    text = np.array([
        positions_sort[:, 0], positions_sort[:, 1], ref_positions[:, 0],
        ref_positions[:, 1]
    ])
    text_trans = np.zeros((len(positions_sort[:, 0]), 4))

    #Transpose text matrix
    for k in range(0, 4):
        for l in range(0, len(positions_sort[:, 0])):
            text_trans[l][k] = text[k][l]

    #Save data as txt file
    np.savetxt(
        sname + '.txt',
        text_trans,
        fmt='%1.9E',
        delimiter='\t',
        header=
        ' x-measured         y-measured         x-reference          y-reference',
        comments='')

    return positions_sort, ref_positions
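
# Hypothetical invocation of find_pinholes_regular(): every folder name, file range
# and tuning value below is a placeholder chosen for illustration, not a value taken
# from the original pipeline; only the keyword names come from the signature above.
positions, reference = find_pinholes_regular(
    fname='raw_frames',                 # folder with the raw FITS frames (assumed)
    sname='matched_pinholes',           # prefix of the output txt file (assumed)
    fdarkff='darks_for_flats',          # darks for the flat-field frames (assumed)
    fdark='darks',                      # darks for the raw frames (assumed)
    fff='flats',                        # flat-field frames (assumed)
    files=[0, 10],                      # frames used for the median image (assumed)
    ref_shape=[21, 21],                 # 21 x 21 pinhole reference grid (assumed)
    size=25,                            # odd ePSF cutout size
    threshold=5000.,                    # absolute detection threshold (assumed)
    fwhm=4.,                            # expected FWHM in pixels (assumed)
    fitshape=11,                        # odd fitting-box size
    range_psf=[200, 1200, 200, 1200])   # [xmin, xmax, ymin, ymax] used for the ePSF (assumed)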
コード例 #23
0
def get_sources(detection_frame,
                mask=False,
                sigma=5.0,
                mode='DAO',
                fwhm=2.5,
                threshold=None,
                npix=4,
                return_segm_image=False):
    """
    Main method used to identify sources in a detection frame and estimate their position.
    Different modes are available, accessible through the ``mode`` keyword :

    * DAO : uses the :class:`photutils:photutils.DAOStarFinder` method, adapted from DAOPHOT.
    * IRAF : uses the :class:`photutils:photutils.IRAFStarFinder` method, adapted from IRAF.
    * PEAK : uses the :func:`photutils:photutils.find_peaks` method, looking for local peaks above a given threshold.
    * ORB : uses the :func:`ORB:orb.utils.astrometry.detect_stars` method, fitting stars in the frame
    * SEGM : uses the :func:`photutils:photutils.detect_sources` method, segmenting the image.

    The most reliable is SEGM.

    Parameters
    ----------
    detection_frame : 2D :class:`~numpy:numpy.ndarray`
        Map on which the sources should be visible.
    mask : 2D :class:`~numpy:numpy.ndarray` or bool,  Default = False
        (Optional) If passed, only sources inside the mask are detected.
    sigma : float
        (Optional) Signal-to-noise ratio of the detections we want to keep. Only used if threshold is None. In this case, the signal and the noise are computed with sigma-clipping on the detection frame. Default = 5
    threshold : float or 2D :class:`~numpy:numpy.ndarray` of floats
        (Optional) Threshold above which we consider having a detection. Default is None
    mode : str
        (Optional) One of the detection modes listed above. Default = 'DAO'
    fwhm : float
        (Optional) Expected FWHM of the sources. Default : 2.5
    npix : int
        (Optional) Only used by the 'SEGM' method : minimum number of connected pixels with flux above the threshold to make a credible source. Default = 4
    return_segm_image : bool, Default = False
        (Optional) Only used in the 'SEGM' mode. If True, returns the obtained segmentation image.

    Returns
    -------
    sources : :class:`~pandas:pandas.DataFrame`
        A DataFrame where each row represents a detection, with at least the positions named as ``xcentroid``, ``ycentroid`` (WARNING : using astropy convention). The other columns depend on the mode used.

    """
    if mask is False:
        mask = np.ones_like(detection_frame)
    if threshold is None:
        mean, median, std = sigma_clipped_stats(
            detection_frame, sigma=3.0, iters=5,
            mask=~mask.astype(bool))  # Mask the region outside the ring
        threshold = median + sigma * std
    # Detect over the whole frame, but keep only what actually lies inside the ring
    if mode == 'DAO':
        daofind = DAOStarFinder(fwhm=fwhm, threshold=threshold)
        sources = daofind(detection_frame)
    elif mode == 'IRAF':
        irafind = IRAFStarFinder(threshold=threshold, fwhm=fwhm)
        sources = irafind(detection_frame)
    elif mode == 'PEAK':
        sources = find_peaks(detection_frame, threshold=threshold)
        sources.rename_column('x_peak', 'xcentroid')
        sources.rename_column('y_peak', 'ycentroid')
    elif mode == 'ORB':
        astro = Astrometry(detection_frame, instrument='sitelle')
        path, fwhm_arc = astro.detect_stars(min_star_number=5000,
                                            r_max_coeff=1.,
                                            filter_image=False)
        star_list = astro.load_star_list(path)
        sources = Table([star_list[:, 0], star_list[:, 1]],
                        names=('ycentroid', 'xcentroid'))
    elif mode == 'SEGM':
        logging.info('Detecting')
        segm = detect_sources(detection_frame, threshold, npixels=npix)
        deblend = True
        labels = segm.labels
        if deblend:
            # while labels.shape != (0,):
            #     try:
            #         #logging.info('Deblending')
            #         # fwhm = 3.
            #         # s = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
            #         # kernel = Gaussian2DKernel(s, x_size = 3, y_size = 3)
            #         # kernel = Box2DKernel(3, mode='integrate')
            #         deblended = deblend_sources(detection_frame, segm, npixels=npix, labels=labels)#, filter_kernel=kernel)
            #         success = True
            #     except ValueError as e:
            #         #warnings.warn('Deblend was not possible.\n %s'%e)
            #         source_id = int(e.args[0].split('"')[1])
            #         id = np.argwhere(labels == source_id)[0,0]
            #         labels = np.concatenate((labels[:id], labels[id+1:]))
            #         success = False
            #     if success is True:
            #         break
            try:
                logging.info('Deblending')
                # fwhm = 3.
                # s = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
                # kernel = Gaussian2DKernel(s, x_size = 3, y_size = 3)
                # kernel = Box2DKernel(3, mode='integrate')
                deblended = deblend_sources(
                    detection_frame, segm,
                    npixels=npix)  #, filter_kernel=kernel)
            except ValueError as e:
                warnings.warn('Deblend was not possible.\n %s' % e)
                deblended = segm
            logging.info('Retrieving properties')
            sources = source_properties(detection_frame, deblended).to_table()
        else:
            deblended = segm
            logging.info('Retrieving properties')
            sources = source_properties(detection_frame, deblended).to_table()
        logging.info('Filtering Quantity columns')
        for col in sources.colnames:
            if type(sources[col]) is Quantity:
                sources[col] = sources[col].value
    sources = mask_sources(sources, mask)  # keep only sources inside the mask
    df = sources.to_pandas()
    if return_segm_image:
        return deblended.array, df
    else:
        return df
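
# Minimal usage sketch (not from the original project): detect a single synthetic
# Gaussian source with the default 'DAO' mode. It assumes the module-level imports
# above (DAOStarFinder, etc.) and the module's own mask_sources() helper are
# available; the threshold is passed explicitly so the sigma-clipping branch is skipped.
import numpy as np

yy, xx = np.mgrid[0:128, 0:128]
frame = 50.0 * np.exp(-((xx - 64.0)**2 + (yy - 64.0)**2) / (2.0 * 1.5**2))
frame = frame + np.random.normal(0.0, 1.0, frame.shape)

detections = get_sources(frame, mode='DAO', fwhm=2.5, threshold=5.0)
print(detections[['xcentroid', 'ycentroid']])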
コード例 #24
0
def center(s_post, gal_cat, image_wcs, search_box_width):

    # centroid of the gal_cat
    # search box width = width of the search box around the signpost in arcmin. (5' x 5' BOX)
    # image_wcs = WCS of the image where objects are detected.

    # Search box
    PIX_SCALE = 0.000239631 * 3600  # arcsec/pixel ; 0.000239631 degrees/pixel for IRAC-ch2 (SDWFS Survey)
    wid_arcsec = search_box_width * 60  # arcsec
    wid_pixels = int(wid_arcsec / PIX_SCALE)  # in pixels

    init_x, init_y = image_wcs.wcs_world2pix(
        s_post[0], s_post[1], 0)  # initial center estimate to draw box
    xmin, xmax = int(init_x - (wid_pixels / 2.0)), int(
        init_x + (wid_pixels / 2.0))  # boundaries
    ymin, ymax = int(init_y - (wid_pixels / 2.0)), int(
        init_y + (wid_pixels / 2.0))  # boundaries

    # Galaxy catalog in image pixel coordinates
    galpix = convpix(gal_cat, image_wcs)  # objects in pixel coordinates
    x_pixels, y_pixels = galpix[:, 0], galpix[:, 1]

    # 2D-histogram on the grid in the search box
    binstep = 5.0  # pixels, grid resolution
    wght = np.asarray(gal_cat['ch2'])  # weights: irac ch2[4.5 um] flux, uJy
    xedges, yedges = np.arange(xmin, xmax + binstep,
                               binstep), np.arange(ymin, ymax + binstep,
                                                   binstep)
    h, xedges, yedges = np.histogram2d(x_pixels,
                                       y_pixels,
                                       bins=(xedges, yedges),
                                       weights=wght)

    # Smoothing the 2D histogram of the galaxies in the search box
    size = int(round(
        (np.shape(h)[0]) / 4.0))  # kernel size : at least ~ 4 times smaller
    stdev_ker = size / 4.0  # size = 4-sigma
    gauss_kernel = Gaussian2DKernel(x_stddev=stdev_ker,
                                    y_stddev=stdev_ker,
                                    x_size=size,
                                    y_size=size)  # kernel
    smoothed_data_gauss = convolve_fft(h.T,
                                       gauss_kernel)  # convolution/smoothing

    # Peak of the histogram
    x_org, y_org = (xedges[0] + xedges[1]) / 2.0, (
        yedges[0] + yedges[1]) / 2.0  # origin of bin-coordinate
    fp_m = find_peaks(smoothed_data_gauss, 0,
                      npeaks=1)  # 1 peak with max value
    xbin_peak, ybin_peak = fp_m['x_peak'][0], fp_m['y_peak'][
        0]  # x, y positions of the peak
    x_p, y_p = (x_org + xbin_peak * binstep), (
        y_org + ybin_peak * binstep)  # conversion to pixel coordinates
    peak_fk5 = image_wcs.wcs_pix2world(x_p, y_p, 0)

    # Centroid of the image moments
    cen_pos = centroid_com(smoothed_data_gauss)
    cenx, ceny = (x_org + cen_pos[0] * binstep), (y_org + cen_pos[1] * binstep)
    centre = image_wcs.wcs_pix2world(cenx, ceny, 0)

    return centre, peak_fk5
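
# Illustrative call only: the mosaic and catalogue file names and the signpost
# coordinates are placeholders, and convpix() is the project's own helper used
# inside center(). The catalogue must provide the 'ch2' flux column.
from astropy.io import fits
from astropy.table import Table
from astropy.wcs import WCS

image_wcs = WCS(fits.getheader('irac_ch2_mosaic.fits'))   # hypothetical mosaic
gal_cat = Table.read('members.fits')                      # hypothetical member catalogue
signpost = (218.0, 34.0)                                  # (RA, Dec) in degrees, assumed

centroid_radec, peak_radec = center(signpost, gal_cat, image_wcs, search_box_width=5.0)
print('centroid:', centroid_radec, 'peak:', peak_radec)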
コード例 #25
0
def centroid_fit(x,
                 y,
                 data,
                 reference=None,
                 rssframe=None,
                 galaxyid=None,
                 microns=True,
                 circular=True):  #** reference,rssframe,galaxyid added
    """Fit the x,y,data values, regardless of what they are and return some useful stuff. Data is an array of spectra"""

    working_dir = rssframe.strip('sci.fits')

    # Smooth the data spectrally to get rid of cosmics
    data_smooth = np.zeros_like(data)
    for q in range(np.shape(data)[0]):
        # data_smooth[q,:]=utils.smooth(data[q,:], 11) #default hanning smooth
        data_smooth[q, :] = median_filter(data[q, :], 15)

    # Now sum the data over a large range to get broad band "image"
    data_sum = np.nansum(data_smooth[:, 200:1800], axis=1)
    data_med = np.nanmedian(data_smooth[:, 200:1800], axis=1)

    #** New masking method starts ————————————————————————————————————————————————
    from scipy.ndimage.filters import gaussian_filter
    from astropy.stats import sigma_clipped_stats
    from photutils import find_peaks

    # Parameter initializations
    x0, y0 = x - np.min(x), y - np.min(y)  # image x,y
    xc, yc = (np.max(x) - np.min(x)) / 2. + np.min(
        x), (np.max(y) - np.min(y)) / 2. + np.min(y)  # central pixel
    width = 85.  # default gaussian filtering size
    checkind = 'None'  # for check list
    img = np.zeros((np.max(x0) + 1, np.max(y0) + 1))  # rss image
    x_good, y_good, data_sum_good = x, y, data_sum  # good fibres to use
    tx, ty, trad = xc, yc, 1000  #target x,y centre and masking radius (1000 means no masking)
    if not os.path.exists(
            working_dir + '_centroid_fit_reference/'
    ):  # path to save centre of reference frame & checklist
        os.makedirs(working_dir + '_centroid_fit_reference')

# Load fibre flux to image
    for i in range(len(x0)):
        img[x0[i], y0[i]] = data_sum[i]

# Gaussian filtering
    img1 = gaussian_filter(
        img, sigma=(width, width), order=0,
        mode='constant')  # width = diameter of a core in degrees/microns

    # Find peaks
    mean, median, std = sigma_clipped_stats(img1, sigma=3.0)
    threshold = median + std
    tbl = find_peaks(img1, threshold, box_size=105)

    # Case 1: no peaks are found, so masking is not applied (in practice this rarely happens)
    if tbl is None:
        checkind = 'nopeak'

    elif (len(tbl) < 1):
        checkind = 'nopeak'

# Case2: A single peak is found
    elif (len(tbl) == 1):
        checkind = 'single'
        dist = (tbl['y_peak'] + np.min(x) -
                xc)**2 + (tbl['x_peak'] + np.min(y) -
                          yc)**2  # separation between a peak and centre
        if (dist < (310)**2):  # Single peak near the centre
            tx, ty, trad = tbl['y_peak'] + np.min(x), tbl['x_peak'] + np.min(
                y), 105 * 2  # y_peak is x. yes. it's right.
        else:  # When a peak is near the edge. High possibility that our target is not detected due to low brightness
            for k in range(
                    1, 100
            ):  # repeat until it finds multiple peaks with reduced filtering box
                width = width * 0.98
                img3 = gaussian_filter(
                    img,
                    sigma=(width, width),
                    order=0,
                    mode='constant',
                    cval=np.min(
                        img))  # width = diameter of a core in degrees/microns
                mean, median, std = sigma_clipped_stats(img3, sigma=3.0)
                threshold = median + std * 0.1
                tbl = find_peaks(img3, threshold, box_size=width)  #find peaks
                if tbl is None:
                    continue
                if (
                        len(tbl) == 1
                ):  # only a single peak is found until maximum iteration (=100)
                    tx, ty, trad = tbl['y_peak'] + np.min(
                        x), tbl['x_peak'] + np.min(
                            y
                        ), 1000  # fibre masking is not applied (trad = 1000)
                    checkind = 'single_edge'

                if (len(tbl) > 1
                    ):  # multiple peaks are found, go to Case3: multiple peaks
                    checkind = 'multi_faint'
                    break

    # Case3: When there are multiple peaks
    elif (len(tbl) > 1):
        if checkind != 'multi_faint':
            checkind = 'multi'
        xx, yy = tbl['y_peak'] + np.min(x), tbl['x_peak'] + np.min(
            y)  # y_peak is x. yes. it's right.

        # The assumption is that dithering is relatively small, and our target is near the target centre from the (1st) reference frame
        if reference is not None and rssframe != reference and os.path.exists(
                working_dir + '_centroid_fit_reference/centre_' + galaxyid +
                '_ref.txt'):
            fileref = open(
                working_dir + '_centroid_fit_reference/centre_' + galaxyid +
                '_ref.txt', 'r')
            rx, ry = np.loadtxt(fileref, usecols=(0, 1))
            coff = (xx - rx)**2 + (
                yy - ry
            )**2  # If not reference frame, the closest object from the reference
        else:
            coff = (xx - xc)**2 + (
                yy - yc
            )**2  # If reference frame, the closest object from the centre

        tx, ty = xx[np.where(coff == np.min(coff))[0][0]], yy[np.where(
            coff == np.min(coff))[0][0]]  # target centre
        xx, yy = xx[np.where(xx * yy != tx * ty)], yy[np.where(
            xx * yy != tx * ty)]
        osub = np.where(
            ((xx - tx)**2 + (yy - ty)**2 - np.min((xx - tx)**2 + (yy - ty)**2))
            < 0.1)  # the 2nd closest object
        trad = np.sqrt(
            (xx[osub] - tx)**2 + (yy[osub] - ty)**2
        ) / 2.  # masking radius = (a separation btw the target and 2nd closest object)/2.
        if (trad > 105 * 2):  # when masking radius is too big
            trad = 105 * 2
        if (trad < 105 * 1.5):  # when masking radius is too small
            trad = 105 * 1.5

    # Use fibres only within masking radius
    gsub = np.where(np.sqrt((x - tx)**2 + (y - ty)**2) < trad)
    if len(gsub[0]) < 5:
        tdist = np.sqrt((x - tx)**2 + (y - ty)**2)
        inds = np.argsort(tdist)
        gsub = inds[:5]
    x_good, y_good, data_sum_good = x[gsub], y[gsub], data_sum[gsub]

    # Save the target centre of reference frame
    if reference is not None and rssframe == reference:
        ref = open(
            working_dir + '_centroid_fit_reference/centre_' + galaxyid +
            '_ref.txt', 'w')
        try:
            ref.write(str(tx.data[0]) + ' ' + str(ty.data[0]))
        except:
            ref.write(str(tx) + ' ' + str(ty))
        ref.close()


#** New masking method ends ————————————————————————————————————————————————

# Use the crude distributed centre-of-mass to get the rough centre of mass
    com = utils.comxyz(x_good, y_good,
                       data_sum_good)  #**use good data within masking

    # Peak height guess could be closest fibre to com position.
    dist = (x - com[0])**2 + (
        y - com[1])**2  # distance between com and all fibres.

    # First guess at width of Gaussian - diameter of a core in degrees/microns.
    if microns == True:
        sigx = 105.0
        core_diam = 105.0
    else:
        sigx = 4.44e-4
        core_diam = 4.44e-4

    # First guess Gaussian parameters.
    if circular == True:
        p0 = [
            data_sum[np.sum(np.where(dist == np.min(dist)))], com[0], com[1],
            sigx, 0.0
        ]

        #print "Guess Parameters:", p0 #here

    elif circular == False:
        p0 = [
            data_sum[np.sum(np.where(dist == np.min(dist)))], com[0], com[1],
            sigx, sigx, 45.0, 0.0
        ]
        #print "Guess Parameters:", p0

    # Fit two circular 2D Gaussians.
    gf = fitting.TwoDGaussFitter(
        p0, x_good, y_good, data_sum_good)  #** use good data within masking
    amplitude_mic, xout_mic, yout_mic, sig_mic, bias_mic = gf.p

    fitting.fibre_integrator(gf, core_diam)  # fibre integrator
    gf.fit()  #### gaussian fitting
    # Make a linear grid to reconstruct the fitted Gaussian over.
    x_0 = np.min(x)
    y_0 = np.min(y)

    # dx should be 1/10th the fibre diameter (in whatever units)
    dx = sigx / 10.0

    xlin = x_0 + np.arange(100) * dx  # x axis
    ylin = y_0 + np.arange(100) * dx  # y axis

    # Reconstruct the model
    model = np.zeros((len(xlin), len(ylin)))
    # Reconstructing the Gaussian over the proper grid.
    for ii in range(len(xlin)):
        xval = xlin[ii]
        for jj in range(len(ylin)):
            yval = ylin[jj]
            model[ii, jj] = gf.fitfunc(gf.p, xval, yval)

    amplitude_mic, xout_mic, yout_mic, sig_mic, bias_mic = gf.p
    #  print('gx,gy final',xout_mic,yout_mic) #test

    return gf.p, data_sum, xlin, ylin, model
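
# Illustrative call only (kept as a comment): x and y are fibre positions in
# microns, data is the (n_fibre, n_wavelength) spectral array, and the file
# names / galaxy id below are placeholders. The routine also relies on the
# package's own 'utils' and 'fitting' modules being importable.
# p, flux, xlin, ylin, model = centroid_fit(x, y, data,
#                                           reference='ref_sci.fits',
#                                           rssframe='obs_sci.fits',
#                                           galaxyid='12345',
#                                           microns=True, circular=True)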
コード例 #26
0
def find_peaks(imagename, ext=1, outname='default', threshold=40.0,
               box_size=24.0, border_width=5.0):
#, footprint=footprint, mask=mask, 
#               border_width=border_width, npeaks=npeaks, subpixel=subpixel,
#               error=error, wcs=wcs):
    """ Extends `photutils.find_peaks`.

    Parameters
    ----------
    imagename : string
        Name of the FITS file.
    ext : int
        The extension of the FITS file to read.
    outname : string
        Name of the output coordinate file. If "default", becomes,
        "<imagename>.coo."
    threshold : float
        Default of "40.0."
    box_size : float
        Default of "24.0."
    border_width : float
        Default of "5.0."
        
    Returns
    -------
    coo_tab : astropy.Table
        Table containing the coordinates.

    References
    ----------  
    http://photutils.readthedocs.org/en/latest/api/photutils.detection.find_peaks.html#photutils.detection.find_peaks
    
    """
    # Fetch function metadata.
    current_params = locals()
    func_name = sys._getframe().f_code.co_name 

    # Read in FITS file.    
    hdulist = fits.open(imagename)
    data = hdulist[ext].data
    hdulist.close()

    coo_tab = photutils.find_peaks(data=data, \
                                   threshold=threshold, \
                                   box_size=box_size, \
                                   border_width=border_width)
                                   #footprint=footprint, \
                                   #mask=mask, \
                                   #npeaks=npeaks, \
                                   #subpixel=subpixel, \
                                   #error=error, \
                                   #wcs=wcs)

    # Create basename for out file.
    if outname == 'default':
        baseoutname = imagename + '.coo'
    else:
        baseoutname = outname

    # Check whether file already exists. If yes, append number.
    fileoutname = baseoutname
    i=1
    while os.path.isfile(fileoutname):
        fileoutname = baseoutname
        fileoutname += ('.' + str(i))
        i += 1

    write_out_photfile(fileoutname, coo_tab, current_params, func_name)

    return coo_tab
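
# Hypothetical call to the wrapper above: 'image.fits' is a placeholder file name.
# The wrapper writes the peak list to 'image.fits.coo' (appending '.1', '.2', ...
# if that file already exists) via the module's write_out_photfile() helper and
# returns the peak table.
coo_tab = find_peaks('image.fits', ext=1, threshold=40.0, box_size=24.0)
print(coo_tab)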
コード例 #27
0
ファイル: brutus_plots.py プロジェクト: fpavogt/brutus
def build_ap_list(data, 
                  start_aps = None,
                  radius = 3.0,
                  automatic_mode=True,
                  interactive_mode=False,
                  lam = None,
                  save_plot = None,
                  ):
    ''' Detects local maxima in an image, and allows the manual inspection of the result.
    
    This function finds local maxima in a 2-D array, and then offers the user the ability
    to check/modify the found peaks manually. It associates a fixed circular aperture with
    each peak (individually modifiable manually), used later to extract the integrated
    spectra of these areas.
    
    :Args:
        data: 2-D numpy array
            The raw data from which to find maxima.
        start_aps: list of list [default: None]
            A pre-existing list of apertures.
        radius: int [default: 3.0]
            The default radius of the apertures associated with the local maxima detected
            automatically.
        automatic_mode: bool [default: True]
                        Whether to detect peaks and assign apertures automatically or not.
        interactive_mode: bool [default: False]
                          Whether to inspect the apertures manually or not.
        lam: float [default: None]
             The wavelength of the data, to be added to the plot title if set.
        save_plot: string [default: None]
                   If set, the name of the file used to save the final aperture selection.
        
    :Returns:
        out: 2-D numpy array
             The array of apertures with shape (n,3), where n is the total number of
             apertures (1 per row), defined by (x,y,r) its center and radius (in pixels). 
    :Notes:
        If start_aps are provided, then automatic_mode is False.     
    '''
    
    mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5) 
    
    # I tried to play with daofind at some point, with some limited success. 
    # It is very sensitive to varying background, and was not ideal to find HII regions
    # at the core of galaxies. 
    # The next line is left here for possible future usage, as a reminder that I tried.
    # sources = daofind(data, fwhm=5.0, threshold=1.*std) 

    # For now, these parameters are fixed. Until I see whether playing with them at the 
    # user level can help or not.
    threshold =  (1.0 * std) 
    #myf = np.array([[0,1,1,1,0],
    #                [1,1,1,1,1],
    #                [1,1,1,1,1],
    #                [1,1,1,1,1],
    #                [0,1,1,1,0]])               
    
    if not(start_aps) and automatic_mode:
        # Find the local peak in the data.
        tbl = photutils.find_peaks(data, threshold, box_size=5, subpixel=False)
        xs = tbl['x_peak']
        ys = tbl['y_peak']
        rs = np.zeros(len(xs))+radius
    elif start_aps:
        # User provided us with apertures already. No need to start from scratch
        xs,ys,rs = zip(*start_aps)
    else:
        xs = np.zeros(0)
        ys = np.zeros(0)
        rs = np.ones(0)
        
    # Start the plotting
    plt.close(90)
    fig = plt.figure(90, figsize=(10,8))
    gs = gridspec.GridSpec(1,1, height_ratios=[1], width_ratios=[1])
    gs.update(left=0.05, right=0.95, bottom=0.08, top=0.93, wspace=0.02, hspace=0.03)
  
    ax = plt.subplot(gs[0,0])
 
    # Note, I will here be working in pixel space for simplicity. No need to bother
    # with WCS coefficient for now.
    ax.imshow(data, cmap='Greys', origin='lower', 
              norm=LogNorm(), vmin=1,
              interpolation='nearest')
              
    # Plot the peaks, and make them pickable !        
    line, = ax.plot(xs, ys, ls='none', color='tomato',
             marker='+', ms=10, lw=1.5, picker=4)
             
    # Plot the associated apertures, but DON'T make them pickable.
    # Construct the size of the apertures associated with each peak, using the 
    # default value from the user.
    # TODO: be smarter, and find the best aperture size for each peak. 
    # E.G. 2D gaussian fitting ?
    # Also for now, just use a Circle            
    my_ap_scatter(ax, xs, ys, rs, facecolor='none',edgecolor='tomato')  
    
    # Now, the 1-line black magic command that deals with the user interaction
    if interactive_mode:          
        apmanager = ApManager(fig, ax, line, rs)          

    ax.grid(True)
    ax.set_xlim((-10,np.shape(data)[1]+10))
    ax.set_xlabel(r'x [pixels]', labelpad = 10)
    ax.set_ylim((-10,np.shape(data)[0]+10))
    ax.set_ylabel(r'y [pixels]', labelpad = 10)
    ax.set_title(r'$\lambda$:%.2f' % lam)
    plt.show()

    if interactive_mode:
        print('   Starting interactive mode:')
        print('   Aperture radius: 3 pixels')
        print('      [left-click]      : add an aperture')
        print('      [right-click]     : remove an aperture')
        print('      [u]+cursor on plot: larger aperture')
        print('      [d]+cursor on plot: smaller aperture')

        # A little trick to wait for the user to be done before proceeding
        while True:

            letter = input('   [m] to save and continue, [zzz] to NOT save and ' +
                           'crash (maybe). \n')
            if letter in ['m', 'zzz']:
                break
            else:
                print('   [%s] unrecognized !' % letter)
        
        line.remove()
        plt.savefig(save_plot + str(lam) + '.pdf', bbox_inches='tight')
        plt.close()
        
        if letter =='m':
            return zip(apmanager.xs, apmanager.ys, apmanager.rs)
        else:
            return False
    else:
        line.remove()
        plt.savefig(save_plot + str(lam) + '.pdf', bbox_inches='tight')
        plt.close()
        return zip(xs,ys,rs)
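
# Sketch of a non-interactive call: the FITS slice name and wavelength below are
# placeholders. With interactive_mode=False the apertures are kept as detected
# automatically and the overview figure is written to '<save_plot><lam>.pdf'.
from astropy.io import fits

data = fits.getdata('cube_slice_4861.fits')          # hypothetical 2-D slice
aps = build_ap_list(data, radius=3.0, automatic_mode=True,
                    interactive_mode=False, lam=4861.0,
                    save_plot='ap_selection_')
# aps is a list of (x, y, r) tuples, one entry per detected peak.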
コード例 #28
0
        precision_patch = np.zeros(254, np.float32)
        recall_patch = np.zeros(254, np.float32)

        retina_img = Image.open(gt_base_dir + config_type +
                                '/img_%02d_patch_%02d_img.png' %
                                (idx, idx_patch))
        pred = np.load(results_base_dir + config_type +
                       '/epoch_1800/img_%02d_patch_%02d.npy' %
                       (idx, idx_patch))

        axes[0, config_id].imshow(retina_img)

        mean, median, std = sigma_clipped_stats(pred, sigma=3.0)
        threshold = median + (10.0 * std)
        sources = find_peaks(pred, threshold, box_size=3)
        positions = (sources['x_peak'], sources['y_peak'])

        axes[2, config_id].imshow(pred, interpolation='nearest')
        axes[2, config_id].plot(sources['x_peak'],
                                sources['y_peak'],
                                ls='none',
                                color='red',
                                marker='+',
                                ms=10,
                                lw=1.5)

        gt_img = Image.open(gt_base_dir + config_type +
                            '/img_%02d_patch_%02d_gt.png' % (idx, idx_patch))

        mean_gt, median_gt, std_gt = sigma_clipped_stats(gt_img, sigma=3.0)
コード例 #29
0
from astropy.visualization import simple_norm
from photutils import datasets

hdu = datasets.load_simulated_hst_star_image()
data = hdu.data
from photutils.datasets import make_noise_image
data +=  make_noise_image(data.shape, type='gaussian', mean=10.,
                          stddev=5., random_state=12345)

from photutils import find_peaks
peaks_tbl = find_peaks(data, threshold=500.)

from astropy.table import Table
stars_tbl = Table()
stars_tbl['x'] = peaks_tbl['x_peak']
stars_tbl['y'] = peaks_tbl['y_peak']

from astropy.stats import sigma_clipped_stats
mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=2.,
                                                    iters=None)
data -= median_val

from astropy.nddata import NDData
nddata = NDData(data=data)

from photutils.psf import extract_stars
stars = extract_stars(nddata, stars_tbl, size=25)

import matplotlib.pyplot as plt
nrows = 5
ncols = 5
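
# The snippet appears to be truncated here; a plausible continuation, following
# the corresponding photutils ePSF documentation example, displays the extracted
# star cutouts in a 5 x 5 grid:
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(20, 20), squeeze=True)
ax = ax.ravel()
for i in range(nrows * ncols):
    norm = simple_norm(stars[i], 'log', percent=99.0)
    ax[i].imshow(stars[i], norm=norm, origin='lower', cmap='viridis')
plt.show()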
コード例 #30
0
    def center(iter_no, s_post, member_indices, apr, plotmap):

        mem = gcat[
            member_indices]  # gcat = the galaxy catalog for which the overdensities are being computed.
        """ Member Galaxy Locations in Pixel Space """

        galpix = convpix(mem, w)
        x = galpix[:, 0]
        y = galpix[:, 1]
        """ Configuring Pixel Space"""

        init_x, init_y = w.wcs_world2pix(s_post[0], s_post[1],
                                         0)  # initial estimate to draw box
        PIX_SCALE = imagefile[0].header[
            'CDELT2'] * 3600.0  # "/pixel ---> 0.000239 deg/pixel, IRAC/bootes

        if apr <= 2.0:
            wid_arcsec = 302  # ~5'x5' region around the signpost
        elif 2.0 < apr <= 5.0:
            wid_arcsec = 716.5  # ~12'x12'
        else:
            wid_arcsec = 898  # ~15'x15'

        wid_pixels = int(wid_arcsec /
                         PIX_SCALE)  # ~350x350 pixels/ 830x830 /1040x1040
        xmin, xmax = int(init_x - (wid_pixels / 2.0)), int(
            init_x + (wid_pixels / 2.0))  # boundaries
        ymin, ymax = int(init_y - (wid_pixels / 2.0)), int(init_y +
                                                           (wid_pixels / 2.0))
        """ 2D-Histogram & Smoothing """

        binstep = 5.0  # 5 pixels ~ 4.3"
        wght = np.asarray(
            gcat[member_indices]['ch2_flux'])  # weights: irac ch2[4.5 um] flux
        xedges = np.arange(xmin, xmax + binstep, binstep)
        yedges = np.arange(ymin, ymax + binstep, binstep)
        h, xedges, yedges = np.histogram2d(x,
                                           y,
                                           bins=(xedges, yedges),
                                           weights=wght)

        xmid = [(xedges[ik] + xedges[ik + 1]) / 2.0
                for ik in range(len(xedges) - 1)]
        ymid = [(yedges[ij] + yedges[ij + 1]) / 2.0
                for ij in range(len(yedges) - 1)]

        size = int(round((np.shape(h)[0]) /
                         4.0))  # kernel size : at least ~ 4 times smaller
        stdev_ker = size / 4.0  # size = 4-sigma
        gauss_kernel = Gaussian2DKernel(x_stddev=stdev_ker,
                                        y_stddev=stdev_ker,
                                        x_size=size,
                                        y_size=size)  # kernel
        smoothed_data_gauss = convolve_fft(
            h.T, gauss_kernel)  # convolution/smoothing
        max_sm_val = max(smoothed_data_gauss.flatten())
        """ Finds Peak """

        x_org, y_org = (xedges[0] + xedges[1]) / 2.0, (
            yedges[0] + yedges[1]) / 2.0  # origin of bin-coordinate
        fp_m = find_peaks(smoothed_data_gauss, 0,
                          npeaks=1)  # 1 peak with max value
        xbin_peak, ybin_peak = fp_m['x_peak'][0], fp_m['y_peak'][
            0]  # x, y positions of the peak
        x_p, y_p = (x_org + xbin_peak * binstep), (
            y_org + ybin_peak * binstep)  # conversion to pixel coordinates
        peak_pix = (x_p, y_p)
        ra_p, dec_p = w.wcs_pix2world(peak_pix[0], peak_pix[1], 0)
        peak_fk5 = (ra_p, dec_p)
        """ Centroid from Image Moments """

        cen_pos = centroid_com(smoothed_data_gauss)
        cenx, ceny = (x_org + cen_pos[0] * binstep), (y_org +
                                                      cen_pos[1] * binstep)
        ra_c, dec_c = w.wcs_pix2world(cenx, ceny, 0)
        centre = (ra_c, dec_c)
        """ Plots """

        if plotmap == 'ON':
            fgm = plt.figure()
            ax = fgm.add_subplot(111)
            # ax_b.imshow(smoothed_data_gauss, extent=[xmin, xmax, ymin, ymax], cmap='jet')
            cntf = ax.contourf(
                xmid,
                ymid,
                smoothed_data_gauss,
                cmap='Blues',
                levels=[lt for lt in np.arange(0.10, max_sm_val + 0.02, 0.02)])
            ax.scatter(x, y, marker='o', color='red', s=5)
            ax.scatter(cenx, ceny, marker='s')
            ax.scatter(x_p, y_p, marker='^')
            ax.scatter(init_x, init_y, marker='o', color='lime')

            fgm.colorbar(cntf)
            fgm.savefig(catdir + 'clump_plots/' + str(iter_no) + '_map.png',
                        format='png',
                        dpi=100)
            plt.close(fgm)

        return centre, peak_fk5
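
    # Illustrative call only: center() above relies on module-level objects
    # (gcat, w, imagefile, catdir and the convpix helper) defined elsewhere in
    # the project, so the call is left as a comment. The signpost coordinates
    # and aperture below are placeholders.
    # centre, peak = center(iter_no=0, s_post=(218.0, 34.0),
    #                       member_indices=member_indices, apr=2.0, plotmap='OFF')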
コード例 #31
0
def get_most_confident_outputs(img_id, patch_center_row, patch_center_col,
                               confident_th, gpu_id, connected_same_vessel):

    patch_size = 64
    center = (patch_center_col, patch_center_row)

    x_tmp = int(center[0] - patch_size / 2)
    y_tmp = int(center[1] - patch_size / 2)

    confident_connections = {}
    confident_connections['x_peak'] = []
    confident_connections['y_peak'] = []
    confident_connections['peak_value'] = []

    root_dir = './gt_dbs/DRIVE/'
    img = Image.open(
        os.path.join(root_dir, 'test', 'images', '%02d_test.tif' % img_id))
    img = np.array(img, dtype=np.float32)
    h, w = img.shape[:2]

    if x_tmp > 0 and y_tmp > 0 and x_tmp + patch_size < w and y_tmp + patch_size < h:

        img_crop = img[y_tmp:y_tmp + patch_size, x_tmp:x_tmp + patch_size, :]

        img_crop = img_crop.transpose((2, 0, 1))
        img_crop = torch.from_numpy(img_crop)
        img_crop = img_crop.unsqueeze(0)

        inputs = img_crop / 255 - 0.5

        # Forward pass of the mini-batch
        inputs = Variable(inputs)

        if gpu_id >= 0:
            inputs = inputs.cuda()

        p = {}
        p['useRandom'] = 1  # Shuffle Images
        p['useAug'] = 0  # Use Random rotations in [-30, 30] and scaling in [.75, 1.25]
        p['inputRes'] = (64, 64)  # Input Resolution
        p['outputRes'] = (64, 64)  # Output Resolution (same as input)
        p['g_size'] = 64  # Higher means narrower Gaussian
        p['trainBatch'] = 1  # Number of Images in each mini-batch
        p['numHG'] = 2  # Number of Stacked Hourglasses
        p['Block'] = 'ConvBlock'  # Select: 'ConvBlock', 'BasicBlock', 'BottleNeck'
        p['GTmasks'] = 0  # Use GT Vessel Segmentations as input instead of Retinal Images
        model_dir = './results_dir_vessels/'
        if connected_same_vessel:
            modelName = tb.construct_name(p, "HourGlass-connected-same-vessel")
        else:
            modelName = tb.construct_name(p, "HourGlass-connected")
        numHGScales = 4  # How many times to downsample inside each HourGlass
        net = nt.Net_SHG(p['numHG'], numHGScales, p['Block'], 128, 1)
        epoch = 1800
        net.load_state_dict(
            torch.load(os.path.join(
                model_dir,
                os.path.join(model_dir,
                             modelName + '_epoch-' + str(epoch) + '.pth')),
                       map_location=lambda storage, loc: storage))

        if gpu_id >= 0:
            net = net.cuda()

        output = net.forward(inputs)
        pred = np.squeeze(
            np.transpose(
                output[len(output) - 1].cpu().data.numpy()[0, :, :, :],
                (1, 2, 0)))

        mean, median, std = sigma_clipped_stats(pred, sigma=3.0)
        threshold = median + (10.0 * std)
        sources = find_peaks(pred, threshold, box_size=3)

        indxs = np.argsort(sources['peak_value'])
        for ii in range(0, len(indxs)):
            idx = indxs[len(indxs) - 1 - ii]
            if sources['peak_value'][idx] > confident_th:
                confident_connections['x_peak'].append(sources['x_peak'][idx])
                confident_connections['y_peak'].append(sources['y_peak'][idx])
                confident_connections['peak_value'].append(
                    sources['peak_value'][idx])
            else:
                break

        confident_connections = Table([
            confident_connections['x_peak'], confident_connections['y_peak'],
            confident_connections['peak_value']
        ],
                                      names=('x_peak', 'y_peak', 'peak_value'))

    return confident_connections
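
# Illustrative call only (requires the DRIVE test images and the trained
# HourGlass checkpoints referenced above); gpu_id=-1 keeps everything on the CPU,
# and the patch centre and confidence threshold are placeholder values.
# connections = get_most_confident_outputs(img_id=1, patch_center_row=300,
#                                          patch_center_col=300, confident_th=20,
#                                          gpu_id=-1, connected_same_vessel=False)
# print(connections)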
コード例 #32
0
ファイル: Rcusp.py プロジェクト: jwhsueh/SHARP_jw
image_fits=pyfits.open(filepath+filename)
image=image_fits[0].data
image_or=image

## smoothing
kernel=convolution.Gaussian2DKernel(5)
image=convolution.convolve(image,kernel)

# get image size
img_size=image.shape[0]
print(img_size)

mean,median,std=sigma_clipped_stats(image,sigma=3.0)
threshold=median+(20*std)
peaks=find_peaks(image,threshold,box_size=5)

print(peaks)
'''
## throw out source peak
# by distance to center
center=img_size/2
dist_center=np.sqrt((peaks['x_peak']-center)**2+(peaks['y_peak']-center)**2)
print dist_center
idx=list(dist_center).index(np.min(dist_center))

threshold=np.min(peaks['peak_value'][idx])

peaks=find_peaks(image,threshold,box_size=5)
print peaks
#print len(peaks['x_peak'])
コード例 #33
0
def get_photometry(image, mask=None, gain=4., pos=(dx_stamp, dx_stamp),
                   radii=10., sigma1=None, alpha=None, beta=None, iter=0):

    print(iter)
    sigma_clip = SigmaClip(sigma=3., iters=2)
    bkg_estimator = MedianBackground()
    bkg = Background2D(image, (10, 10), sigma_clip=sigma_clip,
                       bkg_estimator=bkg_estimator)
    print('\tBackground stats: %f, %f' % (bkg.background_median,
                                          bkg.background_rms_median))

    data = image - bkg.background
    # bkg_err = np.random.normal(bkg.background_median,
    #                            bkg.background_rms_median, image.shape)
    if False:
        plt.imshow(data, cmap='viridis', interpolation='nearest')
        plt.colorbar()
        # plt.show()
    # error = calc_total_error(image, bkg_err, gain)
    if True:
        back_mean, back_median, back_std = sigma_clipped_stats(data, mask,
                                                               sigma=3,
                                                               iters=3,
                                                               cenfunc=np.median)
        print('\tBackground stats: %f, %f' % (back_median, back_std))
        tbl = find_peaks(data,
                         np.minimum(back_std, bkg.background_rms_median) * 3,
                         box_size=5, subpixel=True)
        if len(tbl) == 0:
            print('\tNo detection...')
            return None
        tree_XY = cKDTree(np.array([tbl['x_centroid'], tbl['y_centroid']]).T)
        if iter == 0:
            d = 9
        else:
            d = 5
        dist, indx = tree_XY.query(pos, k=2, distance_upper_bound=d)
        print(tbl)
        print(dist, indx)

        if np.isinf(dist).all():
            print('\tNo source found in the asked position... ', end='')
            print('using given position...')
            position = pos
            # return None
        else:
            if len(tbl) >= 2 and not np.isinf(dist[1]):
                if tbl[indx[1]]['fit_peak_value'] > \
                    tbl[indx[0]]['fit_peak_value']:
                    indx = indx[1]
                else:
                    indx = indx[0]
            else:
                indx = indx[0]
            position = [tbl[indx]['x_centroid'], tbl[indx]['y_centroid']]
    else:
        position = pos

    print('\tObject position: ', position)

    apertures = [CircularAperture(position, r=r) for r in radii]
    try:
        phot_table = aperture_photometry(data, apertures, mask=mask,
                                         method='subpixel', subpixels=5)
    except IndexError:
        phot_table = aperture_photometry(data, apertures,
                                         method='subpixel', subpixels=5)
    for k, r in enumerate(radii):
        area = np.pi * r ** 2
        phot_table['aperture_flx_err_%i' %
                   k] = np.sqrt(sigma1**2 * alpha**2 * area**beta +
                                phot_table['aperture_sum_%i' % k][0] / gain)
    phot_table.remove_columns(['xcenter', 'ycenter'])
    phot_table['xcenter'] = position[0]
    phot_table['ycenter'] = position[1]
    return phot_table
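
# Illustrative call only: 'stamp' would be a 2-D image cutout, dx_stamp is the
# module-level half-width used for the default position, and the noise-model
# parameters (sigma1, alpha, beta) below are placeholders. Note that radii must
# be an iterable of aperture radii, since the function loops over it.
# phot = get_photometry(stamp, gain=4., pos=(dx_stamp, dx_stamp),
#                       radii=[2., 4., 6., 10.],
#                       sigma1=8.0, alpha=1.0, beta=1.0, iter=0)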
コード例 #34
0
ファイル: find_radius.py プロジェクト: jejestern/cepheids
if star == 'V0473_Lyr':
    x_1 = 2100
    x_2 = 2450
    y_1 = 1700
    y_2 = 2300

if star == 'RR_Lyr':
    x_1 = 0
    x_2 = 4090
    y_1 = 0
    y_2 = 4090

for image_name in files[0:1]:
    image = fits.getdata(star + "/" + image_name, ext=0)
    data = image[x_1:x_2, y_1:y_2]
    tbl = find_peaks(data, 300, box_size=50)
    pe = np.array(tbl['peak_value'])
    pe_x = np.array(tbl['x_peak'])
    pe_y = np.array(tbl['y_peak'])
    peaks = np.array((pe_x, pe_y, pe)).T
    peaks = peaks.tolist()
    peaks = sorted(peaks, key=lambda t: t[2], reverse=True)
    positions = (peaks[0][0], peaks[0][1]
                 )  #[peaks[0][0], peaks[0][1]], [peaks[1][0], peaks[1][1]]
    apertures = lambda r: CircularAperture(positions, r=r)
    phot_table = lambda r: aperture_photometry(data, apertures(r))
    rad = [0.0]
    numcounts = [0.0]
    noise = 10000
    i = 0
    while (noise >= 5000):
コード例 #35
0
def find_pinholes_irregular(fname,
                            freference,
                            sname,
                            fdarkff,
                            fdark,
                            fff,
                            files,
                            size,
                            threshold,
                            fwhm,
                            fitshape,
                            MAX_CONTROL_POINTS,
                            PIXEL_TOL,
                            range_psf,
                            sigma=2.,
                            oversampling=4,
                            maxiters=3,
                            MIN_MATCHES_FRACTION=0.8,
                            NUM_NEAREST_NEIGHBORS=5):
    """Finds and fits irregularly spread pinhole positions with a ePSF in a FITS image. Then matches them to the reference positions.
    
    Parameters
    ----------
    fname : str
        Folder name of the input fits files.
    freference : str
        File name of the reference positions (txt file).
    sname : str
        Folder name of the returned found and matched pinhole positions (txt files).
    fdarkff : string
        Location of the dark images for the flat field images.
    fdark : string
        Location of the dark images for the raw images.
    fff : string
        Location of the flat field images.
    files : (1, 2)-shaped int array
        File range to create a median image
    size : int
        Rectangular size of the ePSF. Size must be an odd number.
    threshold : float
        The absolute image value above which to select sources.
    fwhm : float
        The full-width half-maximum (FWHM) of the major axis of the Gaussian kernel in units of pixels.
    fitshape : int or length-2 array-like
        Rectangular shape around the center of a star which will be used to collect the data to do the fitting. 
        Can be an integer to be the same along both axes. 
        E.g., 5 is the same as (5, 5), which means to fit only at the following 
        relative pixel positions: [-2, -1, 0, 1, 2]. Each element of fitshape must be an odd number.
    MAX_CONTROL_POINTS : int
        The maximum control points (stars) to use to build the invariants.    
    PIXEL_TOL : int
        The pixel distance tolerance to assume two invariant points are the same.
    range_psf : (1, 4)-shaped int array
        Position range to compute epsf [xmin,xmax,ymin,ymax]
    sigma : float
        Number of standard deviations used to perform sigma clip with an astropy.stats.SigmaClip object.
    oversampling : int or tuple of two int
        The oversampling factor(s) of the ePSF relative to the input stars along the x and y axes. 
        The oversampling can either be a single float or a tuple of two floats of the form (x_oversamp, y_oversamp). 
        If oversampling is a scalar then the oversampling will be the same for both the x and y axes.
    maxiters : int
        The maximum number of iterations to perform.
    MIN_MATCHES_FRACTION : float (0,1]
        The minimum fraction of triangle matches to accept a transformation.
        If the minimum fraction yields more than 10 triangles, 10 is used instead.
    NUM_NEAREST_NEIGHBORS : int
        The number of nearest neighbors of a given star (including itself) to construct the triangle invariants.
    Returns
    -------
    s_list : (N,2)-shaped array
        Found and matched positions of the pinholes.
    t_list : (N,2)-shaped array
        Matched reference grid positions.
    """

    #Load the sample of fits images
    entries = os.listdir(fname)

    data_col = np.array([fits.getdata(fname + '/' + entries[files[0]], ext=0)])
    for k in range(files[0] + 1, files[1] + 1):
        data_col = np.append(data_col,
                             [fits.getdata(fname + '/' + entries[k], ext=0)],
                             axis=0)

    #Data reduction: dark current + flat field
    data_col = data_correction(data_col, fdarkff, fdark, fff)

    #Calculate median image
    data_full = np.median(data_col, axis=0)
    pos_full = np.array([[0, 0]])

    data = data_full[range_psf[2]:range_psf[3], range_psf[0]:range_psf[1]]

    #Find peaks in data
    peaks_tbl = find_peaks(data, threshold=threshold)
    peaks_tbl['peak_value'].info.format = '%.8g'

    #Load data around found peaks
    hsize = (size - 1) / 2
    x = peaks_tbl['x_peak']
    y = peaks_tbl['y_peak']
    mask = ((x > hsize) & (x < (data.shape[1] - 1 - hsize)) & (y > hsize) &
            (y < (data.shape[0] - 1 - hsize)))

    stars_tbl = Table()
    stars_tbl['x'] = x[mask]
    stars_tbl['y'] = y[mask]

    #Calculate mean, median, std
    mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=sigma)
    data = data - median_val

    #Find pinholes and create ePSF
    nddata = NDData(data=data)

    stars = extract_stars(nddata, stars_tbl, size=size)

    epsf_builder = EPSFBuilder(oversampling=oversampling,
                               maxiters=maxiters,
                               progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)

    #Use ePSF to find precise locations of pinholes
    daofind = DAOPhotPSFPhotometry(crit_separation=30,
                                   threshold=threshold,
                                   fwhm=fwhm,
                                   psf_model=epsf,
                                   fitshape=fitshape,
                                   aperture_radius=12,
                                   niters=1)

    #Get positions
    sources = daofind(data_full)

    for col in sources.colnames:
        sources[col].info.format = '%.8g'

    pos = np.transpose((sources['x_fit'], sources['y_fit']))
    pos_full = np.append(pos_full, pos, axis=0)

    pos_full = pos_full[1:]

    #Plot found pinholes
    apertures = CircularAperture(pos_full, r=10)

    norm = ImageNormalize(stretch=SqrtStretch())

    #Plot found pinholes
    fig, ax = plt.subplots()
    ax.set_title('Pinhole Positions')
    ax.set(xlabel='x [pixel]', ylabel='y [pixel]')
    ax.imshow(data_full, cmap='Greys', origin='lower', norm=norm)
    apertures.plot(color='blue', lw=1.5, alpha=0.5)
    ax.legend(['#pinholes = ' + str(len(pos_full[:, 0]))],
              loc='lower left',
              prop={'size': 12})
    plt.show()

    #Sort positions by matching with reference grid
    positions_sort = pos_full

    ref_positions = np.genfromtxt(freference, skip_header=0)

    transf, (s_list, t_list) = find_transform(positions_sort, ref_positions,
                                              MAX_CONTROL_POINTS, PIXEL_TOL,
                                              MIN_MATCHES_FRACTION,
                                              NUM_NEAREST_NEIGHBORS)

    text = np.array([s_list[:, 0], s_list[:, 1], t_list[:, 0], t_list[:, 1]])
    text_trans = np.zeros((len(s_list[:, 0]), 4))

    #Transpose text matrix
    for k in range(0, 4):
        for l in range(0, len(s_list[:, 0])):
            text_trans[l][k] = text[k][l]

    #Save data as txt file
    np.savetxt(sname + '.txt',
               text_trans,
               fmt='%1.9E',
               delimiter='\t',
               comments='',
               header='x-measured   y-measured   x-reference   y-reference')

    return s_list, t_list
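
# The routine also writes its matches to '<sname>.txt' with a one-line header;
# a minimal sketch for reading them back (the file name is a placeholder for
# whatever was passed as sname):
# measured_x, measured_y, ref_x, ref_y = np.loadtxt('matched.txt',
#                                                   skiprows=1, unpack=True)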
コード例 #36
0
gt_base_dir = './results_dir/gt_test'
results_base_dir = './results_dir/results'

idx_patch = 2

for idx in range(start_img,start_img+num_images):

    print(idx)

    config_type = '_connected'
    retina_img = Image.open(gt_base_dir + config_type + '/img_%02d_patch_%02d_img.png' %(idx, idx_patch))
    plt.imshow(retina_img)
    pred = np.load(results_base_dir + config_type + '/epoch_1800/img_%02d_patch_%02d.npy' %(idx, idx_patch))

    mean, median, std = sigma_clipped_stats(pred, sigma=3.0)
    threshold = median + (10.0 * std)
    sources = find_peaks(pred, threshold, box_size=3)
    positions = (sources['x_peak'], sources['y_peak'])

    pos_x_vector = []
    pos_y_vector = []
    for ii in range(0,len(sources['peak_value'])):
        if sources['peak_value'][ii] > 20:
            pos_x_vector.append(sources['x_peak'][ii])
            pos_y_vector.append(sources['y_peak'][ii])

    plt.scatter(pos_x_vector, pos_y_vector, marker='+', color='blue', s=100, linewidths=10)
    plt.axis('off')
    plt.show()