Code Example #1
File: single_image.py Project: LiuDezi/ProperImage
    def interped(self):
        if not hasattr(self, '_interped'):
            kernel = Box2DKernel(5)  # Gaussian2DKernel(stddev=2.5) #

            crmask, _ = detect_cosmics(indat=np.ascontiguousarray(
                self.bkg_sub_img.filled(-9999)),
                                       inmask=self.bkg_sub_img.mask,
                                       sigclip=6.,
                                       cleantype='medmask')
            self.bkg_sub_img.mask = np.ma.mask_or(self.bkg_sub_img.mask,
                                                  crmask)
            self.bkg_sub_img.mask = np.ma.mask_or(self.bkg_sub_img.mask,
                                                  np.isnan(self.bkg_sub_img))

            print('Masked pixels:', np.sum(self.bkg_sub_img.mask))
            img = self.bkg_sub_img.filled(np.nan)
            img_interp = interpolate_replace_nans(img, kernel, convolve=conv)  # `conv` is presumably an astropy convolve function imported at module level

            while np.any(np.isnan(img_interp)):
                img_interp = interpolate_replace_nans(img_interp,
                                                      kernel,
                                                      convolve=conv)
            # clipped = sigma_clip(self.bkg_sub_img,
            # iters=5, sigma_upper=40).filled(np.nan)
            # img_interp = interpolate_replace_nans(img_interp, kernel)
            self._interped = img_interp

        return self._interped
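
For reference, a minimal self-contained sketch of the pattern these examples share (the array, kernel width, and re-run loop are illustrative, not taken from any one project):

import numpy as np
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans

img = np.random.default_rng(0).normal(size=(64, 64))
img[10:14, 20:24] = np.nan                    # a block of bad pixels

kernel = Gaussian2DKernel(x_stddev=1)         # interpolation footprint
fixed = interpolate_replace_nans(img, kernel)

# NaN regions wider than the kernel can survive one pass, so repeat
# until none remain, as the example above does.
while np.any(np.isnan(fixed)):
    fixed = interpolate_replace_nans(fixed, kernel)
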
Code Example #2
File: smoothing.py Project: chloe-mt-cheng/imf_css
def smoothing(blue_path, red_path, tell_wave, tell_flux, hi, lo, mid, mid2, pre):
    #Import co-added target spectra
    blue_coadd1d = fits.open(blue_path)
    blue_dat = blue_coadd1d[1].data
    blue_coadd1d.close()

    red_coadd1d = fits.open(red_path)
    red_dat = red_coadd1d[1].data
    red_coadd1d.close()

    #Get wavelengths and fluxes
    blue_wave = blue_dat['wave']
    red_wave = red_dat['wave']
    blue_ivar = blue_dat['ivar']
    red_ivar = red_dat['ivar']

    flux = np.copy(tell_flux)
    total_wave = np.concatenate((blue_wave, red_wave))
    noise = 1/np.sqrt(np.concatenate((blue_ivar, red_ivar)))

    #Get resolution
    resb, resr = tell.lris_res(blue_wave, red_wave)
    total_res = np.concatenate((resb, resr))

    #Get pixel positions of noisy telluric spikes
    spikes_hi, spikes_lo, spikes_mid, spikes_mid2, spikes_early = spikes(total_wave, flux, hi, lo, mid, mid2, pre)
    bad_pixels = []
    for i in range(len(spikes_hi)):
        for j in range(len(spikes_hi[i])):
            bad_pixels.append(spikes_hi[i][j])
    for i in range(len(spikes_lo)):
        for j in range(len(spikes_lo[i])):
            bad_pixels.append(spikes_lo[i][j])
    for i in range(len(spikes_mid)):
        bad_pixels.append(spikes_mid[i])
    for i in range(len(spikes_mid2)):
        bad_pixels.append(spikes_mid2[i])
    for i in range(len(spikes_early)):
        bad_pixels.append(spikes_early[i])

    bad_pixels = np.sort(list(set(bad_pixels))) #Remove duplicates

    #Interpolate over noisy spikes
    flux[bad_pixels] = np.nan
    noise[bad_pixels] = np.nan

    kernel = Gaussian1DKernel(stddev = 1)
    intp_flux = interpolate_replace_nans(flux, kernel, convolve = convolve_fft)
    intp_noise = interpolate_replace_nans(noise, kernel, convolve = convolve_fft)

    #Smooth by 200km/s
    c = 299792.458 #speed of light
    in_sigma_kms = 200
    sigma_aa_desired = in_sigma_kms/c*total_wave
    smoothed_flux = utils.smoothing.smoothspec(total_wave, intp_flux, outwave=total_wave, smoothtype='lsf', resolution=sigma_aa_desired)
    smoothed_noise = utils.smoothing.smoothspec(total_wave, intp_noise, outwave=total_wave, smoothtype='lsf', resolution=sigma_aa_desired)
    return total_wave, smoothed_flux, smoothed_noise, bad_pixels
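
Passing convolve=convolve_fft, as above, makes interpolate_replace_nans use astropy's FFT-based convolution instead of direct convolution, which is usually faster for long arrays or wide kernels. A minimal sketch (data illustrative):

import numpy as np
from astropy.convolution import Gaussian1DKernel, convolve_fft, interpolate_replace_nans

spec = np.ones(100)
spec[40:44] = np.nan
clean = interpolate_replace_nans(spec, Gaussian1DKernel(stddev=2), convolve=convolve_fft)
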
		
Code Example #3
    def __call__(self, abs_sci_name):
        '''
        Fix bad pixels in a single frame, so that the job can be parallelized

        INPUTS:
        abs_sci_name: absolute filename of the science frame
        '''

        # read in the science frame from raw data directory
        sci, header_sci = fits.getdata(abs_sci_name, 0, header=True)

        # fix bad pixels (note conversion to 32-bit signed ints)
        sci_badnan = np.multiply(sci, self.badpix)
        image_fixpixed = interpolate_replace_nans(array=sci_badnan,
                                                  kernel=self.kernel).astype(
                                                      np.int32)

        # add a line to the header indicating last reduction step
        header_sci["RED_STEP"] = "bad-pixel-fixed"

        # write file out
        abs_image_fixpixed_name = str(self.config_data["data_dirs"]["DIR_PIXL_CORRTD"] + \
                                        os.path.basename(abs_sci_name))
        fits.writeto(filename=abs_image_fixpixed_name,
                     data=image_fixpixed,
                     header=header_sci,
                     overwrite=True)
        print("Writing out bad-pixel-fixed frame " +
              os.path.basename(abs_image_fixpixed_name))
Code Example #4
File: fitmaps.py Project: cjhang/ENLR
 def fix_outlier(self, arr, interpolate=True, sigma=5, iters=2):
     kernel = Gaussian2DKernel(x_stddev=self.psf / 2.355)
     new_array = sigma_clip(arr, sigma=sigma,
                            iters=iters)  # mask outliers beyond `sigma` std. dev. (`iters` was renamed `maxiters` in astropy 3.1)
     if interpolate:
         new_array = interpolate_replace_nans(new_array.filled(np.nan),
                                              kernel)
     return new_array
Code Example #5
def loadmap_astroclean(image, x_stddev=1, x_size=9, y_size=9):
    #9x9 kernel for smoothing (astropy kernel sizes must be odd; sizes are passed
    #as keywords, since Gaussian2DKernel's second positional argument is y_stddev)
    kernel = Gaussian2DKernel(x_stddev, x_size=x_size, y_size=y_size)
    image = imread(image)

    imageclean = interpolate_replace_nans(image, kernel)

    return imageclean
Code Example #6
def quick_plot(array, interp=False):

    if interp:
        array = interpolate_replace_nans(array, kernel=Gaussian2DKernel(x_stddev=0.25))

    plt.ion()
    plt.figure()
    norm = ImageNormalize(array, interval=ZScaleInterval())
    im = plt.imshow(array, origin='lower', norm=norm)
    cb = plt.colorbar(im)
    plt.tight_layout()
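
A hypothetical call (assumes numpy/matplotlib plus the astropy imports used above, i.e. interpolate_replace_nans, Gaussian2DKernel, ImageNormalize and ZScaleInterval):

import numpy as np

arr = np.random.default_rng(2).normal(size=(32, 32))
arr[4:6, 10:12] = np.nan
quick_plot(arr, interp=True)
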
Code Example #7
def fix_bad_pixels(image, bad_map, add_bad=[], x_stddev=1):
    """ Replace bad pixels with values interpolated from their neighbors (interpolation
    is made with a gaussian kernel convolution)."""
    if len(add_bad) != 0:
        for j in range(len(add_bad)):
            bad_map[add_bad[j][1], add_bad[j][0]] = 1

    img_nan = image.copy()
    img_nan[bad_map == 1] = np.nan
    kernel = Gaussian2DKernel(x_stddev=x_stddev)
    fixed_image = interpolate_replace_nans(img_nan, kernel)
    return fixed_image
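
A hypothetical call (arrays are illustrative; numpy, Gaussian2DKernel and interpolate_replace_nans are assumed imported; note that this version writes the add_bad pixels into the caller's bad_map in place, unlike the copying variant in Code Example #15):

import numpy as np

image = np.arange(100.0).reshape(10, 10)
bad_map = np.zeros((10, 10), dtype=int)
bad_map[3, 4] = 1                              # known bad pixel at x=4, y=3
fixed = fix_bad_pixels(image, bad_map, add_bad=[(7, 2)])  # extra (x, y) pair
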
Code Example #8
def extend_field(grid, mask=None, gauss_widths=(1, 11), taper=None):
    """Extrapolate w/Gaussian average of increasing width and taper weights.

    Extend field (internal and external borders) by applying a Gaussian
    average of increasing width at each iteration, and scaling the result
    by a cosine/linear taper (from 1 to 0 with iterations).

    Args:
        grid: 2D field to extrapolate.
        mask: region to exclude from the Gaussian average.
        gauss_widths: width (n_pixels) of Gaussian average at each iteration.
        taper: cosine|linear|None, shape of the weight window.
    Notes:
        Cosine taper goes from 1 at gauss_widths[0] to 0 at gauss_widths[-1].
        gauss_widths define the number of iterations (one per width).
        Widths can be passed as a range (1, N) or explicitly (1, 3, 5, ...).
    """

    if len(gauss_widths) == 2:
        gauss_widths = range(*gauss_widths)

    if taper == "cosine":
        half_window = len(gauss_widths)
        taper_weights = tukey(half_window * 2)[half_window:]
    elif taper == "linear":
        taper_weights = np.linspace(1, 0, len(gauss_widths))
    else:
        taper_weights = np.full_like(gauss_widths, 1)

    print("taper weights:\n", taper_weights)

    if mask is not None:
        grid[mask == 1] = np.nan  # <= this is key !!!

    for i, k in enumerate(gauss_widths):
        print("gauss kernel size:", k)

        mask_before = np.isnan(grid)

        kernel = Gaussian2DKernel(k)
        grid = interpolate_replace_nans(grid, kernel, boundary="extend")

        if mask is not None:
            grid[mask == 1] = np.nan

        mask_after = np.isnan(grid)

        mask_extended = mask_before & ~mask_after  # cells newly filled this iteration

        grid[mask_extended] *= taper_weights[i]

    return grid
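
A usage sketch under the assumption that numpy, scipy's tukey window and astropy's Gaussian2DKernel / interpolate_replace_nans are already in scope, with an illustrative grid:

import numpy as np

grid = np.full((40, 40), np.nan)
grid[15:25, 15:25] = 1.0        # island of valid data to extend outward
extended = extend_field(grid, gauss_widths=(1, 6), taper="cosine")
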
Code Example #9
    def maybe_interpolate_dead_pixels(self):
        if self.interpolate_dead_pixels:
            print('Interpolating dead pixels...')
            kernel = Gaussian2DKernel(1)

            not_valid_and_inside_brightfield = np.logical_and(
                np.logical_not(self.bvm), self.bmb)
            self.data[not_valid_and_inside_brightfield] = np.NaN
            # p = Pool(multiprocessing.cpu_count())
            # p.map(replace_nans, data)
            for i, da in enumerate(self.data):
                self.data[i] = interpolate_replace_nans(da.copy(), kernel)

            self.data = np.nan_to_num(self.data, copy=False)
Code Example #10
File: fiber_utils.py Project: grzeimann/Remedy
def identify_sky_pixels(sky, kernel=10.0):
    G = Gaussian1DKernel(kernel)
    cont = convolve(sky, G, boundary='extend')
    mask = safe_sigma_clip(sky - cont)
    for i in np.arange(5):
        nsky = sky * 1.
        mask.mask[1:] += mask.mask[:-1]
        mask.mask[:-1] += mask.mask[1:]
        nsky[mask.mask] = np.nan
        cont = convolve(nsky, G, boundary='extend')
        while np.isnan(cont).sum():
            cont = interpolate_replace_nans(cont, G)
        mask = safe_sigma_clip(sky - cont)
    return mask.mask, cont
Code Example #11
    def interped(self):
        if not hasattr(self, "_interped"):
            kernel = Box2DKernel(5)  # Gaussian2DKernel(stddev=2.5) #

            crmask, _ = detect_cosmics(
                indat=np.ascontiguousarray(self.bkg_sub_img.filled(-9999)),
                inmask=self.bkg_sub_img.mask,
                sigclip=6.0,
                cleantype="medmask",
            )
            self.bkg_sub_img.mask = np.ma.mask_or(self.bkg_sub_img.mask,
                                                  crmask)
            self.bkg_sub_img.mask = np.ma.mask_or(self.bkg_sub_img.mask,
                                                  np.isnan(self.bkg_sub_img))
            img = self.bkg_sub_img.filled(np.nan)
            img_interp = interpolate_replace_nans(img, kernel, convolve=conv)

            while np.any(np.isnan(img_interp)):
                img_interp = interpolate_replace_nans(img_interp,
                                                      kernel,
                                                      convolve=conv)
            self._interped = img_interp

        return self._interped
Code Example #12
 def fill_gaps(
     self,
     size=[3, 3]
 ):  #Function fills any gaps, experimental version at the moment; comment out this one and uncomment the above one to go back to the old version
     n_lines, ny, n_velocity, nx = shape(self.cube)  #Calculate pixel sizes
     mask = zeros([ny + 2, nx + 2],
                  dtype=int)  #Create an array to store pixle
     kernel = Gaussian2DKernel(x_stddev=0.25, x_size=size[0], y_size=size[1])
     structure = ones(size, dtype=int)
     for i in range(n_lines):
         for j in range(n_velocity):
             cube_slice = self.cube[i, :, j, :]
             var_slice = self.var[i, :, j, :]
             smoothed_cube_slice = interpolate_replace_nans(cube_slice,
                                                            kernel=kernel)
             smoothed_var_slice = interpolate_replace_nans(var_slice,
                                                           kernel=kernel)
             mask[1:-1, 1:-1][isfinite(cube_slice)] = 1
             pixels_to_replace = (mask - binary_closing(
                 mask, structure=structure))[1:-1, 1:-1] < 0
             mask[:] = 0
             cube_slice[pixels_to_replace] = smoothed_cube_slice[
                 pixels_to_replace]
             var_slice[pixels_to_replace] = smoothed_var_slice[
                 pixels_to_replace]
Code Example #13
def replacenans_2d_interpolate(data):
    """Interpolate over nans within array using astropy interpolate_replace_nans function
    
        Input: 
            data = np.array of data values
        Output: 
            data_out = np.array of data value with no nan values"""

    # We smooth with a Gaussian kernel with x_stddev=1 (and y_stddev=1)
    # It is a 9x9 array
    kernel = Gaussian2DKernel(x_stddev=1)

    # create a "fixed" image with NaNs replaced by interpolated values
    data_out = interpolate_replace_nans(data, kernel)

    return data_out
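
For example (assuming the numpy and astropy.convolution imports the function relies on):

import numpy as np

rng = np.random.default_rng(1)
data = rng.normal(size=(12, 12))
data[5, 6] = np.nan
filled = replacenans_2d_interpolate(data)
assert not np.isnan(filled).any()
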
Code Example #14
File: photom_v1.py Project: epascale/pyCIRSF
def interpolate_replace(ima, pos, size = 20):
    kernel = Box2DKernel(3, mode='center')
    ima_ = ima
    for x, y in pos:
        x = int(round(x))
        y = int(round(y))
        ima_[y-size:y+size, x-size:x+size] = interpolate_replace_nans(ima[y-size:y+size, x-size:x+size].filled(), kernel)
        
        if False:
          print(x, y)
          plt.figure(35); plt.clf(); plt.ioff()
          fig, (ax0,ax1) = plt.subplots(ncols=2, nrows=1, num=33)
          ax0.imshow(ima[y-size:y+size, x-size:x+size])
          ax1.imshow(ima_[y-size:y+size, x-size:x+size])
          plt.show()
    return ima_
Code Example #15
def fix_bad_pixels(image, bad_map, add_bad=None, x_stddev=1):
    """Replace bad pixels with values interpolated from their neighbors (interpolation
    is made with a gaussian kernel convolution)."""
    from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans

    if add_bad is None:
        add_bad = []

    if len(add_bad) != 0:
        bad_map = bad_map.copy()  # Don't modify input bad pixel map, use a copy
        for j in range(len(add_bad)):
            bad_map[add_bad[j][1], add_bad[j][0]] = 1

    img_nan = image.copy()
    img_nan[bad_map == 1] = np.nan
    kernel = Gaussian2DKernel(x_stddev=x_stddev)
    fixed_image = interpolate_replace_nans(img_nan, kernel)
    return fixed_image
Code Example #16
File: constants.py Project: abgibbs/edenAP_detrend
 def interpolate(self):
     """
     interpolate zeros and negative values in data using FFT convolve function
     """
     for hdu in self:
         if hdu.data is None:
             continue
         with np.errstate(invalid='ignore'):
             non_finite = ~np.isfinite(hdu.data)
             less_zero = hdu.data <= 0
             if np.any(less_zero) or np.any(non_finite):
                 data = hdu.data.astype(float)
                 mask_data = np.ma.masked_less_equal(data, 0)
                 # mask_data = np.ma.masked_inside(data, -1e5, 0)
                 mask_data.fill_value = np.nan
                 data = mask_data.filled()
                 data = interpolate_replace_nans(data,
                                                 self.kernel,
                                                 convolve=convolve_fft,
                                                 allow_huge=True)
                 hdu.data = data
Code Example #17
def interp_nans(data, x_stddev=1):
    """Interpolate over any NaNs present in a final mosaic.
    
    Uses the Astropy.convolution interpolate_replace_nans to smooth over
    any gaps left in an image. This may be particularly useful for
    WFPC2 images, where there are small gaps between chips.
    
    Args:
        data (numpy.ndarray): Input data to interpolate NaNs over.
        x_stddev (int, optional): Standard deviation of the Gaussian kernel.
            Defaults to 1 (pixel).
            
    Returns:
        numpy.ndarray: The data with NaNs interpolated over
        
    """

    kernel = Gaussian2DKernel(x_stddev=x_stddev)

    image_interp = interpolate_replace_nans(data, kernel)

    return image_interp
Code Example #18
File: main.py Project: krachyon/ir_reduce
def interpolate(img: CCDData):
    """
    Takes a image with a mask for bad pixels and interpolates over the bad pixels

    :param img: the image you want to interpolate bad pixels in
    :return: interpolated image

    """
    # TODO combiner does not care about this and marks it invalid still
    from astropy.convolution import CustomKernel
    from astropy.convolution import interpolate_replace_nans

    # TODO this here doesn't really work all that well -> extended regions cause artifacts at border
    kernel_array = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]
                             ]) / 9  # average of all surrounding pixels
    # noinspection PyTypeChecker
    kernel = CustomKernel(
        kernel_array
    )  # TODO the original pipeline used fixpix, which says it uses linear interpolation

    img.data[np.logical_not(img.mask)] = np.nan  # NB: treats mask==True as *valid* pixels; drop the logical_not if True flags bad pixels (the astropy convention)
    img.data = interpolate_replace_nans(img.data, kernel)

    return img
Code Example #19
def MaxFilter(img, wlen):
    img_conv_max = [
        maximum_filter(interpolate_replace_nans(img[i], np.ones((29, 29))),
                       size=wlen) for i in range(0, len(img))
    ]
    return img_conv_max
Code Example #20
File: cutout.py Project: SKIRT/PTS
def interpolate_nans_kernel(data, kernel, max_iterations=10, not_converge="keep", ignore_nans_in=None, plot=False):

    """
    This function ...
    :param data:
    :param kernel:
    :param max_iterations:
    :param not_converge:
    :param ignore_nans_in:
    :param plot:
    :return:
    """

    from astropy.convolution import interpolate_replace_nans

    # Debugging
    nans = np.isnan(data)
    nnans = np.sum(nans)
    log.debug("The number of NaNs at the start is " + str(nnans))

    # Plot?
    if plot: plotting.plot_mask(nans, title="NaNs")

    # Interpolate
    result = interpolate_replace_nans(data, kernel)
    niterations = 1

    # Plot?
    if plot: plotting.plot_mask(result, title="Result after iteration 1")

    # Plot?
    if plot and ignore_nans_in is not None: plotting.plot_mask(ignore_nans_in, title="ignoring NaNs")

    # Get the current number of nans
    # previous_nnans = None  # don't get it for performance
    if ignore_nans_in is not None: nnans = np.sum(np.isnan(result[np.logical_not(ignore_nans_in)]))
    else: nnans = np.sum(np.isnan(result))

    # Plot?
    if plot: plotting.plot_mask(np.isnan(result), title="NaNs after iteration 1")

    # Debugging
    log.debug("The number of NaN values after iteration 1 is " + str(nnans))

    # Are there still NaNs?
    while nnans > 0:

        # Check number of iterations
        if max_iterations is not None and niterations == max_iterations: raise RuntimeError("The maximum number of iterations has been reached without success")

        # Debugging
        log.debug("Interpolation iteration " + str(niterations + 1) + " ...")

        # Perform next interpolation
        result = interpolate_replace_nans(result, kernel)

        # Increment the niterations counter
        niterations += 1

        # Get the current number of nans
        previous_nnans = nnans
        if ignore_nans_in is not None: nnans = np.sum(np.isnan(result[np.logical_not(ignore_nans_in)]))
        else: nnans = np.sum(np.isnan(result))

        # Plot?
        if plot: plotting.plot_mask(np.isnan(result), title="NaNs after iteration " + str(niterations))

        condition = nnans > previous_nnans if ignore_nans_in else nnans >= previous_nnans

        # Not converging
        if condition:
            if not_converge == "keep":
                log.warning("The number of NaNs could not converge to zero: " + str(nnans) + " NaN values will remain")
                break  # break the loop
            elif not_converge == "error": raise RuntimeError("The number of NaNs is not converging to zero (nnans = " + str(nnans) + ", previous nnans = " + str(previous_nnans) + ")")
            else: raise ValueError("Invalid option for 'not_converge'")

        # Debugging
        log.debug("The number of NaN values after iteration " + str(niterations) + " is " + str(nnans))

    # Return the resulting data
    return result
Code Example #21
def MedianFilter(img, wlen):
    img_conv_med = [
        median_filter(interpolate_replace_nans(img[i], np.ones((29, 29))),
                      size=wlen) for i in range(0, len(img))
    ]
    return img_conv_med
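
Both filters take a stack of 2D frames and return a list of filtered frames; a hypothetical call (assumes numpy plus scipy.ndimage's maximum_filter/median_filter and the astropy import):

import numpy as np

stack = np.random.default_rng(3).normal(size=(2, 64, 64))
stack[0, 10, 10] = np.nan
smoothed = MedianFilter(stack, wlen=5)
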
Code Example #22
def PINES_quicklook(image_name='test.fits', interp=True):
    calibration_path = '/Users/obs72/Desktop/PINES_scripts/Calibrations/'
    if image_name == 'test.fits':
        file_path = '/Users/obs72/Desktop/PINES_scripts/test_image/test.fits'
    else:
        date_string = image_name.split('.')[0]
        file_path = '/mimir/data/obs72/' + date_string + '/' + image_name

    if os.path.exists(file_path):
        header = fits.open(file_path)[0].header
        band = header['FILTNME2']
        exptime = str(header['EXPTIME'])
        flat_path = calibration_path + 'Flats/master_flat_' + band + '.fits'
        if os.path.exists(flat_path):
            flat = fits.open(flat_path)[0].data
        else:
            print('ERROR: No ', band, '-band flat exists in ',
                  calibration_path, 'Flats/...make one.')
            return

        #Select the master dark on-disk with the closest exposure time to exptime.
        dark_top_level_path = calibration_path + 'Darks/'
        dark_files = np.array(glob(dark_top_level_path + '*.fits'))
        dark_exptimes = np.array([
            float(i.split('master_dark_')[1].split('.fits')[0])
            for i in dark_files
        ])
        dark_files = dark_files[np.argsort(dark_exptimes)]
        dark_exptimes = dark_exptimes[np.argsort(dark_exptimes)]
        dark_ind = np.where(
            abs(dark_exptimes -
                float(exptime)) == np.min(abs(dark_exptimes -
                                              float(exptime))))[0][0]
        dark_path = dark_files[dark_ind]
        dark = fits.open(dark_path)[0].data

        ut_str = header['DATE-OBS'].split(
            'T')[0] + ' ' + header['DATE-OBS'].split('T')[1].split('.')[0]
        from_zone = tz.gettz('UTC')
        to_zone = tz.gettz('America/Phoenix')
        utc = datetime.datetime.strptime(ut_str, '%Y-%m-%d %H:%M:%S')
        utc = utc.replace(tzinfo=from_zone)
        local = utc.astimezone(to_zone)
        local_str = local.strftime('%Y-%m-%d %H:%M:%S')

        raw_image = fits.open(file_path)[0].data[0:1024, :]
        reduced_image = (raw_image - dark) / flat
        avg, med, std = sigma_clipped_stats(reduced_image)

        if interp:
            bpm = fits.open(
                '/Users/obs72/Desktop/PINES_scripts/Calibrations/Bad_pixel_masks/bpm.fits'
            )[0].data
            reduced_image[bpm == 1] = np.nan
            reduced_image = interpolate_replace_nans(
                reduced_image, kernel=Gaussian2DKernel(0.5))

        fig, ax = plt.subplots(figsize=(9, 8))
        divider = make_axes_locatable(ax)
        cax = divider.append_axes('right', size='5%', pad=0.05)
        ax.set_aspect('equal')
        norm = ImageNormalize(reduced_image, interval=ZScaleInterval())
        im = ax.imshow(reduced_image, origin='lower', norm=norm)
        fig.colorbar(im, cax=cax, orientation='vertical', label='ADU')
        ax.set_title(file_path.split('/')[-1], fontsize=16)
        plt.tight_layout()
        breakpoint()  # pause so the figure stays on screen; continue ('c') in the debugger to close it
        plt.close()
    else:
        print('ERROR: file ', file_path, ' does not exist.')
        return
Code Example #23
def make_data_dict(regions=['DR21C'],datadirs=['DR21C'],alignment_iteration=0,DIST=7,length=200,kernel_sigma=6,wavelength = '850'):
    """
    :param regions: a list of regions to run
    :param datadirs: a list of directories that hold the data for each region listed in parameter regions.
    :param alignment_iteration: there will be multiple iterations of the alignment run - this 0-based integer designates which alignment iteration the output file describes
    :param DIST: the distance used for linear fitting and gaussian fitting (use width = RADIUS*2 + 1)
    :param length: the distance used for linear fitting and gaussian fitting (use width = RADIUS*2 + 1)
    :param kernel_sigma: the smoothing kernel (in pixels) to subtract largescale structure (high pass filter)
    """
    # + ===================== +
    # | Global parameters     |
    # + ===================== +
    tol = 0.05
    
    REGIONS = {}
    for i in regions:
        REGIONS[i] = {wavelength:1}

    align_smooth_kernel = Gaussian2DKernel(x_stddev=kernel_sigma, y_stddev=kernel_sigma)
    
    data = defaultdict(dict)
    
    for region,datadir in zip(regions,datadirs):
        data[region] = defaultdict(dict)
        Dates850 = []
        Dates450 = []
        DataRoot = datadir + "/"  # where all the data is stored
        files = []
        for eachfile in os.listdir(DataRoot):
            if os.path.isfile(os.path.join(DataRoot, eachfile)):
                if eachfile.split('.')[-1] == 'sdf':
                    if wavelength in eachfile:
                        files.append(eachfile)
        #files = [f for f in os.listdir(DataRoot) if (os.path.isfile(os.path.join(DataRoot, f)) and
        #                                             os.path.join(DataRoot, f)[-4:] ==".sdf")]  # all the files in dir for this wavelength
        files = sorted(files)  # sorting to ensure we select the correct first region
    
        if wavelength == '450':
            scale = 2
        elif wavelength == '850':
            scale = 3
        else:
            scale = 0
        data[region][wavelength] = defaultdict(dict)
        data[region][wavelength]['epoch'] = defaultdict(list)
    
        data[region][wavelength]['dates'] = list()  # to collect all of the dates in the data[region] set
        data[region][wavelength]['JCMT_offset'] = defaultdict(str)  # to use the date as the index
        data[region][wavelength]['header'] = defaultdict(dict)
    
        data[region][wavelength]['XC'] = defaultdict(dict)
        data[region][wavelength]['XC']['offset'] = defaultdict(list)
        data[region][wavelength]['XC']['offset_err'] = defaultdict(list)
        data[region][wavelength]['XC']['alignment'] = defaultdict(list)
    
        data[region][wavelength]['linear'] = defaultdict(dict)
        data[region][wavelength]['linear']['m'] = defaultdict(dict)
        data[region][wavelength]['linear']['m_err'] = defaultdict(dict)
        data[region][wavelength]['linear']['b'] = defaultdict(dict)
        data[region][wavelength]['linear']['b_err'] = defaultdict(dict)
    
        data[region][wavelength]['AC'] = defaultdict(dict)
        data[region][wavelength]['AC']['beam'] = defaultdict(list)
        data[region][wavelength]['AC']['amp'] = defaultdict(list)
        data[region][wavelength]['AC']['amp_err'] = defaultdict(list)
        data[region][wavelength]['AC']['sig_x'] = defaultdict(list)
        data[region][wavelength]['AC']['sig_x_err'] = defaultdict(list)
        data[region][wavelength]['AC']['sig_y'] = defaultdict(list)
        data[region][wavelength]['AC']['sig_y_err'] = defaultdict(list)
        data[region][wavelength]['AC']['theta'] = defaultdict(list)
        data[region][wavelength]['AC']['theta_err'] = defaultdict(list)
        
    
        FEN = files[0]
        FilePath = datadir + "/" + FEN
        OutPath = datadir + "/" + FEN.split('.sdf')[0] + ".fit"
        if os.path.isfile(OutPath):
            pass
        else:
            convert.ndf2fits(FilePath, OutPath)
        FilePath = OutPath
        print('\n\nFIRST EPOCH: '+FilePath+'\n\n')
        FirstEpoch = fits.open(FilePath)  # opening the file in astropy
        FirstEpochData = FirstEpoch[0].data[0]  # Numpy data array for the first epoch
        FirstEpochCentre = np.array([FirstEpoch[0].header['CRPIX1'], FirstEpoch[0].header['CRPIX2']])
    
        # middle of the map of the first epoch
        FED_MidMapX = FirstEpochData.shape[1] // 2
        FED_MidMapY = FirstEpochData.shape[0] // 2
        FirstEpochVec = np.array([FirstEpochCentre[0] - FED_MidMapX,
                                  FirstEpochCentre[1] - FED_MidMapY])
        FirstEpochData = FirstEpochData[
                         FED_MidMapY - length:FED_MidMapY + length + 1,
                         FED_MidMapX - length:FED_MidMapX + length + 1]
        FirstEpochData_smooth = convolve(FirstEpochData, align_smooth_kernel, normalize_kernel=False)
        FirstEpochData -= FirstEpochData_smooth
        for fn in files:
            if wavelength in fn:
                FilePath = datadir + "/" + fn
    
                tau225_start = float(kappa.fitsval(FilePath, 'WVMTAUST').value)
                tau225_end = float(kappa.fitsval(FilePath, 'WVMTAUEN').value)
                tau225 = sum([tau225_start, tau225_end]) / 2
    
                AirMass_start = float(kappa.fitsval(FilePath, 'AMSTART').value)
                AirMass_end = float(kappa.fitsval(FilePath, 'AMEND').value)
                AirMass = sum([AirMass_start, AirMass_end]) / 2
    
                elev_start = float(kappa.fitsval(FilePath, 'ELSTART').value)
                elev_end = float(kappa.fitsval(FilePath, 'ELEND').value)
                elev = int(round(sum([elev_start, elev_end]) / 2, 0))
    
                OutPath = datadir + "/" + fn[:-4] + ".fit"
    
                if os.path.isfile(OutPath):
                    pass
                else:
                    convert.ndf2fits(FilePath, OutPath)
    
                FilePath = OutPath
                hdul = fits.open(FilePath)  # opening the file in astropy
                date = ''.join(str(hdul[0].header['DATE-OBS']).split('T')[0].split('-'))  # extract date from the header
                date += '-' + str(hdul[0].header['OBSNUM'])
                JulianDate = str(float(hdul[0].header['MJD-OBS']) + 2400000.5)
                print('Epoch: {:14}'.format(date))
                data[region][wavelength]['header']['airmass'][date] = AirMass
                data[region][wavelength]['header']['t225'][date] = tau225
                data[region][wavelength]['header']['julian_date'][date] = JulianDate
                data[region][wavelength]['header']['elevation'][date] = elev
                data[region][wavelength]['dates'].append(date)
                centre = (hdul[0].header['CRPIX1'], hdul[0].header['CRPIX2'])  # JCMT's alleged centre is
                hdu = hdul[0]  # a nice compact way to store the data for later.
    
                # data[region][wavelength]['epoch'][date].append(hdu)
                Epoch = hdu.data[0]  # map of the region
                Map_of_Region = interpolate_replace_nans(correlate(Epoch, clip_only=True),
                                                         Gaussian2DKernel(5))
                Map_of_Region_smooth = convolve(Map_of_Region, align_smooth_kernel, normalize_kernel=False)
                Map_of_RegionXC = Map_of_Region - Map_of_Region_smooth
    
                XC = correlate(epoch_1=Map_of_RegionXC, epoch_2=FirstEpochData).real
                PS = correlate(Map_of_Region, psd=True)
                AC = correlate(Map_of_Region).real  # auto correlation of the map
                centre = (hdul[0].header['CRPIX1'], hdul[0].header['CRPIX2'])  # JCMT's alleged centre is
                Vec = np.array([centre[0] - (hdul[0].shape[2] // 2),
                                centre[1] - (hdul[0].shape[1] // 2)])
                JCMT_offset = FirstEpochVec - Vec  # JCMT offset from headers
                data[region][wavelength]['JCMT_offset'][date] = JCMT_offset  # used for accessing data later.
    
                [[AMP, SIGX, SIGY, THETA], [AMP_ERR, SIGX_ERR, SIGY_ERR, THETA_ERR]], _ = gaussian_fit_ac(AC)
                offset, offset_err = gaussian_fit_xc(XC)
                alignment = JCMT_offset - offset
                Length_Scale = np.sqrt(SIGX * SIGY)
    
                data[region][wavelength]['XC']['offset'][date] = offset * scale
                data[region][wavelength]['XC']['offset_err'][date] = offset_err
                data[region][wavelength]['XC']['alignment'][date] = alignment
    
                data[region][wavelength]['AC']['beam'][date] = Length_Scale
                data[region][wavelength]['AC']['amp'][date] = AMP
                data[region][wavelength]['AC']['amp_err'][date] = AMP_ERR
                data[region][wavelength]['AC']['sig_x'][date] = SIGX
                data[region][wavelength]['AC']['sig_x_err'][date] = SIGX_ERR
                data[region][wavelength]['AC']['sig_y'][date] = SIGY
                data[region][wavelength]['AC']['sig_y_err'][date] = SIGY_ERR
                data[region][wavelength]['AC']['theta'][date] = THETA
                data[region][wavelength]['AC']['theta_err'][date] = THETA_ERR
    
                Clipped_Map_of_Region_LENGTH = np.arange(0, Map_of_Region.shape[0])
                loc = list(product(Clipped_Map_of_Region_LENGTH, Clipped_Map_of_Region_LENGTH))
                MidMapX = AC.shape[1] // 2  # middle of the map x
                MidMapY = AC.shape[0] // 2  # and y
                radius, AC_pows = [], []
                for idx in loc:  # Determining the power at a certain radius
                    r = ((idx[0] - MidMapX) ** 2 + (idx[1] - MidMapY) ** 2) ** (1 / 2)
                    AC_pow = AC[idx[0], idx[1]].real
                    radius.append(r)
                    AC_pows.append(AC_pow)
                radius, AC_pows = zip(*sorted(list(zip(radius, AC_pows)), key=op.itemgetter(0)))
                radius = np.array(radius)
                AC_pows = np.array(AC_pows)
    
                num = len(radius[np.where(radius <= DIST)])
                opt_fit_AC, cov_mat_AC = curve_fit(f, radius[1:num], AC_pows[1:num])
                err = np.sqrt(np.diag(cov_mat_AC))
    
                M = opt_fit_AC[0]
                M_err = err[0]
                B = opt_fit_AC[1]
                B_err = err[1]
    
                data[region][wavelength]['linear']['m'][date] = M
                data[region][wavelength]['linear']['m_err'][date] = M_err
                data[region][wavelength]['linear']['b'][date] = B
                data[region][wavelength]['linear']['b_err'][date] = B_err
    
    
    data = default_to_regular(data)
    if not os.path.exists('data'):
        os.system('mkdir data')
    with open('data/data_Transient_run_'+str(alignment_iteration)+'_'+wavelength+'.pickle', 'wb') as OUT:
        pickle.dump(data, OUT)
Code Example #24
File: cubegemb.py Project: tsutterley/captoolkit
    )

da_gemb = xr.concat(grids, dim="t")

print("averaging in time ...")
da_gemb = da_gemb.rolling(t=window, center=True).mean()

print("regridding in time ...")
da_gemb = da_gemb.interp(t=t_cube)

print(da_gemb)

print("extending spatial boundaries before ...")
for k in range(t_cube.size):
    da_gemb.values[:, :, k] = interpolate_replace_nans(
        da_gemb.values[:, :, k], Gaussian2DKernel(2), boundary="extend"
    )

print("regridding in space ...")
da_gemb = da_gemb.interp(x=x_cube, y=y_cube)

print("extending spatial boundaries after ...")
for k in range(t_cube.size):
    da_gemb.values[:, :, k] = interpolate_replace_nans(
        da_gemb.values[:, :, k], Gaussian2DKernel(5), boundary="extend"
    )
    da_gemb.values[:, :, k] = interpolate_replace_nans(
        da_gemb.values[:, :, k], Gaussian2DKernel(3), boundary="extend"
    )

if 0:
Code Example #25
plt.figure()
plt.pcolormesh(X_new, Y_new, mask_new)
plt.show()
sys.exit()
"""

# Filter artifacts and extend boundaries
for k in range(melt.shape[2]):
    print(k)
    melt_k = melt[:, :, k]
    melt_k = filt_velocity_boundary(x, y, melt_k)
    melt_k = filt_positives(x, y, melt_k,
                            [(-1574000, -1543250, -500000, -440000)])

    melt[:, :, k] = interpolate_replace_nans(melt_k,
                                             Gaussian2DKernel(1),
                                             boundary="extend")

# Mask with zeros
if 0:
    mask3d_zeros = np.repeat(mask_zeros[:, :, np.newaxis],
                             melt.shape[2],
                             axis=2)
    melt[mask3d_zeros == 1] = 0.0
"""
plt.matshow(np.nanmean(melt, axis=2), vmin=-5, vmax=5, cmap='RdBu')
plt.show()
sys.exit()
"""

# Regrid (comes out upside down)
Code Example #26
            plt.show()

            if count_plot == n_plots:
                sys.exit()

# --- Spatial filtering --- #

if 1:
    kernel = Gaussian2DKernel(1)

    for k in range(H.shape[2]):
        print("interpolating slice:", k)

        dHdt[:, :, k] = interpolate_replace_nans(dHdt[:, :, k],
                                                 kernel,
                                                 boundary="extend")
        H[:, :, k] = interpolate_replace_nans(H[:, :, k],
                                              kernel,
                                              boundary="extend")
        adv[:, :, k] = interpolate_replace_nans(adv[:, :, k],
                                                kernel,
                                                boundary="extend")
        str[:, :, k] = interpolate_replace_nans(str[:, :, k],
                                                kernel,
                                                boundary="extend")
        div[:, :, k] = interpolate_replace_nans(div[:, :, k],
                                                kernel,
                                                boundary="extend")
        smb[:, :, k] = interpolate_replace_nans(smb[:, :, k],
                                                kernel,
Code Example #27
def master_synthetic_image_creator(target, seeing=''):
    def mimir_source_finder(image_path, sigma_above_bg, fwhm):
        """Find sources in Mimir images."""

        np.seterr(all='ignore')  #Ignore invalids (i.e. divide by zeros)

        #Find stars in the master image.
        avg, med, stddev = sigma_clipped_stats(
            image, sigma=3.0, maxiters=3)  #Previously maxiters = 5!
        daofind = DAOStarFinder(fwhm=fwhm,
                                threshold=sigma_above_bg * stddev,
                                sky=med,
                                ratio=0.8)
        new_sources = daofind(image)
        x_centroids = new_sources['xcentroid']
        y_centroids = new_sources['ycentroid']
        sharpness = new_sources['sharpness']
        fluxes = new_sources['flux']
        peaks = new_sources['peak']

        #Cut sources that are found within 20 pix of the edges.
        use_x = np.where((x_centroids > 20) & (x_centroids < 1004))[0]
        x_centroids = x_centroids[use_x]
        y_centroids = y_centroids[use_x]
        sharpness = sharpness[use_x]
        fluxes = fluxes[use_x]
        peaks = peaks[use_x]
        use_y = np.where((y_centroids > 20) & (y_centroids < 1004))[0]
        x_centroids = x_centroids[use_y]
        y_centroids = y_centroids[use_y]
        sharpness = sharpness[use_y]
        fluxes = fluxes[use_y]
        peaks = peaks[use_y]

        #Also cut using sharpness, this seems to eliminate a lot of false detections.
        use_sharp = np.where(sharpness > 0.5)[0]
        x_centroids = x_centroids[use_sharp]
        y_centroids = y_centroids[use_sharp]
        sharpness = sharpness[use_sharp]
        fluxes = fluxes[use_sharp]
        peaks = peaks[use_sharp]

        #Cut sources in the lower left, if bars are present.
        #use_ll =  np.where((x_centroids > 512) | (y_centroids > 512))
        #x_centroids  = x_centroids [use_ll]
        #y_centroids  = y_centroids [use_ll]
        #sharpness = sharpness[use_ll]
        #fluxes = fluxes[use_ll]
        #peaks = peaks[use_ll]

        #Cut targets whose y centroids are near y = 512. These are usually bad.
        use_512 = np.where(
            np.logical_or((y_centroids < 509), (y_centroids > 515)))[0]
        x_centroids = x_centroids[use_512]
        y_centroids = y_centroids[use_512]
        sharpness = sharpness[use_512]
        fluxes = fluxes[use_512]
        peaks = peaks[use_512]

        #Cut sources with negative/saturated peaks
        use_peaks = np.where((peaks > 30) & (peaks < 7000))[0]
        x_centroids = x_centroids[use_peaks]
        y_centroids = y_centroids[use_peaks]
        sharpness = sharpness[use_peaks]
        fluxes = fluxes[use_peaks]
        peaks = peaks[use_peaks]

        #Do quick photometry on the remaining sources.
        positions = [(x_centroids[i], y_centroids[i])
                     for i in range(len(x_centroids))]
        apertures = CircularAperture(positions, r=4)
        phot_table = aperture_photometry(image - med, apertures)

        #Cut based on brightness.
        phot_table.sort('aperture_sum')
        cutoff = 1 * std * np.pi * 4**2  # `std` comes from the sigma_clipped_stats call in the enclosing scope
        bad_source_locs = np.where(phot_table['aperture_sum'] < cutoff)
        phot_table.remove_rows(bad_source_locs)

        if len(phot_table) > 15:
            x_centroids = phot_table['xcenter'].value[-16:-1]
            y_centroids = phot_table['ycenter'].value[-16:-1]
        else:
            x_centroids = phot_table['xcenter'].value
            y_centroids = phot_table['ycenter'].value

        return (x_centroids, y_centroids)

    def synthetic_image_maker(x_centroids, y_centroids, fwhm):
        #Construct synthetic images from centroid/flux data.
        synthetic_image = np.zeros((1024, 1024))
        sigma = fwhm / 2.355
        for i in range(len(x_centroids)):
            #Cut out little boxes around each source and add in Gaussian representations. This saves time.
            int_centroid_x = int(np.round(x_centroids[i]))
            int_centroid_y = int(np.round(y_centroids[i]))
            y_cut, x_cut = np.mgrid[int_centroid_y - 10:int_centroid_y + 10,
                                    int_centroid_x - 10:int_centroid_x + 10]
            dist = np.sqrt((x_cut - x_centroids[i])**2 +
                           (y_cut - y_centroids[i])**2)
            synthetic_image[y_cut,
                            x_cut] += np.exp(-((dist)**2 / (2 * sigma**2) +
                                               ((dist)**2 / (2 * sigma**2))))
        return (synthetic_image)

    def auto_correlation_seeing(im, cutout_w=15):

        plate_scale = 0.579  #arcsec/pix
        sigma_to_fwhm = 2.355

        #Set a row to NaNs, which will dominate the autocorrelation of Mimir images.
        im[513] = np.nan

        #Interpolate nans in the image, repeating until no nans remain.
        while sum(sum(np.isnan(im))) > 0:
            im = interpolate_replace_nans(im, kernel=Gaussian2DKernel(0.5))

        #Cut 80 pixels near top/bottom edges, which can dominate the fft if they have a "ski jump" feature.
        im = im[80:944, :]
        y_dim, x_dim = im.shape

        #Subtract off a simple estimate of the image background.
        im -= sigma_clipped_stats(im)[1]

        #Do auto correlation
        fft = signal.fftconvolve(im, im[::-1, ::-1], mode='same')

        #Do a cutout around the center of the fft.
        cutout = fft[int(y_dim / 2) - cutout_w:int(y_dim / 2) + cutout_w,
                     int(x_dim / 2) - cutout_w:int(x_dim / 2) + cutout_w]

        #Set the midplane of the cutout to nans and interpolate.
        cutout[cutout_w] = np.nan

        while sum(sum(np.isnan(cutout))) > 0:
            cutout = interpolate_replace_nans(cutout, Gaussian2DKernel(0.25))

        #Subtract off "background"
        cutout -= np.nanmedian(cutout)

        #Fit a 2D Gaussian to the cutout
        #Assume a seeing of 2".7, the average value measured for PINES.
        g_init = models.Gaussian2D(
            amplitude=np.nanmax(cutout),
            x_mean=cutout_w,
            y_mean=cutout_w,
            x_stddev=2.7 / plate_scale / sigma_to_fwhm * np.sqrt(2),
            y_stddev=2.7 / plate_scale / sigma_to_fwhm * np.sqrt(2))
        g_init.x_mean.fixed = True
        g_init.y_mean.fixed = True
        #Set limits on the fitted gaussians between 1".6 and 7".0
        #Factor of sqrt(2) corrects for autocorrelation of 2 gaussians.
        g_init.x_stddev.min = 1.6 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.y_stddev.min = 1.6 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.x_stddev.max = 7 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.y_stddev.max = 7 / plate_scale / sigma_to_fwhm * np.sqrt(2)

        fit_g = fitting.LevMarLSQFitter()
        y, x = np.mgrid[:int(2 * cutout_w), :int(2 * cutout_w)]
        g = fit_g(g_init, x, y, cutout)

        #Convert to fwhm in arcsec.
        seeing_fwhm_as = g.y_stddev.value / np.sqrt(
            2) * sigma_to_fwhm * plate_scale

        return seeing_fwhm_as

    plt.ion()

    target = target.replace(' ', '')

    #By default, point to today's date directory.
    ut_date = datetime.datetime.utcnow()
    if ut_date.month < 10:
        month_string = '0' + str(ut_date.month)
    else:
        month_string = str(ut_date.month)

    if ut_date.day < 10:
        day_string = '0' + str(ut_date.day)
    else:
        day_string = str(ut_date.day)

    date_string = str(ut_date.year) + month_string + day_string

    #Copy the test.fits file to the master_images directory in PINES scripts.
    #test_path =  '/mimir/data/obs72/'+date_string+'/test.fits'
    test_path = '/Users/obs72/Desktop/PINES_scripts/test_image/test.fits'
    target_path = '/Users/obs72/Desktop/PINES_scripts/master_images/' + target + '_master.fits'
    shutil.copyfile(test_path, target_path)

    file_path = '/Users/obs72/Desktop/PINES_scripts/master_images/' + target + '_master.fits'
    calibration_path = '/Users/obs72/Desktop/PINES_scripts/Calibrations/'

    #Open the image and calibration files.
    header = fits.open(file_path)[0].header
    exptime = header['EXPTIME']
    filter = header['FILTNME2']
    image = fits.open(file_path)[0].data[0:1024, :].astype('float')
    dark = fits.open(calibration_path + 'Darks/master_dark_' + str(exptime) +
                     '.fits')[0].data
    flat = fits.open(calibration_path + 'Flats/master_flat_' + filter +
                     '.fits')[0].data
    bpm_path = calibration_path + 'Bad_pixel_masks/bpm.fits'
    bpm = fits.open(bpm_path)[0].data

    #Reduce image.
    image = (image - dark) / flat

    #Interpolate over bad pixels
    image[np.where(bpm)] = np.nan
    kernel = Gaussian2DKernel(x_stddev=1)
    image = interpolate_replace_nans(image, kernel)

    if seeing == '':
        seeing = auto_correlation_seeing(image)
    else:
        seeing = float(seeing)
        #old code had a 2.355 factor. PSM thinks this is a bug
        #daostarfinder_fwhm = seeing*2.355/0.579

        #new code removes 2.355 factor

    print('Using seeing FWHM = {:1.1f}" to detect sources'.format(seeing))
    daostarfinder_fwhm = seeing / 0.579

    #Do a simple 2d background model.
    box_size = 32
    sigma_clip = SigmaClip(sigma=3.)
    bkg_estimator = MedianBackground()
    bkg = Background2D(image, (box_size, box_size),
                       filter_size=(3, 3),
                       sigma_clip=sigma_clip,
                       bkg_estimator=bkg_estimator)
    image = image - bkg.background

    avg, med, std = sigma_clipped_stats(image)

    #Save reduced image to test_image for inspection
    # hdu_reduced = fits.PrimaryHDU(image)
    # hdu_reduced.writeto('/Users/obs72/Desktop/PINES_scripts/test_image/master_reduced.fits',overwrite=True)

    #Find sources in the image.
    (x_centroids, y_centroids) = mimir_source_finder(image,
                                                     sigma_above_bg=5,
                                                     fwhm=daostarfinder_fwhm)

    #Plot the field with detected sources.
    plt.figure(figsize=(9, 9))
    norm = ImageNormalize(image, interval=ZScaleInterval())
    plt.imshow(image, origin='lower', norm=norm)
    plt.plot(x_centroids, y_centroids, 'rx')
    for i in range(len(x_centroids)):
        plt.text(x_centroids[i] + 8,
                 y_centroids[i] + 8,
                 str(i),
                 color='r',
                 fontsize=14)
    plt.title(
        'Inspect to make sure stars were found!\nO for magnification tool, R to reset view'
    )
    plt.tight_layout()
    plt.show()

    print('')
    print('')
    print('')

    #Prompt the user to remove any false detections.
    ids = input(
        'Enter ids of sources to be removed separated by commas (i.e., 4,18,22). If none to remove, hit enter. To break, ctrl + D. '
    )
    if ids != '':
        ids_to_eliminate = [int(i) for i in ids.split(',')]
        ids = [
            int(i) for i in np.linspace(0,
                                        len(x_centroids) - 1, len(x_centroids))
        ]
        ids_to_keep = []
        for i in range(len(ids)):
            if ids[i] not in ids_to_eliminate:
                ids_to_keep.append(ids[i])
    else:
        ids_to_keep = [
            int(i) for i in np.linspace(0,
                                        len(x_centroids) - 1, len(x_centroids))
        ]
    plt.clf()
    plt.imshow(image, origin='lower', vmin=med, vmax=med + 5 * std)
    plt.plot(x_centroids[ids_to_keep], y_centroids[ids_to_keep], 'rx')
    for i in range(len(x_centroids[ids_to_keep])):
        plt.text(x_centroids[ids_to_keep][i] + 8,
                 y_centroids[ids_to_keep][i] + 8,
                 str(i),
                 color='r')

    #Create the synthetic image using the accepted sources.
    synthetic_image = synthetic_image_maker(x_centroids[ids_to_keep],
                                            y_centroids[ids_to_keep], 8)
    plt.figure(figsize=(9, 7))
    plt.imshow(synthetic_image, origin='lower')
    plt.title('Synthetic image')
    plt.show()

    print('')
    print('')
    print('')
    #Now write to a master synthetic image.fits file.
    hdu = fits.PrimaryHDU(synthetic_image, header=header)
    if os.path.exists('master_images/' + target + '_master_synthetic.fits'):
        ans = input(
            'Master image already exists for target, type y to overwrite. ')
        if ans == 'y':
            os.remove('master_images/' + target + '_master_synthetic.fits')
            hdu.writeto('master_images/' + target + '_master_synthetic.fits')
            print('Writing master synthetic image to master_images/' + target +
                  '_master_synthetic.fits')
        else:
            print('New master synthetic image not saved.')
    else:
        hdu.writeto('master_images/' + target + '_master_synthetic.fits')
        print('Writing master synthetic image to master_images/' + target +
              '_master_synthetic.fits')

        #Open list of master images and append new one.
        master_image_list = '/Users/obs72/Desktop/PINES_scripts/input_file.txt'
        file_object = open(master_image_list, 'a')
        append_str = '/Users/obs72/Desktop/PINES_scripts/master_images/' + target + '_master.fits, 2MASS ' + target.split(
            '2MASS')[1]
        file_object.write('\n')
        file_object.write(append_str)
        file_object.close()
Code Example #28
                JulianDate = str(float(hdul[0].header['MJD-OBS']) + 2400000.5)
                print('Epoch: {:14}'.format(date))
                data[region][wavelength]['header']['airmass'][date] = AirMass
                data[region][wavelength]['header']['t225'][date] = tau225
                data[region][wavelength]['header']['julian_date'][
                    date] = JulianDate
                data[region][wavelength]['header']['elevation'][date] = elev
                data[region][wavelength]['dates'].append(date)
                centre = (hdul[0].header['CRPIX1'], hdul[0].header['CRPIX2']
                          )  # JCMT's alleged centre is
                hdu = hdul[
                    0]  # a nice compact way to store the data for later.

                # data[region][wavelength]['epoch'][date].append(hdu)
                Epoch = hdu.data[0]  # map of the region
                Map_of_Region = interpolate_replace_nans(
                    correlate(Epoch, clip_only=True), Gaussian2DKernel(5))
                Map_of_Region_smooth = convolve(Map_of_Region,
                                                align_smooth_kernel,
                                                normalize_kernel=False)
                Map_of_RegionXC = Map_of_Region - Map_of_Region_smooth

                XC = correlate(epoch_1=Map_of_RegionXC,
                               epoch_2=FirstEpochData).real
                PS = correlate(Map_of_Region, psd=True)
                AC = correlate(
                    Map_of_Region).real  # auto correlation of the map
                centre = (hdul[0].header['CRPIX1'], hdul[0].header['CRPIX2']
                          )  # JCMT's alleged centre is
                Vec = np.array([
                    centre[0] - (hdul[0].shape[2] // 2),
                    centre[1] - (hdul[0].shape[1] // 2)
Code Example #29
        Data['sigma_y'] = []
        Data['alpha'] = []

        Count = []
        for index in range(4, 60):
            atoms = Data["Raw"][index] - dark_mean

            # Generic Image preparation

            atoms_nans = (atoms <= 0)
            atoms[atoms_nans] = 1
            OD = -np.log((atoms) / (probe - dark_mean))
            OD[atoms_nans] = np.nan

            # Now use astropy to clean this up
            OD = interpolate_replace_nans(OD, kernel)

            BGCounts = np.sum(OD * BG_Mask['mask']) / BG_Mask['pts']
            OD -= BGCounts

            Data["OD"].append(OD)

            # Construct smooth background images
            # Next we setup a fit: I'll do a deformed gaussian peak.
            pguess = lmfit.Parameters()
            pguess.add('amp', value=1, min=-0.5, max=6)
            pguess.add('xc', value=0, min=-10, max=10)
            pguess.add('yc', value=0, min=-10, max=10)
            pguess.add('sigma_x', value=10, min=2, max=30)
            pguess.add('sigma_y', value=10, min=2, max=30)
            pguess.add('alpha', value=1, min=0.1, max=4)
Code Example #30
def merge_fits_files(infolder=None,
                     infiles=None,
                     suffix='',
                     fout=None,
                     method='mean',
                     prefix='',
                     sigma=3,
                     target=None,
                     filt=None,
                     rotangle=None,
                     northup=False,
                     refbox=20,
                     fitmeth='fastgauss',
                     refsign=1,
                     imgoffsetangle=None,
                     verbose=False,
                     tolrot=0.01,
                     pfov=None,
                     replace_nan_with='median',
                     instrument=None):
    """
    Take either a folder full of single (chop/nod) frames or a given list of
    files and simply average them into one merged file
    """

    if infolder is not None and infiles is None:
        infiles = [
            ff for ff in os.listdir(infolder)
            if (ff.endswith(suffix + '.fits')) & (ff.startswith(prefix))
        ]

    if infolder is None:
        infolder = ''

    nfiles = len(infiles)
    ima = []
    shapes = []
    rots = []

    ffull = infolder + "/" + infiles[0]
    hdu = fits.open(ffull)
    head0 = hdu[0].header
    hdu.close()

    if instrument is None:
        instrument = head0['INSTRUME']
        print('Found instrument: ', instrument)

    if instrument == 'VISIR':
        if imgoffsetangle is None:
            imgoffsetangle = _vp.imgoffsetangle
            rotsense = _vp.rotsense
            rotoffset = _vp.rotoffset

    elif (instrument == 'NAOS+CONICA') | (instrument == 'NACO'):
        imgoffsetangle = 0  # apparently correct?
        rotsense = -1  # for NACO the ADA.POSANG really gives the PA on sky
        rotoffset = 0

    elif instrument == "ISAAC":
        imgoffsetangle = _ip.imgoffsetangle
        rotsense = _ip.rotsense
        rotoffset = _ip.rotoffset

    print('Found n potential files: ', nfiles)

    if verbose:
        print("MERGE_FITS_FILES: fitmeth: ", fitmeth)
        print("MERGE_FITS_FILES: refbox: ", refbox)
        print("MERGE_FITS_FILES: refsign: ", refsign)

    for i in range(nfiles):

        ffull = infolder + "/" + infiles[i]
        hdu = fits.open(ffull)

        if verbose:
            print("MERGE_FITS_FILES: i, infiles[i]: ", i, infiles[i])

        head = hdu[0].header
        rot = head['HIERARCH ESO ADA POSANG']

        if "HIERARCH ESO TEL ROT ALTAZTRACK" in head:
            pupiltrack = (head["HIERARCH ESO TEL ROT ALTAZTRACK"])
        else:
            pupiltrack = False

        # --- if pupil tracking is on then the parang in the VISIR fits-header is
        #     not normalised by the offset angle of the VISIR imager with respect
        #     to the adapter/rotator
        if pupiltrack:
            rot = rot - imgoffsetangle

        rot += rotoffset

        if verbose:
            print("MERGE_FITS_FILES: rot: ", rot)

        if target is not None:
            targ = head['HIERARCH ESO OBS TARG NAME']
            if str(targ) != target:
                continue
        if filt is not None:
            filtf = head['HIERARCH ESO INS FILT1 NAME']
            if str(filtf) != filt:
                continue
        if rotangle is not None:
            if rot != rotangle:
                continue

        im = hdu[0].data
        hdu.close()

        # --- rotate images if north-up orientation is requested
        if np.abs(rot) > tolrot and northup:

            # --- ndimage is not compatible with NaNs, so we need to replace them
            print(
                "MERGE_FITS_FILES: Encountered NaNs not compatible with " +
                "rotation. Replacing with ", replace_nan_with)

            if isinstance(replace_nan_with, (float, int)):
                im[np.isnan(im)] = replace_nan_with

            elif replace_nan_with == 'median':
                im[np.isnan(im)] = np.nanmedian(im)

            # --- WARNING: does not work with large NaN areas at the borders
            elif replace_nan_with == 'interpol':
                kernel = Gaussian2DKernel(x_stddev=1)
                im = interpolate_replace_nans(im, kernel)

            im = ndimage.rotate(im, rotsense * rot, order=3)

        ima.append(im)
        shapes.append(np.shape(im))
        rots.append(rot)

    shapes = np.array(shapes)
    n = len(ima)

    if verbose:
        print("MERGE_FITS_FILES: n: ", n)

    # --- after rotating, the images probably have different sizes
    if northup:
        minsize = [np.min(shapes[:, 0]), np.min(shapes[:, 1])]
        for i in range(n):

            if verbose:
                print("MERGE_FITS_FILES: i: ", i)
                print("MERGE_FITS_FILES: shapes: ", shapes[i])
                print("MERGE_FITS_FILES: argmax: ",
                      np.unravel_index(np.nanargmax(ima[i]), shapes[i]))

                plt.imshow(ima[i],
                           origin='bottom',
                           interpolation='nearest',
                           norm=LogNorm())
                plt.title(
                    str(i) + ' - rot ' + str(rots[i]) +
                    ' - before centered crop')
                plt.show()

            fit, _, _ = _find_source(ima[i],
                                     method=fitmeth,
                                     searchbox=refbox,
                                     fitbox=refbox,
                                     sign=refsign,
                                     verbose=verbose,
                                     plot=verbose)

            ima[i] = _crop_image(ima[i], box=minsize, cenpos=fit[2:4])

            if verbose:
                plt.imshow(ima[i],
                           origin='bottom',
                           interpolation='nearest',
                           norm=LogNorm())

                plt.title(str(i) + ' - rot ' + str(rots[i]))
                plt.show()
                print(i, fit)

        # --- if a WCS is present in the header, it needs to be updated to
        #     match the cropped and rotated output image
        if "CTYPE1" in head0:
            if head0["CTYPE1"] == "RA---TAN":
                if pfov is None:
                    if "HIERARCH ESO INS PFOV" in head0:
                        pfov = float(head0["HIERARCH ESO INS PFOV"])  # VISIR
                    else:
                        pfov = float(head0["HIERARCH ESO INS PIXSCALE"])  # NACO

                ra = head0["HIERARCH ESO TEL TARG ALPHA"]
                sec = ra % 100
                min = (ra % 10000 - sec) / 100
                hour = (int(ra) / 10000)
                ra_deg = 15 * (hour + min / 60.0 + sec / 3600.0)
                # print(ra_deg)

                dec = head0["HIERARCH ESO TEL TARG DELTA"]
                sec = dec % 100
                min = (dec % 10000 - sec) / 100
                deg = (int(dec) / 10000)
                if deg < 0:
                    dec_deg = deg - min / 60.0 - sec / 3600.0
                else:
                    dec_deg = deg + min / 60.0 + sec / 3600.0
                # print(ra_deg)

                head0["CRPIX1"] = minsize[1] * 0.5
                head0["CRPIX2"] = minsize[0] * 0.5
                head0["CRVAL1"] = ra_deg
                head0["CRVAL2"] = dec_deg
                if "CD1_1" in head0:
                    del head0["CD1_1"]
                    del head0["CD1_2"]
                    del head0["CD2_1"]
                    del head0["CD2_2"]
                head0["CDELT1"] = -pfov / 3600.0
                head0["CDELT2"] = pfov / 3600.0

    ima = np.array(ima)

    if method == 'mean':
        outim = np.nanmean(ima, axis=0)

    elif method == 'median':
        outim = np.nanmedian(ima, axis=0)

    elif method == 'sigmaclip':
        outim = np.array(
            np.mean(sigma_clip(ima, sigma=sigma, axis=0, maxiters=2), axis=0))

    else:
        raise ValueError("Unknown combination method: " + str(method))

    print(" - " + str(n) + " images combined.")

    if fout is not None:
        fits.writeto(fout, outim, head0, overwrite=True)

    return outim
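
A minimal usage sketch for merge_fits_files, assuming a folder of reduced VISIR chop/nod frames; the paths and file-name pattern here are hypothetical:

merged = merge_fits_files(infolder='/data/visir/target1',
                          suffix='_reduced',
                          method='sigmaclip',
                          sigma=3,
                          northup=True,
                          fout='/data/visir/target1_merged.fits')
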
Code example #31
    def auto_correlation_seeing(im, cutout_w=15):

        plate_scale = 0.579  #arcsec/pix
        sigma_to_fwhm = 2.355

        #Set a row to NaNs, which will dominate the autocorrelation of Mimir images.
        im[513] = np.nan

        #Interpolate NaNs in the image, repeating until no NaNs remain.
        while np.isnan(im).any():
            im = interpolate_replace_nans(im, kernel=Gaussian2DKernel(0.5))

        #Cut 80 pixels near top/bottom edges, which can dominate the fft if they have a "ski jump" feature.
        im = im[80:944, :]
        y_dim, x_dim = im.shape

        #Subtract off a simple estimate of the image background.
        im -= sigma_clipped_stats(im)[1]

        #Do auto correlation
        fft = signal.fftconvolve(im, im[::-1, ::-1], mode='same')

        #Do a cutout around the center of the fft.
        cutout = fft[int(y_dim / 2) - cutout_w:int(y_dim / 2) + cutout_w,
                     int(x_dim / 2) - cutout_w:int(x_dim / 2) + cutout_w]

        #Set the midplane of the cutout to nans and interpolate.
        cutout[cutout_w] = np.nan

        while np.isnan(cutout).any():
            cutout = interpolate_replace_nans(cutout, Gaussian2DKernel(0.25))

        #Subtract off "background"
        cutout -= np.nanmedian(cutout)

        #Fit a 2D Gaussian to the cutout
        #Assume a seeing of 2".7, the average value measured for PINES.
        g_init = models.Gaussian2D(
            amplitude=np.nanmax(cutout),
            x_mean=cutout_w,
            y_mean=cutout_w,
            x_stddev=2.7 / plate_scale / sigma_to_fwhm * np.sqrt(2),
            y_stddev=2.7 / plate_scale / sigma_to_fwhm * np.sqrt(2))
        g_init.x_mean.fixed = True
        g_init.y_mean.fixed = True
        #Set limits on the fitted gaussians between 1".6 and 7".0
        #Factor of sqrt(2) corrects for autocorrelation of 2 gaussians.
        g_init.x_stddev.min = 1.6 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.y_stddev.min = 1.6 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.x_stddev.max = 7 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.y_stddev.max = 7 / plate_scale / sigma_to_fwhm * np.sqrt(2)

        fit_g = fitting.LevMarLSQFitter()
        y, x = np.mgrid[:int(2 * cutout_w), :int(2 * cutout_w)]
        g = fit_g(g_init, x, y, cutout)

        #Convert to fwhm in arcsec.
        seeing_fwhm_as = g.y_stddev.value / np.sqrt(
            2) * sigma_to_fwhm * plate_scale

        return seeing_fwhm_as
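
The sqrt(2) factors above come from the fact that the autocorrelation of a Gaussian of width sigma is a Gaussian of width sigma * sqrt(2). A standalone sketch verifying that relation on a synthetic image (not part of the original code):

import numpy as np
from scipy import signal

# Synthetic image containing a single Gaussian "star" of known width.
sigma = 3.0
y, x = np.mgrid[:128, :128]
star = np.exp(-0.5 * (((x - 64) / sigma) ** 2 + ((y - 64) / sigma) ** 2))

# Autocorrelate; the central peak should have width sigma * sqrt(2).
acf = signal.fftconvolve(star, star[::-1, ::-1], mode='same')
profile = acf[:, 64] / acf.max()

# The offset where the profile drops to exp(-0.5) estimates the peak's stddev.
d = np.argmin(np.abs(profile[64:] - np.exp(-0.5)))
print(d, sigma * np.sqrt(2))  # ~4 vs ~4.24
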
Code example #32
def source_detect_astrometry(target, api_key, filename, download_data=False):
    '''
        Authors:
            Pat Tamburo, Boston University, May 2021
        Purpose: 
            Uploads a single reduced image for a target to astrometry.net, downloads solution image, and updates the image header with the astrometry.net wcs. 
        Inputs:
            target (str): The target's long 2MASS name (e.g., '2MASS J09161504+2139512')
            api_key (str): The api key of your account on astrometry.net. 
            filename (pathlib.Path): Path to the source_detect_image.
            download_data (bool, optional): Whether or not to first download reduced data for the target from the PINES server. 
    '''

    kernel = Gaussian2DKernel(x_stddev=0.25)

    #If the header does not have a HISTORY keyword (which is added by astrometry.net), process it.
    header = fits.getheader(filename)

    if 'HISTORY' not in header:
        print('Uploading {} to astrometry.net.'.format(filename.name))
        #Read in the image data, interpolate NaNs, and save to a temporary fits file.
        #Astrometry.net does not work with NaNs in images.
        original_image = fits.getdata(filename)
        image = interpolate_replace_nans(original_image, kernel=kernel)
        temp_filename = filename.parent / (filename.name.split('.fits')[0] +
                                           '_temp.fits')
        hdu = fits.PrimaryHDU(image, header=header)
        hdu.writeto(temp_filename, overwrite=True)

        #Upload the image with interpolated NaNs to astrometry.net. It will solve and download to a new image.
        pat.astrometry.upload_to_astrometry.upload_file(
            api_key, temp_filename, header)

        #Try to download the solved image and open it.
        try:
            #Grab the header of the astrometry.net solution image, and the original image data.
            astrometry_image_path = filename.parent / (
                temp_filename.name.split('.fits')[0] + '_new_image.fits')
            wcs_header = fits.getheader(astrometry_image_path)
            wcs_hdu = fits.PrimaryHDU(original_image, header=wcs_header)

            #Save the original image data with the new wcs header.
            output_filename = filename
            wcs_hdu.writeto(output_filename, overwrite=True)

        #If the try clause didn't work, the processing on astrometry.net failed.
        except Exception:
            raise RuntimeError(
                'Astrometry solution failed! Use a different source detect image for choosing reference stars.'
            )

        #Delete temporary files.
        os.remove(temp_filename)
        os.remove(astrometry_image_path)
        time.sleep(1)
        print('')

    #If the header DOES have a HISTORY keyword, skip it, it has already been processed.
    else:
        print(
            'Astrometric processing already complete for {}, skipping.'.format(
                filename.name))
        print('')
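
A minimal usage sketch for source_detect_astrometry; the API key and file path below are placeholders:

from pathlib import Path

source_detect_astrometry(target='2MASS J09161504+2139512',
                         api_key='YOUR_ASTROMETRY_NET_API_KEY',
                         filename=Path('/data/pines/reduced/test_image_red.fits'))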