Example #1
def getDAOStarFinderStats(img_file,
                          target_dir,
                          kernel_in_pix,
                          sig_clipping_for_stats=3.0,
                          star_find_n_sig_threshold=4.0,
                          fit_radius_scaling=5.0,
                          bg_radii_scalings=[7.0, 8.0]):
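    #Read in the image, estimate the background with sigma-clipped statistics, and run
    #  photutils' DAOStarFinder on the background-subtracted data.  Returns the detected
    #  sources' centroids and shape/flux statistics as a dictionary of numpy arrays.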

    data, header = can.readInDataFromFitsFile(img_file, target_dir)
    mean, median, std = astrostats.sigma_clipped_stats(
        data, sigma=sig_clipping_for_stats)
    print('Finding stars in image: ' + target_dir + img_file)
    daofind = DAOStarFinder(fwhm=kernel_in_pix,
                            threshold=star_find_n_sig_threshold * std)
    sources = daofind(data - median)
    results = {
        'xcentroid': sources['xcentroid'].data,
        'ycentroid': sources['ycentroid'].data,
        'sharpness': sources['sharpness'].data,
        'roundness1': sources['roundness1'].data,
        'roundness2': sources['roundness2'].data,
        'npix': sources['npix'].data,
        'sky': sources['sky'].data,
        'peak': sources['peak'].data,
        'flux': sources['flux'].data,
        'mag': sources['mag'].data
    }

    return results
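A minimal usage sketch (the file name, directory, and FWHM value below are hypothetical; the function only needs a readable FITS image and a rough PSF kernel size in pixels):

stats = getDAOStarFinderStats('science_frame.fits', '/path/to/data/', kernel_in_pix=4.0)
print('Found ' + str(len(stats['xcentroid'])) + ' stars')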
Example #2
    def getImage(self, flux_pattern, exp_time, temp = -15.0, use_master_bias = 0, use_master_dark = 0, binning = [1,1]):
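        #Simulate a raw CCD frame in ADU: Poisson-like source and dark signal (Gaussian
        #  approximation), plus a bias level and Gaussian read noise.  Master bias/dark
        #  frames can be used in place of the analytic bias level and dark-current model.
        #  The signal is binned and divided by the gain before being returned as integers.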
        print ( 'use_master_bias = ' + str(use_master_bias) ) 
        #print 'binning = ' + str(binning)
        #print 'self.size = ' + str(self.size) 
        binned_size = [self.size[0] // binning[0], self.size[1] // binning[1]]
        if use_master_bias:
            print ( 'Using master bias file: ' + self.master_bias_file )
            bias_array, bias_header = readInDataFromFitsFile(self.master_bias_file, self.master_bias_dir)
            bias_array = bias_array.transpose() 
            bias_signal = bias_array[0:self.size[0], 0:self.size[1]]
            bias_signal = binArray(bias_signal, binning) / (binning[0] * binning[1])
            print ( 'np.shape(bias_signal) = ' + str(np.shape(bias_signal))  )
            
            #bias_signal = np.random.normal(self.bias_level, self.rdnoise, tuple(binned_size))
        else:
            #Use the binned size so that the constant bias level matches the binned CCD signal added below
            bias_signal = np.zeros(tuple(binned_size)) + self.bias_level
        bias_noise = np.random.normal(0.0, self.rdnoise, tuple(binned_size))
        if use_master_dark:
            print ('Using master dark file: ' + self.master_dark_file )
            dark_array, dark_header = readInDataFromFitsFile(self.master_dark_file, self.master_dark_dir)
            dark_array = dark_array.transpose()
            dark_array = dark_array[0:self.size[0], 0:self.size[1]]
            print ('np.shape(dark_array) = ' + str(np.shape(dark_array)) )
            dark_sigma_deviations = np.random.normal(0.0, 1.0, tuple(self.size))
            print ('np.shape(dark_sigma_deviations ) = ' + str(np.shape(dark_sigma_deviations )) )
            dark_signal = dark_array * exp_time + np.sqrt(dark_array * exp_time) * dark_sigma_deviations
            dark_signal = dark_signal[0:self.size[0], 0:self.size[1]]
            #dark_signal = np.random.normal(dark_array[0:self.size[0], 0:self.size[1]] * exp_time, np.sqrt(dark_array[0:self.size[0], 0:self.size[1]] * exp_time), tuple(binned_size))
            #dark_signal = np.random.normal(dark_current * exp_time, np.sqrt(dark_current * exp_time), tuple(self.size))
        else:
            dark_current = self.dark_current_interp(temp)
            dark_signal = np.random.normal(dark_current * exp_time, np.sqrt(dark_current * exp_time), tuple(self.size))

        source_sigma_deviations = np.random.normal(0.0, 1.0, tuple(self.size))
        source_signal = flux_pattern * exp_time + np.sqrt(flux_pattern * exp_time) * source_sigma_deviations 
        #source_signal = np.random.normal(flux_pattern * exp_time, np.sqrt(flux_pattern * exp_time), tuple(self.size))

        print ('np.shape(bias_signal) = ' + str(np.shape(bias_signal)) )
        print ('np.shape(dark_signal) = ' + str(np.shape(dark_signal)) )
        print ('np.shape(source_signal) = ' + str(np.shape(source_signal)) )
        ccd_signal = dark_signal + source_signal
        print ('Binning CCD signal before adding read noise...'  )
        print ('binning = ' + str(binning)) 
        ccd_signal = binArray(ccd_signal, binning) 
            
        return ((ccd_signal + bias_signal + bias_noise) / self.gain).astype(int)
Example #3
    def __init__(self, fits_file, load_dir = '', fits_data_type = 'image', n_mosaic_extensions = 0):
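        #Load a FITS image or table (optionally a multi-extension mosaic) and store the
        #  data array and header as attributes.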
        if fits_data_type in ['i','I','img','IMG','Img','image','IMAGE','Image']:
            fits_data_type = 'image'
        self.fits_file = fits_file
        self.target_dir = load_dir
        self.fits_data_type = fits_data_type
        self.n_mosaic_extensions = n_mosaic_extensions

        self.data, self.header = can.readInDataFromFitsFile(self.fits_file, self.target_dir, n_mosaic_image_extensions = self.n_mosaic_extensions, data_type = self.fits_data_type )

        if n_mosaic_extensions <= 1:
            self.data = np.transpose(self.data)
        else:
            #The else branch is truncated in the original snippet; mosaic data are left as read in
            pass
Example #4
def getOneDSpectrumFromFile(file_name,
                            file_dir='',
                            summing_method='median',
                            size=[1024, 1024],
                            binning=[1, 1],
                            spectrum_box=[[0, 1023], [417, 652]],
                            pix_to_wavelength_funct=None):
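    #Read a 2-d spectral frame and collapse the region defined by spectrum_box into a
    #  1-d spectrum via SCD.measureSpectrum, optionally mapping pixel to wavelength.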
    data_array, header = readInDataFromFitsFile(file_name, file_dir)
    print('np.shape(data_array) = ' + str(np.shape(data_array)))
    one_d_spectrum = SCD.measureSpectrum(
        spectrum_box,
        data_array,
        summing_method=summing_method,
        binning=binning,
        pix_to_wavelength_funct=pix_to_wavelength_funct)
    return one_d_spectrum
Example #5
    def __init__(
            self,
            data_file='redmapper_dr8_public_v6.3_catalog.fits',
            data_dir='/Users/sashabrownsberger/Documents/Harvard/physics/stubbs/ClusterCatalogues/',
            use_spec_zs=0):
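        #Read the redMaPPer cluster catalog (FITS table) and store the cluster RAs, Decs,
        #  and either photometric or spectroscopic redshifts.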

        data_table, col_names, header = cant.readInDataFromFitsFile(
            data_file, data_dir, data_type='table')
        self.spec_z_keyword = 'z_spec'
        self.phot_z_keyword = 'z_LAMBDA'
        self.RA_keyword = 'RA'
        self.Dec_keyword = 'Dec'

        self.rm_RAs = data_table[self.RA_keyword]
        self.rm_Decs = data_table[self.Dec_keyword]
        rm_phot_zs = data_table[self.phot_z_keyword]
        rm_spec_zs = data_table[self.spec_z_keyword]

        if use_spec_zs:
            self.rm_zs = rm_spec_zs
        else:
            self.rm_zs = rm_phot_zs
Example #6
def measureStatisticsOfFitsImages(
        img_list,
        data_dir=None,
        n_mosaic_image_extensions=0,
        #img_indeces_to_id = None, img_ids = None,
        time_header_key='STARTEXP',
        time_header_formatting='%Y-%m-%dT%H:%M:%SZ',
        stat_type='median',
        show_plot=1,
        n_std_lims=3.5,
        save_plot=0,
        save_plot_name=None,
        ax=None,
        data_sect=None,
        xlabel=r'$\Delta t$ (sec)',
        ylabel=None,
        labelsize=16,
        title='',
        titlesize=20,
        color='k'):
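    #Measure a statistic (median, mean, or std) of each FITS image in a list and plot it
    #  against the time elapsed since the first exposure, read from the time_header_key card.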
    if ylabel is None:
        ylabel = stat_type + ' of counts in images'

    stats = []
    exp_time_strs = []
    for i in range(len(img_list)):
        img = img_list[i]
        data, header = can.readInDataFromFitsFile(
            img, data_dir, n_mosaic_image_extensions=n_mosaic_image_extensions)
        if data_sect is not None:
            data = data[data_sect[0][0]:data_sect[0][1],
                        data_sect[1][0]:data_sect[1][1]]
        if stat_type == 'median':
            stat = np.median(data)
        elif stat_type == 'mean':
            stat = np.mean(data)
        elif stat_type == 'std':
            stat = np.std(data)
        else:
            raise ValueError('Unrecognized stat_type: ' + str(stat_type))

        stats = stats + [stat]
        exp_time_strs = exp_time_strs + [header[time_header_key]]
    stats_mean = can.sigClipMean(stats, sig_clip=n_std_lims)
    stats_std = can.sigClipStd(stats, sig_clip=n_std_lims)
    exp_times_datetimes = [
        datetime.strptime(exp_time_str, time_header_formatting)
        for exp_time_str in exp_time_strs
    ]
    exp_times = [exp_time.timestamp() for exp_time in exp_times_datetimes]
    min_time = min(exp_times)
    delta_ts = [exp_time - min_time for exp_time in exp_times]
    if ax is None:
        f, axarr = plt.subplots(1, 1)
        ax = axarr
    ax.plot(delta_ts, stats, marker='.', c=color)
    ax.set_ylim(stats_mean - stats_std * n_std_lims,
                stats_mean + stats_std * n_std_lims)
    ax.set_xlabel(xlabel, fontsize=labelsize)
    ax.set_ylabel(ylabel, fontsize=labelsize)
    ax.set_title(title, fontsize=titlesize)
    if save_plot and save_plot_name is not None:
        plt.savefig(save_plot_name)
    if show_plot:
        plt.show()

    return exp_times, stats
Example #7
def measure1dPSFOfStar(
        init_x,
        init_y,
        img_file,
        target_dir,
        init_fwhm_guess_arcsec,
        fit_radius_scaling=5.0,
        bg_radii_scalings=[6.0, 7.0],
        init_fit_guess=None,
        radial_fit_funct=lambda rsqr, A, sig: A * np.exp(-rsqr /
                                                         (2.0 * sig**2.0)),
        show_fit=1,
        max_iters=5,
        fwhm_convergence_arcsec=0.01,
        pixel_scaling=0.1,
        max_search_rad_in_pix=100,
        min_n_iters_before_completion=2,
        verbose=0):
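    #Iteratively fit a 1-d radial profile (Gaussian by default) to a star: each pass
    #  re-centers the fit and rescales the fitting radius and background annulus from the
    #  previous width, until successive fits agree to within fwhm_convergence_arcsec or
    #  max_iters is reached.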

    init_fwhm_guess = init_fwhm_guess_arcsec / pixel_scaling
    if verbose: print('init_fwhm_guess in pixels = ' + str(init_fwhm_guess))
    img, header = can.readInDataFromFitsFile(img_file, target_dir)
    init_sig_guess = fwhmToSig(init_fwhm_guess)
    current_fit_params = [init_x, init_y, np.nan, init_sig_guess]
    current_fit_guess = init_fit_guess
    convergence_satisfied = 0
    n_iters = 0
    fit_params_sequence = []

    #No convergence criterion yet defined
    while not (convergence_satisfied):
        try:
            fit_params_sequence = fit_params_sequence + [current_fit_params[:]]
            current_x, current_y, current_height, current_sig = current_fit_params
            if verbose:
                print('current_fit_params = ' + str(current_fit_params))
            if current_sig * bg_radii_scalings[1] > max_search_rad_in_pix:
                current_sig = max_search_rad_in_pix // bg_radii_scalings[1]

            current_fit = measure1DPSFOfStarFixedRadius(
                current_x,
                current_y,
                img,
                round(fit_radius_scaling * current_sig), [
                    round(scaling * current_sig)
                    for scaling in bg_radii_scalings
                ],
                fit_guess=current_fit_guess,
                radial_fit_funct=radial_fit_funct,
                show_fit=show_fit,
                verbose=verbose,
                display_title='Centroiding at ' +
                str([can.round_to_n(init_x, 5),
                     can.round_to_n(init_y, 5)]) + ' in file ' + img_file)
            current_fit_params = current_fit[0:-1]
            current_fit_minimized_val = current_fit[-1]
            current_fit_guess = [
                0.0, 0.0, current_fit_params[2], current_fit_params[3]
            ]
            n_iters = n_iters + 1
            if n_iters > max(min_n_iters_before_completion - 1, 1):
                if sigToFwhm(
                        abs(fit_params_sequence[-1][0] -
                            fit_params_sequence[-2][0])
                ) <= fwhm_convergence_arcsec:
                    if verbose:
                        print('Converged after ' + str(n_iters) +
                              ' iterations. ')
                    break
            if max_iters < n_iters:
                print('Maximum iterations reached.  Returning current fit.')
                break
        except ValueError as e:
            print('Fitting to that star failed with ValueError: "' + str(e) +
                  '".  Returning "None"')
            return None
    return current_fit_params
Example #8
    def updateFitsHeaderWithSolvedField(self,
                                        wcs_solution_fits_file,
                                        original_image_file,
                                        new_image_file,
                                        target_dir,
                                        verbose=0):
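        #Copy the WCS keywords from an astrometry.net solution file into the header of the
        #  original image and save the result as new_image_file.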
        #target_dir, target_wcs_fits_file, target_image_file, wcs_prefix = sys.argv[1:]

        wcs_data, wcs_header = c.readInDataFromFitsFile(
            wcs_solution_fits_file, target_dir)
        image_data, image_header = c.readInDataFromFitsFile(
            original_image_file, target_dir)

        keywords_to_copy = [
            'WCSAXES',
            'CTYPE1',
            'CTYPE2',
            'EQUINOX',
            'LONPOLE',
            'LATPOLE',
            'CRVAL1',
            'CRVAL2',
            'CRPIX1',
            'CRPIX2',
            'CUNIT1',
            'CUNIT2',
            'CD1_1',
            'CD1_2',
            'CD2_1',
            'CD2_2',
            'IMAGEW',
            'IMAGEH',
            'A_ORDER',
            'A_0_0',
            'A_0_1',
            'A_0_2',
            'A_0_3',
            'A_1_0',
            'A_1_1',
            'A_1_2',
            'A_2_0',
            'A_2_1',
            'A_3_0',
            'B_ORDER',
            'B_0_0',
            'B_0_1',
            'B_0_2',
            'B_0_3',
            'B_1_0',
            'B_1_1',
            'B_1_2',
            'B_2_0',
            'B_2_1',
            'B_3_0',
            'AP_ORDER',
            'AP_0_0',
            'AP_0_1',
            'AP_0_2',
            'AP_0_3',
            'AP_1_0',
            'AP_1_1',
            'AP_1_2',
            'AP_2_0',
            'AP_2_1',
            'AP_3_0',
            'BP_ORDER',
            'BP_0_0',
            'BP_0_1',
            'BP_0_2',
            'BP_0_3',
            'BP_1_0',
            'BP_1_1',
            'BP_1_2',
            'BP_2_0',
            'BP_2_1',
            'BP_3_0',
        ]

        image_header[
            'COMMENT'] = 'WCS information generated by nova.astrometry.net'
        image_header[
            'COMMENT'] = 'See file ' + wcs_solution_fits_file + ' for details.'
        for key in keywords_to_copy:
            try:
                image_header[key] = wcs_header[key]
            except KeyError:
                if verbose:
                    print('header keyword ' + str(key) +
                          ' not found in wcs file ' + wcs_solution_fits_file)

        c.saveDataToFitsFile(image_data.transpose(),
                             new_image_file,
                             target_dir,
                             header=image_header)

        return new_image_file
Example #9
def measurePISCOCTE(imgs_list,
                    img_strs=None,
                    target_dir='',
                    bias_file='BIAS.fits',
                    already_cropped=0,
                    PISCO_mosaic_extensions=8,
                    binning=1,
                    lowx_bin_2x2=40,
                    highx_bin_2x2=1510,
                    lowy_bin_2x2=[365, 429, 300, 375],
                    highy_bin_2x2=[2505, 2569, 2440, 2515],
                    correlation_length=1,
                    n_max_for_correllation=200,
                    figsize=[15, 5],
                    bands=['g', 'r', 'i', 'z'],
                    read_in_from_file=1,
                    save_file_name='cteTransferCurves.png',
                    save_arrays_to_fits=0,
                    save_cte_fig=0,
                    show_cte_fig=0,
                    direction_flips=[[0, 0], [1, 0], [0, 1], [1, 1]]):
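    #Estimate charge-transfer (CTE) trends in PISCO mosaic images: crop each band, measure
    #  pixel-correlation deviations along rows and columns, fit a quadratic to the row
    #  trend, and plot the smoothed curves for every image and band.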
    if img_strs is None:
        img_strs = ['' for img in imgs_list]
    if binning == 1:
        lowx = lowx_bin_2x2 * 2
        highx = highx_bin_2x2 * 2
        lowy = [low * 2 for low in lowy_bin_2x2]
        highy = [high * 2 for high in highy_bin_2x2]
    else:
        lowx = lowx_bin_2x2
        highx = highx_bin_2x2
        lowy = lowy_bin_2x2
        highy = highy_bin_2x2

    if already_cropped: n_mosaic_extensions = PISCO_mosaic_extensions
    else: n_mosaic_extensions = 0
    #Need to work on images one at a time, as we otherwise will run out of memory
    img_medians = [[0, 0, 0, 0, 0.0, 0.0] for img in imgs_list]
    #horizontal_shifted_imgs = [[[] for band in bands] for img_str in imgs_list]
    #vertical_shifted_imgs = [[[] for band in bands] for img_str in imgs_list]
    connected_deviations_by_row = [[[] for band in bands] for img in imgs_list]
    connected_deviations_by_col = [[[] for band in bands] for img in imgs_list]
    for i in range(len(imgs_list)):
        img_str = img_strs[i]
        if read_in_from_file:
            img_file = imgs_list[i]
            if bias_file is None:
                imgs, header = c.readInDataFromFitsFile(
                    img_file,
                    target_dir,
                    n_mosaic_image_extensions=PISCO_mosaic_extensions)
                imgs = piscobr.PISCOStitch(imgs)
            else:
                imgs, header = piscobr.OverscanCorrect(img_file,
                                                       target_dir,
                                                       binning,
                                                       oscan_prefix='os_',
                                                       return_data=1,
                                                       save_data=0,
                                                       overscan_fit_order=1,
                                                       overscan_buffer=10)
                imgs = piscobr.PISCOStitch(imgs)
                bias = piscobr.loadimageFB(target_dir + bias_file)
                imgs = imgs - bias
            cropped_imgs = [
                imgs[j][lowy[j]:highy[j], lowx:highx] for j in range(len(imgs))
            ]
        else:
            cropped_imgs = [
                imgs_list[i][j][lowy[j]:highy[j], lowx:highx]
                for j in range(len(imgs_list[i]))
            ]
            imgs = imgs_list
        cropped_imgs = [np.transpose(img) for img in cropped_imgs]
        #for img in cropped_imgs:
        #    plt.imshow(img)
        #    plt.show()
        cropped_medians = [np.median(img) for img in cropped_imgs]
        img_medians[i] = cropped_medians
        cropped_cte_measurements = [
            c.measureImageCorrelations(
                img,
                correlation_pixel_length=correlation_length,
                n_max_for_correllation=n_max_for_correllation)
            for img in cropped_imgs
        ]
        if save_arrays_to_fits:
            [
                c.saveDataToFitsFile(cropped_imgs[j],
                                     'TEST_CTE_CROPPED_' + bands[j] + img_str,
                                     target_dir)
                for j in range(len(cropped_imgs))
            ]
            #print ('[cropped_cte_measurements[2][0][0,0],cropped_cte_measurements[0][1][0,0]] = ' + str([cropped_cte_measurements[0][0][0,0],cropped_cte_measurements[0][1][0,0]]))
            [[
                c.saveDataToFitsFile(cropped_cte_measurements[j][0],
                                     'TEST_CTE_HORIZ_' + bands[j] + img_str,
                                     target_dir),
                c.saveDataToFitsFile(
                    cropped_cte_measurements[j][1],
                    'TEST_CTE_VERTICAL_' + ['g', 'r', 'i', 'z'][j] + img_str,
                    target_dir)
            ] for j in range(len(cropped_imgs))]
        #print('[cropped_cte_measurements[0][0][3555 - 731, 267 - 21], cropped_cte_measurements[0][1][3555 - 731, 267 - 21]] = ' + str([cropped_cte_measurements[0][0][3555 - 731, 267 - 21], cropped_cte_measurements[0][1][3555 - 731, 267 - 21]]))
        print('Computing means of cropped images...')
        #horizontal_shifted_imgs[i] = [cropped_cte_measurement[0] for cropped_cte_measurement in cropped_cte_measurements]
        #vertical_shifted_imgs[i] = [cropped_cte_measurement[1] for cropped_cte_measurement in cropped_cte_measurements]
        connected_deviations_by_row[i] = [
            cropped_cte_measurement[2]
            for cropped_cte_measurement in cropped_cte_measurements
        ]
        connected_deviations_by_col[i] = [
            cropped_cte_measurement[3]
            for cropped_cte_measurement in cropped_cte_measurements
        ]

    img_medians, connected_deviations_by_row, connected_deviations_by_col, sorted_img_strs = c.safeSortOneListByAnother(
        [med[-1] for med in img_medians], [
            img_medians, connected_deviations_by_row,
            connected_deviations_by_col, img_strs
        ])
    f, axarr = plt.subplots(len(img_strs),
                            len(bands),
                            figsize=figsize,
                            sharex=True,
                            sharey=True,
                            squeeze=False)
    plt.subplots_adjust(hspace=0.0, wspace=0.0)
    [axarr[0][band_num].set_xlabel('Pixel') for band_num in range(len(bands))]
    [axarr[-1][band_num].set_xlabel('Pixel') for band_num in range(len(bands))]
    fit_funct = lambda pix, a, p, b: np.array(pix / a + b)**p
    all_fits = [[[] for band in bands] for i in range(len(imgs_list))]
    for i in range(len(sorted_img_strs)):
        img_str = sorted_img_strs[i]
        axarr[i][0].set_ylabel(img_str)
        for band_num in range(len(bands)):
            n_rows = len(connected_deviations_by_row[i][band_num])
            n_cols = len(connected_deviations_by_col[i][band_num])
            print('[n_cols, n_rows] = ' + str([n_cols, n_rows]))
            direction_flip = direction_flips[band_num]
            by_row_to_plot = connected_deviations_by_row[i][band_num]
            if direction_flip[1]: by_row_to_plot.reverse()
            by_col_to_plot = connected_deviations_by_col[i][band_num]
            if direction_flip[0]: by_col_to_plot.reverse()
            axarr[i][band_num].plot(range(n_rows),
                                    c.smoothList(by_row_to_plot,
                                                 params=[20],
                                                 averaging='mean'),
                                    c='b')
            #print ('n_rows = ' + str(n_rows))
            #print ('connected_deviations_by_row[i][band_num] = ' + str(connected_deviations_by_row[i][band_num]))
            #by_row_fit = optimize.curve_fit(fit_funct, np.array(list(range(n_rows))), connected_deviations_by_row[i][band_num], p0 = [n_rows, 0.5, 0.1], maxfev = 2000)
            by_row_fit = np.polyfit(np.array(list(range(n_rows))),
                                    by_row_to_plot, 2)
            all_fits[i][band_num] = by_row_fit
            print('[a0,a1,a2] = ' + str(by_row_fit))
            axarr[i][band_num].plot(range(n_rows),
                                    np.poly1d(by_row_fit)(np.array(
                                        list(range(n_rows)))),
                                    c='r')
            #axarr[i][band_num].plot(range(n_rows), fit_funct(np.array(list(range(n_rows))), *by_row_fit[0]), c = 'r')
            #axarr[i][band_num].plot(range(n_rows), fit_funct(np.array(list(range(n_rows))), *[n_rows, 0.5, 0.1]), c = 'orange')
            #by_col_plot = axarr[i][band_num].plot(range(len(connected_deviations_by_col[i][band_num])), connected_deviations_by_col[i][band_num], c = 'r')
            axarr[i][band_num].plot(range(n_cols),
                                    c.smoothList(by_col_to_plot,
                                                 params=[20],
                                                 averaging='mean'),
                                    c='g')
            axarr[i][band_num].text(
                0,
                1.0,
                r'Median ADU$\simeq$' +
                str(c.round_to_n(img_medians[i][band_num], 3)),
                fontsize=8.0)
            axarr[i][band_num].text(
                0,
                0.9,
                r'[a2,a1,a0]$\simeq$' +
                str([c.round_to_n(term, 3) for term in by_row_fit]),
                fontsize=8.0)
            axarr[i][band_num].set_ylim(-0.05, 1.15)
            if i == 0: axarr[i][band_num].set_title('PISCO ' + bands[band_num])
    plt.tight_layout()
    if save_cte_fig:
        f.savefig(save_file_name)
    if show_cte_fig:
        plt.show()
    plt.close('all')
    return all_fits