Example #1
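# The snippets below are excerpted from the PINES analysis toolkit and rely
# on its module-level imports. A minimal sketch of the third-party imports
# they assume (toolkit-internal helpers such as pines_dir_check,
# pines_log_reader, short_name_creator, get_source_names,
# iraf_style_photometry, and master_dark_stddev_chooser are assumed to be
# importable from the toolkit itself):
import datetime
import glob
import os
import pdb
import shutil
import time
from pathlib import Path

import matplotlib.pyplot as plt
import natsort
import numpy as np
import pandas as pd
import pysftp
from astropy.io import fits
from astropy.modeling import fitting, models
from astropy.stats import sigma_clip
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D  #Needed for projection='3d' on older matplotlib.
from photutils import CircularAperture, CircularAnnulus
from progressbar import ProgressBar
from scipy.stats import pearsonr
from sklearn import linear_model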
def bad_shift_identifier(target, date, bad_shift_threshold=200):
    pines_path = pines_dir_check()
    log_path = pines_path / ('Logs/' + date + '_log.txt')
    log = pines_log_reader(log_path)
    target_inds = np.where(log['Target'] == target)[0]
    x_shifts = np.array(log['X shift'][target_inds])
    y_shifts = np.array(log['Y shift'][target_inds])

    #Flag shifts whose magnitude exceeds the threshold (shifts can be negative).
    bad_shift_inds = np.where((np.abs(x_shifts) > bad_shift_threshold)
                              | (np.abs(y_shifts) > bad_shift_threshold))[0]
    shift_flags = np.zeros(len(target_inds), dtype=int)
    shift_flags[bad_shift_inds] = 1

    return shift_flags
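# A minimal usage sketch, assuming a local PINES directory tree with the
# night's log already downloaded; the target name and date are hypothetical.
flags = bad_shift_identifier('2MASS J00000000+0000000', '20201001')
print('{} flagged exposures.'.format(flags.sum()))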
Example #2
def average_seeing(log_path):
    try:
        df = pines_log_reader(log_path)
        if 'X seeing' in df.keys():
            seeing = np.array(df['X seeing'], dtype=float)
            seeing = np.array(seeing[np.where(~np.isnan(seeing))], dtype=float)
            seeing = seeing[np.where((seeing > 1.2) & (seeing < 7.0))[0]]
            mean_seeing = np.nanmean(seeing)
            std_seeing = np.nanstd(seeing)
            print('Average seeing for {}: {:1.1f} +/- {:1.1f}"'.format(
                log_path.split('/')[-1].split('_')[0], mean_seeing,
                std_seeing))
            return mean_seeing
    except Exception:
        print('{}: No seeing measurements, inspect manually.'.format(
            log_path.split('/')[-1].split('_')[0]))
        return np.nan
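# Usage sketch (the log path is hypothetical): prints and returns the
# range-clipped mean seeing, in arcseconds, for a night's log.
mean_seeing = average_seeing('/path/to/PINES/Logs/20201001_log.txt')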
Example #3
def variable_aper_phot(target,
                       centroided_sources,
                       multiplicative_factors,
                       an_in=12.,
                       an_out=30.,
                       plots=False,
                       gain=8.21,
                       qe=0.9,
                       plate_scale=0.579):
    '''Performs *variable* aperture photometry on a set of reduced images, with the aperture
    radius set per-image to (seeing * multiplicative factor) / plate scale.'''
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    #Remove any leading/trailing spaces in the column names.
    centroided_sources.columns = centroided_sources.columns.str.lstrip()
    centroided_sources.columns = centroided_sources.columns.str.rstrip()

    #Get list of reduced files for target.
    reduced_path = pines_path / ('Objects/' + short_name + '/reduced')
    reduced_filenames = natsort.natsorted(
        [x.name for x in reduced_path.glob('*red.fits')])
    reduced_files = np.array([reduced_path / i for i in reduced_filenames])

    #Get source names.
    source_names = get_source_names(centroided_sources)

    #Get seeing.
    seeing = np.array(centroided_sources['Seeing'], dtype=float) #Cast to float; seeing can be read in as strings.

    #Loop over multiplicative factors
    for i in range(len(multiplicative_factors)):
        fact = multiplicative_factors[i]
        print(
            'Doing variable aperture photometry for {}, multiplicative seeing factor = {}, inner annulus radius = {} pix, outer annulus radius = {} pix.'
            .format(target, fact, an_in, an_out))

        #Declare a new dataframe to hold the information for all targets for this aperture.
        columns = [
            'Filename', 'Time UT', 'Time JD UTC', 'Time BJD TDB', 'Airmass',
            'Seeing'
        ]
        for j in range(0, len(source_names)):
            columns.append(source_names[j] + ' Flux')
            columns.append(source_names[j] + ' Flux Error')
            columns.append(source_names[j] + ' Background')
            columns.append(source_names[j] + ' Interpolation Flag')

        var_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
        output_filename = pines_path / (
            'Objects/' + short_name + '/aper_phot/' + short_name +
            '_variable_aper_phot_' + str(float(fact)) + '_seeing_factor.csv')

        #Loop over all images.
        pbar = ProgressBar()
        for j in pbar(range(len(reduced_files))):
            data = fits.open(reduced_files[j])[0].data
            #Read in some supporting information.
            log_path = pines_path / (
                'Logs/' + reduced_files[j].name.split('.')[0] + '_log.txt')
            log = pines_log_reader(log_path)
            log_ind = np.where(
                log['Filename'] == reduced_files[j].name.split('_')[0] +
                '.fits')[0][0]

            header = fits.open(reduced_files[j])[0].header
            date_obs = header['DATE-OBS']
            #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds.
            if len(date_obs.split(':')[-1].split('.')[0]) == 3:
                date_obs = date_obs.split(':')[0] + ':' + date_obs.split(
                    ':')[1] + ':' + date_obs.split(':')[-1][1:]

            if date_obs.split(':')[-1] == '60.00':
                date_obs = date_obs.split(':')[0] + ':' + str(
                    int(date_obs.split(':')[1]) + 1) + ':00.00'
            #Keep a try/except clause here in case other unknown DATE-OBS formats pop up.
            try:
                date = datetime.datetime.strptime(date_obs,
                                                  '%Y-%m-%dT%H:%M:%S.%f')
            except ValueError:
                print(
                    'Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.'
                )
                pdb.set_trace()

            #Get the closest date master_dark_stddev image for this exposure time.
            #We'll use this to measure read noise and dark current.
            master_dark_stddev = master_dark_stddev_chooser(
                pines_path / ('Calibrations/Darks/Master Darks Stddev/'),
                header)

            days = date.day + hmsm_to_days(date.hour, date.minute, date.second,
                                           date.microsecond)
            jd = date_to_jd(date.year, date.month, days)
            var_df['Filename'][j] = reduced_files[j].name
            var_df['Time UT'][j] = header['DATE-OBS']
            var_df['Time JD UTC'][j] = jd
            var_df['Time BJD TDB'][j] = jd_utc_to_bjd_tdb(
                jd, header['TELRA'], header['TELDEC'])
            var_df['Airmass'][j] = header['AIRMASS']
            var_df['Seeing'][j] = log['X seeing'][log_ind]

            #If the shift quality has been flagged, skip this image.
            if log['Shift quality flag'].iloc[log_ind] == 1:
                continue

            #Get the source positions in this image.
            positions = []
            for k in range(len(source_names)):
                positions.append(
                    (centroided_sources[source_names[k] + ' Image X'][j],
                     centroided_sources[source_names[k] + ' Image Y'][j]))

            #Create an aperture centered on this position with radius (in pixels) of (seeing[j]*fact)/plate_scale.
            try:
                apertures = CircularAperture(positions,
                                             r=(seeing[j] * fact) /
                                             plate_scale)
            except Exception:
                pdb.set_trace()

            #Create an annulus centered on this position.
            annuli = CircularAnnulus(positions, r_in=an_in, r_out=an_out)

            photometry_tbl = iraf_style_photometry(apertures, annuli,
                                                   data * gain,
                                                   master_dark_stddev * gain,
                                                   header, var_df['Seeing'][j])

            for k in range(len(photometry_tbl)):
                var_df[source_names[k] +
                       ' Flux'][j] = photometry_tbl['flux'][k]
                var_df[source_names[k] +
                       ' Flux Error'][j] = photometry_tbl['flux_error'][k]
                var_df[source_names[k] +
                       ' Background'][j] = photometry_tbl['background'][k]
                var_df[source_names[k] + ' Interpolation Flag'][j] = int(
                    photometry_tbl['interpolation_flag'][k])

        #Write output to file.
        print(
            'Saving multiplicative factor = {} variable aperture photometry output to {}.'
            .format(fact, output_filename))
        print('')
        with open(output_filename, 'w') as f:
            for j in range(len(var_df)):
                #Write in the header.
                if j == 0:
                    f.write(
                        '{:>21s}, {:>22s}, {:>17s}, {:>17s}, {:>7s}, {:>7s}, '.
                        format('Filename', 'Time UT', 'Time JD UTC',
                               'Time BJD TDB', 'Airmass', 'Seeing'))
                    for k in range(len(source_names)):
                        if k != len(source_names) - 1:
                            f.write(
                                '{:>22s}, {:>28s}, {:>28s}, {:>34s}, '.format(
                                    source_names[k] + ' Flux',
                                    source_names[k] + ' Flux Error',
                                    source_names[k] + ' Background',
                                    source_names[k] + ' Interpolation Flag'))
                        else:
                            f.write(
                                '{:>22s}, {:>28s}, {:>28s}, {:>34s}\n'.format(
                                    source_names[k] + ' Flux',
                                    source_names[k] + ' Flux Error',
                                    source_names[k] + ' Background',
                                    source_names[k] + ' Interpolation Flag'))

                #Write in Filename, Time UT, Time JD, Airmass, Seeing values.
                format_string = '{:21s}, {:22s}, {:17.9f}, {:17.9f}, {:7.2f}, {:7.1f}, '
                #If the seeing value for this image is 'nan' (a string), convert it to a float.
                #TODO: Not sure why it's being read in as a string, fix that.
                if type(var_df['Seeing'][j]) == str:
                    var_df['Seeing'][j] = float(var_df['Seeing'][j])

                #Do a try/except clause for writeout, in case it breaks in the future.
                try:
                    f.write(
                        format_string.format(var_df['Filename'][j],
                                             var_df['Time UT'][j],
                                             var_df['Time JD UTC'][j],
                                             var_df['Time BJD TDB'][j],
                                             var_df['Airmass'][j],
                                             var_df['Seeing'][j]))
                except Exception:
                    print(
                        'Writeout failed! Inspect quantities you are trying to write out.'
                    )
                    pdb.set_trace()

                #Write in Flux, Flux Error, and Background values for every source.
                for k in range(len(source_names)):
                    if k != len(source_names) - 1:
                        format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34d}, '
                    else:
                        format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34d}\n'
                    try:
                        f.write(
                            format_string.format(
                                var_df[source_names[k] + ' Flux'][j],
                                var_df[source_names[k] + ' Flux Error'][j],
                                var_df[source_names[k] + ' Background'][j],
                                var_df[source_names[k] +
                                       ' Interpolation Flag'][j]))
                    except (ValueError, TypeError):
                        #The interpolation flag can come through as a float; fall back to a float format code.
                        if k != len(source_names) - 1:
                            format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34f}, '
                        else:
                            format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34f}\n'
                        f.write(
                            format_string.format(
                                var_df[source_names[k] + ' Flux'][j],
                                var_df[source_names[k] + ' Flux Error'][j],
                                var_df[source_names[k] + ' Background'][j],
                                var_df[source_names[k] +
                                       ' Interpolation Flag'][j]))
        print('')
    return
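# The DATE-OBS sanitizing above is repeated verbatim in fixed_aper_phot()
# (Example #4). A sketch of a shared helper (hypothetical, not part of the
# toolkit) that applies the same two corrections:
def sanitize_date_obs(date_obs):
    h, m, s = date_obs.split(':')
    #Mimir headers sometimes write seconds as, e.g., '010.23' instead of '10.23'.
    if len(s.split('.')[0]) == 3:
        s = s[1:]
    #Roll '60.00' seconds over into the next minute.
    if s == '60.00':
        m, s = str(int(m) + 1), '00.00'
    return h + ':' + m + ':' + s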
Example #4
def fixed_aper_phot(target,
                    centroided_sources,
                    ap_radii,
                    an_in=12.,
                    an_out=30.,
                    plots=False,
                    gain=8.21,
                    qe=0.9):
    '''Authors:
		Patrick Tamburo, Boston University, June 2020
	Purpose:
        Performs *fixed* aperture photometry on a set of reduced images, given a dataframe of source positions.
        The iraf_style_photometry, compute_phot_error, aperture_stats_tbl, and calc_aperture_mmm routines are from Varun Bajaj on github:
            https://github.com/spacetelescope/wfc3_photometry/blob/master/photometry_tools/photometry_with_errors.py. 
	Inputs:
        target (str): The target's full 2MASS name.
        centroided_sources (pandas DataFrame): Source names and their x/y positions in every image. 
        ap_radii (list of floats): List of aperture radii in pixels for which aperture photometry will be performed. 
        an_in (float, optional): The inner radius of the annulus used to estimate background, in pixels. 
        an_out (float, optional): The outer radius of the annulus used to estimate background, in pixels. 
        plots (bool, optional): Whether or not to output surface plots. Images output to aper_phot directory within the object directory.
        gain (float, optional): The gain of the detector in e-/ADU.
        qe (float, optional): The quantum efficiency of the detector.
    Outputs:
        Saves aperture photometry csv to PINES_analysis_toolkit/Objects/short_name/aper_phot/ for each aperture.
	TODO:
    '''

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    #Remove any leading/trailing spaces in the column names.
    centroided_sources.columns = centroided_sources.columns.str.lstrip()
    centroided_sources.columns = centroided_sources.columns.str.rstrip()

    #Get list of reduced files for target.
    reduced_path = pines_path / ('Objects/' + short_name + '/reduced')
    reduced_filenames = natsort.natsorted(
        [x.name for x in reduced_path.glob('*red.fits')])
    reduced_files = np.array([reduced_path / i for i in reduced_filenames])

    source_names = get_source_names(centroided_sources)

    #Create output plot directories for each source.
    if plots:
        #Camera angles for surface plots
        azim_angles = np.linspace(0, 360 * 1.5, len(reduced_files)) % 360
        elev_angles = np.zeros(len(azim_angles)) + 25
        for name in source_names:
            #If the folders are already there, delete them.
            source_path = (
                pines_path /
                ('Objects/' + short_name + '/aper_phot/' + name + '/'))
            if source_path.exists():
                shutil.rmtree(source_path)
            #Create folders.
            os.mkdir(source_path)

    #Loop over all aperture radii.
    for ap in ap_radii:
        print(
            'Doing fixed aperture photometry for {}, aperture radius = {:1.1f} pix, inner annulus radius = {} pix, outer annulus radius = {} pix.'
            .format(target, ap, an_in, an_out))

        #Declare a new dataframe to hold the information for all targets for this aperture.
        columns = [
            'Filename', 'Time UT', 'Time JD UTC', 'Time BJD TDB', 'Airmass',
            'Seeing'
        ]
        for i in range(0, len(source_names)):
            columns.append(source_names[i] + ' Flux')
            columns.append(source_names[i] + ' Flux Error')
            columns.append(source_names[i] + ' Background')
            columns.append(source_names[i] + ' Interpolation Flag')

        ap_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
        output_filename = pines_path / (
            'Objects/' + short_name + '/aper_phot/' + short_name +
            '_fixed_aper_phot_{:1.1f}_pix_radius.csv'.format(float(ap)))

        #Loop over all images.
        pbar = ProgressBar()
        for j in pbar(range(len(reduced_files))):
            data = fits.open(reduced_files[j])[0].data

            #Read in some supporting information.
            log_path = pines_path / (
                'Logs/' + reduced_files[j].name.split('.')[0] + '_log.txt')
            log = pines_log_reader(log_path)
            log_ind = np.where(
                log['Filename'] == reduced_files[j].name.split('_')[0] +
                '.fits')[0][0]

            header = fits.open(reduced_files[j])[0].header
            date_obs = header['DATE-OBS']
            #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds.
            if len(date_obs.split(':')[-1].split('.')[0]) == 3:
                date_obs = date_obs.split(':')[0] + ':' + date_obs.split(
                    ':')[1] + ':' + date_obs.split(':')[-1][1:]

            if date_obs.split(':')[-1] == '60.00':
                date_obs = date_obs.split(':')[0] + ':' + str(
                    int(date_obs.split(':')[1]) + 1) + ':00.00'
            #Keep a try/except clause here in case other unknown DATE-OBS formats pop up.
            try:
                date = datetime.datetime.strptime(date_obs,
                                                  '%Y-%m-%dT%H:%M:%S.%f')
            except ValueError:
                print(
                    'Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.'
                )
                pdb.set_trace()

            #Get the closest date master_dark_stddev image for this exposure time.
            #We'll use this to measure read noise and dark current.
            master_dark_stddev = master_dark_stddev_chooser(
                pines_path / ('Calibrations/Darks/Master Darks Stddev/'),
                header)

            days = date.day + hmsm_to_days(date.hour, date.minute, date.second,
                                           date.microsecond)
            jd = date_to_jd(date.year, date.month, days)
            ap_df['Filename'][j] = reduced_files[j].name
            ap_df['Time UT'][j] = header['DATE-OBS']
            ap_df['Time JD UTC'][j] = jd
            ap_df['Time BJD TDB'][j] = jd_utc_to_bjd_tdb(
                jd, header['TELRA'], header['TELDEC']
            )  #Using the telescope ra and dec should be accurate enough for our purposes
            ap_df['Airmass'][j] = header['AIRMASS']
            ap_df['Seeing'][j] = log['X seeing'][log_ind]

            #If the shift quality has been flagged, skip this image.
            if log['Shift quality flag'].iloc[log_ind] == 1:
                continue

            #Get the source positions in this image.
            positions = []
            for i in range(len(source_names)):
                positions.append((float(centroided_sources[source_names[i] +
                                                           ' Image X'][j]),
                                  float(centroided_sources[source_names[i] +
                                                           ' Image Y'][j])))

            #Create an aperture centered on this position with radius = ap.
            try:
                apertures = CircularAperture(positions, r=ap)
            except Exception:
                pdb.set_trace()

            #Create an annulus centered on this position.
            annuli = CircularAnnulus(positions, r_in=an_in, r_out=an_out)

            photometry_tbl = iraf_style_photometry(apertures, annuli,
                                                   data * gain,
                                                   master_dark_stddev * gain,
                                                   header, ap_df['Seeing'][j])

            for i in range(len(photometry_tbl)):
                ap_df[source_names[i] + ' Flux'][j] = photometry_tbl['flux'][i]
                ap_df[source_names[i] +
                      ' Flux Error'][j] = photometry_tbl['flux_error'][i]
                ap_df[source_names[i] +
                      ' Background'][j] = photometry_tbl['background'][i]
                ap_df[source_names[i] + ' Interpolation Flag'][j] = int(
                    photometry_tbl['interpolation_flag'][i])

            #Make surface plots.
            if plots:
                for i in range(len(photometry_tbl)):
                    x_p = photometry_tbl['X'][i]
                    y_p = photometry_tbl['Y'][i]

                    fig = plt.figure()
                    ax = fig.add_subplot(111, projection='3d')
                    xx, yy = np.meshgrid(
                        np.arange(int(x_p) - 10,
                                  int(x_p) + 10 + 1),
                        np.arange(int(y_p) - 10,
                                  int(y_p) + 10 + 1))
                    theta = np.linspace(0, 2 * np.pi, 201)
                    y_circ = ap * np.cos(theta) + y_p
                    x_circ = ap * np.sin(theta) + x_p
                    vmin = np.nanmedian(data[yy, xx])
                    vmax = vmin + 2.5 * np.nanstd(data[yy, xx])
                    ax.plot_surface(xx,
                                    yy,
                                    data[yy, xx],
                                    cmap=cm.viridis,
                                    alpha=0.8,
                                    rstride=1,
                                    cstride=1,
                                    edgecolor='k',
                                    lw=0.2,
                                    vmin=vmin,
                                    vmax=vmax)
                    current_z = ax.get_zlim()
                    ax.set_zlim(current_z[0] - 150, current_z[1])
                    current_z = ax.get_zlim()
                    cset = ax.contourf(xx,
                                       yy,
                                       data[yy, xx],
                                       zdir='z',
                                       offset=current_z[0],
                                       cmap=cm.viridis)
                    ax.plot(x_circ,
                            y_circ,
                            np.zeros(len(x_circ)) + current_z[0],
                            color='r',
                            lw=2,
                            zorder=100)
                    ax.set_xlabel('X')
                    ax.set_ylabel('Y')
                    ax.set_zlabel('Counts')

                    ax.set_title('SURFACE DIAGNOSTIC PLOT, Ap. = ' +
                                 str(ap) + '\n' + source_names[i] + ', ' +
                                 reduced_files[j].name + ' (image ' +
                                 str(j + 1) + ' of ' +
                                 str(len(reduced_files)) + ')')
                    ax.view_init(elev=elev_angles[j], azim=azim_angles[j])
                    plot_output_path = (
                        pines_path /
                        ('Objects/' + short_name + '/aper_phot/' +
                         source_names[i] + '/' + str(j).zfill(4) + '.jpg'))
                    plt.tight_layout()
                    plt.savefig(plot_output_path)
                    plt.close()

        #Write output to file.
        print('Saving ap = {:1.1f} aperture photometry output to {}.'.format(
            ap, output_filename))
        print('')
        with open(output_filename, 'w') as f:
            for j in range(len(ap_df)):
                #Write in the header.
                if j == 0:
                    f.write(
                        '{:>21s}, {:>22s}, {:>17s}, {:>17s}, {:>7s}, {:>7s}, '.
                        format('Filename', 'Time UT', 'Time JD UTC',
                               'Time BJD TDB', 'Airmass', 'Seeing'))
                    for i in range(len(source_names)):
                        if i != len(source_names) - 1:
                            f.write(
                                '{:>22s}, {:>28s}, {:>28s}, {:>34s}, '.format(
                                    source_names[i] + ' Flux',
                                    source_names[i] + ' Flux Error',
                                    source_names[i] + ' Background',
                                    source_names[i] + ' Interpolation Flag'))
                        else:
                            f.write(
                                '{:>22s}, {:>28s}, {:>28s}, {:>34s}\n'.format(
                                    source_names[i] + ' Flux',
                                    source_names[i] + ' Flux Error',
                                    source_names[i] + ' Background',
                                    source_names[i] + ' Interpolation Flag'))

                #Write in Filename, Time UT, Time JD, Airmass, Seeing values.
                format_string = '{:21s}, {:22s}, {:17.9f}, {:17.9f}, {:7.2f}, {:7.1f}, '
                #If the seeing value for this image is 'nan' (a string), convert it to a float.
                #TODO: Not sure why it's being read in as a string, fix that.
                if type(ap_df['Seeing'][j]) == str:
                    ap_df['Seeing'][j] = float(ap_df['Seeing'][j])

                #Do a try/except clause for writeout, in case it breaks in the future.
                try:
                    f.write(
                        format_string.format(ap_df['Filename'][j],
                                             ap_df['Time UT'][j],
                                             ap_df['Time JD UTC'][j],
                                             ap_df['Time BJD TDB'][j],
                                             ap_df['Airmass'][j],
                                             ap_df['Seeing'][j]))
                except Exception:
                    print(
                        'Writeout failed! Inspect quantities you are trying to write out.'
                    )
                    pdb.set_trace()

                #Write in Flux, Flux Error, and Background values for every source.
                for i in range(len(source_names)):
                    if i != len(source_names) - 1:
                        format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34d}, '
                    else:
                        format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34d}\n'
                    try:
                        f.write(
                            format_string.format(
                                ap_df[source_names[i] + ' Flux'][j],
                                ap_df[source_names[i] + ' Flux Error'][j],
                                ap_df[source_names[i] + ' Background'][j],
                                ap_df[source_names[i] +
                                      ' Interpolation Flag'][j]))
                    except (ValueError, TypeError):
                        #The interpolation flag can come through as a float; fall back to a float format code.
                        if i != len(source_names) - 1:
                            format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34f}, '
                        else:
                            format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34f}\n'
                        f.write(
                            format_string.format(
                                ap_df[source_names[i] + ' Flux'][j],
                                ap_df[source_names[i] + ' Flux Error'][j],
                                ap_df[source_names[i] + ' Background'][j],
                                ap_df[source_names[i] +
                                      ' Interpolation Flag'][j]))

    print('')
    return
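# A minimal usage sketch, assuming centroider output has already been saved
# for this target (the CSV name, target, and radii are hypothetical):
target = '2MASS J00000000+0000000'
centroided_sources = pd.read_csv('centroided_sources.csv')
fixed_aper_phot(target, centroided_sources, ap_radii=[4., 5., 6.])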
Example #5
def dark(date, exptime, dark_start=0, dark_stop=0, upload=False, delete_raw=False, sftp=''):
    clip_lvl = 3 #The value to use for sigma clipping. 
    pines_path = pines_dir_check()
    np.seterr(invalid='ignore') #Suppress some warnings we don't care about in median combining. 
    exptime = float(exptime)
    plt.ion() #Turn on interactive plotting.

    t1 = time.time()
    #If an sftp connection to the PINES server was passed, download the dark data. 
    if type(sftp) == pysftp.Connection:
        sftp.chdir('/data/raw/mimir')
        run_list = sftp.listdir()
        data_path = '' #Initialize to check that it gets filled. 
        for i in range(len(run_list)):
            sftp.chdir(run_list[i])    
            date_list = sftp.listdir()
            if date in date_list:
                data_path = sftp.getcwd()
                print('{} directory found in pines.bu.edu:{}/\n'.format(date,data_path))
                sftp.chdir(date)
                break
            sftp.chdir('..')
    
        if data_path == '':
            print('ERROR: specified date not found in any run on pines.bu.edu:data/raw/mimir/\n')
            return
        else:
            #If the file start/stop numbers are specified, grab those files.
            if (dark_stop != 0):
                files_in_dir = sftp.listdir()
                dark_filenums = np.arange(dark_start, dark_stop+1, step=1)
                dark_files = []

                #Add the darks to the file list. 
                for i in range(len(dark_filenums)):
                    file_num = dark_filenums[i]
                    #Generate the zero-padded filename, e.g. 20201001.012.fits.
                    file_name = '{}.{:03d}.fits'.format(date, file_num)
                    #Check if the file name is in the directory, and if so, append it to the list of dark files. 
                    if file_name in files_in_dir:
                        dark_files.append(file_name)
                    else:
                        print('{} not found in directory, skipping.'.format(file_name))        
            else:
                #Otherwise, find the files automatically using the night's log. 
                log_path = pines_path/'Logs'
                #Check if you already have the log for this date, if not, download it. 
                #Download from the /data/logs/ directory on PINES.
                if not (log_path/(date+'_log.txt')).exists():
                    print('Downloading {}_log.txt to {}\n'.format(date,log_path))
                    sftp.get('/data/logs/'+date+'_log.txt',log_path/(date+'_log.txt'))
                
                #Read in the log from this date.
                log = pines_log_reader(log_path/(date+'_log.txt'))

                #Identify dark files. 
                dark_inds = np.where((log['Target'] == 'Dark') & (log['Filename'] != 'test.fits') & (log['Exptime'] == exptime))[0]
                dark_files = natsort.natsorted(list(set(log['Filename'][dark_inds]))) #Set guarantees we only grab the unique files that have been identified as darks, in case the log bugged out. 
            print('Found {} dark files.'.format(len(dark_files)))
            print('')
            #Download data to the Calibrations/Darks/Raw/ directory. 
            dark_path = pines_path/('Calibrations/Darks/Raw')
            for j in range(len(dark_files)):
                if not (dark_path/dark_files[j]).exists():
                    print('Downloading {} to {}, {} of {}.'.format(dark_files[j], dark_path, j+1, len(dark_files)))
                    sftp.get(dark_files[j], str(dark_path/dark_files[j]))
                else:
                    print('{} already in {}, skipping download.'.format(dark_files[j],dark_path))
            print('')

    #If no sftp was passed, search for files on disk. 
    else:
        dark_path = pines_path/('Calibrations/Darks/Raw')
        all_dark_files = natsort.natsorted(list(Path(dark_path).rglob(date+'*.fits')))
        dark_files = []
        for file in all_dark_files:
            if fits.open(file)[0].header['EXPTIME'] == exptime:
                dark_files.append(file)

    num_images = len(dark_files)

    if num_images == 0:
        raise RuntimeError('No raw dark files found on disk with date '+date+'!')

    print('Reading in ', num_images,' dark images.')
    dark_cube_raw = np.zeros([len(dark_files),1024,1024]) 
    print('')
    print('Dark frame information')
    print('-------------------------------------------------')
    print('ID   Mean               Stddev         Max    Min')
    print('-------------------------------------------------')
    for j in range(len(dark_files)):
        image_data = fits.open(dark_path/dark_files[j])[0].data[0:1024,:] #This line trims off the top two rows of the image, which are overscan.
        header = fits.open(dark_path/dark_files[j])[0].header
        if header['EXPTIME'] != exptime:
            print('ERROR: {} has an exposure time different from the requested exptime.'.format(dark_files[j]))
            return
        dark_cube_raw[j,:,:] = image_data 
        print('{:<5}{:<19.3f}{:<15.3f}{:<7.1f}{:<7.1f}'.format(j+1, np.mean(image_data), np.std(image_data), np.amax(image_data), np.amin(image_data)))

    cube_shape = np.shape(dark_cube_raw)

    master_dark = np.zeros((cube_shape[1], cube_shape[2]), dtype='float32')
    master_dark_stddev = np.zeros((cube_shape[1], cube_shape[2]), dtype='float32')

    print('')
    print('Combining the darks')
    print('......')

    pbar = ProgressBar()
    #For each pixel, calculate the mean, median, and standard deviation "through the stack" of darks.
    for x in pbar(range(cube_shape[1])):
        for y in range(cube_shape[2]):
            through_stack = dark_cube_raw[:,y,x]
            through_stack_median = np.nanmedian(through_stack)
            through_stack_stddev = np.nanstd(through_stack)

            #Flag values that are > clip_lvl-sigma discrepant from the median.
            good_inds = np.where((abs(through_stack - through_stack_median) / through_stack_stddev <= clip_lvl))[0]

            #Calculate the sigma-clipped mean and sigma-clipped stddev using good_inds. 
            s_c_mean = np.nanmean(through_stack[good_inds])
            s_c_stddev = np.nanstd(through_stack[good_inds])

            #Store the sigma-clipped mean as the master dark value for this pixel. 
            master_dark[y,x] = s_c_mean
            master_dark_stddev[y,x] = s_c_stddev
    

    np.seterr(invalid='warn') #Turn invalid warnings back on.

    output_filename = pines_path/('Calibrations/Darks/Master Darks/master_dark_'+str(exptime)+'_s_'+date+'.fits')
    
    #Add some header keywords detailing the master_dark creation process. 
    hdu = fits.PrimaryHDU(master_dark)
    hdu.header['HIERARCH DATE CREATED'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')

    #Now save to a file on your local machine. 
    hdu.writeto(output_filename,overwrite=True)
    print('Wrote to {}!'.format(output_filename))
    
    #Upload the master dark to PINES server.
    if upload:
        print('Uploading to pines.bu.edu...')
        sftp.chdir('..')
        sftp.chdir('..')
        sftp.chdir('..')
        sftp.chdir('..')
        sftp.chdir('calibrations/Darks')
        upload_name = 'master_dark_'+str(exptime)+'_s_'+date+'.fits'
        sftp.put(output_filename,upload_name)
        print('Uploaded {} to pines.bu.edu:data/calibrations/Darks/!'.format(upload_name))

        sftp.chdir('..')

    #Do the same thing for the sigma-clipped standard deviation image. 

    output_filename = pines_path/('Calibrations/Darks/Master Darks Stddev/master_dark_stddev_'+str(exptime)+'_s_'+date+'.fits')
    
    #Add some header keywords detailing the master_dark creation process. 
    hdu = fits.PrimaryHDU(master_dark_stddev)
    hdu.header['HIERARCH DATE CREATED'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')

    #Now save to a file on your local machine. 
    print('')

    hdu.writeto(output_filename,overwrite=True)
    print('Wrote to {}!'.format(output_filename))
    
    #Upload the master dark to PINES server.
    if upload:
        print('Uploading to pines.bu.edu...')
        sftp.chdir('Darks Stddev')
        upload_name = 'master_dark_stddev_'+str(exptime)+'_s_'+date+'.fits'
        sftp.put(output_filename,upload_name)
        print('Uploaded {} to pines.bu.edu:data/calibrations/Darks Stddev/!'.format(upload_name))

    print('')
    #Delete raw dark images from disk.
    if delete_raw:
        files_to_delete = glob.glob(str(dark_path/'*.fits'))
        for j in range(len(files_to_delete)):
            os.remove(files_to_delete[j])

    print('dark runtime: ', np.round((time.time()-t1)/60,1), ' minutes.')
    print('Done!')
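# The per-pixel loop above performs one pass of 3-sigma clipping "through
# the stack". A vectorized numpy sketch of the same statistics (assumes the
# whole cube fits in memory; zero-variance pixels divide by zero, which the
# np.seterr call above silences):
def sigma_clipped_stack_stats(cube, clip_lvl=3):
    med = np.nanmedian(cube, axis=0)
    std = np.nanstd(cube, axis=0)
    #NaN out values more than clip_lvl sigma from the per-pixel median, then recompute.
    clipped = np.where(np.abs(cube - med) / std > clip_lvl, np.nan, cube)
    return np.nanmean(clipped, axis=0), np.nanstd(clipped, axis=0)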
Example #6
def log_updater(date, sftp, shift_tolerance=30, upload=False):
    '''
    Authors:
		Patrick Tamburo, Boston University, January 2021
	Purpose:
        Updates x_shift and y_shift measurements from a PINES log. These shifts are measured using *full* resolution images, while at the telescope,
        we use *half* resolution images (to save time between exposures). By measuring on full-res images, we get more accurate shifts, which allows 
        us to determine centroids more easily.
	Inputs:
        date (str): the UT date of the log whose shifts you want to update in YYYYMMDD format, e.g. '20151110'
        sftp (pysftp connection): sftp connection to the PINES server
        shift_tolerance (float): the maximum distance an x/y shift can be before shifts will be flagged as poor quality. 
        upload (bool): whether or not to push the updated log to the PINES server (only admins can do this)
    Outputs:
		Writes updated log file to disk. 
	TODO:
        Re-measure seeing?
    FIXME:
    '''
    def tie_sigma(model):
        return model.x_stddev_1

    def guide_star_seeing(subframe):
        # subframe = subframe - np.median(subframe)
        subframe = subframe - np.percentile(subframe,5)
        sub_frame_l = int(np.shape(subframe)[0])
        y, x = np.mgrid[:sub_frame_l, :sub_frame_l]

        # Fit with constant, bounds, tied x and y sigmas and outlier rejection:
        gaussian_init = models.Const2D(0.0) + models.Gaussian2D(
            subframe[int(sub_frame_l/2), int(sub_frame_l/2)],
            int(sub_frame_l/2), int(sub_frame_l/2), 8/2.355, 8/2.355, 0)
        gaussian_init.x_stddev_1.min = 1.0/2.355
        gaussian_init.x_stddev_1.max = 20.0/2.355
        gaussian_init.y_stddev_1.min = 1.0/2.355
        gaussian_init.y_stddev_1.max = 20.0/2.355
        gaussian_init.y_stddev_1.tied = tie_sigma
        gaussian_init.theta_1.fixed = True
        fit_gauss = fitting.FittingWithOutlierRemoval(fitting.LevMarLSQFitter(),sigma_clip,niter=3,sigma=3.0)
        # gaussian, mask = fit_gauss(gaussian_init, x, y, subframe)
        gain = 8.21 #e per ADU
        read_noise = 2.43 #ADU
        weights = gain / np.sqrt(np.absolute(subframe)*gain + (read_noise*gain)**2) #1/sigma for each pixel
        gaussian, mask = fit_gauss(gaussian_init, x, y, subframe, weights)
        fwhm_x = 2.355*gaussian.x_stddev_1.value
        fwhm_y = 2.355*gaussian.y_stddev_1.value

        x_seeing = fwhm_x * 0.579
        y_seeing = fwhm_y * 0.579
        return(x_seeing,y_seeing)

    pines_path = pines_dir_check()
    log_path = pines_path/('Logs/'+date+'_log.txt')

    #Begin by checking filenames, making sure they're in sequential order, and that there is only one entry for each. 
    log_out_of_order_fixer(log_path, sftp)
    
    log = pines_log_reader(log_path) #Get telescope log shifts.
    myfile = open(log_path, 'r')
    lines = myfile.readlines()
    myfile.close()

    #Now loop over all files in the log, measure shifts in each file and update the line in the log. 
    for i in range(len(log)):
        if (log['Target'][i].lower() not in ['flat', 'skyflat', 'supersky', 'dark', 'bias', 'dummy']) and (log['Post-processing flag'][i] != 1):
            filename = log['Filename'][i].split('.fits')[0]+'_red.fits'
            target = log['Target'][i]
            short_name = short_name_creator(target)
            image_path = pines_path/('Objects/'+short_name+'/reduced/'+filename)

            #Figure out which file you're looking at and its position in the log. 
            log_ind = np.where(log['Filename'] == filename.split('_')[0]+'.fits')[0][0]

            #Measure the shifts and get positions of targets.
            (measured_x_shift, measured_y_shift, source_x, source_y, check_image) = shift_measurer(target, filename, sftp)

            if (abs(measured_x_shift) > shift_tolerance) or (abs(measured_y_shift) > shift_tolerance):
                print('Shift greater than {} pixels measured for {} in {}.'.format(shift_tolerance, short_name, image_path.name))
                print('Inspect manually.')
                shift_quality_flag = 1
            elif np.isnan(measured_x_shift) or np.isnan(measured_y_shift):
                raise RuntimeError('Found nans for shifts!')
            else:
                shift_quality_flag = 0
            
            
            #Measure the seeing. 
            guide_star_cut = np.where((source_x > 50) & (source_x < 975) & (source_y > 50) & (source_y < 975))[0]
            if len(guide_star_cut) != 0:
                x_seeing_array = []
                y_seeing_array = []
                for guide_star_ind in guide_star_cut:
                    guide_star_x_int = int(source_x[guide_star_ind])
                    guide_star_y_int = int(source_y[guide_star_ind])
                    guide_star_subframe = check_image[guide_star_y_int-15:guide_star_y_int+15,guide_star_x_int-15:guide_star_x_int+15]
                    (x_seeing,y_seeing) = guide_star_seeing(guide_star_subframe)
                    #Cut unrealistic values/saturated stars. 
                    if x_seeing > 1.2 and x_seeing < 7.0:
                        x_seeing_array.append(x_seeing)
                        y_seeing_array.append(y_seeing)
                x_seeing = np.nanmedian(x_seeing_array)
                y_seeing = np.nanmedian(y_seeing_array)
            else:
                #Default to the average PINES value if no sources were found for guiding. 
                x_seeing = 2.6
                y_seeing = 2.6

            print('Log line {} of {}.'.format(i+1, len(log)))
            print('Measured x shift: {:4.1f}, measured y shift: {:4.1f}'.format(measured_x_shift, measured_y_shift))
            print('Measured seeing: {:4.1f}'.format(x_seeing))
            print('')

            #Overwrite the telescope's logged shifts and seeing values with the new measurements. 
            log['X shift'][log_ind] = str(np.round(measured_x_shift,1))
            log['Y shift'][log_ind] = str(np.round(measured_y_shift,1))
            log['X seeing'][log_ind] = str(np.round(x_seeing, 1))
            log['Y seeing'][log_ind] = str(np.round(y_seeing, 1))

            #Grab entries for log line.
            filename = log['Filename'][log_ind]
            log_date = log['Date'][log_ind]
            target_name = log['Target'][log_ind]
            filter_name = log['Filt.'][log_ind]
            exptime = log['Exptime'][log_ind]
            airmass = log['Airmass'][log_ind]
            x_shift = log['X shift'][log_ind]
            y_shift = log['Y shift'][log_ind]
            x_seeing = log['X seeing'][log_ind]
            y_seeing = log['Y seeing'][log_ind]
            post_processing_flag = 1
            #Generate line of log text following the PINES telescope log format. 
            log_text = pines_logging(filename, log_date, target_name, filter_name, exptime, airmass, x_shift, y_shift, x_seeing, y_seeing, post_processing_flag, shift_quality_flag)

            #Overwrite the line with the new shifts.
            line_ind = log_ind + 1
            lines[line_ind] = log_text

            #Update the log on disk.
            with open(log_path, 'w') as f:
                for line in lines:
                    f.write(line)

        elif (log['Post-processing flag'][i] == 1):
            print('File already post-processed, skipping. {} of {}'.format(i+1, len(log)))
        else:
            print('File not a science target, skipping. {} of {}.'.format(i+1, len(log)))

    if upload:
        sftp.chdir('/data/logs/')
        print('Uploading to /data/logs/{}_log.txt.'.format(date))
        sftp.put(log_path,date+'_log.txt')
    return 
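# The seeing values written above come from guide_star_seeing(): a tied-sigma
# 2D Gaussian fit, converted as seeing = 2.355 * sigma_pix * 0.579, i.e. the
# FWHM in pixels times Mimir's plate scale in "/pix. A quick numerical check:
sigma_pix = 2.0
print('{:.2f}"'.format(2.355 * sigma_pix * 0.579))  #-> 2.73"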
Example #7
def corr_all_sources_plot(target):
    print('Generating corrected flux plots for all sources...\n')
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    analysis_path = pines_path / ('Objects/' + short_name + '/analysis/')
    photometry_path = pines_path / ('Objects/' + short_name + '/aper_phot/')

    #Grab the data for the best aperture.
    if os.path.exists(analysis_path / ('optimal_aperture.txt')):
        with open(analysis_path / ('optimal_aperture.txt'), 'r') as f:
            best_ap = f.readlines()[0].split(':  ')[1].split('\n')[0]
            phot_type = best_ap.split('_')[1]
            if phot_type == 'fixed':
                s = 'r'
            elif phot_type == 'variable':
                s = 'f'
    else:
        raise RuntimeError(
            'No optimal_aperture.txt file found for {} in {}.'.format(
                target, analysis_path))

    filename = short_name.replace(
        ' ', '') + '_' + phot_type + '_aper_phot_' + s + '=' + best_ap.split(
            '_')[0] + '_nightly_weighted_lc.csv'
    best_phot_path = analysis_path / ('aper_phot_analysis/' + best_ap + '/')
    output_path = best_phot_path / ('corr_ref_plots/')
    if not os.path.exists(output_path):
        os.mkdir(output_path)

    data = pines_log_reader(best_phot_path / filename)
    ref_names = get_source_names(data)[1:]
    num_refs = len(ref_names)

    times = np.array(data['Time BJD TDB'])
    night_inds = night_splitter(times)
    num_nights = len(night_inds)

    cmap = plt.get_cmap('viridis')
    for i in range(num_refs + 1):
        fig, ax = plt.subplots(nrows=1,
                               ncols=num_nights,
                               figsize=(17, 5),
                               sharey=True)
        plt.subplots_adjust(left=0.07, wspace=0.05, top=0.92, bottom=0.17)

        if i == 0:
            color = cmap(0)
            flux = np.array(data[short_name + ' Corrected Flux'],
                            dtype='float64')
            flux_err = np.array(data[short_name + ' Corrected Flux Error'],
                                dtype='float64')
            title = short_name
            output_name = short_name + '_corrected_flux.png'

        else:
            color = cmap(95)
            ref_name = ref_names[i - 1]
            flux = np.array(data[ref_name + ' Corrected Flux'],
                            dtype='float64')
            flux_err = np.array(data[ref_name + ' Corrected Flux Error'],
                                dtype='float64')
            num = str(i).zfill(2)
            output_name = 'reference_' + num + '_corrected_flux.png'

        for j in range(num_nights):
            if i != 0:
                weight = np.array(data[ref_name +
                                       ' ALC Weight'])[night_inds[j]][0]
                title = ref_name.replace(
                    'erence', '.') + ', weight = {:1.3f}'.format(weight)

            if j == 0:
                ax[j].set_ylabel('Normalized Flux', fontsize=20)

            inds = night_inds[j]

            block_inds = block_splitter(times[inds])
            binned_time = []
            binned_flux = []
            binned_err = []
            for k in range(len(block_inds)):
                binned_time.append(np.nanmean(times[inds][block_inds[k]]))
                binned_flux.append(np.nanmean(flux[inds][block_inds[k]]))
                binned_err.append(
                    np.nanstd(flux[inds][block_inds[k]]) /
                    np.sqrt(len(block_inds[k])))

            ax[j].plot(times[inds],
                       flux[inds],
                       color=color,
                       linestyle='',
                       marker='.',
                       alpha=0.25)
            ax[j].errorbar(binned_time,
                           binned_flux,
                           binned_err,
                           color=color,
                           linestyle='',
                           marker='o',
                           ms=10,
                           mfc='none',
                           mew=2)
            ax[j].set_xlabel('Time (BJD$_{TDB}$)', fontsize=20)
            ax[j].tick_params(labelsize=16)
            ax[j].axhline(1, color='k', alpha=0.7, lw=1, zorder=0)
            ax[j].grid(alpha=0.2)
            ax[j].set_title(title, fontsize=20, color=color)
            ax[j].set_ylim(0.9, 1.1)

        plt.savefig(output_path / output_name, dpi=300)
        plt.close()
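# The binned error bars above are the standard error of the mean within each
# block, sigma / sqrt(N). A quick check with synthetic data:
rng = np.random.default_rng(0)
block = rng.normal(1.0, 0.01, 50)
print(np.nanstd(block) / np.sqrt(len(block)))  #~0.01 / sqrt(50) ~ 0.0014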
Example #8
        pix_shift_string = pix_shift_string.replace(' ', '')
        ang_shift_string = '(' + str(ra_shift) + '",' + str(dec_shift) + '")'
        ang_shift_string = ang_shift_string.replace(' ', '')
        seeing_string = '(' + str(x_seeing) + '",' + str(y_seeing) + '")'
        print('Measured (X shift, Y shift):    ', pix_shift_string)
        print('Measured (RA shift, Dec shift): ', ang_shift_string)
        print('Measured (X seeing, Y seeing):  ', seeing_string)

    return (x_shift, y_shift, x_seeing, y_seeing)


log_path = '~/Downloads/20200630_log.txt'
data_path = '/Users/tamburo/Downloads/20200630/'
new_log_path = '/Users/tamburo/Downloads/20200630_log_fixed.txt'

df = pines_log_reader(log_path)
files = natsort.natsorted(glob.glob(data_path + '*.fits'))

with open(new_log_path, 'w') as f:
    for i in range(len(files)):
        if i == 0:
            header_text = '#{:<19}, {:<20}, {:<30}, {:<6}, {:<8}, {:<8}, {:<8}, {:<8}, {:<8}, {:<8}\n'.format(
                'Filename', 'Date', 'Target', 'Filt.', 'Exptime', 'Airmass',
                'X shift', 'Y shift', 'X seeing', 'Y seeing')
            f.write(header_text)
        header = fits.open(files[i])[0].header
        filename = files[i].split('/')[-1]
        date = header['DATE']
        obj = header['OBJECT']
        if (obj == 'dome_lamp_on') or (obj == 'dome_lamp_off'):
            target = 'Flat'
Example #9
def get_reduced_science_files(sftp, target_name):
    t1 = time.time()

    #Get the user's pines_analysis_toolkit path
    pines_path = pines_dir_check()

    #Get the target's short name and set up a data directory, if necessary.
    short_name = short_name_creator(target_name)

    if not os.path.exists(pines_path / ('Objects/' + short_name)):
        object_directory_creator(pines_path, short_name)

    reduced_data_path = pines_path / ('Objects/' + short_name + '/reduced/')
    dark_path = pines_path / ('Calibrations/Darks')
    flats_path = pines_path / ('Calibrations/Flats/Domeflats')

    #Grab an up-to-date copy of the master log, which will be used to find images.
    get_master_log(sftp, pines_path)

    #Let's grab all of the available calibration data on pines.bu.edu.
    get_calibrations(sftp, pines_path)
    print('Calibrations up to date!')
    time.sleep(2)

    #Read in the master target list and find images of the requested target.
    df = pines_log_reader(pines_path / ('Logs/master_log.txt'))
    targ_inds = np.where(np.array(df['Target']) == target_name)[0]
    file_names = np.array(df['Filename'])[targ_inds]
    print('')

    print('Searching pines.bu.edu for reduced science files for {}.'.format(
        target_name))
    print('')

    #Get list of dates that data are from, in chronological order.
    dates = [
        int(i) for i in list(
            set([
                str.split(file_names[i], '.')[0]
                for i in range(len(file_names))
            ]))
    ]
    dates = np.array(dates)[np.argsort(dates)]
    comp_dates = np.array(
        [int(file_names[i].split('.')[0]) for i in range(len(file_names))])
    print('Found ', len(file_names), ' raw files for ', target_name, ' on ',
          len(dates), ' dates.')

    date_holder = [[] for x in range(len(dates))]
    for i in range(len(dates)):
        date = dates[i]
        print(date, ': ', len(np.where(comp_dates == date)[0]), ' files.')
        date_holder[i].extend(file_names[np.where(comp_dates == date)[0]])
        time.sleep(0.1)

    dates = [str(i) for i in dates]
    #Now download the identified data.
    sftp.chdir('/data/reduced/mimir')
    run_dirs = sftp.listdir()
    file_num = 1
    for i in range(len(run_dirs)):
        sftp.chdir(run_dirs[i])
        night_dirs = sftp.listdir()
        for j in range(len(night_dirs)):
            night_check = night_dirs[j]
            if night_check in dates:
                sftp.chdir(night_check)
                date_holder_ind = np.where(
                    np.array(dates) == night_check)[0][0]
                files = date_holder[date_holder_ind]
                files_in_path = sftp.listdir()
                for k in range(len(files)):
                    download_filename = files[k].split(
                        '.fits')[0] + '_red.fits'
                    if not (reduced_data_path / download_filename).exists():
                        if download_filename in files_in_path:
                            print('Downloading to {}, {} of {}'.format(
                                reduced_data_path / download_filename,
                                file_num, len(file_names)))
                            sftp.get(download_filename,
                                     reduced_data_path / download_filename)
                        else:
                            print(
                                'A reduced image does not yet exist for {}, ask an administrator to make one!'
                                .format(files[k]))
                    else:
                        print('{} already in {}, skipping.'.format(
                            download_filename, reduced_data_path))
                    file_num += 1
                sftp.chdir('..')
        sftp.chdir('..')

    print('')
    #Now grab the logs.
    sftp.chdir('/data/logs')
    for i in range(len(dates)):
        log_name = dates[i] + '_log.txt'
        print('Downloading {} to {}.'.format(log_name, pines_path /
                                             ('Logs/' + log_name)))
        sftp.get(log_name, pines_path / ('Logs/' + log_name))

    print('')
    print('get_reduced_science_files runtime: ',
          np.round((time.time() - t1) / 60, 1), ' minutes.')
    print('Done!')
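
A minimal usage sketch for the downloader above. The call signature is an assumption (only the tail of the function is shown here), and the host login is a placeholder:

    # Hypothetical driver, assuming get_reduced_science_files(sftp, target_name)
    # takes an open pysftp connection and the target's long 2MASS name.
    import pysftp
    with pysftp.Connection('pines.bu.edu', username='user', password='pass') as sftp:
        get_reduced_science_files(sftp, '2MASS J12345678+1234567')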
Example #10
def simple_lightcurve(target,
                      sources,
                      centroided_sources,
                      phot_type='aper',
                      ref_set_choice=[],
                      plot_mode='combined'):
    '''Authors:
        Patrick Tamburo, Boston University, June 2020
    Purpose: 
            Makes a "simple" lightcurve with each reference star weighted equally when creating the artificial comparison lightcurve. 
    Inputs:
        target (str): the target's long name (e.g. '2MASS J12345678+1234567').
        sources (pandas DataFrame): DataFrame with source names, and x/y positions in the source_detect_image. Output from ref_star_chooser. 
        centroided_sources (pandas DataFrame): Dataframe with source positions in every image. Output from centroider. 
        phot_type (str): 'aper' or 'psf'. Whether to use aperture or PSF photometry. NOTE: PSF photometry is not currently implemented.
        ref_set_choice (list): list of reference IDs to use to make the lightcurve, in case you want to exclude any. 
        plot_mode (str): 'combined' or 'separate'. 'combined' plots all nights in one figure, while 'separate' plots nights in separate figures. 
    Outputs:
        Saves lightcurve plots to target's analysis directory. 
    TODO:
        PSF photometry 
        Regression? 
    '''
    print('\nRunning simple_lightcurve().\n')
    def regression(flux, seeing, airmass, corr_significance=1e-5):
        #Looks at correlations of seeing and airmass with the target flux.
        #Variables that are significantly correlated are used in a linear regression to de-correlate the target flux.

        #Use the seeing in the regression if it's significantly correlated
        if pearsonr(seeing, flux)[1] < corr_significance:
            use_seeing = True
        else:
            use_seeing = False

        #Same thing for airmass
        if pearsonr(airmass, flux)[1] < corr_significance:
            use_airmass = True
        else:
            use_airmass = False

        #Now set up the linear regression.
        regr = linear_model.LinearRegression()

        regress_dict = {}

        #Add seeing, background, and airmass, if applicable.
        if use_seeing:
            key = 'seeing'
            regress_dict[key] = seeing

        if use_airmass:
            key = 'airmass'
            regress_dict[key] = airmass

        #Finally, add target flux
        regress_dict['flux'] = flux

        #Get list of keys ('flux' is last and is the dependent variable).
        keylist = list(regress_dict.keys())

        #Create data frame of regressors.
        df = DataFrame(regress_dict, columns=keylist)
        x = df[keylist[:-1]]
        y = df['flux']

        if np.shape(x)[1] > 0:
            regr.fit(x, y)

            #Now, define the model.
            linear_regression_model = regr.intercept_
            i = 0

            #Add in the other regressors, as necessary. Can't think of a way of doing this generally, just use a bunch of ifs.
            if (use_seeing) and (use_airmass):
                linear_regression_model = linear_regression_model + regr.coef_[
                    0] * seeing + regr.coef_[1] * airmass
            if (use_seeing) and not (use_airmass):
                linear_regression_model = linear_regression_model + regr.coef_[
                    0] * seeing
            if not (use_seeing) and (use_airmass):
                linear_regression_model = linear_regression_model + regr.coef_[
                    0] * airmass
            #Divide out the fit.
            corrected_flux = flux / linear_regression_model
        else:
            #print('No regressors used.')
            corrected_flux = flux
        return corrected_flux
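
    # Worked illustration of the correlation gate above (a sketch, not pipeline code):
    # scipy's pearsonr returns (r, p-value), and a regressor enters the fit only when
    # its p-value beats corr_significance. E.g. pearsonr([1, 2, 3, 4], [2, 4, 6, 8])
    # returns (1.0, ~0.0), so a perfectly correlated seeing vector would always be kept.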

    #plt.ion()
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    outlier_tolerance = 0.2  #If a reference has > outlier_tolerance of its values beyond the sigma-clipping threshold, mark it as bad.
    centroided_sources.columns = centroided_sources.keys().str.strip()

    #Get list of photometry files for this target.
    photometry_path = pines_path / ('Objects/' + short_name + '/' + phot_type +
                                    '_phot/')
    analysis_path = pines_path / ('Objects/' + short_name + '/analysis')
    photometry_files = natsort.natsorted(
        [x for x in photometry_path.glob('*.csv')])

    num_refs = len(sources) - 1

    #Loop over all photometry files in the aper_phot directory.
    for i in range(len(photometry_files)):
        #Load in the photometry data.
        if phot_type == 'aper':
            aperture_radius = float(str(photometry_files[i]).split('_')[-3])

        phot_data = pines_log_reader(photometry_files[i])

        #Remove entries that have NaN's for flux values.
        for j in range(len(sources['Name'])):
            name = sources['Name'][j]
            phot_data[name + ' Flux'] = phot_data[name + ' Flux'].astype(float)
            phot_data[name +
                      ' Flux Error'] = phot_data[name +
                                                 ' Flux Error'].astype(float)

        #Get target interpolation warnings.
        targ_interp_flags = np.array(phot_data[short_name +
                                               ' Interpolation Flag'])

        #Get times of exposures.
        times = np.array(phot_data['Time JD'])
        seeing = np.array(phot_data['Seeing'])
        airmass = np.array(phot_data['Airmass'])
        background = np.array(phot_data[sources['Name'][0] + ' Background'])

        #Convert to datetimes for plotting purposes.
        dts = np.array(
            [julian.from_jd(times[i], fmt='jd') for i in range(len(times))])

        #Get the target's flux and background
        targ_flux = np.array(phot_data[short_name + ' Flux'])
        targ_flux_err = np.array(phot_data[short_name + ' Flux Error'])

        #Get the reference stars' fluxes and backgrounds.
        ref_flux = np.zeros((num_refs, len(phot_data)))
        ref_flux_err = np.zeros((num_refs, len(phot_data)))
        for j in range(0, num_refs):
            ref_flux[j, :] = phot_data['Reference ' + str(j + 1) + ' Flux']
            ref_flux_err[j, :] = phot_data['Reference ' + str(j + 1) +
                                           ' Flux Error']
            #Discard variable stars.
            #values, clow, chigh = sigmaclip(ref_flux[j], low=2.5, high=2.5)
            # if (len(phot_data) - len(values)) > (int(outlier_tolerance * len(phot_data))):
            #     print('Have to add flagging bad refs.')

        #Find the reference whose mean flux is closest to the target's.
        closest_ref = np.argmin(
            abs(np.nanmean(ref_flux, axis=1) - np.nanmean(targ_flux)))

        #Split data up into individual nights.
        night_inds = night_splitter(times)
        num_nights = len(night_inds)

        #if plot_mode == 'combined':
        #    fig, axis = plt.subplots(nrows=1, ncols=num_nights, figsize=(16, 5))

        #colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']

        #Get the time range of each night and set each plot panel's x-range according to the longest night; this makes potential variability signals easier to see.
        night_lengths = np.zeros(num_nights)
        for j in range(num_nights):
            inds = night_inds[j]
            night_lengths[j] = times[inds][-1] - times[inds][0]
        longest_night = max(night_lengths)
        longest_night_hours = np.ceil(longest_night * 24)
        global line_list, filename_list
        line_list = []
        filename_list = []
        #Holds each night's corrected target flux so the saved lightcurve covers all nights.
        corr_flux_save = np.full(len(times), np.nan)

        for j in range(num_nights):
            #if plot_mode == 'separate':
            #    fig, ax = plt.subplots(1, 1, figsize=(16,5))
            #else:
            #    ax = axis[j]

            #if phot_type =='aper':
            #    fig.suptitle(short_name, fontsize=16)

            #if j == 0:
            #    ax.set_ylabel('Normalized Flux', fontsize=16)
            #ax.set_xlabel('Time (UT)', fontsize=16)

            inds = night_inds[j]
            filename_list.append(
                np.array([phot_data['Filename'][z] for z in inds]))
            alc = np.zeros(len(inds))

            #Normalize reference lightcurves
            #TODO: Each night should be normalized separately.
            for k in range(num_refs):
                ref_flux[k][inds] = ref_flux[k][inds] / np.nanmedian(
                    ref_flux[k][inds])

            for k in range(len(inds)):
                #Do a sigma clip on normalized references to avoid biasing median.
                ###values, clow, chigh = sigmaclip(ref_flux[:,inds[k]][~np.isnan(ref_flux[:,inds[k]])], low=1.5, high=1.5)
                ###alc[k] = np.median(values)
                avg, med, std = sigma_clipped_stats(
                    ref_flux[:, inds[k]][~np.isnan(ref_flux[:, inds[k]])],
                    sigma=1.5)
                alc[k] = med

            #Correct the target lightcurve using the alc.
            alc = alc / np.nanmedian(alc)
            targ_flux_norm = targ_flux[inds] / np.nanmedian(targ_flux[inds])
            targ_corr = targ_flux_norm / alc
            targ_corr = targ_corr / np.nanmedian(targ_corr)
            corr_flux_save[inds] = targ_corr

            #Correct the example reference lightcurve using the alc.
            ref_corr_norm = ref_flux[closest_ref][inds] / np.nanmedian(
                ref_flux[closest_ref][inds])
            ref_corr = ref_corr_norm / alc
            ref_corr = ref_corr / np.nanmedian(ref_corr)

            #Plot the target and reference lightcurves.
            #t_plot, = ax.plot(dts[inds], targ_corr, '.', color=colors[i])
            #line_list.append(t_plot)
            #myFmt = mdates.DateFormatter('%H:%M')
            #ax.xaxis.set_major_formatter(myFmt)
            #fig.autofmt_xdate()

            #Do sigma clipping on the corrected lightcurve to get rid of outliers (from clouds, bad target centroid, cosmic rays, etc.)
            ###vals, lo, hi = sigmaclip(targ_corr, low=2.5, high=2.5)
            avg, med, std = sigma_clipped_stats(targ_corr, sigma=3)
            bad_vals = np.where((targ_corr > med + 5 * std)
                                | (targ_corr < med - 5 * std))[0]
            good_vals = np.where((targ_corr < med + 5 * std)
                                 & (targ_corr > med - 5 * std))[0]
            vals = targ_corr[good_vals]
            #if len(bad_vals) != 0:
            #    plt.plot(dts[inds][bad_vals], targ_corr[bad_vals], marker='x',color='r', mew=1.8, ms=7, zorder=0, ls='')

            blocks = block_splitter(times[inds], bad_vals)
            bin_times = np.zeros(len(blocks))
            bin_fluxes = np.zeros(len(blocks))
            bin_errs = np.zeros(len(blocks))
            bin_dts = []
            for k in range(len(blocks)):
                try:
                    bin_times[k] = np.mean(times[inds][blocks[k]])
                    #vals, hi, lo = sigmaclip(targ_corr[blocks[k]],high=3,low=3) #Exclude outliers.
                    bin_fluxes[k] = np.mean(targ_corr[blocks[k]])
                    bin_errs[k] = np.std(targ_corr[blocks[k]]) / np.sqrt(
                        len(targ_corr[blocks[k]]))
                    bin_dts.append(julian.from_jd(bin_times[k], fmt='jd'))
                except:
                    pdb.set_trace()
            bin_dts = np.array(bin_dts)
            #ax.errorbar(bin_dts, bin_fluxes, yerr=bin_errs, marker='o', color='k',zorder=3, ls='')

            #Draw the y=1 and 5-sigma detection threshold lines.
            #ax.axhline(y=1, color='r', lw=2, zorder=0)
            #ax.axhline(1-5*np.median(bin_errs), zorder=0, lw=2, color='k', ls='--', alpha=0.4)

            #Set the y-range so you can see the 5-sigma detection line.
            #ax.set_ylim(0.9, 1.1)

            #Set the x-range to be the same for all nights.
            #ax.set_xlim(julian.from_jd(times[inds][0]-0.025, fmt='jd'), julian.from_jd(times[inds][0]+longest_night+0.025, fmt='jd'))

            #ax.grid(alpha=0.2)
            #ax.set_title(phot_data['Time UT'][inds[0]].split('T')[0], fontsize=14)
            #ax.tick_params(labelsize=12)
            #print('average seeing, night {}: {}'.format(j, np.mean(seeing[inds])))
            #pdb.set_trace()
            #print('pearson correlation between target and closest ref: {}'.format(pearsonr(targ_corr[good_vals], ref_corr[good_vals])))

        #print(np.mean(bin_errs))
        #print('')
        #fig.tight_layout(rect=[0, 0.03, 1, 0.93])

        #Output the simple lc data to a csv. Use the corrected flux from every night, not just the last one.
        time_save = times
        flux_save = corr_flux_save
        flux_err_save = np.zeros(len(flux_save)) + np.nanstd(corr_flux_save)
        output_dict = {
            'Time': time_save,
            'Flux': flux_save,
            'Flux Error': flux_err_save
        }
        output_df = pd.DataFrame(data=output_dict)
        if phot_type == 'aper':
            output_filename = analysis_path / (
                short_name + '_simple_lc_aper_phot_' +
                str(np.round(aperture_radius, 1)) + '_pix.csv')
            print('\nSaving to {}.\n'.format(output_filename))
            output_df.to_csv(output_filename)
        elif phot_type == 'psf':
            print("ERROR: Need to create flux output for PSF photometry.")

    return
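
A minimal usage sketch, assuming ref_star_chooser and centroider (named in the docstring) have already been run for this target:

    # sources = ref_star_chooser(...)                  # hypothetical upstream call
    # centroided_sources = centroider(target, sources) # hypothetical upstream call
    # simple_lightcurve('2MASS J12345678+1234567', sources, centroided_sources,
    #                   phot_type='aper', plot_mode='combined')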
Example #11
def basic_psf_phot(target, centroided_sources, plots=False):
    def hmsm_to_days(hour=0, minute=0, sec=0, micro=0):
        """
        Convert hours, minutes, seconds, and microseconds to fractional days.
        """
        days = sec + (micro / 1.e6)
        days = minute + (days / 60.)
        days = hour + (days / 60.)
        return days / 24.
    
    def date_to_jd(year,month,day):
        """
        Convert a date to Julian Day.
        
        Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', 
            4th ed., Duffet-Smith and Zwart, 2011.
        
        """
        if month == 1 or month == 2:
            yearp = year - 1
            monthp = month + 12
        else:
            yearp = year
            monthp = month
        
        # this checks where we are in relation to October 15, 1582, the beginning
        # of the Gregorian calendar.
        if ((year < 1582) or
            (year == 1582 and month < 10) or
            (year == 1582 and month == 10 and day < 15)):
            # before start of Gregorian calendar
            B = 0
        else:
            # after start of Gregorian calendar
            A = math.trunc(yearp / 100.)
            B = 2 - A + math.trunc(A / 4.)
            
        if yearp < 0:
            C = math.trunc((365.25 * yearp) - 0.75)
        else:
            C = math.trunc(365.25 * yearp)
            
        D = math.trunc(30.6001 * (monthp + 1))
        
        jd = B + C + D + day + 1720994.5
        
        return jd
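
    # Worked check of the conversion above: date_to_jd(2020, 6, 15.5) gives
    # B = 2 - 20 + 5 = -13, C = trunc(365.25 * 2020) = 737805, and
    # D = trunc(30.6001 * 7) = 214, so jd = -13 + 737805 + 214 + 15.5 + 1720994.5
    # = 2459016.0, i.e. 2020 June 15 at 12:00 UT.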

    def gaussian(p, x, y):
        height, center_x, center_y, width_x, width_y, rotation = p
        rotation = np.deg2rad(rotation)
        x_0 = center_x * np.cos(rotation) - center_y * np.sin(rotation)
        y_0 = center_x * np.sin(rotation) + center_y * np.cos(rotation)

        def rotgauss(x,y):
            xp = x * np.cos(rotation) - y * np.sin(rotation)
            yp = x * np.sin(rotation) + y * np.cos(rotation)
            g = height*np.exp(
                -(((x_0-xp)/width_x)**2+
                  ((y_0-yp)/width_y)**2)/2.)
            return g
        
        g = rotgauss(x,y)

        return g

    def moments(data):
        total = np.nansum(data)
        X, Y = np.indices(data.shape)
        center_x = int(np.shape(data)[1]/2)
        center_y = int(np.shape(data)[0]/2)
        row = data[int(center_x), :]
        col = data[:, int(center_y)]
        width_x = np.nansum(np.sqrt(abs((np.arange(col.size)-center_y)**2*col))
                            /np.nansum(col))
        width_y = np.nansum(np.sqrt(abs((np.arange(row.size)-center_x)**2*row))
                            /np.nansum(row))
        height = np.nanmax(data)
        rotation = 0.0
        return height, center_x, center_y, width_x, width_y, rotation

    def errorfunction(p, x, y, data):
        return gaussian(p, x, y) - data

    def fitgaussian(data):
        params = moments(data)
        X, Y = np.indices(data.shape)
        mask = ~np.isnan(data)
        x = X[mask]
        y = Y[mask]
        data = data[mask]
        p, success = optimize.leastsq(errorfunction, params, args=(x, y, data))
        return p
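
    # Together, the helpers above form a standard moments-plus-least-squares 2D
    # Gaussian fit: moments() seeds (height, center_x, center_y, width_x, width_y,
    # rotation) from the cutout, and optimize.leastsq refines that seed against the
    # NaN-masked pixel values via errorfunction().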

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    reduced_path = pines_path/('Objects/'+short_name+'/reduced/')
    reduced_files = np.array(natsort.natsorted([x for x in reduced_path.glob('*.fits')]))

    centroided_sources.columns = centroided_sources.columns.str.strip()
    source_names = natsort.natsorted(list(set([i[0:-2].replace('X','').replace('Y','').rstrip().lstrip() for i in centroided_sources.keys()])))    

    #Declare a new dataframe to hold the photometry information for all targets.
    columns = ['Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing']
    for i in range(0, len(source_names)):
        columns.append(source_names[i]+' Flux')
        columns.append(source_names[i]+' Flux Error')
    psf_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
    output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

    for i in range(len(reduced_files)):
        #Read in image data/header. 
        file = reduced_files[i]
        hdu = fits.open(file)[0]
        data = hdu.data
        header = hdu.header
        print('{}, image {} of {}.'.format(file.name, i+1, len(reduced_files)))

        #Read in some supporting information.
        log_path = pines_path/('Logs/'+file.name.split('.')[0]+'_log.txt')
        log = pines_log_reader(log_path)
        date_obs = header['DATE-OBS']
        #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds. 
        if len(date_obs.split(':')[-1].split('.')[0]) == 3:
            date_obs = date_obs.split(':')[0] + ':' + date_obs.split(':')[1] + ':' + date_obs.split(':')[-1][1:]
        #Keep a try/except clause here in case other unknown DATE-OBS formats pop up. 
        try:
            date = datetime.datetime.strptime(date_obs, '%Y-%m-%dT%H:%M:%S.%f')
        except:
            print('Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.')
            pdb.set_trace()
        
        days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
        jd = date_to_jd(date.year,date.month,days)
        psf_df['Filename'][i] = file.name
        psf_df['Time UT'][i] = header['DATE-OBS']
        psf_df['Time JD'][i] = jd
        psf_df['Airmass'][i] = header['AIRMASS']
        psf_df['Seeing'][i] = log['X seeing'][np.where(log['Filename'] == file.name.split('_')[0]+'.fits')[0][0]]
        
        #Read in source centroids for this image
        x = np.zeros(len(source_names))
        y = np.zeros(len(source_names))
        seeing = psf_df['Seeing'][i]

        for j in range(len(source_names)):
            source = source_names[j]
            x[j] = centroided_sources[source+' X'][i]
            y[j] = centroided_sources[source+' Y'][i]

        #The extract_stars() function requires the input data as an NDData object.
        nddata = NDData(data=data)

        #Create table of good star positions
        stars_tbl = Table()
        stars_tbl['x'] = x
        stars_tbl['y'] = y

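        #Build a size x size pixel grid for evaluating the 2D Gaussian model below
        #(note: this reuses the x, y names that previously held the centroids).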
        size = 25
        x, y = np.meshgrid(np.arange(0,size), np.arange(0,size))

        #Extract star cutouts.
        stars = extract_stars(nddata, stars_tbl, size=size)  

        fitter = fitting.LevMarLSQFitter()

        fig, ax = plt.subplots(nrows=len(stars), ncols=3, sharex=True, sharey=True, figsize=(12,40))

        #Fit a 2D Gaussian to each star. 
        for j in range(len(stars)): 
            star = stars[j]
            source = source_names[j]
            mmm_bkg = MMMBackground()
            cutout = star.data - mmm_bkg(star.data)            

            #Get the star's centroid position in the cutout. 
            dtype = [('x_0', 'f8'), ('y_0', 'f8')]
            pos = Table(data=np.zeros(1, dtype=dtype))
            source_x = stars_tbl['x'][j]
            source_y = stars_tbl['y'][j]
            pos['x_0'] = source_x - int(source_x - size/2 + 1)
            pos['y_0'] = source_y - int(source_y - size/2 + 1)

            parameters = fitgaussian(cutout)
            g2d_fit = gaussian(parameters, x, y)

            avg, med, std = sigma_clipped_stats(cutout)
            im = ax[j,0].imshow(cutout, origin='lower', vmin=med-std, vmax=med+8*std)
            divider = make_axes_locatable(ax[j,0])
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im, cax=cax, orientation='vertical')
            ax[j,0].plot(pos['x_0'], pos['y_0'], 'rx')
            ax[j,0].set_ylabel(source)
            ax[j,0].text(pos['x_0'], pos['y_0']+1, '('+str(np.round(source_x,1))+', '+str(np.round(source_y,1))+')', color='r', ha='center')
            ax[j,0].axis('off')

            axins = ax[j,0].inset_axes([0.75, 0.75, 0.25, 0.25])
            axins.set_yticklabels([])
            axins.set_yticks([])
            axins.set_xticklabels([])
            axins.set_xticks([])
            axins.imshow(data, origin='lower', vmin=med-std, vmax=med+8*std)
            axins.plot(source_x, source_y, 'rx')

            im = ax[j,1].imshow(g2d_fit, origin='lower', vmin=med-std, vmax=med+8*std)
            divider = make_axes_locatable(ax[j,1])
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im, cax=cax, orientation='vertical')
            ax[j,1].axis('off')

            avg, med, std = sigma_clipped_stats(cutout - g2d_fit)
            im = ax[j,2].imshow(cutout - g2d_fit, origin='lower', vmin=med-std, vmax=med+8*std)
            divider = make_axes_locatable(ax[j,2])
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im, cax=cax, orientation='vertical')
            ax[j,2].axis('off')

            if j == 0:
                ax[j,0].set_title('Data')
                ax[j,1].set_title('2D Gaussian Model')
                ax[j,2].set_title('Data - Model')

            plt.tight_layout()

        output_filename = pines_path/('Objects/'+short_name+'/basic_psf_phot/'+reduced_files[i].name.split('_')[0]+'_'+'source_modeling.pdf')
        plt.savefig(output_filename)
        plt.close()

        
    return
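
A minimal usage sketch, assuming centroider has already produced the centroided_sources DataFrame for this target:

    # centroided_sources = centroider('2MASS J12345678+1234567', sources) # hypothetical upstream call
    # basic_psf_phot('2MASS J12345678+1234567', centroided_sources)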
Example #12
def background_plot(target, centroided_sources, gain=8.21):

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    #Get plot style parameters.
    title_size, axis_title_size, axis_ticks_font_size, legend_font_size = plot_style()

    analysis_path = pines_path / ('Objects/' + short_name + '/analysis')
    phot_path = pines_path / ('Objects/' + short_name + '/aper_phot')
    phot_files = np.array(natsorted([x for x in phot_path.glob('*.csv')]))

    if os.path.exists(analysis_path / ('optimal_aperture.txt')):
        with open(analysis_path / ('optimal_aperture.txt'), 'r') as f:
            best_ap = f.readlines()[0].split(':  ')[1].split('_')[0]
        ap_list = np.array(
            [str(i).split('/')[-1].split('_')[4] for i in phot_files])
        best_ap_ind = np.where(ap_list == best_ap)[0][0]
    else:
        print(
            'No optimal_aperture.txt file for {}.\nUsing first photometry file in {}.'
            .format(target, phot_path))
        best_ap_ind = 0

    phot_file = phot_files[best_ap_ind]
    phot_df = pines_log_reader(phot_file)

    backgrounds = np.array(phot_df[short_name + ' Background'],
                           dtype='float') / gain
    times_full = np.array(phot_df['Time JD'], dtype='float')
    night_inds = night_splitter(times_full)
    num_nights = len(night_inds)
    times_nights = [times_full[night_inds[i]] for i in range(num_nights)]
    standard_x = standard_x_range(times_nights)

    fig, ax = plt.subplots(nrows=1,
                           ncols=num_nights,
                           figsize=(17, 5),
                           sharey=True)
    for i in range(num_nights):
        if i == 0:
            ax[i].set_ylabel('Background (ADU)', fontsize=axis_title_size)

        inds = night_inds[i]
        ax[i].plot(times_full[inds],
                   backgrounds[inds],
                   marker='.',
                   linestyle='',
                   color='tab:orange',
                   alpha=0.3,
                   label='Raw bkg.')
        ax[i].tick_params(labelsize=axis_ticks_font_size)
        ax[i].set_xlabel('Time (JD UTC)', fontsize=axis_title_size)
        ax[i].grid(alpha=0.2)
        ax[i].set_xlim(
            np.mean(times_full[inds]) - standard_x / 2,
            np.mean(times_full[inds]) + standard_x / 2)

        #bin
        block_inds = block_splitter(times_full[inds])
        block_x = np.zeros(len(block_inds))
        block_y = np.zeros(len(block_inds))
        block_y_err = np.zeros(len(block_inds))
        for j in range(len(block_inds)):
            block_x[j] = np.nanmean(times_full[inds][block_inds[j]])
            block_y[j] = np.nanmean(backgrounds[inds][block_inds[j]])
            block_y_err[j] = np.nanstd(
                backgrounds[inds][block_inds[j]]) / np.sqrt(
                    len(backgrounds[inds][block_inds[j]]))

        block_x = block_x[~np.isnan(block_y)]
        block_y_err = block_y_err[~np.isnan(block_y)]
        block_y = block_y[~np.isnan(block_y)]

        ax[i].errorbar(block_x,
                       block_y,
                       block_y_err,
                       marker='o',
                       linestyle='',
                       color='tab:orange',
                       ms=8,
                       mfc='none',
                       mew=2,
                       label='Bin bkg.')

        #Interpolate each night's binned background with a cubic spline.
        fit_times = np.linspace(block_x[0], block_x[-1], 1000)
        try:
            interp = CubicSpline(block_x, block_y)
        except:
            pdb.set_trace()
        interp_fit = interp(fit_times)
        ax[i].plot(fit_times,
                   interp_fit,
                   color='b',
                   lw=2,
                   zorder=0,
                   alpha=0.7,
                   label='CS Interp.')

    ax[i].legend(bbox_to_anchor=(1.01, 0.5), fontsize=legend_font_size)
    plt.suptitle(short_name + ' Background Measurements', fontsize=title_size)
    plt.subplots_adjust(left=0.07, wspace=0.05, top=0.92, bottom=0.17)

    output_filename = pines_path / ('Objects/' + short_name +
                                    '/analysis/diagnostic_plots/' +
                                    short_name + '_backgrounds.png')
    plt.savefig(output_filename, dpi=300)
    return
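
A minimal usage sketch. Note that centroided_sources is accepted but never referenced in the body above; the plot is built entirely from the saved aperture photometry csv:

    # background_plot('2MASS J12345678+1234567', centroided_sources)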
Example #13
def epsf_phot(target, centroided_sources, plots=False):
    def hmsm_to_days(hour=0, minute=0, sec=0, micro=0):
        """
        Convert hours, minutes, seconds, and microseconds to fractional days.
        """
        days = sec + (micro / 1.e6)
        days = minute + (days / 60.)
        days = hour + (days / 60.)
        return days / 24.
    
    def date_to_jd(year,month,day):
        """
        Convert a date to Julian Day.
        
        Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', 
            4th ed., Duffet-Smith and Zwart, 2011.
        
        """
        if month == 1 or month == 2:
            yearp = year - 1
            monthp = month + 12
        else:
            yearp = year
            monthp = month
        
        # this checks where we are in relation to October 15, 1582, the beginning
        # of the Gregorian calendar.
        if ((year < 1582) or
            (year == 1582 and month < 10) or
            (year == 1582 and month == 10 and day < 15)):
            # before start of Gregorian calendar
            B = 0
        else:
            # after start of Gregorian calendar
            A = math.trunc(yearp / 100.)
            B = 2 - A + math.trunc(A / 4.)
            
        if yearp < 0:
            C = math.trunc((365.25 * yearp) - 0.75)
        else:
            C = math.trunc(365.25 * yearp)
            
        D = math.trunc(30.6001 * (monthp + 1))
        
        jd = B + C + D + day + 1720994.5
        
        return jd

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    reduced_path = pines_path/('Objects/'+short_name+'/reduced/')
    reduced_filenames = natsort.natsorted([x.name for x in reduced_path.glob('*.fits')])
    reduced_files = np.array([reduced_path/i for i in reduced_filenames])

    centroided_sources.columns = centroided_sources.columns.str.strip()
    source_names = natsort.natsorted(list(set([i.split(' ')[0]+' '+i.split(' ')[1] for i in centroided_sources.keys() if (i[0] == '2') or (i[0] == 'R')])))
    
    #Create output plot directories for each source.
    if plots:
        for name in source_names:
            #If the folders are already there, delete them. 
            source_path = (pines_path/('Objects/'+short_name+'/psf_phot/'+name+'/'))
            if source_path.exists():
                shutil.rmtree(source_path)
            #Create folders.
            os.mkdir(source_path)

    #Declare a new dataframe to hold the photometry information for all targets.
    columns = ['Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing']
    for i in range(0, len(source_names)):
        columns.append(source_names[i]+' Flux')
        columns.append(source_names[i]+' Flux Error')
    psf_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
    output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

    for i in range(0, len(reduced_files)):
        #Read in image data/header. 
        file = reduced_files[i]
        hdu = fits.open(file)[0]
        data = hdu.data
        header = hdu.header
        print('{}, image {} of {}.'.format(file.name, i+1, len(reduced_files)))

        #Read in some supporting information.
        log_path = pines_path/('Logs/'+file.name.split('.')[0]+'_log.txt')
        log = pines_log_reader(log_path)
        date_obs = header['DATE-OBS']
        #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds. 
        if len(date_obs.split(':')[-1].split('.')[0]) == 3:
            date_obs = date_obs.split(':')[0] + ':' + date_obs.split(':')[1] + ':' + date_obs.split(':')[-1][1:]
        #Keep a try/except clause here in case other unknown DATE-OBS formats pop up. 
        try:
            date = datetime.datetime.strptime(date_obs, '%Y-%m-%dT%H:%M:%S.%f')
        except:
            print('Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.')
            pdb.set_trace()
        
        days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
        jd = date_to_jd(date.year,date.month,days)
        psf_df['Filename'][i] = file.name
        psf_df['Time UT'][i] = header['DATE-OBS']
        psf_df['Time JD'][i] = jd
        psf_df['Airmass'][i] = header['AIRMASS']
        psf_df['Seeing'][i] = log['X seeing'][np.where(log['Filename'] == file.name.split('_')[0]+'.fits')[0][0]]
        
        #Read in source centroids for this image
        x = np.zeros(len(source_names))
        y = np.zeros(len(source_names))
        for j in range(len(source_names)):
            source = source_names[j]
            x[j] = centroided_sources[source+' X'][i]
            y[j] = centroided_sources[source+' Y'][i]

        #To extract pixel cutouts of the stars, we would normally exclude stars that are too close to the image boundaries (they cannot be extracted); see the commented mask below.
        size = 13
        hsize = (size - 1) / 2
        #mask = ((x > hsize) & (x < (data.shape[1] -1 - hsize)) & (y > hsize) & (y < (data.shape[0] -1 - hsize)) & (y > 100) & (y < 923))

        #Create table of good star positions
        stars_tbl = Table()
        stars_tbl['x'] = x
        stars_tbl['y'] = y
        
        #Subtract background (star cutouts from which we build the ePSF must have background subtracted).
        mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=2.)  
        data -= median_val
        
        #Replace nans in data using Gaussian. 
        # kernel = Gaussian2DKernel(x_stddev=0.5)
        # data = interpolate_replace_nans(data, kernel)

        #The extract_stars() function requires the input data as an NDData object. 
        nddata = NDData(data=data)  

        #Extract star cutouts.
        stars = extract_stars(nddata, stars_tbl, size=size)  
                        

        #Plot the star cutouts.
        if plots:
            nrows = 5
            ncols = 5
            fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 10), squeeze=True)
            ax = ax.ravel()
            for j in range(len(stars)):
                norm = simple_norm(stars[j], 'log', percent=99.)
                ax[j].imshow(stars[j].data, norm=norm, origin='lower', cmap='viridis')
            plt.close(fig)  #Close so figures don't accumulate across images.

        #Construct the ePSF using the star cutouts.
        epsf_fitter = EPSFFitter()
        epsf_builder = EPSFBuilder(maxiters=4, progress_bar=False, fitter=epsf_fitter)   

        try:
            epsf, fitted_stars = epsf_builder(stars)
            output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

            for j in range(len(stars)):
                star = stars[j]
                source_name = source_names[j]
                sigma_psf = 1.85

                dtype = [('x_0', 'f8'), ('y_0', 'f8')]
                pos = Table(data=np.zeros(1, dtype=dtype))
                source_x = stars_tbl['x'][j]
                source_y = stars_tbl['y'][j]
                pos['x_0'] = source_x - int(source_x - size/2 + 1)
                pos['y_0'] = source_y - int(source_y - size/2 + 1)

                daogroup = DAOGroup(4.0*sigma_psf*gaussian_sigma_to_fwhm)
                mmm_bkg = MMMBackground()
                photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=epsf,
                                    fitter=LevMarLSQFitter(),
                                    fitshape=(13,13),
                                    aperture_radius=4.)
                

                result_tab = photometry(image=star, init_guesses=pos)
                residual_image = photometry.get_residual_image()
                psf_df[source_name+' Flux'][i] = result_tab['flux_fit'][0]
                psf_df[source_name+' Flux Error'][i] = result_tab['flux_unc'][0]

                if plots:
                    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12,4))
                    im = ax[0].imshow(star, origin='lower')
                    divider = make_axes_locatable(ax[0])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im, cax=cax, orientation='vertical')
                    ax[0].plot(result_tab['x_fit'][0], result_tab['y_fit'][0], 'rx')
                    ax[0].set_title('Data')

                    im2 = ax[1].imshow(epsf.data, origin='lower')
                    ax[1].set_title('EPSF Model')
                    divider = make_axes_locatable(ax[1])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im2, cax=cax, orientation='vertical')

                    im3 = ax[2].imshow(residual_image, origin='lower')
                    ax[2].set_title('Residual Image')
                    divider = make_axes_locatable(ax[2])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im3, cax=cax, orientation='vertical')
                    plt.suptitle(source_name+'\n'+reduced_files[i].name+', image '+str(i+1)+' of '+str(len(reduced_files)))
                    plt.subplots_adjust(wspace=0.5, top=0.95, bottom = 0.05)
                    plot_output_name = pines_path/('Objects/'+short_name+'/psf_phot/'+source_name+'/'+str(i).zfill(4)+'.jpg')
                    plt.savefig(plot_output_name)
                    plt.close()
        except:
            print('')
            print('EPSF BUILDER FAILED, SKIPPING IMAGE.')
            print('')
        #Plot the ePSF. 
        # plt.figure()
        # norm = simple_norm(epsf.data, 'log', percent=99.)
        # plt.imshow(epsf.data, norm=norm, origin='lower', cmap='viridis')
        # cb = plt.colorbar()
        # plt.tight_layout()   

        

    print('Saving psf photometry output to {}.'.format(output_filename))
    with open(output_filename, 'w') as f:
        for j in range(len(psf_df)):
            if j == 0:
                f.write('{:>21s}, {:>22s}, {:>17s}, {:>7s}, {:>7s}, '.format('Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing'))
                for i in range(len(source_names)):
                    if i != len(source_names) - 1:
                        f.write('{:>20s}, {:>26s}, '.format(source_names[i]+' Flux', source_names[i]+' Flux Error'))
                    else:
                        f.write('{:>20s}, {:>26s}\n'.format(source_names[i]+' Flux', source_names[i]+' Flux Error'))

            format_string = '{:21s}, {:22s}, {:17.9f}, {:7.2f}, {:7.1f}, '

            #If the seeing value for this image is 'nan' (a string), convert it to a float. 
            #TODO: Not sure why it's being read in as a string, fix that. 
            if type(psf_df['Seeing'][j]) == str:
                psf_df['Seeing'][j] = float(psf_df['Seeing'][j])

            #Do a try/except clause for writeout, in case it breaks in the future. 
            try:
                f.write(format_string.format(psf_df['Filename'][j], psf_df['Time UT'][j], psf_df['Time JD'][j], psf_df['Airmass'][j], psf_df['Seeing'][j]))
            except:
                print('Writeout failed! Inspect quantities you are trying to write out.')
                pdb.set_trace()
            for i in range(len(source_names)):                    
                if i != len(source_names) - 1:
                    format_string = '{:20.11f}, {:26.11f}, '
                else:
                    format_string = '{:20.11f}, {:26.11f}\n'
                
                f.write(format_string.format(psf_df[source_names[i]+' Flux'][j], psf_df[source_names[i]+' Flux Error'][j]))
    print('')    
    return
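
A minimal usage sketch, assuming an older photutils release that still provides EPSFBuilder, EPSFFitter, DAOGroup, and BasicPSFPhotometry with the signatures used above:

    # centroided_sources = centroider('2MASS J12345678+1234567', sources) # hypothetical upstream call
    # epsf_phot('2MASS J12345678+1234567', centroided_sources, plots=True)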
Example #14
def centroider(target,
               sources,
               output_plots=False,
               gif=False,
               restore=False,
               box_w=8):
    matplotlib.use('TkAgg')
    plt.ioff()
    t1 = time.time()
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    kernel = Gaussian2DKernel(x_stddev=1)  #For fixing nans in cutouts.

    #If restore == True, read in existing output and return.
    if restore:
        centroid_df = pd.read_csv(
            pines_path / ('Objects/' + short_name +
                          '/sources/target_and_references_centroids.csv'),
            converters={
                'X Centroids': eval,
                'Y Centroids': eval
            })
        print('Restoring centroider output from {}.'.format(
            pines_path / ('Objects/' + short_name +
                          '/sources/target_and_references_centroids.csv')))
        print('')
        return centroid_df

    #Create subdirectories in sources folder to contain output plots.
    if output_plots:
        subdirs = glob(
            str(pines_path / ('Objects/' + short_name + '/sources')) + '/*/')
        #Delete any source directories that are already there.
        for name in subdirs:
            shutil.rmtree(name)

        #Create new source directories.
        for name in sources['Name']:
            source_path = (
                pines_path /
                ('Objects/' + short_name + '/sources/' + name + '/'))
            os.mkdir(source_path)

    #Read in extra shifts, in case the master image wasn't used for source detection.
    extra_shift_path = pines_path / ('Objects/' + short_name +
                                     '/sources/extra_shifts.txt')
    extra_shifts = pd.read_csv(extra_shift_path,
                               delimiter=' ',
                               names=['Extra X shift', 'Extra Y shift'])
    extra_x_shift = extra_shifts['Extra X shift'][0]
    extra_y_shift = extra_shifts['Extra Y shift'][0]

    np.seterr(
        divide='ignore', invalid='ignore'
    )  #Suppress some warnings we don't care about in median combining.

    #Get list of reduced files for target.
    reduced_path = pines_path / ('Objects/' + short_name + '/reduced')
    reduced_filenames = natsort.natsorted(
        [x.name for x in reduced_path.glob('*red.fits')])
    reduced_files = np.array([reduced_path / i for i in reduced_filenames])

    #Declare a new dataframe to hold the centroid information for all sources we want to track.
    columns = []
    columns.append('Filename')
    columns.append('Seeing')
    columns.append('Time (JD UTC)')
    columns.append('Airmass')

    #Add x/y positions and centroid flags for every tracked source
    for i in range(0, len(sources)):
        columns.append(sources['Name'][i] + ' Image X')
        columns.append(sources['Name'][i] + ' Image Y')
        columns.append(sources['Name'][i] + ' Cutout X')
        columns.append(sources['Name'][i] + ' Cutout Y')
        columns.append(sources['Name'][i] + ' Centroid Warning')

    centroid_df = pd.DataFrame(index=range(len(reduced_files)),
                               columns=columns)

    log_path = pines_path / ('Logs/')
    log_dates = np.array(
        natsort.natsorted(
            [x.name.split('_')[0] for x in log_path.glob('*.txt')]))

    #Make sure we have logs for all the nights of these data. Need them to account for image shifts.
    nights = list(set([i.name.split('.')[0] for i in reduced_files]))
    for i in nights:
        if i not in log_dates:
            print('ERROR: {} not in {}. Download it from the PINES server.'.
                  format(i + '_log.txt', log_path))
            pdb.set_trace()

    shift_tolerance = 2.0  #Number of pixels that the measured centroid can be away from the expected position in either x or y before trying other centroiding algorithms.
    for i in range(len(sources)):
        #Get the initial source position.
        x_pos = sources['Source Detect X'][i]
        y_pos = sources['Source Detect Y'][i]
        print('')
        print(
            'Getting centroids for {}, ({:3.1f}, {:3.1f}) in source detection image. Source {} of {}.'
            .format(sources['Name'][i], x_pos, y_pos, i + 1, len(sources)))
        if output_plots:
            print('Saving centroid plots to {}.'.format(
                pines_path / ('Objects/' + short_name + '/sources/' +
                              sources['Name'][i] + '/')))
        pbar = ProgressBar()
        for j in pbar(range(len(reduced_files))):
            centroid_df[sources['Name'][i] + ' Centroid Warning'][j] = 0
            file = reduced_files[j]
            image = fits.open(file)[0].data
            #Get the measured image shift for this image.
            log = pines_log_reader(log_path /
                                   (file.name.split('.')[0] + '_log.txt'))
            log_ind = np.where(log['Filename'] == file.name.split('_')[0] +
                               '.fits')[0][0]

            x_shift = float(log['X shift'][log_ind])
            y_shift = float(log['Y shift'][log_ind])

            #Save the filename for readability. Save the seeing for use in variable aperture photometry. Save the time for diagnostic plots.
            if i == 0:
                centroid_df['Filename'][j] = file.name.split('_')[0] + '.fits'
                centroid_df['Seeing'][j] = log['X seeing'][log_ind]
                time_str = fits.open(file)[0].header['DATE-OBS']

                #Correct some formatting issues that can occur in Mimir time stamps.
                if time_str.split(':')[-1] == '60.00':
                    time_str = time_str[0:14] + str(
                        int(time_str.split(':')[-2]) + 1) + ':00.00'
                elif time_str.split(':')[-1] == '010.00':
                    time_str = time_str[0:17] + time_str.split(':')[-1][1:]

                centroid_df['Time (JD UTC)'][j] = julian.to_jd(
                    datetime.datetime.strptime(time_str,
                                               '%Y-%m-%dT%H:%M:%S.%f'))
                centroid_df['Airmass'][j] = log['Airmass'][log_ind]

            nan_flag = False  #Flag indicating if you should not trust the log's shifts. Set to true if x_shift/y_shift are 'nan' or > 30 pixels.

            #If bad shifts were measured for this image, skip.
            if log['Shift quality flag'][log_ind] == 1:
                continue

            if np.isnan(x_shift) or np.isnan(y_shift):
                x_shift = 0
                y_shift = 0
                nan_flag = True

            #If there are clouds, shifts could have been erroneously high...just zero them?
            if abs(x_shift) > 200:
                #x_shift = 0
                nan_flag = True
            if abs(y_shift) > 200:
                #y_shift = 0
                nan_flag = True

            #Apply the shift. NOTE: This relies on having accurate x_shift and y_shift values from the log.
            #If they're incorrect, the cutout will not be in the right place.
            #x_pos = sources['Source Detect X'][i] - x_shift + extra_x_shift
            #y_pos = sources['Source Detect Y'][i] + y_shift - extra_y_shift

            x_pos = sources['Source Detect X'][i] - (x_shift - extra_x_shift)
            y_pos = sources['Source Detect Y'][i] + (y_shift - extra_y_shift)

            #TODO: Make all this its own function.

            #Cutout around the expected position and interpolate over any NaNs (which screw up source detection).
            cutout = interpolate_replace_nans(
                image[int(y_pos - box_w):int(y_pos + box_w) + 1,
                      int(x_pos - box_w):int(x_pos + box_w) + 1],
                kernel=Gaussian2DKernel(x_stddev=0.5))

            #interpolate_replace_nans struggles with edge pixels, so shave off edge_shave pixels in each direction of the cutout.
            edge_shave = 1
            cutout = cutout[edge_shave:len(cutout) - edge_shave,
                            edge_shave:len(cutout) - edge_shave]

            vals, lower, upper = sigmaclip(
                cutout, low=1.5,
                high=2.5)  #Get sigma clipped stats on the cutout
            med = np.nanmedian(vals)
            std = np.nanstd(vals)

            try:
                centroid_x_cutout, centroid_y_cutout = centroid_2dg(
                    cutout - med)  #Perform centroid detection on the cutout.
            except:
                pdb.set_trace()

            centroid_x = centroid_x_cutout + int(
                x_pos
            ) - box_w + edge_shave  #Translate the detected centroid from the cutout coordinates back to the full-frame coordinates.
            centroid_y = centroid_y_cutout + int(y_pos) - box_w + edge_shave

            # if i == 0:
            #     qp(cutout)
            #     plt.plot(centroid_x_cutout, centroid_y_cutout, 'rx')

            #     # qp(image)
            #     # plt.plot(centroid_x, centroid_y, 'rx')
            #     pdb.set_trace()

            #If the shifts in the log are not 'nan' or > 200 pixels, check if the measured shifts are within shift_tolerance pixels of the expected position.
            #   If they aren't, try alternate centroiding methods to try and find it.

            #Otherwise, keep the centroid measured above; PINES_watchdog likely failed while observing, and we don't expect the centroids measured here to actually be at the expected position.
            if not nan_flag:
                #Re-try the 2D Gaussian detection on the cutout.
                if (abs(centroid_x - x_pos) > shift_tolerance) or (
                        abs(centroid_y - y_pos) > shift_tolerance):
                    centroid_x_cutout, centroid_y_cutout = centroid_2dg(
                        cutout - med)
                    centroid_x = centroid_x_cutout + int(x_pos) - box_w
                    centroid_y = centroid_y_cutout + int(y_pos) - box_w

                    #If that fails, try a COM detection.
                    if (abs(centroid_x - x_pos) > shift_tolerance) or (
                            abs(centroid_y - y_pos) > shift_tolerance):
                        centroid_x_cutout, centroid_y_cutout = centroid_com(
                            cutout - med)
                        centroid_x = centroid_x_cutout + int(x_pos) - box_w
                        centroid_y = centroid_y_cutout + int(y_pos) - box_w

                        #If that fails, try masking source and interpolate over any bad pixels that aren't in the bad pixel mask, then redo 1D gaussian detection.
                        if (abs(centroid_x - x_pos) > shift_tolerance) or (
                                abs(centroid_y - y_pos) > shift_tolerance):
                            mask = make_source_mask(cutout,
                                                    nsigma=4,
                                                    npixels=5,
                                                    dilate_size=3)
                            vals, lo, hi = sigmaclip(cutout[~mask])
                            bad_locs = np.where(~mask & (
                                (cutout > hi) | (cutout < lo)))
                            cutout[bad_locs] = np.nan
                            cutout = interpolate_replace_nans(
                                cutout, kernel=Gaussian2DKernel(x_stddev=0.5))

                            centroid_x_cutout, centroid_y_cutout = centroid_1dg(
                                cutout - med)
                            centroid_x = centroid_x_cutout + int(x_pos) - box_w
                            centroid_y = centroid_y_cutout + int(y_pos) - box_w

                            #Try a 2D Gaussian detection on the interpolated cutout
                            if (abs(centroid_x - x_pos) > shift_tolerance) or (
                                    abs(centroid_y - y_pos) > shift_tolerance):
                                centroid_x_cutout, centroid_y_cutout = centroid_2dg(
                                    cutout - med)
                                centroid_x = centroid_x_cutout + int(
                                    x_pos) - box_w
                                centroid_y = centroid_y_cutout + int(
                                    y_pos) - box_w

                                #Try a COM on the interpolated cutout.
                                if (abs(centroid_x - x_pos) > shift_tolerance
                                    ) or (abs(centroid_y - y_pos) >
                                          shift_tolerance):
                                    centroid_x_cutout, centroid_y_cutout = centroid_com(
                                        cutout)
                                    centroid_x = centroid_x_cutout + int(
                                        x_pos) - box_w
                                    centroid_y = centroid_y_cutout + int(
                                        y_pos) - box_w

                                    #Last resort: try cutting off the edge of the cutout. Edge pixels can experience poor interpolation, and this sometimes helps.
                                    if (abs(centroid_x - x_pos) >
                                            shift_tolerance) or (
                                                abs(centroid_y - y_pos) >
                                                shift_tolerance):
                                        cutout = cutout[1:-1, 1:-1]
                                        centroid_x_cutout, centroid_y_cutout = centroid_1dg(
                                            cutout - med)
                                        centroid_x = centroid_x_cutout + int(
                                            x_pos) - box_w + 1
                                        centroid_y = centroid_y_cutout + int(
                                            y_pos) - box_w + 1

                                        #Try with a 2DG
                                        if (abs(centroid_x - x_pos) >
                                                shift_tolerance) or (
                                                    abs(centroid_y - y_pos) >
                                                    shift_tolerance):
                                            centroid_x_cutout, centroid_y_cutout = centroid_2dg(
                                                cutout - med)
                                            centroid_x = centroid_x_cutout + int(
                                                x_pos) - box_w + 1
                                            centroid_y = centroid_y_cutout + int(
                                                y_pos) - box_w + 1

                                            #If ALL that fails, report the expected position as the centroid.
                                            if (abs(centroid_x - x_pos) >
                                                    shift_tolerance) or (
                                                        abs(centroid_y - y_pos)
                                                        > shift_tolerance):
                                                print(
                                                    'WARNING: large centroid deviation measured, returning predicted position'
                                                )
                                                print('')
                                                centroid_df[
                                                    sources['Name'][i] +
                                                    ' Centroid Warning'][j] = 1
                                                centroid_x = x_pos
                                                centroid_y = y_pos
                                                #pdb.set_trace()

            #Check that your measured position is actually on the detector.
            if (centroid_x < 0) or (centroid_y < 0) or (centroid_x > 1023) or (
                    centroid_y > 1023):
                #Try a quick mask/interpolation of the cutout.
                mask = make_source_mask(cutout,
                                        nsigma=3,
                                        npixels=5,
                                        dilate_size=3)
                vals, lo, hi = sigmaclip(cutout[~mask])
                bad_locs = np.where(~mask
                                    & ((cutout > hi) | (cutout < lo)))
                cutout[bad_locs] = np.nan
                cutout = interpolate_replace_nans(
                    cutout, kernel=Gaussian2DKernel(x_stddev=0.5))
                centroid_x, centroid_y = centroid_2dg(cutout - med)
                centroid_x += int(x_pos) - box_w
                centroid_y += int(y_pos) - box_w
                if (centroid_x < 0) or (centroid_y < 0) or (
                        centroid_x > 1023) or (centroid_y > 1023):
                    print(
                        'WARNING: large centroid deviation measured, returning predicted position'
                    )
                    print('')
                    centroid_df[sources['Name'][i] +
                                ' Centroid Warning'][j] = 1
                    centroid_x = x_pos
                    centroid_y = y_pos

            #Check to make sure you didn't measure NaNs.
            if np.isnan(centroid_x):
                centroid_x = x_pos
                print(
                    'NaN returned from centroid algorithm, defaulting to target position in source_detect_image.'
                )
            if np.isnan(centroid_y):
                centroid_y = y_pos
                print(
                    'NaN returned from centroid algorithm, defaulting to target position in source_detect_image.'
                )

            #Record the image and relative cutout positions.
            centroid_df[sources['Name'][i] + ' Image X'][j] = centroid_x
            centroid_df[sources['Name'][i] + ' Image Y'][j] = centroid_y
            centroid_df[sources['Name'][i] +
                        ' Cutout X'][j] = centroid_x_cutout
            centroid_df[sources['Name'][i] +
                        ' Cutout Y'][j] = centroid_y_cutout

            if output_plots:
                #Plot
                lock_x = int(centroid_df[sources['Name'][i] + ' Image X'][0])
                lock_y = int(centroid_df[sources['Name'][i] + ' Image Y'][0])
                norm = ImageNormalize(data=cutout, interval=ZScaleInterval())
                plt.imshow(image, origin='lower', norm=norm)
                plt.plot(centroid_x, centroid_y, 'rx')
                ap = CircularAperture((centroid_x, centroid_y), r=5)
                ap.plot(lw=2, color='b')
                plt.ylim(lock_y - 30, lock_y + 30 - 1)
                plt.xlim(lock_x - 30, lock_x + 30 - 1)
                plt.title('CENTROID DIAGNOSTIC PLOT\n' + sources['Name'][i] +
                          ', ' + reduced_files[j].name + ' (image ' +
                          str(j + 1) + ' of ' + str(len(reduced_files)) + ')',
                          fontsize=10)
                plt.text(centroid_x,
                         centroid_y + 0.5,
                         '(' + str(np.round(centroid_x, 1)) + ', ' +
                         str(np.round(centroid_y, 1)) + ')',
                         color='r',
                         ha='center')
                plot_output_path = (
                    pines_path /
                    ('Objects/' + short_name + '/sources/' +
                     sources['Name'][i] + '/' + str(j).zfill(4) + '.jpg'))
                plt.gca().set_axis_off()
                plt.subplots_adjust(top=1,
                                    bottom=0,
                                    right=1,
                                    left=0,
                                    hspace=0,
                                    wspace=0)
                plt.margins(0, 0)
                plt.gca().xaxis.set_major_locator(plt.NullLocator())
                plt.gca().yaxis.set_major_locator(plt.NullLocator())
                plt.savefig(plot_output_path,
                            bbox_inches='tight',
                            pad_inches=0,
                            dpi=150)
                plt.close()

        if gif:
            gif_path = (pines_path / ('Objects/' + short_name + '/sources/' +
                                      sources['Name'][i] + '/'))
            gif_maker(path=gif_path, fps=10)

    output_filename = pines_path / (
        'Objects/' + short_name +
        '/sources/target_and_references_centroids.csv')
    #Note: centroid_df.to_csv(output_filename) would also work; the manual writer below is used to produce fixed-width, human-readable columns.

    print('Saving centroiding output to {}.'.format(output_filename))
    with open(output_filename, 'w') as f:
        for j in range(len(centroid_df)):
            #Write the header line.
            if j == 0:
                f.write('{:<17s}, '.format('Filename'))
                f.write('{:<15s}, '.format('Time (JD UTC)'))
                f.write('{:<6s}, '.format('Seeing'))
                f.write('{:<7s}, '.format('Airmass'))
                for i in range(len(sources['Name'])):
                    n = sources['Name'][i]
                    if i != len(sources['Name']) - 1:
                        f.write(
                            '{:<23s}, {:<23s}, {:<24s}, {:<24s}, {:<34s}, '.
                            format(n + ' Image X', n + ' Image Y',
                                   n + ' Cutout X', n + ' Cutout Y',
                                   n + ' Centroid Warning'))
                    else:
                        f.write(
                            '{:<23s}, {:<23s}, {:<24s}, {:<24s}, {:<34s}\n'.
                            format(n + ' Image X', n + ' Image Y',
                                   n + ' Cutout X', n + ' Cutout Y',
                                   n + ' Centroid Warning'))

            #Write in the data lines.
            try:
                f.write('{:<17s}, '.format(centroid_df['Filename'][j]))
                f.write('{:<15.7f}, '.format(centroid_df['Time (JD UTC)'][j]))
                f.write('{:<6.1f}, '.format(float(centroid_df['Seeing'][j])))
                f.write('{:<7.2f}, '.format(centroid_df['Airmass'][j]))
            except:
                pdb.set_trace()  #Debug hook: inspect rows with malformed metadata.

            for i in range(len(sources['Name'])):
                n = sources['Name'][i]
                if i != len(sources['Name']) - 1:
                    format_string = '{:<23.4f}, {:<23.4f}, {:<24.4f}, {:<24.4f}, {:<34d}, '
                else:
                    format_string = '{:<23.4f}, {:<23.4f}, {:<24.4f}, {:<24.4f}, {:<34d}\n'

                f.write(
                    format_string.format(
                        centroid_df[n + ' Image X'][j],
                        centroid_df[n + ' Image Y'][j],
                        centroid_df[n + ' Cutout X'][j],
                        centroid_df[n + ' Cutout Y'][j],
                        centroid_df[n + ' Centroid Warning'][j]))
    np.seterr(divide='warn', invalid='warn')  #Restore default numpy warning behavior.
    print('')
    print('centroider runtime: {:.2f} minutes.'.format(
        (time.time() - t1) / 60))
    print('')
    return centroid_df
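
#A minimal usage sketch (an addition, not part of the original example):
#read the fixed-width centroid CSV written above back in with pandas and
#count frames where any source tripped a centroid warning. The filename is
#assumed to be the output_filename from the function.
import pandas as pd

df = pd.read_csv('target_and_references_centroids.csv', skipinitialspace=True)
#Warning columns were written as 0/1 flags; astype(float) tolerates the fixed-width padding.
warning_cols = [c for c in df.columns if 'Centroid Warning' in c]
n_warned = (df[warning_cols].astype(float).sum(axis=1) > 0).sum()
print('{} of {} frames have centroid warnings.'.format(n_warned, len(df)))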
Example #15
0
def dome_flat_field(date,
                    band,
                    lights_on_start=0,
                    lights_on_stop=0,
                    lights_off_start=0,
                    lights_off_stop=0,
                    upload=False,
                    delete_raw=False,
                    sftp=''):
    clip_lvl = 3  #The value to use for sigma clipping.
    np.seterr(
        invalid='ignore'
    )  #Suppress some warnings we don't care about in median combining.
    plt.ion()  #Turn on interactive plotting.
    pines_path = pines_dir_check()

    t1 = time.time()
    #If an sftp connection to the PINES server was passed, download the flat data.
    if isinstance(sftp, pysftp.Connection):
        sftp.chdir('/')
        sftp.chdir('data/raw/mimir')
        run_list = sftp.listdir()
        data_path = ''  #Initialize to check that it gets filled.
        for i in range(len(run_list)):
            sftp.chdir(run_list[i])
            date_list = sftp.listdir()
            if date in date_list:
                data_path = sftp.getcwd()
                print('{} directory found in pines.bu.edu:{}/'.format(
                    date, data_path))
                print('')
                sftp.chdir(date)
                break
            sftp.chdir('..')

        if data_path == '':
            print(
                'ERROR: {} not found in any run on pines.bu.edu:data/raw/mimir/.'
                .format(date))
            return

        else:
            #If the file start/stop numbers are specified, grab those files.
            if (lights_on_stop != 0) or (lights_off_stop != 0):
                files_in_dir = sftp.listdir()
                on_flat_filenums = np.arange(lights_on_start,
                                             lights_on_stop + 1,
                                             step=1)
                off_flat_filenums = np.arange(lights_off_start,
                                              lights_off_stop + 1,
                                              step=1)
                flat_files = []
                lights_on_files = []
                lights_off_files = []

                #Add the lights-on flats to the file list.
                for i in range(len(on_flat_filenums)):
                    file_num = on_flat_filenums[i]
                    #Generate the zero-padded filename, e.g. 5 -> date + '.005.fits'.
                    file_name = date + '.' + str(file_num).zfill(3) + '.fits'
                    #Check if the file name is in the directory, and if so, append it to the list of flat files.
                    if file_name in files_in_dir:
                        flat_files.append(file_name)
                        lights_on_files.append(file_name)
                    else:
                        print('{} not found in directory, skipping.'.format(
                            file_name))

                #Do the same for the lights-off files.
                for i in range(len(off_flat_filenums)):
                    file_num = off_flat_filenums[i]
                    #Generate the zero-padded filename, e.g. 5 -> date + '.005.fits'.
                    file_name = date + '.' + str(file_num).zfill(3) + '.fits'
                    #Check if the file name is in the directory, and if so, append it to the list of flat files.
                    if file_name in files_in_dir:
                        flat_files.append(file_name)
                        lights_off_files.append(file_name)
                    else:
                        print('{} not found in directory, skipping.'.format(
                            file_name))
            #Otherwise, find the files automatically using the night's log.
            else:
                log_path = pines_path / 'Logs'
                #Check if you already have the log for this date, if not, download it.
                #Download from the /data/logs/ directory on PINES.
                if not (log_path / (date + '_log.txt')).exists():
                    print('Downloading {}_log.txt to {}'.format(
                        date, log_path))
                    sftp.get('/data/logs/' + date + '_log.txt',
                             log_path / (date + '_log.txt'))

                log = pines_log_reader(log_path / (date + '_log.txt'))

                #Identify flat files.
                flat_inds = np.where((log['Target'] == 'Flat')
                                     & (log['Filename'] != 'test.fits')
                                     & (log['Filt.'] == band))[0]
                flat_files = natsort.natsorted(
                    list(set(log['Filename'][flat_inds]))
                )  #set() guarantees we only grab the unique filenames identified as flats, in case the log has duplicate entries.

            print('Found {} flat files.'.format(len(flat_files)))
            print('')

            #Download data to the appropriate Calibrations/Flats/Domeflats/band/Raw/ directory.
            dome_flat_raw_path = pines_path / (
                'Calibrations/Flats/Domeflats/' + band + '/Raw')
            for j in range(len(flat_files)):
                if not (dome_flat_raw_path / flat_files[j]).exists():
                    sftp.get(flat_files[j],
                             (dome_flat_raw_path / flat_files[j]))
                    print('Downloading {} to {}, {} of {}.'.format(
                        flat_files[j], dome_flat_raw_path, j + 1,
                        len(flat_files)))
                else:
                    print('{} already in {}, skipping download.'.format(
                        flat_files[j], dome_flat_raw_path))
            print('')

            if (lights_on_stop == 0) and (lights_off_stop == 0):
                #Find the lights-on and lights-off flat files.
                lights_on_files = []
                lights_off_files = []
                for j in range(len(flat_files)):
                    header = fits.open(dome_flat_raw_path /
                                       flat_files[j])[0].header
                    if header['FILTNME2'] != band:
                        print(
                            'ERROR: {} taken in filter other than {}. Double check your date, try specifying start/stop file numbers, etc.'
                            .format(flat_files[j], band))
                        return
                    if header['OBJECT'] == 'dome_lamp_on':
                        lights_on_files.append(flat_files[j])
                    elif header['OBJECT'] == 'dome_lamp_off':
                        lights_off_files.append(flat_files[j])
                    else:
                        print(
                            "ERROR: header['OBJECT'] for {} is not 'dome_lamp_on' or 'dome_lamp_off'. Double check your date, try specifying start/stop file numbers, etc."
                            .format(flat_files[j]))
    else:
        dome_flat_raw_path = pines_path / ('Calibrations/Flats/Domeflats/' +
                                           band + '/Raw')
        flat_files = natsort.natsorted(
            list(Path(dome_flat_raw_path).rglob(date + '*.fits')))
        #Find the lights-on and lights-off flat files.
        lights_on_files = []
        lights_off_files = []
        for j in range(len(flat_files)):
            header = fits.open(dome_flat_raw_path / flat_files[j])[0].header
            if header['FILTNME2'] != band:
                print(
                    'ERROR: {} taken in filter other than {}. Double check your date, try specifying start/stop file numbers, etc.'
                    .format(flat_files[j], band))
                return
            if header['OBJECT'] == 'dome_lamp_on':
                lights_on_files.append(flat_files[j])
            elif header['OBJECT'] == 'dome_lamp_off':
                lights_off_files.append(flat_files[j])
            else:
                print(
                    "ERROR: header['OBJECT'] for {} is not 'dome_lamp_on' or 'dome_lamp_off'. Double check your date, try specifying start/stop file numbers, etc."
                    .format(flat_files[j]))

    if len(lights_on_files) == 0 or len(lights_off_files) == 0:
        raise RuntimeError('No raw lights on/off flat files found with date ' +
                           date + ' in ' + band + ' band!')

    print('Found {} lights-on flat files.'.format(len(lights_on_files)))
    print('Found {} lights-off flat files.'.format(len(lights_off_files)))
    print('')
    time.sleep(1)

    #Make cube of the lights-on images.
    num_images = len(lights_on_files)
    print('Reading in ', num_images, ' lights-on flat images.')
    flat_lights_on_cube_raw = np.zeros(
        [len(lights_on_files), 1024,
         1024])  #Cube to hold the trimmed 1024x1024 lights-on images.
    print('')
    print('Flat frame information')
    print('-------------------------------------------------')
    print('ID   Mean               Stddev         Max    Min')
    print('-------------------------------------------------')
    lights_on_std_devs = np.zeros(num_images)
    for j in range(len(lights_on_files)):
        image_data = fits.open(dome_flat_raw_path /
                               lights_on_files[j])[0].data[0:1024, :]
        header = fits.open(dome_flat_raw_path / lights_on_files[j])[0].header
        if header['FILTNME2'] != band:
            print(
                'ERROR: {} taken in filter other than {}. Double check your date, try specifying start/stop file numbers, etc.'
                .format(lights_on_files[j], band))
            return
        flat_lights_on_cube_raw[
            j, :, :] = image_data  #The [0:1024, :] slice above already trimmed off the top two overscan rows.
        lights_on_std_devs[j] = np.std(
            image_data
        )  #Save standard deviation of flat images to identify flats with "ski jump" horizontal bars issue.
        print(
            str(j + 1) + '    ' + str(np.mean(image_data)) + '    ' +
            str(np.std(image_data)) + '    ' + str(np.amax(image_data)) +
            '    ' + str(np.amin(image_data)))

    #Identify bad lights-on flat images (usually have bright horizontal bands at the top/bottom of images.)
    vals, lo, hi = sigmaclip(lights_on_std_devs)  #sigmaclip returns the retained values and the lower/upper clip bounds.
    bad_locs = np.where((lights_on_std_devs < lo)
                        | (lights_on_std_devs > hi))[0]
    good_locs = np.where((lights_on_std_devs > lo)
                         & (lights_on_std_devs < hi))[0]
    if len(bad_locs) > 0:
        print(
            'Found {} bad lights-on flats: {}.\nExcluding from combination step.\n'
            .format(len(bad_locs),
                    np.array(lights_on_files)[bad_locs]))
        time.sleep(2)
    flat_lights_on_cube_raw = flat_lights_on_cube_raw[good_locs]

    #For each pixel, calculate the mean, median, and standard deviation "through the stack" of lights on flat images.
    lights_on_cube_shape = np.shape(flat_lights_on_cube_raw)
    master_flat_lights_on = np.zeros(
        (lights_on_cube_shape[1], lights_on_cube_shape[2]), dtype='float32')
    master_flat_lights_on_stddev = np.zeros(
        (lights_on_cube_shape[1], lights_on_cube_shape[2]), dtype='float32')
    print('')
    print('Combining the lights-on flats.')
    print('......')
    pbar = ProgressBar()
    for x in pbar(range(lights_on_cube_shape[1])):
        for y in range(lights_on_cube_shape[2]):
            through_stack = flat_lights_on_cube_raw[:, y, x]
            through_stack_median = np.nanmedian(through_stack)
            through_stack_stddev = np.nanstd(through_stack)

            #Keep only the values that are within clip_lvl sigma of the median.
            good_inds = np.where((abs(through_stack - through_stack_median) /
                                  through_stack_stddev <= clip_lvl))[0]

            #Calculate the sigma-clipped mean and sigma-clipped stddev using good_inds.
            s_c_mean = np.nanmean(through_stack[good_inds])
            s_c_stddev = np.nanstd(through_stack[good_inds])

            #Store the sigma-clipped mean as the master lights-on flat value for this pixel.
            master_flat_lights_on[y, x] = s_c_mean
            master_flat_lights_on_stddev[y, x] = s_c_stddev
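    #(A vectorized equivalent of this per-pixel clipping loop is sketched after this function.)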

    #Make cube of the lights-off images
    num_images = len(lights_off_files)
    print('Reading in ', num_images, ' lights-off flat images.')
    flat_lights_off_cube_raw = np.zeros([len(lights_off_files), 1024, 1024])
    lights_off_std_devs = np.zeros(num_images)
    print('')
    print('Flat frame information')
    print('-------------------------------------------------')
    print('ID   Mean               Stddev         Max    Min')
    print('-------------------------------------------------')
    for j in range(len(lights_off_files)):
        image_data = fits.open(dome_flat_raw_path /
                               lights_off_files[j])[0].data[0:1024, :]
        header = fits.open(dome_flat_raw_path / lights_off_files[j])[0].header
        if header['FILTNME2'] != band:
            print(
                'ERROR: {} taken in filter other than {}. Double check your date, try specifying start/stop file numbers, etc.'
                .format(lights_off_files[j], band))
            return
        flat_lights_off_cube_raw[
            j, :, :] = image_data  #The [0:1024, :] slice above already trimmed off the top two overscan rows.
        lights_off_std_devs[j] = np.std(
            image_data
        )  #Save standard deviation of flat images to identify flats with "ski jump" horizontal bars issue.
        print(
            str(j + 1) + '    ' + str(np.mean(image_data)) + '    ' +
            str(np.std(image_data)) + '    ' + str(np.amax(image_data)) +
            '    ' + str(np.amin(image_data)))

    #Identify bad lights-off flat images (usually have bright horizontal bands at the top/bottom of images.)
    vals, lo, hi = sigmaclip(lights_off_std_devs)
    bad_locs = np.where((lights_off_std_devs < lo)
                        | (lights_off_std_devs > hi))[0]
    good_locs = np.where((lights_off_std_devs > lo)
                         & (lights_off_std_devs < hi))[0]
    if len(bad_locs) > 0:
        print(
            'Found {} bad lights-off flats: {}.\nExcluding from combination step.\n'
            .format(len(bad_locs),
                    np.array(lights_off_files)[bad_locs]))
        time.sleep(2)
    flat_lights_off_cube_raw = flat_lights_off_cube_raw[good_locs]

    #For each pixel, calculate the mean, median, and standard deviation "through the stack" of lights off flat images.
    lights_off_cube_shape = np.shape(flat_lights_off_cube_raw)
    master_flat_lights_off = np.zeros(
        (lights_off_cube_shape[1], lights_off_cube_shape[2]), dtype='float32')
    master_flat_lights_off_stddev = np.zeros(
        (lights_off_cube_shape[1], lights_off_cube_shape[2]), dtype='float32')
    print('')
    print('Combining the lights-off flats.')
    print('......')
    pbar = ProgressBar()
    for x in pbar(range(lights_off_cube_shape[1])):
        for y in range(lights_off_cube_shape[2]):
            through_stack = flat_lights_off_cube_raw[:, y, x]
            through_stack_median = np.nanmedian(through_stack)
            through_stack_stddev = np.nanstd(through_stack)

            #Keep only the values that are within clip_lvl sigma of the median.
            good_inds = np.where((abs(through_stack - through_stack_median) /
                                  through_stack_stddev <= clip_lvl))[0]

            #Calculate the sigma-clipped mean and sigma-clipped stddev using good_inds.
            s_c_mean = np.nanmean(through_stack[good_inds])
            s_c_stddev = np.nanstd(through_stack[good_inds])

            #Store the sigma-clipped mean as the master lights-off flat value for this pixel.
            master_flat_lights_off[y, x] = s_c_mean
            master_flat_lights_off_stddev[y, x] = s_c_stddev

    #Create the master flat
    master_flat = master_flat_lights_on - master_flat_lights_off
    master_flat_norm = np.nanmedian(master_flat)
    master_flat = master_flat / master_flat_norm
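
    #Hedged sanity check (an addition, not in the original example): after
    #normalization the master flat's median should be ~1.0 by construction.
    print('Normalized master flat median: {:1.3f}'.format(
        np.nanmedian(master_flat)))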

    #Create the master error flat by adding the lights-on and lights-off stddevs in quadrature.
    master_flat_error = np.sqrt(master_flat_lights_on_stddev**2 +
                                master_flat_lights_off_stddev**2)
    master_flat_error = master_flat_error / master_flat_norm

    #Output the master flat to a FITS file.
    output_filename = pines_path / ('Calibrations/Flats/Domeflats/' + band +
                                    '/Master Flats/master_flat_' + band + '_' +
                                    date + '.fits')
    #Add some header keywords detailing the master flat creation process.
    hdu = fits.PrimaryHDU(master_flat)
    hdu.header['HIERARCH DATE CREATED'] = datetime.utcnow().strftime(
        '%Y-%m-%d') + 'T' + datetime.utcnow().strftime('%H:%M:%S')

    #Now save to a file on your local machine.
    print('')
    print('Writing the file to {}'.format(output_filename))
    #Overwrite any existing file of this name.
    hdu.writeto(output_filename, overwrite=True)
    print('Wrote to {}!'.format(output_filename))
    print('')

    if upload:
        print('Beginning upload process to pines.bu.edu...')
        print('Note, only PINES admins will be able to upload.')
        print('')
        sftp.chdir('/')
        sftp.chdir('data/calibrations/Flats/Domeflats/' + band)
        upload_name = 'master_flat_' + band + '_' + date + '.fits'
        #Overwrite any existing copy on the server.
        sftp.put(output_filename, upload_name)
        print(
            'Uploaded {} to pines.bu.edu:data/calibrations/Flats/Domeflats/{}/!'
            .format(upload_name, band))

    output_filename = pines_path / ('Calibrations/Flats/Domeflats/' + band +
                                    '/Master Flats Stddev/master_flat_stddev_'
                                    + band + '_' + date + '.fits')
    #Add some header keywords detailing the master flat error creation process.
    hdu = fits.PrimaryHDU(master_flat_error)
    hdu.header['HIERARCH DATE CREATED'] = datetime.utcnow().strftime(
        '%Y-%m-%d') + 'T' + datetime.utcnow().strftime('%H:%M:%S')

    #Now save to a file on your local machine.
    #Overwrite any existing file of this name.
    hdu.writeto(output_filename, overwrite=True)
    print('Wrote to {}!'.format(output_filename))

    if upload:
        print('Uploading to pines.bu.edu...')
        sftp.chdir('/')
        sftp.chdir('data/calibrations/Flats Stddev/Domeflats/' + band)
        upload_name = 'master_flat_stddev_' + band + '_' + date + '.fits'
        #Overwrite any existing copy on the server.
        sftp.put(output_filename, upload_name)
        print(
            'Uploaded {} to pines.bu.edu:data/calibrations/Flats Stddev/Domeflats/{}/!'
            .format(upload_name, band))
    print('')
    if delete_raw:
        files_to_delete = glob.glob(str(dome_flat_raw_path / '*.fits'))
        for j in range(len(files_to_delete)):
            os.remove(files_to_delete[j])

    print('dome_flat_field runtime: ', np.round((time.time() - t1) / 60, 1),
          ' minutes.')
    print('Done!')
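

#A minimal vectorized sketch (an addition, not part of the original example):
#the same clip_lvl-sigma clipped mean and stddev "through the stack" that the
#per-pixel loops in dome_flat_field compute, expressed with numpy
#broadcasting. 'cube' is a hypothetical (n_images, 1024, 1024) array like
#flat_lights_on_cube_raw; edge cases (e.g., zero per-pixel stddev) may differ
#slightly from the loop version.
import numpy as np


def sigma_clipped_stack_stats(cube, clip_lvl=3):
    med = np.nanmedian(cube, axis=0)  #Per-pixel median through the stack.
    std = np.nanstd(cube, axis=0)  #Per-pixel stddev through the stack.
    #NaN out values more than clip_lvl sigma from the per-pixel median.
    clipped = np.where(np.abs(cube - med) <= clip_lvl * std, cube, np.nan)
    #Sigma-clipped mean and stddev for every pixel at once.
    return np.nanmean(clipped, axis=0), np.nanstd(clipped, axis=0)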