Example #1
def get_master_synthetic_image(sftp, target_name):
    """Download a target's master synthetic image from the PINES server."""
    pines_path = pines_dir_check()
    sftp.chdir('/data/master_images/')
    synthetic_filename = target_name.replace(' ',
                                             '') + '_master_synthetic.fits'
    download_path = pines_path / ('Calibrations/Master Synthetic Images/' +
                                  synthetic_filename)
    sftp.get(synthetic_filename, download_path)
    print('Downloaded {} to {}!'.format(synthetic_filename, download_path))
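A minimal usage sketch (the credentials and the target name are hypothetical; the host and the 2MASS-style name format follow the other examples):

import pysftp

with pysftp.Connection('pines.bu.edu', username='user', password='pass') as sftp:
    get_master_synthetic_image(sftp, '2MASS J12345678+1234567')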
Example #2
def bad_shift_identifier(target, date, bad_shift_threshold=200):
    pines_path = pines_dir_check()
    log_path = pines_path / ('Logs/' + date + '_log.txt')
    log = pines_log_reader(log_path)
    target_inds = np.where(log['Target'] == target)[0]
    x_shifts = np.array(log['X shift'][target_inds], dtype=float)
    y_shifts = np.array(log['Y shift'][target_inds], dtype=float)

    #Flag any images whose measured shifts exceed the threshold (in pixels).
    bad_shift_inds = np.where((np.abs(x_shifts) > bad_shift_threshold)
                              | (np.abs(y_shifts) > bad_shift_threshold))[0]
    shift_flags = np.zeros(len(target_inds), dtype=int)
    shift_flags[bad_shift_inds] = 1

    return shift_flags
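A hedged usage sketch: the date string follows the YYYYMMDD log-name convention used throughout these examples, and the target name is hypothetical.

flags = bad_shift_identifier('2MASS J12345678+1234567', '20201003')
print('{} images flagged for bad shifts.'.format(flags.sum()))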
Example #3
def straggler_upload(sftp, targets, delete_raw=False):
    sftp.chdir('/data/reduced/mimir/')
    runs = sftp.listdir()
    pines_path = pines_dir_check()
    for i in range(len(targets)):
        target = targets[i]
        short_name = short_name_creator(target)
        raw_path = pines_path / ('Objects/' + short_name + '/raw/')
        red_path = pines_path / ('Objects/' + short_name + '/reduced/')
        raw_files = np.array(natsorted(glob(str(raw_path) + '/*.fits')))
        for j in range(len(raw_files)):
            sftp.chdir('/data/reduced/mimir/')
            red_file = str(red_path) + '/' + raw_files[j].split('/')[-1].split(
                '.fits')[0] + '_red.fits'
            night = red_file.split('/')[-1].split('.')[0]
            run_guess = night[0:6]
            ind = np.where(np.array(runs) == run_guess)[0][0]
            #Check the guessed run first, then its neighbors. Use a separate
            #list so the master run list isn't clobbered between iterations.
            if ind + 1 == len(runs):
                inds = np.arange(ind - 1, ind + 1)
                candidate_runs = np.array(runs)[inds]
                candidate_runs = [candidate_runs[1], candidate_runs[0]]
            else:
                inds = np.arange(ind - 1, ind + 2)
                candidate_runs = np.array(runs)[inds]
                candidate_runs = [
                    candidate_runs[1], candidate_runs[0], candidate_runs[2]
                ]

            found = False
            for k in range(len(candidate_runs)):
                sftp.chdir(candidate_runs[k])
                if night in sftp.listdir():
                    print('Uploading {} to {}.'.format(
                        red_file.split('/')[-1], sftp.pwd + '/' + night))
                    sftp.chdir(night)
                    sftp.put(red_file, red_file.split('/')[-1])
                    found = True
                else:
                    sftp.chdir('..')
                if found:
                    break
            if not found:
                print('No night directory found for {}.'.format(night))
Example #4
def output_wrangler(target):
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    object_path = pines_path / ('Objects/' + short_name)
    sources_path = object_path / ('sources')
    aper_phot_path = object_path / ('aper_phot')
    analysis_path = object_path / ('analysis')
    output_path = object_path / ('output')

    #Copy source detect image.
    src = sources_path / ('target_and_refs.png')
    dest = output_path / ('target_and_refs.png')
    shutil.copyfile(src, dest)

    #Copy image centroids.
    src = sources_path / ('target_and_references_centroids.csv')
    dest = output_path / ('target_and_references_centroids.csv')
    shutil.copyfile(src, dest)

    #Copy best aperture raw photometry.
    best_ap_path = analysis_path / ('optimal_aperture.txt')
    with open(best_ap_path, 'r') as f:
        best_ap = f.readlines()[0].strip()  #Strip any trailing newline.
    src = aper_phot_path / (short_name + '_aper_phot_' + best_ap +
                            '_pix_radius.csv')
    dest = output_path / (short_name + '_aper_phot_' + best_ap +
                          '_pix_radius.csv')
    shutil.copyfile(src, dest)

    #Copy best aperture weighted lightcurve photometry.
    src = analysis_path / ('aper_phot_analysis/' + best_ap + '/' + short_name +
                           '_weighted_lc_aper_phot_' + best_ap + '_pix.csv')
    dest = output_path / (short_name + '_weighted_lc_aper_phot_' + best_ap +
                          '_pix.csv')
    shutil.copyfile(src, dest)

    return
Example #5
def align_and_stack_images(target):
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    reduced_path = pines_path / ('Objects/' + short_name + '/reduced')
    log_path = pines_path / 'Logs/'

    reduced_files = np.array(
        natsorted([x for x in reduced_path.glob('*red.fits')]))
    dates = np.array(list(set([i.name.split('.')[0] for i in reduced_files])))
    log_names = np.array(natsorted([i + '_log.txt' for i in dates]))

    aligned_files = []
    for i in range(len(log_names)):
        log = log_path / log_names[i]
        log_df = pat.utils.pines_log_reader(log)
        #Keep files whose measured x/y shifts are within half a pixel.
        locs = np.where((abs(log_df['X shift']) < 0.5)
                        & (abs(log_df['Y shift']) < 0.5))[0]
        good_files = np.array(log_df['Filename'][locs])
        good_files = [i.split('.fits')[0] + '_red.fits' for i in good_files]
        aligned_files.extend(good_files)
    return aligned_files
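A hedged usage sketch (hypothetical target); the function returns the list of well-aligned reduced filenames:

aligned = align_and_stack_images('2MASS J12345678+1234567')
print('Found {} well-aligned reduced frames.'.format(len(aligned)))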
Example #6
def absolute_image_position_plot(target, centroided_sources):
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    #Get plot style parameters.
    title_size, axis_title_size, axis_ticks_font_size, legend_font_size = plot_style()

    #Get list of source names in the centroid output.
    source_names = get_source_names(centroided_sources)
    centroided_sources.columns = centroided_sources.keys().str.strip()

    #Get times from the centroid output and split them by night.
    times_full = np.array(centroided_sources['Time (JD UTC)'])
    night_inds = night_splitter(times_full)
    num_nights = len(night_inds)
    times_nights = [times_full[night_inds[i]] for i in range(num_nights)]
    standard_x = standard_x_range(times_nights)

    source = source_names[0]
    fig, ax = plt.subplots(nrows=2,
                           ncols=num_nights,
                           figsize=(17, 9),
                           sharex='col',
                           sharey='row')
    plt.subplots_adjust(left=0.07,
                        hspace=0.05,
                        wspace=0.05,
                        top=0.92,
                        bottom=0.17)
    for j in range(num_nights):
        if j == 0:
            ax[0, j].set_ylabel('Image X', fontsize=axis_title_size)
            ax[1, j].set_ylabel('Image Y', fontsize=axis_title_size)

        inds = night_inds[j]
        times = times_nights[j]
        absolute_x = np.array(centroided_sources[source + ' Image X'][inds],
                              dtype='float')
        absolute_y = np.array(centroided_sources[source + ' Image Y'][inds],
                              dtype='float')
        ax[0, j].plot(times,
                      absolute_x,
                      marker='.',
                      linestyle='',
                      alpha=0.3,
                      color='tab:blue',
                      label='Raw x')
        ax[1, j].plot(times,
                      absolute_y,
                      marker='.',
                      linestyle='',
                      alpha=0.3,
                      color='tab:orange',
                      label='Raw y')

        #bin
        block_inds = block_splitter(times)
        block_times = np.zeros(len(block_inds))
        block_x = np.zeros(len(block_inds))
        block_x_err = np.zeros(len(block_inds))
        block_y = np.zeros(len(block_inds))
        block_y_err = np.zeros(len(block_inds))
        for k in range(len(block_inds)):
            try:
                block_times[k] = np.nanmean(times[block_inds[k]])
                block_x[k] = np.nanmean(absolute_x[block_inds[k]])
                block_x_err[k] = np.nanstd(
                    absolute_x[block_inds[k]]) / np.sqrt(
                        len(absolute_x[block_inds[k]]))
                block_y[k] = np.nanmean(absolute_y[block_inds[k]])
                block_y_err[k] = np.nanstd(
                    absolute_y[block_inds[k]]) / np.sqrt(
                        len(absolute_y[block_inds[k]]))
            except IndexError:
                #Skip empty blocks rather than dropping into the debugger.
                block_times[k] = np.nan
                block_x[k] = block_x_err[k] = np.nan
                block_y[k] = block_y_err[k] = np.nan

        ax[0, j].errorbar(block_times,
                          block_x,
                          block_x_err,
                          marker='o',
                          linestyle='',
                          color='tab:blue',
                          ms=8,
                          mfc='none',
                          mew=2,
                          label='Bin x')
        ax[1, j].errorbar(block_times,
                          block_y,
                          block_y_err,
                          marker='o',
                          linestyle='',
                          color='tab:orange',
                          ms=8,
                          mfc='none',
                          mew=2,
                          label='Bin y')

        ax[0, j].tick_params(labelsize=axis_ticks_font_size)
        ax[1, j].tick_params(labelsize=axis_ticks_font_size)

        ax[0, j].grid(alpha=0.2)
        ax[1, j].grid(alpha=0.2)
        ax[1, j].set_xlabel('Time (JD UTC)', fontsize=axis_title_size)

        if j == num_nights - 1:
            ax[0, j].legend(bbox_to_anchor=(1.29, 1),
                            fontsize=legend_font_size)
            ax[1, j].legend(bbox_to_anchor=(1.29, 1),
                            fontsize=legend_font_size)

        #Give each night's panel the same x-axis span.
        ax[0, j].set_xlim(
            np.mean(times) - standard_x / 2,
            np.mean(times) + standard_x / 2)
        ax[1, j].set_xlim(
            np.mean(times) - standard_x / 2,
            np.mean(times) + standard_x / 2)

    plt.suptitle(source + ' Image Centroid Positions', fontsize=title_size)

    output_filename = pines_path / ('Objects/' + short_name +
                                    '/analysis/diagnostic_plots/' + source +
                                    '_image_positions.png')
    plt.savefig(output_filename, dpi=300)

    return
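A hedged usage sketch: the centroid CSV name mirrors the one copied in Example #4, and pines_log_reader is assumed to parse it into a DataFrame as in the other examples.

target = '2MASS J12345678+1234567'
short_name = short_name_creator(target)
centroid_path = pines_dir_check() / ('Objects/' + short_name +
                                     '/sources/target_and_references_centroids.csv')
centroided_sources = pines_log_reader(centroid_path)
absolute_image_position_plot(target, centroided_sources)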
Example #7
def master_synthetic_image_creator(target,
                                   image_name,
                                   seeing=2.5,
                                   sigma_above_bg=5):
    '''Authors:
            Patrick Tamburo, Boston University, February 2021
        Purpose:
            Creates a master synthetic image for a PINES target by detecting sources in a reduced image of the field.
        Inputs:
            target (str): The target's full 2MASS name.
            image_name (str): The name of the reduced file (e.g., 20210204.420_red.fits).
            seeing (float): The FWHM seeing of the image in arcsec. By default, 2.5". 
            sigma_above_bg (float): The sigma above background used to rule in sources in daostarfinder. By default, 5.
        Outputs:
            Writes out a master synthetic image to PINES_analysis_toolkit/Calibrations/Master Synthetic Images/.
        TODO:
            Write the filename used to create the master image to the master image header. 
    '''
    def mimir_source_finder(image,
                            sigma_above_bg,
                            fwhm,
                            exclude_lower_left=False):
        """Find sources in Mimir images."""

        np.seterr(all='ignore')  #Ignore invalids (i.e. divide by zeros)

        #Find stars in the master image.
        avg, med, stddev = sigma_clipped_stats(
            image, sigma=3.0, maxiters=3)  #Previously maxiters = 5!
        daofind = DAOStarFinder(fwhm=fwhm,
                                threshold=sigma_above_bg * stddev,
                                sky=med,
                                ratio=0.8)
        new_sources = daofind(image)
        x_centroids = new_sources['xcentroid']
        y_centroids = new_sources['ycentroid']
        sharpness = new_sources['sharpness']
        fluxes = new_sources['flux']
        peaks = new_sources['peak']

        #Cut sources that are found within 20 pix of the edges.
        use_x = np.where((x_centroids > 20) & (x_centroids < 1004))[0]
        x_centroids = x_centroids[use_x]
        y_centroids = y_centroids[use_x]
        sharpness = sharpness[use_x]
        fluxes = fluxes[use_x]
        peaks = peaks[use_x]
        use_y = np.where((y_centroids > 20) & (y_centroids < 1004))[0]
        x_centroids = x_centroids[use_y]
        y_centroids = y_centroids[use_y]
        sharpness = sharpness[use_y]
        fluxes = fluxes[use_y]
        peaks = peaks[use_y]

        #Also cut using sharpness, this seems to eliminate a lot of false detections.
        use_sharp = np.where(sharpness > 0.5)[0]
        x_centroids = x_centroids[use_sharp]
        y_centroids = y_centroids[use_sharp]
        sharpness = sharpness[use_sharp]
        fluxes = fluxes[use_sharp]
        peaks = peaks[use_sharp]

        if exclude_lower_left:
            #Cut sources in the lower left, if bars are present.
            use_ll = np.where((x_centroids > 512) | (y_centroids > 512))[0]
            x_centroids = x_centroids[use_ll]
            y_centroids = y_centroids[use_ll]
            sharpness = sharpness[use_ll]
            fluxes = fluxes[use_ll]
            peaks = peaks[use_ll]

        #Cut targets whose y centroids are near y = 512. These are usually bad.
        use_512 = np.where(
            np.logical_or((y_centroids < 510), (y_centroids > 514)))[0]
        x_centroids = x_centroids[use_512]
        y_centroids = y_centroids[use_512]
        sharpness = sharpness[use_512]
        fluxes = fluxes[use_512]
        peaks = peaks[use_512]

        #Cut sources with negative/saturated peaks
        use_peaks = np.where((peaks > 30) & (peaks < 3000))[0]
        x_centroids = x_centroids[use_peaks]
        y_centroids = y_centroids[use_peaks]
        sharpness = sharpness[use_peaks]
        fluxes = fluxes[use_peaks]
        peaks = peaks[use_peaks]

        #Do quick photometry on the remaining sources.
        positions = [(x_centroids[i], y_centroids[i])
                     for i in range(len(x_centroids))]
        apertures = CircularAperture(positions, r=4)
        phot_table = aperture_photometry(image - med, apertures)

        #Cut based on brightness.
        phot_table.sort('aperture_sum')
        cutoff = 1 * std * np.pi * 4**2
        bad_source_locs = np.where(phot_table['aperture_sum'] < cutoff)
        phot_table.remove_rows(bad_source_locs)

        x_centroids = phot_table['xcenter'].value
        y_centroids = phot_table['ycenter'].value

        return (x_centroids, y_centroids)

    def synthetic_image_maker(x_centroids, y_centroids, fwhm):
        #Construct synthetic images from centroid/flux data.
        synthetic_image = np.zeros((1024, 1024))
        sigma = fwhm / 2.355
        for i in range(len(x_centroids)):
            #Cut out little boxes around each source and add in Gaussian representations. This saves time.
            int_centroid_x = int(np.round(x_centroids[i]))
            int_centroid_y = int(np.round(y_centroids[i]))
            y_cut, x_cut = np.mgrid[int_centroid_y - 10:int_centroid_y + 10,
                                    int_centroid_x - 10:int_centroid_x + 10]
            dist = np.sqrt((x_cut - x_centroids[i])**2 +
                           (y_cut - y_centroids[i])**2)
            #Standard circular 2D Gaussian: exp(-d**2 / (2 * sigma**2)).
            synthetic_image[y_cut, x_cut] += np.exp(-dist**2 /
                                                    (2 * sigma**2))
        return (synthetic_image)

    target = target.replace(' ', '')
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    master_synthetic_path = pines_path / (
        'Calibrations/Master Synthetic Images/' + target +
        '_master_synthetic.fits')
    image_path = pines_path / ('Objects/' + short_name + '/reduced/' +
                               image_name)
    plt.ion()

    seeing = float(seeing)
    daostarfinder_fwhm = seeing * 2.355 / 0.579  #0.579"/pix is the Mimir plate scale.

    #Open the image and calibration files.
    header = fits.open(image_path)[0].header
    image = fits.open(image_path)[0].data

    #Interpolate over bad pixels
    kernel = Gaussian2DKernel(x_stddev=1)
    image = interpolate_replace_nans(image, kernel)

    #Do a simple 2d background model.
    box_size = 32
    sigma_clip = SigmaClip(sigma=3.)
    bkg_estimator = MedianBackground()
    bkg = Background2D(image, (box_size, box_size),
                       filter_size=(3, 3),
                       sigma_clip=sigma_clip,
                       bkg_estimator=bkg_estimator)
    image = image - bkg.background

    avg, med, std = sigma_clipped_stats(image)

    #Find sources in the image.
    (x_centroids,
     y_centroids) = mimir_source_finder(image,
                                        sigma_above_bg=sigma_above_bg,
                                        fwhm=daostarfinder_fwhm)

    #Plot the field with detected sources.
    qp(image)
    plt.plot(x_centroids, y_centroids, 'rx')
    for i in range(len(x_centroids)):
        plt.text(x_centroids[i] + 8,
                 y_centroids[i] + 8,
                 str(i),
                 color='r',
                 fontsize=14)
    plt.title(
        'Inspect to make sure stars were found!\nO for magnification tool, R to reset view'
    )
    plt.tight_layout()
    plt.show()

    print('')
    print('')
    print('')

    #Prompt the user to remove any false detections.
    ids = input(
        'Enter ids of sources to be removed separated by commas (i.e., 4,18,22). If none to remove, hit enter. To break, ctrl + D. '
    )
    if ids != '':
        ids_to_eliminate = [int(i) for i in ids.split(',')]
        ids_to_keep = [
            i for i in range(len(x_centroids)) if i not in ids_to_eliminate
        ]
    else:
        ids_to_keep = list(range(len(x_centroids)))
    plt.clf()
    plt.imshow(image, origin='lower', vmin=med, vmax=med + 5 * std)
    plt.plot(x_centroids[ids_to_keep], y_centroids[ids_to_keep], 'rx')
    for i in range(len(x_centroids[ids_to_keep])):
        plt.text(x_centroids[ids_to_keep][i] + 8,
                 y_centroids[ids_to_keep][i] + 8,
                 str(i),
                 color='r')

    #Create the synthetic image using the accepted sources.
    synthetic_image = synthetic_image_maker(x_centroids[ids_to_keep],
                                            y_centroids[ids_to_keep], 8)
    plt.figure(figsize=(9, 7))
    plt.imshow(synthetic_image, origin='lower')
    plt.title('Synthetic image')
    plt.show()

    print('')
    #Now write to a master synthetic image.fits file.
    hdu = fits.PrimaryHDU(synthetic_image)
    hdu.writeto(master_synthetic_path, overwrite=True)
    print('Wrote master synthetic image to {}.'.format(master_synthetic_path))
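A quick standalone check of the synthetic-image kernel used above: each source is painted as a circular 2D Gaussian, exp(-d**2 / (2 * sigma**2)) with sigma = FWHM / 2.355, so the profile falls to half its peak at a radius of FWHM / 2. A self-contained sketch (values hypothetical):

import numpy as np

fwhm = 8.0
sigma = fwhm / 2.355
y, x = np.mgrid[0:21, 0:21]
dist = np.sqrt((x - 10.0)**2 + (y - 10.0)**2)
kernel = np.exp(-dist**2 / (2 * sigma**2))
print(kernel[10, 10])  #1.0 at the centroid.
print(kernel[10, 14])  #~0.5 at d = 4 = FWHM / 2.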
Example #8
def corr_all_sources_plot(target):
    print('Generating corrected flux plots for all sources...\n')
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    analysis_path = pines_path / ('Objects/' + short_name + '/analysis/')
    photometry_path = pines_path / ('Objects/' + short_name + '/aper_phot/')

    #Grab the data for the best aperture.
    if os.path.exists(analysis_path / ('optimal_aperture.txt')):
        with open(analysis_path / ('optimal_aperture.txt'), 'r') as f:
            best_ap = f.readlines()[0].split(':  ')[1].split('\n')[0]
            phot_type = best_ap.split('_')[1]
            if phot_type == 'fixed':
                s = 'r'
            elif phot_type == 'variable':
                s = 'f'
    else:
        raise RuntimeError(
            'No optimal_aperture.txt file exists for {} in {}.'.format(
                target, analysis_path))

    filename = short_name.replace(
        ' ', '') + '_' + phot_type + '_aper_phot_' + s + '=' + best_ap.split(
            '_')[0] + '_nightly_weighted_lc.csv'
    best_phot_path = analysis_path / ('aper_phot_analysis/' + best_ap + '/')
    output_path = best_phot_path / ('corr_ref_plots/')
    if not os.path.exists(output_path):
        os.mkdir(output_path)

    data = pines_log_reader(best_phot_path / filename)
    ref_names = get_source_names(data)[1:]
    num_refs = len(ref_names)

    times = np.array(data['Time BJD TDB'])
    night_inds = night_splitter(times)
    num_nights = len(night_inds)

    cmap = plt.get_cmap('viridis')
    for i in range(num_refs + 1):
        fig, ax = plt.subplots(nrows=1,
                               ncols=num_nights,
                               figsize=(17, 5),
                               sharey=True)
        plt.subplots_adjust(left=0.07, wspace=0.05, top=0.92, bottom=0.17)

        if i == 0:
            color = cmap(0)
            flux = np.array(data[short_name + ' Corrected Flux'],
                            dtype='float64')
            flux_err = np.array(data[short_name + ' Corrected Flux Error'],
                                dtype='float64')
            title = short_name
            output_name = short_name + '_corrected_flux.png'

        else:
            color = cmap(95)
            ref_name = ref_names[i - 1]
            flux = np.array(data[ref_name + ' Corrected Flux'],
                            dtype='float64')
            flux_err = np.array(data[ref_name + ' Corrected Flux Error'],
                                dtype='float64')
            if i < 10:
                num = '0' + str(i)
            else:
                num = str(i)
            output_name = 'reference_' + num + '_corrected_flux.png'

        for j in range(num_nights):
            if i != 0:
                weight = np.array(data[ref_name +
                                       ' ALC Weight'])[night_inds[j]][0]
                title = ref_name.replace(
                    'erence', '.') + ', weight = {:1.3f}'.format(weight)

            if j == 0:
                ax[j].set_ylabel('Normalized Flux', fontsize=20)

            inds = night_inds[j]

            block_inds = block_splitter(times[inds])
            binned_time = []
            binned_flux = []
            binned_err = []
            for k in range(len(block_inds)):
                binned_time.append(np.nanmean(times[inds][block_inds[k]]))
                binned_flux.append(np.nanmean(flux[inds][block_inds[k]]))
                binned_err.append(
                    np.nanstd(flux[inds][block_inds[k]]) /
                    np.sqrt(len(block_inds[k])))

            ax[j].plot(times[inds],
                       flux[inds],
                       color=color,
                       linestyle='',
                       marker='.',
                       alpha=0.25)
            ax[j].errorbar(binned_time,
                           binned_flux,
                           binned_err,
                           color=color,
                           linestyle='',
                           marker='o',
                           ms=10,
                           mfc='none',
                           mew=2)
            ax[j].set_xlabel('Time (BJD$_{TDB}$)', fontsize=20)
            ax[j].tick_params(labelsize=16)
            ax[j].axhline(1, color='k', alpha=0.7, lw=1, zorder=0)
            ax[j].grid(alpha=0.2)
            ax[j].set_title(title, fontsize=20, color=color)
            ax[j].set_ylim(0.9, 1.1)

        plt.savefig(output_path / output_name, dpi=300)
        plt.close()
Example #9
def average_seeing(log_path):
    #NOTE: this example arrived truncated; the header and the log-reading
    #lines below are a hedged reconstruction ('X seeing' column name is an
    #assumption).
    try:
        log = pines_log_reader(log_path)
        seeing = np.array(log['X seeing'], dtype=float)
        if len(seeing) > 0:
            seeing = seeing[np.where(~np.isnan(seeing))]
            #Keep only physically plausible seeing values (in arcsec).
            seeing = seeing[np.where((seeing > 1.2) & (seeing < 7.0))[0]]
            mean_seeing = np.nanmean(seeing)
            std_seeing = np.nanstd(seeing)
            print('Average seeing for {}: {:1.1f} +/- {:1.1f}"'.format(
                log_path.split('/')[-1].split('_')[0], mean_seeing,
                std_seeing))
            return mean_seeing
        return np.nan
    except:
        print('{}: No seeing measurements, inspect manually.'.format(
            log_path.split('/')[-1].split('_')[0]))
        return np.nan


if __name__ == '__main__':
    pines_dir = pines_dir_check()
    log_path = pines_dir / 'Logs'
    logs = np.array(natsorted(glob(str(log_path / '*.txt'))))

    #Remove one bad log
    bad_loc = np.where(
        np.array([logs[i].split('/')[-1]
                  for i in range(len(logs))]) == '20201003_log.txt')[0][0]
    logs = np.delete(logs, bad_loc)
    #logs = logs[6:-1] #Ignore first set of logs, ignore master_log

    seeings = np.zeros(len(logs))
    for i in range(len(logs)):
        log_path = logs[i]
        seeings[i] = average_seeing(log_path)
Example #10
def get_reduced_science_files(sftp, target_name):
    t1 = time.time()

    #Get the user's pines_analysis_toolkit path
    pines_path = pines_dir_check()

    #Get the target's short name and set up a data directory, if necessary.
    short_name = short_name_creator(target_name)

    if not os.path.exists(pines_path / ('Objects/' + short_name)):
        object_directory_creator(pines_path, short_name)

    reduced_data_path = pines_path / ('Objects/' + short_name + '/reduced/')
    dark_path = pines_path / ('Calibrations/Darks')
    flats_path = pines_path / ('Calibrations/Flats/Domeflats')

    #Grab an up-to-date copy of the master log, which will be used to find images.
    get_master_log(sftp, pines_path)

    #Let's grab all of the available calibration data on pines.bu.edu.
    get_calibrations(sftp, pines_path)
    print('Calibrations up to date!')
    time.sleep(2)

    #Read in the master target list and find images of the requested target.
    df = pines_log_reader(pines_path / ('Logs/master_log.txt'))
    targ_inds = np.where(np.array(df['Target']) == target_name)[0]
    file_names = np.array(df['Filename'])[targ_inds]
    print('')

    print('Searching pines.bu.edu for reduced science files for {}.'.format(
        target_name))
    print('')

    #Get list of dates that data are from, in chronological order.
    dates = [
        int(i) for i in list(
            set([
                str.split(file_names[i], '.')[0]
                for i in range(len(file_names))
            ]))
    ]
    dates = np.array(dates)[np.argsort(dates)]
    comp_dates = np.array(
        [int(file_names[i].split('.')[0]) for i in range(len(file_names))])
    print('Found ', len(file_names), ' raw files for ', target_name, ' on ',
          len(dates), ' dates.')

    date_holder = [[] for x in range(len(dates))]
    for i in range(len(dates)):
        date = dates[i]
        print(date, ': ', len(np.where(comp_dates == date)[0]), ' files.')
        date_holder[i].extend(file_names[np.where(comp_dates == date)[0]])
        time.sleep(0.1)

    dates = [str(i) for i in dates]
    #Now download the identified data.
    sftp.chdir('/data/reduced/mimir')
    run_dirs = sftp.listdir()
    file_num = 1
    for i in range(len(run_dirs)):
        sftp.chdir(run_dirs[i])
        night_dirs = sftp.listdir()
        for j in range(len(night_dirs)):
            night_check = night_dirs[j]
            if night_check in dates:
                sftp.chdir(night_check)
                date_holder_ind = np.where(
                    np.array(dates) == night_check)[0][0]
                files = date_holder[date_holder_ind]
                files_in_path = sftp.listdir()
                for k in range(len(files)):
                    download_filename = files[k].split(
                        '.fits')[0] + '_red.fits'
                    if not (reduced_data_path / download_filename).exists():
                        if download_filename in files_in_path:
                            print('Downloading to {}, {} of {}'.format(
                                reduced_data_path / download_filename,
                                file_num, len(file_names)))
                            sftp.get(download_filename,
                                     reduced_data_path / download_filename)
                        else:
                            print(
                                'A reduced image does not yet exist for {}, ask an administrator to make one!'
                                .format(files[k]))
                    else:
                        print('{} already in {}, skipping.'.format(
                            download_filename, reduced_data_path))
                    file_num += 1
                sftp.chdir('..')
        sftp.chdir('..')

    print('')
    #Now grab the logs.
    sftp.chdir('/data/logs')
    for i in range(len(dates)):
        log_name = dates[i] + '_log.txt'
        print('Downloading {} to {}.'.format(log_name, pines_path /
                                             ('Logs/' + log_name)))
        sftp.get(log_name, pines_path / ('Logs/' + log_name))


    print('')
    print('get_reduced_science_files runtime: ',
          np.round((time.time() - t1) / 60, 1), ' minutes.')
    print('Done!')
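A small illustration of the filename-to-date bookkeeping above (filenames are hypothetical but follow the YYYYMMDD.NNN.fits pattern used throughout):

import numpy as np

file_names = np.array(['20210204.420.fits', '20210204.421.fits', '20210205.001.fits'])
dates = sorted(set(int(f.split('.')[0]) for f in file_names))
comp_dates = np.array([int(f.split('.')[0]) for f in file_names])
for date in dates:
    print(date, ': ', len(np.where(comp_dates == date)[0]), ' files.')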
Example #11
def dome_flat_field(date,
                    band,
                    lights_on_start=0,
                    lights_on_stop=0,
                    lights_off_start=0,
                    lights_off_stop=0,
                    upload=False,
                    delete_raw=False,
                    sftp=''):
    clip_lvl = 3  #The value to use for sigma clipping.
    np.seterr(
        invalid='ignore'
    )  #Suppress some warnings we don't care about in median combining.
    plt.ion()  #Turn on interactive plotting.
    pines_path = pines_dir_check()

    t1 = time.time()
    #If an sftp connection to the PINES server was passed, download the flat data.
    if isinstance(sftp, pysftp.Connection):
        sftp.chdir('/')
        sftp.chdir('data/raw/mimir')
        run_list = sftp.listdir()
        data_path = ''  #Initialize to check that it gets filled.
        for i in range(len(run_list)):
            sftp.chdir(run_list[i])
            date_list = sftp.listdir()
            if date in date_list:
                data_path = sftp.getcwd()
                print('{} directory found in pines.bu.edu:{}/'.format(
                    date, data_path))
                print('')
                sftp.chdir(date)
                break
            sftp.chdir('..')

        if data_path == '':
            print(
                'ERROR: {} not found in any run on pines.bu.edu:data/raw/mimir/.'
                .format(date))
            return

        else:
            #If the file start/stop numbers are specfied, grab those files.
            if (lights_on_stop != 0) or (lights_off_stop != 0):
                files_in_dir = sftp.listdir()
                on_flat_filenums = np.arange(lights_on_start,
                                             lights_on_stop + 1,
                                             step=1)
                off_flat_filenums = np.arange(lights_off_start,
                                              lights_off_stop + 1,
                                              step=1)
                flat_files = []
                lights_on_files = []
                lights_off_files = []

                #Add the lights-on flats to the file list.
                for i in range(len(on_flat_filenums)):
                    file_num = on_flat_filenums[i]
                    #Generate the zero-padded filename (e.g., 20210204.007.fits).
                    file_name = date + '.' + str(file_num).zfill(3) + '.fits'
                    #Check if the file name is in the directory, and if so, append it to the list of flat files.
                    if file_name in files_in_dir:
                        flat_files.append(file_name)
                        lights_on_files.append(file_name)
                    else:
                        print('{} not found in directory, skipping.'.format(
                            file_name))

                #Do the same for the lights-off files.
                for i in range(len(off_flat_filenums)):
                    file_num = off_flat_filenums[i]
                    #Generate the zero-padded filename.
                    file_name = date + '.' + str(file_num).zfill(3) + '.fits'
                    #Check if the file name is in the directory, and if so, append it to the list of flat files.
                    if file_name in files_in_dir:
                        flat_files.append(file_name)
                        lights_off_files.append(file_name)
                    else:
                        print('{} not found in directory, skipping.'.format(
                            file_name))
            #Otherwise, find the files automatically using the night's log.
            else:
                log_path = pines_path / 'Logs'
                #Check if you already have the log for this date, if not, download it.
                #Download from the /data/logs/ directory on PINES.
                if not (log_path / (date + '_log.txt')).exists():
                    print('Downloading {}_log.txt to {}'.format(
                        date, log_path))
                    sftp.get('/data/logs/' + date + '_log.txt',
                             log_path / (date + '_log.txt'))

                log = pines_log_reader(log_path / (date + '_log.txt'))

                #Identify flat files.
                flat_inds = np.where((log['Target'] == 'Flat')
                                     & (log['Filename'] != 'test.fits')
                                     & (log['Filt.'] == band))[0]
                flat_files = natsort.natsorted(
                    list(set(log['Filename'][flat_inds]))
                )  #Set guarantees we only grab the unique files that have been identified as flats, in case the log bugged out.

            print('Found {} flat files.'.format(len(flat_files)))
            print('')

            #Download data to the appropriate Calibrations/Flats/Domeflats/band/Raw/ directory.
            dome_flat_raw_path = pines_path / (
                'Calibrations/Flats/Domeflats/' + band + '/Raw')
            for j in range(len(flat_files)):
                if not (dome_flat_raw_path / flat_files[j]).exists():
                    sftp.get(flat_files[j],
                             (dome_flat_raw_path / flat_files[j]))
                    print('Downloading {} to {}, {} of {}.'.format(
                        flat_files[j], dome_flat_raw_path, j + 1,
                        len(flat_files)))
                else:
                    print('{} already in {}, skipping download.'.format(
                        flat_files[j], dome_flat_raw_path))
            print('')

            if (lights_on_stop == 0) and (lights_off_stop == 0):
                #Find the lights-on and lights-off flat files.
                lights_on_files = []
                lights_off_files = []
                for j in range(len(flat_files)):
                    header = fits.open(dome_flat_raw_path /
                                       flat_files[j])[0].header
                    if header['FILTNME2'] != band:
                        print(
                            'ERROR: {} taken in filter other than {}. Double check your date, try specifying start/stop file numbers, etc.'
                            .format(flat_files[j], band))
                        return
                    if header['OBJECT'] == 'dome_lamp_on':
                        lights_on_files.append(flat_files[j])
                    elif header['OBJECT'] == 'dome_lamp_off':
                        lights_off_files.append(flat_files[j])
                    else:
                        print(
                            "ERROR: header['OBJECT'] for {} is not 'dome_lamp_on' or 'dome_lamp_off'. Double check your date, try specifying start/stop file numbers, etc."
                            .format(flat_files[j]))
    else:
        dome_flat_raw_path = pines_path / ('Calibrations/Flats/Domeflats/' +
                                           band + '/Raw')
        flat_files = natsort.natsorted(
            list(Path(dome_flat_raw_path).rglob(date + '*.fits')))
        #Find the lights-on and lights-off flat files.
        lights_on_files = []
        lights_off_files = []
        for j in range(len(flat_files)):
            header = fits.open(dome_flat_raw_path / flat_files[j])[0].header
            if header['FILTNME2'] != band:
                print(
                    'ERROR: {} taken in filter other than {}. Double check your date, try specifying start/stop file numbers, etc.'
                    .format(flat_files[j], band))
                return
            if header['OBJECT'] == 'dome_lamp_on':
                lights_on_files.append(flat_files[j])
            elif header['OBJECT'] == 'dome_lamp_off':
                lights_off_files.append(flat_files[j])
            else:
                print(
                    "ERROR: header['OBJECT'] for {} is not 'dome_lamp_on' or 'dome_lamp_off'. Double check your date, try specifying start/stop file numbers, etc."
                    .format(flat_files[j]))

    if len(lights_on_files) == 0 or len(lights_off_files) == 0:
        raise RuntimeError('No raw lights on/off flat files found with date ' +
                           date + ' in ' + band + ' band!')

    print('Found {} lights-on flat files.'.format(len(lights_on_files)))
    print('Found {} lights-off flat files.'.format(len(lights_off_files)))
    print('')
    time.sleep(1)

    #Make cube of the lights-on images.
    num_images = len(lights_on_files)
    print('Reading in ', num_images, ' lights-on flat images.')
    flat_lights_on_cube_raw = np.zeros(
        [len(lights_on_files), 1024, 1024])  #Cube to hold the lights-on images.
    print('')
    print('Flat frame information')
    print('-------------------------------------------------')
    print('ID   Mean               Stddev         Max    Min')
    print('-------------------------------------------------')
    lights_on_std_devs = np.zeros(num_images)
    for j in range(len(lights_on_files)):
        image_data = fits.open(dome_flat_raw_path /
                               lights_on_files[j])[0].data[0:1024, :]
        header = fits.open(dome_flat_raw_path / lights_on_files[j])[0].header
        if header['FILTNME2'] != band:
            print(
                'ERROR: {} taken in filter other than {}. Double check your date, try specifying start/stop file numbers, etc.'
                .format(lights_on_files[j], band))
            return
        flat_lights_on_cube_raw[
            j, :, :] = image_data  #The [0:1024, :] slice above trims off the top two rows of the image, which are overscan.
        lights_on_std_devs[j] = np.std(
            image_data
        )  #Save standard deviation of flat images to identify flats with "ski jump" horizontal bars issue.
        print(
            str(j + 1) + '    ' + str(np.mean(image_data)) + '    ' +
            str(np.std(image_data)) + '    ' + str(np.amax(image_data)) +
            '    ' + str(np.amin(image_data)))

    #Identify bad lights-on flat images (usually have bright horizontal bands at the top/bottom of images.)
    vals, lo, hi = sigmaclip(lights_on_std_devs)
    bad_locs = np.where((lights_on_std_devs < lo)
                        | (lights_on_std_devs > hi))[0]
    good_locs = np.where((lights_on_std_devs > lo)
                         & (lights_on_std_devs < hi))[0]
    if len(bad_locs) > 0:
        print(
            'Found {} bad lights-on flats: {}.\nExcluding from combination step.\n'
            .format(len(bad_locs),
                    np.array(lights_on_files)[bad_locs]))
        time.sleep(2)
    flat_lights_on_cube_raw = flat_lights_on_cube_raw[good_locs]

    #For each pixel, calculate the mean, median, and standard deviation "through the stack" of lights on flat images.
    lights_on_cube_shape = np.shape(flat_lights_on_cube_raw)
    master_flat_lights_on = np.zeros(
        (lights_on_cube_shape[1], lights_on_cube_shape[2]), dtype='float32')
    master_flat_lights_on_stddev = np.zeros(
        (lights_on_cube_shape[1], lights_on_cube_shape[2]), dtype='float32')
    print('')
    print('Combining the lights-on flats.')
    print('......')
    pbar = ProgressBar()
    for x in pbar(range(lights_on_cube_shape[1])):
        for y in range(lights_on_cube_shape[2]):
            through_stack = flat_lights_on_cube_raw[:, y, x]
            through_stack_median = np.nanmedian(through_stack)
            through_stack_stddev = np.nanstd(through_stack)

            #Flag values that are > clip_lvl-sigma discrepant from the median.
            good_inds = np.where((abs(through_stack - through_stack_median) /
                                  through_stack_stddev <= clip_lvl))[0]

            #Calculate the sigma-clipped mean and sigma-clipped stddev using good_inds.
            s_c_mean = np.nanmean(through_stack[good_inds])
            s_c_stddev = np.nanstd(through_stack[good_inds])

            #Store the sigma-clipped mean as the master flat value for this pixel.
            master_flat_lights_on[y, x] = s_c_mean
            master_flat_lights_on_stddev[y, x] = s_c_stddev

    #Make cube of the lights-off images
    num_images = len(lights_off_files)
    print('Reading in ', num_images, ' lights-off flat images.')
    flat_lights_off_cube_raw = np.zeros([len(lights_off_files), 1024, 1024])
    lights_off_std_devs = np.zeros(num_images)
    print('')
    print('Flat frame information')
    print('-------------------------------------------------')
    print('ID   Mean               Stddev         Max    Min')
    print('-------------------------------------------------')
    for j in range(len(lights_off_files)):
        image_data = fits.open(dome_flat_raw_path /
                               lights_off_files[j])[0].data[0:1024, :]
        header = fits.open(dome_flat_raw_path / lights_off_files[j])[0].header
        if header['FILTNME2'] != band:
            print(
                'ERROR: {} taken in filter other than {}. Double check your date, try specifying start/stop file numbers, etc.'
                .format(lights_off_files[j], band))
            return
        flat_lights_off_cube_raw[
            j, :, :] = image_data  #The [0:1024, :] slice above trims off the top two rows of the image, which are overscan.
        lights_off_std_devs[j] = np.std(
            image_data
        )  #Save standard deviation of flat images to identify flats with "ski jump" horizontal bars issue.
        print(
            str(j + 1) + '    ' + str(np.mean(image_data)) + '    ' +
            str(np.std(image_data)) + '    ' + str(np.amax(image_data)) +
            '    ' + str(np.amin(image_data)))

    #Identify bad lights-off flat images (usually have bright horizontal bands at the top/bottom of images.)
    vals, lo, hi = sigmaclip(lights_off_std_devs)
    bad_locs = np.where((lights_off_std_devs < lo)
                        | (lights_off_std_devs > hi))[0]
    good_locs = np.where((lights_off_std_devs > lo)
                         & (lights_off_std_devs < hi))[0]
    if len(bad_locs) > 0:
        print(
            'Found {} bad lights-off flats: {}.\nExcluding from combination step.\n'
            .format(len(bad_locs),
                    np.array(lights_off_files)[bad_locs]))
        time.sleep(2)
    flat_lights_off_cube_raw = flat_lights_off_cube_raw[good_locs]

    #For each pixel, calculate the mean, median, and standard deviation "through the stack" of lights off flat images.
    lights_off_cube_shape = np.shape(flat_lights_off_cube_raw)
    master_flat_lights_off = np.zeros(
        (lights_off_cube_shape[1], lights_off_cube_shape[2]), dtype='float32')
    master_flat_lights_off_stddev = np.zeros(
        (lights_off_cube_shape[1], lights_off_cube_shape[2]), dtype='float32')
    print('')
    print('Combining the lights-off flats.')
    print('......')
    pbar = ProgressBar()
    for x in pbar(range(lights_off_cube_shape[1])):
        for y in range(lights_off_cube_shape[2]):
            through_stack = flat_lights_off_cube_raw[:, y, x]
            through_stack_median = np.nanmedian(through_stack)
            through_stack_stddev = np.nanstd(through_stack)

            #Flag values that are > clip_lvl-sigma discrepant from the median.
            good_inds = np.where((abs(through_stack - through_stack_median) /
                                  through_stack_stddev <= clip_lvl))[0]

            #Calculate the sigma-clipped mean and sigma-clipped stddev using good_inds.
            s_c_mean = np.nanmean(through_stack[good_inds])
            s_c_stddev = np.nanstd(through_stack[good_inds])

            #Store the sigma-clipped mean as the master flat value for this pixel.
            master_flat_lights_off[y, x] = s_c_mean
            master_flat_lights_off_stddev[y, x] = s_c_stddev

    #Create the master flat
    master_flat = master_flat_lights_on - master_flat_lights_off
    master_flat_norm = np.nanmedian(master_flat)
    master_flat = master_flat / master_flat_norm

    #Create the master error flat
    master_flat_error = np.sqrt(master_flat_lights_on_stddev**2 +
                                master_flat_lights_off_stddev**2)
    master_flat_error = master_flat_error / master_flat_norm

    #Output the master flat files.
    output_filename = pines_path / ('Calibrations/Flats/Domeflats/' + band +
                                    '/Master Flats/master_flat_' + band + '_' +
                                    date + '.fits')
    #Add a header keyword recording when the master flat was created.
    hdu = fits.PrimaryHDU(master_flat)
    hdu.header['HIERARCH DATE CREATED'] = datetime.utcnow().strftime(
        '%Y-%m-%dT%H:%M:%S')

    #Now save to a file on your local machine.
    print('')
    print('Writing the file to {}'.format(output_filename))
    hdu.writeto(output_filename, overwrite=True)
    print('Wrote to {}!'.format(output_filename))
    print('')

    if upload:
        print('Beginning upload process to pines.bu.edu...')
        print('Note, only PINES admins will be able to upload.')
        print('')
        sftp.chdir('/')
        sftp.chdir('data/calibrations/Flats/Domeflats/' + band)
        upload_name = 'master_flat_' + band + '_' + date + '.fits'
        sftp.put(output_filename, upload_name)
        print(
            'Uploaded {} to pines.bu.edu:data/calibrations/Flats/Domeflats/{}/!'
            .format(upload_name, band))

    output_filename = pines_path / ('Calibrations/Flats/Domeflats/' + band +
                                    '/Master Flats Stddev/master_flat_stddev_'
                                    + band + '_' + date + '.fits')
    #Add a header keyword recording when the stddev flat was created.
    hdu = fits.PrimaryHDU(master_flat_error)
    hdu.header['HIERARCH DATE CREATED'] = datetime.utcnow().strftime(
        '%Y-%m-%dT%H:%M:%S')

    #Now save to a file on your local machine.
    hdu.writeto(output_filename, overwrite=True)
    print('Wrote to {}!'.format(output_filename))

    if upload:
        print('Uploading to pines.bu.edu...')
        sftp.chdir('/')
        sftp.chdir('data/calibrations/Flats Stddev/Domeflats/' + band)
        upload_name = 'master_flat_stddev_' + band + '_' + date + '.fits'
        sftp.put(output_filename, upload_name)
        print(
            'Uploaded {} to pines.bu.edu:data/calibrations/Flats Stddev/Domeflats/{}/!'
            .format(upload_name, band))
    print('')
    if delete_raw:
        files_to_delete = glob.glob(str(dome_flat_raw_path / '*.fits'))
        for j in range(len(files_to_delete)):
            os.remove(files_to_delete[j])

    print('dome_flat_field runtime: ', np.round((time.time() - t1) / 60, 1),
          ' minutes.')
    print('Done!')
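The per-pixel double loop used twice above can be collapsed with astropy's sigma_clip along the stack axis. A hedged sketch of an equivalent combine (a single 3-sigma clipping pass about the median, mirroring the loop's logic):

import numpy as np
from astropy.stats import sigma_clip

def sigma_clipped_combine(cube, clip_lvl=3):
    #Mask values more than clip_lvl sigma from each pixel's stack median.
    clipped = sigma_clip(cube, sigma=clip_lvl, maxiters=1, cenfunc='median',
                         axis=0)
    #Mean and stddev of the surviving values, per pixel.
    return (clipped.mean(axis=0).filled(np.nan),
            clipped.std(axis=0).filled(np.nan))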
Example #12
def simple_lightcurve(target,
                      sources,
                      centroided_sources,
                      phot_type='aper',
                      ref_set_choice=[],
                      plot_mode='combined'):
    '''Authors:
        Patrick Tamburo, Boston University, June 2020
    Purpose:
            Makes a "simple" lightcurve with each reference star weighted equally when creating the artificial comparison lightcurve.
    Inputs:
        target (str): the target's long name (e.g. '2MASS J12345678+1234567').
        sources (pandas DataFrame): DataFrame with source names, and x/y positions in the source_detect_image. Output from ref_star_chooser.
        centroided_sources (pandas DataFrame): DataFrame with source positions in every image. Output from centroider.
        phot_type (str): 'aper' or 'psf'. Whether to use aperture or PSF photometry. NOTE: PSF photometry currently not implemented.
        ref_set_choice (list): list of reference IDs to use to make the lightcurve, in case you want to exclude any.
        plot_mode (str): 'combined' or 'separate'. 'combined' plots all nights in one figure, while 'separate' plots nights in separate figures.
    Outputs:
        Saves lightcurve plots to target's analysis directory.
    TODO:
        PSF photometry
        Regression?
    '''
    print('\nRunning simple_lightcurve().\n')
    def regression(flux, seeing, airmass, corr_significance=1e-5):
        #Looks at correlations between seeing and airmass with the target flux.
        #Takes those variables which are significantly correlated, and uses them in a linear regression to de-correlated the target flux.

        #Use the seeing in the regression if it's significantly correlated
        if pearsonr(seeing, flux)[1] < corr_significance:
            use_seeing = True
        else:
            use_seeing = False

        #Same thing for airmass
        if pearsonr(airmass, flux)[1] < corr_significance:
            use_airmass = True
        else:
            use_airmass = False

        #Now set up the linear regression.
        regr = linear_model.LinearRegression()

        regress_dict = {}

        #Add seeing, background, and airmass, if applicable.
        if use_seeing:
            key = 'seeing'
            regress_dict[key] = seeing

        if use_airmass:
            key = 'airmass'
            regress_dict[key] = airmass

        #Finally, add target flux
        regress_dict['flux'] = flux

        #Get list of keys
        keylist = list()
        for i in regress_dict.keys():
            keylist.append(i)

        #Create data frame of regressors.
        df = DataFrame(regress_dict, columns=keylist)
        x = df[keylist[0:len(keylist) - 1]]
        y = df['flux']

        if np.shape(x)[1] > 0:
            regr.fit(x, y)

            #Now, define the model.
            linear_regression_model = regr.intercept_

            #Add in the other regressors, as necessary. Can't think of a way of doing this generally, just use a bunch of ifs.
            if (use_seeing) and (use_airmass):
                linear_regression_model = linear_regression_model + regr.coef_[
                    0] * seeing + regr.coef_[1] * airmass
            if (use_seeing) and not (use_airmass):
                linear_regression_model = linear_regression_model + regr.coef_[
                    0] * seeing
            if not (use_seeing) and (use_airmass):
                linear_regression_model = linear_regression_model + regr.coef_[
                    0] * airmass
            #Divide out the fit.
            corrected_flux = flux / linear_regression_model
        else:
            #print('No regressors used.')
            corrected_flux = flux
        return corrected_flux
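    #Note on the helper above: pearsonr gates which regressors enter the fit
    #(those with p-value below corr_significance), and the detrended flux is
    #the raw flux divided by intercept_ + sum(coef_[i] * regressor_i).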

    #plt.ion()
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    outlier_tolerance = 0.2  #If more than this fraction of a reference's values get sigma-clipped, mark it as bad.
    centroided_sources.columns = centroided_sources.keys().str.strip()

    #Get list of photometry files for this target.
    photometry_path = pines_path / ('Objects/' + short_name + '/' + phot_type +
                                    '_phot/')
    analysis_path = pines_path / ('Objects/' + short_name + '/analysis')
    photometry_files = natsort.natsorted(
        [x for x in photometry_path.glob('*.csv')])

    num_refs = len(sources) - 1

    #Loop over all photometry files in the aper_phot directory.
    for i in range(len(photometry_files)):
        #Load in the photometry data.
        if phot_type == 'aper':
            aperture_radius = float(str(photometry_files[i]).split('_')[-3])

        phot_data = pines_log_reader(photometry_files[i])

        #Remove entries that have NaN's for flux values.
        for j in range(len(sources['Name'])):
            name = sources['Name'][j]
            phot_data[name + ' Flux'] = phot_data[name + ' Flux'].astype(float)
            phot_data[name +
                      ' Flux Error'] = phot_data[name +
                                                 ' Flux Error'].astype(float)

        #Get target interpolation warnings.
        targ_interp_flags = np.array(phot_data[short_name +
                                               ' Interpolation Flag'])

        #Get times of exposures.
        times = np.array(phot_data['Time JD'])
        seeing = np.array(phot_data['Seeing'])
        airmass = np.array(phot_data['Airmass'])
        background = np.array(phot_data[sources['Name'][0] + ' Background'])

        #Convert to datetimes for plotting purposes.
        dts = np.array(
            [julian.from_jd(times[z], fmt='jd') for z in range(len(times))])

        #Get the target's flux and background
        targ_flux = np.array(phot_data[short_name + ' Flux'])
        targ_flux_err = np.array(phot_data[short_name + ' Flux Error'])

        #Get the reference stars' fluxes and backgrounds.
        ref_flux = np.zeros((num_refs, len(phot_data)))
        ref_flux_err = np.zeros((num_refs, len(phot_data)))
        for j in range(0, num_refs):
            ref_flux[j, :] = phot_data['Reference ' + str(j + 1) + ' Flux']
            ref_flux_err[j, :] = phot_data['Reference ' + str(j + 1) +
                                           ' Flux Error']
            #Discard variable stars.
            #values, clow, chigh = sigmaclip(ref_flux[j], low=2.5, high=2.5)
            # if (len(phot_data) - len(values)) > (int(outlier_tolerance * len(phot_data))):
            #     print('Have to add flagging bad refs.')

        #Find the reference whose mean flux is closest to the target's.
        closest_ref = np.argmin(
            abs(np.nanmean(ref_flux, axis=1) - np.nanmean(targ_flux)))

        #Split data up into individual nights.
        night_inds = night_splitter(times)
        num_nights = len(night_inds)

        #if plot_mode == 'combined':
        #    fig, axis = plt.subplots(nrows=1, ncols=num_nights, figsize=(16, 5))

        #colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']

        #Get the time range of each night. Set each plot panel's xrange according to the night with the longest time. Makes seeing potential variability signals easier.
        night_lengths = np.zeros(num_nights)
        for j in range(num_nights):
            inds = night_inds[j]
            night_lengths[j] = times[inds][-1] - times[inds][0]
        longest_night = max(night_lengths)
        longest_night_hours = np.ceil(longest_night * 24)
        global line_list, filename_list
        line_list = []
        filename_list = []
        full_corr = np.full(len(times), np.nan)  #Holds the ALC-corrected target flux for all nights.

        for j in range(num_nights):
            #if plot_mode == 'separate':
            #    fig, ax = plt.subplots(1, 1, figsize=(16,5))
            #else:
            #    ax = axis[j]

            #if phot_type =='aper':
            #    fig.suptitle(short_name, fontsize=16)

            #if j == 0:
            #    ax.set_ylabel('Normalized Flux', fontsize=16)
            #ax.set_xlabel('Time (UT)', fontsize=16)

            inds = night_inds[j]
            filename_list.append(
                np.array([phot_data['Filename'][z] for z in inds]))
            alc = np.zeros(len(inds))

            #Normalize reference lightcurves. Each night is normalized separately, since inds only spans the current night.
            for k in range(num_refs):
                ref_flux[k][inds] = ref_flux[k][inds] / np.nanmedian(
                    ref_flux[k][inds])

            for k in range(len(inds)):
                #Do a sigma clip on normalized references to avoid biasing median.
                ###values, clow, chigh = sigmaclip(ref_flux[:,inds[k]][~np.isnan(ref_flux[:,inds[k]])], low=1.5, high=1.5)
                ###alc[k] = np.median(values)
                avg, med, std = sigma_clipped_stats(
                    ref_flux[:, inds[k]][~np.isnan(ref_flux[:, inds[k]])],
                    sigma=1.5)
                alc[k] = med

            #Correct the target lightcurve using the alc.
            alc = alc / np.nanmedian(alc)
            targ_flux_norm = targ_flux[inds] / np.nanmedian(targ_flux[inds])
            targ_corr = targ_flux_norm / alc
            targ_corr = targ_corr / np.nanmedian(targ_corr)
            full_corr[inds] = targ_corr  #Store this night's corrected flux in the full array.

            #Correct the example reference lightcurve using the alc.
            ref_corr_norm = ref_flux[closest_ref][inds] / np.nanmedian(
                ref_flux[closest_ref][inds])
            ref_corr = ref_corr_norm / alc
            ref_corr = ref_corr / np.nanmedian(ref_corr)

            #Plot the target and reference lightcurves.
            #t_plot, = ax.plot(dts[inds], targ_corr, '.', color=colors[i])
            #line_list.append(t_plot)
            #myFmt = mdates.DateFormatter('%H:%M')
            #ax.xaxis.set_major_formatter(myFmt)
            #fig.autofmt_xdate()

            #Do sigma clipping on the corrected lightcurve to get rid of outliers (from clouds, bad target centroid, cosmic rays, etc.)
            ###vals, lo, hi = sigmaclip(targ_corr, low=2.5, high=2.5)
            avg, med, std = sigma_clipped_stats(targ_corr, sigma=3)
            bad_vals = np.where((targ_corr > med + 5 * std)
                                | (targ_corr < med - 5 * std))[0]
            good_vals = np.where((targ_corr < med + 5 * std)
                                 & (targ_corr > med - 5 * std))[0]
            vals = targ_corr[good_vals]
            #if len(bad_vals) != 0:
            #    plt.plot(dts[inds][bad_vals], targ_corr[bad_vals], marker='x',color='r', mew=1.8, ms=7, zorder=0, ls='')

            blocks = block_splitter(times[inds], bad_vals)
            bin_times = np.zeros(len(blocks))
            bin_fluxes = np.zeros(len(blocks))
            bin_errs = np.zeros(len(blocks))
            bin_dts = []
            for k in range(len(blocks)):
                try:
                    bin_times[k] = np.mean(times[inds][blocks[k]])
                    #vals, hi, lo = sigmaclip(targ_corr[blocks[k]],high=3,low=3) #Exclude outliers.
                    bin_fluxes[k] = np.mean(targ_corr[blocks[k]])
                    bin_errs[k] = np.std(targ_corr[blocks[k]]) / np.sqrt(
                        len(targ_corr[blocks[k]]))
                    bin_dts.append(julian.from_jd(bin_times[k], fmt='jd'))
                except:
                    pdb.set_trace()
            bin_dts = np.array(bin_dts)
            #ax.errorbar(bin_dts, bin_fluxes, yerr=bin_errs, marker='o', color='k',zorder=3, ls='')

            #Draw the y=1 and 5-sigma detection threshold lines.
            #ax.axhline(y=1, color='r', lw=2, zorder=0)
            #ax.axhline(1-5*np.median(bin_errs), zorder=0, lw=2, color='k', ls='--', alpha=0.4)

            #Set the y-range so you can see the 5-sigma detection line.
            #ax.set_ylim(0.9, 1.1)

            #Set the x-range to be the same for all nights.
            #ax.set_xlim(julian.from_jd(times[inds][0]-0.025, fmt='jd'), julian.from_jd(times[inds][0]+longest_night+0.025, fmt='jd'))

            #ax.grid(alpha=0.2)
            #ax.set_title(phot_data['Time UT'][inds[0]].split('T')[0], fontsize=14)
            #ax.tick_params(labelsize=12)
            #print('average seeing, night {}: {}'.format(j, np.mean(seeing[inds])))
            #pdb.set_trace()
            #print('pearson correlation between target and closest ref: {}'.format(pearsonr(targ_corr[good_vals], ref_corr[good_vals])))

        #print(np.mean(bin_errs))
        #print('')
        #fig.tight_layout(rect=[0, 0.03, 1, 0.93])

        #Output the simple lc data to a csv.
        time_save = times
        flux_save = full_corr  #Corrected flux for all nights; targ_corr alone would only hold the final night.
        flux_err_save = np.zeros(len(flux_save)) + np.nanstd(full_corr)
        output_dict = {
            'Time': time_save,
            'Flux': flux_save,
            'Flux Error': flux_err_save
        }
        output_df = pd.DataFrame(data=output_dict)
        if phot_type == 'aper':
            output_filename = analysis_path / (
                short_name + '_simple_lc_aper_phot_' +
                str(np.round(aperture_radius, 1)) + '_pix.csv')
            print('\nSaving to {}.\n'.format(output_filename))
            output_df.to_csv(output_filename)
        elif phot_type == 'psf':
            print("ERROR: Need to create flux output for PSF photometry.")

    return
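
#A minimal, self-contained sketch of the ALC (artificial comparison
#lightcurve) technique used above: normalize each reference star's flux,
#take a sigma-clipped median across references at each exposure, and divide
#the target by it. All data and array names below are synthetic/hypothetical,
#not part of the PINES pipeline.
import numpy as np
from astropy.stats import sigma_clipped_stats

rng = np.random.default_rng(0)
n_refs, n_exp = 5, 100
transparency = 1 + 0.05 * np.sin(np.linspace(0, 3, n_exp))  #Shared systematic.
ref_flux = transparency * (1 + 0.01 * rng.standard_normal((n_refs, n_exp)))
targ_flux = transparency * (1 + 0.02 * rng.standard_normal(n_exp))

#Normalize each reference lightcurve by its median.
ref_norm = ref_flux / np.nanmedian(ref_flux, axis=1, keepdims=True)

#Sigma-clipped median across references at each exposure -> ALC.
alc = np.array([sigma_clipped_stats(ref_norm[:, k], sigma=1.5)[1]
                for k in range(n_exp)])
alc /= np.nanmedian(alc)

#Dividing by the ALC removes the shared systematic from the target.
targ_corr = (targ_flux / np.nanmedian(targ_flux)) / alc
targ_corr /= np.nanmedian(targ_corr)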
Example #13
0
def variable_aper_phot(target,
                       centroided_sources,
                       multiplicative_factors,
                       an_in=12.,
                       an_out=30.,
                       plots=False,
                       gain=8.21,
                       qe=0.9,
                       plate_scale=0.579):
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    #Remove any leading/trailing spaces in the column names.
    centroided_sources.columns = centroided_sources.columns.str.lstrip()
    centroided_sources.columns = centroided_sources.columns.str.rstrip()

    #Get list of reduced files for target.
    reduced_path = pines_path / ('Objects/' + short_name + '/reduced')
    reduced_filenames = natsort.natsorted(
        [x.name for x in reduced_path.glob('*red.fits')])
    reduced_files = np.array([reduced_path / i for i in reduced_filenames])

    #Get source names.
    source_names = get_source_names(centroided_sources)

    #Get seeing.
    seeing = np.array(centroided_sources['Seeing'])

    #Loop over multiplicative factors
    for i in range(len(multiplicative_factors)):
        fact = multiplicative_factors[i]
        print(
            'Doing variable aperture photometry for {}, multiplicative seeing factor = {}, inner annulus radius = {} pix, outer annulus radius = {} pix.'
            .format(target, fact, an_in, an_out))

        #Declare a new dataframe to hold the information for all targets for this aperture.
        columns = [
            'Filename', 'Time UT', 'Time JD UTC', 'Time BJD TDB', 'Airmass',
            'Seeing'
        ]
        for j in range(0, len(source_names)):
            columns.append(source_names[j] + ' Flux')
            columns.append(source_names[j] + ' Flux Error')
            columns.append(source_names[j] + ' Background')
            columns.append(source_names[j] + ' Interpolation Flag')

        var_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
        output_filename = pines_path / (
            'Objects/' + short_name + '/aper_phot/' + short_name +
            '_variable_aper_phot_' + str(float(fact)) + '_seeing_factor.csv')

        #Loop over all images.
        pbar = ProgressBar()
        for j in pbar(range(len(reduced_files))):
            #Open the file once and pull both the data and the header from it.
            hdulist = fits.open(reduced_files[j])
            data = hdulist[0].data
            header = hdulist[0].header

            #Read in some supporting information.
            log_path = pines_path / (
                'Logs/' + reduced_files[j].name.split('.')[0] + '_log.txt')
            log = pines_log_reader(log_path)
            log_ind = np.where(
                log['Filename'] == reduced_files[j].name.split('_')[0] +
                '.fits')[0][0]
            date_obs = header['DATE-OBS']
            #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds.
            if len(date_obs.split(':')[-1].split('.')[0]) == 3:
                date_obs = date_obs.split(':')[0] + ':' + date_obs.split(
                    ':')[1] + ':' + date_obs.split(':')[-1][1:]

            if date_obs.split(':')[-1] == '60.00':
                date_obs = date_obs.split(':')[0] + ':' + str(
                    int(date_obs.split(':')[1]) + 1) + ':00.00'
            #Keep a try/except clause here in case other unknown DATE-OBS formats pop up.
            try:
                date = datetime.datetime.strptime(date_obs,
                                                  '%Y-%m-%dT%H:%M:%S.%f')
            except:
                print(
                    'Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.'
                )
                pdb.set_trace()

            #Get the master_dark_stddev image closest in date to this exposure (matched on exposure time).
            #We'll use this to measure read noise and dark current.
            master_dark_stddev = master_dark_stddev_chooser(
                pines_path / ('Calibrations/Darks/Master Darks Stddev/'),
                header)

            days = date.day + hmsm_to_days(date.hour, date.minute, date.second,
                                           date.microsecond)
            jd = date_to_jd(date.year, date.month, days)
            var_df['Filename'][j] = reduced_files[j].name
            var_df['Time UT'][j] = header['DATE-OBS']
            var_df['Time JD UTC'][j] = jd
            var_df['Time BJD TDB'][j] = jd_utc_to_bjd_tdb(
                jd, header['TELRA'], header['TELDEC'])
            var_df['Airmass'][j] = header['AIRMASS']
            var_df['Seeing'][j] = log['X seeing'][log_ind]

            #If the shift quality has been flagged, skip this image.
            if log['Shift quality flag'].iloc[log_ind] == 1:
                continue

            #Get the source positions in this image.
            positions = []
            for k in range(len(source_names)):
                positions.append(
                    (centroided_sources[source_names[k] + ' Image X'][j],
                     centroided_sources[source_names[k] + ' Image Y'][j]))

            #Create an aperture centered on this position with radius (in pixels) of (seeing*multiplicative_factor[j])/plate_scale.
            try:
                apertures = CircularAperture(positions,
                                             r=(seeing[j] * fact) /
                                             plate_scale)
            except:
                pdb.set_trace()

            #Create an annulus centered on this position.
            annuli = CircularAnnulus(positions, r_in=an_in, r_out=an_out)

            photometry_tbl = iraf_style_photometry(apertures, annuli,
                                                   data * gain,
                                                   master_dark_stddev * gain,
                                                   header, var_df['Seeing'][j])

            for k in range(len(photometry_tbl)):
                var_df[source_names[k] +
                       ' Flux'][j] = photometry_tbl['flux'][k]
                var_df[source_names[k] +
                       ' Flux Error'][j] = photometry_tbl['flux_error'][k]
                var_df[source_names[k] +
                       ' Background'][j] = photometry_tbl['background'][k]
                var_df[source_names[k] + ' Interpolation Flag'][j] = int(
                    photometry_tbl['interpolation_flag'][k])

        #Write output to file.
        print(
            'Saving multiplicative factor = {} variable aperture photometry output to {}.'
            .format(fact, output_filename))
        print('')
        with open(output_filename, 'w') as f:
            for j in range(len(var_df)):
                #Write in the header.
                if j == 0:
                    f.write(
                        '{:>21s}, {:>22s}, {:>17s}, {:>17s}, {:>7s}, {:>7s}, '.
                        format('Filename', 'Time UT', 'Time JD UTC',
                               'Time BJD TDB', 'Airmass', 'Seeing'))
                    for k in range(len(source_names)):
                        if k != len(source_names) - 1:
                            f.write(
                                '{:>22s}, {:>28s}, {:>28s}, {:>34s}, '.format(
                                    source_names[k] + ' Flux',
                                    source_names[k] + ' Flux Error',
                                    source_names[k] + ' Background',
                                    source_names[k] + ' Interpolation Flag'))
                        else:
                            f.write(
                                '{:>22s}, {:>28s}, {:>28s}, {:>34s}\n'.format(
                                    source_names[k] + ' Flux',
                                    source_names[k] + ' Flux Error',
                                    source_names[k] + ' Background',
                                    source_names[k] + ' Interpolation Flag'))

                #Write in Filename, Time UT, Time JD UTC, Time BJD TDB, Airmass, and Seeing values.
                format_string = '{:21s}, {:22s}, {:17.9f}, {:17.9f}, {:7.2f}, {:7.1f}, '
                #If the seeing value for this image is 'nan' (a string), convert it to a float.
                #TODO: Not sure why it's being read in as a string, fix that.
                if type(var_df['Seeing'][j]) == str:
                    var_df['Seeing'][j] = float(var_df['Seeing'][j])

                #Do a try/except clause for writeout, in case it breaks in the future.
                try:
                    f.write(
                        format_string.format(var_df['Filename'][j],
                                             var_df['Time UT'][j],
                                             var_df['Time JD UTC'][j],
                                             var_df['Time BJD TDB'][j],
                                             var_df['Airmass'][j],
                                             var_df['Seeing'][j]))
                except:
                    print(
                        'Writeout failed! Inspect quantities you are trying to write out.'
                    )
                    pdb.set_trace()

                #Write in Flux, Flux Error, Background, and Interpolation Flag values for every source.
                #Use a fresh loop variable (k) here; reusing i would clobber the outer multiplicative-factor index.
                for k in range(len(source_names)):
                    if k != len(source_names) - 1:
                        format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34d}, '
                    else:
                        format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34d}\n'
                    try:
                        f.write(
                            format_string.format(
                                var_df[source_names[k] + ' Flux'][j],
                                var_df[source_names[k] + ' Flux Error'][j],
                                var_df[source_names[k] + ' Background'][j],
                                var_df[source_names[k] +
                                       ' Interpolation Flag'][j]))
                    except:
                        #Fall back to float formatting if the interpolation flag isn't an int.
                        if k != len(source_names) - 1:
                            format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34f}, '
                        else:
                            format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34f}\n'
                        f.write(
                            format_string.format(
                                var_df[source_names[k] + ' Flux'][j],
                                var_df[source_names[k] + ' Flux Error'][j],
                                var_df[source_names[k] + ' Background'][j],
                                var_df[source_names[k] +
                                       ' Interpolation Flag'][j]))
        print('')
    return
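
#A minimal sketch of seeing-scaled ("variable") aperture photometry with
#photutils, mirroring the approach above: aperture radius = (seeing * factor)
#/ plate_scale, with a fixed annulus for local background. This follows the
#standard photutils annulus-mean pattern, not the repo's iraf_style_photometry
#helper; the image, positions, and seeing value below are synthetic.
import numpy as np
from photutils.aperture import (CircularAperture, CircularAnnulus,
                                aperture_photometry)

image = np.random.default_rng(1).normal(100., 5., (1024, 1024))
positions = [(512.3, 498.7), (600.1, 410.2)]  #Hypothetical source positions.
seeing, fact, plate_scale = 2.6, 1.0, 0.579   #arcsec, unitless, arcsec/pix

apertures = CircularAperture(positions, r=(seeing * fact) / plate_scale)
annuli = CircularAnnulus(positions, r_in=12., r_out=30.)

phot = aperture_photometry(image, apertures)
#Mean background per pixel from the annulus, scaled to the aperture area.
bkg_mean = aperture_photometry(image, annuli)['aperture_sum'] / annuli.area
phot['bkg_sub_flux'] = phot['aperture_sum'] - bkg_mean * apertures.area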
Example #14
0
def drift_rate(target):
    plt.ion()

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    source_centroids = pd.read_csv(
        pines_path / ('Objects/' + short_name +
                      '/sources/target_and_references_centroids.csv'))
    source_centroids.columns = source_centroids.columns.str.strip()

    x = source_centroids[short_name + ' X']
    y = source_centroids[short_name + ' Y']
    z = np.sqrt(x**2 + y**2)

    reduced_images = natsort.natsorted(
        list((pines_path /
              ('Objects/' + short_name + '/reduced/')).rglob('*.fits')))

    if len(reduced_images) != len(x):
        raise RuntimeError(
            'Number of reduced images does not match number of centroids.')

    dates = []
    for i in range(len(reduced_images)):
        file = reduced_images[i]
        date_obs = fits.open(file)[0].header['DATE-OBS']
        dates.append(
            datetime.datetime.strptime(date_obs, '%Y-%m-%dT%H:%M:%S.%f'))
    dates = np.array(dates)
    unique_days = natsort.natsorted(
        list({(i.day, i.month, i.year)
              for i in dates}))
    days = [(dates[i].day, dates[i].month, dates[i].year)
            for i in range(len(dates))]

    colors = ['tab:blue', 'tab:orange', 'tab:green']
    #One row per day, with columns for dx/dt, dy/dt, and dz/dt (matches the ax[i, 0..2] indexing below).
    fig, ax = plt.subplots(nrows=len(unique_days),
                           ncols=3,
                           figsize=(17, 8),
                           sharey=True)
    for i in range(len(unique_days)):
        day = unique_days[i]
        truth_arr = [d == day for d in days]
        date_locs = np.where(truth_arr)[0]
        ax[i, 0].plot(dates[date_locs],
                      np.gradient(x[date_locs]),
                      marker='o',
                      ls='',
                      color=colors[0],
                      alpha=0.3)
        smoothed = scipy.ndimage.uniform_filter(np.gradient(x[date_locs]),
                                                size=5)
        ax[i, 0].plot(dates[date_locs],
                      smoothed,
                      lw=2,
                      color=colors[0],
                      zorder=0)
        for label in ax[i, 0].get_xticklabels():
            label.set_rotation(30)
            label.set_horizontalalignment('right')

        ax[i, 1].plot(dates[date_locs],
                      np.gradient(y[date_locs]),
                      marker='o',
                      ls='',
                      color=colors[1],
                      alpha=0.3)
        smoothed = scipy.ndimage.uniform_filter(np.gradient(y[date_locs]),
                                                size=5)
        ax[i, 1].plot(dates[date_locs],
                      smoothed,
                      lw=2,
                      color=colors[1],
                      zorder=0)
        for label in ax[i, 1].get_xticklabels():
            label.set_rotation(30)
            label.set_horizontalalignment('right')

        ax[i, 2].plot(dates[date_locs],
                      np.gradient(z[date_locs]),
                      marker='o',
                      ls='',
                      color=colors[2],
                      alpha=0.3)
        smoothed = scipy.ndimage.uniform_filter(np.gradient(z[date_locs]),
                                                size=5)
        ax[i, 2].plot(dates[date_locs],
                      smoothed,
                      lw=2,
                      color=colors[2],
                      zorder=0)
        for label in ax[i, 2].get_xticklabels():
            label.set_rotation(30)
            label.set_horizontalalignment('right')

        ax[i, 0].grid(True, alpha=0.4)
        ax[i, 1].grid(True, alpha=0.4)
        ax[i, 2].grid(True, alpha=0.4)

        if i == 0:
            ax[i, 0].set_title('dx/dt', color=colors[0], fontsize=16)
            ax[i, 1].set_title('dy/dt', color=colors[1], fontsize=16)
            ax[i, 2].set_title('dz/dt', color=colors[2], fontsize=16)
    plt.tight_layout()
    pdb.set_trace()
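
#A short sketch of the drift-rate smoothing used above: the frame-to-frame
#centroid gradient is noisy, so a small boxcar (moving-average) filter makes
#the drift trend visible. Positions are synthetic; scipy.ndimage.uniform_filter1d
#is the current (non-deprecated) home of the boxcar filter.
import numpy as np
from scipy.ndimage import uniform_filter1d

rng = np.random.default_rng(2)
x = np.cumsum(0.1 + 0.5 * rng.standard_normal(200))  #Drifting x centroid.
drift_rate_raw = np.gradient(x)                       #Pixels per frame, noisy.
drift_rate_smooth = uniform_filter1d(drift_rate_raw, size=5)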
Example #15
0
def log_updater(date, sftp, shift_tolerance=30, upload=False):
    '''
    Authors:
        Patrick Tamburo, Boston University, January 2021
    Purpose:
        Updates x_shift and y_shift measurements from a PINES log. These shifts are measured using *full* resolution images, while at the telescope,
        we use *half* resolution images (to save time between exposures). By measuring on full-res images, we get more accurate shifts, which allows
        us to determine centroids more easily.
    Inputs:
        date (str): the UT date of the log whose shifts you want to update, in YYYYMMDD format, e.g. '20151110'
        sftp (pysftp connection): sftp connection to the PINES server
        shift_tolerance (float): the maximum distance (in pixels) an x/y shift can be before the shifts are flagged as poor quality
        upload (bool): whether or not to push the updated log to the PINES server (only admins can do this)
    Outputs:
        Writes updated log file to disk.
    TODO:
        Re-measure seeing?
    '''
    def tie_sigma(model):
        return model.x_stddev_1

    def guide_star_seeing(subframe):
        # subframe = subframe - np.median(subframe)
        subframe = subframe - np.percentile(subframe,5)
        sub_frame_l = int(np.shape(subframe)[0])
        y, x = np.mgrid[:sub_frame_l, :sub_frame_l]

        # Fit with constant, bounds, tied x and y sigmas and outlier rejection:
        gaussian_init = models.Const2D(0.0) + models.Gaussian2D(subframe[int(sub_frame_l/2),int(sub_frame_l/2)],int(sub_frame_l/2),int(sub_frame_l/2),8/2.355,8/2.355,0)
        gaussian_init.x_stddev_1.min = 1.0/2.355
        gaussian_init.x_stddev_1.max = 20.0/2.355
        gaussian_init.y_stddev_1.min = 1.0/2.355
        gaussian_init.y_stddev_1.max = 20.0/2.355
        gaussian_init.y_stddev_1.tied = tie_sigma
        gaussian_init.theta_1.fixed = True
        fit_gauss = fitting.FittingWithOutlierRemoval(fitting.LevMarLSQFitter(),sigma_clip,niter=3,sigma=3.0)
        # gaussian, mask = fit_gauss(gaussian_init, x, y, subframe)
        gain = 8.21 #e per ADU
        read_noise = 2.43 #ADU
        weights = gain / np.sqrt(np.absolute(subframe)*gain + (read_noise*gain)**2) #1/sigma for each pixel
        gaussian, mask = fit_gauss(gaussian_init, x, y, subframe, weights)
        fwhm_x = 2.355*gaussian.x_stddev_1.value
        fwhm_y = 2.355*gaussian.y_stddev_1.value

        x_seeing = fwhm_x * 0.579
        y_seeing = fwhm_y * 0.579
        return(x_seeing,y_seeing)

    pines_path = pines_dir_check()
    log_path = pines_path/('Logs/'+date+'_log.txt')

    #Begin by checking filenames, making sure they're in sequential order, and that there is only one entry for each. 
    log_out_of_order_fixer(log_path, sftp)
    
    log = pines_log_reader(log_path) #Get telescope log shifts.
    myfile = open(log_path, 'r')
    lines = myfile.readlines()
    myfile.close()

    #Now loop over all files in the log, measure shifts in each file and update the line in the log. 
    for i in range(len(log)):
        non_science = ('flat', 'skyflat', 'supersky', 'dark', 'bias', 'dummy')
        if (log['Target'][i].lower() not in non_science) and (log['Post-processing flag'][i] != 1):
            filename = log['Filename'][i].split('.fits')[0]+'_red.fits'
            target = log['Target'][i]
            short_name = short_name_creator(target)
            image_path = pines_path/('Objects/'+short_name+'/reduced/'+filename)

            #Figure out which file you're looking at and its position in the log. 
            log_ind = np.where(log['Filename'] == filename.split('_')[0]+'.fits')[0][0]

            #Measure the shifts and get positions of targets.
            (measured_x_shift, measured_y_shift, source_x, source_y, check_image) = shift_measurer(target, filename, sftp)

            if (abs(measured_x_shift) > shift_tolerance) or (abs(measured_y_shift) > shift_tolerance):
                print('Shift greater than {} pixels measured for {} in {}.'.format(shift_tolerance, short_name, image_path.name))
                print('Inspect manually.')
                shift_quality_flag = 1
            elif np.isnan(measured_x_shift) or np.isnan(measured_y_shift):
                raise RuntimeError('Found nans for shifts!')
            else:
                shift_quality_flag = 0
            
            
            #Measure the seeing. 
            guide_star_cut = np.where((source_x > 50) & (source_x < 975) & (source_y > 50) & (source_y < 975))[0]
            if len(guide_star_cut) != 0:
                x_seeing_array = []
                y_seeing_array = []
                for guide_star_ind in guide_star_cut:
                    guide_star_x_int = int(source_x[guide_star_ind])
                    guide_star_y_int = int(source_y[guide_star_ind])
                    guide_star_subframe = check_image[guide_star_y_int-15:guide_star_y_int+15,guide_star_x_int-15:guide_star_x_int+15]
                    (x_seeing,y_seeing) = guide_star_seeing(guide_star_subframe)
                    #Cut unrealistic values/saturated stars. 
                    if x_seeing > 1.2 and x_seeing < 7.0:
                        x_seeing_array.append(x_seeing)
                        y_seeing_array.append(y_seeing)
                x_seeing = np.nanmedian(x_seeing_array)
                y_seeing = np.nanmedian(y_seeing_array)
            else:
                #Default to the average PINES value if no sources were found for guiding. 
                x_seeing = 2.6
                y_seeing = 2.6

            print('Log line {} of {}.'.format(i+1, len(log)))
            print('Measured x shift: {:4.1f}, measured y shift: {:4.1f}'.format(measured_x_shift, measured_y_shift))
            print('Measured seeing: {:4.1f}'.format(x_seeing))
            print('')

            #Overwrite the telescope's logged shifts and seeing values with the new measurements. 
            log['X shift'][log_ind] = str(np.round(measured_x_shift,1))
            log['Y shift'][log_ind] = str(np.round(measured_y_shift,1))
            log['X seeing'][log_ind] = str(np.round(x_seeing, 1))
            log['Y seeing'][log_ind] = str(np.round(y_seeing, 1))

            #Grab entries for log line.
            filename = log['Filename'][log_ind]
            log_date = log['Date'][log_ind]
            target_name = log['Target'][log_ind]
            filter_name = log['Filt.'][log_ind]
            exptime = log['Exptime'][log_ind]
            airmass = log['Airmass'][log_ind]
            x_shift = log['X shift'][log_ind]
            y_shift = log['Y shift'][log_ind]
            x_seeing = log['X seeing'][log_ind]
            y_seeing = log['Y seeing'][log_ind]
            post_processing_flag = 1
            #Generate line of log text following the PINES telescope log format. 
            log_text = pines_logging(filename, log_date, target_name, filter_name, exptime, airmass, x_shift, y_shift, x_seeing, y_seeing, post_processing_flag, shift_quality_flag)

            #Overwrite the line with the new shifts.
            line_ind = log_ind + 1
            lines[line_ind] = log_text

            #Update the log on disk.
            with open(log_path, 'w') as f:
                for line in lines:
                    f.write(line)

        elif (log['Post-processing flag'][i] == 1):
            print('File already post-processed, skipping. {} of {}'.format(i+1, len(log)))
        else:
            print('File not a science target, skipping. {} of {}.'.format(i+1, len(log)))

    if upload:
        sftp.chdir('/data/logs/')
        print('Uploading to /data/logs/{}_log.txt.'.format(date))
        sftp.put(log_path,date+'_log.txt')
    return 
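
#A compact sketch of the guide_star_seeing approach above: fit a constant-
#plus-2D-Gaussian model with the y sigma tied to the x sigma, then convert
#the fitted FWHM to arcseconds with the Mimir plate scale. The star cutout
#below is synthetic.
import numpy as np
from astropy.modeling import models, fitting

plate_scale = 0.579  #arcsec/pix
y, x = np.mgrid[:30, :30]
true = models.Gaussian2D(1000., 15., 15., 3.4/2.355, 3.4/2.355)(x, y)
subframe = true + np.random.default_rng(3).normal(0., 5., true.shape)

init = models.Const2D(0.0) + models.Gaussian2D(subframe.max(), 15., 15.,
                                               8/2.355, 8/2.355, 0)
init.y_stddev_1.tied = lambda model: model.x_stddev_1  #Force a round PSF.
init.theta_1.fixed = True
fit = fitting.LevMarLSQFitter()
gaussian = fit(init, x, y, subframe)
seeing = 2.355 * gaussian.x_stddev_1.value * plate_scale  #FWHM in arcsec.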
Example #16
0
def reduce(target_name, upload=False, delete_raw=False, delete_reduced=False, sftp='', manual_flat_path='', manual_dark_path='', manual_bpm_path='', linearity_correction=False, linearity_correction_degree=5):
	t1 = time.time()
	print('')
	if (upload is True) and (sftp == ''):
		print('ERROR: You must pass an sftp connection if you want to upload reduced files to pines.bu.edu.!')
		return

	pines_path = pines_dir_check()
	short_name = short_name_creator(target_name)

	#Paths
	raw_path = pines_path/('Objects/'+short_name+'/raw')
	raw_files = [Path(i) for i in natsort.natsorted(glob.glob(os.path.join(raw_path,'*.fits')))] #Natsort sorts things how you expect them to be sorted.
	dark_path = pines_path/('Calibrations/Darks')
	reduced_path = pines_path/('Objects/'+short_name+'/reduced')
	flats_path = pines_path/('Calibrations/Flats/Domeflats')
	bpm_path = pines_path/('Calibrations/Bad Pixel Masks')

	#Now begin the loop to load and reduce the raw science data
	print("Reducing data for {}.".format(target_name))
	print('Note: any reduced data already in the reduced directory will not be re-reduced.')
				
	pbar = ProgressBar()
	for i in pbar(range(np.size(raw_files))):
		target_filename = reduced_path/(raw_files[i].name.split('.fits')[0]+'_red.fits')
		if target_filename.exists():
			#print('{} already in reduced directory, skipping.'.format(target_filename.name))
			continue
		hdulist = fits.open(raw_files[i])
		header = hdulist[0].header

		try:
			frame_raw = fits.open(raw_files[i])[0].data.astype('float32')
		except:
			pdb.set_trace()


		frame_raw = frame_raw[0:1024,:] #Cuts off 2 rows of overscan (?) pixels
			
		#Add a flag to the header to check if background is near saturation
		if sigma_clipped_stats(frame_raw)[1] > 3800: 
			sat_flag = 1
		else:
			sat_flag = 0

		#Load in the dark/flat files. If a manual path is not provided, choose the reduction image that is closest in time to when the image was taken.
		if manual_flat_path == '':
			master_flat, master_flat_name = master_flat_chooser(flats_path, header)
		else:
			master_flat = fits.open(manual_flat_path)[0].data
			master_flat_name = manual_flat_path.name

		if manual_dark_path == '':
			master_dark, master_dark_name = master_dark_chooser(dark_path, header)
		else:
			master_dark = fits.open(manual_dark_path)[0].data
			master_dark_name = manual_dark_path.name
		
		if manual_bpm_path == '':
			bad_pixel_mask, bad_pixel_mask_name = bpm_chooser(bpm_path, header)
		else:
			bad_pixel_mask = fits.open(manual_bpm_path)[0].data
			bad_pixel_mask_name = manual_bpm_path.name



		#If linearity_correction is set, correct the pixels for linearity.
		if linearity_correction:

			poly_coeffs_path = pines_path/('Calibrations/Linearity/meas2corr_poly_coeffs_deg_{}_no_redchi.fits'.format(linearity_correction_degree))
			hdu_list = fits.open(poly_coeffs_path)
			meas2corr_poly_coeffs = hdu_list[0].data[0:1024,:,:]
			degree = meas2corr_poly_coeffs.shape[2]
			frame_raw[np.where(bad_pixel_mask == 1)] = np.nan
			measured_counts = frame_raw - master_dark

			#Make an array of corrected counts using the polynomial coefficients from meas2corr_poly_coeffs.fits.
			#Use a fresh loop variable (deg) so the file index i from the enclosing loop isn't clobbered.
			corrected_counts = np.zeros((1024,1024), dtype='float32')
			for deg in np.arange(degree):
				corrected_counts += meas2corr_poly_coeffs[:,:,deg] * measured_counts**deg
			frame_red = corrected_counts

		else:
			#Reduce the image. 
			frame_red = (frame_raw - master_dark)/master_flat
			frame_red = frame_red.astype('float32')
			
			#Set bad pixels to NaNs. 
			frame_red[np.where(bad_pixel_mask == 1)] = np.nan
		
		# pdb.set_trace()

		# #Do a background model subtraction
		# frame_red = bg_2d(frame_red)

		#Store some parameters in the reduced file's header. 
		#Naming convention follows https://docs.astropy.org/en/stable/io/fits/usage/headers.html.
		header['HIERARCH DATE REDUCED'] = datetime.utcnow().strftime('%Y-%m-%d')+'T'+datetime.utcnow().strftime('%H:%M:%S')
		header['HIERARCH MASTER DARK'] = master_dark_name
		header['HIERARCH MASTER FLAT'] = master_flat_name
		header['HIERARCH BAD PIXEL MASK'] = bad_pixel_mask_name
		header['HIERARCH SATURATION FLAG'] = sat_flag

		if not os.path.exists(target_filename):
			fits.writeto(target_filename, frame_red, header)
			#print("Reducing {}: {} of {}, band = {}, exptime = {} s, dark = {}, flat = {}".format(raw_files[i].name, str(i+1), str(np.size(raw_files)), header['FILTNME2'], header['EXPTIME'], master_dark_name, master_flat_name))
				
	if upload:
		print('')
		print('Beginning upload process to pines.bu.edu...')
		print('NOTE:    Only PINES admins are able to upload.')
		print('WARNING: If these reduced images already exist on the PINES server, they will be overwritten!')
		time.sleep(1)
		sftp.chdir('/data/reduced/mimir/')
		files_to_upload = np.array(natsort.natsorted(np.array([x for x in reduced_path.glob('*.fits')])))
		print('Uploading reduced {} data to the PINES server!'.format(target_name))
		pbar = ProgressBar()
		for i in pbar(range(len(files_to_upload))):
			file = files_to_upload[i]
			night_name = files_to_upload[i].name.split('.')[0]
			for dir_change in sftp.listdir():
				sftp.chdir(dir_change)
				nights = sftp.listdir()
				if night_name in nights:
					ind  = np.where(np.array(nights) == night_name)[0][0]
					break
				sftp.chdir('..')
			if nights[ind] != night_name:
				print('ERROR: the date of the file you want to upload does not match the date directory where the program wants to upload it.')
				pdb.set_trace()
			
			#print('Uploading to {}/{}, {} of {}'.format(sftp.getcwd(),nights[ind]+'/'+file.name, i+1,len(files_to_upload)))
			sftp.put(file,nights[ind]+'/'+file.name)
			sftp.chdir('..')
			

	if delete_raw:
		files_to_delete = glob.glob(str(raw_path/'*.fits'))
		for j in range(len(files_to_delete)):
			os.remove(files_to_delete[j])

	if delete_reduced:
		files_to_delete = glob.glob(str(reduced_path/'*.fits'))
		for j in range(len(files_to_delete)):
			os.remove(files_to_delete[j])
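
#The core reduction arithmetic used above, as a standalone sketch: subtract a
#master dark, divide by a master flat, and NaN out bad pixels. The FITS file
#names below are hypothetical placeholders.
import numpy as np
from astropy.io import fits

frame_raw = fits.open('raw_image.fits')[0].data.astype('float32')[0:1024, :]
master_dark = fits.open('master_dark.fits')[0].data
master_flat = fits.open('master_flat.fits')[0].data
bad_pixel_mask = fits.open('bpm.fits')[0].data

frame_red = ((frame_raw - master_dark) / master_flat).astype('float32')
frame_red[bad_pixel_mask == 1] = np.nan  #Mask known-bad detector pixels.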
Example #17
0
def relative_cutout_position_plot(target, centroided_sources):
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    #Get plot style parameters.
    title_size, axis_title_size, axis_ticks_font_size, legend_font_size = plot_style(
    )

    #Get list of source names in the centroid output.
    source_names = get_source_names(centroided_sources)
    centroided_sources.columns = centroided_sources.keys().str.strip()

    #Get times from the centroid output and split them by night.
    times_full = np.array(centroided_sources['Time (JD UTC)'])
    night_inds = night_splitter(times_full)
    num_nights = len(night_inds)
    times_nights = [times_full[night_inds[i]] for i in range(num_nights)]
    standard_x = standard_x_range(times_nights)

    #Get the box size (I don't like that this is being determined by using the mean of the data...output it from centroider?)
    box_w = int(
        np.round(
            2 * np.nanmean(
                np.array(centroided_sources['Reference 1 Cutout X'],
                         dtype='float')), 0))

    fig, ax = plt.subplots(nrows=2,
                           ncols=num_nights,
                           figsize=(17, 9),
                           sharey=True)
    plt.subplots_adjust(left=0.07,
                        hspace=0.05,
                        wspace=0.05,
                        top=0.92,
                        bottom=0.17)
    markers = ['+', 'x', '*', 'X']
    for j in range(num_nights):
        inds = night_inds[j]
        if j == 0:
            ax[0, j].set_ylabel('Cutout X Position', fontsize=axis_title_size)
            ax[1, j].set_ylabel('Cutout Y Position', fontsize=axis_title_size)

        for i in range(len(source_names)):
            cutout_x = np.array(centroided_sources[source_names[i] +
                                                   ' Cutout X'][inds],
                                dtype='float')
            cutout_y = np.array(centroided_sources[source_names[i] +
                                                   ' Cutout Y'][inds],
                                dtype='float')

            if i == 0:
                marker = 'o'
                label = 'Target'
            else:
                marker = markers[(i - 1) % len(markers)]
                label = 'Ref. ' + str(i)
            ax[0, j].plot(times_nights[j],
                          cutout_x,
                          marker=marker,
                          label=label,
                          linestyle='')
            ax[1, j].plot(times_nights[j],
                          cutout_y,
                          marker=marker,
                          linestyle='')

        ax[0, j].tick_params(labelsize=axis_ticks_font_size)
        ax[0, j].set_xticklabels([])
        ax[0, j].axhline(box_w / 2,
                         zorder=0,
                         color='r',
                         label='Center pix.',
                         lw=2)
        ax[0, j].set_xlim(
            np.mean(times_nights[j]) - standard_x / 2,
            np.mean(times_nights[j]) + standard_x / 2)
        ax[0, j].grid(alpha=0.2)
        ax[1, j].tick_params(labelsize=axis_ticks_font_size)
        ax[1, j].axhline(box_w / 2,
                         zorder=0,
                         color='r',
                         label='Center pix.',
                         lw=2)
        ax[1, j].set_xlim(
            np.mean(times_nights[j]) - standard_x / 2,
            np.mean(times_nights[j]) + standard_x / 2)
        ax[1, j].set_xlabel('Time (JD UTC)', fontsize=axis_title_size)
        ax[1, j].grid(alpha=0.2)

        if j == num_nights - 1:
            ax[0, j].legend(bbox_to_anchor=(1.01, 1.0),
                            fontsize=legend_font_size)

    plt.suptitle(short_name + ' Cutout Centroid Positions',
                 fontsize=title_size)

    output_filename = pines_path / ('Objects/' + short_name +
                                    '/analysis/diagnostic_plots/' +
                                    short_name + '_cutout_positions.png')
    plt.savefig(output_filename, dpi=300)

    return
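
#A small sketch of the shared-x-range convention used in the plot above (and
#in the lightcurve plots earlier): size every night's panel to the span of the
#longest night so per-night trends are visually comparable. Times below are
#synthetic JDs, not PINES data.
import numpy as np
import matplotlib.pyplot as plt

times_nights = [np.linspace(2459000.60, 2459000.85, 40),
                np.linspace(2459001.62, 2459001.78, 25)]
standard_x = max(t[-1] - t[0] for t in times_nights) + 0.05  #Longest night + pad.

fig, ax = plt.subplots(1, len(times_nights), figsize=(10, 4), sharey=True)
for j, t in enumerate(times_nights):
    flux = 1 + 0.01 * np.random.default_rng(j).standard_normal(len(t))
    ax[j].plot(t, flux, '.')
    #Center each night in a window of identical width.
    ax[j].set_xlim(np.mean(t) - standard_x/2, np.mean(t) + standard_x/2)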
Example #18
0
def epsf_phot(target, centroided_sources, plots=False):
    def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
        """
        Convert hours, minutes, seconds, and microseconds to fractional days.
        
        """
        days = sec + (micro / 1.e6)
        days = min + (days / 60.)
        days = hour + (days / 60.)
        return days / 24.
    
    def date_to_jd(year,month,day):
        """
        Convert a date to Julian Day.
        
        Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', 
            4th ed., Duffet-Smith and Zwart, 2011.
        
        """
        if month == 1 or month == 2:
            yearp = year - 1
            monthp = month + 12
        else:
            yearp = year
            monthp = month
        
        # this checks where we are in relation to October 15, 1582, the beginning
        # of the Gregorian calendar.
        if ((year < 1582) or
            (year == 1582 and month < 10) or
            (year == 1582 and month == 10 and day < 15)):
            # before start of Gregorian calendar
            B = 0
        else:
            # after start of Gregorian calendar
            A = math.trunc(yearp / 100.)
            B = 2 - A + math.trunc(A / 4.)
            
        if yearp < 0:
            C = math.trunc((365.25 * yearp) - 0.75)
        else:
            C = math.trunc(365.25 * yearp)
            
        D = math.trunc(30.6001 * (monthp + 1))
        
        jd = B + C + D + day + 1720994.5
        
        return jd
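
    #Worked check: date_to_jd(2000, 1, 1.5) returns 2451545.0, the JD of the
    #J2000.0 epoch, and hmsm_to_days(12) returns 0.5 (noon = half a day).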

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    reduced_path = pines_path/('Objects/'+short_name+'/reduced/')
    reduced_filenames = natsort.natsorted([x.name for x in reduced_path.glob('*.fits')])
    reduced_files = np.array([reduced_path/i for i in reduced_filenames])

    centroided_sources.columns = centroided_sources.columns.str.strip()
    source_names = natsort.natsorted(list(set([i.split(' ')[0]+' '+i.split(' ')[1] for i in centroided_sources.keys() if (i[0] == '2') or (i[0] == 'R')])))
    
    #Create output plot directories for each source.
    if plots:
        for name in source_names:
            #If the folders are already there, delete them. 
            source_path = (pines_path/('Objects/'+short_name+'/psf_phot/'+name+'/'))
            if source_path.exists():
                shutil.rmtree(source_path)
            #Create folders.
            os.mkdir(source_path)

    #Declare a new dataframe to hold the photometry information for all targets.
    columns = ['Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing']
    for i in range(0, len(source_names)):
        columns.append(source_names[i]+' Flux')
        columns.append(source_names[i]+' Flux Error')
    psf_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
    output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

    for i in range(0, len(reduced_files)):
        #Read in image data/header. 
        file = reduced_files[i]
        data = fits.open(file)[0].data
        header = fits.open(file)[0].header
        print('{}, image {} of {}.'.format(file.name, i+1, len(reduced_files)))

        #Read in some supporting information.
        log_path = pines_path/('Logs/'+file.name.split('.')[0]+'_log.txt')
        log = pines_log_reader(log_path)
        date_obs = header['DATE-OBS']
        #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds. 
        if len(date_obs.split(':')[-1].split('.')[0]) == 3:
            date_obs = date_obs.split(':')[0] + ':' + date_obs.split(':')[1] + ':' + date_obs.split(':')[-1][1:]
        #Keep a try/except clause here in case other unknown DATE-OBS formats pop up. 
        try:
            date = datetime.datetime.strptime(date_obs, '%Y-%m-%dT%H:%M:%S.%f')
        except:
            print('Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.')
            pdb.set_trace()
        
        days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
        jd = date_to_jd(date.year,date.month,days)
        psf_df['Filename'][i] = file.name
        psf_df['Time UT'][i] = header['DATE-OBS']
        psf_df['Time JD'][i] = jd
        psf_df['Airmass'][i] = header['AIRMASS']
        psf_df['Seeing'][i] = log['X seeing'][np.where(log['Filename'] == file.name.split('_')[0]+'.fits')[0][0]]
        
        #Read in source centroids for this image
        x = np.zeros(len(source_names))
        y = np.zeros(len(source_names))
        for j in range(len(source_names)):
            source = source_names[j]
            x[j] = centroided_sources[source+' X'][i]
            y[j] = centroided_sources[source+' Y'][i]

        #Extract pixel cutouts of our stars. Stars too close to the image boundaries would ordinarily be excluded here (full cutouts cannot be extracted for them); that mask is currently commented out below.
        size = 13
        hsize = (size - 1) / 2
        #mask = ((x > hsize) & (x < (data.shape[1] -1 - hsize)) & (y > hsize) & (y < (data.shape[0] -1 - hsize)) & (y > 100) & (y < 923))

        #Create table of good star positions
        stars_tbl = Table()
        stars_tbl['x'] = x
        stars_tbl['y'] = y
        
        #Subtract background (star cutouts from which we build the ePSF must have background subtracted).
        mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=2.)  
        data -= median_val
        
        #Replace nans in data using Gaussian. 
        # kernel = Gaussian2DKernel(x_stddev=0.5)
        # data = interpolate_replace_nans(data, kernel)

        #The extract_stars() function requires the input data as an NDData object. 
        nddata = NDData(data=data)  

        #Extract star cutouts.
        stars = extract_stars(nddata, stars_tbl, size=size)  
                        

        #Plot the star cutouts (at most nrows*ncols of them).
        nrows = 5
        ncols = 5
        fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 10), squeeze=True)
        ax = ax.ravel()
        for j in range(min(len(stars), nrows * ncols)):
            norm = simple_norm(stars[j], 'log', percent=99.)
            ax[j].imshow(stars[j].data, norm=norm, origin='lower', cmap='viridis')

        # pdb.set_trace()  #Debugging stop; disabled so the loop can run unattended.

        #Construct the ePSF using the star cutouts.
        epsf_fitter = EPSFFitter()
        epsf_builder = EPSFBuilder(maxiters=4, progress_bar=False, fitter=epsf_fitter)   

        try:
            epsf, fitted_stars = epsf_builder(stars)
            output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

            for j in range(len(stars)):
                star = stars[j]
                source_name = source_names[j]
                sigma_psf = 1.85

                dtype = [('x_0', 'f8'), ('y_0', 'f8')]
                pos = Table(data=np.zeros(1, dtype=dtype))
                source_x = stars_tbl['x'][j]
                source_y = stars_tbl['y'][j]
                pos['x_0'] = source_x - int(source_x - size/2 + 1)
                pos['y_0'] = source_y - int(source_y - size/2 + 1)

                daogroup = DAOGroup(4.0*sigma_psf*gaussian_sigma_to_fwhm)
                mmm_bkg = MMMBackground()
                photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=epsf,
                                    fitter=LevMarLSQFitter(),
                                    fitshape=(13,13),
                                    aperture_radius=4.)
                

                result_tab = photometry(image=star, init_guesses=pos)
                residual_image = photometry.get_residual_image()
                psf_df[source_name+' Flux'][i] = result_tab['flux_fit'][0]
                psf_df[source_name+' Flux Error'][i] = result_tab['flux_unc'][0]

                if plots:
                    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12,4))
                    im = ax[0].imshow(star, origin='lower')
                    divider = make_axes_locatable(ax[0])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im, cax=cax, orientation='vertical')
                    ax[0].plot(result_tab['x_fit'][0], result_tab['y_fit'][0], 'rx')
                    ax[0].set_title('Data')

                    im2 = ax[1].imshow(epsf.data, origin='lower')
                    ax[1].set_title('EPSF Model')
                    divider = make_axes_locatable(ax[1])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im2, cax=cax, orientation='vertical')

                    im3 = ax[2].imshow(residual_image, origin='lower')
                    ax[2].set_title('Residual Image')
                    divider = make_axes_locatable(ax[2])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im3, cax=cax, orientation='vertical')
                    plt.suptitle(source_name+'\n'+reduced_files[i].name+', image '+str(i+1)+' of '+str(len(reduced_files)))
                    plt.subplots_adjust(wspace=0.5, top=0.95, bottom = 0.05)
                    plot_output_name = pines_path/('Objects/'+short_name+'/psf_phot/'+source_name+'/'+str(i).zfill(4)+'.jpg')
                    plt.savefig(plot_output_name)
                    plt.close()
        except:
            print('')
            print('EPSF BUILDER FAILED, SKIPPING IMAGE.')
            print('')
        #Plot the ePSF. 
        # plt.figure()
        # norm = simple_norm(epsf.data, 'log', percent=99.)
        # plt.imshow(epsf.data, norm=norm, origin='lower', cmap='viridis')
        # cb = plt.colorbar()
        # plt.tight_layout()   

        

    print('Saving psf photometry output to {}.'.format(output_filename))
    with open(output_filename, 'w') as f:
        for j in range(len(psf_df)):
            if j == 0:
                f.write('{:>21s}, {:>22s}, {:>17s}, {:>7s}, {:>7s}, '.format('Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing'))
                for i in range(len(source_names)):
                    if i != len(source_names) - 1:
                        f.write('{:>20s}, {:>26s}, '.format(source_names[i]+' Flux', source_names[i]+' Flux Error'))
                    else:
                        f.write('{:>20s}, {:>26s}\n'.format(source_names[i]+' Flux', source_names[i]+' Flux Error'))

            format_string = '{:21s}, {:22s}, {:17.9f}, {:7.2f}, {:7.1f}, '

            #If the seeing value for this image is 'nan' (a string), convert it to a float. 
            #TODO: Not sure why it's being read in as a string, fix that. 
            if type(psf_df['Seeing'][j]) == str:
                psf_df['Seeing'][j] = float(psf_df['Seeing'][j])

            #Do a try/except clause for writeout, in case it breaks in the future. 
            try:
                f.write(format_string.format(psf_df['Filename'][j], psf_df['Time UT'][j], psf_df['Time JD'][j], psf_df['Airmass'][j], psf_df['Seeing'][j]))
            except:
                print('Writeout failed! Inspect quantities you are trying to write out.')
                pdb.set_trace()
            for i in range(len(source_names)):                    
                if i != len(source_names) - 1:
                    format_string = '{:20.11f}, {:26.11f}, '
                else:
                    format_string = '{:20.11f}, {:26.11f}\n'
                
                f.write(format_string.format(psf_df[source_names[i]+' Flux'][j], psf_df[source_names[i]+' Flux Error'][j]))
    print('')    
    return
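
#A minimal, self-contained sketch of the ePSF workflow used above, following
#the photutils extract_stars/EPSFBuilder pattern. The image and star positions
#below are synthetic, not PINES data.
import numpy as np
from astropy.nddata import NDData
from astropy.table import Table
from astropy.stats import sigma_clipped_stats
from astropy.modeling import models
from photutils.psf import extract_stars, EPSFBuilder

yy, xx = np.mgrid[:256, :256]
data = np.random.default_rng(4).normal(0., 1., (256, 256))
stars_tbl = Table({'x': [60.0, 130.0, 200.0], 'y': [70.0, 150.0, 90.0]})
for xs, ys in zip(stars_tbl['x'], stars_tbl['y']):
    data += models.Gaussian2D(500., xs, ys, 1.8, 1.8)(xx, yy)

data -= sigma_clipped_stats(data, sigma=2.)[1]  #Subtract the median background.
stars = extract_stars(NDData(data=data), stars_tbl, size=13)
epsf, fitted_stars = EPSFBuilder(maxiters=4, progress_bar=False)(stars)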
           
Example #19
0
def bpm_maker(flat_date, dark_date, exptime, band, upload=False, sftp=''):
    pines_path = pines_dir_check()

    #Load in the different masks.
    kokopelli_path = pines_path / (
        'Calibrations/Kokopelli Mask/kokopelli_mask.fits')
    kokopelli_mask = (
        1 - fits.open(kokopelli_path)[0].data).astype('int')[0:1024, :]

    variable_path = pines_path / ('Calibrations/Variable Pixel Masks/vpm_' +
                                  str(exptime) + '_s_' + dark_date + '.fits')
    variable_mask = fits.open(variable_path)[0].data

    hot_path = pines_path / ('Calibrations/Hot Pixel Masks/hpm_' +
                             str(exptime) + '_s_' + dark_date + '.fits')
    hot_mask = fits.open(hot_path)[0].data

    dead_path = pines_path / ('Calibrations/Dead Pixel Masks/dpm_' + band +
                              '_' + flat_date + '.fits')
    dead_mask = fits.open(dead_path)[0].data

    #Combine all masks into a single bad pixel mask.
    bpm = np.zeros(np.shape(dead_mask), dtype='int')
    bad_locs = np.where((kokopelli_mask == 1) | (variable_mask == 1)
                        | (hot_mask == 1) | (dead_mask == 1))
    bpm[bad_locs] = 1

    num_bad = len(np.where(bpm == 1)[0])
    frac_bad = num_bad / 1024**2

    print('{} percent of the detector flagged as bad.'.format(
        np.round(frac_bad * 100, 1)))
    # plt.ion()
    # plt.imshow(bpm, origin='lower')

    output_filename = 'bpm_' + band + '_' + str(
        exptime) + '_s_' + flat_date + '.fits'
    output_path = pines_path / ('Calibrations/Bad Pixel Masks/' +
                                output_filename)

    hdu = fits.PrimaryHDU(bpm)
    hdu.header['HIERARCH DATE CREATED'] = datetime.utcnow().strftime(
        '%Y-%m-%d') + 'T' + datetime.utcnow().strftime('%H:%M:%S')

    #Now save to a file on your local machine.
    print('')
    print('Writing the file to ' + output_filename)

    #Check to see if other files of this name exist.
    # if os.path.exists(output_path):
    #     print('')
    #     print('WARNING: This will overwrite {}!'.format(output_path))
    #     dark_check = input('Do you want to continue? y/n: ')
    #     if dark_check == 'y':
    #         hdu.writeto(output_path,overwrite=True)
    #         print('Wrote to {}!'.format(output_path))
    #     else:
    #         print('Not overwriting!')
    # else:
    hdu.writeto(output_path, overwrite=True)
    print('Wrote to {}!'.format(output_path))
    print('')

    #Upload the bad pixel mask to the PINES server.
    if upload:
        print('Beginning upload process to pines.bu.edu...')
        print('Note, only PINES admins will be able to upload.')
        print('')
        sftp.chdir('/')
        sftp.chdir('data/calibrations/Bad Pixel Masks')
        upload_name = output_filename
        # if upload_name in sftp.listdir():
        #     print('WARNING: This will overwrite {} in pines.bu.edu:data/calibrations/Bad Pixel Masks/'.format(upload_name))
        #     upload_check = input('Do you want to continue? y/n: ')
        #     if upload_check == 'y':
        #         sftp.put(output_path,upload_name)
        #         print('Uploaded to pines.bu.edu:data/calibrations/Bad Pixel Masks/!')
        #     else:
        #         print('Skipping upload!')
        # else:
        sftp.put(output_path, upload_name)
        print(
            'Uploaded {} to pines.bu.edu:data/calibrations/Bad Pixel Masks/!'.
            format(upload_name))
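
#The mask combination above, as a vectorized one-liner sketch: a pixel is bad
#if it is flagged in any of the constituent masks. The mask arrays below are
#hypothetical 0/1 integer images of the same shape.
import numpy as np

masks = [np.random.default_rng(5).integers(0, 2, (1024, 1024)) for _ in range(4)]
bpm = (np.sum(masks, axis=0) > 0).astype(int)  #1 where any mask flags the pixel.
frac_bad = bpm.sum() / bpm.size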
Example #20
0
def centroider(target,
               sources,
               output_plots=False,
               gif=False,
               restore=False,
               box_w=8):
    matplotlib.use('TkAgg')
    plt.ioff()
    t1 = time.time()
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    kernel = Gaussian2DKernel(x_stddev=1)  #For fixing nans in cutouts.

    #If restore == True, read in existing output and return.
    if restore:
        centroid_df = pd.read_csv(
            pines_path / ('Objects/' + short_name +
                          '/sources/target_and_references_centroids.csv'),
            converters={
                'X Centroids': eval,
                'Y Centroids': eval
            })
        print('Restoring centroider output from {}.'.format(
            pines_path / ('Objects/' + short_name +
                          '/sources/target_and_references_centroids.csv')))
        print('')
        return centroid_df

    #Create subdirectories in sources folder to contain output plots.
    if output_plots:
        subdirs = glob(
            str(pines_path / ('Objects/' + short_name + '/sources')) + '/*/')
        #Delete any source directories that are already there.
        for name in subdirs:
            shutil.rmtree(name)

        #Create new source directories.
        for name in sources['Name']:
            source_path = (
                pines_path /
                ('Objects/' + short_name + '/sources/' + name + '/'))
            os.mkdir(source_path)

    #Read in extra shifts, in case the master image wasn't used for source detection.
    extra_shift_path = pines_path / ('Objects/' + short_name +
                                     '/sources/extra_shifts.txt')
    extra_shifts = pd.read_csv(extra_shift_path,
                               delimiter=' ',
                               names=['Extra X shift', 'Extra Y shift'])
    extra_x_shift = extra_shifts['Extra X shift'][0]
    extra_y_shift = extra_shifts['Extra Y shift'][0]

    np.seterr(
        divide='ignore', invalid='ignore'
    )  #Suppress some warnings we don't care about in median combining.

    #Get list of reduced files for target.
    reduced_path = pines_path / ('Objects/' + short_name + '/reduced')
    reduced_filenames = natsort.natsorted(
        [x.name for x in reduced_path.glob('*red.fits')])
    reduced_files = np.array([reduced_path / i for i in reduced_filenames])

    #Declare a new dataframe to hold the centroid information for all sources we want to track.
    columns = []
    columns.append('Filename')
    columns.append('Seeing')
    columns.append('Time (JD UTC)')
    columns.append('Airmass')

    #Add x/y positions and centroid flags for every tracked source
    for i in range(0, len(sources)):
        columns.append(sources['Name'][i] + ' Image X')
        columns.append(sources['Name'][i] + ' Image Y')
        columns.append(sources['Name'][i] + ' Cutout X')
        columns.append(sources['Name'][i] + ' Cutout Y')
        columns.append(sources['Name'][i] + ' Centroid Warning')

    centroid_df = pd.DataFrame(index=range(len(reduced_files)),
                               columns=columns)

    log_path = pines_path / ('Logs/')
    log_dates = np.array(
        natsort.natsorted(
            [x.name.split('_')[0] for x in log_path.glob('*.txt')]))

    #Make sure we have logs for all the nights of these data. Need them to account for image shifts.
    nights = list(set([i.name.split('.')[0] for i in reduced_files]))
    for i in nights:
        if i not in log_dates:
            print('ERROR: {} not in {}. Download it from the PINES server.'.
                  format(i + '_log.txt', log_path))
            pdb.set_trace()

    shift_tolerance = 2.0  #Number of pixels that the measured centroid can be away from the expected position in either x or y before trying other centroiding algorithms.
    for i in range(len(sources)):
        #Get the initial source position.
        x_pos = sources['Source Detect X'][i]
        y_pos = sources['Source Detect Y'][i]
        print('')
        print(
            'Getting centroids for {}, ({:3.1f}, {:3.1f}) in source detection image. Source {} of {}.'
            .format(sources['Name'][i], x_pos, y_pos, i + 1, len(sources)))
        if output_plots:
            print('Saving centroid plots to {}.'.format(
                pines_path / ('Objects/' + short_name + '/sources/' +
                              sources['Name'][i] + '/')))
        pbar = ProgressBar()
        for j in pbar(range(len(reduced_files))):
            centroid_df[sources['Name'][i] + ' Centroid Warning'][j] = 0
            file = reduced_files[j]
            image = fits.open(file)[0].data
            #Get the measured image shift for this image.
            log = pines_log_reader(log_path /
                                   (file.name.split('.')[0] + '_log.txt'))
            log_ind = np.where(log['Filename'] == file.name.split('_')[0] +
                               '.fits')[0][0]

            x_shift = float(log['X shift'][log_ind])
            y_shift = float(log['Y shift'][log_ind])

            #Save the filename for readability. Save the seeing for use in variable aperture photometry. Save the time for diagnostic plots.
            if i == 0:
                centroid_df['Filename'][j] = file.name.split('_')[0] + '.fits'
                centroid_df['Seeing'][j] = log['X seeing'][log_ind]
                time_str = fits.open(file)[0].header['DATE-OBS']

                #Correct some formatting issues that can occur in Mimir time stamps.
                if time_str.split(':')[-1] == '60.00':
                    time_str = time_str[0:14] + str(
                        int(time_str.split(':')[-2]) + 1) + ':00.00'
                elif time_str.split(':')[-1] == '010.00':
                    time_str = time_str[0:17] + time_str.split(':')[-1][1:]

                centroid_df['Time (JD UTC)'][j] = julian.to_jd(
                    datetime.datetime.strptime(time_str,
                                               '%Y-%m-%dT%H:%M:%S.%f'))
                centroid_df['Airmass'][j] = log['Airmass'][log_ind]

            nan_flag = False  #Flag indicating if you should not trust the log's shifts. Set to True if x_shift/y_shift are NaN or > 200 pixels.

            #If bad shifts were measured for this image, skip.
            if log['Shift quality flag'][log_ind] == 1:
                continue

            if np.isnan(x_shift) or np.isnan(y_shift):
                x_shift = 0
                y_shift = 0
                nan_flag = True

            #If there are clouds, the measured shifts can be erroneously large; flag them instead of trusting the values.
            if abs(x_shift) > 200:
                #x_shift = 0
                nan_flag = True
            if abs(y_shift) > 200:
                #y_shift = 0
                nan_flag = True

            #Apply the shift. NOTE: This relies on having accurate x_shift and y_shift values from the log.
            #If they're incorrect, the cutout will not be in the right place.
            #x_pos = sources['Source Detect X'][i] - x_shift + extra_x_shift
            #y_pos = sources['Source Detect Y'][i] + y_shift - extra_y_shift

            x_pos = sources['Source Detect X'][i] - (x_shift - extra_x_shift)
            y_pos = sources['Source Detect Y'][i] + (y_shift - extra_y_shift)

            #TODO: Make all this its own function.

            #Cutout around the expected position and interpolate over any NaNs (which screw up source detection).
            cutout = interpolate_replace_nans(
                image[int(y_pos - box_w):int(y_pos + box_w) + 1,
                      int(x_pos - box_w):int(x_pos + box_w) + 1],
                kernel=Gaussian2DKernel(x_stddev=0.5))

            #interpolate_replace_nans struggles with edge pixels, so shave off edge_shave pixels in each direction of the cutout.
            edge_shave = 1
            cutout = cutout[edge_shave:len(cutout) - edge_shave,
                            edge_shave:len(cutout) - edge_shave]

            vals, lower, upper = sigmaclip(
                cutout, low=1.5,
                high=2.5)  #Get sigma clipped stats on the cutout
            med = np.nanmedian(vals)
            std = np.nanstd(vals)

            try:
                centroid_x_cutout, centroid_y_cutout = centroid_2dg(
                    cutout - med)  #Perform centroid detection on the cutout.
            except:
                pdb.set_trace()

            centroid_x = centroid_x_cutout + int(
                x_pos
            ) - box_w + edge_shave  #Translate the detected centroid from the cutout coordinates back to the full-frame coordinates.
            centroid_y = centroid_y_cutout + int(y_pos) - box_w + edge_shave

            # if i == 0:
            #     qp(cutout)
            #     plt.plot(centroid_x_cutout, centroid_y_cutout, 'rx')

            #     # qp(image)
            #     # plt.plot(centroid_x, centroid_y, 'rx')
            #     pdb.set_trace()

            #If the shifts in the log are not NaN or > 200 pixels, check whether the measured centroid is within shift_tolerance pixels of the expected position.
            #   If it isn't, try alternate centroiding methods to find it.

            #Otherwise, accept the centroid measured above; PINES_watchdog likely failed while observing, and we don't expect the centroids measured here to actually be at the expected position.
            if not nan_flag:
                #Try a 2D Gaussian detection.
                if (abs(centroid_x - x_pos) > shift_tolerance) or (
                        abs(centroid_y - y_pos) > shift_tolerance):
                    centroid_x_cutout, centroid_y_cutout = centroid_2dg(
                        cutout - med)
                    centroid_x = centroid_x_cutout + int(x_pos) - box_w
                    centroid_y = centroid_y_cutout + int(y_pos) - box_w

                    #If that fails, try a COM detection.
                    if (abs(centroid_x - x_pos) > shift_tolerance) or (
                            abs(centroid_y - y_pos) > shift_tolerance):
                        centroid_x_cutout, centroid_y_cutout = centroid_com(
                            cutout - med)
                        centroid_x = centroid_x_cutout + int(x_pos) - box_w
                        centroid_y = centroid_y_cutout + int(y_pos) - box_w

                        #If that fails, try masking source and interpolate over any bad pixels that aren't in the bad pixel mask, then redo 1D gaussian detection.
                        if (abs(centroid_x - x_pos) > shift_tolerance) or (
                                abs(centroid_y - y_pos) > shift_tolerance):
                            mask = make_source_mask(cutout,
                                                    nsigma=4,
                                                    npixels=5,
                                                    dilate_size=3)
                            vals, lo, hi = sigmaclip(cutout[~mask])
                            bad_locs = np.where((mask == False) & (
                                (cutout > hi) | (cutout < lo)))
                            cutout[bad_locs] = np.nan
                            cutout = interpolate_replace_nans(
                                cutout, kernel=Gaussian2DKernel(x_stddev=0.5))

                            centroid_x_cutout, centroid_y_cutout = centroid_1dg(
                                cutout - med)
                            centroid_x = centroid_x_cutout + int(x_pos) - box_w
                            centroid_y = centroid_y_cutout + int(y_pos) - box_w

                            #Try a 2D Gaussian detection on the interpolated cutout
                            if (abs(centroid_x - x_pos) > shift_tolerance) or (
                                    abs(centroid_y - y_pos) > shift_tolerance):
                                centroid_x_cutout, centroid_y_cutout = centroid_2dg(
                                    cutout - med)
                                centroid_x = centroid_x_cutout + int(
                                    x_pos) - box_w
                                centroid_y = centroid_y_cutout + int(
                                    y_pos) - box_w

                                #Try a COM on the interpolated cutout.
                                if (abs(centroid_x - x_pos) > shift_tolerance
                                    ) or (abs(centroid_y - y_pos) >
                                          shift_tolerance):
                                    centroid_x_cutout, centroid_y_cutout = centroid_com(
                                        cutout)
                                    centroid_x = centroid_x_cutout + int(
                                        x_pos) - box_w
                                    centroid_y = centroid_y_cutout + int(
                                        y_pos) - box_w

                                    #Last resort: try cutting off the edge of the cutout. Edge pixels can experience poor interpolation, and this sometimes helps.
                                    if (abs(centroid_x - x_pos) >
                                            shift_tolerance) or (
                                                abs(centroid_y - y_pos) >
                                                shift_tolerance):
                                        cutout = cutout[1:-1, 1:-1]
                                        centroid_x_cutout, centroid_y_cutout = centroid_1dg(
                                            cutout - med)
                                        centroid_x = centroid_x_cutout + int(
                                            x_pos) - box_w + 1
                                        centroid_y = centroid_y_cutout + int(
                                            y_pos) - box_w + 1

                                        #Try with a 2DG
                                        if (abs(centroid_x - x_pos) >
                                                shift_tolerance) or (
                                                    abs(centroid_y - y_pos) >
                                                    shift_tolerance):
                                            centroid_x_cutout, centroid_y_cutout = centroid_2dg(
                                                cutout - med)
                                            centroid_x = centroid_x_cutout + int(
                                                x_pos) - box_w + 1
                                            centroid_y = centroid_y_cutout + int(
                                                y_pos) - box_w + 1

                                            #If ALL that fails, report the expected position as the centroid.
                                            if (abs(centroid_x - x_pos) >
                                                    shift_tolerance) or (
                                                        abs(centroid_y - y_pos)
                                                        > shift_tolerance):
                                                print(
                                                    'WARNING: large centroid deviation measured, returning predicted position'
                                                )
                                                print('')
                                                centroid_df[
                                                    sources['Name'][i] +
                                                    ' Centroid Warning'][j] = 1
                                                centroid_x = x_pos
                                                centroid_y = y_pos
                                                #pdb.set_trace()

            #Check that your measured position is actually on the detector.
            if (centroid_x < 0) or (centroid_y < 0) or (centroid_x > 1023) or (
                    centroid_y > 1023):
                #Try a quick mask/interpolation of the cutout.
                mask = make_source_mask(cutout,
                                        nsigma=3,
                                        npixels=5,
                                        dilate_size=3)
                vals, lo, hi = sigmaclip(cutout[~mask])
                bad_locs = np.where((mask == False)
                                    & ((cutout > hi) | (cutout < lo)))
                cutout[bad_locs] = np.nan
                cutout = interpolate_replace_nans(
                    cutout, kernel=Gaussian2DKernel(x_stddev=0.5))
                centroid_x, centroid_y = centroid_2dg(cutout - med)
                centroid_x += int(x_pos) - box_w
                centroid_y += int(y_pos) - box_w
                if (centroid_x < 0) or (centroid_y < 0) or (
                        centroid_x > 1023) or (centroid_y > 1023):
                    print(
                        'WARNING: large centroid deviation measured, returning predicted position'
                    )
                    print('')
                    centroid_df[sources['Name'][i] +
                                ' Centroid Warning'][j] = 1
                    centroid_x = x_pos
                    centroid_y = y_pos
                    #pdb.set_trace()

            #Check to make sure you didn't measure NaNs.
            if np.isnan(centroid_x):
                centroid_x = x_pos
                print(
                    'NaN returned from centroid algorithm, defaulting to target position in the source detection image.'
                )
            if np.isnan(centroid_y):
                centroid_y = y_pos
                print(
                    'NaN returned from centroid algorithm, defaulting to target position in the source detection image.'
                )

            #Record the image and relative cutout positions.
            centroid_df[sources['Name'][i] + ' Image X'][j] = centroid_x
            centroid_df[sources['Name'][i] + ' Image Y'][j] = centroid_y
            centroid_df[sources['Name'][i] +
                        ' Cutout X'][j] = centroid_x_cutout
            centroid_df[sources['Name'][i] +
                        ' Cutout Y'][j] = centroid_y_cutout

            if output_plots:
                #Plot
                lock_x = int(centroid_df[sources['Name'][i] + ' Image X'][0])
                lock_y = int(centroid_df[sources['Name'][i] + ' Image Y'][0])
                norm = ImageNormalize(data=cutout, interval=ZScaleInterval())
                plt.imshow(image, origin='lower', norm=norm)
                plt.plot(centroid_x, centroid_y, 'rx')
                ap = CircularAperture((centroid_x, centroid_y), r=5)
                ap.plot(lw=2, color='b')
                plt.ylim(lock_y - 30, lock_y + 30 - 1)
                plt.xlim(lock_x - 30, lock_x + 30 - 1)
                plt.title('CENTROID DIAGNOSTIC PLOT\n' + sources['Name'][i] +
                          ', ' + reduced_files[j].name + ' (image ' +
                          str(j + 1) + ' of ' + str(len(reduced_files)) + ')',
                          fontsize=10)
                plt.text(centroid_x,
                         centroid_y + 0.5,
                         '(' + str(np.round(centroid_x, 1)) + ', ' +
                         str(np.round(centroid_y, 1)) + ')',
                         color='r',
                         ha='center')
                plot_output_path = (
                    pines_path /
                    ('Objects/' + short_name + '/sources/' +
                     sources['Name'][i] + '/' + str(j).zfill(4) + '.jpg'))
                plt.gca().set_axis_off()
                plt.subplots_adjust(top=1,
                                    bottom=0,
                                    right=1,
                                    left=0,
                                    hspace=0,
                                    wspace=0)
                plt.margins(0, 0)
                plt.gca().xaxis.set_major_locator(plt.NullLocator())
                plt.gca().yaxis.set_major_locator(plt.NullLocator())
                plt.savefig(plot_output_path,
                            bbox_inches='tight',
                            pad_inches=0,
                            dpi=150)
                plt.close()

        if gif:
            gif_path = (pines_path / ('Objects/' + short_name + '/sources/' +
                                      sources['Name'][i] + '/'))
            gif_maker(path=gif_path, fps=10)

    output_filename = pines_path / (
        'Objects/' + short_name +
        '/sources/target_and_references_centroids.csv')
    #centroid_df.to_csv(pines_path/('Objects/'+short_name+'/sources/target_and_references_centroids.csv'))

    print('Saving centroiding output to {}.'.format(output_filename))
    with open(output_filename, 'w') as f:
        for j in range(len(centroid_df)):
            #Write the header line.
            if j == 0:
                f.write('{:<17s}, '.format('Filename'))
                f.write('{:<15s}, '.format('Time (JD UTC)'))
                f.write('{:<6s}, '.format('Seeing'))
                f.write('{:<7s}, '.format('Airmass'))
                for i in range(len(sources['Name'])):
                    n = sources['Name'][i]
                    if i != len(sources['Name']) - 1:
                        f.write(
                            '{:<23s}, {:<23s}, {:<24s}, {:<24s}, {:<34s}, '.
                            format(n + ' Image X', n + ' Image Y',
                                   n + ' Cutout X', n + ' Cutout Y',
                                   n + ' Centroid Warning'))
                    else:
                        f.write(
                            '{:<23s}, {:<23s}, {:<24s}, {:<24s}, {:<34s}\n'.
                            format(n + ' Image X', n + ' Image Y',
                                   n + ' Cutout X', n + ' Cutout Y',
                                   n + ' Centroid Warning'))

            #Write in the data lines.
            try:
                f.write('{:<17s}, '.format(centroid_df['Filename'][j]))
                f.write('{:<15.7f}, '.format(centroid_df['Time (JD UTC)'][j]))
                f.write('{:<6.1f}, '.format(float(centroid_df['Seeing'][j])))
                f.write('{:<7.2f}, '.format(centroid_df['Airmass'][j]))
            except:
                pdb.set_trace()

            for i in range(len(sources['Name'])):
                n = sources['Name'][i]
                if i != len(sources['Name']) - 1:
                    format_string = '{:<23.4f}, {:<23.4f}, {:<24.4f}, {:<24.4f}, {:<34d}, '
                else:
                    format_string = '{:<23.4f}, {:<23.4f}, {:<24.4f}, {:<24.4f}, {:<34d}\n'

                f.write(
                    format_string.format(
                        centroid_df[n + ' Image X'][j],
                        centroid_df[n + ' Image Y'][j],
                        centroid_df[n + ' Cutout X'][j],
                        centroid_df[n + ' Cutout Y'][j],
                        centroid_df[n + ' Centroid Warning'][j]))
    np.seterr(divide='warn', invalid='warn')
    print('')
    print('centroider runtime: {:.2f} minutes.'.format(
        (time.time() - t1) / 60))
    print('')
    return centroid_df
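
A minimal usage sketch for centroider (the target name and source positions are hypothetical; in practice the sources dataframe comes from the source detection step):

import pandas as pd

# Stand-in for the source detection output: names plus detected x/y positions.
sources = pd.DataFrame({'Name': ['Target', 'Reference 1'],
                        'Source Detect X': [512.3, 610.7],
                        'Source Detect Y': [498.1, 402.5]})

# Measure centroids in every reduced image, saving a diagnostic plot per image.
centroid_df = centroider('2MASS J00000000+0000000', sources, output_plots=True)

# On subsequent runs, skip re-measurement and restore the saved csv instead.
centroid_df = centroider('2MASS J00000000+0000000', sources, restore=True)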
Example #21
0
def movie(long_target_name,
          dates_to_examine=[],
          target_number=0,
          box_size=300,
          show_centroid=1):
    '''Authors:
            Patrick Tamburo, Boston University, Jan 2020, July 2020
        Purpose:
        Inputs:
        Outputs:
        TODO:
    '''
    import time
    warnings.filterwarnings(
        'ignore', category=UserWarning, append=True
    )  #This turns off NaN warnings in sigma_clipped_stats; otherwise we'd get a warning on every line.
    kernel = Gaussian2DKernel(x_stddev=0.5)

    pines_path = pines_dir_check()
    target_name = short_name_creator(long_target_name)
    object_path = pines_path / ('Objects/' + target_name + '/')
    reduced_path = object_path / 'reduced/'
    reduced_files = np.array(
        natsort.natsorted([x for x in reduced_path.glob('*.fits')]))
    centroid_path = object_path / 'sources/'

    positions = pd.read_csv(centroid_path /
                            'target_and_references_centroids.csv')
    x_positions = np.array(positions[positions.keys()[2 * target_number]])
    y_positions = np.array(positions[positions.keys()[2 * target_number + 1]])
    initial_position = (int(x_positions[0]), int(y_positions[0]))

    #Get list of files
    all_frame_list = np.array([])
    for i in range(np.size(reduced_files)):
        file_name = reduced_files[i].name
        all_frame_list = np.append(all_frame_list, file_name)
    num_files = len(reduced_files)

    dates = np.array([
        reduced_files[i].name.split('.')[0] for i in range(len(reduced_files))
    ])

    plt.ion()
    fig, ax = plt.subplots(1, 1, figsize=(8, 7))
    for i in range(len(reduced_files)):
        if len(dates_to_examine) != 0:
            if reduced_files[i].name.split('.')[0] in dates_to_examine:
                image_path = reduced_files[i]
                title = image_path.name
                image = fits.open(image_path)[0].data
                header = fits.open(image_path)[0].header

                image = interpolate_replace_nans(image, kernel)

                image_bg = Background2D(image, 64)
                image = image - image_bg.background
                frame = image[initial_position[1] -
                              int(box_size / 2):initial_position[1] +
                              int(box_size / 2), initial_position[0] -
                              int(box_size / 2):initial_position[0] +
                              int(box_size / 2)]

                norm = ImageNormalize(frame,
                                      interval=ZScaleInterval(),
                                      stretch=SquaredStretch())
                im = ax.imshow(image, origin='lower', norm=norm)
                ax.set_xlim(initial_position[0] - int(box_size / 2),
                            initial_position[0] + int(box_size / 2))
                ax.set_ylim(initial_position[1] - int(box_size / 2),
                            initial_position[1] + int(box_size / 2))
                if show_centroid:
                    ax.plot(x_positions[i], y_positions[i], 'mo')
                ax.set_title(title)
                plt.pause(0.01)
                ax.cla()
        else:
            image_path = reduced_files[i]
            title = image_path.name
            image = fits.open(image_path)[0].data
            header = fits.open(image_path)[0].header
            frame = image[initial_position[1] -
                          int(box_size / 2):initial_position[1] +
                          int(box_size / 2), initial_position[0] -
                          int(box_size / 2):initial_position[0] +
                          int(box_size / 2)]
            avg, med, std = sigma_clipped_stats(image)
            im = ax.imshow(image, origin='lower', vmin=med, vmax=med + 5 * std)
            ax.set_xlim(initial_position[0] - int(box_size / 2),
                        initial_position[0] + int(box_size / 2))
            ax.set_ylim(initial_position[1] - int(box_size / 2),
                        initial_position[1] + int(box_size / 2))
            cb = fig.colorbar(im, orientation='vertical', label='Counts')
            if show_centroid:
                ax.plot(x_positions[i], y_positions[i], 'bx')
            ax.set_title(title)
            plt.pause(0.01)
            ax.cla()
            cb.remove()
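
A minimal usage sketch for movie (hypothetical target and date; centroids must already exist in the target's sources/target_and_references_centroids.csv):

# Page through one night's reduced images, boxed on the target's first
# centroid position, overplotting the measured centroid in each frame.
movie('2MASS J00000000+0000000', dates_to_examine=['20201003'],
      target_number=0, box_size=300, show_centroid=1)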
Example #22
0
def dark(date, exptime, dark_start=0, dark_stop=0, upload=False, delete_raw=False, sftp=''):
    clip_lvl = 3 #The value to use for sigma clipping. 
    pines_path = pines_dir_check()
    np.seterr(invalid='ignore') #Suppress some warnings we don't care about in median combining. 
    exptime = float(exptime)
    plt.ion() #Turn on interactive plotting.

    t1 = time.time()
    #If an sftp connection to the PINES server was passed, download the dark data. 
    if type(sftp) == pysftp.Connection:
        sftp.chdir('/data/raw/mimir')
        run_list = sftp.listdir()
        data_path = '' #Initialize to check that it gets filled. 
        for i in range(len(run_list)):
            sftp.chdir(run_list[i])    
            date_list = sftp.listdir()
            if date in date_list:
                data_path = sftp.getcwd()
                print('{} directory found in pines.bu.edu:{}/\n'.format(date,data_path))
                sftp.chdir(date)
                break
            sftp.chdir('..')
    
        if data_path == '':
            print('ERROR: specified date not found in any run on pines.bu.edu:data/raw/mimir/\n')
            return
        else:
            #If the file start/stop numbers are specified, grab those files.
            if (dark_stop != 0):
                files_in_dir = sftp.listdir()
                dark_filenums = np.arange(dark_start, dark_stop+1, step=1)
                dark_files = []

                #Add the darks to the file list. 
                for i in range(len(dark_filenums)):
                    file_num = dark_filenums[i]
                    #Generate the filename. 
                    if file_num < 10:
                        file_name = date+'.00'+str(file_num)+'.fits'
                    elif (file_num >= 10) and (file_num < 100):
                        file_name = date+'.0'+str(file_num)+'.fits'
                    else:
                        file_name = date+'.'+str(file_num)+'.fits'
                    #Check if the file name is in the directory, and if so, append it to the list of dark files. 
                    if file_name in files_in_dir:
                        dark_files.append(file_name)
                    else:
                        print('{} not found in directory, skipping.'.format(file_name))        
            else:
                #Otherwise, find the files automatically using the night's log. 
                log_path = pines_path/'Logs'
                #Check if you already have the log for this date, if not, download it. 
                #Download from the /data/logs/ directory on PINES.
                if not (log_path/(date+'_log.txt')).exists():
                    print('Downloading {}_log.txt to {}\n'.format(date,log_path))
                    sftp.get('/data/logs/'+date+'_log.txt',log_path/(date+'_log.txt'))
                
                #Read in the log from this date.
                log = pines_log_reader(log_path/(date+'_log.txt'))

                #Identify dark files. 
                dark_inds = np.where((log['Target'] == 'Dark') & (log['Filename'] != 'test.fits') & (log['Exptime'] == exptime))[0]
                dark_files = natsort.natsorted(list(set(log['Filename'][dark_inds]))) #Set guarantees we only grab the unique files that have been identified as darks, in case the log bugged out. 
            print('Found {} dark files.'.format(len(dark_files)))
            print('')
            #Download data to the Calibrations/Darks/Raw/ directory. 
            dark_path = pines_path/('Calibrations/Darks/Raw')
            for j in range(len(dark_files)):
                if not (dark_path/dark_files[j]).exists():
                    sftp.get(dark_files[j],os.path.join(pines_path,dark_path/dark_files[j]))
                    print('Downloading {} to {}, {} of {}.'.format(dark_files[j], dark_path, j+1, len(dark_files)))
                else:
                    print('{} already in {}, skipping download.'.format(dark_files[j],dark_path))
            print('')

    #If no sftp was passed, search for files on disk. 
    else:
        dark_path = pines_path/('Calibrations/Darks/Raw')
        all_dark_files = natsort.natsorted(list(Path(dark_path).rglob(date+'*.fits')))
        dark_files = []
        for file in all_dark_files:
            if fits.open(file)[0].header['EXPTIME'] == exptime:
                dark_files.append(file)

    num_images = len(dark_files)

    if num_images == 0:
        raise RuntimeError('No raw dark files found on disk with date '+date+'!')

    print('Reading in ', num_images,' dark images.')
    dark_cube_raw = np.zeros([len(dark_files),1024,1024]) 
    print('')
    print('Dark frame information')
    print('-------------------------------------------------')
    print('ID   Mean               Stddev         Max    Min')
    print('-------------------------------------------------')
    for j in range(len(dark_files)):
        image_data = fits.open(dark_path/dark_files[j])[0].data[0:1024,:] #This line trims off the top two rows of the image, which are overscan.
        header = fits.open(dark_path/dark_files[j])[0].header
        if header['EXPTIME'] != exptime:
            print('ERROR: {} has an exposure time different from the requested exptime.'.format(dark_files[j]))
            return
        dark_cube_raw[j,:,:] = image_data 
        print(str(j+1)+'    '+str(np.mean(image_data))+'    '+str(np.std(image_data))+'    '+ str(np.amax(image_data))+'    '+str(np.amin(image_data)))

    cube_shape = np.shape(dark_cube_raw)

    master_dark = np.zeros((cube_shape[1], cube_shape[2]), dtype='float32')
    master_dark_stddev = np.zeros((cube_shape[1], cube_shape[2]), dtype='float32')

    print('')
    print('Combining the darks')
    print('......')

    pbar = ProgressBar()
    #For each pixel, calculate the mean, median, and standard deviation "through the stack" of darks.
    for x in pbar(range(cube_shape[1])):
        for y in range(cube_shape[2]):
            through_stack = dark_cube_raw[:,y,x]
            through_stack_median = np.nanmedian(through_stack)
            through_stack_stddev = np.nanstd(through_stack)

            #Flag values that are > clip_lvl-sigma discrepant from the median.
            good_inds = np.where((abs(through_stack - through_stack_median) / through_stack_stddev <= clip_lvl))[0]

            #Calculate the sigma-clipped mean and sigma-clipped stddev using good_inds. 
            s_c_mean = np.nanmean(through_stack[good_inds])
            s_c_stddev = np.nanstd(through_stack[good_inds])

            #Store the sigma-clipped mean as the master dark value for this pixel. 
            master_dark[y,x] = s_c_mean
            master_dark_stddev[y,x] = s_c_stddev
    

    np.seterr(invalid='warn') #Turn invalid warnings back on, in case it would permanently turn it off otherwise.

    output_filename = pines_path/('Calibrations/Darks/Master Darks/master_dark_'+str(exptime)+'_s_'+date+'.fits')
    
    #Add some header keywords detailing the master_dark creation process. 
    hdu = fits.PrimaryHDU(master_dark)
    hdu.header['HIERARCH DATE CREATED'] = datetime.utcnow().strftime('%Y-%m-%d')+'T'+datetime.utcnow().strftime('%H:%M:%S')

    #Now save to a file on your local machine. 
    #Check to see if other files of this name exist.
    # if os.path.exists(output_filename):
    #     print('')
    #     print('WARNING: This will overwrite {}!'.format(output_filename))
    #     dark_check = input('Do you want to continue? y/n: ')
    #     if dark_check == 'y':
    #         hdu.writeto(output_filename,overwrite=True)
    #         print('Wrote to {}!'.format(output_filename))
    #     else:
    #         print('Not overwriting!')
    # else:
    hdu.writeto(output_filename,overwrite=True)
    print('Wrote to {}!'.format(output_filename))
    
    #Upload the master dark to PINES server.
    if upload:
        print('Uploading to pines.bu.edu...')
        sftp.chdir('..')
        sftp.chdir('..')
        sftp.chdir('..')
        sftp.chdir('..')
        sftp.chdir('calibrations/Darks')
        upload_name = 'master_dark_'+str(exptime)+'_s_'+date+'.fits'
        # if upload_name in sftp.listdir():
        #     print('WARNING: This will overwrite {} in pines.bu.edu:data/calibrations/Darks/'.format(upload_name))
        #     upload_check = input('Do you want to continue? y/n: ')
        #     if upload_check == 'y':
        #         sftp.put(output_filename,upload_name)
        #         print('Uploaded to pines.bu.edu:data/calibrations/Darks/!')
        #     else:
        #         print('Skipping upload!')
        # else:
        sftp.put(output_filename,upload_name)
        print('Uploaded {} to pines.bu.edu:data/calibrations/Darks/!'.format(upload_name))

        sftp.chdir('..')

    #Do the same thing for the sigma-clipped standard deviation image. 

    output_filename = pines_path/('Calibrations/Darks/Master Darks Stddev/master_dark_stddev_'+str(exptime)+'_s_'+date+'.fits')
    
    #Add some header keywords detailing the master_dark creation process. 
    hdu = fits.PrimaryHDU(master_dark_stddev)
    hdu.header['HIERARCH DATE CREATED'] = datetime.utcnow().strftime('%Y-%m-%d')+'T'+datetime.utcnow().strftime('%H:%M:%S')

    #Now save to a file on your local machine. 
    print('')

    #Check to see if other files of this name exist.
    # if os.path.exists(output_filename):
    #     print('')
    #     print('WARNING: This will overwrite {}!'.format(output_filename))
    #     dark_check = input('Do you want to continue? y/n: ')
    #     if dark_check == 'y':
    #         hdu.writeto(output_filename,overwrite=True)
    #         print('Wrote to {}!'.format(output_filename))
    #     else:
    #         print('Not overwriting!')
    # else:
    hdu.writeto(output_filename,overwrite=True)
    print('Wrote to {}!'.format(output_filename))
    
    #Upload the master dark to PINES server.
    if upload:
        print('Uploading to pines.bu.edu...')
        sftp.chdir('Darks Stddev')
        upload_name = 'master_dark_stddev_'+str(exptime)+'_s_'+date+'.fits'
        # if upload_name in sftp.listdir():
        #     print('WARNING: This will overwrite {} in pines.bu.edu:data/calibrations/Darks Stddev/'.format(upload_name))
        #     upload_check = input('Do you want to continue? y/n: ')
        #     if upload_check == 'y':
        #         sftp.put(output_filename,upload_name)
        #         print('Uploaded to pines.bu.edu:data/calibrations/Darks Stddev/!')
        #     else:
        #         print('Skipping upload!')
        # else:
        sftp.put(output_filename,upload_name)
        print('Uploaded {} to pines.bu.edu:data/calibrations/Darks Stddev/!'.format(upload_name))

    print('')
    #Delete raw dark images from disk.
    if delete_raw:
        files_to_delete = glob.glob(str(dark_path/'*.fits'))
        for j in range(len(files_to_delete)):
            os.remove(files_to_delete[j])

    print('dark runtime: ', np.round((time.time()-t1)/60,1), ' minutes.')
    print('Done!')
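
A minimal usage sketch for dark (hypothetical date and credentials; with an sftp connection the raw darks are downloaded from the server first, otherwise they are searched for in Calibrations/Darks/Raw/):

import pysftp

# Server-backed run: download the night's 15 s darks, combine, and upload.
with pysftp.Connection('pines.bu.edu', username='user', password='...') as sftp:
    dark('20201003', 15, upload=True, sftp=sftp)

# Local-only alternative, using raw darks already on disk.
dark('20201003', 15)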
Example #23
0
def background_plot(target, centroided_sources, gain=8.21):

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    #Get plot style parameters.
    title_size, axis_title_size, axis_ticks_font_size, legend_font_size = plot_style(
    )

    analysis_path = pines_path / ('Objects/' + short_name + '/analysis')
    phot_path = pines_path / ('Objects/' + short_name + '/aper_phot')
    phot_files = np.array(natsorted([x for x in phot_path.glob('*.csv')]))

    if os.path.exists(analysis_path / ('optimal_aperture.txt')):
        with open(analysis_path / ('optimal_aperture.txt'), 'r') as f:
            best_ap = f.readlines()[0].split(':  ')[1].split('_')[0]
        ap_list = np.array(
            [str(i).split('/')[-1].split('_')[4] for i in phot_files])
        best_ap_ind = np.where(ap_list == best_ap)[0][0]
    else:
        print(
            'No optimal_aperture.txt file for {}.\nUsing first photometry file in {}.'
            .format(target, phot_path))
        best_ap_ind = 0

    phot_file = phot_files[best_ap_ind]
    phot_df = pines_log_reader(phot_file)

    backgrounds = np.array(phot_df[short_name + ' Background'],
                           dtype='float') / gain
    times_full = np.array(phot_df['Time JD'], dtype='float')
    night_inds = night_splitter(times_full)
    num_nights = len(night_inds)
    times_nights = [times_full[night_inds[i]] for i in range(num_nights)]
    standard_x = standard_x_range(times_nights)

    fig, ax = plt.subplots(nrows=1,
                           ncols=num_nights,
                           figsize=(17, 5),
                           sharey=True)
    if num_nights == 1:
        ax = np.array([ax])  #Ensure ax is indexable when there is only one night.
    for i in range(num_nights):
        if i == 0:
            ax[i].set_ylabel('Background (ADU)', fontsize=axis_title_size)

        inds = night_inds[i]
        ax[i].plot(times_full[inds],
                   backgrounds[inds],
                   marker='.',
                   linestyle='',
                   color='tab:orange',
                   alpha=0.3,
                   label='Raw bkg.')
        ax[i].tick_params(labelsize=axis_ticks_font_size)
        ax[i].set_xlabel('Time (JD UTC)', fontsize=axis_title_size)
        ax[i].grid(alpha=0.2)
        ax[i].set_xlim(
            np.mean(times_full[inds]) - standard_x / 2,
            np.mean(times_full[inds] + standard_x / 2))

        #bin
        block_inds = block_splitter(times_full[inds])
        block_x = np.zeros(len(block_inds))
        block_y = np.zeros(len(block_inds))
        block_y_err = np.zeros(len(block_inds))
        for j in range(len(block_inds)):
            block_x[j] = np.nanmean(times_full[inds][block_inds[j]])
            block_y[j] = np.nanmean(backgrounds[inds][block_inds[j]])
            block_y_err[j] = np.nanstd(
                backgrounds[inds][block_inds[j]]) / np.sqrt(
                    len(backgrounds[inds][block_inds[j]]))

        block_x = block_x[~np.isnan(block_y)]
        block_y_err = block_y_err[~np.isnan(block_y)]
        block_y = block_y[~np.isnan(block_y)]

        ax[i].errorbar(block_x,
                       block_y,
                       block_y_err,
                       marker='o',
                       linestyle='',
                       color='tab:orange',
                       ms=8,
                       mfc='none',
                       mew=2,
                       label='Bin bkg.')

        #Interpolate each night's background.
        fit_times = np.linspace(block_x[0], block_x[-1], 1000)
        try:
            interp = CubicSpline(block_x, block_y)
        except:
            pdb.set_trace()
        interp_fit = interp(fit_times)
        ax[i].plot(fit_times,
                   interp_fit,
                   color='b',
                   lw=2,
                   zorder=0,
                   alpha=0.7,
                   label='CS Interp.')

    ax[i].legend(bbox_to_anchor=(1.01, 0.5), fontsize=legend_font_size)
    plt.suptitle(short_name + ' Background Measurements', fontsize=title_size)
    plt.subplots_adjust(left=0.07, wspace=0.05, top=0.92, bottom=0.17)

    output_filename = pines_path / ('Objects/' + short_name +
                                    '/analysis/diagnostic_plots/' +
                                    short_name + '_backgrounds.png')
    plt.savefig(output_filename, dpi=300)
    return
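
A minimal usage sketch for background_plot (hypothetical target; assumes fixed_aper_phot has already written csv files to the target's aper_phot directory, and that centroider output can be restored):

# Restore centroids, then plot the nightly background measurements.
centroid_df = centroider('2MASS J00000000+0000000', sources, restore=True)
background_plot('2MASS J00000000+0000000', centroid_df)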
Example #24
0
def fixed_aper_phot(target,
                    centroided_sources,
                    ap_radii,
                    an_in=12.,
                    an_out=30.,
                    plots=False,
                    gain=8.21,
                    qe=0.9):
    '''Authors:
		Patrick Tamburo, Boston University, June 2020
	Purpose:
        Performs *fixed* aperture photometry on a set of reduced images given a dataframe of source positions.
        The iraf_style_photometry, compute_phot_error, aperture_stats_tbl, and calc_aperture_mmm routines are from Varun Bajaj on github:
            https://github.com/spacetelescope/wfc3_photometry/blob/master/photometry_tools/photometry_with_errors.py. 
	Inputs:
        target (str): The target's full 2MASS name.
        sources (pandas dataframe): List of source names and their x and y positions in every image. 
        ap_radii (list of floats): List of aperture radii in pixels for which aperture photometry will be performed. 
        an_in (float, optional): The inner radius of the annulus used to estimate background, in pixels. 
        an_out (float, optional): The outer radius of the annulus used to estimate background, in pixels. 
        plots (bool, optional): Whether or not to output surface plots. Images output to aper_phot directory within the object directory.
        gain (float, optional): The gain of the detector in e-/ADU.
        qe (float, optional): The quantum efficiency of the detector.
    Outputs:
        Saves aperture photometry csv to PINES_analysis_toolkit/Objects/short_name/aper_phot/ for each aperture.
	TODO:
    '''

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    #Remove any leading/trailing spaces in the column names.
    centroided_sources.columns = centroided_sources.columns.str.lstrip()
    centroided_sources.columns = centroided_sources.columns.str.rstrip()

    #Get list of reduced files for target.
    reduced_path = pines_path / ('Objects/' + short_name + '/reduced')
    reduced_filenames = natsort.natsorted(
        [x.name for x in reduced_path.glob('*red.fits')])
    reduced_files = np.array([reduced_path / i for i in reduced_filenames])

    #source_names = natsort.natsorted(list(set([i.replace('X','').replace('Y','').replace('Centroid Warning','').strip() for i in centroided_sources.keys() if i != 'Filename'])))
    source_names = get_source_names(centroided_sources)

    #Create output plot directories for each source.
    if plots:
        #Camera angles for surface plots
        azim_angles = np.linspace(0, 360 * 1.5, len(reduced_files)) % 360
        elev_angles = np.zeros(len(azim_angles)) + 25
        for name in source_names:
            #If the folders are already there, delete them.
            source_path = (
                pines_path /
                ('Objects/' + short_name + '/aper_phot/' + name + '/'))
            if source_path.exists():
                shutil.rmtree(source_path)
            #Create folders.
            os.mkdir(source_path)

    #Loop over all aperture radii.
    for ap in ap_radii:
        print(
            'Doing fixed aperture photometry for {}, aperture radius = {:1.1f} pix, inner annulus radius = {} pix, outer annulus radius = {} pix.'
            .format(target, ap, an_in, an_out))

        #Declare a new dataframe to hold the information for all targets for this aperture.
        columns = [
            'Filename', 'Time UT', 'Time JD UTC', 'Time BJD TDB', 'Airmass',
            'Seeing'
        ]
        for i in range(0, len(source_names)):
            columns.append(source_names[i] + ' Flux')
            columns.append(source_names[i] + ' Flux Error')
            columns.append(source_names[i] + ' Background')
            columns.append(source_names[i] + ' Interpolation Flag')

        ap_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
        output_filename = pines_path / (
            'Objects/' + short_name + '/aper_phot/' + short_name +
            '_fixed_aper_phot_{:1.1f}_pix_radius.csv'.format(float(ap)))

        #Loop over all images.
        pbar = ProgressBar()
        for j in pbar(range(len(reduced_files))):
            data = fits.open(reduced_files[j])[0].data

            #Read in some supporting information.
            log_path = pines_path / (
                'Logs/' + reduced_files[j].name.split('.')[0] + '_log.txt')
            log = pines_log_reader(log_path)
            log_ind = np.where(
                log['Filename'] == reduced_files[j].name.split('_')[0] +
                '.fits')[0][0]

            header = fits.open(reduced_files[j])[0].header
            date_obs = header['DATE-OBS']
            #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds.
            if len(date_obs.split(':')[-1].split('.')[0]) == 3:
                date_obs = date_obs.split(':')[0] + ':' + date_obs.split(
                    ':')[1] + ':' + date_obs.split(':')[-1][1:]

            if date_obs.split(':')[-1] == '60.00':
                date_obs = date_obs.split(':')[0] + ':' + str(
                    int(date_obs.split(':')[1]) + 1) + ':00.00'
            #Keep a try/except clause here in case other unknown DATE-OBS formats pop up.
            try:
                date = datetime.datetime.strptime(date_obs,
                                                  '%Y-%m-%dT%H:%M:%S.%f')
            except:
                print(
                    'Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.'
                )
                pdb.set_trace()

            #Get the closest date master_dark_stddev image for this exposure time.
            #We'll use this to measure read noise and dark current.
            date_str = date_obs.split('T')[0].replace('-', '')
            master_dark_stddev = master_dark_stddev_chooser(
                pines_path / ('Calibrations/Darks/Master Darks Stddev/'),
                header)

            days = date.day + hmsm_to_days(date.hour, date.minute, date.second,
                                           date.microsecond)
            jd = date_to_jd(date.year, date.month, days)
            ap_df['Filename'][j] = reduced_files[j].name
            ap_df['Time UT'][j] = header['DATE-OBS']
            ap_df['Time JD UTC'][j] = jd
            ap_df['Time BJD TDB'][j] = jd_utc_to_bjd_tdb(
                jd, header['TELRA'], header['TELDEC']
            )  #Using the telescope ra and dec should be accurate enough for our purposes
            ap_df['Airmass'][j] = header['AIRMASS']
            ap_df['Seeing'][j] = log['X seeing'][log_ind]

            #If the shift quality has been flagged, skip this image.
            if log['Shift quality flag'].iloc[log_ind] == 1:
                continue

            #Get the source positions in this image.
            positions = []
            for i in range(len(source_names)):
                positions.append((float(centroided_sources[source_names[i] +
                                                           ' Image X'][j]),
                                  float(centroided_sources[source_names[i] +
                                                           ' Image Y'][j])))

            #Create an aperture centered on this position with radius = ap.
            try:
                apertures = CircularAperture(positions, r=ap)
            except:
                pdb.set_trace()

            #Create an annulus centered on this position.
            annuli = CircularAnnulus(positions, r_in=an_in, r_out=an_out)

            photometry_tbl = iraf_style_photometry(apertures, annuli,
                                                   data * gain,
                                                   master_dark_stddev * gain,
                                                   header, ap_df['Seeing'][j])

            for i in range(len(photometry_tbl)):
                ap_df[source_names[i] + ' Flux'][j] = photometry_tbl['flux'][i]
                ap_df[source_names[i] +
                      ' Flux Error'][j] = photometry_tbl['flux_error'][i]
                ap_df[source_names[i] +
                      ' Background'][j] = photometry_tbl['background'][i]
                ap_df[source_names[i] + ' Interpolation Flag'][j] = int(
                    photometry_tbl['interpolation_flag'][i])

            #Make surface plots.
            if plots:
                for i in range(len(photometry_tbl)):
                    x_p = photometry_tbl['X'][i]
                    y_p = photometry_tbl['Y'][i]

                    fig = plt.figure()
                    ax = fig.add_subplot(111, projection='3d')
                    xx, yy = np.meshgrid(
                        np.arange(int(x_p) - 10,
                                  int(x_p) + 10 + 1),
                        np.arange(int(y_p) - 10,
                                  int(y_p) + 10 + 1))
                    theta = np.linspace(0, 2 * np.pi, 201)
                    y_circ = ap * np.cos(theta) + y_p
                    x_circ = ap * np.sin(theta) + x_p
                    vmin = np.nanmedian(data[yy, xx])
                    vmax = vmin + 2.5 * np.nanstd(data[yy, xx])
                    ax.plot_surface(xx,
                                    yy,
                                    data[yy, xx],
                                    cmap=cm.viridis,
                                    alpha=0.8,
                                    rstride=1,
                                    cstride=1,
                                    edgecolor='k',
                                    lw=0.2,
                                    vmin=vmin,
                                    vmax=vmax)
                    current_z = ax.get_zlim()
                    ax.set_zlim(current_z[0] - 150, current_z[1])
                    current_z = ax.get_zlim()
                    cset = ax.contourf(xx,
                                       yy,
                                       data[yy, xx],
                                       zdir='z',
                                       offset=current_z[0],
                                       cmap=cm.viridis)
                    ax.plot(x_circ,
                            y_circ,
                            np.zeros(len(x_circ)) + current_z[0],
                            color='r',
                            lw=2,
                            zorder=100)
                    ax.set_xlabel('X')
                    ax.set_ylabel('Y')
                    ax.set_zlabel('Counts')

                    ax.set_title('SURFACE DIAGNOSTIC PLOT, Ap. = ' +
                                 str(ap) + '\n' + source_names[i] + ', ' +
                                 reduced_files[j].name + ' (image ' +
                                 str(j + 1) + ' of ' +
                                 str(len(reduced_files)) + ')')
                    ax.view_init(elev=elev_angles[j], azim=azim_angles[j])
                    plot_output_path = (
                        pines_path /
                        ('Objects/' + short_name + '/aper_phot/' +
                         source_names[i] + '/' + str(j).zfill(4) + '.jpg'))
                    plt.tight_layout()
                    plt.savefig(plot_output_path)
                    plt.close()

        #Write output to file.
        print('Saving ap = {:1.1f} aperture photometry output to {}.'.format(
            ap, output_filename))
        print('')
        with open(output_filename, 'w') as f:
            for j in range(len(ap_df)):
                #Write in the header.
                if j == 0:
                    f.write(
                        '{:>21s}, {:>22s}, {:>17s}, {:>17s}, {:>7s}, {:>7s}, '.
                        format('Filename', 'Time UT', 'Time JD UTC',
                               'Time BJD TDB', 'Airmass', 'Seeing'))
                    for i in range(len(source_names)):
                        if i != len(source_names) - 1:
                            f.write(
                                '{:>22s}, {:>28s}, {:>28s}, {:>34s}, '.format(
                                    source_names[i] + ' Flux',
                                    source_names[i] + ' Flux Error',
                                    source_names[i] + ' Background',
                                    source_names[i] + ' Interpolation Flag'))
                        else:
                            f.write(
                                '{:>22s}, {:>28s}, {:>28s}, {:>34s}\n'.format(
                                    source_names[i] + ' Flux',
                                    source_names[i] + ' Flux Error',
                                    source_names[i] + ' Background',
                                    source_names[i] + ' Interpolation Flag'))

                #Write in Filename, Time UT, Time JD, Airmass, Seeing values.
                format_string = '{:21s}, {:22s}, {:17.9f}, {:17.9f}, {:7.2f}, {:7.1f}, '
                #If the seeing value for this image is 'nan' (a string), convert it to a float.
                #TODO: Not sure why it's being read in as a string, fix that.
                if type(ap_df['Seeing'][j]) == str:
                    ap_df['Seeing'][j] = float(ap_df['Seeing'][j])

                #Do a try/except clause for writeout, in case it breaks in the future.
                try:
                    f.write(
                        format_string.format(ap_df['Filename'][j],
                                             ap_df['Time UT'][j],
                                             ap_df['Time JD UTC'][j],
                                             ap_df['Time BJD TDB'][j],
                                             ap_df['Airmass'][j],
                                             ap_df['Seeing'][j]))
                except:
                    print(
                        'Writeout failed! Inspect quantities you are trying to write out.'
                    )
                    pdb.set_trace()

                #Write in Flux, Flux Error, and Background values for every source.
                for i in range(len(source_names)):
                    if i != len(source_names) - 1:
                        format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34d}, '
                    else:
                        format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34d}\n'
                    try:
                        f.write(
                            format_string.format(
                                ap_df[source_names[i] + ' Flux'][j],
                                ap_df[source_names[i] + ' Flux Error'][j],
                                ap_df[source_names[i] + ' Background'][j],
                                ap_df[source_names[i] +
                                      ' Interpolation Flag'][j]))
                    except:
                        if i != len(source_names) - 1:
                            format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34f}, '
                        else:
                            format_string = '{:22.5f}, {:28.5f}, {:28.5f}, {:34f}\n'
                        f.write(
                            format_string.format(
                                ap_df[source_names[i] + ' Flux'][j],
                                ap_df[source_names[i] + ' Flux Error'][j],
                                ap_df[source_names[i] + ' Background'][j],
                                ap_df[source_names[i] +
                                      ' Interpolation Flag'][j]))

    print('')
    return
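
#Added usage sketch (not part of the original example): read the fixed-width,
#comma-separated photometry output back into pandas. The filename below is
#hypothetical; skipinitialspace=True strips the padding used to right-justify
#the columns.
import pandas as pd
phot_df = pd.read_csv('aper_phot_5.0_pix_radius.csv', skipinitialspace=True)
phot_df.columns = phot_df.columns.str.strip()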
Example #25
0
def airmass_plot(target, centroided_sources):
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)

    #Get plot style parameters.
    title_size, axis_title_size, axis_ticks_font_size, legend_font_size = plot_style(
    )

    #Strip leading/trailing whitespace from the column names in the centroid output.
    centroided_sources.columns = centroided_sources.keys().str.strip()
    airmasses = np.array(centroided_sources['Airmass'])
    times_full = np.array(centroided_sources['Time (JD UTC)'])
    night_inds = night_splitter(times_full)
    num_nights = len(night_inds)
    times_nights = [times_full[night_inds[i]] for i in range(num_nights)]
    standard_x = standard_x_range(times_nights)

    fig, ax = plt.subplots(nrows=1,
                           ncols=num_nights,
                           figsize=(17, 5),
                           sharey=True)
    ax = np.atleast_1d(ax)  #Ensure ax is indexable even if there is only one night.
    for i in range(num_nights):
        if i == 0:
            ax[i].set_ylabel('Airmass', fontsize=axis_title_size)

        inds = night_inds[i]
        ax[i].plot(times_full[inds],
                   airmasses[inds],
                   marker='.',
                   linestyle='',
                   color='m',
                   alpha=0.3,
                   label='Raw airmass')
        ax[i].tick_params(labelsize=axis_ticks_font_size)
        ax[i].set_xlabel('Time (JD UTC)', fontsize=axis_title_size)
        ax[i].grid(alpha=0.2)
        ax[i].set_xlim(
            np.mean(times_full[inds]) - standard_x / 2,
            np.mean(times_full[inds]) + standard_x / 2)

        #bin
        block_inds = block_splitter(times_full[inds])
        block_x = np.zeros(len(block_inds))
        block_y = np.zeros(len(block_inds))
        block_y_err = np.zeros(len(block_inds))
        for j in range(len(block_inds)):
            block_x[j] = np.mean(times_full[inds][block_inds[j]])
            block_y[j] = np.mean(airmasses[inds][block_inds[j]])
            block_y_err[j] = np.std(airmasses[inds][block_inds[j]]) / np.sqrt(
                len(airmasses[inds][block_inds[j]]))

        ax[i].errorbar(block_x,
                       block_y,
                       block_y_err,
                       marker='o',
                       linestyle='',
                       color='m',
                       ms=8,
                       mfc='none',
                       mew=2,
                       label='Bin airmass')

        #Interpolate each night's airmass with a cubic spline.
        fit_times = np.linspace(block_x[0], block_x[-1], 1000)
        interp = CubicSpline(block_x, block_y)
        interp_fit = interp(fit_times)
        ax[i].plot(fit_times,
                   interp_fit,
                   color='c',
                   lw=2,
                   zorder=0,
                   alpha=0.7,
                   label='CS Interp.')

    ax[i].legend(bbox_to_anchor=(1.005, 0.5), fontsize=legend_font_size)
    plt.suptitle(short_name + ' Airmass Measurements', fontsize=title_size)
    plt.subplots_adjust(left=0.07, wspace=0.05, top=0.92, bottom=0.17)

    output_filename = pines_path / ('Objects/' + short_name +
                                    '/analysis/diagnostic_plots/' +
                                    short_name + '_airmasses.png')
    plt.savefig(output_filename, dpi=300)
    return
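
#Added usage sketch: airmass_plot() expects the centroid output produced earlier
#in the pipeline. The target name and csv path below are hypothetical.
import pandas as pd
centroided_sources = pd.read_csv('target_and_references_centroids.csv',
                                 skipinitialspace=True)
airmass_plot('2MASS J00000000+0000000', centroided_sources)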
Example #26
0
def dv_report(target, pat_version='1.0'):
    
    def page_header():
        today = date.today()
        date_str = today.strftime('%b %d, %Y')
        time = datetime.now()
        time_str = time.strftime('%H:%M')
        canvas.setFont('Times-Italic', 8) #Font for the page header
        canvas.setFillColor('Grey')
        header_text = short_name+' DV Report, Compiled on '+date_str+' at '+time_str+' with PINES Analysis Toolkit V. '+str(pat_version)
        canvas.drawCentredString(612*0.5, 792-15, header_text)

    def page_footer():
        canvas.setFont('Times-Italic', 8) #Font for the page footer
        canvas.setFillColor('Grey')
        footer_text = str(page_num)
        canvas.drawCentredString(612*0.95, 792*0.02, footer_text)

    def title_page():
        page_header()
        global fig_num, page_num
        canvas.setFont('Times-Roman', 20) #Font for the report title
        canvas.setFillColor('Black')
        title_text = 'PINES DV Report'
        canvas.drawCentredString(fig_x*0.5, fig_y*0.9, title_text)
        object_text = target+' ('+short_name+')'
        canvas.setFont('Times-Roman', 16) #Font for the object name
        canvas.drawCentredString(fig_x*0.5, fig_y*0.87, object_text)
        #Add the source image.
        source_image_path = source_path/('target_and_refs.png')
        img = ImageReader(source_image_path)
        y = 50
        w = fig_x #All the way across the page
        h = w * source_aspect_ratio
        canvas.drawImage(img, x, y, w, h)
        canvas.setFont('Times-Roman', 12) #Font for captions
        caption_text = 'Figure {}: Target and references.'.format(fig_num)
        canvas.drawCentredString(x+w*0.5, 30, caption_text)
        fig_num += 1
        page_footer()

        canvas.showPage()
        page_num += 1

    def night_page(): 
        global fig_num, page_num

        if os.path.exists(analysis_path/('optimal_aperture.txt')):
            with open(analysis_path/('optimal_aperture.txt'), 'r') as f:
                lines = f.readlines()
                best_ap = lines[0].split('  ')[1].split('\n')[0]
                ap_type = best_ap.split('_')[1]
        else:
            raise FileNotFoundError('No optimal_aperture.txt found in {}.'.format(analysis_path))
        best_aper_phot_path = analysis_path/('aper_phot_analysis/'+best_ap)

        #Get all nightly-normalized plots for the best aperture.
        nightly_glob = np.array(natsorted([x for x in best_aper_phot_path.glob('*nightly*.png')]))
        #Sort plots in the order we want them in the report: target lc, raw flux, normalized flux.
        nightly_glob = np.array([nightly_glob[2], nightly_glob[1], nightly_glob[0]])

        if ap_type == 'fixed':
            rad = best_ap.split('_')[0]
            cap_ender = 'radius = {} pixels'.format(rad)
        elif ap_type == 'variable':
            fact = best_ap.split('_')[0]
            cap_ender = 'multiplicative seeing factor = {}'.format(fact)

        caption_texts = ['Best nightly-normalized corrected target flux, {} aperture photometry, {}.'.format(ap_type, cap_ender),
                        'Raw flux, {} aperture photometry, {}.'.format(ap_type, cap_ender),
                        'Nightly-normalized flux, {} aperture photometry, {}.'.format(ap_type, cap_ender)]
        
        for i in range(len(nightly_glob)):
            if i == 0:
                page_header()
            canvas.setFont('Times-Roman', 12) #Font for captions
            canvas.setFillColor('Black')
            image_path = str(nightly_glob[i])
            aperture_radius = image_path.split('=')[1].split('_')[0]
            
            img = ImageReader(image_path)
            y = (3-(i+1))*fig_y/3 + 50 
            w = fig_x #All the way across the page
            h = w * lc_aspect_ratio
            canvas.drawImage(img, x, y, w, h)
            caption_text = 'Figure {}: '.format(fig_num)+caption_texts[i]
            canvas.drawCentredString(x+w*0.5,y-h*0.1, caption_text)
            fig_num += 1

            if i == len(nightly_glob) - 1:
                page_footer()

        canvas.showPage()
        page_num += 1

    def global_page(): 
        global fig_num, page_num

        if os.path.exists(analysis_path/('optimal_aperture.txt')):
            with open(analysis_path/('optimal_aperture.txt'), 'r') as f:
                lines = f.readlines()
                best_ap = lines[1].split(' ')[1]
                ap_type = best_ap.split('_')[1]
        else:
            raise FileNotFoundError('No optimal_aperture.txt found in {}.'.format(analysis_path))
        best_aper_phot_path = analysis_path/('aper_phot_analysis/'+best_ap)

        global_glob = np.array(natsorted([x for x in best_aper_phot_path.glob('*global*.png')]))
        #Sort plots in the order we want them in the report: target lc, raw flux, normalized flux.
        global_glob = np.array([global_glob[2], global_glob[1], global_glob[0]])

        if ap_type == 'fixed':
            rad = best_ap.split('_')[0]
            cap_ender = 'radius = {} pixels'.format(rad)
        elif ap_type == 'variable':
            fact = best_ap.split('_')[0]
            cap_ender = 'multiplicative seeing factor = {}'.format(fact)

        caption_texts = ['Best globally-normalized corrected target flux, {} aperture photometry, {}.'.format(ap_type, cap_ender),
                        'Raw flux, {} aperture photometry, {}.'.format(ap_type, cap_ender),
                        'Globally-normalized flux, {} aperture photometry, {}.'.format(ap_type, cap_ender)]
        
        for i in range(len(global_glob)):
            if i == 0:
                page_header()
            canvas.setFont('Times-Roman', 12) #Font for captions
            canvas.setFillColor('Black')
            image_path = str(global_glob[i])
            aperture_radius = image_path.split('=')[1].split('_')[0]
            
            img = ImageReader(image_path)
            y = (3-(i+1))*fig_y/3 + 50 
            w = fig_x #All the way across the page
            h = w * lc_aspect_ratio
            canvas.drawImage(img, x, y, w, h)
            caption_text = 'Figure {}: '.format(fig_num)+caption_texts[i]
            canvas.drawCentredString(x+w*0.5,y-h*0.1, caption_text)
            fig_num += 1

            if i == len(global_glob) - 1:
                page_footer()

        canvas.showPage()
        page_num += 1
    
    def centroid_page():
        global fig_num, page_num
        page_header()

        canvas.setFont('Times-Roman', 12) #Font for captions
        canvas.setFillColor('Black')
        cutout_position_path = diagnostic_plot_path/(short_name+'_cutout_positions.png')
        img = ImageReader(cutout_position_path)
        y = 2*fig_y/3 - 100
        w = fig_x #All the way across the page
        h = w * centroid_aspect_ratio
        canvas.drawImage(img, x, y, w, h)
        caption_text = 'Figure {}: '.format(fig_num)+'Cutout image positions for all sources.'
        canvas.drawCentredString(x+w*0.5,y-h*0.05, caption_text)
        fig_num += 1

        image_position_path = diagnostic_plot_path/(short_name+'_image_positions.png')
        img = ImageReader(image_position_path)
        y = 1*fig_y/3 - 200
        w = fig_x #All the way across the page
        h = w * centroid_aspect_ratio
        canvas.drawImage(img, x, y, w, h)
        caption_text = 'Figure {}: '.format(fig_num)+'Absolute image positions for the target.'
        canvas.drawCentredString(x+w*0.5,y-h*0.05, caption_text)
        fig_num += 1
        
        page_footer()
        canvas.showPage()
        page_num += 1

    def diagnostics_page():
        global fig_num, page_num
        page_header()

        canvas.setFont('Times-Roman', 12) #Font for captions
        canvas.setFillColor('Black')
        seeing_path = diagnostic_plot_path/(short_name+'_seeing.png')
        img = ImageReader(seeing_path)
        y = 2*fig_y/3 +50
        w = fig_x #All the way across the page
        h = w * lc_aspect_ratio
        canvas.drawImage(img, x, y, w, h)
        caption_text = 'Figure {}: '.format(fig_num)+'Seeing measurements (in arcsec).'
        canvas.drawCentredString(x+w*0.5,y-h*0.1, caption_text)
        fig_num += 1


        background_path = diagnostic_plot_path/(short_name+'_backgrounds.png')
        img = ImageReader(background_path)
        y = 1*fig_y/3 + 50
        w = fig_x #All the way across the page
        h = w * lc_aspect_ratio
        canvas.drawImage(img, x, y, w, h)
        caption_text = 'Figure {}: '.format(fig_num)+'Background measurements (in ADU). Non-linear effects begin near 4000 ADU.'
        canvas.drawCentredString(x+w*0.5,y-h*0.1, caption_text)
        fig_num += 1

        airmass_path = diagnostic_plot_path/(short_name+'_airmasses.png')
        img = ImageReader(airmass_path)
        y = 0*fig_y/3 + 50
        w = fig_x #All the way across the page
        h = w * lc_aspect_ratio
        canvas.drawImage(img, x, y, w, h)
        caption_text = 'Figure {}: '.format(fig_num)+'Airmass measurements.'
        canvas.drawCentredString(x+w*0.5,y-h*0.1, caption_text)
        fig_num += 1

        page_footer()

        canvas.showPage()
        page_num += 1

    def corr_refs_pages(): 
        global fig_num, page_num

        if os.path.exists(analysis_path/('optimal_aperture.txt')):
            with open(analysis_path/('optimal_aperture.txt'), 'r') as f:
                lines = f.readlines()
                best_ap = lines[0].split('  ')[1].split('\n')[0]
                ap_type = best_ap.split('_')[1]
        else:
            raise FileNotFoundError('No optimal_aperture.txt found in {}.'.format(analysis_path))
        best_aper_phot_path = analysis_path/('aper_phot_analysis/'+best_ap+'/corr_ref_plots/')
        corr_glob = np.array(natsorted(list([x for x in best_aper_phot_path.glob('*.png')])))

        count = 0 
        for i in range(len(corr_glob)):
            if count % 3 == 0:
                page_header()
            canvas.setFont('Times-Roman', 12) #Font for captions
            canvas.setFillColor('Black')
            image_path = str(corr_glob[i])
            
            img = ImageReader(image_path)
            y = (3-((count%3)+1))*fig_y/3 + 50 
            w = fig_x #All the way across the page
            h = w * lc_aspect_ratio
            canvas.drawImage(img, x, y, w, h)
            if i == 0:
                caption_text = 'Figure {}: '.format(fig_num)+image_path.split('/')[-1].split('_')[0]+' corrected flux.'
            else:
                caption_text = 'Figure {}: '.format(fig_num)+'Reference '+str(i)+' corrected flux.'

            canvas.drawCentredString(x+w*0.5,y-h*0.1, caption_text)
            fig_num += 1
            
            count += 1
            if count % 3 == 0:
                page_footer()
                canvas.showPage()
                page_num += 1        

    #Set up pathing.
    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    print('Generating PINES DV report for {}.'.format(short_name))
    output_filename = (short_name+'_dv_report.pdf').replace(' ','')
    target_path = pines_path/('Objects/'+short_name)
    source_path = target_path/('sources/')
    analysis_path = target_path/('analysis/')
    diagnostic_plot_path = analysis_path/('diagnostic_plots/')
    output_path = target_path/('output/'+output_filename)

    #Set up the DV report
    canvas = Canvas(str(output_path), pagesize=LETTER)
    global fig_num, page_num, fig_x, fig_y, source_aspect_ratio, lc_aspect_ratio, centroid_aspect_ratio, x
    fig_num = 1 #Initialize figure and page counters.
    page_num = 1 
    fig_x = 612
    fig_y = 792 #Page dimensions in points: 8.5 x 11 in at 72 points per inch.
    source_aspect_ratio = 9/10
    lc_aspect_ratio = 5/17
    centroid_aspect_ratio = 27/51
    x = 0 #Start all figures at the left margin
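
    #Note (added): reportlab's page origin is the bottom-left corner, so the y
    #coordinates passed to drawImage/drawCentredString in the page builders above
    #are measured upward from the page bottom, in points.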

    #------------------------------------------------------------------------------------------------------------------------
    #PAGE 1: Title and Source Detection Image.
    #Make the title page. 
    title_page()


    #------------------------------------------------------------------------------------------------------------------------
    #PAGE 2: Best nightly-normalized lightcurve, raw flux, and normalized flux. 
    night_page()
    
    #------------------------------------------------------------------------------------------------------------------------
    #PAGE 3: Best globally-normalized lightcurve, raw flux, and normalized flux. 
    global_page()

    #------------------------------------------------------------------------------------------------------------------------
    
    #PAGE 4: Centroid diagnostic plots. 
    centroid_page() 

    #------------------------------------------------------------------------------------------------------------------------
    #PAGE 5: Diagnostic plots.
    diagnostics_page()
    
    #------------------------------------------------------------------------------------------------------------------------
    #PAGES 6-N: Plots of corrected target flux. 
    corr_refs_pages()

    canvas.save()
    
    return
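
#Added usage sketch: compile the DV report for a (hypothetical) target. All plots
#referenced by the page-builder functions must already exist on disk.
dv_report('2MASS J00000000+0000000', pat_version='1.0')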
Example #27
0
def observed_sample_plots(upload=True):
    def plot_mwd(RA,
                 Dec,
                 observed_flag,
                 org=0,
                 title='Mollweide projection',
                 projection='mollweide',
                 observed_plot=0):
        ''' 
        Plots targets on the sky in a 'Mollweide' projection.
        RA, Dec are arrays of the same length.
        RA takes values in [0,360), Dec in [-90,90],
        which represent angles in degrees.
        org is the origin of the plot, 0 or a multiple of 30 degrees in [0,360).
        title is the title of the figure.
        projection is the kind of projection: 'mollweide', 'aitoff', 'hammer', 'lambert'
        '''

        x = np.remainder(RA + 360 - org, 360)  # shift RA values
        ind = x > 180
        x[ind] -= 360  # scale conversion to [-180, 180]
        x = -x  # reverse the scale: East to the left
        x_tick_labels = np.array(
            [150, 120, 90, 60, 30, 0, 330, 300, 270, 240,
             210])  #Label in degrees
        #x_tick_labels = np.array([150,140,130,120,110,100,90,80,70,60,50,40,30,20,10,0,350,340,330,320,310,300,290,280,270,260,250,240,230,220,210]) #FinerLabel in degrees

        x_tick_labels = np.remainder(x_tick_labels + 360 + org, 360)
        # x_tick_labels = np.array([150, 120, 90, 60, 30, 0, 330, 300, 270, 240, 210])/15 #Label in hours
        # x_tick_labels = np.remainder(x_tick_labels+24+org/15,24)
        x_tick_labels = [int(i) for i in x_tick_labels]
        fig = plt.figure(figsize=(15 * .8, 7 * .8))
        ax = fig.add_subplot(111, projection=projection)
        #ax.scatter(np.radians(x),np.radians(Dec),color=color,alpha=0.4,zorder=1, label='Targets')  # convert degrees to radians
        for i in range(len(x)):
            if np.array(observed_flag)[i] == 0:
                color = 'k'
            else:
                color = 'k'
                if observed_plot == 1:
                    color = 'g'  #Turn on plotting of observed targets.
            ax.scatter(np.radians(x[i]),
                       np.radians(Dec[i]),
                       color=color,
                       alpha=0.4,
                       zorder=1,
                       s=25)
        ax.set_yticklabels([
            str(int(i)) + r'$^\circ$'
            for i in np.round(ax.get_yticks() * 180 / np.pi)
        ],
                           fontsize=15)
        ax.title.set_fontsize(20)
        ax.set_xlabel('RA')
        ax.xaxis.label.set_fontsize(20)
        ax.set_ylabel("Dec")
        ax.yaxis.label.set_fontsize(20)
        ax.set_xticklabels([], fontsize=16)  #Hide default labels; the RA scale is drawn manually below.
        ax.grid(True, alpha=0.3)
        month_texts = [
            'Sep', 'Aug', 'Jul', 'Jun', 'May', 'Apr', 'Mar', 'Feb', 'Jan',
            'Dec', 'Nov', 'Oct'
        ]
        for i in range(len(month_texts)):
            ax.text(-180 * np.pi / 180 + 15 * np.pi / 180 +
                    30 * np.pi / 180 * i,
                    -35 * np.pi / 180,
                    month_texts[i],
                    ha='center',
                    va='center',
                    fontsize=14)
        for i in range(len(x_tick_labels)):
            ax.text(-150 * np.pi / 180 + 30 * np.pi / 180 * i,
                    -22.5 * np.pi / 180,
                    str(x_tick_labels[i]) + r'$^\circ$',
                    ha='center',
                    va='center',
                    fontsize=15)

        #Plot monsoon season.
        monsoon_x_vertices = np.array([-150, -150, -90, -90, -150
                                       ]) * np.pi / 180
        monsoon_y_vertices = np.array([-90, 90, 90, -90, -90]) * np.pi / 180
        monsoon_polygon = Polygon(np.array(
            [[monsoon_x_vertices[i], monsoon_y_vertices[i]]
             for i in range(len(monsoon_x_vertices))]),
                                  color='r',
                                  alpha=0.15,
                                  label='Flagstaff monsoon season')
        ax.add_patch(monsoon_polygon)
        plt.show()
        return ax
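
    #Added worked example of the RA shift in plot_mwd(): with org=180, a target
    #at RA = 10 deg gives x = (10 + 360 - 180) % 360 = 190 -> 190 - 360 = -170,
    #then x = -x = +170, so it plots near the right edge (RA increases leftward).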

    '''Plots the current sample as given in 'PINES Sample.xlsx' on the Google Drive and uploads the plots to the PINES website.'''
    pines_path = pines_dir_check()
    sample_path = pines_path / ('Misc/PINES Sample.xlsx')
    print('Make sure an up-to-date copy of PINES Sample.xlsx exists in {}.'.
          format(pines_path / 'Misc/'))
    print('Download from the PINES Google Drive.\n')

    df = pd.read_excel(sample_path)
    df = df.dropna(how='all')  #Remove rows that are all NaNs.

    good_locs = np.where(df['Good'] == 1)[0]  #Get only "good" targets
    ra = np.array(df['RA (deg)'][good_locs])
    dec = np.array(df['Dec (deg)'][good_locs])
    group_ids = df['Group ID'][good_locs]
    observed_flag = df['Observed?'][good_locs]
    observed_groups = np.unique(
        np.array(group_ids)[np.where(
            observed_flag != 0)[0]])  #Get the groups that have been observed.
    number_observed = len(np.array(group_ids)[np.where(observed_flag != 0)[0]])

    #Plot 1: Sky map of good targets based on group.
    print('Updating sky plot...')
    ax = plot_mwd(ra,
                  dec,
                  observed_flag,
                  org=180,
                  projection='mollweide',
                  observed_plot=1)
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = dict(zip(labels, handles))
    ax.legend(by_label.values(),
              by_label.keys(),
              loc=1,
              bbox_to_anchor=(1.1, 1.1),
              fontsize=16)
    ax.grid(alpha=0.2)

    group_id_inds = np.arange(0, max(group_ids) + 1)

    #Now loop over group_id inds, and draw boundaries around each group.
    for i in group_id_inds:
        targs_in_group = np.where(group_ids == i)[0]
        try:
            cluster_coords = np.array([[ra[i], dec[i]]
                                       for i in targs_in_group])
        except:
            pdb.set_trace()
        hull = ConvexHull(cluster_coords)
        for s in range(len(hull.simplices)):
            simplex = hull.simplices[s]
            x = np.remainder(cluster_coords[simplex, 0] + 360 - 180,
                             360)  # shift RA values
            ind = x > 180
            x[ind] -= 360  # scale conversion to [-180, 180]
            x = -x  # reverse the scale: East to the left
            if i in observed_groups:
                color = 'g'
                ax.plot(x * np.pi / 180,
                        cluster_coords[simplex, 1] * np.pi / 180,
                        color=color,
                        lw=2,
                        zorder=0,
                        alpha=0.6,
                        label='Observed')
            else:
                color = 'k'
                ax.plot(x * np.pi / 180,
                        cluster_coords[simplex, 1] * np.pi / 180,
                        color=color,
                        lw=2,
                        zorder=0,
                        alpha=0.6,
                        label='Not yet observed')

    ax.grid(alpha=0.4)
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = dict(zip(labels, handles))

    ax.legend(by_label.values(),
              by_label.keys(),
              loc=1,
              bbox_to_anchor=(0.65, 0.225))
    ax.set_title('PINES sample \n ' + str(int(max(group_ids) + 1)) +
                 ' groups, ' + str(len(good_locs)) + ' targets',
                 fontsize=20)
    plt.tight_layout()
    sky_map_output_path = pines_path / ('Misc/updated_sky_plot.png')
    plt.savefig(sky_map_output_path, dpi=300)
    plt.close()

    ntargs = len(df)
    #Now do magnitude/SpT histograms
    print('Updating target histograms...')
    mags = np.zeros(ntargs)
    observed_SpTs = []
    observed_mags = []
    SpT = []
    for i in range(ntargs):
        try:
            #mags[i] = float(df['2MASS H'][i][0:6])
            mags[i] = float(df['2MASS J'][i][0:6])
            SpT.append(df['SpT'][i])
            if df['Observed?'][i] != 0:
                observed_SpTs.append(df['SpT'][i])
                observed_mags.append(mags[i])
        except:  #Some values don't follow the normal +/- convention (they were upper limits in the Gagne sheet), so have to read them in differently.
            #mags[i] = float(df['2MASS H'][i])
            mags[i] = float(df['2MASS J'][i])
            SpT.append(df['SpT'][i])
            if df['Observed?'][i] != 0:
                observed_SpTs.append(df['SpT'][i])
                observed_mags.append(mags[i])

    mags = mags[good_locs]
    SpT = np.array(SpT)
    observed_SpTs = np.array(observed_SpTs)
    observed_mags = np.array(observed_mags)
    SpT = SpT[good_locs]

    SpT_number = np.zeros(ntargs)
    observed_SpT_numbers = []
    for i in range(ntargs):
        if df['SpT'][i][0] == 'L':
            SpT_number[i] = float(df['SpT'][i][1:])
            if df['Observed?'][i] != 0:
                observed_SpT_numbers.append(SpT_number[i])
        else:
            SpT_number[i] = 10 + float(df['SpT'][i][1:])
            if df['Observed?'][i] != 0:
                observed_SpT_numbers.append(SpT_number[i])
    SpT_number = SpT_number[good_locs]
    SpT_number = np.array(SpT_number)
    observed_SpT_numbers = np.array(observed_SpT_numbers)

    median_mag = np.median(mags)

    scale_factor = 0.5
    fig, ax = plt.subplots(nrows=2,
                           ncols=1,
                           figsize=(18 * scale_factor, 15 * scale_factor))
    bins = np.array([
        11.25, 11.75, 12.25, 12.75, 13.25, 13.75, 14.25, 14.75, 15.25, 15.75,
        16.25, 16.75
    ]) - 0.25
    ax[0].hist(mags,
               bins=bins,
               histtype='step',
               lw=3,
               ls='--',
               label='Full sample')
    ax[0].hist(observed_mags,
               bins=bins,
               histtype='bar',
               label='Observed sample',
               color='tab:blue')
    ax[0].axvline(median_mag,
                  color='r',
                  label='Median $m_J$ = {:2.1f}'.format(median_mag))
    ticks = [11, 11.5, 12, 12.5, 13, 13.5, 14, 14.5, 15, 15.5, 16, 16.5]
    ax[0].set_xticks(ticks)
    ax[0].set_xticklabels([str(i) for i in ticks])
    ax[0].set_xlabel('$m_J$', fontsize=20)
    ax[0].set_ylabel('Number of targets', fontsize=20)
    ax[0].tick_params(axis='both', which='major', labelsize=16)
    ax[0].legend(fontsize=16, loc='upper left')
    #ax[0].grid(alpha=0.2)

    ax[1].hist(SpT_number,
               bins=np.arange(-0.5,
                              max(SpT_number) + 0.5, 1),
               histtype='step',
               lw=3,
               color='orange',
               ls='--',
               label='Full sample')
    ax[1].hist(observed_SpT_numbers,
               bins=np.arange(-0.5,
                              max(SpT_number) + 0.5, 1),
               histtype='bar',
               lw=3,
               color='orange',
               label='Observed sample')
    ticks = np.arange(0, max(SpT_number), 1)
    ax[1].set_xticks(ticks)
    ax[1].set_xticklabels([
        'L0', 'L1', 'L2', 'L3', 'L4', 'L5', 'L6', 'L7', 'L8', 'L9', 'T0', 'T1',
        'T2', 'T3', 'T4', 'T5', 'T6', 'T7'
    ])
    ax[1].set_xlabel('Spectral Type', fontsize=20)
    ax[1].set_ylabel('Number of targets', fontsize=20)
    ax[1].tick_params(axis='both', which='major', labelsize=16)
    ax[1].legend(fontsize=16, loc='upper right')
    #ax[1].grid(alpha=0.2)

    plt.tight_layout()
    histogram_output_path = pines_path / 'Misc/target_histograms.png'
    plt.savefig(histogram_output_path, dpi=300)
    plt.close()

    #Edit the observing.html page to update the number of observed targets.
    print('Updating observing.html...')
    if not (pines_path / 'Misc/observing.html').exists():
        print('Grabbing copy of observing.html from the PINES server.')
        sftp = pines_login()
        sftp.chdir('/web')
        remote_path = '/web/observing.html'
        local_path = pines_path / ('Misc/observing.html')
        sftp.get(remote_path, local_path)
        sftp.close()

    with open(str(pines_path / ('Misc/observing.html')), 'r') as f:
        lines = f.readlines()

    edit_line_ind = np.where(
        ['To date, PINES has observed' in i for i in lines])[0][0]
    edit_line = lines[edit_line_ind]
    edit_line = edit_line.replace(
        edit_line.split('<u>')[1].split('</u>')[0], str(number_observed))
    lines[edit_line_ind] = edit_line
    with open(str(pines_path / ('Misc/observing.html')), 'w') as f:
        f.writelines(lines)

    if upload:
        sftp = pines_login()
        print('Uploading plots and observing.html to the PINES server.')
        sftp.chdir('/web/images')
        sftp.put(sky_map_output_path, '/web/images/updated_sky_plot.png')
        sftp.put(histogram_output_path, '/web/images/target_histograms.png')
        sftp.chdir('/web')
        sftp.put(pines_path / ('Misc/observing.html'), '/web/observing.html')
        print('PINES website updated!')
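
#Added usage sketch: regenerate the sample plots locally without uploading to the
#PINES server (e.g., while testing).
observed_sample_plots(upload=False)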
Example #28
0
from pines_analysis_toolkit.pwv.fyodor import download_nc, rename_nc
from pines_analysis_toolkit.utils.pines_dir_check import pines_dir_check
import pdb 

ut_dates = ['20201203', '20201204', '20201205', '20201206']
url_codes = ['8042323663', '8042323665', '8042323666', '8042323878']

pines_path = pines_dir_check()
download_directory = pines_path/('Calibrations/PWV/')

if len(ut_dates) != len(url_codes):
    raise RuntimeError('There must be one url code for every UT date you want to download.')

for i in range(len(ut_dates)):
    date = ut_dates[i]
    url = 'https://download.avl.class.noaa.gov/download/'+url_codes[i]+'/001'
    download_nc(url, str(download_directory), date, n_threads=1)
    rename_nc(str(download_directory/date))

Example #29
0
def basic_psf_phot(target, centroided_sources, plots=False):
    def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
        """
        Convert hours, minutes, seconds, and microseconds to fractional days.
        
        """
        days = sec + (micro / 1.e6)
        days = min + (days / 60.)
        days = hour + (days / 60.)
        return days / 24.
    
    def date_to_jd(year,month,day):
        """
        Convert a date to Julian Day.
        
        Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', 
            4th ed., Duffet-Smith and Zwart, 2011.
        
        """
        if month == 1 or month == 2:
            yearp = year - 1
            monthp = month + 12
        else:
            yearp = year
            monthp = month
        
        # this checks where we are in relation to October 15, 1582, the beginning
        # of the Gregorian calendar.
        if ((year < 1582) or
            (year == 1582 and month < 10) or
            (year == 1582 and month == 10 and day < 15)):
            # before start of Gregorian calendar
            B = 0
        else:
            # after start of Gregorian calendar
            A = math.trunc(yearp / 100.)
            B = 2 - A + math.trunc(A / 4.)
            
        if yearp < 0:
            C = math.trunc((365.25 * yearp) - 0.75)
        else:
            C = math.trunc(365.25 * yearp)
            
        D = math.trunc(30.6001 * (monthp + 1))
        
        jd = B + C + D + day + 1720994.5
        
        return jd
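
    #Added worked check of the two helpers above: hmsm_to_days(12) returns 0.5,
    #and date_to_jd(2020, 12, 3.5) returns 2459187.0, i.e. 2020-12-03 12:00 UT.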

    def gaussian(p, x, y):
        height, center_x, center_y, width_x, width_y, rotation = p
        rotation = np.deg2rad(rotation)
        x_0 = center_x * np.cos(rotation) - center_y * np.sin(rotation)
        y_0 = center_x * np.sin(rotation) + center_y * np.cos(rotation)

        def rotgauss(x,y):
            xp = x * np.cos(rotation) - y * np.sin(rotation)
            yp = x * np.sin(rotation) + y * np.cos(rotation)
            g = height*np.exp(
                -(((x_0-xp)/width_x)**2+
                  ((y_0-yp)/width_y)**2)/2.)
            return g
        
        g = rotgauss(x,y)

        return g

    def moments(data):
        total = np.nansum(data)
        X, Y = np.indices(data.shape)
        center_x = int(np.shape(data)[1]/2)
        center_y = int(np.shape(data)[0]/2)
        row = data[int(center_x), :]
        col = data[:, int(center_y)]
        width_x = np.nansum(np.sqrt(abs((np.arange(col.size)-center_y)**2*col))
                            /np.nansum(col))
        width_y = np.nansum(np.sqrt(abs((np.arange(row.size)-center_x)**2*row))
                            /np.nansum(row))
        height = np.nanmax(data)
        rotation = 0.0
        return height, center_x, center_y, width_x, width_y, rotation
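
    #Note (added): moments() seeds the least-squares fit with the cutout's peak
    #value as the height, the cutout center as the centroid, intensity-weighted
    #RMS estimates of the widths, and zero rotation.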

    def errorfunction(p, x, y, data):
        return gaussian(p, x, y) - data

    def fitgaussian(data):
        params = moments(data)
        X, Y = np.indices(data.shape)
        mask = ~np.isnan(data)
        x = X[mask]
        y = Y[mask]
        data = data[mask]
        p, success = optimize.leastsq(errorfunction, params, args=(x, y, data))
        return p

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    reduced_path = pines_path/('Objects/'+short_name+'/reduced/')
    reduced_files = np.array(natsort.natsorted([x for x in reduced_path.glob('*.fits')]))

    centroided_sources.columns = centroided_sources.columns.str.strip()
    source_names = natsort.natsorted(list(set([i[0:-2].replace('X','').replace('Y','').rstrip().lstrip() for i in centroided_sources.keys()])))    

    #Declare a new dataframe to hold the photometry information for all sources.
    columns = ['Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing']
    for i in range(0, len(source_names)):
        columns.append(source_names[i]+' Flux')
        columns.append(source_names[i]+' Flux Error')
    psf_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
    output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

    for i in range(len(reduced_files)):
        #Read in image data/header (open the file once).
        file = reduced_files[i]
        hdu = fits.open(file)[0]
        data = hdu.data
        header = hdu.header
        print('{}, image {} of {}.'.format(file.name, i+1, len(reduced_files)))

        #Read in some supporting information.
        log_path = pines_path/('Logs/'+file.name.split('.')[0]+'_log.txt')
        log = pines_log_reader(log_path)
        date_obs = header['DATE-OBS']
        #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds. 
        if len(date_obs.split(':')[-1].split('.')[0]) == 3:
            date_obs = date_obs.split(':')[0] + ':' + date_obs.split(':')[1] + ':' + date_obs.split(':')[-1][1:]
        #Keep a try/except clause here in case other unknown DATE-OBS formats pop up. 
        try:
            date = datetime.datetime.strptime(date_obs, '%Y-%m-%dT%H:%M:%S.%f')
        except:
            print('Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.')
            pdb.set_trace()
        
        days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
        jd = date_to_jd(date.year,date.month,days)
        psf_df['Filename'][i] = file.name
        psf_df['Time UT'][i] = header['DATE-OBS']
        psf_df['Time JD'][i] = jd
        psf_df['Airmass'][i] = header['AIRMASS']
        psf_df['Seeing'][i] = log['X seeing'][np.where(log['Filename'] == file.name.split('_')[0]+'.fits')[0][0]]
        
        #Read in source centroids for this image
        x = np.zeros(len(source_names))
        y = np.zeros(len(source_names))
        seeing = psf_df['Seeing'][i]

        for j in range(len(source_names)):
            source = source_names[j]
            x[j] = centroided_sources[source+' X'][i]
            y[j] = centroided_sources[source+' Y'][i]

        #The extract_stars() function requires the input data as an NDData object.
        nddata = NDData(data=data)  

        #Create table of good star positions
        stars_tbl = Table()
        stars_tbl['x'] = x
        stars_tbl['y'] = y

        size = 25
        x, y = np.meshgrid(np.arange(0,size), np.arange(0,size))

        #Extract star cutouts.
        stars = extract_stars(nddata, stars_tbl, size=size)  

        fitter = fitting.LevMarLSQFitter()

        fig, ax = plt.subplots(nrows=len(stars), ncols=3, sharex=True, sharey=True, figsize=(12,40))
        ax = np.atleast_2d(ax) #Ensure ax stays 2D even if only one star is extracted.

        #Fit a 2D Gaussian to each star. 
        for j in range(len(stars)): 
            star = stars[j]
            source = source_names[j]
            mmm_bkg = MMMBackground()
            cutout = star.data - mmm_bkg(star.data)            

            #Get the star's centroid position in the cutout. 
            dtype = [('x_0', 'f8'), ('y_0', 'f8')]
            pos = Table(data=np.zeros(1, dtype=dtype))
            source_x = stars_tbl['x'][j]
            source_y = stars_tbl['y'][j]
            pos['x_0'] = source_x - int(source_x - size/2 + 1)
            pos['y_0'] = source_y - int(source_y - size/2 + 1)

            parameters = fitgaussian(cutout)
            g2d_fit = gaussian(parameters, x, y)

            avg, med, std = sigma_clipped_stats(cutout)
            im = ax[j,0].imshow(cutout, origin='lower', vmin=med-std, vmax=med+8*std)
            divider = make_axes_locatable(ax[j,0])
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im, cax=cax, orientation='vertical')
            ax[j,0].plot(pos['x_0'], pos['y_0'], 'rx')
            ax[j,0].set_ylabel(source)
            ax[j,0].text(pos['x_0'], pos['y_0']+1, '('+str(np.round(source_x,1))+', '+str(np.round(source_y,1))+')', color='r', ha='center')
            ax[j,0].axis('off')

            axins = ax[j,0].inset_axes([0.75, 0.75, 0.25, 0.25])
            axins.set_yticklabels([])
            axins.set_yticks([])
            axins.set_xticklabels([])
            axins.set_xticks([])
            axins.imshow(data, origin='lower', vmin=med-std, vmax=med+8*std)
            axins.plot(source_x, source_y, 'rx')

            im = ax[j,1].imshow(g2d_fit, origin='lower', vmin=med-std, vmax=med+8*std)
            divider = make_axes_locatable(ax[j,1])
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im, cax=cax, orientation='vertical')
            ax[j,1].axis('off')

            avg, med, std = sigma_clipped_stats(cutout - g2d_fit)
            im = ax[j,2].imshow(cutout - g2d_fit, origin='lower', vmin=med-std, vmax=med+8*std)
            divider = make_axes_locatable(ax[j,2])
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im, cax=cax, orientation='vertical')
            ax[j,2].axis('off')

            if j == 0:
                ax[j,0].set_title('Data')
                ax[j,1].set_title('2D Gaussian Model')
                ax[j,2].set_title('Data - Model')

            plt.tight_layout()

        plot_output_filename = pines_path/('Objects/'+short_name+'/basic_psf_phot/'+reduced_files[i].name.split('_')[0]+'_'+'source_modeling.pdf')
        plt.savefig(plot_output_filename)
        plt.close()

        
    return
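
#Added usage sketch (hypothetical target name and centroid file): run the
#diagnostic 2D-Gaussian fits on previously-measured centroids.
import pandas as pd
centroids = pd.read_csv('target_and_references_centroids.csv')
basic_psf_phot('2MASS J00000000+0000000', centroids)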