Example #1
from astropy import time  #assumed import, matching the time.Time(...) usage below
from ztfquery import query  #assumed import, matching the query.ZTFQuery() usage below

def main():
    obj_loc = [252.7534888, 25.8759437]  #Holds ra, dec in degrees
    get_ref = False  #Get reference image?
    get_dif = True  #Get difference images?
    dist = 0.01  #get all observations within dist from obj_loc (in degrees)
    #For the query note the following:
    #fid=1->zg, 2->zr, 3->zi filter
    #dates are in jd
    #obsjd BETWEEN start_jd and stop_jd, or obsjd>, obsjd=, etc.
    #seeing<2 for seeing below 2 arcsec
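    #For example, combining these constraints (placeholder dates):
    #  'fid=2 and seeing<2 and obsjd BETWEEN 2458355.5 AND 2458365.5'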
    t1 = time.Time(58355, format='mjd').jd
    t2 = time.Time(58365, format='mjd').jd
    t3 = time.Time(58590, format='mjd').jd
    t4 = time.Time(58750, format='mjd').jd
    q1 = 'fid=2 and obsjd BETWEEN {} and {}'.format(
        t1, t2)  #Get r im at peak for this obj (superseded by the combined query below)
    #q2='fid=2 and obsjd BETWEEN {} and {}'.format(t3,t4) #Get r ims just before and during possible late-time detects
    #Try to get the 3 interesting observations in 1 go
    t = time.Time([58503, 58504, 58543, 58544, 58577, 58578], format='mjd').jd
    q1 = 'fid=2 and (obsjd BETWEEN {} and {} or obsjd BETWEEN {} and {} or obsjd BETWEEN {} and {})'.format(
        t[0], t[1], t[2], t[3], t[4], t[5])
    print(q1)
    if get_ref:
        zquery = query.ZTFQuery()
        zquery.load_metadata(kind='ref', radec=obj_loc)
        zquery.download_data()
    if get_dif:
        zquery = query.ZTFQuery()
        #zquery2=query.ZTFQuery()
        zquery.load_metadata(radec=obj_loc, size=dist, sql_query=q1)
        #zquery2.load_metadata(radec=obj_loc, size=dist, sql_query=q2)
        zquery.download_data('scimrefdiffimg.fits.fz')
        #zquery2.download_data('scimrefdiffimg.fits.fz')
    return
def download_images(radec, size, sql_query, download_data):  #Downloads the (meta-)data and returns the meta-data
    #Get the meta-data and sort them into chronological order
    refquery = query.ZTFQuery()
    refquery.load_metadata(kind='ref', radec=radec)
    difquery = query.ZTFQuery()
    difquery.load_metadata(radec=radec, size=size, sql_query=sql_query)
    difquery.metatable.sort_values(by=['obsjd'], inplace=True)
    #Get the images if wanted
    if download_data:
        refquery.download_data()
        difquery.download_data('scimrefdiffimg.fits.fz')
    return refquery, difquery
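
#A minimal usage sketch (placeholder coordinates and query; metadata only, no downloads):
refq, difq = download_images([252.7534888, 25.8759437], size=0.01,
                             sql_query='fid=2 and seeing<2', download_data=False)
print(difq.metatable.head())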
Example #3
def download_metadata(corner_df, metadata_file=None):
    """
    Download ZTF metadata corresponding to a set of points in a dataframe. If the metadata
    are already downloaded read from the metadata_file instead

    Parameters
    ----------
    corner_df : `pd.DataFrame`
        a pandas dataframe with columns ra, dec in degrees
    metadata_file : string
        absolute path to a csv file with the columns `expid` and `rcid`
        specifying the unique set of images to download.

    Returns
    -------
    unique_table: `pd.DataFrame`
        a dataframe of unique images with their entire metadata as downloaded
        using `ZTFQuery`.
    """
    if metadata_file is None:
        zquery = query.ZTFQuery()
        dfs = []
        for (ra, dec) in corner_df[['ra', 'dec']].values:
            zquery.load_metadata(radec=[ra, dec], size=0.01)
            dfs.append(zquery.metatable)
        meta = pd.concat(dfs)
    else:
        meta = pd.read_csv(metadata_file)
    # Note in many cases these duplicates might already be
    # filtered and the drop statement not needed.
    return meta.drop_duplicates(subset=['expid', 'rcid'])
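
# A minimal usage sketch: corner_df needs only `ra` and `dec` columns in
# degrees (the values below are placeholders).
import pandas as pd
corners = pd.DataFrame({'ra': [252.75, 252.76], 'dec': [25.87, 25.88]})
unique_images = download_metadata(corners)
print(unique_images[['expid', 'rcid']])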
Example #4
def query_metadata(ra,
                   dec,
                   username,
                   password,
                   start_jd=None,
                   end_jd=None,
                   out_csv=None):
    """Use ZTFquery to get more reliable upper limits"""

    zquery = query.ZTFQuery()
    if start_jd is None and end_jd is None:
        zquery.load_metadata(kind='sci',
                             radec=[str(ra.deg), str(dec.deg)],
                             size=0.003,
                             auth=[username, password])
    else:
        if start_jd is not None and end_jd is None:
            sql_query = 'obsjd>' + repr(start_jd)
        elif start_jd is None and end_jd is not None:
            sql_query = 'obsjd<' + repr(end_jd)
        elif start_jd is not None and end_jd is not None:
            sql_query = 'obsjd<' + repr(end_jd) + '+AND+' + 'obsjd>' + repr(
                start_jd)
        zquery.load_metadata(kind='sci',
                             radec=[str(ra.deg), str(dec.deg)],
                             size=0.003,
                             sql_query=sql_query,
                             auth=[username, password])
    out = zquery.metatable
    final_out = out.sort_values(by=['obsjd'])
    if out_csv is not None:
        final_out.to_csv(out_csv)

    return final_out
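
# A hedged usage sketch: ra/dec must expose a `.deg` attribute, e.g. from an
# astropy SkyCoord; the credentials and dates below are placeholders.
from astropy.coordinates import SkyCoord
import astropy.units as u
c = SkyCoord(252.7534888 * u.deg, 25.8759437 * u.deg)
meta = query_metadata(c.ra, c.dec, 'irsa_user', 'irsa_password',
                      start_jd=2458355.5, end_jd=2458365.5)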
Example #5
	def download_images(self):
		'''
		Get all relevant images, download if needed & wanted,
		and list them in chronological order.

		Refquery is for the reference images, difquery for the difference images.

		Returns:
			finallist: list of locations of each image
		'''
		refquery = query.ZTFQuery()
		refquery.load_metadata(kind='ref', radec=[self.ra, self.dec])
		difquery = query.ZTFQuery()
		difquery.load_metadata(radec=[self.ra, self.dec], size=self.size,
			sql_query=self.sql)
		difquery.metatable.sort_values(by=['obsjd'], inplace=True)
		if self.download_data:	#Download images if it is requested.
			refquery.download_data()
			difquery.download_data('scimrefdiffimg.fits.fz')
		finallist = refquery.get_local_data() + difquery.get_local_data(
			'scimrefdiffimg.fits.fz')
		return finallist
Example #6
__adv__ = 'download_query.py'

print("ziff download_query is deprecated. Use 'irsa_query.py' instead.")

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--query",
                    type=str,
                    default="obsjd BETWEEN 2458554.5 AND 2458564")
parser.add_argument("--overwrite", type=int, default=1)
parser.add_argument("--nprocess", type=int, default=1)

args = parser.parse_args()

in_query = args.query
from ztfquery import query
print(in_query)
zquery = query.ZTFQuery()
zquery.load_metadata(sql_query=in_query)
print(zquery.metatable)
keys = ['sciimg.fits', 'mskimg.fits', 'psfcat.fits']
for _key in keys:
    zquery.download_data(_key,
                         show_progress=True,
                         notebook=False,
                         nprocess=args.nprocess,
                         overwrite=bool(args.overwrite))
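
# Example invocation (placeholder values):
#   python download_query.py --query "obsjd BETWEEN 2458554.5 AND 2458564" --nprocess 4 --overwrite 1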

# End of download_query.py ========================================================
def select_variability(tbl, hard_reject=[], update_database=False,
                       read_database=True,
                       use_forced_phot=False, stacked=False,
                       baseline=0.02, var_baseline={'g': 6, 'r': 8, 'i': 10},
                       max_duration_tot=30., max_days_g=1e5, snr=4,
                       index_rise=-0.0, index_decay=0.0,
                       path_secrets_db='db_access.csv',
                       save_plot=False, path_plot='./',
                       show_plot=False, use_metadata=False,
                       path_secrets_meta='../kowalski/secrets.csv',
                       save_csv=False, path_csv='./',
                       path_forced='./forced_phot/'):

    """
    Select candidates based on their duration and on their evolution rate.

    ---
    Parameters

    tbl astropy table
        table with the photometry of the candidates
        from the AVRO packets. It can be created using get_lc_kowalski.py
        and it usually includes:
        name,ra,dec,jd,magpsf,sigmapsf,filter,magzpsci,magzpsciunc,programid,
        field,rcid,pid,sgscore1,sgscore2,sgscore3,distpsnr1,distpsnr2,distpsnr3

    hard_reject list of str
        list of candidates that have to be ignored

    update_database bool
        if True, it updates the psql database with the results

    read_database bool
        if True, it reads light curves from the psql database

    use_forced_phot bool
        if True, forced ForcePhotZTF photometry will be used;
        if False, only alerts will be considered.

    stacked bool
        if True, the light curve will be stacked nightly in flux

    baseline float
        min time (days) between first and last detection for the fit
        to be performed (e.g.: baseline=2. means that if a transient
        is found only on a single night, it will not be fit to find
        its rise and decay rates)

    var_baseline dict
        if no evolution (1-sigma) is measured beyond these thresholds,
        the candidate is "rejected". Different filters have different 
        thresholds, the default being {'g': 6, 'r': 8, 'i': 10}

    max_duration_tot float
        max time (days) allowed between the first and last detections
        in any band

    max_days_g float
        max time (days) allowed between the first and last detections
        in g band

    snr float
        min signal-to-noise ratio for forced photometry data points;
        the maxlike method of ForcePhotZTF tends to underestimate the
        errors, so snr=4 is preferred over snr=3.
 
    index_rise float
        negative number, minimum rise rate (mag/day) for a candidate to
        be selected, if the rise index can be measured at all
        (e.g.: index_rise=-0.5 will allow you to select only
        those candidates rising faster than 0.5 mag/day).
 
    index_decay float
        positive number, minimum decay rate (mag/day) for a candidate to
        be selected, if the decay index can be measured at all
        (e.g.: index_decay=0.3 will allow you to select only
        those candidates decaying faster than 0.3 mag/day).

    path_secrets_db str
        path to the CSV secrets file to access the psql db.
        The file will need to have: 
        db,host,dbname,port,user,password

    path_forced str
        path to the directory where forced photometry light curves are stored

    save_plot bool
        save the plot to a png file

    path_plot str
         path to the folder where to save the plot

    show_plot bool
         display the plot while the code is running.
         The code will resume running when the plot is manually closed.

    use_metadata bool
        if True, use ztfquery to fetch upper limits based on ZTF pipeline
        non-detection of the transient (for plotting only)

    path_secrets_meta str
        path to the CSV secrets file to access ztfquery.
        The file will need to have:
        ztfquery_user, ztfquery_pwd

    save_csv bool
        if True, the light curve is saved in a CSV file

    path_csv str
        path to the folder where the light curve will be saved

       
    ---
    Returns

    if update_database=True, it automatically updates the database with the
    following information:
    duration_tot, duration per band, rise or decay rate (index) per band

    selected list of str
        list of candidates that meet the input selection criteria

    rejected list of str
        list of candidates that don't meet the input selection criteria

    cantsay list of str
        list of candidates without enough information to tell
        if they meet the input selection criteria
    """

    # Useful definitions, specific for ZTF
    candidates = set(tbl["name"])
    filters = ['g', 'r', 'i']
    filters_id = {'1': 'g', '2': 'r', '3': 'i'}
    colors = {'g': 'g', 'r': 'r', 'i': 'y'}

    if update_database is True or read_database is True:
        # Connect to psql db
        con, cur = connect_database(update_database=update_database,
                                    path_secrets_db=path_secrets_db)

    if save_plot is True:
        if not os.path.isdir(path_plot):
            os.makedirs(path_plot) 
    if save_csv is True:
        if not os.path.isdir(path_csv):
            os.makedirs(path_csv)

    names_select = []
    names_reject = []
    empty_lc = []

    # Get forced phot for all the candidates
    if read_database is True and use_forced_phot is True:
        str_names = "'" + "','".join(candidates) + "'"
        # table name
        if stacked is True:
            table_name = "lightcurve_stacked"
            column_names = "name, jd, flux, flux_unc, mag, mag_unc, \
limmag, filter, zp, ezp, programid, field, ccdid, qid"
        else:
            table_name = "lightcurve_forced"
            column_names = "name, jd, filter, programid, \
field, mag, mag_unc, limmag, zp, ezp, flux_maxlike, flux_maxlike_unc"
        # Read the light curve from the database
        t_pd = pd.read_sql_query(f"SELECT {column_names} \
                                 from {table_name} \
                                 where name IN ({str_names})", con)
        # If the table is empty, return
        if t_pd.empty:
            print("There is no forced photometry in the database \
for any of the given candidates!")

            return None, None, None

        t_forced = Table.from_pandas(t_pd)

    for name in candidates:
        # Is the candidate to be ignored?
        if name in hard_reject:
            continue
        # Check if the forced photometry light curve is available
        if use_forced_phot is True:
            with_forced_phot = True
            if read_database is True:
                t = t_forced[t_forced['name'] == name]
            else:
                # Read the light curve from a file
                files = glob.glob(f"{path_forced}/*{name}*maxlike*fits")
                if len(files) == 0:
                    print(f"No forced photometry available for {name}: skipping")
                    continue
                elif len(files) > 1:
                    print(f"WARNING: more than one light curve found for {name}")
                    print(f"Using {files[0]}")
                    filename = files[0]
                else:
                    filename = files[0]
                t = Table(fits.open(filename)[1].data)
            empty = False
            if len(t) == 0:
                empty = True
                empty_lc.append(name)
                print(f"Empty forced photometry light curve for {name}: skipping")
                continue
                # Keep going with only the alerts?
                #t = tbl[tbl['name'] == name]
                #t_ul = t[:0].copy()
            else:
                t_ul = t[t["mag"] > 50]
                t = t[t["mag"] < 50]
                # Fix the column names
                t.rename_column('mag', 'magpsf')
                t.rename_column('mag_unc', 'sigmapsf')
                if read_database is False:
                    t.rename_column('jdobs', 'jd')
                    t_ul.rename_column('jdobs', 'jd')

                # Add missing epochs packets
                for l in tbl[tbl['name'] == name]:
                    if len(t_ul) > 0:
                        min_delta_ul = np.min(np.abs(t_ul['jd'] - l['jd']))
                    else:
                        min_delta_ul = np.inf
                    if len(t) > 0:
                        min_delta_det = np.min(np.abs(t['jd'] - l['jd']))
                    else:
                        min_delta_det = np.inf
                    # Min time from forced photometry for using alerts
                    if stacked is True:
                        # 12 hours for stacked images
                        min_delta = 12./24.
                    else:
                        # 15 seconds for non-stacked light curves
                        min_delta = 15./24./60./60.
                    if np.min([min_delta_ul, min_delta_det]) > min_delta:
                        if stacked is False and read_database is False:
                            new_row = [l['jd'], l['filter'], np.nan,
                                       np.nan, np.nan, np.nan, 1, l['field'],
                                       l['rcid'], np.nan, np.nan, np.nan,
                                       np.nan, np.nan, np.nan, np.nan, np.nan,
                                       np.nan, np.nan, np.nan, '', '', np.nan,
                                       np.nan, np.nan, np.nan, np.nan,
                                       l['magpsf'], l['sigmapsf'], np.nan]
                        elif stacked is False and read_database is True:
                            new_row = [name, l['jd'], l['filter'], 1,
                                       l['field'], l['magpsf'], l['sigmapsf'],
                                       99., 0., 0., 0., 0.]
                        elif stacked is True and read_database is True:
                            new_row = [name, l['jd'], np.nan, np.nan,
                                       l['magpsf'], l['sigmapsf'], 99.,
                                       l['filter'], 0., 0., l['programid'], l['field'], 0., 0.]
                        else:
                            new_row = [l['jd'], np.nan, np.nan, np.nan, np.nan,
                                       l['magpsf'], l['sigmapsf'], np.nan,
                                       l['filter'], 1]
                        t.add_row(new_row)
        else:
            with_forced_phot = False
            empty = False
            t = tbl[tbl['name'] == name]
            t_ul = t[:0].copy()

        # Reject those with only upper limits
        if len(t) == 0 and empty is False:
            names_reject.append(name)
            continue

        # Determine the light curve starting time
        t0 = min(t["jd"])

        # Reject if the overall duration is longer than ??  days
        # or if there is only 1 detection
        try:
            if update_database is True:
                # FIXME do we want to report only the max duration_tot between alerts, forcephot, and stack?
                cur.execute(f"UPDATE candidate SET \
                            duration_tot = {np.max(t['jd']) - np.min(t['jd'])}\
                            where name = '{name}'")
            if np.max(t['jd']) - np.min(t['jd']) > max_duration_tot or np.max(t['jd']) - np.min(t['jd']) == 0:
                names_reject.append(name)
                continue
        except ValueError:
            print("Failed calculating max(t['jd']) - min(t['jd']) > 10.")
            pdb.set_trace()
        try:
            plt.close()
        except:
            pass
        #plt.clf()
        plt.figure(figsize=(8,6))
        plt.subplot(1, 1, 1)

        plotted = False
        #print(f"-------- {name}")

        for f in filters:
            tf = t[t['filter'] == f]
            if len(tf) == 0:
                continue
            if use_metadata is False:
                tf_ul = t_ul[t_ul['filter'] == f]
                if len(tf_ul) > 0:
                    tf_ul["jd"] = tf_ul["jd"] - t0
                    plt.plot(np.array(tf_ul["jd"]), np.array(tf_ul["limmag"]),
                             colors[f]+'v', markeredgecolor=colors[f],
                             markerfacecolor='w')
                    plt.plot([],[], 'kv', label='UL')
            # Correct the start time
            tf["jd"] = tf["jd"] - t0

            #brightest, faintest detections
            bright = np.min(tf["magpsf"])
            try:
                brighterr = tf["sigmapsf"][tf["magpsf"] == bright][0]
            except:
                print("problems with",
                      "brighterr = tf['sigmapsf'][tf['magpsf'] == bright][0]")
                print(tf)
                pdb.set_trace()
                continue
            bright_jd = tf["jd"][tf["magpsf"] == bright][0]
            faint = np.max(tf["magpsf"])
            fainterr = tf["sigmapsf"][tf["magpsf"] == faint][0]
            faint_jd = tf["jd"][tf["magpsf"] == faint][0]

            # First and last detections
            first = np.min(tf["jd"])
            last = np.max(tf["jd"])

            # Add the information regarding the duration in the db
            # duration_g is the max number of days between the first
            # detection and the last detection in g band.
            if update_database is True:
                cur.execute(f"UPDATE candidate SET \
                            duration_{f} = {last-first} \
                            where name = '{name}'")

            time = np.array(tf["jd"])
            mag = np.array(tf["magpsf"])
            magerr = np.array(tf["sigmapsf"])
            plt.errorbar(np.array(tf["jd"][tf['programid']!=1]),
                         np.array(tf["magpsf"][tf['programid']!=1]),
                         fmt=colors[f]+'s',
                         yerr=np.array(tf["sigmapsf"][tf['programid']!=1]),
                         markeredgecolor='k', markersize=8)
            plt.errorbar(np.array(tf["jd"][tf['programid']==1]),
                         np.array(tf["magpsf"][tf['programid']==1]),
                         fmt=colors[f]+'o',
                         yerr=np.array(tf["sigmapsf"][tf['programid']==1]),
                         markeredgecolor='k', markersize=8)

            plt.plot([],[], 'ks', label='programid=2,3')
            plt.plot([],[], 'ko', label='programid=1')

            # max_days_g is the max number of days between the first
            # detection in any band and the last g-band detection
            if update_database is True:
                cur.execute(f"UPDATE candidate SET \
                            max_days_{f} = {np.max(tf['jd'])} \
                            where name = '{name}'")

            # SELECT: not enough baseline - no action taken
            if np.abs(last-first) < baseline:
                continue

            # SELECT: no variability between the first and last detection - rejection
            if bright+brighterr > faint-fainterr and np.abs(bright_jd - faint_jd) >= var_baseline[f]:
                names_reject.append(name)
                continue

            # SELECT: if a g-band detection is present more than max_days_g
            # days after the first detection, reject
            if f == 'g' and np.max(tf['jd']) > max_days_g:
                names_reject.append(name)
                continue

            onlyrise = False
            onlyfade = False

            if bright_jd < first + baseline:
                onlyfade = True
                riseorfade = 'fade'
                # Some info may be stored that we want to remove
                if update_database is True and with_forced_phot is False:
                    cur.execute(f"UPDATE candidate SET \
                                index_rise_{f} = NULL \
                                where name = '{name}'")
                elif update_database is True and with_forced_phot is True:
                    if stacked is True:
                        column = f"index_rise_stack_{f}"
                    else:
                        column = f"index_rise_forced_{f}"
                    cur.execute(f"UPDATE candidate SET \
                                {column} = NULL \
                                where name = '{name}'")

            elif bright_jd > last - baseline:
                onlyrise = True
                riseorfade = 'rise'
                if update_database is True and with_forced_phot is False:
                    cur.execute(f"UPDATE candidate SET \
                                index_fade_{f} = NULL \
                                where name = '{name}'")
                elif update_database is True and with_forced_phot is True:
                    if stacked is True:
                        column = f"index_fade_stack_{f}"
                    else:
                        column = f"index_fade_forced_{f}"
                    cur.execute(f"UPDATE candidate SET \
                                {column} = NULL \
                                where name = '{name}'")

            # Fit
            fitfunc = lambda p, x: p[0] + p[1] * x
            errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
            pinit = [1.0, -1.0]

            if onlyrise or onlyfade:
                pfinal, covar, index, amp, indexErr, ampErr = do_fit(errfunc, pinit, time, mag, magerr)
                plt.plot(time, fitfunc(pfinal, time), color=colors[f],
                         label=f"{f}, index= {'{:.2f}'.format(index)}+-{'{:.2f}'.format(indexErr)}")

                # Add info to the database
                if update_database is True and with_forced_phot is False:
                    cur.execute(f"UPDATE candidate SET \
                                index_{riseorfade}_{f} = {index} \
                                where name = '{name}'")
                elif update_database is True and with_forced_phot is True:
                    if stacked is True:
                        column = f"index_{riseorfade}_stack_{f}"
                    else:
                        column = f"index_{riseorfade}_forced_{f}"
                    cur.execute(f"UPDATE candidate SET \
                                {column} = {index} \
                                where name = '{name}'")

                # SELECT: slow evolution over a time longer than the baseline
                if ((index > 0 and index <= index_decay) and
                    (last-first) > var_baseline[f]):
                    names_reject.append(name)
                else:
                    plotted = True
            else:
                indexrise = np.where(time <= bright_jd)
                indexfade = np.where(time >= bright_jd)
                for i, riseorfade in zip([indexrise, indexfade], ['rise', 'fade']):
                    time_new = time[i[0]]
                    mag_new = mag[i[0]]
                    magerr_new = magerr[i[0]]

                    faint_new = np.max(mag_new)
                    fainterr_new = magerr_new[np.where(mag_new == faint_new)[0]]
                    faint_jd_new = time_new[np.where(mag_new == faint_new)[0]]
                    # Format check
                    if type(magerr_new) is list or type(magerr_new) is np.ndarray:
                        magerr_new = magerr_new[0]
                    if type(fainterr_new) is list or type(fainterr_new) is np.ndarray:
                        fainterr_new = fainterr_new[0]
                    if type(faint_jd_new) is list or type(faint_jd_new) is np.ndarray:
                        faint_jd_new = faint_jd_new[0]

                    # SELECT: no evolution
                    try:
                        if (bright+brighterr > faint_new - fainterr_new and
                            np.abs(faint_jd_new - bright_jd) >= var_baseline[f]):
                            names_reject.append(name)
                            plt.errorbar(time, mag, yerr=magerr,
                                         fmt=colors[f]+'.',
                                         markeredgecolor='k',
                                         markersize=8)
                            continue
                    except:
                        print(bright, brighterr, faint_new, fainterr_new)
                        pdb.set_trace()
                    pfinal, covar, index, amp, indexErr, ampErr = do_fit(errfunc, pinit, time_new, mag_new, magerr_new)
                    plt.plot(time_new, fitfunc(pfinal, time_new),
                             color=colors[f],
                             label=f"{f}, index= {'{:.2f}'.format(index)}+-{'{:.2f}'.format(indexErr)}")

                    if update_database is True and with_forced_phot is False:
                        cur.execute(f"UPDATE candidate SET \
                                    index_{riseorfade}_{f} = {index} \
                                    where name = '{name}'")
                    elif update_database is True and with_forced_phot is True:
                        if stacked is True:
                            column = f"index_{riseorfade}_stack_{f}"
                        else:
                            column = f"index_{riseorfade}_forced_{f}"
                        cur.execute(f"UPDATE candidate SET \
                                    {column} = NULL \
                                    where name = '{name}'")

                    # SELECT: slow evolution
                    if ((index > 0 and index <= index_decay) and
                        (last-first) > var_baseline[f]):
                        names_reject.append(name)
                    else:
                        plotted = True

        if name in names_reject:
            plt.close()
            continue

        if plotted is True:
            # The candidate was selected!
            names_select.append(name)

        if plotted is True and (show_plot is True or save_plot is True):
            # The following is for the file naming
            if use_forced_phot is True:
                forcedbool = 1
            else:
                forcedbool = 0
            if stacked is True:
                stackbool = 1
            else:
                stackbool = 0

            if use_metadata:
                # Fetch metadata on all the available ZTF images
                #start_jd = Time('2018-03-01 00:00:00', format='iso').jd
                #end_jd =  Time('2030-02-01 04:50:59.998', format='iso').jd
                start_jd, end_jd = None, None

                # Read the secrets
                secrets = ascii.read(path_secrets_meta, format='csv')
                username = secrets['ztfquery_user'][0]
                password = secrets['ztfquery_pwd'][0]

                zquery = query.ZTFQuery()
                coords = SkyCoord(ra=np.mean(tbl[tbl['name'] == name]['ra']*u.deg),
                                  dec=np.mean(tbl[tbl['name'] == name]['dec']*u.deg))
                metadata = query_metadata(coords.ra, coords.dec, username, password,
                                          zquery, start_jd=start_jd, end_jd=end_jd,
                                          out_csv=None)
                t_ul = Table([[],[],[],[],[],[],[],[]],
                             names=('jd', 'magpsf', 'sigmapsf', 'filter',
                                   'snr', 'limmag', 'seeing', 'programid'),
                             dtype=('double','f','f','S','f','f','f','int'))
                for j, ml, fid, s, pid in zip(metadata['obsjd'],
                                              metadata['maglimit'],
                                              metadata['fid'],
                                              metadata['seeing'],
                                              metadata['pid']):
                    if not (j in t['jd']):
                        new_row = [j, 99.9, 99.9, filters_id[str(fid)],
                                   np.nan, ml, s, 0]
                        t_ul.add_row(new_row)
                for f in filters:
                    tf_ul = t_ul[t_ul['filter'] == f]
                    if len(tf_ul) > 0:
                        tf_ul["jd"] = tf_ul["jd"] - t0
                        plt.plot(np.array(tf_ul["jd"]), np.array(tf_ul["limmag"]),
                                 colors[f]+'v', markeredgecolor=colors[f],
                                 markerfacecolor='w')
                        plt.plot([],[], 'kv', label='UL')
 
            plt.title(f"{name}")
            plt.xlabel('Time [days]', fontsize=18)
            plt.ylabel('mag', fontsize=18)

            plt.tick_params(axis='both',    # changes apply to the x-axis
                            which='both',   # both major and minor ticks are affected
                            labelsize=16)

            handles, labels = plt.gca().get_legend_handles_labels()
            by_label = OrderedDict(zip(labels, handles))
            plt.legend(by_label.values(), by_label.keys(), fontsize=16)

            plt.gca().invert_yaxis()

            if save_plot is True:
                # Save the image
                plt.savefig(f"{path_plot}/lc_{name}_forced{forcedbool}_stacked{stackbool}.png")
            if save_csv is True:
                # Save the light curve
                t_union = vstack([t, t_ul])
                ascii.write(t_union,
                            f"{path_csv}/lc_{name}_forced{forcedbool}_stacked{stackbool}.csv",
                            format='csv', overwrite=True)
            if show_plot is True:
                plt.show()
        else:
            plt.close()

    if update_database is True:
        # Update the hard_reject flag
        names_reject_str = "','".join(list(names_reject))
        cur.execute(f"UPDATE candidate SET \
                    hard_reject = 1 \
                    where name IN ('{names_reject_str}')")
        # Commit the changes
        con.commit()

        # Close the connection
        cur.close()
        con.close()

    print(f"{len(set(empty_lc))} empty light curves")
    print(f"Select {set(names_select)}")
    print(f"Reject {set(names_reject)}")
    cantsay = list(n for n in candidates if
                   not (n in names_select) and not (n in names_reject))
    print(f"Cannot say {set(cantsay)}")
    print(f"{len(set(names_reject))}/{len(candidates)} objects rejected")
    print(f"{len(set(names_select))}/{len(candidates)} objects selected")
    print(f"{len(set(cantsay))}/{len(candidates)} objects cannot say")

    return names_select, names_reject, cantsay
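
# A hedged usage sketch, assuming `tbl` is the astropy Table of alert
# photometry described in the docstring; database access and forced
# photometry are disabled, so no psql credentials are needed.
selected, rejected, cantsay = select_variability(
    tbl, update_database=False, read_database=False,
    use_forced_phot=False, index_decay=0.3)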
Example #8
def download_images_diffpsf_refdiff(targetdir,
                                    ra1,
                                    dec1,
                                    start_jd=None,
                                    open_check=True):
    '''
    Download subtracted images and psf images from irsa

    This function will create two folders to store fits files downloaded from ipac:
	difference images go to --> targetdir/images_refdiff/
	psf images go to --> targetdir/images_diffpsf/

    ra1, dec1 are the coordinates of the target

    set start_jd = None if you want all images in ZTF history

    set open_check = True and the function will try to open all files in the final step.
        Sometimes (although very seldom) a fits file can be broken.
    '''
    try:
        os.stat(targetdir)
    except:
        os.mkdir(targetdir)

    subdir1 = os.path.dirname(targetdir + '/images_diffpsf/')
    try:
        os.stat(subdir1)
    except:
        os.mkdir(subdir1)
    subdir2 = os.path.dirname(targetdir + '/images_refdiff/')
    try:
        os.stat(subdir2)
    except:
        os.mkdir(subdir2)

    ############## Get metadata of all images at this location ################
    zquery = query.ZTFQuery()
    print('\n')
    print("Querying for metadata...")
    if start_jd is None:
        zquery.load_metadata(kind='sci', radec=[ra1, dec1], size=0.003)
    else:
        zquery.load_metadata(kind='sci',
                             radec=[ra1, dec1],
                             size=0.003,
                             sql_query='obsjd>' + repr(start_jd))
    out = zquery.metatable
    final_out = out.sort_values(by=['obsjd'])
    final_out.to_csv(targetdir + '/irsafile.csv')

    urls, dl_loc = zquery.download_data(nodl=True)
    urls = np.array(urls)
    dl_loc = np.array(dl_loc)
    print('Trying to download %d images from irsa...' % len(urls))

    for i in range(len(urls)):
        if i % 50 == 0:
            print('In progress: %d in %d' % (i, len(urls)))
        _url = urls[i]
        _url_diffpsf = _url.split('sciimg')[0] + 'diffimgpsf.fits'
        _url_ref = _url.split('sciimg')[0] + 'scimrefdiffimg.fits.fz'
        fitsfile1 = _url_diffpsf.split('/')[-1]
        fitsfile2 = _url_ref.split('/')[-1]
        savename1 = subdir1 + '/' + fitsfile1.split('.fits')[0] + '.fits'
        savename2 = subdir2 + '/' + fitsfile2.split('.fits')[0] + '.fits'

        if not os.path.isfile(savename1):
            download_single_url(_url_diffpsf,
                                fileout=savename1,
                                cookies=None,
                                verbose=False)
        if not os.path.isfile(savename2):
            download_single_url(_url_ref,
                                fileout=savename2,
                                cookies=None,
                                verbose=False)

    ################## we do not have access to some images ###################
    original_names = []
    for i in range(len(urls)):
        original_names.append(urls[i].split('/')[-1].split('_sciimg.fits')[0])
    original_names = np.array(original_names)
    argo = np.argsort(original_names)
    original_names = original_names[argo]

    durls = glob.glob(subdir1 + '/*.fits')
    downloaded_names = []
    for i in range(len(durls)):
        downloaded_names.append(
            durls[i].split('/')[-1].split('_diffimgpsf.fits')[0])
    downloaded_names = np.array(downloaded_names)
    argd = np.argsort(downloaded_names)
    downloaded_names = downloaded_names[argd]

    ix_why = np.in1d(original_names, downloaded_names)
    print('We do not have data for %d of the %d images:' %
          (np.sum(~ix_why), len(urls)))
    print('\n')

    ###################### check if files can be opened #######################
    if open_check:
        print('checking if all files can be opened...')
        imgdir = targetdir + '/images_refdiff/'
        psfdir = targetdir + '/images_diffpsf/'
        imgfiles = np.array(glob.glob(imgdir + '*.fits'))
        arg = np.argsort(imgfiles)
        imgfiles = imgfiles[arg]
        psffiles = np.array(glob.glob(psfdir + '*.fits'))
        arg = np.argsort(psffiles)
        psffiles = psffiles[arg]
        n = len(imgfiles)

        if len(imgfiles) != len(psffiles):
            imgstrings = [x.split('/')[-1][:-20] for x in imgfiles]
            psfstrings = [x.split('/')[-1][:-16] for x in psffiles]
            imgstrings = np.array(imgstrings)
            psfstrings = np.array(psfstrings)
            if len(imgstrings) > len(psfstrings):
                ix = np.in1d(imgstrings, psfstrings)
                for x in imgfiles[~ix]:
                    os.remove(x)
            else:
                ix = np.in1d(psfstrings, imgstrings)
                for x in psffiles[~ix]:
                    os.remove(x)
            imgdir = targetdir + '/images_refdiff/'
            psfdir = targetdir + '/images_diffpsf/'
            imgfiles = np.array(glob.glob(imgdir + '*.fits'))
            arg = np.argsort(imgfiles)
            imgfiles = imgfiles[arg]
            psffiles = np.array(glob.glob(psfdir + '*.fits'))
            arg = np.argsort(psffiles)
            psffiles = psffiles[arg]
            n = len(imgfiles)

        for i in range(n):
            if i % 50 == 0:
                print('In progress: %d in %d...' % (i, n))
            imgpath = imgfiles[i]
            psfpath = psffiles[i]
            try:
                fits.open(imgpath)[1].header
                fits.open(imgpath)[1].data
                fits.open(psfpath)[0].data
            except:
                print('file broken, remove %s, %s' % (imgpath, psffiles[i]))
                os.remove(imgfiles[i])
                os.remove(psffiles[i])
        print('\n')
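
# A hedged usage sketch (placeholder target directory, coordinates, and date):
download_images_diffpsf_refdiff('./my_target', 252.7534888, 25.8759437,
                                start_jd=2458355.5, open_check=True)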
Example #9
def get_finder(ra,
               dec,
               name,
               rad=0.01,
               target_mag=np.nan,
               starlist=None,
               print_starlist=True,
               telescope="P200",
               main_comment="",
               minmag=15,
               maxmag=18.5,
               figdir=None):
    """ 
    Aim: Generate finder chart 
    
    Parameters:
    ra: float or string. 
        eg: ra="18h24m25.36s", 210.437583
        
    dec: float or string, type must be consistent with ra. 
        eg: dec="+44d07m50.0s", 46.215583
        
    name: ZTFname or host name. 
        eg: name="18aaslhxt_h", "ZTF18abclfee"
        
    rad: search radius in the unit of degree
    
    target_mag: magnitude of the target at time of observation,
        default should be r band
    
    starlist: name of the starlist
        eg: starlist="/Users/yuhanyao/Desktop/observation/20190428_LRIS/starlist"
        
        
    References:
    Keck starlist format: https://www2.keck.hawaii.edu/observing/starlist.html
        First field is the target name in columns 1-16 (tabs and spaces allowed). Maximum length is 15 characters.
        Next three space-separated tokens (beginning in column 17) are RA in the form HH MM SS.SS (including an arbitrary number of decimal places).
        Next three space-separated tokens are Dec in the form (-)DD MM SS.S (again, to an arbitrary number of decimal places). Note: if the Dec is between 0 and -1, the DD field MUST be -00).
        Next token is equinox in the form YYYY.YY (no parentheses; arbitrary number of decimal places), with <=1950 meaning FK4 and 2000 meaning FK5 and APP meaning apparent.
        
    """
    print('Using search radius of %.1f arcsec.' % (rad * 3600))

    name = str(name)
    #assert len(name) in [12, 11]

    assert type(ra) == type(dec)
    if type(ra) == str:
        c1 = SkyCoord(ra, dec, frame='icrs')
        ra = c1.ra.degree
        dec = c1.dec.degree
    ra = float(ra)
    dec = float(dec)

    # prepare to print starlist
    if telescope == "Keck":
        name_length = 16  # maximum length of name is 15
        commentchar = "#"
        separator = ""
        rah_length = 11
    elif telescope == "P200":
        name_length = 20
        commentchar = "!"
        separator = "|"
        rah_length = 12

    #Write to the starlist if the name of the starlist was provided.
    rah, dech = deg2hour(ra, dec, sep=" ")
    if starlist is not None:
        s_target_mag = "yyao: (r={0:.1f})".format(target_mag)
        if telescope == "Keck":
            with open(starlist, "a") as f:
                f.write("{:s}{:s} {:s} 2000.0 {:s} {:s} {:s} {:s} \n".format(
                    name.ljust(name_length), rah.ljust(rah_length), dech,
                    commentchar, main_comment, separator, s_target_mag))
                f.close()
        elif telescope == "P200":
            with open(starlist, "a") as f:
                f.write("{:s}{:s} {:s} 2000.0 {:s} {:s} {:s} {:s} \n".format(
                    name.ljust(name_length), rah.ljust(rah_length), dech,
                    commentchar, main_comment, separator, s_target_mag))
                f.close()

    # Get metadata of all images at this location
    print("Querying for metadata...")
    zquery = query.ZTFQuery()
    zquery.load_metadata(radec=[ra, dec], size=rad)
    out = zquery.metatable

    # Do you need to use a reference image?
    need_ref = len(out) == 0
    if need_ref or name[:4] == "ZTFJ":
        print("Using a reference image")
        imfile, catfile = choose_ref(zquery, ra, dec)
    else:
        print("Using a science image")
        imfile, catfile = choose_sci(zquery, out, name, ra, dec)

    # get the cutout
    inputf = pyfits.open(imfile)
    im = inputf[0].data
    inputf.close()
    head = fits.getheader(imfile)

    # Get the x and y position of the target,
    # as per the IPAC catalog
    wcs = astropy.wcs.WCS(head)
    world = np.array([[ra, dec]], float)  # np.float_ was removed in NumPy 2.0
    target_pix = wcs.wcs_world2pix(world, 0)[0]
    xpos = target_pix[0]
    ypos = target_pix[1]

    # adjust counts
    im[np.isnan(im)] = 0
    im[im > 30000] = 30000

    # extract 600x600 region around the position of the target
    width = 600
    height = 600
    xmax = xpos + width / 2
    xmin = xpos - width / 2
    ymax = ypos + height / 2
    ymin = ypos - height / 2

    plt.figure(figsize=(8, 6))
    plt.set_cmap('gray_r')
    smoothedimage = gaussian_filter(im, 1.3)
    # pad the image
    im_padded = np.pad(smoothedimage, 300, mode='constant', constant_values=0)

    # If it's a reference image, you have to flip it up/down and left/right
    if need_ref:
        croppedimage = np.fliplr(
            np.flipud(im_padded[int(ymin) + 300:int(ymax) + 300,
                                int(xmin) + 300:int(xmax) + 300]))

    # If it's a science image, you just flip it up/down
    else:
        croppedimage = np.flipud(im_padded[int(ymin) + 300:int(ymax) + 300,
                                           int(xmin) + 300:int(xmax) + 300])

    plt.imshow(
        croppedimage,
        origin='lower',  # convention for IPAC images
        vmin=np.percentile(im.flatten(), 10),
        vmax=np.percentile(im.flatten(), 99.0))

    # Mark target: should just be the center of the image, now
    # horizontal line
    plt.plot([300 + 5, 300 + 20], [300, 300], 'g-', lw=2)
    # vertical line
    plt.plot([300, 300], [300 + 5, 300 + 20], 'g-', lw=2)

    # and the offset of the original coordinate system with the new coordinates
    offset_x = xpos - 300
    offset_y = ypos - 300

    # Choose offset stars
    cat = pyfits.open(catfile)[1].data
    zp = pyfits.open(catfile)[0].header['MAGZP']
    sep_pix = np.sqrt(
            (xpos-cat['xpos'])**2 + \
            (ypos-cat['ypos'])**2)

    # should be separated by at least 10 pixels
    crit_a = np.logical_and(sep_pix > 10, cat['flags'] == 0)
    crit_b = np.logical_and(cat['chi'] < 2, cat['snr'] > 10)
    crit_c = cat['sharp'] < 0.3
    crit_ab = np.logical_and(crit_a, crit_b)
    crit = np.logical_and(crit_ab, crit_c)

    # should be bright
    mag_crit = np.logical_and(cat['mag'] + zp >= minmag,
                              cat['mag'] + zp <= maxmag)
    choose_ind = np.where(np.logical_and(crit, mag_crit))

    # mark the closest three stars
    nref = 3
    order = np.argsort(sep_pix[choose_ind])
    cols = ['orange', 'purple', 'red']

    for ii in np.arange(nref):
        ref_xpos_original = cat['xpos'][choose_ind][order][ii] - offset_x
        ref_ypos_original = cat['ypos'][choose_ind][order][ii] - offset_y

        # transform to flipped plot
        if need_ref:
            ref_xpos = 600 - ref_xpos_original
            ref_ypos = 600 - ref_ypos_original
        else:
            ref_xpos = ref_xpos_original
            ref_ypos = 600 - ref_ypos_original

        plt.plot([ref_xpos + 5, ref_xpos + 20], [ref_ypos, ref_ypos],
                 c=cols[ii],
                 ls='-',
                 lw=2)
        plt.plot([ref_xpos, ref_xpos], [ref_ypos + 5, ref_ypos + 20],
                 c=cols[ii],
                 ls='-',
                 lw=2)
        refra = cat['ra'][choose_ind][order][ii]
        refdec = cat['dec'][choose_ind][order][ii]
        if telescope == 'Keck':
            refrah, refdech = deg2hour(refra, refdec, sep=" ")
        elif telescope == 'P200':
            refrah, refdech = deg2hour(refra, refdec, sep=":")
        else:
            print("I don't recognize this telescope")
        refmag = cat['mag'][choose_ind][order][ii] + zp
        dra, ddec = get_offset(refra, refdec, ra, dec)

        offsetnum = 0.2
        # use plt.gca() for the current Axes (plt.axes() can create a new
        # Axes on recent matplotlib versions)
        plt.text(1.02,
                 0.60 - offsetnum * ii,
                 'Ref S%s, mag %s' % ((ii + 1), np.round(refmag, 1)),
                 transform=plt.gca().transAxes,
                 fontweight='bold',
                 color=cols[ii])
        plt.text(1.02,
                 0.55 - offsetnum * ii,
                 '%s %s' % (refrah, refdech),
                 color=cols[ii],
                 transform=plt.gca().transAxes)
        plt.text(1.02,
                 0.50 - offsetnum * ii,
                 str(np.round(ddec, 2)) + "'' N, " + str(np.round(dra, 2)) +
                 "'' E",
                 color=cols[ii],
                 transform=plt.gca().transAxes)

        # Print starlist for telescope
        if telescope == 'Keck':
            # Target name is columns 1-16
            # RA must begin in 17, separated by spaces
            print(
                "{:s}{:s} {:s} 2000.0 {:s} raoffset={:.2f} decoffset={:.2f} {:s} r={:.1f} "
                .format((name + "_S%s" % (ii + 1)).ljust(name_length),
                        refrah.ljust(rah_length), refdech, separator, dra,
                        ddec, commentchar, refmag))
        elif telescope == 'P200':
            print(
                "{:s}{:s} {:s} 2000.0 {:s} raoffset={:.2f} decoffset={:.2f} r={:.1f} \n"
                .format((name + "_S%s" % (ii + 1)).ljust(name_length),
                        refrah.ljust(rah_length), refdech, commentchar, dra,
                        ddec, refmag))

        # and save to file if starlist name is provided
        if starlist is not None:
            if telescope == "Keck":
                with open(starlist, "a") as f:
                    f.write(
                        "{:s}{:s} {:s} 2000.0 {:s} raoffset={:.2f} decoffset={:.2f} {:s} r={:.1f} \n"
                        .format((name + "_S%s" % (ii + 1)).ljust(name_length),
                                refrah.ljust(rah_length), refdech, commentchar,
                                dra, ddec, separator, refmag))
                    f.close()
            elif telescope == "P200":
                with open(starlist, "a") as f:
                    f.write(
                        "{:s}{:s} {:s} 2000.0 {:s} raoffset={:.2f} decoffset={:.2f} r={:.1f} \n"
                        .format((name + "_S%s" % (ii + 1)).ljust(name_length),
                                refrah.ljust(rah_length), refdech, commentchar,
                                dra, ddec, refmag))
                    f.close()

    # Plot compass
    plt.plot([width - 10, height - 40], [10, 10], 'k-', lw=2)
    plt.plot([width - 10, height - 10], [10, 40], 'k-', lw=2)
    plt.annotate("N",
                 xy=(width - 20, 40),
                 xycoords='data',
                 xytext=(-4, 5),
                 textcoords='offset points')
    plt.annotate("E",
                 xy=(height - 40, 20),
                 xycoords='data',
                 xytext=(-12, -5),
                 textcoords='offset points')

    # Get rid of axis labels
    plt.gca().get_xaxis().set_visible(False)
    plt.gca().get_yaxis().set_visible(False)

    # Set size of window (leaving space to right for ref star coords)
    plt.subplots_adjust(right=0.65, left=0.05, top=0.99, bottom=0.05)

    # List name, coords, mag of the target
    plt.text(1.02,
             0.85,
             name,
             transform=plt.gca().transAxes,
             fontweight='bold')
    # Can't print mag, because we don't know how bright the target is
    #plt.text(1.02, 0.80, "%s"%mag, transform=plt.gca().transAxes, fontweight='bold')
    plt.text(1.02,
             0.80,
             "%.5f %.5f" % (ra, dec),
             transform=plt.gca().transAxes)
    rah, dech = deg2hour(ra, dec)
    plt.text(1.02, 0.75, rah + "  " + dech, transform=plt.gca().transAxes)

    if figdir is None:
        plt.savefig("finder_chart_%s.png" % name)
    else:
        plt.savefig(figdir + "finder_chart_%s.png" % name)
    plt.close()
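
# A hedged usage sketch (placeholder coordinates, name, and magnitude):
get_finder(210.437583, 46.215583, 'ZTF18abclfee', rad=0.01,
           target_mag=18.5, telescope='P200')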
Example #10
    def __init__(self, target=None):
        """ """
        self._zquery = query.ZTFQuery()
        if target is not None:
            self.set_name(target)
Example #11
def get_forced_phot(name,ra,dec,jdobs):
    """
    Uses Yuhan's code to perform forced photometry
    for a given RA and Dec and central JD (day of the transient),
    over a window from 200 days before to 100 days after jdobs

    Parameters
    ----------
    name: name of the source
    ra: ra in decimal degrees
    dec: dec in decimal degrees
    jdobs: central JD of the transient, in days
    """
    start_jd = jdobs-200
    end_jd = jdobs+100

    zquery = query.ZTFQuery()
    zquery.load_metadata(
            radec=[ra, dec], size=0.0001, 
            sql_query="obsjd>%s and obsjd<%s" %(start_jd,end_jd))
    zquery.download_data("scimrefdiffimg.fits.fz")
    zquery.download_data("diffimgpsf.fits")

    print("finished downloading data")

    filefracday = zquery.metatable['filefracday'].values

    jd = []
    flux = []
    eflux = []
    mag = []
    emag = []
    filt = []

    # Normalize all fluxes to the same zp
    ZP = 25
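    # A flux measured at image zeropoint zp is rescaled by 10**(-0.4*(zp - ZP)),
    # so that mag = ZP - 2.5*log10(flux) holds for every epoch.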

    for ffd in filefracday:
        ffd = str(ffd)
        fpath = 'Data/sci/' + ffd[0:4] + '/' + ffd[4:8] + '/' + ffd[8:]

        # Check if directory is empty
        if len(glob.glob(fpath + "/*")) > 0:
            imgpath = glob.glob(fpath + "/*.fits.fz")[0]
            psfpath = glob.glob(fpath + "/*diffimgpsf.fits")[0]

            pobj = ZTFphot(name, ra, dec, imgpath, psfpath)
            pobj.load_source_cutout() 
            pobj.load_bkg_cutout()
            pobj.get_scr_cor_fn()  
            pobj.fit_psf()

            jd.append(pobj.obsjd)
            zp = pobj.zp
            new_flux = pobj.Fpsf * 10**(-0.4*(zp-ZP))
            flux.append(new_flux)
            new_eflux = pobj.eFpsf * 10**(-0.4*(zp-ZP))
            eflux.append(new_eflux)
            mag.append(pobj.mag)
            emag.append(pobj.mag_unc)
            filt.append(pobj.filter)

    jd = np.array(jd)
    flux = np.array(flux)
    eflux = np.array(eflux)
    mag = np.array(mag)
    emag = np.array(emag)
    filt = np.array(filt)
    
    return filt,jd,flux,eflux,mag,emag
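
# A hedged usage sketch (placeholder name, coordinates, and central JD):
filt, jd, flux, eflux, mag, emag = get_forced_phot(
    'ZTF18abclfee', 252.7534888, 25.8759437, jdobs=2458360.0)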
Example #12
def query_ipac(name):
    '''
    To query IPAC for the reference exposure epoch, you need to send the
    filterid, fieldid, ccdid, and qid. These can first be downloaded from
    IPAC using ztfquery.

    Please see
    https://irsa.ipac.caltech.edu/TAP/sync?query=select+column_name,description,unit,ucd,utype,datatype,principal,indexed+from+TAP_SCHEMA.columns+where+table_name=%27ztf.ztf_current_meta_ref%27+order+by+column_index&format=html
    for a description of the IPAC reference image columns
    '''

    # Step 0. As usual, generate directory to store data
    cwd = os.getcwd()
    targetdir = cwd + '/' + name + '/'
    try:
        os.stat(targetdir)
    except:
        os.mkdir(targetdir)

    try:
        os.stat(targetdir + 'lightcurves/')
    except:
        os.mkdir(targetdir + 'lightcurves/')

    # Step 1. IPAC does not know the ZTF name, so let's get the coordinates first
    ra1, dec1 = get_pos(name)
    np.savetxt(targetdir + '/coo_marshal.reg', [ra1, dec1])

    # Step 2. download information about all images that have ever covered this coordinate
    zquery = query.ZTFQuery()
    print("Querying for metadata...")
    # note: the unit of size is [degree], you may want to change it
    zquery.load_metadata(kind='sci', radec=[ra1, dec1], size=0.003)
    out = zquery.metatable
    final_out = out.sort_values(by=['obsjd'])
    final_out.to_csv(targetdir + '/irsafile.csv')

    # Step 3. get reference epoch for every row in the file `irsafile.csv`
    s = requests.Session()
    s.post(
        'https://irsa.ipac.caltech.edu/account/signon/login.do?josso_cmd=login',
        data={
            'josso_username': DEFAULT_AUTH_ipac[0],
            'josso_password': DEFAULT_AUTH_ipac[1]
        })

    mylc = Table([
        final_out['field'].values, final_out['ccdid'].values,
        final_out['fid'].values, final_out['qid'].values,
        final_out['obsjd'].values
    ],
                 names=['field', 'ccdid', 'fid', 'qid', 'obsjd'])
    # For each unique CCD quadrant (fcqf id), we only want to query once to save time
    fcqf = mylc['field'] * 10000 + mylc['ccdid'] * 100 + mylc[
        'qid'] * 10 + mylc['fid']
    mylc['fcqf'] = fcqf
    fcq_uniq = []
    for x in mylc['fcqf']:
        if x not in fcq_uniq:
            fcq_uniq.append(x)
    fcq_uniq = np.array(fcq_uniq)

    jdref_start = np.zeros(len(mylc))
    jdref_end = np.zeros(len(mylc))

    for j in range(len(fcq_uniq)):
        fcqnow = fcq_uniq[j]
        temp1 = fcqnow - fcqnow % 10000
        fieldnow = int(temp1 / 10000)  # np.int was removed in NumPy 1.24
        temp2 = fcqnow - temp1
        temp3 = temp2 - temp2 % 100
        ccdidnow = int(temp3 / 100)
        temp4 = temp2 - temp3
        qidnow = int((temp4 - temp4 % 10) / 10)
        filteridnow = temp4 - qidnow * 10
        if filteridnow == 1:
            fltidnow = 'zg'
        elif filteridnow == 2:
            fltidnow = 'zr'
        elif filteridnow == 3:
            fltidnow = 'zi'

        url = 'https://irsa.ipac.caltech.edu/ibe/search/ztf/products/ref?WHERE=field=' +\
                    '%d'%(fieldnow)+'%20AND%20ccdid='+'%d'%(ccdidnow) +\
                    '%20AND%20qid='+'%d'%(qidnow)+\
                    '%20AND%20filtercode=%27'+'%s'%(fltidnow)+'%27'
        r = requests.get(url, cookies=s.cookies)
        stringnow = r.content
        stnow = stringnow.decode("utf-8")
        tbnowj = asci.read(stnow)
        if len(tbnowj) == 0:
            print('no reference image: fcqf id = %d' % fcqnow)
        else:
            t0 = tbnowj['startobsdate'].data.data[0]
            t1 = tbnowj['endobsdate'].data.data[0]
            tstart = Time(t0.split(' ')[0] + 'T' + t0.split(' ')[1][:-3],
                          format='isot',
                          scale='utc')
            tend = Time(t1.split(' ')[0] + 'T' + t1.split(' ')[1][:-3],
                        format='isot',
                        scale='utc')

            ind = mylc['fcqf'] == fcqnow
            jdref_start[ind] = tstart.jd
            jdref_end[ind] = tend.jd

    mylc['jdref_start'] = jdref_start
    mylc['jdref_end'] = jdref_end
    mylc.write(targetdir + 'lightcurves/ipac_info_' + name + '.csv')
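
# The fcqf packing above (field*10000 + ccdid*100 + qid*10 + fid) can be
# inverted more compactly with divmod; a small equivalent sketch:
def decode_fcqf(fcqf):
    field, rem = divmod(fcqf, 10000)
    ccdid, rem = divmod(rem, 100)
    qid, fid = divmod(rem, 10)
    return field, ccdid, qid, fid

# e.g. decode_fcqf(7650421) -> (765, 4, 2, 1): field 765, ccdid 4, qid 2, fid 1 (zg)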