Example #1
def uncorrect_red_sources(phot_groups_filepath_tuple):

    """
	identify the 'red' sources by comparing ch1 to ch2 energies, then find their
	entries in the single-exposure catalogs and un-correct (divide) the
	array location dependent correction. red = ch1_flux < 3.6/4.5 * ch2_flux
	"""

    ch1_file, ch2_file = phot_groups_filepath_tuple

    root = os.path.abspath("../bcdphot")
    fp1 = os.path.join(root, "cal", "ch1_photcorr_ap_5.fits")
    fp2 = os.path.join(root, "cal", "ch2_photcorr_ap_5.fits")
    arrloc1 = pyfits.open(fp1)[0].data
    arrloc2 = pyfits.open(fp2)[0].data

    ch1 = json.load(open(ch1_file))
    ch2 = json.load(open(ch2_file))

    ra1, dec1 = zip(*[i[0][:2] for i in ch1.values()])
    ra2, dec2 = zip(*[i[0][:2] for i in ch2.values()])

    if len(ra1) < len(ra2):
        idx1, idx2, ds = spherematch(ra1, dec1, ra2, dec2, tolerance=2 / 3600.0)
    else:
        idx2, idx1, ds = spherematch(ra2, dec2, ra1, dec1, tolerance=2 / 3600.0)

    for k1, k2 in zip(np.array(ch1.keys())[idx1], np.array(ch2.keys())[idx2]):
        f1 = np.array(ch1[k1]).mean(0)[6]
        f2 = np.array(ch2[k2]).mean(0)[6]
        red = f1 < (3.6 / 4.5) * f2
        if red:
            for obs in ch1[k1]:
                x, y = obs[4:6]
                obs[6:] = [i / arrloc1[x, y] for i in obs[6:]]
            for obs in ch2[k2]:
                x, y = obs[4:6]
                obs[6:] = [i / arrloc2[x, y] for i in obs[6:]]

    out_path = ch1_file.replace("_arrayloc.json", "_arrayloc_cor.json")
    with open(out_path, "w") as w:
        json.dump(ch1, w, indent=4 * " ")
    print("created file: " + out_path)
    out_path = ch2_file.replace("_arrayloc.json", "_arrayloc_cor.json")
    with open(out_path, "w") as w:
        json.dump(ch2, w, indent=4 * " ")
    print("created file: " + out_path)
Example #2
def match_sdss(cat_path):
    for catfile in find_files(cat_path, "*merged.txt"):

        # read pipeline catalog
        print("\nreading catalog: {}".format(catfile))
        cat = pd.read_table(catfile, sep=' ')

        # retrieve SDSS data from ViZieR if not already downloaded
        ch = catfile.split('/')[-1].split('_')[1]
        outpath = catfile.replace('{}_merged.txt'.format(ch), 'sdss.vot')
        if not os.path.isfile(outpath):
            cntr_ra = np.median(cat.ra)
            cntr_dec = np.median(cat.dec)
            # get source from one corner of the mosaic to calculate radius
            c1 = (cat.ra.min(), cat.dec[cat.ra == cat.ra.min()].values[0])
            # make radius 10% bigger just to be on safe side
            radius = great_circle_distance(cntr_ra, cntr_dec, *c1) * 1.1
            url = get_url(cntr_ra, cntr_dec, radius)
            print("retrieving URL: {}".format(url))
            handler = urllib2.urlopen(url)
            raw = handler.read()
            with open(outpath, 'wb') as f:
                f.write(raw)
            print("created file: {}".format(outpath))

        # parse VOTable
        print("reading VOTable: {}".format(outpath))
        table = parse_single_table(outpath)

        # if this is one of the southern hemisphere regions, delete and continue
        if table.array.size == 0:
            os.remove(outpath)
            print("outside of SDSS coverage")
            continue

        # make sure no missing data
        for name in table.array.dtype.names:
            assert table.array[name].mask.sum() == 0

        # get unmasked array
        sdss = table.array.data

        # make sure sky coverage is big enough
        assert sdss['RAJ2000'].min() < cat.ra.min()
        assert sdss['RAJ2000'].max() > cat.ra.max()
        assert sdss['DEJ2000'].min() < cat.dec.min()
        assert sdss['DEJ2000'].max() > cat.dec.max()

        # match to catalog
        assert cat.shape[0] < sdss.shape[0]
        tol = 2 / 3600.
        idx1, idx2, ds = spherematch(cat.ra,
                                     cat.dec,
                                     sdss['RAJ2000'],
                                     sdss['DEJ2000'],
                                     tolerance=tol)
        print("matched {} out of {} sources with {} arcsec tolerance".format(
            ds.size, cat.shape[0], tol * 3600))

        # create vector of star/galaxy class (0=missing, 3=galaxy, 6=star)
        cl = np.zeros(cat.shape[0]).astype('int')
        cl[idx1] = sdss['cl'][idx2]

        # add the column to the dataset
        cat['cl'] = cl

        # write to new file
        outpath = catfile.replace('merged.txt', 'merged+sdss.txt')
        # fmt = ['%i']+['%0.8f']*2+['%.4e']*2+['%i']*2
        # hdr = ' '.join(names)+' cl'
        # np.savetxt(outpath, df.to_records(index=False), fmt=fmt, header=hdr)
        cat.to_csv(outpath, index=False, sep=' ', float_format='%.8f')
        print("created file: {}".format(outpath))
Example #3
def match_wise(cat_path, sdss=True):
    if sdss:
        search_pattern = "*merged+sdss.txt"
    else:
        search_pattern = "*merged.txt"

    for catfile in find_files(cat_path, search_pattern):

        # read pipeline catalog
        print("\nreading catalog: {}".format(catfile))
        cat = pd.read_table(catfile, sep=' ')

        # retrieve WISE data from ViZieR if not already downloaded
        ch = catfile.split('/')[-1].split('_')[1]
        if sdss:
            outpath = catfile.replace('{}_merged+sdss.txt'.format(ch),
                                      'wise.vot')
        else:
            outpath = catfile.replace('{}_merged.txt'.format(ch), 'wise.vot')
        if not os.path.isfile(outpath):
            cntr_ra = np.median(cat.ra)
            cntr_dec = np.median(cat.dec)
            # get source from one corner of the mosaic to calculate radius
            c1 = (cat.ra.min(), cat.dec[cat.ra == cat.ra.min()].values[0])
            # make radius 10% bigger just to be on safe side
            radius = great_circle_distance(cntr_ra, cntr_dec, *c1) * 1.1
            url = get_url(cntr_ra, cntr_dec, radius)
            print("retrieving URL: {}".format(url))
            handler = urllib2.urlopen(url)
            raw = handler.read()
            with open(outpath, 'wb') as f:
                f.write(raw)
            print("created file: {}".format(outpath))

        # parse VOTable
        print("reading VOTable: {}".format(outpath))
        table = parse_single_table(outpath)

        # if this is one of the southern hemisphere regions, delete and continue
        if table.array.size == 0:
            os.remove(outpath)
            print("no WISE coverage")
            continue

        # get unmasked array
        wise = table.array.data

        # make sure sky coverage is big enough
        assert wise['RAJ2000'].min() < cat.ra.min()
        assert wise['RAJ2000'].max() > cat.ra.max()
        assert wise['DEJ2000'].min() < cat.dec.min()
        assert wise['DEJ2000'].max() > cat.dec.max()

        # match to catalog
        tol = 2 / 3600.
        if cat.shape[0] < wise.shape[0]:
            idx1, idx2, ds = spherematch(cat.ra,
                                         cat.dec,
                                         wise['RAJ2000'],
                                         wise['DEJ2000'],
                                         tolerance=tol)
        else:
            idx2, idx1, ds = spherematch(wise['RAJ2000'],
                                         wise['DEJ2000'],
                                         cat.ra,
                                         cat.dec,
                                         tolerance=tol)
        print("matched {} out of {} sources with {} arcsec tolerance".format(
            ds.size, cat.shape[0], tol * 3600))

        # add WISE to the catalog
        if ch == '1':
            cat['W1mag'] = np.repeat(np.nan, cat.shape[0])
            cat['e_W1mag'] = np.repeat(np.nan, cat.shape[0])
            cat['W1mag'][idx1] = wise['W1mag'][idx2]
            cat['e_W1mag'][idx1] = wise['e_W1mag'][idx2]
        elif ch == '2':
            cat['W2mag'] = np.repeat(np.nan, cat.shape[0])
            cat['e_W2mag'] = np.repeat(np.nan, cat.shape[0])
            cat['W2mag'][idx1] = wise['W2mag'][idx2]
            cat['e_W2mag'][idx1] = wise['e_W2mag'][idx2]
        else:
            print("unexpected error adding WISE data")

        # write to new file
        outpath = catfile.replace('.txt', '+wise.csv')
        # fmt = ['%i']+['%0.8f']*2+['%.4e']*2+['%i']*2
        # hdr = ' '.join(names)+' cl'
        # np.savetxt(outpath, df.to_records(index=False), fmt=fmt, header=hdr)
        cat.to_csv(outpath, index=False, float_format='%.8f')
        print("created file: {}".format(outpath))
Example #4
def combine_hdr_catalogs(catalog_filepaths_tuple):

    """
	Takes a tuple containing the filepaths to the short and long exposure
	single-channel catalogs for a given region and channel. The result is
	a single catalog containing the union of all sources in both short and
	long exposure catalogs, with the short exposure measurements being used
	for the brighter sources, and the long exposure measurements used for
	the fainter sources. The cutoff between the two is determined by the
	parameter 'hdr_cutoff' in the metadata file and should be set to the
	saturation limit for the long exposure data (there are actually 2
	parameters, one for each channel: hdr_cutoff_ch1, hdr_cutoff_ch2).
	"""

    # read in the data
    long_file, short_file = catalog_filepaths_tuple
    work_dir = "/".join(short_file.split("/")[:-1])
    meta = json.load(open(work_dir + "/metadata.json"))
    header = "id ra dec flux unc n_obs"
    names = header.split()
    long_cat = np.recfromtxt(long_file, names=names)
    short_cat = np.recfromtxt(short_file, names=names)

    # fit a line to short ~ long
    idx_s = short_cat.flux < meta["short_cutoff"]
    idx_l = long_cat.flux < meta["long_cutoff"]
    short_flux = short_cat.flux[idx_s]
    long_flux = long_cat.flux[idx_l]
    short_ra, short_dec = short_cat.ra[idx_s], short_cat.dec[idx_s]
    long_ra, long_dec = long_cat.ra[idx_l], long_cat.dec[idx_l]
    idx1, idx2, ds = spherematch(short_ra, short_dec, long_ra, long_dec, tolerance=1 / 3600.0)
    y = short_flux[idx1]
    X = long_flux[idx2]
    slope = ordinary_least_squares(y, X)

    # divide short flux/unc by the slope so that it agrees with the long flux
    print("region {} correction value: {}".format(meta["name"], slope))
    short_cat.flux /= slope
    short_cat.unc /= slope

    # get everything brighter than the cutoff in short and combine with long
    idx_faint = long_cat.flux < meta["long_cutoff"]
    idx_bright = short_cat.flux > meta["long_cutoff"]

    # before concatenation of long and short subsets, check for any duplicates
    # (if they exist they should tend to have flux very close to the cutoff)
    ls, ss = long_cat[idx_faint], short_cat[idx_bright]
    idx_s, idx_l, ds = spherematch(ss.ra, ss.dec, ls.ra, ls.dec, tolerance=1 / 3600.0)
    dup_ids = []
    for idx in idx_l:
        if (ls.flux[idx] > 0.9 * meta["long_cutoff"]) & (ls.flux[idx] < meta["long_cutoff"]):
            dup_ids.append(ls.id[idx])

    # now use the ids of the duplicates to delete them from the long dataset
    for idx in dup_ids:
        ls = ls[ls.id != idx]

    data = np.concatenate([ls, ss])

    # eliminate sources with negative flux
    good = data["flux"] >= 0
    data = data[good]

    # apply global sigma clip using the value from setup.yaml
    snr = data["flux"] / data["unc"]
    good = snr >= meta["sigma_clip"]
    data = data[good]

    # write to disk
    header = "id ra dec flux unc n_obs"
    data = data[header.split()]
    idx = np.argsort(data["ra"])
    data = data[idx]
    data["id"] = np.arange(1, data.shape[0] + 1)
    fmt = ["%i"] + ["%0.8f"] * 2 + ["%.4e"] * 2 + ["%i"]
    out_name = "_".join([meta["name"], meta["channel"], "combined_hdr_catalog.txt"])
    out_path = "/".join(["/".join(work_dir.split("/")[:-1]), out_name])
    np.savetxt(out_path, data, fmt=fmt, header=header)
    print("created file: " + out_path)
Example #5
def match_sdss(cat_path):
	for catfile in find_files(cat_path, "*merged.txt"):

		# read pipeline catalog
		print("\nreading catalog: {}".format(catfile))
		cat = pd.read_table(catfile, sep=' ')

		# retrieve SDSS data from ViZieR if not already downloaded
		ch = catfile.split('/')[-1].split('_')[1]
		outpath = catfile.replace('{}_merged.txt'.format(ch), 'sdss.vot')
		if not os.path.isfile(outpath):
			cntr_ra = np.median(cat.ra)
			cntr_dec = np.median(cat.dec)
			# get source from one corner of the mosaic to calculate radius
			c1 = (cat.ra.min(), cat.dec[cat.ra==cat.ra.min()].values[0])
			# make radius 10% bigger just to be on safe side
			radius = great_circle_distance(cntr_ra, cntr_dec, *c1) * 1.1
			url = get_url(cntr_ra, cntr_dec, radius)
			print("retrieving URL: {}".format(url))
			handler = urllib2.urlopen(url)
			raw = handler.read()
			with open(outpath,'wb') as f:
				f.write(raw)
			print("created file: {}".format(outpath))

		# parse VOTable
		print("reading VOTable: {}".format(outpath))
		table = parse_single_table(outpath)

		# if this is one of the southern hemisphere regions, delete and continue
		if table.array.size == 0:
			os.remove(outpath)
			print("outside of SDSS coverage")
			continue

		# make sure no missing data
		for name in table.array.dtype.names:
			assert table.array[name].mask.sum() == 0

		# get unmasked array
		sdss = table.array.data

		# make sure sky coverage is big enough
		assert sdss['RAJ2000'].min() < cat.ra.min()
		assert sdss['RAJ2000'].max() > cat.ra.max()
		assert sdss['DEJ2000'].min() < cat.dec.min()
		assert sdss['DEJ2000'].max() > cat.dec.max()

		# match to catalog
		assert cat.shape[0] < sdss.shape[0]
		tol = 2/3600.
		idx1, idx2, ds = spherematch(cat.ra, cat.dec, 
			sdss['RAJ2000'], sdss['DEJ2000'], tolerance = tol)
		print("matched {} out of {} sources with {} arcsec tolerance".format(ds.size, 
			cat.shape[0], tol*3600))

		# create vector of star/galaxy class (0=missing, 3=galaxy, 6=star)
		cl = np.zeros(cat.shape[0]).astype('int')
		cl[idx1] = sdss['cl'][idx2]

		# add the column to the dataset
		cat['cl'] = cl

		# write to new file
		outpath = catfile.replace('merged.txt', 'merged+sdss.txt')
		# fmt = ['%i']+['%0.8f']*2+['%.4e']*2+['%i']*2
		# hdr = ' '.join(names)+' cl'
		# np.savetxt(outpath, df.to_records(index=False), fmt=fmt, header=hdr)
		cat.to_csv(outpath, index=False, sep=' ', float_format='%.8f')
		print("created file: {}".format(outpath))
Example #6
def merge_subarray(vg_dir, bcdphot_dir):
	out_dir = vg_dir.replace('clean','plots_catalogs')
	os.mkdir(out_dir)

	hdr_files = find_files(bcdphot_dir, '*combined_hdr_*xsc_cor.txt')
	# hdr_file = list(hdr_files)[0]
	for hdr_file in hdr_files:
		reg, ch = hdr_file.split('/')[-1].split('_')[:2]
		sub_file = '/'.join([vg_dir, "d{}_ch{}_agg.csv".format(reg, ch)])

		hdr_names = open(hdr_file).readline().split()[1:]
		hdr = np.recfromtxt(hdr_file, names=hdr_names)
		sub = np.recfromcsv(sub_file)
		# sub.flux *= 1e-3	# convert from uJy to mJy

		idx1, idx2, ds = spherematch(sub.ra, sub.dec, hdr.ra, hdr.dec, tolerance=3/3600.)
		df = pd.DataFrame({'sub_flux': sub.flux[idx1], 'hdr_flux':hdr.flux[idx2]})

		slope = fit_line(df, int(ch))
		with open("{}/linefits.txt".format(out_dir),'a') as f:
			f.write("{} {} {}\n".format(reg, ch, slope))

		fig = df.plot(x='hdr_flux',y='sub_flux', kind='scatter')
		fig.plot([0, fig.get_xlim()[1]], [0, slope * fig.get_xlim()[1]], 'r-')
		fig.set_title("region {} channel {}".format(reg, ch))
		fig.text(fig.get_xlim()[1]*0.2, fig.get_ylim()[1]*0.8, 
			"slope: {0:3f}".format(slope), fontsize=24)
		plt.savefig("{}/{}_{}_linefit.png".format(out_dir, reg, ch), dpi=300)
		plt.close()

		# now save the (uncorrected) matched data to disk
		sub_matched = pd.DataFrame.from_records(sub[idx1])
		# rename the columns
		cols = sub_matched.columns.tolist()
		cols_new = ['sub_'+i for i in cols]
		sub_matched.columns = cols_new
		# set hdr_matched dataframe index equal to sub_matched index, this is
		# necessary for concatenation using pandas.concat
		hdr_matched = pd.DataFrame.from_records(hdr[idx2]).set_index(sub_matched.index)
		# rename the columns
		cols = hdr_matched.columns.tolist()
		cols_new = ['hdr_'+i for i in cols]
		hdr_matched.columns = cols_new
		# concatenate
		concat = pd.concat([ sub_matched, hdr_matched ], 1)
		# # convert subarray flux to mJy
		# concat.sub_flux = concat.sub_flux*1e3
		# concat.sub_unc = concat.sub_unc*1e3
		concat.to_csv("{}/{}_{}_hdr_vs_sub.csv".format(out_dir, reg, ch), 
			index=False, float_format='%.8f')

		# now correct all the subarray flux values with the slope
		sub.flux /= slope

		# now merge hdr and subarray into one dataset:
		# want to keep all the hdr photometry that is not saturated, and
		# keep only the subarray photometry above the hdr saturation limit
		cutoff = get_cutoff(ch)
		bad = hdr.flux > cutoff
		hdr_subset = pd.DataFrame.from_records(hdr[~bad])
		bad = sub.flux < cutoff
		sub_subset = pd.DataFrame.from_records(sub[~bad])
		# add n_obs column to subarray data so it has same format as hdr
		sub_subset['n_obs'] = 4
		# add a column indicating whether it came from the subarray data
		hdr_subset['sub'] = np.zeros(hdr_subset.shape[0]).astype(int)
		sub_subset['sub'] = np.ones(sub_subset.shape[0]).astype(int)
		# concatenate them
		concat = pd.concat([ hdr_subset, sub_subset ], 0, ignore_index=True)
		# get rid of the 'id' field since it is no longer relevant
		# but add a column indicating if it was a 2MASS XSC measurement
		concat['xsc'] = np.zeros(concat.shape[0]).astype(int)
		concat.xsc[concat.id < 1] = 1
		concat = concat.drop('id', 1)
		# apply 1% flux reduction to correct for stray light (only to >100 mJy sources)
		bright = concat.flux > 100
		concat.flux[bright] *= 0.99
		concat.unc[bright] *= 0.99
		# write to disk
		concat.to_csv("{}/{}_{}_merged.txt".format(out_dir, reg, ch), 
			index=False, sep=' ', float_format='%.8f')
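
A minimal call sketch (both directories are hypothetical): vg_dir must contain the d{region}_ch{channel}_agg.csv subarray files and its path must contain 'clean', since the output directory is derived by replacing 'clean' with 'plots_catalogs'.

# Hypothetical directories; bcdphot_dir holds the *combined_hdr_*xsc_cor.txt catalogs.
merge_subarray("/data/subarray/clean", "/data/bcdphot")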
Example #7
def merge_subarray(vg_dir, bcdphot_dir):
    out_dir = vg_dir.replace('clean', 'plots_catalogs')
    os.mkdir(out_dir)

    hdr_files = find_files(bcdphot_dir, '*combined_hdr_*xsc_cor.txt')
    # hdr_file = list(hdr_files)[0]
    for hdr_file in hdr_files:
        reg, ch = hdr_file.split('/')[-1].split('_')[:2]
        sub_file = '/'.join([vg_dir, "d{}_ch{}_agg.csv".format(reg, ch)])

        hdr_names = open(hdr_file).readline().split()[1:]
        hdr = np.recfromtxt(hdr_file, names=hdr_names)
        sub = np.recfromcsv(sub_file)
        # sub.flux *= 1e-3	# convert from uJy to mJy

        idx1, idx2, ds = spherematch(sub.ra,
                                     sub.dec,
                                     hdr.ra,
                                     hdr.dec,
                                     tolerance=3 / 3600.)
        df = pd.DataFrame({
            'sub_flux': sub.flux[idx1],
            'hdr_flux': hdr.flux[idx2]
        })

        slope = fit_line(df, int(ch))
        with open("{}/linefits.txt".format(out_dir), 'a') as f:
            f.write("{} {} {}\n".format(reg, ch, slope))

        fig = df.plot(x='hdr_flux', y='sub_flux', kind='scatter')
        fig.plot([0, fig.get_xlim()[1]], [0, slope * fig.get_xlim()[1]], 'r-')
        fig.set_title("region {} channel {}".format(reg, ch))
        fig.text(fig.get_xlim()[1] * 0.2,
                 fig.get_ylim()[1] * 0.8,
                 "slope: {0:3f}".format(slope),
                 fontsize=24)
        plt.savefig("{}/{}_{}_linefit.png".format(out_dir, reg, ch), dpi=300)
        plt.close()

        # now save the (uncorrected) matched data to disk
        sub_matched = pd.DataFrame.from_records(sub[idx1])
        # rename the columns
        cols = sub_matched.columns.tolist()
        cols_new = ['sub_' + i for i in cols]
        sub_matched.columns = cols_new
        # set hdr_matched dataframe index equal to sub_matched index, this is
        # necessary for concatenation using pandas.concat
        hdr_matched = pd.DataFrame.from_records(hdr[idx2]).set_index(
            sub_matched.index)
        # rename the columns
        cols = hdr_matched.columns.tolist()
        cols_new = ['hdr_' + i for i in cols]
        hdr_matched.columns = cols_new
        # concatenate
        concat = pd.concat([sub_matched, hdr_matched], 1)
        # # convert subarray flux to mJy
        # concat.sub_flux = concat.sub_flux*1e3
        # concat.sub_unc = concat.sub_unc*1e3
        concat.to_csv("{}/{}_{}_hdr_vs_sub.csv".format(out_dir, reg, ch),
                      index=False,
                      float_format='%.8f')

        # now correct all the subarray flux values with the slope
        sub.flux /= slope

        # now merge hdr and subarray into one dataset:
        # want to keep all the hdr photometry that is not saturated, and
        # keep only the subarray photometry above the hdr saturation limit
        cutoff = get_cutoff(ch)
        bad = hdr.flux > cutoff
        hdr_subset = pd.DataFrame.from_records(hdr[~bad])
        bad = sub.flux < cutoff
        sub_subset = pd.DataFrame.from_records(sub[~bad])
        # add n_obs column to subarray data so it has same format as hdr
        sub_subset['n_obs'] = 4
        # add a column indicating whether it came from the subarray data
        hdr_subset['sub'] = np.zeros(hdr_subset.shape[0]).astype(int)
        sub_subset['sub'] = np.ones(sub_subset.shape[0]).astype(int)
        # concatenate them
        concat = pd.concat([hdr_subset, sub_subset], 0, ignore_index=True)
        # get rid of the 'id' field since it is no longer relevant
        # but add a column indicating if it was a 2MASS XSC measurement
        concat['xsc'] = np.zeros(concat.shape[0]).astype(int)
        concat.xsc[concat.id < 1] = 1
        concat = concat.drop('id', 1)
        # apply 1% flux reduction to correct for stray light (only to >100 mJy sources)
        bright = concat.flux > 100
        concat.flux[bright] *= 0.99
        concat.unc[bright] *= 0.99
        # write to disk
        concat.to_csv("{}/{}_{}_merged.txt".format(out_dir, reg, ch),
                      index=False,
                      sep=' ',
                      float_format='%.8f')
Example #8
def match_wise(cat_path, sdss=True):
	if sdss:
		search_pattern = "*merged+sdss.txt"
	else:
		search_pattern = "*merged.txt"

	for catfile in find_files(cat_path, search_pattern):

		# read pipeline catalog
		print("\nreading catalog: {}".format(catfile))
		cat = pd.read_table(catfile, sep=' ')

		# retrieve WISE data from ViZieR if not already downloaded
		ch = catfile.split('/')[-1].split('_')[1]
		if sdss:
			outpath = catfile.replace('{}_merged+sdss.txt'.format(ch), 'wise.vot')
		else:
			outpath = catfile.replace('{}_merged.txt'.format(ch), 'wise.vot')
		if not os.path.isfile(outpath):
			cntr_ra = np.median(cat.ra)
			cntr_dec = np.median(cat.dec)
			# get source from one corner of the mosaic to calculate radius
			c1 = (cat.ra.min(), cat.dec[cat.ra==cat.ra.min()].values[0])
			# make radius 10% bigger just to be on safe side
			radius = great_circle_distance(cntr_ra, cntr_dec, *c1) * 1.1
			url = get_url(cntr_ra, cntr_dec, radius)
			print("retrieving URL: {}".format(url))
			handler = urllib2.urlopen(url)
			raw = handler.read()
			with open(outpath,'wb') as f:
				f.write(raw)
			print("created file: {}".format(outpath))

		# parse VOTable
		print("reading VOTable: {}".format(outpath))
		table = parse_single_table(outpath)

		# if this is one of the southern hemisphere regions, delete and continue
		if table.array.size == 0:
			os.remove(outpath)
			print("no WISE coverage")
			continue

		# get unmasked array
		wise = table.array.data

		# make sure sky coverage is big enough
		assert wise['RAJ2000'].min() < cat.ra.min()
		assert wise['RAJ2000'].max() > cat.ra.max()
		assert wise['DEJ2000'].min() < cat.dec.min()
		assert wise['DEJ2000'].max() > cat.dec.max()

		# match to catalog
		tol = 2/3600.
		if cat.shape[0] < wise.shape[0]:
			idx1, idx2, ds = spherematch(cat.ra, cat.dec, 
				wise['RAJ2000'], wise['DEJ2000'], tolerance = tol)
		else:
			idx2, idx1, ds = spherematch(wise['RAJ2000'], wise['DEJ2000'],
				cat.ra, cat.dec, tolerance = tol)
		print("matched {} out of {} sources with {} arcsec tolerance".format(ds.size, 
			cat.shape[0], tol*3600))

		# add WISE to the catalog
		if ch == '1':
			cat['W1mag'] = np.repeat(np.nan, cat.shape[0])
			cat['e_W1mag'] = np.repeat(np.nan, cat.shape[0])
			cat['W1mag'][idx1] = wise['W1mag'][idx2]
			cat['e_W1mag'][idx1] = wise['e_W1mag'][idx2]
		elif ch == '2':
			cat['W2mag'] = np.repeat(np.nan, cat.shape[0])
			cat['e_W2mag'] = np.repeat(np.nan, cat.shape[0])
			cat['W2mag'][idx1] = wise['W2mag'][idx2]
			cat['e_W2mag'][idx1] = wise['e_W2mag'][idx2]
		else:
			print("unexpected error adding WISE data")

		# write to new file
		outpath = catfile.replace('.txt', '+wise.csv')
		# fmt = ['%i']+['%0.8f']*2+['%.4e']*2+['%i']*2
		# hdr = ' '.join(names)+' cl'
		# np.savetxt(outpath, df.to_records(index=False), fmt=fmt, header=hdr)
		cat.to_csv(outpath, index=False, float_format='%.8f')
		print("created file: {}".format(outpath))