def do_internal(catalog):
    """Load events from files in the 'internal' repository, and save them."""
    task_str = catalog.get_current_task_str()
    pattern = os.path.join(catalog.get_current_task_repo(), '*.json')
    json_files = glob(pattern)
    catalog.log.debug("found {} files matching '{}'".format(
        len(json_files), pattern))
    for fname in pbar_strings(json_files, task_str):
        loaded = Supernova.init_from_file(
            catalog, path=fname, clean=True, merge=True)

        loaded_name = loaded[SUPERNOVA.NAME]

        # Look for a pre-existing entry that shares any alias with the
        # freshly loaded one; stop at the first match.
        match = None
        for alias in loaded.get_aliases():
            if not catalog.entry_exists(alias):
                continue
            match = catalog.get_preferred_name(alias)
            # A stub entry must be fully loaded before it can be merged into.
            if catalog.entries[match]._stub:
                catalog.add_entry(match)
            break

        if match:
            # Merge the loaded data into a copy of the existing entry.
            target = deepcopy(catalog.entries[match])
            catalog.copy_entry_to_entry(loaded, target)
            catalog.entries[match] = target
        else:
            catalog.entries[loaded_name] = loaded

        catalog.journal_entries()

    return
def do_internal(catalog):
    """Load events from files in the 'internal' repository, and save them."""
    task_str = catalog.get_current_task_str()
    pattern = os.path.join(catalog.get_current_task_repo(), '*.json')
    json_files = glob(pattern)
    catalog.log.debug("found {} files matching '{}'".format(
        len(json_files), pattern))
    for fname in pbar_strings(json_files, task_str):
        loaded = FastStars.init_from_file(
            catalog, path=fname, clean=True, merge=True)

        loaded_name = loaded[FASTSTARS.NAME]

        # Find any existing entry sharing an alias with the loaded one.
        match = None
        for alias in loaded.get_aliases():
            if not catalog.entry_exists(alias):
                continue
            match = catalog.get_preferred_name(alias)
            # Stubs need to be loaded in full before merging into them.
            if catalog.entries[match]._stub:
                catalog.add_entry(match)
            break

        if match:
            # Merge the new data into a copy of the matched entry.
            target = deepcopy(catalog.entries[match])
            catalog.copy_entry_to_entry(loaded, target)
            catalog.entries[match] = target
        else:
            catalog.entries[loaded_name] = loaded

        catalog.journal_entries()

    return
# Example #3 (extraction artifact — was the bare lines "Beispiel #3" / "0")
def do_csp_photo(catalog):
    """Import CSP (Carnegie Supernova Project) photometry.

    Reads tab-separated ``CSP/*.dat`` files.  Header lines start with '#';
    the third header line (r == 2) carries the CMB-frame redshift, RA, and
    Dec.  Data rows hold an MJD followed by alternating magnitude and
    magnitude-error columns, one pair per band in ``cspbands``.
    """
    import re
    cspbands = ['u', 'B', 'V', 'g', 'r', 'i', 'Y', 'J', 'H', 'K']
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'CSP/*.dat'))
    task_str = catalog.get_current_task_str()
    for fname in pbar_strings(file_names, task_str):
        eventname = os.path.basename(os.path.splitext(fname)[0])
        eventparts = eventname.split('opt+')
        name = clean_snname(eventparts[0])
        name = catalog.add_entry(name)

        reference = 'Carnegie Supernova Project'
        refbib = '2010AJ....139..519C'
        refurl = 'http://csp.obs.carnegiescience.edu/data'
        source = catalog.entries[name].add_source(bibcode=refbib,
                                                  name=reference,
                                                  url=refurl)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

        # Discovery year: first run of digits in the SN name.
        year = re.findall(r'\d+', name)[0]
        catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, year,
                                           source)

        # FIX: open via a context manager so each data file is closed; the
        # original passed open() straight to csv.reader and leaked one file
        # handle per file.
        with open(fname, 'r') as fobj:
            tsvin = csv.reader(fobj, delimiter='\t', skipinitialspace=True)
            for r, row in enumerate(tsvin):
                if len(row) > 0 and row[0][0] == "#":
                    if r == 2:
                        redz = row[0].split(' ')[-1]
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.REDSHIFT, redz, source, kind='cmb')
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.RA, row[1].split(' ')[-1], source)
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DEC, row[2].split(' ')[-1], source)
                    continue
                for v, val in enumerate(row):
                    if v == 0:
                        mjd = val
                    elif v % 2 != 0:
                        # Magnitudes >= 90 are skipped — presumably
                        # missing-data sentinels; confirm against CSP format.
                        if float(row[v]) < 90.0:
                            catalog.entries[name].add_photometry(
                                time=mjd,
                                u_time='MJD',
                                observatory='LCO',
                                band=cspbands[(v - 1) // 2],
                                system='CSP',
                                magnitude=row[v],
                                e_magnitude=row[v + 1],
                                source=source)

    catalog.journal_entries()
    return
# Example #4 (extraction artifact — was the bare lines "Beispiel #4" / "0")
def do_csp_spectra(catalog):
    """Import CSP spectra.

    Spectrum file names look like ``<snid>_..._<telescope>_<instrument>.<ext>``
    ('.txt' files are skipped).  '#'-prefixed header lines carry the JD and
    redshift; spectral data start at row 7.
    """
    oldname = ''
    task_str = catalog.get_current_task_str()
    file_names = glob(os.path.join(catalog.get_current_task_repo(), 'CSP/*'))
    for fi, fname in enumerate(pbar_strings(file_names, task_str)):
        filename = os.path.basename(fname)
        sfile = filename.split('.')
        if sfile[1] == 'txt':
            continue
        sfile = sfile[0]
        fileparts = sfile.split('_')
        # File names abbreviate the year, e.g. 'SN04xx' -> 'SN2004xx'.
        name = 'SN20' + fileparts[0][2:]
        name = catalog.get_preferred_name(name)
        # Journal the previous event before moving on to a new one.
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        telescope = fileparts[-2]
        instrument = fileparts[-1]
        source = catalog.entries[name].add_source(
            bibcode='2013ApJ...773...53F')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

        # FIX: open inside a context manager so the handle is closed; the
        # original passed open() straight to csv.reader and leaked it.
        specdata = []
        with open(fname, 'r') as fobj:
            data = csv.reader(fobj, delimiter=' ', skipinitialspace=True)
            for r, row in enumerate(data):
                if row[0] == '#JDate_of_observation:':
                    jd = row[1].strip()
                    time = str(jd_to_mjd(Decimal(jd)))
                elif row[0] == '#Redshift:':
                    catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                                       row[1].strip(),
                                                       source)
                if r < 7:
                    continue
                specdata.append(
                    list(filter(None, [x.strip(' ') for x in row])))
        # Transpose rows -> columns: column 0 wavelengths, column 1 fluxes.
        specdata = [list(i) for i in zip(*specdata)]
        wavelengths = specdata[0]
        fluxes = specdata[1]

        # NOTE(review): 'time' is only bound if the JD header line was
        # present — a file lacking it would raise NameError here.
        catalog.entries[name].add_spectrum(u_wavelengths='Angstrom',
                                           u_fluxes='erg/s/cm^2/Angstrom',
                                           u_time='MJD',
                                           time=time,
                                           wavelengths=wavelengths,
                                           fluxes=fluxes,
                                           telescope=telescope,
                                           instrument=instrument,
                                           source=source,
                                           deredshifted=True,
                                           filename=filename)
        if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:
            break

    catalog.journal_entries()
    return
# Example #5 (extraction artifact — was the bare lines "Beispiel #5" / "0")
def do_csp_spectra(catalog):
    """Import CSP spectra.

    Duplicate variant of the CSP spectra importer: file names encode the
    abbreviated SN id plus telescope and instrument; '#'-prefixed headers
    carry the JD and redshift, with spectral data starting at row 7.
    """
    oldname = ''
    task_str = catalog.get_current_task_str()
    file_names = glob(os.path.join(catalog.get_current_task_repo(), 'CSP/*'))
    for fi, fname in enumerate(pbar_strings(file_names, task_str)):
        filename = os.path.basename(fname)
        sfile = filename.split('.')
        if sfile[1] == 'txt':
            continue
        sfile = sfile[0]
        fileparts = sfile.split('_')
        # Expand the abbreviated year: 'SN04xx' -> 'SN2004xx'.
        name = 'SN20' + fileparts[0][2:]
        name = catalog.get_preferred_name(name)
        # Flush the previous event before starting a new one.
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        telescope = fileparts[-2]
        instrument = fileparts[-1]
        source = catalog.entries[name].add_source(
            bibcode='2013ApJ...773...53F')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

        # FIX: context manager closes the file; the original leaked the
        # handle returned by open().
        specdata = []
        with open(fname, 'r') as fobj:
            data = csv.reader(fobj, delimiter=' ', skipinitialspace=True)
            for r, row in enumerate(data):
                if row[0] == '#JDate_of_observation:':
                    jd = row[1].strip()
                    time = str(jd_to_mjd(Decimal(jd)))
                elif row[0] == '#Redshift:':
                    catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                                       row[1].strip(),
                                                       source)
                if r < 7:
                    continue
                specdata.append(
                    list(filter(None, [x.strip(' ') for x in row])))
        # Transpose rows -> columns: column 0 wavelengths, column 1 fluxes.
        specdata = [list(i) for i in zip(*specdata)]
        wavelengths = specdata[0]
        fluxes = specdata[1]

        # NOTE(review): 'time' is only bound if the JD header was present.
        catalog.entries[name].add_spectrum(
            u_wavelengths='Angstrom', u_fluxes='erg/s/cm^2/Angstrom',
            u_time='MJD',
            time=time, wavelengths=wavelengths, fluxes=fluxes,
            telescope=telescope, instrument=instrument,
            source=source, deredshifted=True, filename=filename)
        if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:
            break

    catalog.journal_entries()
    return
# Example #6 (extraction artifact — was the bare lines "Beispiel #6" / "0")
def do_csp_photo(catalog):
    """Import CSP photometry.

    Variant that reads the band names from the file's 'MJD ... +/- ...'
    header line instead of a hard-coded list.  '#'-prefixed header lines
    carry the redshift, RA, and Dec; data rows hold an MJD followed by
    alternating magnitude / magnitude-error columns.
    """
    import re
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'CSP/*.dat'))
    task_str = catalog.get_current_task_str()
    for fname in pbar_strings(file_names, task_str):
        eventname = os.path.basename(os.path.splitext(fname)[0])
        eventparts = eventname.split('opt+')
        name = clean_snname(eventparts[0])
        name = catalog.add_entry(name)

        reference = 'Carnegie Supernova Project'
        refbib = '2010AJ....139..519C'
        refurl = 'http://csp.obs.carnegiescience.edu/data'
        source = catalog.entries[name].add_source(
            bibcode=refbib, name=reference, url=refurl)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

        # Discovery year: first run of digits in the SN name.
        year = re.findall(r'\d+', name)[0]
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE, year, source)

        # FIX: context manager closes each data file; the original passed
        # open() straight to csv.reader and leaked the handle.
        with open(fname, 'r') as fobj:
            tsvin = csv.reader(fobj, delimiter='\t', skipinitialspace=True)
            for r, row in enumerate(tsvin):
                if len(row) > 0 and row[0][0] == "#":
                    if r == 2:
                        redz = row[0].split(' ')[-1]
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.REDSHIFT, redz, source, kind='cmb')
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.RA, row[1].split(' ')[-1], source)
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DEC, row[2].split(' ')[-1], source)
                    # Band names come from the 'MJD' header line.
                    # NOTE(review): if a file lacks this line, the data
                    # loop below raises NameError on 'cspbands' (same as
                    # the original behavior).
                    if 'MJD' in ''.join(row):
                        cspbands = list(filter(None, [
                            x.strip()
                            for x in
                            ''.join(row).split('MJD')[-1].split('+/-')]))
                    continue
                for v, val in enumerate(row):
                    if v == 0:
                        mjd = val
                    elif v % 2 != 0:
                        # Magnitudes >= 90 are skipped — presumably
                        # missing-data sentinels.
                        if float(row[v]) < 90.0:
                            catalog.entries[name].add_photometry(
                                time=mjd, u_time='MJD', observatory='LCO',
                                band=cspbands[(v - 1) // 2],
                                system='CSP', magnitude=row[v],
                                e_magnitude=row[v + 1], source=source)

    catalog.journal_entries()
    return
def do_internal(catalog):
    """Load events from files in the 'internal' repository, and save them."""
    task_str = catalog.get_current_task_str()
    pattern = os.path.join(catalog.get_current_task_repo(), '*.json')
    json_files = glob(pattern)
    catalog.log.debug("found {} files matching '{}'".format(
        len(json_files), pattern))
    for fname in pbar_strings(json_files, task_str):
        event = Cataclysmic.init_from_file(catalog, path=fname, clean=True)
        # Register (or overwrite) the event under its canonical name.
        catalog.entries[event[CATACLYSMIC.NAME]] = event

    return
def do_internal(catalog):
    """Load events from files in the 'internal' repository, and save them.
    """
    task_str = catalog.get_current_task_str()
    pattern = os.path.join(catalog.get_current_task_repo(), '*.json')
    json_files = glob(pattern)
    catalog.log.debug("found {} files matching '{}'".format(
        len(json_files), pattern))
    for fname in pbar_strings(json_files, task_str):
        event = TidalDisruption.init_from_file(catalog, path=fname,
                                               clean=True)
        # Register (or overwrite) the event under its canonical name.
        catalog.entries[event[TIDALDISRUPTION.NAME]] = event

    return
def do_external_radio(catalog):
    """Import radio photometry from per-event text files.

    Each '*.txt' file is named after its event.  Leading lines beginning
    with '(' declare sources as ``(key) ... bibcode``; the three lines
    after the source block are skipped as a header; remaining lines are
    whitespace-separated data columns (0: MJD, 2: frequency in GHz,
    3: flux density in µJy, 4: flux-density error, 5: instrument,
    6: source key; column 1 unused here — meaning unconfirmed).
    """
    task_str = catalog.get_current_task_str()
    path_pattern = os.path.join(catalog.get_current_task_repo(), '*.txt')
    for datafile in pbar_strings(glob(path_pattern), task_str):
        oldname = os.path.basename(datafile).split('.')[0]
        name = catalog.add_entry(oldname)
        # Maps the file's source key (e.g. '(1)') to an added source.
        radiosourcedict = OrderedDict()
        with open(datafile, 'r') as ff:
            for li, line in enumerate(
                    [xx.strip() for xx in ff.read().splitlines()]):
                if line.startswith('(') and li <= len(radiosourcedict):
                    # Source declaration: first token is the key, last
                    # token is the bibcode.
                    key = line.split()[0]
                    bibc = line.split()[-1]
                    radiosourcedict[key] = catalog.entries[name].add_source(
                        bibcode=bibc)
                elif li in [xx + len(radiosourcedict) for xx in range(3)]:
                    # Three header lines directly after the source block.
                    continue
                else:
                    cols = list(filter(None, line.split()))
                    source = radiosourcedict[cols[6]]
                    # A zero flux-density error marks an upper limit.
                    if float(cols[4]) == 0.0:
                        eflux = ''
                        upp = True
                    else:
                        eflux = cols[4]
                        upp = False
                    photodict = {
                        PHOTOMETRY.TIME: cols[0],
                        PHOTOMETRY.FREQUENCY: cols[2],
                        PHOTOMETRY.U_FREQUENCY: 'GHz',
                        PHOTOMETRY.FLUX_DENSITY: cols[3],
                        PHOTOMETRY.E_FLUX_DENSITY: eflux,
                        PHOTOMETRY.U_FLUX_DENSITY: 'µJy',
                        PHOTOMETRY.UPPER_LIMIT: upp,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.INSTRUMENT: cols[5],
                        PHOTOMETRY.SOURCE: source
                    }
                    catalog.entries[name].add_photometry(**photodict)
                    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,
                                                       oldname, source)

    catalog.journal_entries()
    return
# Example #10 (extraction artifact — was the bare lines "Beispiel #10" / "0")
def do_external_radio(catalog):
    """Import radio photometry from per-event text files (duplicate variant).

    Each '*.txt' file is named after its event.  Leading lines beginning
    with '(' declare sources as ``(key) ... bibcode``; the three lines
    after the source block are skipped as a header; remaining lines are
    whitespace-separated data columns (0: MJD, 2: frequency in GHz,
    3: flux density in µJy, 4: flux-density error, 5: instrument,
    6: source key; column 1 unused here — meaning unconfirmed).
    """
    task_str = catalog.get_current_task_str()
    path_pattern = os.path.join(catalog.get_current_task_repo(), '*.txt')
    for datafile in pbar_strings(glob(path_pattern), task_str):
        oldname = os.path.basename(datafile).split('.')[0]
        name = catalog.add_entry(oldname)
        # Maps the file's source key (e.g. '(1)') to an added source.
        radiosourcedict = OrderedDict()
        with open(datafile, 'r') as ff:
            for li, line in enumerate(
                [xx.strip() for xx in ff.read().splitlines()]):
                if line.startswith('(') and li <= len(radiosourcedict):
                    # Source declaration: first token is the key, last
                    # token is the bibcode.
                    key = line.split()[0]
                    bibc = line.split()[-1]
                    radiosourcedict[key] = catalog.entries[name].add_source(
                        bibcode=bibc)
                elif li in [xx + len(radiosourcedict) for xx in range(3)]:
                    # Three header lines directly after the source block.
                    continue
                else:
                    cols = list(filter(None, line.split()))
                    source = radiosourcedict[cols[6]]
                    # A zero flux-density error marks an upper limit.
                    if float(cols[4]) == 0.0:
                        eflux = ''
                        upp = True
                    else:
                        eflux = cols[4]
                        upp = False
                    photodict = {
                        PHOTOMETRY.TIME: cols[0],
                        PHOTOMETRY.FREQUENCY: cols[2],
                        PHOTOMETRY.U_FREQUENCY: 'GHz',
                        PHOTOMETRY.FLUX_DENSITY: cols[3],
                        PHOTOMETRY.E_FLUX_DENSITY: eflux,
                        PHOTOMETRY.U_FLUX_DENSITY: 'µJy',
                        PHOTOMETRY.UPPER_LIMIT: upp,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.INSTRUMENT: cols[5],
                        PHOTOMETRY.SOURCE: source
                    }
                    catalog.entries[name].add_photometry(**photodict)
                    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,
                                                       oldname, source)

    catalog.journal_entries()
    return
# Example #11 (extraction artifact — was the bare lines "Beispiel #11" / "0")
def do_external_xray(catalog):
    """Import supernova X-ray data.

    Each '*.txt' file is named after its event.  Line 0 ends with the
    source bibcode, lines 1-3 are a header, and the remaining rows are
    whitespace-separated fixed-position columns (see ``photodict`` below
    for the mapping; the exact file format should be confirmed against
    the data files).
    """
    task_str = catalog.get_current_task_str()
    path_pattern = os.path.join(catalog.get_current_task_repo(), '*.txt')
    for datafile in pbar_strings(glob(path_pattern), task_str):
        oldname = os.path.basename(datafile).split('.')[0]
        name = catalog.add_entry(oldname)
        with open(datafile, 'r') as ff:
            for li, line in enumerate(ff.read().splitlines()):
                if li == 0:
                    # First line's last token is the bibcode.
                    source = catalog.entries[name].add_source(
                        bibcode=line.split()[-1])
                elif li in [1, 2, 3]:
                    # Header lines.
                    continue
                else:
                    cols = list(filter(None, line.split()))
                    # Columns 0-1: time range (MJD); 2-3: energy band
                    # (keV); a negative column 5 marks an upper limit.
                    photodict = {
                        PHOTOMETRY.TIME: cols[:2],
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.ENERGY: cols[2:4],
                        PHOTOMETRY.U_ENERGY: 'keV',
                        PHOTOMETRY.COUNT_RATE: cols[4],
                        PHOTOMETRY.FLUX: cols[6],
                        PHOTOMETRY.UNABSORBED_FLUX: cols[8],
                        PHOTOMETRY.U_FLUX: 'ergs/s/cm^2',
                        PHOTOMETRY.PHOTON_INDEX: cols[15],
                        PHOTOMETRY.INSTRUMENT: cols[17],
                        PHOTOMETRY.NHMW: cols[11],
                        PHOTOMETRY.UPPER_LIMIT: (float(cols[5]) < 0),
                        PHOTOMETRY.SOURCE: source
                    }
                    catalog.entries[name].add_photometry(**photodict)
                    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,
                                                       oldname, source)

    catalog.journal_entries()
    return
def do_external_xray(catalog):
    """Import supernova X-ray data (duplicate variant).

    Each '*.txt' file is named after its event.  Line 0 ends with the
    source bibcode, lines 1-3 are a header, and the remaining rows are
    whitespace-separated fixed-position columns (see ``photodict`` below
    for the mapping; the exact file format should be confirmed against
    the data files).
    """
    task_str = catalog.get_current_task_str()
    path_pattern = os.path.join(catalog.get_current_task_repo(), '*.txt')
    for datafile in pbar_strings(glob(path_pattern), task_str):
        oldname = os.path.basename(datafile).split('.')[0]
        name = catalog.add_entry(oldname)
        with open(datafile, 'r') as ff:
            for li, line in enumerate(ff.read().splitlines()):
                if li == 0:
                    # First line's last token is the bibcode.
                    source = catalog.entries[name].add_source(
                        bibcode=line.split()[-1])
                elif li in [1, 2, 3]:
                    # Header lines.
                    continue
                else:
                    cols = list(filter(None, line.split()))
                    # Columns 0-1: time range (MJD); 2-3: energy band
                    # (keV); a negative column 5 marks an upper limit.
                    photodict = {
                        PHOTOMETRY.TIME: cols[:2],
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.ENERGY: cols[2:4],
                        PHOTOMETRY.U_ENERGY: 'keV',
                        PHOTOMETRY.COUNT_RATE: cols[4],
                        PHOTOMETRY.FLUX: cols[6],
                        PHOTOMETRY.UNABSORBED_FLUX: cols[8],
                        PHOTOMETRY.U_FLUX: 'ergs/s/cm^2',
                        PHOTOMETRY.PHOTON_INDEX: cols[15],
                        PHOTOMETRY.INSTRUMENT: cols[17],
                        PHOTOMETRY.NHMW: cols[11],
                        PHOTOMETRY.UPPER_LIMIT: (float(cols[5]) < 0),
                        PHOTOMETRY.SOURCE: source
                    }
                    catalog.entries[name].add_photometry(**photodict)
                    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,
                                                       oldname, source)

    catalog.journal_entries()
    return
# Example #13 (extraction artifact — was the bare lines "Beispiel #13" / "0")
def do_snls_spectra(catalog):
    """Import SNLS spectra.

    Observation dates come from Vizier table J/A+A/507/85/table1; the
    spectra themselves are read from 'SNLS/*' files whose '@'-prefixed
    header lines carry metadata, with spectral data starting at row 14.
    """
    task_str = catalog.get_current_task_str()
    result = Vizier.get_catalogs('J/A+A/507/85/table1')
    table = result[list(result.keys())[0]]
    table.convert_bytestring_to_unicode()
    # Map 'SNLS-<id>' -> observation date (MJD) for later lookup.
    datedict = {}
    for row in table:
        datedict['SNLS-' + row['SN']] = str(astrotime(row['Date']).mjd)

    oldname = ''
    file_names = glob(os.path.join(catalog.get_current_task_repo(), 'SNLS/*'))
    for fi, fname in enumerate(pbar_strings(file_names, task_str)):
        filename = os.path.basename(fname)
        fileparts = filename.split('_')
        name = 'SNLS-' + fileparts[1]
        name = catalog.get_preferred_name(name)
        # Journal the previous event before starting a new one.
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2009A&A...507...85B')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

        catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE,
                                           '20' + fileparts[1][:2], source)

        # FIX: the original opened the file without ever closing it; use a
        # context manager so the handle is released per file.
        specdata = []
        with open(fname, 'r') as fobj:
            data = csv.reader(fobj, delimiter=' ', skipinitialspace=True)
            for r, row in enumerate(data):
                if row[0] == '@TELESCOPE':
                    telescope = row[1].strip()
                elif row[0] == '@REDSHIFT':
                    catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                                       row[1].strip(),
                                                       source)
                if r < 14:
                    continue
                specdata.append(
                    list(filter(None, [x.strip(' \t') for x in row])))
        # Transpose rows -> columns; column 1 is wavelength, 2 flux, 3 error.
        specdata = [list(i) for i in zip(*specdata)]
        wavelengths = specdata[1]

        # Rescale the raw values by 1e-16 (keeping the significant digits
        # of the original value).
        fluxes = [
            pretty_num(float(x) * 1.e-16, sig=get_sig_digits(x))
            for x in specdata[2]
        ]
        # FIX: removed the stale "this isnt being used" comment — errors
        # ARE passed through in specdict below.
        errors = [
            pretty_num(float(x) * 1.e-16, sig=get_sig_digits(x))
            for x in specdata[3]
        ]

        fluxunit = 'erg/s/cm^2/Angstrom'

        # NOTE(review): 'telescope' is only bound if a '@TELESCOPE' header
        # line was present in the file.
        specdict = {
            SPECTRUM.WAVELENGTHS: wavelengths,
            SPECTRUM.FLUXES: fluxes,
            SPECTRUM.ERRORS: errors,
            SPECTRUM.U_WAVELENGTHS: 'Angstrom',
            SPECTRUM.U_FLUXES: fluxunit,
            SPECTRUM.U_ERRORS: fluxunit,
            SPECTRUM.TELESCOPE: telescope,
            SPECTRUM.FILENAME: filename,
            SPECTRUM.SOURCE: source
        }
        if name in datedict:
            specdict[SPECTRUM.TIME] = datedict[name]
            specdict[SPECTRUM.U_TIME] = 'MJD'
        catalog.entries[name].add_spectrum(**specdict)
        if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:
            break
    catalog.journal_entries()
    return
# Example #14 (extraction artifact — was the bare lines "Beispiel #14" / "0")
def do_ascii(catalog):
    """Process ASCII files that were extracted from datatables appearing in
    published works.
    """
    task_str = catalog.get_current_task_str()

    # 2006ApJ...645..841N
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2006ApJ...645..841N-table3.csv')
    tsvin = list(csv.reader(open(file_path, 'r'), delimiter=','))
    for ri, row in enumerate(pbar(tsvin, task_str)):
        name = 'SNLS-' + row[0]
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2006ApJ...645..841N')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT, row[1], source, kind='spectroscopic')
        astrot = astrotime(float(row[4]) + 2450000., format='jd').datetime
        date_str = make_date_string(astrot.year, astrot.month, astrot.day)
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE, date_str, source)
    catalog.journal_entries()

    # Anderson 2014
    file_names = list(
        glob(os.path.join(
            catalog.get_current_task_repo(), 'SNII_anderson2014/*.dat')))
    for datafile in pbar_strings(file_names, task_str):
        basename = os.path.basename(datafile)
        if not is_number(basename[:2]):
            continue
        if basename == '0210_V.dat':
            name = 'SN0210'
        else:
            name = ('SN20' if int(basename[:2]) <
                    50 else 'SN19') + basename.split('_')[0]
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2014ApJ...786...67A')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

        if name in ['SN1999ca', 'SN2003dq', 'SN2008aw']:
            system = 'Swope'
        else:
            system = 'Landolt'

        with open(datafile, 'r') as ff:
            tsvin = csv.reader(ff, delimiter=' ', skipinitialspace=True)
            for row in tsvin:
                if not row[0]:
                    continue
                time = str(jd_to_mjd(Decimal(row[0])))
                catalog.entries[name].add_photometry(
                    time=time, band='V',
                    magnitude=row[1], e_magnitude=row[2],
                    system=system, source=source)
    catalog.journal_entries()

    # stromlo
    stromlobands = ['B', 'V', 'R', 'I', 'VM', 'RM']
    file_path = os.path.join(
        catalog.get_current_task_repo(), 'J_A+A_415_863-1/photometry.csv')
    tsvin = list(csv.reader(open(file_path, 'r'), delimiter=','))
    for row in pbar(tsvin, task_str):
        name = row[0]
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2004A&A...415..863G')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        mjd = str(jd_to_mjd(Decimal(row[1])))
        for ri, ci in enumerate(range(2, len(row), 3)):
            if not row[ci]:
                continue
            band = stromlobands[ri]
            upperlimit = True if (not row[ci + 1] and row[ci + 2]) else False
            e_upper_magnitude = str(
                abs(Decimal(row[ci + 1]))) if row[ci + 1] else ''
            e_lower_magnitude = str(
                abs(Decimal(row[ci + 2]))) if row[ci + 2] else ''
            teles = 'MSSSO 1.3m' if band in ['VM', 'RM'] else 'CTIO'
            instr = 'MaCHO' if band in ['VM', 'RM'] else ''
            catalog.entries[name].add_photometry(
                time=mjd, band=band, magnitude=row[ci],
                e_upper_magnitude=e_upper_magnitude,
                e_lower_magnitude=e_lower_magnitude,
                upperlimit=upperlimit, telescope=teles,
                instrument=instr, source=source)
    catalog.journal_entries()

    # 2015MNRAS.449..451W
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2015MNRAS.449..451W.dat')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if rr == 0:
            continue
        namesplit = row[0].split('/')
        name = namesplit[-1]
        if name.startswith('SN'):
            name = name.replace(' ', '')
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2015MNRAS.449..451W')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        if len(namesplit) > 1:
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, namesplit[0], source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.CLAIMED_TYPE, row[1], source)
        catalog.entries[name].add_photometry(
            time=row[2], band=row[4], magnitude=row[3], source=source)
    catalog.journal_entries()

    # 2016MNRAS.459.1039T
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2016MNRAS.459.1039T.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    name = catalog.add_entry('LSQ13zm')
    source = catalog.entries[name].add_source(bibcode='2016MNRAS.459.1039T')
    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            bands = [xx.replace('(err)', '') for xx in row[3:-1]]
            continue
        mjd = row[1]
        mags = [re.sub(r'\([^)]*\)', '', xx) for xx in row[3:-1]]
        upps = [True if '>' in xx else '' for xx in mags]
        mags = [xx.replace('>', '') for xx in mags]
        errs = [xx[xx.find('(') + 1:xx.find(')')]
                if '(' in xx else '' for xx in row[3:-1]]
        for mi, mag in enumerate(mags):
            if not is_number(mag):
                continue
            catalog.entries[name].add_photometry(
                time=mjd, band=bands[mi], magnitude=mag, e_magnitude=errs[mi],
                instrument=row[-1], upperlimit=upps[mi], source=source)
    catalog.journal_entries()

    # 2015ApJ...804...28G
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2015ApJ...804...28G.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    name = catalog.add_entry('PS1-13arp')
    source = catalog.entries[name].add_source(bibcode='2015ApJ...804...28G')
    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
    for rr, row in enumerate(pbar(data, task_str)):
        if rr == 0:
            continue
        mjd = row[1]
        mag = row[3]
        upp = True if '<' in mag else ''
        mag = mag.replace('<', '')
        err = row[4] if is_number(row[4]) else ''
        ins = row[5]
        catalog.entries[name].add_photometry(
            time=mjd, band=row[0], magnitude=mag, e_magnitude=err,
            instrument=ins, upperlimit=upp, source=source)
    catalog.journal_entries()

    # 2016ApJ...819...35A
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2016ApJ...819...35A.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(
            bibcode='2016ApJ...819...35A')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
        catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
        catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT, row[3], source)
        disc_date = datetime.strptime(row[4], '%Y %b %d').isoformat()
        disc_date = disc_date.split('T')[0].replace('-', '/')
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE, disc_date, source)
    catalog.journal_entries()

    # 2014ApJ...784..105W
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2014ApJ...784..105W.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(
            bibcode='2014ApJ...784..105W')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        mjd = row[1]
        band = row[2]
        mag = row[3]
        err = row[4]
        catalog.entries[name].add_photometry(
            time=mjd, band=row[2], magnitude=mag, e_magnitude=err,
            instrument='WHIRC', telescope='WIYN 3.5 m', observatory='NOAO',
            system='WHIRC', source=source)
    catalog.journal_entries()

    # 2012MNRAS.425.1007B
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2012MNRAS.425.1007B.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            bands = row[2:]
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(
            bibcode='2012MNRAS.425.1007B')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        mjd = row[1]
        mags = [xx.split('±')[0].strip() for xx in row[2:]]
        errs = [xx.split('±')[1].strip()
                if '±' in xx else '' for xx in row[2:]]
        if row[0] == 'PTF09dlc':
            ins = 'HAWK-I'
            tel = 'VLT 8.1m'
            obs = 'ESO'
        else:
            ins = 'NIRI'
            tel = 'Gemini North 8.2m'
            obs = 'Gemini'

        for mi, mag in enumerate(mags):
            if not is_number(mag):
                continue
            catalog.entries[name].add_photometry(
                time=mjd, band=bands[mi], magnitude=mag, e_magnitude=errs[mi],
                instrument=ins, telescope=tel, observatory=obs,
                system='Natural', source=source)

        catalog.journal_entries()

    # 2014ApJ...783...28G
    file_path = os.path.join(
        catalog.get_current_task_repo(), 'apj490105t2_ascii.txt')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                continue
            name, source = catalog.new_entry(
                row[0], bibcode='2014ApJ...783...28G')
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1], source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, '20' + row[0][3:5], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[3], source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.REDSHIFT, row[13] if is_number(row[13]) else
                row[10], source)
    catalog.journal_entries()

    # 2005ApJ...634.1190H
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2005ApJ...634.1190H.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            name, source = catalog.new_entry(
                'SNLS-' + row[0], bibcode='2005ApJ...634.1190H')
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, '20' + row[0][:2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.REDSHIFT, row[5].replace('?', ''), source,
                e_value=row[6], kind='host')
            catalog.entries[name].add_quantity(
                SUPERNOVA.CLAIMED_TYPE, row[7].replace('SN', '').strip(':* '),
                source)
    catalog.journal_entries()

    # 2014MNRAS.444.2133S
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2014MNRAS.444.2133S.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                continue
            name = row[0]
            if is_number(name[:4]):
                name = 'SN' + name
            name, source = catalog.new_entry(
                name, bibcode='2014MNRAS.444.2133S')
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT, row[3],
                                               source,
                                               kind='host')
    catalog.journal_entries()

    # 2009MNRAS.398.1041B
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2009MNRAS.398.1041B.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                bands = row[2:-1]
                continue
            name, source = catalog.new_entry(
                'SN2008S', bibcode='2009MNRAS.398.1041B')
            mjd = str(jd_to_mjd(Decimal(row[0])))
            mags = [x.split('±')[0].strip() for x in row[2:]]
            upps = [('<' in x.split('±')[0]) for x in row[2:]]
            errs = [x.split('±')[1].strip()
                    if '±' in x else '' for x in row[2:]]

            instrument = row[-1]

            for mi, mag in enumerate(mags):
                if not is_number(mag):
                    continue
                catalog.entries[name].add_photometry(
                    time=mjd, band=bands[mi],
                    magnitude=mag, e_magnitude=errs[mi],
                    instrument=instrument, source=source)
    catalog.journal_entries()

    # 2010arXiv1007.0011P
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2010arXiv1007.0011P.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                bands = row[1:]
                continue
            name, source = catalog.new_entry(
                'SN2008S', bibcode='2010arXiv1007.0011P')
            mjd = row[0]
            mags = [x.split('±')[0].strip() for x in row[1:]]
            errs = [x.split('±')[1].strip()
                    if '±' in x else '' for x in row[1:]]

            for mi, mag in enumerate(mags):
                if not is_number(mag):
                    continue
                catalog.entries[name].add_photometry(
                    time=mjd, band=bands[mi],
                    magnitude=mag, e_magnitude=errs[mi],
                    instrument='LBT', source=source)
    catalog.journal_entries()

    # 2000ApJ...533..320G
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2000ApJ...533..320G.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        name, source = catalog.new_entry(
            'SN1997cy', bibcode='2000ApJ...533..320G')
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                bands = row[1:-1]
                continue
            mjd = str(jd_to_mjd(Decimal(row[0])))
            mags = row[1:len(bands)]
            for mi, mag in enumerate(mags):
                if not is_number(mag):
                    continue
                catalog.entries[name].add_photometry(
                    time=mjd, band=bands[mi],
                    magnitude=mag,
                    observatory='Mount Stromlo', telescope='MSSSO',
                    source=source, kcorrected=True)

    catalog.journal_entries()
    return
# Beispiel #15
# 0
def do_cfa_photo(catalog):
    """Import photometry from the CfA Supernova Archive.

    Parses the raw ``cfa-input/*.dat`` files (one event per file, bands
    encoded in the file name), then the standardized Hicken 2012
    (2012ApJS..200...12H) and Bianco 2014 (2014ApJS..213...19B) tables.

    Parameters
    ----------
    catalog : the active catalog; entries are added/updated in place.

    Returns
    -------
    None
    """
    from html import unescape
    import re
    task_str = catalog.get_current_task_str()
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'cfa-input/*.dat'))
    for fname in pbar_strings(file_names, task_str):
        # Rows mix space and tab delimiters: csv splits on spaces, then
        # every field is re-split on tabs.  The context manager ensures
        # the handle is closed even if parsing raises (the original code
        # leaked it on errors).
        with open(fname, 'r') as f:
            tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
            csv_data = []
            for row in tsvin:
                new = []
                for item in row:
                    new.extend(item.split('\t'))
                csv_data.append(new)

        # Strip whitespace and drop empty fields.
        for r, row in enumerate(csv_data):
            for c, col in enumerate(row):
                csv_data[r][c] = col.strip()
            csv_data[r] = [_f for _f in csv_data[r] if _f]

        # File names look like '<event>_<bands>.dat'.
        eventname = os.path.basename(os.path.splitext(fname)[0])

        eventparts = eventname.split('_')

        name = clean_snname(eventparts[0])
        name = catalog.add_entry(name)
        secondaryname = 'CfA Supernova Archive'
        secondaryurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
        secondarysource = catalog.entries[name].add_source(
            name=secondaryname,
            url=secondaryurl,
            secondary=True,
            acknowledgment=ACKN_CFA)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name,
                                           secondarysource)

        # First run of digits in the event name is the discovery year.
        year = re.findall(r'\d+', name)[0]
        catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, year,
                                           secondarysource)

        # One character per band, in column order of the data rows.
        eventbands = list(eventparts[1])

        tu = 'MJD'
        jdoffset = Decimal(0.)
        # NOTE(review): `source` is only bound when a '# ... photometry (...)'
        # header row with a bibcode appears; a data row before such a header
        # would raise NameError below — presumably the input files always
        # carry the header.  TODO confirm.
        for rc, row in enumerate(csv_data):
            if len(row) > 0 and row[0][0] == "#":
                # Header rows establish the time system/offset and the
                # bibliographic source for the photometry rows that follow.
                if len(row[0]) > 2 and row[0][:3] == '#JD':
                    tu = 'JD'
                    rowparts = row[0].split('-')
                    jdoffset = Decimal(rowparts[1])
                elif len(row[0]) > 6 and row[0][:7] == '#Julian':
                    tu = 'JD'
                    jdoffset = Decimal(0.)
                elif len(row) > 1 and row[1].lower() == 'photometry':
                    for ci, col in enumerate(row[2:]):
                        if col[0] == "(":
                            refstr = ' '.join(row[2 + ci:])
                            refstr = refstr.replace('(', '').replace(')', '')
                            bibcode = unescape(refstr)
                            source = catalog.entries[name].add_source(
                                bibcode=bibcode)
                elif len(row) > 1 and row[1] == 'HJD':
                    tu = 'HJD'
                continue

            elif len(row) > 0:
                # Data rows: time, then (magnitude, error) pairs — one
                # pair per band listed in the file name.
                mjd = row[0]
                for v, val in enumerate(row):
                    if v == 0:
                        if tu == 'JD':
                            mjd = str(jd_to_mjd(Decimal(val) + jdoffset))
                            tuout = 'MJD'
                        elif tu == 'HJD':
                            mjd = str(jd_to_mjd(Decimal(val)))
                            tuout = 'MJD'
                        else:
                            mjd = val
                            tuout = tu
                    elif v % 2 != 0:
                        # Magnitudes >= 90 are sentinels for missing data.
                        if float(row[v]) < 90.0:
                            src = secondarysource + ',' + source
                            photodict = {
                                PHOTOMETRY.U_TIME: tuout,
                                PHOTOMETRY.TIME: mjd,
                                PHOTOMETRY.BAND_SET: 'Standard',
                                PHOTOMETRY.BAND: eventbands[(v - 1) // 2],
                                PHOTOMETRY.MAGNITUDE: row[v],
                                PHOTOMETRY.E_MAGNITUDE: row[v + 1],
                                PHOTOMETRY.SOURCE: src
                            }
                            catalog.entries[name].add_photometry(**photodict)

    # Hicken 2012 (2012ApJS..200...12H): pipe-delimited standard-system
    # photometry; the first 48 lines are header material.
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'hicken-2012-standard.dat'), 'r') as infile:
        tsvin = list(csv.reader(infile, delimiter='|', skipinitialspace=True))
        for r, row in enumerate(pbar(tsvin, task_str)):
            if r <= 47:
                continue

            if row[0][:2] != 'sn':
                name = 'SN' + row[0].strip()
            else:
                name = row[0].strip()

            name = catalog.add_entry(name)

            source = catalog.entries[name].add_source(
                bibcode='2012ApJS..200...12H')
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, 'Ia',
                                               source)
            photodict = {
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.TIME: row[2].strip(),
                PHOTOMETRY.BAND: row[1].strip(),
                PHOTOMETRY.BAND_SET: 'Standard',
                PHOTOMETRY.MAGNITUDE: row[6].strip(),
                PHOTOMETRY.E_MAGNITUDE: row[7].strip(),
                PHOTOMETRY.SOURCE: source
            }
            catalog.entries[name].add_photometry(**photodict)

    # Bianco 2014 (2014ApJS..213...19B): space-delimited standard-system
    # photometry with the telescope in column 5.
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'bianco-2014-standard.dat'), 'r') as infile:
        tsvin = list(csv.reader(infile, delimiter=' ', skipinitialspace=True))
        for row in pbar(tsvin, task_str):
            name = 'SN' + row[0]
            name = catalog.add_entry(name)

            source = catalog.entries[name].add_source(
                bibcode='2014ApJS..213...19B')
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            photodict = {
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.TIME: row[2],
                PHOTOMETRY.BAND: row[1],
                PHOTOMETRY.MAGNITUDE: row[3],
                PHOTOMETRY.E_MAGNITUDE: row[4],
                PHOTOMETRY.TELESCOPE: row[5],
                PHOTOMETRY.BAND_SET: 'Standard',
                PHOTOMETRY.SOURCE: source
            }
            catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
# Beispiel #16
# 0
def do_cccp(catalog):
    """Import photometry from the Caltech Core-Collapse Project (CCCP).

    Two stages: (1) parse the local ``CCCP/apj407397*.txt`` tables
    (2012ApJ...744...10K); (2) scrape the CCCP web pages for per-event,
    per-band light-curve files, caching every fetched page under the
    task repository when the archive flag is off.
    """
    task_str = catalog.get_current_task_str()
    cccpbands = ['B', 'V', 'R', 'I']
    file_names = list(
        glob(os.path.join(catalog.get_current_task_repo(),
                          'CCCP/apj407397*.txt')))
    for datafile in pbar_strings(file_names, task_str + ': apj407397...'):
        with open(datafile, 'r') as ff:
            tsvin = csv.reader(ff, delimiter='\t', skipinitialspace=True)
            for rr, row in enumerate(tsvin):
                if rr == 0:
                    continue
                elif rr == 1:
                    # Second row carries the event name ('SN <name>').
                    name = 'SN' + row[0].split('SN ')[-1]
                    name = catalog.add_entry(name)
                    source = catalog.entries[name].add_source(
                        bibcode='2012ApJ...744...10K')
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.ALIAS, name, source)
                elif rr >= 5:
                    # Times are tabulated as offsets from MJD 53000.
                    mjd = str(Decimal(row[0]) + 53000)
                    for bb, band in enumerate(cccpbands):
                        if row[2 * bb + 1]:
                            mag = row[2 * bb + 1].strip('>')
                            # An empty error column marks an upper limit
                            # (the magnitude itself is prefixed with '>').
                            upl = (not row[2 * bb + 2])
                            (catalog.entries[name]
                             .add_photometry(time=mjd, band=band,
                                             magnitude=mag,
                                             e_magnitude=row[2 * bb + 2],
                                             upperlimit=upl, source=source))

    # Load the cached CCCP index page, or fetch it and write the cache.
    if catalog.current_task.load_archive(catalog.args):
        with open(os.path.join(catalog.get_current_task_repo(),
                               'CCCP/sc_cccp.html'), 'r') as ff:
            html = ff.read()
    else:
        session = requests.Session()
        response = session.get(
            'https://webhome.weizmann.ac.il/home/iair/sc_cccp.html')
        html = response.text
        with open(os.path.join(catalog.get_current_task_repo(),
                               'CCCP/sc_cccp.html'), 'w') as ff:
            ff.write(html)

    soup = BeautifulSoup(html, 'html5lib')
    links = soup.body.findAll("a")
    for link in pbar(links, task_str + ': links'):
        if 'sc_sn' in link['href']:
            name = catalog.add_entry(link.text.replace(' ', ''))
            source = (catalog.entries[name]
                      .add_source(name='CCCP',
                                  url=('https://webhome.weizmann.ac.il'
                                       '/home/iair/sc_cccp.html')))
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

            # NOTE(review): `session` exists only when the archive is NOT
            # used; the branches below re-check the same flag, so it is
            # defined whenever referenced here.
            if catalog.current_task.load_archive(catalog.args):
                fname = os.path.join(catalog.get_current_task_repo(),
                                     'CCCP/') + link['href'].split('/')[-1]
                with open(fname, 'r') as ff:
                    html2 = ff.read()
            else:
                response2 = session.get(
                    'https://webhome.weizmann.ac.il/home/iair/' + link['href'])
                html2 = response2.text
                fname = os.path.join(catalog.get_current_task_repo(),
                                     'CCCP/') + link['href'].split('/')[-1]
                with open(fname, 'w') as ff:
                    ff.write(html2)

            # Per-event page: follow every '<event>_<band>.txt' link.
            soup2 = BeautifulSoup(html2, 'html5lib')
            links2 = soup2.body.findAll("a")
            for link2 in links2:
                if '.txt' in link2['href'] and '_' in link2['href']:
                    band = link2['href'].split('_')[1].split('.')[0].upper()
                    if catalog.current_task.load_archive(catalog.args):
                        fname = os.path.join(
                            catalog.get_current_task_repo(), 'CCCP/')
                        fname += link2['href'].split('/')[-1]
                        if not os.path.isfile(fname):
                            continue
                        with open(fname, 'r') as ff:
                            html3 = ff.read()
                    else:
                        response3 = (session
                                     .get('https://webhome.weizmann.ac.il'
                                          '/home/iair/cccp/' +
                                          link2['href']))
                        if response3.status_code == 404:
                            continue
                        html3 = response3.text
                        fname = os.path.join(
                            catalog.get_current_task_repo(), 'CCCP/')
                        fname += link2['href'].split('/')[-1]
                        with open(fname, 'w') as ff:
                            ff.write(html3)
                    # CSV lines of (time-offset, mag, err); normalize each
                    # value through Decimal and drop trailing zeros.
                    table = [[str(Decimal(yy.strip())).rstrip('0') for yy in
                              xx.split(',')]
                             for xx in list(filter(None, html3.split('\n')))]
                    for row in table:
                        catalog.entries[name].add_photometry(
                            time=str(Decimal(row[0]) + 53000),
                            band=band, magnitude=row[1],
                            e_magnitude=row[2], source=source)

    catalog.journal_entries()
    return
def do_external(catalog):
    """Import events from old TDEFit 'old-tdefit/*.dat' data files.

    Each file is read twice: a first pass collects metadata (name,
    sources, redshift, time-system flags), a second pass ingests the
    quantity and photometry rows.

    Parameters
    ----------
    catalog : the active catalog; entries are added/updated in place.

    Returns
    -------
    None
    """
    task_str = catalog.get_current_task_str()
    # Mapping from old two-character TDEFit band codes to instrument/band.
    # Some codes (e.g. 'RO', 'X1', 'X2', 'XM') carry no band at all.
    oldbanddict = {
        "Pg": {
            "instrument": "Pan-STARRS1",
            "band": "g"
        },
        "Pr": {
            "instrument": "Pan-STARRS1",
            "band": "r"
        },
        "Pi": {
            "instrument": "Pan-STARRS1",
            "band": "i"
        },
        "Pz": {
            "instrument": "Pan-STARRS1",
            "band": "z"
        },
        "Mu": {
            "instrument": "MegaCam",
            "band": "u"
        },
        "Mg": {
            "instrument": "MegaCam",
            "band": "g"
        },
        "Mr": {
            "instrument": "MegaCam",
            "band": "r"
        },
        "Mi": {
            "instrument": "MegaCam",
            "band": "i"
        },
        "Mz": {
            "instrument": "MegaCam",
            "band": "z"
        },
        "Su": {
            "instrument": "SDSS",
            "band": "u"
        },
        "Sg": {
            "instrument": "SDSS",
            "band": "g"
        },
        "Sr": {
            "instrument": "SDSS",
            "band": "r"
        },
        "Si": {
            "instrument": "SDSS",
            "band": "i"
        },
        "Sz": {
            "instrument": "SDSS",
            "band": "z"
        },
        "bU": {
            "instrument": "Bessel",
            "band": "U"
        },
        "bB": {
            "instrument": "Bessel",
            "band": "B"
        },
        "bV": {
            "instrument": "Bessel",
            "band": "V"
        },
        "bR": {
            "instrument": "Bessel",
            "band": "R"
        },
        "bI": {
            "instrument": "Bessel",
            "band": "I"
        },
        "4g": {
            "instrument": "PTF 48-Inch",
            "band": "g"
        },
        "4r": {
            "instrument": "PTF 48-Inch",
            "band": "r"
        },
        "6g": {
            "instrument": "PTF 60-Inch",
            "band": "g"
        },
        "6r": {
            "instrument": "PTF 60-Inch",
            "band": "r"
        },
        "6i": {
            "instrument": "PTF 60-Inch",
            "band": "i"
        },
        "Uu": {
            "instrument": "UVOT",
            "band": "U"
        },
        "Ub": {
            "instrument": "UVOT",
            "band": "B"
        },
        "Uv": {
            "instrument": "UVOT",
            "band": "V"
        },
        "Um": {
            "instrument": "UVOT",
            "band": "M2"
        },
        "U1": {
            "instrument": "UVOT",
            "band": "W1"
        },
        "U2": {
            "instrument": "UVOT",
            "band": "W2"
        },
        "GN": {
            "instrument": "GALEX",
            "band": "NUV"
        },
        "GF": {
            "instrument": "GALEX",
            "band": "FUV"
        },
        "CR": {
            "instrument": "Clear",
            "band": "r"
        },
        "RO": {
            "instrument": "ROTSE"
        },
        "X1": {
            "instrument": "Chandra"
        },
        "X2": {
            "instrument": "XRT"
        },
        "Xs": {
            "instrument": "XRT",
            "band": "soft"
        },
        "Xm": {
            "instrument": "XRT",
            "band": "hard"
        },
        "XM": {
            "instrument": "XMM"
        }
    }
    path_pattern = os.path.join(catalog.get_current_task_repo(),
                                'old-tdefit/*.dat')
    for datafile in pbar_strings(glob(path_pattern), task_str):
        # Context manager closes the file (the original leaked one handle
        # per data file).
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)

            # First pass: metadata rows.
            # NOTE(review): `name`, `restframe`, `hostnhcorr` and
            # `redshift` are only bound if the corresponding rows exist;
            # presumably every old-tdefit file carries them.  TODO confirm.
            sources = []
            yrsmjdoffset = 0.
            for row in tsvin:
                if row[0] == 'name':
                    name = re.sub('<[^<]+?>', '', row[1].split(',')[0].strip())
                    name = catalog.add_entry(name)
                elif row[0] == 'citations':
                    citarr = row[1].split(',')
                    for cite in citarr:
                        if '*' in cite:
                            # Starred citations embed a URL-quoted bibcode.
                            bibcode = urllib.parse.unquote(
                                cite.split('/')[-2].split("'")[0])
                            sources.append(catalog.entries[name].add_source(
                                bibcode=bibcode))
                elif row[0] == 'nhcorr':
                    hostnhcorr = True if row[1] == 'T' else False
                elif row[0] == 'restframe':
                    restframe = True if row[1] == 'T' else False
                elif row[0] == 'yrsmjdoffset':
                    yrsmjdoffset = float(row[1])
                if row[0] == 'redshift':
                    redshift = float(row[1].split(',')[0].strip(' *'))

            source = ','.join(sources)

            if not source:
                source = catalog.entries[name].add_self_source()

            # Second pass: quantity and photometry rows.
            f.seek(0)

            for row in tsvin:
                if not row or len(row) < 2 or not row[1]:
                    continue
                if row[0] == 'redshift':
                    for rs in [x.strip() for x in row[1].split(',')]:
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.REDSHIFT, rs.strip(' *'), source)
                elif row[0] == 'host':
                    hostname = re.sub('<[^<]+?>', '', row[1])
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.HOST, hostname, source)
                elif row[0] == 'claimedtype' and row[1] != 'TDE':
                    cts = row[1].split(',')
                    for ct in cts:
                        ctype = ct.strip()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.CLAIMED_TYPE, ctype, source)
                elif row[0] == 'citations':
                    catalog.entries[name].add_quantity(
                        Key('citations', KEY_TYPES.STRING), row[1], source)
                elif row[0] == 'notes':
                    catalog.entries[name].add_quantity(
                        Key('notes', KEY_TYPES.STRING), row[1], source)
                elif row[0] == 'nh':
                    catalog.entries[name].add_quantity(
                        Key('nhhost', KEY_TYPES.STRING), row[1], source)
                elif row[0] == 'photometry':
                    timeunit = row[1]
                    if timeunit == 'yrs':
                        timeunit = 'MJD'
                        if restframe:
                            # Currently presume only the time, not the flux,
                            # has been affected by redshifting.
                            time = str(yrsmjdoffset + float(row[2]) * 365.25 *
                                       (1.0 + redshift))
                        else:
                            time = str(yrsmjdoffset + float(row[2]) * 365.25)
                        lrestframe = False
                    else:
                        time = row[2]
                        if timeunit == 'floatyr':
                            timeunit = 'MJD'
                            time = str(astrotime(float(time),
                                                 format='jyear').mjd)
                        lrestframe = restframe

                    # BUG FIX: `band` was previously only assigned when the
                    # code carried a 'band' key, so bandless codes (e.g.
                    # 'RO', 'X1') silently reused the previous row's band.
                    instrument = ''
                    band = ''
                    iband = row[3]
                    if iband in oldbanddict:
                        band = oldbanddict[iband].get('band', '')
                        instrument = oldbanddict[iband].get('instrument', '')
                    else:
                        band = iband
                    upperlimit = True if row[6] == '1' else False
                    if 'X' in iband:
                        # X-ray rows: column 4 is log10(count rate).
                        counts = Decimal(10.0)**Decimal(row[4])
                        photodict = {
                            PHOTOMETRY.TIME: time,
                            PHOTOMETRY.U_TIME: timeunit,
                            PHOTOMETRY.BAND: band,
                            PHOTOMETRY.COUNT_RATE: counts,
                            PHOTOMETRY.UPPER_LIMIT: upperlimit,
                            PHOTOMETRY.REST_FRAME: lrestframe,
                            PHOTOMETRY.HOST_NH_CORR: hostnhcorr,
                            PHOTOMETRY.INSTRUMENT: instrument,
                            PHOTOMETRY.SOURCE: source
                        }
                        # Old TDEFit stored counts in log
                        if float(row[5]) != 0.0:
                            photodict[PHOTOMETRY.E_COUNT_RATE] = str(
                                (Decimal(10.0)**(Decimal(row[4]) +
                                                 Decimal(row[5])) -
                                 Decimal(10.0)**Decimal(row[4])))
                    else:
                        magnitude = row[4]
                        photodict = {
                            PHOTOMETRY.TIME: time,
                            PHOTOMETRY.U_TIME: timeunit,
                            PHOTOMETRY.BAND: band,
                            PHOTOMETRY.MAGNITUDE: magnitude,
                            PHOTOMETRY.UPPER_LIMIT: upperlimit,
                            PHOTOMETRY.REST_FRAME: lrestframe,
                            PHOTOMETRY.HOST_NH_CORR: hostnhcorr,
                            PHOTOMETRY.INSTRUMENT: instrument,
                            PHOTOMETRY.SOURCE: source
                        }
                        if float(row[5]) != 0.0:
                            photodict[PHOTOMETRY.E_MAGNITUDE] = row[5]
                    catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
# Beispiel #18
# 0
def do_snls_spectra(catalog):
    """Import SNLS spectra from Balland et al. 2009 (2009A&A...507...85B).

    Spectrum dates come from Vizier table J/A+A/507/85/table1; the
    spectra themselves are read from local 'SNLS/*' files named
    '<prefix>_<yydd...>_...'.

    Parameters
    ----------
    catalog : the active catalog; entries are added/updated in place.

    Returns
    -------
    None
    """
    task_str = catalog.get_current_task_str()
    result = Vizier.get_catalogs('J/A+A/507/85/table1')
    table = result[list(result.keys())[0]]
    table.convert_bytestring_to_unicode(python3_only=True)
    # Map event name -> spectrum MJD from the Vizier table.
    datedict = {}
    for row in table:
        datedict['SNLS-' + row['SN']] = str(astrotime(row['Date']).mjd)

    oldname = ''
    file_names = glob(os.path.join(catalog.get_current_task_repo(), 'SNLS/*'))
    for fi, fname in enumerate(pbar_strings(file_names, task_str)):
        filename = os.path.basename(fname)
        fileparts = filename.split('_')
        name = 'SNLS-' + fileparts[1]
        name = catalog.get_preferred_name(name)
        # Flush completed entries whenever the event changes.
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2009A&A...507...85B')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

        # The first two digits of the file tag encode the discovery
        # year ('04' -> 2004).
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE, '20' + fileparts[1][:2], source)

        # Fallback if the header lacks an '@TELESCOPE' line (previously a
        # missing header raised NameError below).
        telescope = ''
        # Context manager closes the file (the original leaked one handle
        # per spectrum file).
        with open(fname, 'r') as f:
            data = csv.reader(f, delimiter=' ', skipinitialspace=True)
            specdata = []
            for r, row in enumerate(data):
                if row[0] == '@TELESCOPE':
                    telescope = row[1].strip()
                elif row[0] == '@REDSHIFT':
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.REDSHIFT, row[1].strip(), source)
                # The first 14 lines are header material.
                if r < 14:
                    continue
                specdata.append(
                    list(filter(None, [x.strip(' \t') for x in row])))
        # Transpose rows -> columns.
        specdata = [list(i) for i in zip(*specdata)]
        wavelengths = specdata[1]

        # Fluxes are tabulated in units of 1e-16 erg/s/cm^2/Angstrom.
        fluxes = [pretty_num(float(x) * 1.e-16, sig=get_sig_digits(x))
                  for x in specdata[2]]
        # FIX: this isnt being used
        # errors = [pretty_num(float(x)*1.e-16, sig=get_sig_digits(x)) for x in
        # specdata[3]]

        catalog.entries[name].add_spectrum(
            u_wavelengths='Angstrom', u_fluxes='erg/s/cm^2/Angstrom',
            wavelengths=wavelengths,
            fluxes=fluxes, u_time='MJD' if name in datedict else '',
            time=datedict[name] if name in datedict else '',
            telescope=telescope, source=source,
            filename=filename)
        if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:
            break
    catalog.journal_entries()
    return
# Beispiel #19
# 0
def do_essence_spectra(catalog):
    """Import ESSENCE survey spectra (Narayan et al. 2016) from local files.

    File names encode the event name, an observation date (optionally
    preceded by a 'comb' tag for combined spectra) and an instrument tag,
    e.g. ``<event>_<YYYYMMDD.d>_..._<inst>.<ext>``.
    """
    task_str = catalog.get_current_task_str()

    # Instrument tag (as embedded in the file name) -> proper instrument name.
    insdict = {
        "lris": "LRIS",
        "esi": "ESI",
        "deimos": "DEIMOS",
        "gmos": "GMOS",
        "fors1": "FORS1",
        "bluechannel": "Blue Channel",
        "ldss2": "LDSS-2",
        "ldss3": "LDSS-3",
        "imacs": "IMACS",
        "fast": "FAST"
    }

    # Same instrument tag -> telescope hosting that instrument.
    teldict = {
        "lris": "Keck",
        "esi": "Keck",
        "deimos": "Keck",
        "gmos": "Gemini",
        "fors1": "VLT",
        "bluechannel": "MMT",
        "ldss2": "Magellan Clay & Baade",
        "ldss3": "Magellan Clay & Baade",
        "imacs": "Magellan Clay & Baade",
        "fast": "FLWO 1.5m"
    }

    spec_paths = glob(
        os.path.join(catalog.get_current_task_repo(), 'ESSENCE', '*'))
    prev_name = ''
    for fi, spec_path in enumerate(pbar_strings(spec_paths, task_str)):
        spec_base = os.path.basename(spec_path)
        parts = spec_base.split('_')
        name = catalog.get_preferred_name('ESSENCE ' + parts[0])
        # Flush accumulated entries whenever we move on to a new event.
        if prev_name and name != prev_name:
            catalog.journal_entries()
        prev_name = name

        # The date token either follows the event name directly, or comes
        # after a 'comb' marker; any other layout is skipped.
        if is_number(parts[1]):
            date_idx = 1
        elif parts[1] == 'comb':
            date_idx = 2
        else:
            continue

        # Date token is YYYYMMDD plus a fractional-day suffix.
        date_token = parts[date_idx]
        obs_date = datetime.datetime(
            year=int(date_token[:4]),
            month=int(date_token[4:6]),
            day=int(date_token[6:8]))
        obs_date = obs_date + datetime.timedelta(days=float(date_token[8:]))
        mjd = str(astrotime(obs_date).mjd)

        inst_key = parts[-1].split('.')[0]
        telescope = teldict.get(inst_key, '')
        instrument = insdict.get(inst_key, '')

        # Files are two space-separated columns: wavelength, flux.
        with open(spec_path, 'r') as spec_file:
            reader = csv.reader(spec_file, delimiter=' ',
                                skipinitialspace=True)
            columns = [list(col) for col in zip(*reader)]
        wavelengths = columns[0]
        # Tabulated fluxes carry an implicit 1e-15 scale factor.
        fluxes = [str(Decimal('1.0e-15') * Decimal(x)) for x in columns[1]]

        name, source = catalog.new_entry(name, bibcode='2016ApJS..224....3N')

        specdict = {
            SPECTRUM.TIME: mjd,
            SPECTRUM.U_TIME: 'MJD',
            SPECTRUM.U_WAVELENGTHS: 'Angstrom',
            SPECTRUM.WAVELENGTHS: wavelengths,
            SPECTRUM.FLUXES: fluxes,
            SPECTRUM.U_FLUXES: 'erg/s/cm^2/Angstrom',
            SPECTRUM.FILENAME: spec_base,
            SPECTRUM.SOURCE: source
        }

        # Only attach instrument/telescope when the tag was recognized.
        if instrument:
            specdict[SPECTRUM.INSTRUMENT] = instrument
        if telescope:
            specdict[SPECTRUM.TELESCOPE] = telescope

        catalog.entries[name].add_spectrum(**specdict)

        if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:
            break

    catalog.journal_entries()
    return
# Beispiel #20 (example separator; not code)
# 0
def do_wiserep_spectra(catalog):
    """Import spectra mirrored from WISeREP (Yaron & Gal-Yam 2012).

    Each event folder contains spectrum files plus a ``README.json``
    describing them. Bibcodes quoted by WISeREP are checked against a
    table of known-bad values and either corrected or attached as plain
    named sources before the spectra are added.
    """
    task_str = catalog.get_current_task_str()
    secondaryreference = 'WISeREP'
    secondaryrefurl = 'http://wiserep.weizmann.ac.il/'
    secondarybibcode = '2012PASP..124..668Y'
    wiserepcnt = 0

    # These are known to be in error on the WISeREP page, either fix or ignore
    # them.
    wiserepbibcorrectdict = {'2000AJ....120..367G]': '2000AJ....120..367G',
                             'Harutyunyan et al. 2008': '2008A&A...488..383H',
                             '0609268': '2007AJ....133...58K',
                             '2006ApJ...636...400Q': '2006ApJ...636..400Q',
                             '2011ApJ...741...76': '2011ApJ...741...76C',
                             '2016PASP...128...961': '2016PASP..128...961',
                             '2002AJ....1124..417H': '2002AJ....1124.417H',
                             '2013ApJ…774…58D': '2013ApJ...774...58D',
                             '2011Sci.333..856S': '2011Sci...333..856S',
                             '2014MNRAS.438,368': '2014MNRAS.438..368T',
                             '2012MNRAS.420.1135': '2012MNRAS.420.1135S',
                             '2012Sci..337..942D': '2012Sci...337..942D',
                             'stt1839': '2013MNRAS.436.3614S',
                             'arXiv:1605.03136': '2016arXiv160503136T',
                             '10.1093/mnras/stt1839': '2013MNRAS.436.3614S'}

    file_names = list(
        glob(os.path.join(
            catalog.get_current_task_repo(), '*')))
    for folder in pbar_strings(file_names, task_str):
        if '.txt' in folder:
            continue
        # Normalize survey-specific name prefixes to catalog conventions.
        name = os.path.basename(folder).strip()
        if name.startswith('sn'):
            name = 'SN' + name[2:]
        if (name.startswith(('CSS', 'SSS', 'MLS')) and
                ':' not in name):
            name = name.replace('-', ':', 1)
        if name.startswith('MASTERJ'):
            name = name.replace('MASTERJ', 'MASTER OT J')
        if name.startswith('PSNJ'):
            name = name.replace('PSNJ', 'PSN J')
        name = catalog.add_entry(name)

        secondarysource = catalog.entries[name].add_source(
            name=secondaryreference,
            url=secondaryrefurl,
            bibcode=secondarybibcode, secondary=True)
        catalog.entries[name].add_quantity(
            SUPERNOVA.ALIAS, name, secondarysource)

        with open(os.path.join(folder, 'README.json'), 'r') as f:
            fileinfo = json.loads(f.read())

        # Everything in the folder except the README is a spectrum file.
        files = list(set(glob(folder + '/*')) -
                     set(glob(folder + '/README.json')))
        for fname in pbar(files, task_str):
            specfile = os.path.basename(fname)
            claimedtype = fileinfo[specfile]["Type"]
            instrument = fileinfo[specfile]["Instrument"]
            epoch = fileinfo[specfile]["Obs. Date"]
            observer = fileinfo[specfile]["Observer"]
            reducer = fileinfo[specfile]["Reducer"]
            bibcode = fileinfo[specfile]["Bibcode"]
            redshift = fileinfo[specfile]["Redshift"]
            survey = fileinfo[specfile]["Program"]
            reduction = fileinfo[specfile]["Reduction Status"]

            if bibcode:
                newbibcode = bibcode
                if bibcode in wiserepbibcorrectdict:
                    newbibcode = wiserepbibcorrectdict[bibcode]
                # Valid ADS bibcodes are exactly 19 characters long;
                # anything else is attached as a plain named source.
                if newbibcode and len(newbibcode) == 19:
                    source = catalog.entries[name].add_source(
                        bibcode=unescape(newbibcode))
                else:
                    bibname = unescape(bibcode)
                    source = catalog.entries[name].add_source(
                        name=bibname)
                    catalog.log.warning('Bibcode "{}" is invalid, using as '
                                        '`{}` instead'.format(bibname,
                                                              SOURCE.NAME))
                sources = uniq_cdl([source, secondarysource])
            else:
                sources = secondarysource

            if claimedtype not in ['Other']:
                catalog.entries[name].add_quantity(
                    SUPERNOVA.CLAIMED_TYPE, claimedtype,
                    secondarysource)
            catalog.entries[name].add_quantity(
                SUPERNOVA.REDSHIFT, redshift, secondarysource)

            with open(fname, 'r') as f:
                data = [x.split() for x in f]

                # Keep numeric (wavelength, flux[, error]) rows, dropping
                # rows whose flux repeats the previous value verbatim.
                skipspec = False  # vestigial flag; never set True here
                newdata = []
                oldval = ''
                for row in data:
                    if row and '#' not in row[0]:
                        if (len(row) >= 2 and
                                is_number(row[0]) and
                                is_number(row[1]) and
                                row[1] != oldval):
                            newdata.append(row)
                            oldval = row[1]

                if skipspec or not newdata:
                    warnings.warn(
                        'Skipped adding spectrum file ' +
                        specfile)
                    continue

                data = [list(i) for i in zip(*newdata)]
                wavelengths = data[0]
                fluxes = data[1]
                errors = ''
                if len(data) == 3:
                    # FIX: the third column holds the flux errors; this
                    # previously re-read the flux column (data[1]).
                    errors = data[2]
                time = str(astrotime(epoch).mjd)

                # Heuristic: very small absolute fluxes imply the spectrum
                # is flux-calibrated; otherwise mark it uncalibrated.
                if max([float(x) for x in fluxes]) < 1.0e-5:
                    fluxunit = 'erg/s/cm^2/Angstrom'
                else:
                    fluxunit = 'Uncalibrated'

                catalog.entries[name].add_spectrum(
                    u_wavelengths='Angstrom',
                    errors=errors,
                    u_fluxes=fluxunit,
                    u_errors=fluxunit if errors else '',
                    wavelengths=wavelengths,
                    fluxes=fluxes,
                    u_time='MJD', time=time,
                    instrument=instrument, source=sources,
                    observer=observer, reducer=reducer, reduction=reduction,
                    filename=specfile, survey=survey, redshift=redshift)

        catalog.journal_entries()

        wiserepcnt = wiserepcnt + 1
        if (catalog.args.travis and
                wiserepcnt %
                catalog.TRAVIS_QUERY_LIMIT == 0):
            break

    return
# Beispiel #21 (example separator; not code)
# 0
def do_donations(catalog):
    """Import privately donated photometry and metadata.

    Processes several one-off donations (Nicholl, Maggi, Galbany, Brown),
    each with its own ad-hoc file layout, journaling entries after each
    donation group.
    """
    task_str = catalog.get_current_task_str()
    # Nicholl 04-01-16 donation
    # bibcodes.json maps each bibcode to the event names it covers.
    with open(os.path.join(catalog.get_current_task_repo(),
                           'Nicholl-04-01-16/bibcodes.json'), 'r') as f:
        bcs = json.loads(f.read())

    file_names = glob(os.path.join(
        catalog.get_current_task_repo(), 'Nicholl-04-01-16/*.txt'))
    for datafile in pbar_strings(file_names, task_str +
                                 ': Nicholl-04-01-16'):
        inpname = os.path.basename(datafile).split('_')[0]
        name = catalog.add_entry(inpname)
        bibcode = ''
        for bc in bcs:
            if inpname in bcs[bc]:
                bibcode = bc
        if not bibcode:
            raise ValueError('Bibcode not found!')
        source = catalog.entries[name].add_source(bibcode=bibcode)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, inpname, source)
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
            for r, rrow in enumerate(tsvin):
                row = list(filter(None, rrow))
                if not row:
                    continue
                if row[0][0] == '#' and row[0] != '#MJD':
                    continue
                # Header row: band names alternate with their 'err' columns.
                if row[0] == '#MJD':
                    bands = [x for x in row[1:] if x and 'err' not in x]
                    continue
                mjd = row[0]
                if not is_number(mjd):
                    continue
                # Magnitudes sit in the odd columns; each error follows its
                # magnitude (row[2 * v + 2]).
                for v, val in enumerate(row[1::2]):
                    upperlimit = ''
                    # '>' prefixes mark upper limits.
                    if '>' in val:
                        upperlimit = True
                    mag = val.strip('>')
                    # Skip placeholder magnitudes (non-numeric, NaN or > 90).
                    if (not is_number(mag) or isnan(float(mag)) or
                            float(mag) > 90.0):
                        continue
                    err = ''
                    if (is_number(row[2 * v + 2]) and
                            not isnan(float(row[2 * v + 2]))):
                        err = row[2 * v + 2]
                    catalog.entries[name].add_photometry(
                        time=mjd, band=bands[v], magnitude=mag,
                        e_magnitude=err, upperlimit=upperlimit, source=source)
    catalog.journal_entries()

    # Maggi 04-11-16 donation (MC SNRs)
    with open(os.path.join(catalog.get_current_task_repo(),
                           'Maggi-04-11-16/LMCSNRs_OpenSNe.csv')) as f:
        tsvin = csv.reader(f, delimiter=',')
        for row in pbar(list(tsvin), task_str +
                        ': Maggi-04-11-16/LMCSNRs'):
            name = 'MCSNR ' + row[0]
            name = catalog.add_entry(name)
            ra = row[2]
            dec = row[3]
            source = (catalog.entries[name]
                      .add_source(bibcode='2016A&A...585A.162M'))
            # Build a positional 'LMCSNR J...' alias from the RA/Dec strings.
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, 'LMCSNR J' + rep_chars(ra, ' :.') +
                rep_chars(dec, ' :.'), source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            if row[1] != 'noname':
                catalog.entries[name].add_quantity(
                    SUPERNOVA.ALIAS, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[3], source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST, 'LMC', source)
            # Column 4 encodes the remnant type: 1 = Ia, 2 = core-collapse.
            if row[4] == '1':
                catalog.entries[name].add_quantity(
                    SUPERNOVA.CLAIMED_TYPE, 'Ia', source)
            elif row[4] == '2':
                catalog.entries[name].add_quantity(
                    SUPERNOVA.CLAIMED_TYPE, 'CC', source)
    with open(os.path.join(catalog.get_current_task_repo(),
                           'Maggi-04-11-16/SMCSNRs_OpenSNe.csv')) as f:
        tsvin = csv.reader(f, delimiter=',')
        for row in pbar(list(tsvin), task_str +
                        ': Maggi-04-11-16/SMCSNRs'):
            name = 'MCSNR ' + row[0]
            name = catalog.add_entry(name)
            source = catalog.entries[name].add_source(name='Pierre Maggi')
            ra = row[3]
            dec = row[4]
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, 'SMCSNR J' + ra.replace(':', '')[:6] +
                dec.replace(':', '')[:7], source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[3], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[4], source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST, 'SMC', source)
    catalog.journal_entries()

    # Galbany 04-18-16 donation
    # Each subfolder holds one event: '*.info' metadata plus '*.out*'
    # photometry files.
    folders = next(os.walk(os.path.join(
        catalog.get_current_task_repo(), 'galbany-04-18-16/')))[1]
    bibcode = '2016AJ....151...33G'
    for folder in folders:
        infofiles = glob(os.path.join(catalog.get_current_task_repo(),
                                      'galbany-04-18-16/') + folder +
                         '/*.info')
        photfiles = glob(os.path.join(catalog.get_current_task_repo(),
                                      'galbany-04-18-16/') + folder +
                         '/*.out*')

        zhel = ''
        zcmb = ''
        zerr = ''
        for path in infofiles:
            with open(path, 'r') as f:
                lines = f.read().splitlines()
                for line in lines:
                    splitline = line.split(':')
                    field = splitline[0].strip().lower()
                    value = splitline[1].strip()
                    if field == 'name':
                        # Normalize 'sn1999xx'-style names: upper-case the
                        # prefix/year and a lone trailing letter.
                        name = value[:6].upper()
                        name += (value[6].upper() if len(value) == 7
                                 else value[6:])
                        name = catalog.add_entry(name)
                        source = (catalog.entries[name]
                                  .add_source(bibcode=bibcode))
                        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,
                                                           name,
                                                           source)
                    elif field == 'type':
                        claimedtype = value.replace('SN', '')
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.CLAIMED_TYPE, claimedtype, source)
                    elif field == 'zhel':
                        zhel = value
                    elif field == 'redshift_error':
                        zerr = value
                    elif field == 'zcmb':
                        zcmb = value
                    elif field == 'ra':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.RA, value, source, u_value='floatdegrees')
                    elif field == 'dec':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DEC, value, source, u_value='floatdegrees')
                    elif field == 'host':
                        value = value.replace('- ', '-').replace('G ', 'G')
                        catalog.entries[name].add_quantity(SUPERNOVA.HOST,
                                                           value,
                                                           source)
                    elif field == 'e(b-v)_mw':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.EBV, value, source)

        # NOTE(review): relies on `name`/`source` assigned while parsing the
        # .info file above — assumes one .info file per folder; confirm.
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT, zhel, source, e_value=zerr, kind='heliocentric')
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT, zcmb, source, e_value=zerr, kind='cmb')

        for path in photfiles:
            with open(path, 'r') as f:
                band = ''
                lines = f.read().splitlines()
                # Line 1 names the band; lines 0, 2 and 3 are headers.
                for li, line in enumerate(lines):
                    if li in [0, 2, 3]:
                        continue
                    if li == 1:
                        band = line.split(':')[-1].strip()
                    else:
                        cols = list(filter(None, line.split()))
                        if not cols:
                            continue
                        catalog.entries[name].add_photometry(
                            time=cols[0], magnitude=cols[1],
                            e_magnitude=cols[2],
                            band=band, system=cols[3], telescope=cols[4],
                            source=source)
    catalog.journal_entries()

    # Brown 05-14-16
    files = glob(os.path.join(
        catalog.get_current_task_repo(), 'brown-05-14-16/*.dat'))
    for fi in pbar(files, task_str):
        name = os.path.basename(fi).split('_')[0]
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            name='Swift Supernovae', bibcode='2014Ap&SS.354...89B',
            url='http://people.physics.tamu.edu/pbrown/SwiftSN/swift_sn.html')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        with open(fi, 'r') as f:
            lines = f.read().splitlines()
            for line in lines:
                if not line or line[0] == '#':
                    continue
                cols = list(filter(None, line.split()))
                band = cols[0]
                mjd = cols[1]
                # Skip lower limit entries for now
                if cols[2] == 'NULL' and cols[6] == 'NULL':
                    continue
                # A NULL magnitude with non-NULL column 6 marks an upper
                # limit; its magnitude then lives in column 4.
                isupp = cols[2] == 'NULL' and cols[6] != 'NULL'
                mag = cols[2] if not isupp else cols[4]
                e_mag = cols[3] if not isupp else ''
                upp = '' if not isupp else True
                (catalog.entries[name]
                 .add_photometry(time=mjd, magnitude=mag,
                                 e_magnitude=e_mag,
                                 upperlimit=upp, band=band, source=source,
                                 telescope='Swift', instrument='UVOT',
                                 system='Vega'))
    catalog.journal_entries()

    # Nicholl 05-03-16
    files = glob(os.path.join(
        catalog.get_current_task_repo(), 'nicholl-05-03-16/*.txt'))
    name = catalog.add_entry('SN2015bn')
    source = catalog.entries[name].add_source(bibcode='2016arXiv160304748N')
    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, 'PS15ae', source)
    for fi in pbar(files, task_str):
        telescope = os.path.basename(fi).split('_')[1]
        with open(fi, 'r') as f:
            lines = f.read().splitlines()
            for li, line in enumerate(lines):
                if not line or (line[0] == '#' and li != 0):
                    continue
                cols = list(filter(None, line.split()))
                if not cols:
                    continue
                # First line is a header listing the band names.
                if li == 0:
                    bands = cols[1:]
                    continue

                mjd = cols[0]
                # Magnitude/error pairs alternate after the MJD column.
                for ci, col in enumerate(cols[1::2]):
                    if not is_number(col):
                        continue

                    emag = cols[2 * ci + 2]
                    upp = ''
                    # A non-numeric error column marks an upper limit.
                    if not is_number(emag):
                        emag = ''
                        upp = True
                    instrument = 'UVOT' if telescope == 'Swift' else ''
                    (catalog.entries[name]
                     .add_photometry(time=mjd, magnitude=col,
                                     e_magnitude=emag, upperlimit=upp,
                                     band=bands[ci], source=source,
                                     telescope=telescope,
                                     instrument=instrument,
                                     system='Vega' if
                                     telescope == 'Swift' else 'AB'))

    catalog.journal_entries()
    return
# Beispiel #22 (example separator; not code)
# 0
def do_suspect_photo(catalog):
    """Import photometry from locally mirrored SUSPECT HTML band pages.

    File names follow ``*-<event>-<index>-<band>.html``; page index 1 also
    carries event metadata (discovery year, host, redshift, claimed type).
    """
    task_str = catalog.get_current_task_str()
    # Map each ADS reference link found on the pages to its bibcode.
    with open(os.path.join(catalog.get_current_task_repo(),
                           'suspectreferences.csv'), 'r') as f:
        tsvin = csv.reader(f, delimiter=',', skipinitialspace=True)
        suspectrefdict = {}
        for row in tsvin:
            suspectrefdict[row[0]] = row[1]

    file_names = list(sorted(glob(os.path.join(
        catalog.get_current_task_repo(), 'SUSPECT/*.html'))))
    for datafile in pbar_strings(file_names, task_str):
        basename = os.path.basename(datafile)
        basesplit = basename.split('-')
        oldname = basesplit[1]
        name = catalog.add_entry(oldname)
        # NOTE(review): purely numeric 'SN<year>' names get an 'A' suffix
        # appended here — presumably to force a designation letter; confirm.
        if name.startswith('SN') and is_number(name[2:]):
            name = name + 'A'
        band = basesplit[3].split('.')[0]
        ei = int(basesplit[2])
        # Parse the mirrored HTML page from disk via a file:// URL.
        bandlink = 'file://' + os.path.abspath(datafile)
        bandresp = urllib.request.urlopen(bandlink)
        bandsoup = BeautifulSoup(bandresp, 'html5lib')
        bandtable = bandsoup.find('table')

        names = bandsoup.body.findAll(text=re.compile('Name'))
        # The last ADS link on the page identifies the primary reference.
        reference = ''
        for link in bandsoup.body.findAll('a'):
            if 'adsabs' in link['href']:
                reference = str(link).replace('"', "'")

        bibcode = unescape(suspectrefdict[reference])
        source = catalog.entries[name].add_source(bibcode=bibcode)

        sec_ref = 'SUSPECT'
        sec_refurl = 'https://www.nhn.ou.edu/~suspect/'
        sec_source = catalog.entries[name].add_source(
            name=sec_ref, url=sec_refurl, secondary=True)
        catalog.entries[name].add_quantity(
            SUPERNOVA.ALIAS, oldname, sec_source)

        # Event-level metadata only appears on the first page per event.
        if ei == 1:
            year = re.findall(r'\d+', name)[0]
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, year, sec_source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.HOST, names[1].split(':')[1].strip(), sec_source)

            redshifts = bandsoup.body.findAll(text=re.compile('Redshift'))
            if redshifts:
                catalog.entries[name].add_quantity(
                    SUPERNOVA.REDSHIFT, redshifts[0].split(':')[1].strip(),
                    sec_source, kind='heliocentric')
            # hvels = bandsoup.body.findAll(text=re.compile('Heliocentric
            # Velocity'))
            # if hvels:
            #     vel = hvels[0].split(':')[1].strip().split(' ')[0]
            #     catalog.entries[name].add_quantity(SUPERNOVA.VELOCITY, vel,
            # sec_source,
            # kind='heliocentric')
            types = bandsoup.body.findAll(text=re.compile('Type'))

            catalog.entries[name].add_quantity(
                SUPERNOVA.CLAIMED_TYPE, types[0].split(
                    ':')[1].strip().split(' ')[0],
                sec_source)

        # Table rows: col 0 = JD, col 3 = magnitude, col 4 = error; the
        # first row is a header. Whitespace-only cells become ''.
        for r, row in enumerate(bandtable.findAll('tr')):
            if r == 0:
                continue
            col = row.findAll('td')
            mjd = str(jd_to_mjd(Decimal(col[0].contents[0])))
            mag = col[3].contents[0]
            if mag.isspace():
                mag = ''
            else:
                mag = str(mag)
            e_magnitude = col[4].contents[0]
            if e_magnitude.isspace():
                e_magnitude = ''
            else:
                e_magnitude = str(e_magnitude)
            catalog.entries[name].add_photometry(
                time=mjd, band=band, magnitude=mag, e_magnitude=e_magnitude,
                source=sec_source + ',' + source)

    catalog.journal_entries()
    return
# Beispiel #23 (example separator; not code)
# 0
def do_donated_photo(catalog):
    """Import donated photometry."""
    task_str = catalog.get_current_task_str()

    # Private donations here #
    if not catalog.args.travis:
        pass
    # End private donations #

    # Ponder 05-12-17 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Ponder-05-12-17', 'meta.json'), 'r') as f:
        metadict = json.loads(f.read())
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Ponder-05-12-17', '*.dat'))
    for path in file_names:
        with open(path, 'r') as f:
            tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        oname = path.split('/')[-1].split('.')[0]
        name, source = catalog.new_entry(oname,
                                         bibcode=metadict[oname]['bibcode'])
        for row in pbar(tsvin, task_str + ': Ponder ' + oname):
            if row[0][0] == '#' or not is_number(row[-1]):
                continue
            mjd = row[1]
            bandinst = row[2].split('_')
            band = bandinst[0]
            inst = ''
            if len(bandinst) > 1:
                inst = bandinst[1]
            mag = row[3]
            uerr = row[4]
            lerr = row[5]
            photodict = {
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.BAND: band,
                PHOTOMETRY.MAGNITUDE: mag,
                PHOTOMETRY.E_LOWER_MAGNITUDE: lerr,
                PHOTOMETRY.E_UPPER_MAGNITUDE: uerr,
                PHOTOMETRY.SOURCE: source
            }
            if inst:
                photodict[PHOTOMETRY.INSTRUMENT] = inst
            catalog.entries[name].add_photometry(**photodict)

    # Benetti 03-08-17 donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Benetti-03-08-17', '1999E.dat')
    with open(path, 'r') as f:
        tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        name, source = catalog.new_entry('SN1999E',
                                         bibcode='2003MNRAS.340..191R')
        bands = None
        for row in tsvin:
            if not row or row[0][0] == '#':
                continue
            if not bands:
                bands = row[2:-2]
                continue
            mjd = row[1]
            tel = row[-1] if 'IAUC' not in row[-1] else None
            for bi, band in enumerate(bands):
                mag = row[2 + 2 * bi]
                if mag == '9999':
                    continue
                err = row[2 + 2 * bi + 1]
                limit = row[6] == 'True'
                photodict = {
                    PHOTOMETRY.TIME: mjd,
                    PHOTOMETRY.U_TIME: 'MJD',
                    PHOTOMETRY.TELESCOPE: tel,
                    PHOTOMETRY.BAND: band,
                    PHOTOMETRY.MAGNITUDE: mag,
                    PHOTOMETRY.SOURCE: source
                }
                if err != '.00':
                    photodict[PHOTOMETRY.E_MAGNITUDE] = str(Decimal(err))
                if tel:
                    photodict[PHOTOMETRY.TELESCOPE] = tel
                catalog.entries[name].add_photometry(**photodict)

    # Nicholl 01-29-17 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Nicholl-01-29-17', 'meta.json'), 'r') as f:
        metadict = json.loads(f.read())
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-01-29-17', '*.txt'))
    for path in file_names:
        data = read(path, format='cds')
        oname = path.split('/')[-1].split('_')[0]
        name, source = catalog.new_entry(oname,
                                         bibcode=metadict[oname]['bibcode'])
        for row in pbar(data, task_str + ': Nicholl ' + oname):
            photodict = {
                PHOTOMETRY.TIME: str(row['MJD']),
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: str(row['mag']),
                PHOTOMETRY.BAND: row['Filter'],
                PHOTOMETRY.SOURCE: source
            }
            if 'system' in metadict[oname]:
                photodict[PHOTOMETRY.SYSTEM] = metadict[oname]['system']
            if 'l_mag' in row.columns and row['l_mag'] == '>':
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            elif 'e_mag' in row.columns:
                photodict[PHOTOMETRY.E_MAGNITUDE] = str(row['e_mag'])
            if 'Telescope' in row.columns:
                photodict[PHOTOMETRY.TELESCOPE] = row['Telescope']
            catalog.entries[name].add_photometry(**photodict)

    # Arcavi 2016gkg donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Arcavi-01-24-17', 'SN2016gkg.txt')
    with open(path, 'r') as f:
        tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        name, source = catalog.new_entry('SN2016gkg',
                                         bibcode='2016arXiv161106451A')
        for row in tsvin:
            if row[0][0] == '#':
                continue
            mjd = str(jd_to_mjd(Decimal(row[0])))
            tel = row[1]
            band = row[3]
            mag = row[4]
            err = row[5]
            limit = row[6] == 'True'
            photodict = {
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.TELESCOPE: tel,
                PHOTOMETRY.BAND: band,
                PHOTOMETRY.MAGNITUDE: mag,
                PHOTOMETRY.SOURCE: source
            }
            if limit:
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            else:
                photodict[PHOTOMETRY.E_MAGNITUDE] = err
            catalog.entries[name].add_photometry(**photodict)

    # Nicholl Gaia16apd donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Nicholl-01-20-17', 'gaia16apd_phot.txt')

    data = read(path, format='cds')
    name, source = catalog.new_entry('Gaia16apd',
                                     bibcode='2017ApJ...835L...8N')
    for row in pbar(data, task_str + ': Nicholl Gaia16apd'):
        photodict = {
            PHOTOMETRY.TIME: str(row['MJD']),
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.MAGNITUDE: str(row['mag']),
            PHOTOMETRY.BAND: row['Filter'],
            PHOTOMETRY.TELESCOPE: row['Telescope'],
            PHOTOMETRY.SOURCE: source
        }
        if row['l_mag'] == '>':
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
        else:
            photodict[PHOTOMETRY.E_MAGNITUDE] = str(row['e_mag'])
        catalog.entries[name].add_photometry(**photodict)

    # Kuncarayakti-01-09-17
    datafile = os.path.join(catalog.get_current_task_repo(), 'Donations',
                            'Kuncarayakti-01-09-17', 'SN1978K.dat')
    inpname = os.path.basename(datafile).split('.')[0]
    with open(datafile, 'r') as f:
        tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
        host = False
        for ri, row in enumerate(tsvin):
            if ri == 0:
                continue
            if row[0][0] == '#':
                rsplit = [x.strip('# ') for x in ' '.join(row).split(',')]
                bc = rsplit[0]
                tel, ins = '', ''
                if len(rsplit) > 1:
                    tel = rsplit[1]
                if len(rsplit) > 2:
                    ins = rsplit[2]
                continue
            (name, source) = catalog.new_entry(inpname, bibcode=bc)
            mag = row[4]
            err = row[5]
            mjd = str(astrotime('-'.join(row[:3]), format='iso').mjd)
            photodict = {
                PHOTOMETRY.BAND: row[3],
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: mag.strip('>s'),
                PHOTOMETRY.SOURCE: source
            }
            if is_number(err):
                photodict[PHOTOMETRY.E_MAGNITUDE] = err
            if tel:
                photodict[PHOTOMETRY.TELESCOPE] = tel
            if ins:
                photodict[PHOTOMETRY.INSTRUMENT] = ins
            if '>' in mag:
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            if 's' in mag:
                photodict[PHOTOMETRY.SYNTHETIC] = True
            catalog.entries[name].add_photometry(**photodict)

    # Nugent 01-09-17 donation
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nugent-01-09-17', '*.dat'))
    for datafile in pbar_strings(file_names, task_str + ': Nugent-01-09-17'):
        inpname = os.path.basename(datafile).split('.')[0]
        (name, source) = catalog.new_entry(inpname,
                                           bibcode='2006ApJ...645..841N')
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
            host = False
            for urow in tsvin:
                row = list(filter(None, urow))
                counts = row[2]
                e_counts = row[3]
                zp = row[4]
                photodict = {
                    PHOTOMETRY.BAND: row[1],
                    PHOTOMETRY.TIME: row[0],
                    PHOTOMETRY.U_TIME: 'MJD',
                    PHOTOMETRY.COUNT_RATE: counts,
                    PHOTOMETRY.E_COUNT_RATE: e_counts,
                    PHOTOMETRY.ZERO_POINT: zp,
                    PHOTOMETRY.TELESCOPE: 'CFHT',
                    PHOTOMETRY.SURVEY: 'SNLS',
                    PHOTOMETRY.SOURCE: source
                }
                set_pd_mag_from_counts(photodict, counts, ec=e_counts, zp=zp)
                catalog.entries[name].add_photometry(**photodict)

    # Inserra 09-04-16 donation
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Inserra-09-04-16', '*.txt'))
    for datafile in pbar_strings(file_names, task_str + ': Inserra-09-04-16'):
        inpname = os.path.basename(datafile).split('.')[0]
        (name, source) = catalog.new_entry(inpname,
                                           bibcode='2013ApJ...770..128I')
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
            host = False
            for row in tsvin:
                if row[0][0] == '#':
                    if row[0] == '#Host':
                        host = True
                        continue
                    host = False
                    bands = row[3:-1]
                    continue
                for bi, ba in enumerate(bands):
                    mag = row[5 + 2 * bi]
                    if not is_number(mag):
                        continue
                    system = 'AB'
                    if ba in ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K']:
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: row[3],
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.BAND: ba,
                        PHOTOMETRY.MAGNITUDE: mag.strip('< '),
                        PHOTOMETRY.SOURCE: source,
                        PHOTOMETRY.SYSTEM: system
                    }
                    if 'ATel' not in row[-1]:
                        photodict[PHOTOMETRY.TELESCOPE] = row[-1]
                    if host:
                        photodict[PHOTOMETRY.HOST] = True
                    if '<' in mag:
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    e_mag = row[5 + 2 * bi + 1].strip('() ')
                    if is_number(e_mag):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = e_mag
                    catalog.entries[name].add_photometry(**photodict)

    # Nicholl 04-01-16 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Nicholl-04-01-16', 'bibcodes.json'), 'r') as f:
        bcs = json.loads(f.read())

    kcorrected = ['SN2011ke', 'SN2011kf', 'SN2012il', 'PTF10hgi', 'PTF11rks']
    ignorephoto = ['PTF10hgi', 'PTF11rks', 'SN2011ke', 'SN2011kf', 'SN2012il']

    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-04-01-16/*.txt'))
    for datafile in pbar_strings(file_names, task_str + ': Nicholl-04-01-16'):
        inpname = os.path.basename(datafile).split('_')[0]
        isk = inpname in kcorrected
        name = catalog.add_entry(inpname)
        bibcode = ''
        for bc in bcs:
            if inpname in bcs[bc]:
                bibcode = bc
        if not bibcode:
            raise ValueError('Bibcode not found!')
        source = catalog.entries[name].add_source(bibcode=bibcode)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, inpname, source)
        if inpname in ignorephoto:
            continue
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
            rtelescope = ''
            for r, rrow in enumerate(tsvin):
                row = list(filter(None, rrow))
                if not row:
                    continue
                if row[0] == '#MJD':
                    bands = [x for x in row[1:] if x and 'err' not in x]
                elif row[0][0] == '#' and len(row[0]) > 1:
                    rtelescope = row[0][1:]
                if row[0][0] == '#':
                    continue
                mjd = row[0]
                if not is_number(mjd):
                    continue
                for v, val in enumerate(row[1::2]):
                    upperlimit = ''
                    mag = val.strip('>')
                    emag = row[2 * v + 2]
                    if '>' in val or (is_number(emag) and float(emag) == 0.0):
                        upperlimit = True
                    if (not is_number(mag) or isnan(float(mag))
                            or float(mag) > 90.0):
                        continue
                    band = bands[v]
                    instrument = ''
                    survey = ''
                    system = ''
                    telescope = rtelescope
                    if telescope == 'LSQ':
                        instrument = 'QUEST'
                    elif telescope == 'PS1':
                        instrument = 'GPC'
                    elif telescope == 'NTT':
                        instrument = 'EFOSC'
                    elif telescope == 'GROND':
                        instrument = 'GROND'
                        telescope = 'MPI/ESO 2.2m'
                    else:
                        if band == 'NUV':
                            instrument = 'GALEX'
                            telescope = 'GALEX'
                        elif band in ['u', 'g', 'r', 'i', 'z']:
                            if inpname.startswith('PS1'):
                                instrument = 'GPC'
                                telescope = 'PS1'
                                survey = 'Pan-STARRS'
                            elif inpname.startswith('PTF'):
                                telescope = 'P60'
                                survey = 'PTF'
                        elif band.upper() in ['UVW2', 'UVW1', 'UVM2']:
                            instrument = 'UVOT'
                            telescope = 'Swift'
                            if inpname in ['PTF12dam']:
                                system = 'AB'
                    if inpname in ['SCP-06F6']:
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: mjd,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.BAND: band,
                        PHOTOMETRY.MAGNITUDE: mag,
                        PHOTOMETRY.UPPER_LIMIT: upperlimit,
                        PHOTOMETRY.SOURCE: source
                    }
                    if instrument:
                        photodict[PHOTOMETRY.INSTRUMENT] = instrument
                    if telescope:
                        photodict[PHOTOMETRY.TELESCOPE] = telescope
                    if survey:
                        photodict[PHOTOMETRY.SURVEY] = survey
                    if system:
                        photodict[PHOTOMETRY.SYSTEM] = system
                    if (is_number(emag) and not isnan(float(emag))
                            and float(emag) > 0.0):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = emag
                    if isk:
                        photodict[PHOTOMETRY.KCORRECTED] = True
                    catalog.entries[name].add_photometry(**photodict)
    catalog.journal_entries()

    # Maggi 04-11-16 donation (MC SNRs)
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Maggi-04-11-16', 'LMCSNRs_OpenSNe.csv')) as f:
        tsvin = csv.reader(f, delimiter=',')
        for row in pbar(list(tsvin), task_str + ': Maggi-04-11-16/LMCSNRs'):
            name = 'MCSNR ' + row[0]
            name = catalog.add_entry(name)
            ra = row[2]
            dec = row[3]
            source = (catalog.entries[name].add_source(
                bibcode='2016A&A...585A.162M'))
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS,
                'LMCSNR J' + rep_chars(ra, ' :.') + rep_chars(dec, ' :.'),
                source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            if row[1] != 'noname':
                catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1],
                                                   source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[3], source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST, 'LMC', source)
            if row[4] == '1':
                catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                   'Ia', source)
            elif row[4] == '2':
                catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                   'CC', source)
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Maggi-04-11-16', 'SMCSNRs_OpenSNe.csv')) as f:
        tsvin = csv.reader(f, delimiter=',')
        for row in pbar(list(tsvin), task_str + ': Maggi-04-11-16/SMCSNRs'):
            name = 'MCSNR ' + row[0]
            name = catalog.add_entry(name)
            source = catalog.entries[name].add_source(name='Pierre Maggi')
            ra = row[3]
            dec = row[4]
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, 'SMCSNR J' + ra.replace(':', '')[:6] +
                dec.replace(':', '')[:7], source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[3], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[4], source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST, 'SMC', source)
    catalog.journal_entries()

    # Galbany 04-18-16 donation
    folders = next(
        os.walk(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/')))[1]
    bibcode = '2016AJ....151...33G'
    for folder in folders:
        infofiles = glob(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/') + folder + '/*.info')
        photfiles = glob(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/') + folder + '/*.out*')

        zhel = ''
        zcmb = ''
        zerr = ''
        for path in infofiles:
            with open(path, 'r') as f:
                lines = f.read().splitlines()
                for line in lines:
                    splitline = line.split(':')
                    field = splitline[0].strip().lower()
                    value = splitline[1].strip()
                    if field == 'name':
                        name = value[:6].upper()
                        name += (value[6].upper()
                                 if len(value) == 7 else value[6:])
                        name = catalog.add_entry(name)
                        source = (catalog.entries[name].add_source(
                            bibcode=bibcode))
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.ALIAS, name, source)
                    elif field == 'type':
                        claimedtype = value.replace('SN', '')
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.CLAIMED_TYPE, claimedtype, source)
                    elif field == 'zhel':
                        zhel = value
                    elif field == 'redshift_error':
                        zerr = value
                    elif field == 'zcmb':
                        zcmb = value
                    elif field == 'ra':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.RA,
                            value,
                            source,
                            u_value='floatdegrees')
                    elif field == 'dec':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DEC,
                            value,
                            source,
                            u_value='floatdegrees')
                    elif field == 'host':
                        value = value.replace('- ', '-').replace('G ', 'G')
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.HOST, value, source)
                    elif field == 'e(b-v)_mw':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.EBV, value, source)

        catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                           zhel,
                                           source,
                                           e_value=zerr,
                                           kind='heliocentric')
        catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                           zcmb,
                                           source,
                                           e_value=zerr,
                                           kind='cmb')

        for path in photfiles:
            with open(path, 'r') as f:
                band = ''
                lines = f.read().splitlines()
                for li, line in enumerate(lines):
                    if li in [0, 2, 3]:
                        continue
                    if li == 1:
                        band = line.split(':')[-1].strip()
                    else:
                        cols = list(filter(None, line.split()))
                        if not cols:
                            continue
                        catalog.entries[name].add_photometry(
                            time=cols[0],
                            u_time='MJD',
                            magnitude=cols[1],
                            e_magnitude=cols[2],
                            band=band,
                            system=cols[3],
                            telescope=cols[4],
                            source=source)
    catalog.journal_entries()

    # Nicholl 05-03-16
    files = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-05-03-16', '*.txt'))
    name = catalog.add_entry('SN2015bn')
    for fi in pbar(files, task_str + ': Nicholl-05-03-16'):
        if 'late' in fi:
            bc = '2016ApJ...828L..18N'
        else:
            bc = '2016ApJ...826...39N'
        source = catalog.entries[name].add_source(bibcode=bc)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, 'PS15ae', source)
        telescope = os.path.basename(fi).split('_')[1]
        with open(fi, 'r') as f:
            lines = f.read().splitlines()
            for li, line in enumerate(lines):
                if not line or (line[0] == '#' and li != 0):
                    continue
                cols = list(filter(None, line.split()))
                if not cols:
                    continue
                if li == 0:
                    bands = cols[1:]
                    continue

                mjd = cols[0]
                for ci, col in enumerate(cols[1::2]):
                    if not is_number(col) or np.isnan(float(col)):
                        continue

                    band = bands[ci]
                    band_set = ''
                    system = 'Vega'
                    if bands[ci] in ["u'", "g'", "r'", "i'", "z'"]:
                        band_set = 'SDSS'
                        system = 'SDSS'
                    elif telescope == 'ASASSN':
                        band_set = 'ASASSN'
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: mjd,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.MAGNITUDE: col,
                        PHOTOMETRY.BAND: bands[ci],
                        PHOTOMETRY.SOURCE: source,
                        PHOTOMETRY.TELESCOPE: telescope,
                        PHOTOMETRY.SYSTEM: system
                    }
                    if band_set:
                        photodict[PHOTOMETRY.BAND_SET] = band_set
                    emag = cols[2 * ci + 2]
                    if is_number(emag):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = emag
                    else:
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    if telescope == 'Swift':
                        photodict[PHOTOMETRY.INSTRUMENT] = 'UVOT'
                    catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
# Beispiel #24 ("Example #24" — paste/scraping artifact separating examples; original lines: "Beispiel #24" / "0")
def _cfa_fractional_day_mjd(year, month, day):
    """Return an MJD string for a date whose day component may be fractional.

    The integer part of `day` is used to build an ISO date for `astrotime`;
    the fractional part is added back onto the resulting MJD.
    """
    return str(
        astrotime(year + '-' + month + '-' +
                  str(floor(float(day))).zfill(2)).mjd +
        float(day) - floor(float(day)))


def _cfa_read_spectrum_columns(fname):
    """Read a space-delimited spectrum file and return it as a list of columns.

    Each returned element is one column (wavelengths, fluxes, ...); the file
    handle is closed before returning.
    """
    with open(fname, 'r') as spec_file:
        rows = csv.reader(spec_file, delimiter=' ', skipinitialspace=True)
        return [list(col) for col in zip(*rows)]


def _cfa_archive_source(catalog, name):
    """Attach the secondary 'CfA Supernova Archive' source to entry `name`."""
    reference = 'CfA Supernova Archive'
    refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
    return catalog.entries[name].add_source(
        name=reference,
        url=refurl,
        secondary=True,
        acknowledgment=ACKN_CFA)


def do_cfa_spectra(catalog):
    """Import spectra from the CfA archive.

    Walks four sub-repositories (CfA_SNII, CfA_SNIa, CfA_SNIbc, CfA_Extra),
    one directory per supernova, and adds each spectrum file found there to
    the matching catalog entry.  File names encode the observation date
    (YYYYMMDD, possibly with a fractional day) and instrument; the exact
    field positions differ per sub-archive.  Entries are journaled whenever
    the preferred name changes and once more at the end of each section.
    """
    task_str = catalog.get_current_task_str()

    # II spectra
    oldname = ''
    file_names = next(
        os.walk(os.path.join(catalog.get_current_task_repo(), 'CfA_SNII')))[1]
    for ni, name in enumerate(pbar_strings(file_names, task_str)):
        fullpath = os.path.join(catalog.get_current_task_repo(),
                                'CfA_SNII/') + name
        origname = name
        if name.startswith('sn') and is_number(name[2:6]):
            name = 'SN' + name[2:]
        name = catalog.get_preferred_name(name)
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        source = _cfa_archive_source(catalog, name)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        for fi, fname in enumerate(
                sorted(glob(fullpath + '/*'), key=lambda s: s.lower())):
            filename = os.path.basename(fname)
            fileparts = filename.split('-')
            # Directories named 'snYYYY...' put the date in field 1;
            # otherwise it sits in field 2 (and the instrument shifts too).
            if origname.startswith('sn') and is_number(origname[2:6]):
                datepart = fileparts[1]
                instrument = fileparts[2].split('.')[0]
            else:
                datepart = fileparts[2]
                instrument = fileparts[3].split('.')[0]
            time = _cfa_fractional_day_mjd(datepart[:4], datepart[4:6],
                                           datepart[6:])
            data = _cfa_read_spectrum_columns(fname)
            wavelengths = data[0]
            fluxes = data[1]
            errors = data[2]
            sources = uniq_cdl([
                source,
                (catalog.entries[name]
                 .add_source(bibcode='2017arXiv170601030H'))
            ])
            catalog.entries[name].add_spectrum(
                u_wavelengths='Angstrom',
                u_fluxes='erg/s/cm^2/Angstrom',
                filename=filename,
                wavelengths=wavelengths,
                fluxes=fluxes,
                u_time='MJD' if time else '',
                time=time,
                instrument=instrument,
                u_errors='ergs/s/cm^2/Angstrom',
                errors=errors,
                source=sources,
                dereddened=False,
                deredshifted=False)
        if catalog.args.travis and ni >= catalog.TRAVIS_QUERY_LIMIT:
            break
    catalog.journal_entries()

    # Ia spectra
    oldname = ''
    file_names = next(
        os.walk(os.path.join(catalog.get_current_task_repo(), 'CfA_SNIa')))[1]
    for ni, name in enumerate(pbar_strings(file_names, task_str)):
        fullpath = os.path.join(catalog.get_current_task_repo(),
                                'CfA_SNIa/') + name
        origname = name
        if name.startswith('sn') and is_number(name[2:6]):
            name = 'SN' + name[2:]
        if name.startswith('snf') and is_number(name[3:7]):
            name = 'SNF' + name[3:]
        name = catalog.get_preferred_name(name)
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        source = _cfa_archive_source(catalog, name)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        for fi, fname in enumerate(
                sorted(glob(fullpath + '/*'), key=lambda s: s.lower())):
            filename = os.path.basename(fname)
            fileparts = filename.split('-')
            if origname.startswith('sn') and is_number(origname[2:6]):
                datepart = fileparts[1]
                instrument = fileparts[2].split('.')[0]
            else:
                datepart = fileparts[2]
                instrument = fileparts[3].split('.')[0]
            time = _cfa_fractional_day_mjd(datepart[:4], datepart[4:6],
                                           datepart[6:])
            data = _cfa_read_spectrum_columns(fname)
            wavelengths = data[0]
            fluxes = data[1]
            errors = data[2]
            sources = uniq_cdl([
                source,
                (catalog.entries[name]
                 .add_source(bibcode='2012AJ....143..126B')),
                (catalog.entries[name]
                 .add_source(bibcode='2008AJ....135.1598M'))
            ])
            catalog.entries[name].add_spectrum(
                u_wavelengths='Angstrom',
                u_fluxes='erg/s/cm^2/Angstrom',
                filename=filename,
                wavelengths=wavelengths,
                fluxes=fluxes,
                u_time='MJD' if time else '',
                time=time,
                instrument=instrument,
                u_errors='ergs/s/cm^2/Angstrom',
                errors=errors,
                source=sources,
                dereddened=False,
                deredshifted=False)
        if catalog.args.travis and ni >= catalog.TRAVIS_QUERY_LIMIT:
            break
    catalog.journal_entries()

    # Ibc spectra
    oldname = ''
    file_names = next(
        os.walk(os.path.join(catalog.get_current_task_repo(), 'CfA_SNIbc')))[1]
    for ni, name in enumerate(pbar(file_names, task_str)):
        fullpath = os.path.join(catalog.get_current_task_repo(),
                                'CfA_SNIbc/') + name
        if name.startswith('sn') and is_number(name[2:6]):
            name = 'SN' + name[2:]
        name = catalog.get_preferred_name(name)
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        source = _cfa_archive_source(catalog, name)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        for fi, fname in enumerate(
                sorted(glob(fullpath + '/*'), key=lambda s: s.lower())):
            filename = os.path.basename(fname)
            fileparts = filename.split('-')
            instrument = ''
            year = fileparts[1][:4]
            month = fileparts[1][4:6]
            day = fileparts[1][6:].split('.')[0]
            if len(fileparts) > 2:
                instrument = fileparts[-1].split('.')[0]
            time = _cfa_fractional_day_mjd(year, month, day)
            data = _cfa_read_spectrum_columns(fname)
            wavelengths = data[0]
            fluxes = data[1]
            sources = uniq_cdl([
                source,
                (catalog.entries[name]
                 .add_source(bibcode='2014AJ....147...99M'))
            ])
            catalog.entries[name].add_spectrum(
                u_wavelengths='Angstrom',
                u_fluxes='erg/s/cm^2/Angstrom',
                wavelengths=wavelengths,
                filename=filename,
                fluxes=fluxes,
                u_time='MJD' if time else '',
                time=time,
                instrument=instrument,
                source=sources,
                dereddened=False,
                deredshifted=False)
        if catalog.args.travis and ni >= catalog.TRAVIS_QUERY_LIMIT:
            break
    catalog.journal_entries()

    # Other spectra
    oldname = ''
    file_names = next(
        os.walk(os.path.join(catalog.get_current_task_repo(), 'CfA_Extra')))[1]
    for ni, name in enumerate(pbar_strings(file_names, task_str)):
        fullpath = os.path.join(catalog.get_current_task_repo(),
                                'CfA_Extra/') + name
        if name.startswith('sn') and is_number(name[2:6]):
            name = 'SN' + name[2:]
        name = catalog.get_preferred_name(name)
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        source = _cfa_archive_source(catalog, name)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        for fi, fname in enumerate(
                sorted(glob(fullpath + '/*'), key=lambda s: s.lower())):
            if not os.path.isfile(fname):
                continue
            filename = os.path.basename(fname)
            # Only raw 'sn*.flm' spectra; skip derived/processed variants.
            if ((not filename.startswith('sn') or
                 not filename.endswith('flm') or any(
                     x in filename
                     for x in ['-interp', '-z', '-dered', '-obj', '-gal']))):
                continue
            fileparts = filename.split('.')[0].split('-')
            instrument = ''
            time = ''
            if len(fileparts) > 1:
                year = fileparts[1][:4]
                month = fileparts[1][4:6]
                day = fileparts[1][6:]
                if is_number(year) and is_number(month) and is_number(day):
                    if len(fileparts) > 2:
                        instrument = fileparts[-1]
                    time = _cfa_fractional_day_mjd(year, month, day)
            data = _cfa_read_spectrum_columns(fname)
            wavelengths = data[0]
            # Fluxes in these files are stored in units of 1e-15.
            fluxes = [str(Decimal(x) * Decimal(1.0e-15)) for x in data[1]]
            catalog.entries[name].add_spectrum(
                u_wavelengths='Angstrom',
                u_fluxes='erg/s/cm^2/Angstrom',
                wavelengths=wavelengths,
                filename=filename,
                fluxes=fluxes,
                u_time='MJD' if time else '',
                time=time,
                instrument=instrument,
                source=source,
                dereddened=False,
                deredshifted=False)
        if catalog.args.travis and ni >= catalog.TRAVIS_QUERY_LIMIT:
            break

    catalog.journal_entries()
    return
# Beispiel #25 ("Example #25" — paste/scraping artifact separating examples; original lines: "Beispiel #25" / "0")
def do_test(catalog):
    """Run an end-to-end smoke test of the catalog import machinery.

    Requires an empty catalog. Exercises URL retrieval, repo-path helpers,
    progress-bar utilities, entry creation/saving/deletion, source merging,
    and alias sanitization, raising ``RuntimeError`` on any failure.
    """
    log = catalog.log
    log.info("do_test()")
    task_str = catalog.get_current_task_str()
    log.info("`task_str`: '{}'".format(task_str))

    # The test constructs its own fake entries from scratch; pre-existing
    # entries would invalidate the count checks below.
    if len(catalog.entries) != 0:
        raise RuntimeError("Run test only with empty catalog.")

    # Test URL retrieve functions
    # ---------------------------
    log.info("`args.archived` = '{}', `current_task.archived` = '{}'".format(
        catalog.args.archived, catalog.current_task.archived))

    test_load_url(catalog)

    # Test repo path functions
    # ------------------------
    paths = catalog.PATHS.get_all_repo_folders()
    for path in tq(paths, currenttask='Test tq progress bar.'):
        tprint('Test tprint.')
        log.debug(path)
    paths = catalog.PATHS.get_repo_input_folders()
    for path in pbar_strings(paths, desc='Test pbar_strings progress bar.'):
        log.debug(path)
    boneyard = catalog.PATHS.get_repo_boneyard()
    log.debug(boneyard)

    # Create a Fake Entry, with some Fake Data
    # ----------------------------------------
    _first_event_first_source(catalog)

    log_str = "ADDING SECOND SOURCE"
    log.info("\n\n{}\n{}\n{}\n\n".format("=" * 100, log_str, "=" * 100))

    # Add new Data, from different source, to same fake entry
    # -------------------------------------------------------
    _first_event_second_source(catalog)

    # Make sure output file for this test exists
    outdir, filename = catalog.entries[FAKE_ALIAS_1]._get_save_path()
    save_name = os.path.join(outdir, filename + '.json')
    if not os.path.exists(save_name):
        raise RuntimeError("File not found in '{}'".format(save_name))
    # Delete created test file
    catalog._delete_entry_file(entry_name=FAKE_ALIAS_1)
    # Make sure it was deleted
    if os.path.exists(save_name):
        raise RuntimeError("File not deleted at '{}'".format(save_name))

    # Delete entry in catalog
    del catalog.entries[FAKE_ALIAS_1]
    # Make sure entry was deleted
    if len(catalog.entries) != 0:
        raise RuntimeError("Error deleting test entry!")

    # Add entry back catalog to test later tasks
    _first_event_first_source(catalog)
    _first_event_second_source(catalog)

    # Test some utility functions
    log.debug("Preferred name for 2nd source: " +
              catalog.get_preferred_name(FAKE_ALIAS_2))
    log.debug("Entry exists? " +
              str(catalog.entry_exists(FAKE_ALIAS_2)))
    log.debug("Entry text: " + catalog.entries[FAKE_ALIAS_1].get_entry_text(
        os.path.join(outdir, filename + '.json')))

    # Third source is a duplicate that will be merged
    _first_event_third_source(catalog)

    # Add second event to perform different tests
    _second_event(catalog)

    # Delete name to test name re-addition in sanitize
    # NOTE(review): this iterates the aliases of FAKE_ALIAS_5 but deletes
    # index ii from FAKE_ALIAS_1's alias list -- presumably both names
    # resolve to the same merged entry; confirm against the _second_event
    # fixture before changing.
    for ii, alias in enumerate(
            catalog.entries[FAKE_ALIAS_5][ENTRY.ALIAS].copy()):
        if alias[QUANTITY.VALUE] == FAKE_ALIAS_5:
            del catalog.entries[FAKE_ALIAS_1][ENTRY.ALIAS][ii]
            break

    return
Beispiel #26
0
def do_sdss_photo(catalog):
    """Import metadata and photometry from the SDSS-II supernova survey.

    First pass reads the master metadata table (coordinates, claimed types,
    redshifts, maximum dates, host positions); second pass ingests the
    per-object photometry files (``SDSS/sum/*.sum`` and
    ``SDSS/SMP_Data/*.dat``), attributing each file to the correct bibcode.
    """
    task_str = catalog.get_current_task_str()
    # Load up metadata first
    with open(os.path.join(catalog.get_current_task_repo(),
                           'SDSS/sdsssn_master.dat2'), 'r') as f:
        rows = list(csv.reader(f.read().splitlines()[1:], delimiter=' '))
        # CIDs rejected below as non-supernovae; their photometry is skipped
        # in the second pass.
        ignored_cids = []
        # Column indices of the quantities of interest in the master table.
        columns = {
            SUPERNOVA.RA: 1,
            SUPERNOVA.DEC: 2,
            SUPERNOVA.ALIAS: 4,
            SUPERNOVA.CLAIMED_TYPE: 5,
            SUPERNOVA.REDSHIFT: 11,
            SUPERNOVA.MAX_DATE: 21,
            SUPERNOVA.HOST_RA: 99,
            SUPERNOVA.HOST_DEC: 100
        }
        # Reverse map: column index -> quantity key.
        colnums = {v: k for k, v in columns.items()}

        # '\N' is the table's null marker; blank those cells out.
        rows = [[x.replace('\\N', '') for x in y] for y in rows]

        # Build 'SDSS J...' host designations from host RA/DEC (cols 99/100),
        # truncating each sexagesimal component to 9 characters.
        co = [[x[0], x[99], x[100]] for x in rows if x[99] and x[100]]
        coo = coord([x[1] for x in co], [x[2] for x in co], unit="deg")
        coo = [''.join([y[:9] for y in x.split()]) for x in
               coo.to_string('hmsdms', sep='')]
        hostdict = dict(zip([x[0] for x in co],
                            ['SDSS J' + x[1:] for x in coo]))

        for ri, row in enumerate(pbar(rows, task_str + ": metadata")):
            name = ''

            # Check if type is non-SNe first
            ct = row[columns[SUPERNOVA.CLAIMED_TYPE]]
            al = row[columns[SUPERNOVA.ALIAS]]
            if ct in ['AGN', 'Variable'] and not al:
                catalog.log.info('`{}` is not a SN, not '
                                 'adding.'.format(row[0]))
                ignored_cids.append(row[0])
                continue

            # Add entry
            (name, source) = catalog.new_entry(
                'SDSS-II SN ' + row[0], bibcode='2014arXiv1401.3317S',
                url='http://data.sdss3.org/sas/dr10/boss/papers/supernova/')

            # Add host name
            if row[0] in hostdict:
                catalog.entries[name].add_quantity(SUPERNOVA.HOST,
                                                   hostdict[row[0]], source)

            # Add other metadata
            for cn in colnums:
                key = colnums[cn]
                if not key:
                    continue
                ic = int(cn)
                val = row[ic]
                if not val:
                    continue
                kwargs = {}
                if key == SUPERNOVA.ALIAS:
                    val = 'SN' + val
                elif key in [SUPERNOVA.RA, SUPERNOVA.DEC, SUPERNOVA.HOST_RA,
                             SUPERNOVA.HOST_DEC]:
                    kwargs = {QUANTITY.U_VALUE: 'floatdegrees'}
                    # Normalize negative right ascensions into [0, 360).
                    if key in [SUPERNOVA.RA, SUPERNOVA.HOST_RA]:
                        fval = float(val)
                        if fval < 0.0:
                            val = str(Decimal(360) + Decimal(fval))
                elif key == SUPERNOVA.CLAIMED_TYPE:
                    # Strip photometric/zphot prefixes and the 'SN' prefix.
                    val = val.lstrip('pz').replace('SN', '')
                elif key == SUPERNOVA.REDSHIFT:
                    kwargs[QUANTITY.KIND] = 'spectroscopic'
                    # The following column holds the redshift uncertainty.
                    if float(row[ic + 1]) > 0.0:
                        kwargs[QUANTITY.E_VALUE] = row[ic + 1]
                elif key == SUPERNOVA.MAX_DATE:
                    # Convert MJD of maximum into a 'YYYY/MM/DD' date string.
                    dt = astrotime(float(val), format='mjd').datetime
                    val = make_date_string(dt.year, dt.month, dt.day)
                catalog.entries[name].add_quantity(key, val, source, **kwargs)

    # Photometry pass: files listed in the 2010 paper's index get that
    # bibcode; '.dat' files are from Sako 2014; everything else is from the
    # 2008 release.
    with open(os.path.join(catalog.get_current_task_repo(),
                           'SDSS/2010ApJ...708..661D.txt'), 'r') as sdss_file:
        bibcodes2010 = sdss_file.read().split('\n')
    sdssbands = ['u', 'g', 'r', 'i', 'z']
    file_names = (list(glob(os.path.join(catalog
                                         .get_current_task_repo(),
                                         'SDSS/sum/*.sum'))) +
                  list(glob(os.path.join(catalog
                                         .get_current_task_repo(),
                                         'SDSS/SMP_Data/*.dat'))))
    for fi, fname in enumerate(pbar_strings(file_names, task_str)):
        tsvin = csv.reader(open(fname, 'r'), delimiter=' ',
                           skipinitialspace=True)
        basename = os.path.basename(fname)
        # hasred: file contains a redshift row; rst: first photometry row.
        hasred = True
        rst = 19
        if '.dat' in fname:
            bibcode = '2014arXiv1401.3317S'
            hasred = False
            rst = 4
        elif basename in bibcodes2010:
            bibcode = '2010ApJ...708..661D'
        else:
            bibcode = '2008AJ....136.2306H'

        skip_entry = False
        for rr, row in enumerate(tsvin):
            if skip_entry:
                break
            if rr == 0:
                # Ignore non-SNe objects and those not in metadata table above
                if row[3] in ignored_cids:
                    skip_entry = True
                    continue
                # Ignore IAU names from Sako 2014 as they are unreliable
                if row[5] == 'RA:' or bibcode == '2014arXiv1401.3317S':
                    name = 'SDSS-II SN ' + row[3]
                else:
                    name = 'SN' + row[5]
                name = catalog.add_entry(name)
                source = catalog.entries[name].add_source(bibcode=bibcode)
                catalog.entries[name].add_quantity(
                    SUPERNOVA.ALIAS, name, source)
                catalog.entries[name].add_quantity(
                    SUPERNOVA.ALIAS, 'SDSS-II SN ' + row[3], source)

                if row[5] != 'RA:' and bibcode == '2014arXiv1401.3317S':
                    # Extract the discovery year from the IAU-style name.
                    year = re.findall(r'\d+', name)[0]
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.DISCOVER_DATE, year, source)

                catalog.entries[name].add_quantity(
                    SUPERNOVA.RA, row[-4], source, u_value='floatdegrees')
                catalog.entries[name].add_quantity(
                    SUPERNOVA.DEC, row[-2], source, u_value='floatdegrees')
            if hasred and rr == 1:
                # Negative error values mark a missing uncertainty.
                error = row[4] if float(row[4]) >= 0.0 else ''
                (catalog.entries[name]
                 .add_quantity(SUPERNOVA.REDSHIFT, row[2], source,
                               e_value=error,
                               kind='heliocentric'))
            if rr >= rst:
                # Skip bad measurements
                # NOTE(review): flag values > 1024 are treated as bad data --
                # confirm against the SDSS photometry flag documentation.
                if int(row[0]) > 1024:
                    continue

                mjd = row[1]
                band = sdssbands[int(row[2])]
                magnitude = row[3]
                e_mag = row[4]
                telescope = 'SDSS'
                (catalog.entries[name]
                 .add_photometry(time=mjd, telescope=telescope,
                                 band=band, magnitude=magnitude,
                                 e_magnitude=e_mag, source=source,
                                 system='SDSS'))
        # Journal periodically to bound memory usage.
        if not fi % 1000:
            catalog.journal_entries()

    catalog.journal_entries()
    return
Beispiel #27
0
def do_suspect_photo(catalog):
    """Import photometry from locally saved SUSPECT database HTML pages.

    Each saved page holds one band's photometry table for one event; the
    page's ADS link is mapped to a bibcode via ``suspectreferences.csv``.
    File names are assumed to follow ``<prefix>-<name>-<index>-<band>.html``.
    """
    task_str = catalog.get_current_task_str()
    # Map the literal ADS anchor tag found on a page to its bibcode.
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'suspectreferences.csv'), 'r') as f:
        tsvin = csv.reader(f, delimiter=',', skipinitialspace=True)
        suspectrefdict = {}
        for row in tsvin:
            suspectrefdict[row[0]] = row[1]

    file_names = list(
        sorted(
            glob(
                os.path.join(catalog.get_current_task_repo(),
                             'SUSPECT/*.html'))))
    for datafile in pbar_strings(file_names, task_str):
        basename = os.path.basename(datafile)
        basesplit = basename.split('-')
        oldname = basesplit[1]
        name = catalog.add_entry(oldname)
        # Bare numeric 'SNyyyy' designations get an 'A' suffix.
        if name.startswith('SN') and is_number(name[2:]):
            name = name + 'A'
        band = basesplit[3].split('.')[0]
        # ei: 1-based page index; the first page carries event-level metadata.
        ei = int(basesplit[2])
        # Parse the saved page via a file:// URL so BeautifulSoup sees the
        # same stream it would from the live site.
        bandlink = 'file://' + os.path.abspath(datafile)
        bandresp = urllib.request.urlopen(bandlink)
        bandsoup = BeautifulSoup(bandresp, 'html5lib')
        bandtable = bandsoup.find('table')

        names = bandsoup.body.findAll(text=re.compile('Name'))
        # The last ADS link on the page identifies the data's reference.
        reference = ''
        for link in bandsoup.body.findAll('a'):
            if 'adsabs' in link['href']:
                reference = str(link).replace('"', "'")

        bibcode = unescape(suspectrefdict[reference])
        source = catalog.entries[name].add_source(bibcode=bibcode)

        sec_ref = 'SUSPECT'
        sec_refurl = 'https://www.nhn.ou.edu/~suspect/'
        sec_source = catalog.entries[name].add_source(name=sec_ref,
                                                      url=sec_refurl,
                                                      secondary=True)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, oldname,
                                           sec_source)

        # Event-level metadata is only scraped from the first page.
        if ei == 1:
            year = re.findall(r'\d+', name)[0]
            catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, year,
                                               sec_source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST,
                                               names[1].split(':')[1].strip(),
                                               sec_source)

            redshifts = bandsoup.body.findAll(text=re.compile('Redshift'))
            if redshifts:
                catalog.entries[name].add_quantity(
                    SUPERNOVA.REDSHIFT,
                    redshifts[0].split(':')[1].strip(),
                    sec_source,
                    kind='heliocentric')
            # hvels = bandsoup.body.findAll(text=re.compile('Heliocentric
            # Velocity'))
            # if hvels:
            #     vel = hvels[0].split(':')[1].strip().split(' ')[0]
            #     catalog.entries[name].add_quantity(SUPERNOVA.VELOCITY, vel,
            # sec_source,
            # kind='heliocentric')
            types = bandsoup.body.findAll(text=re.compile('Type'))

            catalog.entries[name].add_quantity(
                SUPERNOVA.CLAIMED_TYPE,
                types[0].split(':')[1].strip().split(' ')[0], sec_source)

        # Photometry rows: col 0 is JD, col 3 magnitude, col 4 its error.
        for r, row in enumerate(bandtable.findAll('tr')):
            if r == 0:
                continue
            col = row.findAll('td')
            mjd = str(jd_to_mjd(Decimal(col[0].contents[0])))
            mag = col[3].contents[0]
            if mag.isspace():
                mag = ''
            else:
                mag = str(mag)
            e_magnitude = col[4].contents[0]
            if e_magnitude.isspace():
                e_magnitude = ''
            else:
                e_magnitude = str(e_magnitude)
            catalog.entries[name].add_photometry(time=mjd,
                                                 u_time='MJD',
                                                 band=band,
                                                 magnitude=mag,
                                                 e_magnitude=e_magnitude,
                                                 source=sec_source + ',' +
                                                 source)

    catalog.journal_entries()
    return
Beispiel #28
0
def do_test(catalog):
    """Run an end-to-end smoke test of the catalog import machinery.

    Requires an empty catalog. Exercises URL retrieval, repo-path helpers,
    progress-bar utilities, entry creation/saving/deletion, source merging,
    and alias sanitization, raising ``RuntimeError`` on any failure.
    """
    log = catalog.log
    log.info("do_test()")
    task_str = catalog.get_current_task_str()
    log.info("`task_str`: '{}'".format(task_str))

    # The test constructs its own fake entries from scratch; pre-existing
    # entries would invalidate the count checks below.
    if len(catalog.entries) != 0:
        raise RuntimeError("Run test only with empty catalog.")

    # Test URL retrieve functions
    # ---------------------------
    log.info("`args.archived` = '{}', `current_task.archived` = '{}'".format(
        catalog.args.archived, catalog.current_task.archived))

    test_load_url(catalog)

    # Test repo path functions
    # ------------------------
    paths = catalog.PATHS.get_all_repo_folders()
    for path in tq(paths, currenttask='Test tq progress bar.'):
        tprint('Test tprint.')
        log.debug(path)
    paths = catalog.PATHS.get_repo_input_folders()
    for path in pbar_strings(paths, desc='Test pbar_strings progress bar.'):
        log.debug(path)
    boneyard = catalog.PATHS.get_repo_boneyard()
    log.debug(boneyard)

    # Create a Fake Entry, with some Fake Data
    # ----------------------------------------
    _first_event_first_source(catalog)

    log_str = "ADDING SECOND SOURCE"
    log.info("\n\n{}\n{}\n{}\n\n".format("=" * 100, log_str, "=" * 100))

    # Add new Data, from different source, to same fake entry
    # -------------------------------------------------------
    _first_event_second_source(catalog)

    # Make sure output file for this test exists
    outdir, filename = catalog.entries[FAKE_ALIAS_1]._get_save_path()
    save_name = os.path.join(outdir, filename + '.json')
    if not os.path.exists(save_name):
        raise RuntimeError("File not found in '{}'".format(save_name))
    # Delete created test file
    catalog._delete_entry_file(entry_name=FAKE_ALIAS_1)
    # Make sure it was deleted
    if os.path.exists(save_name):
        raise RuntimeError("File not deleted at '{}'".format(save_name))

    # Delete entry in catalog
    del catalog.entries[FAKE_ALIAS_1]
    # Make sure entry was deleted
    if len(catalog.entries) != 0:
        raise RuntimeError("Error deleting test entry!")

    # Add entry back catalog to test later tasks
    _first_event_first_source(catalog)
    _first_event_second_source(catalog)

    # Test some utility functions
    log.debug("Preferred name for 2nd source: " +
              catalog.get_preferred_name(FAKE_ALIAS_2))
    log.debug("Entry exists? " + str(catalog.entry_exists(FAKE_ALIAS_2)))
    log.debug("Entry text: " + catalog.entries[FAKE_ALIAS_1].get_entry_text(
        os.path.join(outdir, filename + '.json')))

    # Third source is a duplicate that will be merged
    _first_event_third_source(catalog)

    # Add second event to perform different tests
    _second_event(catalog)

    # Delete name to test name re-addition in sanitize
    # NOTE(review): this iterates the aliases of FAKE_ALIAS_5 but deletes
    # index ii from FAKE_ALIAS_1's alias list -- presumably both names
    # resolve to the same merged entry; confirm against the _second_event
    # fixture before changing.
    for ii, alias in enumerate(
            catalog.entries[FAKE_ALIAS_5][ENTRY.ALIAS].copy()):
        if alias[QUANTITY.VALUE] == FAKE_ALIAS_5:
            del catalog.entries[FAKE_ALIAS_1][ENTRY.ALIAS][ii]
            break

    return
Beispiel #29
0
def do_wiserep_spectra(catalog):
    """Import spectra from a local mirror of the WISeREP repository.

    Each event folder under the current task repo must contain a
    ``README.json`` describing the spectrum files in that folder; every
    described file is parsed and added to the corresponding catalog entry
    as a spectrum, attributed to the file's bibcode (corrected where the
    WISeREP page is known to be wrong) plus a secondary WISeREP source.
    """
    # The spider that refreshes the local WISeREP mirror is deliberately
    # disabled; re-enable manually when the mirror needs updating.
    #if not catalog.args.travis:
    #    from ..input.WISeWEBSpider.wisewebspider import spider
    #    try:
    #        spider(update=True, daysago=7, path="/../../sne-external-WISEREP/")
    #    except:
    #        catalog.log.warning(
    #            'Spider errored, continuing without letting it complete.')

    task_str = catalog.get_current_task_str()
    secondaryreference = 'WISeREP'
    secondaryrefurl = 'http://wiserep.weizmann.ac.il/'
    secondarybibcode = '2012PASP..124..668Y'
    wiserepcnt = 0

    # These are known to be in error on the WISeREP page, either fix or ignore
    # them.
    wiserepbibcorrectdict = {
        '2000AJ....120..367G]': '2000AJ....120..367G',
        'Harutyunyan et al. 2008': '2008A&A...488..383H',
        '0609268': '2007AJ....133...58K',
        '2006ApJ...636...400Q': '2006ApJ...636..400Q',
        '2011ApJ...741...76': '2011ApJ...741...76C',
        '2016PASP...128...961': '2016PASP..128...961',
        '2002AJ....1124..417H': '2002AJ....1124.417H',
        '2013ApJ…774…58D': '2013ApJ...774...58D',
        '2011Sci.333..856S': '2011Sci...333..856S',
        '2014MNRAS.438,368': '2014MNRAS.438..368T',
        '2012MNRAS.420.1135': '2012MNRAS.420.1135S',
        '2012Sci..337..942D': '2012Sci...337..942D',
        'stt1839': '2013MNRAS.436.3614S',
        'arXiv:1605.03136': '2016MNRAS.460.3447T',
        '10.1093/mnras/stt1839': '2013MNRAS.436.3614S'
    }

    file_names = list(glob(os.path.join(catalog.get_current_task_repo(), '*')))
    for folder in pbar_strings(file_names, task_str):
        # Only folders hold events; skip stray top-level metadata files.
        if '.txt' in folder or '.json' in folder:
            continue
        # Normalize the folder name into the catalog's naming conventions.
        name = os.path.basename(folder).strip()
        if name.startswith('sn'):
            name = 'SN' + name[2:]
        if (name.startswith(('CSS', 'SSS', 'MLS')) and ':' not in name):
            name = name.replace('-', ':', 1)
        if name.startswith('MASTERJ'):
            name = name.replace('MASTERJ', 'MASTER OT J')
        if name.startswith('PSNJ'):
            name = name.replace('PSNJ', 'PSN J')
        name = catalog.add_entry(name)

        secondarysource = catalog.entries[name].add_source(
            name=secondaryreference,
            url=secondaryrefurl,
            bibcode=secondarybibcode,
            secondary=True)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name,
                                           secondarysource)

        readme_path = os.path.join(folder, 'README.json')
        if not os.path.exists(readme_path):
            catalog.log.warning(
                'Metadata file not found for event "{}"'.format(name))
            continue

        with open(readme_path, 'r') as f:
            fileinfo = json.loads(f.read())

        # Everything in the folder except the README is a spectrum file.
        files = list(
            set(glob(folder + '/*')) - set(glob(folder + '/README.json')))
        for fname in pbar(files, task_str):
            specfile = os.path.basename(fname)
            if specfile not in fileinfo:
                catalog.log.warning(
                    'Metadata not found for "{}"'.format(fname))
                continue
            claimedtype = fileinfo[specfile]["Type"]
            instrument = fileinfo[specfile]["Instrument"]
            epoch = fileinfo[specfile]["Obs. Date"]
            observer = fileinfo[specfile]["Observer"]
            reducer = fileinfo[specfile]["Reducer"]
            bibcode = fileinfo[specfile]["Bibcode"]
            redshift = fileinfo[specfile]["Redshift"]
            survey = fileinfo[specfile]["Program"]
            reduction = fileinfo[specfile]["Reduction Status"]

            if bibcode:
                newbibcode = bibcode
                if bibcode in wiserepbibcorrectdict:
                    newbibcode = wiserepbibcorrectdict[bibcode]
                # Valid ADS bibcodes are exactly 19 characters; fall back to
                # using the raw string as a source name otherwise.
                if newbibcode and len(newbibcode) == 19:
                    source = catalog.entries[name].add_source(
                        bibcode=unescape(newbibcode))
                else:
                    bibname = unescape(bibcode)
                    source = catalog.entries[name].add_source(name=bibname)
                    catalog.log.warning('Bibcode "{}" is invalid, using as '
                                        '`{}` instead'.format(
                                            bibname, SOURCE.NAME))
                sources = uniq_cdl([source, secondarysource])
            else:
                sources = secondarysource

            if claimedtype not in ['Other']:
                catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                   claimedtype,
                                                   secondarysource)
            catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT, redshift,
                                               secondarysource)

            with open(fname, 'r') as f:
                data = [x.split() for x in f]

                # Keep only numeric (wavelength, flux) rows, dropping comment
                # lines and consecutive duplicate flux values.
                newdata = []
                oldval = ''
                for row in data:
                    if row and '#' not in row[0]:
                        if (len(row) >= 2 and is_number(row[0])
                                and is_number(row[1]) and row[1] != oldval):
                            newdata.append(row)
                            oldval = row[1]

                if not newdata:
                    warnings.warn('Skipped adding spectrum file ' + specfile)
                    continue

                data = [list(i) for i in zip(*newdata)]
                wavelengths = data[0]
                fluxes = data[1]
                errors = ''
                if len(data) == 3:
                    # BUG FIX: the third column holds the flux errors;
                    # previously this re-read column 1, which duplicated the
                    # fluxes as errors.
                    errors = data[2]
                time = str(astrotime(epoch).mjd)

                # Heuristic: physically calibrated fluxes are tiny numbers;
                # anything larger is treated as uncalibrated counts.
                if max([float(x) for x in fluxes]) < 1.0e-5:
                    fluxunit = 'erg/s/cm^2/Angstrom'
                else:
                    fluxunit = 'Uncalibrated'

                catalog.entries[name].add_spectrum(
                    u_wavelengths='Angstrom',
                    errors=errors,
                    u_fluxes=fluxunit,
                    u_errors=fluxunit if errors else '',
                    wavelengths=wavelengths,
                    fluxes=fluxes,
                    u_time='MJD',
                    time=time,
                    instrument=instrument,
                    source=sources,
                    observer=observer,
                    reducer=reducer,
                    reduction=reduction,
                    filename=specfile,
                    survey=survey,
                    redshift=redshift)

        catalog.journal_entries()

        # Under Travis, only process a limited number of events.
        wiserepcnt = wiserepcnt + 1
        if (catalog.args.travis
                and wiserepcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
            break

    return
Beispiel #30
0
def do_donated_photo(catalog):
    """Import donated photometry."""
    task_str = catalog.get_current_task_str()

    # Private donations here #
    if not catalog.args.travis:
        pass
    # End private donations #

    # Ponder 05-12-17 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Ponder-05-12-17', 'meta.json'), 'r') as f:
        metadict = json.loads(f.read())
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Ponder-05-12-17', '*.dat'))
    for path in file_names:
        with open(path, 'r') as f:
            tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        oname = path.split('/')[-1].split('.')[0]
        name, source = catalog.new_entry(
            oname, bibcode=metadict[oname]['bibcode'])
        for row in pbar(tsvin, task_str + ': Ponder ' + oname):
            if row[0][0] == '#' or not is_number(row[-1]):
                continue
            mjd = row[1]
            bandinst = row[2].split('_')
            band = bandinst[0]
            inst = ''
            if len(bandinst) > 1:
                inst = bandinst[1]
            mag = row[3]
            uerr = row[4]
            lerr = row[5]
            photodict = {
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.BAND: band,
                PHOTOMETRY.MAGNITUDE: mag,
                PHOTOMETRY.E_LOWER_MAGNITUDE: lerr,
                PHOTOMETRY.E_UPPER_MAGNITUDE: uerr,
                PHOTOMETRY.SOURCE: source
            }
            if inst:
                photodict[PHOTOMETRY.INSTRUMENT] = inst
            catalog.entries[name].add_photometry(**photodict)

    # Benetti 03-08-17 donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Benetti-03-08-17', '1999E.dat')
    with open(path, 'r') as f:
        tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        name, source = catalog.new_entry(
            'SN1999E', bibcode='2003MNRAS.340..191R')
        bands = None
        for row in tsvin:
            if not row or row[0][0] == '#':
                continue
            if not bands:
                bands = row[2:-2]
                continue
            mjd = row[1]
            tel = row[-1] if 'IAUC' not in row[-1] else None
            for bi, band in enumerate(bands):
                mag = row[2 + 2 * bi]
                if mag == '9999':
                    continue
                err = row[2 + 2 * bi + 1]
                limit = row[6] == 'True'
                photodict = {
                    PHOTOMETRY.TIME: mjd,
                    PHOTOMETRY.U_TIME: 'MJD',
                    PHOTOMETRY.TELESCOPE: tel,
                    PHOTOMETRY.BAND: band,
                    PHOTOMETRY.MAGNITUDE: mag,
                    PHOTOMETRY.SOURCE: source
                }
                if err != '.00':
                    photodict[PHOTOMETRY.E_MAGNITUDE] = str(Decimal(err))
                if tel:
                    photodict[PHOTOMETRY.TELESCOPE] = tel
                catalog.entries[name].add_photometry(**photodict)

    # Nicholl 01-29-17 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Nicholl-01-29-17', 'meta.json'), 'r') as f:
        metadict = json.loads(f.read())
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-01-29-17', '*.txt'))
    for path in file_names:
        data = read(path, format='cds')
        oname = path.split('/')[-1].split('_')[0]
        name, source = catalog.new_entry(
            oname, bibcode=metadict[oname]['bibcode'])
        for row in pbar(data, task_str + ': Nicholl ' + oname):
            photodict = {
                PHOTOMETRY.TIME: str(row['MJD']),
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: str(row['mag']),
                PHOTOMETRY.BAND: row['Filter'],
                PHOTOMETRY.SOURCE: source
            }
            if 'system' in metadict[oname]:
                photodict[PHOTOMETRY.SYSTEM] = metadict[oname]['system']
            if 'l_mag' in row.columns and row['l_mag'] == '>':
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            elif 'e_mag' in row.columns:
                photodict[PHOTOMETRY.E_MAGNITUDE] = str(row['e_mag'])
            if 'Telescope' in row.columns:
                photodict[PHOTOMETRY.TELESCOPE] = row['Telescope']
            catalog.entries[name].add_photometry(**photodict)

    # Arcavi 2016gkg donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Arcavi-01-24-17', 'SN2016gkg.txt')
    with open(path, 'r') as f:
        tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        name, source = catalog.new_entry(
            'SN2016gkg', bibcode='2016arXiv161106451A')
        for row in tsvin:
            if row[0][0] == '#':
                continue
            mjd = str(jd_to_mjd(Decimal(row[0])))
            tel = row[1]
            band = row[3]
            mag = row[4]
            err = row[5]
            limit = row[6] == 'True'
            photodict = {
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.TELESCOPE: tel,
                PHOTOMETRY.BAND: band,
                PHOTOMETRY.MAGNITUDE: mag,
                PHOTOMETRY.SOURCE: source
            }
            if limit:
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            else:
                photodict[PHOTOMETRY.E_MAGNITUDE] = err
            catalog.entries[name].add_photometry(**photodict)

    # Nicholl Gaia16apd donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Nicholl-01-20-17', 'gaia16apd_phot.txt')

    data = read(path, format='cds')
    name, source = catalog.new_entry(
        'Gaia16apd', bibcode='2017ApJ...835L...8N')
    for row in pbar(data, task_str + ': Nicholl Gaia16apd'):
        photodict = {
            PHOTOMETRY.TIME: str(row['MJD']),
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.MAGNITUDE: str(row['mag']),
            PHOTOMETRY.BAND: row['Filter'],
            PHOTOMETRY.TELESCOPE: row['Telescope'],
            PHOTOMETRY.SOURCE: source
        }
        if row['l_mag'] == '>':
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
        else:
            photodict[PHOTOMETRY.E_MAGNITUDE] = str(row['e_mag'])
        catalog.entries[name].add_photometry(**photodict)

    # Kuncarayakti-01-09-17
    datafile = os.path.join(catalog.get_current_task_repo(), 'Donations',
                            'Kuncarayakti-01-09-17', 'SN1978K.dat')
    inpname = os.path.basename(datafile).split('.')[0]
    with open(datafile, 'r') as f:
        tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
        host = False
        for ri, row in enumerate(tsvin):
            if ri == 0:
                continue
            if row[0][0] == '#':
                rsplit = [x.strip('# ') for x in ' '.join(row).split(',')]
                bc = rsplit[0]
                tel, ins = '', ''
                if len(rsplit) > 1:
                    tel = rsplit[1]
                if len(rsplit) > 2:
                    ins = rsplit[2]
                continue
            (name, source) = catalog.new_entry(inpname, bibcode=bc)
            mag = row[4]
            err = row[5]
            mjd = str(astrotime('-'.join(row[:3]), format='iso').mjd)
            photodict = {
                PHOTOMETRY.BAND: row[3],
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: mag.strip('>s'),
                PHOTOMETRY.SOURCE: source
            }
            if is_number(err):
                photodict[PHOTOMETRY.E_MAGNITUDE] = err
            if tel:
                photodict[PHOTOMETRY.TELESCOPE] = tel
            if ins:
                photodict[PHOTOMETRY.INSTRUMENT] = ins
            if '>' in mag:
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            if 's' in mag:
                photodict[PHOTOMETRY.SYNTHETIC] = True
            catalog.entries[name].add_photometry(**photodict)

    # Nugent 01-09-17 donation
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nugent-01-09-17', '*.dat'))
    for datafile in pbar_strings(file_names, task_str + ': Nugent-01-09-17'):
        inpname = os.path.basename(datafile).split('.')[0]
        (name, source) = catalog.new_entry(
            inpname, bibcode='2006ApJ...645..841N')
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
            host = False
            for urow in tsvin:
                row = list(filter(None, urow))
                counts = row[2]
                e_counts = row[3]
                zp = row[4]
                photodict = {
                    PHOTOMETRY.BAND: row[1],
                    PHOTOMETRY.TIME: row[0],
                    PHOTOMETRY.U_TIME: 'MJD',
                    PHOTOMETRY.COUNT_RATE: counts,
                    PHOTOMETRY.E_COUNT_RATE: e_counts,
                    PHOTOMETRY.ZERO_POINT: zp,
                    PHOTOMETRY.TELESCOPE: 'CFHT',
                    PHOTOMETRY.SURVEY: 'SNLS',
                    PHOTOMETRY.SOURCE: source
                }
                set_pd_mag_from_counts(photodict, counts, ec=e_counts, zp=zp,
                                       sig=5.0)
                catalog.entries[name].add_photometry(**photodict)

    # Inserra 09-04-16 donation
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Inserra-09-04-16', '*.txt'))
    for datafile in pbar_strings(file_names, task_str + ': Inserra-09-04-16'):
        inpname = os.path.basename(datafile).split('.')[0]
        (name, source) = catalog.new_entry(
            inpname, bibcode='2013ApJ...770..128I')
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
            host = False
            for row in tsvin:
                if row[0][0] == '#':
                    if row[0] == '#Host':
                        host = True
                        continue
                    host = False
                    bands = row[3:-1]
                    continue
                for bi, ba in enumerate(bands):
                    mag = row[5 + 2 * bi]
                    if not is_number(mag):
                        continue
                    system = 'AB'
                    if ba in ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K']:
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: row[3],
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.BAND: ba,
                        PHOTOMETRY.MAGNITUDE: mag.strip('< '),
                        PHOTOMETRY.SOURCE: source,
                        PHOTOMETRY.SYSTEM: system
                    }
                    if 'ATel' not in row[-1]:
                        photodict[PHOTOMETRY.TELESCOPE] = row[-1]
                    if host:
                        photodict[PHOTOMETRY.HOST] = True
                    if '<' in mag:
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    e_mag = row[5 + 2 * bi + 1].strip('() ')
                    if is_number(e_mag):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = e_mag
                    catalog.entries[name].add_photometry(**photodict)

    # Nicholl 04-01-16 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Nicholl-04-01-16', 'bibcodes.json'), 'r') as f:
        bcs = json.loads(f.read())

    kcorrected = ['SN2011ke', 'SN2011kf', 'SN2012il', 'PTF10hgi', 'PTF11rks']
    ignorephoto = ['PTF10hgi', 'PTF11rks', 'SN2011ke', 'SN2011kf', 'SN2012il']

    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-04-01-16/*.txt'))
    for datafile in pbar_strings(file_names, task_str + ': Nicholl-04-01-16'):
        inpname = os.path.basename(datafile).split('_')[0]
        isk = inpname in kcorrected
        name = catalog.add_entry(inpname)
        bibcode = ''
        for bc in bcs:
            if inpname in bcs[bc]:
                bibcode = bc
        if not bibcode:
            raise ValueError('Bibcode not found!')
        source = catalog.entries[name].add_source(bibcode=bibcode)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, inpname, source)
        if inpname in ignorephoto:
            continue
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
            rtelescope = ''
            for r, rrow in enumerate(tsvin):
                row = list(filter(None, rrow))
                if not row:
                    continue
                if row[0] == '#MJD':
                    bands = [x for x in row[1:] if x and 'err' not in x]
                elif row[0][0] == '#' and len(row[0]) > 1:
                    rtelescope = row[0][1:]
                if row[0][0] == '#':
                    continue
                mjd = row[0]
                if not is_number(mjd):
                    continue
                for v, val in enumerate(row[1::2]):
                    upperlimit = ''
                    mag = val.strip('>')
                    emag = row[2 * v + 2]
                    if '>' in val or (is_number(emag) and float(emag) == 0.0):
                        upperlimit = True
                    if (not is_number(mag) or isnan(float(mag)) or
                            float(mag) > 90.0):
                        continue
                    band = bands[v]
                    instrument = ''
                    survey = ''
                    system = ''
                    telescope = rtelescope
                    if telescope == 'LSQ':
                        instrument = 'QUEST'
                    elif telescope == 'PS1':
                        instrument = 'GPC'
                    elif telescope == 'NTT':
                        instrument = 'EFOSC'
                    elif telescope == 'GROND':
                        instrument = 'GROND'
                        telescope = 'MPI/ESO 2.2m'
                    else:
                        if band == 'NUV':
                            instrument = 'GALEX'
                            telescope = 'GALEX'
                        elif band in ['u', 'g', 'r', 'i', 'z']:
                            if inpname.startswith('PS1'):
                                instrument = 'GPC'
                                telescope = 'PS1'
                                survey = 'Pan-STARRS'
                            elif inpname.startswith('PTF'):
                                telescope = 'P60'
                                survey = 'PTF'
                        elif band.upper() in ['UVW2', 'UVW1', 'UVM2']:
                            instrument = 'UVOT'
                            telescope = 'Swift'
                            if inpname in ['PTF12dam']:
                                system = 'AB'
                    if inpname in ['SCP-06F6']:
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: mjd,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.BAND: band,
                        PHOTOMETRY.MAGNITUDE: mag,
                        PHOTOMETRY.UPPER_LIMIT: upperlimit,
                        PHOTOMETRY.SOURCE: source
                    }
                    if instrument:
                        photodict[PHOTOMETRY.INSTRUMENT] = instrument
                    if telescope:
                        photodict[PHOTOMETRY.TELESCOPE] = telescope
                    if survey:
                        photodict[PHOTOMETRY.SURVEY] = survey
                    if system:
                        photodict[PHOTOMETRY.SYSTEM] = system
                    if (is_number(emag) and
                            not isnan(float(emag)) and float(emag) > 0.0):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = emag
                    if isk:
                        photodict[PHOTOMETRY.KCORRECTED] = True
                    catalog.entries[name].add_photometry(**photodict)
    catalog.journal_entries()

    # Maggi 04-11-16 donation (MC SNRs)
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Maggi-04-11-16', 'LMCSNRs_OpenSNe.csv')) as f:
        tsvin = csv.reader(f, delimiter=',')
        for row in pbar(list(tsvin), task_str + ': Maggi-04-11-16/LMCSNRs'):
            name = 'MCSNR ' + row[0]
            name = catalog.add_entry(name)
            ra = row[2]
            dec = row[3]
            source = (catalog.entries[name]
                      .add_source(bibcode='2016A&A...585A.162M'))
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS,
                'LMCSNR J' + rep_chars(ra, ' :.') + rep_chars(dec, ' :.'),
                source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            if row[1] != 'noname':
                catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1],
                                                   source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[3], source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST, 'LMC', source)
            if row[4] == '1':
                catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                   'Ia', source)
            elif row[4] == '2':
                catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                   'CC', source)
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Maggi-04-11-16', 'SMCSNRs_OpenSNe.csv')) as f:
        tsvin = csv.reader(f, delimiter=',')
        for row in pbar(list(tsvin), task_str + ': Maggi-04-11-16/SMCSNRs'):
            name = 'MCSNR ' + row[0]
            name = catalog.add_entry(name)
            source = catalog.entries[name].add_source(name='Pierre Maggi')
            ra = row[3]
            dec = row[4]
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, 'SMCSNR J' + ra.replace(
                    ':', '')[:6] + dec.replace(':', '')[:7], source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[3], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[4], source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST, 'SMC', source)
    catalog.journal_entries()

    # Galbany 04-18-16 donation
    folders = next(
        os.walk(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/')))[1]
    bibcode = '2016AJ....151...33G'
    for folder in folders:
        infofiles = glob(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/') + folder + '/*.info')
        photfiles = glob(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/') + folder + '/*.out*')

        zhel = ''
        zcmb = ''
        zerr = ''
        for path in infofiles:
            with open(path, 'r') as f:
                lines = f.read().splitlines()
                for line in lines:
                    splitline = line.split(':')
                    field = splitline[0].strip().lower()
                    value = splitline[1].strip()
                    if field == 'name':
                        name = value[:6].upper()
                        name += (value[6].upper()
                                 if len(value) == 7 else value[6:])
                        name = catalog.add_entry(name)
                        source = (catalog.entries[name]
                                  .add_source(bibcode=bibcode))
                        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,
                                                           name, source)
                    elif field == 'type':
                        claimedtype = value.replace('SN', '')
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.CLAIMED_TYPE, claimedtype, source)
                    elif field == 'zhel':
                        zhel = value
                    elif field == 'redshift_error':
                        zerr = value
                    elif field == 'zcmb':
                        zcmb = value
                    elif field == 'ra':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.RA,
                            value,
                            source,
                            u_value='floatdegrees')
                    elif field == 'dec':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DEC,
                            value,
                            source,
                            u_value='floatdegrees')
                    elif field == 'host':
                        value = value.replace('- ', '-').replace('G ', 'G')
                        catalog.entries[name].add_quantity(SUPERNOVA.HOST,
                                                           value, source)
                    elif field == 'e(b-v)_mw':
                        catalog.entries[name].add_quantity(SUPERNOVA.EBV,
                                                           value, source)

        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT,
            zhel,
            source,
            e_value=zerr,
            kind='heliocentric')
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT, zcmb, source, e_value=zerr, kind='cmb')

        for path in photfiles:
            with open(path, 'r') as f:
                band = ''
                lines = f.read().splitlines()
                for li, line in enumerate(lines):
                    if li in [0, 2, 3]:
                        continue
                    if li == 1:
                        band = line.split(':')[-1].strip()
                    else:
                        cols = list(filter(None, line.split()))
                        if not cols:
                            continue
                        catalog.entries[name].add_photometry(
                            time=cols[0],
                            u_time='MJD',
                            magnitude=cols[1],
                            e_magnitude=cols[2],
                            band=band,
                            system=cols[3],
                            telescope=cols[4],
                            source=source)
    catalog.journal_entries()

    # Nicholl 05-03-16
    files = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-05-03-16', '*.txt'))
    name = catalog.add_entry('SN2015bn')
    for fi in pbar(files, task_str + ': Nicholl-05-03-16'):
        if 'late' in fi:
            bc = '2016ApJ...828L..18N'
        else:
            bc = '2016ApJ...826...39N'
        source = catalog.entries[name].add_source(bibcode=bc)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, 'PS15ae', source)
        telescope = os.path.basename(fi).split('_')[1]
        with open(fi, 'r') as f:
            lines = f.read().splitlines()
            for li, line in enumerate(lines):
                if not line or (line[0] == '#' and li != 0):
                    continue
                cols = list(filter(None, line.split()))
                if not cols:
                    continue
                if li == 0:
                    bands = cols[1:]
                    continue

                mjd = cols[0]
                for ci, col in enumerate(cols[1::2]):
                    if not is_number(col) or np.isnan(float(col)):
                        continue

                    band = bands[ci]
                    band_set = ''
                    system = 'Vega'
                    if bands[ci] in ["u'", "g'", "r'", "i'", "z'"]:
                        band_set = 'SDSS'
                        system = 'SDSS'
                    elif telescope == 'ASASSN':
                        band_set = 'ASASSN'
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: mjd,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.MAGNITUDE: col,
                        PHOTOMETRY.BAND: bands[ci],
                        PHOTOMETRY.SOURCE: source,
                        PHOTOMETRY.TELESCOPE: telescope,
                        PHOTOMETRY.SYSTEM: system
                    }
                    if band_set:
                        photodict[PHOTOMETRY.BAND_SET] = band_set
                    emag = cols[2 * ci + 2]
                    if is_number(emag):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = emag
                    else:
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    if telescope == 'Swift':
                        photodict[PHOTOMETRY.INSTRUMENT] = 'UVOT'
                    catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
Beispiel #31
0
def do_sdss_photo(catalog):
    """Import metadata and photometry from the SDSS-II Supernova Survey.

    First loads the ``sdsssn_master.dat2`` metadata table (positions,
    claimed types, redshifts, peak dates, host coordinates), then ingests
    per-object light curves from the ``sum/*.sum`` and ``SMP_Data/*.dat``
    photometry files, attributing each file to the appropriate bibcode.
    """
    task_str = catalog.get_current_task_str()
    # Factor converting a base-10 flux logarithm into magnitudes.
    D25 = Decimal('2.5')

    # Load up metadata first
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'SDSS/sdsssn_master.dat2'), 'r') as f:
        rows = list(csv.reader(f.read().splitlines()[1:], delimiter=' '))
        # Candidate IDs of non-SN objects; their photometry files are
        # skipped in the second pass below.
        ignored_cids = []
        # Quantity key -> column index in the master table.
        columns = {
            SUPERNOVA.RA: 1,
            SUPERNOVA.DEC: 2,
            SUPERNOVA.ALIAS: 4,
            SUPERNOVA.CLAIMED_TYPE: 5,
            SUPERNOVA.REDSHIFT: 11,
            SUPERNOVA.MAX_DATE: 21,
            SUPERNOVA.HOST_RA: 99,
            SUPERNOVA.HOST_DEC: 100
        }
        colnums = {v: k for k, v in columns.items()}

        # The table marks missing values with '\N'; blank them out.
        rows = [[x.replace('\\N', '') for x in y] for y in rows]

        # Build 'SDSS J...'-style host designations from the host
        # coordinates, keyed by candidate ID.
        co = [[x[0], x[99], x[100]] for x in rows if x[99] and x[100]]
        coo = coord([x[1] for x in co], [x[2] for x in co], unit="deg")
        coo = [
            ''.join([y[:9] for y in x.split()])
            for x in coo.to_string('hmsdms', sep='')
        ]
        hostdict = dict(
            zip([x[0] for x in co], ['SDSS J' + x[1:] for x in coo]))

        for row in pbar(rows, task_str + ": metadata"):
            name = ''

            # Check if type is non-SNe first
            ct = row[columns[SUPERNOVA.CLAIMED_TYPE]]
            al = row[columns[SUPERNOVA.ALIAS]]
            if ct in ['AGN', 'Variable', 'Unknown'] and not al:
                catalog.log.info('`{}` is not a SN, not '
                                 'adding.'.format(row[0]))
                ignored_cids.append(row[0])
                continue

            # Add entry
            (name, source) = catalog.new_entry(
                'SDSS-II SN ' + row[0],
                bibcode='2014arXiv1401.3317S',
                url='http://data.sdss3.org/sas/dr10/boss/papers/supernova/')

            # Add host name
            if row[0] in hostdict:
                catalog.entries[name].add_quantity(SUPERNOVA.HOST,
                                                   hostdict[row[0]], source)

            # Add other metadata
            for cn in colnums:
                key = colnums[cn]
                if not key:
                    continue
                ic = int(cn)
                val = row[ic]
                if not val:
                    continue
                kwargs = {}
                if key == SUPERNOVA.ALIAS:
                    val = 'SN' + val
                elif key in [
                        SUPERNOVA.RA, SUPERNOVA.DEC, SUPERNOVA.HOST_RA,
                        SUPERNOVA.HOST_DEC
                ]:
                    kwargs = {QUANTITY.U_VALUE: 'floatdegrees'}
                    if key in [SUPERNOVA.RA, SUPERNOVA.HOST_RA]:
                        fval = float(val)
                        # Wrap negative right ascensions into [0, 360).
                        if fval < 0.0:
                            val = str(Decimal(360) + Decimal(fval))
                elif key == SUPERNOVA.CLAIMED_TYPE:
                    val = val.lstrip('pz').replace('SN', '')
                elif key == SUPERNOVA.REDSHIFT:
                    kwargs[QUANTITY.KIND] = 'spectroscopic'
                    # Values below -1 are sentinels for "no redshift".
                    if float(val) < -1.0:
                        continue
                    # The next column holds the redshift uncertainty.
                    if float(row[ic + 1]) > 0.0:
                        kwargs[QUANTITY.E_VALUE] = row[ic + 1]
                elif key == SUPERNOVA.MAX_DATE:
                    dt = astrotime(float(val), format='mjd').datetime
                    val = make_date_string(dt.year, dt.month, dt.day)
                catalog.entries[name].add_quantity(key, val, source, **kwargs)

    # Objects listed in this file are attributed to the 2010 data-release
    # paper rather than the default bibcodes.
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'SDSS/2010ApJ...708..661D.txt'), 'r') as sdss_file:
        bibcodes2010 = sdss_file.read().split('\n')
    sdssbands = ['u', 'g', 'r', 'i', 'z']
    file_names = (list(
        glob(os.path.join(catalog.get_current_task_repo(), 'SDSS/sum/*.sum')))
                  + list(
                      glob(
                          os.path.join(catalog.get_current_task_repo(),
                                       'SDSS/SMP_Data/*.dat'))))
    skipphoto = ['SDSS-II SN 15557']
    for fi, fname in enumerate(pbar_strings(file_names, task_str)):
        basename = os.path.basename(fname)
        # `.sum` files carry a redshift row and their photometry table
        # starts at row 19; SMP `.dat` files have neither, starting at 4.
        hasred = True
        rst = 19
        if '.dat' in fname:
            bibcode = '2014arXiv1401.3317S'
            hasred = False
            rst = 4
        elif basename in bibcodes2010:
            bibcode = '2010ApJ...708..661D'
        else:
            bibcode = '2008AJ....136.2306H'

        skip_entry = False
        # Context manager ensures each photometry file is closed promptly;
        # the previous code leaked one open handle per file.
        with open(fname, 'r') as fobj:
            tsvin = csv.reader(fobj, delimiter=' ', skipinitialspace=True)
            for rr, row in enumerate(tsvin):
                if skip_entry:
                    break
                if rr == 0:
                    # Ignore non-SNe objects and those not in metadata
                    # table above
                    if row[3] in ignored_cids:
                        skip_entry = True
                        continue
                    # Ignore IAU names from file headers as they are
                    # unreliable
                    oname = 'SDSS-II SN ' + row[3]
                    (name, source) = catalog.new_entry(oname,
                                                       bibcode=bibcode)
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.RA, row[-4], source,
                        u_value='floatdegrees')
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.DEC, row[-2], source,
                        u_value='floatdegrees')
                if hasred and rr == 1:
                    # Negative errors are sentinels for "unknown".
                    error = row[4] if float(row[4]) >= 0.0 else ''
                    val = row[2]
                    if float(val) < -1.0:
                        continue
                    (catalog.entries[name].add_quantity(
                        SUPERNOVA.REDSHIFT,
                        val,
                        source,
                        e_value=error,
                        kind='heliocentric'))
                if rr >= rst:
                    # Skip bad measurements
                    if int(row[0]) > 1024:
                        continue
                    if oname in skipphoto:
                        break

                    mjd = row[1]
                    band = sdssbands[int(row[2])] + "'"
                    magnitude = row[3]
                    e_mag = row[4]
                    fluxd = row[7]
                    e_fluxd = row[8]
                    telescope = 'SDSS'
                    photodict = {
                        PHOTOMETRY.TIME: mjd,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.TELESCOPE: telescope,
                        PHOTOMETRY.BAND: band,
                        PHOTOMETRY.MAGNITUDE: magnitude,
                        PHOTOMETRY.E_MAGNITUDE: e_mag,
                        PHOTOMETRY.FLUX_DENSITY: fluxd,
                        PHOTOMETRY.E_FLUX_DENSITY: e_fluxd,
                        PHOTOMETRY.U_FLUX_DENSITY: 'μJy',
                        PHOTOMETRY.SOURCE: source,
                        PHOTOMETRY.BAND_SET: 'SDSS',
                        PHOTOMETRY.SYSTEM: 'SDSS'
                    }
                    if float(fluxd) > 0.0:
                        # Recover the zero point from
                        # mag = zp - 2.5*log10(flux).
                        photodict[PHOTOMETRY.ZERO_POINT] = str(
                            D25 * Decimal(fluxd).log10() +
                            Decimal(magnitude))
                    ul_sigma = 3.0
                    # Flag bit 32 or a < 3-sigma flux marks the point as
                    # an upper limit.
                    if int(row[0]) & 32 or float(
                            fluxd) < ul_sigma * float(e_fluxd):
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                        photodict[PHOTOMETRY.UPPER_LIMIT_SIGMA] = str(
                            ul_sigma)
                    catalog.entries[name].add_photometry(**photodict)
        if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:
            break
        # Periodically flush entries to bound memory usage.
        if not fi % 1000:
            catalog.journal_entries()

    catalog.journal_entries()
    return
Beispiel #32
0
def do_essence_spectra(catalog):
    """Ingest ESSENCE survey spectra from the task repository.

    Each file name encodes the event identifier, the observation date,
    and an instrument code; the code is translated into canonical
    instrument and telescope names before the spectrum is added.
    """
    task_str = catalog.get_current_task_str()

    # Instrument code (from the file name) -> canonical instrument name.
    insdict = {
        "lris": "LRIS",
        "esi": "ESI",
        "deimos": "DEIMOS",
        "gmos": "GMOS",
        "fors1": "FORS1",
        "bluechannel": "Blue Channel",
        "ldss2": "LDSS-2",
        "ldss3": "LDSS-3",
        "imacs": "IMACS",
        "fast": "FAST"
    }

    # Instrument code -> telescope hosting that instrument.
    teldict = {
        "lris": "Keck",
        "esi": "Keck",
        "deimos": "Keck",
        "gmos": "Gemini",
        "fors1": "VLT",
        "bluechannel": "MMT",
        "ldss2": "Magellan Clay & Baade",
        "ldss3": "Magellan Clay & Baade",
        "imacs": "Magellan Clay & Baade",
        "fast": "FLWO 1.5m"
    }

    spec_paths = glob(
        os.path.join(catalog.get_current_task_repo(), 'ESSENCE', '*'))
    prev_name = ''
    for fi, fname in enumerate(pbar_strings(spec_paths, task_str)):
        spec_file = os.path.basename(fname)
        parts = spec_file.split('_')
        name = catalog.get_preferred_name('ESSENCE ' + parts[0])
        # Flush once all spectra of the previous event are handled.
        if prev_name and name != prev_name:
            catalog.journal_entries()
        prev_name = name

        # Locate the date token: it directly follows the event id, unless
        # a 'comb' marker is interposed; anything else is skipped.
        if is_number(parts[1]):
            date_idx = 1
        elif parts[1] == 'comb':
            date_idx = 2
        else:
            continue

        # Date token is YYYYMMDD plus a fractional-day suffix.
        date_str = parts[date_idx]
        obs_date = datetime.datetime(
            year=int(date_str[:4]),
            month=int(date_str[4:6]),
            day=int(date_str[6:8]))
        obs_date += datetime.timedelta(days=float(date_str[8:]))
        mjd = str(astrotime(obs_date).mjd)

        ins_key = parts[-1].split('.')[0]
        telescope = teldict.get(ins_key, '')
        instrument = insdict.get(ins_key, '')

        # Columns are wavelength and flux; fluxes are stored in units of
        # 1e-15 erg/s/cm^2/Angstrom.
        with open(fname, 'r') as f:
            reader = csv.reader(f, delimiter=' ', skipinitialspace=True)
            cols = [list(c) for c in zip(*reader)]
        wavelengths = cols[0]
        scale = Decimal('1.0e-15')
        fluxes = [str(scale * Decimal(v)) for v in cols[1]]

        name, source = catalog.new_entry(name, bibcode='2016ApJS..224....3N')

        specdict = {
            SPECTRUM.TIME: mjd,
            SPECTRUM.U_TIME: 'MJD',
            SPECTRUM.U_WAVELENGTHS: 'Angstrom',
            SPECTRUM.WAVELENGTHS: wavelengths,
            SPECTRUM.FLUXES: fluxes,
            SPECTRUM.U_FLUXES: 'erg/s/cm^2/Angstrom',
            SPECTRUM.FILENAME: spec_file,
            SPECTRUM.SOURCE: source
        }

        if instrument:
            specdict[SPECTRUM.INSTRUMENT] = instrument
        if telescope:
            specdict[SPECTRUM.TELESCOPE] = telescope

        catalog.entries[name].add_spectrum(**specdict)

        if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:
            break

    catalog.journal_entries()
    return
Beispiel #33
0
def do_cccp(catalog):
    """Import photometry from the Caltech Core-Collapse Project (CCCP).

    Two stages:
    1. Parse the tab-separated tables accompanying Kiewe et al. 2012
       (bibcode 2012ApJ...744...10K) stored in the task repository.
    2. Scrape the CCCP website for per-supernova pages and their per-band
       light-curve ``.txt`` files (cached only; many links 404).

    Parameters
    ----------
    catalog : Catalog
        Catalog object to which entries, sources and photometry are added.
    """
    def _trim_zeros(val):
        # Normalize a numeric string via Decimal, stripping trailing zeros
        # only from the fractional part ('20.50' -> '20.5', '20.00' -> '20').
        # The previous bare `.rstrip('0')` also mangled integers
        # ('200' -> '2') and could leave a dangling dot ('20.00' -> '20.').
        dstr = str(Decimal(val.strip()))
        return dstr.rstrip('0').rstrip('.') if '.' in dstr else dstr

    task_str = catalog.get_current_task_str()
    cccpbands = ['B', 'V', 'R', 'I']
    file_names = list(
        glob(os.path.join(catalog.get_current_task_repo(),
                          'CCCP/apj407397*.txt')))
    for datafile in pbar_strings(file_names, task_str + ': apj407397...'):
        with open(datafile, 'r') as ff:
            tsvin = csv.reader(ff, delimiter='\t', skipinitialspace=True)
            for rr, row in enumerate(tsvin):
                if rr == 0:
                    # First line is a header; nothing to import.
                    continue
                elif rr == 1:
                    # Second line names the supernova covered by this table.
                    name = 'SN' + row[0].split('SN ')[-1]
                    name = catalog.add_entry(name)
                    source = catalog.entries[name].add_source(
                        bibcode='2012ApJ...744...10K')
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.ALIAS, name, source)
                elif rr >= 5:
                    # Data rows: day offset (JD - 2453000), then one
                    # (magnitude, error) column pair per band.
                    mjd = str(Decimal(row[0]) + 53000)
                    for bb, band in enumerate(cccpbands):
                        if row[2 * bb + 1]:
                            # A '>' prefix and an empty error column both
                            # indicate an upper limit.
                            mag = row[2 * bb + 1].strip('>')
                            upl = (not row[2 * bb + 2])
                            (catalog.entries[name]
                             .add_photometry(time=mjd, u_time='MJD', band=band,
                                             magnitude=mag,
                                             e_magnitude=row[2 * bb + 2],
                                             upperlimit=upl, source=source))

    html = catalog.load_url(
        'https://webhome.weizmann.ac.il/home/iair/sc_cccp.html',
        os.path.join(catalog.get_current_task_repo(),
                     'CCCP/sc_cccp.html'))

    soup = BeautifulSoup(html, 'html5lib')
    links = soup.body.findAll("a")
    for link in pbar(links, task_str + ': links'):
        # Use Tag.get() so <a> tags without an 'href' attribute (e.g. named
        # anchors) are skipped instead of raising KeyError.
        if 'sc_sn' in link.get('href', ''):
            name = catalog.add_entry(link.text.replace(' ', ''))
            source = (catalog.entries[name]
                      .add_source(name='CCCP',
                                  url=('https://webhome.weizmann.ac.il'
                                       '/home/iair/sc_cccp.html')))
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

            html2 = catalog.load_url(
                'https://webhome.weizmann.ac.il/home/iair/' + link['href'],
                os.path.join(catalog.get_current_task_repo(),
                             'CCCP/') + link['href'].split('/')[-1])

            soup2 = BeautifulSoup(html2, 'html5lib')
            links2 = soup2.body.findAll("a")
            for link2 in links2:
                # Hoist the attribute lookup; also KeyError-safe (see above).
                href2 = link2.get('href', '')
                if '.txt' in href2 and '_' in href2:
                    # Band name is encoded in the file name, e.g. 'sn_B.txt'.
                    band = href2.split('_')[1].split('.')[0].upper()

                    # Many 404s in photometry, set cache_only = True unless
                    # attempting complete rebuild.
                    html3 = catalog.load_url(
                        'https://webhome.weizmann.ac.il/home/iair/cccp/' +
                        href2,
                        os.path.join(catalog.get_current_task_repo(),
                                     'CCCP/') + href2.split('/')[-1],
                        cache_only=True)

                    if html3 is None:
                        continue

                    # CSV rows of (day offset, magnitude, error).
                    table = [[_trim_zeros(yy) for yy in xx.split(',')]
                             for xx in filter(None, html3.split('\n'))]
                    for row in table:
                        catalog.entries[name].add_photometry(
                            time=str(Decimal(row[0]) + 53000),
                            u_time='MJD', band=band, magnitude=row[1],
                            e_magnitude=row[2], source=source)

    catalog.journal_entries()
    return