Example 1
def do_itep(catalog):
    """Import the Sternberg Astronomical Institute SN light-curve catalogue."""
    task_str = catalog.get_current_task_str()
    itepbadsources = ['2004ApJ...602..571B']
    needsbib = []
    with open(os.path.join(catalog.get_current_task_repo(),
                           'itep-refs.txt'), 'r') as refs_file:
        refrep = refs_file.read().splitlines()
    refrepf = dict(list(zip(refrep[1::2], refrep[::2])))
    fname = os.path.join(catalog.get_current_task_repo(),
                         'itep-lc-cat-28dec2015.txt')
    tsvin = list(csv.reader(open(fname, 'r'),
                            delimiter='|', skipinitialspace=True))
    curname = ''
    for rr, row in enumerate(pbar(tsvin, task_str)):
        if rr <= 1 or len(row) < 7:
            continue
        oldname = 'SN' + row[0].strip()
        mjd = str(jd_to_mjd(Decimal(row[1].strip())))
        band = row[2].strip()
        magnitude = row[3].strip()
        e_magnitude = row[4].strip()
        reference = row[6].strip().strip(',')

        if curname != oldname:
            curname = oldname
            name = catalog.add_entry(oldname)

            sec_reference = ('Sternberg Astronomical Institute '
                             'Supernova Light Curve Catalogue')
            sec_refurl = 'http://dau.itep.ru/sn/node/72'
            sec_source = catalog.entries[name].add_source(
                name=sec_reference, url=sec_refurl, secondary=True)
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, oldname, sec_source)

            year = re.findall(r'\d+', name)[0]
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, year, sec_source)
        if reference in refrepf:
            bibcode = unescape(refrepf[reference])
            source = catalog.entries[name].add_source(bibcode=bibcode)
        else:
            # Reset so a stale bibcode from a previous row is never reused.
            bibcode = ''
            needsbib.append(reference)
            source = catalog.entries[name].add_source(
                name=reference) if reference else ''

        if bibcode not in itepbadsources:
            catalog.entries[name].add_photometry(
                time=mjd, band=band, magnitude=magnitude,
                e_magnitude=e_magnitude,
                source=(sec_source + ',' + source) if source else sec_source)

    # Write out references that could use a bibcode
    needsbib = list(OrderedDict.fromkeys(needsbib))
    with open('../itep-needsbib.txt', 'w') as bib_file:
        bib_file.writelines(['%s\n' % ii for ii in needsbib])
    catalog.journal_entries()
    return
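
Several of these tasks call jd_to_mjd to convert Julian Dates before storing times. The conversion is a fixed offset (MJD = JD - 2400000.5); a minimal sketch, assuming the helper operates on Decimal values as the call sites above suggest:

from decimal import Decimal

def jd_to_mjd(jd):
    """Convert a Julian Date to a Modified Julian Date (MJD = JD - 2400000.5)."""
    return jd - Decimal('2400000.5')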
Example 2
def do_csp_spectra(catalog):
    """Import CSP spectra."""
    oldname = ''
    task_str = catalog.get_current_task_str()
    file_names = glob(os.path.join(catalog.get_current_task_repo(), 'CSP/*'))
    for fi, fname in enumerate(pbar_strings(file_names, task_str)):
        filename = os.path.basename(fname)
        sfile = filename.split('.')
        if sfile[1] == 'txt':
            continue
        sfile = sfile[0]
        fileparts = sfile.split('_')
        name = 'SN20' + fileparts[0][2:]
        name = catalog.get_preferred_name(name)
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        telescope = fileparts[-2]
        instrument = fileparts[-1]
        source = catalog.entries[name].add_source(
            bibcode='2013ApJ...773...53F')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

        data = csv.reader(open(fname, 'r'),
                          delimiter=' ',
                          skipinitialspace=True)
        specdata = []
        for r, row in enumerate(data):
            if row[0] == '#JDate_of_observation:':
                jd = row[1].strip()
                time = str(jd_to_mjd(Decimal(jd)))
            elif row[0] == '#Redshift:':
                catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                                   row[1].strip(), source)
            if r < 7:
                continue
            specdata.append(list(filter(None, [x.strip(' ') for x in row])))
        specdata = [list(i) for i in zip(*specdata)]
        wavelengths = specdata[0]
        fluxes = specdata[1]

        catalog.entries[name].add_spectrum(u_wavelengths='Angstrom',
                                           u_fluxes='erg/s/cm^2/Angstrom',
                                           u_time='MJD',
                                           time=time,
                                           wavelengths=wavelengths,
                                           fluxes=fluxes,
                                           telescope=telescope,
                                           instrument=instrument,
                                           source=source,
                                           deredshifted=True,
                                           filename=filename)
        if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:
            break

    catalog.journal_entries()
    return
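
pbar and pbar_strings wrap the iterables being processed in a progress bar labelled with the task string. A minimal sketch, assuming they are thin tqdm wrappers (the real helpers may add logging or filtering):

from tqdm import tqdm

def pbar(iterable, desc=''):
    # Progress bar over any iterable, labelled with the task string.
    return tqdm(iterable, desc=desc)

def pbar_strings(files, desc=''):
    # Same idea for file-name lists, sorted for a deterministic order.
    return tqdm(sorted(files), desc=desc)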
Example 3
    def set_first_max_light(self):
        """Derive max-light and discovery-date quantities for this entry."""
        if ENTRY.MAX_APP_MAG not in self:
            mldt, mlmag, mlband, mlsource = self._get_max_light()
            if mldt or mlmag or mlband:
                source = self.add_self_source()
                uniq_src = uniq_cdl([source] + mlsource.split(','))
            if mldt:
                max_date = make_date_string(mldt.year, mldt.month, mldt.day)
                self.add_quantity(ENTRY.MAX_DATE, max_date, uniq_src,
                                  derived=True)
            if mlmag:
                mlmag = pretty_num(mlmag)
                self.add_quantity(ENTRY.MAX_APP_MAG, mlmag, uniq_src,
                                  derived=True)
            if mlband:
                self.add_quantity(ENTRY.MAX_BAND, mlband, uniq_src,
                                  derived=True)

        if (self._KEYS.DISCOVER_DATE not in self or
                max([len(x[QUANTITY.VALUE].split('/')) for x in
                     self[self._KEYS.DISCOVER_DATE]]) < 3):
            fldt, flsource = self._get_first_light()
            if fldt:
                source = self.add_self_source()
                disc_date = make_date_string(fldt.year, fldt.month, fldt.day)
                self.add_quantity(
                    self._KEYS.DISCOVER_DATE, disc_date,
                    uniq_cdl([source] + flsource.split(',')),
                    derived=True)

        if self._KEYS.DISCOVER_DATE not in self and self._KEYS.SPECTRA in self:
            minspecmjd = float("+inf")
            for spectrum in self[self._KEYS.SPECTRA]:
                if 'time' in spectrum and 'u_time' in spectrum:
                    if spectrum['u_time'] == 'MJD':
                        mjd = float(spectrum['time'])
                    elif spectrum['u_time'] == 'JD':
                        mjd = float(jd_to_mjd(Decimal(spectrum['time'])))
                    else:
                        continue

                    if mjd < minspecmjd:
                        minspecmjd = mjd
                        minspecsource = spectrum['source']

            if minspecmjd < float("+inf"):
                fldt = astrotime(minspecmjd, format='mjd').datetime
                source = self.add_self_source()
                disc_date = make_date_string(fldt.year, fldt.month, fldt.day)
                self.add_quantity(
                    self._KEYS.DISCOVER_DATE, disc_date,
                    uniq_cdl([source] + minspecsource.split(',')),
                    derived=True)
        return
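
set_first_max_light leans on two small utilities: uniq_cdl, which merges source aliases into a comma-delimited string without duplicates, and make_date_string, which builds the '/'-separated date values that the DISCOVER_DATE check above splits on. Hedged sketches of both, assuming exactly those behaviours:

from collections import OrderedDict

def uniq_cdl(values):
    # Comma-delimited list of unique values, preserving first-seen order.
    return ','.join(OrderedDict.fromkeys(values))

def make_date_string(year, month='', day=''):
    # 'YYYY', 'YYYY/MM', or 'YYYY/MM/DD', zero-padded.
    datestring = str(year)
    if month:
        datestring += '/' + str(month).zfill(2)
        if day:
            datestring += '/' + str(day).zfill(2)
    return datestring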
Example 4
def do_asas_atels(catalog):
    """Import LCs exposed in ASASSN Atels."""
    import gzip

    try:
        with gzip.open('/root/better-atel/atels.json.gz', 'rb') as f:
            ateljson = json.load(f)
    except Exception:
        print('ATel data unavailable, skipping ASAS ATel task.')
        return

    for entry in ateljson:
        if ('asas-sn.osu.edu/light_curve' in entry['body'] and
                'Supernovae' in entry['subjects']):
            matches = re.findall(r'<a\s+[^>]*?href="([^"]*)".*?>(.*?)<\/a>',
                                 entry['body'], re.DOTALL)
            lcurl = ''
            objname = ''
            for match in matches:
                if 'asas-sn.osu.edu/light_curve' in match[0]:
                    lcurl = match[0]
                    objname = re.findall(
                        r'\bASASSN-[0-9][0-9].*?\b', match[1])
                    if len(objname):
                        objname = objname[0]
            if objname and lcurl:
                name, source = catalog.new_entry(
                    objname, srcname='ASAS-SN Sky Patrol',
                    bibcode='2017arXiv170607060K',
                    url='https://asas-sn.osu.edu')
                # Avoid shadowing the csv module with the downloaded text.
                csvtxt = catalog.load_url(
                    lcurl + '.csv',
                    os.path.join(catalog.get_current_task_repo(), 'ASASSN',
                                 objname + '.csv'))
                data = read(csvtxt, format='csv')
                for row in data:
                    mag = str(row['mag'])
                    if float(mag.strip('>')) > 50.0:
                        continue
                    photodict = {
                        PHOTOMETRY.TIME: str(jd_to_mjd(
                            Decimal(str(row['HJD'])))),
                        PHOTOMETRY.MAGNITUDE: mag.strip('>'),
                        PHOTOMETRY.SURVEY: 'ASASSN',
                        PHOTOMETRY.SOURCE: source
                    }
                    if '>' in mag:
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    else:
                        photodict[PHOTOMETRY.E_MAGNITUDE] = str(row['mag_err'])
                    catalog.entries[name].add_photometry(**photodict)
    catalog.journal_entries()
    return
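
The read call used on the downloaded light curve is presumably astropy's ASCII table reader; assuming that import, the Sky Patrol CSV parses directly into a table with HJD, mag, and mag_err columns:

from astropy.io.ascii import read

# 'ASASSN-17xx.csv' is a hypothetical downloaded Sky Patrol file.
data = read('ASASSN-17xx.csv', format='csv')
for row in data:
    print(row['HJD'], row['mag'], row['mag_err'])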
Example 5
def do_donated_photo(catalog):
    """Import donated photometry."""
    task_str = catalog.get_current_task_str()

    # Private donations here #
    if not catalog.args.travis:
        pass
    # End private donations #

    # Ponder 05-12-17 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Ponder-05-12-17', 'meta.json'), 'r') as f:
        metadict = json.loads(f.read())
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Ponder-05-12-17', '*.dat'))
    for path in file_names:
        with open(path, 'r') as f:
            tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        oname = path.split('/')[-1].split('.')[0]
        name, source = catalog.new_entry(oname,
                                         bibcode=metadict[oname]['bibcode'])
        for row in pbar(tsvin, task_str + ': Ponder ' + oname):
            if row[0][0] == '#' or not is_number(row[-1]):
                continue
            mjd = row[1]
            bandinst = row[2].split('_')
            band = bandinst[0]
            inst = ''
            if len(bandinst) > 1:
                inst = bandinst[1]
            mag = row[3]
            uerr = row[4]
            lerr = row[5]
            photodict = {
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.BAND: band,
                PHOTOMETRY.MAGNITUDE: mag,
                PHOTOMETRY.E_LOWER_MAGNITUDE: lerr,
                PHOTOMETRY.E_UPPER_MAGNITUDE: uerr,
                PHOTOMETRY.SOURCE: source
            }
            if inst:
                photodict[PHOTOMETRY.INSTRUMENT] = inst
            catalog.entries[name].add_photometry(**photodict)

    # Benetti 03-08-17 donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Benetti-03-08-17', '1999E.dat')
    with open(path, 'r') as f:
        tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        name, source = catalog.new_entry('SN1999E',
                                         bibcode='2003MNRAS.340..191R')
        bands = None
        for row in tsvin:
            if not row or row[0][0] == '#':
                continue
            if not bands:
                bands = row[2:-2]
                continue
            mjd = row[1]
            # Telescope column sometimes holds an IAUC reference instead.
            tel = row[-1] if 'IAUC' not in row[-1] else None
            for bi, band in enumerate(bands):
                mag = row[2 + 2 * bi]
                if mag == '9999':
                    continue
                err = row[2 + 2 * bi + 1]
                photodict = {
                    PHOTOMETRY.TIME: mjd,
                    PHOTOMETRY.U_TIME: 'MJD',
                    PHOTOMETRY.BAND: band,
                    PHOTOMETRY.MAGNITUDE: mag,
                    PHOTOMETRY.SOURCE: source
                }
                if err != '.00':
                    photodict[PHOTOMETRY.E_MAGNITUDE] = str(Decimal(err))
                if tel:
                    photodict[PHOTOMETRY.TELESCOPE] = tel
                catalog.entries[name].add_photometry(**photodict)

    # Nicholl 01-29-17 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Nicholl-01-29-17', 'meta.json'), 'r') as f:
        metadict = json.loads(f.read())
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-01-29-17', '*.txt'))
    for path in file_names:
        data = read(path, format='cds')
        oname = path.split('/')[-1].split('_')[0]
        name, source = catalog.new_entry(oname,
                                         bibcode=metadict[oname]['bibcode'])
        for row in pbar(data, task_str + ': Nicholl ' + oname):
            photodict = {
                PHOTOMETRY.TIME: str(row['MJD']),
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: str(row['mag']),
                PHOTOMETRY.BAND: row['Filter'],
                PHOTOMETRY.SOURCE: source
            }
            if 'system' in metadict[oname]:
                photodict[PHOTOMETRY.SYSTEM] = metadict[oname]['system']
            if 'l_mag' in row.columns and row['l_mag'] == '>':
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            elif 'e_mag' in row.columns:
                photodict[PHOTOMETRY.E_MAGNITUDE] = str(row['e_mag'])
            if 'Telescope' in row.columns:
                photodict[PHOTOMETRY.TELESCOPE] = row['Telescope']
            catalog.entries[name].add_photometry(**photodict)

    # Arcavi 2016gkg donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Arcavi-01-24-17', 'SN2016gkg.txt')
    with open(path, 'r') as f:
        tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        name, source = catalog.new_entry('SN2016gkg',
                                         bibcode='2016arXiv161106451A')
        for row in tsvin:
            if row[0][0] == '#':
                continue
            mjd = str(jd_to_mjd(Decimal(row[0])))
            tel = row[1]
            band = row[3]
            mag = row[4]
            err = row[5]
            limit = row[6] == 'True'
            photodict = {
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.TELESCOPE: tel,
                PHOTOMETRY.BAND: band,
                PHOTOMETRY.MAGNITUDE: mag,
                PHOTOMETRY.SOURCE: source
            }
            if limit:
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            else:
                photodict[PHOTOMETRY.E_MAGNITUDE] = err
            catalog.entries[name].add_photometry(**photodict)

    # Nicholl Gaia16apd donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Nicholl-01-20-17', 'gaia16apd_phot.txt')

    data = read(path, format='cds')
    name, source = catalog.new_entry('Gaia16apd',
                                     bibcode='2017ApJ...835L...8N')
    for row in pbar(data, task_str + ': Nicholl Gaia16apd'):
        photodict = {
            PHOTOMETRY.TIME: str(row['MJD']),
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.MAGNITUDE: str(row['mag']),
            PHOTOMETRY.BAND: row['Filter'],
            PHOTOMETRY.TELESCOPE: row['Telescope'],
            PHOTOMETRY.SOURCE: source
        }
        if row['l_mag'] == '>':
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
        else:
            photodict[PHOTOMETRY.E_MAGNITUDE] = str(row['e_mag'])
        catalog.entries[name].add_photometry(**photodict)

    # Kuncarayakti-01-09-17
    datafile = os.path.join(catalog.get_current_task_repo(), 'Donations',
                            'Kuncarayakti-01-09-17', 'SN1978K.dat')
    inpname = os.path.basename(datafile).split('.')[0]
    with open(datafile, 'r') as f:
        tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
        for ri, row in enumerate(tsvin):
            if ri == 0:
                continue
            if row[0][0] == '#':
                rsplit = [x.strip('# ') for x in ' '.join(row).split(',')]
                bc = rsplit[0]
                tel, ins = '', ''
                if len(rsplit) > 1:
                    tel = rsplit[1]
                if len(rsplit) > 2:
                    ins = rsplit[2]
                continue
            (name, source) = catalog.new_entry(inpname, bibcode=bc)
            mag = row[4]
            err = row[5]
            mjd = str(astrotime('-'.join(row[:3]), format='iso').mjd)
            photodict = {
                PHOTOMETRY.BAND: row[3],
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: mag.strip('>s'),
                PHOTOMETRY.SOURCE: source
            }
            if is_number(err):
                photodict[PHOTOMETRY.E_MAGNITUDE] = err
            if tel:
                photodict[PHOTOMETRY.TELESCOPE] = tel
            if ins:
                photodict[PHOTOMETRY.INSTRUMENT] = ins
            if '>' in mag:
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            if 's' in mag:
                photodict[PHOTOMETRY.SYNTHETIC] = True
            catalog.entries[name].add_photometry(**photodict)

    # Nugent 01-09-17 donation
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nugent-01-09-17', '*.dat'))
    for datafile in pbar_strings(file_names, task_str + ': Nugent-01-09-17'):
        inpname = os.path.basename(datafile).split('.')[0]
        (name, source) = catalog.new_entry(inpname,
                                           bibcode='2006ApJ...645..841N')
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
            for urow in tsvin:
                row = list(filter(None, urow))
                counts = row[2]
                e_counts = row[3]
                zp = row[4]
                photodict = {
                    PHOTOMETRY.BAND: row[1],
                    PHOTOMETRY.TIME: row[0],
                    PHOTOMETRY.U_TIME: 'MJD',
                    PHOTOMETRY.COUNT_RATE: counts,
                    PHOTOMETRY.E_COUNT_RATE: e_counts,
                    PHOTOMETRY.ZERO_POINT: zp,
                    PHOTOMETRY.TELESCOPE: 'CFHT',
                    PHOTOMETRY.SURVEY: 'SNLS',
                    PHOTOMETRY.SOURCE: source
                }
                set_pd_mag_from_counts(photodict, counts, ec=e_counts, zp=zp)
                catalog.entries[name].add_photometry(**photodict)

    # Inserra 09-04-16 donation
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Inserra-09-04-16', '*.txt'))
    for datafile in pbar_strings(file_names, task_str + ': Inserra-09-04-16'):
        inpname = os.path.basename(datafile).split('.')[0]
        (name, source) = catalog.new_entry(inpname,
                                           bibcode='2013ApJ...770..128I')
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
            host = False
            for row in tsvin:
                if row[0][0] == '#':
                    if row[0] == '#Host':
                        host = True
                        continue
                    host = False
                    bands = row[3:-1]
                    continue
                for bi, ba in enumerate(bands):
                    mag = row[5 + 2 * bi]
                    if not is_number(mag):
                        continue
                    system = 'AB'
                    if ba in ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K']:
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: row[3],
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.BAND: ba,
                        PHOTOMETRY.MAGNITUDE: mag.strip('< '),
                        PHOTOMETRY.SOURCE: source,
                        PHOTOMETRY.SYSTEM: system
                    }
                    if 'ATel' not in row[-1]:
                        photodict[PHOTOMETRY.TELESCOPE] = row[-1]
                    if host:
                        photodict[PHOTOMETRY.HOST] = True
                    if '<' in mag:
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    e_mag = row[5 + 2 * bi + 1].strip('() ')
                    if is_number(e_mag):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = e_mag
                    catalog.entries[name].add_photometry(**photodict)

    # Nicholl 04-01-16 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Nicholl-04-01-16', 'bibcodes.json'), 'r') as f:
        bcs = json.loads(f.read())

    kcorrected = ['SN2011ke', 'SN2011kf', 'SN2012il', 'PTF10hgi', 'PTF11rks']
    ignorephoto = ['PTF10hgi', 'PTF11rks', 'SN2011ke', 'SN2011kf', 'SN2012il']

    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-04-01-16/*.txt'))
    for datafile in pbar_strings(file_names, task_str + ': Nicholl-04-01-16'):
        inpname = os.path.basename(datafile).split('_')[0]
        isk = inpname in kcorrected
        name = catalog.add_entry(inpname)
        bibcode = ''
        for bc in bcs:
            if inpname in bcs[bc]:
                bibcode = bc
        if not bibcode:
            raise ValueError('Bibcode not found!')
        source = catalog.entries[name].add_source(bibcode=bibcode)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, inpname, source)
        if inpname in ignorephoto:
            continue
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
            rtelescope = ''
            for r, rrow in enumerate(tsvin):
                row = list(filter(None, rrow))
                if not row:
                    continue
                if row[0] == '#MJD':
                    bands = [x for x in row[1:] if x and 'err' not in x]
                elif row[0][0] == '#' and len(row[0]) > 1:
                    rtelescope = row[0][1:]
                if row[0][0] == '#':
                    continue
                mjd = row[0]
                if not is_number(mjd):
                    continue
                for v, val in enumerate(row[1::2]):
                    upperlimit = ''
                    mag = val.strip('>')
                    emag = row[2 * v + 2]
                    if '>' in val or (is_number(emag) and float(emag) == 0.0):
                        upperlimit = True
                    if (not is_number(mag) or isnan(float(mag))
                            or float(mag) > 90.0):
                        continue
                    band = bands[v]
                    instrument = ''
                    survey = ''
                    system = ''
                    telescope = rtelescope
                    if telescope == 'LSQ':
                        instrument = 'QUEST'
                    elif telescope == 'PS1':
                        instrument = 'GPC'
                    elif telescope == 'NTT':
                        instrument = 'EFOSC'
                    elif telescope == 'GROND':
                        instrument = 'GROND'
                        telescope = 'MPI/ESO 2.2m'
                    else:
                        if band == 'NUV':
                            instrument = 'GALEX'
                            telescope = 'GALEX'
                        elif band in ['u', 'g', 'r', 'i', 'z']:
                            if inpname.startswith('PS1'):
                                instrument = 'GPC'
                                telescope = 'PS1'
                                survey = 'Pan-STARRS'
                            elif inpname.startswith('PTF'):
                                telescope = 'P60'
                                survey = 'PTF'
                        elif band.upper() in ['UVW2', 'UVW1', 'UVM2']:
                            instrument = 'UVOT'
                            telescope = 'Swift'
                            if inpname in ['PTF12dam']:
                                system = 'AB'
                    if inpname in ['SCP-06F6']:
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: mjd,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.BAND: band,
                        PHOTOMETRY.MAGNITUDE: mag,
                        PHOTOMETRY.UPPER_LIMIT: upperlimit,
                        PHOTOMETRY.SOURCE: source
                    }
                    if instrument:
                        photodict[PHOTOMETRY.INSTRUMENT] = instrument
                    if telescope:
                        photodict[PHOTOMETRY.TELESCOPE] = telescope
                    if survey:
                        photodict[PHOTOMETRY.SURVEY] = survey
                    if system:
                        photodict[PHOTOMETRY.SYSTEM] = system
                    if (is_number(emag) and not isnan(float(emag))
                            and float(emag) > 0.0):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = emag
                    if isk:
                        photodict[PHOTOMETRY.KCORRECTED] = True
                    catalog.entries[name].add_photometry(**photodict)
    catalog.journal_entries()

    # Maggi 04-11-16 donation (MC SNRs)
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Maggi-04-11-16', 'LMCSNRs_OpenSNe.csv')) as f:
        tsvin = csv.reader(f, delimiter=',')
        for row in pbar(list(tsvin), task_str + ': Maggi-04-11-16/LMCSNRs'):
            name = 'MCSNR ' + row[0]
            name = catalog.add_entry(name)
            ra = row[2]
            dec = row[3]
            source = (catalog.entries[name].add_source(
                bibcode='2016A&A...585A.162M'))
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS,
                'LMCSNR J' + rep_chars(ra, ' :.') + rep_chars(dec, ' :.'),
                source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            if row[1] != 'noname':
                catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1],
                                                   source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[3], source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST, 'LMC', source)
            if row[4] == '1':
                catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                   'Ia', source)
            elif row[4] == '2':
                catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                   'CC', source)
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Maggi-04-11-16', 'SMCSNRs_OpenSNe.csv')) as f:
        tsvin = csv.reader(f, delimiter=',')
        for row in pbar(list(tsvin), task_str + ': Maggi-04-11-16/SMCSNRs'):
            name = 'MCSNR ' + row[0]
            name = catalog.add_entry(name)
            source = catalog.entries[name].add_source(name='Pierre Maggi')
            ra = row[3]
            dec = row[4]
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, 'SMCSNR J' + ra.replace(':', '')[:6] +
                dec.replace(':', '')[:7], source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[3], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[4], source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST, 'SMC', source)
    catalog.journal_entries()

    # Galbany 04-18-16 donation
    folders = next(
        os.walk(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/')))[1]
    bibcode = '2016AJ....151...33G'
    for folder in folders:
        infofiles = glob(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/') + folder + '/*.info')
        photfiles = glob(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/') + folder + '/*.out*')

        zhel = ''
        zcmb = ''
        zerr = ''
        for path in infofiles:
            with open(path, 'r') as f:
                lines = f.read().splitlines()
                for line in lines:
                    splitline = line.split(':')
                    field = splitline[0].strip().lower()
                    value = splitline[1].strip()
                    if field == 'name':
                        name = value[:6].upper()
                        name += (value[6].upper()
                                 if len(value) == 7 else value[6:])
                        name = catalog.add_entry(name)
                        source = (catalog.entries[name].add_source(
                            bibcode=bibcode))
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.ALIAS, name, source)
                    elif field == 'type':
                        claimedtype = value.replace('SN', '')
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.CLAIMED_TYPE, claimedtype, source)
                    elif field == 'zhel':
                        zhel = value
                    elif field == 'redshift_error':
                        zerr = value
                    elif field == 'zcmb':
                        zcmb = value
                    elif field == 'ra':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.RA,
                            value,
                            source,
                            u_value='floatdegrees')
                    elif field == 'dec':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DEC,
                            value,
                            source,
                            u_value='floatdegrees')
                    elif field == 'host':
                        value = value.replace('- ', '-').replace('G ', 'G')
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.HOST, value, source)
                    elif field == 'e(b-v)_mw':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.EBV, value, source)

        catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                           zhel,
                                           source,
                                           e_value=zerr,
                                           kind='heliocentric')
        catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                           zcmb,
                                           source,
                                           e_value=zerr,
                                           kind='cmb')

        for path in photfiles:
            with open(path, 'r') as f:
                band = ''
                lines = f.read().splitlines()
                for li, line in enumerate(lines):
                    if li in [0, 2, 3]:
                        continue
                    if li == 1:
                        band = line.split(':')[-1].strip()
                    else:
                        cols = list(filter(None, line.split()))
                        if not cols:
                            continue
                        catalog.entries[name].add_photometry(
                            time=cols[0],
                            u_time='MJD',
                            magnitude=cols[1],
                            e_magnitude=cols[2],
                            band=band,
                            system=cols[3],
                            telescope=cols[4],
                            source=source)
    catalog.journal_entries()

    # Nicholl 05-03-16
    files = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-05-03-16', '*.txt'))
    name = catalog.add_entry('SN2015bn')
    for fi in pbar(files, task_str + ': Nicholl-05-03-16'):
        if 'late' in fi:
            bc = '2016ApJ...828L..18N'
        else:
            bc = '2016ApJ...826...39N'
        source = catalog.entries[name].add_source(bibcode=bc)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, 'PS15ae', source)
        telescope = os.path.basename(fi).split('_')[1]
        with open(fi, 'r') as f:
            lines = f.read().splitlines()
            for li, line in enumerate(lines):
                if not line or (line[0] == '#' and li != 0):
                    continue
                cols = list(filter(None, line.split()))
                if not cols:
                    continue
                if li == 0:
                    bands = cols[1:]
                    continue

                mjd = cols[0]
                for ci, col in enumerate(cols[1::2]):
                    if not is_number(col) or np.isnan(float(col)):
                        continue

                    band = bands[ci]
                    band_set = ''
                    system = 'Vega'
                    if bands[ci] in ["u'", "g'", "r'", "i'", "z'"]:
                        band_set = 'SDSS'
                        system = 'SDSS'
                    elif telescope == 'ASASSN':
                        band_set = 'ASASSN'
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: mjd,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.MAGNITUDE: col,
                        PHOTOMETRY.BAND: bands[ci],
                        PHOTOMETRY.SOURCE: source,
                        PHOTOMETRY.TELESCOPE: telescope,
                        PHOTOMETRY.SYSTEM: system
                    }
                    if band_set:
                        photodict[PHOTOMETRY.BAND_SET] = band_set
                    emag = cols[2 * ci + 2]
                    if is_number(emag):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = emag
                    else:
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    if telescope == 'Swift':
                        photodict[PHOTOMETRY.INSTRUMENT] = 'UVOT'
                    catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
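
The Nugent block above defers the magnitude computation to set_pd_mag_from_counts, which fills the photometry dict from a count rate and zero point. A hedged re-implementation of the standard conversion, m = zp - 2.5 * log10(counts), with the error propagated from the count-rate error (assumes the PHOTOMETRY key constants used by these tasks):

import math

def set_pd_mag_from_counts(photodict, c, ec='', zp=''):
    # Hypothetical sketch: derive magnitude (and error) from count rate c,
    # count-rate error ec, and zero point zp; PHOTOMETRY as in the tasks above.
    counts, zpoint = float(c), float(zp)
    photodict[PHOTOMETRY.MAGNITUDE] = str(zpoint - 2.5 * math.log10(counts))
    if ec:
        # dm = (2.5 / ln 10) * (dc / c)
        photodict[PHOTOMETRY.E_MAGNITUDE] = str(
            2.5 / math.log(10) * float(ec) / counts)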
Example 6
def do_snf_spectra(catalog):
    """Import Nearby Supernova Factory spectra."""
    task_str = catalog.get_current_task_str()
    bibcodes = {
        'SN2005gj': '2006ApJ...650..510A',
        'SN2006D': '2007ApJ...654L..53T',
        'SN2007if': '2010ApJ...713.1073S',
        'SN2011fe': '2013A&A...554A..27P'
    }
    oldname = ''
    snfcnt = 0
    eventfolders = next(
        os.walk(os.path.join(catalog.get_current_task_repo(), 'SNFactory')))[1]
    for eventfolder in pbar(eventfolders, task_str):
        oname = eventfolder
        name = catalog.get_preferred_name(oname)
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        sec_reference = 'Nearby Supernova Factory'
        sec_refurl = 'http://snfactory.lbl.gov/'
        sec_bibcode = '2002SPIE.4836...61A'
        sec_source = catalog.entries[name].add_source(name=sec_reference,
                                                      url=sec_refurl,
                                                      bibcode=sec_bibcode,
                                                      secondary=True)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, oname, sec_source)
        bibcode = bibcodes[oname]
        source = catalog.entries[name].add_source(bibcode=bibcode)
        sources = uniq_cdl([source, sec_source])
        use_path = os.path.join(catalog.get_current_task_repo(), 'SNFactory',
                                eventfolder, '*.dat')
        eventspectra = glob(use_path)
        for spectrum in pbar(eventspectra, task_str):
            filename = os.path.basename(spectrum)
            with open(spectrum) as spec_file:
                specdata = list(
                    csv.reader(spec_file, delimiter=' ',
                               skipinitialspace=True))
            specdata = list(filter(None, specdata))
            newspec = []
            time = ''
            telescope = ''
            instrument = ''
            observer = ''
            observatory = ''
            if 'Keck_20060202_R' in spectrum:
                time = '53768.23469'
            elif 'Spectrum05_276' in spectrum:
                time = pretty_num(astrotime('2005-10-03').mjd, sig=5)
            elif 'Spectrum05_329' in spectrum:
                time = pretty_num(astrotime('2005-11-25').mjd, sig=5)
            elif 'Spectrum05_336' in spectrum:
                time = pretty_num(astrotime('2005-12-02').mjd, sig=5)
            for row in specdata:
                if row[0][0] == '#':
                    joinrow = (' '.join(row)).split('=')
                    if len(joinrow) < 2:
                        continue
                    field = joinrow[0].strip('# ')
                    value = joinrow[1].split('/')[0].strip('\' ')
                    if not time:
                        if field == 'JD':
                            time = str(jd_to_mjd(Decimal(value)))
                        elif field == 'MJD':
                            time = value
                        elif field == 'MJD-OBS':
                            time = value
                    if field == 'OBSERVER':
                        observer = value.capitalize()
                    if field == 'OBSERVAT':
                        observatory = value.capitalize()
                    if field == 'TELESCOP':
                        telescope = value.capitalize()
                    if field == 'INSTRUME':
                        instrument = value.capitalize()
                else:
                    newspec.append(row)
            if not time:
                raise ValueError('Time missing from spectrum.')
            specdata = newspec
            haserrors = (len(specdata[0]) == 3 and specdata[0][2] and
                         specdata[0][2] != 'NaN')
            specdata = [list(i) for i in zip(*specdata)]

            wavelengths = specdata[0]
            fluxes = specdata[1]
            errors = ''
            if haserrors:
                errors = specdata[2]

            unit_err = ('Variance'
                        if oldname == 'SN2011fe' else 'erg/s/cm^2/Angstrom')
            unit_flx = 'erg/s/cm^2/Angstrom'
            catalog.entries[name].add_spectrum(u_wavelengths='Angstrom',
                                               u_fluxes=unit_flx,
                                               u_time='MJD',
                                               time=time,
                                               wavelengths=wavelengths,
                                               fluxes=fluxes,
                                               errors=errors,
                                               observer=observer,
                                               observatory=observatory,
                                               telescope=telescope,
                                               instrument=instrument,
                                               u_errors=unit_err,
                                               source=sources,
                                               filename=filename)
            snfcnt = snfcnt + 1
            if (catalog.args.travis
                    and snfcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
                break

    catalog.journal_entries()
    return
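
pretty_num, used above to stamp spectrum times with five significant figures, rounds a value to a given number of significant digits. A minimal sketch, assuming that is all it does:

def pretty_num(x, sig=4):
    # Render x with sig significant figures, e.g. pretty_num(53646.0, sig=5).
    return ('%.' + str(sig) + 'g') % float(x)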
Example 7
def do_external_fits_spectra(catalog):
    """Import spectra from donated FITS files."""
    fpath = catalog.get_current_task_repo()
    with open(os.path.join(fpath, 'fits', 'meta.json'), 'r') as f:
        metadict = json.loads(f.read())

    fureps = {'erg/cm2/s/A': 'erg/s/cm^2/Angstrom'}
    task_str = catalog.get_current_task_str()
    path_pattern = os.path.join(catalog.get_current_task_repo(), 'fits',
                                '*.fits')
    files = glob(path_pattern)
    for datafile in files:
        filename = datafile.split('/')[-1]
        if filename == 'meta.json':
            continue
        hdulist = fits.open(datafile)
        for oi, obj in enumerate(hdulist[0].header):
            if any(x in ['.', '/'] for x in obj):
                del hdulist[0].header[oi]
        hdulist[0].verify('silentfix')
        hdrkeys = list(hdulist[0].header.keys())
        name = ''
        if filename in metadict:
            if 'name' in metadict[filename]:
                name = metadict[filename]['name']
        if not name:
            name = hdulist[0].header['OBJECT']
        if 'bibcode' in metadict[filename]:
            name, source = catalog.new_entry(
                name, bibcode=metadict[filename]['bibcode'])
        elif 'donator' in metadict[filename]:
            name, source = catalog.new_entry(
                name, srcname=metadict[filename]['donator'])
        else:
            if 'OBSERVER' in hdrkeys:
                name, source = catalog.new_entry(
                    name, srcname=hdulist[0].header['OBSERVER'])
            else:
                name = catalog.add_entry(name)
                source = catalog.entries[name].add_self_source()
        if hdulist[0].header['SIMPLE']:
            if 'JD' in hdrkeys:
                mjd = str(jd_to_mjd(Decimal(str(hdulist[0].header['JD']))))
            elif 'MJD' in hdrkeys:
                mjd = str(hdulist[0].header['MJD'])
            elif 'DATE-OBS' in hdrkeys:
                if 'T' in hdulist[0].header['DATE-OBS']:
                    dateobs = hdulist[0].header['DATE-OBS'].strip()
                elif 'UTC-OBS' in hdrkeys:
                    dateobs = (hdulist[0].header['DATE-OBS'].strip() + 'T' +
                               hdulist[0].header['UTC-OBS'].strip())
                else:
                    raise ValueError(
                        "Couldn't construct an observation date for spectrum.")
                mjd = str(astrotime(dateobs, format='isot').mjd)
            else:
                raise ValueError("Couldn't find JD/MJD for spectrum.")
            w0 = hdulist[0].header['CRVAL1']
            if hdulist[0].header['NAXIS'] == 1:
                wd = hdulist[0].header['CDELT1']
                fluxes = [str(x) for x in list(hdulist[0].data)]
                errors = False
            elif hdulist[0].header['NAXIS'] == 2:
                wd = hdulist[0].header['CD1_1']
                fluxes = [str(x) for x in list(hdulist[0].data)[0]]
                errors = False
            elif hdulist[0].header['NAXIS'] == 3:
                wd = hdulist[0].header['CD1_1']
                fluxes = [str(x) for x in list(hdulist[0].data)[0][0]]
                errors = [str(x) for x in list(hdulist[0].data)[3][0]]
            else:
                print('Warning: Skipping FITS spectrum `{}`.'.format(filename))
                continue
            waves = [str(w0 + wd * x) for x in range(0, len(fluxes))]
        else:
            raise ValueError('Non-simple FITS import not yet supported.')
        if 'BUNIT' in hdrkeys:
            fluxunit = hdulist[0].header['BUNIT']
            if fluxunit in fureps:
                fluxunit = fureps[fluxunit]
        else:
            if max([float(x) for x in fluxes]) < 1.0e-5:
                fluxunit = 'erg/s/cm^2/Angstrom'
            else:
                fluxunit = 'Uncalibrated'
        specdict = {
            SPECTRUM.U_WAVELENGTHS: 'Angstrom',
            SPECTRUM.WAVELENGTHS: waves,
            SPECTRUM.TIME: mjd,
            SPECTRUM.U_TIME: 'MJD',
            SPECTRUM.FLUXES: fluxes,
            SPECTRUM.U_FLUXES: fluxunit,
            SPECTRUM.FILENAME: filename,
            SPECTRUM.SOURCE: source
        }
        if 'TELESCOP' in hdrkeys:
            specdict[SPECTRUM.TELESCOPE] = hdulist[0].header['TELESCOP']
        if 'INSTRUME' in hdrkeys:
            specdict[SPECTRUM.INSTRUMENT] = hdulist[0].header['INSTRUME']
        if 'AIRMASS' in hdrkeys:
            specdict[SPECTRUM.AIRMASS] = hdulist[0].header['AIRMASS']
        if errors:
            specdict[SPECTRUM.ERRORS] = errors
            specdict[SPECTRUM.U_ERRORS] = fluxunit
        if 'SITENAME' in hdrkeys:
            specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['SITENAME']
        elif 'OBSERVAT' in hdrkeys:
            specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['OBSERVAT']
        if 'OBSERVER' in hdrkeys:
            specdict[SPECTRUM.OBSERVER] = hdulist[0].header['OBSERVER']
        catalog.entries[name].add_spectrum(**specdict)
        hdulist.close()
        catalog.journal_entries()
    return
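
The wavelength grid in this task is rebuilt from the FITS WCS keywords as wavelength_i = CRVAL1 + CDELT1 * i. A standalone sketch of that reconstruction ('spec.fits' is a hypothetical 1-D spectrum with those keywords set):

from astropy.io import fits

with fits.open('spec.fits') as hdulist:
    hdr = hdulist[0].header
    w0, wd = hdr['CRVAL1'], hdr['CDELT1']
    waves = [w0 + wd * i for i in range(hdr['NAXIS1'])]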
Example 8
def do_ogle(catalog):
    """Import transients from the OGLE-IV Transient Detection System."""
    task_str = catalog.get_current_task_str()
    basenames = [
        'transients', 'transients/2015', 'transients/2014b', 'transients/2014',
        'transients/2013', 'transients/2012'
    ]
    oglenames = []
    # One update flag per entry in basenames above.
    ogleupdate = [True, False, False, False, False, False]
    for b, bn in enumerate(pbar(basenames, task_str)):
        if catalog.args.update and not ogleupdate[b]:
            continue

        filepath = os.path.join(catalog.get_current_task_repo(), 'OGLE-')
        filepath += bn.replace('/', '-') + '-transients.html'
        htmltxt = catalog.load_url(
            'http://ogle.astrouw.edu.pl/ogle4/' + bn + '/transients.html',
            filepath)
        if not htmltxt:
            continue

        soup = BeautifulSoup(htmltxt, 'html5lib')
        links = soup.findAll('a')
        breaks = soup.findAll('br')
        datalinks = []
        datafnames = []
        for a in links:
            if a.has_attr('href'):
                if '.dat' in a['href']:
                    datalinks.append('http://ogle.astrouw.edu.pl/ogle4/' + bn +
                                     '/' + a['href'])
                    datafnames.append(
                        bn.replace('/', '-') + '-' + a['href'].replace('/',
                                                                       '-'))

        ec = -1
        reference = 'OGLE-IV Transient Detection System'
        refurl = 'http://ogle.astrouw.edu.pl/ogle4/transients/transients.html'
        for bi, br in enumerate(pbar(breaks, task_str)):
            sibling = br.nextSibling
            if 'Ra,Dec=' in sibling:
                line = sibling.replace('\n', '').split('Ra,Dec=')
                name = line[0].strip()
                ec += 1

                if 'NOVA' in name or 'dupl' in name:
                    continue

                if name in oglenames:
                    continue
                oglenames.append(name)

                name = catalog.add_entry(name)

                mySibling = sibling.nextSibling
                atelref = ''
                claimedtype = ''
                while 'Ra,Dec=' not in mySibling:
                    if isinstance(mySibling, NavigableString):
                        if not claimedtype and 'class=' in str(mySibling):
                            claimedtype = re.sub(r'\([^)]*\)', '',
                                                 str(mySibling).split('=')[-1])
                            claimedtype = claimedtype.replace('SN', '').strip()
                            if claimedtype == '-':
                                claimedtype = ''
                    if isinstance(mySibling, Tag):
                        atela = mySibling
                        if (atela and atela.has_attr('href') and
                                'astronomerstelegram' in atela['href']):
                            atelref = atela.contents[0].strip()
                            atelurl = atela['href']
                    mySibling = mySibling.nextSibling
                    if mySibling is None:
                        break

                # nextSibling = sibling.nextSibling
                # if ((isinstance(nextSibling, Tag) and
                #      nextSibling.has_attr('alt') and
                #      nextSibling.contents[0].strip() != 'NED')):
                #     radec = nextSibling.contents[0].strip().split()
                # else:
                #     radec = line[-1].split()
                # ra = radec[0]
                # dec = radec[1]

                fname = os.path.join(catalog.get_current_task_repo(),
                                     'OGLE/') + datafnames[ec]
                csvtxt = catalog.load_url(datalinks[ec], fname)

                lcdat = csvtxt.splitlines()
                sources = [
                    catalog.entries[name].add_source(
                        name=reference, url=refurl)
                ]
                catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name,
                                                   sources[0])
                if atelref and atelref != 'ATel#----':
                    sources.append(catalog.entries[name].add_source(
                        name=atelref, url=atelurl))
                sources = uniq_cdl(sources)

                if name.startswith('OGLE'):
                    if name[4] == '-':
                        if is_number(name[5:9]):
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.DISCOVER_DATE, name[5:9], sources)
                    else:
                        if is_number(name[4:6]):
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.DISCOVER_DATE, '20' + name[4:6],
                                sources)

                # RA and Dec from OGLE pages currently not reliable
                # catalog.entries[name].add_quantity(SUPERNOVA.RA, ra, sources)
                # catalog.entries[name].add_quantity(SUPERNOVA.DEC, dec,
                # sources)
                if claimedtype and claimedtype != '-':
                    catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                       claimedtype, sources)
                elif ('SN' not in name and
                      SUPERNOVA.CLAIMED_TYPE not in catalog.entries[name]):
                    catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                       'Candidate', sources)
                for row in lcdat:
                    row = row.split()
                    mjd = str(jd_to_mjd(Decimal(row[0])))
                    magnitude = row[1]
                    if float(magnitude) > 90.0:
                        continue
                    e_mag = row[2]
                    upperlimit = False
                    if e_mag == '-1' or float(e_mag) > 10.0:
                        e_mag = ''
                        upperlimit = True
                    catalog.entries[name].add_photometry(
                        time=mjd,
                        u_time='MJD',
                        band='I',
                        magnitude=magnitude,
                        e_magnitude=e_mag,
                        system='Vega',
                        source=sources,
                        upperlimit=upperlimit)
                if catalog.args.update:
                    catalog.journal_entries()
                if catalog.args.travis and bi >= catalog.TRAVIS_QUERY_LIMIT:
                    break

        catalog.journal_entries()
    return
Example no. 11
def do_itep(catalog):
    task_str = catalog.get_current_task_str()
    itepignoresources = ['2004ApJ...602..571B', '2013NewA...20...30M']
    itepignorephot = ['SN2006gy']
    needsbib = []
    with open(os.path.join(catalog.get_current_task_repo(), 'itep-refs.txt'),
              'r') as refs_file:
        refrep = refs_file.read().splitlines()
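    # itep-refs.txt lists bibcodes and reference names on alternating
    # lines; pair them into a {reference name: bibcode} lookup.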
    refrepf = dict(list(zip(refrep[1::2], refrep[::2])))
    fname = os.path.join(catalog.get_current_task_repo(),
                         'itep-lc-cat-28dec2015.txt')
    tsvin = list(
        csv.reader(open(fname, 'r'), delimiter='|', skipinitialspace=True))
    curname = ''
    for rr, row in enumerate(pbar(tsvin, task_str)):
        if rr <= 1 or len(row) < 7:
            continue
        oldname = 'SN' + row[0].strip()
        mjd = str(jd_to_mjd(Decimal(row[1].strip())))
        band = row[2].strip()
        magnitude = row[3].strip()
        e_magnitude = row[4].strip()
        reference = row[6].strip().strip(',')

        if curname != oldname:
            curname = oldname
            name = catalog.add_entry(oldname)

            sec_reference = ('Sternberg Astronomical Institute '
                             'Supernova Light Curve Catalogue')
            sec_refurl = 'http://dau.itep.ru/sn/node/72'
            sec_source = catalog.entries[name].add_source(name=sec_reference,
                                                          url=sec_refurl,
                                                          secondary=True)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, oldname,
                                               sec_source)

            year = re.findall(r'\d+', name)[0]
            catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, year,
                                               sec_source)
        if reference in refrepf:
            bibcode = unescape(refrepf[reference])
            source = catalog.entries[name].add_source(bibcode=bibcode)
        else:
            bibcode = ''  # avoid reusing a stale bibcode from a prior row
            needsbib.append(reference)
            source = catalog.entries[name].add_source(
                name=reference) if reference else ''
        if oldname in itepignorephot or bibcode in itepignoresources:
            continue

        photodict = {
            PHOTOMETRY.TIME: mjd,
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.MAGNITUDE: magnitude,
            PHOTOMETRY.SOURCE: uniq_cdl([sec_source, source])
        }
        if e_magnitude:
            photodict[PHOTOMETRY.E_MAGNITUDE] = e_magnitude
        if band.endswith('_SDSS'):
            photodict[PHOTOMETRY.BAND_SET] = 'SDSS'
            photodict[PHOTOMETRY.SYSTEM] = 'SDSS'
            band = band.replace('_SDSS', "'")
        photodict[PHOTOMETRY.BAND] = band
        catalog.entries[name].add_photometry(**photodict)
        if catalog.args.travis and rr >= catalog.TRAVIS_QUERY_LIMIT:
            break

    # Write out references that could use a bibcode
    needsbib = list(OrderedDict.fromkeys(needsbib))
    with open('../itep-needsbib.txt', 'w') as bib_file:
        bib_file.writelines(['%s\n' % ii for ii in needsbib])
    catalog.journal_entries()
    return
Example no. 12
def do_tns_photo(catalog):
    """Load TNS photometry."""
    task_str = catalog.get_current_task_str()
    tns_url = 'https://wis-tns.weizmann.ac.il/'
    try:
        with open('tns.key', 'r') as f:
            tnskey = f.read().splitlines()[0]
    except Exception:
        catalog.log.warning('TNS API key not found; make sure a file named '
                            '`tns.key` containing the key is placed in the '
                            'astrocats directory.')
        tnskey = ''

    bandreps = {'Clear': 'C'}
    fails = 0
    for name in pbar(list(catalog.entries.keys()), task_str):
        if name not in catalog.entries:
            continue
        aliases = catalog.entries[name].get_aliases()
        oname = ''
        for alias in aliases:
            if (alias.startswith(('SN', 'AT')) and is_integer(alias[2:6]) and
                    int(alias[2:6]) >= 2016) and alias[6:].isalpha():
                oname = alias
                break
        if not oname:
            continue
        reqname = oname[2:]
        jsonpath = os.path.join(catalog.get_current_task_repo(), 'TNS',
                                reqname + '.json')
        download_json = True
        if os.path.isfile(jsonpath):
            with open(jsonpath, 'r') as f:
                objdict = json.load(f)
            if ('discoverydate' in objdict and
                (datetime.now() - datetime.strptime(objdict['discoverydate'],
                                                    '%Y-%m-%d %H:%M:%S')
                 ).days > 90):
                download_json = False
        if download_json:
            data = urllib.parse.urlencode({
                'api_key': tnskey,
                'data': json.dumps({
                    'objname': reqname,
                    'photometry': '1'
                })
            }).encode('ascii')
            req = urllib.request.Request(
                'https://wis-tns.weizmann.ac.il/api/get/object', data=data)
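            # Query the TNS object endpoint, retrying up to three times
            # before counting this object as a failure.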
            tries = 0
            objdict = None
            while tries < 3 and not objdict:
                try:
                    objdict = json.loads(
                        urllib.request.urlopen(req, timeout=30).read().decode('ascii'))[
                            'data']['reply']
                except KeyboardInterrupt:
                    raise
                except Exception:
                    catalog.log.warning('API request failed for `{}`.'.format(
                        name))
                    time.sleep(5)
                tries = tries + 1
            if (not objdict or 'objname' not in objdict or
                    not isinstance(objdict['objname'], str)):
                fails = fails + 1
                catalog.log.warning('Object `{}` not found!'.format(name))
                if fails >= 5:
                    break
                continue
            # Cache object here
            with open(jsonpath, 'w') as f:
                json.dump(sortOD(objdict), f, indent='\t',
                          separators=(',', ':'), ensure_ascii=False,
                          sort_keys=True)

        if 'photometry' not in objdict:
            continue
        photoarr = objdict['photometry']
        name, source = catalog.new_entry(
            oname, srcname='Transient Name Server', url=tns_url)
        for photo in photoarr:
            if 'mag' not in photo['flux_unit']['name'].lower():
                catalog.log.warning('Unknown flux unit `{}`.'.format(photo[
                    'flux_unit']['name']))
                continue
            if not photo['jd']:
                continue
            if not photo['flux'] and not photo['limflux']:
                continue
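            # TNS stores detections in `flux` and upper limits in
            # `limflux`; both are magnitudes given the unit check above.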
            mag = photo['flux'] if photo['flux'] else photo['limflux']
            photodict = {
                PHOTOMETRY.TIME: str(jd_to_mjd(Decimal(str(photo['jd'])))),
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: mag,
                PHOTOMETRY.SOURCE: source
            }
            if photo.get('fluxerr', ''):
                photodict[PHOTOMETRY.E_MAGNITUDE] = photo['fluxerr']
            if not photo['flux']:
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            band = photo['filters']['name']
            if band:
                if band in bandreps:
                    band = bandreps[band]
                photodict[PHOTOMETRY.BAND] = band
            if photo.get('observer', ''):
                photodict[PHOTOMETRY.OBSERVER] = photo['observer']
            if 'source_group' in photo:
                survey = photo['source_group']['group_name']
                if survey:
                    photodict[PHOTOMETRY.SURVEY] = survey
            if 'telescope' in photo:
                telescope = photo['telescope']['name']
                if telescope and telescope != 'Other':
                    photodict[PHOTOMETRY.TELESCOPE] = telescope
            if 'instrument' in photo:
                instrument = photo['instrument']['name']
                if instrument and instrument != 'Other':
                    photodict[PHOTOMETRY.INSTRUMENT] = instrument
            system = ''
            if 'Vega' in photo['flux_unit']['name']:
                system = 'Vega'
            elif 'ab' in photo['flux_unit']['name']:
                system = 'AB'
            if system:
                photodict[PHOTOMETRY.SYSTEM] = system
            catalog.entries[name].add_photometry(**photodict)
        catalog.journal_entries()
    return
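
# A minimal, standalone sketch of the TNS object query performed above,
# assuming the same `api/get/object` endpoint and a key stored in `tns.key`;
# the object name '2016A' is a hypothetical placeholder.
import json
import urllib.parse
import urllib.request

with open('tns.key', 'r') as f:
    tnskey = f.read().splitlines()[0]

data = urllib.parse.urlencode({
    'api_key': tnskey,
    'data': json.dumps({'objname': '2016A', 'photometry': '1'})
}).encode('ascii')
req = urllib.request.Request(
    'https://wis-tns.weizmann.ac.il/api/get/object', data=data)
reply = json.loads(
    urllib.request.urlopen(req, timeout=30).read().decode('ascii'))
print(reply.get('data', {}).get('reply', {}).get('objname'))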
Example no. 13
def do_csp_fits_spectra(catalog):
    from astropy.io import fits

    fpath = catalog.get_current_task_repo()

    fureps = {'erg/cm2/s/A': 'erg/s/cm^2/Angstrom'}
    task_str = catalog.get_current_task_str()
    dirs = [x[0] for x in os.walk(os.path.join(fpath, 'Gutierrez_et_al_2017'))]
    files = []
    for dirname in dirs:
        files.extend(glob(os.path.join(dirname, '*.fits')))
    for datafile in pbar(files, task_str):
        filename = datafile.split('/')[-1]
        hdulist = fits.open(datafile)
        for oi, obj in enumerate(hdulist[0].header):
            if any(x in ['.', '/'] for x in obj):
                del hdulist[0].header[oi]
        try:
            hdulist[0].verify('silentfix')
        except Exception as e:
            print(e)
        hdrkeys = list(hdulist[0].header.keys())
        # print(hdrkeys)
        name = datafile.split('/')[-2]
        if name[2] in '6789':
            name = 'SN19' + name[2:]
        elif name != 'SN210':
            name = 'SN20' + name[2:]
        name, source = catalog.new_entry(name, bibcode='2017ApJ...850...89G')
        # for key in hdulist[0].header.keys():
        #     print(key, hdulist[0].header[key])
        mjd = None
        if hdulist[0].header['SIMPLE']:
            if 'JD' in hdrkeys:
                mjd = str(jd_to_mjd(Decimal(str(hdulist[0].header['JD']))))
            elif 'MJD' in hdrkeys:
                mjd = str(hdulist[0].header['MJD'])
            elif 'DATE-OBS' in hdrkeys or 'DATE' in hdrkeys:
                dkey = 'DATE-OBS' if 'DATE-OBS' in hdrkeys else 'DATE'
                dval = hdulist[0].header[dkey]
                if is_number(dval):
                    dkey = 'DATE' if dkey == 'DATE-OBS' else 'DATE-OBS'
                    dval = hdulist[0].header[dkey]
                dateobs = None
                if 'T' in dval:
                    dateobs = dval.strip()
                elif 'UTC-OBS' in hdrkeys:
                    dateobs = dval.strip(
                    ) + 'T' + hdulist[0].header['UTC-OBS'].strip()
                if dateobs is not None:
                    mjd = str(astrotime(dateobs, format='isot').mjd)
            # print(hdulist[0].header)
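            # Reconstruct the linear wavelength grid: a starting wavelength
            # from the header plus a fixed step per pixel (computed below).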
            if 'CRVAL1' in hdulist[0].header:
                w0 = hdulist[0].header['CRVAL1']
            elif hdulist[0].header['CTYPE1'] == 'MULTISPE':
                w0 = float(
                    hdulist[0].header['WAT2_001'].split('"')[-1].split()[3])
            else:
                raise ValueError('Unsupported spectrum format.')
            if hdulist[0].header['NAXIS'] == 1:
                wd = hdulist[0].header['CDELT1']
                fluxes = [str(x) for x in list(hdulist[0].data)]
                errors = False
            elif hdulist[0].header['NAXIS'] == 3:
                wd = hdulist[0].header['CD1_1']
                fluxes = [str(x) for x in list(hdulist[0].data)[0][0]]
                errors = [str(x) for x in list(hdulist[0].data)[-1][0]]
            else:
                print('Warning: Skipping FITS spectrum `{}`.'.format(filename))
                continue
            waves = [str(w0 + wd * x) for x in range(0, len(fluxes))]
        else:
            raise ValueError('Non-simple FITS import not yet supported.')
        if 'BUNIT' in hdrkeys:
            fluxunit = hdulist[0].header['BUNIT']
            if fluxunit in fureps:
                fluxunit = fureps[fluxunit]
        else:
            if max([float(x) for x in fluxes]) < 1.0e-5:
                fluxunit = 'erg/s/cm^2/Angstrom'
            else:
                fluxunit = 'Uncalibrated'
        specdict = {
            SPECTRUM.U_WAVELENGTHS: 'Angstrom',
            SPECTRUM.WAVELENGTHS: waves,
            SPECTRUM.FLUXES: fluxes,
            SPECTRUM.U_FLUXES: fluxunit,
            SPECTRUM.FILENAME: filename,
            SPECTRUM.SOURCE: source
        }
        if mjd is not None:
            specdict[SPECTRUM.TIME] = mjd
            specdict[SPECTRUM.U_TIME] = 'MJD'
        if 'TELESCOP' in hdrkeys:
            specdict[SPECTRUM.TELESCOPE] = hdulist[0].header['TELESCOP']
        if 'INSTRUME' in hdrkeys:
            specdict[SPECTRUM.INSTRUMENT] = hdulist[0].header['INSTRUME']
        if 'AIRMASS' in hdrkeys:
            specdict[SPECTRUM.AIRMASS] = hdulist[0].header['AIRMASS']
        if errors:
            specdict[SPECTRUM.ERRORS] = errors
            specdict[SPECTRUM.U_ERRORS] = fluxunit
        if 'SITENAME' in hdrkeys:
            specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['SITENAME']
        elif 'OBSERVAT' in hdrkeys:
            specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['OBSERVAT']
        if 'OBSERVER' in hdrkeys:
            specdict[SPECTRUM.OBSERVER] = hdulist[0].header['OBSERVER']
        catalog.entries[name].add_spectrum(**specdict)
        hdulist.close()
        catalog.journal_entries()
    return
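
# A minimal sketch of the wavelength reconstruction used above, assuming a
# simple 1-D spectrum with a linear wavelength solution (CRVAL1/CDELT1);
# `spectrum.fits` is a hypothetical file name.
from astropy.io import fits

with fits.open('spectrum.fits') as hdulist:
    header = hdulist[0].header
    w0 = header['CRVAL1']  # wavelength of the first pixel
    wd = header['CDELT1']  # wavelength step per pixel
    fluxes = [str(x) for x in hdulist[0].data]
    waves = [str(w0 + wd * i) for i in range(len(fluxes))]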
Example no. 14
def do_suspect_photo(catalog):
    task_str = catalog.get_current_task_str()
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'suspectreferences.csv'), 'r') as f:
        tsvin = csv.reader(f, delimiter=',', skipinitialspace=True)
        suspectrefdict = {}
        for row in tsvin:
            suspectrefdict[row[0]] = row[1]

    file_names = list(
        sorted(
            glob(
                os.path.join(catalog.get_current_task_repo(),
                             'SUSPECT/*.html'))))
    for datafile in pbar_strings(file_names, task_str):
        basename = os.path.basename(datafile)
        basesplit = basename.split('-')
        oldname = basesplit[1]
        name = catalog.add_entry(oldname)
        if name.startswith('SN') and is_number(name[2:]):
            name = name + 'A'
        band = basesplit[3].split('.')[0]
        ei = int(basesplit[2])
        bandlink = 'file://' + os.path.abspath(datafile)
        bandresp = urllib.request.urlopen(bandlink)
        bandsoup = BeautifulSoup(bandresp, 'html5lib')
        bandtable = bandsoup.find('table')

        names = bandsoup.body.findAll(text=re.compile('Name'))
        reference = ''
        for link in bandsoup.body.findAll('a'):
            if 'adsabs' in link['href']:
                reference = str(link).replace('"', "'")

        bibcode = unescape(suspectrefdict[reference])
        source = catalog.entries[name].add_source(bibcode=bibcode)

        sec_ref = 'SUSPECT'
        sec_refurl = 'https://www.nhn.ou.edu/~suspect/'
        sec_source = catalog.entries[name].add_source(name=sec_ref,
                                                      url=sec_refurl,
                                                      secondary=True)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, oldname,
                                           sec_source)

        if ei == 1:
            year = re.findall(r'\d+', name)[0]
            catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, year,
                                               sec_source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST,
                                               names[1].split(':')[1].strip(),
                                               sec_source)

            redshifts = bandsoup.body.findAll(text=re.compile('Redshift'))
            if redshifts:
                catalog.entries[name].add_quantity(
                    SUPERNOVA.REDSHIFT,
                    redshifts[0].split(':')[1].strip(),
                    sec_source,
                    kind='heliocentric')
            # hvels = bandsoup.body.findAll(text=re.compile('Heliocentric
            # Velocity'))
            # if hvels:
            #     vel = hvels[0].split(':')[1].strip().split(' ')[0]
            #     catalog.entries[name].add_quantity(SUPERNOVA.VELOCITY, vel,
            # sec_source,
            # kind='heliocentric')
            types = bandsoup.body.findAll(text=re.compile('Type'))

            catalog.entries[name].add_quantity(
                SUPERNOVA.CLAIMED_TYPE,
                types[0].split(':')[1].strip().split(' ')[0], sec_source)

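        # Photometry table columns used below: JD (col 0), magnitude
        # (col 3), and magnitude error (col 4).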
        for r, row in enumerate(bandtable.findAll('tr')):
            if r == 0:
                continue
            col = row.findAll('td')
            mjd = str(jd_to_mjd(Decimal(col[0].contents[0])))
            mag = col[3].contents[0]
            if mag.isspace():
                mag = ''
            else:
                mag = str(mag)
            e_magnitude = col[4].contents[0]
            if e_magnitude.isspace():
                e_magnitude = ''
            else:
                e_magnitude = str(e_magnitude)
            catalog.entries[name].add_photometry(time=mjd,
                                                 u_time='MJD',
                                                 band=band,
                                                 magnitude=mag,
                                                 e_magnitude=e_magnitude,
                                                 source=sec_source + ',' +
                                                 source)

    catalog.journal_entries()
    return
Example no. 15
    def generate_event_list(self, event_list):
        """Generate a list of events and/or convert events to JSON format."""
        prt = self._printer
        cidict = OrderedDict()
        intro_shown = False

        new_event_list = []
        previous_file = None
        for event in event_list:
            rsource = {SOURCE.NAME: self._DEFAULT_SOURCE}
            use_self_source = None
            new_events = []
            toffset = Decimal('0')
            if ('.' in event and os.path.isfile(event) and
                    not event.endswith('.json')):
                if not intro_shown:
                    prt.message('converter_info')
                    intro_shown = True

                prt.message('converting_to_json', [event])

                with open(event, 'r') as f:
                    ftxt = f.read()

                # Try a couple of table formats from astropy.
                table = None
                try:
                    table = read(ftxt, Reader=Cds, guess=False)
                except Exception:
                    pass
                else:
                    prt.message('convert_cds')
                    flines = [table.colnames] + [
                        list(x) for x in np.array(table).tolist()]
                    for i in range(len(flines)):
                        flines[i] = [str(x) for x in flines[i]]

                try:
                    table = read(ftxt, Reader=Latex, guess=False)
                except Exception:
                    pass
                else:
                    prt.message('convert_latex')
                    flines = [table.colnames] + [
                        list(x) for x in np.array(table).tolist()]

                if table is None:
                    # Count to try and determine delimiter.
                    delims = [' ', '\t', ',', ';', '|', '&']
                    delimnames = [
                        'Space: ` `', 'Tab: `\t`', 'Comma: `,`',
                        'Semi-colon: `;`', 'Bar: `|`', 'Ampersand: `&`']
                    delim = None
                    delimcounts = [ftxt.count(x) for x in delims]
                    maxdelimcount = max(delimcounts)
                    delim = delims[delimcounts.index(maxdelimcount)]
                    # If two delimiter options are close in count, ask user.
                    for i, x in enumerate(delimcounts):
                        if x > 0.5 * maxdelimcount and delims[i] != delim:
                            delim = None
                    if delim is None:
                        odelims = list(np.array(delimnames)[
                            np.array(delimcounts) > 0])
                        delim = delims[prt.prompt(
                            'delim', kind='option', options=odelims) - 1]
                    ad = list(delims)
                    ad.remove(delim)
                    ad = ''.join(ad)

                    fsplit = ftxt.splitlines()
                    fsplit = [
                        x.replace('$', '').replace('\\pm', delim)
                        .replace('±', delim).replace('(', delim + '(')
                        .strip(ad + '()# ').replace('′', "'")
                        for x in fsplit]
                    flines = []
                    for fs in fsplit:
                        flines.append(list(
                            csv.reader([fs], delimiter=delim))[0])

                    flines = [[
                        x.strip(ad + '#$()\\')
                        for x in y] for y in flines]

                    # Find band columns if they exist and insert error columns
                    # if they don't exist.
                    for fi, fl in enumerate(list(flines)):
                        flcopy = list(fl)
                        offset = 0
                        if not any([is_number(x) for x in fl]):
                            for fci, fc in enumerate(fl):
                                if (fc in self._band_names and
                                    (fci == len(fl) - 1 or
                                     fl[fci + 1] not in self._emagstrs)):
                                    flcopy.insert(fci + 1 + offset, 'e mag')
                                    offset += 1
                        flines[fi] = flcopy

                    # Find the most frequent column count. These are probably
                    # the tables we wish to read.
                    flens = [len(x) for x in flines]
                    ncols = Counter(flens).most_common(1)[0][0]

                    newlines = []
                    potential_name = None
                    for fi, fl in enumerate(flines):
                        if (len(fl) and flens[fi] == 1 and
                            fi < len(flines) - 1 and
                                flens[fi + 1] == ncols and not len(newlines)):
                            potential_name = fl[0]
                        if flens[fi] == ncols:
                            if potential_name is not None and any(
                                    [is_number(x) for x in fl]):
                                newlines.append([potential_name] + list(fl))
                            else:
                                newlines.append(list(fl))
                    flines = newlines
                    for fi, fl in enumerate(flines):
                        if len(fl) == ncols and potential_name is not None:
                            if not any([is_number(x) for x in fl]):
                                flines[fi] = ['name'] + list(fl)

                # If none of the rows contain numeric data, the file
                # is likely a list of transient names.
                if (len(flines) and
                    (not any(any([is_number(x) or x == '' for x in y])
                             for y in flines) or
                     len(flines) == 1)):
                    new_events = [
                        it for s in flines for it in s]

                # If last row is numeric, then likely this is a file with
                # transient data.
                elif (len(flines) > 1 and
                        any([is_number(x) for x in flines[-1]])):

                    # Check that each row has the same number of columns.
                    if len(set([len(x) for x in flines])) > 1:
                        print(set([len(x) for x in flines]))
                        raise ValueError(
                            'Number of columns in each row not '
                            'consistent!')

                    if len(cidict) and len(new_event_list):
                        msg = ('is_file_same' if
                               previous_file else 'is_event_same')
                        reps = [previous_file] if previous_file else [''.join(
                            new_event_list[-1].split('.')[:-1])]
                        text = prt.text(msg, reps)
                        is_same = prt.prompt(text, message=False,
                                             kind='bool')
                        if not is_same:
                            cidict = OrderedDict()

                    # If the first row has no numbers it is likely a header.
                    if not len(cidict):
                        self.assign_columns(cidict, flines)

                    perms = 1
                    for key in cidict:
                        if isinstance(cidict[key], list) and not isinstance(
                                cidict[key], string_types):
                            if cidict[key][0] != 'j':
                                perms = len(cidict[key])

                    # Get event name (if single event) or list of names from
                    # table.
                    event_names = []
                    if ENTRY.NAME in cidict:
                        for fi, fl in enumerate(flines):
                            flines[fi][cidict[ENTRY.NAME]] = name_clean(
                                fl[cidict[ENTRY.NAME]])
                        event_names = list(sorted(set([
                            x[cidict[ENTRY.NAME]] for x in flines[
                                self._first_data:]])))
                        new_events = [x + '.json' for x in event_names]
                    else:
                        new_event_name = '.'.join(event.split(
                            '.')[:-1]).split('/')[-1]
                        text = prt.message(
                            'is_event_name', [new_event_name], prt=False)
                        is_name = prt.prompt(text, message=False,
                                             kind='bool', default='y')
                        if not is_name:
                            new_event_name = ''
                            while new_event_name.strip() == '':
                                new_event_name = prt.prompt(
                                    'enter_name', kind='string')
                        event_names.append(new_event_name)
                        new_events = [new_event_name + '.json']

                    # Create a new event, populate the photometry, and dump
                    # to a JSON file in the run directory.
                    entries = OrderedDict([(x, Entry(name=x))
                                           for x in event_names])

                    # Clean up the data a bit now that we know the column
                    # identities.

                    # Strip common prefixes/suffixes from band names
                    if PHOTOMETRY.BAND in cidict:
                        bi = cidict[PHOTOMETRY.BAND]
                        for d in [True, False]:
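                            # First pass (d=True) strips a shared run of
                            # leading characters, second pass a shared run
                            # of trailing characters.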
                            if not isinstance(bi, (int, np.integer)):
                                break
                            strip_cols = []
                            lens = [len(x[bi])
                                    for x in flines[self._first_data:]]
                            llen = min(lens)
                            ra = range(llen) if d else range(-1, -llen - 1, -1)
                            for li in ra:
                                letter = None
                                for row in list(flines[self._first_data:]):
                                    if letter is None:
                                        letter = row[bi][li]
                                    elif row[bi][li] != letter:
                                        letter = None
                                        break
                                if letter is not None:
                                    strip_cols.append(li)
                                else:
                                    break
                            if len(strip_cols) == llen:
                                break
                            for ri in range(len(flines[self._first_data:])):
                                flines[self._first_data + ri][bi] = ''.join(
                                    [c for i, c in enumerate(flines[
                                        self._first_data + ri][bi])
                                     if (i if d else i - len(flines[
                                         self._first_data + ri][bi])) not in
                                     strip_cols])

                    if (PHOTOMETRY.TIME in cidict and
                            (not isinstance(cidict[PHOTOMETRY.TIME], list) or
                             len(cidict[PHOTOMETRY.TIME]) <= 2)):
                        bi = cidict[PHOTOMETRY.TIME]

                        if isinstance(bi, list) and not isinstance(
                            bi, string_types) and isinstance(
                                bi[0], string_types) and bi[0] == 'jd':
                            bi = bi[-1]

                        mmtimes = [float(x[bi])
                                   for x in flines[self._first_data:]]
                        mintime, maxtime = min(mmtimes), max(mmtimes)

                        if mintime < 10000:
                            while True:
                                try:
                                    response = prt.prompt(
                                        'small_time_offset', kind='string')
                                    if response is not None:
                                        toffset = Decimal(response)
                                    break
                                except Exception:
                                    pass
                        elif maxtime > 60000 and cidict[
                                PHOTOMETRY.TIME][0] != 'jd':
                            isjd = prt.prompt(
                                'large_time_offset',
                                kind='bool', default='y')
                            if isjd:
                                toffset = Decimal('-2400000.5')

                    for row in flines[self._first_data:]:
                        photodict = {}
                        rname = (row[cidict[ENTRY.NAME]]
                                 if ENTRY.NAME in cidict else event_names[0])
                        for pi in range(perms):
                            sources = set()
                            for key in cidict:
                                if key in self._bool_keys:
                                    rval = row[cidict[key]]

                                    if rval in self._FALSE_VALS:
                                        rval = False
                                    elif rval in self._TRUE_VALS:
                                        rval = True

                                    if not isinstance(rval, bool):
                                        try:
                                            rval = bool(rval)
                                        except Exception:
                                            pass

                                    if not isinstance(rval, bool):
                                        try:
                                            rval = bool(float(rval))
                                        except Exception:
                                            rval = True

                                    if not rval:
                                        continue
                                    row[cidict[key]] = rval
                                elif key == 'reference':
                                    if (isinstance(cidict[key],
                                                   string_types) and
                                            len(cidict[key]) == 19):
                                        new_src = entries[rname].add_source(
                                            bibcode=cidict[key])
                                        sources.update(new_src)
                                        row[
                                            cidict[key]] = new_src
                                elif key == ENTRY.NAME:
                                    continue
                                elif (isinstance(key, Key) and
                                        key.type == KEY_TYPES.TIME and
                                        isinstance(cidict[key], list) and not
                                        isinstance(cidict[key],
                                                   string_types)):
                                    tval = np.array(row)[np.array(cidict[key][
                                        1:], dtype=int)]
                                    if cidict[key][0] == 'j':
                                        date = '-'.join([x.zfill(2) for x in
                                                         tval])
                                        date = self._month_rep.sub(
                                            lambda x: self._MONTH_IDS[
                                                x.group()], date)
                                        photodict[key] = str(
                                            astrotime(date, format='isot').mjd)
                                    elif cidict[key][0] == 'jd':
                                        photodict[key] = str(
                                            jd_to_mjd(Decimal(tval[-1])))
                                    continue

                                val = cidict[key]
                                if (isinstance(val, list) and not
                                        isinstance(val, string_types)):
                                    val = val[pi]
                                    if isinstance(val, string_types):
                                        if val != '':
                                            photodict[key] = val
                                    else:
                                        photodict[key] = row[val]
                                else:
                                    if isinstance(val, string_types):
                                        if val != '':
                                            photodict[key] = val
                                    else:
                                        photodict[key] = row[val]
                            if self._data_type == 2:
                                if self._zp:
                                    photodict[PHOTOMETRY.ZERO_POINT] = self._zp
                                else:
                                    photodict[PHOTOMETRY.ZERO_POINT] = (
                                        row[cidict[PHOTOMETRY.ZERO_POINT][pi]]
                                        if isinstance(cidict[
                                            PHOTOMETRY.ZERO_POINT], list) else
                                        row[cidict[PHOTOMETRY.ZERO_POINT]])
                                zpp = photodict[PHOTOMETRY.ZERO_POINT]
                                cc = (
                                    row[cidict[PHOTOMETRY.COUNT_RATE][pi]] if
                                    isinstance(cidict[
                                        PHOTOMETRY.COUNT_RATE], list) else
                                    row[cidict[PHOTOMETRY.COUNT_RATE]])
                                ecc = (
                                    row[cidict[PHOTOMETRY.E_COUNT_RATE][pi]] if
                                    isinstance(cidict[
                                        PHOTOMETRY.E_COUNT_RATE], list) else
                                    row[cidict[PHOTOMETRY.E_COUNT_RATE]])
                                if '<' in cc:
                                    set_pd_mag_from_counts(
                                        photodict, ec=cc.strip('<'), zp=zpp)
                                else:
                                    set_pd_mag_from_counts(
                                        photodict, c=cc, ec=ecc, zp=zpp)
                            elif self._data_type == 3:
                                photodict[
                                    PHOTOMETRY.U_FLUX_DENSITY] = self._ufd
                                if PHOTOMETRY.U_FLUX_DENSITY in cidict:
                                    photodict[PHOTOMETRY.U_FLUX_DENSITY] = (
                                        row[cidict[
                                            PHOTOMETRY.U_FLUX_DENSITY][pi]]
                                        if isinstance(cidict[
                                            PHOTOMETRY.
                                            U_FLUX_DENSITY], list) else
                                        row[cidict[PHOTOMETRY.U_FLUX_DENSITY]])
                                if photodict[
                                        PHOTOMETRY.U_FLUX_DENSITY] == '':
                                    photodict[
                                        PHOTOMETRY.U_FLUX_DENSITY] = 'µJy'
                                fd = (
                                    row[cidict[PHOTOMETRY.FLUX_DENSITY][pi]] if
                                    isinstance(cidict[
                                        PHOTOMETRY.FLUX_DENSITY], list) else
                                    row[cidict[PHOTOMETRY.FLUX_DENSITY]])
                                efd = (
                                    row[cidict[
                                        PHOTOMETRY.E_FLUX_DENSITY][pi]] if
                                    isinstance(cidict[
                                        PHOTOMETRY.E_FLUX_DENSITY], list) else
                                    row[cidict[PHOTOMETRY.E_FLUX_DENSITY]])

                                mult = Decimal('1')
                                ufd = photodict[PHOTOMETRY.U_FLUX_DENSITY]
                                if ufd.lower() in [
                                        'mjy', 'millijy', 'millijansky']:
                                    mult = Decimal('1e3')
                                elif ufd.lower() in ['jy', 'jansky']:
                                    mult = Decimal('1e6')

                                if '<' in fd:
                                    set_pd_mag_from_flux_density(
                                        photodict, efd=str(
                                            Decimal(fd.strip('<')) * mult))
                                else:
                                    set_pd_mag_from_flux_density(
                                        photodict, fd=Decimal(fd) * mult,
                                        efd=Decimal(efd) * mult)
                            if not len(sources):
                                if use_self_source is None:
                                    sopts = [
                                        ('Bibcode', 'b'), ('Last name', 'l')]
                                    if self._require_source:
                                        sel_str = 'must_select_source'
                                    else:
                                        sel_str = 'select_source'
                                    text = prt.text(sel_str)
                                    skind = prt.prompt(
                                        text, kind='option',
                                        options=sopts, default='b',
                                        none_string=(
                                            None if self._require_source else
                                            'Neither, tag MOSFiT as source'))
                                    if skind == 'b':
                                        rsource = {}
                                        bibcode = ''

                                        while len(bibcode) != 19:
                                            bibcode = prt.prompt(
                                                'bibcode',
                                                kind='string',
                                                allow_blank=False
                                            )
                                            bibcode = bibcode.strip()
                                            if (re.search(
                                                r'[0-9]{4}..........[\.0-9]{4}'
                                                '[A-Za-z]', bibcode)
                                                    is None):
                                                bibcode = ''
                                        rsource[
                                            SOURCE.BIBCODE] = bibcode
                                        use_self_source = False
                                    elif skind == 'l':
                                        rsource = {}
                                        last_name = prt.prompt(
                                            'last_name', kind='string'
                                        )
                                        rsource[
                                            SOURCE.NAME] = (
                                                last_name.strip().title() +
                                                ' et al., in preparation')
                                        use_self_source = False
                                    elif skind == 'n':
                                        use_self_source = True

                                photodict[
                                    PHOTOMETRY.SOURCE] = entries[
                                        rname].add_source(**rsource)

                            if any([x in photodict.get(
                                    PHOTOMETRY.MAGNITUDE, '')
                                    for x in ['<', '>']]):
                                photodict[PHOTOMETRY.UPPER_LIMIT] = True
                                photodict[
                                    PHOTOMETRY.MAGNITUDE] = photodict[
                                        PHOTOMETRY.MAGNITUDE].strip('<>')

                            if '<' in photodict.get(PHOTOMETRY.COUNT_RATE, ''):
                                photodict[PHOTOMETRY.UPPER_LIMIT] = True
                                photodict[
                                    PHOTOMETRY.COUNT_RATE] = photodict[
                                        PHOTOMETRY.COUNT_RATE].strip('<')
                                if PHOTOMETRY.E_COUNT_RATE in photodict:
                                    del photodict[PHOTOMETRY.E_COUNT_RATE]

                            if '<' in photodict.get(
                                    PHOTOMETRY.FLUX_DENSITY, ''):
                                photodict[PHOTOMETRY.UPPER_LIMIT] = True
                                photodict[
                                    PHOTOMETRY.FLUX_DENSITY] = photodict[
                                        PHOTOMETRY.FLUX_DENSITY].strip('<')
                                if PHOTOMETRY.E_FLUX_DENSITY in photodict:
                                    del photodict[PHOTOMETRY.E_FLUX_DENSITY]

                            # Apply offset time if set.
                            if (PHOTOMETRY.TIME in photodict and
                                    toffset != Decimal('0')):
                                photodict[PHOTOMETRY.TIME] = str(
                                    Decimal(photodict[PHOTOMETRY.TIME]) +
                                    toffset)

                            # Skip entries for which key values are not
                            # expected type.
                            if not all([
                                is_number(photodict.get(x, ''))
                                for x in photodict.keys() if
                                (PHOTOMETRY.get_key_by_name(x).type ==
                                 KEY_TYPES.NUMERIC)]):
                                continue

                            # Skip placeholder values.
                            if float(photodict.get(
                                    PHOTOMETRY.MAGNITUDE, 0.0)) > 50.0:
                                continue

                            # Add system if specified by user.
                            if (self._system is not None and
                                    PHOTOMETRY.SYSTEM not in photodict):
                                photodict[PHOTOMETRY.SYSTEM] = self._system

                            # Remove keys not in the `PHOTOMETRY` class.
                            for key in list(photodict.keys()):
                                if key not in PHOTOMETRY.vals():
                                    del photodict[key]

                            # Add the photometry.
                            entries[rname].add_photometry(
                                **photodict)

                    merge_with_existing = None
                    for ei, entry in enumerate(entries):
                        entries[entry].sanitize()
                        if os.path.isfile(new_events[ei]):
                            if merge_with_existing is None:
                                merge_with_existing = prt.prompt(
                                    'merge_with_existing', default='y')
                            if merge_with_existing:
                                existing = Entry.init_from_file(
                                    catalog=None,
                                    name=event_names[ei],
                                    path=new_events[ei],
                                    merge=False,
                                    pop_schema=False,
                                    ignore_keys=[ENTRY.MODELS],
                                    compare_to_existing=False)
                                Catalog().copy_entry_to_entry(
                                    existing, entries[entry])

                        oentry = entries[entry]._ordered(entries[entry])
                        entabbed_json_dump(
                            {entry: oentry}, open(new_events[ei], 'w'),
                            separators=(',', ':'))

                    self._converted.extend([
                        [event_names[x], new_events[x]]
                        for x in range(len(event_names))])

                new_event_list.extend(new_events)
                previous_file = event
            else:
                new_event_list.append(event)

        return new_event_list
Example no. 16
def do_lamost(catalog):
    """Import spectra from LAMOST."""
    task_str = catalog.get_current_task_str()

    # Set preferred names, calculate some columns based on imported data,
    # sanitize some fields
    keys = list(catalog.entries.keys())

    viz = Vizier(columns=["**"])

    fureps = {'erg/cm2/s/A': 'erg/s/cm^2/Angstrom'}

    c_kms = con.c.cgs.value / 1.0e5

    for oname in pbar(keys, task_str):
        # Some events may be merged in cleanup process, skip them if
        # non-existent.

        if (FASTSTARS.RA not in catalog.entries[oname]
                or FASTSTARS.DEC not in catalog.entries[oname]):
            continue
        else:
            result = viz.query_region(coord.SkyCoord(
                ra=catalog.entries[oname][FASTSTARS.RA][0]['value'],
                dec=catalog.entries[oname][FASTSTARS.DEC][0]['value'],
                unit=(un.hourangle, un.deg),
                frame='icrs'),
                                      width="2s",
                                      catalog="V/149/dr2")

            if not result.keys():
                continue
            tab = result['V/149/dr2']

            star = None
            for row in tab:
                if (row['objType'] == 'Star'
                        and row['Class'].lower() in ['star', 'unknown']):
                    star = row
                    break
            if not star:
                continue

            try:
                name, source = catalog.new_entry(oname,
                                                 bibcode='2016yCat.5149....0L',
                                                 srcname='LAMOST',
                                                 url='http://dr3.lamost.org/')
            except Exception:
                catalog.log.warning(
                    '"{}" was not found, suggests merge occurred in cleanup '
                    'process.'.format(oname))
                continue

            if row['SubClass'] != 'Non':
                #catalog.entries[name].add_quantity(
                #    FASTSTARS.SPECTRAL_TYPE, row['SubClass'], source=source)

                ST, SCfull = row['SubClass'][:2], row['SubClass'][2:]
                if len(SCfull) > 0:
                    SC = ''
                    if 'IV' in SCfull:
                        SC = 'sg'
                    elif 'III' in SCfull:
                        SC = 'g'
                    elif 'V' in SCfull:
                        SC = 'd'
                    elif 'I' in SCfull:
                        SC = 'Sg'
                    if SC:
                        catalog.entries[name].add_quantity(
                            FASTSTARS.STELLAR_CLASS, SC, source=source)
                catalog.entries[name].add_quantity(FASTSTARS.SPECTRAL_TYPE,
                                                   ST,
                                                   source=source)

            if row['z'] and is_number(row['z']):
                catalog.entries[name].add_quantity(FASTSTARS.REDSHIFT,
                                                   str(row['z']),
                                                   e_value=str(row['e_z']),
                                                   source=source)
                catalog.entries[name].add_quantity(
                    FASTSTARS.VELOCITY,
                    pretty_num(float(row['z']) * c_kms, sig=5),
                    e_value=pretty_num(float(row['e_z']) * c_kms, sig=5),
                    source=source)

            mag_types = list(row['magType'].replace('psf_', ''))

            nmt = []
            nmi = 0
            for mt in mag_types:
                if is_number(mt):
                    nmt[nmi - 1] += mt
                else:
                    nmt += mt
                    nmi += 1
            mag_types = [
                x.upper() if x in ['b', 'v', 'j', 'h'] else x for x in nmt
            ]

            for mi, mt in enumerate(mag_types):
                snrf = 'snr' + mt.lower()
                if snrf in row.columns and float(row[snrf]) < 3:
                    continue
                photodict = {
                    PHOTOMETRY.TIME: str(row['MJD']),
                    PHOTOMETRY.U_TIME: 'MJD',
                    PHOTOMETRY.BAND: mt,
                    PHOTOMETRY.TELESCOPE: 'LAMOST',
                    PHOTOMETRY.MAGNITUDE: str(row['mag' + str(mi + 1)]),
                    PHOTOMETRY.SOURCE: source
                }
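                # Estimate the magnitude error from the catalog SNR:
                # e_mag ~ 2.5 * log10(1 + 1/SNR).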
                if snrf in row.columns:
                    photodict[PHOTOMETRY.E_MAGNITUDE] = str(
                        Decimal('2.5') *
                        (Decimal('1') +
                         Decimal('1') / Decimal(str(row[snrf]))).log10())[:5]
                catalog.entries[name].add_photometry(**photodict)

            vname = row['PlanId']

            ffile = ('spec-' + row['LMJD'] + '-' + vname + '_sp' +
                     row['spId'] + '-' + row['FiberId'] + '.fits.gz')

            furl = 'http://dr3.lamost.org/sas/fits/' + vname + '/' + ffile

            datafile = os.path.join(catalog.get_current_task_repo(), 'LAMOST',
                                    ffile)

            if not os.path.exists(datafile):
                fr = requests.get(furl)
                with open(datafile, 'wb') as f:
                    f.write(fr.content)

            hdulist = fits.open(datafile)
            for oi, obj in enumerate(hdulist[0].header):
                if any(x in ['.', '/'] for x in obj):
                    del hdulist[0].header[oi]
            hdulist[0].verify('silentfix')
            hdrkeys = list(hdulist[0].header.keys())
            # print(hdrkeys)
            # for key in hdulist[0].header.keys():
            #     print(key, hdulist[0].header[key])
            if hdulist[0].header['SIMPLE']:
                if 'JD' in hdrkeys:
                    mjd = str(jd_to_mjd(Decimal(str(hdulist[0].header['JD']))))
                elif 'MJD' in hdrkeys:
                    mjd = str(hdulist[0].header['MJD'])
                elif 'DATE-OBS' in hdrkeys:
                    dateobs = None
                    if 'T' in hdulist[0].header['DATE-OBS']:
                        dateobs = hdulist[0].header['DATE-OBS'].strip()
                    elif 'UTC-OBS' in hdrkeys:
                        dateobs = hdulist[0].header['DATE-OBS'].strip(
                        ) + 'T' + hdulist[0].header['UTC-OBS'].strip()
                    if dateobs is None:
                        raise ValueError("Couldn't find JD/MJD for spectrum.")
                    mjd = str(astrotime(dateobs, format='isot').mjd)
                else:
                    raise ValueError("Couldn't find JD/MJD for spectrum.")
                if hdulist[0].header['NAXIS'] == 2:
                    waves = [str(x) for x in list(hdulist[0].data)[2]]
                    fluxes = [str(x) for x in list(hdulist[0].data)[0]]
                else:
                    print('Warning: Skipping FITS spectrum `{}`.'.format(
                        datafile))
                    continue
            else:
                raise ValueError('Non-simple FITS import not yet supported.')
            if 'BUNIT' in hdrkeys:
                fluxunit = hdulist[0].header['BUNIT']
                if fluxunit in fureps:
                    fluxunit = fureps[fluxunit]
            else:
                if max([float(x) for x in fluxes]) < 1.0e-5:
                    fluxunit = 'erg/s/cm^2/Angstrom'
                else:
                    fluxunit = 'Uncalibrated'
            specdict = {
                SPECTRUM.U_WAVELENGTHS: 'Angstrom',
                SPECTRUM.WAVELENGTHS: waves,
                SPECTRUM.TIME: mjd,
                SPECTRUM.U_TIME: 'MJD',
                SPECTRUM.FLUXES: fluxes,
                SPECTRUM.U_FLUXES: fluxunit,
                SPECTRUM.FILENAME: ffile,
                SPECTRUM.SOURCE: source
            }
            if 'TELESCOP' in hdrkeys:
                specdict[SPECTRUM.TELESCOPE] = hdulist[0].header['TELESCOP']
            if 'INSTRUME' in hdrkeys:
                specdict[SPECTRUM.INSTRUMENT] = hdulist[0].header['INSTRUME']
            if 'SITENAME' in hdrkeys:
                specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['SITENAME']
            elif 'OBSERVAT' in hdrkeys:
                specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['OBSERVAT']
            if 'OBSERVER' in hdrkeys:
                specdict[SPECTRUM.OBSERVER] = hdulist[0].header['OBSERVER']
            if 'AIRMASS' in hdrkeys:
                specdict[SPECTRUM.AIRMASS] = hdulist[0].header['AIRMASS']
            catalog.entries[name].add_spectrum(**specdict)
            hdulist.close()
            catalog.journal_entries()

    return
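
# A worked sketch of the SNR-to-magnitude-error conversion used above:
# e_mag ~ 2.5 * log10(1 + 1/SNR), truncated to five characters as in the
# import code.
from decimal import Decimal


def e_mag_from_snr(snr):
    """Approximate magnitude error for a given signal-to-noise ratio."""
    return str(Decimal('2.5') *
               (Decimal('1') + Decimal('1') / Decimal(str(snr))).log10())[:5]


print(e_mag_from_snr(10))  # -> '0.103' (about 0.1 mag at SNR = 10)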
Example no. 17
def do_gaia(catalog):
    """Import from the GAIA alerts page."""
    task_str = catalog.get_current_task_str()
    fname = os.path.join(catalog.get_current_task_repo(), 'GAIA/alerts.csv')
    csvtxt = catalog.load_url('http://gsaweb.ast.cam.ac.uk/alerts/alerts.csv',
                              fname)
    if not csvtxt:
        return
    tsvin = list(
        csv.reader(csvtxt.splitlines(), delimiter=',', skipinitialspace=True))
    reference = 'Gaia Photometric Science Alerts'
    refurl = 'http://gsaweb.ast.cam.ac.uk/alerts/alertsindex'
    loopcnt = 0
    for ri, row in enumerate(pbar(tsvin, task_str)):
        if ri == 0 or not row:
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(name=reference, url=refurl)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        year = '20' + re.findall(r'\d+', row[0])[0]
        catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, year,
                                           source)
        catalog.entries[name].add_quantity(SUPERNOVA.RA,
                                           row[2],
                                           source,
                                           u_value='floatdegrees')
        catalog.entries[name].add_quantity(SUPERNOVA.DEC,
                                           row[3],
                                           source,
                                           u_value='floatdegrees')
        if row[7] and row[7] != 'unknown':
            ctype = row[7].replace('SNe', '').replace('SN', '').strip()
            catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, ctype,
                                               source)
        elif any([
                xx in row[9].upper()
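                # 'CANDIATE' below is intentional: it appears to match a
                # typo present in the upstream alert comments.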
                for xx in ['SN CANDIATE', 'CANDIDATE SN', 'HOSTLESS SN']
        ]):
            catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                               'Candidate', source)

        if ('aka' in row[9].replace('gakaxy', 'galaxy').lower()
                and 'AKARI' not in row[9]):
            commentsplit = (row[9].replace('_', ' ').replace(
                'MLS ', 'MLS').replace('CSS ', 'CSS').replace(
                    'SN iPTF', 'iPTF').replace('SN ',
                                               'SN').replace('AT ', 'AT'))
            commentsplit = commentsplit.split()
            for csi, cs in enumerate(commentsplit):
                if 'aka' in cs.lower() and csi < len(commentsplit) - 1:
                    alias = commentsplit[csi + 1].strip('(),:.').replace(
                        'PSNJ', 'PSN J')
                    if alias[:6] == 'ASASSN' and alias[6] != '-':
                        alias = 'ASASSN-' + alias[6:]
                    if alias.lower() != 'master':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.ALIAS, alias, source)
                    break

        fname = os.path.join(catalog.get_current_task_repo(),
                             'GAIA/') + row[0] + '.csv'

        csvtxt = catalog.load_url(
            'http://gsaweb.ast.cam.ac.uk/alerts/alert/' + row[0] +
            '/lightcurve.csv', fname)

        if csvtxt:
            tsvin2 = csv.reader(csvtxt.splitlines())
            for ri2, row2 in enumerate(tsvin2):
                if ri2 <= 1 or not row2:
                    continue
                mjd = str(jd_to_mjd(Decimal(row2[1].strip())))
                magnitude = row2[2].strip()
                if magnitude == 'null':
                    continue
                e_mag = 0.
                telescope = 'GAIA'
                band = 'G'
                catalog.entries[name].add_photometry(time=mjd,
                                                     u_time='MJD',
                                                     telescope=telescope,
                                                     band=band,
                                                     magnitude=magnitude,
                                                     e_magnitude=e_mag,
                                                     source=source)
        if catalog.args.update:
            catalog.journal_entries()
        loopcnt = loopcnt + 1
        if catalog.args.travis and loopcnt % catalog.TRAVIS_QUERY_LIMIT == 0:
            break
    catalog.journal_entries()
    return
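
A note on the feed format: `do_gaia` skips the header row and blank lines before parsing each alert. A self-contained sketch of the same pattern, using hypothetical inline data in place of the real alerts.csv feed:

import csv

csvtxt = ('#Name, Published, RaDeg, DecDeg\n'
          'Gaia16abc, 2016-01-01, 123.456, -12.345\n')
for ri, row in enumerate(csv.reader(csvtxt.splitlines(), delimiter=',',
                                    skipinitialspace=True)):
    if ri == 0 or not row:  # skip the header row and any blank lines
        continue
    print(row[0], row[2], row[3])  # name, RA (deg), Dec (deg)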
Example n. 18
def do_gaia(catalog):
    """Import from the GAIA alerts page."""
    task_str = catalog.get_current_task_str()
    fname = os.path.join(catalog.get_current_task_repo(), 'GAIA/alerts.csv')
    csvtxt = catalog.load_url('http://gsaweb.ast.cam.ac.uk/alerts/alerts.csv',
                              fname)
    if not csvtxt:
        return
    tsvin = list(
        csv.reader(
            csvtxt.splitlines(), delimiter=',', skipinitialspace=True))
    reference = 'Gaia Photometric Science Alerts'
    refurl = 'http://gsaweb.ast.cam.ac.uk/alerts/alertsindex'
    loopcnt = 0
    for ri, row in enumerate(pbar(tsvin, task_str)):
        if ri == 0 or not row:
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(name=reference, url=refurl)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        year = '20' + re.findall(r'\d+', row[0])[0]
        catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, year,
                                           source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.RA, row[2], source, u_value='floatdegrees')
        catalog.entries[name].add_quantity(
            SUPERNOVA.DEC, row[3], source, u_value='floatdegrees')
        if row[7] and row[7] != 'unknown':
            ctype = row[7].replace('SNe', '').replace('SN', '').strip()
            catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, ctype,
                                               source)
        elif any([
                xx in row[9].upper()
                for xx in ['SN CANDIATE', 'CANDIDATE SN', 'HOSTLESS SN']
        ]):
            catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                               'Candidate', source)

        if ('aka' in row[9].replace('gakaxy', 'galaxy').lower() and
                'AKARI' not in row[9]):
            commentsplit = (row[9].replace('_', ' ').replace('MLS ', 'MLS')
                            .replace('CSS ', 'CSS').replace('SN iPTF', 'iPTF')
                            .replace('SN ', 'SN').replace('AT ', 'AT'))
            commentsplit = commentsplit.split()
            for csi, cs in enumerate(commentsplit):
                if 'aka' in cs.lower() and csi < len(commentsplit) - 1:
                    alias = commentsplit[csi + 1].strip('(),:.').replace(
                        'PSNJ', 'PSN J')
                    if alias[:6] == 'ASASSN' and alias[6] != '-':
                        alias = 'ASASSN-' + alias[6:]
                    if alias.lower() != 'master':
                        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,
                                                           alias, source)
                    break

        fname = os.path.join(catalog.get_current_task_repo(),
                             'GAIA/') + row[0] + '.csv'

        csvtxt = catalog.load_url('http://gsaweb.ast.cam.ac.uk/alerts/alert/' +
                                  row[0] + '/lightcurve.csv', fname)

        if csvtxt:
            tsvin2 = csv.reader(csvtxt.splitlines())
            for ri2, row2 in enumerate(tsvin2):
                if ri2 <= 1 or not row2:
                    continue
                mjd = str(jd_to_mjd(Decimal(row2[1].strip())))
                magnitude = row2[2].strip()
                if magnitude == 'null':
                    continue
                e_mag = 0.
                telescope = 'GAIA'
                band = 'G'
                catalog.entries[name].add_photometry(
                    time=mjd,
                    u_time='MJD',
                    telescope=telescope,
                    band=band,
                    magnitude=magnitude,
                    e_magnitude=e_mag,
                    source=source)
        if catalog.args.update:
            catalog.journal_entries()
        loopcnt = loopcnt + 1
        if catalog.args.travis and loopcnt % catalog.TRAVIS_QUERY_LIMIT == 0:
            break
    catalog.journal_entries()
    return
Example n. 19
def do_snax(catalog):
    """Import from the SNaX X-ray database."""
    task_str = catalog.get_current_task_str()

    dlurl = 'http://kronos.uchicago.edu/snax/export.php?exportType=TSV&exportFields=standard&objid=&name=&typeid=&type=&galaxyid=&galaxy=&fluxMin=&fluxMax=&fluxEnergyLMin=&fluxEnergyLMax=&fluxEnergyHMin=&fluxEnergyHMax=&lumMin=&lumMax=&instrumentid=&instrument=&ageMin=&ageMax=&dateMin=&dateMax=&sortA=dateExploded'  # noqa: E501

    file_path = os.path.join(catalog.get_current_task_repo(), 'SNaX.TSV')

    tsv = catalog.load_url(dlurl, file_path)

    if not tsv:
        return
    data = [x.split('\t') for x in tsv.split('\n')]

    for r, row in enumerate(pbar(data, task_str)):
        if r == 0 or not row[0]:
            continue
        (name,
         source) = catalog.new_entry(row[0],
                                     srcname='SNaX',
                                     url='http://kronos.uchicago.edu/snax/',
                                     secondary=True)
        sources = [source]
        bibcode = row[-6].strip()
        if len(bibcode) != 19:
            continue
        expsrc = uniq_cdl(
            sources +
            [catalog.entries[name].add_source(bibcode=row[-6].strip())])
        coosrc = uniq_cdl(
            sources +
            [catalog.entries[name].add_source(bibcode=row[-5].strip())])
        dissrc = uniq_cdl(
            sources +
            [catalog.entries[name].add_source(bibcode=row[-4].strip())])
        flxsrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(
                bibcode=row[-3].strip()), catalog.entries[name].add_source(
                    bibcode=row[-2].strip())
        ])

        catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, row[1],
                                           source)
        date = astrotime(float(row[2]), format='jd').datetime
        catalog.entries[name].add_quantity(
            SUPERNOVA.EXPLOSION_DATE,
            make_date_string(date.year, date.month, date.day), expsrc)
        catalog.entries[name].add_quantity(SUPERNOVA.RA,
                                           ' '.join(row[3].split()[:3]),
                                           coosrc)
        catalog.entries[name].add_quantity(SUPERNOVA.DEC,
                                           ' '.join(row[3].split()[3:]),
                                           coosrc)
        catalog.entries[name].add_quantity(SUPERNOVA.LUM_DIST, row[4], dissrc)
        catalog.entries[name].add_quantity(SUPERNOVA.HOST, row[5], source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT,
            row[6],
            source,
            e_value=row[7] if (row[7] and float(row[7]) != 0.0) else '')
        photodict = {
            PHOTOMETRY.TIME: jd_to_mjd(Decimal(row[8])),
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.ENERGY: row[15:17],
            PHOTOMETRY.U_ENERGY: 'keV',
            PHOTOMETRY.FLUX: str(Decimal('1.0e-13') * Decimal(row[11])),
            PHOTOMETRY.U_FLUX: 'ergs/s/cm^2',
            PHOTOMETRY.E_LOWER_FLUX:
            str(Decimal('1.0e-13') * Decimal(row[13])),
            PHOTOMETRY.E_UPPER_FLUX:
            str(Decimal('1.0e-13') * Decimal(row[14])),
            PHOTOMETRY.INSTRUMENT: row[9],
            PHOTOMETRY.SOURCE: flxsrc
        }
        if row[12] == '1':
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
        catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
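
`do_snax` merges several source identifiers with `uniq_cdl`. One plausible minimal implementation, assuming it deduplicates a list of source-ID strings and returns them comma-delimited (the real helper may instead sort the values):

from collections import OrderedDict

def uniq_cdl(values):
    """Return a comma-delimited string of unique values, order preserved."""
    return ','.join(OrderedDict.fromkeys(values))

print(uniq_cdl(['1', '2', '2', '3']))  # '1,2,3'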
Example n. 20
def do_sdss_spectra(catalog):
    """Import spectra from LAMOST."""
    task_str = catalog.get_current_task_str()

    # Set preferred names, calculate some columns based on imported data,
    # sanitize some fields
    keys = list(catalog.entries.keys())

    fureps = {
        'erg/cm2/s/A': 'erg/s/cm^2/Angstrom',
        '1E-17 erg/cm^2/s/Ang': 'erg/s/cm^2/Angstrom'
    }

    c_kms = con.c.cgs.value / 1.0e5
    cntsdss = 0
    for oname in pbar(keys, task_str):
        # Some events may be merged in cleanup process, skip them if
        # non-existent.

        if (FASTSTARS.RA not in catalog.entries[oname]
                or FASTSTARS.DEC not in catalog.entries[oname]):
            continue
        else:
            xid = SDSS.query_region(coord.SkyCoord(
                ra=catalog.entries[oname][FASTSTARS.RA][0]['value'],
                dec=catalog.entries[oname][FASTSTARS.DEC][0]['value'],
                unit=(un.hourangle, un.deg),
                frame='icrs'),
                                    spectro=True)
            # xid = SDSS.query_region(coord.SkyCoord(
            #        ra='14:34:06.17',
            #        dec='+56:30:47.24',
            #        unit=(un.hourangle, un.deg), frame='icrs'), spectro=True)
            if xid is None:
                continue
            while len(xid) > 1:
                notstar = xid['z'].argmax()
                xid.remove_row(notstar)
            #print(xid)

            # star = None
            # for row in tab:
            #    if (row['objType'] == 'Star' and
            #            row['Class'].lower() in ['star', 'unknown']):
            #        star = row
            #        break
            # if not star:
            #    continue

            try:
                name, source = catalog.new_entry(oname,
                                                 bibcode='2015ApJS..219...12A',
                                                 srcname='SDSS',
                                                 url='http://www.sdss.org/')
            except Exception:
                catalog.log.warning(
                    '"{}" was not found, suggests merge occurred in cleanup '
                    'process.'.format(oname))
                continue

            ffile = ('spec-' + str(xid['specobjid'][0]) + '.fits.gz')

            # furl = 'http://dr3.lamost.org/sas/fits/' + vname + '/' + ffile

            datafile = os.path.join(catalog.get_current_task_repo(), 'SDSS',
                                    ffile)

            if not os.path.exists(datafile):
                # Download spectra
                try:
                    sp = SDSS.get_spectra(matches=xid)[0]
                except urllib.error.HTTPError:
                    catalog.log.warning(
                        '"{}" threw an HTTP 404, must be an error upstream. '
                        'Will likely go away on the next run.'.format(oname))
                    continue

                # Identify star
                # assert sp[2].data['class'][0]=='STAR'

                # Write spectra
                # print(catalog.entries[oname][FASTSTARS.RA][0]['value'],catalog.entries[oname][FASTSTARS.DEC][0]['value'])
                sp.writeto(datafile, overwrite=True)
                # open(datafile, 'wb').write(fr.content)

            # Open spectra
            hdulist = fits.open(datafile)

            # sp contains a list of fits datafiles, identify main one
            i_primary = 0
            i_coadd = 1
            i_specobj = 2
            assert hdulist[i_primary].name == 'PRIMARY'
            assert hdulist[i_coadd].name == 'COADD'
            assert (hdulist[i_specobj].name == 'SPECOBJ'
                    or hdulist[i_specobj].name == 'SPALL')

            # xid = SDSS.query_region(coord.SkyCoord(
            #     ra='12:11:50.27',
            #     dec='+14:37:16.2',
            #     unit=(un.hourangle, un.deg), frame='icrs'), spectro=True)

            # from SPECOBJ
            # print('.'+hdulist[i_specobj].data['ELODIE_SPTYPE'][0]+'.')
            if hdulist[i_specobj].data['ELODIE_SPTYPE'][0] != 'unknown':
                ST = hdulist[i_specobj].data['ELODIE_SPTYPE'][0][:2]
                SCfull = hdulist[i_specobj].data['ELODIE_SPTYPE'][0][2:]
                if len(SCfull) > 0:
                    if 'IV' in SCfull:
                        SC = 'sg'
                    elif 'III' in SCfull:
                        SC = 'g'
                    elif 'V' in SCfull:
                        SC = 'd'
                    elif 'I' in SCfull:
                        SC = 'Sg'
                    else:
                        SC = False
                    if SC is not False:
                        catalog.entries[name].add_quantity(
                            FASTSTARS.STELLAR_CLASS, SC, source=source)
                catalog.entries[name].add_quantity(FASTSTARS.SPECTRAL_TYPE,
                                                   ST,
                                                   source=source)

            if hdulist[i_specobj].data['Z'][0] != 0.0:
                catalog.entries[name].add_quantity(
                    FASTSTARS.REDSHIFT,
                    str(hdulist[i_specobj].data['Z'][0]),
                    e_value=str(hdulist[i_specobj].data['Z_ERR'][0]),
                    source=source)
                catalog.entries[name].add_quantity(
                    FASTSTARS.VELOCITY,
                    pretty_num(float(hdulist[i_specobj].data['Z'][0]) * c_kms,
                               sig=5),
                    e_value=pretty_num(float(
                        hdulist[i_specobj].data['Z_ERR'][0] * c_kms),
                                       sig=5),
                    source=source)

            for oi, obj in enumerate(hdulist[0].header):
                if any(x in ['.', '/'] for x in obj):
                    del (hdulist[0].header[oi])
            hdulist[0].verify('silentfix')
            hdrkeys = list(hdulist[0].header.keys())
            # print(hdrkeys)
            # for key in hdulist[0].header.keys():
            #     print(key, hdulist[0].header[key])
            if hdulist[0].header['SIMPLE']:
                if 'JD' in hdrkeys:
                    mjd = str(jd_to_mjd(Decimal(str(hdulist[0].header['JD']))))
                elif 'MJD' in hdrkeys:
                    mjd = str(hdulist[0].header['MJD'])
                elif 'DATE-OBS' in hdrkeys:
                    if 'T' in hdulist[0].header['DATE-OBS']:
                        dateobs = hdulist[0].header['DATE-OBS'].strip()
                    elif 'UTC-OBS' in hdrkeys:
                        dateobs = (hdulist[0].header['DATE-OBS'].strip() +
                                   'T' + hdulist[0].header['UTC-OBS'].strip())
                    else:
                        raise ValueError(
                            "Couldn't construct observation date for spectrum.")
                    mjd = str(astrotime(dateobs, format='isot').mjd)
                else:
                    raise ValueError("Couldn't find JD/MJD for spectrum.")
                if hdulist[i_coadd].header['NAXIS'] == 2:
                    waves = [
                        str(x)
                        for x in list(10**hdulist[i_coadd].data['loglam'])
                    ]
                    fluxes = [
                        str(x) for x in list(hdulist[i_coadd].data['flux'])
                    ]
                else:
                    print('Warning: Skipping FITS spectrum `{}`.'.format(
                        datafile))
                    continue
            else:
                raise ValueError('Non-simple FITS import not yet supported.')
            if 'BUNIT' in hdrkeys:
                fluxunit = hdulist[0].header['BUNIT']
                if fluxunit in fureps:
                    fluxunit = fureps[fluxunit]
                # Check the raw header value here, since `fluxunit` may
                # already have been remapped to its expanded form above.
                if hdulist[0].header['BUNIT'].startswith('1E-17'):
                    fluxes = [
                        str(x * 1e-17)
                        for x in list(hdulist[i_coadd].data['flux'])
                    ]
            else:
                if max([float(x) for x in fluxes]) < 1.0e-5:
                    fluxunit = 'erg/s/cm^2/Angstrom'
                else:
                    fluxunit = 'Uncalibrated'
            specdict = {
                SPECTRUM.U_WAVELENGTHS: 'Angstrom',
                SPECTRUM.WAVELENGTHS: waves,
                SPECTRUM.TIME: mjd,
                SPECTRUM.U_TIME: 'MJD',
                SPECTRUM.FLUXES: fluxes,
                SPECTRUM.U_FLUXES: fluxunit,
                SPECTRUM.FILENAME: ffile,
                SPECTRUM.SOURCE: source
            }
            if 'TELESCOP' in hdrkeys:
                specdict[SPECTRUM.TELESCOPE] = hdulist[0].header['TELESCOP']
            if 'INSTRUME' in hdrkeys:
                specdict[SPECTRUM.INSTRUMENT] = hdulist[0].header['INSTRUME']
            if 'SITENAME' in hdrkeys:
                specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['SITENAME']
            elif 'OBSERVAT' in hdrkeys:
                specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['OBSERVAT']
            if 'OBSERVER' in hdrkeys:
                specdict[SPECTRUM.OBSERVER] = hdulist[0].header['OBSERVER']
            if 'AIRMASS' in hdrkeys:
                specdict[SPECTRUM.AIRMASS] = hdulist[0].header['AIRMASS']
            catalog.entries[name].add_spectrum(**specdict)
            cntsdss += 1
            hdulist.close()
            catalog.journal_entries()
    print('{} entries have SDSS spectra.'.format(cntsdss))
    return
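
The COADD extension read above stores wavelengths as log10 of the wavelength ('loglam'), which the example converts with `10**loglam`. A toy demonstration of that conversion (the values here are made up, assuming Angstrom units):

import numpy as np

loglam = np.array([3.5798, 3.5799, 3.5800])  # hypothetical log10(wavelength)
waves = 10.0 ** loglam                       # wavelengths in Angstrom
print(waves)                                 # roughly 3800-3802 Angstrom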
Example n. 21
def do_ascii(catalog):
    """Process ASCII files that were extracted from datatables appearing in
    published works.
    """
    task_str = catalog.get_current_task_str()

    # 2006ApJ...645..841N
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2006ApJ...645..841N-table3.csv')
    tsvin = list(csv.reader(open(file_path, 'r'), delimiter=','))
    for ri, row in enumerate(pbar(tsvin, task_str)):
        name = 'SNLS-' + row[0]
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2006ApJ...645..841N')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT, row[1], source, kind='spectroscopic')
        astrot = astrotime(float(row[4]) + 2450000., format='jd').datetime
        date_str = make_date_string(astrot.year, astrot.month, astrot.day)
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE, date_str, source)
    catalog.journal_entries()

    # Anderson 2014
    file_names = list(
        glob(os.path.join(
            catalog.get_current_task_repo(), 'SNII_anderson2014/*.dat')))
    for datafile in pbar_strings(file_names, task_str):
        basename = os.path.basename(datafile)
        if not is_number(basename[:2]):
            continue
        if basename == '0210_V.dat':
            name = 'SN0210'
        else:
            name = ('SN20' if int(basename[:2]) <
                    50 else 'SN19') + basename.split('_')[0]
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2014ApJ...786...67A')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

        if name in ['SN1999ca', 'SN2003dq', 'SN2008aw']:
            system = 'Swope'
        else:
            system = 'Landolt'

        with open(datafile, 'r') as ff:
            tsvin = csv.reader(ff, delimiter=' ', skipinitialspace=True)
            for row in tsvin:
                if not row[0]:
                    continue
                time = str(jd_to_mjd(Decimal(row[0])))
                catalog.entries[name].add_photometry(
                    time=time, band='V',
                    magnitude=row[1], e_magnitude=row[2],
                    system=system, source=source)
    catalog.journal_entries()

    # stromlo
    stromlobands = ['B', 'V', 'R', 'I', 'VM', 'RM']
    file_path = os.path.join(
        catalog.get_current_task_repo(), 'J_A+A_415_863-1/photometry.csv')
    tsvin = list(csv.reader(open(file_path, 'r'), delimiter=','))
    for row in pbar(tsvin, task_str):
        name = row[0]
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2004A&A...415..863G')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        mjd = str(jd_to_mjd(Decimal(row[1])))
        for ri, ci in enumerate(range(2, len(row), 3)):
            if not row[ci]:
                continue
            band = stromlobands[ri]
            upperlimit = True if (not row[ci + 1] and row[ci + 2]) else False
            e_upper_magnitude = str(
                abs(Decimal(row[ci + 1]))) if row[ci + 1] else ''
            e_lower_magnitude = str(
                abs(Decimal(row[ci + 2]))) if row[ci + 2] else ''
            teles = 'MSSSO 1.3m' if band in ['VM', 'RM'] else 'CTIO'
            instr = 'MaCHO' if band in ['VM', 'RM'] else ''
            catalog.entries[name].add_photometry(
                time=mjd, band=band, magnitude=row[ci],
                e_upper_magnitude=e_upper_magnitude,
                e_lower_magnitude=e_lower_magnitude,
                upperlimit=upperlimit, telescope=teles,
                instrument=instr, source=source)
    catalog.journal_entries()

    # 2015MNRAS.449..451W
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2015MNRAS.449..451W.dat')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if rr == 0:
            continue
        namesplit = row[0].split('/')
        name = namesplit[-1]
        if name.startswith('SN'):
            name = name.replace(' ', '')
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2015MNRAS.449..451W')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        if len(namesplit) > 1:
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, namesplit[0], source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.CLAIMED_TYPE, row[1], source)
        catalog.entries[name].add_photometry(
            time=row[2], band=row[4], magnitude=row[3], source=source)
    catalog.journal_entries()

    # 2016MNRAS.459.1039T
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2016MNRAS.459.1039T.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    name = catalog.add_entry('LSQ13zm')
    source = catalog.entries[name].add_source(bibcode='2016MNRAS.459.1039T')
    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            bands = [xx.replace('(err)', '') for xx in row[3:-1]]
            continue
        mjd = row[1]
        mags = [re.sub(r'\([^)]*\)', '', xx) for xx in row[3:-1]]
        upps = [True if '>' in xx else '' for xx in mags]
        mags = [xx.replace('>', '') for xx in mags]
        errs = [xx[xx.find('(') + 1:xx.find(')')]
                if '(' in xx else '' for xx in row[3:-1]]
        for mi, mag in enumerate(mags):
            if not is_number(mag):
                continue
            catalog.entries[name].add_photometry(
                time=mjd, band=bands[mi], magnitude=mag, e_magnitude=errs[mi],
                instrument=row[-1], upperlimit=upps[mi], source=source)
    catalog.journal_entries()

    # 2015ApJ...804...28G
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2015ApJ...804...28G.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    name = catalog.add_entry('PS1-13arp')
    source = catalog.entries[name].add_source(bibcode='2015ApJ...804...28G')
    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
    for rr, row in enumerate(pbar(data, task_str)):
        if rr == 0:
            continue
        mjd = row[1]
        mag = row[3]
        upp = True if '<' in mag else ''
        mag = mag.replace('<', '')
        err = row[4] if is_number(row[4]) else ''
        ins = row[5]
        catalog.entries[name].add_photometry(
            time=mjd, band=row[0], magnitude=mag, e_magnitude=err,
            instrument=ins, upperlimit=upp, source=source)
    catalog.journal_entries()

    # 2016ApJ...819...35A
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2016ApJ...819...35A.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(
            bibcode='2016ApJ...819...35A')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
        catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
        catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT, row[3], source)
        disc_date = datetime.strptime(row[4], '%Y %b %d').isoformat()
        disc_date = disc_date.split('T')[0].replace('-', '/')
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE, disc_date, source)
    catalog.journal_entries()

    # 2014ApJ...784..105W
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2014ApJ...784..105W.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(
            bibcode='2014ApJ...784..105W')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        mjd = row[1]
        band = row[2]
        mag = row[3]
        err = row[4]
        catalog.entries[name].add_photometry(
            time=mjd, band=band, magnitude=mag, e_magnitude=err,
            instrument='WHIRC', telescope='WIYN 3.5 m', observatory='NOAO',
            system='WHIRC', source=source)
    catalog.journal_entries()

    # 2012MNRAS.425.1007B
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2012MNRAS.425.1007B.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            bands = row[2:]
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(
            bibcode='2012MNRAS.425.1007B')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        mjd = row[1]
        mags = [xx.split('±')[0].strip() for xx in row[2:]]
        errs = [xx.split('±')[1].strip()
                if '±' in xx else '' for xx in row[2:]]
        if row[0] == 'PTF09dlc':
            ins = 'HAWK-I'
            tel = 'VLT 8.1m'
            obs = 'ESO'
        else:
            ins = 'NIRI'
            tel = 'Gemini North 8.2m'
            obs = 'Gemini'

        for mi, mag in enumerate(mags):
            if not is_number(mag):
                continue
            catalog.entries[name].add_photometry(
                time=mjd, band=bands[mi], magnitude=mag, e_magnitude=errs[mi],
                instrument=ins, telescope=tel, observatory=obs,
                system='Natural', source=source)

        catalog.journal_entries()

    # 2014ApJ...783...28G
    file_path = os.path.join(
        catalog.get_current_task_repo(), 'apj490105t2_ascii.txt')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                continue
            name, source = catalog.new_entry(
                row[0], bibcode='2014ApJ...783...28G')
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1], source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, '20' + row[0][3:5], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[3], source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.REDSHIFT, row[13] if is_number(row[13]) else
                row[10], source)
    catalog.journal_entries()

    # 2005ApJ...634.1190H
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2005ApJ...634.1190H.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            name, source = catalog.new_entry(
                'SNLS-' + row[0], bibcode='2005ApJ...634.1190H')
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, '20' + row[0][:2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.REDSHIFT, row[5].replace('?', ''), source,
                e_value=row[6], kind='host')
            catalog.entries[name].add_quantity(
                SUPERNOVA.CLAIMED_TYPE, row[7].replace('SN', '').strip(':* '),
                source)
    catalog.journal_entries()

    # 2014MNRAS.444.2133S
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2014MNRAS.444.2133S.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                continue
            name = row[0]
            if is_number(name[:4]):
                name = 'SN' + name
            name, source = catalog.new_entry(
                name, bibcode='2014MNRAS.444.2133S')
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT, row[3],
                                               source,
                                               kind='host')
    catalog.journal_entries()

    # 2009MNRAS.398.1041B
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2009MNRAS.398.1041B.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                bands = row[2:-1]
                continue
            name, source = catalog.new_entry(
                'SN2008S', bibcode='2009MNRAS.398.1041B')
            mjd = str(jd_to_mjd(Decimal(row[0])))
            mags = [x.split('±')[0].strip() for x in row[2:]]
            upps = [('<' in x.split('±')[0]) for x in row[2:]]
            errs = [x.split('±')[1].strip()
                    if '±' in x else '' for x in row[2:]]

            instrument = row[-1]

            for mi, mag in enumerate(mags):
                if not is_number(mag):
                    continue
                catalog.entries[name].add_photometry(
                    time=mjd, band=bands[mi],
                    magnitude=mag, e_magnitude=errs[mi],
                    instrument=instrument, source=source)
    catalog.journal_entries()

    # 2010arXiv1007.0011P
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2010arXiv1007.0011P.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                bands = row[1:]
                continue
            name, source = catalog.new_entry(
                'SN2008S', bibcode='2010arXiv1007.0011P')
            mjd = row[0]
            mags = [x.split('±')[0].strip() for x in row[1:]]
            errs = [x.split('±')[1].strip()
                    if '±' in x else '' for x in row[1:]]

            for mi, mag in enumerate(mags):
                if not is_number(mag):
                    continue
                catalog.entries[name].add_photometry(
                    time=mjd, band=bands[mi],
                    magnitude=mag, e_magnitude=errs[mi],
                    instrument='LBT', source=source)
    catalog.journal_entries()

    # 2000ApJ...533..320G
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2000ApJ...533..320G.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        name, source = catalog.new_entry(
            'SN1997cy', bibcode='2000ApJ...533..320G')
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                bands = row[1:-1]
                continue
            mjd = str(jd_to_mjd(Decimal(row[0])))
            mags = row[1:len(bands)]
            for mi, mag in enumerate(mags):
                if not is_number(mag):
                    continue
                catalog.entries[name].add_photometry(
                    time=mjd, band=bands[mi],
                    magnitude=mag,
                    observatory='Mount Stromlo', telescope='MSSSO',
                    source=source, kcorrected=True)

    catalog.journal_entries()
    return
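
Several of the tables handled by `do_ascii` store measurements as 'mag±err' in a single cell, with '>' marking upper limits. A self-contained sketch of the split pattern used throughout:

row = ['18.31±0.05', '17.92±0.04', '>20.1', '']
mags = [x.split('±')[0].strip().replace('>', '') for x in row]
errs = [x.split('±')[1].strip() if '±' in x else '' for x in row]
upps = ['>' in x for x in row]
print(list(zip(mags, errs, upps)))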
Example n. 22
def do_external_fits_spectra(catalog):
    """Import spectra from donated external FITS files."""
    fpath = catalog.get_current_task_repo()
    with open(os.path.join(fpath, 'meta.json'), 'r') as f:
        metadict = json.loads(f.read())

    fureps = {'erg/cm2/s/A': 'erg/s/cm^2/Angstrom'}
    task_str = catalog.get_current_task_str()
    path_pattern = os.path.join(catalog.get_current_task_repo(), '*.fits')
    files = glob(path_pattern)
    for datafile in files:
        filename = datafile.split('/')[-1]
        if filename == 'meta.json':
            continue
        hdulist = fits.open(datafile)
        for oi, obj in enumerate(hdulist[0].header):
            if any(x in ['.', '/'] for x in obj):
                del (hdulist[0].header[oi])
        hdulist[0].verify('silentfix')
        hdrkeys = list(hdulist[0].header.keys())
        # print(hdrkeys)
        name = ''
        meta = metadict.get(filename, {})
        if 'name' in meta:
            name = meta['name']
        if not name:
            name = hdulist[0].header['OBJECT']
        if 'bibcode' in meta:
            name, source = catalog.new_entry(
                name, bibcode=meta['bibcode'])
        elif 'donator' in meta:
            name, source = catalog.new_entry(
                name, srcname=meta['donator'])
        else:
            if 'OBSERVER' in hdrkeys:
                name, source = catalog.new_entry(
                    name, srcname=hdulist[0].header['OBSERVER'])
            else:
                name = catalog.add_entry(name)
                source = catalog.entries[name].add_self_source()
        # for key in hdulist[0].header.keys():
        #     print(key, hdulist[0].header[key])
        if hdulist[0].header['SIMPLE']:
            if 'JD' in hdrkeys:
                mjd = str(jd_to_mjd(Decimal(str(hdulist[0].header['JD']))))
            elif 'MJD' in hdrkeys:
                mjd = str(hdulist[0].header['MJD'])
            elif 'DATE-OBS' in hdrkeys:
                if 'T' in hdulist[0].header['DATE-OBS']:
                    dateobs = hdulist[0].header['DATE-OBS'].strip()
                elif 'UTC-OBS' in hdrkeys:
                    dateobs = (hdulist[0].header['DATE-OBS'].strip() +
                               'T' + hdulist[0].header['UTC-OBS'].strip())
                else:
                    raise ValueError(
                        "Couldn't construct observation date for spectrum.")
                mjd = str(astrotime(dateobs, format='isot').mjd)
            else:
                raise ValueError("Couldn't find JD/MJD for spectrum.")
            w0 = hdulist[0].header['CRVAL1']
            if hdulist[0].header['NAXIS'] == 1:
                wd = hdulist[0].header['CDELT1']
                fluxes = [str(x) for x in list(hdulist[0].data)]
                errors = False
            elif hdulist[0].header['NAXIS'] == 3:
                wd = hdulist[0].header['CD1_1']
                fluxes = [str(x) for x in list(hdulist[0].data)[0][0]]
                errors = [str(x) for x in list(hdulist[0].data)[3][0]]
            else:
                print('Warning: Skipping FITS spectrum `{}`.'.format(filename))
                continue
            waves = [str(w0 + wd * x) for x in range(0, len(fluxes))]
        else:
            raise ValueError('Non-simple FITS import not yet supported.')
        if 'BUNIT' in hdrkeys:
            fluxunit = hdulist[0].header['BUNIT']
            if fluxunit in fureps:
                fluxunit = fureps[fluxunit]
        else:
            if max([float(x) for x in fluxes]) < 1.0e-5:
                fluxunit = 'erg/s/cm^2/Angstrom'
            else:
                fluxunit = 'Uncalibrated'
        specdict = {
            SPECTRUM.U_WAVELENGTHS: 'Angstrom',
            SPECTRUM.WAVELENGTHS: waves,
            SPECTRUM.TIME: mjd,
            SPECTRUM.U_TIME: 'MJD',
            SPECTRUM.FLUXES: fluxes,
            SPECTRUM.U_FLUXES: fluxunit,
            SPECTRUM.FILENAME: filename,
            SPECTRUM.SOURCE: source
        }
        if 'TELESCOP' in hdrkeys:
            specdict[SPECTRUM.TELESCOPE] = hdulist[0].header['TELESCOP']
        if 'INSTRUME' in hdrkeys:
            specdict[SPECTRUM.INSTRUMENT] = hdulist[0].header['INSTRUME']
        if errors:
            specdict[SPECTRUM.ERRORS] = errors
            specdict[SPECTRUM.U_ERRORS] = fluxunit
        if 'SITENAME' in hdrkeys:
            specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['SITENAME']
        elif 'OBSERVAT' in hdrkeys:
            specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['OBSERVAT']
        if 'OBSERVER' in hdrkeys:
            specdict[SPECTRUM.OBSERVER] = hdulist[0].header['OBSERVER']
        if 'AIRMASS' in hdrkeys:
            specdict[SPECTRUM.AIRMASS] = hdulist[0].header['AIRMASS']
        catalog.entries[name].add_spectrum(**specdict)
        hdulist.close()
        catalog.journal_entries()
    return
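
`do_external_fits_spectra` rebuilds the wavelength axis from the linear WCS keywords CRVAL1 (wavelength of the first pixel) and CDELT1 (step per pixel). A minimal sketch of that reconstruction with illustrative values:

w0 = 3800.0  # CRVAL1: wavelength of the first pixel (Angstrom)
wd = 2.5     # CDELT1: wavelength step per pixel (Angstrom)
nflux = 5    # number of flux samples in the spectrum
waves = [str(w0 + wd * x) for x in range(0, nflux)]
print(waves)  # ['3800.0', '3802.5', '3805.0', '3807.5', '3810.0']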
Example n. 23
def do_gaia(catalog):
    """Import from the GAIA alerts page."""
    task_str = catalog.get_current_task_str()
    fname = os.path.join(catalog.get_current_task_repo(), 'GAIA/alerts.csv')
    csvtxt = catalog.load_cached_url(
        'http://gsaweb.ast.cam.ac.uk/alerts/alerts.csv', fname)
    if not csvtxt:
        return
    tsvin = list(csv.reader(csvtxt.splitlines(),
                            delimiter=',', skipinitialspace=True))
    reference = 'Gaia Photometric Science Alerts'
    refurl = 'http://gsaweb.ast.cam.ac.uk/alerts/alertsindex'
    for ri, row in enumerate(pbar(tsvin, task_str)):
        if ri == 0 or not row:
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(
            name=reference, url=refurl)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        year = '20' + re.findall(r'\d+', row[0])[0]
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE, year, source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.RA, row[2], source, u_value='floatdegrees')
        catalog.entries[name].add_quantity(
            SUPERNOVA.DEC, row[3], source, u_value='floatdegrees')
        if row[7] and row[7] != 'unknown':
            ctype = row[7].replace('SNe', '').replace('SN', '').strip()
            catalog.entries[name].add_quantity(
                SUPERNOVA.CLAIMED_TYPE, ctype, source)
        elif any([xx in row[9].upper() for xx in
                  ['SN CANDIATE', 'CANDIDATE SN', 'HOSTLESS SN']]):
            catalog.entries[name].add_quantity(
                SUPERNOVA.CLAIMED_TYPE, 'Candidate', source)

        if ('aka' in row[9].replace('gakaxy', 'galaxy').lower() and
                'AKARI' not in row[9]):
            commentsplit = (row[9]
                            .replace('_', ' ')
                            .replace('MLS ', 'MLS')
                            .replace('CSS ', 'CSS')
                            .replace('SN iPTF', 'iPTF')
                            .replace('SN ', 'SN')
                            .replace('AT ', 'AT'))
            commentsplit = commentsplit.split()
            for csi, cs in enumerate(commentsplit):
                if 'aka' in cs.lower() and csi < len(commentsplit) - 1:
                    alias = commentsplit[
                        csi + 1].strip('(),:.').replace('PSNJ', 'PSN J')
                    if alias[:6] == 'ASASSN' and alias[6] != '-':
                        alias = 'ASASSN-' + alias[6:]
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.ALIAS, alias, source)
                    break

        fname = os.path.join(catalog.get_current_task_repo(),
                             'GAIA/') + row[0] + '.csv'
        if (catalog.current_task.load_archive(catalog.args) and
                os.path.isfile(fname)):
            with open(fname, 'r') as ff:
                csvtxt = ff.read()
        else:
            response = urllib.request.urlopen('http://gsaweb.ast.cam.ac.uk/'
                                              'alerts/alert/' +
                                              row[0] + '/lightcurve.csv')
            with open(fname, 'w') as ff:
                csvtxt = response.read().decode('utf-8')
                ff.write(csvtxt)

        tsvin2 = csv.reader(csvtxt.splitlines())
        for ri2, row2 in enumerate(tsvin2):
            if ri2 <= 1 or not row2:
                continue
            mjd = str(jd_to_mjd(Decimal(row2[1].strip())))
            magnitude = row2[2].strip()
            if magnitude == 'null':
                continue
            e_mag = 0.
            telescope = 'GAIA'
            band = 'G'
            catalog.entries[name].add_photometry(
                time=mjd, telescope=telescope, band=band, magnitude=magnitude,
                e_magnitude=e_mag, source=source)
        if catalog.args.update:
            catalog.journal_entries()
    catalog.journal_entries()
    return
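
This older variant of `do_gaia` hand-rolls the read-from-archive-else-download behavior that `load_url`/`load_cached_url` encapsulate. A generic sketch of that caching idiom (the function name is illustrative, not part of the astrocats API):

import os
import urllib.request

def load_cached(url, fname, use_archive=True):
    """Return the cached file if present, else download and cache it."""
    if use_archive and os.path.isfile(fname):
        with open(fname, 'r') as ff:
            return ff.read()
    txt = urllib.request.urlopen(url).read().decode('utf-8')
    with open(fname, 'w') as ff:
        ff.write(txt)
    return txt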
Example n. 24
def do_suspect_photo(catalog):
    task_str = catalog.get_current_task_str()
    with open(os.path.join(catalog.get_current_task_repo(),
                           'suspectreferences.csv'), 'r') as f:
        tsvin = csv.reader(f, delimiter=',', skipinitialspace=True)
        suspectrefdict = {}
        for row in tsvin:
            suspectrefdict[row[0]] = row[1]

    file_names = list(sorted(glob(os.path.join(
        catalog.get_current_task_repo(), 'SUSPECT/*.html'))))
    for datafile in pbar_strings(file_names, task_str):
        basename = os.path.basename(datafile)
        basesplit = basename.split('-')
        oldname = basesplit[1]
        name = catalog.add_entry(oldname)
        if name.startswith('SN') and is_number(name[2:]):
            name = name + 'A'
        band = basesplit[3].split('.')[0]
        ei = int(basesplit[2])
        bandlink = 'file://' + os.path.abspath(datafile)
        bandresp = urllib.request.urlopen(bandlink)
        bandsoup = BeautifulSoup(bandresp, 'html5lib')
        bandtable = bandsoup.find('table')

        names = bandsoup.body.findAll(text=re.compile('Name'))
        reference = ''
        for link in bandsoup.body.findAll('a'):
            if 'adsabs' in link['href']:
                reference = str(link).replace('"', "'")

        bibcode = unescape(suspectrefdict[reference])
        source = catalog.entries[name].add_source(bibcode=bibcode)

        sec_ref = 'SUSPECT'
        sec_refurl = 'https://www.nhn.ou.edu/~suspect/'
        sec_source = catalog.entries[name].add_source(
            name=sec_ref, url=sec_refurl, secondary=True)
        catalog.entries[name].add_quantity(
            SUPERNOVA.ALIAS, oldname, sec_source)

        if ei == 1:
            year = re.findall(r'\d+', name)[0]
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, year, sec_source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.HOST, names[1].split(':')[1].strip(), sec_source)

            redshifts = bandsoup.body.findAll(text=re.compile('Redshift'))
            if redshifts:
                catalog.entries[name].add_quantity(
                    SUPERNOVA.REDSHIFT, redshifts[0].split(':')[1].strip(),
                    sec_source, kind='heliocentric')
            # hvels = bandsoup.body.findAll(text=re.compile('Heliocentric
            # Velocity'))
            # if hvels:
            #     vel = hvels[0].split(':')[1].strip().split(' ')[0]
            #     catalog.entries[name].add_quantity(SUPERNOVA.VELOCITY, vel,
            # sec_source,
            # kind='heliocentric')
            types = bandsoup.body.findAll(text=re.compile('Type'))

            catalog.entries[name].add_quantity(
                SUPERNOVA.CLAIMED_TYPE, types[0].split(
                    ':')[1].strip().split(' ')[0],
                sec_source)

        for r, row in enumerate(bandtable.findAll('tr')):
            if r == 0:
                continue
            col = row.findAll('td')
            mjd = str(jd_to_mjd(Decimal(col[0].contents[0])))
            mag = col[3].contents[0]
            if mag.isspace():
                mag = ''
            else:
                mag = str(mag)
            e_magnitude = col[4].contents[0]
            if e_magnitude.isspace():
                e_magnitude = ''
            else:
                e_magnitude = str(e_magnitude)
            catalog.entries[name].add_photometry(
                time=mjd, band=band, magnitude=mag, e_magnitude=e_magnitude,
                source=sec_source + ',' + source)

    catalog.journal_entries()
    return
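
`do_suspect_photo` walks each photometry table row by row with BeautifulSoup, skipping the header. A self-contained sketch of the same pattern on a toy table (requires `beautifulsoup4` and `html5lib`):

from bs4 import BeautifulSoup

html = ('<table><tr><th>JD</th><th>Mag</th></tr>'
        '<tr><td>2450000.5</td><td>18.3</td></tr></table>')
soup = BeautifulSoup(html, 'html5lib')
for r, row in enumerate(soup.find('table').findAll('tr')):
    if r == 0:  # skip the header row
        continue
    col = row.findAll('td')
    print(col[0].contents[0], col[1].contents[0])  # '2450000.5' '18.3'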
Example n. 25
def do_tns_spectra(catalog):
    """Load TNS spectra."""
    requests.packages.urllib3.disable_warnings()
    task_str = catalog.get_current_task_str()
    tns_url = 'https://wis-tns.weizmann.ac.il/'
    try:
        with open('tns.key', 'r') as f:
            tnskey = f.read().splitlines()[0]
    except Exception:
        catalog.log.warning('TNS API key not found, make sure a file named '
                            '`tns.key` containing the key is placed in the '
                            'astrocats directory.')
        tnskey = ''

    fails = 0
    for name in pbar(list(catalog.entries.keys()), task_str):
        if name not in catalog.entries:
            continue
        aliases = catalog.entries[name].get_aliases()
        oname = ''
        for alias in aliases:
            if (alias.startswith(('SN', 'AT')) and is_integer(alias[2:6]) and
                    int(alias[2:6]) >= 2016) and alias[6:].isalpha():
                oname = alias
                break
        if not oname:
            continue
        reqname = oname[2:]
        jsonpath = os.path.join(catalog.get_current_task_repo(), 'TNS', 'meta',
                                reqname + '.json')
        download_json = True
        if os.path.isfile(jsonpath):
            with open(jsonpath, 'r') as f:
                objdict = json.load(f)
            if ('discoverydate' in objdict and
                (datetime.now() - datetime.strptime(objdict['discoverydate'],
                                                    '%Y-%m-%d %H:%M:%S')
                 ).days > 90):
                download_json = False
        if download_json:
            data = urllib.parse.urlencode({
                'api_key': tnskey,
                'data': json.dumps({
                    'objname': reqname,
                    'spectra': '1'
                })
            }).encode('ascii')
            req = urllib.request.Request(
                'https://wis-tns.weizmann.ac.il/api/get/object', data=data)
            trys = 0
            objdict = None
            while trys < 3 and not objdict:
                try:
                    objdict = json.loads(
                        urllib.request.urlopen(req, timeout=30).read().decode('ascii'))[
                            'data']['reply']
                except KeyboardInterrupt:
                    raise
                except Exception:
                    catalog.log.warning('API request failed for `{}`.'.format(
                        name))
                    time.sleep(5)
                trys = trys + 1
            if (not objdict or 'objname' not in objdict or
                    not isinstance(objdict['objname'], str)):
                fails = fails + 1
                catalog.log.warning('Object `{}` not found!'.format(name))
                if fails >= 5:
                    break
                continue
            # Cache object here
            with open(jsonpath, 'w') as f:
                json.dump(sortOD(objdict), f, indent='\t',
                          separators=(',', ':'), ensure_ascii=False,
                          sort_keys=True)

        if 'spectra' not in objdict:
            continue
        specarr = objdict['spectra']
        name, source = catalog.new_entry(
            oname, srcname='Transient Name Server', url=tns_url)
        for spectrum in specarr:
            spectrumdict = {
                SPECTRUM.SOURCE: source
            }
            if 'jd' in spectrum:
                spectrumdict[SPECTRUM.TIME] = str(
                    jd_to_mjd(Decimal(str(spectrum['jd']))))
                spectrumdict[SPECTRUM.U_TIME] = 'MJD'
            if spectrum.get('observer', ''):
                spectrumdict[SPECTRUM.OBSERVER] = spectrum['observer']
            if spectrum.get('reducer', ''):
                spectrumdict[SPECTRUM.REDUCER] = spectrum['reducer']
            if 'source_group' in spectrum:
                survey = spectrum['source_group']['name']
                if survey:
                    spectrumdict[SPECTRUM.SURVEY] = survey
            if 'telescope' in spectrum:
                telescope = spectrum['telescope']['name']
                if telescope and telescope != 'Other':
                    spectrumdict[SPECTRUM.TELESCOPE] = telescope
            if 'instrument' in spectrum:
                instrument = spectrum['instrument']['name']
                if instrument and instrument != 'Other':
                    spectrumdict[SPECTRUM.INSTRUMENT] = instrument

            if 'asciifile' in spectrum:
                fname = urllib.parse.unquote(
                    spectrum['asciifile'].split('/')[-1])
                spectxt = catalog.load_url(
                    spectrum['asciifile'],
                    os.path.join(
                        catalog.get_current_task_repo(), 'TNS', 'spectra',
                        fname), archived_mode=True)
                data = [x.split() for x in spectxt.splitlines()]

                skipspec = False
                newdata = []
                oldval = ''
                for row in data:
                    if row and '#' not in row[0]:
                        if (len(row) >= 2 and is_number(row[0]) and
                                is_number(row[1]) and row[1] != oldval):
                            newdata.append(row)
                            oldval = row[1]

                if skipspec or not newdata:
                    warnings.warn('Skipped adding spectrum file ' + fname)
                    continue

                data = [list(i) for i in zip(*newdata)]
                wavelengths = data[0]
                fluxes = data[1]
                errors = ''
                if len(data) == 3:
                    errors = data[2]

                if max([float(x) for x in fluxes]) < 1.0e-5:
                    fluxunit = 'erg/s/cm^2/Angstrom'
                else:
                    fluxunit = 'Uncalibrated'

                spectrumdict.update({
                    SPECTRUM.U_WAVELENGTHS: 'Angstrom',
                    SPECTRUM.ERRORS: errors,
                    SPECTRUM.U_FLUXES: fluxunit,
                    SPECTRUM.U_ERRORS: fluxunit if errors else '',
                    SPECTRUM.WAVELENGTHS: wavelengths,
                    SPECTRUM.FLUXES: fluxes
                })
                catalog.entries[name].add_spectrum(**spectrumdict)
        catalog.journal_entries()
    return
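
The TNS spectrum parser above drops comment lines and consecutive rows that repeat the previous flux value. A standalone sketch of that filter, simplified to skip the numeric checks of the original:

def clean_spectrum_rows(rows):
    """Keep (wavelength, flux) rows, dropping comments and repeated fluxes."""
    newdata, oldval = [], ''
    for row in rows:
        if row and '#' not in row[0]:
            if len(row) >= 2 and row[1] != oldval:
                newdata.append(row)
                oldval = row[1]
    return newdata

rows = [['# header'], ['4000', '1.2e-16'], ['4001', '1.2e-16'],
        ['4002', '1.3e-16']]
print(clean_spectrum_rows(rows))  # the repeated-flux row at 4001 is dropped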
Example n. 26
    def set_first_max_light(self):
        if SUPERNOVA.MAX_APP_MAG not in self:
            # Get the maximum amongst all bands
            mldt, mlmag, mlband, mlsource = self._get_max_light()
            if mldt or mlmag or mlband:
                source = self.add_self_source()
                uniq_src = uniq_cdl([source] + mlsource.split(','))
            if mldt:
                max_date = make_date_string(mldt.year, mldt.month, mldt.day)
                self.add_quantity(SUPERNOVA.MAX_DATE,
                                  max_date,
                                  uniq_src,
                                  derived=True)
            if mlmag:
                mlmag = pretty_num(mlmag)
                self.add_quantity(SUPERNOVA.MAX_APP_MAG,
                                  mlmag,
                                  uniq_src,
                                  derived=True)
            if mlband:
                self.add_quantity(SUPERNOVA.MAX_BAND,
                                  mlband,
                                  uniq_src,
                                  derived=True)

        if SUPERNOVA.MAX_VISUAL_APP_MAG not in self:
            # Get the "visual" maximum
            mldt, mlmag, mlband, mlsource = self._get_max_light(visual=True)
            if mldt or mlmag or mlband:
                source = self.add_self_source()
                uniq_src = uniq_cdl([source] + mlsource.split(','))
            if mldt:
                max_date = make_date_string(mldt.year, mldt.month, mldt.day)
                self.add_quantity(SUPERNOVA.MAX_VISUAL_DATE,
                                  max_date,
                                  uniq_src,
                                  derived=True)
            if mlmag:
                mlmag = pretty_num(mlmag)
                self.add_quantity(SUPERNOVA.MAX_VISUAL_APP_MAG,
                                  mlmag,
                                  uniq_src,
                                  derived=True)
            if mlband:
                self.add_quantity(SUPERNOVA.MAX_VISUAL_BAND,
                                  mlband,
                                  uniq_src,
                                  derived=True)

        if (self._KEYS.DISCOVER_DATE not in self or max([
                len(x[QUANTITY.VALUE].split('/'))
                for x in self[self._KEYS.DISCOVER_DATE]
        ]) < 3):
            fldt, flsource = self._get_first_light()
            if fldt:
                source = self.add_self_source()
                disc_date = make_date_string(fldt.year, fldt.month, fldt.day)
                self.add_quantity(self._KEYS.DISCOVER_DATE,
                                  disc_date,
                                  uniq_cdl([source] + flsource.split(',')),
                                  derived=True)

        if self._KEYS.DISCOVER_DATE not in self and self._KEYS.SPECTRA in self:
            minspecmjd = float("+inf")
            for spectrum in self[self._KEYS.SPECTRA]:
                if 'time' in spectrum and 'u_time' in spectrum:
                    if spectrum['u_time'] == 'MJD':
                        mjd = float(spectrum['time'])
                    elif spectrum['u_time'] == 'JD':
                        mjd = float(jd_to_mjd(Decimal(spectrum['time'])))
                    else:
                        continue

                    if mjd < minspecmjd:
                        minspecmjd = mjd
                        minspecsource = spectrum['source']

            if minspecmjd < float("+inf"):
                fldt = astrotime(minspecmjd, format='mjd').datetime
                source = self.add_self_source()
                disc_date = make_date_string(fldt.year, fldt.month, fldt.day)
                self.add_quantity(self._KEYS.DISCOVER_DATE,
                                  disc_date,
                                  uniq_cdl([source] +
                                           minspecsource.split(',')),
                                  derived=True)
        return
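
# Hedged sketch of `make_date_string` as used above: assumed to join the
# available year/month/day parts with slashes, zero-padding month and day.
def make_date_string(year, month='', day=''):
    datestring = str(year)
    if month:
        datestring += '/' + str(month).zfill(2)
        if day:
            datestring += '/' + str(day).zfill(2)
    return datestring

# Usage: make_date_string(2016, 3, 7) -> '2016/03/07'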
Example no. 27
    def clean_internal(self, data):
        """Clean input data from the 'Supernovae/input/internal' repository.

        FIX: instead of making changes in place to `dirty_event`, should a new
             event be created, values filled, then returned??
        FIX: currently will fail if no bibcode and no url
        """
        self._log.debug("clean_internal(): {}".format(self.name()))

        bibcodes = []
        # Remove 'names' when 'bibcodes' are given
        for ss, source in enumerate(data.get(self._KEYS.SOURCES, [])):
            if SOURCE.BIBCODE in source:
                bibcodes.append(source[SOURCE.BIBCODE])

        # If there are no existing sources, add OSC as one
        if len(bibcodes) == 0:
            self.add_self_source()
            bibcodes = [self.catalog.OSC_BIBCODE]

        # Clean some legacy fields
        alias_key = 'aliases'
        if alias_key in data:
            # Remove the entry in the data
            aliases = data.pop(alias_key)
            # Make sure this is a list
            if not isinstance(aliases, list):
                raise ValueError("{}: aliases not a list '{}'".format(
                    self.name(), aliases))
            # Add OSC source entry
            source = self.add_self_source()

            for alias in aliases:
                self.add_quantity(self._KEYS.ALIAS, alias, source)

        dist_key = 'distinctfrom'
        if dist_key in data:
            distincts = data.pop(dist_key)
            if ((isinstance(distincts, list) and
                 isinstance(distincts[0], str))):
                source = self.add_self_source()
                for df in distincts:
                    self.add_quantity(dist_key, df, source)

        # Go through all remaining keys in 'dirty' event, and make sure
        # everything is a quantity with a source (OSC if no other)
        for key in data.keys():
            # Skip keys that are not required to have an associated source
            if self._KEYS.get_key_by_name(key).no_source:
                pass
            elif key == self._KEYS.PHOTOMETRY:
                for p, photo in enumerate(data[self._KEYS.PHOTOMETRY]):
                    if photo.get(PHOTOMETRY.U_TIME) == 'JD':
                        data[self._KEYS.PHOTOMETRY][p][
                            PHOTOMETRY.U_TIME] = 'MJD'
                        data[self._KEYS.PHOTOMETRY][p][
                            PHOTOMETRY.TIME] = str(
                            jd_to_mjd(Decimal(photo['time'])))
                    if QUANTITY.SOURCE not in photo:
                        source = self.add_source(bibcode=bibcodes[0])
                        data[self._KEYS.PHOTOMETRY][p][
                            QUANTITY.SOURCE] = source
            else:
                for qi, quantity in enumerate(data[key]):
                    if QUANTITY.SOURCE not in quantity:
                        source = self.add_source(bibcode=bibcodes[0])
                        data[key][qi][QUANTITY.SOURCE] = source

        return data
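
# Sketch of `uniq_cdl`, which these examples use to merge source aliases into
# a comma-delimited string; assumed to drop duplicates while preserving order.
from collections import OrderedDict

def uniq_cdl(values):
    return ','.join(OrderedDict.fromkeys(values).keys())

# Usage: uniq_cdl(['1', '2', '1']) -> '1,2'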
Example no. 28
def do_snax(catalog):
    task_str = catalog.get_current_task_str()
    file_path = os.path.join(catalog.get_current_task_repo(), 'SNaX.TSV')
    # csvtxt = catalog.load_url(
    #     'http://www.grbcatalog.org/'
    #     'download_data?cut_0_min=5&cut_0=BAT%20T90'
    #     '&cut_0_max=100000&num_cuts=1&no_date_cut=True',
    #     file_path)
    data = list(
        csv.reader(
            open(file_path, 'r'),
            delimiter='\t',
            quotechar='"',
            skipinitialspace=True))

    for r, row in enumerate(pbar(data, task_str)):
        if r == 0:
            continue
        (name, source) = catalog.new_entry(
            row[0], srcname='SNaX', url='http://kronos.uchicago.edu/snax/')
        sources = [source]
        expsrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-6].strip())
        ])
        coosrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-5].strip())
        ])
        dissrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-4].strip())
        ])
        flxsrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-3].strip()),
            catalog.entries[name].add_source(bibcode=row[-2].strip())
        ])

        catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, row[1],
                                           source)
        date = astrotime(float(row[2]), format='jd').datetime
        catalog.entries[name].add_quantity(
            SUPERNOVA.EXPLOSION_DATE,
            make_date_string(date.year, date.month, date.day), expsrc)
        catalog.entries[name].add_quantity(
            SUPERNOVA.RA, ' '.join(row[3].split()[:3]), coosrc)
        catalog.entries[name].add_quantity(
            SUPERNOVA.DEC, ' '.join(row[3].split()[3:]), coosrc)
        catalog.entries[name].add_quantity(SUPERNOVA.LUM_DIST, row[4], dissrc)
        catalog.entries[name].add_quantity(SUPERNOVA.HOST, row[5], source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT,
            row[6],
            source,
            e_value=row[7] if (row[7] and float(row[7]) != 0.0) else '')
        photodict = {
            PHOTOMETRY.TIME: jd_to_mjd(Decimal(row[8])),
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.ENERGY: row[15:17],
            PHOTOMETRY.U_ENERGY: 'keV',
            PHOTOMETRY.FLUX: str(Decimal('1.0e-13') * Decimal(row[11])),
            PHOTOMETRY.U_FLUX: 'ergs/s/cm^2',
            PHOTOMETRY.E_LOWER_FLUX:
            str(Decimal('1.0e-13') * Decimal(row[13])),
            PHOTOMETRY.E_UPPER_FLUX:
            str(Decimal('1.0e-13') * Decimal(row[14])),
            PHOTOMETRY.INSTRUMENT: row[9],
            PHOTOMETRY.SOURCE: flxsrc
        }
        if row[12] == '1':
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
        catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
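
# Why the flux columns above are rescaled with Decimal rather than float: the
# SNaX table stores fluxes in units of 1e-13 erg/s/cm^2, and Decimal keeps
# the rescaled string exact. The value '2.25' is an illustrative sample.
from decimal import Decimal

flux = Decimal('1.0e-13') * Decimal('2.25')
print(str(flux))  # '2.250E-13' -- exact decimal arithmetic, no binary rounding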
Example no. 29
    def clean_internal(self, data):
        """Clean input data from the 'Supernovae/input/internal' repository.

        FIX: instead of making changes in place to `dirty_event`, should a new
             event be created, values filled, then returned??
        FIX: currently will fail if no bibcode and no url
        """
        self._log.debug("clean_internal(): {}".format(self.name()))

        def_source_dict = {}
        # Find source that will be used as default
        sources = data.get(self._KEYS.SOURCES, [])
        if sources:
            def_source_dict = sources[0]
            allow_alias = False
            if SOURCE.ALIAS in def_source_dict:
                del def_source_dict[SOURCE.ALIAS]
        else:
            # If there are no existing sources, add OSC as one
            self.add_self_source()
            sources = self.get(self._KEYS.SOURCES, [])
            def_source_dict = sources[0]
            allow_alias = True

        # Clean some legacy fields
        alias_key = 'aliases'
        if alias_key in data:
            # Remove the entry in the data
            aliases = data.pop(alias_key)
            # Make sure this is a list
            if not isinstance(aliases, list):
                raise ValueError("{}: aliases not a list '{}'".format(
                    self.name(), aliases))
            # Add OSC source entry
            source = self.add_self_source()

            for alias in aliases:
                self.add_quantity(self._KEYS.ALIAS, alias, source)

        dist_key = 'distinctfrom'
        if dist_key in data:
            distincts = data.pop(dist_key)
            if ((isinstance(distincts, list)
                 and isinstance(distincts[0], string_types))):
                source = self.add_self_source()
                for df in distincts:
                    self.add_quantity(self._KEYS.DISTINCT_FROM, df, source)
            else:
                data[dist_key] = list(distincts)

        # Go through all remaining keys in 'dirty' event, and make sure
        # everything is a quantity with a source (OSC if no other)
        for key in data.keys():
            # Skip keys that are not required to have an associated source
            if self._KEYS.get_key_by_name(key).no_source:
                pass
            elif key == self._KEYS.PHOTOMETRY:
                for p, photo in enumerate(data[self._KEYS.PHOTOMETRY]):
                    if photo.get(PHOTOMETRY.U_TIME) == 'JD':
                        data[self._KEYS.PHOTOMETRY][p][
                            PHOTOMETRY.U_TIME] = 'MJD'
                        data[self._KEYS.PHOTOMETRY][p][PHOTOMETRY.TIME] = str(
                            jd_to_mjd(Decimal(photo['time'])))
                    if QUANTITY.SOURCE not in photo:
                        if not def_source_dict:
                            raise ValueError("No sources found, can't add "
                                             "photometry.")
                        source = self.add_source(allow_alias=allow_alias,
                                                 **def_source_dict)
                        data[self._KEYS.PHOTOMETRY][p][
                            QUANTITY.SOURCE] = source
            else:
                for qi, quantity in enumerate(data[key]):
                    if QUANTITY.SOURCE not in quantity:
                        if not def_source_dict:
                            raise ValueError("No sources found, can't add "
                                             "quantity.")
                        source = self.add_source(allow_alias=allow_alias,
                                                 **def_source_dict)
                        data[key][qi][QUANTITY.SOURCE] = source

        return data
Example no. 30
def do_tns_photo(catalog):
    """Load TNS photometry."""
    task_str = catalog.get_current_task_str()
    tns_url = 'https://www.wis-tns.org/'
    try:
        with open('tns.key', 'r') as f:
            tnskey = f.read().splitlines()[0]
    except Exception:
        catalog.log.warning('TNS API key not found; make sure a file named '
                            '`tns.key` containing the key is placed in the '
                            'astrocats directory.')
        tnskey = ''

    bandreps = {'Clear': 'C'}
    fails = 0
    for name in pbar(list(catalog.entries.keys()), task_str):
        if name not in catalog.entries:
            continue
        aliases = catalog.entries[name].get_aliases()
        oname = ''
        for alias in aliases:
            if (alias.startswith(('SN', 'AT')) and is_integer(alias[2:6]) and
                    int(alias[2:6]) >= 2016) and alias[6:].isalpha():
                oname = alias
                break
        if not oname:
            continue
        reqname = oname[2:]
        jsonpath = os.path.join(catalog.get_current_task_repo(), 'TNS',
                                reqname + '.json')
        download_json = True
        if os.path.isfile(jsonpath):
            with open(jsonpath, 'r') as f:
                objdict = json.load(f)
            if 'discoverydate' in objdict:
                discoverydate = objdict['discoverydate']
                if '.' not in discoverydate:
                    discoverydate += '.0'
                try:
                    if (datetime.now() - datetime.strptime(
                            discoverydate, '%Y-%m-%d %H:%M:%S.%f')).days > 90:
                        download_json = False
                except ValueError:
                    download_json = False
        if download_json:
            data = urllib.parse.urlencode({
                'api_key': tnskey,
                'data': json.dumps({
                    'objname': reqname,
                    'photometry': '1'
                })
            }).encode('ascii')
            req = urllib.request.Request(
                'https://www.wis-tns.org/api/get/object', data=data)
            trys = 0
            objdict = None
            while trys < 3 and not objdict:
                try:
                    objdict = json.loads(
                        urllib.request.urlopen(req, timeout=30).read().decode('ascii'))[
                            'data']['reply']
                except KeyboardInterrupt:
                    raise
                except Exception:
                    catalog.log.warning('API request failed for `{}`.'.format(
                        name))
                    time.sleep(5)
                trys = trys + 1
            if (not objdict or 'objname' not in objdict or
                    not isinstance(objdict['objname'], str)):
                fails = fails + 1
                catalog.log.warning('Object `{}` not found!'.format(name))
                if fails >= 5:
                    break
                continue
            # Cache object here
            with open(jsonpath, 'w') as f:
                json.dump(sortOD(objdict), f, indent='\t',
                          separators=(',', ':'), ensure_ascii=False,
                          sort_keys=True)

        if 'photometry' not in objdict:
            continue
        photoarr = objdict['photometry']
        name, source = catalog.new_entry(
            oname, srcname='Transient Name Server', url=tns_url)
        for photo in photoarr:
            if 'mag' not in photo['flux_unit']['name'].lower():
                catalog.log.warning('Unknown flux unit `{}`.'.format(photo[
                    'flux_unit']['name']))
                continue
            if not photo['jd']:
                continue
            if not photo['flux'] and not photo['limflux']:
                continue
            mag = photo['flux'] if photo['flux'] else photo['limflux']
            photodict = {
                PHOTOMETRY.TIME: str(jd_to_mjd(Decimal(str(photo['jd'])))),
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: mag,
                PHOTOMETRY.SOURCE: source
            }
            if photo.get('fluxerr', ''):
                photodict[PHOTOMETRY.E_MAGNITUDE] = photo['fluxerr']
            if not photo['flux']:
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            band = photo['filters']['name']
            if band:
                if band in bandreps:
                    band = bandreps[band]
                photodict[PHOTOMETRY.BAND] = band
            if photo.get('observer', ''):
                photodict[PHOTOMETRY.OBSERVER] = photo['observer']
            if 'source_group' in photo:
                survey = photo['source_group']['group_name']
                if survey:
                    photodict[PHOTOMETRY.SURVEY] = survey
            if 'telescope' in photo:
                telescope = photo['telescope']['name']
                if telescope and telescope != 'Other':
                    photodict[PHOTOMETRY.TELESCOPE] = telescope
            if 'instrument' in photo:
                instrument = photo['instrument']['name']
                if instrument and instrument != 'Other':
                    photodict[PHOTOMETRY.INSTRUMENT] = instrument
            system = ''
            if 'Vega' in photo['flux_unit']['name']:
                system = 'Vega'
            elif 'ab' in photo['flux_unit']['name'].lower():
                system = 'AB'
            if system:
                photodict[PHOTOMETRY.SYSTEM] = system
            catalog.entries[name].add_photometry(**photodict)
        catalog.journal_entries()
    return
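
# Hedged sketch of `sortOD`, used when caching the TNS reply above: assumed
# to recursively sort dictionary keys so the cached JSON is stable between
# runs (a guess at the helper's intent, not its exact code).
from collections import OrderedDict

def sortOD(od):
    res = OrderedDict()
    for key in sorted(od.keys()):
        if isinstance(od[key], dict):
            res[key] = sortOD(od[key])
        else:
            res[key] = od[key]
    return res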
Example no. 31
def do_cfa_photo(catalog):
    """Import photometry from the CfA archive."""
    from html import unescape
    import re
    task_str = catalog.get_current_task_str()
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'cfa-input/*.dat'))
    for fname in pbar_strings(file_names, task_str):
        f = open(fname, 'r')
        tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
        csv_data = []
        for r, row in enumerate(tsvin):
            new = []
            for item in row:
                new.extend(item.split('\t'))
            csv_data.append(new)

        for r, row in enumerate(csv_data):
            for c, col in enumerate(row):
                csv_data[r][c] = col.strip()
            csv_data[r] = [_f for _f in csv_data[r] if _f]

        eventname = os.path.basename(os.path.splitext(fname)[0])

        eventparts = eventname.split('_')

        name = clean_snname(eventparts[0])
        name = catalog.add_entry(name)
        secondaryname = 'CfA Supernova Archive'
        secondaryurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
        secondarysource = catalog.entries[name].add_source(
            name=secondaryname,
            url=secondaryurl,
            secondary=True,
            acknowledgment=ACKN_CFA)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name,
                                           secondarysource)

        year = re.findall(r'\d+', name)[0]
        catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, year,
                                           secondarysource)

        eventbands = list(eventparts[1])

        tu = 'MJD'
        jdoffset = Decimal(0)
        for rc, row in enumerate(csv_data):
            if len(row) > 0 and row[0][0] == "#":
                if len(row[0]) > 2 and row[0][:3] == '#JD':
                    tu = 'JD'
                    rowparts = row[0].split('-')
                    jdoffset = Decimal(rowparts[1])
                elif len(row[0]) > 6 and row[0][:7] == '#Julian':
                    tu = 'JD'
                    jdoffset = Decimal(0)
                elif len(row) > 1 and row[1].lower() == 'photometry':
                    for ci, col in enumerate(row[2:]):
                        if col[0] == "(":
                            refstr = ' '.join(row[2 + ci:])
                            refstr = refstr.replace('(', '').replace(')', '')
                            bibcode = unescape(refstr)
                            source = catalog.entries[name].add_source(
                                bibcode=bibcode)
                elif len(row) > 1 and row[1] == 'HJD':
                    tu = 'HJD'
                continue

            elif len(row) > 0:
                mjd = row[0]
                for v, val in enumerate(row):
                    if v == 0:
                        if tu == 'JD':
                            mjd = str(jd_to_mjd(Decimal(val) + jdoffset))
                            tuout = 'MJD'
                        elif tu == 'HJD':
                            mjd = str(jd_to_mjd(Decimal(val)))
                            tuout = 'MJD'
                        else:
                            mjd = val
                            tuout = tu
                    elif v % 2 != 0:
                        if float(row[v]) < 90.0:
                            src = secondarysource + ',' + source
                            photodict = {
                                PHOTOMETRY.U_TIME: tuout,
                                PHOTOMETRY.TIME: mjd,
                                PHOTOMETRY.BAND_SET: 'Standard',
                                PHOTOMETRY.BAND: eventbands[(v - 1) // 2],
                                PHOTOMETRY.MAGNITUDE: row[v],
                                PHOTOMETRY.E_MAGNITUDE: row[v + 1],
                                PHOTOMETRY.SOURCE: src
                            }
                            catalog.entries[name].add_photometry(**photodict)
        f.close()

    # Hicken 2012
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'hicken-2012-standard.dat'), 'r') as infile:
        tsvin = list(csv.reader(infile, delimiter='|', skipinitialspace=True))
        for r, row in enumerate(pbar(tsvin, task_str)):
            if r <= 47:
                continue

            if row[0][:2] != 'sn':
                name = 'SN' + row[0].strip()
            else:
                name = row[0].strip()

            name = catalog.add_entry(name)

            source = catalog.entries[name].add_source(
                bibcode='2012ApJS..200...12H')
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, 'Ia',
                                               source)
            photodict = {
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.TIME: row[2].strip(),
                PHOTOMETRY.BAND: row[1].strip(),
                PHOTOMETRY.BAND_SET: 'Standard',
                PHOTOMETRY.MAGNITUDE: row[6].strip(),
                PHOTOMETRY.E_MAGNITUDE: row[7].strip(),
                PHOTOMETRY.SOURCE: source
            }
            catalog.entries[name].add_photometry(**photodict)

    # Bianco 2014
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'bianco-2014-standard.dat'), 'r') as infile:
        tsvin = list(csv.reader(infile, delimiter=' ', skipinitialspace=True))
        for row in pbar(tsvin, task_str):
            name = 'SN' + row[0]
            name = catalog.add_entry(name)

            source = catalog.entries[name].add_source(
                bibcode='2014ApJS..213...19B')
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            photodict = {
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.TIME: row[2],
                PHOTOMETRY.BAND: row[1],
                PHOTOMETRY.MAGNITUDE: row[3],
                PHOTOMETRY.E_MAGNITUDE: row[4],
                PHOTOMETRY.TELESCOPE: row[5],
                PHOTOMETRY.BAND_SET: 'Standard',
                PHOTOMETRY.SOURCE: source
            }
            catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
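
# Sketch of the `is_number` predicate used to filter rows in these examples;
# assumed to simply test whether float() accepts the value.
def is_number(value):
    try:
        float(value)
    except (TypeError, ValueError):
        return False
    return True

# Usage: is_number('53768.2') -> True; is_number('#JD') -> False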
Example no. 32
def do_tns_spectra(catalog, tns_url='https://www.wis-tns.org/', directory='TNS'):
    """Load TNS spectra."""
    requests.packages.urllib3.disable_warnings()
    task_str = catalog.get_current_task_str()
    try:
        with open('tns.key', 'r') as f:
            tnskey = f.read().splitlines()[0]
    except Exception:
        catalog.log.warning('TNS API key not found; make sure a file named '
                            '`tns.key` containing the key is placed in the '
                            'astrocats directory.')
        tnskey = ''

    fails = 0
    for name in pbar(list(catalog.entries.keys()), task_str):
        if name not in catalog.entries:
            continue
        aliases = catalog.entries[name].get_aliases()
        oname = ''
        for alias in aliases:
            if (alias.startswith(('SN', 'AT')) and is_integer(alias[2:6]) and
                    int(alias[2:6]) >= 2016) and alias[6:].isalpha():
                oname = alias
                break
        if not oname:
            continue
        reqname = oname[2:]
        jsonpath = os.path.join(catalog.get_current_task_repo(), directory, 'meta',
                                reqname + '.json')
        download_json = True
        if os.path.isfile(jsonpath):
            with open(jsonpath, 'r') as f:
                objdict = json.load(f)
            if 'discoverydate' in objdict:
                discoverydate = objdict['discoverydate']
                if '.' not in discoverydate:
                    discoverydate += '.0'
                try:
                    if (datetime.now() - datetime.strptime(
                            discoverydate, '%Y-%m-%d %H:%M:%S.%f')).days > 90:
                        download_json = False
                except ValueError:
                    download_json = False
        if download_json:
            data = urllib.parse.urlencode({
                'api_key': tnskey,
                'data': json.dumps({
                    'objname': reqname,
                    'spectra': '1'
                })
            }).encode('ascii')
            req = urllib.request.Request(
                tns_url + 'api/get/object', data=data)
            trys = 0
            objdict = None
            while trys < 3 and not objdict:
                try:
                    objdict = json.loads(
                        urllib.request.urlopen(req, timeout=30).read().decode('ascii'))[
                            'data']['reply']
                except KeyboardInterrupt:
                    raise
                except Exception:
                    catalog.log.warning('API request failed for `{}`.'.format(
                        name))
                    time.sleep(5)
                trys = trys + 1
            if (not objdict or 'objname' not in objdict or
                    not isinstance(objdict['objname'], str)):
                fails = fails + 1
                catalog.log.warning('Object `{}` not found!'.format(name))
                if fails >= 5:
                    break
                continue
            # Cache object here
            with open(jsonpath, 'w') as f:
                json.dump(sortOD(objdict), f, indent='\t',
                          separators=(',', ':'), ensure_ascii=False,
                          sort_keys=True)

        if 'spectra' not in objdict:
            continue
        specarr = objdict['spectra']
        name, source = catalog.new_entry(
            oname, srcname='Transient Name Server', url=tns_url)
        for spectrum in specarr:
            spectrumdict = {
                SPECTRUM.SOURCE: source
            }
            if 'jd' in spectrum:
                spectrumdict[SPECTRUM.TIME] = str(
                    jd_to_mjd(Decimal(str(spectrum['jd']))))
                spectrumdict[SPECTRUM.U_TIME] = 'MJD'
            if spectrum.get('observer', ''):
                spectrumdict[SPECTRUM.OBSERVER] = spectrum['observer']
            if spectrum.get('reducer', ''):
                spectrumdict[SPECTRUM.REDUCER] = spectrum['reducer']
            if 'source_group' in spectrum:
                survey = spectrum['source_group']['name']
                if survey:
                    spectrumdict[SPECTRUM.SURVEY] = survey
            if 'telescope' in spectrum:
                telescope = spectrum['telescope']['name']
                if telescope and telescope != 'Other':
                    spectrumdict[SPECTRUM.TELESCOPE] = telescope
            if 'instrument' in spectrum:
                instrument = spectrum['instrument']['name']
                if instrument and instrument != 'Other':
                    spectrumdict[SPECTRUM.INSTRUMENT] = instrument

            if 'asciifile' in spectrum:
                fname = urllib.parse.unquote(
                    spectrum['asciifile'].split('/')[-1])
                spectxt = catalog.load_url(
                    spectrum['asciifile'],
                    os.path.join(
                        catalog.get_current_task_repo(), directory, 'spectra',
                        fname), archived_mode=True)
                data = [x.split() for x in spectxt.splitlines()]

                skipspec = False
                newdata = []
                oldval = ''
                for row in data:
                    if row and '#' not in row[0]:
                        if (len(row) >= 2 and is_number(row[0]) and
                                is_number(row[1]) and row[1] != oldval):
                            newdata.append(row)
                            oldval = row[1]

                if skipspec or not newdata:
                    warnings.warn('Skipped adding spectrum file ' + fname)
                    continue

                data = [list(i) for i in zip(*newdata)]
                wavelengths = data[0]
                fluxes = data[1]
                errors = ''
                if len(data) == 3:
                    errors = data[2]

                if max([float(x) for x in fluxes]) < 1.0e-5:
                    fluxunit = 'erg/s/cm^2/Angstrom'
                else:
                    fluxunit = 'Uncalibrated'

                spectrumdict.update({
                    SPECTRUM.U_WAVELENGTHS: 'Angstrom',
                    SPECTRUM.ERRORS: errors,
                    SPECTRUM.U_FLUXES: fluxunit,
                    SPECTRUM.U_ERRORS: fluxunit if errors else '',
                    SPECTRUM.WAVELENGTHS: wavelengths,
                    SPECTRUM.FLUXES: fluxes
                })
                catalog.entries[name].add_spectrum(**spectrumdict)
        catalog.journal_entries()
    return
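
# The `zip(*newdata)` idiom above transposes row-wise (wavelength, flux[, err])
# records into per-column lists; a tiny standalone demonstration:
rows = [['4000.0', '1.2e-16'], ['4001.0', '1.3e-16']]
cols = [list(c) for c in zip(*rows)]
print(cols[0])  # ['4000.0', '4001.0'] (wavelengths)
print(cols[1])  # ['1.2e-16', '1.3e-16'] (fluxes)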
Example no. 33
def do_csp_fits_spectra(catalog):
    from astropy.io import fits

    fpath = catalog.get_current_task_repo()

    fureps = {'erg/cm2/s/A': 'erg/s/cm^2/Angstrom'}
    task_str = catalog.get_current_task_str()
    dirs = [x[0] for x in os.walk(
        os.path.join(fpath, 'Gutierrez_et_al_2017'))]
    files = []
    for dir in dirs:
        files.extend(glob(os.path.join(dir, '*.fits')))
    for datafile in pbar(files, task_str):
        filename = datafile.split('/')[-1]
        hdulist = fits.open(datafile)
        for oi, obj in enumerate(hdulist[0].header):
            if any(x in ['.', '/'] for x in obj):
                del hdulist[0].header[oi]
        try:
            hdulist[0].verify('silentfix')
        except Exception as e:
            print(e)
        hdrkeys = list(hdulist[0].header.keys())
        # print(hdrkeys)
        name = datafile.split('/')[-2]
        if name[2] in '6789':
            name = 'SN19' + name[2:]
        elif name != 'SN210':
            name = 'SN20' + name[2:]
        name, source = catalog.new_entry(name, bibcode='2017ApJ...850...89G')
        # for key in hdulist[0].header.keys():
        #     print(key, hdulist[0].header[key])
        mjd = None
        if hdulist[0].header['SIMPLE']:
            if 'JD' in hdrkeys:
                mjd = str(jd_to_mjd(Decimal(str(hdulist[0].header['JD']))))
            elif 'MJD' in hdrkeys:
                mjd = str(hdulist[0].header['MJD'])
            elif 'DATE-OBS' in hdrkeys or 'DATE' in hdrkeys:
                dkey = 'DATE-OBS' if 'DATE-OBS' in hdrkeys else 'DATE'
                dval = hdulist[0].header[dkey]
                if is_number(dval):
                    dkey = 'DATE' if dkey == 'DATE-OBS' else 'DATE-OBS'
                    dval = hdulist[0].header[dkey]
                dateobs = None
                if 'T' in dval:
                    dateobs = dval.strip()
                elif 'UTC-OBS' in hdrkeys:
                    dateobs = dval.strip(
                    ) + 'T' + hdulist[0].header['UTC-OBS'].strip()
                if dateobs is not None:
                    mjd = str(astrotime(dateobs, format='isot').mjd)
            # print(hdulist[0].header)
            if 'CRVAL1' in hdulist[0].header:
                w0 = hdulist[0].header['CRVAL1']
            elif hdulist[0].header['CTYPE1'] == 'MULTISPE':
                w0 = float(hdulist[0].header['WAT2_001'].split(
                    '"')[-1].split()[3])
            else:
                raise ValueError('Unsupported spectrum format.')
            if hdulist[0].header['NAXIS'] == 1:
                wd = hdulist[0].header['CDELT1']
                fluxes = [str(x) for x in list(hdulist[0].data)]
                errors = False
            elif hdulist[0].header['NAXIS'] == 3:
                wd = hdulist[0].header['CD1_1']
                fluxes = [str(x) for x in list(hdulist[0].data)[0][0]]
                errors = [str(x) for x in list(hdulist[0].data)[-1][0]]
            else:
                print('Warning: Skipping FITS spectrum `{}`.'.format(filename))
                continue
            waves = [str(w0 + wd * x) for x in range(0, len(fluxes))]
        else:
            raise ValueError('Non-simple FITS import not yet supported.')
        if 'BUNIT' in hdrkeys:
            fluxunit = hdulist[0].header['BUNIT']
            if fluxunit in fureps:
                fluxunit = fureps[fluxunit]
        else:
            if max([float(x) for x in fluxes]) < 1.0e-5:
                fluxunit = 'erg/s/cm^2/Angstrom'
            else:
                fluxunit = 'Uncalibrated'
        specdict = {
            SPECTRUM.U_WAVELENGTHS: 'Angstrom',
            SPECTRUM.WAVELENGTHS: waves,
            SPECTRUM.FLUXES: fluxes,
            SPECTRUM.U_FLUXES: fluxunit,
            SPECTRUM.FILENAME: filename,
            SPECTRUM.SOURCE: source
        }
        if mjd is not None:
            specdict[SPECTRUM.TIME] = mjd
            specdict[SPECTRUM.U_TIME] = 'MJD'
        if 'TELESCOP' in hdrkeys:
            specdict[SPECTRUM.TELESCOPE] = hdulist[0].header['TELESCOP']
        if 'INSTRUME' in hdrkeys:
            specdict[SPECTRUM.INSTRUMENT] = hdulist[0].header['INSTRUME']
        if 'AIRMASS' in hdrkeys:
            specdict[SPECTRUM.AIRMASS] = hdulist[0].header['AIRMASS']
        if errors:
            specdict[SPECTRUM.ERRORS] = errors
            specdict[SPECTRUM.U_ERRORS] = fluxunit
        if 'SITENAME' in hdrkeys:
            specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['SITENAME']
        elif 'OBSERVAT' in hdrkeys:
            specdict[SPECTRUM.OBSERVATORY] = hdulist[0].header['OBSERVAT']
        if 'OBSERVER' in hdrkeys:
            specdict[SPECTRUM.OBSERVER] = hdulist[0].header['OBSERVER']
        catalog.entries[name].add_spectrum(**specdict)
        hdulist.close()
        catalog.journal_entries()
    return
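
# Worked example of the linear wavelength reconstruction above: the FITS
# header supplies the first wavelength (CRVAL1) and the step (CDELT1 or
# CD1_1), and the grid is w0 + wd * i. Sample numbers are illustrative only.
w0, wd, npix = 3500.0, 2.0, 4
waves = [str(w0 + wd * x) for x in range(npix)]
print(waves)  # ['3500.0', '3502.0', '3504.0', '3506.0']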
Example no. 34
    def clean_internal(self, data):
        """Clean input data from the 'Supernovae/input/internal' repository.

        FIX: instead of making changes in place to `dirty_event`, should a new
             event be created, values filled, then returned??
        FIX: currently will fail if no bibcode and no url
        """
        self._log.debug("clean_internal(): {}".format(self.name()))

        def_source_dict = {}
        # Find source that will be used as default
        sources = data.get(self._KEYS.SOURCES, [])
        if sources:
            def_source_dict = sources[0]
            allow_alias = False
            if SOURCE.ALIAS in def_source_dict:
                del def_source_dict[SOURCE.ALIAS]
        else:
            # If there are no existing sources, add OSC as one
            self.add_self_source()
            sources = self.get(self._KEYS.SOURCES, [])
            def_source_dict = sources[0]
            allow_alias = True

        # Clean some legacy fields
        alias_key = 'aliases'
        if alias_key in data:
            # Remove the entry in the data
            aliases = data.pop(alias_key)
            # Make sure this is a list
            if not isinstance(aliases, list):
                raise ValueError("{}: aliases not a list '{}'".format(
                    self.name(), aliases))
            # Add OSC source entry
            source = self.add_self_source()

            for alias in aliases:
                self.add_quantity(self._KEYS.ALIAS, alias, source)

        dist_key = 'distinctfrom'
        if dist_key in data:
            distincts = data.pop(dist_key)
            if ((isinstance(distincts, list) and
                 isinstance(distincts[0], string_types))):
                source = self.add_self_source()
                for df in distincts:
                    self.add_quantity(self._KEYS.DISTINCT_FROM, df, source)
            else:
                data[dist_key] = list(distincts)

        # Go through all remaining keys in 'dirty' event, and make sure
        # everything is a quantity with a source (OSC if no other)
        for key in data.keys():
            # Skip keys that are not required to have an associated source
            if self._KEYS.get_key_by_name(key).no_source:
                pass
            elif key == self._KEYS.PHOTOMETRY:
                for p, photo in enumerate(data[self._KEYS.PHOTOMETRY]):
                    if photo.get(PHOTOMETRY.U_TIME) == 'JD':
                        data[self._KEYS.PHOTOMETRY][p][
                            PHOTOMETRY.U_TIME] = 'MJD'
                        data[self._KEYS.PHOTOMETRY][p][PHOTOMETRY.TIME] = str(
                            jd_to_mjd(Decimal(photo['time'])))
                    if QUANTITY.SOURCE not in photo:
                        if not def_source_dict:
                            raise ValueError("No sources found, can't add "
                                             "photometry.")
                        source = self.add_source(
                            allow_alias=allow_alias, **def_source_dict)
                        data[self._KEYS.PHOTOMETRY][p][
                            QUANTITY.SOURCE] = source
            else:
                for qi, quantity in enumerate(data[key]):
                    if QUANTITY.SOURCE not in quantity:
                        if not def_source_dict:
                            raise ValueError("No sources found, can't add "
                                             "quantity.")
                        source = self.add_source(
                            allow_alias=allow_alias, **def_source_dict)
                        data[key][qi][QUANTITY.SOURCE] = source

        return data
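
# Hedged sketch of the `pbar`/`pbar_strings` progress helpers wrapping every
# loop in these tasks; assuming thin tqdm wrappers that take the task string
# as the bar label (the real helpers live in astrocats' utility module).
from tqdm import tqdm

def pbar(iterable, desc=''):
    return tqdm(iterable, desc=desc)

def pbar_strings(strings, desc=''):
    return tqdm(sorted(strings), desc=desc)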
Example no. 35
def do_snf_spectra(catalog):
    """Import Nearby Supernova Factory spectra."""
    task_str = catalog.get_current_task_str()
    bibcodes = {'SN2005gj': '2006ApJ...650..510A',
                'SN2006D': '2007ApJ...654L..53T',
                'SN2007if': '2010ApJ...713.1073S',
                'SN2011fe': '2013A&A...554A..27P'}
    oldname = ''
    snfcnt = 0
    eventfolders = next(os.walk(os.path.join(
        catalog.get_current_task_repo(), 'SNFactory')))[1]
    for eventfolder in pbar(eventfolders, task_str):
        oname = eventfolder
        name = catalog.get_preferred_name(oname)
        if oldname and name != oldname:
            catalog.journal_entries()
        oldname = name
        name = catalog.add_entry(name)
        sec_reference = 'Nearby Supernova Factory'
        sec_refurl = 'http://snfactory.lbl.gov/'
        sec_bibcode = '2002SPIE.4836...61A'
        sec_source = catalog.entries[name].add_source(
            name=sec_reference, url=sec_refurl, bibcode=sec_bibcode,
            secondary=True)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, oname, sec_source)
        bibcode = bibcodes[oname]
        source = catalog.entries[name].add_source(bibcode=bibcode)
        sources = uniq_cdl([source, sec_source])
        use_path = os.path.join(
            catalog.get_current_task_repo(), 'SNFactory', eventfolder, '*.dat')
        eventspectra = glob(use_path)
        for spectrum in pbar(eventspectra, task_str):
            filename = os.path.basename(spectrum)
            with open(spectrum) as spec_file:
                specdata = list(csv.reader(
                    spec_file, delimiter=' ', skipinitialspace=True))
            specdata = list(filter(None, specdata))
            newspec = []
            time = ''
            telescope = ''
            instrument = ''
            observer = ''
            observatory = ''
            if 'Keck_20060202_R' in spectrum:
                time = '53768.23469'
            elif 'Spectrum05_276' in spectrum:
                time = pretty_num(astrotime('2005-10-03').mjd, sig=5)
            elif 'Spectrum05_329' in spectrum:
                time = pretty_num(astrotime('2005-11-25').mjd, sig=5)
            elif 'Spectrum05_336' in spectrum:
                time = pretty_num(astrotime('2005-12-02').mjd, sig=5)
            for row in specdata:
                if row[0][0] == '#':
                    joinrow = (' '.join(row)).split('=')
                    if len(joinrow) < 2:
                        continue
                    field = joinrow[0].strip('# ')
                    value = joinrow[1].split('/')[0].strip('\' ')
                    if not time:
                        if field == 'JD':
                            time = str(jd_to_mjd(Decimal(value)))
                        elif field == 'MJD':
                            time = value
                        elif field == 'MJD-OBS':
                            time = value
                    if field == 'OBSERVER':
                        observer = value.capitalize()
                    if field == 'OBSERVAT':
                        observatory = value.capitalize()
                    if field == 'TELESCOP':
                        telescope = value.capitalize()
                    if field == 'INSTRUME':
                        instrument = value.capitalize()
                else:
                    newspec.append(row)
            if not time:
                raise ValueError('Time missing from spectrum.')
            specdata = newspec
            haserrors = len(specdata[0]) == 3 and specdata[
                0][2] and specdata[0][2] != 'NaN'
            specdata = [list(i) for i in zip(*specdata)]

            wavelengths = specdata[0]
            fluxes = specdata[1]
            errors = ''
            if haserrors:
                errors = specdata[2]

            unit_err = ('Variance' if oldname == 'SN2011fe' else
                        'erg/s/cm^2/Angstrom')
            unit_flx = 'erg/s/cm^2/Angstrom'
            catalog.entries[name].add_spectrum(
                u_wavelengths='Angstrom', u_fluxes=unit_flx, u_time='MJD',
                time=time,
                wavelengths=wavelengths, fluxes=fluxes, errors=errors,
                observer=observer, observatory=observatory,
                telescope=telescope, instrument=instrument, u_errors=unit_err,
                source=sources, filename=filename)
            snfcnt = snfcnt + 1
            if (catalog.args.travis and
                    snfcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
                break

    catalog.journal_entries()
    return
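
# Sketch of `pretty_num`, used above to trim derived values to a number of
# significant figures; an assumed minimal version of the helper.
def pretty_num(value, sig=4):
    return ('%.' + str(sig) + 'g') % float(value)

# Usage: pretty_num(53768.23469, sig=5) -> '53768'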
Example no. 36
def do_donated_photo(catalog):
    """Import donated photometry."""
    task_str = catalog.get_current_task_str()

    # Private donations here #
    if not catalog.args.travis:
        pass
    # End private donations #

    # Ponder 05-12-17 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Ponder-05-12-17', 'meta.json'), 'r') as f:
        metadict = json.loads(f.read())
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Ponder-05-12-17', '*.dat'))
    for path in file_names:
        with open(path, 'r') as f:
            tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        oname = path.split('/')[-1].split('.')[0]
        name, source = catalog.new_entry(
            oname, bibcode=metadict[oname]['bibcode'])
        for row in pbar(tsvin, task_str + ': Ponder ' + oname):
            if row[0][0] == '#' or not is_number(row[-1]):
                continue
            mjd = row[1]
            bandinst = row[2].split('_')
            band = bandinst[0]
            inst = ''
            if len(bandinst) > 1:
                inst = bandinst[1]
            mag = row[3]
            uerr = row[4]
            lerr = row[5]
            photodict = {
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.BAND: band,
                PHOTOMETRY.MAGNITUDE: mag,
                PHOTOMETRY.E_LOWER_MAGNITUDE: lerr,
                PHOTOMETRY.E_UPPER_MAGNITUDE: uerr,
                PHOTOMETRY.SOURCE: source
            }
            if inst:
                photodict[PHOTOMETRY.INSTRUMENT] = inst
            catalog.entries[name].add_photometry(**photodict)

    # Benetti 03-08-17 donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Benetti-03-08-17', '1999E.dat')
    with open(path, 'r') as f:
        tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        name, source = catalog.new_entry(
            'SN1999E', bibcode='2003MNRAS.340..191R')
        bands = None
        for row in tsvin:
            if not row or row[0][0] == '#':
                continue
            if not bands:
                bands = row[2:-2]
                continue
            mjd = row[1]
            tel = row[-1] if 'IAUC' not in row[-1] else None
            for bi, band in enumerate(bands):
                mag = row[2 + 2 * bi]
                if mag == '9999':
                    continue
                err = row[2 + 2 * bi + 1]
                photodict = {
                    PHOTOMETRY.TIME: mjd,
                    PHOTOMETRY.U_TIME: 'MJD',
                    PHOTOMETRY.BAND: band,
                    PHOTOMETRY.MAGNITUDE: mag,
                    PHOTOMETRY.SOURCE: source
                }
                if err != '.00':
                    photodict[PHOTOMETRY.E_MAGNITUDE] = str(Decimal(err))
                if tel:
                    photodict[PHOTOMETRY.TELESCOPE] = tel
                catalog.entries[name].add_photometry(**photodict)

    # Nicholl 01-29-17 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Nicholl-01-29-17', 'meta.json'), 'r') as f:
        metadict = json.loads(f.read())
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-01-29-17', '*.txt'))
    for path in file_names:
        data = read(path, format='cds')
        oname = path.split('/')[-1].split('_')[0]
        name, source = catalog.new_entry(
            oname, bibcode=metadict[oname]['bibcode'])
        for row in pbar(data, task_str + ': Nicholl ' + oname):
            photodict = {
                PHOTOMETRY.TIME: str(row['MJD']),
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: str(row['mag']),
                PHOTOMETRY.BAND: row['Filter'],
                PHOTOMETRY.SOURCE: source
            }
            if 'system' in metadict[oname]:
                photodict[PHOTOMETRY.SYSTEM] = metadict[oname]['system']
            if 'l_mag' in row.columns and row['l_mag'] == '>':
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            elif 'e_mag' in row.columns:
                photodict[PHOTOMETRY.E_MAGNITUDE] = str(row['e_mag'])
            if 'Telescope' in row.columns:
                photodict[PHOTOMETRY.TELESCOPE] = row['Telescope']
            catalog.entries[name].add_photometry(**photodict)

    # Arcavi 2016gkg donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Arcavi-01-24-17', 'SN2016gkg.txt')
    with open(path, 'r') as f:
        tsvin = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
        name, source = catalog.new_entry(
            'SN2016gkg', bibcode='2016arXiv161106451A')
        for row in tsvin:
            if row[0][0] == '#':
                continue
            mjd = str(jd_to_mjd(Decimal(row[0])))
            tel = row[1]
            band = row[3]
            mag = row[4]
            err = row[5]
            limit = row[6] == 'True'
            photodict = {
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.TELESCOPE: tel,
                PHOTOMETRY.BAND: band,
                PHOTOMETRY.MAGNITUDE: mag,
                PHOTOMETRY.SOURCE: source
            }
            if limit:
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            else:
                photodict[PHOTOMETRY.E_MAGNITUDE] = err
            catalog.entries[name].add_photometry(**photodict)

    # Nicholl Gaia16apd donation
    path = os.path.join(catalog.get_current_task_repo(), 'Donations',
                        'Nicholl-01-20-17', 'gaia16apd_phot.txt')

    data = read(path, format='cds')
    name, source = catalog.new_entry(
        'Gaia16apd', bibcode='2017ApJ...835L...8N')
    for row in pbar(data, task_str + ': Nicholl Gaia16apd'):
        photodict = {
            PHOTOMETRY.TIME: str(row['MJD']),
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.MAGNITUDE: str(row['mag']),
            PHOTOMETRY.BAND: row['Filter'],
            PHOTOMETRY.TELESCOPE: row['Telescope'],
            PHOTOMETRY.SOURCE: source
        }
        if row['l_mag'] == '>':
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
        else:
            photodict[PHOTOMETRY.E_MAGNITUDE] = str(row['e_mag'])
        catalog.entries[name].add_photometry(**photodict)

    # Kuncarayakti-01-09-17
    datafile = os.path.join(catalog.get_current_task_repo(), 'Donations',
                            'Kuncarayakti-01-09-17', 'SN1978K.dat')
    inpname = os.path.basename(datafile).split('.')[0]
    with open(datafile, 'r') as f:
        tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
        host = False
        for ri, row in enumerate(tsvin):
            if ri == 0:
                continue
            if row[0][0] == '#':
                rsplit = [x.strip('# ') for x in ' '.join(row).split(',')]
                bc = rsplit[0]
                tel, ins = '', ''
                if len(rsplit) > 1:
                    tel = rsplit[1]
                if len(rsplit) > 2:
                    ins = rsplit[2]
                continue
            (name, source) = catalog.new_entry(inpname, bibcode=bc)
            mag = row[4]
            err = row[5]
            mjd = str(astrotime('-'.join(row[:3]), format='iso').mjd)
            photodict = {
                PHOTOMETRY.BAND: row[3],
                PHOTOMETRY.TIME: mjd,
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: mag.strip('>s'),
                PHOTOMETRY.SOURCE: source
            }
            if is_number(err):
                photodict[PHOTOMETRY.E_MAGNITUDE] = err
            if tel:
                photodict[PHOTOMETRY.TELESCOPE] = tel
            if ins:
                photodict[PHOTOMETRY.INSTRUMENT] = ins
            if '>' in mag:
                photodict[PHOTOMETRY.UPPER_LIMIT] = True
            if 's' in mag:
                photodict[PHOTOMETRY.SYNTHETIC] = True
            catalog.entries[name].add_photometry(**photodict)

    # Nugent 01-09-17 donation
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nugent-01-09-17', '*.dat'))
    for datafile in pbar_strings(file_names, task_str + ': Nugent-01-09-17'):
        inpname = os.path.basename(datafile).split('.')[0]
        (name, source) = catalog.new_entry(
            inpname, bibcode='2006ApJ...645..841N')
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
            host = False
            for urow in tsvin:
                row = list(filter(None, urow))
                counts = row[2]
                e_counts = row[3]
                zp = row[4]
                photodict = {
                    PHOTOMETRY.BAND: row[1],
                    PHOTOMETRY.TIME: row[0],
                    PHOTOMETRY.U_TIME: 'MJD',
                    PHOTOMETRY.COUNT_RATE: counts,
                    PHOTOMETRY.E_COUNT_RATE: e_counts,
                    PHOTOMETRY.ZERO_POINT: zp,
                    PHOTOMETRY.TELESCOPE: 'CFHT',
                    PHOTOMETRY.SURVEY: 'SNLS',
                    PHOTOMETRY.SOURCE: source
                }
                set_pd_mag_from_counts(photodict, counts, ec=e_counts, zp=zp,
                                       sig=5.0)
                catalog.entries[name].add_photometry(**photodict)

    # Inserra 09-04-16 donation
    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Inserra-09-04-16', '*.txt'))
    for datafile in pbar_strings(file_names, task_str + ': Inserra-09-04-16'):
        inpname = os.path.basename(datafile).split('.')[0]
        (name, source) = catalog.new_entry(
            inpname, bibcode='2013ApJ...770..128I')
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
            host = False
            for row in tsvin:
                if row[0][0] == '#':
                    if row[0] == '#Host':
                        host = True
                        continue
                    host = False
                    bands = row[3:-1]
                    continue
                for bi, ba in enumerate(bands):
                    mag = row[5 + 2 * bi]
                    if not is_number(mag):
                        continue
                    system = 'AB'
                    if ba in ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K']:
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: row[3],
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.BAND: ba,
                        PHOTOMETRY.MAGNITUDE: mag.strip('< '),
                        PHOTOMETRY.SOURCE: source,
                        PHOTOMETRY.SYSTEM: system
                    }
                    if 'ATel' not in row[-1]:
                        photodict[PHOTOMETRY.TELESCOPE] = row[-1]
                    if host:
                        photodict[PHOTOMETRY.HOST] = True
                    if '<' in mag:
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    e_mag = row[5 + 2 * bi + 1].strip('() ')
                    if is_number(e_mag):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = e_mag
                    catalog.entries[name].add_photometry(**photodict)

    # Nicholl 04-01-16 donation
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Nicholl-04-01-16', 'bibcodes.json'), 'r') as f:
        bcs = json.loads(f.read())

    kcorrected = ['SN2011ke', 'SN2011kf', 'SN2012il', 'PTF10hgi', 'PTF11rks']
    ignorephoto = ['PTF10hgi', 'PTF11rks', 'SN2011ke', 'SN2011kf', 'SN2012il']

    file_names = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-04-01-16/*.txt'))
    for datafile in pbar_strings(file_names, task_str + ': Nicholl-04-01-16'):
        inpname = os.path.basename(datafile).split('_')[0]
        isk = inpname in kcorrected
        name = catalog.add_entry(inpname)
        bibcode = ''
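        # bibcodes.json maps each bibcode to the list of event names it
        # covers, so invert the mapping to find this event's bibcode.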
        for bc in bcs:
            if inpname in bcs[bc]:
                bibcode = bc
        if not bibcode:
            raise ValueError('Bibcode not found!')
        source = catalog.entries[name].add_source(bibcode=bibcode)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, inpname, source)
        if inpname in ignorephoto:
            continue
        with open(datafile, 'r') as f:
            tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
            rtelescope = ''
            for rrow in tsvin:
                row = list(filter(None, rrow))
                if not row:
                    continue
                if row[0] == '#MJD':
                    bands = [x for x in row[1:] if x and 'err' not in x]
                elif row[0][0] == '#' and len(row[0]) > 1:
                    rtelescope = row[0][1:]
                if row[0][0] == '#':
                    continue
                mjd = row[0]
                if not is_number(mjd):
                    continue
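                # Data columns alternate magnitude/error pairs, one pair per
                # band listed in the '#MJD' header row.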
                for v, val in enumerate(row[1::2]):
                    upperlimit = False
                    mag = val.strip('>')
                    emag = row[2 * v + 2]
                    if '>' in val or (is_number(emag) and float(emag) == 0.0):
                        upperlimit = True
                    if (not is_number(mag) or isnan(float(mag)) or
                            float(mag) > 90.0):
                        continue
                    band = bands[v]
                    instrument = ''
                    survey = ''
                    system = ''
                    telescope = rtelescope
                    if telescope == 'LSQ':
                        instrument = 'QUEST'
                    elif telescope == 'PS1':
                        instrument = 'GPC'
                    elif telescope == 'NTT':
                        instrument = 'EFOSC'
                    elif telescope == 'GROND':
                        instrument = 'GROND'
                        telescope = 'MPI/ESO 2.2m'
                    else:
                        if band == 'NUV':
                            instrument = 'GALEX'
                            telescope = 'GALEX'
                        elif band in ['u', 'g', 'r', 'i', 'z']:
                            if inpname.startswith('PS1'):
                                instrument = 'GPC'
                                telescope = 'PS1'
                                survey = 'Pan-STARRS'
                            elif inpname.startswith('PTF'):
                                telescope = 'P60'
                                survey = 'PTF'
                        elif band.upper() in ['UVW2', 'UVW1', 'UVM2']:
                            instrument = 'UVOT'
                            telescope = 'Swift'
                            if inpname in ['PTF12dam']:
                                system = 'AB'
                    if inpname in ['SCP-06F6']:
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: mjd,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.BAND: band,
                        PHOTOMETRY.MAGNITUDE: mag,
                        PHOTOMETRY.SOURCE: source
                    }
                    if upperlimit:
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    if instrument:
                        photodict[PHOTOMETRY.INSTRUMENT] = instrument
                    if telescope:
                        photodict[PHOTOMETRY.TELESCOPE] = telescope
                    if survey:
                        photodict[PHOTOMETRY.SURVEY] = survey
                    if system:
                        photodict[PHOTOMETRY.SYSTEM] = system
                    if (is_number(emag) and
                            not isnan(float(emag)) and float(emag) > 0.0):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = emag
                    if isk:
                        photodict[PHOTOMETRY.KCORRECTED] = True
                    catalog.entries[name].add_photometry(**photodict)
    catalog.journal_entries()

    # Maggi 04-11-16 donation (MC SNRs)
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Maggi-04-11-16', 'LMCSNRs_OpenSNe.csv')) as f:
        tsvin = csv.reader(f, delimiter=',')
        for row in pbar(list(tsvin), task_str + ': Maggi-04-11-16/LMCSNRs'):
            name = 'MCSNR ' + row[0]
            name = catalog.add_entry(name)
            ra = row[2]
            dec = row[3]
            source = (catalog.entries[name]
                      .add_source(bibcode='2016A&A...585A.162M'))
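            # Coordinate-based alias: rep_chars strips the ' :.' separators
            # from the RA/Dec strings.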
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS,
                'LMCSNR J' + rep_chars(ra, ' :.') + rep_chars(dec, ' :.'),
                source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            if row[1] != 'noname':
                catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1],
                                                   source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[3], source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST, 'LMC', source)
            if row[4] == '1':
                catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                   'Ia', source)
            elif row[4] == '2':
                catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                   'CC', source)
    with open(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Maggi-04-11-16', 'SMCSNRs_OpenSNe.csv')) as f:
        tsvin = csv.reader(f, delimiter=',')
        for row in pbar(list(tsvin), task_str + ': Maggi-04-11-16/SMCSNRs'):
            name = 'MCSNR ' + row[0]
            name = catalog.add_entry(name)
            source = catalog.entries[name].add_source(name='Pierre Maggi')
            ra = row[3]
            dec = row[4]
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, 'SMCSNR J' + ra.replace(
                    ':', '')[:6] + dec.replace(':', '')[:7], source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[3], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[4], source)
            catalog.entries[name].add_quantity(SUPERNOVA.HOST, 'SMC', source)
    catalog.journal_entries()

    # Galbany 04-18-16 donation
    folders = next(
        os.walk(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/')))[1]
    bibcode = '2016AJ....151...33G'
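    # Each folder holds one SN: a *.info file with the metadata (name, type,
    # redshifts, coordinates, host, E(B-V)) and *.out* files with the
    # photometry, one band per file.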
    for folder in folders:
        infofiles = glob(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/') + folder + '/*.info')
        photfiles = glob(
            os.path.join(catalog.get_current_task_repo(), 'Donations',
                         'Galbany-04-18-16/') + folder + '/*.out*')

        zhel = ''
        zcmb = ''
        zerr = ''
        for path in infofiles:
            with open(path, 'r') as f:
                lines = f.read().splitlines()
                for line in lines:
                    splitline = line.split(':')
                    field = splitline[0].strip().lower()
                    value = splitline[1].strip()
                    if field == 'name':
                        name = value[:6].upper()
                        name += (value[6].upper()
                                 if len(value) == 7 else value[6:])
                        name = catalog.add_entry(name)
                        source = (catalog.entries[name]
                                  .add_source(bibcode=bibcode))
                        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,
                                                           name, source)
                    elif field == 'type':
                        claimedtype = value.replace('SN', '')
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.CLAIMED_TYPE, claimedtype, source)
                    elif field == 'zhel':
                        zhel = value
                    elif field == 'redshift_error':
                        zerr = value
                    elif field == 'zcmb':
                        zcmb = value
                    elif field == 'ra':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.RA,
                            value,
                            source,
                            u_value='floatdegrees')
                    elif field == 'dec':
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DEC,
                            value,
                            source,
                            u_value='floatdegrees')
                    elif field == 'host':
                        value = value.replace('- ', '-').replace('G ', 'G')
                        catalog.entries[name].add_quantity(SUPERNOVA.HOST,
                                                           value, source)
                    elif field == 'e(b-v)_mw':
                        catalog.entries[name].add_quantity(SUPERNOVA.EBV,
                                                           value, source)

        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT,
            zhel,
            source,
            e_value=zerr,
            kind='heliocentric')
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT, zcmb, source, e_value=zerr, kind='cmb')

        for path in photfiles:
            with open(path, 'r') as f:
                band = ''
                lines = f.read().splitlines()
                for li, line in enumerate(lines):
                    if li in [0, 2, 3]:
                        continue
                    if li == 1:
                        band = line.split(':')[-1].strip()
                    else:
                        cols = list(filter(None, line.split()))
                        if not cols:
                            continue
                        catalog.entries[name].add_photometry(
                            time=cols[0],
                            u_time='MJD',
                            magnitude=cols[1],
                            e_magnitude=cols[2],
                            band=band,
                            system=cols[3],
                            telescope=cols[4],
                            source=source)
    catalog.journal_entries()

    # Nicholl 05-03-16
    files = glob(
        os.path.join(catalog.get_current_task_repo(), 'Donations',
                     'Nicholl-05-03-16', '*.txt'))
    name = catalog.add_entry('SN2015bn')
    for fi in pbar(files, task_str + ': Nicholl-05-03-16'):
        if 'late' in fi:
            bc = '2016ApJ...828L..18N'
        else:
            bc = '2016ApJ...826...39N'
        source = catalog.entries[name].add_source(bibcode=bc)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, 'PS15ae', source)
        telescope = os.path.basename(fi).split('_')[1]
        with open(fi, 'r') as f:
            lines = f.read().splitlines()
            for li, line in enumerate(lines):
                if not line or (line[0] == '#' and li != 0):
                    continue
                cols = list(filter(None, line.split()))
                if not cols:
                    continue
                if li == 0:
                    bands = cols[1:]
                    continue

                mjd = cols[0]
                for ci, col in enumerate(cols[1::2]):
                    if not is_number(col) or np.isnan(float(col)):
                        continue

                    band = bands[ci]
                    band_set = ''
                    system = 'Vega'
                    if band in ["u'", "g'", "r'", "i'", "z'"]:
                        band_set = 'SDSS'
                        system = 'SDSS'
                    elif telescope == 'ASASSN':
                        band_set = 'ASASSN'
                        system = 'Vega'
                    photodict = {
                        PHOTOMETRY.TIME: mjd,
                        PHOTOMETRY.U_TIME: 'MJD',
                        PHOTOMETRY.MAGNITUDE: col,
                        PHOTOMETRY.BAND: band,
                        PHOTOMETRY.SOURCE: source,
                        PHOTOMETRY.TELESCOPE: telescope,
                        PHOTOMETRY.SYSTEM: system
                    }
                    if band_set:
                        photodict[PHOTOMETRY.BAND_SET] = band_set
                    emag = cols[2 * ci + 2]
                    if is_number(emag):
                        photodict[PHOTOMETRY.E_MAGNITUDE] = emag
                    else:
                        photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    if telescope == 'Swift':
                        photodict[PHOTOMETRY.INSTRUMENT] = 'UVOT'
                    catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
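
The Nugent block above converts the CFHT/SNLS count rates to magnitudes with
the set_pd_mag_from_counts helper. A minimal, self-contained sketch of that
conversion, assuming the standard zero-point relation m = zp - 2.5*log10(counts)
(the real helper also propagates asymmetric errors, and the function and field
names here are illustrative only):

import math

def mag_from_counts(counts, e_counts, zp, sig=5.0):
    """Sketch: magnitude from a count rate and zero point.

    A point whose count rate is below `sig` times its uncertainty is
    reported as a `sig`-sigma upper limit rather than a detection.
    """
    counts, e_counts, zp = float(counts), float(e_counts), float(zp)
    if counts < sig * e_counts:
        return {'magnitude': zp - 2.5 * math.log10(sig * e_counts),
                'upperlimit': True}
    # Standard error propagation: dm = 2.5/ln(10) * (dC/C) ~ 1.0857 * dC/C.
    e_mag = 2.5 / math.log(10.0) * e_counts / counts
    return {'magnitude': zp - 2.5 * math.log10(counts),
            'e_magnitude': e_mag, 'upperlimit': False}

# Example: 1500 +/- 30 counts with zp = 27.5 gives m ~ 19.56 +/- 0.02.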
Example 37
0
def do_ogle(catalog):
    task_str = catalog.get_current_task_str()
    basenames = ['transients', 'transients/2014b', 'transients/2014',
                 'transients/2013', 'transients/2012']
    oglenames = []
    ogleupdate = [True, False, False, False, False]
    for b, bn in enumerate(pbar(basenames, task_str)):
        if catalog.args.update and not ogleupdate[b]:
            continue

        filepath = os.path.join(catalog.get_current_task_repo(), 'OGLE-')
        filepath += bn.replace('/', '-') + '-transients.html'
        htmltxt = catalog.load_cached_url(
            'http://ogle.astrouw.edu.pl/ogle4/' + bn +
            '/transients.html', filepath)
        if not htmltxt:
            continue

        soup = BeautifulSoup(htmltxt, 'html5lib')
        links = soup.findAll('a')
        breaks = soup.findAll('br')
        datalinks = []
        datafnames = []
        for a in links:
            if a.has_attr('href'):
                if '.dat' in a['href']:
                    datalinks.append(
                        'http://ogle.astrouw.edu.pl/ogle4/' + bn + '/' +
                        a['href'])
                    datafnames.append(bn.replace('/', '-') +
                                      '-' + a['href'].replace('/', '-'))

        ec = -1
        reference = 'OGLE-IV Transient Detection System'
        refurl = 'http://ogle.astrouw.edu.pl/ogle4/transients/transients.html'
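        # Each OGLE transient page lists events as text nodes separated by
        # <br> tags, so walk the <br> elements and parse the text following
        # each one for the 'Ra,Dec=' marker.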
        for br in pbar(breaks, task_str):
            sibling = br.nextSibling
            if isinstance(sibling, NavigableString) and 'Ra,Dec=' in sibling:
                line = sibling.replace('\n', '').split('Ra,Dec=')
                name = line[0].strip()
                ec += 1

                if 'NOVA' in name or 'dupl' in name:
                    continue

                if name in oglenames:
                    continue
                oglenames.append(name)

                name = catalog.add_entry(name)

                mySibling = sibling.nextSibling
                atelref = ''
                claimedtype = ''
                while 'Ra,Dec=' not in mySibling:
                    if isinstance(mySibling, NavigableString):
                        if 'Phot.class=' in str(mySibling):
                            claimedtype = re.sub(
                                r'\([^)]*\)', '',
                                str(mySibling).split('=')[-1])
                            claimedtype = claimedtype.replace('SN', '').strip()
                    if isinstance(mySibling, Tag):
                        atela = mySibling
                        if (atela and atela.has_attr('href') and
                                'astronomerstelegram' in atela['href']):
                            atelref = atela.contents[0].strip()
                            atelurl = atela['href']
                    mySibling = mySibling.nextSibling
                    if mySibling is None:
                        break

                # nextSibling = sibling.nextSibling
                # if ((isinstance(nextSibling, Tag) and
                #      nextSibling.has_attr('alt') and
                #      nextSibling.contents[0].strip() != 'NED')):
                #     radec = nextSibling.contents[0].strip().split()
                # else:
                #     radec = line[-1].split()
                # ra = radec[0]
                # dec = radec[1]

                fname = os.path.join(catalog.get_current_task_repo(),
                                     'OGLE/') + datafnames[ec]
                if (catalog.current_task.load_archive(catalog.args) and
                        os.path.isfile(fname)):
                    with open(fname, 'r') as f:
                        csvtxt = f.read()
                else:
                    response = urllib.request.urlopen(datalinks[ec])
                    with open(fname, 'w') as f:
                        csvtxt = response.read().decode('utf-8')
                        f.write(csvtxt)

                lcdat = csvtxt.splitlines()
                sources = [catalog.entries[name].add_source(
                    name=reference, url=refurl)]
                catalog.entries[name].add_quantity(
                    SUPERNOVA.ALIAS, name, sources[0])
                if atelref and atelref != 'ATel#----':
                    sources.append(catalog.entries[name].add_source(
                        name=atelref, url=atelurl))
                sources = uniq_cdl(sources)

                if name.startswith('OGLE'):
                    if name[4] == '-':
                        if is_number(name[5:9]):
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.DISCOVER_DATE, name[5:9], sources)
                    else:
                        if is_number(name[4:6]):
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.DISCOVER_DATE, '20' + name[4:6],
                                sources)

                # RA and Dec from OGLE pages currently not reliable
                # catalog.entries[name].add_quantity(SUPERNOVA.RA, ra, sources)
                # catalog.entries[name].add_quantity(SUPERNOVA.DEC, dec,
                # sources)
                if claimedtype and claimedtype != '-':
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.CLAIMED_TYPE, claimedtype, sources)
                elif ('SN' not in name and SUPERNOVA.CLAIMED_TYPE not in
                      catalog.entries[name]):
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.CLAIMED_TYPE, 'Candidate', sources)
                for row in lcdat:
                    row = row.split()
                    mjd = str(jd_to_mjd(Decimal(row[0])))
                    magnitude = row[1]
                    if float(magnitude) > 90.0:
                        continue
                    e_mag = row[2]
                    upperlimit = False
                    if e_mag == '-1' or float(e_mag) > 10.0:
                        e_mag = ''
                        upperlimit = True
                    catalog.entries[name].add_photometry(
                        time=mjd, band='I', magnitude=magnitude,
                        e_magnitude=e_mag,
                        system='Vega', source=sources, upperlimit=upperlimit)
                if catalog.args.update:
                    catalog.journal_entries()

        catalog.journal_entries()
    return
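
Both do_ogle above and do_snax below convert the Julian Dates in their inputs
to MJD with jd_to_mjd. A minimal sketch of that conversion, assuming the
standard offset MJD = JD - 2400000.5 and using Decimal, as the callers do, to
avoid float round-off in the date:

from decimal import Decimal

def jd_to_mjd_sketch(jd):
    """Modified Julian Date from Julian Date: MJD = JD - 2400000.5."""
    return Decimal(jd) - Decimal('2400000.5')

# Example: JD 2457754.5 (2017-01-01 00:00 UT) -> MJD 57754.0.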
Example 38
0
def do_snax(catalog):
    """Import from the SNaX X-ray database."""
    task_str = catalog.get_current_task_str()

    dlurl = 'http://kronos.uchicago.edu/snax/export.php?exportType=TSV&exportFields=standard&objid=&name=&typeid=&type=&galaxyid=&galaxy=&fluxMin=&fluxMax=&fluxEnergyLMin=&fluxEnergyLMax=&fluxEnergyHMin=&fluxEnergyHMax=&lumMin=&lumMax=&instrumentid=&instrument=&ageMin=&ageMax=&dateMin=&dateMax=&sortA=dateExploded'  # noqa: E501

    file_path = os.path.join(catalog.get_current_task_repo(), 'SNaX.TSV')

    tsv = catalog.load_url(dlurl, file_path)
    # csvtxt = catalog.load_url(
    #     'http://www.grbcatalog.org/'
    #     'download_data?cut_0_min=5&cut_0=BAT%20T90'
    #     '&cut_0_max=100000&num_cuts=1&no_date_cut=True',
    #     file_path)

    data = [x.split('\t') for x in tsv.split('\n')]

    for r, row in enumerate(pbar(data, task_str)):
        if r == 0 or not row[0]:
            continue
        (name, source) = catalog.new_entry(
            row[0], srcname='SNaX', url='http://kronos.uchicago.edu/snax/',
            secondary=True)
        sources = [source]
        expsrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-6].strip())
        ])
        coosrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-5].strip())
        ])
        dissrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-4].strip())
        ])
        flxsrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-3].strip()),
            catalog.entries[name].add_source(bibcode=row[-2].strip())
        ])

        catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, row[1],
                                           source)
        date = astrotime(float(row[2]), format='jd').datetime
        catalog.entries[name].add_quantity(
            SUPERNOVA.EXPLOSION_DATE,
            make_date_string(date.year, date.month, date.day), expsrc)
        catalog.entries[name].add_quantity(
            SUPERNOVA.RA, ' '.join(row[3].split()[:3]), coosrc)
        catalog.entries[name].add_quantity(
            SUPERNOVA.DEC, ' '.join(row[3].split()[3:]), coosrc)
        catalog.entries[name].add_quantity(SUPERNOVA.LUM_DIST, row[4], dissrc)
        catalog.entries[name].add_quantity(SUPERNOVA.HOST, row[5], source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT,
            row[6],
            source,
            e_value=row[7] if (row[7] and float(row[7]) != 0.0) else '')
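        # Flux columns are tabulated in units of 1.0e-13 erg/s/cm^2, hence
        # the rescaling below; columns 15-16 give the low/high ends of the
        # energy band in keV.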
        photodict = {
            PHOTOMETRY.TIME: jd_to_mjd(Decimal(row[8])),
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.ENERGY: row[15:17],
            PHOTOMETRY.U_ENERGY: 'keV',
            PHOTOMETRY.FLUX: str(Decimal('1.0e-13') * Decimal(row[11])),
            PHOTOMETRY.U_FLUX: 'ergs/s/cm^2',
            PHOTOMETRY.E_LOWER_FLUX:
            str(Decimal('1.0e-13') * Decimal(row[13])),
            PHOTOMETRY.E_UPPER_FLUX:
            str(Decimal('1.0e-13') * Decimal(row[14])),
            PHOTOMETRY.INSTRUMENT: row[9],
            PHOTOMETRY.SOURCE: flxsrc
        }
        if row[12] == '1':
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
        catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
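
do_snax combines the secondary SNaX source with the per-column bibcode sources
by joining source aliases with uniq_cdl. A minimal sketch of that helper,
assuming it simply de-duplicates the aliases while preserving their order and
returns a comma-delimited string:

from collections import OrderedDict

def uniq_cdl_sketch(values):
    """Comma-delimited list of unique values, first-occurrence order kept."""
    return ','.join(OrderedDict.fromkeys(values))

# Example: uniq_cdl_sketch(['1', '2', '2']) -> '1,2'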