Beispiel #1
0
    def set_first_max_light(self):
        """Derive max-light and discovery-date quantities for this entry.

        Three independent passes, each adding quantities flagged
        ``derived=True``:

        1. If no maximum apparent magnitude is stored, record the date,
           magnitude, and band of maximum light from ``_get_max_light``.
        2. If the discovery date is missing or not fully specified (fewer
           than three '/'-separated parts, i.e. no day precision), try the
           first-light date from ``_get_first_light``.
        3. If a discovery date is still missing, fall back to the epoch of
           the earliest spectrum (times in MJD or JD only).

        NOTE(review): this method mixes ``ENTRY.*`` and ``self._KEYS.*``
        key constants -- presumably equivalent; confirm.
        """
        if ENTRY.MAX_APP_MAG not in self:
            mldt, mlmag, mlband, mlsource = self._get_max_light()
            if mldt or mlmag or mlband:
                # ``uniq_src`` is bound only when at least one max-light
                # value exists; the branches below that read it are guarded
                # by those same values, so it is always defined before use.
                source = self.add_self_source()
                uniq_src = uniq_cdl([source] + mlsource.split(','))
            if mldt:
                max_date = make_date_string(mldt.year, mldt.month, mldt.day)
                self.add_quantity(ENTRY.MAX_DATE, max_date, uniq_src,
                                  derived=True)
            if mlmag:
                # Format the magnitude before storing it.
                mlmag = pretty_num(mlmag)
                self.add_quantity(ENTRY.MAX_APP_MAG, mlmag, uniq_src,
                                  derived=True)
            if mlband:
                self.add_quantity(ENTRY.MAX_BAND, mlband, uniq_src,
                                  derived=True)

        # Add a discovery date when absent, or when every stored value is
        # coarser than year/month/day (fewer than three '/' parts).
        if (self._KEYS.DISCOVER_DATE not in self or
                max([len(x[QUANTITY.VALUE].split('/')) for x in
                     self[self._KEYS.DISCOVER_DATE]]) < 3):
            fldt, flsource = self._get_first_light()
            if fldt:
                source = self.add_self_source()
                disc_date = make_date_string(fldt.year, fldt.month, fldt.day)
                self.add_quantity(
                    self._KEYS.DISCOVER_DATE, disc_date,
                    uniq_cdl([source] + flsource.split(',')),
                    derived=True)

        # Last resort: use the epoch of the earliest spectrum.
        if self._KEYS.DISCOVER_DATE not in self and self._KEYS.SPECTRA in self:
            minspecmjd = float("+inf")
            for spectrum in self[self._KEYS.SPECTRA]:
                if 'time' in spectrum and 'u_time' in spectrum:
                    # Only MJD and JD time units are understood here;
                    # anything else is skipped.
                    if spectrum['u_time'] == 'MJD':
                        mjd = float(spectrum['time'])
                    elif spectrum['u_time'] == 'JD':
                        mjd = float(jd_to_mjd(Decimal(spectrum['time'])))
                    else:
                        continue

                    if mjd < minspecmjd:
                        minspecmjd = mjd
                        minspecsource = spectrum['source']

            # A finite minimum implies ``minspecsource`` was assigned above.
            if minspecmjd < float("+inf"):
                fldt = astrotime(minspecmjd, format='mjd').datetime
                source = self.add_self_source()
                disc_date = make_date_string(fldt.year, fldt.month, fldt.day)
                self.add_quantity(
                    self._KEYS.DISCOVER_DATE, disc_date,
                    uniq_cdl([source] + minspecsource.split(',')),
                    derived=True)
        return
Beispiel #2
0
def do_ps_mds(catalog):
    """Ingest the Pan-STARRS Medium Deep Survey SN II-P table.

    Reads ``MDS/apj506838t1_mrt.txt`` from the task repo, skipping its
    35-line header, and records position, discovery date, spectroscopic
    redshift, and a 'II P' claimed type for each event under bibcode
    2015ApJ...799..208S.
    """
    task_str = catalog.get_current_task_str()
    table_path = os.path.join(catalog.get_current_task_repo(),
                              'MDS/apj506838t1_mrt.txt')
    with open(table_path) as mds_file:
        lines = mds_file.read().splitlines()
    for line_num, line in enumerate(pbar(lines, task_str)):
        # The machine-readable table begins after a 35-line header.
        if line_num < 35:
            continue
        fields = [piece.strip() for piece in line.split(',')]
        name = catalog.add_entry(fields[0])
        source = catalog.entries[name].add_source(
            bibcode='2015ApJ...799..208S')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(SUPERNOVA.RA, fields[2], source)
        catalog.entries[name].add_quantity(SUPERNOVA.DEC, fields[3], source)
        # Column 4 holds the discovery epoch in MJD.
        when = astrotime(float(fields[4]), format='mjd').datetime
        ddate = make_date_string(when.year, when.month, when.day)
        catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, ddate,
                                           source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT, fields[5], source, kind='spectroscopic')
        catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, 'II P',
                                           source)
    catalog.journal_entries()
    return
Beispiel #3
0
 def set_first_max_light(self):
     """Record maximum-light quantities derived from stored photometry.

     Does nothing when a maximum apparent magnitude already exists.
     Otherwise queries ``_get_max_light`` and adds whichever of the
     max-light date, apparent magnitude, and band are available, each
     flagged as derived.
     """
     if FASTSTARS.MAX_APP_MAG in self:
         return
     # Maximum taken over all photometric bands.
     mldt, mlmag, mlband, mlsource = self._get_max_light()
     if not (mldt or mlmag or mlband):
         return
     sources = uniq_cdl([self.add_self_source()] + mlsource.split(','))
     if mldt:
         date_of_max = make_date_string(mldt.year, mldt.month, mldt.day)
         self.add_quantity(
             FASTSTARS.MAX_DATE, date_of_max, sources, derived=True)
     if mlmag:
         self.add_quantity(
             FASTSTARS.MAX_APP_MAG, pretty_num(mlmag), sources, derived=True)
     if mlband:
         self.add_quantity(
             FASTSTARS.MAX_BAND, mlband, sources, derived=True)
     return
Beispiel #4
0
def do_psst(catalog):
    """Import Pan-STARRS Survey for Transients data from published tables.

    Ingests two tables from 2016MNRAS.462.4094S (claimed types and
    redshifts; coordinates and discovery dates) and one from
    2016ApJ...827L..40S (coordinates, discovery dates, types, redshifts,
    and aliases), flushing entries after each bibcode.
    """
    task_str = catalog.get_current_task_str()
    # 2016MNRAS.462.4094S
    file_path = os.path.join(catalog.get_current_task_repo(), 'ASCII',
                             '2016MNRAS.462.4094S-tab1.tsv')
    with open(file_path, 'r') as f:
        data = list(
            csv.reader(
                f, delimiter='\t', quotechar='"', skipinitialspace=True))
        # Row indices were never used, so iterate the rows directly.
        for row in pbar(data, task_str):
            if row[0][0] == '#':
                # Comment/header line.
                continue
            (name, source) = catalog.new_entry(
                row[0], bibcode='2016MNRAS.462.4094S')
            catalog.entries[name].add_quantity(
                SUPERNOVA.CLAIMED_TYPE, row[3].replace('SN', '').strip('() '),
                source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.REDSHIFT,
                row[5].strip('() '),
                source,
                kind='spectroscopic')

    file_path = os.path.join(catalog.get_current_task_repo(), 'ASCII',
                             '2016MNRAS.462.4094S-tab2.tsv')
    with open(file_path, 'r') as f:
        data = list(
            csv.reader(
                f, delimiter='\t', quotechar='"', skipinitialspace=True))
        for row in pbar(data, task_str):
            if row[0][0] == '#':
                continue
            (name, source) = catalog.new_entry(
                row[0], bibcode='2016MNRAS.462.4094S')
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
            # Column 4 holds the discovery epoch in MJD.
            mldt = astrotime(float(row[4]), format='mjd').datetime
            discoverdate = make_date_string(mldt.year, mldt.month, mldt.day)
            catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE,
                                               discoverdate, source)

    catalog.journal_entries()

    # 2016ApJ...827L..40S
    file_path = os.path.join(catalog.get_current_task_repo(), 'ASCII',
                             '2016ApJ...827L..40S.tsv')
    with open(file_path, 'r') as f:
        data = list(
            csv.reader(
                f, delimiter='\t', quotechar='"', skipinitialspace=True))
        for row in pbar(data, task_str):
            if row[0][0] == '#':
                continue
            (name, source) = catalog.new_entry(
                row[0], bibcode='2016ApJ...827L..40S')
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
            mldt = astrotime(float(row[3]), format='mjd').datetime
            discoverdate = make_date_string(mldt.year, mldt.month, mldt.day)
            catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE,
                                               discoverdate, source)
            catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, row[6],
                                               source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.REDSHIFT, row[7], source, kind='spectroscopic')
            # Column 8 holds comma-separated aliases.
            for alias in [x.strip() for x in row[8].split(',')]:
                catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, alias,
                                                   source)

    catalog.journal_entries()

    return
Beispiel #5
0
def do_ascii(catalog):
    """Process ASCII files that were extracted from datatables appearing in
    published works.

    Each section below ingests one publication's table and is flushed via
    ``catalog.journal_entries()``.

    NOTE(review): several files here are opened inline without ``with``
    and are only closed when the reader is garbage-collected.
    """
    task_str = catalog.get_current_task_str()

    # 2006ApJ...645..841N
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2006ApJ...645..841N-table3.csv')
    tsvin = list(csv.reader(open(file_path, 'r'), delimiter=','))
    for ri, row in enumerate(pbar(tsvin, task_str)):
        name = 'SNLS-' + row[0]
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2006ApJ...645..841N')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT, row[1], source, kind='spectroscopic')
        # Column 4 appears to be JD - 2450000; the offset is restored
        # before converting to a calendar date.
        astrot = astrotime(float(row[4]) + 2450000., format='jd').datetime
        date_str = make_date_string(astrot.year, astrot.month, astrot.day)
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE, date_str, source)
    catalog.journal_entries()

    # Anderson 2014
    file_names = list(
        glob(os.path.join(
            catalog.get_current_task_repo(), 'SNII_anderson2014/*.dat')))
    for datafile in pbar_strings(file_names, task_str):
        basename = os.path.basename(datafile)
        # Only files whose names begin with a two-digit number are data.
        if not is_number(basename[:2]):
            continue
        if basename == '0210_V.dat':
            name = 'SN0210'
        else:
            # Expand the two-digit year: < 50 -> 20xx, otherwise 19xx.
            name = ('SN20' if int(basename[:2]) <
                    50 else 'SN19') + basename.split('_')[0]
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2014ApJ...786...67A')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)

        # Three events use the Swope system; everything else is Landolt.
        if name in ['SN1999ca', 'SN2003dq', 'SN2008aw']:
            system = 'Swope'
        else:
            system = 'Landolt'

        with open(datafile, 'r') as ff:
            tsvin = csv.reader(ff, delimiter=' ', skipinitialspace=True)
            for row in tsvin:
                if not row[0]:
                    continue
                # Column 0 is a JD; stored times are MJD.
                time = str(jd_to_mjd(Decimal(row[0])))
                catalog.entries[name].add_photometry(
                    time=time, band='V',
                    magnitude=row[1], e_magnitude=row[2],
                    system=system, source=source)
    catalog.journal_entries()

    # stromlo
    stromlobands = ['B', 'V', 'R', 'I', 'VM', 'RM']
    file_path = os.path.join(
        catalog.get_current_task_repo(), 'J_A+A_415_863-1/photometry.csv')
    tsvin = list(csv.reader(open(file_path, 'r'), delimiter=','))
    for row in pbar(tsvin, task_str):
        name = row[0]
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2004A&A...415..863G')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        mjd = str(jd_to_mjd(Decimal(row[1])))
        # Columns come in (magnitude, upper error, lower error) triplets,
        # one triplet per band in ``stromlobands`` order.
        for ri, ci in enumerate(range(2, len(row), 3)):
            if not row[ci]:
                continue
            band = stromlobands[ri]
            # A lower error with no upper error marks an upper limit.
            upperlimit = True if (not row[ci + 1] and row[ci + 2]) else False
            e_upper_magnitude = str(
                abs(Decimal(row[ci + 1]))) if row[ci + 1] else ''
            e_lower_magnitude = str(
                abs(Decimal(row[ci + 2]))) if row[ci + 2] else ''
            # The MACHO bands (VM/RM) were observed at MSSSO.
            teles = 'MSSSO 1.3m' if band in ['VM', 'RM'] else 'CTIO'
            instr = 'MaCHO' if band in ['VM', 'RM'] else ''
            catalog.entries[name].add_photometry(
                time=mjd, band=band, magnitude=row[ci],
                e_upper_magnitude=e_upper_magnitude,
                e_lower_magnitude=e_lower_magnitude,
                upperlimit=upperlimit, telescope=teles,
                instrument=instr, source=source)
    catalog.journal_entries()

    # 2015MNRAS.449..451W
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2015MNRAS.449..451W.dat')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        # Skip the header row.
        if rr == 0:
            continue
        # Names may be 'alias/name'; the last component is the entry name.
        namesplit = row[0].split('/')
        name = namesplit[-1]
        if name.startswith('SN'):
            name = name.replace(' ', '')
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            bibcode='2015MNRAS.449..451W')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        if len(namesplit) > 1:
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, namesplit[0], source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.CLAIMED_TYPE, row[1], source)
        catalog.entries[name].add_photometry(
            time=row[2], band=row[4], magnitude=row[3], source=source)
    catalog.journal_entries()

    # 2016MNRAS.459.1039T
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2016MNRAS.459.1039T.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    name = catalog.add_entry('LSQ13zm')
    source = catalog.entries[name].add_source(bibcode='2016MNRAS.459.1039T')
    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            # Header row: band names carry an '(err)' suffix to strip.
            bands = [xx.replace('(err)', '') for xx in row[3:-1]]
            continue
        mjd = row[1]
        # Remove parenthesized uncertainties from the magnitude columns;
        # a '>' prefix marks an upper limit.
        mags = [re.sub(r'\([^)]*\)', '', xx) for xx in row[3:-1]]
        upps = [True if '>' in xx else '' for xx in mags]
        mags = [xx.replace('>', '') for xx in mags]
        errs = [xx[xx.find('(') + 1:xx.find(')')]
                if '(' in xx else '' for xx in row[3:-1]]
        for mi, mag in enumerate(mags):
            if not is_number(mag):
                continue
            catalog.entries[name].add_photometry(
                time=mjd, band=bands[mi], magnitude=mag, e_magnitude=errs[mi],
                instrument=row[-1], upperlimit=upps[mi], source=source)
    catalog.journal_entries()

    # 2015ApJ...804...28G
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2015ApJ...804...28G.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    name = catalog.add_entry('PS1-13arp')
    source = catalog.entries[name].add_source(bibcode='2015ApJ...804...28G')
    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
    for rr, row in enumerate(pbar(data, task_str)):
        if rr == 0:
            continue
        mjd = row[1]
        mag = row[3]
        # A '<' prefix marks an upper limit.
        upp = True if '<' in mag else ''
        mag = mag.replace('<', '')
        err = row[4] if is_number(row[4]) else ''
        ins = row[5]
        catalog.entries[name].add_photometry(
            time=mjd, band=row[0], magnitude=mag, e_magnitude=err,
            instrument=ins, upperlimit=upp, source=source)
    catalog.journal_entries()

    # 2016ApJ...819...35A
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2016ApJ...819...35A.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(
            bibcode='2016ApJ...819...35A')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
        catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
        catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT, row[3], source)
        # Convert dates like '2015 Jan 01' to 'YYYY/MM/DD'.
        disc_date = datetime.strptime(row[4], '%Y %b %d').isoformat()
        disc_date = disc_date.split('T')[0].replace('-', '/')
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE, disc_date, source)
    catalog.journal_entries()

    # 2014ApJ...784..105W
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2014ApJ...784..105W.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(
            bibcode='2014ApJ...784..105W')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        mjd = row[1]
        # NOTE(review): ``band`` is assigned but unused; the call below
        # passes ``row[2]`` directly.
        band = row[2]
        mag = row[3]
        err = row[4]
        catalog.entries[name].add_photometry(
            time=mjd, band=row[2], magnitude=mag, e_magnitude=err,
            instrument='WHIRC', telescope='WIYN 3.5 m', observatory='NOAO',
            system='WHIRC', source=source)
    catalog.journal_entries()

    # 2012MNRAS.425.1007B
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2012MNRAS.425.1007B.tsv')
    data = list(csv.reader(open(file_path, 'r'), delimiter='\t',
                           quotechar='"', skipinitialspace=True))
    for rr, row in enumerate(pbar(data, task_str)):
        if row[0][0] == '#':
            # Header row provides the band names.
            bands = row[2:]
            continue
        name = catalog.add_entry(row[0])
        source = catalog.entries[name].add_source(
            bibcode='2012MNRAS.425.1007B')
        catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
        mjd = row[1]
        # Magnitudes are written as 'mag±err'.
        mags = [xx.split('±')[0].strip() for xx in row[2:]]
        errs = [xx.split('±')[1].strip()
                if '±' in xx else '' for xx in row[2:]]
        if row[0] == 'PTF09dlc':
            ins = 'HAWK-I'
            tel = 'VLT 8.1m'
            obs = 'ESO'
        else:
            ins = 'NIRI'
            tel = 'Gemini North 8.2m'
            obs = 'Gemini'

        for mi, mag in enumerate(mags):
            if not is_number(mag):
                continue
            catalog.entries[name].add_photometry(
                time=mjd, band=bands[mi], magnitude=mag, e_magnitude=errs[mi],
                instrument=ins, telescope=tel, observatory=obs,
                system='Natural', source=source)

        # NOTE(review): unlike every other section in this function,
        # journal_entries() runs inside the per-row loop here -- confirm
        # this indentation is intended.
        catalog.journal_entries()

    # 2014ApJ...783...28G
    file_path = os.path.join(
        catalog.get_current_task_repo(), 'apj490105t2_ascii.txt')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                continue
            name, source = catalog.new_entry(
                row[0], bibcode='2014ApJ...783...28G')
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1], source)
            # Discovery year is encoded in the name (e.g. 'PS1-10xx').
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, '20' + row[0][3:5], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[3], source)
            # Prefer column 13 for redshift, falling back to column 10.
            catalog.entries[name].add_quantity(
                SUPERNOVA.REDSHIFT, row[13] if is_number(row[13]) else
                row[10], source)
    catalog.journal_entries()

    # 2005ApJ...634.1190H
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2005ApJ...634.1190H.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            name, source = catalog.new_entry(
                'SNLS-' + row[0], bibcode='2005ApJ...634.1190H')
            # Discovery year from the two leading digits of the name.
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, '20' + row[0][:2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.REDSHIFT, row[5].replace('?', ''), source,
                e_value=row[6], kind='host')
            catalog.entries[name].add_quantity(
                SUPERNOVA.CLAIMED_TYPE, row[7].replace('SN', '').strip(':* '),
                source)
    catalog.journal_entries()

    # 2014MNRAS.444.2133S
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2014MNRAS.444.2133S.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                continue
            name = row[0]
            # Names beginning with a year get the 'SN' prefix.
            if is_number(name[:4]):
                name = 'SN' + name
            name, source = catalog.new_entry(
                name, bibcode='2014MNRAS.444.2133S')
            catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
            catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT, row[3],
                                               source,
                                               kind='host')
    catalog.journal_entries()

    # 2009MNRAS.398.1041B
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2009MNRAS.398.1041B.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                bands = row[2:-1]
                continue
            name, source = catalog.new_entry(
                'SN2008S', bibcode='2009MNRAS.398.1041B')
            mjd = str(jd_to_mjd(Decimal(row[0])))
            # NOTE(review): ``mags``/``errs`` slice row[2:] (including the
            # trailing instrument column) while ``bands`` excludes it; the
            # extra element is skipped only because the instrument string
            # fails is_number. Also ``upps`` is computed but never passed
            # to add_photometry, so '<' upper limits appear to be dropped.
            mags = [x.split('±')[0].strip() for x in row[2:]]
            upps = [('<' in x.split('±')[0]) for x in row[2:]]
            errs = [x.split('±')[1].strip()
                    if '±' in x else '' for x in row[2:]]

            instrument = row[-1]

            for mi, mag in enumerate(mags):
                if not is_number(mag):
                    continue
                catalog.entries[name].add_photometry(
                    time=mjd, band=bands[mi],
                    magnitude=mag, e_magnitude=errs[mi],
                    instrument=instrument, source=source)
    catalog.journal_entries()

    # 2010arXiv1007.0011P
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2010arXiv1007.0011P.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                bands = row[1:]
                continue
            name, source = catalog.new_entry(
                'SN2008S', bibcode='2010arXiv1007.0011P')
            mjd = row[0]
            mags = [x.split('±')[0].strip() for x in row[1:]]
            errs = [x.split('±')[1].strip()
                    if '±' in x else '' for x in row[1:]]

            for mi, mag in enumerate(mags):
                if not is_number(mag):
                    continue
                catalog.entries[name].add_photometry(
                    time=mjd, band=bands[mi],
                    magnitude=mag, e_magnitude=errs[mi],
                    instrument='LBT', source=source)
    catalog.journal_entries()

    # 2000ApJ...533..320G
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2000ApJ...533..320G.tsv')
    with open(file_path, 'r') as f:
        data = list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))
        name, source = catalog.new_entry(
            'SN1997cy', bibcode='2000ApJ...533..320G')
        for r, row in enumerate(pbar(data, task_str)):
            if row[0][0] == '#':
                bands = row[1:-1]
                continue
            mjd = str(jd_to_mjd(Decimal(row[0])))
            # NOTE(review): ``row[1:len(bands)]`` yields len(bands) - 1
            # values, so the final band never receives photometry -- verify
            # whether this should be row[1:len(bands) + 1].
            mags = row[1:len(bands)]
            for mi, mag in enumerate(mags):
                if not is_number(mag):
                    continue
                catalog.entries[name].add_photometry(
                    time=mjd, band=bands[mi],
                    magnitude=mag,
                    observatory='Mount Stromlo', telescope='MSSSO',
                    source=source, kcorrected=True)

    catalog.journal_entries()
    return
Beispiel #6
0
def do_batse(catalog):
    """Import long-duration GRBs from the BATSE current catalog.

    Downloads (or loads cached copies of) the BATSE basic and duration
    tables, keeps only bursts whose T90 plus one sigma is at least 3 s,
    assigns letter suffixes to bursts sharing a date, and records the
    discovery date, position, and an 'LGRB' claimed type for each.
    """
    task_str = catalog.get_current_task_str()
    file_path = os.path.join(catalog.get_current_task_repo(),
                             'BATSE/basic_table.txt')
    csvtxt = catalog.load_url(
        'http://gammaray.nsstc.nasa.gov/batse/grb/catalog/current/tables/'
        'basic_table.txt', file_path)
    if not csvtxt:
        # Download/cache failed; nothing to import.
        return
    data = list(
        csv.reader(
            csvtxt.splitlines(),
            delimiter=' ',
            quotechar='"',
            skipinitialspace=True))

    file_path = os.path.join(catalog.get_current_task_repo(),
                             'BATSE/duration_table.txt')
    csvtxt = catalog.load_url(
        'http://gammaray.nsstc.nasa.gov/batse/grb/catalog/current/tables/'
        'duration_table.txt', file_path)
    if not csvtxt:
        return
    data2 = list(
        csv.reader(
            csvtxt.splitlines(),
            delimiter=' ',
            quotechar='"',
            skipinitialspace=True))
    # Map trigger ID -> duration used for the long/short cut below.
    t90s = {}
    for row in data2:
        # Add one sigma to quoted T90 to compare to
        t90s[row[0]] = float(row[-3]) + float(row[-2])

    prev_oname = ''
    grb_letter = 'A'
    for r, row in enumerate(pbar(data, task_str)):
        if row[0].startswith('#'):
            continue
        oname = 'GRB ' + row[2]
        # Consecutive rows with the same base name get incrementing letter
        # suffixes; a trailing '-' in the name is the suffix placeholder.
        # Order matters: prev_oname must be updated before the endswith
        # check below.
        if oname.replace('-', '') == prev_oname:
            grb_letter = chr(ord(grb_letter) + 1)
        else:
            grb_letter = 'A'
        prev_oname = oname.replace('-', '')
        if oname.endswith('-'):
            oname = oname.replace('-', grb_letter)
        # NOTE(review): presumably a skip/duplicate flag in the final
        # column -- confirm against the table documentation.
        if row[-1] == 'Y':
            continue
        # Keep long GRBs only: require T90 + 1 sigma >= 3 seconds.
        if row[0] not in t90s or t90s[row[0]] < 3.0:
            continue
        (name, source) = catalog.new_entry(
            oname,
            srcname='BATSE Catalog',
            bibcode='1999ApJS..122..465P',
            url='http://gammaray.nsstc.nasa.gov/batse/grb/catalog/')

        # row[3] appears to be a truncated JD (TJD = JD - 2440000.5);
        # 2440000.5 is exactly representable as a float, so
        # Decimal(2440000.5) is exact here.
        jd = Decimal(2440000.5) + Decimal(row[3])
        astrot = astrotime(float(jd), format='jd').datetime
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE,
            make_date_string(astrot.year, astrot.month, astrot.day), source)
        # NOTE(review): row[9] is scaled by 3600 (degrees -> arcseconds,
        # presumably) while the coordinates use u_value='floatdegrees' --
        # verify the unit expected for e_value.
        pos_err = str(Decimal(row[9]) * Decimal(3600))
        catalog.entries[name].add_quantity(
            SUPERNOVA.RA,
            row[5],
            source,
            u_value='floatdegrees',
            e_value=pos_err)
        catalog.entries[name].add_quantity(
            SUPERNOVA.DEC,
            row[6],
            source,
            u_value='floatdegrees',
            e_value=pos_err)
        catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, 'LGRB',
                                           source)

    catalog.journal_entries()
    return
Beispiel #7
0
def do_rochester(catalog):
    """Import data from latest supernova page.

    Scrapes the Rochester Astronomy "Latest Supernovae" HTML tables
    (including the "boneyard" pages) into catalog entries, and — when not
    running in update mode — ingests the VSNET `latestsne.dat` photometry
    file as well.
    """
    rochestermirrors = [
        'http://www.rochesterastronomy.org/',
        'http://www.supernova.thistlethwaites.com/'
    ]
    rochesterpaths = [
        'snimages/snredshiftall.html', 'sn2020/snredshift.html',
        'snimages/snredboneyard.html', 'snimages/snredboneyard-old.html'
    ]
    # Which pages are still re-scraped when running with `--update`.
    rochesterupdate = [False, True, True, False]
    task_str = catalog.get_current_task_str()
    # Placeholder dates the page uses for unknown/invalid values.
    baddates = ['2440587', '2440587.292', '0001/01/01']

    for pp, path in enumerate(pbar(rochesterpaths, task_str)):
        if catalog.args.update and not rochesterupdate[pp]:
            continue

        # Column indices; the boneyard table uses a different layout.
        if 'snredboneyard.html' in path:
            cns = {
                'name': 0,
                'host': 1,
                'ra': 2,
                'dec': 3,
                'type': 7,
                'z': 8,
                'mmag': 9,
                'max': 10,
                'disc': 11,
                'ref': 12,
                'dver': 13,
                'aka': 14
            }
        else:
            cns = {
                'name': 0,
                'type': 1,
                'host': 2,
                'ra': 3,
                'dec': 4,
                'disc': 6,
                'max': 7,
                'mmag': 8,
                'z': 11,
                'zh': 12,
                'ref': 13,
                'dver': 14,
                'aka': 15
            }

        filepath = (
            os.path.join(catalog.get_current_task_repo(), 'rochester/') +
            path.replace('/', '-'))
        # Try each mirror in turn; only the last one is allowed to fail hard.
        for mirror in rochestermirrors:
            html = catalog.load_url(mirror + path,
                                    filepath,
                                    fail=(mirror != rochestermirrors[-1]))
            if html:
                break

        if not html:
            continue

        soup = BeautifulSoup(html, 'html5lib')
        rows = soup.findAll('tr')
        sec_ref = 'Latest Supernovae'
        sec_refurl = ('http://www.rochesterastronomy.org/'
                      'snimages/snredshiftall.html')
        loopcnt = 0
        for rr, row in enumerate(pbar(rows, task_str)):
            if rr == 0:
                # Header row.
                continue
            cols = row.findAll('td')
            if not len(cols):
                continue

            # Prefer an entry named after a recognized alias (e.g. an IAU
            # 'SN' designation) when the AKA column supplies one.
            name = ''
            if cols[cns['aka']].contents:
                for rawaka in str(cols[cns['aka']].contents[0]).split(','):
                    aka = rawaka.strip()
                    if is_number(aka.strip('?')):
                        aka = 'SN' + aka.strip('?') + 'A'
                        oldname = aka
                        name = catalog.add_entry(aka)
                    elif len(aka) == 4 and is_number(aka[:4]):
                        aka = 'SN' + aka
                        oldname = aka
                        name = catalog.add_entry(aka)

            # Strip any residual HTML tags from the name cell.
            sn = re.sub('<[^<]+?>', '',
                        str(cols[cns['name']].contents[0])).strip()
            if is_number(sn.strip('?')):
                sn = 'SN' + sn.strip('?') + 'A'
            elif len(sn) == 4 and is_number(sn[:4]):
                sn = 'SN' + sn
            if not name:
                if not sn or sn in ['Transient']:
                    continue

            ra = str(cols[cns['ra']].contents[0]).strip().replace(':.', '.')
            dec = str(cols[cns['dec']].contents[0]).strip().replace(':.', '.')

            if not name:
                if sn[:8] == 'MASTER J':
                    sn = sn.replace('MASTER J',
                                    'MASTER OT J').replace('SNHunt', 'SNhunt')
                if 'POSSIBLE' in sn.upper() and ra and dec:
                    # Unconfirmed objects get a coordinate-based PSN name.
                    sn = 'PSN J' + ra.replace(':', '').replace('.', '')
                    sn += dec.replace(':', '').replace('.', '')
                oldname = sn
                name = catalog.add_entry(sn)
            sec_source = catalog.entries[name].add_source(name=sec_ref,
                                                          url=sec_refurl,
                                                          secondary=True)
            sources = []
            if 'ref' in cns:
                # (Dropped a dead `reference =` chained assignment here;
                # `reference` is only meaningful when reassigned below.)
                reftag = cols[cns['ref']].findAll('a')
                if len(reftag) and len(reftag[0].contents):
                    reference = reftag[0].contents[0].strip()
                    refurl = reftag[0]['href'].strip()
                    sources.append(catalog.entries[name].add_source(
                        name=reference, url=refurl))
            sources.append(sec_source)
            sources = uniq_cdl(list(filter(None, sources)))
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, oldname,
                                               sources)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, sn, sources)

            if cols[cns['aka']].contents:
                # Normalize and record all remaining aliases.
                for rawaka in str(cols[cns['aka']].contents[0]).split(','):
                    aka = rawaka.strip()
                    if aka == 'SNR G1.9+0.3':
                        aka = 'G001.9+00.3'
                    if aka[:4] == 'PS1 ':
                        aka = 'PS1-' + aka[4:]
                    if aka[:8] == 'MASTER J':
                        aka = aka.replace('MASTER J', 'MASTER OT J').replace(
                            'SNHunt', 'SNhunt')
                    if 'POSSIBLE' in aka.upper() and ra and dec:
                        aka = 'PSN J' + ra.replace(':', '').replace('.', '')
                        aka += dec.replace(':', '').replace('.', '')
                    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, aka,
                                                       sources)

            if (len(cols[cns['type']].contents) > 0
                    and str(cols[cns['type']].contents[0]).strip() != 'unk'):
                # Renamed from `type` to avoid shadowing the builtin.
                ctype = str(cols[cns['type']].contents[0]).strip(' :,')
                catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
                                                   ctype, sources)
            if (len(cols[cns['host']].contents) > 0 and
                    str(cols[cns['host']].contents[0]).strip() != 'anonymous'):
                catalog.entries[name].add_quantity(
                    SUPERNOVA.HOST,
                    str(cols[cns['host']].contents[0]).strip(), sources)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, ra, sources)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, dec, sources)
            discstr = str(cols[cns['disc']].contents[0]).strip()
            if discstr and discstr not in baddates:
                if '/' not in discstr:
                    # A bare number is a Julian Date.
                    astrot = astrotime(float(discstr), format='jd').datetime
                    ddate = make_date_string(astrot.year, astrot.month,
                                             astrot.day)
                else:
                    ddate = discstr
                catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE,
                                                   ddate, sources)
            maxstr = str(cols[cns.get('max', '')].contents[0]).strip()
            if maxstr and maxstr not in baddates:
                try:
                    if '/' not in maxstr:
                        astrot = astrotime(float(maxstr), format='jd')
                    else:
                        astrot = astrotime(maxstr.replace('/', '-'),
                                           format='iso')
                except Exception:
                    # Was a bare `except:`; narrowed so KeyboardInterrupt
                    # and SystemExit are not swallowed.
                    catalog.log.info(
                        'Max date conversion failed for `{}`.'.format(maxstr))
                else:
                    # Photometry is only added when the date parsed.
                    # Previously a failed conversion fell through and could
                    # reuse a stale `astrot` from the discovery-date branch.
                    if ((float(str(
                            cols[cns['mmag']].contents[0]).strip()) <= 90.0
                         and not any(
                             'GRB' in xx
                             for xx in catalog.entries[name].get_aliases()))):
                        mag = str(cols[cns['mmag']].contents[0]).strip()
                        catalog.entries[name].add_photometry(
                            time=str(astrot.mjd),
                            u_time='MJD',
                            magnitude=mag,
                            source=sources)
            if 'z' in cns and cols[cns['z']].contents[0] != 'n/a':
                catalog.entries[name].add_quantity(
                    SUPERNOVA.REDSHIFT,
                    str(cols[cns['z']].contents[0]).strip(), sources)
            if 'zh' in cns:
                zhost = str(cols[cns['zh']].contents[0]).strip()
                if is_number(zhost):
                    catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                                       zhost, sources)
            if 'dver' in cns:
                catalog.entries[name].add_quantity(
                    SUPERNOVA.DISCOVERER,
                    str(cols[cns['dver']].contents[0]).strip(), sources)
            if catalog.args.update:
                catalog.journal_entries()
            loopcnt = loopcnt + 1
            if (catalog.args.travis
                    and loopcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
                break

    if not catalog.args.update:
        vsnetfiles = ['latestsne.dat']
        for vsnetfile in vsnetfiles:
            file_name = os.path.join(catalog.get_current_task_repo(),
                                     "" + vsnetfile)
            with open(file_name, 'r', encoding='latin1') as csv_file:
                tsvin = csv.reader(csv_file,
                                   delimiter=' ',
                                   skipinitialspace=True)
                loopcnt = 0
                for rr, row in enumerate(tsvin):
                    if (not row or row[0] in ['Transient']
                            or row[0][:4] in ['http', 'www.'] or len(row) < 3):
                        continue
                    name = row[0].strip()
                    if name[:4].isdigit():
                        name = 'SN' + name
                    if name.startswith('PSNJ'):
                        name = 'PSN J' + name[4:]
                    if name.startswith('MASTEROTJ'):
                        name = name.replace('MASTEROTJ', 'MASTER OT J')
                    name = catalog.add_entry(name)
                    sec_source = catalog.entries[name].add_source(
                        name=sec_ref, url=sec_refurl, secondary=True)
                    catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name,
                                                       sec_source)

                    if not is_number(row[1]):
                        continue
                    # Date column is YYYYMMDD[.frac]; convert to MJD while
                    # preserving the fractional day.
                    year = row[1][:4]
                    month = row[1][4:6]
                    day = row[1][6:]
                    if '.' not in day:
                        day = day[:2] + '.' + day[2:]
                    mjd = astrotime(year + '-' + month + '-' +
                                    str(floor(float(day))).zfill(2)).mjd
                    mjd += float(day) - floor(float(day))
                    magnitude = row[2].rstrip(ascii_letters)
                    if not is_number(magnitude):
                        continue
                    if magnitude.isdigit():
                        if int(magnitude) > 100:
                            # Missing decimal point, e.g. '175' -> '17.5'.
                            magnitude = magnitude[:2] + '.' + magnitude[2:]

                    # NOTE(review): `cols` here is left over from the HTML
                    # scraping loop above, not derived from this VSNET row —
                    # this looks like a long-standing bug; confirm the
                    # intended column before changing behavior.
                    if float(str(cols[8].contents[0]).strip()) >= 90.0:
                        continue

                    if len(row) >= 4:
                        if is_number(row[3]):
                            e_magnitude = row[3]
                            refind = 4
                        else:
                            e_magnitude = ''
                            refind = 3

                        if refind >= len(row):
                            sources = sec_source
                        else:
                            reference = ' '.join(row[refind:])
                            source = catalog.entries[name].add_source(
                                name=reference)
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.ALIAS, name, sec_source)
                            sources = uniq_cdl([source, sec_source])
                    else:
                        sources = sec_source

                    # Trailing letters of the magnitude column are the band.
                    band = row[2].lstrip('1234567890.')

                    catalog.entries[name].add_photometry(
                        time=mjd,
                        u_time='MJD',
                        band=band,
                        magnitude=magnitude,
                        e_magnitude=e_magnitude,
                        source=sources)

                    # Increment was missing, so in travis mode
                    # `0 % TRAVIS_QUERY_LIMIT == 0` broke on the first row.
                    loopcnt = loopcnt + 1
                    if (catalog.args.travis
                            and loopcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
                        break

    catalog.journal_entries()
    return
# Beispiel #8
# 0
def do_psst(catalog):
    """Import PSST (Pan-STARRS Survey for Transients) published tables.

    Reads three TSV tables from the task repository (two from
    2016MNRAS.462.4094S, one from 2016ApJ...827L..40S) and records types,
    redshifts, coordinates, discovery dates and aliases.
    """
    task_str = catalog.get_current_task_str()

    def _load_tsv(basename):
        # Parse a tab-separated table from the task repo into a row list.
        full_path = os.path.join(catalog.get_current_task_repo(), 'ASCII',
                                 basename)
        with open(full_path, 'r') as fh:
            return list(
                csv.reader(fh, delimiter='\t', quotechar='"',
                           skipinitialspace=True))

    # 2016MNRAS.462.4094S, table 1: claimed types and redshifts.
    for row in pbar(_load_tsv('2016MNRAS.462.4094S-tab1.tsv'), task_str):
        if row[0][0] == '#':
            continue
        name, source = catalog.new_entry(row[0],
                                         bibcode='2016MNRAS.462.4094S')
        catalog.entries[name].add_quantity(
            SUPERNOVA.CLAIMED_TYPE, row[3].replace('SN', '').strip('() '),
            source)
        catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                           row[5].strip('() '),
                                           source,
                                           kind='spectroscopic')

    # 2016MNRAS.462.4094S, table 2: coordinates and discovery dates.
    for row in pbar(_load_tsv('2016MNRAS.462.4094S-tab2.tsv'), task_str):
        if row[0][0] == '#':
            continue
        name, source = catalog.new_entry(row[0],
                                         bibcode='2016MNRAS.462.4094S')
        catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
        catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
        when = astrotime(float(row[4]), format='mjd').datetime
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE,
            make_date_string(when.year, when.month, when.day), source)

    catalog.journal_entries()

    # 2016ApJ...827L..40S: full per-object records including aliases.
    for row in pbar(_load_tsv('2016ApJ...827L..40S.tsv'), task_str):
        if row[0][0] == '#':
            continue
        name, source = catalog.new_entry(row[0],
                                         bibcode='2016ApJ...827L..40S')
        catalog.entries[name].add_quantity(SUPERNOVA.RA, row[1], source)
        catalog.entries[name].add_quantity(SUPERNOVA.DEC, row[2], source)
        when = astrotime(float(row[3]), format='mjd').datetime
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE,
            make_date_string(when.year, when.month, when.day), source)
        catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, row[6],
                                           source)
        catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                           row[7],
                                           source,
                                           kind='spectroscopic')
        for alias in [x.strip() for x in row[8].split(',')]:
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, alias,
                                               source)

    catalog.journal_entries()

    return
def do_rochester(catalog):
    """Import data from the Latest Supernovae page.

    Tidal-disruption variant of the Rochester Astronomy scraper: harvests
    the "Latest Supernovae" HTML tables (including the boneyard pages)
    and, outside of update mode, the VSNET `latestsne.dat` file.
    """
    rochestermirrors = [
        'http://www.rochesterastronomy.org/',
        'http://www.supernova.thistlethwaites.com/'
    ]
    rochesterpaths = [
        'snimages/snredshiftall.html', 'sn2017/snredshift.html',
        'snimages/snredboneyard.html', 'snimages/snredboneyard-old.html'
    ]
    # Which pages are still re-scraped when running with `--update`.
    rochesterupdate = [False, True, True, False]
    task_str = catalog.get_current_task_str()
    # Placeholder dates the page uses for unknown/invalid values.
    baddates = ['2440587', '2440587.292', '0001/01/01']

    for pp, path in enumerate(pbar(rochesterpaths, task_str)):
        if catalog.args.update and not rochesterupdate[pp]:
            continue

        # Column indices; the boneyard table uses a different layout.
        if 'snredboneyard.html' in path:
            cns = {
                'name': 0,
                'host': 1,
                'ra': 2,
                'dec': 3,
                'type': 7,
                'z': 8,
                'mmag': 9,
                'max': 10,
                'disc': 11,
                'ref': 12,
                'dver': 13,
                'aka': 14
            }
        else:
            cns = {
                'name': 0,
                'type': 1,
                'host': 2,
                'ra': 3,
                'dec': 4,
                'disc': 6,
                'max': 7,
                'mmag': 8,
                'z': 11,
                'zh': 12,
                'ref': 13,
                'dver': 14,
                'aka': 15
            }

        filepath = (
            os.path.join(catalog.get_current_task_repo(), 'rochester/') +
            path.replace('/', '-'))
        # Try each mirror in turn; only the last one is allowed to fail hard.
        for mirror in rochestermirrors:
            html = catalog.load_url(
                mirror + path, filepath, fail=(mirror != rochestermirrors[-1]))
            if html:
                break

        if not html:
            continue

        soup = BeautifulSoup(html, 'html5lib')
        rows = soup.findAll('tr')
        sec_ref = 'Latest Supernovae'
        sec_refurl = ('http://www.rochesterastronomy.org/'
                      'snimages/snredshiftall.html')
        loopcnt = 0
        for rr, row in enumerate(pbar(rows, task_str)):
            if rr == 0:
                # Header row.
                continue
            cols = row.findAll('td')
            if not len(cols):
                continue

            # Prefer an entry named after a recognized alias (e.g. an IAU
            # 'SN' designation) when the AKA column supplies one.
            name = ''
            if cols[cns['aka']].contents:
                for rawaka in str(cols[cns['aka']].contents[0]).split(','):
                    aka = rawaka.strip()
                    if is_number(aka.strip('?')):
                        aka = 'SN' + aka.strip('?') + 'A'
                        oldname = aka
                        name = catalog.add_entry(aka)
                    elif len(aka) == 4 and is_number(aka[:4]):
                        aka = 'SN' + aka
                        oldname = aka
                        name = catalog.add_entry(aka)

            ra = str(cols[cns['ra']].contents[0]).strip()
            dec = str(cols[cns['dec']].contents[0]).strip()

            # Strip any residual HTML tags from the name cell.
            sn = re.sub('<[^<]+?>', '',
                        str(cols[cns['name']].contents[0])).strip()
            if is_number(sn.strip('?')):
                sn = 'SN' + sn.strip('?') + 'A'
            elif len(sn) == 4 and is_number(sn[:4]):
                sn = 'SN' + sn
            if not name:
                if not sn or sn in ['Transient']:
                    continue
                if sn[:8] == 'MASTER J':
                    sn = sn.replace('MASTER J', 'MASTER OT J').replace(
                        'SNHunt', 'SNhunt')
                if 'POSSIBLE' in sn.upper() and ra and dec:
                    # Unconfirmed objects get a coordinate-based PSN name.
                    sn = 'PSN J' + ra.replace(':', '').replace('.', '')
                    sn += dec.replace(':', '').replace('.', '')
                oldname = sn
                name = catalog.add_entry(sn)
            sec_source = catalog.entries[name].add_source(
                name=sec_ref, url=sec_refurl, secondary=True)
            sources = []
            if 'ref' in cns:
                reftag = cols[cns['ref']].findAll('a')
                # Guard on contents too (mirrors the supernova importer) so
                # an empty anchor tag cannot raise IndexError.
                if len(reftag) and len(reftag[0].contents):
                    reference = reftag[0].contents[0].strip()
                    refurl = reftag[0]['href'].strip()
                    sources.append(catalog.entries[name].add_source(
                        name=reference, url=refurl))
            sources.append(sec_source)
            sources = uniq_cdl(list(filter(None, sources)))
            catalog.entries[name].add_quantity(TIDALDISRUPTION.ALIAS, oldname,
                                               sources)
            catalog.entries[name].add_quantity(
                TIDALDISRUPTION.ALIAS, sn, sources)

            if cols[cns['aka']].contents:
                # Normalize and record all remaining aliases.
                for rawaka in str(cols[cns['aka']].contents[0]).split(','):
                    aka = rawaka.strip()
                    if aka == 'SNR G1.9+0.3':
                        aka = 'G001.9+00.3'
                    if aka[:4] == 'PS1 ':
                        aka = 'PS1-' + aka[4:]
                    if aka[:8] == 'MASTER J':
                        aka = aka.replace('MASTER J', 'MASTER OT J').replace(
                            'SNHunt', 'SNhunt')
                    if 'POSSIBLE' in aka.upper() and ra and dec:
                        aka = 'PSN J' + ra.replace(':', '').replace('.', '')
                        aka += dec.replace(':', '').replace('.', '')
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.ALIAS, aka, sources)

            # Length guard added (matches the host check below and the
            # supernova importer) to avoid IndexError on an empty type cell.
            if (len(cols[cns['type']].contents) > 0 and
                    str(cols[cns['type']].contents[0]).strip() != 'unk'):
                ctype = str(cols[cns['type']].contents[0]).strip(' :,')
                catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.CLAIMED_TYPE, ctype, sources)
            if (len(cols[cns['host']].contents) > 0 and
                    str(cols[cns['host']].contents[0]).strip() != 'anonymous'):
                catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.HOST,
                    str(cols[cns['host']].contents[0]).strip(), sources)
            catalog.entries[name].add_quantity(TIDALDISRUPTION.RA, ra, sources)
            catalog.entries[name].add_quantity(
                TIDALDISRUPTION.DEC, dec, sources)
            discstr = str(cols[cns['disc']].contents[0]).strip()
            if discstr and discstr not in baddates:
                if '/' not in discstr:
                    # A bare number is a Julian Date.
                    astrot = astrotime(float(discstr), format='jd').datetime
                    ddate = make_date_string(astrot.year, astrot.month,
                                             astrot.day)
                else:
                    ddate = discstr
                catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.DISCOVER_DATE, ddate, sources)
            maxstr = str(cols[cns.get('max', '')].contents[0]).strip()
            if maxstr and maxstr not in baddates:
                try:
                    if '/' not in maxstr:
                        astrot = astrotime(float(maxstr), format='jd')
                    else:
                        astrot = astrotime(
                            maxstr.replace('/', '-'), format='iso')
                except Exception:
                    catalog.log.info(
                        'Max date conversion failed for `{}`.'.format(maxstr))
                else:
                    # Photometry is only added when the date parsed.
                    # Previously a failed conversion fell through and could
                    # reuse a stale `astrot` from the discovery-date branch.
                    if ((float(str(
                        cols[cns['mmag']].contents[0])
                        .strip()) <= 90.0 and not any(
                            'GRB' in xx for xx in
                            catalog.entries[name].get_aliases()))):
                        mag = str(cols[cns['mmag']].contents[0]).strip()
                        catalog.entries[name].add_photometry(
                            time=str(astrot.mjd),
                            u_time='MJD',
                            magnitude=mag,
                            source=sources)
            if 'z' in cns and cols[cns['z']].contents[0] != 'n/a':
                catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.REDSHIFT,
                    str(cols[cns['z']].contents[0]).strip(), sources)
            if 'zh' in cns:
                zhost = str(cols[cns['zh']].contents[0]).strip()
                if is_number(zhost):
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.REDSHIFT, zhost, sources)
            if 'dver' in cns:
                catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.DISCOVERER,
                    str(cols[cns['dver']].contents[0]).strip(), sources)
            if catalog.args.update:
                catalog.journal_entries()
            loopcnt = loopcnt + 1
            if (catalog.args.travis and
                    loopcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
                break

    if not catalog.args.update:
        vsnetfiles = ['latestsne.dat']
        for vsnetfile in vsnetfiles:
            file_name = os.path.join(catalog.get_current_task_repo(),
                                     "" + vsnetfile)
            with open(file_name, 'r', encoding='latin1') as csv_file:
                tsvin = csv.reader(
                    csv_file, delimiter=' ', skipinitialspace=True)
                loopcnt = 0
                for rr, row in enumerate(tsvin):
                    if (not row or row[0] in ['Transient'] or
                            row[0][:4] in ['http', 'www.'] or len(row) < 3):
                        continue
                    name = row[0].strip()
                    if name[:4].isdigit():
                        name = 'SN' + name
                    if name.startswith('PSNJ'):
                        name = 'PSN J' + name[4:]
                    if name.startswith('MASTEROTJ'):
                        name = name.replace('MASTEROTJ', 'MASTER OT J')
                    name = catalog.add_entry(name)
                    sec_source = catalog.entries[name].add_source(
                        name=sec_ref, url=sec_refurl, secondary=True)
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.ALIAS, name, sec_source)

                    if not is_number(row[1]):
                        continue
                    # Date column is YYYYMMDD[.frac]; convert to MJD while
                    # preserving the fractional day.
                    year = row[1][:4]
                    month = row[1][4:6]
                    day = row[1][6:]
                    if '.' not in day:
                        day = day[:2] + '.' + day[2:]
                    mjd = astrotime(year + '-' + month + '-' + str(
                        floor(float(day))).zfill(2)).mjd
                    mjd += float(day) - floor(float(day))
                    magnitude = row[2].rstrip(ascii_letters)
                    if not is_number(magnitude):
                        continue
                    if magnitude.isdigit():
                        if int(magnitude) > 100:
                            # Missing decimal point, e.g. '175' -> '17.5'.
                            magnitude = magnitude[:2] + '.' + magnitude[2:]

                    # NOTE(review): `cols` here is left over from the HTML
                    # scraping loop above, not derived from this VSNET row —
                    # this looks like a long-standing bug; confirm the
                    # intended column before changing behavior.
                    if float(str(cols[8].contents[0]).strip()) >= 90.0:
                        continue

                    if len(row) >= 4:
                        if is_number(row[3]):
                            e_magnitude = row[3]
                            refind = 4
                        else:
                            e_magnitude = ''
                            refind = 3

                        if refind >= len(row):
                            sources = sec_source
                        else:
                            reference = ' '.join(row[refind:])
                            source = catalog.entries[name].add_source(
                                name=reference)
                            catalog.entries[name].add_quantity(
                                TIDALDISRUPTION.ALIAS, name, sec_source)
                            sources = uniq_cdl([source, sec_source])
                    else:
                        sources = sec_source

                    # Trailing letters of the magnitude column are the band.
                    band = row[2].lstrip('1234567890.')

                    catalog.entries[name].add_photometry(
                        time=mjd,
                        u_time='MJD',
                        band=band,
                        magnitude=magnitude,
                        e_magnitude=e_magnitude,
                        source=sources)

                    # Increment was missing, so in travis mode
                    # `0 % TRAVIS_QUERY_LIMIT == 0` broke on the first row.
                    loopcnt = loopcnt + 1
                    if (catalog.args.travis and
                            loopcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
                        break

    catalog.journal_entries()
    return
# Beispiel #10
# 0
def do_snax(catalog):
    """Import from the SNaX X-ray database."""
    task_str = catalog.get_current_task_str()

    # Full-catalog TSV export URL for the SNaX web service.
    dlurl = 'http://kronos.uchicago.edu/snax/export.php?exportType=TSV&exportFields=standard&objid=&name=&typeid=&type=&galaxyid=&galaxy=&fluxMin=&fluxMax=&fluxEnergyLMin=&fluxEnergyLMax=&fluxEnergyHMin=&fluxEnergyHMax=&lumMin=&lumMax=&instrumentid=&instrument=&ageMin=&ageMax=&dateMin=&dateMax=&sortA=dateExploded'  # noqa: E501

    file_path = os.path.join(catalog.get_current_task_repo(), 'SNaX.TSV')

    tsv = catalog.load_url(dlurl, file_path)

    table = [line.split('\t') for line in tsv.split('\n')]

    for idx, row in enumerate(pbar(table, task_str)):
        # Skip the header row and any blank trailing lines.
        if idx == 0 or not row[0]:
            continue
        name, source = catalog.new_entry(
            row[0],
            srcname='SNaX',
            url='http://kronos.uchicago.edu/snax/',
            secondary=True)
        bibcode = row[-6].strip()
        # Require a well-formed 19-character ADS bibcode.
        if len(bibcode) != 19:
            continue

        def _src_with(*bibcodes):
            # Combine the secondary SNaX source with bibliographic sources.
            extra = [
                catalog.entries[name].add_source(bibcode=bc)
                for bc in bibcodes
            ]
            return uniq_cdl([source] + extra)

        expsrc = _src_with(row[-6].strip())
        coosrc = _src_with(row[-5].strip())
        dissrc = _src_with(row[-4].strip())
        flxsrc = _src_with(row[-3].strip(), row[-2].strip())

        catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, row[1],
                                           source)
        exploded = astrotime(float(row[2]), format='jd').datetime
        catalog.entries[name].add_quantity(
            SUPERNOVA.EXPLOSION_DATE,
            make_date_string(exploded.year, exploded.month, exploded.day),
            expsrc)
        coo_parts = row[3].split()
        catalog.entries[name].add_quantity(SUPERNOVA.RA,
                                           ' '.join(coo_parts[:3]), coosrc)
        catalog.entries[name].add_quantity(SUPERNOVA.DEC,
                                           ' '.join(coo_parts[3:]), coosrc)
        catalog.entries[name].add_quantity(SUPERNOVA.LUM_DIST, row[4], dissrc)
        catalog.entries[name].add_quantity(SUPERNOVA.HOST, row[5], source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT,
            row[6],
            source,
            e_value=row[7] if (row[7] and float(row[7]) != 0.0) else '')
        # Fluxes are tabulated in units of 1e-13 ergs/s/cm^2.
        photodict = {
            PHOTOMETRY.TIME: jd_to_mjd(Decimal(row[8])),
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.ENERGY: row[15:17],
            PHOTOMETRY.U_ENERGY: 'keV',
            PHOTOMETRY.FLUX: str(Decimal('1.0e-13') * Decimal(row[11])),
            PHOTOMETRY.U_FLUX: 'ergs/s/cm^2',
            PHOTOMETRY.E_LOWER_FLUX:
            str(Decimal('1.0e-13') * Decimal(row[13])),
            PHOTOMETRY.E_UPPER_FLUX:
            str(Decimal('1.0e-13') * Decimal(row[14])),
            PHOTOMETRY.INSTRUMENT: row[9],
            PHOTOMETRY.SOURCE: flxsrc
        }
        if row[12] == '1':
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
        catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
# Beispiel #11
# 0
def do_sdss_photo(catalog):
    """Import SDSS-II supernova survey photometry.

    First reads the ``SDSS/sdsssn_master.dat2`` metadata table (coordinates,
    claimed types, redshifts, peak dates, host positions), then the
    per-object light-curve files (``SDSS/sum/*.sum`` and
    ``SDSS/SMP_Data/*.dat``), adding both magnitudes and flux densities for
    each ugriz measurement.

    Parameters
    ----------
    catalog : project catalog object providing ``new_entry``/``entries`` etc.
    """
    task_str = catalog.get_current_task_str()
    # Pogson-like factor: used below to reconstruct the photometric zero
    # point from a (magnitude, flux density) pair via zp = 2.5*log10(f) + m.
    D25 = Decimal('2.5')

    # fits_path = os.path.join(catalog.get_current_task_repo(),
    #                          'SDSS/SDSS_allCandidates+BOSS_HEAD.FITS')
    #
    # hdulist = fits.open(fits_path)
    # print(hdulist[1].columns)
    # for ri, row in enumerate(hdulist[1].data['SNID']):
    #     print([[tag, hdulist[1].data[tag][ri]] for tag in hdulist[1].data])
    #     print(hdulist[1].data['SNID'][ri], hdulist[1].data['IAUC'][ri],
    #           hdulist[1].data['REDSHIFT_HELIO'][ri])
    #
    # # print(hdulist[1].data['MJD'])
    # hdulist.close()
    # return

    # Load up metadata first
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'SDSS/sdsssn_master.dat2'), 'r') as f:
        # Skip the header line, then parse space-delimited columns.
        rows = list(csv.reader(f.read().splitlines()[1:], delimiter=' '))
        # Candidate IDs classified as non-SNe; their light curves are
        # skipped in the photometry pass below.
        ignored_cids = []
        # Quantity key -> column index in the master table.
        columns = {
            SUPERNOVA.RA: 1,
            SUPERNOVA.DEC: 2,
            SUPERNOVA.ALIAS: 4,
            SUPERNOVA.CLAIMED_TYPE: 5,
            SUPERNOVA.REDSHIFT: 11,
            SUPERNOVA.MAX_DATE: 21,
            SUPERNOVA.HOST_RA: 99,
            SUPERNOVA.HOST_DEC: 100
        }
        # Inverse mapping: column index -> quantity key.
        colnums = {v: k for k, v in columns.items()}

        # '\N' marks missing values in the table; blank them out.
        rows = [[x.replace('\\N', '') for x in y] for y in rows]

        # Build 'SDSS J...' host designations from the host RA/DEC columns
        # (99/100), truncating each sexagesimal component to 9 characters.
        co = [[x[0], x[99], x[100]] for x in rows if x[99] and x[100]]
        coo = coord([x[1] for x in co], [x[2] for x in co], unit="deg")
        coo = [
            ''.join([y[:9] for y in x.split()])
            for x in coo.to_string('hmsdms', sep='')
        ]
        hostdict = dict(
            zip([x[0] for x in co], ['SDSS J' + x[1:] for x in coo]))

        for ri, row in enumerate(pbar(rows, task_str + ": metadata")):
            name = ''

            # Check if type is non-SNe first
            ct = row[columns[SUPERNOVA.CLAIMED_TYPE]]
            al = row[columns[SUPERNOVA.ALIAS]]
            if ct in ['AGN', 'Variable', 'Unknown'] and not al:
                catalog.log.info('`{}` is not a SN, not '
                                 'adding.'.format(row[0]))
                ignored_cids.append(row[0])
                continue

            # Add entry
            (name, source) = catalog.new_entry(
                'SDSS-II SN ' + row[0],
                bibcode='2014arXiv1401.3317S',
                url='http://data.sdss3.org/sas/dr10/boss/papers/supernova/')

            # Add host name
            if row[0] in hostdict:
                catalog.entries[name].add_quantity(SUPERNOVA.HOST,
                                                   hostdict[row[0]], source)

            # Add other metadata
            for cn in colnums:
                key = colnums[cn]
                if not key:
                    continue
                ic = int(cn)
                val = row[ic]
                if not val:
                    continue
                kwargs = {}
                if key == SUPERNOVA.ALIAS:
                    val = 'SN' + val
                elif key in [
                        SUPERNOVA.RA, SUPERNOVA.DEC, SUPERNOVA.HOST_RA,
                        SUPERNOVA.HOST_DEC
                ]:
                    kwargs = {QUANTITY.U_VALUE: 'floatdegrees'}
                    if key in [SUPERNOVA.RA, SUPERNOVA.HOST_RA]:
                        # Normalize negative right ascensions into [0, 360).
                        fval = float(val)
                        if fval < 0.0:
                            val = str(Decimal(360) + Decimal(fval))
                elif key == SUPERNOVA.CLAIMED_TYPE:
                    val = val.lstrip('pz').replace('SN', '')
                elif key == SUPERNOVA.REDSHIFT:
                    kwargs[QUANTITY.KIND] = 'spectroscopic'
                    # Values below -1 are sentinels for "no redshift".
                    if float(val) < -1.0:
                        continue
                    # The following column holds the redshift error.
                    if float(row[ic + 1]) > 0.0:
                        kwargs[QUANTITY.E_VALUE] = row[ic + 1]
                elif key == SUPERNOVA.MAX_DATE:
                    dt = astrotime(float(val), format='mjd').datetime
                    val = make_date_string(dt.year, dt.month, dt.day)
                catalog.entries[name].add_quantity(key, val, source, **kwargs)

    # Light curves listed in this file are credited to Holtzman 2010.
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'SDSS/2010ApJ...708..661D.txt'), 'r') as sdss_file:
        bibcodes2010 = sdss_file.read().split('\n')
    sdssbands = ['u', 'g', 'r', 'i', 'z']
    file_names = (list(
        glob(os.path.join(catalog.get_current_task_repo(), 'SDSS/sum/*.sum')))
                  + list(
                      glob(
                          os.path.join(catalog.get_current_task_repo(),
                                       'SDSS/SMP_Data/*.dat'))))
    # Objects whose photometry should not be ingested.
    skipphoto = ['SDSS-II SN 15557']
    for fi, fname in enumerate(pbar_strings(file_names, task_str)):
        tsvin = csv.reader(open(fname, 'r'),
                           delimiter=' ',
                           skipinitialspace=True)
        basename = os.path.basename(fname)
        # .sum files carry a redshift row and data starting at line 19;
        # SMP .dat files have no redshift row and data starting at line 4.
        hasred = True
        rst = 19
        if '.dat' in fname:
            bibcode = '2014arXiv1401.3317S'
            hasred = False
            rst = 4
        elif basename in bibcodes2010:
            bibcode = '2010ApJ...708..661D'
        else:
            bibcode = '2008AJ....136.2306H'

        skip_entry = False
        for rr, row in enumerate(tsvin):
            if skip_entry:
                break
            if rr == 0:
                # Ignore non-SNe objects and those not in metadata table above
                if row[3] in ignored_cids:
                    skip_entry = True
                    continue
                # Ignore IAU names from file headers as they are unreliable
                oname = 'SDSS-II SN ' + row[3]
                (name, source) = catalog.new_entry(oname, bibcode=bibcode)
                catalog.entries[name].add_quantity(SUPERNOVA.RA,
                                                   row[-4],
                                                   source,
                                                   u_value='floatdegrees')
                catalog.entries[name].add_quantity(SUPERNOVA.DEC,
                                                   row[-2],
                                                   source,
                                                   u_value='floatdegrees')
            if hasred and rr == 1:
                # Negative errors are sentinels for "no error".
                error = row[4] if float(row[4]) >= 0.0 else ''
                val = row[2]
                # Values below -1 are sentinels for "no redshift".
                if float(val) < -1.0:
                    continue
                (catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT,
                                                    val,
                                                    source,
                                                    e_value=error,
                                                    kind='heliocentric'))
            if rr >= rst:
                # Skip bad measurements
                if int(row[0]) > 1024:
                    continue
                if oname in skipphoto:
                    break

                mjd = row[1]
                # Column 2 indexes into ugriz; primes mark SDSS filters.
                band = sdssbands[int(row[2])] + "'"
                magnitude = row[3]
                e_mag = row[4]
                fluxd = row[7]
                e_fluxd = row[8]
                telescope = 'SDSS'
                photodict = {
                    PHOTOMETRY.TIME: mjd,
                    PHOTOMETRY.U_TIME: 'MJD',
                    PHOTOMETRY.TELESCOPE: telescope,
                    PHOTOMETRY.BAND: band,
                    PHOTOMETRY.MAGNITUDE: magnitude,
                    PHOTOMETRY.E_MAGNITUDE: e_mag,
                    PHOTOMETRY.FLUX_DENSITY: fluxd,
                    PHOTOMETRY.E_FLUX_DENSITY: e_fluxd,
                    PHOTOMETRY.U_FLUX_DENSITY: 'μJy',
                    PHOTOMETRY.SOURCE: source,
                    PHOTOMETRY.BAND_SET: 'SDSS',
                    PHOTOMETRY.SYSTEM: 'SDSS'
                }
                # Reconstruct the zero point from the mag/flux pair.
                if float(fluxd) > 0.0:
                    photodict[PHOTOMETRY.ZERO_POINT] = str(
                        D25 * Decimal(fluxd).log10() + Decimal(magnitude))
                # Flag bit 32 or a flux below 3 sigma marks an upper limit.
                ul_sigma = 3.0
                if int(row[0]) & 32 or float(
                        fluxd) < ul_sigma * float(e_fluxd):
                    photodict[PHOTOMETRY.UPPER_LIMIT] = True
                    photodict[PHOTOMETRY.UPPER_LIMIT_SIGMA] = str(ul_sigma)
                catalog.entries[name].add_photometry(**photodict)
        if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:
            break
        # Flush to disk periodically to bound memory use.
        if not fi % 1000:
            catalog.journal_entries()

    catalog.journal_entries()
    return
Beispiel #12
0
def do_snax(catalog):
    """Import X-ray fluxes from the SNaX database.

    Reads the tab-separated ``SNaX.TSV`` table. Per row the code uses:
    name, claimed type, explosion JD, sexagesimal coordinates (six fields:
    RA then DEC), luminosity distance, host, redshift (+error), observation
    JD, instrument, flux and flux errors (in units of 1e-13 erg/s/cm^2), an
    upper-limit flag, and the energy band in keV. The trailing columns hold
    bibcodes crediting the individual measurements.

    Parameters
    ----------
    catalog : project catalog object providing ``new_entry``/``entries`` etc.
    """
    task_str = catalog.get_current_task_str()
    file_path = os.path.join(catalog.get_current_task_repo(), 'SNaX.TSV')
    data = list(
        csv.reader(
            open(file_path, 'r'),
            delimiter='\t',
            quotechar='"',
            skipinitialspace=True))

    # Fluxes in the table are tabulated in units of 1e-13 erg/s/cm^2.
    flux_scale = Decimal('1.0e-13')

    for r, row in enumerate(pbar(data, task_str)):
        # First row is the header.
        if r == 0:
            continue
        (name, source) = catalog.new_entry(
            row[0], srcname='SNaX', url='http://kronos.uchicago.edu/snax/')
        sources = [source]
        # Each quantity is credited to the SNaX source plus the bibcode(s)
        # listed in the trailing columns of the row.
        expsrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-6].strip())
        ])
        coosrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-5].strip())
        ])
        dissrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-4].strip())
        ])
        flxsrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-3].strip()),
            catalog.entries[name].add_source(bibcode=row[-2].strip())
        ])

        catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, row[1],
                                           source)
        date = astrotime(float(row[2]), format='jd').datetime
        catalog.entries[name].add_quantity(
            SUPERNOVA.EXPLOSION_DATE,
            make_date_string(date.year, date.month, date.day), expsrc)
        # Column 3 holds 'RA DEC' as six space-separated sexagesimal fields:
        # the first three are the RA, the last three the declination.
        catalog.entries[name].add_quantity(
            SUPERNOVA.RA, ' '.join(row[3].split()[:3]), coosrc)
        # BUG FIX: this previously sliced [:3] again, storing the RA fields
        # in the DEC quantity; the declination is the last three fields.
        catalog.entries[name].add_quantity(
            SUPERNOVA.DEC, ' '.join(row[3].split()[3:]), coosrc)
        catalog.entries[name].add_quantity(SUPERNOVA.LUM_DIST, row[4], dissrc)
        catalog.entries[name].add_quantity(SUPERNOVA.HOST, row[5], source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT,
            row[6],
            source,
            # A zero error is a placeholder for "no error".
            e_value=row[7] if (row[7] and float(row[7]) != 0.0) else '')
        photodict = {
            PHOTOMETRY.TIME: jd_to_mjd(Decimal(row[8])),
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.ENERGY: row[15:17],
            PHOTOMETRY.U_ENERGY: 'keV',
            PHOTOMETRY.FLUX: str(flux_scale * Decimal(row[11])),
            PHOTOMETRY.U_FLUX: 'ergs/s/cm^2',
            PHOTOMETRY.E_LOWER_FLUX: str(flux_scale * Decimal(row[13])),
            PHOTOMETRY.E_UPPER_FLUX: str(flux_scale * Decimal(row[14])),
            PHOTOMETRY.INSTRUMENT: row[9],
            PHOTOMETRY.SOURCE: flxsrc
        }
        if row[12] == '1':
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
        catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return
Beispiel #13
0
def do_rochester(catalog):
    """Scrape the Rochester Astronomy 'Latest Supernovae' pages and the
    VSNET ``latestsne.dat`` file for names, coordinates, claimed types,
    discovery dates, redshifts, and discovery-magnitude photometry.

    Parameters
    ----------
    catalog : project catalog object providing ``add_entry``/``entries`` etc.
    """
    rochestermirrors = ['http://www.rochesterastronomy.org/',
                        'http://www.supernova.thistlethwaites.com/']
    rochesterpaths = ['snimages/snredshiftall.html',
                      'sn2016/snredshift.html', 'snimages/snredboneyard.html']
    # Whether each page should still be fetched when running in update mode.
    rochesterupdate = [False, True, True]
    task_str = catalog.get_current_task_str()

    for pp, path in enumerate(pbar(rochesterpaths, task_str)):
        if catalog.args.update and not rochesterupdate[pp]:
            continue

        filepath = (os.path.join(
            catalog.get_current_task_repo(), 'rochester/') +
            os.path.basename(path))
        # Try each mirror in turn; only the last is allowed to fail hard.
        for mirror in rochestermirrors:
            html = catalog.load_cached_url(
                mirror + path, filepath,
                failhard=(mirror != rochestermirrors[-1]))
            if html:
                break

        if not html:
            continue

        soup = BeautifulSoup(html, 'html5lib')
        rows = soup.findAll('tr')
        sec_ref = 'Latest Supernovae'
        sec_refurl = ('http://www.rochesterastronomy.org/'
                      'snimages/snredshiftall.html')
        for rr, row in enumerate(pbar(rows, task_str)):
            # Skip the header row.
            if rr == 0:
                continue
            cols = row.findAll('td')
            if not cols:
                continue

            # Prefer the 'aka' column (14) for the entry name when it looks
            # like a numeric SN designation.
            name = ''
            if cols[14].contents:
                aka = str(cols[14].contents[0]).strip()
                if is_number(aka.strip('?')):
                    aka = 'SN' + aka.strip('?') + 'A'
                    oldname = aka
                    name = catalog.add_entry(aka)
                elif len(aka) == 4 and is_number(aka[:4]):
                    aka = 'SN' + aka
                    oldname = aka
                    name = catalog.add_entry(aka)

            ra = str(cols[3].contents[0]).strip()
            dec = str(cols[4].contents[0]).strip()

            # Strip any markup from the primary name column.
            sn = re.sub('<[^<]+?>', '', str(cols[0].contents[0])).strip()
            if is_number(sn.strip('?')):
                sn = 'SN' + sn.strip('?') + 'A'
            elif len(sn) == 4 and is_number(sn[:4]):
                sn = 'SN' + sn
            if not name:
                if not sn:
                    continue
                if sn[:8] == 'MASTER J':
                    sn = sn.replace('MASTER J', 'MASTER OT J').replace(
                        'SNHunt', 'SNhunt')
                if 'POSSIBLE' in sn.upper() and ra and dec:
                    # Unconfirmed candidates get a coordinate-based PSN name.
                    sn = 'PSN J' + ra.replace(':', '').replace('.', '')
                    sn += dec.replace(':', '').replace('.', '')
                oldname = sn
                name = catalog.add_entry(sn)

            reference = cols[12].findAll('a')[0].contents[0].strip()
            refurl = cols[12].findAll('a')[0]['href'].strip()
            source = catalog.entries[name].add_source(
                name=reference, url=refurl)
            sec_source = catalog.entries[name].add_source(
                name=sec_ref, url=sec_refurl, secondary=True)
            sources = uniq_cdl(list(filter(None, [source, sec_source])))
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, oldname, sources)
            catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, sn, sources)

            if cols[14].contents:
                # Normalize a handful of known alias formats.
                if aka == 'SNR G1.9+0.3':
                    aka = 'G001.9+00.3'
                if aka[:4] == 'PS1 ':
                    aka = 'PS1-' + aka[4:]
                if aka[:8] == 'MASTER J':
                    aka = aka.replace('MASTER J', 'MASTER OT J').replace(
                        'SNHunt', 'SNhunt')
                if 'POSSIBLE' in aka.upper() and ra and dec:
                    aka = 'PSN J' + ra.replace(':', '').replace('.', '')
                    aka += dec.replace(':', '').replace('.', '')
                catalog.entries[name].add_quantity(
                    SUPERNOVA.ALIAS, aka, sources)

            if str(cols[1].contents[0]).strip() != 'unk':
                # Renamed from `type` to avoid shadowing the builtin.
                sn_type = str(cols[1].contents[0]).strip(' :,')
                catalog.entries[name].add_quantity(
                    SUPERNOVA.CLAIMED_TYPE, sn_type, sources)
            if str(cols[2].contents[0]).strip() != 'anonymous':
                catalog.entries[name].add_quantity(SUPERNOVA.HOST, str(
                    cols[2].contents[0]).strip(), sources)
            catalog.entries[name].add_quantity(SUPERNOVA.RA, ra, sources)
            catalog.entries[name].add_quantity(SUPERNOVA.DEC, dec, sources)
            # JD 2440587(.292) is the Unix-epoch placeholder for "no date".
            if (str(cols[6].contents[0]).strip() not in
                    ['2440587', '2440587.292']):
                astrot = astrotime(
                    float(str(cols[6].contents[0]).strip()),
                    format='jd').datetime
                ddate = make_date_string(astrot.year, astrot.month, astrot.day)
                catalog.entries[name].add_quantity(
                    SUPERNOVA.DISCOVER_DATE, ddate, sources)
            if (str(cols[7].contents[0]).strip() not in
                    ['2440587', '2440587.292']):
                astrot = astrotime(
                    float(str(cols[7].contents[0]).strip()), format='jd')
                # Magnitudes above 90 are placeholders; GRB afterglows are
                # excluded from optical photometry.
                if ((float(str(cols[8].contents[0]).strip()) <= 90.0 and
                     not any('GRB' in xx for xx in
                             catalog.entries[name].get_aliases()))):
                    mag = str(cols[8].contents[0]).strip()
                    catalog.entries[name].add_photometry(
                        time=str(astrot.mjd), magnitude=mag,
                        source=sources)
            if cols[11].contents[0] != 'n/a':
                catalog.entries[name].add_quantity(SUPERNOVA.REDSHIFT, str(
                    cols[11].contents[0]).strip(), sources)
            catalog.entries[name].add_quantity('discoverer', str(
                cols[13].contents[0]).strip(), sources)
            if catalog.args.update:
                catalog.journal_entries()

    if not catalog.args.update:
        vsnetfiles = ['latestsne.dat']
        for vsnetfile in vsnetfiles:
            file_name = os.path.join(
                catalog.get_current_task_repo(), "" + vsnetfile)
            with open(file_name, 'r', encoding='latin1') as csv_file:
                tsvin = csv.reader(csv_file, delimiter=' ',
                                   skipinitialspace=True)
                for rr, row in enumerate(tsvin):
                    # Skip blank rows, URLs, and malformed short rows.
                    if (not row or row[0][:4] in ['http', 'www.'] or
                            len(row) < 3):
                        continue
                    name = row[0].strip()
                    if name[:4].isdigit():
                        name = 'SN' + name
                    if name.startswith('PSNJ'):
                        name = 'PSN J' + name[4:]
                    if name.startswith('MASTEROTJ'):
                        name = name.replace('MASTEROTJ', 'MASTER OT J')
                    name = catalog.add_entry(name)
                    sec_source = catalog.entries[name].add_source(
                        name=sec_ref, url=sec_refurl, secondary=True)
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.ALIAS, name, sec_source)

                    # Column 1 is a YYYYMMDD(.frac) observation date.
                    if not is_number(row[1]):
                        continue
                    year = row[1][:4]
                    month = row[1][4:6]
                    day = row[1][6:]
                    if '.' not in day:
                        day = day[:2] + '.' + day[2:]
                    mjd = astrotime(year + '-' + month + '-' +
                                    str(floor(float(day))).zfill(2)).mjd
                    # Carry the fractional day over into the MJD.
                    mjd += float(day) - floor(float(day))
                    magnitude = row[2].rstrip(ascii_letters)
                    if not is_number(magnitude):
                        continue
                    if magnitude.isdigit():
                        # A run of digits > 100 is a magnitude missing its
                        # decimal point (e.g. '155' -> '15.5').
                        if int(magnitude) > 100:
                            magnitude = magnitude[:2] + '.' + magnitude[2:]

                    # BUG FIX: this check previously read `cols[8]`, a stale
                    # variable left over from the HTML-table loop above; the
                    # intent (mirroring that loop) is to skip placeholder
                    # magnitudes >= 90 in THIS row.
                    if float(magnitude) >= 90.0:
                        continue

                    if len(row) >= 4:
                        if is_number(row[3]):
                            e_magnitude = row[3]
                            refind = 4
                        else:
                            e_magnitude = ''
                            refind = 3

                        if refind >= len(row):
                            sources = sec_source
                        else:
                            # Remaining columns form the reference name.
                            reference = ' '.join(row[refind:])
                            source = catalog.entries[
                                name].add_source(name=reference)
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.ALIAS, name, sec_source)
                            sources = uniq_cdl([source, sec_source])
                    else:
                        sources = sec_source

                    # Any trailing letters on the magnitude give the band.
                    band = row[2].lstrip('1234567890.')

                    catalog.entries[name].add_photometry(
                        time=mjd, band=band, magnitude=magnitude,
                        e_magnitude=e_magnitude, source=sources)

    catalog.journal_entries()
    return
Beispiel #14
0
def do_sdss_photo(catalog):
    """Import SDSS-II supernova survey photometry (magnitudes only).

    Reads the ``sdsssn_master.dat2`` metadata table and then the per-object
    light-curve files (``SDSS/sum/*.sum`` and ``SDSS/SMP_Data/*.dat``),
    adding one magnitude per ugriz measurement.

    Parameters
    ----------
    catalog : project catalog object providing ``add_entry``/``entries`` etc.
    """
    task_str = catalog.get_current_task_str()
    # Load up metadata first
    with open(os.path.join(catalog.get_current_task_repo(),
                           'SDSS/sdsssn_master.dat2'), 'r') as f:
        # Skip the header line, then parse space-delimited columns.
        rows = list(csv.reader(f.read().splitlines()[1:], delimiter=' '))
        # Candidate IDs classified as non-SNe; their light curves are
        # skipped in the photometry pass below.
        ignored_cids = []
        # Quantity key -> column index in the master table.
        columns = {
            SUPERNOVA.RA: 1,
            SUPERNOVA.DEC: 2,
            SUPERNOVA.ALIAS: 4,
            SUPERNOVA.CLAIMED_TYPE: 5,
            SUPERNOVA.REDSHIFT: 11,
            SUPERNOVA.MAX_DATE: 21,
            SUPERNOVA.HOST_RA: 99,
            SUPERNOVA.HOST_DEC: 100
        }
        # Inverse mapping: column index -> quantity key.
        colnums = {v: k for k, v in columns.items()}

        # '\N' marks missing values in the table; blank them out.
        rows = [[x.replace('\\N', '') for x in y] for y in rows]

        # Build 'SDSS J...' host designations from the host RA/DEC columns.
        co = [[x[0], x[99], x[100]] for x in rows if x[99] and x[100]]
        coo = coord([x[1] for x in co], [x[2] for x in co], unit="deg")
        coo = [''.join([y[:9] for y in x.split()]) for x in
               coo.to_string('hmsdms', sep='')]
        hostdict = dict(zip([x[0] for x in co],
                            ['SDSS J' + x[1:] for x in coo]))

        for ri, row in enumerate(pbar(rows, task_str + ": metadata")):
            name = ''

            # Check if type is non-SNe first
            ct = row[columns[SUPERNOVA.CLAIMED_TYPE]]
            al = row[columns[SUPERNOVA.ALIAS]]
            if ct in ['AGN', 'Variable'] and not al:
                catalog.log.info('`{}` is not a SN, not '
                                 'adding.'.format(row[0]))
                ignored_cids.append(row[0])
                continue

            # Add entry
            (name, source) = catalog.new_entry(
                'SDSS-II SN ' + row[0], bibcode='2014arXiv1401.3317S',
                url='http://data.sdss3.org/sas/dr10/boss/papers/supernova/')

            # Add host name
            if row[0] in hostdict:
                catalog.entries[name].add_quantity(SUPERNOVA.HOST,
                                                   hostdict[row[0]], source)

            # Add other metadata
            for cn in colnums:
                key = colnums[cn]
                if not key:
                    continue
                ic = int(cn)
                val = row[ic]
                if not val:
                    continue
                kwargs = {}
                if key == SUPERNOVA.ALIAS:
                    val = 'SN' + val
                elif key in [SUPERNOVA.RA, SUPERNOVA.DEC, SUPERNOVA.HOST_RA,
                             SUPERNOVA.HOST_DEC]:
                    kwargs = {QUANTITY.U_VALUE: 'floatdegrees'}
                    if key in [SUPERNOVA.RA, SUPERNOVA.HOST_RA]:
                        # Normalize negative right ascensions into [0, 360).
                        fval = float(val)
                        if fval < 0.0:
                            val = str(Decimal(360) + Decimal(fval))
                elif key == SUPERNOVA.CLAIMED_TYPE:
                    val = val.lstrip('pz').replace('SN', '')
                elif key == SUPERNOVA.REDSHIFT:
                    kwargs[QUANTITY.KIND] = 'spectroscopic'
                    # NOTE(review): no guard here against sentinel
                    # redshifts < -1 (the other variant of this importer in
                    # this file skips them) — confirm that is intentional.
                    if float(row[ic + 1]) > 0.0:
                        kwargs[QUANTITY.E_VALUE] = row[ic + 1]
                elif key == SUPERNOVA.MAX_DATE:
                    dt = astrotime(float(val), format='mjd').datetime
                    val = make_date_string(dt.year, dt.month, dt.day)
                catalog.entries[name].add_quantity(key, val, source, **kwargs)

    # Light curves listed in this file are credited to Holtzman 2010.
    with open(os.path.join(catalog.get_current_task_repo(),
                           'SDSS/2010ApJ...708..661D.txt'), 'r') as sdss_file:
        bibcodes2010 = sdss_file.read().split('\n')
    sdssbands = ['u', 'g', 'r', 'i', 'z']
    file_names = (list(glob(os.path.join(catalog
                                         .get_current_task_repo(),
                                         'SDSS/sum/*.sum'))) +
                  list(glob(os.path.join(catalog
                                         .get_current_task_repo(),
                                         'SDSS/SMP_Data/*.dat'))))
    for fi, fname in enumerate(pbar_strings(file_names, task_str)):
        tsvin = csv.reader(open(fname, 'r'), delimiter=' ',
                           skipinitialspace=True)
        basename = os.path.basename(fname)
        # .sum files carry a redshift row and data starting at line 19;
        # SMP .dat files have no redshift row and data starting at line 4.
        hasred = True
        rst = 19
        if '.dat' in fname:
            bibcode = '2014arXiv1401.3317S'
            hasred = False
            rst = 4
        elif basename in bibcodes2010:
            bibcode = '2010ApJ...708..661D'
        else:
            bibcode = '2008AJ....136.2306H'

        skip_entry = False
        for rr, row in enumerate(tsvin):
            if skip_entry:
                break
            if rr == 0:
                # Ignore non-SNe objects and those not in metadata table above
                if row[3] in ignored_cids:
                    skip_entry = True
                    continue
                # Ignore IAU names from Sako 2014 as they are unreliable
                if row[5] == 'RA:' or bibcode == '2014arXiv1401.3317S':
                    name = 'SDSS-II SN ' + row[3]
                else:
                    name = 'SN' + row[5]
                name = catalog.add_entry(name)
                source = catalog.entries[name].add_source(bibcode=bibcode)
                catalog.entries[name].add_quantity(
                    SUPERNOVA.ALIAS, name, source)
                catalog.entries[name].add_quantity(
                    SUPERNOVA.ALIAS, 'SDSS-II SN ' + row[3], source)

                # Use the year embedded in the name as the discovery date.
                # NOTE(review): this branch can only trigger when `name` is
                # of the 'SDSS-II SN <cid>' form (the bibcode condition also
                # selected that form above), so the first digit run is the
                # candidate ID, not a year — verify against upstream intent.
                if row[5] != 'RA:' and bibcode == '2014arXiv1401.3317S':
                    year = re.findall(r'\d+', name)[0]
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.DISCOVER_DATE, year, source)

                catalog.entries[name].add_quantity(
                    SUPERNOVA.RA, row[-4], source, u_value='floatdegrees')
                catalog.entries[name].add_quantity(
                    SUPERNOVA.DEC, row[-2], source, u_value='floatdegrees')
            if hasred and rr == 1:
                # Negative errors are sentinels for "no error".
                error = row[4] if float(row[4]) >= 0.0 else ''
                (catalog.entries[name]
                 .add_quantity(SUPERNOVA.REDSHIFT, row[2], source,
                               e_value=error,
                               kind='heliocentric'))
            if rr >= rst:
                # Skip bad measurements
                if int(row[0]) > 1024:
                    continue

                mjd = row[1]
                # Column 2 indexes into the ugriz band list.
                band = sdssbands[int(row[2])]
                magnitude = row[3]
                e_mag = row[4]
                telescope = 'SDSS'
                (catalog.entries[name]
                 .add_photometry(time=mjd, telescope=telescope,
                                 band=band, magnitude=magnitude,
                                 e_magnitude=e_mag, source=source,
                                 system='SDSS'))
        # Flush to disk periodically to bound memory use.
        if not fi % 1000:
            catalog.journal_entries()

    catalog.journal_entries()
    return
Beispiel #15
0
    def set_first_max_light(self):
        """Derive maximum-light and discovery-date quantities.

        Fills in, when not already present: the date/magnitude/band of
        maximum light (all-band and visual), a discovery date from the
        first-light estimate, and — failing that — a discovery date from
        the earliest dated spectrum. All derived quantities are credited
        to this catalog plus the contributing sources.
        """
        if SUPERNOVA.MAX_APP_MAG not in self:
            # Peak across all bands.
            peak_dt, peak_mag, peak_band, peak_src = self._get_max_light()
            if peak_dt or peak_mag or peak_band:
                self_src = self.add_self_source()
                srcs = uniq_cdl([self_src] + peak_src.split(','))
            if peak_dt:
                self.add_quantity(
                    SUPERNOVA.MAX_DATE,
                    make_date_string(peak_dt.year, peak_dt.month,
                                     peak_dt.day),
                    srcs, derived=True)
            if peak_mag:
                self.add_quantity(
                    SUPERNOVA.MAX_APP_MAG, pretty_num(peak_mag), srcs,
                    derived=True)
            if peak_band:
                self.add_quantity(
                    SUPERNOVA.MAX_BAND, peak_band, srcs, derived=True)

        if SUPERNOVA.MAX_VISUAL_APP_MAG not in self:
            # Peak restricted to "visual" bands.
            peak_dt, peak_mag, peak_band, peak_src = self._get_max_light(
                visual=True)
            if peak_dt or peak_mag or peak_band:
                self_src = self.add_self_source()
                srcs = uniq_cdl([self_src] + peak_src.split(','))
            if peak_dt:
                self.add_quantity(
                    SUPERNOVA.MAX_VISUAL_DATE,
                    make_date_string(peak_dt.year, peak_dt.month,
                                     peak_dt.day),
                    srcs, derived=True)
            if peak_mag:
                self.add_quantity(
                    SUPERNOVA.MAX_VISUAL_APP_MAG, pretty_num(peak_mag),
                    srcs, derived=True)
            if peak_band:
                self.add_quantity(
                    SUPERNOVA.MAX_VISUAL_BAND, peak_band, srcs,
                    derived=True)

        # Add a first-light discovery date unless a full (3-part) discovery
        # date is already present.
        have_full_date = (
            self._KEYS.DISCOVER_DATE in self and
            max(len(q[QUANTITY.VALUE].split('/'))
                for q in self[self._KEYS.DISCOVER_DATE]) >= 3)
        if not have_full_date:
            first_dt, first_src = self._get_first_light()
            if first_dt:
                self_src = self.add_self_source()
                self.add_quantity(
                    self._KEYS.DISCOVER_DATE,
                    make_date_string(first_dt.year, first_dt.month,
                                     first_dt.day),
                    uniq_cdl([self_src] + first_src.split(',')),
                    derived=True)

        # As a last resort, date the discovery from the earliest spectrum.
        if (self._KEYS.DISCOVER_DATE not in self and
                self._KEYS.SPECTRA in self):
            best_mjd = float("+inf")
            best_src = None
            for spec in self[self._KEYS.SPECTRA]:
                if 'time' not in spec or 'u_time' not in spec:
                    continue
                if spec['u_time'] == 'MJD':
                    mjd = float(spec['time'])
                elif spec['u_time'] == 'JD':
                    mjd = float(jd_to_mjd(Decimal(spec['time'])))
                else:
                    # Unrecognized time unit; skip this spectrum.
                    continue
                if mjd < best_mjd:
                    best_mjd, best_src = mjd, spec['source']

            if best_mjd < float("+inf"):
                first_dt = astrotime(best_mjd, format='mjd').datetime
                self_src = self.add_self_source()
                self.add_quantity(
                    self._KEYS.DISCOVER_DATE,
                    make_date_string(first_dt.year, first_dt.month,
                                     first_dt.day),
                    uniq_cdl([self_src] + best_src.split(',')),
                    derived=True)
        return
Beispiel #16
0
def do_batse(catalog):
    """Import long-duration BATSE GRBs as candidate supernova entries.

    Downloads the BATSE basic and duration tables, keeps only bursts
    whose quoted T90 plus one sigma is at least 3 seconds (long GRBs),
    and records a discovery date, position, and 'LGRB' claimed type
    for each surviving burst.

    Parameters
    ----------
    catalog : the running task catalog; entries are journaled at the end.
    """
    task_str = catalog.get_current_task_str()
    file_path = os.path.join(catalog.get_current_task_repo(),
                             'BATSE/basic_table.txt')
    csvtxt = catalog.load_url(
        'http://gammaray.nsstc.nasa.gov/batse/grb/catalog/current/tables/'
        'basic_table.txt', file_path)
    if not csvtxt:
        # Download and cache both failed; nothing to import.
        return
    data = list(
        csv.reader(csvtxt.splitlines(),
                   delimiter=' ',
                   quotechar='"',
                   skipinitialspace=True))

    file_path = os.path.join(catalog.get_current_task_repo(),
                             'BATSE/duration_table.txt')
    csvtxt = catalog.load_url(
        'http://gammaray.nsstc.nasa.gov/batse/grb/catalog/current/tables/'
        'duration_table.txt', file_path)
    if not csvtxt:
        return
    data2 = list(
        csv.reader(csvtxt.splitlines(),
                   delimiter=' ',
                   quotechar='"',
                   skipinitialspace=True))
    t90s = {}
    for row in data2:
        # Add one sigma to quoted T90 to compare to the 3 s cut below.
        t90s[row[0]] = float(row[-3]) + float(row[-2])

    prev_oname = ''
    grb_letter = 'A'
    for r, row in enumerate(pbar(data, task_str)):
        if row[0].startswith('#'):
            continue
        oname = 'GRB ' + row[2]
        # Consecutive triggers sharing a date string get suffixed
        # 'A', 'B', ... in table order (names end with '-' placeholder).
        if oname.replace('-', '') == prev_oname:
            grb_letter = chr(ord(grb_letter) + 1)
        else:
            grb_letter = 'A'
        prev_oname = oname.replace('-', '')
        if oname.endswith('-'):
            oname = oname.replace('-', grb_letter)
        if row[-1] == 'Y':
            continue
        # Skip bursts with no duration entry or with T90 (+1 sigma) < 3 s.
        if row[0] not in t90s or t90s[row[0]] < 3.0:
            continue
        (name, source) = catalog.new_entry(
            oname,
            srcname='BATSE Catalog',
            bibcode='1999ApJS..122..465P',
            url='http://gammaray.nsstc.nasa.gov/batse/grb/catalog/')

        # Table quotes Truncated Julian Date; add the TJD zero point.
        # Construct Decimal from a string, not a float, so the constant
        # is exact by contract rather than by accident (decimal docs).
        jd = Decimal('2440000.5') + Decimal(row[3])
        astrot = astrotime(float(jd), format='jd').datetime
        catalog.entries[name].add_quantity(
            SUPERNOVA.DISCOVER_DATE,
            make_date_string(astrot.year, astrot.month, astrot.day), source)
        # Positional error is quoted in degrees; convert to arcseconds.
        pos_err = str(Decimal(row[9]) * Decimal(3600))
        catalog.entries[name].add_quantity(SUPERNOVA.RA,
                                           row[5],
                                           source,
                                           u_value='floatdegrees',
                                           e_value=pos_err)
        catalog.entries[name].add_quantity(SUPERNOVA.DEC,
                                           row[6],
                                           source,
                                           u_value='floatdegrees',
                                           e_value=pos_err)
        catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, 'LGRB',
                                           source)

    catalog.journal_entries()
    return
Beispiel #17
0
def do_snax(catalog):
    """Import from the SNaX X-ray database.

    Downloads the SNaX TSV export and, for each row, records claimed
    type, explosion date, coordinates, luminosity distance, host,
    redshift, and one X-ray flux photometry point, each attached to
    the bibcode(s) the export quotes for that field.

    Parameters
    ----------
    catalog : the running task catalog; entries are journaled at the end.
    """
    task_str = catalog.get_current_task_str()

    dlurl = 'http://kronos.uchicago.edu/snax/export.php?exportType=TSV&exportFields=standard&objid=&name=&typeid=&type=&galaxyid=&galaxy=&fluxMin=&fluxMax=&fluxEnergyLMin=&fluxEnergyLMax=&fluxEnergyHMin=&fluxEnergyHMax=&lumMin=&lumMax=&instrumentid=&instrument=&ageMin=&ageMax=&dateMin=&dateMax=&sortA=dateExploded'  # noqa: E501

    file_path = os.path.join(catalog.get_current_task_repo(), 'SNaX.TSV')

    tsv = catalog.load_url(dlurl, file_path)
    # Guard against a failed download/cache read, matching the other
    # importers in this module; otherwise tsv.split raises AttributeError.
    if not tsv:
        return

    data = [x.split('\t') for x in tsv.split('\n')]

    for r, row in enumerate(pbar(data, task_str)):
        # Skip the header row and the trailing blank line of the export.
        if r == 0 or not row[0]:
            continue
        (name, source) = catalog.new_entry(
            row[0], srcname='SNaX', url='http://kronos.uchicago.edu/snax/',
            secondary=True)
        sources = [source]
        # Each field carries its own quoted bibcode; combine it with the
        # secondary SNaX source for that quantity.
        expsrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-6].strip())
        ])
        coosrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-5].strip())
        ])
        dissrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-4].strip())
        ])
        flxsrc = uniq_cdl(sources + [
            catalog.entries[name].add_source(bibcode=row[-3].strip()),
            catalog.entries[name].add_source(bibcode=row[-2].strip())
        ])

        catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, row[1],
                                           source)
        date = astrotime(float(row[2]), format='jd').datetime
        catalog.entries[name].add_quantity(
            SUPERNOVA.EXPLOSION_DATE,
            make_date_string(date.year, date.month, date.day), expsrc)
        # Column 3 holds RA and Dec together: first three tokens are RA,
        # the rest are Dec.
        catalog.entries[name].add_quantity(
            SUPERNOVA.RA, ' '.join(row[3].split()[:3]), coosrc)
        catalog.entries[name].add_quantity(
            SUPERNOVA.DEC, ' '.join(row[3].split()[3:]), coosrc)
        catalog.entries[name].add_quantity(SUPERNOVA.LUM_DIST, row[4], dissrc)
        catalog.entries[name].add_quantity(SUPERNOVA.HOST, row[5], source)
        catalog.entries[name].add_quantity(
            SUPERNOVA.REDSHIFT,
            row[6],
            source,
            # Drop zero/empty redshift errors rather than storing '0'.
            e_value=row[7] if (row[7] and float(row[7]) != 0.0) else '')
        # Fluxes are quoted in units of 1e-13 ergs/s/cm^2.
        photodict = {
            PHOTOMETRY.TIME: jd_to_mjd(Decimal(row[8])),
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.ENERGY: row[15:17],
            PHOTOMETRY.U_ENERGY: 'keV',
            PHOTOMETRY.FLUX: str(Decimal('1.0e-13') * Decimal(row[11])),
            PHOTOMETRY.U_FLUX: 'ergs/s/cm^2',
            PHOTOMETRY.E_LOWER_FLUX:
            str(Decimal('1.0e-13') * Decimal(row[13])),
            PHOTOMETRY.E_UPPER_FLUX:
            str(Decimal('1.0e-13') * Decimal(row[14])),
            PHOTOMETRY.INSTRUMENT: row[9],
            PHOTOMETRY.SOURCE: flxsrc
        }
        if row[12] == '1':
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
        catalog.entries[name].add_photometry(**photodict)

    catalog.journal_entries()
    return