Exemplo n.º 1
0
def do_test(catalog):
    """Run a self-test of basic catalog operations using fake entries.

    Exercises URL loading, repo-path helpers and progress bars, fake-entry
    creation, multi-source merging, entry-file output/deletion, and alias
    sanitization.  Raises ``RuntimeError`` on any failed check.

    Parameters
    ----------
    catalog
        The catalog under test; provides ``log``, ``entries``, ``args``,
        ``current_task`` and ``PATHS`` (presumably an astrocats ``Catalog``
        subclass instance — confirm against the caller).

    Raises
    ------
    RuntimeError
        If the catalog is not empty on entry, or if any file-existence /
        entry-deletion check fails.
    """
    log = catalog.log
    log.info("do_test()")
    task_str = catalog.get_current_task_str()
    log.info("`task_str`: '{}'".format(task_str))

    # Guard: every check below assumes a pristine (empty) catalog.
    if len(catalog.entries) != 0:
        raise RuntimeError("Run test only with empty catalog.")

    # Test URL retrieve functions
    # ---------------------------
    log.info("`args.archived` = '{}', `current_task.archived` = '{}'".format(
        catalog.args.archived, catalog.current_task.archived))

    test_load_url(catalog)

    # Test repo path functions
    # ------------------------
    paths = catalog.PATHS.get_all_repo_folders()
    for path in tq(paths, currenttask='Test tq progress bar.'):
        tprint('Test tprint.')
        log.debug(path)
    paths = catalog.PATHS.get_repo_input_folders()
    for path in pbar_strings(paths, desc='Test pbar_strings progress bar.'):
        log.debug(path)
    boneyard = catalog.PATHS.get_repo_boneyard()
    log.debug(boneyard)

    # Create a Fake Entry, with some Fake Data
    # ----------------------------------------
    _first_event_first_source(catalog)

    log_str = "ADDING SECOND SOURCE"
    log.info("\n\n{}\n{}\n{}\n\n".format("=" * 100, log_str, "=" * 100))

    # Add new Data, from different source, to same fake entry
    # -------------------------------------------------------
    _first_event_second_source(catalog)

    # Make sure output file for this test exists
    outdir, filename = catalog.entries[FAKE_ALIAS_1]._get_save_path()
    save_name = os.path.join(outdir, filename + '.json')
    if not os.path.exists(save_name):
        raise RuntimeError("File not found in '{}'".format(save_name))
    # Delete created test file
    catalog._delete_entry_file(entry_name=FAKE_ALIAS_1)
    # Make sure it was deleted
    if os.path.exists(save_name):
        raise RuntimeError("File not deleted at '{}'".format(save_name))

    # Delete entry in catalog
    del catalog.entries[FAKE_ALIAS_1]
    # Make sure entry was deleted
    if len(catalog.entries) != 0:
        raise RuntimeError("Error deleting test entry!")

    # Add entry back catalog to test later tasks
    _first_event_first_source(catalog)
    _first_event_second_source(catalog)

    # Test some utility functions
    log.debug("Preferred name for 2nd source: " +
              catalog.get_preferred_name(FAKE_ALIAS_2))
    log.debug("Entry exists? " +
              str(catalog.entry_exists(FAKE_ALIAS_2)))
    log.debug("Entry text: " + catalog.entries[FAKE_ALIAS_1].get_entry_text(
        os.path.join(outdir, filename + '.json')))

    # Third source is a duplicate that will be merged
    _first_event_third_source(catalog)

    # Add second event to perform different tests
    _second_event(catalog)

    # Delete name to test name re-addition in sanitize
    # NOTE(review): the loop indexes the alias list of FAKE_ALIAS_5 but
    # deletes by that index from FAKE_ALIAS_1's alias list — this is only
    # correct if both aliases resolve to the same underlying entry (e.g.
    # after merging); confirm against the FAKE_ALIAS_* definitions.
    for ii, alias in enumerate(
            catalog.entries[FAKE_ALIAS_5][ENTRY.ALIAS].copy()):
        if alias[QUANTITY.VALUE] == FAKE_ALIAS_5:
            del catalog.entries[FAKE_ALIAS_1][ENTRY.ALIAS][ii]
            break

    return
Exemplo n.º 2
0
          cubehelix.purple_16.hex_colors[2:13] +
          cubehelix.purple_16.hex_colors[2:13] +
          cubehelix.perceptual_rainbow_16.hex_colors)
shuffle(colors)

repofolders = get_rep_folders(moduledir)
files = repo_file_list(moduledir, repofolders, normal=True, bones=False)

with open(
        'astrocats/' + moduledir + '/input/non-' + modulename + '-types.json',
        'r') as f:
    nontypes = json.loads(f.read(), object_pairs_hook=OrderedDict)
    nontypes = [x.upper() for x in nontypes]

for fcnt, eventfile in enumerate(
        tq(sorted(files, key=lambda s: s.lower()), "Collecting positions")):
    # if fcnt > 5000:
    #    break

    filetext = get_event_text(eventfile)

    thisevent = json.loads(filetext, object_pairs_hook=OrderedDict)
    thisevent = thisevent[list(thisevent.keys())[0]]

    if 'ra' in thisevent and 'dec' in thisevent:
        if 'claimedtype' in thisevent and thisevent['claimedtype']:
            for ct in [x['value'] for x in thisevent['claimedtype']]:
                thistype = ct.replace('?', '').replace('*', '')
                if thistype.upper() in nontypes:
                    continue
                elif thistype in ('Other', 'not Ia', 'SN', 'unconf', 'Radio',
Exemplo n.º 3
0
metanames = [x['name'] for x in meta]

for averagetype in averagetypes:
    phototime = []
    phototimelowererrs = []
    phototimeuppererrs = []
    photoAB = []
    photoABerrs = []
    photoband = []
    photoinstru = []
    photoevent = []
    phototype = []

    for fcnt, eventfile in enumerate(
        tq(sorted(files, key=lambda s: s.lower()), 'Looping over ' +
           averagetype + ' SNe')):
        # if fcnt > 2000:
        #    break

        name = os.path.basename(os.path.splitext(eventfile)[0])
        if name in metanames:
            foundtype = False
            mi = metanames.index(name)
            if meta[mi]['claimedtype']:
                for ct in meta[mi]['claimedtype']:
                    if ct['value'] == averagetype:
                        foundtype = True
                        break
            if not foundtype:
                continue
        else:
Exemplo n.º 4
0
def do_test(catalog):
    """Self-test of core catalog behavior using fabricated entries.

    Walks through URL retrieval, repo-path helpers, fake-entry creation and
    merging, entry-file output and removal, and alias sanitization.  The
    catalog must be empty on entry; any failed check raises ``RuntimeError``.
    """
    logger = catalog.log
    logger.info("do_test()")
    task_label = catalog.get_current_task_str()
    logger.info("`task_str`: '{}'".format(task_label))

    # Every check below assumes no pre-existing entries.
    if len(catalog.entries) != 0:
        raise RuntimeError("Run test only with empty catalog.")

    # URL retrieval helpers.
    logger.info("`args.archived` = '{}', `current_task.archived` = '{}'".format(
        catalog.args.archived, catalog.current_task.archived))
    test_load_url(catalog)

    # Repo-path helpers and both progress-bar flavors.
    for repo_dir in tq(catalog.PATHS.get_all_repo_folders(),
                       currenttask='Test tq progress bar.'):
        tprint('Test tprint.')
        logger.debug(repo_dir)
    for input_dir in pbar_strings(catalog.PATHS.get_repo_input_folders(),
                                  desc='Test pbar_strings progress bar.'):
        logger.debug(input_dir)
    logger.debug(catalog.PATHS.get_repo_boneyard())

    # Fabricate an entry with some fake data.
    _first_event_first_source(catalog)

    banner = "ADDING SECOND SOURCE"
    rule = "=" * 100
    logger.info("\n\n{}\n{}\n{}\n\n".format(rule, banner, rule))

    # Same fake entry, data arriving from a second source.
    _first_event_second_source(catalog)

    # The fake entry's output file must now exist on disk.
    out_dir, base_name = catalog.entries[FAKE_ALIAS_1]._get_save_path()
    saved_path = os.path.join(out_dir, base_name + '.json')
    if not os.path.exists(saved_path):
        raise RuntimeError("File not found in '{}'".format(saved_path))
    # Deleting the entry file must remove it from disk.
    catalog._delete_entry_file(entry_name=FAKE_ALIAS_1)
    if os.path.exists(saved_path):
        raise RuntimeError("File not deleted at '{}'".format(saved_path))

    # Removing the entry must leave the catalog empty again.
    del catalog.entries[FAKE_ALIAS_1]
    if len(catalog.entries) != 0:
        raise RuntimeError("Error deleting test entry!")

    # Re-create the entry so later tasks have data to operate on.
    _first_event_first_source(catalog)
    _first_event_second_source(catalog)

    # Smoke-test a few utility functions.
    logger.debug("Preferred name for 2nd source: " +
                 catalog.get_preferred_name(FAKE_ALIAS_2))
    logger.debug("Entry exists? " + str(catalog.entry_exists(FAKE_ALIAS_2)))
    logger.debug("Entry text: " + catalog.entries[FAKE_ALIAS_1].get_entry_text(
        os.path.join(out_dir, base_name + '.json')))

    # A duplicate (third) source should be merged into the first event.
    _first_event_third_source(catalog)

    # A second event enables a different set of tests.
    _second_event(catalog)

    # Drop a name so sanitize can be seen re-adding it.
    alias_snapshot = catalog.entries[FAKE_ALIAS_5][ENTRY.ALIAS].copy()
    for idx, alias_item in enumerate(alias_snapshot):
        if alias_item[QUANTITY.VALUE] == FAKE_ALIAS_5:
            del catalog.entries[FAKE_ALIAS_1][ENTRY.ALIAS][idx]
            break

    return
Exemplo n.º 5
0
else:
    bibauthordict = OrderedDict()

files = repo_file_list(bones=False)

path = 'ads.key'
if os.path.isfile(path):
    with open(path, 'r') as f:
        ads.config.token = f.read().splitlines()[0]
else:
    raise IOError(
        "Cannot find ads.key, please generate one at "
        "https://ui.adsabs.harvard.edu/#user/settings/token and place it in "
        "this file.")

for fcnt, eventfile in enumerate(tq(sorted(files, key=lambda s: s.lower()))):
    # if fcnt > 100:
    #    break
    fileeventname = os.path.splitext(os.path.basename(eventfile))[
        0].replace('.json', '')

    if eventfile.split('.')[-1] == 'gz':
        with gzip.open(eventfile, 'rt') as f:
            filetext = f.read()
    else:
        with open(eventfile, 'r') as f:
            filetext = f.read()

    item = json.loads(filetext, object_pairs_hook=OrderedDict)
    item = item[list(item.keys())[0]]
Exemplo n.º 6
0
          cubehelix.cubehelix3_16.hex_colors[2:13] +
          cubehelix.jim_special_16.hex_colors[2:13] +
          cubehelix.purple_16.hex_colors[2:13] +
          cubehelix.purple_16.hex_colors[2:13] +
          cubehelix.purple_16.hex_colors[2:13] +
          cubehelix.purple_16.hex_colors[2:13] +
          cubehelix.perceptual_rainbow_16.hex_colors)
shuffle(colors)

files = repo_file_list(bones=False)

with open('astrocats/supernovae/input/non-sne-types.json', 'r') as f:
    nonsnetypes = json.loads(f.read(), object_pairs_hook=OrderedDict)
    nonsnetypes = [x.upper() for x in nonsnetypes]

for fcnt, eventfile in enumerate(tq(sorted(files, key=lambda s: s.lower()),
                                    "Collecting positions")):
    # if fcnt > 20:
    #    break

    filetext = get_event_text(eventfile)

    thisevent = json.loads(filetext, object_pairs_hook=OrderedDict)
    thisevent = thisevent[list(thisevent.keys())[0]]

    if 'ra' in thisevent and 'dec' in thisevent:
        if 'claimedtype' in thisevent and thisevent['claimedtype']:
            for ct in [x['value'] for x in thisevent['claimedtype']]:
                thistype = ct.replace('?', '').replace('*', '')
                if thistype.upper() in nonsnetypes:
                    continue
                elif thistype in ('Other', 'not Ia', 'SN', 'unconf', 'Radio',
Exemplo n.º 7
0
if os.path.isfile(path):
    with open(path, 'r') as f:
        ads.config.token = f.read().splitlines()[0]
else:
    raise IOError(
        "Cannot find ads.key, please generate one at "
        "https://ui.adsabs.harvard.edu/#user/settings/token and place it in "
        "this file.")

specterms = [
    "spectrum", "spectra", "spectroscopic", "spectroscopy"]

photterms = [
    "photometry", "photometric", "light curve"]

for fcnt, eventfile in enumerate(tq(sorted(files, key=lambda s: s.lower()))):
    #if fcnt > 10000:
    #   break
    fileeventname = os.path.splitext(os.path.basename(eventfile))[0].replace(
        '.json', '')

    if eventfile.split('.')[-1] == 'gz':
        with gzip.open(eventfile, 'rt') as f:
            filetext = f.read()
    else:
        with open(eventfile, 'r') as f:
            filetext = f.read()

    item = json.loads(filetext, object_pairs_hook=OrderedDict)
    item = item[list(item.keys())[0]]
Exemplo n.º 8
0
metanames = [x['name'] for x in meta]

for averagetype in averagetypes:
    phototime = []
    phototimelowererrs = []
    phototimeuppererrs = []
    photoAB = []
    photoABerrs = []
    photoband = []
    photoinstru = []
    photoevent = []
    phototype = []

    for fcnt, eventfile in enumerate(
            tq(sorted(files, key=lambda s: s.lower()),
               'Looping over ' + averagetype + ' SNe')):
        # if fcnt > 2000:
        #    break

        name = os.path.basename(os.path.splitext(eventfile)[0])
        if name in metanames:
            foundtype = False
            mi = metanames.index(name)
            if meta[mi]['claimedtype']:
                for ct in meta[mi]['claimedtype']:
                    if ct['value'] == averagetype:
                        foundtype = True
                        break
            if not foundtype:
                continue
        else:
Exemplo n.º 9
0
              cubehelix.purple_16.hex_colors[2:13] +
              cubehelix.purple_16.hex_colors[2:13] +
              cubehelix.purple_16.hex_colors[2:13] +
              cubehelix.purple_16.hex_colors[2:13] +
              cubehelix.perceptual_rainbow_16.hex_colors)
    shuffle(colors)
    untype = 'Unknown'

repofolders = get_rep_folders(moduledir)
files = repo_file_list(moduledir, repofolders, normal=True, bones=True)

with open('astrocats/' + moduledir + '/input/non-' + modulename + '-types.json', 'r') as f:
    nontypes = json.loads(f.read(), object_pairs_hook=OrderedDict)
    nontypes = [x.upper() for x in nontypes]

for fcnt, eventfile in enumerate(tq(sorted(files, key=lambda s: s.lower()),
                                    "Collecting positions")):
    # if fcnt > 5000:
    #    break

    filetext = get_event_text(eventfile)

    thisevent = json.loads(filetext, object_pairs_hook=OrderedDict)
    thisevent = thisevent[list(thisevent.keys())[0]]

    # Code for Boubert 2018.
    # if 'discoverdate' in thisevent:
    #     if int(thisevent['discoverdate'][0]['value']) >= 2018:
    #         continue

    # if 'boundprobability' not in thisevent or float(thisevent['boundprobability'][0]['value']) > 0.5:
    #     continue
Exemplo n.º 10
0
sntypes = sorted(sntypes)
snoffs = [[] for x in range(len(sntypes))]

tt = [
    ("Type", "@ct")
]
hover = HoverTool(tooltips=tt, line_policy='interp')
p = Figure(x_range=[0., 100.], y_range=[0., 1.],
           title='Supernova Host Offsets',
           x_axis_label='Offset (kpc)', y_axis_label='CDF',
           plot_width=980, plot_height=500)
p.add_tools(hover)
p.title.text_font = 'futura'
p.title.text_font_size = '14pt'

for si, sntype in enumerate(tq(sntypes)):
    for event in meta:
        if ('hostoffsetdist' in event and event['hostoffsetdist'] and
                is_number(event['hostoffsetdist'][0]['value']) and
                'claimedtype' in event and event['claimedtype'] and
                sntype in [x['value'] for x in event['claimedtype']]):
            snoffs[si].append(float(event['hostoffsetdist'][0]['value']))
    snoffs[si] = sorted(snoffs[si])

colors = sns.color_palette("hls", n_colors=sum(
    [1 if len(snoffs[i]) >= mincnt else 0 for i, x in
     enumerate(snoffs)])).as_hex()

cnt = 0
for si, sntype in enumerate(sntypes):
    if len(snoffs[si]) >= mincnt: