Example #1

# Connect to the BDNYC database, overriding its custom SPECTRUM column types
# (connection string and settings as used in the copy step further below)
from astrodbkit2.astrodb import Database, copy_database_schema
from sqlalchemy import types, and_

db = Database('sqlite:///../BDNYCdb-1/bdnyc_database.db',
              reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes',
                                'systems', 'telescopes', 'versions', 'instruments'],
              primary_table='sources',
              primary_table_key='id',
              foreign_key='source_id',
              column_type_overrides={'spectra.spectrum': types.TEXT(),
                                     'spectra.local_spectrum': types.TEXT()})

# Query similarly to SIMPLE
results = db.query(db.sources).limit(10).all()
for row in results: print(row)

# The spectra table contains columns of type SPECTRUM; the column_type_overrides let us work with them as text
for c in db.spectra.columns: print(c, c.type)
db.query(db.spectra).limit(10).all()

_ = db.inventory(11, pretty_print=True)  # full inventory for the source with primary key 11

# Output the full contents of BDNYC as JSON files
db.save_db('bdnyc')

# Copy the database schema to another database
source_connection_string = 'sqlite:///../BDNYCdb-1/bdnyc_database.db'  # SQLite
destination_connection_string = 'postgresql://localhost/BDNYC'  # Postgres
copy_database_schema(source_connection_string, destination_connection_string)

# Connect to the new database and load the contents from the JSON files
db = Database(destination_connection_string,
              reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes',
                                'systems', 'telescopes', 'versions', 'instruments'],
              primary_table='sources',
              primary_table_key='id',
              foreign_key='source_id',
              column_type_overrides={'spectra.spectrum': types.TEXT(),
                                     'spectra.local_spectrum': types.TEXT()})
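
# Populate the new database from the JSON files written out above
# (load_database reads back a directory of JSON files produced by save_db)
db.load_database('bdnyc')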
# Pull the BDNYC publications whose shortnames are not already in SIMPLE
# (existing_simple holds the publication names already present in SIMPLE)
temp = bdnyc.query(bdnyc.publications)\
    .filter(bdnyc.publications.c.shortname.notin_(existing_simple))\
    .all()

# Reformat data into something easier for SIMPLE to import
new_db_mapping = {'DOI': 'doi', 'shortname': 'name'}
data = [{
    new_db_mapping.get(k, k): getattr(x, k)
    for k in x.keys() if k != 'id'
} for x in temp]

db.Publications.insert().execute(data)
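# Note: Table.insert().execute(...) uses SQLAlchemy 1.x implicit execution; under
# SQLAlchemy 1.4+/2.0 a sketch of the equivalent, assuming db.engine exposes the
# underlying Engine, would be:
# with db.engine.connect() as conn:
#     conn.execute(db.Publications.insert(), data)
#     conn.commit()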

# Verify insert and save to disk
db.query(db.Publications).count()
db.save_db('data')

# ----------------------------------------------------------------------------------------
# Add Sources that are not already in SIMPLE
temp = db.query(db.Publications.c.name).all()
publications_simple = [s[0] for s in temp]
temp = db.query(db.Sources.c.source).all()
existing_simple = [s[0] for s in temp]

temp = bdnyc.query(bdnyc.sources.c.designation,
                   bdnyc.sources.c.names,
                   bdnyc.sources.c.ra,
                   bdnyc.sources.c.dec,
                   bdnyc.sources.c.shortname,
                   bdnyc.sources.c.publication_shortname, )\
    .filter(and_(bdnyc.sources.c.names.notin_(existing_simple),
Example #3
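
# (This snippet assumes ingest_table is an astropy Table loaded earlier in the script;
#  names, spectral_types, and regime come from its other columns, and helpers such as
#  convert_spt_string_to_code and ingest_parallaxes come from the project's ingest utilities)
from astropy.table import Table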
spt_refs = ingest_table['spt_ref']

# Source names as they appear in the database Sources table
db_names = []
for name in names:
    db_name = db.search_object(name, output_table='Sources')[0].source
    db_names.append(db_name)

# Convert SpT string to code
spectral_type_codes = convert_spt_string_to_code(spectral_types, verbose=True)
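# For orientation: the codes follow the usual numeric spectral-type convention
# (an assumption here, not shown in this snippet), e.g. M0 -> 60, L0 -> 70, T0 -> 80,
# so 'L3.5' becomes 73.5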

# Add new references to the Publications table
ref_list = spt_refs.tolist()
included_ref = db.query(db.Publications.c.name).filter(
    db.Publications.c.name.in_(ref_list)).all()
included_ref = [s[0] for s in included_ref]
new_ref = list(set(ref_list) - set(included_ref))
new_ref = [{'name': s} for s in new_ref]

if len(new_ref) > 0:
    db.Publications.insert().execute(new_ref)

# Make astropy table with all relevant columns and add to SpectralTypes Table
SpT_table = Table(
    [db_names, spectral_types, spectral_type_codes, regime, spt_refs],
    names=('source', 'spectral_type_string', 'spectral_type_code', 'regime',
           'reference'))
db.add_table_data(SpT_table, table='SpectralTypes', fmt='astropy')

db.save_db('../../data')
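
# Map integer reference codes from the source table to SIMPLE publication names
# (used below by ingest_parallaxes)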
        name_ref.append('Bedi17')
    elif ref == 24:
        name_ref.append('Dahn02')
    elif ref == 25:
        name_ref.append('Maro13')
    elif ref == 26:
        name_ref.append('Wein12')
    elif ref == 27:
        name_ref.append('Tinn14')
    elif ref == 28:
        name_ref.append('Tinn03')
    elif ref == 29:
        name_ref.append('Delo17')
    elif ref == 30:
        name_ref.append('Mars13')
    elif ref == 31:
        name_ref.append('Luhm16')
    elif ref == 32:
        name_ref.append('Kirk11')
    elif ref == 33:
        name_ref.append('Legg17')
    elif ref == 34:
        name_ref.append('Mart18')
    else:
        name_ref.append('Missing')
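# The if/elif chain above amounts to a dictionary lookup; a minimal sketch covering
# only the mappings visible here (with `refs` standing for the sequence iterated over):
# ref_names = {24: 'Dahn02', 25: 'Maro13', 26: 'Wein12', 27: 'Tinn14', 28: 'Tinn03',
#              29: 'Delo17', 30: 'Mars13', 31: 'Luhm16', 32: 'Kirk11', 33: 'Legg17',
#              34: 'Mart18'}
# name_ref = [ref_names.get(ref, 'Missing') for ref in refs]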

ingest_parallaxes(db, sources, plx, plx_unc, name_ref, verbose=True)

if not DRY_RUN:
    db.save_db('data')  # update the JSON files only when not doing a dry run