Example no. 1
0
# Redshift sampling grid for the CSP library tables.
z_from = 0.01
z_to = 0.7
z_step = 0.01

# Extinction (A_V) sampling grid.
av_from = 0.0
av_to = 2.0
av_step = 0.1
av_sampling = np.arange(av_from, av_to, av_step)

#db_file = 'lib_csp_A.hdf5'
db_file = '/home/william/lib_csp_A_sdss_z0p7.hdf5'
# Remove any stale database file before (re)creating it.  Catch only
# OSError (file missing, permissions) instead of a bare except that
# would also hide KeyboardInterrupt and real bugs.
try:
    os.unlink(db_file)
except OSError:
    pass
db = inithdf5(db_file)
# Tables group
db.create_group('/tables/')
# Filtersystem groups, each with its CCD subgroups.
# NOTE(review): the original CCD loop was dedented, so CCD groups were
# created only for the *last* filterid; nesting it appears to be the
# intent (one /<filter>/<ccd>/ group per combination) -- confirm.
for filterid in db_f.keys():
    db.create_group('/%s/' % filterid)
    for ccd in db_f.get(filterid).keys():
        db.create_group('/%s/%s/' % (filterid, ccd))


# 1 - Tables
## 1.1 - redshift: store the sampled redshift grid as a dataset.
db.create_dataset(name='/tables/z', data=np.arange(z_from, z_to, z_step))

## 1.2 - properties
Example no. 2
0
import os
import sys
import time
import h5py
import atpy

import numpy as np

from bgpe.io.readfilterset import readfilterset
from bgpe.io.hdf5util import inithdf5

if __name__ == "__main__" and len(sys.argv) > 2:

    # Usage: script.py <output_hdf5_file> <filter_file> [<filter_file> ...]
    dbfile = sys.argv[1]
    # Init the output HDF5 database file.
    db = inithdf5(dbfile)

    try:
        for filter_file in sys.argv[2:]:
            # The filter filename encodes the group path: its first two
            # dot-separated components become "/<part0>/<part1>/".
            # TODO(review): confirm the expected filename convention.
            aux_id = os.path.basename(filter_file).split(".")
            f = readfilterset()
            f.read(filter_file)
            # One dataset per unique filter ID found in the filter set.
            for fid in np.unique(f.filterset["ID_filter"]):
                dataset = "/%s/%s/%s" % (aux_id[0], aux_id[1], fid)
                # print() call form works on both Python 2 and Python 3;
                # the original "print dataset" statement is a SyntaxError
                # under Python 3.
                print(dataset)
                aux = atpy.Table(name=fid)
                aux.add_column(name="wl", data=f.filterset["wl"][f.filterset["ID_filter"] == fid])
                aux.add_column(name="transm", data=f.filterset["transm"][f.filterset["ID_filter"] == fid])
                db.create_dataset(dataset, data=aux.data)
    finally:
        # Close the HDF5 file even if reading a filter file raises,
        # so partially written data is flushed and the handle released.
        db.close()