Example #1
 def to_dict(self):
     """ Write AbsSystem data to a dict that can be written with JSON
     """
     import datetime
     import getpass
     date = str(datetime.date.today().strftime('%Y-%b-%d'))
     user = getpass.getuser()
     # Generate the dict
     outdict = dict(Name=self.name, abs_type=self.abs_type, zabs=self.zabs,
                    vlim=self.vlim.to('km/s').value, zem=self.zem,
                    NHI=self.NHI, sig_NHI=self.sig_NHI, flag_NHI=self.flag_NHI,
                    RA=self.coord.ra.value, DEC=self.coord.dec.value,
                    kin=self.kin, Refs=self.Refs, CreationDate=date,
                    ZH=self.ZH, sig_ZH=self.sig_ZH,
                    user=user
                    )
     outdict['class'] = self.__class__.__name__
     # Components
     outdict['components'] = {}
     for component in self._components:
         outdict['components'][component.name] = ltu.jsonify(component.to_dict())
     # Polish
     outdict = ltu.jsonify(outdict)
     # Return
     return outdict
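To show how the dict from `to_dict` is typically consumed, here is a minimal usage sketch (the `abssys` instance is hypothetical; `ltu` is `linetools.utils`, as throughout these examples). The returned dict is already JSON-clean, so it can be passed directly to `ltu.savejson` or `json.dumps`:

from linetools import utils as ltu

# 'abssys' is a hypothetical, already-constructed AbsSystem instance
outdict = abssys.to_dict()
# savejson writes the dict to disk; overwrite=True allows replacing an existing file
ltu.savejson('abssys.json', outdict, overwrite=True, easy_to_read=True)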
Example #2
 def to_dict(self):
     """ Write AbsSystem data to a dict that can be written with JSON
     """
     import datetime
     import getpass
     date = str(datetime.date.today().strftime('%Y-%b-%d'))
     user = getpass.getuser()
     # Generate the dict
     outdict = dict(Name=self.name, abs_type=self.abs_type, zabs=self.zabs,
                    vlim=self.vlim.to('km/s').value, zem=self.zem,
                    NHI=self.NHI, sig_NHI=self.sig_NHI, flag_NHI=self.flag_NHI,
                    RA=self.coord.icrs.ra.value, DEC=self.coord.icrs.dec.value,
                    kin=self.kin, Refs=self.Refs, CreationDate=date,
                    ZH=self.ZH, sig_ZH=self.sig_ZH, flag_ZH=self.flag_ZH,
                    user=user
                    )
     outdict['class'] = self.__class__.__name__
     # Components
     outdict['components'] = {}
     for component in self._components:
         outdict['components'][component.name] = ltu.jsonify(component.to_dict())
     # Spectrum file?
     if hasattr(self, 'spec_file'):
         outdict['spec_file'] = self.spec_file
     # Polish
     outdict = ltu.jsonify(outdict)
     # Return
     return outdict
Example #3
 def to_dict(self):
     """ Write AbsSightline data to a dict that can be written with JSON
     """
     import datetime
     import getpass
     date = str(datetime.date.today().strftime('%Y-%b-%d'))
     user = getpass.getuser()
     # Generate the dict
     outdict = dict(RA=self.coord.ra.value, DEC=self.coord.dec.value,
                    CreationDate=date, user=user)
     outdict['class'] = self.__class__.__name__
     # Add other attributes
     all_attr = self.__dict__
     for key in all_attr:
         if key in ['coord', '_components']:
             continue
         elif key in ['_abssystems']:
             pass
         else:
             outdict[key] = getattr(self, key)
     # Components
     outdict['components'] = {}
     for component in self._components:
         outdict['components'][component.name] = ltu.jsonify(component.to_dict())
     # Systems
     if len(self._abssystems) != 0:
         outdict['systems'] = {}
         for abs_sys in self._abssystems:
             outdict['systems'][abs_sys.name] = abs_sys.to_dict()
     # Polish
     outdict = ltu.jsonify(outdict)
     # Return
     return outdict
Example #4
 def to_dict(self):
     """ Write AbsSightline data to a dict that can be written with JSON
     """
     import datetime
     import getpass
     date = str(datetime.date.today().strftime('%Y-%b-%d'))
     user = getpass.getuser()
     # Generate the dict
     outdict = dict(RA=self.coord.ra.value, DEC=self.coord.dec.value,
                    CreationDate=date, user=user)
     outdict['class'] = self.__class__.__name__
     # Add other attributes
     all_attr = self.__dict__
     for key in all_attr:
         if key in ['coord', '_components']:
             continue
         else:
             outdict[key] = getattr(self, key)
     # Components
     outdict['components'] = {}
     for component in self._components:
         outdict['components'][component.name] = ltu.jsonify(component.to_dict())
     # Polish
     outdict = ltu.jsonify(outdict)
     # Return
     return outdict
Example #5
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset
    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: Quasar Spectra from the COS-Halos Survey'.format(dset)
    ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
    hdf[dset]['meta'].attrs['SSA_COS'] = json.dumps(ltu.jsonify(ssa_dict))
    # HIRES
    ssa_dict = default_fields(Title, flux='normalized')
    hdf[dset]['meta'].attrs['SSA_HIRES'] = json.dumps(ltu.jsonify(ssa_dict))
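A hedged sketch of how `add_ssa` might be invoked (the file and dataset names are hypothetical; `hdf` is assumed to be an open, writable h5py file whose `<dset>/meta` dataset already exists):

import h5py

# Hypothetical database file and dataset name
with h5py.File('cos-halos.hdf5', 'r+') as hdf:
    add_ssa(hdf, 'COS-Halos')  # writes the SSA_COS / SSA_HIRES attrs on hdf['COS-Halos']['meta']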
Example #6
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset
    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: Quasar Spectra from the COS-Halos Survey'.format(dset)
    ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
    hdf[dset]['meta'].attrs['SSA_COS'] = json.dumps(ltu.jsonify(ssa_dict))
    # HIRES
    ssa_dict = default_fields(Title, flux='normalized')
    hdf[dset]['meta'].attrs['SSA_HIRES'] = json.dumps(ltu.jsonify(ssa_dict))
Example #7
def test_to_dict():
    # Init
    gensl = make_gensl()
    # Dict
    gensl_dict = gensl.to_dict()
    _ = ltu.jsonify(gensl_dict)
    assert gensl_dict['class'] == 'GenericAbsSightline'
Example #8
    def write_out(self):
        import json, io

        # Create dict
        out_dict = dict(
            LLS={},
            conti_model=self.conti_dict,
            conti=list(self.base_continuum.value),
            spec_file=self.spec_widg.spec.filename,
            smooth=self.smooth,
        )
        if self.zqso is not None:
            out_dict["zqso"] = self.zqso
        # Load
        for kk, lls in enumerate(self.abssys_widg.all_abssys):
            key = "{:d}".format(kk + 1)
            out_dict["LLS"][key] = {}
            out_dict["LLS"][key]["z"] = lls.zabs
            out_dict["LLS"][key]["NHI"] = lls.NHI
            out_dict["LLS"][key]["bval"] = lls.lls_lines[0].attrib["b"].value
            out_dict["LLS"][key]["comment"] = str(lls.comment).strip()
        # Write
        # QtCore.pyqtRemoveInputHook()
        # xdb.set_trace()
        # QtCore.pyqtRestoreInputHook()
        clean_dict = ltu.jsonify(out_dict)
        with io.open(self.outfil, "w", encoding="utf-8") as f:
            f.write(json.dumps(clean_dict, sort_keys=True, indent=4, separators=(",", ": ")))  # json.dumps returns str on Python 3; unicode() was Python 2
        self.flag_write = True
Example #9
    def to_dict(self):
        """ Convert the system to a JSON-ready dict for output

        Returns
        -------
        cdict : dict
        """
        import datetime
        import getpass
        date = str(datetime.date.today().strftime('%Y-%b-%d'))
        user = getpass.getuser()
        # Generate the dict
        outdict = dict(Name=self.name, z=self.galaxy.z, rho=self.rho.value,
                       ang_sep=self.ang_sep.value,
                       PA=self.PA.value,
                       RA=self.galaxy.coord.ra.value,
                       DEC=self.galaxy.coord.dec.value,
                       cosmo=self.cosmo.name,
                       CreationDate=date,
                       user=user
                       )
        # IGM_SYS
        outdict['igm_sys'] = self.igm_sys.to_dict()
        # Galaxy
        outdict['galaxy'] = self.galaxy.to_dict()
        # Polish
        outdict = ltu.jsonify(outdict)
        # Return
        return outdict
Example #10
 def to_dict(self):
     """ Convert component data to a dict
     
     Returns
     -------
     cdict : dict
     """
     cdict = dict(
         Zion=self.Zion,
         zcomp=self.zcomp,
         vlim=self.vlim.to('km/s').value,
         Name=self.name,
         RA=self.coord.icrs.ra.value,
         DEC=self.coord.icrs.dec.value,
         A=self.A,
         Ej=self.Ej.to('1/cm').value,
         comment=self.comment,
         attrib=self.attrib.copy())  # Avoids changing the dict in place
     cdict['class'] = self.__class__.__name__
     # AbsLines
     cdict['lines'] = {}
     for iline in self._abslines:
         cdict['lines'][iline.wrest.value] = iline.to_dict()
     # Polish
     cdict = ltu.jsonify(cdict)
     # Return
     return cdict
Example #11
    def to_dict(self):
        """ Convert the galaxy to a JSON-ready dict for output

        Returns
        -------
        gdict : dict

        """
        import datetime
        import getpass
        date = str(datetime.date.today().strftime('%Y-%b-%d'))
        user = getpass.getuser()
        # Generate the dict
        gdict = dict(Name=self.name,
                       RA=self.coord.ra.value,
                       DEC=self.coord.dec.value,
                       CreationDate=date,
                       user=user
                       )
        # Attributes (e.g. SFR)
        for key in self.__dict__.keys():
            if key in ['coord', 'name']:
                continue
            gdict[key] = getattr(self, key)
        # Polish
        gdict = ltu.jsonify(gdict)
        # Return
        return gdict
Example #12
 def to_dict(self):
     """ Convert component data to a dict
     Returns
     -------
     cdict : dict
     """
     cdict = dict(Zion=self.Zion,
                  zcomp=self.zcomp,
                  vlim=self.vlim.to('km/s').value,
                  Name=self.name,
                  RA=self.coord.ra.value,
                  DEC=self.coord.dec.value,
                  A=self.A,
                  Ej=self.Ej.to('1/cm').value,
                  comment=self.comment,
                  flag_N=self.flag_N,
                  logN=self.logN,
                  sig_logN=self.sig_logN)
     cdict['class'] = self.__class__.__name__
     # AbsLines
     cdict['lines'] = {}
     for iline in self._abslines:
         cdict['lines'][iline.wrest.value] = iline.to_dict()
     # Polish
     cdict = ltu.jsonify(cdict)
     # Return
     return cdict
Example #13
    def to_dict(self):
        """ Generate a dict from the sub-system

        Returns
        -------
        outdict : dict
          JSON compatible

        """
        import datetime
        import getpass
        date = str(datetime.date.today().strftime('%Y-%b-%d'))
        user = getpass.getuser()
        # Generate the dict
        outdict = dict(abs_type='SubSystem',
                       Name=self.name,
                       zabs=self.zabs,
                       vlim=self.vlim.to('km/s').value,
                       lbl=self.lbl,
                       CreationDate=date,
                       user=user)
        # Components
        outdict['components'] = {}
        for component in self._components:
            outdict['components'][component.name] = component.to_dict()
        # Polish
        outdict = ltu.jsonify(outdict)
        # Return
        return outdict
Example #14
 def save(self, idx):
     # Update dict
     self.update_dict(idx)
     # Save
     cjson = ltu.jsonify(self.zdict)
     ltu.savejson(self.outfile, cjson, overwrite=True, easy_to_read=True)
     print("Wrote: {:s}".format(self.outfile))
Example #15
def meta_to_disk(in_meta):
    """ Polish up the meta dict for I/O

    Parameters
    ----------
    in_meta : dict
      list in 'headers' needs to be a series of Header objects
      or a series of dict objects

    Returns
    -------
    meta_out : str

    """
    meta = in_meta.copy()
    # Headers
    for kk,header in enumerate(in_meta['headers']):
        if header is None:
            meta['headers'][kk] = str('none')
        else:
            if isinstance(header, dict):
                meta['headers'][kk] = header.copy()
            else:
                try:
                    meta['headers'][kk] = header.tostring()
                except AttributeError:
                    if not isinstance(header, str):  # basestring in the original Python 2 code
                        raise ValueError("Bad format in header")
                    meta['headers'][kk] = header
    # Clean up the dict
    d = ltu.jsonify(meta)  # 'liu' in the original appears to be a typo for ltu
    return json.dumps(d)
Example #16
    def to_dict(self):
        """ Generate a dict from the sub-system

        Returns
        -------
        outdict : dict
          JSON compatible

        """
        import datetime
        import getpass
        date = str(datetime.date.today().strftime('%Y-%b-%d'))
        user = getpass.getuser()
        # Generate the dict
        outdict = dict(abs_type='SubSystem', Name=self.name, zabs=self.zabs,
                       vlim=self.vlim.to('km/s').value,
                       lbl=self.lbl,
                       CreationDate=date,
                       user=user
                       )
        # Components
        outdict['components'] = {}
        for component in self._components:
            outdict['components'][component.name] = component.to_dict()
        # Polish
        outdict = ltu.jsonify(outdict)
        # Return
        return outdict
Example #17
def meta_to_disk(in_meta):
    """ Polish up the meta dict for I/O

    Parameters
    ----------
    in_meta : dict
      list in 'headers' needs to be a series of Header objects
      or a series of dict objects

    Returns
    -------
    meta_out : str

    """
    meta = in_meta.copy()
    # Headers
    for kk, header in enumerate(in_meta['headers']):
        if header is None:
            meta['headers'][kk] = str('none')
        else:
            if isinstance(header, dict):
                meta['headers'][kk] = header.copy()
            else:
                try:
                    meta['headers'][kk] = header.tostring()
                except AttributeError:
                    if not isinstance(header, str):  # basestring in the original Python 2 code
                        raise ValueError("Bad format in header")
                    meta['headers'][kk] = header
    # Clean up the dict
    d = ltu.jsonify(meta)  # 'liu' in the original appears to be a typo for ltu
    return json.dumps(d)
Example #18
    def to_dict(self):
        """ Convert the system to a JSON-ready dict for output

        Returns
        -------
        cdict : dict
        """
        import datetime
        import getpass
        date = str(datetime.date.today().strftime('%Y-%b-%d'))
        user = getpass.getuser()
        # Generate the dict
        outdict = dict(Name=self.name,
                       z=self.galaxy.z,
                       rho=self.rho.value,
                       ang_sep=self.ang_sep.value,
                       PA=self.PA.value,
                       RA=self.galaxy.coord.icrs.ra.value,
                       DEC=self.galaxy.coord.icrs.dec.value,
                       cosmo=self.cosmo.name,
                       ebv=self.ebv,
                       CreationDate=date,
                       user=user)
        # IGM_SYS
        outdict['igm_sys'] = self.igm_sys.to_dict()
        # Galaxy
        outdict['galaxy'] = self.galaxy.to_dict()
        # Polish
        outdict = ltu.jsonify(outdict)
        # Return
        return outdict
Example #19
 def write_to_json(self, outfile):
     # Generate the dict
     igms_dict = self.to_dict()
     # Jsonify
     clean_dict = ltu.jsonify(igms_dict)
     # Write
     ltu.savejson(outfile, clean_dict, overwrite=True)
Example #20
 def write_out(self):
     import json, io
     # Create dict
     out_dict = dict(DLA={},
                     conti_model=self.conti_dict,
                     spec_file=self.spec_widg.spec.filename,
                     smooth=self.smooth)
     if self.zqso is not None:
         out_dict['zqso'] = self.zqso
     # Load
     for kk, dla in enumerate(self.abssys_widg.all_abssys):
         key = '{:d}'.format(kk + 1)
         out_dict['DLA'][key] = {}
         out_dict['DLA'][key]['z'] = dla.zabs
         out_dict['DLA'][key]['NHI'] = dla.NHI
         out_dict['DLA'][key]['bval'] = dla.dla_lines[0].attrib['b'].value
         out_dict['DLA'][key]['comment'] = str(dla.comment).strip()
     # Write
     #QtCore.pyqtRemoveInputHook()
     #xdb.set_trace()
     #QtCore.pyqtRestoreInputHook()
     clean_dict = ltu.jsonify(out_dict)
     with io.open(self.outfil, 'w', encoding='utf-8') as f:
          f.write(str(json.dumps(clean_dict,  # str() replaces Python 2's unicode()
                                 sort_keys=True,
                                 indent=4,
                                 separators=(',', ': '))))
     self.flag_write = True
Example #21
    def to_dict(self):
        """ Convert the galaxy to a JSON-ready dict for output

        Returns
        -------
        gdict : dict

        """
        import datetime
        import getpass
        date = str(datetime.date.today().strftime('%Y-%b-%d'))
        user = getpass.getuser()
        # Generate the dict
        gdict = dict(Name=self.name,
                     RA=self.coord.ra.value,
                     DEC=self.coord.dec.value,
                     CreationDate=date,
                     user=user)
        # Attributes (e.g. SFR)
        for key in self.__dict__.keys():
            if key in ['coord', 'name']:
                continue
            gdict[key] = getattr(self, key)
        # Polish
        gdict = ltu.jsonify(gdict)
        # Return
        return gdict
Example #22
    def to_dict(self):
        """ Convert class to dict

        Returns
        -------
        adict : dict
         dict representation of the SpectralLine
        """
        from numpy.ma.core import MaskedConstant
        from astropy.units import Quantity
        # Starting
        adict = dict(ltype=self.ltype, analy=dict(), attrib=dict(), data=dict(),
                     limits=dict(), name=self.name,
                     wrest=dict(value=self.wrest.value, unit=self.wrest.unit.to_string()))
        # Data
        for key in self.data:
            # Skip masked values
            if isinstance(self.data[key], MaskedConstant):
                continue
            # Quantity
            elif isinstance(self.data[key], Quantity):
                adict['data'][key] = dict(value=self.data[key].value,
                                          unit=self.data[key].unit.to_string())
            else:
                adict['data'][key] = self.data[key]
        # Attrib
        for key in self.attrib:
            if key == 'coord':
                adict['attrib']['RA'] = self.attrib['coord'].ra.value
                adict['attrib']['DEC'] = self.attrib['coord'].dec.value
            elif isinstance(self.attrib[key], Quantity):
                adict['attrib'][key] = dict(
                    value=self.attrib[key].value,
                    unit=self.attrib[key].unit.to_string())
            else:
                adict['attrib'][key] = self.attrib[key]
        # Analysis
        for key in self.analy:
            if key == 'spec':
                if isinstance(self.analy['spec'], str):  # basestring in Python 2
                    adict['analy']['spec_file'] = self.analy['spec']
                elif isinstance(self.analy['spec'], XSpectrum1D):
                    adict['analy']['spec_file'] = self.analy['spec'].filename
                else:
                    pass
            elif isinstance(self.analy[key], Quantity):
                adict['analy'][key] = dict(
                    value=self.analy[key].value,
                    unit=self.analy[key].unit.to_string())
            else:
                adict['analy'][key] = self.analy[key]

        # Limits
        adict['limits'] = self.limits.to_dict()

        # Polish for JSON
        adict = ltu.jsonify(adict)
        # Return
        return adict
Example #23
def save_masters(slf, det, setup):
    """ Save Master Frames
    Parameters
    ----------
    slf
    setup
    Returns
    -------
    """
    from linetools import utils as ltu
    import io, json

    # MasterFrame directory
    mdir = slf._argflag['run']['masterdir']
    # Bias
    if 'bias'+slf._argflag['masters']['setup'] not in slf._argflag['masters']['loaded']:
        if not isinstance(slf._msbias[det-1], str):  # basestring in Python 2
            arsave.save_master(slf, slf._msbias[det-1],
                               filename=master_name(mdir, 'bias', setup),
                               frametype='bias')
    # Bad Pixel
    if 'badpix'+slf._argflag['masters']['setup'] not in slf._argflag['masters']['loaded']:
        arsave.save_master(slf, slf._bpix[det-1],
                               filename=master_name(mdir, 'badpix', setup),
                               frametype='badpix')
    # Trace
    if 'trace'+slf._argflag['masters']['setup'] not in slf._argflag['masters']['loaded']:
        extensions = [slf._lordloc[det-1], slf._rordloc[det-1],
                      slf._pixcen[det-1], slf._pixwid[det-1],
                      slf._lordpix[det-1], slf._rordpix[det-1]]
        arsave.save_master(slf, slf._mstrace[det-1],
                           filename=master_name(mdir, 'trace', setup),
                           frametype='trace', extensions=extensions)
    # Pixel Flat
    if 'normpixflat'+slf._argflag['masters']['setup'] not in slf._argflag['masters']['loaded']:
        arsave.save_master(slf, slf._mspixflatnrm[det-1],
                           filename=master_name(mdir, 'normpixflat', setup),
                           frametype='normpixflat')
    # Arc/Wave
    if 'arc'+slf._argflag['masters']['setup'] not in slf._argflag['masters']['loaded']:
        arsave.save_master(slf, slf._msarc[det-1],
                           filename=master_name(mdir, 'arc', setup),
                           frametype='arc', keywds=dict(transp=slf._transpose))
    if 'wave'+slf._argflag['masters']['setup'] not in slf._argflag['masters']['loaded']:
        # Wavelength image
        arsave.save_master(slf, slf._mswave[det-1],
                           filename=master_name(mdir, 'wave', setup),
                           frametype='wave')
        # Wavelength fit
        gddict = ltu.jsonify(slf._wvcalib[det-1])
        json_file=master_name(mdir, 'wave_calib', setup)
        with io.open(json_file, 'w', encoding='utf-8') as f:
            f.write(str(json.dumps(gddict, sort_keys=True, indent=4,
                                   separators=(',', ': '))))  # str() replaces Python 2's unicode()
    if 'tilts'+slf._argflag['masters']['setup'] not in slf._argflag['masters']['loaded']:
        arsave.save_master(slf, slf._tilts[det-1],
                           filename=master_name(mdir, 'tilts', setup),
                           frametype='tilts')
Example #24
    def to_dict(self):
        """ Convert class to dict

        Returns
        -------
        adict : dict
         dict representation of the SpectralLine
        """
        from numpy.ma.core import MaskedConstant
        from astropy.units import Quantity
        # Starting
        adict = dict(ltype=self.ltype, analy=dict(), attrib=dict(), data=dict(),
                     limits=dict(), name=self.name,
                     wrest=dict(value=self.wrest.value, unit=self.wrest.unit.to_string()))
        # Data
        for key in self.data:
            # Skip masked values
            if isinstance(self.data[key], MaskedConstant):
                continue
            # Quantity
            elif isinstance(self.data[key], Quantity):
                adict['data'][key] = dict(value=self.data[key].value,
                                          unit=self.data[key].unit.to_string())
            else:
                adict['data'][key] = self.data[key]
        # Attrib
        for key in self.attrib:
            if key == 'coord':
                adict['attrib']['RA'] = self.attrib['coord'].ra.value
                adict['attrib']['DEC'] = self.attrib['coord'].dec.value
            elif isinstance(self.attrib[key], Quantity):
                adict['attrib'][key] = dict(value=self.attrib[key].value,
                                            unit=self.attrib[key].unit.to_string())
            else:
                adict['attrib'][key] = self.attrib[key]
        # Analysis
        for key in self.analy:
            if key == 'spec':
                if isinstance(self.analy['spec'], str):  # basestring in Python 2
                    adict['analy']['spec_file'] = self.analy['spec']
                elif isinstance(self.analy['spec'], XSpectrum1D):
                    adict['analy']['spec_file'] = self.analy['spec'].filename
                else:
                    pass
            elif isinstance(self.analy[key], Quantity):
                adict['analy'][key] = dict(value=self.analy[key].value,
                                            unit=self.analy[key].unit.to_string())
            else:
                adict['analy'][key] = self.analy[key]

        # Limits
        adict['limits'] = self.limits.to_dict()

        # Polish for JSON
        adict = ltu.jsonify(adict)
        # Return
        return adict
Example #25
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset
    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: Keck/HIRES KODIAQ DR1'.format(dset)
    ssa_dict = default_fields(Title, flux='normalized')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #26
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset
    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: SDSS DR7 Quasars'.format(dset)
    ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #27
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset
    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: HST UV spectra for surveying LLS and DLAs'.format(dset)
    ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #28
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset
    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = "{:s}: Dall'Aglio et al. (2008) compilation of VLT/UVES spectra".format(dset)
    ssa_dict = default_fields(Title, flux='flambda')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #29
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset
    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: HST and FUSE spectra of AGN and Quasars by Cooksey et al. (2010)'.format(
        dset)
    ssa_dict = default_fields(Title, flux='flambda')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #30
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset
    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = "{:s}: Dall'Aglio et al. (2008) compilation of VLT/UVES spectra".format(
        dset)
    ssa_dict = default_fields(Title, flux='flambda')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #31
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset

    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: The XQ-100 Survey of 100 z>3 quasars with VLT/XShooter'.format(dset)
    ssa_dict = default_fields(Title, flux='flambda')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #32
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset
    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: The Magellan uniform survey of damped Lya systems'.format(
        dset)
    ssa_dict = default_fields(Title, flux='normalized')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #33
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset

    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: The Keck/ESI Survey for high-z DLAs'.format(dset)
    ssa_dict = default_fields(Title, flux='flambda')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #34
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset

    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: The Keck/ESI Survey for high-z DLAs'.format(dset)
    ssa_dict = default_fields(Title, flux='flambda')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #35
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset

    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: Giant Gemini GMOS Survey of z>4 quasars'.format(dset)
    ssa_dict = default_fields(Title, flux='flambda')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #36
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset

    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: Giant Gemini GMOS Survey of z>4 quasars'.format(dset)
    ssa_dict = default_fields(Title, flux='flambda')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #37
def add_ssa(hdf, dset):
    """  Add SSA info to meta dataset

    Parameters
    ----------
    hdf
    dset : str
    """
    from specdb.ssa import default_fields
    Title = '{:s}: The XQ-100 Survey of 100 z>3 quasars with VLT/XShooter'.format(
        dset)
    ssa_dict = default_fields(Title, flux='flambda')
    hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
Example #38
def get_tslits_nires(flat_files,
                     user_settings=par,
                     gingashow=True,
                     tilt_root='tilt_nires'):
    """Precess flat files and get titlts for NIRES
    """

    # Process flat images
    tImage = traceimage.TraceImage(spectrograph,
                                   file_list=flat_files,
                                   par=par['calibrations']['traceframe'])

    tflat = tImage.process(bias_subtract='overscan',
                           trim=False)

    mstrace = tflat.copy()

    # Define pixlocn and bpm
    pixlocn = pixels.gen_pixloc(tImage.stack.shape)
    bpm = spectrograph.bpm(shape=tflat.shape, det=1)

    # Instantiate Trace
    tSlits = traceslits.TraceSlits(mstrace,
                                   pixlocn,
                                   par=par['calibrations']['slits'],
                                   binbpx=bpm)
    tslits_dict = tSlits.run(plate_scale=0.123)

    if gingashow:
        # Look at what TraceSlits was actually trying to trace
        viewer, ch = ginga.show_image(tSlits.edgearr)
        # Look at the sawtooth convolved image
        viewer, ch = ginga.show_image(tSlits.siglev)

        tmp = tSlits.edgearr * 100.
        tmp[np.where(tmp == 0.)] = 1.
        ginga.show_image(tSlits.mstrace * tmp)
        ginga.show_slits(viewer,
                         ch,
                         tSlits.lcen,
                         tSlits.rcen,
                         slit_ids=np.arange(tSlits.lcen.shape[1]) + 1,
                         pstep=50)

    if tilt_root is not None:
        # Write dict on a json file
        jdict = ltu.jsonify(tslits_dict.copy())
        ltu.savejson(tilt_root + '.json', jdict, overwrite=True, indent=None, easy_to_read=True)
        print("Wrote: {:s}".format(tilt_root + '.json'))

    return tslits_dict
Example #39
    def run(self, skip_QA=False, debug=False):
        """
        Main driver for wavelength calibration

        Code flow:
          1. Extract 1D arc spectra down the center of each unmasked slit/order
          2. Load the parameters guiding wavelength calibration
          3. Generate the 1D wavelength fits
          4. Generate a mask

        Args:
            skip_QA : bool, optional

        Returns:
            dict:  wv_calib dict

        """
        ###############
        # Extract an arc down each slit
        self.arccen, self.wvc_bpm = self.extract_arcs()

        # Fill up the calibrations and generate QA
        self.wv_calib = self.build_wv_calib(self.arccen,
                                            self.par['method'],
                                            skip_QA=skip_QA)

        # Fit 2D?
        if self.par['echelle']:
            fit2d = self.echelle_2dfit(self.wv_calib,
                                       skip_QA=skip_QA,
                                       debug=debug)
            self.wv_calib.wv_fit2d = fit2d

        # Deal with mask
        self.update_wvmask()

        # Any masked during this analysis?
        wv_masked = np.where(np.invert(self.wvc_bpm_init) & self.wvc_bpm)[0]
        if len(wv_masked) > 0:
            self.slits.mask[wv_masked] = self.slits.bitmask.turn_on(
                self.slits.mask[wv_masked], 'BADWVCALIB')

        # Pack up
        sv_par = self.par.data.copy()
        j_par = ltu.jsonify(sv_par)
        self.wv_calib['strpar'] = json.dumps(
            j_par)  #, sort_keys=True, indent=4, separators=(',', ': '))

        return self.wv_calib
Example #40
def write_bg_regions(bg_region, outfile):
    """ Write background regions to a simple JSON file

    Parameters
    ----------
    bg_region : dict
    outfile : str

    Returns
    -------

    """
    jdict = ltu.jsonify(bg_region)
    # Write
    ltu.savejson(outfile, jdict, easy_to_read=True, overwrite=True)
    print("Wrote Background Regions to {:s}", outfile)
Example #41
def write_traces(obj, arc, outfile):
    """ Write a simple JSON file
    Parameters
    ----------
    obj : float
    arc : float
    outfile : str

    Returns
    -------

    """
    tdict = dict(obj=obj, arc=arc)
    jdict = ltu.jsonify(tdict)
    # Write
    ltu.savejson(outfile, jdict, easy_to_read=True, overwrite=True)
    print("Wrote Traces to {:s}", outfile)
Example #42
def igmgjson_from_complist(complist, specfile, fwhm, outfile='IGM_model.json'):
    """ Write to a JSON file of the IGMGuesses format.

        complist : list of AbsComponents
            Ditto
        specfile : str
            Name of spectrum associated to these components
        fwhm : int
            FWHM of the spectrum
        outfile : str, optional
            Name of the output json file

        """
    import json, io
    # Create dict of the components
    out_dict = dict(cmps={}, spec_file=specfile, fwhm=fwhm, bad_pixels=[])

    for kk, comp in enumerate(complist):
        key = comp.name
        out_dict['cmps'][key] = comp.to_dict()
        # import pdb; pdb.set_trace()
        out_dict['cmps'][key]['zcomp'] = comp.zcomp
        out_dict['cmps'][key]['zfit'] = comp.zcomp
        out_dict['cmps'][key]['Nfit'] = comp.logN
        out_dict['cmps'][key]['bfit'] = comp.attrib['b']
        out_dict['cmps'][key]['wrest'] = comp._abslines[0].wrest.value
        out_dict['cmps'][key]['vlim'] = list(comp.vlim.value)
        out_dict['cmps'][key]['Reliability'] = str(comp.reliability)
        out_dict['cmps'][key]['Comment'] = str(comp.comment)
        # out_dict['cmps'][key]['mask_abslines'] = comp.mask_abslines

    # JSONify
    gd_dict = ltu.jsonify(out_dict)

    # Write file (context manager closes the file; str() replaces Python 2's unicode())
    with io.open(outfile, 'w', encoding='utf-8') as f:
        f.write(str(json.dumps(gd_dict,
                               sort_keys=True,
                               indent=4,
                               separators=(',', ': '))))
    print('Wrote: {:s}'.format(outfile))
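A hedged usage sketch for the function above (all inputs are hypothetical; `comps` is assumed to be a list of linetools AbsComponent objects carrying the attributes referenced in the loop):

# Hypothetical inputs: a component list, its spectrum file, and the spectral FWHM in pixels
igmgjson_from_complist(comps, 'spec.fits', fwhm=3, outfile='IGM_model.json')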
Example #43
def write_hdf(hdf,
              dbname,
              maindb,
              zpri,
              gdict,
              version,
              epoch=2000.,
              spaceframe='ICRS',
              **kwargs):
    """
    Parameters
    ----------
    hdf
    dbname
    maindb
    zpri
    gdict
    version : str
    epoch : float, optional
    spaceframe : str, optional

    Returns
    -------

    """
    import json
    import datetime
    # Write
    clean_table_for_hdf(maindb)
    hdf['catalog'] = maindb
    hdf['catalog'].attrs['NAME'] = str.encode(dbname)
    hdf['catalog'].attrs['EPOCH'] = epoch
    hdf['catalog'].attrs['EQUINOX'] = epoch
    hdf['catalog'].attrs['SpaceFrame'] = str.encode(spaceframe)
    hdf['catalog'].attrs['Z_PRIORITY'] = zpri
    hdf['catalog'].attrs['GROUP_DICT'] = json.dumps(ltu.jsonify(gdict))
    hdf['catalog'].attrs['CREATION_DATE'] = str.encode(
        datetime.date.today().strftime('%Y-%b-%d'))
    hdf['catalog'].attrs['VERSION'] = str.encode(version)
    # kwargs
    for key in kwargs:
        hdf['catalog'].attrs[str.encode(key)] = kwargs[key]
    # Close
    hdf.close()
Example #44
 def to_dict(self):
     """ Convert component data to a dict
     Returns
     -------
     cdict : dict
     """
     cdict = dict(Zion=self.Zion, zcomp=self.zcomp, vlim=self.vlim.to('km/s').value,
                  Name=self.name,
                  RA=self.coord.ra.value, DEC=self.coord.dec.value,
                  A=self.A, Ej=self.Ej.to('1/cm').value, comment=self.comment,
                  flag_N=self.flag_N, logN=self.logN, sig_logN=self.sig_logN)
     # AbsLines
     cdict['lines'] = {}
     for iline in self._abslines:
         cdict['lines'][iline.wrest.value] = iline.to_dict()
     # Polish
     cdict = ltu.jsonify(cdict)
     # Return
     return cdict
Example #45
def igmgjson_from_complist(complist, specfile, fwhm, outfile='IGM_model.json'):
        """ Write to a JSON file of the IGMGuesses format.

        complist : list of AbsComponents
            Ditto
        specfile : str
            Name of spectrum associated to these components
        fwhm : int
            FWHM of the spectrum
        outfile : str, optional
            Name of the output json file

        """
        import json, io
        # Create dict of the components
        out_dict = dict(cmps={},
                        spec_file=specfile,
                        fwhm=fwhm, bad_pixels=[])

        for kk, comp in enumerate(complist):
            key = comp.name
            out_dict['cmps'][key] = comp.to_dict()
            # import pdb; pdb.set_trace()
            out_dict['cmps'][key]['zcomp'] = comp.zcomp
            out_dict['cmps'][key]['zfit'] = comp.zcomp
            out_dict['cmps'][key]['Nfit'] = comp.logN
            out_dict['cmps'][key]['bfit'] = comp.attrib['b']
            out_dict['cmps'][key]['wrest'] = comp._abslines[0].wrest.value
            out_dict['cmps'][key]['vlim'] = list(comp.vlim.value)
            out_dict['cmps'][key]['Reliability'] = str(comp.reliability)
            out_dict['cmps'][key]['Comment'] = str(comp.comment)
            # out_dict['cmps'][key]['mask_abslines'] = comp.mask_abslines

        # JSONify
        gd_dict = ltu.jsonify(out_dict)

        # Write file (context manager closes the file; str() replaces Python 2's unicode())
        with io.open(outfile, 'w', encoding='utf-8') as f:
            f.write(str(json.dumps(gd_dict, sort_keys=True, indent=4, separators=(',', ': '))))
        print('Wrote: {:s}'.format(outfile))
Example #46
 def to_dict(self):
     """ Convert component data to a dict
     
     Returns
     -------
     cdict : dict
     """
     cdict = dict(Zion=self.Zion, zcomp=self.zcomp, vlim=self.vlim.to('km/s').value,
                  Name=self.name,
                  RA=self.coord.icrs.ra.value, DEC=self.coord.icrs.dec.value,
                  A=self.A, Ej=self.Ej.to('1/cm').value, comment=self.comment,
                  attrib=self.attrib.copy())  # Avoids changing the dict in place
     cdict['class'] = self.__class__.__name__
     # AbsLines
     cdict['lines'] = {}
     for iline in self._abslines:
         cdict['lines'][iline.wrest.value] = iline.to_dict()
     # Polish
     cdict = ltu.jsonify(cdict)
     # Return
     return cdict
Example #47
 def write_out(self):
     import json, io
     # Create dict
     out_dict = dict(LLS={},
                     conti_model=self.conti_dict,
                     conti=list(self.base_continuum.value),
                     spec_file=self.spec_widg.spec.filename,
                     smooth=self.smooth,
                     model_spec=self.model_spec)
     if hasattr(self.spec_widg.spec, 'labels'):
         out_dict['spec_label'] = self.spec_widg.spec.labels[
             self.model_spec]
     if self.zqso is not None:
         out_dict['zqso'] = self.zqso
     # Load
     for kk, lls in enumerate(self.abssys_widg.all_abssys):
         key = '{:d}'.format(kk + 1)
         out_dict['LLS'][key] = {}
         out_dict['LLS'][key]['z'] = lls.zabs
         out_dict['LLS'][key]['NHI'] = lls.NHI
         out_dict['LLS'][key]['bval'] = lls.lls_lines[0].attrib['b'].value
         out_dict['LLS'][key]['comment'] = str(lls.comment).strip()
     #QtCore.pyqtRemoveInputHook()
     #xdb.set_trace()
     #QtCore.pyqtRestoreInputHook()
     # Add coord
     if self.coord is not None:
         out_dict['RA'] = self.coord.ra.value
         out_dict['DEC'] = self.coord.dec.value
     # Clean for JSON
     clean_dict = ltu.jsonify(out_dict)
     # Write
     with io.open(self.outfil, 'w', encoding='utf-8') as f:
          f.write(str(json.dumps(clean_dict,  # str() stands in for the module's ustr alias
                                 sort_keys=True,
                                 indent=4,
                                 separators=(',', ': '))))
     self.flag_write = True
Example #48
    def to_json(self, outfile, overwrite=True):
        """ Generates a JSON file of the survey

        Parameters
        ----------
        outfile : str

        """
        survey_dict = OrderedDict()
        # Loop on systems
        for cgm_abs in self.cgm_abs:
            # Dict from copy
            cdict = cgm_abs.to_dict()
            # Use galaxy name for key;  Should be unique
            survey_dict[cgm_abs.galaxy.name + '_' +
                        cgm_abs.igm_sys.name] = cdict.copy()

        # JSON
        clean_dict = ltu.jsonify(survey_dict)
        ltu.savejson(outfile, clean_dict, overwrite=overwrite)
        print("Wrote: {:s}".format(outfile))
        print("You may now wish to compress it..")
Example #49
 def to_dict(self):
     """ Write EmSystem data to a dict that can be written with JSON
     """
     import datetime
     import getpass
     date = str(datetime.date.today().strftime('%Y-%b-%d'))
     user = getpass.getuser()
     # Generate the dict
     outdict = dict(Name=self.name, em_type=self.em_type,
                    vlim=self.vlim.to('km/s').value, zem=self.zem,
                    RA=self.coord.ra.value, DEC=self.coord.dec.value,
                    kin=self.kin, Refs=self.Refs, CreationDate=date,
                    ZH=self.ZH, sig_ZH=self.sig_ZH,
                    user=user
                    )
     outdict['class'] = self.__class__.__name__
     outdict['emlines'] = {}
     for iline in self._emlines:
         outdict['emlines'][iline.wrest.value] = iline.to_dict()
     # Polish
     outdict = ltu.jsonify(outdict)
     # Return
     return outdict
Example #50
 def write_out(self):
     import json, io
     # Create dict
      out_dict = dict(DLA={}, conti_model=self.conti_dict,
                      spec_file=self.spec_widg.spec.filename, smooth=self.smooth)
     if self.zqso is not None:
         out_dict['zqso'] = self.zqso
     # Load
     for kk,dla in enumerate(self.abssys_widg.all_abssys):
         key = '{:d}'.format(kk+1)
         out_dict['DLA'][key] = {}
         out_dict['DLA'][key]['z'] = dla.zabs
         out_dict['DLA'][key]['NHI'] = dla.NHI
         out_dict['DLA'][key]['bval'] = dla.dla_lines[0].attrib['b'].value
         out_dict['DLA'][key]['comment'] = str(dla.comment).strip()
     # Write
     #QtCore.pyqtRemoveInputHook()
     #xdb.set_trace()
     #QtCore.pyqtRestoreInputHook()
     clean_dict = ltu.jsonify(out_dict)
     with io.open(self.outfil, 'w', encoding='utf-8') as f:
          f.write(str(json.dumps(clean_dict, sort_keys=True, indent=4,
                                 separators=(',', ': '))))  # str() replaces Python 2's unicode()
     self.flag_write = True
Example #51
def calib_setup(sciexp, sc, det, fitsdict, calib_dict,
                write=False):
    """ Define calibration setup
    Parameters
    ----------
    sciexp
    calib_dict
    Returns
    -------
    """
    import json, io
    setup_str = [str('{:02d}'.format(i+1)) for i in range(99)]
    # Arc
    idx = sciexp._spect['arc']['index'][sc]
    disp_name = fitsdict["disperser"][idx[0]]
    disp_angle = fitsdict["cdangle"][idx[0]]
    # Common
    dichroic = fitsdict["dichroic"][idx[0]]
    decker = fitsdict["decker"][idx[0]]
    slitwid = fitsdict["slitwid"][idx[0]]
    slitlen = fitsdict["slitlen"][idx[0]]
    # Detector
    binning = fitsdict["binning"][idx[0]]
    naxis0 = fitsdict["naxis0"][idx[0]]
    naxis1 = fitsdict["naxis1"][idx[0]]

    # Generate
    # Don't nest deeper than 1
    cdict = dict(disperser={'name': disp_name,
                            'angle': disp_angle},
                 dichroic=dichroic,
                 slit={'decker': decker,
                       'slitwid': slitwid,
                       'slitlen': slitlen},
                 detector={'binning': binning,
                           'det': det,
                           'naxis0': naxis0,
                           'naxis1': naxis1},
                 )

    if len(calib_dict) == 0: # Generate
        setup = str('01')
        # Finish
        calib_dict[setup] = cdict
    else:
        # Search for a match
        setup = None
        for ckey in calib_dict.keys():
            mtch = True
            for key in calib_dict[ckey].keys():
                # Dict?
                if isinstance(calib_dict[ckey][key], dict):
                    for ikey in calib_dict[ckey][key].keys():
                        mtch &= calib_dict[ckey][key][ikey] == cdict[key][ikey]
                        #if mtch is False:
                        #    debugger.set_trace()
                else:
                    mtch &= calib_dict[ckey][key] == cdict[key]
                    #if mtch is False:
                    #    debugger.set_trace()
            if mtch:
                setup = ckey
                break
        # Augment calib_dict?
        if setup is None:
            if write is False:
                return ''
            maxs = max(calib_dict.keys())
            setup = setup_str[setup_str.index(maxs)+1]
            calib_dict[setup] = cdict

    # Write
    if write:
        gddict = ltu.jsonify(calib_dict)
        setup_file = sciexp._argflag['out']['sorted']+'.setup'
        sciexp._argflag['masters']['setup_file'] = setup_file
        with io.open(setup_file, 'w', encoding='utf-8') as f:
            f.write(str(json.dumps(gddict, sort_keys=True, indent=4,
                                   separators=(',', ': '))))  # str() replaces Python 2's unicode()

    return setup
Example #52
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False,
                 mk_test_file=False):
    """ Append HSTQSO data to the h5 file

    Parameters
    ----------
    hdf : hdf5 pointer
    meta : Table
      Meta data table for the survey
    sname : str
      Survey name
    chk_meta_only : bool, optional
      Only check meta file;  will not write
    mk_test_file : bool, optional
      Generate the debug test file for Travis?

    Returns
    -------

    """
    # Add Survey
    print("Adding {:s} survey to DB".format(sname))
    hstz2_grp = hdf.create_group(sname)
    # Checks
    if sname != 'HSTQSO':
        raise IOError("Not expecting this survey..")

    # Build spectra (and parse for meta)
    nspec = len(meta)
    max_npix = 80000  # Just needs to be large enough
    data = init_data(max_npix, include_co=False)
    # Init
    spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
                                         maxshape=(None,), compression='gzip')
    spec_set.resize((nspec,))
    wvminlist = []
    wvmaxlist = []
    npixlist = []
    Rlist = []
    # Loop
    #path = os.getenv('RAW_IGMSPEC')+'/KODIAQ_data_20150421/'
    path = os.getenv('RAW_IGMSPEC')+'/HSTQSO/'
    maxpix = 0
    for jj,row in enumerate(meta):
        # Generate full file
        full_file = path+row['SPEC_FILE']+'.gz'
        # Extract
        print("HSTQSO: Reading {:s}".format(full_file))
        hduf = fits.open(full_file)
        head0 = hduf[0].header
        spec = lsio.readspec(full_file, masking='edges')
        # Parse name
        fname = full_file.split('/')[-1]
        # npix
        npix = spec.npix
        if npix > max_npix:
            raise ValueError("Not enough pixels in the data... ({:d})".format(npix))
        else:
            maxpix = max(npix,maxpix)
        # Some fiddling about
        for key in ['wave','flux','sig']:
            data[key] = 0.  # Important to init (for compression too)
        data['flux'][0][:npix] = spec.flux.value
        data['sig'][0][:npix] = spec.sig.value
        data['wave'][0][:npix] = spec.wavelength.value
        # Meta
        if 'FOS-L' in fname:
            Rlist.append(300.)
        elif 'FOS-H' in fname:
            Rlist.append(14000.)
        elif 'STIS' in fname:
            if row['DISPERSER'] == 'G230L':
                Rlist.append(700.)
            elif row['DISPERSER'] == 'G140L':
                Rlist.append(1200.)
            else:
                raise ValueError("Bad STIS grating")
        elif 'hsla' in fname:  # COS
            Rlist.append(18000.)
            row['DATE-OBS'] = hduf[1].data['DATEOBS'][0][0]
        else:
            pdb.set_trace()
            raise ValueError("Missing instrument!")
        wvminlist.append(np.min(data['wave'][0][:npix]))
        wvmaxlist.append(np.max(data['wave'][0][:npix]))
        npixlist.append(npix)
        if chk_meta_only:
            continue
        # Only way to set the dataset correctly
        spec_set[jj] = data

    #
    print("Max pix = {:d}".format(maxpix))
    # Add columns
    meta.add_column(Column([2000.]*nspec, name='EPOCH'))
    meta.add_column(Column(npixlist, name='NPIX'))
    meta.add_column(Column(wvminlist, name='WV_MIN'))
    meta.add_column(Column(wvmaxlist, name='WV_MAX'))
    meta.add_column(Column(Rlist, name='R'))
    meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))

    # Add HDLLS meta to hdf5
    if chk_meta(meta):
        if chk_meta_only:
            pdb.set_trace()
        hdf[sname]['meta'] = meta
    else:
        raise ValueError("meta file failed")
    # References
    refs = [dict(url='http://adsabs.harvard.edu/abs/2011ApJ...736...42R',
                 bib='ribuado11'),
            dict(url='http://adsabs.harvard.edu/abs/2016ApJ...818..113N',
                         bib='neeleman16'),
            ]
    jrefs = ltu.jsonify(refs)
    hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
    #
    return
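To recover the references written above, the attribute can be decoded with `json.loads`; a sketch assuming an open h5py file handle `hdf` with the HSTQSO group populated:

import json

refs = json.loads(hdf['HSTQSO']['meta'].attrs['Refs'])
for ref in refs:
    print(ref['bib'], ref['url'])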
Example #53
def hdf5_adddata(hdf, IDs, sname, debug=False, chk_meta_only=False,
                 mk_test_file=False):
    """ Append HST_z2 data to the h5 file

    Parameters
    ----------
    hdf : hdf5 pointer
    IDs : ndarray
      int array of IGM_ID values in mainDB
    sname : str
      Survey name
    chk_meta_only : bool, optional
      Only check meta file;  will not write
    mk_test_file : bool, optional
      Generate the debug test file for Travis?

    Returns
    -------

    """
    # Add Survey
    print("Adding {:s} survey to DB".format(sname))
    hstz2_grp = hdf.create_group(sname)
    # Load up
    meta = grab_meta()
    bmeta = meta_for_build()
    # Checks
    if sname != 'HST_z2':
        raise IOError("Not expecting this survey..")
    if np.sum(IDs < 0) > 0:
        raise ValueError("Bad ID values")
    # Open Meta tables
    if len(bmeta) != len(IDs):
        raise ValueError("Wrong sized table..")

    # Generate ID array from RA/DEC
    c_cut = SkyCoord(ra=bmeta['RA'], dec=bmeta['DEC'], unit='deg')
    c_all = SkyCoord(ra=meta['RA'], dec=meta['DEC'], unit='deg')
    # Find new sources
    idx, d2d, d3d = match_coordinates_sky(c_all, c_cut, nthneighbor=1)
    if np.sum(d2d > 0.1*u.arcsec):
        raise ValueError("Bad matches in HST_z2")
    meta_IDs = IDs[idx]

    # Loop to build the full survey catalog
    meta.add_column(Column(meta_IDs, name='IGM_ID'))

    # Build spectra (and parse for meta)
    nspec = len(meta)
    max_npix = 300  # Just needs to be large enough
    data = np.ma.empty((1,),
                       dtype=[(str('wave'), 'float64', (max_npix)),
                              (str('flux'), 'float32', (max_npix)),
                              (str('sig'),  'float32', (max_npix)),
                              #(str('co'),   'float32', (max_npix)),
                             ])
    # Init
    spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
                                         maxshape=(None,), compression='gzip')
    spec_set.resize((nspec,))
    Rlist = []
    wvminlist = []
    wvmaxlist = []
    gratinglist = []
    npixlist = []
    speclist = []
    # Loop
    #path = os.getenv('RAW_IGMSPEC')+'/KODIAQ_data_20150421/'
    path = os.getenv('RAW_IGMSPEC')+'/HST_z2/'
    maxpix = 0
    for jj,row in enumerate(meta):
        # Generate full file
        if row['INSTR'] == 'ACS':
            full_file = path+row['qso']+'.fits.gz'
        elif row['INSTR'] == 'WFC3':
            coord = ltu.radec_to_coord((row['RA'],row['DEC']))
            full_file = path+'/J{:s}{:s}_wfc3.fits.gz'.format(coord.ra.to_string(unit=u.hour,sep='',precision=2,pad=True),
                                               coord.dec.to_string(sep='',pad=True,alwayssign=True,precision=1))
        # Extract
        print("HST_z2: Reading {:s}".format(full_file))
        hduf = fits.open(full_file)
        head = hduf[0].header
        spec = lsio.readspec(full_file)
        # Parse name
        fname = full_file.split('/')[-1]
        # npix
        npix = spec.npix
        if npix > max_npix:
            raise ValueError("Not enough pixels in the data... ({:d})".format(npix))
        else:
            maxpix = max(npix,maxpix)
        # Some fiddling about
        for key in ['wave','flux','sig']:
            data[key] = 0.  # Important to init (for compression too)
        data['flux'][0][:npix] = spec.flux.value
        data['sig'][0][:npix] = spec.sig.value
        data['wave'][0][:npix] = spec.wavelength.value
        # Meta
        speclist.append(str(fname))
        wvminlist.append(np.min(data['wave'][0][:npix]))
        wvmaxlist.append(np.max(data['wave'][0][:npix]))
        npixlist.append(npix)
        if chk_meta_only:
            continue
        # Only way to set the dataset correctly
        spec_set[jj] = data

    #
    print("Max pix = {:d}".format(maxpix))
    # Add columns
    meta.add_column(Column([2000.]*nspec, name='EPOCH'))
    meta.add_column(Column(speclist, name='SPEC_FILE'))
    meta.add_column(Column(npixlist, name='NPIX'))
    meta.add_column(Column(wvminlist, name='WV_MIN'))
    meta.add_column(Column(wvmaxlist, name='WV_MAX'))
    meta.add_column(Column(np.arange(nspec,dtype=int),name='SURVEY_ID'))

    # Add HDLLS meta to hdf5
    if iiu.chk_meta(meta):
        if chk_meta_only:
            pdb.set_trace()
        hdf[sname]['meta'] = meta
    else:
        raise ValueError("meta file failed")
    # References
    refs = [dict(url='http://adsabs.harvard.edu/abs/2011ApJS..195...16O',
                 bib='omeara11')
            ]
    jrefs = ltu.jsonify(refs)
    hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
    #
    return
Example #54
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False, boss_hdf=None, **kwargs):
    """ Add BOSS data to the DB

    Parameters
    ----------
    hdf : hdf5 pointer
    meta : Table
      Meta data table for the survey
    sname : str
      Survey name
    chk_meta_only : bool, optional
      Only check meta file;  will not write
    boss_hdf : str, optional


    Returns
    -------

    """
    # Add Survey
    print("Adding {:s} survey to DB".format(sname))
    if boss_hdf is not None:
        print("Using previously generated {:s} dataset...".format(sname))
        boss_hdf.copy(sname, hdf)
        return
    boss_grp = hdf.create_group(sname)

    # Build spectra (and parse for meta)
    nspec = len(meta)
    max_npix = 4650  # Just needs to be large enough
    data = init_data(max_npix, include_co=True)
    # Init
    spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
                                         maxshape=(None,), compression='gzip')
    spec_set.resize((nspec,))
    wvminlist = []
    wvmaxlist = []
    speclist = []
    npixlist = []
    # Loop
    maxpix = 0
    for jj,row in enumerate(meta):
        # Generate full file
        full_file = get_specfil(row)
        if full_file == 'None':
            continue
        # Read
        spec = lsio.readspec(full_file)
        # npix
        npix = spec.npix
        # Kludge for highest redshift systems
        if npix < 10:
            full_file = get_specfil(row, hiz=True)
            try:
                spec = lsio.readspec(full_file)
            except IOError:
                print("Missing: {:s}".format(full_file))
            npix = spec.npix
        elif npix > max_npix:
            raise ValueError("Spectrum has {:d} pixels; increase max_npix".format(npix))
        else:
            maxpix = max(npix,maxpix)
        # Parse name
        fname = full_file.split('/')[-1]
        # Fill
        for key in ['wave','flux','sig']:
            data[key] = 0.  # Important to init (for compression too)
        data['flux'][0][:npix] = spec.flux.value
        data['sig'][0][:npix] = spec.sig.value
        data['wave'][0][:npix] = spec.wavelength.value
        # GZ Continuum -- packed in with spectrum, generated by my IDL script
        try:
            co = spec.co.value
        except AttributeError:
            co = np.zeros_like(spec.flux.value)
        # KG Continuum
        KG_file = get_specfil(row, KG=True)
        if os.path.isfile(KG_file) and (npix>1):  # Latter is for junk in GZ file.  Needs fixing
            hduKG = fits.open(KG_file)
            KGtbl = hduKG[1].data
            wvKG = 10.**KGtbl['LOGLAM']
            try:
                assert np.abs(wvKG[0]-spec.wavelength[0].value) < 1e-5
            except AssertionError:
                pdb.set_trace()
            gdpix = np.where(wvKG < (1+row['zem_GROUP'])*1200.)[0]
            co[gdpix] = KGtbl['CONT'][gdpix]
        data['co'][0][:npix] = co
        # Meta
        speclist.append(str(fname))
        wvminlist.append(np.min(data['wave'][0][:npix]))
        wvmaxlist.append(np.max(data['wave'][0][:npix]))
        npixlist.append(npix)
        if chk_meta_only:
            continue
        # Only way to set the dataset correctly
        spec_set[jj] = data

    #
    print("Max pix = {:d}".format(maxpix))
    # Add columns
    meta.add_column(Column(speclist, name='SPEC_FILE'))
    meta.add_column(Column(npixlist, name='NPIX'))
    meta.add_column(Column(wvminlist, name='WV_MIN'))
    meta.add_column(Column(wvmaxlist, name='WV_MAX'))
    meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    meta.add_column(Column([2000.]*len(meta), name='EPOCH'))

    # Add meta to hdf5
    if chk_meta(meta):
        if chk_meta_only:
            pdb.set_trace()
        hdf[sname]['meta'] = meta
    else:
        raise ValueError("meta file failed")
    # References
    refs = [dict(url='http://adsabs.harvard.edu/abs/2015ApJS..219...12A',
                 bib='boss_qso_dr12'),
            ]
    jrefs = ltu.jsonify(refs)
    hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
    #
    return
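
Because re-reading the full BOSS sample is slow, the boss_hdf option copies a previously built group wholesale. A sketch of that reuse path follows; the file names and the 'BOSS_DR12' group name are hypothetical, and meta is still passed positionally although it is unused on this path.

# Re-use a previously built BOSS dataset (hypothetical file names).
import h5py

old_hdf = h5py.File('previous_build.hdf5', 'r')
new_hdf = h5py.File('new_build.hdf5', 'a')
hdf5_adddata(new_hdf, 'BOSS_DR12', meta, boss_hdf=old_hdf)  # returns right after the h5py copy
old_hdf.close()
new_hdf.close()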
Exemple #55
0
def save_masters(slf, det, setup):
    """ Save Master Frames

    Parameters
    ----------
    slf
      Reduction object carrying the master frames and argflag settings
    det : int
      Detector index (1-based)
    setup : str
      Setup identifier used in the master file names

    Returns
    -------

    """
    from linetools import utils as ltu
    import io, json

    # MasterFrame directory
    mdir = slf._argflag["run"]["masterdir"]
    # Bias
    if "bias" + slf._argflag["masters"]["setup"] not in slf._argflag["masters"]["loaded"]:
        if not isinstance(slf._msbias[det - 1], str):  # A str value flags a directive, not an image
            arsave.save_master(slf, slf._msbias[det - 1], filename=master_name(mdir, "bias", setup), frametype="bias")
    # Bad Pixel
    if "badpix" + slf._argflag["masters"]["setup"] not in slf._argflag["masters"]["loaded"]:
        arsave.save_master(slf, slf._bpix[det - 1], filename=master_name(mdir, "badpix", setup), frametype="badpix")
    # Trace
    if "trace" + slf._argflag["masters"]["setup"] not in slf._argflag["masters"]["loaded"]:
        extensions = [
            slf._lordloc[det - 1],
            slf._rordloc[det - 1],
            slf._pixcen[det - 1],
            slf._pixwid[det - 1],
            slf._lordpix[det - 1],
            slf._rordpix[det - 1],
        ]
        arsave.save_master(
            slf,
            slf._mstrace[det - 1],
            filename=master_name(mdir, "trace", setup),
            frametype="trace",
            extensions=extensions,
        )
    # Pixel Flat
    if "normpixflat" + slf._argflag["masters"]["setup"] not in slf._argflag["masters"]["loaded"]:
        arsave.save_master(
            slf, slf._mspixflatnrm[det - 1], filename=master_name(mdir, "normpixflat", setup), frametype="normpixflat"
        )
    # Arc/Wave
    if "arc" + slf._argflag["masters"]["setup"] not in slf._argflag["masters"]["loaded"]:
        arsave.save_master(
            slf,
            slf._msarc[det - 1],
            filename=master_name(mdir, "arc", setup),
            frametype="arc",
            keywds=dict(transp=slf._transpose),
        )
    if "wave" + slf._argflag["masters"]["setup"] not in slf._argflag["masters"]["loaded"]:
        # Wavelength image
        arsave.save_master(slf, slf._mswave[det - 1], filename=master_name(mdir, "wave", setup), frametype="wave")
        # Wavelength fit
        gddict = ltu.jsonify(slf._wvcalib[det - 1])
        json_file = master_name(mdir, "wave_calib", setup)
        with io.open(json_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(gddict, sort_keys=True, indent=4, separators=(",", ": ")))
    if "tilts" + slf._argflag["masters"]["setup"] not in slf._argflag["masters"]["loaded"]:
        arsave.save_master(slf, slf._tilts[det - 1], filename=master_name(mdir, "tilts", setup), frametype="tilts")
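
The wavelength-calibration dict written above is plain JSON once ltu.jsonify has stripped the Quantities, so reading it back needs only the standard library. A sketch, assuming the same master_name() convention as in the code above:

# Read the wavelength-calibration master back from disk (sketch).
import json

json_file = master_name(mdir, "wave_calib", setup)  # same naming helper as above
with open(json_file, 'r') as f:
    wv_calib = json.load(f)  # plain dict; units were stripped by ltu.jsonify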
Exemple #56
0
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False):
    """ Append XQ-100 data to the h5 file

    Parameters
    ----------
    hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta data table for the survey
    chk_meta_only : bool, optional
      Only check meta file;  will not write

    Returns
    -------

    """
    # Add Survey
    print("Adding {:s} survey to DB".format(sname))
    xq100_grp = hdf.create_group(sname)
    if len(meta) != 300:  # 100 quasars x 3 spectrograph arms
        raise ValueError("Expecting 300 XQ-100 spectra, got {:d}".format(len(meta)))
    # Checks
    if sname != 'XQ-100':
        raise IOError("Expecting XQ-100!!")

    # Build spectra (and parse for meta)
    nspec = len(meta)
    max_npix = 20000  # Just needs to be large enough
    data = init_data(max_npix, include_co=True)
    # Init
    spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
                                         maxshape=(None,), compression='gzip')
    spec_set.resize((nspec,))
    Rlist = []
    wvminlist = []
    wvmaxlist = []
    npixlist = []
    speclist = []
    gratinglist = []
    telelist = []
    instrlist = []
    # Loop
    maxpix = 0
    for jj,row in enumerate(meta):
        #
        print("XQ-100: Reading {:s}".format(row['SPEC_FILE']))
        spec = lsio.readspec(row['SPEC_FILE'])
        # Parse name
        fname = row['SPEC_FILE'].split('/')[-1]
        # npix
        npix = spec.npix
        if npix > max_npix:
            raise ValueError("Spectrum has {:d} pixels; increase max_npix".format(npix))
        else:
            maxpix = max(npix,maxpix)
        # Continuum
        # Some fiddling about
        for key in ['wave','flux','sig']:
            data[key] = 0.  # Important to init (for compression too)
        data['flux'][0][:npix] = spec.flux.value
        data['sig'][0][:npix] = spec.sig.value
        data['wave'][0][:npix] = spec.wavelength.to('AA').value
        data['co'][0][:npix] = spec.co.value
        # Meta
        head = spec.header
        speclist.append(str(fname))
        wvminlist.append(np.min(data['wave'][0][:npix]))
        wvmaxlist.append(np.max(data['wave'][0][:npix]))
        telelist.append(head['TELESCOP'])
        instrlist.append(head['INSTRUME'])
        gratinglist.append(head['DISPELEM'])
        npixlist.append(npix)
        if gratinglist[-1] == 'NIR':  # From Lopez+16
            Rlist.append(4350.)
        elif gratinglist[-1] == 'VIS':
            Rlist.append(7450.)
        elif gratinglist[-1] == 'UVB':
            Rlist.append(5300.)
        else:
            raise ValueError("Unexpected XQ-100 arm: {:s}".format(gratinglist[-1]))
        # Only way to set the dataset correctly
        if chk_meta_only:
            continue
        spec_set[jj] = data

    #
    print("Max pix = {:d}".format(maxpix))
    # Add columns
    meta.add_column(Column(gratinglist, name='DISPERSER'))
    meta.add_column(Column(telelist, name='TELESCOPE'))
    meta.add_column(Column(instrlist, name='INSTR'))
    meta.add_column(Column(npixlist, name='NPIX'))
    meta.add_column(Column(wvminlist, name='WV_MIN'))
    meta.add_column(Column(wvmaxlist, name='WV_MAX'))
    meta.add_column(Column(Rlist, name='R'))
    meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))

    # Add meta to hdf5
    if chk_meta(meta):
        if chk_meta_only:
            pdb.set_trace()
        hdf[sname]['meta'] = meta
    else:
        raise ValueError("meta file failed")

    # References
    refs = [dict(url='http://adsabs.harvard.edu/abs/2016arXiv160708776L',
                 bib='lopez+16')]
    jrefs = ltu.jsonify(refs)
    hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
    #
    return
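
The arm-to-resolution branching above (values quoted from Lopez+16) could equally be table-driven; a small equivalent sketch:

# Nominal XQ-100 resolving power per X-Shooter arm, same values as the
# if/elif chain above (from Lopez+16); a dict lookup in place of branching.
XQ100_R = {'UVB': 5300., 'VIS': 7450., 'NIR': 4350.}

def xq100_resolution(arm):
    try:
        return XQ100_R[arm]
    except KeyError:
        raise ValueError("Unexpected XQ-100 arm: {:s}".format(arm))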
Exemple #57
0
    def from_dict(cls, idict, coord=None, warn_only=False, chk_data=True, **kwargs):
        """ Initialize from a dict (usually read from disk)

        Parameters
        ----------
        idict : dict
          dict with the Line parameters
        chk_data : bool, optional
          Check atomic data in dict against current values in LineList
        warn_only : bool, optional
          If the chk_data is performed and the values do not match, only
          throw a Warning as opposed to crashing

        Returns
        -------
        sline : SpectralLine
         SpectralLine of the proper type

        """
        # Init
        if idict['ltype'] == 'Abs':
            # TODO: remove this try/except eventually
            try:
                sline = AbsLine(idict['name'], **kwargs)
            except KeyError:  # Stay compatible with JSON files already written with the old notation (e.g. DLA H100)
                sline = AbsLine(idict['trans'], **kwargs)
        elif idict['ltype'] == 'Em':
            sline = EmLine(idict['name'], **kwargs)
        else:
            raise ValueError("Not prepared for type {:s}.".format(idict['ltype']))
        # Check data
        if chk_data:
            for key in sline.data.keys():
                if key not in idict['data'].keys():
                    warnings.warn("Key {:s} not in your input dict".format(key))
                    continue
                if isinstance(idict['data'][key], dict):  # Assume Quantity
                    val = idict['data'][key]['value']
                else:
                    val = idict['data'][key]
                try:
                    assert sline.data[key] == val
                except AssertionError:
                    if warn_only:
                        warnings.warn("Different data value for {:s}: {}, {}".format(key,sline.data[key],val))
                    else:
                        raise
        # Set analy
        for key in idict['analy'].keys():
            if isinstance(idict['analy'][key], dict):  # Assume Quantity
                sline.analy[key] = ltu.convert_quantity_in_dict(idict['analy'][key])
            elif key == 'spec_file':
                # spec_file holds the name of the spectrum file;
                # analy['spec'] is meant to hold the XSpectrum1D object itself
                sline.analy[key] = idict['analy'][key]
            else:
                sline.analy[key] = idict['analy'][key]

        # Set attrib
        for key in idict['attrib'].keys():
            if isinstance(idict['attrib'][key], dict):
                sline.attrib[key] = ltu.convert_quantity_in_dict(idict['attrib'][key])
            elif key in ['RA','DEC']:
                if coord is None:
                    sline.attrib['coord'] = SkyCoord(ra=idict['attrib']['RA']*u.deg,
                                                  dec=idict['attrib']['DEC']*u.deg)
                else:
                    sline.attrib['coord'] = coord
            else:
                sline.attrib[key] = idict['attrib'][key]

        # Set z and limits
        if 'z' in sline.attrib.keys():  # Backwards compatibility
            z = sline.attrib.pop('z')
        else:
            z = 0.
        if 'limits' in idict.keys():
            if 'wrest' not in idict['limits'].keys():  # compatibility with IGMGuesses
                idict['limits']['wrest'] = ltu.jsonify(sline.wrest)
                idict['limits']['z'] = z
            sline.limits = zLimits.from_dict(idict['limits'])
        else:
            sline.limits = zLimits(z, [z,z], wrest=sline.wrest)
            if 'vlim' in sline.analy.keys():  # Backwards compatibility
                if sline.analy['vlim'][1] > sline.analy['vlim'][0]:
                    sline.limits.set(sline.analy['vlim'])
            elif 'wvlim' in sline.analy.keys():  # Backwards compatibility
                if sline.analy['wvlim'][1] > sline.analy['wvlim'][0]:
                    sline.limits.set(sline.analy['wvlim'])
        return sline
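
This classmethod is the read half of a JSON round trip with to_dict(). A minimal sketch using a real linetools transition (Lya), assuming from_dict is bound to SpectralLine as the docstring's return type suggests:

# Round-trip sketch: serialize an absorption line and rebuild it.
from linetools.spectralline import AbsLine, SpectralLine

aline = AbsLine('HI 1215')              # Lya; atomic data from the LineList
adict = aline.to_dict()                 # JSON-friendly dict (Quantities become sub-dicts)
aline2 = SpectralLine.from_dict(adict)  # dispatches on ltype == 'Abs'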
Exemple #58
0
def hdf5_adddata(hdf, IDs, sname, debug=False, chk_meta_only=False):
    """ Add SDSS data to the DB

    Parameters
    ----------
    hdf : hdf5 pointer
    IDs : ndarray
      int array of IGM_ID values in mainDB
    sname : str
      Survey name
    chk_meta_only : bool, optional
      Only check meta file;  will not write

    Returns
    -------

    """
    # Add Survey
    print("Adding {:s} survey to DB".format(sname))
    sdss_grp = hdf.create_group(sname)
    # Load up
    meta = grab_meta()
    bmeta = meta_for_build()
    # Checks
    if sname != 'SDSS_DR7':
        raise IOError("Not expecting this survey..")
    if np.sum(IDs < 0) > 0:
        raise ValueError("Bad ID values")
    # Open Meta tables
    if len(bmeta) != len(IDs):
        raise ValueError("Wrong sized table..")

    # Generate ID array from RA/DEC
    c_cut = SkyCoord(ra=bmeta['RA'], dec=bmeta['DEC'], unit='deg')
    c_all = SkyCoord(ra=meta['RA'], dec=meta['DEC'], unit='deg')
    # Find new sources
    idx, d2d, d3d = match_coordinates_sky(c_all, c_cut, nthneighbor=1)
    if np.sum(d2d > 1.2*u.arcsec):  # There is one system offset by 1.1"
        raise ValueError("Bad matches in SDSS")
    meta_IDs = IDs[idx]
    meta.add_column(Column(meta_IDs, name='IGM_ID'))

    # TODO: Add zem

    # Build spectra (and parse for meta)
    nspec = len(meta)
    max_npix = 4000  # Just needs to be large enough
    data = np.ma.empty((1,),
                       dtype=[(str('wave'), 'float64', (max_npix)),
                              (str('flux'), 'float32', (max_npix)),
                              (str('sig'),  'float32', (max_npix)),
                              #(str('co'),   'float32', (max_npix)),
                             ])
    # Init
    spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
                                         maxshape=(None,), compression='gzip')
    spec_set.resize((nspec,))
    wvminlist = []
    wvmaxlist = []
    npixlist = []
    speclist = []
    # Loop
    maxpix = 0
    for jj,row in enumerate(meta):
        full_file = get_specfil(row)
        # Extract
        print("SDSS: Reading {:s}".format(full_file))
        # Parse name
        fname = full_file.split('/')[-1]
        if debug:
            if jj > 500:
                # Speed-up: re-use npix/data from the previous iteration rather than reading the file
                speclist.append(str(fname))
                if not os.path.isfile(full_file):
                    raise IOError("SDSS file {:s} does not exist".format(full_file))
                wvminlist.append(np.min(data['wave'][0][:npix]))
                wvmaxlist.append(np.max(data['wave'][0][:npix]))
                npixlist.append(npix)
                continue
        # Read
        spec = lsio.readspec(full_file)
        # npix
        npix = spec.npix
        if npix > max_npix:
            raise ValueError("Spectrum has {:d} pixels; increase max_npix".format(npix))
        else:
            maxpix = max(npix,maxpix)
        # Some fiddling about
        for key in ['wave','flux','sig']:
            data[key] = 0.  # Important to init (for compression too)
        data['flux'][0][:npix] = spec.flux.value
        data['sig'][0][:npix] = spec.sig.value
        data['wave'][0][:npix] = spec.wavelength.value
        # Meta
        speclist.append(str(fname))
        wvminlist.append(np.min(data['wave'][0][:npix]))
        wvmaxlist.append(np.max(data['wave'][0][:npix]))
        npixlist.append(npix)
        # Only way to set the dataset correctly
        if chk_meta_only:
            continue
        spec_set[jj] = data

    #
    print("Max pix = {:d}".format(maxpix))
    # Add columns
    meta.add_column(Column(speclist, name='SPEC_FILE'))
    meta.add_column(Column(npixlist, name='NPIX'))
    meta.add_column(Column(wvminlist, name='WV_MIN'))
    meta.add_column(Column(wvmaxlist, name='WV_MAX'))
    meta.add_column(Column(np.arange(nspec,dtype=int),name='SURVEY_ID'))

    # Add meta to hdf5
    if iiu.chk_meta(meta):
        if chk_meta_only:
            pdb.set_trace()
        hdf[sname]['meta'] = meta
    else:
        raise ValueError("meta file failed")
    # References
    refs = [dict(url='http://adsabs.harvard.edu/abs/2010AJ....139.2360S',
                 bib='sdss_qso_dr7'),
            ]
    jrefs = ltu.jsonify(refs)
    hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
    #
    return
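
The IGM_ID assignment above hinges on astropy's match_coordinates_sky; a standalone toy sketch of that cross-match step, with made-up coordinates:

# Toy demonstration of the RA/DEC cross-match used above to assign IGM_IDs.
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord, match_coordinates_sky

c_cut = SkyCoord(ra=[10.0, 20.0], dec=[1.0, 2.0], unit='deg')     # build catalog
c_all = SkyCoord(ra=[20.0001, 10.0], dec=[2.0, 1.0], unit='deg')  # survey meta
idx, d2d, d3d = match_coordinates_sky(c_all, c_cut, nthneighbor=1)
assert np.all(d2d < 1.2*u.arcsec)  # same tolerance as the code above
# idx maps each c_all entry to its counterpart in c_cut: array([1, 0])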