Example #1
def main(pargs):
    """ Run
    """
    from linetools import utils as ltu
    from tennis import tourney
    from tennis import io as tennis_io

    # Load seeds
    seeds = tennis_io.load_seeding(pargs.names)

    # Generate
    matches = tourney.generate_match_ups(seeds)

    # Write to JSON
    if pargs.json_file is not None:
        ltu.savejson(pargs.json_file,
                     matches,
                     overwrite=True,
                     easy_to_read=True)
        print("Wrote: {:s}".format(pargs.json_file))

    # Table
    if pargs.table_file is not None:
        mtch_tbl = tourney.table_from_matches(matches)
        mtch_tbl.write(pargs.table_file, format='csv')
Example #2
 def write_to_json(self, outfile):
     # Generate the dict
     igms_dict = self.to_dict()
     # Jsonify
     clean_dict = ltu.jsonify(igms_dict)
     # Write
     ltu.savejson(outfile, clean_dict, overwrite=True)
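The pattern above repeats throughout these examples: build a plain dict (often via a to_dict() method), run it through ltu.jsonify() so numpy and astropy types become JSON-safe, then write with ltu.savejson(). A minimal, self-contained sketch of that round trip; the payload and the filename 'demo.json' are placeholders, not taken from any of the projects shown here:

# Sketch of the jsonify + savejson + loadjson round trip (placeholder data).
import numpy as np
from linetools import utils as ltu

data = {'name': 'demo', 'values': np.arange(3), 'NHI': np.float64(20.3)}
clean = ltu.jsonify(data)            # numpy arrays/scalars -> lists/floats
ltu.savejson('demo.json', clean, overwrite=True, easy_to_read=True)
loaded = ltu.loadjson('demo.json')   # returns plain Python dicts and lists
assert loaded['values'] == [0, 1, 2]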
Example #3
 def save(self, idx):
     # Update dict
     self.update_dict(idx)
     # Save
     cjson = ltu.jsonify(self.zdict)
     ltu.savejson(self.outfile, cjson, overwrite=True, easy_to_read=True)
     print("Wrote: {:s}".format(self.outfile))
Example #4
def test_write_sngl():
    from linetools import utils as ltu
    # Class
    cos_halos = COSHalos(fits_path=data_path(''),
                         cdir=data_path(''),
                         load=False)
    # Load
    cos_halos.load_single_fits(('J0950+4831', '177_27'))
    # Write to JSON
    cdict = cos_halos.cgm_abs[0].to_dict()
    ltu.savejson('tmp.json', cdict, overwrite=True)
Example #5
def test_dicts():
    # Init HI Lya
    abslin = AbsLine(1215.6700*u.AA)
    adict = abslin.to_dict()
    assert isinstance(adict, dict)
    # Write
    #pdb.set_trace()
    ltu.savejson('tmp.json', adict, overwrite=True)
    # Read
    newdict = ltu.loadjson('tmp.json')
    newlin = SpectralLine.from_dict(newdict)
    assert newlin.name == 'HI 1215'
Example #7
def test_dicts():
    # Init Halpha
    emisslin = EmLine(6564.613 * u.AA)
    emisslin.analy['spec'] = 'tmp.fits'
    edict = emisslin.to_dict()
    assert isinstance(edict, dict)
    # Write
    ltu.savejson('tmp.json', edict, overwrite=True)
    # Read
    newdict = ltu.loadjson('tmp.json')
    newlin = SpectralLine.from_dict(newdict)
    assert newlin.name == 'Halpha'
    assert newlin.ltype == 'Em'
Example #8
def test_to_dict():
    from linetools import utils as ltu
    radec = (125 * u.deg, 45.2 * u.deg)
    gal = Galaxy(radec, z=0.3)
    radec_qso = (125 * u.deg, 45.203 * u.deg)
    igmsys = IGMSystem(radec_qso,
                       gal.z, [-500, 500] * u.km / u.s,
                       abs_type='CGM')
    # Instantiate
    cgmabs = CGMAbsSys(gal, igmsys)
    # Test
    cdict = cgmabs.to_dict()
    ltu.savejson('tmp.json', cdict, overwrite=True)
Example #9
def get_tslits_nires(flat_files,
                     user_settings=par,
                     gingashow=True,
                     tilt_root='tilt_nires'):
    """Process flat files and get tilts for NIRES
    """

    # Process flat images
    tImage = traceimage.TraceImage(spectrograph,
                                   file_list=flat_files,
                                   par=par['calibrations']['traceframe'])

    tflat = tImage.process(bias_subtract='overscan',
                           trim=False)

    mstrace = tflat.copy()

    # Define pixlocn and bpm
    pixlocn = pixels.gen_pixloc(tImage.stack.shape)
    bpm = spectrograph.bpm(shape=tflat.shape, det=1)

    # Instantiate Trace
    tSlits = traceslits.TraceSlits(mstrace,
                                   pixlocn,
                                   par=par['calibrations']['slits'],
                                   binbpx=bpm)
    tslits_dict = tSlits.run(plate_scale = 0.123)

    if gingashow:
        # Look at what TraceSlits was actually trying to trace
        viewer, ch = ginga.show_image(tSlits.edgearr)
        # Look at the sawtooth convolved image
        viewer, ch = ginga.show_image(tSlits.siglev)

        tmp = tSlits.edgearr * 100.
        tmp[np.where(tmp == 0.)] = 1.
        ginga.show_image(tSlits.mstrace * tmp)
        ginga.show_slits(viewer,
                         ch,
                         tSlits.lcen,
                         tSlits.rcen,
                         slit_ids=np.arange(tSlits.lcen.shape[1]) + 1,
                         pstep=50)

    if tilt_root is not None:
        # Write dict on a json file
        jdict = ltu.jsonify(tslits_dict.copy())
        ltu.savejson(tilt_root + '.json', jdict, overwrite=True, indent=None, easy_to_read=True)
        print("Wrote: {:s}".format(tilt_root + '.json'))

    return tslits_dict
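Reading the slit-trace dict back is symmetric. A hedged sketch; the filename follows the default tilt_root above, and the available keys depend on what TraceSlits.run() returned:

# Sketch: recover the tslits_dict written above (default tilt_root filename).
from linetools import utils as ltu

tslits_dict = ltu.loadjson('tilt_nires.json')
print(sorted(tslits_dict.keys()))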
Example #10
    def write_json(self, outfil=None):
        """ Generate a JSON file from the system

        Returns
        -------

        """
        # Generate the dict
        odict = self.to_dict()
        # Write
        if outfil is None:
            outfil = self.name+'.json'
        ltu.savejson(outfil, odict, overwrite=True, easy_to_read=True)
        # Finish
        print("Wrote {:s} system to {:s} file".format(self.name, outfil))
Example #12
File: io.py Project: PYPIT/COS_REDUX
def write_bg_regions(bg_region, outfile):
    """ Write background regions to a simple JSON file

    Parameters
    ----------
    bg_region : dict
    outfile : str

    Returns
    -------

    """
    jdict = ltu.jsonify(bg_region)
    # Write
    ltu.savejson(outfile, jdict, easy_to_read=True, overwrite=True)
    print("Wrote Background Regions to {:s}", outfile)
Example #13
    def write(self, outfile, overwrite=True):
        """ Write to a JSON file

        Parameters
        ----------
        outfile : str
        overwrite : bool, optional

        Returns
        -------

        """
        # Generate the dict
        cdict = self.to_dict()
        # Write
        ltu.savejson(outfile, cdict, overwrite=overwrite, easy_to_read=True)
        print("Wrote AbsComponent to {:s}".format(outfile))
Example #15
File: io.py Project: PYPIT/COS_REDUX
def write_traces(obj, arc, outfile):
    """ Write a simple JSON file
    Parameters
    ----------
    obj : float
    arc : float
    outfile : str

    Returns
    -------

    """
    tdict = dict(obj=obj, arc=arc)
    jdict = ltu.jsonify(tdict)
    # Write
    ltu.savejson(outfile, jdict, easy_to_read=True, overwrite=True)
    print("Wrote Traces to {:s}", outfile)
Example #16
    def write_json(self, outfil=None, overwrite=True):
        """ Generate a JSON file from the system

        Parameters
        ----------
        outfil : str, optional
          Output filename;  generated from system name if not provided
        overwrite : bool, optional
          Overwrite?
        """
        # Generate the dict
        odict = self.to_dict()
        # Write
        if outfil is None:
            outfil = self.name+'.json'
        ltu.savejson(outfil, odict, overwrite=overwrite, easy_to_read=True)
        # Finish
        print("Wrote {:s} system to {:s} file".format(self.name, outfil))
Example #17
def test_dicts():
    # Init HI Lya
    abslin = AbsLine(1215.6700*u.AA)
    abslin.analy['spec'] = 'tmp.fits'
    adict = abslin.to_dict()
    assert isinstance(adict, dict)
    # Write
    #pdb.set_trace()
    ltu.savejson('tmp.json', adict, overwrite=True)
    # Read
    newdict = ltu.loadjson('tmp.json')
    newlin = SpectralLine.from_dict(newdict)
    assert newlin.name == 'HI 1215'
    # Old dict for compatibility
    newdict.pop('limits')
    newdict['analy']['vlim'] = [-150,150]*u.km/u.s
    newdict['attrib']['z'] = 0.5
    tmp3 = SpectralLine.from_dict(newdict)
    assert tmp3.name == 'HI 1215'
Example #19
    def to_json(self, outfile, overwrite=True):
        """ Generates a JSON file of the survey

        Parameters
        ----------
        outfile : str

        """
        survey_dict = OrderedDict()
        # Loop on systems
        for cgm_abs in self.cgm_abs:
            # Dict from copy
            cdict = cgm_abs.to_dict()
            # Use galaxy name for key;  Should be unique
            survey_dict[cgm_abs.galaxy.name + '_' +
                        cgm_abs.igm_sys.name] = cdict.copy()

        # JSON
        clean_dict = ltu.jsonify(survey_dict)
        ltu.savejson(outfile, clean_dict, overwrite=overwrite)
        print("Wrote: {:s}".format(outfile))
        print("You may now wish to compress it..")
Example #20
def update_dla_fits(new_fits):
    import datetime
    import getpass
    # Load existing
    dla_fits, fit_file = load_dla_fits()

    # Write fit
    date = str(datetime.date.today().strftime('%Y-%b-%d'))
    user = getpass.getuser()
    #
    for key in new_fits:
        # Add
        if key not in dla_fits.keys():
            dla_fits[key] = {}
        for subkey in new_fits[key]:
            dla_fits[key][subkey] = new_fits[key][subkey]
            dla_fits[key][subkey]['CreationDate'] = date
            dla_fits[key][subkey]['User'] = user
    # Write
    jdfits = ltu.jsonify(dla_fits)
    ltu.savejson(fit_file, jdfits, easy_to_read=True, overwrite=True)
    print("Wrote: {:s}".format(fit_file))
Example #21
def test_save_load_json():
    tmp_dict = dict(a=1, b=2, c='adsf')
    # Write
    ltu.savejson('tmp.json', tmp_dict, overwrite=True)
    # Load
    new_dict = ltu.loadjson('tmp.json')
    assert new_dict['a'] == 1
    # Write with gzip
    ltu.savejson('tmp.json.gz', tmp_dict, overwrite=True)
    # Load
    new_dict = ltu.loadjson('tmp.json.gz')
    assert new_dict['a'] == 1
    # Write with easy to read
    ltu.savejson('tmp2.json', tmp_dict, overwrite=True, easy_to_read=True)
    new_dict2 = ltu.loadjson('tmp2.json')
    assert new_dict2['a'] == 1
Example #23
    def write_to_igmguesses(self,
                            outfile,
                            fwhm=3.,
                            specfilename=None,
                            creator=None,
                            instrument='unknown',
                            altname='unknown',
                            date=None,
                            overwrite=False):
        """
        Writes an IGMGuesses formatted JSON file

        Parameters
        ----------
        outfile : str
            Name of the IGMGuesses JSON file to write to.
        fwhm : float
            FWHM for IGMguesses
        specfilename : str
            Name of the spectrum file these guesses are associated to.
        altname : str
            Alternative name for the sightline. e.g. 3C273
        creator : str
            Name of the person who is creating the file. Important for tracking.
        instrument : str
            String indicating the instrument and its configuration associated to the specfilename.
            e.g. HST/COS/G130M+G160M/LP2
        overwrite : bool
            Whether to overwrite output


        Returns
        -------

        """
        import datetime
        # Slurp IGMGuesses component attributes
        from pyigm.guis.igmguesses import comp_init_attrib
        from linetools.isgm.abscomponent import AbsComponent
        tmp = AbsComponent((10.0 * u.deg, 45 * u.deg), (14, 2), 1.0,
                           [-300, 300] * u.km / u.s)
        comp_init_attrib(tmp)
        igmg_akeys = list(tmp.attrib.keys())
        # components
        comp_list = self._components
        coord_ref = comp_list[0].coord
        if date is None:
            date = str(datetime.date.today().strftime('%Y-%b-%d'))

        # spec_file, meta
        if hasattr(self, 'igmg_dict'):
            spec_file = self.igmg_dict['spec_file']
            meta = self.igmg_dict['meta']
            # Updates
            meta['Date'] = date
            if creator is not None:
                meta['Creator'] = creator
            #
            fwhm = self.igmg_dict['fwhm']
        else:
            spec_file = specfilename
            # coordinates and meta
            RA = coord_ref.ra.to('deg').value
            DEC = coord_ref.dec.to('deg').value
            jname = ltu.name_from_coord(coord_ref, precision=(2, 1))
            if self.zem is None:
                zem = 0.  # IGMGuesses rules
            else:
                zem = self.zem
            if creator is None:
                creator = 'unknown'
            meta = {
                'RA': RA,
                'DEC': DEC,
                'ALTNAME': altname,
                'zem': zem,
                'Creator': creator,
                'Instrument': instrument,
                'Date': date,
                'JNAME': jname
            }

        # Create dict of the components
        out_dict = dict(cmps={},
                        spec_file=spec_file,
                        fwhm=fwhm,
                        bad_pixels=[],
                        meta=meta)

        for comp in comp_list:
            key = comp.name
            out_dict['cmps'][key] = comp.to_dict()
            # import pdb; pdb.set_trace()
            # check coordinate
            if comp.coord != coord_ref:
                raise ValueError(
                    "All AbsComponent objects must have the same coordinates!")
            out_dict['cmps'][key]['zcomp'] = comp.zcomp
            # IGMGuesses specific component attr
            for igm_key in [
                    'zfit', 'Nfit', 'bfit', 'wrest', 'mask_abslines', 'vlim'
            ]:
                out_dict['cmps'][key][igm_key] = comp.igmg_attrib['top_level'][
                    igm_key]
            # IGMGuesses attribute dict
            out_dict['cmps'][key]['attrib'] = {}
            for igm_key in igmg_akeys:
                try:
                    out_dict['cmps'][key]['attrib'][
                        igm_key] = comp.igmg_attrib[igm_key]
                except KeyError:
                    out_dict['cmps'][key]['attrib'][igm_key] = comp.attrib[
                        igm_key]
            #out_dict['cmps'][key]['vlim'] = list(comp.vlim.value)
            out_dict['cmps'][key]['reliability'] = str(comp.reliability)
            out_dict['cmps'][key]['comment'] = str(comp.comment)
            # Compatibility on sig_logN
            out_dict['cmps'][key]['attrib']['sig_logN'] = comp.attrib[
                'sig_logN'][0]

        # JSONify
        gd_dict = ltu.jsonify(out_dict)

        # Write file
        ltu.savejson(outfile,
                     gd_dict,
                     overwrite=overwrite,
                     sort_keys=True,
                     indent=4,
                     separators=(',', ': '))
        print('Wrote: {:s}'.format(outfile))
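Reading such a file back is a plain loadjson call. A sketch assuming the file was written by the method above; 'igmg_file.json' is a placeholder name, and the keys follow the out_dict built in write_to_igmguesses:

# Sketch: inspect an IGMGuesses JSON written by write_to_igmguesses above.
from linetools import utils as ltu

igmg = ltu.loadjson('igmg_file.json')   # placeholder filename
print(igmg['meta']['Date'], 'fwhm =', igmg['fwhm'])
for name, comp in igmg['cmps'].items():
    print(name, 'z =', comp['zfit'], 'logN =', comp['Nfit'])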
Example #24
def chk_pn_dla_to_ml(ml_dlasurvey=None, ml_llssurvey=None, dz_toler=0.015, outfile='vette_dr7_pn.json'):
    """ Compare results of Noterdaeme to ML
    Save to JSON file
    """
    # Load ML
    if (ml_dlasurvey is None) or (ml_llssurvey is None):
        ml_llssurvey, ml_dlasurvey = load_ml_dr7()
    # Load PN
    pn_dr7_file = '../Analysis/noterdaeme_dr7.fits'
    pn_dr7 = Table.read(pn_dr7_file)

    # Use coord to efficiently deal with sightlines
    ml_coord = SkyCoord(ra=ml_dlasurvey.sightlines['RA'], dec=ml_dlasurvey.sightlines['DEC'], unit='deg')
    pn_coord = SkyCoord(ra=pn_dr7['_RA'], dec=pn_dr7['_DE'], unit='deg')
    idx, d2d, d3d = match_coordinates_sky(pn_coord, ml_coord, nthneighbor=1)
    in_ml = d2d < 2*u.arcsec
    print("{:d} of the PN sightlines were covered by ML out of {:d}".format(np.sum(in_ml), len(pn_dr7)))

    # Cut
    cut_pn = pn_dr7[in_ml]

    # Loop on PN DLAs and save indices of the matches
    pn_ml_idx = np.zeros(len(cut_pn)).astype(int) - 1
    for ii,pnrow in enumerate(cut_pn):
        if pnrow['logN_HI_'] >= 20.3:
            dla_mts = np.where((ml_dlasurvey.plate == pnrow['Plate']) & (ml_dlasurvey.fiber == pnrow['Fiber']))[0]
            nmt = len(dla_mts)
            if nmt == 0:  # No match
                # Check for LLS
                lls_mts = np.where((ml_llssurvey.plate == pnrow['Plate']) & (ml_llssurvey.fiber == pnrow['Fiber']))[0]
                nmt2 = len(lls_mts)
                if nmt2 == 0:  # No match
                    pass
                else:
                    zML = ml_llssurvey.zabs[lls_mts] # Redshifts of all DLAs on the sightline in ML
                    zdiff = np.abs(pnrow['zabs']-zML)
                    if np.min(zdiff) < dz_toler:
                        pn_ml_idx[ii] = -9  # SLLS match
            else:
                zML = ml_dlasurvey.zabs[dla_mts] # Redshifts of all DLAs on the sightline in ML
                zdiff = np.abs(pnrow['zabs']-zML)
                if np.min(zdiff) < dz_toler:
                    #print("Match on {:d}!".format(ii))
                    # Match
                    imin = np.argmin(zdiff)
                    pn_ml_idx[ii] = dla_mts[imin]
        else:
            pn_ml_idx[ii] = -99  # Not a PN DLA
    # Stats on matches
    '''
    gdm = pn_ml_idx >= 0
    pdb.set_trace()
    dz = cut_pn['zabs'][gdm]-ml_dlasurvey.zabs[pn_ml_idx[gdm]]
    dNHI = cut_pn['logN_HI_'][gdm]-ml_dlasurvey.NHI[pn_ml_idx[gdm]]
    plt.clf()
    #plt.hist(dz)
    plt.hist(dNHI)
    plt.show()
    '''
    # PN not matched by ML?
    misses = (pn_ml_idx == -1)
    pn_missed = cut_pn[misses]
    # Write high NHI systems to disk
    high_NHI = pn_missed['logN_HI_'] > 20.8
    pn_missed[['QSO','Plate','Fiber', 'zem', 'zabs', 'Flag', 'logN_HI_']][high_NHI].write("N09_missed_highNHI.ascii", format='ascii.fixed_width', overwrite=True)

    # ML not matched by PN?
    ml_dla_coords = ml_dlasurvey.coords
    idx2, d2d2, d3d = match_coordinates_sky(ml_dla_coords, pn_coord, nthneighbor=1)
    not_in_pn = d2d2 > 2*u.arcsec  # This doesn't check zabs!!

    tmp_tbl = Table()
    for key in ['plate', 'fiber', 'zabs', 'NHI', 'confidence']:
        tmp_tbl[key] = getattr(ml_dlasurvey, key)

    # Save
    out_dict = {}
    out_dict['in_ml'] = in_ml
    out_dict['pn_idx'] = pn_ml_idx  # -1 are misses, -99 are not DLAs in PN
    out_dict['not_in_pn'] = np.where(not_in_pn)[0]
    ltu.savejson(outfile, ltu.jsonify(out_dict), overwrite=True)
    print("Wrote: {:s}".format(outfile))
Example #25
# Line list
CuI = waveio.load_line_list('CuI', use_ion=True, NIST=True)
ArI = waveio.load_line_list('ArI', use_ion=True, NIST=True)
ArII = waveio.load_line_list('ArII', use_ion=True, NIST=True)
llist = vstack([CuI, ArI, ArII])
arcparam['llist'] = llist

# Simple calibration
final_fit = arc.simple_calib(dummy,
                             arcparam,
                             spec,
                             IDpixels=IDpixels,
                             IDwaves=IDwaves,
                             nfitpix=9)  #, sigdetect=5.) #sigdetect=7.)
arc.arc_fit_qa(None, final_fit, None, outfile='GMOS_R400_wave.png')
jdict = ltu.jsonify(final_fit)
ltu.savejson(outfile, jdict, overwrite=True)

# Arc fitter
lines = ['CuI', 'ArI', 'ArII']
min_ampl = 1000.
arcfitter = autoid.General(spec.reshape((spec.size, 1)),
                           lines,
                           min_ampl=min_ampl,
                           rms_threshold=0.2,
                           nonlinear_counts=80000.)

Example #26
def score_ml_test(dz_toler=0.015,
                  outfile='vette_10k.json',
                  test_file='data/test_dlas_96629_10000.json.gz',
                  pred_file='data/test_dlas_96629_predictions.json.gz'):
    # Load Test
    test_dlas = test_to_tbl(test_file)
    ntest = len(test_dlas)
    # Load ML
    ml_abs = pred_to_tbl(pred_file)

    # Loop on test DLAs and save indices of the matches
    test_ml_idx = np.zeros(ntest).astype(int) - 99999
    for ii in range(ntest):
        # Match to ML sl
        in_sl = np.where(ml_abs['ids'] == test_dlas['ids'][ii])[0]
        dla_mts = np.where(
            np.abs(ml_abs['zabs'][in_sl] -
                   test_dlas['zabs'][ii]) < dz_toler)[0]
        nmt = len(dla_mts)
        if nmt == 0:  # No match within dz
            pass
        elif nmt == 1:  # One match
            if ml_abs['NHI'][in_sl][dla_mts[0]] > 20.2999:
                test_ml_idx[ii] = in_sl[dla_mts[0]]
            else:
                test_ml_idx[ii] = -1 * in_sl[dla_mts[0]]
        else:  # Very rarely the ML identifies two DLAs in the window
            print("Double hit in test DLA {:d}".format(ii))
            imin = np.argmin(
                np.abs(ml_abs['zabs'][in_sl] - test_dlas['zabs'][ii]))
            test_ml_idx[ii] = in_sl[imin]

    match = test_ml_idx >= 0
    print("There were {:d} DLAs recovered out of {:d}".format(
        np.sum(match), ntest))

    # Write out misses
    misses = np.where(test_ml_idx == -99999)[0]
    print("There were {:d} DLAs missed altogether".format(len(misses)))
    mtbl = Table()
    for key in ['sl', 'NHI', 'zabs']:
        mtbl[key] = test_dlas[key][misses]
    mtbl.write('test_misses.ascii', format='ascii.fixed_width', overwrite=True)

    # Write out SLLS
    sllss = np.where((test_ml_idx < 0) & (test_ml_idx != -99999))[0]
    print("There were {:d} DLAs recovered as SLLS".format(len(sllss)))
    stbl = Table()
    for key in ['sl', 'NHI', 'zabs']:
        stbl[key] = test_dlas[key][sllss]
    stbl.write('test_slls.ascii', format='ascii.fixed_width', overwrite=True)

    # Save
    out_dict = {}
    out_dict['test_idx'] = test_ml_idx  # -99999 are misses; negative values are SLLS matches
    ltu.savejson(outfile, ltu.jsonify(out_dict), overwrite=True)

    # Stats on dz
    dz = ml_abs['zabs'][test_ml_idx[match]] - test_dlas['zabs'][match]
    print("Median dz = {} and sigma(dz)= {}".format(np.median(dz), np.std(dz)))
Example #27
def make_set(ntrain,
             slines,
             outroot=None,
             tol=1 * u.arcsec,
             igmsp_survey='SDSS_DR7',
             frac_without=0.,
             seed=1234,
             zmin=None,
             zmax=4.5,
             high=False,
             slls=False,
             mix=False,
             low_s2n=False):
    """ Generate a training set

    Parameters
    ----------
    ntrain : int
      Number of training sightlines to generate
    slines : Table
      Table of sightlines without DLAs (usually from SDSS or BOSS)
    igmsp_survey : str, optional
      Dataset name for spectra
    frac_without : float, optional
      Fraction of sightlines (on average) without a DLA
    seed : int, optional
    outroot : str, optional
      Root for output filenames
        root+'.fits' for spectra
        root+'.json' for DLA info
    zmin : float, optional
      Minimum redshift for training; defaults to min(slines['ZEM'])
    zmax : float, optional
      Maximum redshift to train on
    mix : bool, optional
      Mix of SLLS and DLAs
    low_s2n : bool, optional
      Reduce the S/N artificially, i.e. add noise

    Returns
    -------

    """
    from linetools.spectra.utils import collate

    # Init and checks
    igmsp = IgmSpec()
    assert igmsp_survey in igmsp.groups
    rstate = np.random.RandomState(seed)
    rfrac = rstate.random_sample(ntrain)
    if zmin is None:
        zmin = np.min(slines['ZEM'])
    rzem = zmin + rstate.random_sample(ntrain) * (zmax - zmin)
    fNHI = init_fNHI(slls=slls, mix=mix, high=high)

    all_spec = []
    full_dict = {}
    # Begin looping
    for qq in range(ntrain):
        print("qq = {:d}".format(qq))
        full_dict[qq] = {}
        # Grab sightline
        isl = np.argmin(np.abs(slines['ZEM'] - rzem[qq]))
        full_dict[qq]['sl'] = isl  # sightline
        specl, meta = igmsp.spectra_from_coord(
            (slines['RA'][isl], slines['DEC'][isl]),
            groups=['SDSS_DR7'],
            tol=tol,
            verbose=False)
        assert len(meta) == 1
        spec = specl
        # Meta data for header
        mdict = {}
        for key in meta.keys():
            mdict[key] = meta[key][0]
        mhead = Header(mdict)
        # Clear?
        if rfrac[qq] < frac_without:
            spec.meta['headers'][0] = mdict.copy()  #mhead
            all_spec.append(spec)
            full_dict[qq]['nDLA'] = 0
            continue
        # Insert at least one DLA
        spec, dlas = insert_dlas(spec,
                                 mhead['zem_GROUP'],
                                 rstate=rstate,
                                 fNHI=fNHI,
                                 slls=slls,
                                 mix=mix,
                                 high=high,
                                 low_s2n=low_s2n)
        spec.meta['headers'][0] = mdict.copy()  #mhead
        all_spec.append(spec)
        full_dict[qq]['nDLA'] = len(dlas)
        for kk, dla in enumerate(dlas):
            full_dict[qq][kk] = {}
            full_dict[qq][kk]['NHI'] = dla.NHI
            full_dict[qq][kk]['zabs'] = dla.zabs

    # Generate one object
    final_spec = collate(all_spec)
    # Write?
    if outroot is not None:
        # Spectra
        final_spec.write_to_hdf5(outroot + '.hdf5')
        # Dict -> JSON
        gdict = ltu.jsonify(full_dict)
        ltu.savejson(outroot + '.json', gdict,
                     overwrite=True)  #, easy_to_read=True)
    # Return
    return final_spec, full_dict
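Because the JSON round trip turns the integer sightline keys into strings, consumers of the DLA info file should index with string keys. A hedged sketch; 'training.json' stands in for outroot + '.json':

# Sketch: read the DLA bookkeeping written by make_set above.
# Integer keys become strings after the JSON round trip.
from linetools import utils as ltu

full_dict = ltu.loadjson('training.json')   # placeholder for outroot + '.json'
for qq, entry in full_dict.items():
    if entry['nDLA'] > 0:
        print(qq, 'NHI =', entry['0']['NHI'], 'zabs =', entry['0']['zabs'])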
Example #28
def score_ml_test(
        dz_toler=0.015,
        outfile='vette_mix.json',
        test_file='data/mix_test_23559_10000.json',
        pred_file='../Analysis/visuals_mix/mixed_predictions.json.gz'):
    """
    Parameters
    ----------
    dz_toler
    outfile
    test_file
    pred_file

    Returns
    -------
    writes

    """
    # Load Test
    mix_abs = test_to_tbl(test_file)
    mix_dlas = mix_abs[mix_abs['NHI'] >= 20.3]
    ntest = len(mix_dlas)
    mix_slls = mix_abs[mix_abs['NHI'] < 20.3]
    n_slls = len(mix_slls)
    # Load ML
    ml_abs = pred_to_tbl(pred_file)
    ml_dlas = ml_abs['NHI'] >= 20.3
    ml_slls = ml_abs['NHI'] < 20.3

    # Loop on test DLAs and save indices of the matches
    test_ml_idx = np.zeros(ntest).astype(int) - 99999
    for ii in range(ntest):
        # Match to ML sl
        in_sl = np.where(ml_abs['ids'] == mix_dlas['ids'][ii])[0]
        dla_mts = np.where(
            np.abs(ml_abs['zabs'][in_sl] - mix_dlas['zabs'][ii]) < dz_toler)[0]
        nmt = len(dla_mts)
        if nmt == 0:  # No match within dz
            pass
        elif nmt == 1:  # One match
            if ml_abs['NHI'][in_sl][dla_mts[0]] > 20.2999:
                test_ml_idx[ii] = in_sl[dla_mts[0]]
            else:
                test_ml_idx[ii] = -1 * in_sl[dla_mts[0]]
        else:  # Very rarely the ML identifies two DLAs in the window
            print("Double hit in mix DLA {:d}".format(ii))
            imin = np.argmin(
                np.abs(ml_abs['zabs'][in_sl] - mix_dlas['zabs'][ii]))
            test_ml_idx[ii] = in_sl[imin]

    match = test_ml_idx >= 0
    print("There were {:d} DLAs discovered by the CNN".format(np.sum(ml_dlas)))
    print("There were {:d} DLAs recovered out of {:d}".format(
        np.sum(match), ntest))
    print("There were {:d} false positive DLAs".format(
        np.sum(ml_dlas) - np.sum(match)))

    # Write out misses
    misses = np.where(test_ml_idx == -99999)[0]
    print("There were {:d} DLAs missed altogether (false negatives)".format(
        len(misses)))
    mtbl = Table()
    for key in ['sl', 'NHI', 'zabs']:
        mtbl[key] = mix_dlas[key][misses]
    mtbl.write('mix_misses.ascii', format='ascii.fixed_width', overwrite=True)

    # Write out SLLS
    sllss = np.where((test_ml_idx < 0) & (test_ml_idx != -99999))[0]
    print("There were {:d} DLAs recovered as SLLS".format(len(sllss)))
    stbl = Table()
    for key in ['sl', 'NHI', 'zabs']:
        stbl[key] = mix_dlas[key][sllss]
    stbl.write('mix_slls.ascii', format='ascii.fixed_width', overwrite=True)

    # Loop on test SLLS and save indices of the matches
    test_ml_slls_idx = np.zeros(n_slls).astype(int) - 99999
    for ii in range(n_slls):
        # Match to ML sl
        in_sl = np.where(ml_abs['ids'] == mix_slls['ids'][ii])[0]
        slls_mts = np.where(
            np.abs(ml_abs['zabs'][in_sl] - mix_slls['zabs'][ii]) < dz_toler)[0]
        nmt = len(slls_mts)
        if nmt == 0:  # No match within dz
            pass
        elif nmt == 1:  # One match
            if ml_abs['NHI'][in_sl][slls_mts[0]] > 20.2999:
                test_ml_slls_idx[ii] = -1 * in_sl[slls_mts[0]]
            else:
                test_ml_slls_idx[ii] = in_sl[slls_mts[0]]
        else:  # Very rarely the ML identifies two SLLS in the window
            print("Double hit in mix SLLS {:d}".format(ii))
            imin = np.argmin(
                np.abs(ml_abs['zabs'][in_sl] - mix_slls['zabs'][ii]))
            test_ml_slls_idx[ii] = in_sl[imin]

    slls_match = test_ml_slls_idx >= 0
    print("There were {:d} SLLS discovered by the CNN".format(np.sum(ml_slls)))
    print("There were {:d} SLLS recovered out of {:d}".format(
        np.sum(slls_match), n_slls))
    print("There were {:d} false positive SLLS".format(
        np.sum(ml_slls) - np.sum(slls_match)))

    # Save
    out_dict = {}
    out_dict['test_idx'] = test_ml_idx  # -99999 are misses; negative values are SLLS matches
    ltu.savejson(outfile, ltu.jsonify(out_dict), overwrite=True)

    # Stats on dz
    dz = ml_abs['zabs'][test_ml_idx[match]] - mix_dlas['zabs'][match]
    print("Median dz = {} and sigma(dz)= {}".format(np.median(dz), np.std(dz)))
Example #29
def chk_dr5_dla_to_ml(ml_dlasurvey=None, ml_llssurvey=None, dz_toler=0.015,
                      outfile='vette_dr5.json', write_again=True):
    # Load ML
    if (ml_dlasurvey is None) or (ml_llssurvey is None):
        ml_llssurvey, ml_dlasurvey = load_ml_dr7()
    # Load DR5
    dr5 = DLASurvey.load_SDSS_DR5()  # This is the statistical sample
    # Use coord to efficiently deal with sightlines
    ml_coord = SkyCoord(ra=ml_dlasurvey.sightlines['RA'], dec=ml_dlasurvey.sightlines['DEC'], unit='deg')
    dr5_coord = SkyCoord(ra=dr5.sightlines['RA'], dec=dr5.sightlines['DEC'], unit='deg')
    idx, d2d, d3d = match_coordinates_sky(dr5_coord, ml_coord, nthneighbor=1)
    in_ml = d2d < 2*u.arcsec
    print("{:d} of the DR5 sightlines were covered by ML out of {:d}".format(np.sum(in_ml), len(dr5.sightlines)))
    # 7477 sightlines out of 7482

    # Cut down
    dr5.sightlines = dr5.sightlines[in_ml]
    new_mask = dla_stat(dr5, dr5.sightlines) # 737 good DLAs
    dr5.mask = new_mask
    dr5_dla_coord = dr5.coord
    dr5_dla_zabs = dr5.zabs
    ndr5 = len(dr5_dla_coord)

    ml_dla_coord = ml_dlasurvey.coords
    ml_lls_coord = ml_llssurvey.coords

    # Loop on DR5 DLAs and save indices of the matches
    dr5_ml_idx = np.zeros(ndr5).astype(int) - 1
    for ii in range(ndr5):
        # Match to ML
        dla_mts = np.where(dr5_dla_coord[ii].separation(ml_dla_coord) < 2*u.arcsec)[0]
        nmt = len(dla_mts)
        if nmt == 0:  # No match
            # Check for LLS
            lls_mts = np.where(dr5_dla_coord[ii].separation(ml_lls_coord) < 2*u.arcsec)[0]
            nmt2 = len(lls_mts)
            if nmt2 == 0:  # No match
                pass
            else:
                zML = ml_llssurvey.zabs[lls_mts] # Redshifts of all DLAs on the sightline in ML
                zdiff = np.abs(dr5_dla_zabs[ii]-zML)
                if np.min(zdiff) < dz_toler:
                    dr5_ml_idx[ii] = -9  # SLLS match
        else:
            zML = ml_dlasurvey.zabs[dla_mts] # Redshifts of all DLAs on the sightline in ML
            zdiff = np.abs(dr5_dla_zabs[ii]-zML)
            if np.min(zdiff) < dz_toler:
                #print("Match on {:d}!".format(ii))
                # Match
                imin = np.argmin(zdiff)
                dr5_ml_idx[ii] = dla_mts[imin]
            else: # Check for LLS
                lls_mts = np.where(dr5_dla_coord[ii].separation(ml_lls_coord) < 2*u.arcsec)[0]
                nmt2 = len(lls_mts)
                if nmt2 == 0:  # No match
                    pass
                else:
                    zML = ml_llssurvey.zabs[lls_mts] # Redshifts of all DLAs on the sightline in ML
                    zdiff = np.abs(dr5_dla_zabs[ii]-zML)
                    if np.min(zdiff) < dz_toler:
                        dr5_ml_idx[ii] = -9  # SLLS match


    dr5_coord = SkyCoord(ra=dr5.sightlines['RA'], dec=dr5.sightlines['DEC'], unit='deg')

    # Write out misses
    misses = np.where(dr5_ml_idx == -1)[0]
    plates, fibers = [], []
    for miss in misses:
        imin = np.argmin(dr5_dla_coord[miss].separation(dr5_coord))
        plates.append(dr5.sightlines['PLATE'][imin])
        fibers.append(dr5.sightlines['FIB'][imin])
    mtbl = Table()
    mtbl['PLATE'] = plates
    mtbl['FIBER'] = fibers
    mtbl['NHI'] = dr5.NHI[misses]
    mtbl['zabs'] = dr5.zabs[misses]
    if write_again:
        mtbl.write('DR5_misses.ascii', format='ascii.fixed_width', overwrite=True)

    # Write out SLLS
    sllss = np.where(dr5_ml_idx == -9)[0]
    plates, fibers = [], []
    for slls in sllss:
        imin = np.argmin(dr5_dla_coord[slls].separation(dr5_coord))
        plates.append(dr5.sightlines['PLATE'][imin])
        fibers.append(dr5.sightlines['FIB'][imin])
    mtbl = Table()
    mtbl['PLATE'] = plates
    mtbl['FIBER'] = fibers
    mtbl['NHI'] = dr5.NHI[sllss]
    mtbl['zabs'] = dr5.zabs[sllss]
    if write_again:
        mtbl.write('DR5_SLLS.ascii', format='ascii.fixed_width', overwrite=True)

    # ML not matched by PW09?
    ml_dla_coords = ml_dlasurvey.coords
    idx2, d2d2, d3d = match_coordinates_sky(ml_dla_coords, dr5_dla_coord, nthneighbor=1)
    not_in_dr5 = d2d2 > 2*u.arcsec  # This doesn't match redshifts!
    might_be_in_dr5 = np.where(~not_in_dr5)[0]

    others_not_in = []  # this is some painful book-keeping
    for idx in might_be_in_dr5:  # Matching redshifts..
        imt = ml_dla_coord[idx].separation(dr5_dla_coord) < 2*u.arcsec
        # Match on dztoler
        if np.min(np.abs(ml_dlasurvey.zabs[idx]-dr5.zabs[imt])) > dz_toler:
            others_not_in.append(idx)

    # Save
    out_dict = {}
    out_dict['in_ml'] = in_ml
    out_dict['dr5_idx'] = dr5_ml_idx  # -1 are misses, -9 are SLLS
    out_dict['not_in_dr5'] = np.concatenate([np.where(not_in_dr5)[0], np.array(others_not_in)])
    ltu.savejson(outfile, ltu.jsonify(out_dict), overwrite=True)
Example #30
File: pypeit.py Project: tbowers7/PypeIt
    def calib_all(self, run=True):
        """
        Create calibrations for all setups

        This will not crash if the standard set of files is not provided.

        Args:
            run (bool, optional): If False, only print the calib names and do
                not actually run. Only used with the pypeit_parse_calib_id script.

        Returns:
            dict: A simple dict summarizing the calibration names
        """
        calib_dict = {}

        self.tstart = time.time()

        # Frame indices
        frame_indx = np.arange(len(self.fitstbl))
        for i in range(self.fitstbl.n_calib_groups):
            # 1-indexed calib number
            calib_grp = str(i + 1)
            # Find all the frames in this calibration group
            in_grp = self.fitstbl.find_calib_group(i)
            grp_frames = frame_indx[in_grp]

            # Find the detectors to reduce
            #            detectors = PypeIt.select_detectors(detnum=self.par['rdx']['detnum'],
            #                                                ndet=self.spectrograph.ndet)
            detectors = self.spectrograph.select_detectors(
                subset=self.par['rdx']['detnum'])
            calib_dict[calib_grp] = {}
            # Loop on Detectors
            for self.det in detectors:
                # Instantiate Calibrations class
                self.caliBrate = calibrations.Calibrations.get_instance(
                    self.fitstbl,
                    self.par['calibrations'],
                    self.spectrograph,
                    self.calibrations_path,
                    qadir=self.qa_path,
                    reuse_masters=self.reuse_masters,
                    show=self.show,
                    user_slits=slittrace.merge_user_slit(
                        self.par['rdx']['slitspatnum'],
                        self.par['rdx']['maskIDs']))
                # Do it
                # TODO: Why isn't set_config part of the Calibrations.__init__ method?
                self.caliBrate.set_config(grp_frames[0], self.det,
                                          self.par['calibrations'])

                # Allow skipping the run (e.g. parse_calib_id.py script)
                if run:
                    self.caliBrate.run_the_steps()

                key = self.caliBrate.master_key_dict['frame']
                calib_dict[calib_grp][key] = {}
                for step in self.caliBrate.steps:
                    if step in ['bpm', 'slits', 'wv_calib', 'tilts', 'flats']:
                        continue
                    elif step == 'tiltimg':  # Annoying kludge
                        step = 'tilt'
                    # Prep
                    raw_files, self.caliBrate.master_key_dict[
                        step] = self.caliBrate._prep_calibrations(step)
                    masterframe_name = masterframe.construct_file_name(
                        buildimage.frame_image_classes[step],
                        self.caliBrate.master_key_dict[step],
                        master_dir=self.caliBrate.master_dir)

                    # Add to dict
                    if len(raw_files) > 0:
                        calib_dict[calib_grp][key][step] = {}
                        calib_dict[calib_grp][key][step][
                            'master_key'] = self.caliBrate.master_key_dict[
                                step]
                        calib_dict[calib_grp][key][step][
                            'master_name'] = os.path.basename(masterframe_name)
                        calib_dict[calib_grp][key][step]['raw_files'] = [
                            os.path.basename(ifile) for ifile in raw_files
                        ]

        # Print the results
        print(json.dumps(calib_dict, sort_keys=True, indent=4))

        # Write
        msgs.info('Writing calib file')
        calib_file = self.pypeit_file.replace('.pypeit', '.calib_ids')
        ltu.savejson(calib_file, calib_dict, overwrite=True, easy_to_read=True)

        # Finish
        self.print_end_time()

        # Return
        return calib_dict
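The .calib_ids file is ordinary JSON, so it can be inspected with either json.load or ltu.loadjson. A sketch with a placeholder filename:

# Sketch: inspect a .calib_ids summary written by calib_all above.
from linetools import utils as ltu

calib_dict = ltu.loadjson('my_run.calib_ids')   # placeholder filename
for grp, frames in calib_dict.items():
    print('calib group', grp, '->', list(frames.keys()))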
Example #31
def iterative_fitting(spec, tcent, ifit, IDs, llist, disp,
                      match_toler=2.0, func='legendre', n_first=2, sigrej_first=2.0,
                      n_final=4, sigrej_final=3.0,
                      weights=None, plot_fil=None, verbose=False):

    """ Routine for iteratively fitting wavelength solutions.

    Parameters
    ----------
    spec : ndarray, shape = (nspec,)
      arcline spectrum
    tcent : ndarray
      Centroids in pixels of lines identified in spec
    ifit : ndarray
      Indices of the lines that will be fit
    IDs: ndarray
      wavelength IDs of the lines that will be fit (I think?)
    llist: dict
      Linelist dictionary
    disp: float
      dispersion

    Optional Parameters
    -------------------
    match_toler: float, default = 2.0
      Matching tolerance when searching for new lines. This is the difference in pixels between the wavelength assigned to
      an arc line by an iteration of the wavelength solution and the wavelength in the line list.
    func: str, default = 'legendre'
      Name of function used for the wavelength solution
    n_first: int, default = 2
      Order of first guess to the wavelength solution.
    sigrej_first: float, default = 2.0
      Number of sigma for rejection for the first guess to the wavelength solution.
    n_final: int, default = 4
      Order of the final wavelength solution fit
    sigrej_final: float, default = 3.0
      Number of sigma for rejection for the final fit to the wavelength solution.
    weights: ndarray
      Weights to be used?
    verbose : bool
      If True, print out more information.
    plot_fil:
      Filename for plotting some QA?

    Returns
    -------
    final_fit: dict
      Dictionary containing the full fitting results and the final best guess of the line IDs
    """

    #TODO JFH add error checking here to ensure that IDs and ifit have the same size!

    if weights is None:
        weights = np.ones(tcent.size)

    nspec = spec.size
    xnspecmin1 = float(nspec-1)
    # Setup for fitting
    sv_ifit = list(ifit)  # Keep the originals
    all_ids = -999.*np.ones(len(tcent))
    all_idsion = np.array(['UNKNWN']*len(tcent))
    all_ids[ifit] = IDs

    # Fit
    n_order = n_first
    flg_continue = True
    flg_penultimate = False
    fmin, fmax = 0.0, 1.0
    # Note the number of parameters is actually n_order and not n_order+1
    while flg_continue:
        if flg_penultimate:
            flg_continue = False
        # Fit with rejection
        xfit, yfit, wfit = tcent[ifit], all_ids[ifit], weights[ifit]
        mask, fit = utils.robust_polyfit(xfit/xnspecmin1, yfit, n_order, function=func, sigma=sigrej_first,
                                         minx=fmin, maxx=fmax, verbose=verbose, weights=wfit)

        rms_ang = utils.calc_fit_rms(xfit[mask == 0]/xnspecmin1, yfit[mask == 0], fit, func, minx=fmin, maxx=fmax,
                                     weights=wfit[mask == 0])
        rms_pix = rms_ang/disp
        if verbose:
            msgs.info('n_order = {:d}'.format(n_order) + ': RMS = {:g}'.format(rms_pix))

        # Reject but keep originals (until final fit)
        ifit = list(ifit[mask == 0]) + sv_ifit
        # Find new points (should we allow removal of the originals?)
        twave = utils.func_val(fit, tcent/xnspecmin1, func, minx=fmin, maxx=fmax)
        for ss, iwave in enumerate(twave):
            mn = np.min(np.abs(iwave-llist['wave']))
            if mn/disp < match_toler:
                imn = np.argmin(np.abs(iwave-llist['wave']))
                #if verbose:
                #    print('Adding {:g} at {:g}'.format(llist['wave'][imn],tcent[ss]))
                # Update and append
                all_ids[ss] = llist['wave'][imn]
                all_idsion[ss] = llist['ion'][imn]
                ifit.append(ss)
        # Keep unique ones
        ifit = np.unique(np.array(ifit, dtype=int))
        # Increment order?
        if n_order < n_final:
            n_order += 1
        else:
            flg_penultimate = True

    # Final fit (originals can now be rejected)
    #fmin, fmax = 0., 1.
    #xfit, yfit, wfit = tcent[ifit]/(nspec-1), all_ids[ifit], weights[ifit]
    xfit, yfit, wfit = tcent[ifit], all_ids[ifit], weights[ifit]
    mask, fit = utils.robust_polyfit(xfit/xnspecmin1, yfit, n_order, function=func, sigma=sigrej_final,
                                     minx=fmin, maxx=fmax, verbose=verbose, weights=wfit)#, debug=True)
    irej = np.where(mask == 1)[0]
    if len(irej) > 0:
        xrej = xfit[irej]
        yrej = yfit[irej]
        if verbose:
            for kk, imask in enumerate(irej):
                wave = utils.func_val(fit, xrej[kk]/xnspecmin1, func, minx=fmin, maxx=fmax)
                msgs.info('Rejecting arc line {:g}; {:g}'.format(yfit[imask], wave))
    else:
        xrej = []
        yrej = []

    #xfit = xfit[mask == 0]
    #yfit = yfit[mask == 0]
    #wfit = wfit[mask == 0]
    ions = all_idsion[ifit]
#    ions = all_idsion[ifit][mask == 0]
    # Final RMS
    rms_ang = utils.calc_fit_rms(xfit[mask==0]/xnspecmin1, yfit[mask==0], fit, func,
                                 minx=fmin, maxx=fmax, weights=wfit[mask==0])
#    rms_ang = utils.calc_fit_rms(xfit, yfit, fit, func,
#                                 minx=fmin, maxx=fmax, weights=wfit)
    rms_pix = rms_ang/disp

    # Pack up fit
    spec_vec = np.arange(nspec)
    wave_soln = utils.func_val(fit,spec_vec/xnspecmin1, func, minx=fmin, maxx=fmax)
    cen_wave = utils.func_val(fit, float(nspec)/2/xnspecmin1, func, minx=fmin, maxx=fmax)
    cen_wave_min1 = utils.func_val(fit, (float(nspec)/2 - 1.0)/xnspecmin1, func, minx=fmin, maxx=fmax)
    cen_disp = cen_wave - cen_wave_min1

    final_fit = dict(fitc=fit, function=func, pixel_fit=xfit, wave_fit=yfit, weights=wfit, ions=ions,
                     fmin=fmin, fmax=fmax, xnorm = xnspecmin1, nspec=nspec, cen_wave = cen_wave, cen_disp = cen_disp,
                     xrej=xrej, yrej=yrej, mask=(mask == 0), spec=spec, wave_soln = wave_soln, nrej=sigrej_final,
                     shift=0., tcent=tcent, rms=rms_pix)

    # If set to True, this will output a file that can then be included in the tests
    saveit = False
    if saveit:
        from linetools import utils as ltu
        jdict = ltu.jsonify(final_fit)
        if plot_fil is None:
            outname = "temp"
            print("You should have set the plot_fil directory to save wavelength fits... using 'temp' as a filename")
        else:
            outname = plot_fil
        ltu.savejson(outname + '.json', jdict, easy_to_read=True, overwrite=True)
        print(" Wrote: {:s}".format(outname + '.json'))

    # QA
    if plot_fil is not None:
        autoid.arc_fit_qa(final_fit, plot_fil)
    # Return
    return final_fit
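When saveit is enabled above, the jsonified final_fit can be restored for regression tests. A sketch assuming the default 'temp' output name; the keys follow the final_fit dict packed at the end of iterative_fitting:

# Sketch: load a wavelength fit saved by the saveit branch above.
import numpy as np
from linetools import utils as ltu

fit = ltu.loadjson('temp.json')
print('function:', fit['function'], 'RMS (pix):', fit['rms'])
wave_soln = np.array(fit['wave_soln'])   # stored as a list by jsonify
print(wave_soln[:3])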
Example #32
def semi_brute(spec,
               lines,
               wv_cen,
               disp,
               siglev=20.,
               min_ampl=300.,
               outroot=None,
               debug=False,
               do_fit=True,
               verbose=False,
               fit_parm=None,
               min_nmatch=0,
               lowest_ampl=200.):
    """
    Parameters
    ----------
    spec
    lines
    wv_cen
    disp
    siglev
    min_ampl
    outroot
    debug
    do_fit
    verbose
    fit_parm
    min_nmatch
    lowest_ampl

    Returns
    -------
    best_dict : dict
    final_fit : dict

    """
    # imports
    from astropy.table import vstack
    from linetools import utils as ltu
    from arclines import plots as arcl_plots
    # Load line lists
    line_lists = arcl_io.load_line_lists(lines)
    unknwns = arcl_io.load_unknown_list(lines)

    npix = spec.size

    # Lines
    all_tcent, cut_tcent, icut = arch_utils.arc_lines_from_spec(
        spec, min_ampl=min_ampl)

    # Best
    best_dict = dict(nmatch=0, ibest=-1, bwv=0., min_ampl=min_ampl)

    # 3 things to fiddle:
    #  pix_tol -- higher for fewer lines  1/2
    #  unknowns -- on for fewer lines  off/on
    #  scoring -- weaken for more lines ??

    # Loop on unknowns
    for unknown in [False, True]:
        if unknown:
            tot_list = vstack([line_lists, unknwns])
        else:
            tot_list = line_lists
        wvdata = np.array(tot_list['wave'].data)  # Removes mask if any
        wvdata.sort()
        sav_nmatch = best_dict['nmatch']

        # Loop on pix_tol
        for pix_tol in [1., 2.]:
            # Scan on wavelengths
            arch_patt.scan_for_matches(wv_cen,
                                       disp,
                                       npix,
                                       cut_tcent,
                                       wvdata,
                                       best_dict=best_dict,
                                       pix_tol=pix_tol)
            # Lower minimum amplitude
            ampl = min_ampl
            while (best_dict['nmatch'] < min_nmatch):
                ampl /= 2.
                if ampl < lowest_ampl:
                    break
                all_tcent, cut_tcent, icut = arch_utils.arc_lines_from_spec(
                    spec, min_ampl=ampl)
                arch_patt.scan_for_matches(wv_cen,
                                           disp,
                                           npix,
                                           cut_tcent,
                                           wvdata,
                                           best_dict=best_dict,
                                           pix_tol=pix_tol,
                                           ampl=ampl)

        # Save linelist?
        if best_dict['nmatch'] > sav_nmatch:
            best_dict['line_list'] = tot_list
            best_dict['unknown'] = unknown
            best_dict['ampl'] = ampl

    if best_dict['nmatch'] == 0:
        print('---------------------------------------------------')
        print('Report:')
        print('::   No matches!  Could be you input a bad wvcen or disp value')
        print('---------------------------------------------------')
        return

    # Report
    print('---------------------------------------------------')
    print('Report:')
    print('::   Number of lines recovered = {:d}'.format(all_tcent.size))
    print('::   Number of lines analyzed = {:d}'.format(cut_tcent.size))
    print('::   Number of Perf/Good/Ok matches = {:d}'.format(
        best_dict['nmatch']))
    print('::   Best central wavelength = {:g}A'.format(best_dict['bwv']))
    print('::   Best solution used pix_tol = {}'.format(best_dict['pix_tol']))
    print('::   Best solution had unknown = {}'.format(best_dict['unknown']))
    print('---------------------------------------------------')

    if debug:
        match_idx = best_dict['midx']
        for kk in match_idx.keys():
            uni, counts = np.unique(match_idx[kk]['matches'],
                                    return_counts=True)
            print('kk={}, {}, {}, {}'.format(kk, uni, counts, np.sum(counts)))

    # Write scores
    #out_dict = best_dict['scores']
    #jdict = ltu.jsonify(out_dict)
    #ltu.savejson(pargs.outroot+'.scores', jdict, easy_to_read=True, overwrite=True)

    # Write IDs
    if outroot is not None:
        out_dict = dict(pix=cut_tcent, IDs=best_dict['IDs'])
        jdict = ltu.jsonify(out_dict)
        ltu.savejson(outroot + '.json',
                     jdict,
                     easy_to_read=True,
                     overwrite=True)
        print("Wrote: {:s}".format(outroot + '.json'))

    # Plot
    if outroot is not None:
        arcl_plots.match_qa(spec, cut_tcent, best_dict['line_list'],
                            best_dict['IDs'], best_dict['scores'],
                            outroot + '.pdf')
        print("Wrote: {:s}".format(outroot + '.pdf'))

    # Fit
    final_fit = None
    if do_fit:
        # Read in Full NIST Tables
        full_NIST = arcl_io.load_line_lists(lines, NIST=True)
        # KLUDGE!!!!!
        keep = full_NIST['wave'] > 8800.
        line_lists = vstack([line_lists, full_NIST[keep]])
        #
        NIST_lines = line_lists['NIST'] > 0
        ifit = np.where(best_dict['mask'])[0]
        if outroot is not None:
            plot_fil = outroot + '_fit.pdf'
        else:
            plot_fil = None
        # Purge UNKNOWNS from ifit
        imsk = np.array([True] * len(ifit))
        for kk, idwv in enumerate(np.array(best_dict['IDs'])[ifit]):
            if np.min(np.abs(line_lists['wave'][NIST_lines] - idwv)) > 0.01:
                imsk[kk] = False
        ifit = ifit[imsk]
        # Allow for weaker lines in the fit
        all_tcent, weak_cut_tcent, icut = arch_utils.arc_lines_from_spec(
            spec, min_ampl=lowest_ampl)
        add_weak = []
        for weak in weak_cut_tcent:
            if np.min(np.abs(cut_tcent - weak)) > 5.:
                add_weak += [weak]
        if len(add_weak) > 0:
            cut_tcent = np.concatenate([cut_tcent, np.array(add_weak)])
        # Fit
        final_fit = arch_fit.iterative_fitting(spec,
                                               cut_tcent,
                                               ifit,
                                               np.array(
                                                   best_dict['IDs'])[ifit],
                                               line_lists[NIST_lines],
                                               disp,
                                               plot_fil=plot_fil,
                                               verbose=verbose,
                                               aparm=fit_parm)
        if plot_fil is not None:
            print("Wrote: {:s}".format(plot_fil))

    # Return
    return best_dict, final_fit
Example #33
def main(pargs=None):
    """ Run
    Parameters
    ----------
    pargs

    Returns
    -------

    """
    import numpy as np
    from matplotlib import pyplot as plt

    from linetools import utils as ltu

    from arclines import io as arcl_io
    from arclines.holy import utils as arch_utils
    from arclines.holy.grail import general, semi_brute
    from arclines.holy import patterns as arch_patt
    from arclines.holy import fitting as arch_fit

    if pargs.outroot is None:
        pargs.outroot = 'tmp_matches'
    # Defaults

    # Load spectrum
    spec = arcl_io.load_spectrum(pargs.spectrum)
    if pargs.show_spec:
        plt.clf()
        ax = plt.gca()
        ax.plot(spec)
        plt.show()

    # Arc lines
    lines = pargs.lines.split(',')

    # Call brute
    if pargs.brute:
        best_dict, final_fit = semi_brute(spec,
                                          lines,
                                          pargs.wvcen,
                                          pargs.disp,
                                          min_ampl=pargs.min_ampl,
                                          debug=pargs.debug,
                                          outroot=pargs.outroot,
                                          do_fit=pargs.fit,
                                          verbose=True)
        #best_dict, final_fit = grail.semi_brute(spec, lines, wv_cen, disp, siglev=siglev,
        #                                        min_ampl=min_ampl, min_nmatch=min_match, outroot=outroot)
    else:
        best_dict, final_fit = general(spec,
                                       lines,
                                       do_fit=pargs.fit,
                                       verbose=True,
                                       debug=pargs.debug,
                                       min_ampl=pargs.min_ampl,
                                       outroot=pargs.outroot)
    if pargs.debug:
        pdb.set_trace()

    if pargs.fit:
        ltu.savejson(pargs.outroot + '_fit.json',
                     ltu.jsonify(final_fit),
                     easy_to_read=True,
                     overwrite=True)
Example #34
def save_masters(slf, det, mftype='all'):
    """ Save Master Frames
    Parameters
    ----------
    slf
    det : int
    mftype : str
      'all' -- Save them all
    
    """
    from linetools import utils as ltu
    setup = slf.setup

    transpose = bool(settings.argflag['trace']['dispersion']['direction'])

    # Bias
    if (mftype in ['bias', 'all']) and \
            ('bias' + setup not in settings.argflag['reduce']['masters']['loaded']):
        if not isinstance(slf._msbias[det - 1], str):  # only save if this is an image, not a string specifier
            arsave.save_master(slf,
                               slf._msbias[det - 1],
                               filename=master_name('bias', setup),
                               frametype='bias')
    # Bad Pixel
    if (mftype in ['badpix', 'all']) and \
            ('badpix' + setup not in settings.argflag['reduce']['masters']['loaded']):
        arsave.save_master(slf,
                           slf._bpix[det - 1],
                           filename=master_name('badpix', setup),
                           frametype='badpix')
    # Trace
    if (mftype in ['trace', 'all']) and \
            ('trace' + setup not in settings.argflag['reduce']['masters']['loaded']):
        extensions = [
            slf._lordloc[det - 1], slf._rordloc[det - 1], slf._pixcen[det - 1],
            slf._pixwid[det - 1], slf._lordpix[det - 1], slf._rordpix[det - 1],
            slf._slitpix[det - 1]
        ]
        names = [
            'LeftEdges_det', 'RightEdges_det', 'SlitCentre', 'SlitLength',
            'LeftEdges_pix', 'RightEdges_pix', 'SlitPixels'
        ]
        arsave.save_master(slf,
                           slf._mstrace[det - 1],
                           filename=master_name('trace', setup),
                           frametype='trace',
                           extensions=extensions,
                           names=names)
    # Pixel Flat
    if (mftype in ['normpixelflat', 'all']) and \
            ('normpixelflat' + setup not in settings.argflag['reduce']['masters']['loaded']):
        arsave.save_master(slf,
                           slf._mspixelflatnrm[det - 1],
                           filename=master_name('normpixelflat', setup),
                           frametype='normpixelflat')
    # Pinhole Flat
    if (mftype in ['pinhole', 'all']) and \
            ('pinhole' + setup not in settings.argflag['reduce']['masters']['loaded']):
        arsave.save_master(slf,
                           slf._mspinhole[det - 1],
                           filename=master_name('pinhole', setup),
                           frametype='pinhole')
    # Arc/Wave
    if (mftype in ['arc', 'all']) and \
            ('arc' + setup not in settings.argflag['reduce']['masters']['loaded']):
        arsave.save_master(slf,
                           slf._msarc[det - 1],
                           filename=master_name('arc', setup),
                           frametype='arc',
                           keywds=dict(transp=transpose))
    if (mftype in ['wave', 'all']) and \
            ('wave' + setup not in settings.argflag['reduce']['masters']['loaded']):
        # Wavelength image
        arsave.save_master(slf,
                           slf._mswave[det - 1],
                           filename=master_name('wave', setup),
                           frametype='wave')
        # Wavelength fit
        gddict = ltu.jsonify(slf._wvcalib[det - 1])
        json_file = master_name('wv_calib', setup)
        if gddict is not None:
            ltu.savejson(json_file, gddict, easy_to_read=True, overwrite=True)
        else:
            msgs.warn("The master wavelength solution has not been saved")
    # Tilts
    if (mftype in ['tilts', 'all']) and \
            ('tilts' + setup not in settings.argflag['reduce']['masters']['loaded']):
        arsave.save_master(slf,
                           slf._tilts[det - 1],
                           filename=master_name('tilts', setup),
                           frametype='tilts')
    # Spatial slit profile
    if (mftype in ['slitprof', 'all']) and \
            ('slitprof' + setup not in settings.argflag['reduce']['masters']['loaded']):
        arsave.save_master(slf,
                           slf._slitprof[det - 1],
                           filename=master_name('slitprof', setup),
                           frametype='slit profile')
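
Since the wavelength solution is written with ltu.savejson after ltu.jsonify, it can be read back with ltu.loadjson. A minimal round-trip sketch, assuming a hypothetical output filename (the real one comes from master_name('wv_calib', setup)):

    from linetools import utils as ltu

    # Hypothetical filename, for illustration only
    wv_calib = ltu.loadjson('MasterWaveCalib_A_01.json')
    # loadjson returns plain dicts/lists, i.e. the jsonify-ed form of slf._wvcalib[det - 1]
    print(wv_calib.keys())
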
Example #35
def general(spec, lines, min_ampl=300.,
            outroot=None, debug=False, do_fit=True, verbose=False,
            fit_parm=None, lowest_ampl=200.):
    """
    Parameters
    ----------
    spec
    lines
    siglev
    min_ampl
    outroot
    debug
    do_fit
    verbose
    fit_parm
    min_nmatch
    lowest_ampl

    Returns
    -------
    best_dict : dict
    final_fit : dict

    """
    # imports
    from astropy.table import vstack
    from scipy.ndimage import gaussian_filter  # used to smooth the voting histogram below
    from linetools import utils as ltu
    from arclines import plots as arcl_plots

    # Import the triangles algorithm
    from arclines.holy.patterns import triangles

    # Load line lists
    line_lists = arcl_io.load_line_lists(lines)
    unknwns = arcl_io.load_unknown_list(lines)

    npix = spec.size

    # Lines
    all_tcent, cut_tcent, icut = arch_utils.arc_lines_from_spec(spec, min_ampl=min_ampl)
    use_tcent = all_tcent.copy()
    #use_tcent = cut_tcent.copy()  # min_ampl has no effect at present

    # Best
    best_dict = dict(nmatch=0, ibest=-1, bwv=0., min_ampl=min_ampl)

    ngrid = 1000

    # Loop on unknowns
    for unknown in [False, True]:
        if unknown:
            tot_list = vstack([line_lists,unknwns])
        else:
            tot_list = line_lists
        wvdata = np.array(tot_list['wave'].data)  # Removes mask if any
        wvdata.sort()

        sav_nmatch = best_dict['nmatch']

        # Loop on pix_tol
        for pix_tol in [1.]:  # could also try 2.
            # Triangle pattern matching
            dindex, lindex, wvcen, disps = triangles(use_tcent, wvdata, npix, 5, 10, pix_tol)

            # Remove any invalid results
            ww = np.where((wvcen > 0.0) & (disps > 0.0))
            dindex = dindex[ww[0], :]
            lindex = lindex[ww[0], :]
            disps = disps[ww]
            wvcen = wvcen[ww]

            # Setup the grids and histogram
            binw = np.linspace(max(np.min(wvcen), np.min(wvdata)), min(np.max(wvcen), np.max(wvdata)), ngrid)
            bind = np.linspace(np.min(np.log10(disps)), np.max(np.log10(disps)), ngrid)
            histimg, xed, yed = np.histogram2d(wvcen, np.log10(disps), bins=[binw, bind])
            histimg = gaussian_filter(histimg, 3)

            # Find the best combination of central wavelength and dispersion
            bidx = np.unravel_index(np.argmax(histimg), histimg.shape)

            if debug:
                from matplotlib import pyplot as plt
                plt.clf()
                plt.imshow(histimg[:, ::-1].T, extent=[binw[0], binw[-1], bind[0], bind[-1]], aspect='auto')
                plt.axvline(binw[bidx[0]], color='r', linestyle='--')
                plt.axhline(bind[bidx[1]], color='r', linestyle='--')
                plt.show()
                print(histimg[bidx], binw[bidx[0]], 10.0**bind[bidx[1]])
                pdb.set_trace()

            # Find all good solutions
            nsel = 5  # Select solutions near the best: within nsel bins in wavelength, 5*nsel in log10(dispersion)
            wlo = binw[bidx[0] - nsel]
            whi = binw[bidx[0] + nsel]
            dlo = 10.0 ** bind[bidx[1] - 5 * nsel]
            dhi = 10.0 ** bind[bidx[1] + 5 * nsel]
            wgd = np.where((wvcen > wlo) & (wvcen < whi) & (disps > dlo) & (disps < dhi))
            dindex = dindex[wgd[0], :].flatten()
            lindex = lindex[wgd[0], :].flatten()

            # Given this solution, fit for all detlines
            arch_patt.solve_triangles(use_tcent, wvdata, dindex, lindex, best_dict)
            if best_dict['nmatch'] > sav_nmatch:
                best_dict['pix_tol'] = pix_tol

        # Save linelist?
        if best_dict['nmatch'] > sav_nmatch:
            best_dict['bwv'] = binw[bidx[0]]
            best_dict['bdisp'] = 10.0**bind[bidx[1]]
            best_dict['line_list'] = tot_list.copy()
            best_dict['unknown'] = unknown
            best_dict['ampl'] = unknown

    # Try to pick up some extras by turning off/on unknowns
    if best_dict['unknown']:
        tot_list = line_lists
    else:
        tot_list = vstack([line_lists,unknwns])

    # Retrieve the wavelengths of the linelist and sort
    wvdata = np.array(tot_list['wave'].data)  # Removes mask if any
    wvdata.sort()

    if best_dict['nmatch'] == 0:
        print('---------------------------------------------------')
        print('Report:')
        print('::   No matches! Try another algorithm')
        print('---------------------------------------------------')
        return best_dict, None  # two values, so callers can still unpack

    # Report
    print('---------------------------------------------------')
    print('Report:')
    print('::   Number of lines recovered    = {:d}'.format(all_tcent.size))
    print('::   Number of lines analyzed     = {:d}'.format(use_tcent.size))
    print('::   Number of acceptable matches = {:d}'.format(best_dict['nmatch']))
    print('::   Best central wavelength      = {:g}A'.format(best_dict['bwv']))
    print('::   Best dispersion              = {:g}A/pix'.format(best_dict['bdisp']))
    print('::   Best solution used pix_tol   = {}'.format(best_dict['pix_tol']))
    print('::   Best solution had unknown    = {}'.format(best_dict['unknown']))
    print('---------------------------------------------------')

    # Write IDs
    if outroot is not None:
        out_dict = dict(pix=use_tcent, IDs=best_dict['IDs'])
        jdict = ltu.jsonify(out_dict)
        ltu.savejson(outroot+'.json', jdict, easy_to_read=True, overwrite=True)
        print("Wrote: {:s}".format(outroot+'.json'))

    # Plot
    if outroot is not None:
        tmp_list = vstack([line_lists, unknwns])
        arcl_plots.match_qa(spec, use_tcent, tmp_list,
                            best_dict['IDs'], best_dict['scores'], outroot+'.pdf')
        print("Wrote: {:s}".format(outroot+'.pdf'))

    # Fit
    final_fit = None
    if do_fit:
        # Good lines = NIST or OH
        good_lines = np.any([line_lists['NIST']>0, line_lists['ion'] == 'OH'], axis=0)
        #
        ifit = np.where(best_dict['mask'])[0]
        if outroot is not None:
            plot_fil = outroot+'_fit.pdf'
        else:
            plot_fil = None
        # Purge UNKNOWNS from ifit
        imsk = np.array([True]*len(ifit))
        for kk, idwv in enumerate(np.array(best_dict['IDs'])[ifit]):
            if np.min(np.abs(line_lists['wave'][good_lines]-idwv)) > 0.01:
                imsk[kk] = False
        ifit = ifit[imsk]
        # Allow for weaker lines in the fit
        all_tcent, weak_cut_tcent, icut = arch_utils.arc_lines_from_spec(spec, min_ampl=lowest_ampl)
        use_weak_tcent = all_tcent.copy()
        add_weak = []
        for weak in use_weak_tcent:
            if np.min(np.abs(use_tcent-weak)) > 5.:
                add_weak += [weak]
        if len(add_weak) > 0:
            use_tcent = np.concatenate([use_tcent, np.array(add_weak)])
        # Fit
        final_fit = arch_fit.iterative_fitting(spec, use_tcent, ifit,
                                               np.array(best_dict['IDs'])[ifit], line_lists[good_lines],
                                               best_dict['bdisp'], plot_fil=plot_fil, verbose=verbose,
                                               aparm=fit_parm)
        if plot_fil is not None:
            print("Wrote: {:s}".format(plot_fil))

    # Return
    return best_dict, final_fit
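
The core of general() is the 2D voting histogram: each triangle match votes for a (central wavelength, log10 dispersion) pair, the histogram is smoothed, and its peak picks the best solution. A self-contained sketch of that peak-finding step on synthetic votes (the numbers here are made up for illustration):

    import numpy as np
    from scipy.ndimage import gaussian_filter

    # 200 votes clustered near a "true" solution of 5000 Ang, 1.0 Ang/pix, plus 50 outliers
    rng = np.random.RandomState(42)
    wvcen = np.concatenate([rng.normal(5000., 5., 200), rng.uniform(4000., 6000., 50)])
    logd = np.concatenate([rng.normal(0.0, 0.01, 200), rng.uniform(-0.3, 0.3, 50)])

    ngrid = 1000
    binw = np.linspace(4000., 6000., ngrid)    # wavelength grid
    bind = np.linspace(-0.3, 0.3, ngrid)       # log10(dispersion) grid
    histimg, _, _ = np.histogram2d(wvcen, logd, bins=[binw, bind])
    histimg = gaussian_filter(histimg, 3)      # smooth so nearby votes reinforce each other
    bidx = np.unravel_index(np.argmax(histimg), histimg.shape)
    print(binw[bidx[0]], 10.0**bind[bidx[1]])  # recovers ~5000. and ~1.0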