Example #1
 def table(self, filter_homogenous=False):
     """Return a text table for this datagroup. See :meth:`output`.
     
     .. note:: This method returns an :class:`astropy.table.Table` object containing only the desired header keywords, plus ``Name`` and ``N`` columns.
     
     """
     from astropy.table import Table, Column
             
     if filter_homogenous:
         groups = [ group for group in self if not isinstance(group, ListFITSDataGroup) ]
         list_groups = []
     else:
         groups = [ group for group in self if not isinstance(group, ListFITSDataGroup) ]
         list_groups = [ group for group in self if isinstance(group, ListFITSDataGroup) ]
         
     groups.sort(key=lambda g : g.name)
     
     result = Table([ group.keylist for group in groups ], names=list(map(str, self.keywords)))
     
     name_column = Column(name=str("Name"), data=[ group.name for group in groups ])
     result.add_column(name_column, index=0)
     
     number_column = Column(name=str("N"), data=[ len(group) for group in groups ])
     result.add_column(number_column)
     
     for lgroup in list_groups:
         result.add_row({"Name":lgroup.name, "N":len(lgroup)})
     
     return result
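A minimal, self-contained sketch of the pattern this method relies on: build a Table from keyword columns, prepend a Name column at index 0, and append a count column. The group names and the EXPTIME keyword below are made up for illustration.

from astropy.table import Table, Column

result = Table([[30.0, 60.0]], names=['EXPTIME'])
result.add_column(Column(name='Name', data=['groupA', 'groupB']), index=0)
result.add_column(Column(name='N', data=[3, 5]))
print(result)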
Example #2
def get_feature_importances(data_table, obs_metadata, lines_table, use_con_flux=False):
    feature_importances_list = []
    X_colnames = None
    for line_name, line_wavelength in lines_table['source', 'wavelength_target']:
        subset = data_table[(data_table['source'] == line_name) & (data_table['wavelength_target'] == line_wavelength)]
        X, y, labels = get_X_and_y(subset, obs_metadata, use_con_flux)
        if X_colnames is None:
            X_colnames = X.colnames

        params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
                'learning_rate': 0.01, 'loss': 'lad'}
        clf = ensemble.GradientBoostingRegressor(**params)
        X = ndarrayidze(X)

        # Scaling is optional, but do it (for now) for all methods, so that
        # comparisons between values here and with e.g. ICA show fewer diffs
        X = skpp.scale(X)
        y = skpp.scale(y)

        clf.fit(X, y)
        feature_importances_list.append(clf.feature_importances_)

    fi = np.array(feature_importances_list)
    fi_table = Table(fi, names = X_colnames)
    fi_table.add_column(lines_table['source'])
    fi_table.add_column(lines_table['wavelength_target'])

    return fi_table
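For context, a self-contained sketch of the fitting step on synthetic data. Note that the parameters above target an older scikit-learn: current releases require min_samples_split >= 2 and renamed loss='lad' to loss='absolute_error'. All data below is invented.

import numpy as np
from sklearn import ensemble
import sklearn.preprocessing as skpp

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 4))
y = 3.0 * X[:, 0] - 2.0 * X[:, 2] + rng.normal(scale=0.1, size=200)

# Scale features and target, as in the function above
X = skpp.scale(X)
y = skpp.scale(y)

clf = ensemble.GradientBoostingRegressor(
    n_estimators=100, max_depth=4, min_samples_split=2,
    learning_rate=0.1, loss='absolute_error')
clf.fit(X, y)
print(clf.feature_importances_)  # features 0 and 2 should dominate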
Example #3
def print_table(fname):
    tab = pickle.load(open(fname, 'rb'))
    data = Table()
    data.add_column(
        Column(data=processes_pretty, name='process')
    )
    for rowname, row in zip([t[0] for t in tab], tab):
        data.add_column(
            Column(data=row[1:], name=rownames[rowname])
        )
        data[rownames[rowname]].format = '%.0f'

    class MyLatex(ascii.Latex):
        def __init__(self, **kwargs):
            ascii.Latex.__init__(self, **kwargs)
            #self.header.data = ['process'] + order
            self.latex['header_start'] = r'\hline'
            self.latex['header_end'] = r'\hline'
            self.latex['preamble'] = r'\begin{center}'
            self.latex['tablefoot'] = r'\end{center}'
            self.latex['data_end'] = r'\hline'
            self.header.comment = r"\%Generated by src/yields/print_tables.py"

    print r"% BEGIN AUTOGENERATED, NOT FOR CHANGING"

    print r"% Generated by src/yields/print_tables.py on " + str(datetime.datetime.utcnow())
    print r"% Hostname: " + socket.gethostname() + " User: "******"% Input file:  " + fname
    ascii.write(data, Writer=MyLatex, latexdict = {'col_align':'|c|cccccc|c|'})
    print r"% END AUTOGENERATED"
Example #4
def test_load_lc_fits():
    dt = 0.1
    n_seg = 5
    n_seconds = 100
    output_file = 'out.fits'

    ref_lc = np.arange(0, n_seconds, dt)
    ci_lc = np.array(np.array_split(np.arange(0, n_seconds*n_seg, dt), n_seg))

    n_bins = len(ref_lc)

    lightcurves = Table()
    lightcurves.add_column(Column(name='REF', data=ref_lc.T/dt))
    lightcurves.add_column(Column(name='CI', data=ci_lc.T/dt))
    lightcurves.meta['N_BINS'] = n_bins
    lightcurves.meta['DT'] = dt
    lightcurves.meta['N_SEG'] = n_seg
    lightcurves.meta['NSECONDS'] = n_seconds
    lightcurves.write(output_file, format='fits', overwrite=True)

    ref, ci, meta = spec.load_lc_fits(output_file, counts_type=False)
    remove(output_file)

    assert np.all(ref_lc == ref)
    assert np.all(ci_lc == ci)
    assert meta['N_BINS'] == n_bins
    assert meta['DT'] == dt
    assert meta['N_SEG'] == n_seg
    assert meta['NSECONDS'] == n_seconds
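The core of this test is the FITS round trip of table data plus metadata. A minimal sketch without the project-specific loader (spec.load_lc_fits is the code under test):

import numpy as np
from astropy.table import Table, Column

t = Table()
t.add_column(Column(name='REF', data=np.arange(10, dtype=float)))
t.meta['DT'] = 0.1  # short meta keys map to FITS header keywords
t.write('out.fits', format='fits', overwrite=True)

t2 = Table.read('out.fits', format='fits')
assert np.all(t2['REF'] == t['REF'])
assert t2.meta['DT'] == 0.1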
Example #5
def do_surf_phot(img_list=imglist, aperture_radii=np.arange(1,20,1)*u.arcsec):
    ''' Do aperture photometry on a list of images in a bunch of apertures
    input: img_list: list of images
           aperture_radii: apertures to use (same for all)

    output: astropy table with photometry for all
    '''

    # make an empty table to hold the output
    rcol = Column(aperture_radii.value, name=('r_arcsec'))
    phot_tab = Table([rcol])

    # loop over the images
    for (img_num,img_name) in enumerate(img_list):

        img = fits.open(img_name)
        photlist = multi_ap_phot(img, glx_ctr, aperture_radii, sb=True)

        # calibrate photometry: TODO
#        obs_val = out_tab['aperture_sum'][0] * out_tab['aperture_sum'].unit
#        cal_phot = calib_phot(obs_val, img, output_units='MJy')

        phot_tab.add_column(Column(photlist, name=img_name))
    # done loop over images

    return phot_tab
Example #6
def test_read_write_memory(tmpdir):
    with h5py.File('test', 'w', driver='core', backing_store=False) as output_file:
        t1 = Table()
        t1.add_column(Column(name='a', data=[1, 2, 3]))
        t1.write(output_file, path='the_table')
        t2 = Table.read(output_file, path='the_table')
        assert np.all(t2['a'] == [1, 2, 3])
Example #7
def test_from_table_without_mask():
    from astropy.table import Table, Column
    t = Table()
    c = Column(data=[1, 2, 3], name='a')
    t.add_column(c)
    output = io.BytesIO()
    t.write(output, format='votable')
Example #8
    def _get_uncertainties(self, star_group_size):
        """
        Retrieve uncertainties on fitted parameters from the fitter
        object.

        Parameters
        ----------
        star_group_size : int
            Number of stars in the given group.

        Returns
        -------
        unc_tab : `~astropy.table.Table`
            Table which contains uncertainties on the fitted parameters.
            The uncertainties are reported as one standard deviation.
        """

        unc_tab = Table()
        for param_name in self.psf_model.param_names:
            if not self.psf_model.fixed[param_name]:
                unc_tab.add_column(Column(name=param_name + "_unc",
                                          data=np.empty(star_group_size)))

        if 'param_cov' in self.fitter.fit_info.keys():
            if self.fitter.fit_info['param_cov'] is not None:
                k = 0
                n_fit_params = len(unc_tab.colnames)
                for i in range(star_group_size):
                    unc_tab[i] = np.sqrt(np.diag(
                                          self.fitter.fit_info['param_cov'])
                                         )[k: k + n_fit_params]
                    k = k + n_fit_params
        return unc_tab
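A hedged sketch of where fit_info['param_cov'] comes from: astropy's LevMarLSQFitter stores the parameter covariance matrix after a fit, and the one-standard-deviation uncertainties are the square roots of its diagonal. The Gaussian data below is synthetic.

import numpy as np
from astropy.modeling import models, fitting

x = np.linspace(-5., 5., 200)
y = 3. * np.exp(-0.5 * (x - 1.) ** 2) + np.random.default_rng(0).normal(0., 0.05, x.size)

fitter = fitting.LevMarLSQFitter()
fit = fitter(models.Gaussian1D(amplitude=1., mean=0., stddev=1.), x, y)
cov = fitter.fit_info['param_cov']  # None if the fit failed
print(np.sqrt(np.diag(cov)))  # 1-sigma uncertainties: amplitude, mean, stddev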
Example #9
def test_write_nopath(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(ValueError) as exc:
        t1.write(test_file)
    assert exc.value.args[0] == "table path should be set via the path= argument"
Example #10
def test_read_nopath(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table')
    t2 = Table.read(test_file)
    assert np.all(t1['a'] == t2['a'])
Example #11
def test_write_invalid_path(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(ValueError) as exc:
        t1.write(test_file, path='test/')
    assert exc.value.args[0] == "table path should end with table name, not /"
Example #12
    def get_table(self, **kwargs):

        D = self.lineLum(**kwargs)

        # remap names to match RADEX
        name_mapping = {'upper':'upperlevel',
                        'lower':'lowerlevel',
                        'freq':'frequency',}


        names = D[0].keys()
        T = Table(names=[name_mapping[n]
                         if n in name_mapping
                         else n
                         for n in names],
                  dtype=[type(D[0][k]) for k in names])
    
        for row in D:
            T.add_row([row[k] for k in names])

        T.add_column(Column(name='upperlevelpop',
                            data=self.upperlevelpop,
                            dtype='float'))
        T.add_column(Column(name='lowerlevelpop',
                            data=self.lowerlevelpop,
                            dtype='float'))

        return T
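The construction above (an empty Table with names and dtypes, filled row by row from dicts) works standalone. A sketch with made-up line data, using the same RADEX-style name mapping:

from astropy.table import Table

D = [{'upper': 2, 'lower': 1, 'freq': 115.27},
     {'upper': 3, 'lower': 2, 'freq': 230.54}]
name_mapping = {'upper': 'upperlevel', 'lower': 'lowerlevel', 'freq': 'frequency'}
names = list(D[0].keys())
T = Table(names=[name_mapping.get(n, n) for n in names],
          dtype=[type(D[0][k]) for k in names])
for row in D:
    T.add_row([row[k] for k in names])
print(T)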
Example #13
def save_fluxes(model_fluxes, model_parameters, filters, names, filename,
                directory=OUT_DIR, out_format='ascii.commented_header'):
    """Save fluxes and associated parameters into a table.

    Parameters
    ----------
    model_fluxes: RawArray
        Contains the fluxes of each model.
    model_parameters: RawArray
        Contains the parameters associated to each model.
    filters: list
        Contains the filter names.
    names: List
        Contains the parameters names.
    filename: str
        Name under which the file should be saved.
    directory: str
        Directory under which the file should be saved.
    out_format: str
        Format of the output file

    """
    out_fluxes = np.ctypeslib.as_array(model_fluxes[0])
    out_fluxes = out_fluxes.reshape(model_fluxes[1])

    out_params = np.ctypeslib.as_array(model_parameters[0])
    out_params = out_params.reshape(model_parameters[1])

    out_table = Table(np.hstack((out_fluxes, out_params)),
                      names=filters + list(names))

    out_table.add_column(Column(np.arange(model_fluxes[1][0]), name='id'),
                         index=0)

    out_table.write("{}/{}".format(directory, filename), format=out_format)
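The (buffer, shape) convention used for model_fluxes and model_parameters pairs a multiprocessing RawArray with its intended shape. A minimal sketch of that view trick, with invented sizes:

from multiprocessing.sharedctypes import RawArray
import numpy as np

raw = RawArray('d', 6)                # shared, lock-free buffer of 6 doubles
model_fluxes = (raw, (2, 3))          # (buffer, shape), as save_fluxes expects
out = np.ctypeslib.as_array(model_fluxes[0]).reshape(model_fluxes[1])
out[:] = np.arange(6.).reshape(2, 3)  # writes go straight to shared memory
print(out)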
Example #14
def compute_Q_analytical(component_table):
    """Compute Q factors analytically.
    """
    q_table = Table()
    q_table.add_column(Column(data=component_table['Name'], name='Q_AB'))
    
    Q_all_list = ['All others']
      
    for j in range(len(component_table)):
        # Get parameters A
        row_A = component_table[j]
        x_A, y_A, sigma_A, N_A = row_A['GLON'], row_A['GLAT'], row_A['Sigma'], row_A['Norm']
         
        # Compute Q_factor all others
        # Exclude component A by index (comparing Rows with == is elementwise)
        components_all = [[row['GLON'], row['GLAT'], row['Sigma'], row['Norm']] for k, row in enumerate(component_table) if k != j]
        Q_All = compute_Q_from_components([[x_A, y_A, sigma_A, N_A]], components_all)
        Q_all_list.append(Q_All)
        
        # Compute Q factors pairwise
        Q_AB_list = np.zeros(len(component_table))
        for i, row_B in enumerate(component_table):
            # Get parameters B
            x_B, y_B, sigma_B, N_B = row_B['GLON'], row_B['GLAT'], row_B['Sigma'], row_B['Norm']
            
            # Compute Q_factor
            Q_AB = Q_factor_analytical(sigma_A, sigma_B, x_A, y_A, x_B, y_B)
            Q_AB_list[i] = Q_AB
        q_table.add_column(Column(data=Q_AB_list, name=row_A['Name']))
    q_table.add_row(Q_all_list)    
    return q_table
Example #15
    def ions(self, iZion, Ej=0., skip_null=False):
        """
        Generate a Table of columns and so on
        Restrict to those systems where flg_clm > 0

        Parameters
        ----------
        iZion : tuple
           Z, ion   e.g. (6,4) for CIV
        Ej : float [1/cm]
           Energy of the lower level (0. is resonance)
        skip_null : boolean (False)
           Skip systems without an entry, else pad with zeros 

        Returns
        -------
        Table of values for the Survey
        """
        if len(self.abs_sys()[0]._ionN) == 0:
            raise IOError("ionN table not set.  Use fill_ionN")
        #
        keys = [u'name', ] + self.abs_sys()[0]._ionN.keys()
        t = Table(self.abs_sys()[0]._ionN[0:1]).copy()   # Avoids mixin trouble
        t.add_column(Column(['dum'], name='name', dtype='<U32'))
        t = t[keys]
        if 'Ej' not in keys:
            warnings.warn("Ej not in your ionN table.  Ignoring. Be careful..")

        # Loop on systems (Masked)
        for abs_sys in self.abs_sys():
            # Grab
            if 'Ej' in keys:
                mt = ((abs_sys._ionN['Z'] == iZion[0])
                      & (abs_sys._ionN['ion'] == iZion[1])
                      & (abs_sys._ionN['Ej'] == Ej))
            else:
                mt = ((abs_sys._ionN['Z'] == iZion[0])
                      & (abs_sys._ionN['ion'] == iZion[1]))
            if np.sum(mt) == 1:
                irow = abs_sys._ionN[mt]
                # Cut on flg_clm
                if irow['flag_N'] > 0:
                    row = [abs_sys.name] + [irow[key] for key in keys[1:]]
                    t.add_row(row)   # This could be slow
                else:
                    if skip_null is False:
                        row = [abs_sys.name] + [0 for key in keys[1:]]
                        t.add_row(row)
            elif np.sum(mt) == 0:
                if skip_null is False:
                    row = [abs_sys.name] + [0 for key in keys[1:]]
                    t.add_row( row )
                continue
            else:
                raise ValueError("Multple entries")


        # Return
        return t[1:]
Example #16
def test_read_write_existing_overwrite(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    h5py.File(test_file, 'w').close()  # create empty file
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table', overwrite=True)
    t2 = Table.read(test_file, path='the_table')
    assert np.all(t2['a'] == [1, 2, 3])
Example #17
def test_read_invalid_path(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table')
    with pytest.raises(OSError) as exc:
        Table.read(test_file, path='test/')
    assert exc.value.args[0] == "Path test/ does not exist"
Example #18
def test_read_write_existing_table(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table')
    with pytest.raises(OSError) as exc:
        t1.write(test_file, path='the_table', append=True)
    assert exc.value.args[0] == "Table the_table already exists"
Example #19
def test_read_write_existing(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    h5py.File(test_file, 'w').close()  # create empty file
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(OSError) as exc:
        t1.write(test_file, path='the_table')
    assert exc.value.args[0].startswith("File exists:")
Example #20
def test_write_wrong_type():

    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(TypeError) as exc:
        t1.write(1212, path='path/to/data/the_table', format='hdf5')
    assert exc.value.args[0] == ('output should be a string '
                                 'or an h5py File or Group object')
Example #21
def get_asol(info=None):
    """
    Get aspect solution DY, DZ, DTHETA values sampled at 1 ksec intervals
    during all science observations.

    :param info: dict of processing information and outputs
    :returns: aspect solution data (Table)
    """
    start = Time(opt.start)
    stop = Time(opt.stop)

    h5_file = os.path.join(opt.data_root, 'aimpoint_asol_values.h5')
    logger.info('Reading asol file {}'.format(h5_file))
    h5 = tables.open_file(h5_file)
    asol = Table(h5.root.data[:])
    h5.close()

    bad = (asol['dy'] == 0.0) & (asol['dz'] == 0.0)
    asol = asol[~bad]

    year = Column(Time(asol['time'], format='cxcsec').decimalyear, name='year')
    asol.add_column(year, index=0)

    asol.sort('time')

    # Include only points between --start and --stop
    i0, i1 = np.searchsorted(asol['time'], [start.cxcsec, stop.cxcsec])
    asol = asol[i0:i1]

    # Exclude from 10ksec before to 3 days after any normal sun or safe sun intervals.
    normal_suns = events.normal_suns()
    safe_suns = events.safe_suns()
    normal_suns.interval_pad = 10000, 86400 * 3
    safe_suns.interval_pad = 10000, 86400 * 3
    exclude_intervals = (normal_suns | safe_suns).intervals(asol['time'][0], asol['time'][-1])
    ok = np.ones(len(asol), dtype=bool)
    for date0, date1 in exclude_intervals:
        logger.info('Excluding asol values from {} to {} (normal/safe sun)'.format(date0, date1))
        i0, i1 = np.searchsorted(asol['time'], Time([date0, date1], format='yday').cxcsec)
        ok[i0:i1] = False
    asol = asol[ok]

    if info is not None:
        info['last_ctime'] = Time(asol['time'][-1], format='cxcsec').datetime.ctime()
        info['last_obsid'] = asol['obsid'][-1]

    for date, jump in AIMPOINT_JUMPS.items():
        jump_date = Time(date)
        if jump_date < stop:
            # Make the mean of the "before" interval match the mean of the "after" interval.
            i0 = np.searchsorted(asol['time'], jump_date.cxcsec)
            asol['dy'][:i0] -= jump['d_dy'] / 20.0
            asol['dz'][:i0] -= jump['d_dz'] / 20.0
            # Capture info about jump
            info.setdefault('aimpoint_jumps', {})[date] = jump
            logger.info('Applying aimpoint jump of {} at {}'.format(jump, date))

    return asol
Example #22
 def test_add_masked_row_to_masked_table_mapping4(self):
     # When adding values to a masked table, if the mask is specified as a
     # dict, then keys in values should match keys in mask
     t = Table(masked=True)
     t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
     t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
     with pytest.raises(ValueError) as exc:
         t.add_row({'b': 5}, mask={'a': True})
     assert exc.value.args[0] == 'keys in mask should match keys in vals'
Example #23
def createBiomassTable(species, biomass, inputCount):
    if inputCount == ():
        t = Table([tuple(species), biomass], names = ('species', 'biomass')) #simple table
        return t
    else:
        t = Table([tuple(species), biomass[:, 0]], names = ('species', 'biomass input1'))
        for col in range(inputCount - 1):
            t.add_column(Column(biomass[:, col + 1], name = 'biomass input%d' %(col + 2))) #simple table + more columns
        return t
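Hypothetical usage of the function above with two biomass inputs (species names and values are invented):

import numpy as np

species = ['wolf', 'deer', 'grass']
biomass = np.array([[1.0, 1.2], [10.0, 9.5], [100.0, 80.0]])
t = createBiomassTable(species, biomass, inputCount=2)
print(t.colnames)  # ['species', 'biomass input1', 'biomass input2']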
Example #24
def test_write_csv(tmpdir):
    '''If properly registered, filename should be sufficient to specify format

    #3189
    '''
    t = Table()
    t.add_column(Column(name='a', data=[1, 2, 3]))
    t.add_column(Column(name='b', data=['a', 'b', 'c']))
    path = str(tmpdir.join("data.csv"))
    t.write(path)
Example #25
def csv_per_order(infile, col, outfile):
    '''Rewrite one column in ``aeffRfromraygrid`` to csv file

    Turn one vector-valued (all orders in one cell) column into a
    csv table with one entry per cell.
    '''
    tab = Table.read(infile)
    outtab = Table(tab[col], names=['order_{0}'.format(o) for o in orders_from_meta(tab.meta)])
    outtab.add_column(tab['wave'], index=0)
    outtab.write(outfile, format='ascii.csv', overwrite=True)
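Independent of the instrument-specific helpers (orders_from_meta is project code), the reshaping idea is that a 2-D column splits into one Table column per second-axis entry. A sketch with fake orders:

import numpy as np
from astropy.table import Table

tab = Table({'wave': [1., 2., 3.],
             'aeff': np.arange(12.).reshape(3, 4)})  # vector column: 4 orders per row
outtab = Table(np.asarray(tab['aeff']),
               names=['order_{0}'.format(o) for o in range(4)])
outtab.add_column(tab['wave'], index=0)
print(outtab.colnames)  # ['wave', 'order_0', 'order_1', 'order_2', 'order_3']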
Example #26
 def test_rename_masked_column(self):
     t = Table(masked=True)
     t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
     t['a'].fill_value = 42
     t.rename_column('a', 'b')
     assert t.masked
     assert np.all(t['b'] == np.array([1, 2, 3]))
     assert np.all(t['b'].mask == np.array([0, 1, 0], bool))
     assert t['b'].fill_value == 42
     assert t.colnames == ['b']
Example #27
 def test_add_masked_row_to_masked_table_mismatch(self):
     t = Table(masked=True)
     t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
     t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
     with pytest.raises(TypeError) as exc:
         t.add_row([2, 5], mask={'a': 1, 'b': 0})
     assert exc.value.args[0] == "Mismatch between type of vals and mask"
     with pytest.raises(TypeError) as exc:
         t.add_row({'b': 5, 'a': 2}, mask=[1, 0])
     assert exc.value.args[0] == "Mismatch between type of vals and mask"
Example #28
def test_fail_meta_serialize(tmpdir):

    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.meta['f'] = str

    with pytest.raises(Exception) as err:
        t1.write(test_file, path='the_table', serialize_meta=True)
    assert "cannot represent an object: <class 'str'>" in str(err)
Example #29
 def test_add_masked_row_to_masked_table_mapping1(self):
     t = Table(masked=True)
     t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
     t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
     t.add_row({'b': 5, 'a': 2}, mask={'a': 1, 'b': 0})
     t.add_row({'a': 3, 'b': 6}, mask={'b': 1, 'a': 0})
     assert t.masked
     assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
     assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
     assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
     assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
Example #30
def unify_qso_catalog_mq(qsos):
    qsos = Table(qsos)
    qsos.rename_column('name','NAME_OLD')
    qsos.rename_column("z", 'redshift')
    qsos.add_column(Column(name='objid_mq', data=np.arange(len(qsos))+1))
    qsos.rename_column("FUV", 'mag_fuv')

    # unify name
    name = [give_name(ra,dec) for ra,dec in zip(qsos['ra_d'],qsos['dec_d'])]
    qsos['name'] = name
    return qsos
Example #31
def mk_meta(files,
            ztbl,
            fname=False,
            stype='QSO',
            skip_badz=False,
            mdict=None,
            parse_head=None,
            debug=False,
            chkz=False,
            mtbl_file=None,
            verbose=False,
            specdb=None,
            sdb_key=None,
            **kwargs):
    """ Generate a meta Table from an input list of files

    Parameters
    ----------
    files : list
      List of FITS files
    ztbl : Table
      Table of redshifts.  Must include RA, DEC, ZEM, ZEM_SOURCE
      Used for RA/DEC if fname=False;  then requires SPEC_FILE too
    fname : bool, optional
      Attempt to parse RA/DEC from the file name
      Format must be
      SDSSJ######(.##)+/-######(.#)[x]
        where x cannot be a #. or +/-
    stype : str, optional
      Description of object type (e.g. 'QSO', 'Galaxy', 'SN')
    specdb : SpecDB, optional
      Database object to grab ID values from
      Requires sdb_key
    sdb_key : str, optional
      ID key in SpecDB object
    skip_badz : bool, optional
      Skip spectra without a parseable redshift (using the Myers catalog)
    parse_head : dict, optional
      Parse header for meta info with this dict
    mdict : dict, optional
      Input meta data in dict form e.g.  mdict=dict(INSTR='ESI')
    chkz : bool, optional
      If any sources have no parseable redshift, hit a set_trace
    mtbl_file : str
      Filename of input meta table.  Current allowed extensions are _meta.ascii or _meta.fits
      and they must be readable by Table.read().  The values in this table will overwrite
      any others generated.  Table must include a SPEC_FILE column to link meta data

    Returns
    -------
    meta : Table
      Meta table
    """
    if specdb is not None:
        if sdb_key is None:
            raise IOError("Must specify sdb_key if you are passing in specdb")
    Rdicts = defs.get_res_dicts()
    #
    coordlist = []
    snames = []
    for ifile in files:
        sname = ifile.split('/')[-1]
        snames.append(sname)
        if fname:
            # Starting index
            if 'SDSSJ' in ifile:
                i0 = ifile.find('SDSSJ') + 4
            else:
                i0 = ifile.rfind('J') + 1
            # Find end (ugly)
            for ii in range(i0 + 1, 99999):
                if ifile[ii] in ('0', '1', '2', '3', '4', '5', '6', '7', '8',
                                 '9', '.', '+', '-'):
                    continue
                else:
                    i1 = ii
                    break
            # Deal with .fits
            if ifile[i1 - 1] == '.':
                i1 -= 1
            # Get coord
            try:
                coord = ltu.radec_to_coord(ifile[i0:i1])
            except (UnboundLocalError, ValueError):
                pdb.set_trace()
        else:
            mt = np.where(ztbl['SPEC_FILE'] == sname)[0]
            if len(mt) != 1:
                raise IndexError("NO MATCH FOR {:s}".format(sname))
            coord = ltu.radec_to_coord((ztbl['RA'][mt], ztbl['DEC'][mt]))[0]
        coordlist.append(coord)
    ras = np.array([coord.ra.degree for coord in coordlist])
    decs = np.array([coord.dec.degree for coord in coordlist])
    coords = SkyCoord(ra=ras, dec=decs, unit='deg')

    # Generate maindb Table
    #maindb, tkeys = spbu.start_maindb(private=True)

    # Fill
    meta = Table()
    meta['RA_GROUP'] = coords.ra.deg
    meta['DEC_GROUP'] = coords.dec.deg
    meta['STYPE'] = [str(stype)] * len(meta)

    zem, zsource = spzu.zem_from_radec(meta['RA_GROUP'], meta['DEC_GROUP'],
                                       ztbl, **kwargs)
    badz = zem <= 0.
    if np.sum(badz) > 0:
        if skip_badz:
            warnings.warn(
                "Skipping {:d} entries without a parseable redshift".format(
                    np.sum(badz)))
        else:
            if chkz:  # Turn this on to hit a stop instead of an Exception
                pdb.set_trace()
            else:
                raise ValueError(
                    "{:d} entries without a parseable redshift".format(
                        np.sum(badz)))
    meta['zem_GROUP'] = zem
    meta['sig_zem'] = 0.  # Need to add
    meta['flag_zem'] = zsource
    # Cut
    meta = meta[~badz]

    # specdb IDs
    if sdb_key is not None:
        meta[sdb_key] = [-9999] * len(meta)
        c_igmsp = SkyCoord(ra=specdb.qcat.cat['RA'],
                           dec=specdb.qcat.cat['DEC'],
                           unit='deg')
        c_new = SkyCoord(ra=meta['RA_GROUP'],
                         dec=meta['DEC_GROUP'],
                         unit='deg')
        # Find new sources
        idx, d2d, d3d = match_coordinates_sky(c_new, c_igmsp, nthneighbor=1)
        cdict = defs.get_cat_dict()
        mtch = d2d < cdict['match_toler']
        meta[sdb_key][mtch] = specdb.qcat.cat[sdb_key][idx[mtch]]

    # Stack (primarily as a test)
    '''
    try:
        maindb = vstack([maindb,meta], join_type='exact')
    except:
        pdb.set_trace()
    '''

    # SPEC_FILE
    meta['SPEC_FILE'] = np.array(files)[~badz]
    root_names = np.array(snames)[~badz]

    # Try Header?
    if parse_head is not None:
        # Setup to store
        plist = {}
        for key in parse_head.keys():
            plist[key] = []
        # Loop on files
        for sfile in meta['SPEC_FILE']:
            if verbose:
                print('Parsing {:s}'.format(sfile))
            try:
                head = fits.open(sfile)[0].header
            except FileNotFoundError:  # Try for compressed
                head = fits.open(sfile + '.gz')[0].header
            for key, item in parse_head.items():
                # R
                if key == 'R':
                    if parse_head[key] is True:
                        try:
                            plist[key].append(spbu.set_resolution(head))
                        except ValueError:
                            if mdict is not None:
                                try:
                                    plist[key].append(mdict['R'])
                                except KeyError:
                                    pdb.set_trace()
                            else:
                                pdb.set_trace()
                                plist[key].append(0.)
                    else:
                        raise ValueError("Set something else for R")
                elif key == 'DATE-OBS':
                    if 'MJD' in item:
                        tval = Time(head[item],
                                    format='mjd',
                                    out_subfmt='date')
                    else:
                        tval = Time(head[item].replace('/', '-'),
                                    format='isot',
                                    out_subfmt='date')
                    plist[key].append(tval.iso)
                else:
                    plist[key].append(head[item])
            # INSTRUMENT SPECIFIC
            try:
                instr = head['INSTRUME']
            except KeyError:
                instr = 'none'
            if 'LRIS' in instr:
                if 'DISPERSER' not in plist.keys():
                    plist['DISPERSER'] = []
                    plist['INSTR'] = []
                    plist['R'] = []
                try:
                    det = head['DETECTOR']
                except KeyError:
                    if head['OUTFILE'] == 'lred':
                        det = 'LRIS-R'
                    else:
                        det = 'LRIS-B'
                if 'LRIS-R' in det:
                    plist['DISPERSER'].append(head['GRANAME'])
                    plist['INSTR'].append('LRISr')
                else:
                    plist['DISPERSER'].append(head['GRISNAME'])
                    plist['INSTR'].append('LRISb')
                # Resolution
                res = Rdicts[plist['INSTR'][-1]][plist['DISPERSER'][-1]]
                try:
                    sname = head['SLITNAME']
                except KeyError:
                    swidth = 1.
                else:
                    swidth = defs.slit_width(sname, LRIS=True)
                plist['R'].append(res / swidth)
        # Finish
        for key in plist.keys():
            meta[key] = plist[key]
    # mdict
    if mdict is not None:
        for key, item in mdict.items():
            meta[key] = [item] * len(meta)

    # EPOCH
    if 'EPOCH' not in meta.keys():
        warnings.warn("EPOCH not defined.  Filling with 2000.")
        meta['EPOCH'] = 2000.

    # GROUP ID
    meta['GROUP_ID'] = np.arange(len(meta)).astype(int)

    # Fill in empty columns with warning
    mkeys = meta.keys()
    req_clms = defs.get_req_clms(sdb_key=sdb_key)
    for clm in req_clms:
        if clm not in mkeys:
            if clm not in ['NPIX', 'WV_MIN', 'WV_MAX']:  # File in ingest_spec
                warnings.warn(
                    "Meta Column {:s} not defined.  Filling with DUMMY".format(
                        clm))
                if clm == 'DATE-OBS':
                    meta[clm] = ['9999-1-1'] * len(meta)
                else:
                    meta[clm] = ['DUMMY'] * len(meta)

    # Input meta table
    if mtbl_file is not None:
        # Read
        if '_meta.ascii' in mtbl_file:
            imtbl = Table.read(mtbl_file, format='ascii')
        elif '_meta.fits' in mtbl_file:
            imtbl = Table.read(mtbl_file)
        else:
            raise IOError(
                "Input meta table must have either an ascii or fits extension")
        # Check length
        if len(imtbl) != len(meta):
            raise IOError(
                "Input meta table must have same length as self-generated one")
        # Check for SPEC_FILE
        if 'SPEC_FILE' not in imtbl.keys():
            raise ValueError("Input meta table must include SPEC_FILE column")
        # Loop to get indices
        idx = []
        for row in imtbl:
            imt = np.where(root_names == row['SPEC_FILE'])[0]
            if len(imt) == 0:
                print("No match to spec file {:s}.  Will ignore".format(
                    row['SPEC_FILE']))
            elif len(imt) == 1:
                idx.append(imt[0])
            else:
                raise ValueError(
                    "Two entries with the same SPEC_FILE.  Something went wrong.."
                )
        idx = np.array(idx)
        # Loop on keys
        for key in imtbl.keys():
            # Skip?
            if key in ['SPEC_FILE']:
                continue
            if key in meta.keys():
                pdb.set_trace()
            else:
                # Add Column
                meta.add_column(imtbl[key][idx])
    # Return
    if debug:
        meta[['RA_GROUP', 'DEC_GROUP', 'SPEC_FILE']].pprint(max_width=120)
        pdb.set_trace()
    return meta
Example #32
    def test_cutout_tool_correctness(self):
        # Construct image:
        image_hdu = self.construct_test_image()

        # Construct catalog
        ra = [0] * u.deg  # Center pixel
        dec = [45] * u.deg  # Center pixel
        ids = ["target_1"]

        # Cutout should be 3 by 3:
        cutout_width = cutout_height = [3.0] * u.pix

        catalog = Table(
            data=[ids, ra, dec, cutout_width, cutout_height],
            names=['id', 'ra', 'dec', 'cutout_width', 'cutout_height'])

        # Call cutout tool and extract the first cutout:
        cutout = cutouts_from_fits(image_hdu, catalog)[0]

        # check if values are correct:
        w_orig = WCS(image_hdu.header)
        w_new = WCS(cutout.header)

        for x_new, x_orig in enumerate(range(3, 6)):
            for y_new, y_orig in enumerate(range(3, 6)):
                coords_orig = SkyCoord.from_pixel(x_orig,
                                                  y_orig,
                                                  w_orig,
                                                  origin=0)
                coords_new = SkyCoord.from_pixel(x_new, y_new, w_new, origin=0)

                assert_almost_equal(image_hdu.data[x_orig][y_orig],
                                    cutout.data[x_new][y_new])
                assert_almost_equal(coords_orig.ra.value, coords_new.ra.value)
                assert_almost_equal(coords_orig.dec.value,
                                    coords_new.dec.value)

        # Test for rotation:
        pa = [180] * u.deg
        catalog.add_column(pa, name="cutout_pa")

        # Call cutout tool:
        cutout = cutouts_from_fits(image_hdu, catalog)[0]

        # check if values are correct:
        w_orig = WCS(image_hdu.header)
        w_new = WCS(cutout.header)

        for x_new, x_orig in enumerate(range(5, 2, -1)):
            for y_new, y_orig in enumerate(range(5, 2, -1)):
                coords_orig = SkyCoord.from_pixel(x_orig,
                                                  y_orig,
                                                  w_orig,
                                                  origin=0)
                coords_new = SkyCoord.from_pixel(x_new, y_new, w_new, origin=0)

                assert_almost_equal(image_hdu.data[x_orig][y_orig],
                                    cutout.data[x_new][y_new])
                assert_almost_equal(coords_orig.ra.value, coords_new.ra.value)
                assert_almost_equal(coords_orig.dec.value,
                                    coords_new.dec.value)
Example #33
def fit_SB1_gp(time, rv,rv_err, initial_params, bounds, priors, flux_weighted=False, plot_guess=True,
               freeze_parameters=['r1', 'k', 'sbratio', 'b', 'q', \
   'Pdot', 'T_ld', 'M_ld',\
   'Logg_ld', 'dV0'], emcee_fit=False, draws =1000):
    print('\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('RV fitting procedure with Gaussian Processes')
    print('S. Gill ([email protected])')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n')
    mean_model = _SB1_Model(**initial_params, bounds=bounds)

    time_upsampled = np.linspace(min(time), max(time), len(time) * 1000)
    if plot_guess:
        print('Plotting initial Guess. . .')
        plt.plot(time_upsampled,mean_model.get_value(time_upsampled), 'r--', linewidth=2, \
         label='Starting guess', alpha  = 0.7)

        plt.errorbar(time, rv, yerr=rv_err, fmt='ko')
        plt.legend()
        plt.show()
        return

    ######################
    # Initiate the kernel
    ######################
    #kernel = (terms.RealTerm(log_a=-9.77, log_c=1.91) )
    kernel = (terms.Matern32Term(log_sigma=-2, log_rho=2))

    ########################
    # Initiate the GP model
    ########################
    gp = celerite.GP(kernel, mean=mean_model, fit_mean=True)

    ########################################
    # Freeze parameters we do not wish to fit
    ########################################
    for i in freeze_parameters:
        gp.freeze_parameter('mean:{}'.format(i))

    #############################################
    # Compute and calculate the initial log like
    #############################################
    gp.compute(time, rv_err)

    print('Minimisation of lightcurve with GP')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print("{:>20}: {:.3f}".format('Initial log-like', gp.log_likelihood(rv)))

    ##################
    # Now optimise
    ##################
    initial_params = gp.get_parameter_vector()
    bounds_ = gp.get_parameter_bounds()

    soln = minimize(neg_log_like,
                    initial_params,
                    method="L-BFGS-B",
                    bounds=bounds_,
                    args=(rv, gp))

    ###################
    # Print the output
    ###################
    print("{:>20}: {:.3f} ({} iterations)".format('Final log-like', -soln.fun,
                                                  soln.nit))
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    #for i in range(len(gp.get_parameter_names())):
    #	print('{:.3f}             {}'.format(soln.x[i], gp.get_parameter_names()[i]))

    ############################################################################
    # Now get the best model as a dictionary so we can put it back in and plot
    ############################################################################
    best_params = {}
    for i in range(len(gp.parameter_names)):
        if gp.parameter_names[i][:5] == 'mean:':
            best_params[gp.parameter_names[i][5:]] = gp.parameter_vector[i]
            print('{:>12}:      {:.3f}'.format(gp.parameter_names[i][5:],
                                               gp.parameter_vector[i]))

    #########################
    # Now get the best model
    #########################
    plt.cla()

    plt.errorbar(time, rv, yerr=rv_err, fmt='ko', alpha=0.3)

    # Plot the best model (just to be sure)
    mean_model_better = _SB1_Model(**best_params, bounds=bounds)
    plt.plot(time,mean_model_better.get_value(time), 'b--', linewidth=2, \
      label='Model selected')

    # Plot the variance with upsampled time series
    mu, var = gp.predict(rv, time_upsampled, return_var=True)
    std = np.sqrt(abs(var))
    color = "#ff7f0e"
    plt.fill_between( time_upsampled, \
      mu+std, \
      mu-std, color=color, alpha=0.8, edgecolor="none", label = 'Best fit')

    plt.xlabel('Time')
    plt.ylabel('RV [km/s]')
    plt.legend()
    plt.show()

    if emcee_fit:
        initial = np.array(soln.x)
        ndim, nwalkers = len(initial), 32
        sampler = emcee.EnsembleSampler(nwalkers,
                                        ndim,
                                        log_probability,
                                        args=(gp, rv, priors, flux_weighted),
                                        threads=8)

        print("Running burn-in...")
        p0 = initial + 1e-5 * np.random.randn(nwalkers, ndim)

        burn_in = draws
        width = 30
        start_time = time_.time()
        i, result = [], []
        for i, result in enumerate(sampler.sample(p0, iterations=burn_in)):
            n = int((width + 1) * float(i) / burn_in)
            delta_t = time_.time() - start_time  # time to do float(i) / burn_in of the calculations
            time_incr = delta_t / (float(i + 1) / burn_in)  # seconds per increment
            time_left = time_incr * (1 - float(i) / burn_in)
            m, s = divmod(time_left, 60)
            h, m = divmod(m, 60)
            sys.stdout.write('\r[{0}{1}] {2}% - {3}h:{4}m:{5:.2f}s'.format(
                '#' * n, ' ' * (width - n), 100 * float(i) / burn_in, h, m, s))

        names = gp.get_parameter_names()
        cols = mean_model.get_parameter_names()
        inds = np.array([names.index("mean:" + k) for k in cols])
        samples = sampler.chain[:, int(np.floor(draws * 0.75)):, :].reshape(
            (-1, ndim))
        print(samples.shape, cols)
        corner.corner(samples, labels=gp.get_parameter_names())
        plt.show()

        ########################
        # Now save to fits file
        ########################
        from astropy.table import Table, Column
        chain_file = 'rv_fit.fits'
        try:
            os.remove(chain_file)
        except OSError:
            pass

        t = Table(sampler.flatchain, names=gp.get_parameter_names())
        t.add_column(Column(sampler.flatlnprobability, name='loglike'))
        indices = np.mgrid[0:nwalkers, 0:burn_in]
        step = indices[1].flatten()
        walker = indices[0].flatten()
        t.add_column(Column(step, name='step'))
        t.add_column(Column(walker, name='walker'))
        t.write(chain_file)
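The chain-saving block at the end stands on its own. A sketch with a fake chain (no emcee required), using the same step/walker bookkeeping via np.mgrid:

import numpy as np
from astropy.table import Table, Column

nwalkers, nsteps, ndim = 4, 10, 2
flatchain = np.random.default_rng(1).normal(size=(nwalkers * nsteps, ndim))

t = Table(flatchain, names=['log_sigma', 'log_rho'])
indices = np.mgrid[0:nwalkers, 0:nsteps]  # shape (2, nwalkers, nsteps)
t.add_column(Column(indices[1].flatten(), name='step'))
t.add_column(Column(indices[0].flatten(), name='walker'))
t.write('rv_fit.fits', overwrite=True)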
Example #34
def get_simspec(simspecfile, log=None, nspec=None):
    ''' Get the simspec object

    The simspec table holds the "truth" spectra and the intrinsic properties
    of each object (redshift, noiseless photometry, [OII] flux, etc.).
    (Input spectra to simulate with pixsim.)

    http://desidatamodel.readthedocs.io/en/latest/DESI_SPECTRO_SIM/PIXPROD/NIGHT/simspec-EXPID.html

    Parameters:
        simspecfile (str):
            The filename of the input simspec file
        log (logger, optional):
            Logger for progress messages
        nspec (int, optional):
            Number of spectra to simulate

    '''

    minwave = 3533.
    maxwave = 9913.1
    stepwave = 0.2
    scale = 1.e17
    exptime = 5.  # typical BOSS exposure time in s

    if simspecfile:
        if log:
            log.info('Reading input file {}'.format(simspecfile))
        # create SimSpec object
        simspec = lvmsim.io.read_simspec(simspecfile)
        # number of spectra to simulate from SimSpec
        sim_nspec = simspec.nspec

        # get the spectra and wavelengths arrays for different flavors
        if simspec.flavor == 'arc':
            # - TODO: do we need quickgen to support arcs?  For full pipeline
            # - arcs are used to measure PSF but aren't extracted except for
            # - debugging.
            # - TODO: if we do need arcs, this needs to be redone.
            # - conversion from phot to flux doesn't include throughput,
            # - and arc lines are rebinned to nearest 0.2 A.

            # Create full wavelength and flux arrays for arc exposure
            wave_b = np.array(simspec.wave['b'])
            wave_r = np.array(simspec.wave['r'])
            wave_z = np.array(simspec.wave['z'])
            phot_b = np.array(simspec.phot['b'][0])
            phot_r = np.array(simspec.phot['r'][0])
            phot_z = np.array(simspec.phot['z'][0])
            sim_wave = np.concatenate((wave_b, wave_r, wave_z))
            sim_phot = np.concatenate((phot_b, phot_r, phot_z))
            wavelengths = np.arange(minwave, maxwave, stepwave)
            phot = np.zeros(len(wavelengths))
            for i in range(len(sim_wave)):
                wavelength = sim_wave[i]
                flux_index = np.argmin(abs(wavelength - wavelengths))
                phot[flux_index] = sim_phot[i]
            # Convert photons to flux: following specter conversion method
            dw = np.gradient(wavelengths)
            fibarea = const.pi * (1.07e-2 /
                                  2)**2  # cross-sectional fiber area in cm^2
            hc = scale * const.h * const.c  # convert to erg A
            spectra = (hc * exptime * fibarea * dw * phot) / wavelengths
        else:
            wavelengths = simspec.wave['brz']
            spectra = simspec.flux

        # check there's enough spectra to simulate from what we ask for
        if sim_nspec < nspec:
            log.info("Only {} spectra in input file".format(sim_nspec))
            nspec = sim_nspec
    else:
        # Initialize the output truth table.
        spectra = []
        wavelengths = qsim.source.wavelength_out.to(u.Angstrom).value
        npix = len(wavelengths)
        truth = dict()
        meta = Table()
        truth['OBJTYPE'] = np.zeros(args.nspec, dtype=(str, 10))
        truth['FLUX'] = np.zeros((args.nspec, npix))
        truth['WAVE'] = wavelengths
        jj = list()

        for thisobj in set(true_objtype):
            ii = np.where(true_objtype == thisobj)[0]
            nobj = len(ii)
            truth['OBJTYPE'][ii] = thisobj
            if log:
                log.info('Generating {} template'.format(thisobj))

            # Generate the templates
            if thisobj == 'ELG':
                elg = lvmsim.templates.ELG(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = elg.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_elg,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'LRG':
                lrg = lvmsim.templates.LRG(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = lrg.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_lrg,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'QSO':
                qso = lvmsim.templates.QSO(wave=wavelengths)
                flux, tmpwave, meta1 = qso.make_templates(
                    nmodel=nobj, seed=args.seed, zrange=args.zrange_qso)
            elif thisobj == 'BGS':
                bgs = lvmsim.templates.BGS(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = bgs.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_bgs,
                    rmagrange=args.rmagrange_bgs,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'STD':
                fstd = lvmsim.templates.FSTD(wave=wavelengths)
                flux, tmpwave, meta1 = fstd.make_templates(nmodel=nobj,
                                                           seed=args.seed)
            elif thisobj == 'QSO_BAD':  # use STAR template no color cuts
                star = lvmsim.templates.STAR(wave=wavelengths)
                flux, tmpwave, meta1 = star.make_templates(nmodel=nobj,
                                                           seed=args.seed)
            elif thisobj == 'MWS_STAR' or thisobj == 'MWS':
                mwsstar = lvmsim.templates.MWS_STAR(wave=wavelengths)
                flux, tmpwave, meta1 = mwsstar.make_templates(nmodel=nobj,
                                                              seed=args.seed)
            elif thisobj == 'WD':
                wd = lvmsim.templates.WD(wave=wavelengths)
                flux, tmpwave, meta1 = wd.make_templates(nmodel=nobj,
                                                         seed=args.seed)
            elif thisobj == 'SKY':
                flux = np.zeros((nobj, npix))
                meta1 = Table(dict(REDSHIFT=np.zeros(nobj, dtype=np.float32)))
            elif thisobj == 'TEST':
                flux = np.zeros((args.nspec, npix))
                indx = np.where(wavelengths > 5800.0 - 1E-6)[0][0]
                ref_integrated_flux = 1E-10
                ref_cst_flux_density = 1E-17
                single_line = (np.arange(args.nspec) % 2 == 0).astype(
                    np.float32)
                continuum = (np.arange(args.nspec) % 2 == 1).astype(np.float32)

                for spec in range(nspec):
                    flux[spec, indx] = single_line[
                        spec] * ref_integrated_flux / np.gradient(wavelengths)[
                            indx]  # single line
                    flux[spec] += continuum[
                        spec] * ref_cst_flux_density  # flat continuum

                meta1 = Table(
                    dict(REDSHIFT=np.zeros(args.nspec, dtype=np.float32),
                         LINE=wavelengths[indx] *
                         np.ones(args.nspec, dtype=np.float32),
                         LINEFLUX=single_line * ref_integrated_flux,
                         CONSTFLUXDENSITY=continuum * ref_cst_flux_density))
            else:
                raise RuntimeError('Unknown object type')

            # Pack it in.
            truth['FLUX'][ii] = flux
            meta = vstack([meta, meta1])
            jj.append(ii.tolist())

            # Sanity check on units; templates currently return ergs, not 1e-17 ergs...
            # assert (thisobj == 'SKY') or (np.max(truth['FLUX']) < 1e-6)

        # Sort the metadata table.
        jj = sum(jj, [])
        meta_new = Table()
        for k in range(nspec):
            index = int(np.where(np.array(jj) == k)[0])
            meta_new = vstack([meta_new, meta[index]])
        meta = meta_new

        # Add TARGETID and the true OBJTYPE to the metadata table.
        meta.add_column(
            Column(true_objtype, dtype=(str, 10), name='TRUE_OBJTYPE'))
        meta.add_column(Column(targetids, name='TARGETID'))

        # Rename REDSHIFT -> TRUEZ anticipating later table joins with zbest.Z
        meta.rename_column('REDSHIFT', 'TRUEZ')

    return spectra, wavelengths, nspec
Example #35
def ref_fk4_no_e_fk4(fnout='fk4_no_e_fk4.csv'):
    """
    Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK4
    conversion, with arbitrary equinoxes and epoch of observation.
    """
    import os
    import numpy as np
    import starlink.Ast as Ast
    from astropy.table import Table, Column

    np.random.seed(12345)

    N = 200

    # Sample uniformly on the unit sphere. These will be either the FK4
    # coordinates for the transformation to FK4 (no E-terms), or the FK4
    # (no E-terms) coordinates for the transformation back to FK4.
    ra = np.random.uniform(0., 360., N)
    dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N)))

    # Generate random observation epoch and equinoxes
    obstime = [
        "B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)
    ]

    ra_fk4ne, dec_fk4ne = [], []
    ra_fk4, dec_fk4 = [], []

    for i in range(N):

        # Set up frames for AST
        frame_fk4ne = Ast.SkyFrame(
            'System=FK4-NO-E,Epoch={epoch},Equinox=B1950'.format(
                epoch=obstime[i]))
        frame_fk4 = Ast.SkyFrame(
            'System=FK4,Epoch={epoch},Equinox=B1950'.format(epoch=obstime[i]))

        # FK4 to FK4 (no E-terms)
        frameset = frame_fk4.convert(frame_fk4ne)
        coords = np.degrees(
            frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
        ra_fk4ne.append(coords[0, 0])
        dec_fk4ne.append(coords[1, 0])

        # FK4 (no E-terms) to FK4
        frameset = frame_fk4ne.convert(frame_fk4)
        coords = np.degrees(
            frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
        ra_fk4.append(coords[0, 0])
        dec_fk4.append(coords[1, 0])

    # Write out table to a CSV file
    t = Table()
    t.add_column(Column(name='obstime', data=obstime))
    t.add_column(Column(name='ra_in', data=ra))
    t.add_column(Column(name='dec_in', data=dec))
    t.add_column(Column(name='ra_fk4ne', data=ra_fk4ne))
    t.add_column(Column(name='dec_fk4ne', data=dec_fk4ne))
    t.add_column(Column(name='ra_fk4', data=ra_fk4))
    t.add_column(Column(name='dec_fk4', data=dec_fk4))
    with open(fnout, 'w') as f:
        f.write("# This file was generated with the {0} script, and the reference "
                "values were computed using AST\n".format(
                    os.path.basename(__file__)))
        t.write(f, format='ascii', delimiter=',')
Example #36
def get_acq_table(obsid):

    manvrs = events.manvrs.filter(obsid=obsid)
    if not len(manvrs):
        return None
    manvr = manvrs[0]

    start_time = DateTime(manvr.acq_start).secs
    stop_time = start_time + (60 * 5)
    acq_data = fetch.MSIDset(msids + slot_msids, start_time, stop_time)

    vals = Table([acq_data[col].vals for col in msids], names=msids)
    for field in slot_msids:
        vals.add_column(Column(name=field, data=acq_data[field].vals))
    times = Table([acq_data['AOACASEQ'].times], names=['time'])

    def compress_data(data, dtime):
        return data[data['AOREPEAT'] == '0 '], dtime[data['AOREPEAT'] == '0 ']

    vals, times = compress_data(vals, times)

    # Get the catalog for the stars
    # This is used both to map ACQID to the right slot and
    # to get the star positions to estimate deltas later
    timeline_at_acq = mica.starcheck.starcheck.get_timeline_at_date(manvr.start)
    mp_dir = None if (timeline_at_acq is None) else timeline_at_acq['mp_dir']
    starcheck = mica.starcheck.get_starcheck_catalog(int(obsid), mp_dir=mp_dir)
    if 'cat' not in starcheck:
        raise ValueError('No starcheck catalog found for {}'.format(obsid))
    catalog = Table(starcheck['cat'])
    catalog.sort('idx')
    # Filter the catalog to be just acquisition stars
    catalog = catalog[(catalog['type'] == 'ACQ') | (catalog['type'] == 'BOT')]
    slot_for_pos = [cat_row['slot'] for cat_row in catalog]
    pos_for_slot = dict([(slot, idx) for idx, slot in enumerate(slot_for_pos)])
    # Also, save out the starcheck index for each slot for later
    index_for_slot = dict([(cat_row['slot'], cat_row['idx']) for cat_row in catalog])

    # Estimate the offsets from the expected catalog positions
    dy, dz = deltas_vs_obc_quat(vals, times['time'], catalog)
    for slot in range(0, 8):
        vals.add_column(Column(name='dy{}'.format(slot), data=dy[slot].data))
        vals.add_column(Column(name='dz{}'.format(slot), data=dz[slot].data))
        cat_entry = catalog[catalog['slot'] == slot][0]
        dmag = vals['AOACMAG{}'.format(slot)] - cat_entry['mag']
        vals.add_column(Column(name='dmag{}'.format(slot), data=dmag.data))

    # make a list of dicts of the table
    simple_data = []
    kalm_start = None
    for drow, trow in zip(vals, times):
        if (kalm_start is None) and (drow['AOACASEQ'] == 'KALM'):
            kalm_start = trow['time']
        if (kalm_start is not None) and (trow['time'] > kalm_start + 5):
            continue
        slot_data = {'slots': [],
                     'time': trow['time'],
                     'aorfstr1_slot': slot_for_pos[int(drow['AORFSTR1'])],
                     'aorfstr2_slot': slot_for_pos[int(drow['AORFSTR2'])],
                     }
        for m in msids:
            slot_data[m] = drow[m]
        for slot in range(0, 8):
            row_dict = {'slot': slot,
                        'catpos': pos_for_slot[slot],
                        'index': index_for_slot[slot]}
            for col in per_slot:
                if col not in ['AOACQID']:
                    row_dict[col] = drow['{}{}'.format(col, slot)]
            for col in ['dy', 'dz', 'dmag']:
                row_dict[col] = drow['{}{}'.format(col, slot)]
            row_dict['POS_ACQID'] = drow['AOACQID{}'.format(pos_for_slot[slot])]
            slot_data['slots'].append(row_dict)
        simple_data.append(slot_data)

    return simple_data
Example #37
sds_val = np.array(ap_summed['aperture_sum'])
sds_val = np.flip(sds_val)
#%%
"""
Data save
"""

Savepath = Path('/home/astro_02/AO2019-2/2019-10-24/Specdata')
#%%

table = Table()

wave_length = Column(data=sds_wvlt, name='Wavelength')
Value = Column(data=sds_val, name='Value')

table.add_column(wave_length, index=0)
table.add_column(Value, index=1)

table.write(Savepath / 'NGC676_01.csv', format='ascii.csv', overwrite=True)

#%%
# For re-saving with only the object name changed

#%%
Savepath = Path('/home/astro_02/AO2019-2/2019-10-24/Specdata')
DISPAXIS = 2  # 1 = line = python_axis_1 // 2 = column = python_axis_0
FONTSIZE = 12  # Change it on your computer if you wish.
rcParams.update({'font.size': FONTSIZE})
COMPIMAGE = ppdpath / 'Comp-master.fits'  # Change directory if needed!
LINE_FITTER = LevMarLSQFitter()
Example #38
	def write(self,filename=None,max_rows=None,format="ascii.latex",column_format="{0:.3f}",**kwargs):

		"""
		Outputs the points that make up the design in a nicely formatted table

		:param filename: name of the file to which the table will be saved; if None the contents will be printed
		:type filename: str. or file descriptor

		:param max_rows: maximum number of rows in the table, if smaller than the number of points the different chunks are hstacked (useful if there are too many rows for display)
		:type max_rows: int.

		:param format: passed to the Table.write astropy method
		:type format: str.

		:param column_format: format specifier for the numerical values in the Table
		:type column_format: str.

		:param kwargs: the keyword arguments are passed to astropy.Table.write method
		:type kwargs: dict.

		:returns: the Table instance with the design parameters

		"""

		#Check that there is something to save
		assert hasattr(self,"points"),"There are no points in your design yet!"
		names = [ self.label[p] for p in self.parameters ]
		
		if (max_rows is None) or (max_rows>=len(self)):

			#Build the table
			design_table = Table(self.values,names=names)

			#Add the number column to the left
			design_table.add_column(Column(data=range(1,len(self)+1),name=r"$N$"),index=0)

		else:

			#Figure out the splitting
			num_chunks = len(self) // max_rows
			if len(self)%max_rows!=0:
				num_chunks+=1

			#Construct the list of tables to hstack
			design_table = list()

			#Cycle through the chunks and create the sub-tables
			for n in range(num_chunks-1):

				columns = self.values[n*max_rows:(n+1)*max_rows]

				#Build the sub-table
				design_table.append(Table(columns,names=names))

				#Add the number column to the left
				design_table[-1].add_column(Column(data=range(n*max_rows+1,(n+1)*max_rows+1),name=r"$N$"),index=0)

			#Create the last sub-table
			columns = self.values[(num_chunks-1)*max_rows:]
			design_table.append(Table(columns,names=names))
			design_table[-1].add_column(Column(data=range((num_chunks-1)*max_rows+1,len(self)+1),name=r"$N$"),index=0)

			#hstack in a single table
			design_table = hstack(design_table)


		#Tune the format
		for colname in design_table.colnames:
			if not np.issubdtype(design_table.dtype[colname], np.integer):
				design_table[colname].format = column_format

		#Write the table or return it
		if filename is not None:
			design_table.write(filename,format=format,**kwargs)
			return None
		else:
			return design_table
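The max_rows chunking described in the docstring can be exercised on its own; a minimal sketch with made-up parameter names (hstack pads shorter chunks with masked values and suffixes duplicate column names):

import numpy as np
from astropy.table import Table, Column, hstack

values = np.random.rand(10, 2)
names = ["Omega_m", "sigma_8"]
max_rows = 4
num_chunks = len(values) // max_rows
if len(values) % max_rows != 0:
    num_chunks += 1

chunks = []
for n in range(num_chunks):
    sub = Table(values[n * max_rows:(n + 1) * max_rows], names=names)
    row_start = n * max_rows + 1
    row_end = min((n + 1) * max_rows, len(values))
    sub.add_column(Column(data=range(row_start, row_end + 1), name=r"$N$"), index=0)
    chunks.append(sub)

wide = hstack(chunks)  # 4-row table; the last (2-row) chunk is masked on the right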
    #y_err = d[1].data['jackknifStackErrors'][sel]
    N_spec = d[1].data['NspectraPerPixel'][sel]
    N_spectra[index] = int(n.median(N_spec))
    hduSPM = fits.open(path_2_spFly)
    table_entry_1 = get_table_entry_full(hduSPM[1])
    table_entry_2 = get_table_entry_full(hduSPM[2])
    table_all[index] = n.hstack((table_entry_1, table_entry_2))

newDat = n.transpose(table_all)

headers = " Chabrier_MILES_age_lightW Chabrier_MILES_age_lightW_up_1sig Chabrier_MILES_age_lightW_low_1sig Chabrier_MILES_age_lightW_up_2sig Chabrier_MILES_age_lightW_low_2sig Chabrier_MILES_age_lightW_up_3sig Chabrier_MILES_age_lightW_low_3sig Chabrier_MILES_metallicity_lightW Chabrier_MILES_metallicity_lightW_up_1sig Chabrier_MILES_metallicity_lightW_low_1sig Chabrier_MILES_metallicity_lightW_up_2sig Chabrier_MILES_metallicity_lightW_low_2sig Chabrier_MILES_metallicity_lightW_up_3sig Chabrier_MILES_metallicity_lightW_low_3sig Chabrier_MILES_age_massW Chabrier_MILES_age_massW_up_1sig Chabrier_MILES_age_massW_low_1sig Chabrier_MILES_age_massW_up_2sig Chabrier_MILES_age_massW_low_2sig Chabrier_MILES_age_massW_up_3sig Chabrier_MILES_age_massW_low_3sig Chabrier_MILES_metallicity_massW Chabrier_MILES_metallicity_massW_up_1sig Chabrier_MILES_metallicity_massW_low_1sig Chabrier_MILES_metallicity_massW_up_2sig Chabrier_MILES_metallicity_massW_low_2sig Chabrier_MILES_metallicity_massW_up_3sig Chabrier_MILES_metallicity_massW_low_3sig Chabrier_MILES_total_mass Chabrier_MILES_stellar_mass Chabrier_MILES_living_stars_mass Chabrier_MILES_remnant_mass Chabrier_MILES_remnant_mass_in_whitedwarfs Chabrier_MILES_remnant_mass_in_neutronstars Chabrier_MILES_remnant_mass_blackholes Chabrier_MILES_mass_of_ejecta Chabrier_MILES_total_mass_up_1sig Chabrier_MILES_total_mass_low_1sig Chabrier_MILES_total_mass_up_2sig Chabrier_MILES_total_mass_low_2sig Chabrier_MILES_total_mass_up_3sig Chabrier_MILES_total_mass_low_3sig Chabrier_MILES_spm_EBV Chabrier_MILES_nComponentsSSP Chabrier_MILES_total_mass_ssp_0 Chabrier_MILES_stellar_mass_ssp_0 Chabrier_MILES_living_stars_mass_ssp_0 Chabrier_MILES_remnant_mass_ssp_0 Chabrier_MILES_remnant_mass_in_whitedwarfs_ssp_0 Chabrier_MILES_remnant_mass_in_neutronstars_ssp_0 Chabrier_MILES_remnant_mass_in_blackholes_ssp_0 Chabrier_MILES_mass_of_ejecta_ssp_0 Chabrier_MILES_log_age_ssp_0 Chabrier_MILES_metal_ssp_0 Chabrier_MILES_SFR_ssp_0 Chabrier_MILES_weightMass_ssp_0 Chabrier_MILES_weightLight_ssp_0 Chabrier_MILES_total_mass_ssp_1 Chabrier_MILES_stellar_mass_ssp_1 Chabrier_MILES_living_stars_mass_ssp_1 Chabrier_MILES_remnant_mass_ssp_1 Chabrier_MILES_remnant_mass_in_whitedwarfs_ssp_1 Chabrier_MILES_remnant_mass_in_neutronstars_ssp_1 Chabrier_MILES_remnant_mass_in_blackholes_ssp_1 Chabrier_MILES_mass_of_ejecta_ssp_1 Chabrier_MILES_log_age_ssp_1 Chabrier_MILES_metal_ssp_1 Chabrier_MILES_SFR_ssp_1 Chabrier_MILES_weightMass_ssp_1 Chabrier_MILES_weightLight_ssp_1 Chabrier_MILES_total_mass_ssp_2 Chabrier_MILES_stellar_mass_ssp_2 Chabrier_MILES_living_stars_mass_ssp_2 Chabrier_MILES_remnant_mass_ssp_2 Chabrier_MILES_remnant_mass_in_whitedwarfs_ssp_2 Chabrier_MILES_remnant_mass_in_neutronstars_ssp_2 Chabrier_MILES_remnant_mass_in_blackholes_ssp_2 Chabrier_MILES_mass_of_ejecta_ssp_2 Chabrier_MILES_log_age_ssp_2 Chabrier_MILES_metal_ssp_2 Chabrier_MILES_SFR_ssp_2 Chabrier_MILES_weightMass_ssp_2 Chabrier_MILES_weightLight_ssp_2 Chabrier_MILES_total_mass_ssp_3 Chabrier_MILES_stellar_mass_ssp_3 Chabrier_MILES_living_stars_mass_ssp_3 Chabrier_MILES_remnant_mass_ssp_3 Chabrier_MILES_remnant_mass_in_whitedwarfs_ssp_3 Chabrier_MILES_remnant_mass_in_neutronstars_ssp_3 Chabrier_MILES_remnant_mass_in_blackholes_ssp_3 Chabrier_MILES_mass_of_ejecta_ssp_3 Chabrier_MILES_log_age_ssp_3 Chabrier_MILES_metal_ssp_3 Chabrier_MILES_SFR_ssp_3 Chabrier_MILES_weightMass_ssp_3 Chabrier_MILES_weightLight_ssp_3 Chabrier_MILES_total_mass_ssp_4 Chabrier_MILES_stellar_mass_ssp_4 Chabrier_MILES_living_stars_mass_ssp_4 
Chabrier_MILES_remnant_mass_ssp_4 Chabrier_MILES_remnant_mass_in_whitedwarfs_ssp_4 Chabrier_MILES_remnant_mass_in_neutronstars_ssp_4 Chabrier_MILES_remnant_mass_in_blackholes_ssp_4 Chabrier_MILES_mass_of_ejecta_ssp_4 Chabrier_MILES_log_age_ssp_4 Chabrier_MILES_metal_ssp_4 Chabrier_MILES_SFR_ssp_4 Chabrier_MILES_weightMass_ssp_4 Chabrier_MILES_weightLight_ssp_4 Chabrier_MILES_total_mass_ssp_5 Chabrier_MILES_stellar_mass_ssp_5 Chabrier_MILES_living_stars_mass_ssp_5 Chabrier_MILES_remnant_mass_ssp_5 Chabrier_MILES_remnant_mass_in_whitedwarfs_ssp_5 Chabrier_MILES_remnant_mass_in_neutronstars_ssp_5 Chabrier_MILES_remnant_mass_in_blackholes_ssp_5 Chabrier_MILES_mass_of_ejecta_ssp_5 Chabrier_MILES_log_age_ssp_5 Chabrier_MILES_metal_ssp_5 Chabrier_MILES_SFR_ssp_5 Chabrier_MILES_weightMass_ssp_5 Chabrier_MILES_weightLight_ssp_5 Chabrier_MILES_total_mass_ssp_6 Chabrier_MILES_stellar_mass_ssp_6 Chabrier_MILES_living_stars_mass_ssp_6 Chabrier_MILES_remnant_mass_ssp_6 Chabrier_MILES_remnant_mass_in_whitedwarfs_ssp_6 Chabrier_MILES_remnant_mass_in_neutronstars_ssp_6 Chabrier_MILES_remnant_mass_in_blackholes_ssp_6 Chabrier_MILES_mass_of_ejecta_ssp_6 Chabrier_MILES_log_age_ssp_6 Chabrier_MILES_metal_ssp_6 Chabrier_MILES_SFR_ssp_6 Chabrier_MILES_weightMass_ssp_6 Chabrier_MILES_weightLight_ssp_6 Chabrier_MILES_total_mass_ssp_7 Chabrier_MILES_stellar_mass_ssp_7 Chabrier_MILES_living_stars_mass_ssp_7 Chabrier_MILES_remnant_mass_ssp_7 Chabrier_MILES_remnant_mass_in_whitedwarfs_ssp_7 Chabrier_MILES_remnant_mass_in_neutronstars_ssp_7 Chabrier_MILES_remnant_mass_in_blackholes_ssp_7 Chabrier_MILES_mass_of_ejecta_ssp_7 Chabrier_MILES_log_age_ssp_7 Chabrier_MILES_metal_ssp_7 Chabrier_MILES_SFR_ssp_7 Chabrier_MILES_weightMass_ssp_7 Chabrier_MILES_weightLight_ssp_7 Chabrier_ELODIE_age_lightW Chabrier_ELODIE_age_lightW_up_1sig Chabrier_ELODIE_age_lightW_low_1sig Chabrier_ELODIE_age_lightW_up_2sig Chabrier_ELODIE_age_lightW_low_2sig Chabrier_ELODIE_age_lightW_up_3sig Chabrier_ELODIE_age_lightW_low_3sig Chabrier_ELODIE_metallicity_lightW Chabrier_ELODIE_metallicity_lightW_up_1sig Chabrier_ELODIE_metallicity_lightW_low_1sig Chabrier_ELODIE_metallicity_lightW_up_2sig Chabrier_ELODIE_metallicity_lightW_low_2sig Chabrier_ELODIE_metallicity_lightW_up_3sig Chabrier_ELODIE_metallicity_lightW_low_3sig Chabrier_ELODIE_age_massW Chabrier_ELODIE_age_massW_up_1sig Chabrier_ELODIE_age_massW_low_1sig Chabrier_ELODIE_age_massW_up_2sig Chabrier_ELODIE_age_massW_low_2sig Chabrier_ELODIE_age_massW_up_3sig Chabrier_ELODIE_age_massW_low_3sig Chabrier_ELODIE_metallicity_massW Chabrier_ELODIE_metallicity_massW_up_1sig Chabrier_ELODIE_metallicity_massW_low_1sig Chabrier_ELODIE_metallicity_massW_up_2sig Chabrier_ELODIE_metallicity_massW_low_2sig Chabrier_ELODIE_metallicity_massW_up_3sig Chabrier_ELODIE_metallicity_massW_low_3sig Chabrier_ELODIE_total_mass Chabrier_ELODIE_stellar_mass Chabrier_ELODIE_living_stars_mass Chabrier_ELODIE_remnant_mass Chabrier_ELODIE_remnant_mass_in_whitedwarfs Chabrier_ELODIE_remnant_mass_in_neutronstars Chabrier_ELODIE_remnant_mass_blackholes Chabrier_ELODIE_mass_of_ejecta Chabrier_ELODIE_total_mass_up_1sig Chabrier_ELODIE_total_mass_low_1sig Chabrier_ELODIE_total_mass_up_2sig Chabrier_ELODIE_total_mass_low_2sig Chabrier_ELODIE_total_mass_up_3sig Chabrier_ELODIE_total_mass_low_3sig Chabrier_ELODIE_spm_EBV Chabrier_ELODIE_nComponentsSSP Chabrier_ELODIE_total_mass_ssp_0 Chabrier_ELODIE_stellar_mass_ssp_0 Chabrier_ELODIE_living_stars_mass_ssp_0 Chabrier_ELODIE_remnant_mass_ssp_0 
Chabrier_ELODIE_remnant_mass_in_whitedwarfs_ssp_0 Chabrier_ELODIE_remnant_mass_in_neutronstars_ssp_0 Chabrier_ELODIE_remnant_mass_in_blackholes_ssp_0 Chabrier_ELODIE_mass_of_ejecta_ssp_0 Chabrier_ELODIE_log_age_ssp_0 Chabrier_ELODIE_metal_ssp_0 Chabrier_ELODIE_SFR_ssp_0 Chabrier_ELODIE_weightMass_ssp_0 Chabrier_ELODIE_weightLight_ssp_0 Chabrier_ELODIE_total_mass_ssp_1 Chabrier_ELODIE_stellar_mass_ssp_1 Chabrier_ELODIE_living_stars_mass_ssp_1 Chabrier_ELODIE_remnant_mass_ssp_1 Chabrier_ELODIE_remnant_mass_in_whitedwarfs_ssp_1 Chabrier_ELODIE_remnant_mass_in_neutronstars_ssp_1 Chabrier_ELODIE_remnant_mass_in_blackholes_ssp_1 Chabrier_ELODIE_mass_of_ejecta_ssp_1 Chabrier_ELODIE_log_age_ssp_1 Chabrier_ELODIE_metal_ssp_1 Chabrier_ELODIE_SFR_ssp_1 Chabrier_ELODIE_weightMass_ssp_1 Chabrier_ELODIE_weightLight_ssp_1 Chabrier_ELODIE_total_mass_ssp_2 Chabrier_ELODIE_stellar_mass_ssp_2 Chabrier_ELODIE_living_stars_mass_ssp_2 Chabrier_ELODIE_remnant_mass_ssp_2 Chabrier_ELODIE_remnant_mass_in_whitedwarfs_ssp_2 Chabrier_ELODIE_remnant_mass_in_neutronstars_ssp_2 Chabrier_ELODIE_remnant_mass_in_blackholes_ssp_2 Chabrier_ELODIE_mass_of_ejecta_ssp_2 Chabrier_ELODIE_log_age_ssp_2 Chabrier_ELODIE_metal_ssp_2 Chabrier_ELODIE_SFR_ssp_2 Chabrier_ELODIE_weightMass_ssp_2 Chabrier_ELODIE_weightLight_ssp_2 Chabrier_ELODIE_total_mass_ssp_3 Chabrier_ELODIE_stellar_mass_ssp_3 Chabrier_ELODIE_living_stars_mass_ssp_3 Chabrier_ELODIE_remnant_mass_ssp_3 Chabrier_ELODIE_remnant_mass_in_whitedwarfs_ssp_3 Chabrier_ELODIE_remnant_mass_in_neutronstars_ssp_3 Chabrier_ELODIE_remnant_mass_in_blackholes_ssp_3 Chabrier_ELODIE_mass_of_ejecta_ssp_3 Chabrier_ELODIE_log_age_ssp_3 Chabrier_ELODIE_metal_ssp_3 Chabrier_ELODIE_SFR_ssp_3 Chabrier_ELODIE_weightMass_ssp_3 Chabrier_ELODIE_weightLight_ssp_3 Chabrier_ELODIE_total_mass_ssp_4 Chabrier_ELODIE_stellar_mass_ssp_4 Chabrier_ELODIE_living_stars_mass_ssp_4 Chabrier_ELODIE_remnant_mass_ssp_4 Chabrier_ELODIE_remnant_mass_in_whitedwarfs_ssp_4 Chabrier_ELODIE_remnant_mass_in_neutronstars_ssp_4 Chabrier_ELODIE_remnant_mass_in_blackholes_ssp_4 Chabrier_ELODIE_mass_of_ejecta_ssp_4 Chabrier_ELODIE_log_age_ssp_4 Chabrier_ELODIE_metal_ssp_4 Chabrier_ELODIE_SFR_ssp_4 Chabrier_ELODIE_weightMass_ssp_4 Chabrier_ELODIE_weightLight_ssp_4 Chabrier_ELODIE_total_mass_ssp_5 Chabrier_ELODIE_stellar_mass_ssp_5 Chabrier_ELODIE_living_stars_mass_ssp_5 Chabrier_ELODIE_remnant_mass_ssp_5 Chabrier_ELODIE_remnant_mass_in_whitedwarfs_ssp_5 Chabrier_ELODIE_remnant_mass_in_neutronstars_ssp_5 Chabrier_ELODIE_remnant_mass_in_blackholes_ssp_5 Chabrier_ELODIE_mass_of_ejecta_ssp_5 Chabrier_ELODIE_log_age_ssp_5 Chabrier_ELODIE_metal_ssp_5 Chabrier_ELODIE_SFR_ssp_5 Chabrier_ELODIE_weightMass_ssp_5 Chabrier_ELODIE_weightLight_ssp_5 Chabrier_ELODIE_total_mass_ssp_6 Chabrier_ELODIE_stellar_mass_ssp_6 Chabrier_ELODIE_living_stars_mass_ssp_6 Chabrier_ELODIE_remnant_mass_ssp_6 Chabrier_ELODIE_remnant_mass_in_whitedwarfs_ssp_6 Chabrier_ELODIE_remnant_mass_in_neutronstars_ssp_6 Chabrier_ELODIE_remnant_mass_in_blackholes_ssp_6 Chabrier_ELODIE_mass_of_ejecta_ssp_6 Chabrier_ELODIE_log_age_ssp_6 Chabrier_ELODIE_metal_ssp_6 Chabrier_ELODIE_SFR_ssp_6 Chabrier_ELODIE_weightMass_ssp_6 Chabrier_ELODIE_weightLight_ssp_6 Chabrier_ELODIE_total_mass_ssp_7 Chabrier_ELODIE_stellar_mass_ssp_7 Chabrier_ELODIE_living_stars_mass_ssp_7 Chabrier_ELODIE_remnant_mass_ssp_7 Chabrier_ELODIE_remnant_mass_in_whitedwarfs_ssp_7 Chabrier_ELODIE_remnant_mass_in_neutronstars_ssp_7 Chabrier_ELODIE_remnant_mass_in_blackholes_ssp_7 Chabrier_ELODIE_mass_of_ejecta_ssp_7 
Chabrier_ELODIE_log_age_ssp_7 Chabrier_ELODIE_metal_ssp_7 Chabrier_ELODIE_SFR_ssp_7 Chabrier_ELODIE_weightMass_ssp_7 Chabrier_ELODIE_weightLight_ssp_7"

t = Table()

for data_array, head in zip(newDat, headers.split()):
    t.add_column(Column(name=head, format='D', data=data_array))

for id_col, (col_chi2,
             col_ndof) in enumerate(zip(n.transpose(chi2), n.transpose(ndof))):
    t.add_column(
        Column(name=hdu_header_prefix[id_col] + "chi2",
               format='D',
               data=col_chi2))
    t.add_column(
        Column(name=hdu_header_prefix[id_col] + "ndof",
               format='D',
               data=col_ndof))

t.add_column(Column(name="z_min", format='D', data=th_min))
t.add_column(Column(name="z_max", format='D', data=th_max))
t.add_column(Column(name="radial_bin", format='D', data=cz_sel))
Example #40
    # printed output
    print()
    print()
    print('    Target: ' + target_name)
    print()
    print()
    print("Luminosity\t\t" + str(round(luminosity)))
    print("Optical depth\t\t" + str(grid_outputs[model_index]['odep']))
    print("Expansion velocity (scaled)\t" + str(round(scaled_vexp, 2)))
    print("Mass loss (scaled)\t\t" + str("%.2E" % float(scaled_mdot)))

# saves results csv file
file_a = Table(np.array(latex_array),
               names=('source', 'L', 'vexp_predicted', 'teff', 'tinner',
                      'odep', 'mdot'),
               dtype=('S16', 'int32', 'f8', 'int32', 'int32', 'f8', 'f8'))
file_a.write('../fitting_results.csv', format='csv', overwrite=True)

# saves plotting file
file_b = Table(np.array(follow_up_array))
file_b.add_column(Column(follow_up_index, name='index'), index=0)
file_b.add_column(Column(follow_up_normilazation, name='norm'), index=0)
file_b.add_column(Column(targets, name='data_file'), index=0)
file_b.add_column(Column(follow_up_names, name='target_name'), index=0)
file_b.remove_columns(('c', 'd', 'e', 'f', 'g', 'h', 'i'))
file_b.write('../fitting_plotting_outputs.csv', format='csv', overwrite=True)

end = time.time()
print('Time: ' + str((end - start) / 60) + ' minutes')
Example #41
    def run_LS(self, minf=1/12.5, maxf=1/0.1, spp=50):
        """ Runs LS fit for each light curve. 

        Parameters
        ----------
        minf : float, optional
             The minimum frequency to search in the LS routine. Default = 1/12.5.
        maxf : float, optional
             The maximum frequency to search in the LS routine. Default = 1/0.1.
        spp : int, optional
             The number of samples per peak. Default = 50.

        Attributes
        ----------
        LS_results : astropy.table.Table
        """
        def per_orbit(t, f):
            nonlocal maxf, spp

            minf = 1/(t[-1]-t[0])
            if minf > 1/12.0:
                minf = 1/12.0

            freq, power = LombScargle(t, f).autopower(minimum_frequency=minf,
                                                      maximum_frequency=maxf,
                                                      samples_per_peak=spp)
            arg = np.argmax(power)
            per = 1.0/freq
            popt = self.fit_LS_peak(per, power, arg)
            
            ## SEARCHES & MASKS RESONANCES OF THE BEST-FIT PERIOD
            perlist = per[arg] * np.array([0.5, 1.0, 2.0, 4.0, 8.0])
            remove_res = np.zeros(len(per))
            maskreg = int(spp/1.5)
            for p in perlist:
                where = np.where( (per <= p))[0]
                if len(where) > 0:
                    ind = int(where[0])
                    if ind-maskreg > 0 and ind<len(per)-maskreg:
                        remove_res[int(ind-maskreg):int(ind+maskreg)] = 1
                    elif ind < maskreg:
                        remove_res[0:int(maskreg)] = 1
                    elif ind > len(per)-maskreg:
                        remove_res[int(len(per)-maskreg):len(per)] = 1
            if perlist[1] == 1/minf:
                remove_res[0:int(spp/2)] = 1

            rr = remove_res == 0
            arg1 = np.argmax(power[rr])
            ## REDOS PERIOD ROUTINE FOR SECOND HIGHEST PEAK 
            if arg1 == len(per[rr]):
                arg1 = int(arg1-3)

            popt2 = self.fit_LS_peak(per[rr], power[rr], arg1)
            
            maxpower = power[arg]
            secpower = power[rr][arg1]

            bestperiod = per[arg]
            secbperiod = per[rr][arg1]

            bestwidth = popt[0]

            return bestperiod, secbperiod, maxpower, secpower, bestwidth

        tab = Table()

        periods = np.zeros(len(self.IDs))
        stds = np.zeros(len(self.IDs))
        peak_power = np.zeros(len(self.IDs))

        periods2 = np.zeros(len(self.IDs))
        peak_power2 = np.zeros(len(self.IDs))

        orbit_flag = np.zeros(len(self.IDs))
        orbit_flag1 = np.zeros(len(self.IDs))
        orbit_flag2 = np.zeros(len(self.IDs))

        for i in tqdm(range(len(self.flux)), desc="Finding most likely periods"):

            time, flux, flux_err = self.time[i], self.flux[i], self.flux_err[i]
            
            # SPLITS BY ORBIT
            diff = np.diff(time)
            brk = np.where(diff >= np.nanmedian(diff)+14*np.nanstd(diff))[0]
            
            if len(brk) > 1:
                brk_diff = brk - (len(time)/2)
                try:
                    brk_diff = np.where(brk_diff<0)[0][-1]
                except IndexError:
                    brk_diff = np.argmin(brk_diff)
                brk = np.array([brk[brk_diff]], dtype=int)

            # Trim out Earthshine-contaminated cadences near the orbit edges
            t1, f1 = time[:brk[0]], flux[:brk[0]]
            t2, f2 = time[brk[0]:], flux[brk[0]:]

            o1_params = per_orbit(t1, f1)
            o2_params = per_orbit(t2, f2)

            both = np.array([o1_params[0], o2_params[0]])
            avg_period = np.nanmedian(both)


            flag1 = self.assign_flag(o1_params[0], o1_params[2], o1_params[-1],
                                    avg_period, o1_params[-2], t1[-1]-t1[0])
            flag2 = self.assign_flag(o2_params[0], o2_params[2], o2_params[-1],
                                     avg_period, o2_params[-2], t2[-1]-t2[0])

            if np.abs(o1_params[1]-avg_period) < 0.5 and np.abs(o2_params[1]-avg_period)<0.5:
                flag1 = flag2 = 0.0

            if flag1 != 0 and flag2 != 0:
                orbit_flag[i] = 1.0
            else:
                orbit_flag[i] = 0.0
                
            periods[i] = np.nanmedian([o1_params[0], o2_params[0]])
            
            orbit_flag1[i] = flag1
            orbit_flag2[i] = flag2
                
            stds[i]    = o1_params[-1]
            peak_power[i] = o1_params[2]
            periods2[i] = o2_params[0]
            peak_power2[i] = o1_params[-2]

        tab.add_column(Column(self.IDs, 'Target_ID'))
        tab.add_column(Column(periods, name='period_days'))
        tab.add_column(Column(periods2, name='secondary_period_days'))
        tab.add_column(Column(stds, name='gauss_width'))
        tab.add_column(Column(peak_power, name='max_power'))
        tab.add_column(Column(peak_power2, name='secondary_max_power'))
        tab.add_column(Column(orbit_flag, name='orbit_flag'))
        tab.add_column(Column(orbit_flag1, name='oflag1'))
        tab.add_column(Column(orbit_flag2, name='oflag2'))

        tab = self.averaged_per_sector(tab)

        self.LS_results = tab
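The core of per_orbit above is a plain Lomb-Scargle peak search; a self-contained sketch on synthetic data (assuming LombScargle comes from astropy.timeseries, as in recent astropy versions):

import numpy as np
from astropy.timeseries import LombScargle

t = np.linspace(0, 27, 2000)                    # days
f = 1.0 + 0.01 * np.sin(2 * np.pi * t / 3.5)    # injected 3.5-day signal
f += np.random.normal(0, 0.002, t.size)         # white noise

freq, power = LombScargle(t, f).autopower(minimum_frequency=1 / 12.0,
                                          maximum_frequency=1 / 0.1,
                                          samples_per_peak=50)
best_period = 1.0 / freq[np.argmax(power)]      # recovers ~3.5 days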
    def nstar(self, image, star_groups):
        """
        Fit, as appropriate, a compound or single model to the given
        ``star_groups``. Groups are fitted sequentially from the
        smallest to the biggest. In each iteration, ``image`` is
        subtracted by the previous fitted group.

        Parameters
        ----------
        image : numpy.ndarray
            Background-subtracted image.
        star_groups : `~astropy.table.Table`
            This table must contain the following columns: ``id``,
            ``group_id``, ``x_0``, ``y_0``, ``flux_0``.  ``x_0`` and
            ``y_0`` are initial estimates of the centroids and
            ``flux_0`` is an initial estimate of the flux. Additionally,
            columns named as ``<param_name>_0`` are required if any
            other parameter in the psf model is free (i.e., the
            ``fixed`` attribute of that parameter is ``False``).

        Returns
        -------
        result_tab : `~astropy.table.Table`
            Astropy table that contains photometry results.
        image : numpy.ndarray
            Residual image.
        """

        result_tab = Table()
        for param_tab_name in self._pars_to_output.keys():
            result_tab.add_column(Column(name=param_tab_name))

        unc_tab = Table()
        for param, isfixed in self.psf_model.fixed.items():
            if not isfixed:
                unc_tab.add_column(Column(name=param + "_unc"))

        y, x = np.indices(image.shape)

        star_groups = star_groups.group_by('group_id')
        for n in range(len(star_groups.groups)):
            group_psf = get_grouped_psf_model(self.psf_model,
                                              star_groups.groups[n],
                                              self._pars_to_set)
            usepixel = np.zeros_like(image, dtype=bool)

            for row in star_groups.groups[n]:
                usepixel[overlap_slices(large_array_shape=image.shape,
                                        small_array_shape=self.fitshape,
                                        position=(row['y_0'], row['x_0']),
                                        mode='trim')[0]] = True

            fit_model = self.fitter(group_psf, x[usepixel], y[usepixel],
                                    image[usepixel])
            param_table = self._model_params2table(fit_model,
                                                   len(star_groups.groups[n]))
            result_tab = vstack([result_tab, param_table])

            if 'param_cov' in self.fitter.fit_info.keys():
                unc_tab = vstack([
                    unc_tab,
                    self._get_uncertainties(len(star_groups.groups[n]))
                ])
            try:
                from astropy.nddata.utils import NoOverlapError
            except ImportError:
                raise ImportError("astropy 1.1 or greater is required in "
                                  "order to use this class.")
            # do not subtract if the fitting did not go well
            try:
                image = subtract_psf(image,
                                     self.psf_model,
                                     param_table,
                                     subshape=self.fitshape)
            except NoOverlapError:
                pass

        if 'param_cov' in self.fitter.fit_info.keys():
            result_tab = hstack([result_tab, unc_tab])

        return result_tab, image
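The group-by-group iteration described in the docstring reduces to astropy's grouped-table machinery; a minimal sketch with made-up stars:

from astropy.table import Table

star_groups = Table({'id': [1, 2, 3, 4],
                     'group_id': [1, 1, 2, 2],
                     'x_0': [10.0, 12.0, 40.0, 41.0],
                     'y_0': [11.0, 11.0, 30.0, 32.0],
                     'flux_0': [100.0, 80.0, 50.0, 60.0]})
star_groups = star_groups.group_by('group_id')
for n in range(len(star_groups.groups)):
    group = star_groups.groups[n]
    # here nstar() would build a compound model for this group, fit it,
    # and subtract the fitted PSFs from the image before the next group
    print(len(group), list(group['id']))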
## stuff for later
galaxy_name = sys.argv[1]
#ptype = sys.argv[2]
ptype = 'coarse_final/'
from astropy.io import fits

from astropy.table import Table, Column
arr = [[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0],
       [0.0], [0.0], [0.0], [0.0], [0.0]]

varnames = ['mass', 'logzsol', 'tau', 'tage', 'dust2']
coltit = [ty + par for par in varnames for ty in ['low ', 'best ', 'high ']]

table = Table(arr, names=(coltit))
obname = Column(name='objid', data=['J0000+0000'])
table.add_column(obname, index=0)
table.remove_row(0)


# In [10]
def lnprobfn(theta):
    """Given a parameter vector, a dictionary of observational data 
    and a model object, return the ln of the posterior. 
    This requires that an sps object (and if using spectra 
    and gaussian processes, a GP object) be instantiated.
    """

    #print('lnprobfn loves pizza')
    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        return -np.inf  # reject points outside the prior
Example #44
def main(args):

    # Set up the logger
    if args.verbose:
        log = get_logger(DEBUG)
    else:
        log = get_logger()

    # Make sure all necessary environment variables are set
    setup_envs()

    # Initialize random number generator to use.
    np.random.seed(args.seed)
    random_state = np.random.RandomState(args.seed)

    # Derive spectrograph number from nstart if needed
    if args.spectrograph is None:
        args.spectrograph = args.nstart // args.n_fibers

    # Read fibermapfile to get object type, night and expid
    fibermap, objtype, night, expid = get_fibermap(args.fibermap,
                                                   log=log,
                                                   nspec=args.nspec)

    # Initialize the spectral simulator
    log.info("Initializing SpecSim with config {}".format(args.config))
    lvmparams = load_lvmparams(config=args.config, telescope=args.telescope)
    qsim = get_simulator(args.config, num_fibers=1, params=lvmparams)

    if args.simspec:
        # Read the input file
        log.info('Reading input file {}'.format(args.simspec))
        simspec = lvmsim.io.read_simspec(args.simspec)
        nspec = simspec.nspec
        if simspec.flavor == 'arc':
            # - TODO: do we need quickgen to support arcs?  For full pipeline
            # - arcs are used to measure PSF but aren't extracted except for
            # - debugging.
            # - TODO: if we do need arcs, this needs to be redone.
            # - conversion from phot to flux doesn't include throughput,
            # - and arc lines are rebinned to nearest 0.2 A.

            # Create full wavelength and flux arrays for arc exposure
            wave_b = np.array(simspec.wave['b'])
            wave_r = np.array(simspec.wave['r'])
            wave_z = np.array(simspec.wave['z'])
            phot_b = np.array(simspec.phot['b'][0])
            phot_r = np.array(simspec.phot['r'][0])
            phot_z = np.array(simspec.phot['z'][0])
            sim_wave = np.concatenate((wave_b, wave_r, wave_z))
            sim_phot = np.concatenate((phot_b, phot_r, phot_z))
            wavelengths = np.arange(3533., 9913.1, 0.2)
            phot = np.zeros(len(wavelengths))
            for i in range(len(sim_wave)):
                wavelength = sim_wave[i]
                flux_index = np.argmin(abs(wavelength - wavelengths))
                phot[flux_index] = sim_phot[i]
            # Convert photons to flux: following specter conversion method
            dw = np.gradient(wavelengths)
            exptime = 5.  # typical BOSS exposure time in s
            fibarea = const.pi * (1.07e-2 /
                                  2)**2  # cross-sectional fiber area in cm^2
            hc = 1.e17 * const.h * const.c  # convert to erg A
            spectra = (hc * exptime * fibarea * dw * phot) / wavelengths
        else:
            wavelengths = simspec.wave['brz']
            spectra = simspec.flux
        if nspec < args.nspec:
            log.info("Only {} spectra in input file".format(nspec))
            args.nspec = nspec

    else:
        # Initialize the output truth table.
        spectra = []
        wavelengths = qsim.source.wavelength_out.to(u.Angstrom).value
        npix = len(wavelengths)
        truth = dict()
        meta = Table()
        truth['OBJTYPE'] = np.zeros(args.nspec, dtype=(str, 10))
        truth['FLUX'] = np.zeros((args.nspec, npix))
        truth['WAVE'] = wavelengths
        jj = list()

        for thisobj in set(true_objtype):
            ii = np.where(true_objtype == thisobj)[0]
            nobj = len(ii)
            truth['OBJTYPE'][ii] = thisobj
            log.info('Generating {} template'.format(thisobj))

            # Generate the templates
            if thisobj == 'ELG':
                elg = lvmsim.templates.ELG(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = elg.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_elg,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'LRG':
                lrg = lvmsim.templates.LRG(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = lrg.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_lrg,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'QSO':
                qso = lvmsim.templates.QSO(wave=wavelengths)
                flux, tmpwave, meta1 = qso.make_templates(
                    nmodel=nobj, seed=args.seed, zrange=args.zrange_qso)
            elif thisobj == 'BGS':
                bgs = lvmsim.templates.BGS(wave=wavelengths,
                                           add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = bgs.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_bgs,
                    rmagrange=args.rmagrange_bgs,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'STD':
                fstd = lvmsim.templates.FSTD(wave=wavelengths)
                flux, tmpwave, meta1 = fstd.make_templates(nmodel=nobj,
                                                           seed=args.seed)
            elif thisobj == 'QSO_BAD':  # use STAR template no color cuts
                star = lvmsim.templates.STAR(wave=wavelengths)
                flux, tmpwave, meta1 = star.make_templates(nmodel=nobj,
                                                           seed=args.seed)
            elif thisobj == 'MWS_STAR' or thisobj == 'MWS':
                mwsstar = lvmsim.templates.MWS_STAR(wave=wavelengths)
                flux, tmpwave, meta1 = mwsstar.make_templates(nmodel=nobj,
                                                              seed=args.seed)
            elif thisobj == 'WD':
                wd = lvmsim.templates.WD(wave=wavelengths)
                flux, tmpwave, meta1 = wd.make_templates(nmodel=nobj,
                                                         seed=args.seed)
            elif thisobj == 'SKY':
                flux = np.zeros((nobj, npix))
                meta1 = Table(dict(REDSHIFT=np.zeros(nobj, dtype=np.float32)))
            elif thisobj == 'TEST':
                flux = np.zeros((args.nspec, npix))
                indx = np.where(wave > 5800.0 - 1E-6)[0][0]
                ref_integrated_flux = 1E-10
                ref_cst_flux_density = 1E-17
                single_line = (np.arange(args.nspec) % 2 == 0).astype(
                    np.float32)
                continuum = (np.arange(args.nspec) % 2 == 1).astype(np.float32)

                for spec in range(args.nspec):
                    flux[spec, indx] = single_line[
                        spec] * ref_integrated_flux / np.gradient(wavelengths)[
                            indx]  # single line
                    flux[spec] += continuum[
                        spec] * ref_cst_flux_density  # flat continuum

                meta1 = Table(
                    dict(REDSHIFT=np.zeros(args.nspec, dtype=np.float32),
                         LINE=wave[indx] *
                         np.ones(args.nspec, dtype=np.float32),
                         LINEFLUX=single_line * ref_integrated_flux,
                         CONSTFLUXDENSITY=continuum * ref_cst_flux_density))
            else:
                log.fatal('Unknown object type {}'.format(thisobj))
                sys.exit(1)

            # Pack it in.
            truth['FLUX'][ii] = flux
            meta = vstack([meta, meta1])
            jj.append(ii.tolist())

            # Sanity check on units; templates currently return ergs, not 1e-17 ergs...
            # assert (thisobj == 'SKY') or (np.max(truth['FLUX']) < 1e-6)

        # Sort the metadata table.
        jj = sum(jj, [])
        meta_new = Table()
        for k in range(args.nspec):
            index = int(np.where(np.array(jj) == k)[0])
            meta_new = vstack([meta_new, meta[index]])
        meta = meta_new

        # Add TARGETID and the true OBJTYPE to the metadata table.
        meta.add_column(
            Column(true_objtype, dtype=(str, 10), name='TRUE_OBJTYPE'))
        meta.add_column(Column(targetids, name='TARGETID'))

        # Rename REDSHIFT -> TRUEZ anticipating later table joins with zbest.Z
        meta.rename_column('REDSHIFT', 'TRUEZ')

    # ---------- end simspec

    # explicitly set location on focal plane if needed to support airmass
    # variations when using specsim v0.5
    if qsim.source.focal_xy is None:
        qsim.source.focal_xy = (u.Quantity(0, 'mm'), u.Quantity(100, 'mm'))

    # Set simulation parameters from the simspec header or lvmparams
    bright_objects = ['bgs', 'mws', 'bright', 'BGS', 'MWS', 'BRIGHT_MIX']
    gray_objects = ['gray', 'grey']
    if args.simspec is None:
        object_type = objtype
        flavor = None
    elif simspec.flavor == 'science':
        object_type = None
        flavor = simspec.header['PROGRAM']
    else:
        object_type = None
        flavor = simspec.flavor
        log.warning(
            'Maybe using an outdated simspec file with flavor={}'.format(
                flavor))

    # Set airmass
    if args.airmass is not None:
        qsim.atmosphere.airmass = args.airmass
    elif args.simspec and 'AIRMASS' in simspec.header:
        qsim.atmosphere.airmass = simspec.header['AIRMASS']
    else:
        qsim.atmosphere.airmass = 1.25  # Science Req. Doc L3.3.2

    # Set site location
    if args.location is not None:
        qsim.observation.observatory = args.location
    else:
        qsim.observation.observatory = 'APO'

    # Set exptime
    if args.exptime is not None:
        qsim.observation.exposure_time = args.exptime * u.s
    elif args.simspec and 'EXPTIME' in simspec.header:
        qsim.observation.exposure_time = simspec.header['EXPTIME'] * u.s
    elif objtype in bright_objects:
        qsim.observation.exposure_time = lvmparams['exptime_bright'] * u.s
    else:
        qsim.observation.exposure_time = lvmparams['exptime_dark'] * u.s

    # Set Moon Phase
    if args.moon_phase is not None:
        qsim.atmosphere.moon.moon_phase = args.moon_phase
    elif args.simspec and 'MOONFRAC' in simspec.header:
        qsim.atmosphere.moon.moon_phase = simspec.header['MOONFRAC']
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.moon_phase = 0.7
    elif flavor in gray_objects:
        qsim.atmosphere.moon.moon_phase = 0.1
    else:
        qsim.atmosphere.moon.moon_phase = 0.5

    # Set Moon Zenith
    if args.moon_zenith is not None:
        qsim.atmosphere.moon.moon_zenith = args.moon_zenith * u.deg
    elif args.simspec and 'MOONALT' in simspec.header:
        qsim.atmosphere.moon.moon_zenith = simspec.header['MOONALT'] * u.deg
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.moon_zenith = 30 * u.deg
    elif flavor in gray_objects:
        qsim.atmosphere.moon.moon_zenith = 80 * u.deg
    else:
        qsim.atmosphere.moon.moon_zenith = 100 * u.deg

    # Set Moon - Object Angle
    if args.moon_angle is not None:
        qsim.atmosphere.moon.separation_angle = args.moon_angle * u.deg
    elif args.simspec and 'MOONSEP' in simspec.header:
        qsim.atmosphere.moon.separation_angle = simspec.header[
            'MOONSEP'] * u.deg
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.separation_angle = 50 * u.deg
    elif flavor in gray_objects:
        qsim.atmosphere.moon.separation_angle = 60 * u.deg
    else:
        qsim.atmosphere.moon.separation_angle = 60 * u.deg

    # Initialize per-camera output arrays that will be saved
    waves, trueflux, noisyflux, obsivar, resolution, sflux = {}, {}, {}, {}, {}, {}

    maxbin = 0
    nmax = args.nspec
    for camera in qsim.instrument.cameras:
        # Lookup this camera's resolution matrix and convert to the sparse format used in lvmspec.
        R = Resolution(camera.get_output_resolution_matrix())
        resolution[camera.name] = np.tile(R.to_fits_array(),
                                          [args.nspec, 1, 1])
        waves[camera.name] = (camera.output_wavelength.to(
            u.Angstrom).value.astype(np.float32))
        nwave = len(waves[camera.name])
        maxbin = max(maxbin, len(waves[camera.name]))
        nobj = np.zeros((nmax, 3, maxbin))  # object photons
        nsky = np.zeros((nmax, 3, maxbin))  # sky photons
        nivar = np.zeros((nmax, 3, maxbin))  # inverse variance (object+sky)
        cframe_observedflux = np.zeros(
            (nmax, 3, maxbin))  # calibrated object flux
        cframe_ivar = np.zeros(
            (nmax, 3, maxbin))  # inverse variance of calibrated object flux
        cframe_rand_noise = np.zeros(
            (nmax, 3, maxbin))  # random Gaussian noise to calibrated flux
        sky_ivar = np.zeros((nmax, 3, maxbin))  # inverse variance of sky
        sky_rand_noise = np.zeros(
            (nmax, 3, maxbin))  # random Gaussian noise to sky only
        frame_rand_noise = np.zeros(
            (nmax, 3, maxbin))  # random Gaussian noise to nobj+nsky
        trueflux[camera.name] = np.empty(
            (args.nspec, nwave))  # calibrated flux
        noisyflux[camera.name] = np.empty(
            (args.nspec, nwave))  # observed flux with noise
        obsivar[camera.name] = np.empty(
            (args.nspec, nwave))  # inverse variance of flux
        if args.simspec:
            dw = np.gradient(simspec.wave[camera.name])
        else:
            sflux = np.empty((args.nspec, npix))

    # - Check if input simspec is for a continuum flat lamp instead of science
    # - This does not convolve to per-fiber resolution
    if args.simspec:
        if simspec.flavor == 'flat':
            log.info("Simulating flat lamp exposure")
            for i, camera in enumerate(qsim.instrument.cameras):
                channel = camera.name
                assert camera.output_wavelength.unit == u.Angstrom
                num_pixels = len(waves[channel])
                dw = np.gradient(simspec.wave[channel])
                meanspec = resample_flux(
                    waves[channel], simspec.wave[channel],
                    np.average(simspec.phot[channel] / dw, axis=0))
                fiberflat = random_state.normal(loc=1.0,
                                                scale=1.0 / np.sqrt(meanspec),
                                                size=(nspec, num_pixels))
                ivar = np.tile(meanspec, [nspec, 1])
                mask = np.zeros((simspec.nspec, num_pixels), dtype=np.uint32)

                for kk in range((args.nspec + args.nstart - 1) //
                                args.n_fibers + 1):
                    camera = channel + str(kk)
                    outfile = lvmspec.io.findfile('fiberflat', night, expid,
                                                  camera)
                    start = max(args.n_fibers * kk, args.nstart)
                    end = min(args.n_fibers * (kk + 1), nmax)

                    if (args.spectrograph <= kk):
                        log.info(
                            "Writing files for channel:{}, spectrograph:{}, spectra:{} to {}"
                            .format(channel, kk, start, end))

                    ff = FiberFlat(waves[channel],
                                   fiberflat[start:end, :],
                                   ivar[start:end, :],
                                   mask[start:end, :],
                                   meanspec,
                                   header=dict(CAMERA=camera))
                    write_fiberflat(outfile, ff)
                    filePath = lvmspec.io.findfile("fiberflat", night, expid,
                                                   camera)
                    log.info("Wrote file {}".format(filePath))

            sys.exit(0)

    # Repeat the simulation for all spectra
    scale = 1e-17
    fluxunits = scale * u.erg / (u.s * u.cm**2 * u.Angstrom)
    for j in range(args.nspec):

        thisobjtype = objtype[j]
        sys.stdout.flush()
        if flavor == 'arc':
            qsim.source.update_in('Quickgen source {0}'.format(j), 'perfect',
                                  wavelengths * u.Angstrom,
                                  spectra * fluxunits)
        else:
            qsim.source.update_in('Quickgen source {0}'.format(j),
                                  thisobjtype.lower(),
                                  wavelengths * u.Angstrom,
                                  spectra[j, :] * fluxunits)
        qsim.source.update_out()

        qsim.simulate()
        qsim.generate_random_noise(random_state)

        for i, output in enumerate(qsim.camera_output):
            assert output['observed_flux'].unit == 1e17 * fluxunits
            # Extract the simulation results needed to create our uncalibrated
            # frame output file.
            num_pixels = len(output)
            nobj[j, i, :num_pixels] = output['num_source_electrons'][:, 0]
            nsky[j, i, :num_pixels] = output['num_sky_electrons'][:, 0]
            nivar[j, i, :num_pixels] = 1.0 / output['variance_electrons'][:, 0]

            # Get results for our flux-calibrated output file.
            cframe_observedflux[
                j, i, :num_pixels] = 1e17 * output['observed_flux'][:, 0]
            cframe_ivar[
                j,
                i, :num_pixels] = 1e-34 * output['flux_inverse_variance'][:, 0]

            # Fill brick arrays from the results.
            camera = output.meta['name']
            trueflux[camera][j][:] = 1e17 * output['observed_flux'][:, 0]
            noisyflux[camera][j][:] = 1e17 * (
                output['observed_flux'][:, 0] +
                output['flux_calibration'][:, 0] *
                output['random_noise_electrons'][:, 0])
            obsivar[camera][j][:] = 1e-34 * output['flux_inverse_variance'][:,
                                                                            0]

            # Use the same noise realization in the cframe and frame, without any
            # additional noise from sky subtraction for now.
            frame_rand_noise[
                j, i, :num_pixels] = output['random_noise_electrons'][:, 0]
            cframe_rand_noise[j, i, :num_pixels] = 1e17 * (
                output['flux_calibration'][:, 0] *
                output['random_noise_electrons'][:, 0])

            # The sky output file represents a model fit to ~40 sky fibers.
            # We reduce the variance by a factor of 25 to account for this and
            # give the sky an independent (Gaussian) noise realization.
            sky_ivar[
                j,
                i, :num_pixels] = 25.0 / (output['variance_electrons'][:, 0] -
                                          output['num_source_electrons'][:, 0])
            sky_rand_noise[j, i, :num_pixels] = random_state.normal(
                scale=1.0 / np.sqrt(sky_ivar[j, i, :num_pixels]),
                size=num_pixels)

    armName = {"b": 0, "r": 1, "z": 2}
    for channel in 'brz':

        # Before writing, convert from counts/bin to counts/A (as in Pixsim output)
        # Quicksim Default:
        # FLUX - input spectrum resampled to this binning; no noise added [1e-17 erg/s/cm2/s/Ang]
        # COUNTS_OBJ - object counts in 0.5 Ang bin
        # COUNTS_SKY - sky counts in 0.5 Ang bin

        num_pixels = len(waves[channel])
        dwave = np.gradient(waves[channel])
        nobj[:, armName[channel], :num_pixels] /= dwave
        frame_rand_noise[:, armName[channel], :num_pixels] /= dwave
        nivar[:, armName[channel], :num_pixels] *= dwave**2
        nsky[:, armName[channel], :num_pixels] /= dwave
        sky_rand_noise[:, armName[channel], :num_pixels] /= dwave
        sky_ivar[:, armName[channel], :num_pixels] *= dwave**2  # ivar scales as dwave**2, matching nivar above

        # Now write the outputs in DESI standard file system. None of the output files can have more than args.n_fibers spectra

        # Looping over spectrograph
        for ii in range((args.nspec + args.nstart - 1) // args.n_fibers + 1):

            start = max(args.n_fibers * ii,
                        args.nstart)  # first spectrum for a given spectrograph
            end = min(args.n_fibers * (ii + 1),
                      nmax)  # last spectrum for the spectrograph

            if (args.spectrograph <= ii):
                camera = "{}{}".format(channel, ii)
                log.info(
                    "Writing files for channel:{}, spectrograph:{}, spectra:{} to {}"
                    .format(channel, ii, start, end))
                num_pixels = len(waves[channel])

                # Write frame file
                framefileName = lvmspec.io.findfile("frame", night, expid,
                                                    camera)

                frame_flux = nobj[start:end, armName[channel], :num_pixels] + \
                    nsky[start:end, armName[channel], :num_pixels] + \
                    frame_rand_noise[start:end, armName[channel], :num_pixels]
                frame_ivar = nivar[start:end, armName[channel], :num_pixels]

                # required for slicing the resolution matrix, which has shape (nspec, ndiag, nwave)
                # for example if nstart=400, nspec=150 and two spectrographs:
                # 400-499 => spectrograph 0, 500-549 => spectrograph 1
                sh1 = frame_flux.shape[0]

                if (args.nstart == start):
                    resol = resolution[channel][:sh1, :, :]
                else:
                    resol = resolution[channel][-sh1:, :, :]

                # must create lvmspec.Frame object
                frame = Frame(waves[channel],
                              frame_flux,
                              frame_ivar,
                              resolution_data=resol,
                              spectrograph=ii,
                              fibermap=fibermap[start:end],
                              meta=dict(CAMERA=camera, FLAVOR=simspec.flavor))
                lvmspec.io.write_frame(framefileName, frame)

                framefilePath = lvmspec.io.findfile("frame", night, expid,
                                                    camera)
                log.info("Wrote file {}".format(framefilePath))

                if args.frameonly or simspec.flavor == 'arc':
                    continue

                # Write cframe file
                cframeFileName = lvmspec.io.findfile("cframe", night, expid,
                                                     camera)
                cframeFlux = cframe_observedflux[start:end, armName[channel], :num_pixels] + \
                    cframe_rand_noise[start:end, armName[channel], :num_pixels]
                cframeIvar = cframe_ivar[start:end,
                                         armName[channel], :num_pixels]

                # must create lvmspec.Frame object
                cframe = Frame(waves[channel],
                               cframeFlux,
                               cframeIvar,
                               resolution_data=resol,
                               spectrograph=ii,
                               fibermap=fibermap[start:end],
                               meta=dict(CAMERA=camera, FLAVOR=simspec.flavor))
                lvmspec.io.frame.write_frame(cframeFileName, cframe)

                cframefilePath = lvmspec.io.findfile("cframe", night, expid,
                                                     camera)
                log.info("Wrote file {}".format(cframefilePath))

                # Write sky file
                skyfileName = lvmspec.io.findfile("sky", night, expid, camera)
                skyflux = nsky[start:end, armName[channel], :num_pixels] + \
                    sky_rand_noise[start:end, armName[channel], :num_pixels]
                skyivar = sky_ivar[start:end, armName[channel], :num_pixels]
                skymask = np.zeros(skyflux.shape, dtype=np.uint32)

                # must create lvmspec.Sky object
                skymodel = SkyModel(waves[channel],
                                    skyflux,
                                    skyivar,
                                    skymask,
                                    header=dict(CAMERA=camera))
                lvmspec.io.sky.write_sky(skyfileName, skymodel)

                skyfilePath = lvmspec.io.findfile("sky", night, expid, camera)
                log.info("Wrote file {}".format(skyfilePath))

                # Write calib file
                calibVectorFile = lvmspec.io.findfile("calib", night, expid,
                                                      camera)
                flux = cframe_observedflux[start:end,
                                           armName[channel], :num_pixels]
                phot = nobj[start:end, armName[channel], :num_pixels]
                calibration = np.zeros_like(phot)
                jj = (flux > 0)
                calibration[jj] = phot[jj] / flux[jj]

                # - TODO: what should calibivar be?
                # - For now, model it as the noise of combining ~10 spectra
                calibivar = 10 / cframe_ivar[start:end,
                                             armName[channel], :num_pixels]
                # mask=(1/calibivar>0).astype(int)??
                mask = np.zeros(calibration.shape, dtype=np.uint32)

                # write flux calibration
                fluxcalib = FluxCalib(waves[channel], calibration, calibivar,
                                      mask)
                write_flux_calibration(calibVectorFile, fluxcalib)

                calibfilePath = lvmspec.io.findfile("calib", night, expid,
                                                    camera)
                log.info("Wrote file {}".format(calibfilePath))
Example #45
                                      decintpix, 0,
                                      psf_in[0].data.shape[0] - 1)
        # Yes, it's bonkers that the wcs is the other way round compared to the fits file
        psf_ratio = psf_in[0].data[decclip, raclip]

        # Get the PSF information
        a_in = fits.open(aimage)
        b_in = fits.open(bimage)
        pa_in = fits.open(paimage)
        a = a_in[0].data[decclip, raclip]
        b = b_in[0].data[decclip, raclip]
        pa = pa_in[0].data[decclip, raclip]

        # Scale the peak fluxes
        if options.scaleint:
            data['int_flux'] /= psf_ratio
        else:
            data['peak_flux'] *= psf_ratio

        vot = Table(data)

        # Add the PSF columns
        vot.add_column(Column(data=a, name='a_psf'))
        vot.add_column(Column(data=b, name='b_psf'))
        vot.add_column(Column(data=pa, name='pa_psf'))

        # description of this votable
        vot.description = "Corrected for position-dependent PSF variation"
        print "Writing to " + output
        writetoVO(vot, output)
Example #46
    
    pos = np.array([position[0] * np.ones(n),
                    position[1] * np.ones(n),
                    position[2] * np.ones(n),
                    np.ones(n)])
    # print('Positions:')
    # print(pos)
    dir = np.array([dir[0] * np.ones(n) + dir_diff[0] * np.arange(0, n),
                    dir[1] * np.ones(n) + dir_diff[1] * np.arange(0, n),
                    dir[2] * np.ones(n) + dir_diff[2] * np.arange(0, n),
                    np.zeros(n)])
    if dir_diff != [0, 0, 0]:
        print('Directions:')
        print(dir)
    
    photons.add_column(Column(name='pos', data=pos.T))
    photons.add_column(Column(name='dir', data=dir.T))
    photons.add_column(Column(name='polarization', data=polarization_vectors(dir.T, photons['polangle'])))
        
    return photons


def fullRotation(exp_time, offset=0):
    '''The source and first mirror are rotated through 360 degrees, and the simulation is run at 10 degree intervals.
    '''
    counts = np.zeros(36)
    for i in np.arange(0, 360, 10):
        print('angle: ' + str(i))
        photons = sourceMirrorDetector(sourceAngle=i, time=float(exp_time), sourceOffset=offset, detOffset=offset)
        p = photons[photons['probability'] > 0].copy()
        #p['probability'] *= 100
Example #47
def _json_to_table(json_obj):
    """
    Takes a JSON object as returned from a MAST microservice request and turns it into an `~astropy.table.Table`.

    Parameters
    ----------
    json_obj : dict
        A MAST microservice response JSON object (python dictionary)

    Returns
    -------
    response : `~astropy.table.Table`
    """
    data_table = Table(masked=True)

    if not all(x in json_obj.keys() for x in ['info', 'data']):
        raise KeyError("Missing required key(s) 'data' and/or 'info.'")

    # determine database type key in case missing
    type_key = 'type' if json_obj['info'][0].get('type') else 'db_type'

    # for each item in info, store the type and column name
    for idx, col, col_type, ignore_value in \
            [(idx, x['name'], x[type_key], "NULL") for idx, x in enumerate(json_obj['info'])]:

        # if default value is NULL, set ignore value to None
        if ignore_value == "NULL":
            ignore_value = None
        # making type adjustments
        if col_type == "char" or col_type == "STRING":
            col_type = "str"
            ignore_value = "" if (ignore_value is None) else ignore_value
        elif col_type == "boolean" or col_type == "BINARY":
            col_type = "bool"
        elif col_type == "unsignedByte":
            col_type = np.ubyte
        elif col_type == "int" or col_type == "short" or col_type == "long" or col_type == "NUMBER":
            # int arrays do not admit Non/nan vals
            col_type = np.int64
            ignore_value = -999 if (ignore_value is None) else ignore_value
        elif col_type == "double" or col_type == "float" or col_type == "DECIMAL":
            # int arrays do not admit Non/nan vals
            col_type = np.float64
            ignore_value = -999 if (ignore_value is None) else ignore_value
        elif col_type == "DATETIME":
            col_type = "str"
            ignore_value = "" if (ignore_value is None) else ignore_value

        # Make the column list (don't assign final type yet or there will be errors)
        # Step through data array of values
        col_data = np.array([x[idx] for x in json_obj['data']], dtype=object)
        if ignore_value is not None:
            col_data[np.where(np.equal(col_data, None))] = ignore_value

        # no consistent way to make the mask because np.equal fails on ''
        # and array == value fails with None
        if col_type == 'str':
            col_mask = (col_data == ignore_value)
        else:
            col_mask = np.equal(col_data, ignore_value)

        # add the column
        data_table.add_column(
            MaskedColumn(col_data.astype(col_type), name=col, mask=col_mask))

    return data_table
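A toy call of _json_to_table (assuming the function and imports above are available; the payload mirrors the 'info'/'data' keys checked above, and the field names here are made up):

json_obj = {
    'info': [{'name': 'target', 'type': 'char'},
             {'name': 'ra', 'type': 'double'}],
    'data': [['M31', 10.684],
             ['M33', None]],
}
tbl = _json_to_table(json_obj)
# tbl['ra'][1] comes back masked (the None was replaced by the -999 fill value)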
Example #48
def ref_galactic_fk4(fnout='galactic_fk4.csv'):
    """
    Accuracy tests for the ICRS (with no E-terms of aberration) to/from FK5
    conversion, with arbitrary equinoxes and epoch of observation.
    """
    import os
    import numpy as np
    import starlink.Ast as Ast
    from astropy.table import Table, Column

    np.random.seed(12345)

    N = 200

    # Sample uniformly on the unit sphere. These will be either the Galactic
    # coordinates for the transformation to FK4, or the FK4 coordinates for
    # the transformation to Galactic.
    lon = np.random.uniform(0., 360., N)
    lat = np.degrees(np.arcsin(np.random.uniform(-1., 1., N)))

    # Generate random observation epoch and equinoxes
    obstime = [
        "B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)
    ]
    equinox_fk4 = [
        "J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)
    ]

    lon_gal, lat_gal = [], []
    ra_fk4, dec_fk4 = [], []

    for i in range(N):

        # Set up frames for AST
        frame_gal = Ast.SkyFrame(
            'System=Galactic,Epoch={epoch}'.format(epoch=obstime[i]))
        frame_fk4 = Ast.SkyFrame(
            'System=FK4,Epoch={epoch},Equinox={equinox_fk4}'.format(
                epoch=obstime[i], equinox_fk4=equinox_fk4[i]))

        # Galactic to FK4
        frameset = frame_gal.convert(frame_fk4)
        coords = np.degrees(
            frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
        ra_fk4.append(coords[0, 0])
        dec_fk4.append(coords[1, 0])

        # FK4 to Galactic
        frameset = frame_fk4.convert(frame_gal)
        coords = np.degrees(
            frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
        lon_gal.append(coords[0, 0])
        lat_gal.append(coords[1, 0])

    # Write out table to a CSV file
    t = Table()
    t.add_column(Column(name='equinox_fk4', data=equinox_fk4))
    t.add_column(Column(name='obstime', data=obstime))
    t.add_column(Column(name='lon_in', data=lon))
    t.add_column(Column(name='lat_in', data=lat))
    t.add_column(Column(name='ra_fk4', data=ra_fk4))
    t.add_column(Column(name='dec_fk4', data=dec_fk4))
    t.add_column(Column(name='lon_gal', data=lon_gal))
    t.add_column(Column(name='lat_gal', data=lat_gal))
    with open(fnout, 'w') as f:
        f.write("# This file was generated with the {0} script, and the "
                "reference values were computed using AST\n".format(
                    os.path.basename(__file__)))
        t.write(f, format='ascii', delimiter=',')
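
To sanity-check the output, the CSV can be read straight back with astropy
(a sketch; assumes the default fnout):

from astropy.table import Table
ref = Table.read('galactic_fk4.csv', format='ascii.basic', delimiter=',')
print(ref.colnames)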
Beispiel #49
0
# Fragment of a longer script; the imports below are implied by the code
import numpy as np
import scipy.interpolate as itp
from astropy.table import Table, Column

fint=np.append(ynew2,ynew1[i1])
fint=np.sort(fint)[::-1]
finn=np.append(xnew2,xnew1[i1])
finn=np.sort(finn)[::-1]
fint=np.reshape(fint,(np.shape(fint)[0],1))
finn=np.reshape(finn,(np.shape(finn)[0],1))
fX3=np.concatenate((fint, finn), axis=1)
fX3=fX3[fX3[:, 0].argsort()]
mytck3,myu3=itp.splprep([kx3,ky3],k=1)
xnew3,ynew3=itp.splev(np.linspace(0,1,550),mytck3)

#----------------------------------------------------------------------------------------------------------#


#----------------------------------------------------------------------------------------------------------#

# Exporting the points of the fit to a text file
tab1=Table()
tab2=Table()
tab1.add_column(Column(data=fX3[:, 0],name='Latitude'))
tab1.add_column(Column(data=fX3[:, 1],name='Longitude'))
tab2.add_column(Column(data=ynew3,name='Latitude'))
tab2.add_column(Column(data=xnew3,name='Longitude'))

with open(output_file_path1, 'w') as f:
    f.write('\n'.join(tab1.pformat(max_lines=-1, max_width=-1)) + '\n')
with open(output_file_path2, 'w') as f:
    f.write('\n'.join(tab2.pformat(max_lines=-1, max_width=-1)) + '\n')

#----------------------------------------------------------------------------------------------------------#
Beispiel #50
0
def test_write_html(tmpdir):
    t = Table()
    t.add_column(Column(name='a', data=[1, 2, 3]))
    t.add_column(Column(name='b', data=['a', 'b', 'c']))
    path = str(tmpdir.join("data.html"))
    t.write(path, format='html')
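    # Read-back check, a sketch appended here rather than part of the
    # original test (reading HTML requires BeautifulSoup to be installed)
    t2 = Table.read(path, format='html')
    assert (t2['a'] == [1, 2, 3]).all()
    assert (t2['b'] == ['a', 'b', 'c']).all()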
Beispiel #51
0
def savestats(
        basepath="/bio/web/secure/adamginsburg/ALMA-IMF/October31Release",
        suffix='image.tt0',
        filetype=".fits"):
    if 'October31' in basepath:
        stats = assemble_stats(f"{basepath}/*/*/*_12M_*.{suffix}*{filetype}",
                               ditch_suffix=f".{suffix[:-1]}")
    else:
        # extra layer: bsens, cleanest, etc
        stats = assemble_stats(f"{basepath}/*/*/*/*_12M_*.{suffix}*{filetype}",
                               ditch_suffix=f".{suffix[:-1]}")
    with open(f'{basepath}/tables/metadata_{suffix}.json', 'w') as fh:
        json.dump(stats, fh, cls=MyEncoder)

    requested = get_requested_sens()

    meta_keys = [
        'region', 'band', 'array', 'selfcaliter', 'robust', 'suffix', 'bsens',
        'pbcor', 'filename'
    ]
    stats_keys = [
        'bmaj',
        'bmin',
        'bpa',
        'peak',
        'sum',
        'fluxsum',
        'sumgt3sig',
        'sumgt5sig',
        'mad',
        'mad_sample',
        'std_sample',
        'peak/mad',
        'psf_secondpeak',
        'psf_secondpeak_radius',
        'psf_secondpeak_sidelobefraction',
    ]
    req_keys = ['B3_res', 'B3_sens', 'B6_res', 'B6_sens']
    req_keys_head = ['Req_Res', 'Req_Sens']

    rows = []
    for entry in stats:
        band = entry['meta']['band']
        requested_this = requested[requested['Field'] == entry['meta']['region']]
        if len(requested_this) == 0:
            print(f"Skipped {entry['meta']['region']}")
            continue
        rows += [[entry['meta'][key] for key in meta_keys] + [
            entry['stats'][key] if key in entry['stats'] else np.nan
            for key in stats_keys
        ] + [requested_this[key][0] for key in req_keys if band in key]]

    tbl = Table(rows=rows, names=meta_keys + stats_keys + req_keys_head)

    # do some QA
    tbl.add_column(
        Column(name='SensVsReq', data=tbl['mad'] * 1e3 / tbl['Req_Sens']))
    tbl.add_column(
        Column(name='BeamVsReq',
               data=(tbl['bmaj'] * tbl['bmin'])**0.5 / tbl['Req_Res']))

    tbl.write(f'{basepath}/tables/metadata_{suffix}.ecsv', overwrite=True)
    tbl.write(f'{basepath}/tables/metadata_{suffix}.html',
              format='ascii.html',
              overwrite=True)
    tbl.write(f'{basepath}/tables/metadata_{suffix}.tex', overwrite=True)
    tbl.write(f'{basepath}/tables/metadata_{suffix}.js.html',
              format='jsviewer')

    return tbl
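
A hedged usage sketch (the helpers assemble_stats and get_requested_sens
are assumed to be defined elsewhere in the surrounding module):

tbl = savestats(suffix='image.tt0')
# flag images whose measured noise exceeds the requested sensitivity
print(tbl['region', 'band', 'SensVsReq'][tbl['SensVsReq'] > 1])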
Beispiel #52
0
        fdic['dark'][float(hdr['EXPTIME'])].append(filename)

    elif hdr['OBJECT'].lower().startswith('bias'):
        fdic['bias'].append(filename)

    elif hdr['OBJECT'].lower().startswith('flat'):
        fdic['flat'].append(filename)
    elif hdr['OBJECT'].lower().startswith('comp'):
        fdic['comp'].append(filename)
    else:
        fdic['objt'].append(filename)
        obnames.append(hdr["OBJECT"])

# Add the file-name column to the table
fnames = Column(data=fnames, name='FILE')
table.add_column(fnames, index=0)
table.sort('FILE')
# Create a separate folder for the table and save it there
tablepath = Path(newfitspath / 'Headertable')
if not tablepath.exists():
    tablepath.mkdir()
else:
    print("Table folder already exists; not creating it again")

table.write(tablepath / 'Headersumm.csv', format='ascii.csv', overwrite=True)

#%%
"""Preprocess용 Table 나누기"""
biastab = table[(table['OBJECT'] == 'Bias')]
# number of distinct exposure times
darkdic = {}
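# The snippet breaks off here; a plausible continuation (column names
# assumed from the header table built above) groups dark frames by exposure time:
darktab = table[table['OBJECT'] == 'Dark']
for expt in set(darktab['EXPTIME']):
    darkdic[expt] = darktab[darktab['EXPTIME'] == expt]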
Beispiel #53
0
def mark(tv, stars=None, rad=3, auto=False, color='m', new=False, exit=False):
    """ Interactive mark stars on TV, or recenter current list 

    Args : 
           tv  : TV instance from which user will mark stars
           stars =   : existing star table
           auto=  (bool) : if True, recentroid from existing position
           radius= (int): radius to use for centroiding and for size of circles (default=3)
           color= (char) : color for circles (default='m')
    """

    # clear display and mark current star list (if not new)
    if new: tv.tvclear()
    try:
        dateobs = Time(tv.hdr['DATE-OBS'], format='fits')
    except (KeyError, ValueError):
        dateobs = None
    #try: exptime=tv.hdr['EXPTIME']
    #except: exptime=None
    #try: filt=tv.hdr['FILTER']
    #except: filt=None
    cards = ['EXPTIME', 'FILTER', 'AIRMASS']
    types = ['f4', 'S', 'f4']
    if stars is None:
        stars = Table(names=('id', 'x', 'y'), dtype=('i4', 'f4', 'f4'))
        stars['x'].info.format = '.2f'
        stars['y'].info.format = '.2f'
        if dateobs is not None:
            stars.add_column(Column([], name='MJD', dtype=('f8')))
            stars['MJD'].info.format = '.6f'
        #if exptime is not None :
        #    stars.add_column(Column([],name='EXPTIME',dtype=('f4')))
        #if filt is not None :
        #    stars.add_column(Column([],name='FILTER',dtype=('S')))
        for icard, card in enumerate(cards):
            try:
                stars.add_column(Column([], name=card, dtype=(types[icard])))
            except Exception:
                pass
    else:
        if auto:
            # with auto option, recentroid and update from current header
            for star in stars:
                x, y = centroid(tv.img, star['x'], star['y'], rad)
                star['x'] = x
                star['y'] = y
                if dateobs is not None: star['MJD'] = dateobs.mjd
                #if exptime is not None : star['EXPTIME'] = exptime
                #if filt is not None : star['FILTER'] = filt
                for icard, card in enumerate(cards):
                    try:
                        star[card] = tv.hdr[card]
                    except KeyError:
                        pass
        # display stars
        for star in stars:
            tv.tvcirc(star['x'], star['y'], rad, color=color)
        if exit: return stars

    istar = len(stars) + 1
    while True:
        key, x, y = tv.tvmark()
        if key == 'q' or key == 'e': break
        if key == 'i':
            # add at nearest integer pixel
            x = round(x)
            y = round(y)
        elif key == 'c':
            # centroid around marked position
            x, y = centroid(tv.img, x, y, rad)

        # add blank row, recognizing that we may have added other columns
        stars.add_row()
        stars[len(stars) - 1]['id'] = istar
        stars[len(stars) - 1]['x'] = x
        stars[len(stars) - 1]['y'] = y
        tv.tvcirc(x, y, rad, color=color)
        if dateobs is not None:
            stars[len(stars) - 1]['MJD'] = dateobs.mjd
        for icard, card in enumerate(cards):
            try:
                stars[len(stars) - 1][card] = tv.hdr[card]
            except KeyError:
                pass
        #if exptime is not None :
        #    stars[len(stars)-1]['EXPTIME'] = exptime
        #if filt is not None :
        #    stars[len(stars)-1]['FILTER'] = filt
        istar += 1
    return stars
Beispiel #54
0
        if get_quality(qa_txt, nline=47)==1: over_masked[i]='O'
        if get_quality(qa_txt, nline=20)==1: fov[i]='V'
        if get_quality(qa_txt, nline=19)==1: multiple[i]='M'
        if get_quality(qa_txt, nline=18)==1: bright_star[i]='B'  
        if get_quality(qa_txt, nline=17)==1: uncertain[i]='U'
        note[i]= read_note(qa_txt)  
    





#####################################################################

myTable = Table()
myTable.add_column(Column(data=pgc_, name='pgc'))
myTable.add_column(Column(data=ra_, name='ra', format='%0.4f'))
myTable.add_column(Column(data=dec_, name='dec', format='%0.4f'))
myTable.add_column(Column(data=l_, name='gl', format='%0.4f'))
myTable.add_column(Column(data=b_, name='gb', format='%0.4f'))
myTable.add_column(Column(data=sgl_, name='sgl', format='%0.4f'))
myTable.add_column(Column(data=sgb_, name='sgb', format='%0.4f'))
myTable.add_column(Column(data=d25_, name='d25', format='%0.2f'))
myTable.add_column(Column(data=b_a_, name='b_a', format='%0.2f'))
myTable.add_column(Column(data=pa_, name='pa', format='%0.1f'))
myTable.add_column(Column(data=ty_, name='ty', format='%0.1f'))
myTable.add_column(Column(data=type_, name='type'))
myTable.add_column(Column(data=sdss_, name='sdss'))
myTable.add_column(Column(data=alfa100, name='alfa100'))
myTable.add_column(Column(data=QA, name='QA_sdss'))
myTable.add_column(Column(data=QA_wise, name='QA_wise'))
Beispiel #55
0
def quickcat(tilefiles,
             targets,
             truth,
             zcat=None,
             obsconditions=None,
             perfect=False):
    """
    Generates quick output zcatalog

    Args:
        tilefiles : list of fiberassign tile files that were observed
        targets : astropy Table of targets
        truth : astropy Table of input truth with columns TARGETID, TRUEZ, and TRUETYPE
        zcat (optional): input zcatalog Table from previous observations
        obsconditions (optional): Table or ndarray with observing conditions from surveysim
        perfect (optional): if True, treat spectro pipeline as perfect with input=output,
            otherwise add noise and zwarn!=0 flags

    Returns:
        zcatalog astropy Table based upon input truth, plus Z, ZERR, ZWARN,
        NUMOBS, and SPECTYPE columns
    """
    #- convert to Table for easier manipulation
    if not isinstance(truth, Table):
        truth = Table(truth)

    #- Count how many times each target was observed for this set of tiles
    print('{} QC Reading {} tiles'.format(asctime(), len(tilefiles)))
    nobs = Counter()
    targets_in_tile = {}
    tileids = list()
    for infile in tilefiles:
        fibassign, header = fits.getdata(infile,
                                         'FIBER_ASSIGNMENTS',
                                         header=True)
        tile_id = header['TILEID']
        tileids.append(tile_id)

        ii = (fibassign['TARGETID'] != -1)  #- targets with assignments
        nobs.update(fibassign['TARGETID'][ii])
        targets_in_tile[tile_id] = fibassign['TARGETID'][ii]

    #- Trim obsconditions to just the tiles that were observed
    if obsconditions is not None:
        ii = np.in1d(obsconditions['TILEID'], tileids)
        if not np.all(ii):
            obsconditions = obsconditions[ii]
        assert len(obsconditions) > 0

    #- Sort obsconditions to match order of tiles
    #- This might not be needed, but is fast for O(20k) tiles and may
    #- prevent future surprises if code expects them to be row aligned
    tileids = np.array(tileids)
    if (obsconditions is not None) and \
       (np.any(tileids != obsconditions['TILEID'])):
        i = np.argsort(tileids)
        j = np.argsort(obsconditions['TILEID'])
        k = np.argsort(i)
        obsconditions = obsconditions[j[k]]
        assert np.all(tileids == obsconditions['TILEID'])

    #- Trim truth down to just ones that have already been observed
    print('{} QC Trimming truth to just observed targets'.format(asctime()))
    obs_targetids = np.array(list(nobs.keys()))
    iiobs = np.in1d(truth['TARGETID'], obs_targetids)
    truth = truth[iiobs]
    targets = targets[iiobs]

    #- Construct initial new z catalog
    print('{} QC Constructing new redshift catalog'.format(asctime()))
    newzcat = Table()
    newzcat['TARGETID'] = truth['TARGETID']
    if 'BRICKNAME' in truth.dtype.names:
        newzcat['BRICKNAME'] = truth['BRICKNAME']
    else:
        newzcat['BRICKNAME'] = np.zeros(len(truth), dtype=(str, 8))

    #- Copy TRUESPECTYPE -> SPECTYPE so that we can change without altering original
    newzcat['SPECTYPE'] = truth['TRUESPECTYPE'].copy()

    #- Add ZERR and ZWARN
    print('{} QC Adding ZERR and ZWARN'.format(asctime()))
    nz = len(newzcat)
    if perfect:
        newzcat['Z'] = truth['TRUEZ'].copy()
        newzcat['ZERR'] = np.zeros(nz, dtype=np.float32)
        newzcat['ZWARN'] = np.zeros(nz, dtype=np.int32)
    else:
        # get the observational conditions for the current tilefiles
        if obsconditions is None:
            obsconditions = get_median_obsconditions(tileids)

        # get the redshifts
        z, zerr, zwarn = get_observed_redshifts(targets, truth,
                                                targets_in_tile, obsconditions)
        newzcat['Z'] = z  #- update with noisy redshift
        newzcat['ZERR'] = zerr
        newzcat['ZWARN'] = zwarn

    #- Add numobs column
    print('{} QC Adding NUMOBS column'.format(asctime()))
    newzcat.add_column(Column(name='NUMOBS', length=nz, dtype=np.int32))
    for i in range(nz):
        newzcat['NUMOBS'][i] = nobs[newzcat['TARGETID'][i]]

    #- Merge previous zcat with newzcat
    print('{} QC Merging previous zcat'.format(asctime()))
    if zcat is not None:
        #- don't modify original
        #- Note: this uses copy on write for the columns to be memory
        #- efficient while still letting us modify a column if needed
        zcat = zcat.copy()

        #- targets that are in both zcat and newzcat
        repeats = np.in1d(zcat['TARGETID'], newzcat['TARGETID'])

        #- update numobs in both zcat and newzcat
        ii = np.in1d(newzcat['TARGETID'], zcat['TARGETID'][repeats])
        orig_numobs = zcat['NUMOBS'][repeats].copy()
        new_numobs = newzcat['NUMOBS'][ii].copy()
        zcat['NUMOBS'][repeats] += new_numobs
        newzcat['NUMOBS'][ii] += orig_numobs

        #- replace only repeats that had ZWARN flags in original zcat
        #- replace in new
        replace = repeats & (zcat['ZWARN'] != 0)
        jj = np.in1d(newzcat['TARGETID'], zcat['TARGETID'][replace])
        zcat[replace] = newzcat[jj]

        #- trim newzcat to ones that shouldn't override original zcat
        discard = np.in1d(newzcat['TARGETID'], zcat['TARGETID'])
        newzcat = newzcat[~discard]

        #- Should be non-overlapping now
        assert not np.any(np.in1d(zcat['TARGETID'], newzcat['TARGETID']))

        #- merge them
        newzcat = vstack([zcat, newzcat])

    #- check for duplicates
    targetids = newzcat['TARGETID']
    assert len(np.unique(targetids)) == len(targetids)

    #- Metadata for header
    newzcat.meta['EXTNAME'] = 'ZCATALOG'

    print('{} QC done'.format(asctime()))
    return newzcat
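
A minimal call sketch (the tile files and input tables are assumed to come
from a fiberassign/survey simulation run):

zcat1 = quickcat(tilefiles, targets, truth, perfect=True)
# a later epoch, merged with the earlier catalog
zcat2 = quickcat(more_tilefiles, targets, truth, zcat=zcat1)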
Beispiel #56
0
    # Build the group ID number
    groupID = i + 1

    # Count the number of images in this group
    numberOfImages = groupEndInd - groupStartInd

    # Build the list of ID numbers for THIS group and append it to the full list
    thisGroupID = numberOfImages * [groupID]
    groupIDs.extend(thisGroupID)

# Fill in the final entry
groupIDs.append(groupID)

# Store the groupID number in the reducedFileIndex
groupIDcolumn = Column(name='GROUP_ID', data=groupIDs)
reducedFileIndex.add_column(groupIDcolumn, index=2)

# Now remove any GROUPS with less than 8 images
groupIndex = reducedFileIndex.group_by('GROUP_ID')
goodGroupInds = []
groupInds = groupIndex.groups.indices
for startInd, endInd in zip(groupInds[:-1], groupInds[1:]):
    # Count the number of images in this group and test if it's any good.
    if (endInd - startInd) >= 8:
        goodGroupInds.extend(range(startInd, endInd))

# Cull the reducedFileIndex to only include viable groups
goodGroupInds = np.array(goodGroupInds)
reducedFileIndex = reducedFileIndex[goodGroupInds]

# Match a dither type for each group ("ABBA" or "HEX")
Beispiel #57
0
import matplotlib.pyplot as plt
from astropy.io import ascii
from astropy.table import Table, Column
import numpy as np
from scipy import spatial
import seaborn
import illustris_python as il  # needed for il.groupcat below

lbox=205.
basePath = '/home/fede/TNG300-1/output/'
fields = ['SubhaloPos','SubhaloMass']
subgroups = il.groupcat.loadSubhalos(basePath,99,fields=fields)

gxs = Table(subgroups['SubhaloPos'],names=['x','y','z'],dtype=['float64','float64','float64'])

col_m = Column(subgroups['SubhaloMass'],name='mass',dtype='float64')
gxs.add_column(col_m)

gxs = gxs[(np.log10(gxs['mass'])>-1.)&(np.log10(gxs['mass'])<3.)]

for col in ['x','y','z']:
    gxs[col] /= 1000.
gxs.remove_row(np.where(gxs['y']==205.0)[0][0])
gxs.remove_row(np.where(gxs['x']<0.)[0][0])

tree = spatial.cKDTree(data=np.column_stack((gxs['x'],gxs['y'],gxs['z'])),boxsize=lbox)
voids = ascii.read('../data/tng300-1_voids.dat',names=['r','x','y','z','vx','vy','vz','deltaint_1r','maxdeltaint_2-3r','log10Poisson','Nrecenter'])
#%%

# N=[]
# for i in range(len(voids)):
# 	if i%1000==0: print(i)
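# A vectorised take on the neighbour count hinted at above; searching within
# one void radius of each void centre is an assumption, not from the script.
centres = np.column_stack((voids['x'], voids['y'], voids['z']))
N = [len(tree.query_ball_point(c, r)) for c, r in zip(centres, voids['r'])]
Beispiel #58
0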
    def _do_photometry(self, param_tab, n_start=1):
        """
        Helper function which performs the iterations of the photometry
        process.

        Parameters
        ----------
        param_tab : `~astropy.table.Table`
            Table of initial guesses. For example, columns
            ['x_0', 'y_0', 'flux_0'] for initial guesses on the center
            positions and the flux.
        n_start : int
            Integer representing the start index of the iteration.  It
            is 1 if init_guesses are None, and 2 otherwise.

        Returns
        -------
        output_table : `~astropy.table.Table` or None
            Table with the photometry results, i.e., centroids and
            fluxes estimations and the initial estimates used to start
            the fitting process.
        """

        output_table = Table()
        self._define_fit_param_names()

        for (init_parname, fit_parname) in zip(self._pars_to_set.keys(),
                                               self._pars_to_output.keys()):
            output_table.add_column(Column(name=init_parname))
            output_table.add_column(Column(name=fit_parname))

        sources = self.finder(self._residual_image)

        n = n_start
        while (sources is not None
               and (self.niters is None or n <= self.niters)):
            positions = np.transpose(
                (sources['xcentroid'], sources['ycentroid']))
            apertures = CircularAperture(positions, r=self.aperture_radius)
            sources['aperture_flux'] = aperture_photometry(
                self._residual_image, apertures)['aperture_sum']

            init_guess_tab = Table(names=['id', 'x_0', 'y_0', 'flux_0'],
                                   data=[
                                       sources['id'], sources['xcentroid'],
                                       sources['ycentroid'],
                                       sources['aperture_flux']
                                   ])
            self._get_additional_columns(sources, init_guess_tab)

            for param_tab_name, param_name in self._pars_to_set.items():
                if param_tab_name not in (['x_0', 'y_0', 'flux_0']):
                    init_guess_tab.add_column(
                        Column(name=param_tab_name,
                               data=(getattr(self.psf_model, param_name) *
                                     np.ones(len(sources)))))

            star_groups = self.group_maker(init_guess_tab)
            table, self._residual_image = super().nstar(
                self._residual_image, star_groups)

            star_groups = star_groups.group_by('group_id')
            table = hstack([star_groups, table])

            table['iter_detected'] = n * np.ones(table['x_fit'].shape,
                                                 dtype=np.int32)

            output_table = vstack([output_table, table])

            # do not warn if no sources are found beyond the first iteration
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', NoDetectionsWarning)
                sources = self.finder(self._residual_image)

            n += 1

        return output_table
Beispiel #59
0
    def _fits_summary(self, header_keywords):
        """
        Generate a summary table of keywords from FITS headers.

        Parameters
        ----------
        header_keywords : list of str or '*'
            Keywords whose value should be extracted from FITS headers or '*'
            to extract all.
        """

        if not self.files:
            return None

        # Make sure we have a list; for example, in Python 3, dict.keys()
        # is not a list.
        original_keywords = list(header_keywords)

        # Get rid of any duplicate keywords, also forces a copy.
        header_keys = set(original_keywords)
        header_keys.add('file')

        file_name_column = MaskedColumn(name='file', data=self.files)

        if not header_keys or (header_keys == {'file'}):
            summary_table = Table(masked=True)
            summary_table.add_column(file_name_column)
            return summary_table

        summary_dict = None
        missing_marker = None

        for file_name in file_name_column.tolist():
            file_path = path.join(self.location, file_name)
            try:
                # Note: summary_dict is an OrderedDict, so should preserve
                # the order of the keywords in the FITS header.
                summary_dict = self._dict_from_fits_header(
                    file_path,
                    input_summary=summary_dict,
                    missing_marker=missing_marker)
            except IOError as e:
                logger.warning('unable to get FITS header for file %s: %s.',
                               file_path, e)
                continue

        summary_table = Table(summary_dict, masked=True)

        for column in summary_table.colnames:
            summary_table[column].mask = [
                v is missing_marker for v in summary_table[column].tolist()
            ]

        self._set_column_name_case_to_match_keywords(header_keys,
                                                     summary_table)
        missing_columns = header_keys - set(summary_table.colnames)
        missing_columns -= {'*'}

        length = len(summary_table)
        for column in missing_columns:
            all_masked = MaskedColumn(name=column,
                                      data=np.zeros(length),
                                      mask=np.ones(length))
            summary_table.add_column(all_masked)

        if '*' not in header_keys:
            # Rearrange table columns to match order of keywords.
            # File always comes first.
            header_keys -= {'file'}
            original_order = ['file'] + sorted(header_keys,
                                               key=original_keywords.index)
            summary_table = summary_table[original_order]

        if not summary_table.masked:
            summary_table = Table(summary_table, masked=True)

        return summary_table
Beispiel #60
0
import io

from astropy.table import Table, Column


def test_from_table_without_mask():
    t = Table()
    c = Column(data=[1, 2, 3], name='a')
    t.add_column(c)
    output = io.BytesIO()
    t.write(output, format='votable')
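    # Round-trip check, a sketch appended here rather than part of the
    # original test
    output.seek(0)
    t2 = Table.read(output, format='votable')
    assert (t2['a'] == [1, 2, 3]).all()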