Example 1
def saveTableToFile(table,
                    saveFile=None,
                    fileType='hdf5',
                    table_name='table',
                    overwrite=False,
                    **kwargs):
    '''
    Save a grid or source table to a file.
    This is much faster than atpy or astropy for creating hdf5 files.
    '''

    if (fileType == 'hdf5'):
        kwargs['path'] = table_name

    if saveFile is None:
        print('!!> No file name given, not saving to any file.')
    else:
        if (not overwrite) and (os.path.exists(saveFile)):
            print(
                '!!> Output file %s already exists.\n    Use the overwrite option if needed.'
                % saveFile)
        else:
            if (os.path.exists(saveFile)):
                os.remove(saveFile)
            print("@@> Saving table %s to file %s as type %s" %
                  (table_name, saveFile, fileType))
            table.write(saveFile, format=fileType, **kwargs)
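
# A hedged usage sketch for the function above (the grid columns are
# illustrative; the snippet's module is assumed to import os, and HDF5
# output additionally requires h5py):
import astropy.table

grid = astropy.table.Table({'teff': [5000.0, 6000.0], 'logg': [4.0, 4.5]})
saveTableToFile(grid, saveFile='grid.hdf5', fileType='hdf5',
                table_name='grid', overwrite=True)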
Example 2
def makeCartoon(seed=1):
    """Create a cartoon jitter timeseries"""

    np.random.seed(seed)
    rmsat2s = 2.0 / 3.0
    rmsat120s = 0.21

    # share across two dimensions
    rmsat120s1d = rmsat120s / np.sqrt(2)
    rmsat2s1d = rmsat2s / np.sqrt(2)

    cadence = 2.0
    smoothscale = 10.0
    nsmooth = int(smoothscale / cadence)
    t = np.arange(0, 30 * 24 * 60 * 60, 2)
    n = len(t)
    d = {}
    d['t'] = t
    for k in ['x', 'y']:
        v = np.random.normal(0, 1, n)
        for i in range(2):
            v = np.convolve(v, np.ones(nsmooth), mode='same')
        d[k] = v / np.std(v) * rmsat2s1d

    table = astropy.table.Table(d, names=['t', 'x', 'y'])
    table.write(os.path.join(settings.inputs, 'cartoon.jitter'), format='ascii.fixed_width', bookend=False)
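
# The smoothing-and-rescaling step above, checked in isolation: white noise
# is boxcar-convolved twice, then rescaled to the per-axis RMS target
# (values mirror those used in makeCartoon).
import numpy as np

np.random.seed(1)
v = np.random.normal(0, 1, 100000)
nsmooth = 5                            # smoothscale / cadence
for i in range(2):
    v = np.convolve(v, np.ones(nsmooth), mode='same')
target = (2.0 / 3.0) / np.sqrt(2)      # rmsat2s1d
v = v / np.std(v) * target
assert abs(np.std(v) - target) < 1e-9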
Example 3
def save(output_file_path, data_array, header_list, dtype_list):
    print(output_file_path)

    if output_file_path.lower().endswith('.csv'):

        # See http://docs.astropy.org/en/stable/io/fits/usage/table.html
        np.savetxt(
            output_file_path,
            data_array,
            fmt=
            "%s",  # See http://stackoverflow.com/questions/16621351/how-to-use-python-numpy-savetxt-to-write-strings-and-float-number-to-an-ascii-fi
            delimiter=",",
            header=",".join(header_list),
            comments=
            ""  # String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# '.
        )

    elif output_file_path.lower().endswith(('.fits', '.fit')):

        table = astropy.table.Table(names=header_list, dtype=dtype_list)

        for row in data_array:
            table.add_row(row)

        #print(table)
        table.write(output_file_path, overwrite=True)

    else:
        raise Exception('Unknown output format.')
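
# A hypothetical call to the save helper above (names, dtypes, and rows are
# illustrative; numpy and astropy.table are assumed imported as in the snippet):
header_list = ['name', 'value']
dtype_list = ['S8', 'f8']
data_array = [('a', 1.0), ('b', 2.0)]
save('demo.fits', data_array, header_list, dtype_list)   # FITS branch
save('demo.csv', data_array, header_list, dtype_list)    # np.savetxt branch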
Example 6
def clobber_table_write(table, filename, **kwargs):
    """ Writes a table, even if it has to clobber an older one. """

    try:
        table.write(filename, **kwargs)
    except Exception as e:
        print(e)
        print("Overwriting file.")
        os.remove(filename)
        table.write(filename, **kwargs)
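
# On current astropy the try/except dance is usually unnecessary, since
# Table.write accepts overwrite=True.  A minimal self-contained sketch:
from astropy.table import Table

t = Table({'a': [1, 2]})
t.write('demo.ecsv', format='ascii.ecsv', overwrite=True)  # replaces any old file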
def do_all_h2co():
    table = initialize_table()
    result = (filaments_right(table=table) + filaments_left(table=table) +
              middlechunk(table=table) + maus(table=table) +
              w51main(table=table))
    table.write(os.path.join(datapath_spectra, "spectralfit_table.ipac"),
                format='ascii.ipac')
    table2 = add_tex_tau_to_table(table)
    table2.write(os.path.join(datapath_spectra, "spectralfit_table_withtextau.ipac"),
                 format='ascii.ipac')
    return result, table
Example 8
def table_info(table, label):
    # Print basic info to console
    print('\n*** {0} ***'.format(label))
    print('Rows: {0}'.format(len(table)))
    print('Columns: {0}'.format(table.colnames))
    print('')

    # Save to file for debugging
    filename = 'crab_mwl_{0}.ipac'.format(label)
    print('INFO: Writing {0}'.format(filename))
    table.write(filename, format='ascii.ipac')
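
# A minimal usage sketch (illustrative columns; writes crab_mwl_demo.ipac
# to the working directory):
from astropy.table import Table

t = Table({'energy': [1.0, 2.0], 'flux': [3e-11, 4e-12]})
table_info(t, 'demo')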
Example 9
    def from_table(cls,
                   table,
                   slice,
                   mos_point_source,
                   relative_path='spectra',
                   fname=None):
        """
        class method to add a new spectrum given a table from the extracted
        spectrum.

        Parameters
        ----------

        table: astropy.table.Table

        mos_point_source: MosPointSource

        relative_path: str
            relative path to work dir (default='spectra')

        fname: str
            filename of the extracted spectrum (default None)
            if None it automatically generates
            <raw_fits_fname>_slc<slc_id>_src<src_id>

        """

        if not os.path.exists(os.path.join(cls.work_dir, relative_path)):
            logger.warning('Relative dir {0} does not exist for adding '
                           'spectrum - creating'.format(relative_path))
            os.mkdir(os.path.join(cls.work_dir, relative_path))

        if fname is None:
            fname = '{0}_slc{1}_src{2}.h5'.format(
                slice.science_set.science.fits.fname.replace('.fits', ''),
                slice.id, mos_point_source.id)

        full_fname = os.path.join(cls.work_dir, relative_path, fname)

        table.write(full_fname,
                    path=cls.hdf5_path,
                    format='hdf5',
                    overwrite=True)

        data_file = DataFile.from_file(os.path.join(relative_path, fname))

        spectrum = cls(mos_point_source_id=mos_point_source.id,
                       slice_id=slice.id)

        spectrum.data_file = data_file

        return data_file, spectrum
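
# The HDF5 write idiom used above, in isolation: the path keyword names the
# dataset inside the file (requires h5py; file and dataset names here are
# illustrative):
from astropy.table import Table

t = Table({'wave': [1.0, 2.0], 'flux': [3.0, 4.0]})
t.write('spectrum.h5', path='spectrum', format='hdf5', overwrite=True)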
Example 10
def main():

    file_ = sys.argv[1]
    dest = file_ + '.csv'

    table = astropy.table.Table.read(file_, format='ascii.ipac')
    table.meta = {}
    table.write(dest,
                format='csv',
                fill_values=[(ascii.masked, '\\N')],
                overwrite=True)

    return
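
# What the fill_values option does, in isolation: masked entries are written
# as \N (a common NULL marker for database loaders).  Illustrative data:
import astropy.table
from astropy.io import ascii

t = astropy.table.Table([[1, 2]], names=['a'], masked=True)
t['a'].mask = [False, True]
t.write('demo.csv', format='csv',
        fill_values=[(ascii.masked, '\\N')], overwrite=True)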
Example 11
    def write_csv(self, filename=None, path=None, overwrite=None, **kwargs):
        ''' Write the datamodel to a CSV '''

        release = self.parent.aliases[0].lower().replace('-', '')

        if not filename:
            filename = 'dapmodels_dm_{0}.csv'.format(release)

        if not path:
            path = os.path.join(os.getenv("MARVIN_DIR"), 'docs', 'sphinx', '_static')

        fullpath = os.path.join(path, filename)
        table = self.to_table(**kwargs)
        table.write(fullpath, format='csv', overwrite=overwrite)
Example 12
def test_load_bad(tmpdir):
    meta = dict(group_name='g', band_name='b')
    # Missing wavelength column.
    table = astropy.table.QTable(meta=meta)
    table['response'] = [1, 1]
    name = str(tmpdir.join('bad.ecsv'))
    table.write(name, format='ascii.ecsv', overwrite=True)
    with pytest.raises(RuntimeError):
        load_filter(name)
    # Missing response column.
    table = astropy.table.QTable(meta=meta)
    table['wavelength'] = [1, 2] * u.Angstrom
    name = str(tmpdir.join('bad.ecsv'))
    table.write(name, format='ascii.ecsv', overwrite=True)
    with pytest.raises(RuntimeError):
        load_filter(name)
    # Missing wavelength units.
    table = astropy.table.QTable(meta=meta)
    table['wavelength'] = [1, 2]
    table['response'] = [1, 1]
    name = str(tmpdir.join('bad.ecsv'))
    table.write(name, format='ascii.ecsv', overwrite=True)
    with pytest.raises(RuntimeError):
        load_filter(name)
    # Unexpected response units.
    table = astropy.table.QTable(meta=meta)
    table['wavelength'] = [1, 2] * u.Angstrom
    table['response'] = [1, 1] * u.erg
    name = str(tmpdir.join('bad.ecsv'))
    table.write(name, format='ascii.ecsv', overwrite=True)
    with pytest.raises(RuntimeError):
        load_filter(name)
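
# The test relies on ECSV round-tripping column units, which load_filter then
# validates.  A sketch of that behavior:
import astropy.table
import astropy.units as u

t = astropy.table.QTable()
t['wavelength'] = [1, 2] * u.Angstrom
t.write('demo.ecsv', format='ascii.ecsv', overwrite=True)
assert astropy.table.QTable.read('demo.ecsv')['wavelength'].unit == u.Angstrom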
Example 15
def process_db(connection, name, tile, outfile, epochfile, band, blind=True, quiet=True):

    sql="select * from {0}_main where tilename='{1}'".format(name, tile)
    print sql
    table = db.table_from_sql(connection, sql)
    if table is None:
        print "NO OBJECTS FOR ", name, tile
        return
    print "Loaded main table"
    sql="select * from {0}_epoch where tilename='{1}'".format(name, tile)
    print sql
    epochs = db.table_from_sql(connection, sql)
    print "Loaded epoch table"
    table, epochs = process_table(table, epochs, band, blind=blind,quiet=quiet)

    print 'Saving to FITS'
    table.write(outfile, format='fits')
    epochs.write(epochfile, format='fits')
Example 16
def process_text(main_file, epoch_file, out_main, out_epoch, band, blind=True, quiet=True, report=False):
    if os.path.exists(out_main):
        print('Skipping sub-file', out_main)
        return
    table = astropy.table.Table.read(main_file, format='ascii')
    epochs = astropy.table.Table.read(epoch_file, format='ascii')

    if len(table) == 0:
        print("No rows found for", out_main)
        return

    table, epochs = process_table(table, epochs, band, blind=blind, quiet=quiet, report=report)
    if report:
        print("Not saving output file as we are just reporting cuts")
        return

    print('Saving to FITS')
    table.write(out_main, format='fits')
    epochs.write(out_epoch, format='fits')
Example 17
def check_write_table_via_table(test_def, table, fast_writer):
    out = StringIO()

    test_def = copy.deepcopy(test_def)
    if 'Writer' in test_def['kwargs']:
        format = 'ascii.{0}'.format(test_def['kwargs']['Writer']._format_name)
        del test_def['kwargs']['Writer']
    else:
        format = 'ascii'

    try:
        table.write(out, format=format, fast_writer=fast_writer, **test_def['kwargs'])
    except ValueError as e:  # if format doesn't have a fast writer, ignore
        if 'not in the list of formats with fast writers' not in str(e):
            raise e
        return
    print('Expected:\n{}'.format(test_def['out']))
    print('Actual:\n{}'.format(out.getvalue()))
    assert [x.strip() for x in out.getvalue().strip().splitlines()] == [
        x.strip() for x in test_def['out'].strip().splitlines()]
Example 18
def check_write_table_via_table(test_def, table, fast_writer):
    out = StringIO()

    test_def = copy.deepcopy(test_def)
    if 'Writer' in test_def['kwargs']:
        format = f"ascii.{test_def['kwargs']['Writer']._format_name}"
        del test_def['kwargs']['Writer']
    else:
        format = 'ascii'

    try:
        table.write(out, format=format, fast_writer=fast_writer, **test_def['kwargs'])
    except ValueError as e:  # if format doesn't have a fast writer, ignore
        if 'not in the list of formats with fast writers' not in str(e):
            raise e
        return
    print(f"Expected:\n{test_def['out']}")
    print(f'Actual:\n{out.getvalue()}')
    assert [x.strip() for x in out.getvalue().strip().splitlines()] == [
        x.strip() for x in test_def['out'].strip().splitlines()]
Example 19
def process_multi_tile_text(main_file, epoch_file, outdir, band, blind=True, quiet=True):
    mkdir(outdir+"/text")
    mkdir(outdir+"/text/main")
    mkdir(outdir+"/text/epoch")
    mkdir(outdir+"/fits/")
    mkdir(outdir+"/fits/main")
    mkdir(outdir+"/fits/epoch")


    filenames = split_by_tile(main_file, epoch_file, outdir+"/text")

    for n, (tilename, main_file, epoch_file) in enumerate(filenames):
        print("{0} - Working on {1}".format(n, tilename))
        table = astropy.table.Table.read(main_file, format='ascii')
        epochs = astropy.table.Table.read(epoch_file, format='ascii')
        table, epochs = process_table(table, epochs, band, blind=blind, quiet=quiet)
        out_main = outdir + "/fits/main/" + tilename + ".txt"
        out_epoch = outdir + "/fits/epoch/" + tilename + ".txt"
        print('Saving to FITS')
        table.write(out_main, format='fits')
        epochs.write(out_epoch, format='fits')
Example 20
File: base.py Project: zpace/marvin
    def write_csv(self, filename=None, path=None, overwrite=None, **kwargs):
        ''' Write the datamodel to a CSV '''

        release = self.parent.release.lower().replace('-', '')

        if not filename:
            if isinstance(self.parent, DRPRSSDataModel):
                filename = 'drp_rss_spectra_dm_{0}.csv'.format(release)
            elif isinstance(self.parent, DRPCubeDataModel):
                filename = 'drp_cube_spectra_dm_{0}.csv'.format(release)
            else:
                raise ValueError('invalid parent of type {!r}'.format(
                    type(self.parent)))

        if not path:
            path = os.path.join(os.getenv("MARVIN_DIR"), 'docs', 'sphinx',
                                '_static')

        fullpath = os.path.join(path, filename)
        table = self.to_table(**kwargs)
        table.write(fullpath, format='csv', overwrite=overwrite)
Example 21
def check_write_table_via_table(test_def, table, fast_writer, out=None):
    if out is None:
        out = StringIO()

    test_def = copy.deepcopy(test_def)
    if 'Writer' in test_def['kwargs']:
        format = f"ascii.{test_def['kwargs']['Writer']._format_name}"
        del test_def['kwargs']['Writer']
    else:
        format = 'ascii'

    try:
        table.write(out,
                    format=format,
                    fast_writer=fast_writer,
                    **test_def['kwargs'])
    except ValueError as e:  # if format doesn't have a fast writer, ignore
        if 'not in the list of formats with fast writers' not in str(e):
            raise e
        return

    if isinstance(out, StringIO):
        # Output went to a buffer
        actual = out.getvalue()
    else:
        # Output went to a file
        if str(out).startswith('~'):
            # Ensure a file hasn't been accidentally written to a literal tilde
            # path
            assert not os.path.exists(out)
            out = os.path.expanduser(out)
        assert os.path.exists(out)
        with open(out) as f:
            actual = f.read()
        os.remove(out)

    print(f"Expected:\n{test_def['out']}")
    print(f'Actual:\n{actual}')
    assert [x.strip() for x in actual.strip().splitlines()
            ] == [x.strip() for x in test_def['out'].strip().splitlines()]
Example 22
    def writeText(self, filename):
        """Write the defects out to a text file with the specified name.

        Parameters
        ----------
        filename : `str`
            Name of the file to write.  The file extension ".ecsv" will
            always be used.

        Returns
        -------
        used : `str`
            The name of the file used to write the data (which may be
            different from the supplied name given the change to file
            extension).

        Notes
        -----
        The file is written to ECSV format and will include any metadata
        associated with the `Defects`.
        """

        # Using astropy table is the easiest way to serialize to ecsv
        afwTable = self.toSimpleTable()
        table = afwTable.asAstropy()

        metadata = afwTable.getMetadata()
        now = datetime.datetime.utcnow()
        metadata["DATE"] = now.isoformat()
        metadata["CALIB_CREATION_DATE"] = now.strftime("%Y-%m-%d")
        metadata["CALIB_CREATION_TIME"] = now.strftime("%T %Z").strip()

        table.meta = metadata.toDict()

        # Force file extension to .ecsv
        path, ext = os.path.splitext(filename)
        filename = path + ".ecsv"
        table.write(filename, format="ascii.ecsv")
        return filename
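
# The extension-forcing idiom used by writeText, in isolation:
import os

path, ext = os.path.splitext('defects.txt')
filename = path + '.ecsv'
assert filename == 'defects.ecsv'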
Example 23
    stel.append(stel_pa)
    gas.append(gas_pa)
    data.append(pa)
    gas_error.append(gas_err)
    stel_error.append(stel_err)
    gal_id.append(plate_id)

stel = np.array(stel)
gas = np.array(gas)
data = np.array(data)
gal_id = np.array(gal_id)
stel_error = np.array(stel_error)
gas_error = np.array(gas_error)
bad = np.array([how_many_bad])
bad_gals = np.array(bad_gals)

t = Table()
t['STEL_PA'] = Column(stel, description='Stellar position angle')
t['STEL_PA_ERR'] = Column(stel_error,
                          description='Stellar position angle error')
t['GAS_PA'] = Column(gas, description='Position angle calculated from the gas')
t['GAS_PA_ERR'] = Column(
    gas_error, description='Position angle calculated from the gas error')
t['PA'] = Column(data, description='Position angle from the MaNGA data')
t['GALAXY_ID'] = Column(gal_id, description='galaxy ID')
t['NUM_BAD_GALS'] = Column(bad, description="For Celeste's testing, ignore")
t['BAD_GALS_NAMES'] = Column(bad_gals,
                             description="For Celeste's testing, ignore")

t.write('/home/celeste/Documents/astro_research/summer_2018/pa_datav5.fits')
def do_indiv_fits(regfilename, outpfx, ncomp=1, dobaseline=False, table=None,
                  tableprefix="", **kwargs):
    regions = pyregion.open(regfilename)

    spectra = [both.get_apspec(r.coord_list, coordsys=r.coord_format,
                               wunit='degree')
               for r in regions]

    cont11hdu = fits.open(cont11filename)[0]

    pl.ioff()

    for ii,(sp,r) in enumerate(zip(spectra,regions)):

        if hasattr(ncomp,'__len__'):
            nc = ncomp[ii]
        else:
            nc = ncomp

        chi2, parinfo = {},{}
        sp.header['REGION'] = "{shape}({ra},{dec},{radius})".format(shape=r.name,
                                                                    ra=r.coord_list[0],
                                                                    dec=r.coord_list[1],
                                                                    radius=r.coord_list[2])

        ## this is pretty hacky =(
        #if dobaseline:
        #    spdict = fith2co.BigSpectrum_to_H2COdict(sp)
        #    for n,s in spdict.items():
        #        s.baseline(exclude=[2,8,43,75])
        #    sp = pyspeckit.Spectra(spdict.values())
        #    spectra[ii] = sp



        for cont in ('front','back',):
            print("Continuum in the %s" % cont)
            fig = pl.figure(ii, figsize=(12,8))
            fig.clf()
            name = r.attr[1]['text']

            if cont == 'back':
                M = pyregion.ShapeList([r]).get_mask(cont11hdu)
                c11 = cont11[M].mean()
                c22 = cont22[M].mean()
                sp.header['CONT11'] = c11
                sp.header['CONT22'] = c22
            elif cont == 'front':
                c11 = 2.7315
                c22 = 2.7315

            dofit(sp, c11, c22, nc, **kwargs)

            sp.specname = name
            sp.header['OBJECT'] = name
            print("ap%i %s: ni=%i, X^2%0.1f, X^2/n=%0.1f, n=%0.1f" % (ii,
                                                                      sp.specname,
                                                                      sp.specfit.fitter.mp.niter,
                                                                      sp.specfit.chi2,
                                                                      sp.specfit.chi2/sp.specfit.dof,
                                                                      sp.specfit.parinfo.DENSITY0.value))

            #if sp.specfit.chi2 / sp.specfit.dof > 2:
            #    gg[4] = sp.specfit.parinfo.CENTER0.value
            #    fixed[4] = True
            #    sp.specfit(fittype='formaldehyde_radex',guesses=gg,
            #               fixed=fixed, multifit=True,quiet=False,verbose=True,
            #               limits=limits,
            #               limited=limited,
            #               use_window_limits=False, fit_plotted_area=False)

            sp.plotter.autorefresh=False
            plotitem(sp, ii)

            sp.plotter.savefig(os.path.join(datapath,
                                            outpfx+'_aperture_%s_%s.pdf' %
                                            (name,cont)))

            plotitem(sp, ii, dolegend=True)

            # seriously, something aint'n't right here
            pl.figure(sp.plotter.figure.number)
            pl.savefig(os.path.join(datapath,
                                    outpfx+'_aperture_%s_%s_legend.pdf' %
                                    (name,cont)),
                                    bbox_extra_artists=[sp.specfit.fitleg])

            # for writing to file, select the best-fit
            chi2[cont] = sp.specfit.chi2
            parinfo[cont] = sp.specfit.parinfo
            if cont == 'back': # second one...
                if sp.specfit.chi2 > chi2['front']:
                    print("Back chi^2 > front chi^2.  Replacing parameters with Continuum in the Front")
                    best = 'front'
                else:
                    best = 'back'
                # set the previous, unset to match this...
                # since we're guaranteed to be in the 2nd of 2 in a loop here,
                # no danger
                table[-1]['frontbackbest'] = best
                if nc == 2:
                    # or previous *two* if twocomp
                    table[-2]['frontbackbest'] = best
                # at this stage, 'frontbackbest' should be universally assigned....
                if np.any(table['frontbackbest'] == ''):
                    import ipdb; ipdb.set_trace()
            else:
                best = ''

            if table is not None:
                add_parinfo_to_table(table, sp.specfit.parinfo, sp.specfit.chi2,
                                     sp.specfit.dof, sp.specfit.optimal_chi2(reduced=False),
                                     sp.specfit.optimal_chi2(reduced=True),
                                     ra=r.coord_list[0],
                                     dec=r.coord_list[1],
                                     radius=r.coord_list[2],
                                     frontback=cont,
                                     frontbackbest=best,
                                     name=tableprefix+sp.specname)

            if best != '':
                sp.specfit.parinfo = parinfo[best]
                sp.specfit.chi2 = chi2[best]

            sp.write(os.path.join(datapath,outpfx+"_aperture_%s.fits" % name))

    table.write(os.path.join(datapath_spectra,
                             tableprefix+"spectralfit_table.ipac"),
                format='ascii.ipac')

    pl.ion()

    return spectra
Example 25
# Documentation:
# - http://docs.astropy.org/en/stable/table/index.html#getting-started
# - http://www.astropy.org/astropy-tutorials/FITS-tables.html
# - http://www.astropy.org/astropy-tutorials/FITS-header.html

import argparse
from astropy.io import fits

import numpy as np
import astropy.table

# PARSE OPTIONS ###############################################################

parser = argparse.ArgumentParser(description="An astropy snippet")
parser.add_argument("filearg", nargs=1, metavar="FILE", help="the output FITS file")
args = parser.parse_args()
file_path = args.filearg[0]

# WRITE DATA ##################################################################

table = astropy.table.Table(names=("column1", "column2", "column3"))

table.add_row([1, 2, 3])
table.add_row([10, 20, 30])
table.add_row([100, 200, 300])

print(table)

table.write(file_path, overwrite=True)
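
# No format is passed above; astropy infers FITS from the file name.
# Reading the result back (a sketch continuing the script above):
table2 = astropy.table.Table.read(file_path)
print(table2.colnames)   # ['column1', 'column2', 'column3']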
Example 26
                   limited=[(True, True)] * 5,
                   fixed=[False, False, False, False, False])

        #only change center parameter from example
        #sp.plotter.savefig('H2CO_all_radexfit.pdf')
        table.add_row()
        table[-1]['x'] = v
        table[-1]['y'] = w
        table[-1]['temp'] = sp.specfit.modelpars[0]
        table[-1]['column'] = sp.specfit.modelpars[1]
        table[-1]['density'] = sp.specfit.modelpars[2]
        table[-1]['center'] = sp.specfit.modelpars[3]
        table[-1]['width'] = sp.specfit.modelpars[4]
        table[-1]['temp errors'] = sp.specfit.modelerrs[0]
        table[-1]['column errors'] = sp.specfit.modelerrs[1]
        table[-1]['density errors'] = sp.specfit.modelerrs[2]
        table[-1]['center errors'] = sp.specfit.modelerrs[3]
        table[-1]['width errors'] = sp.specfit.modelerrs[4]
table.write('grs1915H2COparameters.fits', overwrite=True)

plt.show()
t = Table.read('grs1915H2COparameters.fits')
plt.scatter(t['x'],
            t['y'],
            c=t['temp'],
            marker='o',
            cmap='hot',
            edgecolor='none')
plt.colorbar(label='Temperature(K)')
plt.savefig('grs1915H2COtempmap.pdf')
"""Generate an astropy-readable .ecsv files for `butler ingest-files`, to ingest an existing gen2 refcat.

The `refcat_dir` variable needs to be modified for each refcat being converted.
"""
import os
import glob
import astropy.table

refcat_dir = "ref_cats/gaia_dr2_20200414"
out_dir = "."

out_file = f"{out_dir}/{os.path.basename(refcat_dir)}.ecsv"

table = astropy.table.Table(names=("filename", "htm7"), dtype=("str", "int"))
files = glob.glob(f"{refcat_dir}/[0-9]*.fits")

for i, file in enumerate(files):
    # running status, overwriting each print statement as it proceeds
    print(f"{i}/{len(files)} ({100*i/len(files):0.1f}%)", end="\r")

    # extract file index; add row to table
    file_index = int(os.path.basename(os.path.splitext(file)[0]))
    table.add_row((file, file_index))

table.write(out_file)
print(f"Saving to: {out_file}")
Example 28
"""Combine all Fermi IRF data into a single file.
"""
from glob import glob
import numpy as np
from astropy import table
from astropy.io import fits
from astropy.table import Table, vstack
import astropy.units as u
from astropy.units import Quantity

filenames = glob('*.txt')

layers = []
primary = fits.PrimaryHDU()
layers.append(primary)
for filename in filenames:
    print(filename)
    table = Table.read(filename, format='ascii')
    table.write('temp.fits', overwrite=True)
    hdu = fits.open('temp.fits')[1]
    layers.append(hdu)
hdus = fits.HDUList(layers)
hdus.writeto('fermi_irf_data.fits', overwrite=True)  # 'clobber' was renamed 'overwrite' in modern astropy
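
# A temp-file-free variant of the loop body, using astropy's
# fits.table_to_hdu (a sketch; it should match writing and re-reading
# temp.fits as above):
from astropy.io import fits
from astropy.table import Table

t = Table({'energy': [1.0, 2.0]})
hdu = fits.table_to_hdu(t)   # BinTableHDU built directly from the Table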
Example 29
def sim_spectra(wave,
                flux,
                program,
                spectra_filename,
                obsconditions=None,
                sourcetype=None,
                targetid=None,
                redshift=None,
                expid=0,
                seed=0,
                skyerr=0.0,
                ra=None,
                dec=None,
                meta=None,
                fibermap_columns=None,
                fullsim=False,
                use_poisson=True,
                specsim_config_file="desi",
                dwave_out=None,
                save_resolution=True):
    """
    Simulate spectra from an input set of wavelengths and fluxes, and write a FITS file in the Spectra format that can
    be used as input to the redshift fitter.

    Args:
        wave : 1D np.array of wavelength in Angstrom (in vacuum) in observer frame (i.e. redshifted)
        flux : 1D or 2D np.array. 1D array must have same size as wave, 2D array must have shape[1]=wave.size
               flux has to be in units of 10^-17 ergs/s/cm2/A
        spectra_filename : path to output FITS file in the Spectra format
        program : dark, lrg, qso, gray, grey, elg, bright, mws, bgs
            ignored if obsconditions is not None
    
    Optional:
        obsconditions : dictionary of observation conditions with SEEING EXPTIME AIRMASS MOONFRAC MOONALT MOONSEP
        sourcetype : list of strings, allowed values are (sky,elg,lrg,qso,bgs,star); type of sources, used for fiber aperture loss, default is star
        targetid : list of targetids for each target. default of None has them generated as str(range(nspec))
        redshift : list/array with each index being the redshift for that target
        expid : this expid number will be saved in the Spectra fibermap
        seed : random seed
        skyerr : fractional sky subtraction error
        ra : numpy array with targets RA (deg)
        dec : numpy array with targets Dec (deg)
        meta : dictionary, saved in the primary FITS header of the spectra file
        fibermap_columns : add these columns to the fibermap
        fullsim : if True, write full simulation data in an extra file per camera
        use_poisson : if False, do not use numpy.random.poisson to simulate the Poisson
            noise. This is useful to get reproducible random realizations.
        save_resolution : if True, save the resolution matrix for each spectrum.
            If False, return a resolution matrix (useful for mocks to save disk space).
    """
    log = get_logger()

    if len(flux.shape) == 1:
        flux = flux.reshape((1, flux.size))
    nspec = flux.shape[0]

    log.info("Starting simulation of {} spectra".format(nspec))

    if sourcetype is None:
        sourcetype = np.array(["star" for i in range(nspec)])
    log.debug("sourcetype = {}".format(sourcetype))

    tileid = 0
    telera = 0
    teledec = 0
    dateobs = time.gmtime()
    night = desisim.obs.get_night(utc=dateobs)
    program = program.lower()

    frame_fibermap = desispec.io.fibermap.empty_fibermap(nspec)
    frame_fibermap.meta["FLAVOR"] = "custom"
    frame_fibermap.meta["NIGHT"] = night
    frame_fibermap.meta["EXPID"] = expid

    # add DESI_TARGET
    tm = desitarget.targetmask.desi_mask
    frame_fibermap['DESI_TARGET'][sourcetype == "star"] = tm.STD_FAINT
    frame_fibermap['DESI_TARGET'][sourcetype == "lrg"] = tm.LRG
    frame_fibermap['DESI_TARGET'][sourcetype == "elg"] = tm.ELG
    frame_fibermap['DESI_TARGET'][sourcetype == "qso"] = tm.QSO
    frame_fibermap['DESI_TARGET'][sourcetype == "sky"] = tm.SKY
    frame_fibermap['DESI_TARGET'][sourcetype == "bgs"] = tm.BGS_ANY

    if fibermap_columns is not None:
        for k in fibermap_columns.keys():
            frame_fibermap[k] = fibermap_columns[k]

    if targetid is None:
        targetid = np.arange(nspec).astype(int)

    # add TARGETID
    frame_fibermap['TARGETID'] = targetid

    # spectra fibermap has two extra fields : night and expid
    # This would be cleaner if desispec would provide the spectra equivalent
    # of desispec.io.empty_fibermap()
    spectra_fibermap = desispec.io.empty_fibermap(nspec)
    spectra_fibermap = desispec.io.util.add_columns(
        spectra_fibermap,
        ['NIGHT', 'EXPID', 'TILEID'],
        [np.int32(night), np.int32(expid),
         np.int32(tileid)],
    )

    for s in range(nspec):
        for tp in frame_fibermap.dtype.fields:
            spectra_fibermap[s][tp] = frame_fibermap[s][tp]

    if ra is not None:
        spectra_fibermap["TARGET_RA"] = ra
        spectra_fibermap["FIBER_RA"] = ra
    if dec is not None:
        spectra_fibermap["TARGET_DEC"] = dec
        spectra_fibermap["FIBER_DEC"] = dec

    if obsconditions is None:
        if program in ['dark', 'lrg', 'qso']:
            obsconditions = desisim.simexp.reference_conditions['DARK']
        elif program in ['elg', 'gray', 'grey']:
            obsconditions = desisim.simexp.reference_conditions['GRAY']
        elif program in ['mws', 'bgs', 'bright']:
            obsconditions = desisim.simexp.reference_conditions['BRIGHT']
        else:
            raise ValueError('unknown program {}'.format(program))
    elif isinstance(obsconditions, str):
        try:
            obsconditions = desisim.simexp.reference_conditions[
                obsconditions.upper()]
        except KeyError:
            raise ValueError('obsconditions {} not in {}'.format(
                obsconditions.upper(),
                list(desisim.simexp.reference_conditions.keys())))
    try:
        params = desimodel.io.load_desiparams()
        wavemin = params['ccd']['b']['wavemin']
        wavemax = params['ccd']['z']['wavemax']
    except KeyError:
        wavemin = desimodel.io.load_throughput('b').wavemin
        wavemax = desimodel.io.load_throughput('z').wavemax

    if specsim_config_file == "eboss":
        wavemin = 3500
        wavemax = 10000

    if wave[0] > wavemin:
        log.warning(
            'Minimum input wavelength {}>{}; padding with zeros'.format(
                wave[0], wavemin))
        dwave = wave[1] - wave[0]
        npad = int((wave[0] - wavemin) / dwave + 1)
        wavepad = np.arange(npad) * dwave
        wavepad += wave[0] - dwave - wavepad[-1]
        fluxpad = np.zeros((flux.shape[0], len(wavepad)), dtype=flux.dtype)
        wave = np.concatenate([wavepad, wave])
        flux = np.hstack([fluxpad, flux])
        assert flux.shape[1] == len(wave)
        assert np.allclose(dwave, np.diff(wave))
        assert wave[0] <= wavemin

    if wave[-1] < wavemax:
        log.warning(
            'Maximum input wavelength {}<{}; padding with zeros'.format(
                wave[-1], wavemax))
        dwave = wave[-1] - wave[-2]
        npad = int((wavemax - wave[-1]) / dwave + 1)
        wavepad = wave[-1] + dwave + np.arange(npad) * dwave
        fluxpad = np.zeros((flux.shape[0], len(wavepad)), dtype=flux.dtype)
        wave = np.concatenate([wave, wavepad])
        flux = np.hstack([flux, fluxpad])
        assert flux.shape[1] == len(wave)
        assert np.allclose(dwave, np.diff(wave))
        assert wavemax <= wave[-1]

    ii = (wavemin <= wave) & (wave <= wavemax)

    flux_unit = 1e-17 * u.erg / (u.Angstrom * u.s * u.cm**2)

    wave = wave[ii] * u.Angstrom
    flux = flux[:, ii] * flux_unit

    sim = desisim.simexp.simulate_spectra(
        wave,
        flux,
        fibermap=frame_fibermap,
        obsconditions=obsconditions,
        redshift=redshift,
        seed=seed,
        psfconvolve=True,
        specsim_config_file=specsim_config_file,
        dwave_out=dwave_out)

    random_state = np.random.RandomState(seed)
    sim.generate_random_noise(random_state, use_poisson=use_poisson)

    scale = 1e17
    specdata = None

    resolution = {}
    for camera in sim.instrument.cameras:
        R = Resolution(camera.get_output_resolution_matrix())
        if save_resolution:
            resolution[camera.name] = np.tile(R.to_fits_array(), [nspec, 1, 1])
        else:
            resolution[camera.name] = R.to_fits_array()

    skyscale = skyerr * random_state.normal(size=sim.num_fibers)

    if fullsim:
        for table in sim.camera_output:
            band = table.meta['name'].strip()[0]
            table_filename = spectra_filename.replace(
                ".fits", "-fullsim-{}.fits".format(band))
            table.write(table_filename, format="fits", overwrite=True)
            print("wrote", table_filename)

    if specsim_config_file == "eboss":
        for table in sim._eboss_camera_output:
            wave = table['wavelength'].astype(float)
            flux = (table['observed_flux'] + table['random_noise_electrons'] *
                    table['flux_calibration']).T.astype(float)
            if np.any(skyscale):
                flux += ((table['num_sky_electrons'] * skyscale) *
                         table['flux_calibration']).T.astype(float)

            ivar = table['flux_inverse_variance'].T.astype(float)

            band = table.meta['name'].strip()[0]

            flux = flux * scale
            ivar = ivar / scale**2
            mask = np.zeros(flux.shape).astype(int)

            spec = Spectra([band], {band: wave}, {band: flux}, {band: ivar},
                           resolution_data=None,
                           mask={band: mask},
                           fibermap=spectra_fibermap,
                           meta=meta,
                           single=True)

            if specdata is None:
                specdata = spec
            else:
                specdata.update(spec)

    else:
        for table in sim.camera_output:
            wave = table['wavelength'].astype(float)
            flux = (table['observed_flux'] + table['random_noise_electrons'] *
                    table['flux_calibration']).T.astype(float)
            if np.any(skyscale):
                flux += ((table['num_sky_electrons'] * skyscale) *
                         table['flux_calibration']).T.astype(float)

            ivar = table['flux_inverse_variance'].T.astype(float)

            band = table.meta['name'].strip()[0]

            flux = flux * scale
            ivar = ivar / scale**2
            mask = np.zeros(flux.shape).astype(int)

            if not save_resolution:
                spec = Spectra([band], {band: wave}, {band: flux},
                               {band: ivar},
                               resolution_data=None,
                               mask={band: mask},
                               fibermap=spectra_fibermap,
                               meta=meta,
                               single=True)
            else:
                spec = Spectra([band], {band: wave}, {band: flux},
                               {band: ivar},
                               resolution_data={band: resolution[band]},
                               mask={band: mask},
                               fibermap=spectra_fibermap,
                               meta=meta,
                               single=True)

            if specdata is None:
                specdata = spec
            else:
                specdata.update(spec)

    desispec.io.write_spectra(spectra_filename, specdata)
    log.info('Wrote ' + spectra_filename)

    # need to clear the simulation buffers that keeps growing otherwise
    # because of a different number of fibers each time ...
    desisim.specsim._simulators.clear()
    desisim.specsim._simdefaults.clear()

    if not save_resolution:
        return resolution
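
# A hypothetical invocation of sim_spectra (assumes the desisim/desispec
# stack is installed; the flat input spectrum is purely illustrative):
import numpy as np

wave = np.arange(3600.0, 9800.0, 0.8)    # Angstrom, observer frame
flux = np.ones((1, wave.size))           # units of 1e-17 erg/s/cm2/A
sim_spectra(wave, flux, 'dark', 'spectra-demo.fits', seed=42)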
"""Combine all Fermi IRF data into a single file.
"""
from glob import glob
import numpy as np
from astropy import table
from astropy.io import fits
from astropy.table import Table, vstack
import astropy.units as u
from astropy.units import Quantity


filenames = glob('*.txt')

layers = []
primary = fits.PrimaryHDU()
layers.append(primary)
for filename in filenames:    
    print(filename)
    table = Table.read(filename, format='ascii')
    table.write('temp.fits', overwrite=True)
    hdu = fits.open('temp.fits')[1]
    layers.append(hdu)
hdus = fits.HDUList(layers)
hdus.writeto('fermi_irf_data.fits', clobber=True) 

Example 31
def sim_spectra(wave, flux, program, spectra_filename, obsconditions=None,
                sourcetype=None, targetid=None, redshift=None, expid=0, seed=0, skyerr=0.0, ra=None, dec=None, meta=None, fibermap_columns=None, fullsim=False,use_poisson=True):
    """
    Simulate spectra from an input set of wavelengths and fluxes, and write a FITS file in the Spectra format that can
    be used as input to the redshift fitter.

    Args:
        wave : 1D np.array of wavelength in Angstrom (in vacuum) in observer frame (i.e. redshifted)
        flux : 1D or 2D np.array. 1D array must have same size as wave, 2D array must have shape[1]=wave.size
               flux has to be in units of 10^-17 ergs/s/cm2/A
        spectra_filename : path to output FITS file in the Spectra format
        program : dark, lrg, qso, gray, grey, elg, bright, mws, bgs
            ignored if obsconditions is not None
    
    Optional:
        obsconditions : dictionary of observation conditions with SEEING EXPTIME AIRMASS MOONFRAC MOONALT MOONSEP
        sourcetype : list of strings, allowed values are (sky,elg,lrg,qso,bgs,star); type of sources, used for fiber aperture loss, default is star
        targetid : list of targetids for each target. default of None has them generated as str(range(nspec))
        redshift : list/array with each index being the redshift for that target
        expid : this expid number will be saved in the Spectra fibermap
        seed : random seed
        skyerr : fractional sky subtraction error
        ra : numpy array with targets RA (deg)
        dec : numpy array with targets Dec (deg)
        meta : dictionary, saved in the primary FITS header of the spectra file
        fibermap_columns : add these columns to the fibermap
        fullsim : if True, write full simulation data in an extra file per camera
        use_poisson : if False, do not use numpy.random.poisson to simulate the Poisson noise. This is useful to get reproducible random realizations.
    """
    log = get_logger()
    
    if len(flux.shape)==1 :
        flux=flux.reshape((1,flux.size))
    nspec=flux.shape[0]
    
    log.info("Starting simulation of {} spectra".format(nspec))
    
    if sourcetype is None :        
        sourcetype = np.array(["star" for i in range(nspec)])
    log.debug("sourcetype = {}".format(sourcetype))
    
    tileid  = 0
    telera  = 0
    teledec = 0    
    dateobs = time.gmtime()
    night   = desisim.obs.get_night(utc=dateobs)
    program = program.lower()
        
       
    frame_fibermap = desispec.io.fibermap.empty_fibermap(nspec)    
    frame_fibermap.meta["FLAVOR"]="custom"
    frame_fibermap.meta["NIGHT"]=night
    frame_fibermap.meta["EXPID"]=expid
    
    # add DESI_TARGET 
    tm = desitarget.targetmask.desi_mask
    frame_fibermap['DESI_TARGET'][sourcetype=="star"]=tm.STD_FAINT
    frame_fibermap['DESI_TARGET'][sourcetype=="lrg"]=tm.LRG
    frame_fibermap['DESI_TARGET'][sourcetype=="elg"]=tm.ELG
    frame_fibermap['DESI_TARGET'][sourcetype=="qso"]=tm.QSO
    frame_fibermap['DESI_TARGET'][sourcetype=="sky"]=tm.SKY
    frame_fibermap['DESI_TARGET'][sourcetype=="bgs"]=tm.BGS_ANY
    
    
    if fibermap_columns is not None :
        for k in fibermap_columns.keys() :
            frame_fibermap[k] = fibermap_columns[k]
        
    if targetid is None:
        targetid = np.arange(nspec).astype(int)
        
    # add TARGETID
    frame_fibermap['TARGETID'] = targetid
         
    # spectra fibermap has two extra fields : night and expid
    # This would be cleaner if desispec would provide the spectra equivalent
    # of desispec.io.empty_fibermap()
    spectra_fibermap = desispec.io.empty_fibermap(nspec)
    spectra_fibermap = desispec.io.util.add_columns(spectra_fibermap,
                       ['NIGHT', 'EXPID', 'TILEID'],
                       [np.int32(night), np.int32(expid), np.int32(tileid)],
                       )

    for s in range(nspec):
        for tp in frame_fibermap.dtype.fields:
            spectra_fibermap[s][tp] = frame_fibermap[s][tp]
 
    if ra is not None :
        spectra_fibermap["TARGET_RA"] = ra
        spectra_fibermap["FIBER_RA"]    = ra
    if dec is not None :
        spectra_fibermap["TARGET_DEC"] = dec
        spectra_fibermap["FIBER_DEC"]    = dec
            
    if obsconditions is None:
        if program in ['dark', 'lrg', 'qso']:
            obsconditions = desisim.simexp.reference_conditions['DARK']
        elif program in ['elg', 'gray', 'grey']:
            obsconditions = desisim.simexp.reference_conditions['GRAY']
        elif program in ['mws', 'bgs', 'bright']:
            obsconditions = desisim.simexp.reference_conditions['BRIGHT']
        else:
            raise ValueError('unknown program {}'.format(program))
    elif isinstance(obsconditions, str):
        try:
            obsconditions = desisim.simexp.reference_conditions[obsconditions.upper()]
        except KeyError:
            raise ValueError('obsconditions {} not in {}'.format(
                obsconditions.upper(),
                list(desisim.simexp.reference_conditions.keys())))
    try:
        params = desimodel.io.load_desiparams()
        wavemin = params['ccd']['b']['wavemin']
        wavemax = params['ccd']['z']['wavemax']
    except KeyError:
        wavemin = desimodel.io.load_throughput('b').wavemin
        wavemax = desimodel.io.load_throughput('z').wavemax

    if wave[0] > wavemin:
        log.warning('Minimum input wavelength {}>{}; padding with zeros'.format(
                wave[0], wavemin))
        dwave = wave[1] - wave[0]
        npad = int((wave[0] - wavemin)/dwave + 1)
        wavepad = np.arange(npad) * dwave
        wavepad += wave[0] - dwave - wavepad[-1]
        fluxpad = np.zeros((flux.shape[0], len(wavepad)), dtype=flux.dtype)
        wave = np.concatenate([wavepad, wave])
        flux = np.hstack([fluxpad, flux])
        assert flux.shape[1] == len(wave)
        assert np.allclose(dwave, np.diff(wave))
        assert wave[0] <= wavemin

    if wave[-1] < wavemax:
        log.warning('Maximum input wavelength {}<{}; padding with zeros'.format(
                wave[-1], wavemax))
        dwave = wave[-1] - wave[-2]
        npad = int( (wavemax - wave[-1])/dwave + 1 )
        wavepad = wave[-1] + dwave + np.arange(npad)*dwave
        fluxpad = np.zeros((flux.shape[0], len(wavepad)), dtype=flux.dtype)
        wave = np.concatenate([wave, wavepad])
        flux = np.hstack([flux, fluxpad])
        assert flux.shape[1] == len(wave)
        assert np.allclose(dwave, np.diff(wave))
        assert wavemax <= wave[-1]

    ii = (wavemin <= wave) & (wave <= wavemax)

    flux_unit = 1e-17 * u.erg / (u.Angstrom * u.s * u.cm ** 2 )
    
    wave = wave[ii]*u.Angstrom
    flux = flux[:,ii]*flux_unit

    sim = desisim.simexp.simulate_spectra(wave, flux, fibermap=frame_fibermap,
        obsconditions=obsconditions, redshift=redshift, seed=seed,
        psfconvolve=True)

    random_state = np.random.RandomState(seed)
    sim.generate_random_noise(random_state,use_poisson=use_poisson)

    scale=1e17
    specdata = None

    resolution={}
    for camera in sim.instrument.cameras:
        R = Resolution(camera.get_output_resolution_matrix())
        resolution[camera.name] = np.tile(R.to_fits_array(), [nspec, 1, 1])

    skyscale = skyerr * random_state.normal(size=sim.num_fibers)

    if fullsim :
        for table in sim.camera_output :
            band  = table.meta['name'].strip()[0]
            table_filename=spectra_filename.replace(".fits","-fullsim-{}.fits".format(band))
            table.write(table_filename,format="fits",overwrite=True)
            print("wrote",table_filename)

    for table in sim.camera_output :
        
        wave = table['wavelength'].astype(float)
        flux = (table['observed_flux']+table['random_noise_electrons']*table['flux_calibration']).T.astype(float)
        if np.any(skyscale):
            flux += ((table['num_sky_electrons']*skyscale)*table['flux_calibration']).T.astype(float)

        ivar = table['flux_inverse_variance'].T.astype(float)
        
        band  = table.meta['name'].strip()[0]
        
        flux = flux * scale
        ivar = ivar / scale**2
        mask  = np.zeros(flux.shape).astype(int)
        
        spec = Spectra([band], {band : wave}, {band : flux}, {band : ivar}, 
                       resolution_data={band : resolution[band]}, 
                       mask={band : mask}, 
                       fibermap=spectra_fibermap, 
                       meta=meta,
                       single=True)
        
        if specdata is None :
            specdata = spec
        else :
            specdata.update(spec)
    
    desispec.io.write_spectra(spectra_filename, specdata)        
    log.info('Wrote '+spectra_filename)
    
    # need to clear the simulation buffers that keeps growing otherwise
    # because of a different number of fibers each time ...
    desisim.specsim._simulators.clear()
    desisim.specsim._simdefaults.clear()
def print_table(field='cdfs'):
    swire_names, swire_coords, _ = pipeline.generate_swire_features(overwrite=False, field=field)
    swire_labels = pipeline.generate_swire_labels(swire_names, swire_coords, overwrite=False, field=field)
    (_, atlas_test_sets), (_, swire_test_sets) = pipeline.generate_data_sets(swire_coords, swire_labels, overwrite=False, field=field)
    cids = list(pipeline.cross_identify_all(swire_names, swire_coords, swire_labels, swire_test_sets, swire_labels[:, 0], field=field))

    atlas_to_swire = collections.defaultdict(dict)  # ATLAS -> predictor -> SWIRE

    swire_name_to_coord = {}
    for name, coord in zip(swire_names, swire_coords):
        swire_name_to_coord[name] = coord

    atlas_to_swire_expert = {}
    key_to_atlas = {}
    atlas_to_ras = {}
    atlas_to_decs = {}
    id_to_atlas = {}
    atlas_to_id = {}
    atlas_to_zooniverse_id = {}
    if field == 'cdfs':
        table = astropy.io.ascii.read(pipeline.TABLE_PATH)
        for row in table:
            name = row['Component Name (Franzen)']
            if not name:
                continue
            id_to_atlas[row['Component ID (Franzen)']] = name
            atlas_to_id[name] = row['Component ID (Franzen)']
            atlas_to_zooniverse_id[name] = row['Component Zooniverse ID (RGZ)']
            key_to_atlas[row['Key']] = name
            swire = row['Source SWIRE (Norris)']
            atlas_to_swire_expert[name] = swire
            atlas_to_ras[name] = row['Component RA (Franzen)']
            atlas_to_decs[name] = row['Component DEC (Franzen)']
    else:
        swire_scoords = astropy.coordinates.SkyCoord(ra=swire_coords[:, 0],
                                                     dec=swire_coords[:, 1],
                                                     unit='deg')
        with astropy.io.fits.open(pipeline.MIDDELBERG_TABLE4_PATH) as elais_components_fits:
            elais_components = elais_components_fits[1].data
            component_to_name = {}
            for i, component in enumerate(elais_components):
                name = component['ATELAIS']
                id_to_atlas[component['CID']] = name
                atlas_to_id[name] = component['CID']
                atlas_to_zooniverse_id[name] = ''
                key_to_atlas[i] = name
                coord = astropy.coordinates.SkyCoord(
                    ra='{} {} {}'.format(component['RAh'], component['RAm'], component['RAs']),
                    dec='-{} {} {}'.format(component['DEd'], component['DEm'], component['DEs']),
                    unit=('hourangle', 'deg'))
                coord = (coord.ra.deg, coord.dec.deg)
                atlas_to_ras[name] = coord[0]
                atlas_to_decs[name] = coord[1]
        # Load SWIRE cross-identification from Table 5.
        with open(pipeline.MIDDELBERG_TABLE5_PATH) as elais_file:
            lines = [line.split('|') for line in elais_file]
            for line in lines:
                if 'ATELAISJ' not in line[0]:
                    continue

                line_cids = line[1]
                if 'C0' not in line_cids and 'C1' not in line_cids:
                    continue

                line_cids = [cid.strip() for cid in line_cids.split(',')]
                swire_coord_re = re.search(r'SWIRE4J(\d\d)(\d\d)(\d\d\.\d\d)(-\d\d)(\d\d)(\d\d\.\d)', line[2])
                if not swire_coord_re:
                    continue
                swire_coord_list = swire_coord_re.groups()
                coord = astropy.coordinates.SkyCoord(
                    ra='{} {} {}'.format(*swire_coord_list[:3]),
                    dec='{} {} {}'.format(*swire_coord_list[3:]),
                    unit=('hourangle', 'deg'))
                # Nearest SWIRE...
                seps = coord.separation(swire_scoords)
                nearest = numpy.argmin(seps)
                dist = seps[nearest]
                if dist.deg > 5 / 60 / 60:
                    continue
                name = swire_names[nearest]
                for cid in line_cids:
                    atlas_to_swire_expert[id_to_atlas[cid]] = name

    atlas_to_rgz = {}
    atlas_to_radio_consensus = {}
    atlas_to_ir_consensus = {}
    if field == 'cdfs':
        for row in astropy.io.ascii.read(pipeline.RGZ_PATH):
            name = id_to_atlas[row['atlas_id']]
            atlas_to_radio_consensus[name] = row['consensus.radio_level']
            atlas_to_ir_consensus[name] = row['consensus.ir_level']
            atlas_to_rgz[name] = row['SWIRE.designation']

    titlemap = {
        'RGZ & Norris & compact': 'Compact',
        'RGZ & Norris & resolved': 'Resolved',
        'RGZ & Norris': 'All',
        'RGZ & compact': 'Compact',
        'RGZ & resolved': 'Resolved',
        'RGZ': 'All',
    }

    known_predictors = set()

    for cid in cids:
        if cid.labeller == 'norris' and 'Norris' not in cid.dataset_name:
            continue

        if cid.classifier in {'Groundtruth', 'Random'}:
            continue

        if field == 'cdfs':
            atlas_keys = atlas_test_sets[:, pipeline.SET_NAMES['RGZ & Norris'], cid.quadrant].nonzero()[0]
        else:
            atlas_keys = atlas_test_sets[:, 0, 0].nonzero()[0]

        atlas_to_swire_predictor = dict(zip(cid.radio_names, cid.ir_names))
        n_total = 0
        n_correct = 0
        n_skipped = 0
        if 'Norris' in cid.dataset_name and cid.labeller == 'rgz':
            # Skip the 'RGZ N' case: RGZ-labelled predictors on Norris sets.
            continue
        elif cid.labeller == 'rgz':
            labeller = 'RGZ'
        else:
            labeller = 'Norris'
        predictor_name = '{}({} / {})'.format(
            {'LogisticRegression': 'LR', 'CNN': 'CNN', 'RandomForestClassifier': 'RF'}[cid.classifier],
            labeller, titlemap[cid.dataset_name])
        known_predictors.add(predictor_name)
        for i in atlas_keys:
            name = key_to_atlas[i]
            swire_predictor = atlas_to_swire_predictor.get(name, '')
            atlas_to_swire[name][predictor_name] = swire_predictor

    known_predictors = sorted(known_predictors)

    atlases = sorted(atlas_to_swire)
    ras = []
    decs = []
    expert_xids = []
    expert_xid_ras = []
    expert_xid_decs = []
    rgzs = []
    rgz_ras = []
    rgz_decs = []
    rcs = []
    ircs = []
    component_ids = []
    zids = []
    predictor_columns = collections.defaultdict(list)
    predictor_ras = collections.defaultdict(list)
    predictor_decs = collections.defaultdict(list)
    for atlas in atlases:
        for predictor in known_predictors:
            predictor_columns[predictor].append(atlas_to_swire[atlas].get(predictor, ''))
            predictor_ras[predictor].append(swire_name_to_coord.get(atlas_to_swire[atlas].get(predictor, ''), (None, None))[0])
            predictor_decs[predictor].append(swire_name_to_coord.get(atlas_to_swire[atlas].get(predictor, ''), (None, None))[1])
        ras.append(atlas_to_ras[atlas])
        decs.append(atlas_to_decs[atlas])
        rgzs.append(atlas_to_rgz.get(atlas, ''))
        component_ids.append(atlas_to_id[atlas])
        zids.append(atlas_to_zooniverse_id[atlas])
        rgz_ras.append(swire_name_to_coord.get(atlas_to_rgz.get(atlas, ''), (None, None))[0])
        rgz_decs.append(swire_name_to_coord.get(atlas_to_rgz.get(atlas, ''), (None, None))[1])
        rcs.append(atlas_to_radio_consensus.get(atlas, 0.0))
        ircs.append(atlas_to_ir_consensus.get(atlas, 0.0))
        expert_xids.append(atlas_to_swire_expert.get(atlas, ''))
        expert_xid_ras.append(swire_name_to_coord.get(atlas_to_swire_expert.get(atlas, ''), (None, None))[0])
        expert_xid_decs.append(swire_name_to_coord.get(atlas_to_swire_expert.get(atlas, ''), (None, None))[1])

    expert = 'Norris' if field == 'cdfs' else 'Middelberg'
    table = astropy.table.Table(
        data=[atlases, ras, decs,
              component_ids, zids,
              expert_xids, expert_xid_ras, expert_xid_decs,
              rgzs, rgz_ras, rgz_decs,
              rcs, ircs] + [k for p in known_predictors for k in (predictor_columns[p], predictor_ras[p], predictor_decs[p])],
        names=['ATLAS', 'RA', 'Dec',
               'CID', 'Zooniverse ID',
               expert, expert + ' RA', expert + ' Dec',
               'RGZ', 'RGZ RA', 'RGZ Dec',
               'RGZ radio consensus', 'RGZ IR consensus'] + [k for p in known_predictors for k in (p, p + ' RA', p + ' Dec')])
    table['RGZ radio consensus'].format = '{:.4f}'
    table['RGZ IR consensus'].format = '{:.4f}'
    table.write('/Users/alger/data/Crowdastro/predicted_cross_ids_table_21_03_18_{}.csv'.format(field), format='csv')
    table.write('/Users/alger/data/Crowdastro/predicted_cross_ids_table_21_03_18_{}.tex'.format(field), format='latex')
Esempio n. 33
0
def write(self, *args, **kwargs):
    table = self[:]
    table.write(*args, **kwargs)
Esempio n. 34
0
def print_table(field='cdfs'):
    titlemap = {
        'RGZ & Norris & compact': 'Compact',
        'RGZ & Norris & resolved': 'Resolved',
        'RGZ & Norris': 'All',
        'RGZ & compact': 'Compact',
        'RGZ & resolved': 'Resolved',
        'RGZ': 'All',
    }

    lr_predictions = itertools.chain(
        pipeline.unserialise_predictions(
            pipeline.WORKING_DIR + 'LogisticRegression_norris_{}_predictions'.format(field)),
        pipeline.unserialise_predictions(
            pipeline.WORKING_DIR + 'LogisticRegression_rgz_{}_predictions'.format(field)))
    rf_predictions = itertools.chain(
        pipeline.unserialise_predictions(
            pipeline.WORKING_DIR + 'RandomForestClassifier_norris_{}_predictions'.format(field)),
        pipeline.unserialise_predictions(
            pipeline.WORKING_DIR + 'RandomForestClassifier_rgz_{}_predictions'.format(field)))
    cnn_predictions = itertools.chain(
        pipeline.unserialise_predictions(
            pipeline.WORKING_DIR + 'CNN_norris_{}_predictions'.format(field)),
        pipeline.unserialise_predictions(
            pipeline.WORKING_DIR + 'CNN_rgz_{}_predictions'.format(field)))

    swire_names, swire_coords, _ = pipeline.generate_swire_features(overwrite=False, field=field)
    swire_labels = pipeline.generate_swire_labels(swire_names, swire_coords, overwrite=False, field=field)
    _, (_, swire_test_sets) = pipeline.generate_data_sets(swire_coords, swire_labels, overwrite=False, field=field)

    swire_names = numpy.array(swire_names)
    swire_coords = numpy.array(swire_coords)

    predictions_map = collections.defaultdict(dict) # SWIRE -> predictor -> probability
    swire_coords_map = {}
    swire_expert_map = {}
    swire_rgz_map = {}
    known_predictors = set()

    for classifier, predictions_ in [['LR', lr_predictions], ['CNN', cnn_predictions], ['RF', rf_predictions]]:
        for predictions in predictions_:
            dataset_name = predictions.dataset_name
            labeller = predictions.labeller
            if labeller == 'rgz' and 'Norris' in dataset_name:
                # Skip the 'RGZ N' case: RGZ-labelled predictions on Norris sets.
                continue
            labeller = labeller.title() if labeller == 'norris' else labeller.upper()
            predictor_name = '{}({} / {})'.format(classifier, labeller, titlemap[dataset_name])
            if field == 'cdfs':
                swire_names_ = swire_names[swire_test_sets[:, pipeline.SET_NAMES['RGZ'], predictions.quadrant]]
                swire_coords_ = swire_coords[swire_test_sets[:, pipeline.SET_NAMES['RGZ'], predictions.quadrant]]
                swire_labels_ = swire_labels[swire_test_sets[:, pipeline.SET_NAMES['RGZ'], predictions.quadrant]]
            else:
                swire_names_ = swire_names[swire_test_sets[:, 0, 0]]
                swire_coords_ = swire_coords[swire_test_sets[:, 0, 0]]
                swire_labels_ = swire_labels[swire_test_sets[:, 0, 0]]
            assert predictions.probabilities.shape[0] == len(swire_names_), \
                'expected {}, got {}'.format(predictions.probabilities.shape[0], len(swire_names_))
            for name, coords, prediction, label in zip(swire_names_, swire_coords_, predictions.probabilities, swire_labels_):
                predictions_map[name][predictor_name] = prediction
                swire_coords_map[name] = coords
                swire_expert_map[name] = label[0]
                swire_rgz_map[name] = label[1]
            known_predictors.add(predictor_name)

    known_predictors = sorted(known_predictors)

    swires = sorted(predictions_map)
    ras = []
    decs = []
    is_expert_host = []
    is_rgz_host = []
    predictor_columns = collections.defaultdict(list)
    for swire in swires:
        for predictor in known_predictors:
            predictor_columns[predictor].append(predictions_map[swire].get(predictor, ''))
        ras.append(swire_coords_map[swire][0])
        decs.append(swire_coords_map[swire][1])
        is_expert_host.append(['no', 'yes'][swire_expert_map[swire]])
        is_rgz_host.append(['no', 'yes'][swire_rgz_map[swire]])

    table = astropy.table.Table(
        data=[swires, ras, decs, is_expert_host, is_rgz_host] + [predictor_columns[p] for p in known_predictors],
        names=['SWIRE', 'RA', 'Dec', 'Expert host', 'RGZ host'] + known_predictors)
    # Write the CSV at full precision, then set display formats so the LaTeX
    # version is rounded to four decimal places.
    table.write('/Users/alger/data/Crowdastro/predicted_swire_table_{}_21_03_18.csv'.format(field), format='csv')
    for p in known_predictors:
        table[p].format = '{:.4f}'
    table.write('/Users/alger/data/Crowdastro/predicted_swire_table_{}_21_03_18.tex'.format(field), format='latex')
Esempio n. 35
0
def extract(inputs, pixel_scale, filename, noisefile, outputfile, finalfits,
            num_sections, height, width, x, y):
    logger.info(f"Will source extract galaxies from noise file {noisefile}")

    # run sextractor on noise image.
    cmd = '/nfs/slac/g/ki/ki19/deuce/AEGIS/ismael/WLD/params/sextractor-2.25.0/src/sex {} -c {} -CATALOG_NAME {} ' \
          '-PARAMETERS_NAME {} -FILTER_NAME {} -STARNNW_NAME {}'.format(
            noisefile, inputs['config_file'], outputfile, inputs['param_file'], inputs['filter_file'],
            inputs['starnnw_file'])

    logger.info(f"With cmd: {cmd}")

    subprocess.run(cmd, shell=True)

    logger.success(f"Successfully source extracted {noisefile}!")

    # read noise image to figure out image bounds.
    fits_section = fitsio.FITS(noisefile)
    stamp = fits_section[0].read()
    # numpy arrays are indexed (row, col); the sections are assumed square,
    # so shape[0] and shape[1] can stand in for width and height here.
    img_width = stamp.shape[0]  # pixels
    img_height = stamp.shape[1]

    assert img_width == width / num_sections and img_height == height / num_sections, \
        "Something is wrong with the heights specified."

    logger.info(
        f"The image width and height from the stamp are {img_width} and {img_height} respectively."
    )

    # read results obtained (table obtained either from combining or from single.)
    cat = descwl.output.Reader(filename).results
    table = cat.table
    detected, matched, indices, distance = cat.match_sextractor(outputfile)
    logger.success(
        f"Successfully matched catalogue with source extractor from sextract output: {outputfile}"
    )

    # convert to arcsec, relative to this image's centre (previously pixels
    # relative to the corner).
    detected['X_IMAGE'] = (detected['X_IMAGE'] - 0.5 * img_width -
                           0.5) * pixel_scale
    detected['Y_IMAGE'] = (detected['Y_IMAGE'] - 0.5 * img_height -
                           0.5) * pixel_scale

    # adjust to absolute image center if necessary, both for measured centers and catalogue centers.
    detected['X_IMAGE'] += x * (width * pixel_scale)  # need to use arcsecs
    detected['Y_IMAGE'] += y * (height * pixel_scale)

    table['dx'] += x * (width * pixel_scale)
    table['dy'] += y * (height * pixel_scale)

    # also adjust the xmin, xmax, ymin, ymax bounding-box edges.
    # remember these are in pixels; xmin, xmax are relative to the left edge
    # and ymin, ymax to the bottom edge.
    table['xmin'] += int(x * width + width / 2 - img_width / 2)
    table['xmax'] += int(x * width + width / 2 - img_width / 2)
    table['ymin'] += int(y * height + height / 2 - img_height / 2)
    table['ymax'] += int(y * height + height / 2 - img_height / 2)

    # convert second moments to arcsec**2; no centre adjustment is needed
    # because they are only used for the sigma calculation below.
    detected['X2_IMAGE'] *= pixel_scale**2
    detected['Y2_IMAGE'] *= pixel_scale**2
    detected['XY_IMAGE'] *= pixel_scale**2

    # calculate a size from the moments X2_IMAGE, Y2_IMAGE, XY_IMAGE
    # (converted to arcsec**2 above).
    sigmas = []
    for x2, y2, xy in zip(detected['X2_IMAGE'], detected['Y2_IMAGE'],
                          detected['XY_IMAGE']):
        second_moments = np.array([[x2, xy], [xy, y2]])
        sigma = np.linalg.det(second_moments)**(+1. / 4)  # exponent is +1/4 (linear size), not -1/4
        sigmas.append(sigma)
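    # Equivalent vectorized sketch (same det(M) ** (1/4) size estimate):
    #   sigmas = (detected['X2_IMAGE'] * detected['Y2_IMAGE']
    #             - detected['XY_IMAGE'] ** 2) ** 0.25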

    SIGMA = astropy.table.Column(name='SIGMA', data=sigmas)
    detected.add_column(SIGMA)

    # find the indices of the ambiguous blends.
    logger.info("Finding indices/ids that are ambiguously blended")
    ambg_blends = detected_ambiguous_blends(table, indices, detected)

    logger.success("All indices have been found")

    logger.info(
        f"Adding column to original table and writing it to {finalfits}")

    ambiguous_blend_column = [i in ambg_blends for i in range(len(table))]
    column = astropy.table.Column(name='ambig_blend',
                                  data=ambiguous_blend_column)
    table.add_column(column)

    logger.debug(
        f"Number of galaxies in table from file {finalfits} is: {len(table)}")

    table.write(finalfits)
Esempio n. 36
0
def calculate_fiber_acceptance_fraction(focal_x,
                                        focal_y,
                                        wavelength,
                                        source,
                                        atmosphere,
                                        instrument,
                                        source_types=None,
                                        source_fraction=None,
                                        source_half_light_radius=None,
                                        source_minor_major_axis_ratio=None,
                                        source_position_angle=None,
                                        oversampling=32,
                                        saved_images_file=None,
                                        saved_table_file=None):
    """Calculate the acceptance fraction for a single fiber.

    The behavior of this function is customized by the instrument.fiberloss
    configuration parameters. When instrument.fiberloss.method == 'table',
    pre-tabulated values are returned using source.type as the key and
    all other parameters to this function are ignored.

    When instrument.fiberloss.method == 'galsim', fiberloss is calculated
    on the fly using the GalSim package via :class:`GalsimFiberlossCalculator`
    to model the PSF components and source profile and perform the convolutions.

    To efficiently calculate fiberloss fractions for multiple sources with
    GalSim, use :class:`GalsimFiberlossCalculator` directly instead of
    repeatedly calling this method.  See :mod:`specsim.quickfiberloss` for an
    example of this approach.

    Parameters
    ----------
    focal_x : :class:`astropy.units.Quantity`
        X coordinate of the fiber center in the focal plane with length units.
    focal_y : :class:`astropy.units.Quantity`
        Y coordinate of the fiber center in the focal plane with length units.
    wavelength : :class:`astropy.units.Quantity`
        Array of simulation wavelengths (with length units) where the fiber
        acceptance fraction should be tabulated.
    source : :class:`specsim.source.Source`
        Source model to use for the calculation.
    atmosphere : :class:`specsim.atmosphere.Atmosphere`
        Atmosphere model to use for the calculation.
    instrument : :class:`specsim.instrument.Instrument`
        Instrument model to use for the calculation.
    source_types : array or None
        Array of source type names that identify which tabulated fiberloss
        fraction should be used for each fiber with the ``table`` method.
        Each name should already be defined as a key in the
        ``instruments.fiberloss.table.paths`` configuration.
        Ignored for the ``galsim`` method.
    source_fraction : array or None
        Array of shape (num_fibers, 2).  See
        :meth:`GalsimFiberlossCalculator.create_source` for details.
        Ignored for the ``table`` method.
    source_half_light_radius : array or None
        Array of shape (num_fibers, 2).  See
        :meth:`GalsimFiberlossCalculator.create_source` for details.
        Ignored for the ``table`` method.
    source_minor_major_axis_ratio : array or None
        Array of shape (num_fibers, 2).  See
        :meth:`GalsimFiberlossCalculator.create_source` for details.
        Ignored for the ``table`` method.
    source_position_angle : array or None
        Array of shape (num_fibers, 2).  See
        :meth:`GalsimFiberlossCalculator.create_source` for details.
        Ignored for the ``table`` method.
    oversampling : int
        Oversampling factor to use for anti-aliasing the fiber aperture.
        Ignored for the ``table`` method.
    saved_images_file : str or None
        See :meth:`GalsimFiberlossCalculator.calculate`.
        Ignored for the ``table`` method.
    saved_table_file : str or None
        Write a table of calculated values to a file with this name.  The
        extension determines the file format, and .ecsv is recommended.
        The saved file can then be used as a pre-tabulated input with
        instrument.fiberloss.method = 'table'.

    Returns
    -------
    numpy array
        Array of fiber acceptance fractions (dimensionless) at each of the
        input wavelengths.
    """
    num_fibers = len(focal_x)
    if len(focal_y) != num_fibers:
        raise ValueError('Arrays focal_x and focal_y must have same length.')

    # Use pre-tabulated fiberloss fractions when requested.
    if instrument.fiberloss_method == 'table':
        if source_types is None:
            # Use same source type for all fibers.
            return instrument.fiber_acceptance_dict[source.type_name][
                np.newaxis, :]
        elif len(source_types) != num_fibers:
            raise ValueError('Unexpected shape for source_types.')
        floss = np.empty((num_fibers, len(wavelength)))
        for i, type_name in enumerate(source_types):
            floss[i] = instrument.fiber_acceptance_dict[type_name]
        return floss

    # Otherwise, use GalSim or the fastfiberacceptance calibrated on galsim
    # to calculate fiberloss fractions on the fly...

    # Initialize the grid of wavelengths where the fiberloss will be
    # calculated.
    num_wlen = instrument.fiberloss_num_wlen
    wlen_unit = wavelength.unit
    wlen_grid = np.linspace(wavelength.value[0], wavelength.value[-1],
                            num_wlen) * wlen_unit

    # Calculate the focal-plane optics at the fiber locations.
    scale, blur, offset = instrument.get_focal_plane_optics(
        focal_x, focal_y, wlen_grid)

    # Calculate the atmospheric seeing at each wavelength.
    seeing_fwhm = atmosphere.get_seeing_fwhm(wlen_grid).to(u.arcsec).value

    # Replicate source parameters from the source config if they are not
    # provided via args. If they are provided, check for the expected shape.
    if source_fraction is None:
        source_fraction = np.tile(
            [source.disk_fraction, source.bulge_fraction], [num_fibers, 1])
    elif source_fraction.shape != (num_fibers, 2):
        raise ValueError('Unexpected shape for source_fraction.')
    if source_half_light_radius is None:
        source_half_light_radius = np.tile([
            source.disk_shape.half_light_radius.to(u.arcsec).value,
            source.bulge_shape.half_light_radius.to(u.arcsec).value
        ], [num_fibers, 1])
    elif source_half_light_radius.shape != (num_fibers, 2):
        raise ValueError('Unexpected shape for source_half_light_radius.')
    if source_minor_major_axis_ratio is None:
        source_minor_major_axis_ratio = np.tile([
            source.disk_shape.minor_major_axis_ratio,
            source.bulge_shape.minor_major_axis_ratio
        ], [num_fibers, 1])
    elif source_minor_major_axis_ratio.shape != (num_fibers, 2):
        raise ValueError('Unexpected shape for source_minor_major_axis_ratio.')
    if source_position_angle is None:
        source_position_angle = np.tile([
            source.disk_shape.position_angle.to(u.deg).value,
            source.bulge_shape.position_angle.to(u.deg).value
        ], [num_fibers, 1])
    elif source_position_angle.shape != (num_fibers, 2):
        raise ValueError('Unexpected shape for source_position_angle.')
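    # Shape sketch: np.tile([disk_value, bulge_value], [num_fibers, 1]) yields
    # an array of shape (num_fibers, 2), i.e. one (disk, bulge) pair per fiber,
    # which is the layout validated by the checks above.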

    fiberloss_grid = None

    # choose here how to compute things
    if instrument.fiberloss_method == 'fastsim':
        scale_um_per_arcsec = scale.to(u.um / u.arcsec).value
        blur_um = blur.to(u.um).value

        sigma = np.zeros((offset.shape[0], offset.shape[1]))
        disk_half_light_radius = np.zeros(sigma.shape)
        bulge_half_light_radius = np.zeros(sigma.shape)
        disk_frac = np.zeros(sigma.shape)
        bulge_frac = np.zeros(sigma.shape)

        for i in range(offset.shape[0]):
            # 2.35482 = 2 * sqrt(2 * ln 2) converts a Gaussian FWHM to sigma.
            sigma[i] = np.sqrt((seeing_fwhm / 2.35482)**2 *
                               scale_um_per_arcsec[i, 0] *
                               scale_um_per_arcsec[i, 1] + blur_um[i]**2)
            disk_half_light_radius[i] = source_half_light_radius[
                i, 0] * np.ones(offset.shape[1])
            bulge_half_light_radius[i] = source_half_light_radius[
                i, 1] * np.ones(offset.shape[1])
            disk_frac[i] = source_fraction[i, 0] * np.ones(offset.shape[1])
            bulge_frac[i] = source_fraction[i, 1] * np.ones(offset.shape[1])

        point_frac = 1 - disk_frac - bulge_frac

        offset_um = offset.to(u.um).value
        delta = np.sqrt(offset_um[:, :, 0]**2 + offset_um[:, :, 1]**2)

        fiberloss_grid = np.zeros(sigma.shape)
        if np.sum(point_frac) > 0:
            fiberloss_grid += point_frac * instrument.fast_fiber_acceptance.value(
                "POINT", sigma, delta)
        if np.sum(disk_frac) > 0:
            fiberloss_grid += disk_frac * instrument.fast_fiber_acceptance.value(
                "DISK", sigma, delta, disk_half_light_radius)
        if np.sum(bulge_frac) > 0:
            fiberloss_grid += bulge_frac * instrument.fast_fiber_acceptance.value(
                "BULGE", sigma, delta, bulge_half_light_radius)

    else:

        # Initialize a new calculator.
        calc = GalsimFiberlossCalculator(
            instrument.fiber_diameter.to(u.um).value,
            wlen_grid.to(u.Angstrom).value, instrument.fiberloss_num_pixels,
            oversampling, atmosphere.seeing_moffat_beta)

        # Calculate fiberloss fractions.  Note that the calculator expects arrays
        # with implicit units.
        fiberloss_grid = calc.calculate(seeing_fwhm,
                                        scale.to(u.um / u.arcsec).value,
                                        offset.to(u.um).value,
                                        blur.to(u.um).value, source_fraction,
                                        source_half_light_radius,
                                        source_minor_major_axis_ratio,
                                        source_position_angle,
                                        saved_images_file)

    # TODO: add support for saving table when num_fibers > 1.
    if saved_table_file and num_fibers == 1:
        meta = dict(
            description='Fiberloss fraction for source "{0}"'.format(
                source.name) +
            ' at focal (x,y) = ({0:.3f},{1:.3f})'.format(focal_x, focal_y))
        table = astropy.table.Table(meta=meta)
        table.add_column(
            astropy.table.Column(name='Wavelength',
                                 data=wlen_grid.value,
                                 unit=wlen_grid.unit,
                                 description='Observed wavelength'))
        table.add_column(
            astropy.table.Column(name='FiberAcceptance',
                                 data=fiberloss_grid[0],
                                 description='Fiber acceptance fraction'))
        args = {}
        if saved_table_file.endswith('.ecsv'):
            args['format'] = 'ascii.ecsv'
        table.write(saved_table_file, **args)

    # Interpolate (linearly) to the simulation wavelength grid.
    # Use scipy.interpolate instead of np.interp here to avoid looping
    # over fibers.
    interpolator = scipy.interpolate.interp1d(wlen_grid.value,
                                              fiberloss_grid,
                                              kind='linear',
                                              axis=1,
                                              copy=False,
                                              assume_sorted=True)
    # Both wavelength grids have the same units, by construction, so no
    # conversion factor is required here.
    return interpolator(wavelength.value)
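
A minimal usage sketch (hedged: sim stands for an already-configured
specsim.simulator.Simulator, so sim.source, sim.atmosphere and sim.instrument
are assumed handles here, and the output path is illustrative):

import astropy.units as u
import numpy as np

focal_x = np.array([10.0]) * u.mm
focal_y = np.array([-5.0]) * u.mm
wavelength = np.linspace(3600.0, 9800.0, 1000) * u.Angstrom
fiberloss = calculate_fiber_acceptance_fraction(
    focal_x, focal_y, wavelength,
    sim.source, sim.atmosphere, sim.instrument,
    saved_table_file='fiberloss.ecsv')  # can seed fiberloss.method = 'table'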
Esempio n. 37
0
column_names = ['Inner edge (pc)', 'Outer edge (pc)',
                'GMC index', 'R', 'p',
                'Truncation mass (M$_\odot$)',
                'Largest cloud (M$_\odot$)',
                '5th largest cloud (M$_\odot$)']
column_types = ['f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4']
table = Table(names=column_names, dtype=column_types)

for inneredge, outeredge in zip(inneredge, outeredge):
    idx = np.where((t['RADIUS_PC'] >= inneredge) &
                 (t['RADIUS_PC'] < outeredge))
    mass = t['MASS_GCORR'][idx].data

    optresult = plf.plfit_adstat(mass / minmass)
    pvec = optresult.x
    # no separate xmin index is needed; the lower mass cutoff is set by the
    # minmass normalisation above.
    table.add_row()
    table[-1]['Inner edge (pc)'] = inneredge
    table[-1]['Outer edge (pc)'] = outeredge
    table[-1]['GMC index'] = pvec[0]
    table[-1]['R'] = 0  # placeholder: the AD-statistic fit does not return R
    table[-1]['p'] = 0  # placeholder: nor a p-value
    table[-1]['Truncation mass (M$_\odot$)'] = pvec[1] * minmass
    table[-1]['Largest cloud (M$_\odot$)'] = mass.max()
    table[-1]['5th largest cloud (M$_\odot$)'] =\
        np.sort(t['MASS_GCORR'][idx])[-5]

    print(table)

table.write('m83bininfo_ad.fits', overwrite=True)
Esempio n. 38
0
def data(galaxyname, data, n_bins=1, r_nuc=0):

    # Import the libraries.
    from galaxies import Galaxy
    from astropy.table import Table
    from astropy.table import Column
    import astropy
    import powerlaw
    import numpy as np
    import astropy.table
    import astropy.units as u
    import matplotlib.pyplot as plt
    import matplotlib as mpl

    # Load its FITS file.
    t = Table.read(data)

    # Load the information about the galaxy.
    gxy = Galaxy(galaxyname)

    # Calculate the galaxy's properties.
    distance = np.asarray(gxy.distance)
    inclination = np.asarray(gxy.inclination)
    rgal = gxy.radius(ra=(t['XPOS']), dec=(t['YPOS']))
    rgal = rgal.to(u.kpc)
    rpgal = np.asarray(rgal)

    # Append these to the FITS table.
    col_rgal = Column(name='RADIUS_KPC', data=(rgal))
    t.add_column(col_rgal)

    # Sort the masses according to galactocentric radius.
    mass = t['MASS_EXTRAP'].data
    i_sorted = np.argsort(rgal)
    rgal_sorted = np.asarray(rgal[i_sorted])
    mass_sorted = np.asarray(mass[i_sorted])

    # Initiate a loop to calculate the bin boundaries and the indices of these boundaries in the sorted list.
    totmass = np.sum(mass) / n_bins
    edge_f = 1.1 * np.max(rgal_sorted)
    edges = np.zeros(n_bins - 1)
    start = 0
    mass_equiv = [0]  # indices for the sorted mass bins of equal mass
    mass_area = [0]  # indices for the sorted mass bins of equal area
    rgal_equiv = [
        0, 2, 8**0.5, 12**0.5, 4, 20**0.5, 24**0.5, 28**0.5, 32**0.5, 6
    ]
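    # rgal_equiv holds r_k = sqrt(4k) kpc, so each consecutive annulus encloses
    # an area of pi * (r_{k+1}**2 - r_k**2) = 4 pi kpc^2.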
    r = 1  # equal-area radial index
    e = 0  # edge index
    f = 0  # loop flag to skip the mass_area loop
    c = 0  # loop counter
    for i in range(len(mass_sorted)):
        # Find the indices for bins of equal mass (each summing to totmass)
        if np.sum(mass_sorted[start:i]) > totmass:
            edges[e] = 0.5 * (rgal_sorted[i] + rgal_sorted[i - 1])
            start = i
            mass_equiv = np.append(mass_equiv, i)
            e = e + 1
        # Find the indices for bins of equal area (4 pi kpc^2)
        if rgal_sorted[i] > rgal_equiv[r] and not f:
            if rgal_sorted[i] < rgal_equiv[3] and not f:
                mass_area = np.append(mass_area, i)
                r = r + 1
                c = 0
            if rgal_sorted[i] > rgal_equiv[3]:
                f = 1
                mass_area = np.append(mass_area, i)
    mass_equiv = np.append(mass_equiv, i)
    inneredge = np.concatenate(([0.000000], edges))
    outeredge = np.concatenate((edges, [edge_f]))

    # Create a template for a new table.
    column_names = [
        'Inner edge (kpc)', 'Outer edge (kpc)', 'GMC index', 'R', 'p',
        'Truncation mass ($M_\mathrm{\odot}$)',
        'Largest cloud ($M_\mathrm{\odot}$)',
        '5th largest cloud ($M_\mathrm{\odot}$)'
    ]
    column_types = ['f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4']
    table = Table(names=column_names, dtype=column_types)

    # Fill the table.
    for inneredge, outeredge in zip(inneredge, outeredge):

        idx = np.where((t['RADIUS_KPC'] >= inneredge)
                       & (t['RADIUS_KPC'] < outeredge))
        mass = t['MASS_EXTRAP'][idx].data
        fit = powerlaw.Fit(mass)
        fit_subset = powerlaw.Fit(mass, xmin=3e5)
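        # In the powerlaw package, truncated_power_law.parameter1 is the index
        # alpha and parameter2 is the exponential rate; 1 / parameter2 (used
        # below) therefore estimates the truncation mass.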
        R, p = fit.distribution_compare('power_law', 'truncated_power_law')
        table.add_row()
        table[-1]['R'] = R
        table[-1]['p'] = p
        table[-1]['GMC index'] = -fit.alpha
        table[-1]['Inner edge (kpc)'] = inneredge
        table[-1]['Outer edge (kpc)'] = outeredge
        table[-1]['Largest cloud ($M_\mathrm{\odot}$)'] = np.nanmax(mass)
        table[-1][
            'Truncation mass ($M_\mathrm{\odot}$)'] = 1 / fit.truncated_power_law.parameter2
        table[-1]['5th largest cloud ($M_\mathrm{\odot}$)'] = np.sort(
            t['MASS_EXTRAP'][idx])[-5]

    # Write the data to a FITS file.
    table.write('../Data/' + galaxyname + '_data.fits', overwrite=True)

    # Plot the mass distribution trends for equal-mass bins.
    t = Table.read('../Data/' + galaxyname + '_data.fits')
    inneredge = t['Inner edge (kpc)'].data
    outeredge = t['Outer edge (kpc)'].data
    subplot_label = ('(a)', '(b)', '(c)', '(d)', '(e)', '(f)')
    for i in range(len(mass_equiv) - 1):

        binmass = mass_sorted[mass_equiv[i]:mass_equiv[i + 1]]
        myfit = powerlaw.Fit(binmass)
        R, p = myfit.distribution_compare('power_law', 'truncated_power_law')
        fig = myfit.truncated_power_law.plot_ccdf(label='Truncated\nPower Law')
        myfit.power_law.plot_ccdf(label='Power Law', ax=fig)
        myfit.plot_ccdf(drawstyle='steps', label='Data', ax=fig)

        # Format the plot.
        plt.legend(loc=0)
        #plt.title(galaxyname+'Equal-mass Mass Distribution, bin '+repr(i+1))
        plt.ylim(ymin=10**-3)
        plt.xlabel(r'$M_\mathrm{\odot}$', fontsize=20)
        plt.ylabel('CCDF', fontsize=20)
        mpl.rc('xtick', labelsize=16)
        mpl.rc('ytick', labelsize=16)
        plt.text(0.01,
                 0.5,
                 subplot_label[i],
                 ha='left',
                 va='center',
                 transform=fig.transAxes,
                 fontsize=16)
        plt.text(
            0.35,
            0.01,
            r'$M_{bin}\ =\ %e M_\mathrm{\odot}$' % (totmass) + '\n' +
            r'$R_{gal}\ =\ %5.4f\ \mathrm{kpc}\ \mathrm{to}\ %5.4f\ \mathrm{kpc}$'
            % (inneredge[i], outeredge[i]) + '\n' +
            r'$\mathrm{R}\ =\ %5.4f,\ \mathrm{p}\ =\ %5.4f$' % (R, p) + '\n' +
            r'$\alpha\ =\ %5.4f,\ M_\mathrm{0}\ =\ %5.4eM_\mathrm{\odot}$' %
            (-myfit.alpha, 1 / myfit.truncated_power_law.parameter2),
            ha='left',
            va='bottom',
            transform=fig.transAxes,
            fontsize=16)
        plt.savefig('../Data/' + galaxyname + '_power_law_equal_mass_' +
                    repr(i + 1) + '.png')
        plt.close()

    # Plot the mass distribution trend for equal-area bins.
    for i in range(len(mass_area) - 1):

        f = 0  #loop flag
        if mass_area[i + 1] - mass_area[i] < 3:
            f = 1
        if not f:
            binmass = mass_sorted[mass_area[i]:mass_area[i + 1]]
            myfit = powerlaw.Fit(binmass)
            R, p = myfit.distribution_compare('power_law',
                                              'truncated_power_law')
            fig = myfit.truncated_power_law.plot_ccdf(
                label='Truncated\nPower Law')
            myfit.power_law.plot_ccdf(label='Power Law', ax=fig)
            myfit.plot_ccdf(drawstyle='steps', label='Data', ax=fig)

            # Format the plot.
            plt.legend(loc=0)
            #plt.title(galaxyname+'Equal-area Mass Distribution, bin '+repr(i+1))
            plt.ylim(ymin=10**-3)
            plt.xlabel(r'$M_\mathrm{\odot}$', fontsize=20)
            plt.ylabel('CCDF', fontsize=20)
            mpl.rc('xtick', labelsize=16)
            mpl.rc('ytick', labelsize=16)
            plt.text(0.01,
                     0.5,
                     subplot_label[i],
                     ha='left',
                     va='center',
                     transform=fig.transAxes,
                     fontsize=16)
            plt.text(
                0.35,
                0.01,
                r'$R_{gal}\ =\ %5.4f\ \mathrm{kpc}\ \mathrm{to}\ %5.4f\ \mathrm{kpc}$'
                % (rgal_equiv[i], rgal_equiv[i + 1]) + '\n' +
                r'$\mathrm{R}\ =\ %5.4f,\ \mathrm{p}\ =\ %5.4f$' % (R, p) +
                '\n' +
                r'$\alpha\ =\ %5.4f,\ M_\mathrm{0}\ =\ %5.4eM_\mathrm{\odot}$'
                % (-myfit.alpha, 1 / myfit.truncated_power_law.parameter2),
                ha='left',
                va='bottom',
                transform=fig.transAxes,
                fontsize=16)
            plt.savefig('../Data/' + galaxyname + '_power_law_equal_area_' +
                        repr(i + 1) + '.png')
            plt.close()

    return [distance, inclination]
Esempio n. 39
0
def table_info(table, label):
    # Print basic info to console
    print('\n*** {0} ***'.format(label))
    print('Rows: {0}'.format(len(table)))
    print('Columns: {0}'.format(table.colnames))
    print('')

    # Save to file for debugging
    filename = 'crab_mwl_{0}.ipac'.format(label)
    print('INFO: Writing {0}'.format(filename))
    table.write(filename, format='ascii.ipac')


if __name__ == '__main__':
    table = get_combined_table()
    table_info(table, 'get_combined_table')

    table = remove_flare_data(table)
    table_info(table, 'remove_flare_data')

    table = combine_columns(table)
    table_info(table, 'combine_columns')

    table = clean_up(table)
    table_info(table, 'clean_up')

    filename = 'crab_mwl.fits'
    print('INFO: Writing {0}'.format(filename))
    table.write(filename, overwrite=True)
Esempio n. 40
0
column_names = [
    'Inner edge (pc)', 'Outer edge (pc)', 'GMC index', 'R', 'p',
    'Truncation mass (M$_\odot$)', 'Largest cloud (M$_\odot$)',
    '5th largest cloud (M$_\odot$)'
]
column_types = ['f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4']
table = Table(names=column_names, dtype=column_types)

for inneredge, outeredge in zip(inneredge, outeredge):
    idx = np.where((t['RADIUS_PC'] >= inneredge)
                   & (t['RADIUS_PC'] < outeredge))
    mass = t['MASS_EXTRAP'][idx].data
    #don't have to create an index for the xmin mass - defined in the fit_subset
    fit = powerlaw.Fit(mass)
    fit_subset = powerlaw.Fit(mass, xmin=3e5)
    R, p = fit_subset.distribution_compare('power_law', 'truncated_power_law')
    table.add_row()
    table[-1]['Inner edge (pc)'] = inneredge
    table[-1]['Outer edge (pc)'] = outeredge
    table[-1]['GMC index'] = -fit_subset.alpha
    table[-1]['R'] = R
    table[-1]['p'] = p
    table[-1][
        'Truncation mass (M$_\odot$)'] = 1 / fit_subset.truncated_power_law.parameter2
    table[-1]['Largest cloud (M$_\odot$)'] = mass.max()
    table[-1]['5th largest cloud (M$_\odot$)'] = np.sort(
        t['MASS_EXTRAP'][idx])[-5]
    print(table)
    #print(-fit.alpha, -fit_subset.alpha, R, p, 1/fit_subset.truncated_power_law.parameter2)

table.write('m83bininfo.fits', overwrite=True)
Esempio n. 41
0
expec = np.array(expec)
calc = np.array(calc)
calc_err = np.array(calc_err)
y_calc = np.array(y_calc)
y_expec = np.array(y_expec)
expec_err = np.array(expec_err)
print(names)
print(expec)
print(calc)
t = Table()

t['NAMES'] = Column(names, description='MaNGA PlateIFU')
t['EXPECTED'] = Column(expec, description='gradient expected')
t['CALCULATED'] = Column(calc, description='gradient calculate')
t['CALCULATED_ERR'] = Column(calc_err,
                             description='error on the calculated gradient')
t['EXPECTED_ERR'] = Column(expec_err,
                           description='error on the expected gradient')
t['Y_INT_CALC'] = Column(y_calc,
                         description='y intercept of the calculated gradient')
t['Y_INT_MASS'] = Column(y_expec,
                         description='y intercept with the mass from Adam')
t['Y_INT_EXPEC'] = Column(y_expec,
                          description='y intercept of the expected gradient')
t['MASS'] = Column(y_expec, description='just the mass of the galaxy')
t['MASS_GRAD'] = Column(
    y_expec,
    description='gradient from the mass from the polynomial from Adam')
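# NB: as written above, Y_INT_MASS, Y_INT_EXPEC, MASS, and MASS_GRAD all store
# the same y_expec array, and Table.write below raises if the output file
# already exists unless overwrite=True is passed.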

t.write('/home/celeste/Documents/astro_research/summer_2018/slopesv4.fits')
Esempio n. 42
0
def t_table1_radec_xref_jhk_irac(write=False):
    """
    Generates Table 1.

    Current columns:
      ONCvar ID : int  ## note: this comes from our internal UKvar_ID column.
      RA : float, degrees
      DEC : float, degrees
      X-ref : string # Using SIMBAD names for now.
      Data quality : int (0, 1, 2) corresponding to subjective/auto/strict
      Periodic : int (0, 1)
      Median J, H, K, with error bars : six floats
      IRAC colors from Megeath, and errors : eight floats
      Class from Megeath : string {'P', 'D', 'ND', or 'na'}

    Parameters
    ----------
    write : bool, optional (default False)
        Write to disk? Either way, this function returns an astropy table.

    Returns
    -------
    table : astropy.table.Table
        Table 1.

    """

    table = astropy.table.Table()
    table.table_name = "Table 1"

    columns_data_and_formats = [
        ('ONCvar ID', ukvar_spread.UKvar_ID, '%i'),
        ('R.A. (deg)', np.degrees(ukvar_spread.RA), '%.6f'),
        ('Decl. (deg)', np.degrees(ukvar_spread.DEC), '%.6f'),
        ('SIMBAD Cross-reference', ukvar_spread.SIMBAD_name, '%s'),
        ('Data quality flag', ukvar_spread.autovar + ukvar_spread.strict, '%i'),
        ('Periodic flag', ukvar_spread.periodic, '%i'),
        ('Median J mag', ukvar_spread.j_median, '%.3f'),
        ('Median J mag error', ukvar_spread.j_err_median, '%.3f'),
        ('Median H mag', ukvar_spread.h_median, '%.3f'),
        ('Median H mag error', ukvar_spread.h_err_median, '%.3f'),
        ('Median K mag', ukvar_spread.k_median, '%.3f'),
        ('Median K mag error', ukvar_spread.k_err_median, '%.3f'),
        ('Spitzer [3.6] mag', megeath2012_full_by_ukvar['3.6'], '%.3f'),
        ('Spitzer [3.6] mag error', megeath2012_full_by_ukvar['e_3.6'], '%.3f'),
        ('Spitzer [4.5] mag', megeath2012_full_by_ukvar['4.5'], '%.3f'),
        ('Spitzer [4.5] mag error', megeath2012_full_by_ukvar['e_4.5'], '%.3f'),
        ('Spitzer [5.8] mag', megeath2012_full_by_ukvar['5.8'], '%.3f'),
        ('Spitzer [5.8] mag error', megeath2012_full_by_ukvar['e_5.8'], '%.3f'),
        ('Spitzer [8.0] mag', megeath2012_full_by_ukvar['8'], '%.3f'),
        ('Spitzer [8.0] mag error', megeath2012_full_by_ukvar['e_8'], '%.3f'),
        ('Class (from Megeath et al. 2012)', make_megeath_class_column(), '%s') ]

    column_to_format = {}
    for column, data, format in columns_data_and_formats:
        table[column] = data
        column_to_format[column] = format

    if write:
        table.write(output_directory+'AJ_table_2.txt', format='ascii.basic', delimiter="&", formats=column_to_format)


    return table
Esempio n. 43
0
        # plot fit for all 3 ('both')
        sp.plotter(figure=1)
        sp.specfit(fittype='formaldehyde_mm_radex',
                   guesses=[95, 14.5, 4.5, 67, 4.0],
                   limits=[(50, 250), (11, 17), (3, 5.5), (65, 70), (0.5, 10)],
                   limited=[(True, True)] * 5,
                   fixed=[False, False, False, False, False])

        # only the center parameter was changed from the example
        # sp.plotter.savefig('H2CO_all_radexfit.pdf')
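        # sp.specfit.modelpars follows the parameter order of the guesses
        # above: [temperature, column, density, center, width]; modelerrs
        # matches that order.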
        table.add_row()
        table[-1]['x'] = v
        table[-1]['y'] = w
        table[-1]['temp'] = sp.specfit.modelpars[0]
        table[-1]['column'] = sp.specfit.modelpars[1]
        table[-1]['density'] = sp.specfit.modelpars[2]
        table[-1]['center'] = sp.specfit.modelpars[3]
        table[-1]['width'] = sp.specfit.modelpars[4]
        table[-1]['temp errors'] = sp.specfit.modelerrs[0]
        table[-1]['column errors'] = sp.specfit.modelerrs[1]
        table[-1]['density errors'] = sp.specfit.modelerrs[2]
        table[-1]['center errors'] = sp.specfit.modelerrs[3]
        table[-1]['width errors'] = sp.specfit.modelerrs[4]
table.write('grs1915H2COparameters.fits', overwrite=True)

plt.show()
t = Table.read('grs1915H2COparameters.fits')
plt.scatter(t['x'], t['y'], c=t['temp'], marker='o', cmap='hot',
            edgecolor='none')
plt.colorbar(label='Temperature(K)')
plt.savefig('grs1915H2COtempmap.pdf')

Esempio n. 44
0
def t_table2_variability_periods_periodics_bymegeathclass(write=False):
    """
    Generates Table 2, which comes in three pieces.

    Note that we're only using periodic stars here!

    TODO: add in flags for whether a given star has valid
          J, H, or K data, respectively

    Current columns:
      N_j, N_h, N_k : three integers
      Delta-J, H, K: three floats
      Delta-J-H, J-K, H-K: three floats
      Color slopes (JH, HK, JHK): three floats
      Stetson : one float
      Stetson_choice : one string
      Period : one float
      Data quality : int (0, 1, 2) corresponding to subjective/auto/strict


    Parameters
    ----------
    write : bool, optional (default False)
        Write to disk? Either way, this function returns an astropy table.

    """

    table = astropy.table.Table()
    table.table_name = "Table 2"

    periodics = ukvar_spread.where(ukvar_spread.periodic != 0)
    periodic_periods = ukvar_periods[ukvar_spread.periodic != 0]
    megeath_class_by_periodics = make_megeath_class_column()[ukvar_spread.periodic != 0]

    # Do some stuff where we blank out color slopes that are no good
    jhk_slope_column = periodics.jhk_slope
    jhk_slope_error = periodics.jhk_slope_err
    jhk_slope_column[~np.in1d(periodics.SOURCEID,
                              jhk_slope_reference.SOURCEID)] = np.nan
    jhk_slope_error[~np.in1d(periodics.SOURCEID,
                             jhk_slope_reference.SOURCEID)] = np.nan

    jjh_slope_column = periodics.jjh_slope
    jjh_slope_error = periodics.jjh_slope_err
    jjh_slope_column[~np.in1d(periodics.SOURCEID,
                              jh_slope_reference.SOURCEID)] = np.nan
    jjh_slope_error[~np.in1d(periodics.SOURCEID,
                             jh_slope_reference.SOURCEID)] = np.nan

    khk_slope_column = periodics.khk_slope
    khk_slope_error = periodics.khk_slope_err
    khk_slope_column[~np.in1d(periodics.SOURCEID,
                              hk_slope_reference.SOURCEID)] = np.nan
    khk_slope_error[~np.in1d(periodics.SOURCEID,
                             hk_slope_reference.SOURCEID)] = np.nan

    columns_data_and_formats = [
        ('ONCvar ID', periodics.UKvar_ID, '%i'),
        ('N_J', periodics.N_j, '%i'),
        ('N_H', periodics.N_h, '%i'),
        ('N_K', periodics.N_k, '%i'),
        ('J mag range (robust)', periodics.j_ranger, '%.3f'),
        ('H mag range (robust)', periodics.h_ranger, '%.3f'),
        ('K mag range (robust)', periodics.k_ranger, '%.3f'),
        ('J-H range (robust)', periodics.jmh_ranger, '%.3f'),
        ('H-K range (robust)', periodics.hmk_ranger, '%.3f'),
        ('(J-H), (H-K) color slope', jhk_slope_column, '%.3f'),
        ('jhk_slope_error', jhk_slope_error, '%.3f'),
        ('J, (J-H) color slope', jjh_slope_column, '%.3f'),
        ('jjh_slope_error', jjh_slope_error, '%.3f'),
        ('K, (H-K) color slope', khk_slope_column, '%.3f'),
        ('khk_slope_error', khk_slope_error, '%.3f'),
        ('Stetson Variability Index', periodics.Stetson, '%.3f'),
        ('Bands used to compute Stetson', periodics.Stetson_choice, '%s'),
        ('Best-fit period', periodic_periods, '%.4f'),
        ('Data quality flag', periodics.autovar + periodics.strict, '%i'),
        ('Class', megeath_class_by_periodics, '%s')
    ]

    column_to_format = {}
    for column, data, format in columns_data_and_formats:
        table[column] = data
        column_to_format[column] = format


    if write:
        table.write(output_directory+"AJ_table_4.txt", format='ascii.basic', delimiter="&", formats=column_to_format)

    return table
Esempio n. 45
0
    "GMC index",
    "R",
    "p",
    "Truncation mass (M$_\odot$)",
    "Largest cloud (M$_\odot$)",
    "5th largest cloud (M$_\odot$)",
]
column_types = ["f4", "f4", "f4", "f4", "f4", "f4", "f4", "f4"]
table = Table(names=column_names, dtype=column_types)

for inneredge, outeredge in zip(inneredge, outeredge):
    idx = np.where((t["RADIUS_PC"] >= inneredge) & (t["RADIUS_PC"] < outeredge))
    mass = t["MASS_EXTRAP"][idx].data
    # don't have to create an index for the xmin mass - defined in the fit_subset
    fit = powerlaw.Fit(mass)
    fit_subset = powerlaw.Fit(mass, xmin=3e5)
    R, p = fit_subset.distribution_compare("power_law", "truncated_power_law")
    table.add_row()
    table[-1]["Inner edge (pc)"] = inneredge
    table[-1]["Outer edge (pc)"] = outeredge
    table[-1]["GMC index"] = -fit_subset.alpha
    table[-1]["R"] = R
    table[-1]["p"] = p
    table[-1]["Truncation mass (M$_\odot$)"] = 1 / fit_subset.truncated_power_law.parameter2
    table[-1]["Largest cloud (M$_\odot$)"] = mass.max()
    table[-1]["5th largest cloud (M$_\odot$)"] = np.sort(t["MASS_EXTRAP"][idx])[-5]
    print(table)
    # print(-fit.alpha, -fit_subset.alpha, R, p, 1/fit_subset.truncated_power_law.parameter2)

table.write("m83bininfo.fits", overwrite=True)
Esempio n. 46
0
def t_table3_variability_nonperiodics_bymegeathclass(write=False):
    """
    Generates Table 3, which also comes in three pieces.

    Only non-periodic stars; has many of the same columns as Table 2.

    Current columns:
      N_j, N_h, N_k : three integers
      Delta-J, H, K: three floats
      Delta-J-H, J-K, H-K: three floats
      Color slopes (JH, HK, JHK): three floats
      Stetson : one float
      Stetson_choice : one string
      Data quality : int (0, 1, 2) corresponding to subjective/auto/strict

    Parameters
    ----------
    write : bool, optional (default False)
        Write to disk? Either way, this function returns an astropy table.

    """

    table = astropy.table.Table()
    table.table_name = "Table 3"

    nonperiodics = ukvar_spread.where(ukvar_spread.periodic == 0)
    megeath_class_by_nonperiodics = make_megeath_class_column()[ukvar_spread.periodic == 0]

    # Do some stuff where we blank out color slopes that are no good
    jhk_slope_column = nonperiodics.jhk_slope
    jhk_slope_error = nonperiodics.jhk_slope_err
    jhk_slope_column[~np.in1d(nonperiodics.SOURCEID, jhk_slope_reference.SOURCEID)] = np.nan
    jhk_slope_error[~np.in1d(nonperiodics.SOURCEID, jhk_slope_reference.SOURCEID)] = np.nan

    jjh_slope_column = nonperiodics.jjh_slope
    jjh_slope_error = nonperiodics.jjh_slope_err
    jjh_slope_column[~np.in1d(nonperiodics.SOURCEID, jh_slope_reference.SOURCEID)] = np.nan
    jjh_slope_error[~np.in1d(nonperiodics.SOURCEID, jh_slope_reference.SOURCEID)] = np.nan

    khk_slope_column = nonperiodics.khk_slope
    khk_slope_error = nonperiodics.khk_slope_err
    khk_slope_column[~np.in1d(nonperiodics.SOURCEID, hk_slope_reference.SOURCEID)] = np.nan
    khk_slope_error[~np.in1d(nonperiodics.SOURCEID, hk_slope_reference.SOURCEID)] = np.nan

    columns_data_and_formats = [
        ('ONCvar ID', nonperiodics.UKvar_ID, '%i'),
        ('N_J', nonperiodics.N_j, '%i'),
        ('N_H', nonperiodics.N_h, '%i'),
        ('N_K', nonperiodics.N_k, '%i'),
        ('J mag range (robust)', nonperiodics.j_ranger, '%.3f'),
        ('H mag range (robust)', nonperiodics.h_ranger, '%.3f'),
        ('K mag range (robust)', nonperiodics.k_ranger, '%.3f'),
        ('J-H range (robust)', nonperiodics.jmh_ranger, '%.3f'),
        ('H-K range (robust)', nonperiodics.hmk_ranger, '%.3f'),
        ('(J-H), (H-K) color slope', jhk_slope_column, '%.3f'),
        ('jhk_slope_error', jhk_slope_error, '%.3f'),
        ('J, (J-H) color slope', jjh_slope_column, '%.3f'),
        ('jjh_slope_error', jjh_slope_error, '%.3f'),
        ('K, (H-K) color slope', khk_slope_column, '%.3f'),
        ('khk_slope_error', khk_slope_error, '%.3f'),
        ('Stetson Variability Index', nonperiodics.Stetson, '%.3f'),
        ('Bands used to compute Stetson', nonperiodics.Stetson_choice, '%s'),
        ('Data quality flag', nonperiodics.autovar + nonperiodics.strict, '%i'),
        ('Class', megeath_class_by_nonperiodics, '%s')
        ]

    # Assemble the table columns and record their output formats.
    column_to_format = {}
    for column, data, format in columns_data_and_formats:
        table[column] = data
        column_to_format[column] = format

    if write:
        table.write(output_directory+"AJ_table_5.txt", format='ascii.basic', delimiter="&", formats=column_to_format)

    return table