Example #1
File: proc0.py Project: astrofle/CRRLpy
def main(spec, out, freqf):
    """
    """
    
    specs = glob.glob(spec)
    
    # Sort the SBs
    crrls.natural_sort(specs)
    
    for s in specs:
        
        ## Determine the subband name
        #try:
            #sb = re.findall('SB\d+', s)[0]
        #except IndexError:
            #print "Could not find SB number."
            #print "Will use SB???"
            #sb = 'SB???'
        
        data = np.loadtxt(s, comments='#')
        

        freq = data[:,0]*freqf
        tb = data[:,1]

        data[:,0] = data[:,0]/1e6

        
        # write the processed spectrum
        #np.savetxt('{0}_{1}.ascii'.format(basename, sb), data)
        tbtable = Table([freq, tb], 
                        names=['FREQ MHz',
                               'Tb Jy/BEAM'])
                        
    ascii.write(tbtable, out, format='commented_header')
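A hypothetical invocation; the glob pattern, output name, and frequency scale factor are illustrative:

# e.g. combine all subband spectra, scaling the frequency axis from Hz to MHz
main('spectra/*_SB*.ascii', 'combined_tb.ascii', 1e-6)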
Example #2
def writefiles(tiles, fnbase, overwrite=False):
    from astropy.io import fits
    from astropy.io import ascii
    from matplotlib.mlab import rec_drop_fields
    from astropy import table
    fits.writeto(fnbase+'.fits', tiles, overwrite=overwrite)
    hdulist = fits.open(fnbase+'.fits', mode='update')
    hdulist[1].header['EXTNAME'] = 'TILES'
    hdulist.close()
    tilestab = table.Table(
        rec_drop_fields(tiles, ['brightra', 'brightdec', 'brightvtmag']))
    metadata = {'tileid': ('', 'Unique tile ID'),
                'ra': ('deg', 'Right ascension'),
                'dec': ('deg', 'Declination'),
                'pass': ('', 'DESI layer'),
                'in_desi': ('', '1=within DESI footprint; 0=outside'),
                'ebv_med':('mag', 'Median Galactic E(B-V) extinction in tile'),
                'airmass':('', 'Airmass if observed at hour angle 15 deg'),
                'star_density':('deg^-2', 'median number density of Gaia stars brighter than 19.5 mag in tile'),
                'exposefac':('', 'Multiplicative exposure time factor from airmass and E(B-V)'),
                'program':('', 'DARK, GRAY, BRIGHT, or EXTRA'),
                'obsconditions':('', '1 for DARK, 2 for GRAY, 4 for BRIGHT, 0 for EXTRA'),
                'brightra':('deg', 'RAs of 3 brightest Tycho-2 stars in tile'),
                'brightdec':('deg', 'Decs of 3 brightest Tycho-2 stars in tile'),
                'brightvtmag':('mag', 'V_T magnitudes of 3 brightest Tycho-2 stars in tile'),
                'centerid':('', 'Unique tile ID of pass 0 tile corresponding to this tile'),
                }
    from astropy import units as u
    unitdict = {'': None, 'deg': u.deg, 'mag': u.mag, 'deg^-2': 1/u.deg**2}
    for name in tilestab.dtype.names:
        tilestab[name].unit = unitdict[metadata[name][0]]
        tilestab[name].description = metadata[name][1]
    ascii.write(tilestab, fnbase+'.ecsv', format='ecsv', overwrite=overwrite)
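A minimal usage sketch, assuming a matplotlib version that still provides `rec_drop_fields`; the record array below is hypothetical and carries only a subset of the documented fields:

import numpy as np
# Hypothetical tile array; only fields named in `metadata` survive into the ECSV
tiles = np.zeros(2, dtype=[('tileid', 'i4'), ('ra', 'f8'), ('dec', 'f8'),
                           ('brightra', 'f8', 3), ('brightdec', 'f8', 3),
                           ('brightvtmag', 'f4', 3)])
writefiles(tiles, 'desi-tiles', overwrite=True)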
Example #3
def bsnip_edits():
	bsnip = glob.glob ('../../data/bsnip/*.flm')
	bsnip_sn = []
	b = []
	v = []
	for i in range (len(bsnip)):
			bsnip[i] = bsnip[i].split('-')[0]
			bsnip[i] = bsnip[i].split('/')
			bsnip[i] = bsnip[i][len(bsnip[i])-1]
			if i > 0:#removes redundant sn names
				if bsnip[i-1] != bsnip[i]:
					bsnip_sn.append(bsnip[i])
	cut_index = 0
	flag = False
	for i in range(len(bsnip_sn)):
			if (not flag):
				print "passing",bsnip_sn[i]
			#certain cases for specific sn names
			if bsnip_sn[i] == 'sn2007s1':#want to start at first changed sn
				print "start with",len(bsnip_sn),"sn"
				print "START at index:",i
				cut_index = i
				flag = True #start here 
			if(flag):
				print "looking at",bsnip_sn[i]
				if bsnip_sn[i] == "sn2007s1":
					bsnip_sn[i] = "SNF20071021-000"
					print "changed to",bsnip_sn[i]
				if bsnip_sn[i] == "sn2008s1":
					bsnip_sn[i] = "SNF20080514-002"
					print "changed to",bsnip_sn[i]
				if bsnip_sn[i] == "sn2008r3":
					bsnip_sn[i] = "sn1989a"###doesn't work
					print "changed to",bsnip_sn[i]
				if bsnip_sn[i] == "sn2008s3":
					bsnip_sn[i] = "SNF20080825-006"
					print "changed to",bsnip_sn[i]
				if bsnip_sn[i] == "sn2008s4":
					bsnip_sn[i] = "sn1989a"###can't find correct name
					print "changed to",bsnip_sn[i]
				if bsnip_sn[i] == "sn2008s5":
					bsnip_sn[i] = "SNF20080909-030"
					print "changed to",bsnip_sn[i]
				if bsnip_sn[i] == "sn2008s8":
					bsnip_sn[i] = "sn1989a"###look for correct names
					print "changed to",bsnip_sn[i]
				
				ext = IrsaDust.get_extinction_table(bsnip_sn[i])
				print ext
				print ext[1][0],ext[1][3]
				print ext[2][0],ext[2][3]
				b.append(ext[1][3])
				v.append(ext[2][3])


	print "stopped successfullly?...cutting table"
	del bsnip_sn[:cut_index]
	param = Table([bsnip_sn,b,v])
	print param
	ascii.write(param,'bsnip_extinc_added.dat')
Example #4
def csv_generator(headers, keywords, filename):
    """
    Pulls headers and keywords from SQL query to make a csv table.

    Parameters
    ----------
    headers : class (<class 'sqlalchemy.engine.result.RowProxy'>)
        All of the header information returned from SQL query.
    keywords : list
        A list of column names returned from SQL query.
    filename : str
        The name of the CSV output file.

    Returns
    -------
    None

    Outputs
    -------
    t : CSV file
        A CSV file that contains all of the queried header information.
    """
    datarows = []
    for item in headers:
        datarows.append(item)
    print(type(item))
    t = Table(rows = datarows, names = keywords, meta = {'Name':'COS HEADER TABLE'})
    ascii.write(t, filename + '.csv', format='csv')
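A hedged usage sketch; the database, query, and column names are hypothetical and assume a SQLAlchemy connection:

from sqlalchemy import create_engine, text

engine = create_engine('sqlite:///cos_headers.db')  # assumed database
with engine.connect() as conn:
    result = conn.execute(text('SELECT rootname, date_obs FROM headers'))
    csv_generator(result.fetchall(), list(result.keys()), 'cos_header_table')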
Example #5
def test_rdb_write_types():
    dat = ascii.read(['a b c d', '1 1.0 cat 2.1'],
                     Reader=ascii.Basic)
    out = StringIO()
    ascii.write(dat, out, Writer=ascii.Rdb)
    outs = out.getvalue().splitlines()
    assert_equal(outs[1], 'N\tN\tS\tN')
Example #6
File: analyze.py Project: hojonathanho/rtk
def print_results(args, colnames, stats):
    colnames = colnames + ['n_samples']
    colvals = [[] for _ in xrange(len(colnames))]

    kvs = sorted(rtk.itr.iter_nested_d(stats))

    for keys, val in kvs:
        i = 0
        for k in keys:
            colvals[i].append(k)
            i += 1
        n_results_cols = len(val[0])
        for j in xrange(n_results_cols):
            colvals[i].append(round(mean(args, map(rtk.itr.ig(j), val)), 3))
            i += 1
            colvals[i].append(round(std(args, map(rtk.itr.ig(j), val)), 3))
            i += 1
        colvals[i].append(len(val))

    ascii.write(colvals, 
            Writer=ascii.FixedWidthTwoLine, 
            names=colnames,
            bookend=True, 
            delimiter='|',
            delimiter_pad=' ')
Example #7
def query_bsnip():
	#Creates extinction.dat from bsnip data
	#list of files
	bsnip = glob.glob ('../../data/bsnip/*.flm')
	bsnip_sn = []
	for i in range (len(bsnip)):
		bsnip[i] = bsnip[i].split('-')[0]
		bsnip[i] = bsnip[i].split('/')
		bsnip[i] = bsnip[i][len(bsnip[i])-1]
		if i > 0:#removes redundant sn names
			if bsnip[i-1] != bsnip[i]:
				bsnip_sn.append(bsnip[i])
	b = []
	v = []
	for i in range(len(bsnip_sn)):
		print "looking at",bsnip_sn[i]
		#certain cases for specific sn names
		if bsnip_sn[i] == 'sn2007s1':
			print "WARNING"
			del bsnip_sn[i:]
			break
		ext = IrsaDust.get_extinction_table(bsnip_sn[i])
		print ext
		print ext[1][0],ext[1][3]
		print ext[2][0],ext[2][3]
		b.append(ext[1][3])
		v.append(ext[2][3])
	#makes table in format 'sn','B','V'
	param = Table([bsnip_sn,b,v])
	ascii.write(param,'bsnip_extinc.dat')
Example #8
File: utils.py Project: jnburchett/joebvp
def concatenate_line_tables(filelist,outtablefile='compiledVPoutputs.dat'):
    '''
    Compiles the output from several fitting runs into a single table

    Parameters
    ----------
    filelist : list of strings or str
        This should be a list containing the names of VP input files, or a string referring to a file
        that simply lists the input files.
        See joebvpfit.readpars for details of file format

    outtablefile : str
        Name of compiled model parameter output file

    '''

    if isinstance(filelist, str):
        lstarr=np.genfromtxt(filelist,dtype=None)
        listofiles=lstarr.tolist()
    else:
        listofiles=filelist

    tabs = []
    for i, ff in enumerate(listofiles):
        tabs.append(ascii.read(ff))
    bigpartable = vstack(tabs)
    ascii.write(bigpartable, output=outtablefile, delimiter='|')  # write out compiled table
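A hedged example call, with illustrative file names:

# Compile two runs' VP output tables into a single pipe-delimited file
concatenate_line_tables(['run1.VP.out', 'run2.VP.out'], outtablefile='compiledVPoutputs.dat')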
Example #9
def csv_generator(headers,keywords,path,filename):
    """
    Pulls headers and keywords from SQL query to make a csv table.

    Parameters
    ----------
    headers : class (<class 'sqlalchemy.engine.result.RowProxy'>)
        All of the header information returned from SQL query.
    keywords : list
        A list of column names returned from SQL query.
    path : str
        Path of the directory where the output file will be written.
    filename : str
        The name of the CSV output file.

    Returns
    -------
    None

    Outputs
    -------
    t : CSV file
        A CSV file that contains all of the queried header information.
    """
    if filename.endswith('.txt'):
        form = 'ascii'
    elif filename.endswith('.csv'):
        form = 'csv'
    else:
        print('Cannot handle files that end with .{}'.format(filename.split('.')[-1]))
        return

    datarows = []
    for item in headers:
        datarows.append(item)
    t = Table(rows = datarows, names = keywords, meta = {'Name':'COS HEADER TABLE'})
    ascii.write(t,os.path.join(path,filename),format=form)
Example #10
def generate_count_table(hsts, fnout=None, maglims=[21, 20.5, 20], outercutrad=-90,remove_cached=True):
    from astropy.io import ascii
    from astropy import table

    targetingkwargs = []
    for m in maglims:
        targetingkwargs.append({'faintlimit': m, 'outercutrad': outercutrad, 'colname': str(m)})

    tab = count_targets(hsts, targetingkwargs=targetingkwargs, remove_cached=remove_cached)
    for m in maglims:
        satcnt = []
        for hs in hsts:
            satcnt.append(count_mw_sats(hs, m))
        tab.add_column(table.Column(name='nsat_' + str(m), data=satcnt))

    for m in maglims:
        nsatstr = 'nsat_' + str(m)
        ntargstr = 'ntarg_' + str(m)

        tab.add_column(table.Column(name='ntargpersat_' + str(m), data=tab[ntargstr] / tab[nsatstr]))

    if fnout:
        ascii.write(tab, fnout)

    return tab
Example #11
def write_metadata(targname, header_data, qldb_rootnames):
    '''
    Writes an output file containing the relationships between the columns,
    rootnames, and times of observation.
    '''

    # Build dictionary containing data to send to output
    out_dict = {}
    out_dict['orig_columns'] = [header_data['col'][i] for i in 
        range(len(header_data)) if header_data['rootname'][i] 
        in qldb_rootnames]
    out_dict['new_columns'] = [i for i in range(len(out_dict['orig_columns']))]
    out_dict['rootnames'] = [header_data['rootname'][i] for i in 
        range(len(header_data)) if header_data['rootname'][i] 
        in qldb_rootnames]

    # Query database for DATE-OBS and TIME-OBS for each rootname
    out_dict['date_obs'], out_dict['time_obs'] = \
        query_for_times(out_dict['rootnames'])

    # Write results to text file
    outfile = '{}_metadata.dat'.format(targname)
    ascii.write([
        out_dict['orig_columns'], 
        out_dict['new_columns'],
        out_dict['rootnames'], 
        out_dict['date_obs'], 
        out_dict['time_obs']], 
        outfile, 
        names=['orig column', 'new column', 'rootname', 'DATE-OBS', 
               'TIME-OBS'])
Example #12
def write_and_correct_latex_table(table, filename, caption, begin=0, end=30,
                                  **kwargs):
    # 1. Convert all the column headers to colhead { old_name }
    for colname in table.colnames:
        table[colname].name = "\\colhead{ %s }" % colname

    # 2. Prepare the latexdict
    header_start = ('\\tabletypesize{\\scriptsize}' +
                    '\n\\rotate\n' +
                    ('\\tablecaption{ %s }\n' % caption) +
                    '\\tablewidth{0pt}\n\n')
    data_start = r'\startdata'
    data_end = r'\enddata'

    latexdict = {
        'header_start': header_start+r'\tablehead{',
        'header_end': '}',
        'data_start': data_start,
        'data_end': data_end
        }

    latexdict.update(kwargs)
    
    # 3. Write the table as a {table} and {tabular} thing
    ascii.write(
        table[begin:end], filename,
        Writer = ascii.Latex,
        latexdict = latexdict,
        )

    # 4. Convert it from {table}+{tabular} environment to {deluxetable}
    convert_tabletabular_to_deluxetable(filename)
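A hypothetical call; the table contents are illustrative, and `ascii` plus `convert_tabletabular_to_deluxetable` are assumed to be defined elsewhere in the module:

from astropy.table import Table
t = Table({'Star': ['A', 'B'], 'Flux': [1.2, 3.4]})
write_and_correct_latex_table(t, 'table1.tex', caption='Sample fluxes', end=2)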
Example #13
def write_ascii(self, filename, **kwargs):
    '''
    Write the table to an ASCII file using asciitable

    Optional Keyword Arguments:

        Writer - Writer class (default= Basic)
        delimiter - column delimiter string
        write_comment - string defining a comment line in table
        quotechar - one-character string to quote fields containing special characters
        formats - dict of format specifiers or formatting functions
        names - list of names corresponding to each data column
        include_names - list of names to include in output (default=None selects all names)
        exclude_names - list of names to exclude from output (applied after include_names)

    See the asciitable documentation at http://cxc.harvard.edu/contrib/asciitable/ for more details.
    '''

    if 'overwrite' in kwargs:
        overwrite = kwargs.pop('overwrite')
    else:
        overwrite = False

    if type(filename) is str and os.path.exists(filename):
        if overwrite:
            os.remove(filename)
        else:
            raise Exception("File exists: %s" % filename)

    asciitable.write(self.data, filename, **kwargs)
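Hypothetical usage, assuming `sed` is an instance of the class this method belongs to, with its table stored in `sed.data`:

# Write comma-delimited output, replacing any existing file
sed.write_ascii('photometry.txt', delimiter=',', overwrite=True)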
Example #14
def test_roundtrip_masked(fmt_name_class):
    """
    Round trip a simple masked table through every writable format and confirm
    that reading back gives the same result.
    """
    fmt_name, fmt_cls = fmt_name_class

    if not getattr(fmt_cls, '_io_registry_can_write', True):
        return

    # Skip tests for fixed_width or HTML without bs4
    if ((fmt_name == 'html' and not HAS_BEAUTIFUL_SOUP)
            or fmt_name == 'fixed_width'):
        return

    t = simple_table(masked=True)

    out = StringIO()
    fast = fmt_name in ascii.core.FAST_CLASSES
    try:
        ascii.write(t, out, format=fmt_name, fast_writer=fast)
    except ImportError:  # Some failed dependency, e.g. PyYAML, skip test
        return

    # No-header formats need to be told the column names
    kwargs = {'names': t.colnames} if 'no_header' in fmt_name else {}

    t2 = ascii.read(out.getvalue(), format=fmt_name, fast_reader=fast, guess=False, **kwargs)

    assert t.colnames == t2.colnames
    for col, col2 in zip(t.itercols(), t2.itercols()):
        assert col.dtype.kind == col2.dtype.kind
        assert np.all(col == col2)
Example #15
def update_html(filename, roles):
    """
    Replaces the Roles table in the HTML file with the new values in the
    ``roles`` table.
    """
    with open(filename) as fh:
        lines = fh.readlines()

    idx0, idx1 = get_table_location(lines, 'astropy-roles')
    orig_indent = get_indent(lines[idx0])

    outlines = lines[:idx0]

    # Plug in the roles table
    roles_out = StringIO()
    clean_kwargs = {'tags': ['a', 'span', 'sup', 'br'],
                    'attributes': {'a': ['href'],
                                   '*': ['style']},
                    'styles': ['color', 'font-style']}
    ascii.write(roles, roles_out, format='html',
                htmldict={'table_id': 'astropy-roles',
                          'raw_html_cols': ['Role', 'Lead', 'Deputy'],
                          'raw_html_clean_kwargs': clean_kwargs})

    roles_lines = roles_out.getvalue().splitlines()

    ridx0, ridx1 = get_table_location(roles_lines, 'astropy-roles')
    roles_indent = get_indent(roles_lines[ridx0])
    indent = orig_indent - roles_indent
    newlines = [' ' * indent + str(line) + os.linesep for line in roles_lines[ridx0:ridx1 + 1]]
    outlines += newlines

    outlines += lines[idx1 + 1:]

    return outlines
Example #16
def main(numIter=1000,screenSize=1024,
         numDiameter=20,minDiameter=0.1,maxDiameter=30,
         radialOrders=(1,)):
    """Run a set of simulations for different interferometer parameters"""
    results=[]
    for radialOrder in radialOrders:
        for dr0 in np.logspace(np.log10(minDiameter),np.log10(maxDiameter),
                               numDiameter):
            diameter=ChoosePupilDiameter(dr0,minR0=6.0)
            r=VisibilityStats(numTelescope=2,
                       pupilSize=diameter,
                       r0=float(diameter)/dr0,
                       radialOrder=radialOrder,
                       numIter=numIter,
                       screenSize=screenSize)
            r.update({'d/r0':dr0,
                      'radialOrder':radialOrder,
                      'numIter':numIter,
                      'pupilSize':diameter,
                      'screenSize':screenSize,})
            results.append(r)
    results=Table(results,names=("radialOrder","d/r0","Vsq","stdVsq","fibrePspec","stdFibrePspec","couple","stdCouple","numIter","pupilSize", "screenSize"))
    ascii.write(results,time.strftime("tmp%y%m%d-%H%M.dat"),
                format='fixed_width',
                bookend=False,
                delimiter=None,
                formats={"d/r0":"%8.4f",
                         "Vsq":"%10.4g",
                         "stdVsq":"%10.4g",
                         "fibrePspec":"%10.4g",
                         "stdFibrePspec":"%10.4g",
                         "couple":"%10.4g",
                         "stdCouple":"%10.4g"})
Example #17
File: utils.py Project: bmorris3/freckles
def construct_standard_star_table(stars, write_to=results_dir):

    mwo_dict = dict()
    apo_dict = dict()
    for star in stars:
        mwo_dict[star.name.upper()] = []
        apo_dict[star.name.upper()] = []

    names = list(mwo_dict.keys())

    for star_name in names:
        all_obs_this_star = [star for star in stars if star.name.upper() == star_name]
        mwo_dict[star_name] = combine_measurements([s.s_mwo for s in all_obs_this_star])
        apo_dict[star_name] = combine_measurements([s.s_apo for s in all_obs_this_star])

    sp_types = []
    s_mwo = []
    s_apo = []
    n_obs = []

    for star in names:
        s_mwo.append(mwo_dict[star].to_latex())
        s_apo.append(apo_dict[star].to_latex())

        n_obs.append(apo_dict[star].meta)

        customSimbad = Simbad()
        customSimbad.add_votable_fields('sptype')
        sp_type = customSimbad.query_object(star)['SP_TYPE'][0]
        sp_types.append(sp_type)

    print('N_stars = {0}'.format(len(names)))
    print('N_spectra = {0}'.format(sum(n_obs)))
    print('N_sptype=G = {0}'.format(len([spt for spt in sp_types if spt.startswith(b'G')])))
    print('N_sptype=K = {0}'.format(len([spt for spt in sp_types if spt.startswith(b'K')])))
    print('N_sptype=M = {0}'.format(len([spt for spt in sp_types if spt.startswith(b'M')])))
    # for star in stars:
    #     names.append(star.name.upper())
    #     customSimbad = Simbad()
    #     customSimbad.add_votable_fields('sptype')
    #     sp_type = customSimbad.query_object(star.name)['SP_TYPE'][0]
    #     sp_types.append(sp_type)
    #
    #     s_mwo.append(star.s_mwo.to_latex())
    #     s_apo.append(star.s_apo.to_latex())

    standard_table = Table([names, sp_types, s_mwo, s_apo, n_obs],
                           names=['Star', 'Sp.~Type', '$S_{MWO}$', '$S_{APO}$', '$N$'])

    standard_table.sort(keys='$S_{MWO}$')

    latexdict = dict(col_align='l l c c c', preamble=r'\begin{center}',
                     tablefoot=r'\end{center}',
                     caption=r'Stars observed to calibrate the $S$-index '
                             r'(see Section~\ref{sec:def_s_index}). \label{tab:cals}',
                     data_start=r'\hline')

    # output_path,
    ascii.write(standard_table, format='latex', latexdict=latexdict,
                output='cal_stars.tex')
Example #18
File: test.py Project: jmeyers314/linmix
def generate_test_data():
    alpha = 4.0
    beta = 3.0
    sigsqr = 0.5

    # GMM with 3 components for xi
    xi = np.random.normal(loc=1.0, scale=1.0, size=9)
    xi = np.concatenate([xi, np.random.normal(loc=2.0, scale=1.5, size=20)])
    xi = np.concatenate([xi, np.random.normal(loc=3.0, scale=0.5, size=30)])
    eta = np.random.normal(loc=alpha+beta*xi, scale=np.sqrt(sigsqr))

    # Let's mix in some weird measurement uncertainties:
    xsig = 0.25 * np.sin(np.arange(len(xi))) + 0.5
    ysig = 0.25 * np.cos(np.arange(len(eta)))**2 + 0.5
    x = np.random.normal(loc=xi, scale=xsig)
    y = np.random.normal(loc=eta, scale=ysig)

    # And put in zero uncertainty in a few of these.
    wzx = np.random.choice(np.arange(len(xi)), size=5, replace=False)
    xsig[wzx] = 0.0
    wzy = np.random.choice(np.arange(len(eta)), size=5, replace=False)
    ysig[wzy] = 0.0

    # And censor all the ydata less than 10, unless the yerr is 0
    w10 = (y < 10) & (ysig != 0)
    y[w10] = 10
    delta = np.ones((len(x),), dtype=int)  # should really be bool, but ints are easier
    delta[w10] = 0

    out = Table([x, y, xsig, ysig, delta], names=['x', 'y', 'xsig', 'ysig', 'delta'])
    import astropy.io.ascii as ascii
    ascii.write(out, 'test.dat')
Example #19
def test_columns_names_with_formats(formats, issues_warning):
    """Test the fix for #4508."""
    t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
    with catch_warnings(AstropyWarning) as ASwarn:
        out = StringIO()
        ascii.write(t, out, formats=formats)
    assert (issues_warning == (len(ASwarn) == 1))
Example #20
def test_latex_units():
    """
    Check to make sure that Latex and AASTex writers attempt to fall
    back on the **unit** attribute of **Column** if the supplied
    **latexdict** does not specify units.
    """
    t = table.Table([table.Column(name='date', data=['a', 'b']),
               table.Column(name='NUV exp.time', data=[1, 2])])
    latexdict = copy.deepcopy(ascii.latexdicts['AA'])
    latexdict['units'] = {'NUV exp.time': 's'}
    out = StringIO()
    expected = '''\
\\begin{table}{cc}
\\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}}
\\startdata
a & 1 \\\\
b & 2
\\enddata
\\end{table}
'''.replace('\n', os.linesep)

    ascii.write(t, out, format='aastex', latexdict=latexdict)
    assert out.getvalue() == expected
    # use unit attribute instead
    t['NUV exp.time'].unit = units.s
    t['date'].unit = units.yr
    out = StringIO()
    ascii.write(t, out, format='aastex', latexdict=ascii.latexdicts['AA'])
    assert out.getvalue() == expected.replace(
        'colhead{s}', r'colhead{$\mathrm{s}$}').replace(
        'colhead{ }', r'colhead{$\mathrm{yr}$}')
Example #21
def write_skycoord_table(data, cube_ref, **kwargs):
    """
    Writes out a text file with flattened coordinates of the cube
    stacked with input array data. Additional arguments are passed
    to astropy's text writing function.

    TODO: add a useful `names` keyword?

    See astropy.io.ascii.write docstring for more info.

    Parameters
    ----------
    data : array-like structure of the same xy-grid as cube_ref.

    cube_ref : a cube file to get the coordinate grid from.

    """
    from astropy.table import Table
    from astropy.io import ascii
    from spectral_cube import SpectralCube

    cube = SpectralCube.read(cube_ref)

    flat_coords = [cube.spatial_coordinate_map[i].flatten() for i in [1,0]]
    # TODO: finish this up for multiple components
    #n_repeat = np.prod(np.array(data).shape)%np.prod(cube.shape[1:])+1

    table = Table(np.vstack(flat_coords +
        [np.array(xy_slice).flatten() for xy_slice in data]).T)

    ascii.write(table, **kwargs)
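A hedged call; the map file and cube name are illustrative, and keyword arguments (here `output` and `overwrite`) pass straight through to ascii.write:

import numpy as np
mom0 = np.load('mom0.npy')  # hypothetical 2D map on the cube's spatial grid
write_skycoord_table([mom0], 'cube.fits', output='coords_mom0.dat', overwrite=True)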
Example #22
def add_stars(slit_num, ax_stis, ax_wfc3, im_stis, im_wfc3, cenwave):
    '''
    #ask if any stars need to be added
    '''
    tbdata_current = ascii.read(get_filename(slit_num, cenwave))
    if slit_num == 1:
        tbdata_previous = None
        tbdata_next = ascii.read(get_filename(slit_num+1, cenwave))
    elif slit_num == 17:
        tbdata_next = None
        tbdata_previous = ascii.read(get_filename(slit_num -1, cenwave))    
    else:
        tbdata_previous = ascii.read(get_filename(slit_num -1, cenwave))    
        tbdata_next = ascii.read(get_filename(slit_num+1, cenwave))
    ax_stis.set_ylim(900, 1040)
    ax_wfc3.set_ylim(900, 1040)
    pyplot.draw()
    to_do_flag = raw_input('Would you like to adjust the contrast (c), enter a star to be added to this slit (a), move down the slit (m), or move to next slit (q) ? ')
    while to_do_flag != 'q':
        if to_do_flag == 'c':
            set_contrast(im_stis)
            set_contrast(im_wfc3)
        elif to_do_flag == 'a':
            tbdata_current = add_star_to_slit(slit_num, tbdata_current, tbdata_previous, tbdata_next)
        elif to_do_flag == 'm':
            ax_stis = move_down_slit(ax_stis)
            ax_wfc3 = move_down_slit(ax_wfc3)
        to_do_flag = raw_input('Would you like to adjust the contrast (c), enter a star to be added to this slit (a), move down the slit (m), or finish (q) ? ')
    if slit_num < 10:
        shutil.copyfile('slit0{}_{}_phot.dat'.format(int(slit_num), cenwave), 'slit{}_{}_phot_no_split.dat'.format(int(slit_num), cenwave))
        ascii.write(tbdata_current, 'slit0{}_{}_phot.dat'.format(int(slit_num), cenwave))
    else:
        shutil.copyfile('slit{}_{}_phot.dat'.format(int(slit_num), cenwave), 'slit{}_{}_phot_no_split.dat'.format(int(slit_num), cenwave))
        ascii.write(tbdata_current, 'slit{}_{}_phot.dat'.format(int(slit_num), cenwave))
Example #23
File: downsampler.py Project: ug-hj/PhD
def save_reals(reals, path, zrange=None, rad=0):
	fpath, zrang, radfac = (0, 0, 0)
	if path.endswith('fits'):
		asc_out = path[:-4]+'asc'
		fpath = True
	elif path.endswith('asc'):
		asc_out = path
	if zrange is not None:
		zstring = '_z%s-%s'%(zrange[0],zrange[1])
		asc_out = asc_out[:-4]+zstring+'.asc'
		zrang = True
	colnames = ['# RA','DEC','z']
	if rad & any((reals.T[:2]>2*np.pi).flatten()):
		reals.T[:2] = reals.T[:2] * (np.pi/180)
		radfac = True
	if fpath or zrang or radfac:
		try:
			print('saving reals.asc..')
			ascii.write(reals, asc_out, names=colnames, delimiter='\t', overwrite=1)
		except ValueError:
			print('..with weights..')
			colnames += ['weights']
			ascii.write(reals, asc_out, names=colnames, delimiter='\t', overwrite=1)
	else:
		print('nothing changed, not re-saving reals')
Example #24
def read_rename(filename, rename_dict=None, date_format=None,
                remove_language=False):
    """Update columns names and their data if they are dates from csv files

    inputs
    ------
    filename : str
      name of the file that we want to modify

    rename_dict : dict
      dictionary with the old and new values as key/values respectively

    date_format : dict
      dictionary with the field names and the format in which they date is in
      the file

    remove_language : bool
      whether the language column should be removed
    """
    data = ascii.read(filename, delimiter=';')
    if rename_dict:
        for k, v in rename_dict.items():
            data[k].name = v
    if date_format:
        for k, v in date_format.items():
            column_i = [datetime.datetime.strptime(x, v) for x in data[k]]
            data.remove_column(k)
            data.add_column(Column(data=column_i, name=k))
    if ('language' in data.keys()) and remove_language:
        data.remove_column('language')
    ascii.write(data, filename, format='csv', delimiter=';', overwrite=True)
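A hypothetical call that renames a column and normalizes a date field in place (file and column names are illustrative):

read_rename('events.csv', rename_dict={'fecha': 'date'},
            date_format={'date': '%d/%m/%Y'}, remove_language=True)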
Example #25
def datafile(obj_names,centroids,tcentroids,outfile):
    names    = ['File Name','x-centroid','y-centroid','x-aligned','y-aligned']
    data  = [obj_names,centroids[:,0],centroids[:,1],tcentroids[:,0],tcentroids[:,1]]
    table = Table(data, names=names)
    ascii.write(table, outfile, formats={'x-centroid': '%.3f',\
                'y-centroid': '%.3f','x-aligned': '%.3f','y-aligned': '%.3f'})
    return
Example #26
File: MosaicAuto.py Project: pllim/stginga
    def save_imlist(self):
        """Save selected image filename(s) to a plain text file.
        If no image selected, no output is generated.

        This can be re-implemented by sub-class if a different
        output format is needed.

        """
        imlist = self.get_selected_paths()

        if len(imlist) == 0:
            s = 'No image selected!'
            self.logger.error(s)
            self.update_status(s)
            return

        fname = Widgets.SaveDialog(
            title='Save image list', selectedfilter='*.txt').get_path()
        if not fname:  # Cancel
            return
        if os.path.exists(fname):
            s = '{0} will be overwritten'.format(fname)
            self.logger.warn(s)
        ascii.write(
            [imlist], fname, names=['IMAGE'], format='commented_header')
        s = 'Image list saved'
        self.logger.info(s)
        self.update_status(s)
Example #27
    def GetTeXTable(self,tableName="params.tex"):

        marg = self.anal.get_stats()["marginals"]

        tmp = []
        for params,val,err in zip(self.parameters,self.bestFit,marg):
            err = err["1sigma"]
            print "\t%s:\t%.2f\t+%.2f -%.2f"%(params,val,err[1]-val,val-err[0])
            tmp.append(['%s'%params,'%.2f'%val,'%.2f'%(err[1]-val),'%.2f'%(val-err[0])])

        data = {}
        for t in tmp:
            data[t[0]]=["$%s^{+%s}_{-%s}$"%(t[1],t[2],t[3])]

        ascii.write(data,output=tableName,Writer=ascii.Latex,
                    latexdict={'preamble': r'\begin{center}',
                               'tablefoot': r'\end{center}',
                               'tabletype': 'table*',
                               'header_end': '\\hline \\hline \\hspace{3mm}',
                               'caption': self.modName})
Example #28
def DoPhotometryofNight(PC,night):
    """ Does photometry of all images in the night as well as create the final magnitude catlog table """
    with open(os.path.join(PC.OUTDIR,night,PC.OUTFITSFILELIST),'r') as outfitsfilelist:
        # Skip blank lines and Commented out lines with #
        imgfilterlist = [tuple(imageLINE.rstrip().split()) for imageLINE in outfitsfilelist 
                         if ((imageLINE.strip() is not '') and (imageLINE[0] !='#'))]

    for OutFinalImage,Filtr in imgfilterlist:
        MagTable = RunPhotometryofImage(OutFinalImage,Filtr)

        ## Append differential 2MASS magnitude of all sources to table
        TableHeaders = ['ra','dec','mag','magerror','Qflag']#filter,epoch,ImgFile
        filter_col = Column(name='Filter', data=[Filtr]*len(MagTable))
        epoch_col = Column(name='Epoch', data=[epoch]*len(MagTable))
        imgfile_col = Column(name='ImgFile', data=[OutFinalImage]*len(MagTable))
        TableToOutput = MagTable[TableHeaders]
        TableToOutput.add_columns([filter_col, epoch_col, imgfile_col])

        # Now append the Full output table also to an ascii file.
        OutputTableFilename = os.path.join(PC.OUTDIR,PC.OUTPUTFILE)
        try :
            PreviousFullTable = ascii.read(OutputTableFilename,delimiter=',',
                                           format='commented_header',fill_values=('--','0'))
        except IOError :
            logger.info('No previous photometry output found, hence we will be creating'
                        ' a new file {0}.'.format(OutputTableFilename))
            OutputTableToWrite = TableToOutput
        else :
            OutputTableToWrite = table.vstack([PreviousFullTable,TableToOutput], join_type='outer')
        # Writing the final appended table
        ascii.write(OutputTableToWrite, OutputTableFilename,delimiter=',',format='commented_header')

        logger.info("Photometry of {0} over.".format(OutFinalImage))
Example #29
def readPadova(filename,request,outfile,req_age):
    '''
    Program to read a Padova isochrone file and output desired values as a txt file table

    Args:
        filename: file containing isochrone data, identified as a string
        request: list of values identified by column header (list of strings); pass None for no request
        outfile: output filename string, appended with .txt
        req_age: requested age as a float, given as log(age/yr)

    Returns:
        Txt file containing the generated table
    '''

    colnames = ['Z','log(age/yr)','M_ini','M_act','logL/Lo','logTe','logG','mbol','U','B','V','R','I','J','H','K','int_IMF','stage']
    data = ascii.read(filename,names = colnames)

    #assigns default table values if None is given
    if request != None:
        req_cols = request
    else:
        req_cols = ['Z','log(age/yr)','logTe','mbol','int_IMF','stage']

    #defines the output table as consisting of the desired, or default, columns
    output = Table()
    for colname in req_cols:
        output[colname] = data[colname]

    #removes undesirable ages from the table
    if req_age != None:
        selection = np.where(data['log(age/yr)']==req_age)
        output = output[selection]
    ascii.write(output,outfile+'.txt')
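A hedged example, with an illustrative isochrone file name:

# Keep the default columns and select the log(age/yr) = 8.0 isochrone
readPadova('padova_iso.dat', None, 'iso_age8', 8.0)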
Example #30
def print_table(fname):
    tab = pickle.load(open(fname))
    data = Table()
    data.add_column(
        Column(data=processes_pretty, name='process')
    )
    for rowname, row in zip([t[0] for t in tab], tab):
        data.add_column(
            Column(data=row[1:], name=rownames[rowname])
        )
        data[rownames[rowname]].format = '%.0f'

    class MyLatex(ascii.Latex):
        def __init__(self, **kwargs):
            ascii.Latex.__init__(self, **kwargs)
            #self.header.data = ['process'] + order
            self.latex['header_start'] = '\hline'
            self.latex['header_end'] = '\hline'
            self.latex['preamble'] = r'\begin{center}'
            self.latex['tablefoot'] = r'\end{center}'
            self.latex['data_end'] = r'\hline'
            self.header.comment = r"\%Generated by src/yields/print_tables.py"

    print r"% BEGIN AUTOGENERATED, NOT FOR CHANGING"

    print r"% Generated by src/yields/print_tables.py on " + str(datetime.datetime.utcnow())
    print r"% Hostname: " + socket.gethostname() + " User: "******"% Input file:  " + fname
    ascii.write(data, Writer=MyLatex, latexdict = {'col_align':'|c|cccccc|c|'})
    print r"% END AUTOGENERATED"
Example #31
for col in t.columns:
    if col in ['neighbors']:
        t[col] = [[round(cc, 4) for cc in c] for c in t[col]]
    if col in ['diff', 'ivar']:
        todel.append(col)
for col in todel:
    del (t[col])

ncols = 0
for tup in t.dtype.descr:
    ncols += 1
    if len(tup) == 2 and tup[1][1] == 'f':
        t[tup[0]] = [round(c, 4) for c in t[tup[0]]]
ascii.write(
    t,
    sys.argv[2],
    names=[r"\texttt{{{}}}".format(n.replace("_", r"\_")) for n in t.columns],
    format='aastex',
    overwrite=True,
    formats={},
    col_align="l" * ncols)

#for col in t.columns:
#    if col in ['neighbors', 'weights', 'diff', 'err', 'loss']:
#        t[col] = [np.array_str(c, precision=3) for c in t[col]]
#
#df = t.to_pandas()
#with open(sys.argv[2], 'w') as f:
#    f.write(df.to_latex(index=False, na_rep="",
#            float_format="{:0.2f}".format))
Example #32
def make_validation_table(fitspath: str, vmin_4363SN=3, vmin_5007SN=100,
                          vmax_4363sig=1.6, rlmin_4363SN=3,
                          rlmax_4363sig=1.6, rlmin_5007SN=100):
    """
    This function creates a validation table for a given binning set.
    The validation table contains an OIII4363 detection column where 1.0
    means detection, 0.5 means non-detection with reliable OIII5007, and
    0.0 means unreliable non-detection. This function will be run every
    time the analysis is completed and will create a validation table
    for every analysis.

    Usage:
        valid_table.make_validation_table(fitspath, bin_type_str)

    :param fitspath: Full file path where the input file is and where the
                     output file will be placed.
    :param vmin_4363SN: int. minimum OIII4363 S/N for valid detection
    :param vmin_5007SN: int. minimum OIII5007 S/N for valid detection
    :param vmax_4363sig: int. maximum OIII4363 sigma for valid detection
    :param rlmin_4363SN: int. minimum OIII4363 S/N for robust limit
    :param rlmax_4363sig: int. maximum OIII4363 sigma for robust limit
    :param rlmin_5007SN: int. minimum OIII5007 S/N for robust limit

    Outputs:
      fitspath + 'bin_validation.tbl'
        Validation table containing bin IDs; number of galaxies in each bin;
        and column indicating OIII4363 detection/non-detection,
        OIII4363_Flux_Observed, OIII4363_S/N
    """

    bin_table = asc.read(fitspath + filename_dict['bin_info'])
    em_table = asc.read(fitspath + filename_dict['bin_fit']) 

    bin_ID = em_table['bin_ID'].data
    raw_OIII4363 = em_table['OIII_4363_Flux_Observed'].data
    O_4363_SN = em_table['OIII_4363_S/N'].data
    O_4363_sigma = em_table['OIII_4363_Sigma'].data
    O_5007_SN = em_table['OIII_5007_S/N'].data
    
    N_stack = bin_table['N_stack'].data
    Hgamma_SN = em_table['HGAMMA_S/N'].data
    Hgamma = em_table['HGAMMA_Flux_Observed'].data

    detection  = np.zeros(len(bin_ID))
    OIII4363 = np.zeros(len(bin_ID))
    up_limit = (Hgamma/Hgamma_SN) * 3

    valid_stacks_idx = np.where((O_4363_SN >= vmin_4363SN) &
                                (O_5007_SN > vmin_5007SN) &
                                (O_4363_sigma < vmax_4363sig))[0]
    reliable_5007_stacks = np.where((O_4363_sigma < rlmax_4363sig) &
                                    (O_5007_SN > rlmin_5007SN))[0]
    wide_line_valid = np.where((O_4363_SN >= rlmin_4363SN) &
                               (O_5007_SN > rlmin_5007SN) &
                               (O_4363_sigma >= rlmax_4363sig))[0]
    detection[reliable_5007_stacks] = 0.5
    detection[wide_line_valid] = 0.5
    detection[valid_stacks_idx] = 1
    print(detection)

    for ii in range(len(OIII4363)):
        if detection[ii] == 1:
            OIII4363[ii] = raw_OIII4363[ii]
        if detection[ii] == 0.5:
            OIII4363[ii] = up_limit[ii]
        if detection[ii] == 0:
            OIII4363[ii] = up_limit[ii]

    ver_tab = fitspath + filename_dict['bin_valid']
    tab1 = Table([bin_ID, N_stack, detection, OIII4363, O_4363_SN],
                 names=valid_table_names0)
    asc.write(tab1, ver_tab, format='fixed_width_two_line')
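A hypothetical run over one analysis directory; the trailing separator matters because paths are joined by concatenation, and `filename_dict` is assumed to be configured in the module:

make_validation_table('analysis/run1/', vmin_4363SN=3, vmin_5007SN=100)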
Example #33
def main():

    fullcat = ID.inputCatalogue(runtime)

    fullcat.runInput(data)
    fullcat.clean = args.clean
    fullcat.suppress = args.suppress
    fullcat.progress = args.progress
    fullcat.saveprogress = args.saveprogress
    fullcat.multiop = args.multiop
    fullcat.latex = args.latex

    ## create a temp file for all object files
    uf.checkpath('%sstackertemp/' % fullcat.outloc)
    if uf.checkkeys(data, 'BinYN'):
        binyn = data["BinYN"]
    else:
        binyn = raw_input(
            '\nWould you like to stack in various bins? [y/n] ').lower()

    if binyn == 'y':
        bincatalogue = BD.binnedCatalogue(fullcat)
        try:
            bincatalogue.determineBins(data)
        except (SystemExit, KeyboardInterrupt):
            logger.error('Early exit', exc_info=False)
            uf.exit(fullcat)
        except:
            logging.warning(
                "Couldn't determine the catalogue for the different bins:",
                exc_info=True)
            uf.earlyexit(fullcat)

        if fullcat.uncert == 'y':
            repeat = 2
        else:
            repeat = 1

        counter = 0
        # print(bincatalogue.catalogue)
        while counter < repeat:
            mccount = counter
            for m in range(0, len(bincatalogue.binpartindicies) - 1):
                if counter > 0:
                    bincatalogue = pck.load(
                        open(
                            '%sstackertemp/catalogue_obj.pkl' % fullcat.outloc,
                            'rb'))
                else:
                    h = 0
                try:
                    bincatalogue.createCatalogue(bincatalogue.binpartindicies,
                                                 bincatalogue.binnedtable, m,
                                                 counter)
                except KeyboardInterrupt:
                    logger.error('Early exit', exc_info=False)
                    uf.earlyexit(fullcat)
                    raise sys.exit()
                except SystemExit:
                    logger.error('Early exit', exc_info=False)
                    uf.earlyexit(fullcat)
                    raise sys.exit()
                except:
                    logger.error(
                        'Exception occurred trying to create the binned catalogue.',
                        exc_info=True)
                    uf.earlyexit(fullcat)
                    raise sys.exit()

                if len(bincatalogue.catalogue) < 2:
                    logger.info('Bin %i has too few objects' % (m + 1))
                    pass
                else:
                    try:
                        catalogue = pipeline(bincatalogue, config, data, 'bin',
                                             m + 1, mccount)
                        if counter == 0:
                            bincatalogue = bincatalogue + catalogue
                        else:
                            pass
                    except TypeError:
                        logger.error('Problem with bin %i. Skipping' % (m + 1))
            if (counter == 0):
                bincatalogue.outcatalogue[
                    'Stellar Mass'].unit = bincatalogue.fullcatalogue[
                        'Stellar Mass'].unit
                bincatalogue.outcatalogue[
                    'Other Data'].unit = bincatalogue.fullcatalogue[
                        'Other Data'].unit
                bincatalogue.outcatalogue[
                    'Integrated Flux'].unit = astun.Jy * astun.km / astun.s

                pck.dump(
                    bincatalogue,
                    open(
                        '%sstackertemp/catalogue_obj.pkl' %
                        bincatalogue.outloc, 'wb'))

                catalogue = uf.maskTable(bincatalogue.outcatalogue)
                if len(catalogue) > 1:
                    catalogue = asttab.unique(catalogue, keys='Object ID')
                else:
                    pass
                catalogue.sort('Bin Number')
                astasc.write(
                    catalogue,
                    bincatalogue.outloc +
                    'Stacked_Catalogue_%s.csv' % bincatalogue.origruntime,
                    format='ecsv')
                fullcat.updateconfig(data, bincatalogue.fullmaxgalw,
                                     bincatalogue.fullrebinstatus,
                                     bincatalogue.fullsmoothtype,
                                     bincatalogue.fullsmoothwin,
                                     bincatalogue.fullfuncnum,
                                     bincatalogue.fulloptnum)
            else:
                pass
            saveprogress = None
            counter += 1
    else:
        fullcat.catalogue.add_column(
            asttab.Column(name='Bin Number',
                          data=np.ones(len(fullcat.catalogue), dtype=int)))
        fullcat = pipeline(fullcat, config, data, 'normal')
        try:
            fullcat.updateconfig(data, fullcat.maxgalw.value,
                                 fullcat.rebinstatus, fullcat.smoothtype,
                                 fullcat.smoothwin, fullcat.funcnum,
                                 fullcat.optnum)
            fullcat.outcatalogue['Stellar Mass'].unit = fullcat.catalogue[
                'Stellar Mass'].unit
            fullcat.outcatalogue['Other Data'].unit = fullcat.catalogue[
                'Other Data'].unit
            fullcat.outcatalogue[
                'Integrated Flux'].unit = astun.Jy * astun.km / astun.s
            catalogue = uf.maskTable(fullcat.outcatalogue)
            catalogue = asttab.unique(catalogue, keys='Object ID')
            catalogue.sort('Bin Number')
            astasc.write(catalogue,
                         fullcat.outloc +
                         'Stacked_Catalogue_%s.csv' % fullcat.runtime,
                         format='ecsv')
            logging.info('Written Stacked Catalogue to file.')
        except (SystemExit, KeyboardInterrupt):
            logger.error('Early exit', exc_info=False)
            uf.exit(fullcat)
        except:
            logging.warning("Struggled to save the catalogue files.",
                            exc_info=True)
            uf.earlyexit(fullcat)

    ## inform the user that the stacking has finished
    if fullcat.suppress != 'hide':
        outloc = uf.bashfriendlypath(fullcat.outloc)
        os.system('open ' + outloc)
    else:
        pass

    print('\nStacker has finished.\n')
    uf.exit(fullcat)
    return
Example #34
def main():
    ###make sure to change these when running in a new environment!###
    #location of data directory
    filepath = cu.get_output_path() + 'processed_data/hearin_mocks/custom_catalogues/'
    savepath = cu.get_output_path() + 'processed_data/hearin_mocks/custom_catalogues/'
    #################################################################

    catalogues = [
        'Mr19_age_distribution_matching_mock', 'sm9.8_age_matching_mock'
    ]

    catalogue = sys.argv[1]
    f = h5py.File(filepath + catalogue + '.hdf5', 'r')
    GC = f.get(catalogue)
    GC = np.array(GC)

    #make new catalogue and copy over values from original catalogue
    dtype = GC.dtype.descr
    for d in dtype:
        print d
    dtype = np.dtype(dtype)
    GC_new = np.recarray((len(GC), ), dtype=dtype)
    GC_new.fill(0.0)
    GC_new = np.array(GC, copy=True)

    #identify central galaxies, host haloes
    centrals = np.where(
        GC['ID_host'] == -1)[0]  #indices of the central galaxies
    print 'number of centrals, host haloes:', len(centrals)
    satellites = np.where(
        GC['ID_host'] != -1)[0]  #indices of the central galaxies
    print 'number of satellites, sub-haloes:', len(satellites)

    #define mass bins, and which central are in each mass bin
    mass_bins = np.arange(8.0, 16.0, 0.1)  #log mass bins
    #group histogram by log(host_mass)
    mass_hist, bins = np.histogram(GC['M_host'][centrals], bins=mass_bins)
    #indices of groups in log(host_mass) bins
    mass_bin_ind = np.digitize(GC['M_host'][centrals], bins=mass_bins)

    #go through each mass bin
    for i in range(0, len(mass_bins) - 1):
        print i, 'mass bin:', mass_bins[i], mass_bins[i + 1]
        ind = np.where(mass_bin_ind == i +
                       1)[0]  #indices of host haloes in this mass bin
        if len(ind) > 0:  #if there are any haloes in the mass bin
            print 'number of groups:', len(ind)
            ids = GC['ID_halo'][centrals[ind]]
            sat_galaxy_members = np.in1d(
                GC['ID_host'], ids)  #satellite galaxies in the mass bin
            sat_galaxy_members = np.where(sat_galaxy_members)[
                0]  #indicies of galaxies
            cen_galaxy_members = np.in1d(
                GC['ID_halo'], ids)  #central galaxies in the mass bin
            cen_galaxy_members = np.where(cen_galaxy_members)[
                0]  #indicies of galaxies
            galaxy_members = np.hstack(
                (sat_galaxy_members, cen_galaxy_members))
            print 'number of galaxies:', len(galaxy_members)
            satellite_members = np.where(
                GC['ID_host'][galaxy_members] != -1)[0]  #satellites
            satellite_members = galaxy_members[
                satellite_members]  #indices of satellites
            central_members = np.where(
                GC['ID_host'][galaxy_members] == -1)[0]  #centrals
            central_members = galaxy_members[
                central_members]  #indices of centrals
            print 'number of centrals:', len(central_members)
            print 'number of satellites:', len(satellite_members)
            print 'check:', len(central_members) + len(
                satellite_members) == len(galaxy_members)
            #shuffle list of host haloes in mass bin
            shuffle = np.random.permutation(
                np.arange(0, len(central_members), 1))
            shuffled_central_members = central_members[shuffle]
            unshuffle = np.arange(0, len(central_members), 1)
            #shuffle centrals --> leave halo props alone, change gal props
            #GC_new['M_r,0.1'][central_members] = GC['M_r,0.1'][shuffled_central_members]
            #GC_new['g-r'][central_members]     = GC['g-r'][shuffled_central_members]
            #GC_new['M_star'][central_members]  = GC['M_star'][shuffled_central_members]
            #shuffle satellite systems --> leave gal props alone, change halo props
            for i in range(0, len(satellite_members)):
                print "\r", i,
                sys.stdout.flush()
                old_host_ID = GC['ID_host'][
                    satellite_members[i]]  #old host halo ID
                old_host_ind = np.where((GC['ID_halo'] == old_host_ID
                                         ))[0]  #index of old host central
                new_host_ind = np.where(
                    shuffled_central_members == old_host_ind)[
                        0]  #location in central members list
                new_host_ind = central_members[new_host_ind]  #new host index
                #assign a new host properties
                GC_new['ID_host'][
                    satellite_members[i]] = GC['ID_halo'][new_host_ind]
                GC_new['M_host'][
                    satellite_members[i]] = GC['M_host'][new_host_ind]
                GC_new['M_vir'][
                    satellite_members[i]] = GC['M_vir'][new_host_ind]
                GC_new['R200'][satellite_members[i]] = GC['R200'][new_host_ind]
                #calculate satellite positions
                x_new_cen = GC['x'][new_host_ind]
                y_new_cen = GC['y'][new_host_ind]
                z_new_cen = GC['z'][new_host_ind]
                x_old_cen = GC['x'][old_host_ind]
                y_old_cen = GC['y'][old_host_ind]
                z_old_cen = GC['z'][old_host_ind]
                GC_new['x'][satellite_members[i]] = GC_new['x'][
                    satellite_members[i]] - x_old_cen + x_new_cen
                GC_new['y'][satellite_members[i]] = GC_new['y'][
                    satellite_members[i]] - y_old_cen + y_new_cen
                GC_new['z'][satellite_members[i]] = GC_new['z'][
                    satellite_members[i]] - z_old_cen + z_new_cen
                #calculate satellite velocities
                Vx_new_cen = GC['Vx'][new_host_ind]
                Vy_new_cen = GC['Vy'][new_host_ind]
                Vz_new_cen = GC['Vz'][new_host_ind]
                Vx_old_cen = GC['Vx'][old_host_ind]
                Vy_old_cen = GC['Vy'][old_host_ind]
                Vz_old_cen = GC['Vz'][old_host_ind]
                GC_new['Vx'][satellite_members[i]] = GC_new['Vx'][
                    satellite_members[i]] - Vx_old_cen + Vx_new_cen
                GC_new['Vy'][satellite_members[i]] = GC_new['Vy'][
                    satellite_members[i]] - Vy_old_cen + Vy_new_cen
                GC_new['Vz'][satellite_members[i]] = GC_new['Vz'][
                    satellite_members[i]] - Vz_old_cen + Vz_new_cen
            print ' '

    #Fix any boundary condition issues, Lbox=250 Mpc
    fix = np.where(GC_new['x'] < 0.0)[0]
    GC_new['x'][fix] = 250.0 - np.absolute(GC_new['x'][fix])
    fix = np.where(GC_new['y'] < 0.0)[0]
    GC_new['y'][fix] = 250.0 - np.absolute(GC_new['y'][fix])
    fix = np.where(GC_new['z'] < 0.0)[0]
    GC_new['z'][fix] = 250.0 - np.absolute(GC_new['z'][fix])
    fix = np.where(GC_new['x'] > 250.0)[0]
    GC_new['x'][fix] = GC_new['x'][fix] - 250.0
    fix = np.where(GC_new['y'] > 250.0)[0]
    GC_new['y'][fix] = GC_new['y'][fix] - 250.0
    fix = np.where(GC_new['z'] > 250.0)[0]
    GC_new['z'][fix] = GC_new['z'][fix] - 250.0

    catalogue = catalogue + '_satsys_shuffle'
    print 'saving hdf5 version of the catalogue...'
    filename = catalogue
    print filename
    f = h5py.File(savepath + filename + '.hdf5', 'w')
    dset = f.create_dataset(catalogue, data=GC_new)
    f.close()

    print 'saving ascii version of the catalogue...'
    print filename
    data_table = table.table.Table(data=GC_new)
    ascii.write(data_table, savepath + filename + '.dat')
    print data_table
Example #35
                                                                       n1=3.0,
                                                                       n2=1.5,
                                                                       Xe=0.7,
                                                                       Ye=0.3)
        r, rho, u, P, M = nps.joinPolytropes(r1, rho1, M1, K1, n1, r2, rho2,
                                             M2, K2, n2)

    else:

        r, rho, u, P, M = nps.Get_Polytrope(M_star, R_star, n1)

    # ===================================================================
    # this Table is used later to create the SPH particle distribution

    dat = Table([r, rho, u, P, M], names=('r', 'rho', 'u', 'P', 'M'))
    ascii.write(dat, 'PolyStar_cgs.dat')

    if scale_to_units:
        dat_solar = Table([
            r / DistUnit, rho / DensUnit, u / E_perMassUnit, P / P_Unit,
            M / MassUnit
        ],
                          names=('r', 'rho', 'u', 'P', 'M'))
        ascii.write(dat_solar, 'PolyStar_scaled.dat')

else:

    # print np.shape(r), np.shape(rho), np.shape(u), np.shape(P), np.shape(M)
    dat = Table([r, rho, u, P, M], names=('r', 'rho', 'u', 'P', 'M'))
    ascii.write(dat, 'StarProfile_cgs.dat')
Example #36
def pick_positions(filename, separation, refimage=None):
    """
    Assigns positions to fake star list generated by pick_models

    INPUTS:
    -------

    filename:   string
                Name of AST list generated by pick_models
    separation: float
                Minimum pixel separation between AST and star in photometry 
                catalog provided in the datamodel.
    refimage:   Name of the reference image.  If supplied, the method will use the 
                reference image header to convert from RA and DEC to X and Y.

    OUTPUTS:
    --------

    Ascii table that replaces [filename] with a new version of [filename] that contains the necessary
    position columns for running the ASTs through DOLPHOT
    """

    noise = 3.0  #Spreads the ASTs in a circular annulus of 3 pixel width instead of all being
    #precisely [separation] from an observed star.

    catalog = datamodel.get_obscat(datamodel.obsfile, datamodel.filters)
    colnames = catalog.data.columns

    if 'X' in colnames or 'x' in colnames:
        if 'X' in colnames:
            x_positions = catalog.data['X'][:]
            y_positions = catalog.data['Y'][:]
        if 'x' in colnames:
            x_positions = catalog.data['x'][:]
            y_positions = catalog.data['y'][:]
    else:
        if refimage:
            if 'RA' in colnames or 'ra' in colnames:
                if 'RA' in colnames:
                    ra_positions = catalog.data['RA'][:]
                    dec_positions = catalog.data['DEC'][:]
                if 'ra' in colnames:
                    ra_positions = catalog.data['ra'][:]
                    dec_positions = catalog.data['dec'][:]
            else:
                raise RuntimeError(
                    "Your catalog does not supply X, Y or RA, DEC information for spatial AST distribution"
                )

        else:
            raise RuntimeError(
                "You must supply a Reference Image to determine spatial AST distribution."
            )
        wcs = WCS(refimage)
        x_positions, y_positions = wcs.all_world2pix(ra_positions,
                                                     dec_positions, 0)

    astmags = ascii.read(filename)

    n_asts = len(astmags)

    # keep is defined to ensure that no fake stars are put outside of the image boundaries

    keep = (x_positions > np.min(x_positions) + separation + noise) & (x_positions < np.max(x_positions) - separation - noise) & \
           (y_positions > np.min(y_positions) + separation + noise) & (y_positions < np.max(y_positions) - separation - noise)

    x_positions = x_positions[keep]
    y_positions = y_positions[keep]

    ncat = len(x_positions)
    ind = np.random.random(n_asts) * ncat
    ind = ind.astype('int')

    # Here we generate the circular distribution of ASTs surrounding random observed stars

    separation = np.random.random(n_asts) * noise + separation
    theta = np.random.random(n_asts) * 2.0 * np.pi
    xvar = separation * np.cos(theta)
    yvar = separation * np.sin(theta)

    new_x = x_positions[ind] + xvar
    new_y = y_positions[ind] + yvar
    column1 = 0 * new_x
    column2 = column1 + 1
    column1 = Column(name='zeros', data=column1.astype('int'))
    column2 = Column(name='ones', data=column2.astype('int'))
    column3 = Column(name='X', data=new_x, format='%.2f')
    column4 = Column(name='Y', data=new_y, format='%.2f')
    astmags.add_column(column1, 0)
    astmags.add_column(column2, 1)
    astmags.add_column(column3, 2)
    astmags.add_column(column4, 3)

    ascii.write(astmags, filename, overwrite=True)
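
A minimal usage sketch (the file name and separation value are hypothetical; `datamodel` must already be configured, and `refimage` is only needed when the catalog lacks X/Y columns):

pick_positions('fake_stars_f475w.txt', separation=10.0,
               refimage='f475w_drz.fits')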
예제 #37
0
def makeobslog(path,root=''):
    """Make a journal of an observation night.
    
    Parameters
    ----------
    path : string
        path where data are located.
    root : string, optional
        root of files to journal.
        Default is '', i.e. all files will be journaled.
    
    Returns
    -------
    None.
    
    """
    # set file name and pattern
    fileout = path+ 'journal' + root + '.log'
    pattern = path +  root + '*.fit*'
    # make list of files from pattern
    listfile = glob.glob(pattern)
    nfile = len(listfile)
    # check number of files
    if (nfile==0):
        msg = '*** WARNING: there is no file of type: '+pattern
        t120.log.error(msg)
        raise IOError(msg)
    t120.log.info('There are '+str(nfile)+' files of type '+pattern)
    # get common prefix to all files
    prefix = os.path.commonprefix(listfile)
    # create output table:
    # method 1: obsolete and dumb... make a first dummy row so astropy.table.QTable knows 'FILTER' is a string
    #data = QTable([['                                            '],['                    '],
    #            [0.0],[0.0],[0.0],[0.0],[' '],[0.0]],
    #            names=['FILE','TARGET','JD','RA (deg)','DEC (deg)','EXP (s)','FILTER','AIRMASS'])
    # method 2: declare types
    data = QTable(dtype=[object,object,float,float,float,float,object,float],
                names=['FILE','TARGET','JD','RA (deg)','DEC (deg)','EXP (s)','FILTER','AIRMASS'])
    # loop over the files
    for imgfile in listfile:
        hdul = fits.open(imgfile)
        hdr = hdul[0].header
        target_name = hdr['OBJECT']#.strip().upper().replace(' ','')
        # JD, the center coordinate (RA, Dec), exposure time, filter, airmass.
        JD = hdr['JD']
        try:
            RAcenter = hdr['CRVAL1']
            DEcenter = hdr['CRVAL2']
        except KeyError:
            t120.log.warning('There is no CRVAL keyword in file '+imgfile)
            skycoo = SkyCoord(hdr['OBJCTRA'],hdr['OBJCTDEC'], unit=(u.hourangle, u.deg))
            RAcenter = skycoo.ra.to('deg').value
            DEcenter = skycoo.dec.to('deg').value
        exptime = hdr['EXPOSURE']
        filtr = hdr['FILTER']
        airmass = hdr['AIRMASS']
        data.add_row([imgfile.replace(prefix,'').lstrip(),target_name,JD,RAcenter,DEcenter,exptime,filtr,airmass])
        t120.log.debug('File='+imgfile+' TARGET='+target_name+' JD='+str(JD)+' RA='+str(RAcenter)+' DE='+str(DEcenter)+
                  ' exp='+str(exptime)+' fil='+str(filtr)+' airmass='+str(airmass))
    # remove first row : useful if method 1 is used: obsolete and dumb...
    #data.remove_row(0)
    # save log in file
    ascii.write(data,fileout,format='fixed_width',delimiter=' ',formats={'JD': '%18.12f'},overwrite=True)
    t120.log.info('There are '+str(len(listfile))+' data saved in '+fileout)
    return
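
A hedged usage sketch (the path and root are hypothetical; the function journals every FITS file matching path + root + '*.fit*'):

makeobslog('/data/2020-01-15/', root='NGC2264')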
예제 #38
0
    filetype = args.filetype
    r, rIn, rOut = args.radius
    ap_corr = args.ap_corr
    ch, pix = args.ch_px
    rejection = args.outlier
    nRun = args.run
    tName = args.target
    comments = args.comments

    #----------------------------

    #Generating & writing data tables to csv files
    res, data, prob = run(crdFormat, aor_crd, filetype, r, rIn, rOut, ap_corr,
                          int(ch), pix, rejection)
    ascii.write(res,
                '../Reduction_Data_&_Logs/%s_aor_data.csv' % nRun,
                delimiter=',',
                overwrite=True)
    ascii.write(data,
                '../Reduction_Data_&_Logs/%s_img_data.csv' % nRun,
                delimiter=',',
                overwrite=True)

    #Writing a txt file that keeps log about this reduction run
    #----------------------------------------------------------
    log = open('../Reduction_Data_&_Logs/run%s_log.txt' % nRun, 'w')
    log.write("Date Reduced     : %s \n" % datetime.now().isoformat())
    log.write("Input Parameters : %s \n" % str(vars(args)))
    log.write("Instrument       : IRAC Channel %i \n" % int(ch))
    log.write("File Type        : %s \n" % filetype.upper())
    log.write("Target           : %s \n" % tName)
    log.write("Radius Used      : r %i, rIn %i, rOut %i \n" % (r, rIn, rOut))
예제 #39
0
    outname = typex + "_Exp35Exp56.csv"
    tab1 = ascii.read(filex[0])
    tab2 = ascii.read(filex[1])
    del tab1["type"]
    del tab2["type"]
    tab1["count"].name = "count1"
    tab2["count"].name = "count2"
    newtab = join(tab1, tab2, keys="shRNA", join_type="inner")
    # column arithmetic on the joined table sums the per-experiment counts
    newtab["count"] = newtab["count1"] + newtab["count2"]
    del newtab["count1"]
    del newtab["count2"]
    ascii.write(newtab, outname, format="csv", overwrite=True)

#----- Filter out bench contaminants
wk_dir = "/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190516/Exp35Exp56Combined"
os.chdir(wk_dir)
# Contaminant: CDK9, Ccnt1
# Off target shRNA: Cd19
cont_list = ["CDK9", "Ccnt1", "Cd19"]


def flt_ct(inFile):
    outName = inFile.replace(".csv", "flt-ct.csv")
    with open(inFile, 'r') as fin:
        with open(outName, 'w') as fout:
            rfin = csv.reader(fin, delimiter=",")
            wfout = csv.writer(fout, delimiter=",")
예제 #40
0
def bundle_definition(file_in, ifu=1, path='./', diagnose=False, pilot=False):
    """ 
    Make a definition file containing a schematic of a fibre bundle 

    There is some duplication in this code, as it includes a test for 
    two different methods to plot the so-called bundle definition file. 
    This can be removed. 

    Adapted to new IFU object input. Kept old input (still appropriate 
    for Pilot Sample data). 
    """

    if pilot:
        # Follow old input style, appropriate for RSS files from Pilot Sample.
        # Open file and mask single IFU
        hdu = pf.open(path + file_in)
        fibtab = hdu[2].data  # binary table containing fibre information

        mask_ifu = fibtab.field('PROBENAME') == ifu  # index the selected IFU
        fibtab = fibtab[mask_ifu]  # and mask fibre data

        nfib = len(fibtab)  # count the number of fibres
        fib1 = np.where(fibtab['FIBNUM'] == 1)[0]  # identify the central fibre

    if not pilot:
        myIFU = utils.IFU(file_in, ifu, flag_name=False)
        nfib = len(myIFU.n)

    # get true angular separation (a) between each fibre and Fib1
    # ra and dec separations will then be cos(a), sin(a)
    offset_ra = np.zeros(nfib, dtype='double')
    offset_dec = np.zeros(nfib, dtype='double')

    for i in range(nfib):

        if pilot:
            ra1 = np.radians(fibtab['FIB_MRA'][fib1])
            ra_fib = np.radians(fibtab['FIB_MRA'][i])
            dec1 = np.radians(fibtab['FIB_MDEC'][fib1])
            dec_fib = np.radians(fibtab['FIB_MDEC'][i])

        if not pilot:
            ra1 = np.radians(myIFU.xpos[np.where(myIFU.n == 1)])
            dec1 = np.radians(myIFU.ypos[np.where(myIFU.n == 1)])
            ra_fib = np.radians(myIFU.xpos[i])
            dec_fib = np.radians(myIFU.ypos[i])

        # Angular distance (computed for reference; not used below)
        cosA = np.cos(np.pi/2-dec1) * np.cos(np.pi/2-dec_fib) + \
            np.sin(np.pi/2-dec1) * np.sin(np.pi/2-dec_fib) * np.cos(ra1-ra_fib)

        # RA offset (declination held fixed)
        cos_dRA  = np.cos(np.pi/2-dec1) * np.cos(np.pi/2-dec1) + \
            np.sin(np.pi/2-dec1) * np.sin(np.pi/2-dec1) * np.cos(ra1-ra_fib)

        # DEC offset (right ascension held fixed)
        cos_dDEC = np.cos(np.pi/2-dec1) * np.cos(np.pi/2-dec_fib) + \
            np.sin(np.pi/2-dec1) * np.sin(np.pi/2-dec_fib) * np.cos(ra1-ra1)

        # Sign check; trig collapses everything to a single quadrant, so need
        # to check which I am on:
        if (ra_fib >= ra1) and (dec_fib >= dec1):  # 1. quadrant (+, +)
            offset_ra[i] = np.degrees(np.arccos(cos_dRA[0]))
            offset_dec[i] = np.degrees(np.arccos(cos_dDEC[0]))

        if (ra_fib <= ra1) and (dec_fib >= dec1):  # 2. quadrant (-, +)
            offset_ra[i] = np.negative(np.degrees(np.arccos(cos_dRA[0])))
            offset_dec[i] = np.degrees(np.arccos(cos_dDEC[0]))

        if (ra_fib <= ra1) and (dec_fib <= dec1):  # 3. quadrant (-, -)
            offset_ra[i] = np.negative(np.degrees(np.arccos(cos_dRA[0])))
            offset_dec[i] = np.negative(np.degrees(np.arccos(cos_dDEC[0])))

        if (ra_fib >= ra1) and (dec_fib <= dec1):  # 4. quadrant (+, -)
            offset_ra[i] = np.degrees(np.arccos(cos_dRA[0]))
            offset_dec[i] = np.negative(np.degrees(np.arccos(cos_dDEC[0])))

    # Write a dictionary of relative RA, DEC lists
    datatab = {
        'RA': offset_ra,
        'DEC': offset_dec
    }  # proper, spherical trig, sky-projected
    """
    datatab2 = {'RA': fibtab['FIB_MRA'] - fibtab['FIB_MRA'][fib1], 
                'DEC': fibtab['FIB_MDEC'] - fibtab['FIB_MDEC'][fib1]} # simple
    """

    # Write to file
    file_out = './bundle' + str(ifu) + '.bdf'
    tab.write(datatab, file_out, names=['RA', 'DEC'])  # need 'names' in order

    # And test the positioning:
    if diagnose:
        ctr = [0.0, 0.0]
        fig = plt.gcf()
        fig.clf()
        ax = fig.add_subplot(111)
        axis = [
            -8. / 3600. + ctr[0], 8. / 3600. + ctr[0], -8. / 3600. + ctr[1],
            8. / 3600. + ctr[1]
        ]
        plt.axis(axis)
        plt.title('Bundle ' + str(ifu))
        plt.xlabel('RA Offset [degrees]')
        plt.ylabel('DEC Offset [degrees]')
        ax.set_aspect('equal')

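        # one circle per fibre (SAMI hexabundles contain 61 fibres)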
        for i in range(61):
            circ = patches.Circle(
                (datatab['RA'][i] + ctr[0], datatab['DEC'][i] + ctr[1]),
                0.8 / 3600.,
                edgecolor='none',
                facecolor='cyan',
                alpha=.5)
            ax.add_patch(circ)
            """
            circ2 = patches.Circle((datatab2['RA'][i] + ctr[0], 
                                    datatab2['DEC'][i] + ctr[1]), 0.8/3600.,
                                   edgecolor='none', facecolor='cyan',alpha=.5)
            ax.add_patch(circ2)
            """

        big_circ = patches.Circle(ctr,
                                  7.5 / 3600.,
                                  edgecolor='green',
                                  facecolor='none',
                                  lw=3)

        #ax.add_patch(big_circ)
        plt.savefig('/Users/iraklis/Desktop/bundle.pdf', transparent=True)
        plt.show()
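
For reference, the sky-projected offsets computed trigonometrically above can also be obtained directly with astropy; a sketch, assuming `ra_deg` and `dec_deg` are hypothetical arrays of fibre positions in degrees with the central fibre first:

from astropy.coordinates import SkyCoord
import astropy.units as u

fib_coords = SkyCoord(ra=ra_deg * u.deg, dec=dec_deg * u.deg)
# offsets of every fibre relative to the central fibre, as (d_lon, d_lat)
dra, ddec = fib_coords[0].spherical_offsets_to(fib_coords)
offset_ra, offset_dec = dra.deg, ddec.deg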
예제 #41
0
def cat_trans_im(incat, dbfile, geomap_infile, refcat, clustname, septol,
                 **kwargs):
    '''This routine reads in a FITS catalog, writes a temporary output
    ASCII catalog, then transforms this catalog using geoxytran.  It
    then appends these new coordinates as new columns to the original catalog.

    It is usually run by hand after the geomap solution has been derived.

    INPUT

    incat: the input SExtractor catalog

    dbfile: the geomap database file

    geomap_infile: used as the database record

    refcat: the original astrometric reference catalog

    clustname: the name of the cluster

    septol: the maximum separation allowed for a match in pixels

    OPTIONAL KEYWORDS

    xmin, xmax, ymin, ymax.  These are the limits over which the
    transform was originally computed.  If these are given then it
    uses those limits to color the points in the ra and decdiff plots.
    If one is given, all must be given.

    '''

    #read in GMOS sextractor photometry catalog with (x,y) coordinates
    cat_dat = ascii.read(incat)

    #read in original astrometric catalog
    ref_dat = ascii.read(refcat)
    xref = np.array(ref_dat['X_IMAGE'])
    yref = np.array(ref_dat['Y_IMAGE'])

    #tmp coordinate files for geoxytran
    tmpin = 'tmp_geoxytran_im_in'
    tmpout = 'tmp_geoxytran_im_out'

    #make a new name for the transformed file
    newcat = incat.replace('.sexcat', '.trans.cat')

    if os.path.isfile(tmpout):
        os.remove(tmpout)
    if os.path.isfile(newcat):
        os.remove(newcat)

    #output temporary ASCII file with coordinates
    fo = open(tmpin, "w")
    fo.write("# x y\n")
    for i, val in enumerate(cat_dat['X_IMAGE']):
        fo.write('{} {}\n'.format(cat_dat['X_IMAGE'][i],
                                  cat_dat['Y_IMAGE'][i]))
    fo.close()

    iraf.geoxytran(tmpin, tmpout, dbfile, geomap_infile, direction="backward",\
                   xcolumn=1, ycolumn = 2)

    #read in ascii output file
    trans_dat = ascii.read(tmpout)
    xtrans = np.array(trans_dat['x'])
    ytrans = np.array(trans_dat['y'])

    #replace the old RAs and DECs with new RAs and DECs in place
    tcat_dat = cat_dat
    tcat_dat['X_IMAGE'] = xtrans
    tcat_dat['Y_IMAGE'] = ytrans

    #write the new catalog file with the transformed coordinates
    ascii.write(tcat_dat, newcat, format='commented_header')

    #match new catalog against original catalog
    mfile = "allcat_im_match.txt"
    (xrefm,yrefm,xtransm,ytransm, translims) = cmti.cat_im_match(xref, yref, \
                                                                 xtrans, ytrans, septol, \
                                                                 matchfile = mfile)

    #make a plot of the residuals
    allcattrans_plotfile = 'allcat_trans.' + clustname + '_coordiff_im.pdf'
    #passes the x and y limits, if they are defined, to flag sources
    #outside of those limits.  Assumes that if one keyword is given
    #that all are given

    if 'xmin' in kwargs:
        cmti.match_diff_im_plot(xrefm,yrefm,xtransm,ytransm, plotfile = allcattrans_plotfile, \
                            xmin = kwargs['xmin'], xmax = kwargs['xmax'], \
                            ymin = kwargs['ymin'], ymax = kwargs['ymax'])
    else:
        cmti.match_diff_im_plot(xrefm,
                                yrefm,
                                xtransm,
                                ytransm,
                                plotfile=allcattrans_plotfile)
예제 #42
0
## Define the X and Y positions from the SExtractor output.
sex_x = sex_aux2['X_IMAGE']
sex_y = sex_aux2['Y_IMAGE']
sex_mag = sex_aux2['MAG_ISO']
# Convert to CMOS coordinates.
sex_x1 = (sex_x - 512) * 0.00270  # in mm, centred on (0, 0)
sex_y1 = (sex_y - 512) * 0.00270  # in mm, centred on (0, 0)

os.chdir(current_path)

# Save the X, Y and MAG columns from the SExtractor output.
ascii.write([sex_x1, sex_y1, sex_mag],
            'tmp/sext',
            delimiter='\t',
            format='no_header',
            formats={
                'col0': '% 15.10f',
                'col1': '% 15.10f',
                'col2': '% 15.10f'
            })

### Read the SExtractor output
with open('tmp/sext') as f:
    content = f.readlines()

content = [x.strip() for x in content]

x_sext_list = []
y_sext_list = []
ID_sext_list = []
예제 #43
0
            groupcount,
            np.median(cat['RA'][sel]),
            np.median(cat['DEC'][sel]),
            np.median(cat['wave'][sel]),
            np.median(cat['sn'][sel]),
            np.std(cat['sn'][sel]),
            np.max(cat['nalines'][sel]),
            np.max(cat['ncat0'][sel]),
            np.max(cat['ngood'][sel])
        ])
        #        if np.size(sel) > 3: detectlist.append([groupcount,cat['ID'][sel]])
        groupcount += 1

#save output table

ascii.write(cat, 'cosdeep_group_info.dat', overwrite=True)

sel2 = np.where((cat['nobs'] > 1))
nuniq = np.size(np.unique(cat['multi_idx'][sel2]))

print("number of unique objects found in 2 or more shots is", nuniq)

outputarr = Table(rows=output,
                  names=('multi_idx', 'ra', 'dec', 'wave', 'sn', 'snrms',
                         'nalines', 'nobs', 'ngood'),
                  dtype=('i4', 'f8', 'f8', 'f8', 'f8', 'f8', 'i4', 'i4', 'i4'))

#first plot completeness

nbin = 20
snarray = np.arange(nbin) * 2
예제 #44
0
    ra2 = radians(catRA[i])
    dec2 = radians(catDec[i])

    # Angular distance via the spherical law of cosines; the factor 3600 converts degrees to arcsec
    angSep = math.acos(
        math.sin(dec2) * math.sin(dec1) + math.cos(dec2) * math.cos(dec1) *
        math.cos(ra2 - ra1)) * (180 / math.pi) * 3600
    angDist.append(angSep)
    #if (catID[i] == 350):
    #print ("350",angDist[i])
print(angDist)
print("CATID", len(catID))
print("ANGDIST", len(angDist))
print(
    "-------------------------------------------------------------------------"
)

#Write galaxy ID, galaxy RA, and galaxy Dec to ascii table to read into RunGrismFiles program
RaDecData = Table(
    [catID, catRA, catDec, angDist],
    names=['Galaxy ID', 'Galaxy RA', 'Galaxy Dec', 'Angular Distance'])
ascii.write(RaDecData, 'RaDecData.dat', format='fixed_width', overwrite=True)
print("RaDecData.dat file written")
"""
file = ascii.read('RaDecData.dat', delimiter="|")
print (file['col2'])
print (file['col3'])
print (file['col4'])
print (file['col5'])
"""
예제 #45
0
    path = sys.argv[1]
    if path[-1] != '/':
        path = path + '/'

filenames = glob(path + '*panstarrs_radec.csv')
# take the basename so that path + clustername below does not duplicate the directory
clustername = os.path.basename(filenames[0]).split('_')[0]

# filename = pathfile.split('/')[-1]
# filename_nosuffix = filename.split('.')[0]
# path = pathfile[: - len(filename)]

panstarrs_file = path + clustername + '_panstarrs_radec.csv'
allwise_file = path + clustername + '_allwise_radec.txt'

panstarrs = ascii.read(panstarrs_file)
allwise = ascii.read(allwise_file)

# t0 = ascii.read(pathfile)

p1 = panstarrs['raMean', 'decMean']
a1 = allwise['ra', 'dec']

p1.add_column(np.full(len(p1), 2), name='size')
a1.add_column(np.full(len(a1), 2), name='size')
p1.rename_columns(('raMean', 'decMean'), ('ra', 'dec'))
a1.meta = {}

ascii.write(p1, output=path + clustername + '_panstarrs_radec_for_extinction_upload.csv', delimiter=',', format='basic', overwrite=True)
ascii.write(a1, output=path + clustername + '_allwise_radec_for_extinction_upload.csv', delimiter=',', format='basic', overwrite=True)

예제 #46
0
def save_star(detectid):
	try:
		lae = complete_lae_tab[complete_lae_tab["detectid"]==detectid]
		lae_ra, lae_dec = lae["ra"], lae["dec"]
		lae_coords = SkyCoord(ra = lae_ra*u.deg, dec = lae_dec*u.deg)
		rs = lae_coords.separation(shot_coords).arcsec
		mask = rs <= 50

		if len(mask[mask]) < 10:
			print("{} is empty.".format(detectid))
			return 0

		rs = rs[mask]
		spec_here = ffskysub[mask]
		err_here = spec_err[mask]
		mask_7_here = mask_7[mask]
		mask_10_here = mask_10[mask]
		lae_ra = shot_tab["ra"][mask]
		lae_dec = shot_tab["dec"][mask]
		order = np.argsort(rs)
		rs = rs[order]
		spec_here = spec_here[order]
		err_here = err_here[order]
		mask_7_here = mask_7_here[order]
		mask_10_here = mask_10_here[order]
		lae_ra = lae_ra[order]
		lae_dec = lae_dec[order]

		wlhere = (def_wave > 4550) & (def_wave <= 4650)
			
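		# inverse-variance weighted mean over 4550-4650 A:
		#   w_i = 1/sigma_i^2, <f> = sum(w_i*f_i)/sum(w_i), sigma_<f> = 1/sqrt(sum(w_i))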
		weights = err_here[:,wlhere] ** (-2)
		weights_sum = np.nansum(weights, axis=1)
		flux_mean = np.nansum(spec_here[:,wlhere]*weights, axis=1) / weights_sum
		flux_mean_error = 1./np.sqrt(weights_sum)
		
		mask = (flux_mean != 0) & (flux_mean_error != 0)
		rs_0 = rs[mask][:] #/ u.arcsec
		#rs_0 = rs_0.decompose()

		flux_mean = flux_mean[mask].data[:]
		flux_mean_error = flux_mean_error[mask].data[:]
		mask_7_here_0 = mask_7_here[mask]
		mask_10_here_0 = mask_10_here[mask]
		lae_ra_0 = lae_ra[mask]
		lae_dec_0 = lae_dec[mask]
		
		tab = {"r": rs_0,
			"ra": lae_ra_0,
			"dec": lae_dec_0,
			"flux": flux_mean,
			"sigma": flux_mean_error,
			"mask_7": mask_7_here_0,
			"mask_10": mask_10_here_0}
		save_file = os.path.join(basedir, f"radial_profiles/stars_skymask/star_{detectid}.dat")
		ascii.write(tab, save_file)
		print("Wrote to "+save_file)
	except Exception as e:
		print("{} failed: ".format(detectid))
		print(e)
		return 0
	return 1
예제 #47
0
object_properties['RA'] = ra_dd
object_properties['DEC'] = dec_dd

# Consistency check

print(bcolors.OKGREEN + "\nIs the object in the image footprint?" +
      bcolors.ENDC)

try:
    x_exp, y_exp = fits_tools.sky2xy(args.fits, RA=ra_dd, DEC=dec_dd)
    object_properties['X_EXP'] = x_exp
    object_properties['Y_EXP'] = y_exp
    filename_science_xy = args.outdir + object_properties['OBJECT'][
        0] + '_xy.cat'
    ascii.write(np.array([x_exp, y_exp]).T,
                filename_science_xy,
                format='no_header',
                overwrite=True)

    # print('Coordinates = %s, %s' %(x_exp, y_exp))

    print('Yes.')
    logger.info('Is the object in the image footprint: Passed')

except Exception:
    msg = 'Object (RA, DEC = {ra}, {dec}) is not in the image footprint. Check object coordinates and FITS file.'.format(
        ra=args.ra, dec=args.dec)
    print(bcolors.FAIL + msg + bcolors.ENDC)
    logger.error(msg)
    sys.exit()

# If sextractor is run in dual image mode, process reference image first
예제 #48
0
lamout = np.linspace(lam.min(), lam.max(), len(lam))
zpaux = np.zeros([len(uorder), len(lamout)])

for i in range(len(uorder)):
    f = interp1d(lam[order == uorder[i]],
                 zplam[order == uorder[i]],
                 bounds_error=False,
                 fill_value=0)
    zpaux[i, :] = f(lamout)

# envelope over the orders: keep the highest zeropoint at each wavelength
zpout = np.amax(zpaux, axis=0)

# ASSUMING READ MODE = FAST (GAIN = 1.12)
zpout = zpout + 2.5 * np.log10(1.2)

ascii.write([lamout, zpout],
            fileout,
            format='commented_header',
            names=['lambda', 'zeropoint'],
            formats={
                'lambda': '%.3f',
                'zeropoint': '%.2f'
            })

#plt.plot(lamout, zpaux[0,:])
plt.plot(lamout, zpout)
plt.show()
예제 #49
0
def pick_models_toothpick_style(
    sedgrid_fname,
    filters,
    N_fluxes,
    min_N_per_flux,
    outfile=None,
    outfile_params=None,
    bins_outfile=None,
    bright_cut=None,
):
    """
    Creates a fake star catalog from a BEAST model grid. The chosen seds
    are optimized for the toothpick model, by working with a given
    number of flux bins, and making sure that every flux bin is covered
    by at least a given number of models (for each filter individually,
    which is how the toothpick model works).

    Parameters
    ----------
    sedgrid_fname: string
        BEAST model grid from which the models are picked (hdf5 file)

    filters: list of string
        Names of the filters, to be used as columns of the output table

    N_fluxes: integer
        The number of flux bins into which the dynamic range of the
        model grid in each filter is divided

    min_N_per_flux: integer
        Minimum number of model seds that need to fall into each bin

    outfile: string
        Output path for the models (optional). If this file already
        exists, the chosen seds are loaded from this file instead.

    outfile_params: string (default=None)
        If a file name is given, the physical parameters associated with
        each model will be written to disk

    bins_outfile: string
        Output path for a file containing the flux bin limits for each
        filter, and the number of samples for each (optional)

    bright_cut: list of float
        List of magnitude limits for each filter (won't sample model
        SEDs that are too bright)

    Returns
    -------
    sedsMags: astropy Table
        A table containing the selected model seds (columns are named
        after the filters)

    """
    if outfile is not None and os.path.isfile(outfile):
        print(
            "{} already exists. Will attempt to load SEDs for ASTs from there."
            .format(outfile))
        t = Table.read(outfile, format="ascii")
        return t

    with Vega() as v:
        vega_f, vega_flux, lambd = v.getFlux(filters)

    modelsedgrid = SEDGrid(sedgrid_fname)

    sedsMags = -2.5 * np.log10(modelsedgrid.seds[:] / vega_flux)
    Nseds = sedsMags.shape[0]
    Nf = sedsMags.shape[1]
    idxs = np.arange(Nseds)

    # Check if logL=-9.999 model points silently sneak through
    if min(modelsedgrid.grid["logL"]) < -9:
        warnings.warn("There are logL=-9.999 model points in the SED grid!")
        print("Excluding those SED models from selecting input ASTs")
        idxs = np.where(modelsedgrid.grid["logL"] > -9)[0]
        sedsMags = sedsMags[idxs]

    # Set up a number of flux bins for each filter
    maxes = np.amax(sedsMags, axis=0)
    mins = np.amin(sedsMags, axis=0)

    bin_edges = np.zeros((N_fluxes + 1, Nf))  # indexed on [fluxbin, nfilters]
    for f in range(Nf):
        bin_edges[:, f] = np.linspace(mins[f], maxes[f], N_fluxes + 1)
    bin_mins = bin_edges[:-1, :]
    bin_maxs = bin_edges[1:, :]
    if not len(bin_mins) == len(bin_maxs) == N_fluxes:
        raise AssertionError()

    bin_count = np.zeros((N_fluxes, Nf))
    chosen_idxs = []
    counter = 0
    successes = 0
    include_mask = np.full(idxs.shape, True, dtype=bool)
    chunksize = 100000
    while True:
        counter += 1
        # pick some random models
        rand_idx = np.random.choice(idxs[include_mask], size=chunksize)
        randomseds = sedsMags[rand_idx, :]

        # Find in which bin each model belongs, for each filter
        fluxbins = np.zeros(randomseds.shape, dtype=int)
        for fltr in range(Nf):
            fluxbins[:, fltr] = np.digitize(randomseds[:, fltr],
                                            bin_maxs[:, fltr])

        # Clip in place (models of which the flux is equal to the max
        # are assigned bin nr N_fluxes. Move these down to bin nr
        # N_fluxes - 1)
        np.clip(fluxbins, a_min=0, a_max=N_fluxes - 1, out=fluxbins)

        add_these = np.full((len(rand_idx)), False, dtype=bool)
        for r in range(len(rand_idx)):
            # If any of the flux bins that this model falls into does
            # not have enough samples yet, add it to the list of model
            # spectra to be output
            if (bin_count[fluxbins[r, :], range(Nf)] < min_N_per_flux).any():
                bin_count[fluxbins[r, :], range(Nf)] += 1
                successes += 1
                add_these[r] = True

            # If all these bins are full...
            else:
                # ... do not include this model again, since we will reject it
                # anyway.
                include_mask[idxs == rand_idx[r]] = False

        # Add the approved models
        chosen_idxs.extend(rand_idx[add_these])

        # If some of the randomly picked models were not added
        if not add_these.any():
            # ... check if we have enough samples everywhere, or if all
            # the models have been exhausted (and hence the bins are
            # impossible to fill).
            enough_samples = (bin_count.flatten() >= min_N_per_flux).all()
            still_models_left = include_mask.any()
            if enough_samples or not still_models_left:
                break

        if not counter % 10:
            print("Sampled {} models. {} successful seds. Ratio = {}".format(
                counter * chunksize, successes,
                successes / counter / chunksize))

    # Gather the selected model seds in a table
    sedsMags = Table(sedsMags[chosen_idxs, :], names=filters)

    if outfile is not None:
        ascii.write(
            sedsMags,
            outfile,
            overwrite=True,
            formats={k: "%.5f"
                     for k in sedsMags.colnames},
        )

    # if chosen, save the corresponding model parameters
    if outfile_params is not None:
        grid_dict = {}
        for key in list(modelsedgrid.grid.keys()):
            grid_dict[key] = modelsedgrid.grid[key][chosen_idxs]
        grid_dict["sedgrid_indx"] = chosen_idxs
        ast_params = Table(grid_dict)
        ast_params.write(outfile_params, overwrite=True)

    if bins_outfile is not None:
        bin_info_table = Table()
        col_bigarrays = [bin_mins, bin_maxs, bin_count]
        col_basenames = ["bin_mins_", "bin_maxs_", "bin_count_"]
        for fltr, filter_name in enumerate(filters):
            for bigarray, basename in zip(col_bigarrays, col_basenames):
                bin_info_table.add_column(
                    Column(bigarray[:, fltr], name=basename + filter_name))
        ascii.write(bin_info_table, bins_outfile, overwrite=True)

    return sedsMags
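
A hedged usage sketch (the grid file name and bin settings are hypothetical; the filter names follow the convention used elsewhere in this module):

fake_seds = pick_models_toothpick_style(
    'beast_seds.grid.hd5',
    ['HST_WFC3_F475W', 'HST_WFC3_F814W'],
    N_fluxes=25,
    min_N_per_flux=50,
    outfile='ast_input_seds.txt',
)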
예제 #50
0
def pick_models(
    sedgrid_fname,
    filters,
    mag_cuts,
    Nfilter=3,
    N_stars=70,
    Nrealize=20,
    outfile=None,
    outfile_params=None,
    bright_cut=None,
    vega_fname=None,
    ranseed=None,
):
    """Creates a fake star catalog from a BEAST model grid

    Parameters
    ----------
    sedgrid_fname: string
        BEAST model grid from which the models are picked (hdf5 file)

    filters: list of string
        Names of the filters

    mag_cuts: list
        List of magnitude limits for each filter

    Nfilter: Integer
             The number of filters in which a fake star must be brighter
             than the limit (mag_cut) (default = 3)

    N_stars: Integer
               Number of stellar models picked per log(age) value
               (default=70)

    Nrealize: Integer
              Number of realization of each models (default = 20)

    outfile: str
        If a file name is given, the selected models will be written to
        disk

    outfile_params: str
        If a file name is given, the physical parameters associated with
        each model will be written to disk

    bright_cut: list of float
        Same as mag_cuts, but for the bright end

    vega_fname: str
        filename of vega file

    ranseed : int
        used to set the seed to make the results reproducible
        useful for testing

    Returns
    -------
    astropy Table of selected models
    - and optionally -
    ascii file: A list of selected models, written to 'outfile'
    fits file: the corresponding physical parameters, written to 'outfile_params'
    """

    with Vega(source=vega_fname) as v:  # Get the vega fluxes
        vega_f, vega_flux, lamb = v.getFlux(filters)

    modelsedgrid = SEDGrid(sedgrid_fname)

    # Convert to Vega mags
    sedsMags = -2.5 * np.log10(modelsedgrid.seds[:] / vega_flux)

    # make sure Nfilters isn't larger than the total number of filters
    if Nfilter > len(filters):
        Nfilter = len(filters)

    # Select the models above the magnitude limits in N filters
    idxs = mag_limits(sedsMags,
                      mag_cuts,
                      Nfilter=Nfilter,
                      bright_cut=bright_cut)
    cols = {}
    for key in list(modelsedgrid.grid.keys()):
        cols[key] = modelsedgrid.grid[key][idxs]
    grid_cut = Table(cols)

    # Sample the model grid uniformly
    prime_params = np.column_stack(
        (grid_cut["logA"], grid_cut["M_ini"], grid_cut["Av"]))
    search_age = np.unique(prime_params[:, 0])

    N_sample = N_stars
    model_ind = []  # indices for the model grid
    ast_params = grid_cut[[]]  # the corresponding model parameters

    # set the random seed - mainly for testing
    if ranseed is not None:
        np.random.seed(ranseed)

    for iage in search_age:
        (tmp, ) = np.where(prime_params[:, 0] == iage)
        new_ind = np.random.choice(tmp, N_sample)
        model_ind.append(new_ind)
        for i in range(len(new_ind)):
            ast_params.add_row(grid_cut[new_ind[i]])

    index = np.repeat(idxs[np.array(model_ind).reshape((-1))], Nrealize)
    sedsMags = Table(sedsMags[index, :], names=filters)

    if outfile is not None:
        ascii.write(
            sedsMags,
            outfile,
            overwrite=True,
            formats={k: "%.5f"
                     for k in sedsMags.colnames},
        )

    if outfile_params is not None:
        ast_params.write(outfile_params, overwrite=True)

    return sedsMags
예제 #51
0
def supplement_ast(
    sedgrid_fname,
    filters,
    nAST=1000,
    existingASTfile=None,
    outASTfile=None,
    outASTfile_params=None,
    mag_cuts=None,
    color_cuts=None,
):
    """
    Creates an additional fake star catalog from a BEAST model grid
    that fulfills the customized conditions to supplement input ASTs.
    If the existing input AST parameter file is given, already selected
    models will be excluded from this process. The input artificial
    stars are picked randomly from the remaining models.

    Parameters
    ----------
    sedgrid_fname: string
        BEAST model grid from which the models are picked (hdf5 file)

    filters: list of string
        Names of the filters

    nAST: int
        Number of unique additional ASTs per source density bin

    existingASTfile: string (optional, default=None)
        Name of the existing input AST parameter file. If not None,
        the models that are already listed in the existing file will
        be removed from the pool by default

    outASTfile: string (optional, default=None)
        Output file name for the chosen models

    outASTfile_params: string (optional, default=None)
        If a file name is given, the physical parameters associated with
        each model will be written to disk

    mag_cuts: dictionary (optional, default=None)
        Dictionary of bright and faint magnitude limits for given filters.
        The way to specify the cuts is by updating the "ast_suppl_maglimit" key
        in the beast_settings file. This is a dictionary that includes information
        for the magnitude cuts as a function of the filters included in observation.

        For example, for a field observed with HST_WFC3_F336W, HST_WFC3_F475W,
        and HST_WFC3_F814W, to set a magnitude range limit of 16<HST_WFC3_F475W<28 mag,
        and 15<HST_WFC3_F814W<27 mag you need to set the following within the beast_settings file:

        # specify that the ast_supplement mode should be on
        ast_supplement = True

        # initialize and populate the dictionary of desired magnitude limits
        ast_suppl_maglimits = {}
        # the magnitude limits are defined by the filter and a list of the limits in magnitudes
        ast_suppl_maglimits["HST_WFC3_F475W"] = [16,28]
        ast_suppl_maglimits["HST_WFC3_F814W"] = [15,27]

        # set the key word
        ast_suppl_maglimit = ast_suppl_maglimits

    color_cuts: dictionary (optional, default=None)
        Dictionary of red color limits for given filters.
        The way to specify the cuts is by updating the "ast_suppl_colorlimit" key
        in the beast_settings file. This is a dictionary that includes information
        for the color cuts as a function of the filters included in observation.

        For example, for a field observed with HST_WFC3_F336W, HST_WFC3_F475W,
        and HST_WFC3_F814W, to set a color range limit of HST_WFC3_F475W-HST_WFC3_F814W<6,
        HST_WFC3_F336W-HST_WFC3_F475W<5 and HST_WFC3_F336W-HST_WFC3_F814W<4, you need
        to set the following within the beast_settings file:

        # specify that the ast_supplement mode should be on
        ast_supplement = True

        # initialize the dictionary of desired magnitude limits
        ast_suppl_colorlimits = {}

        # the color limits are defined by the first filter in the color (e.g, X for X-Y),
        # and the input is a list including the second filter (e.g., Y for X-Y) and the
        # color limit in magnitudes
        ast_suppl_colorlimits["HST_WFC3_F475W"] = [["HST_WFC3_F814W",6]]
        ast_suppl_colorlimits["HST_WFC3_F336W"] = [["HST_WFC3_F475W",5], ["HST_WFC3_F814W",4]]

        # set the key word
        ast_suppl_colorlimit =  ast_suppl_colorlimits

    Returns
    -------
    sedsMags: astropy Table
        A table containing the selected model seds (columns are named
        after the filters)

    """

    with Vega() as v:
        vega_f, vega_flux, lambd = v.getFlux(filters)

    modelsedgrid = SEDGrid(sedgrid_fname)

    # Convert to Vega mags
    sedsMags = -2.5 * np.log10(modelsedgrid.seds[:] / vega_flux)

    Nseds = sedsMags.shape[0]
    sedsIndx = np.arange(Nseds)

    if existingASTfile is not None and os.path.isfile(existingASTfile):
        print("{} exists. Will attempt to load SEDs for ASTs from there \
            and remove those SEDs from the SED grid".format(existingASTfile))
        print("existing AST file", existingASTfile)
        t = Table.read(existingASTfile, format="fits")
        sedsMags = np.delete(sedsMags, t["sedgrid_indx"], axis=0)
        sedsIndx = np.delete(sedsIndx, t["sedgrid_indx"])
        Nseds = sedsMags.shape[0]

    # Apply selection conditions if supplied
    # Just magnitude cuts
    print("mag_cuts", mag_cuts)
    print("color_cuts", color_cuts)
    if mag_cuts is not None:
        cond = np.ones(Nseds, dtype=bool)
        for key in list(mag_cuts.keys()):
            idx_filter = [i for i, iflt in enumerate(filters) if key in iflt]
            bright_cut = mag_cuts[key][0]
            faint_cut = mag_cuts[key][1]
            tmp_cond = np.logical_and(
                (sedsMags[:, idx_filter] >= bright_cut),
                (sedsMags[:, idx_filter] <= faint_cut),
            )

            if color_cuts is not None:
                if key in color_cuts:
                    for limit in color_cuts[key]:

                        idx_color_filter = [
                            i for i, iflt in enumerate(filters)
                            if limit[0] in iflt
                        ]
                        tmp_cond = np.logical_and(
                            tmp_cond,
                            (sedsMags[:, idx_filter] -
                             sedsMags[:, idx_color_filter] <= limit[1]),
                        )
            cond = np.logical_and(cond, tmp_cond.ravel())

        sedsMags = sedsMags[cond, :]
        sedsIndx = sedsIndx[cond]

    # Randomly select models
    # Supplementing ASTs does not need to follow
    # the toothpick-way selection
    # draw without replacement so the nAST selections are unique
    chosen_idxs = np.random.choice(len(sedsIndx), nAST, replace=False)
    sedsIndx = sedsIndx[chosen_idxs]

    # Gather the selected model seds in a table
    sedsMags = Table(sedsMags[chosen_idxs, :], names=filters)

    if outASTfile is not None:
        ascii.write(
            sedsMags,
            outASTfile,
            overwrite=True,
            formats={k: "%.5f"
                     for k in sedsMags.colnames},
        )

    # if chosen, save the corresponding model parameters
    if outASTfile_params is not None:
        grid_dict = {}
        for key in list(modelsedgrid.grid.keys()):
            grid_dict[key] = modelsedgrid.grid[key][sedsIndx]
        grid_dict["sedgrid_indx"] = sedsIndx
        ast_params = Table(grid_dict)
        ast_params.write(outASTfile_params, overwrite=True)

    return sedsMags
예제 #52
0
def DoAll():
    data = ascii.read('M83-Comb1_4mags.txt')
    m1, m2, m3, m4, ra, dec = data['m1'], data['m2'], data['m3'], data[
        'm4'], data['ra'], data['dec']
    del data

    # HST UVIS-1/IR Vega zero points in Jy, and corresponding lambda_eff
    fo = np.array([4196.2, 2439.35, 1738.4,
                   1138.06])  # F438W, F814W, F110W, F160W
    wv1 = np.array([0.43151, 0.790114, 1.102969, 1.523589]) * 1e-4  # cm

    # Flux per freq bin in ergs/s/cm2/Hz
    m = np.array([m1, m2, m3, m4]).T
    f_nu = 10**(m / (-2.5)) * fo * 1e-23

    # log of Counts per lambda bin in photons/s/cm2/cm
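    # derivation: N_lam = f_lam/(h*nu) = (f_nu*c/lam^2)/(h*c/lam) = f_nu/(h*lam), h = Planck constant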
    N_lam = np.log10(f_nu / (wv1 * h))
    wv1 = np.log10(wv1)
    del m, f_nu

    # Effective Area * Relative Bandpass, and lambda_pivot for the notional ZYH filters + F606-like f_1
    Arel = np.array([0.2107, 0.5663, 0.5856, 0.5686]) * 1e4  # m^2 > cm^2
    wv2 = np.array([0.6342, 0.8758, 1.0671, 1.5909]) * 1e-4  # um  > cm
    wv2 = np.log10(wv2)

    tmp = np.array([
        np.polyfit(wv1[0:2], [N_lam[:, 0][i], N_lam[:, 1][i]], 1)
        for i in range(N_lam.shape[0])
    ])
    A, B = tmp[:, 0], tmp[:, 1]
    N0 = 10**(A * wv2[0] + B) * 10**wv2[0] * Arel[0]
    del tmp, A, B

    tmp = np.array([
        np.polyfit(wv1[1:3], [N_lam[:, 1][i], N_lam[:, 2][i]], 1)
        for i in range(N_lam.shape[0])
    ])
    A, B = tmp[:, 0], tmp[:, 1]
    N1, N2 = 10**(A * wv2[1] + B) * 10**wv2[1] * Arel[1], 10**(
        A * wv2[2] + B) * 10**wv2[2] * Arel[2]
    del tmp, A, B

    tmp = np.array([
        np.polyfit(wv1[2:4], [N_lam[:, 2][i], N_lam[:, 3][i]], 1)
        for i in range(N_lam.shape[0])
    ])
    A, B = tmp[:, 0], tmp[:, 1]
    N3 = 10**(A * wv2[3] + B) * 10**wv2[3] * Arel[3]
    del tmp, A, B

    id = np.arange(ra.size) + 1
    ones = np.ones_like(id)
    typ = np.repeat(np.array(['point']), id.size)
    cmnt = np.repeat(np.array(['comment']), id.size)

    tab0 = [id, ra, dec, N0, typ, ones, ones, ones, ones, cmnt]
    tab1 = [id, ra, dec, N1, typ, ones, ones, ones, ones, cmnt]
    tab2 = [id, ra, dec, N2, typ, ones, ones, ones, ones, cmnt]
    tab3 = [id, ra, dec, N3, typ, ones, ones, ones, ones, cmnt]

    nms = ('id', 'ra', 'dec', 'flux', 'type', 'n', 're', 'phi', 'ratio',
           'notes')

    fmt = {'id':'%10d', 'ra':'%10.5f', 'dec':'%10.5f', 'flux':'%15.5f', 'type':'%8s', \
           'n':'%8.1f', 're':'%8.1f', 'phi':'%8.1f', 'ratio':'%8.1f', 'notes':'%8s'}

    t0 = Table(tab0, names=nms)
    t1 = Table(tab1, names=nms)
    t2 = Table(tab2, names=nms)
    t3 = Table(tab3, names=nms)

    ascii.write(t0,
                'list0.Comb1.tbl',
                format='fixed_width',
                delimiter='',
                formats=fmt)
    ascii.write(t1,
                'list1.Comb1.tbl',
                format='fixed_width',
                delimiter='',
                formats=fmt)
    ascii.write(t2,
                'list2.Comb1.tbl',
                format='fixed_width',
                delimiter='',
                formats=fmt)
    ascii.write(t3,
                'list3.Comb1.tbl',
                format='fixed_width',
                delimiter='',
                formats=fmt)
예제 #53
0
def td1_estimate(td1_hdu):
    alpha = td1_hdu[1].data['ra']
    delta = td1_hdu[1].data['dec']
    nuv_flux = td1_hdu[1].data['flux_2365_a']
    fuv_flux = td1_hdu[1].data['flux_1565_a']

    # NUV
    refined_set = [(al, de, nf) for al, de, nf in zip(alpha, delta, nuv_flux)
                   if (cc.ra.value - 5) <= al <= (cc.ra.value + 5) and
                   (cc.dec.value - 5) <= de <= (cc.dec.value + 5)]

    nalpha, ndelta, nuv_flux = zip(*refined_set)

    confined_set = [
        nf for al, de, nf in zip(nalpha, ndelta, nuv_flux)
        if cel_separation(al, de) <= field_radius[instrument] * u.arcsec
    ]

    # If the list is empty, fall back to the nominal flux value.
    if len(confined_set) == 0:
        confined_set.append(flux_norm)

    nd = max(confined_set)
    flux, ta, tb, tc, td, te = td1_countnuv(nd)
    nuv_res = Table([[flux], [ta], [tb], [tc], [td], [te]],
                    names=('flux_2365_a', 'silica', 'b4', 'b13', 'b15', 'n2'),
                    meta={'name': 'NUV counts'})

    #nuv_res['flux_2365_a'].format = '
    nuv_res['silica'].format = '4.1f'
    nuv_res['b4'].format = '4.1f'
    nuv_res['b13'].format = '4.1f'
    nuv_res['b15'].format = '4.1f'
    nuv_res['n2'].format = '4.1f'

    print('\n\n### NUV\n\n{}\n'.format(nuv_res))

    # To select NUV safe filters.
    nuv_filter_dict = {
        0: 'Silica',
        1: 'NUV-B4',
        2: 'NUV-B13',
        3: 'NUV-B15',
        4: 'NUV-N2'
    }
    i = 0
    nuv_safe = []
    for Filter in zip(*nuv_res['silica', 'b4', 'b13', 'b15', 'n2']):
        if sum(np.array(Filter) > 1500) == 0:
            nuv_safe.append(nuv_filter_dict[i])
        if i == 0:
            if sum(np.array(Filter) > 1133) == 0:
                nuv_safe.append('NUV-grating')
        i = i + 1

    nuv_declaration = 'Safe filters in NUV: {}'.format(nuv_safe)
    print('\n\n{}\n'.format(nuv_declaration))

    # To write to file.
    nuv_table = 'NUV_td1-nd-int.txt'
    ascii.write(nuv_res, nuv_table, format='csv', overwrite=True)

    with open('safe_NUV_filters.txt', 'w') as safe_file:
        safe_file.write(nuv_declaration)

    # FUV
    refined_set = [(al, de, ff) for al, de, ff in zip(alpha, delta, fuv_flux)
                   if (cc.ra.value - 5) <= al <= (cc.ra.value + 5) and
                   (cc.dec.value - 5) <= de <= (cc.dec.value + 5)]

    nalpha, ndelta, fuv_flux = zip(*refined_set)

    confined_set = [
        ff for al, de, ff in zip(nalpha, ndelta, fuv_flux)
        if cel_separation(al, de) <= field_radius[instrument] * u.arcsec
    ]

    # If the list is empty, fall back to the nominal flux value.
    if len(confined_set) == 0:
        confined_set.append(flux_norm)

    fd = max(confined_set)
    flux, ta, tb, tc, td = td1_countfuv(fd)
    fuv_res = Table([[flux], [ta], [tb], [tc], [td]],
                    names=('flux_1565_a', 'caf2', 'baf2', 'sapphire',
                           'silica'),
                    meta={'name': 'FUV counts'})

    #fuv_res['flux_1565_a'].format = '
    fuv_res['caf2'].format = '4.1f'
    fuv_res['baf2'].format = '4.1f'
    fuv_res['sapphire'].format = '4.1f'
    fuv_res['silica'].format = '4.1f'

    print('\n### FUV \n\n{}\n\n'.format(fuv_res))

    # To select FUV safe filters.
    fuv_filter_dict = {0: 'CaF2', 1: 'BaF2', 2: 'Sapphire', 3: 'Silica'}
    j = 0
    fuv_safe = []
    for Filter in zip(*fuv_res['caf2', 'baf2', 'sapphire', 'silica']):
        if sum(np.array(Filter) > 1500) == 0:
            fuv_safe.append(fuv_filter_dict[j])
        if j == 0:
            if sum(np.array(Filter) > 892) == 0:
                fuv_safe.append('FUV-grating')
        j = j + 1

    fuv_declaration = 'Safe filters in FUV: {}'.format(fuv_safe)
    print('\n\n{}\n'.format(fuv_declaration))

    # To write to file.
    fuv_table = 'FUV_td1-fd-int.txt'
    ascii.write(fuv_res, fuv_table, format='csv', overwrite=True)

    with open('safe_FUV_filters.txt', 'w') as safe_file:
        safe_file.write(fuv_declaration)
예제 #54
0
파일: beta2d.py 프로젝트: estevesjh/ciao
ext.ypos = ycen
ext.alpha = 0.8
bgnd.c0 = 0.04
freeze(ext.ellip, ext.theta, ext.xpos, ext.ypos)
# freeze(ext.xpos,ext.ypos)

show_model()
set_stat("cstat")  # CStat: a maximum likelihood statistic (XSPEC implementation
                   # of Cash); the background must be fitted, not subtracted
set_method("neldermead")
fit()
covariance()

image_data()
image_resid(tile=True, newframe=True)
image_source_component(ext, tile=True, newframe=True)

save_model("model.fits")
save_resid("resid.fits")

c = get_covariance_results()
# out = numpy.array([c.parvals,c.parmins, c.parmaxes])
# out[:,1] = out[:,1]*pixscale

at.write(numpy.array([c.parvals, c.parmins, c.parmaxes]),
         "beta2d.txt",
         names=c.parnames)

exit
예제 #55
0
i = 0
nuv_safe = []
for Filter in zip(*nuv_res['silica', 'b4', 'b13', 'b15', 'n2']):
    if sum(np.array(Filter) > 1500) == 0:
        nuv_safe.append(nuv_filter_dict[i])
    if i == 0:
        if sum(np.array(Filter) > 1133) == 0:
            nuv_safe.append('NUV-grating')
    i = i + 1

nuv_declaration = 'Safe filters in NUV: {}'.format(nuv_safe)
print('\n\n{}\n'.format(nuv_declaration))

# To write to file.
nuv_table = 'NUV_' + catalogue.replace('.fits.gz', '-nd-int.txt')
ascii.write(nuv_res, nuv_table, format='csv', overwrite=True)

with open('safe_NUV_filters.txt', 'w') as safe_file:
    safe_file.write(nuv_declaration)

# To select FUV safe filters.
print('\n### FUV \n\n{}\n\n'.format(fuv_res))
fuv_filter_dict = {0: 'CaF2', 1: 'BaF2', 2: 'Sapphire', 3: 'Silica'}
j = 0
fuv_safe = []
for Filter in zip(*fuv_res['caf2', 'baf2', 'sapphire', 'silica']):
    if sum(np.array(Filter) > 1500) == 0:
        fuv_safe.append(fuv_filter_dict[j])
    if j == 0:
        if sum(np.array(Filter) > 892) == 0:
            fuv_safe.append('FUV-grating')
예제 #56
0
def stpars(n_ms, n_rg, feh, afe, age, logg_cn=3, fig=False, iso='DARTMOUTH'):

    #---------------------------------
    # SET OUTPUT FILENAME
    #---------------------------------
    fileout = set_stpars_filename(n_ms, n_rg, feh, afe, age, logg_cn)

    #---------------------------------
    # ISOCHRONE [FE/H]-INTERPOLATION
    #---------------------------------
    isofile = gettracks(feh, afe, age, iso=iso)

    if iso.upper() == 'DARTMOUTH':
        print('Utilizing the DARTMOUTH isochrones')
    if iso.upper() == 'PADOVA':
        print('Utilizing the Padova isochrones')

    #---------------------------------
    # READ ISOCHRONE
    #---------------------------------
    t = np.loadtxt(isofile)
    F0 = 1.021e-20  #Vega flux in erg cm-2 s-1 Hz-1
    pc = 3.086e18  # parsec in cm
    if iso.upper() == 'DARTMOUTH':
        isoteff = 10**t[:, 2]
        isologg = t[:, 3]
        isomass = t[:, 1]
        isologL = t[:, 4]
    if iso.upper() == 'PADOVA':
        isoteff = 10**t[:, 6]
        isologg = t[:, 7]
        isomass = t[:, 2]
        isologL = t[:, 5]
        isoHFlux = 10**(-t[:, 30] / 2.5) * F0
        isologLH = np.log10(isoHFlux * 4 * np.pi * (10 * pc)**2)

    #---------------------------------
    # GET STELLAR PARAMETERS
    #---------------------------------
    logg_lim = max(isologg[isoteff == max(isoteff)])
    min_teff_ms = min(isoteff[isologg >= logg_lim])
    min_teff_rg = min(isoteff[isologg < logg_lim])

    delta_teff_ms = (max(isoteff) - min_teff_ms) / n_ms
    delta_teff_rg = (max(isoteff) - min_teff_rg) / n_rg

    teff_grid = np.linspace(1, n_ms + n_rg + 1, n_ms + n_rg + 1)
    phase = list(itertools.repeat("ms", n_ms + n_rg + 1))

    for i in range(n_ms):
        teff_grid[i] = min_teff_ms + i * delta_teff_ms

    teff_grid[n_ms] = max(isoteff)
    j = 0
    for i in range(n_ms + n_rg, n_ms, -1):
        teff_grid[i] = min_teff_rg + j * delta_teff_rg
        phase[i] = "rgb"
        j += 1

    index_lim = np.where(teff_grid == max(isoteff))[0][0]
    logg_grid = np.zeros(len(teff_grid))
    mass_grid = np.zeros(len(teff_grid))
    lumi_grid = np.zeros(len(teff_grid))

    for i in range(len(teff_grid)):

        if i <= index_lim:
            xxxx = isologg >= logg_lim
        else:
            xxxx = isologg < logg_lim

        temp = abs(isoteff[xxxx] - teff_grid[i])
        teff_grid_temp = isoteff[xxxx]
        condition = np.where(temp == min(temp))[0]
        teff_grid[i] = teff_grid_temp[condition][0]
        logg_grid_temp = isologg[xxxx]
        logg_grid[i] = logg_grid_temp[condition][0]
        mass_grid_temp = isomass[xxxx]
        mass_grid[i] = mass_grid_temp[condition][0]
        if iso.upper() == 'PADOVA':
            lumi_grid_temp = isologLH[xxxx]
        else:
            # isologLH (H-band) is only defined for PADOVA above;
            # fall back to the bolometric isologL for DARTMOUTH
            lumi_grid_temp = isologL[xxxx]
        lumi_grid[i] = lumi_grid_temp[condition][0]
        #
        #        logg_grid[i] = isologg[(xxxx) and np.where(temp == min(temp))][0]
        #        mass_grid[i] = isomass[(xxxx) and np.where(temp == min(temp))][0]
        #        lumi_grid[i] = isologL[(xxxx) and np.where(temp == min(temp))][0]

        if logg_grid[i] <= logg_cn:
            phase[i] = "rgb_cn"

    isoteffgrid = teff_grid
    isologggrid = logg_grid
    isomassgrid = mass_grid
    isologLgrid = lumi_grid

    #---------------------------------
    # WRITE OUTPUT FILE
    #---------------------------------
    # create the output directory if it does not exist yet
    if not os.path.isdir('./Stellar_pars'):
        os.makedirs('./Stellar_pars')

    ascii.write([isoteffgrid, isologggrid, isomassgrid, isologLgrid, phase],
                fileout,
                names=['#Teff/k', 'logg', 'Mass/Msun', 'logL/Lsun', 'phase'],
                overwrite=True)

    #---------------------------------
    # PLOT ISOCHRONE
    #---------------------------------
    #    plt.figure()
    #    plt.plot()
    #    lines(iso.teff, iso.logg, lwd = 2, col = 'red')
    #    points(iso.teff.grid, iso.logg.grid, col = 'red', pch = 19, cex = 1.4)
    isoplot = plt.plot(isoteff, isologg)
    isogridplot = plt.plot(isoteffgrid, isologggrid, 'o')
    plt.xlabel('Temperature (K)')
    plt.ylabel('log g (dex)')
    plt.xlim([2000, 8000])
    plt.ylim([-1, 6])
    plt.gca().invert_xaxis()
    plt.gca().invert_yaxis()
    plt.legend(
        (('Isochrone [Fe/H] = ' + str(feh) + ', [a/Fe] = ' + str(afe) +
          ', Age = ' + str(age) + ' Gyr'), ('Selected Stellar Parameters')),
        loc='upper left',
        fontsize='small')
    #    plt.text(4750,0.5,('Isochrone [Fe/H] = ' + str(feh) + '\n[a/Fe] = '+ str(afe) \
    #    + '\nAge = ' + str(age) + ' Gyr'))
    plt.show()

    #---------------------------------
    # PRINT SOME INFORMATION
    #---------------------------------

    print('Isochrone: [Fe/H] = ' + str(feh))
    print('           [a/Fe] = ' + str(afe))
    print('           Age = ' + str(age) + ' Gyr')
    print('STELLAR PARAMETERS OF: ' + str(n_ms + n_rg + 1) + ' STARS IN THE')
    print('OUTPUT FILE: ' + fileout)
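
A hedged usage sketch (the parameter values are hypothetical; gettracks must be able to locate the matching isochrone files):

stpars(n_ms=9, n_rg=6, feh=0.0, afe=0.0, age=10.0, iso='PADOVA')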
예제 #57
0
lst_id = []
lst_field = []
lst_ra = []
lst_dec = []
lst_z = []
lst_lmass = []
lst_uv = []
lst_vj = []

#getting the necessary data for lists, as well as calculating uv and vj#
for i in range(len(data_color)):
    gal = data_color[i]
    gal_info = data_info[i]
    uv = -2.5 * np.log10(gal['L153'] / gal['L155'])
    vj = -2.5 * np.log10(gal['L155'] / gal['L161'])

    lst_id.append(gal_info['id'])
    lst_field.append(gal_info['field'])
    lst_ra.append(gal_info['ra'])
    lst_dec.append(gal_info['dec'])
    lst_z.append(gal_info['z_peak'])
    lst_lmass.append(gal_info['lmass'])
    lst_uv.append(uv)
    lst_vj.append(vj)

#writing table#
table = Table(
    [lst_id, lst_field, lst_ra, lst_dec, lst_z, lst_lmass, lst_uv, lst_vj],
    names=['id', 'field', 'ra', 'dec', 'z', 'lmass', 'uv', 'vj'])
ascii.write(table, 'color_values.dat')
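
The per-row loop above can be replaced by column arithmetic, which is shorter and faster; a sketch assuming data_color and data_info are astropy tables with matching rows:

table = Table({'id': data_info['id'],
               'field': data_info['field'],
               'ra': data_info['ra'],
               'dec': data_info['dec'],
               'z': data_info['z_peak'],
               'lmass': data_info['lmass'],
               'uv': -2.5 * np.log10(data_color['L153'] / data_color['L155']),
               'vj': -2.5 * np.log10(data_color['L155'] / data_color['L161'])})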
예제 #58
0
ew4 = []
ew5 = []
for i in ew_list:
    ew1.append(np.mean(np.array(i[0])))
    ew2.append(np.mean(np.array(i[1])))
    ew3.append(np.mean(np.array(i[2])))
    ew4.append(np.mean(np.array(i[3])))
    ew5.append(np.mean(np.array(i[4])))

cont1 = []
cont2 = []
cont3 = []
cont4 = []
cont5 = []
for i in cont_lst:
    cont1.append(np.mean(np.array(i[0])))
    cont2.append(np.mean(np.array(i[1])))
    cont3.append(np.mean(np.array(i[2])))
    cont4.append(np.mean(np.array(i[3])))
    cont5.append(np.mean(np.array(i[4])))

exp_data_ew = Table(
    [date_list, ew1, ew2, ew3, ew4, ew5],
    names=['date', 'HeI4 ew', 'CaII ew', 'FeII3 ew', 'OI2 ew', 'CI ew'])
ascii.write(exp_data_ew, sne_name + '.4.ew.txt', overwrite=True)

exp_data_cont = Table(
    [date_list, cont1, cont2, cont3, cont4, cont5],
    names=['date', 'HeI4 cont', 'CaII cont', 'FeII3 cont', 'OI2 cont', 'CI cont'])
ascii.write(exp_data_cont, sne_name + '.4.cont.txt', overwrite=True)
예제 #59
0
def main():

    # get simulation information
    if len(sys.argv) > 1:
        sim_name = sys.argv[1]
        snapnum = int(sys.argv[2])
        shape_type = sys.argv[3]
        sample_name = sys.argv[4]
    else:
        sim_name = 'TNG300-1'  # full physics high-res run
        snapnum = 99  # z=0
        shape_type = 'reduced'  # non-reduced, reduced, iterative
        sample_name = 'sample_3'

    # load a test halo catalog
    from halotools.sim_manager import CachedHaloCatalog
    halocat = CachedHaloCatalog(simname='bolplanck',
                                halo_finder='rockstar',
                                redshift=0.0,
                                dz_tol=0.1,
                                version_name='halotools_v0p4')

    from halotools.empirical_models import HodModelFactory

    # define the central occupation model
    from halotools.empirical_models import TrivialPhaseSpace, Zheng07Cens
    cens_occ_model = Zheng07Cens()
    cens_prof_model = TrivialPhaseSpace()

    # define the satellite occupation model
    from halotools.empirical_models import Zheng07Sats
    from halotools.empirical_models import NFWPhaseSpace, SubhaloPhaseSpace
    from intrinsic_alignments.ia_models.anisotropic_nfw_phase_space import AnisotropicNFWPhaseSpace
    sats_occ_model = Zheng07Sats()
    #sats_prof_model = AnisotropicNFWPhaseSpace()
    sats_prof_model = SubhaloPhaseSpace('satellites',
                                        np.logspace(10.5, 15.2, 15))

    # define the alignment models
    from intrinsic_alignments.ia_models.ia_model_components import CentralAlignment,\
        RadialSatelliteAlignment,  MajorAxisSatelliteAlignment, HybridSatelliteAlignment
    central_orientation_model = CentralAlignment()
    satellite_orientation_model = RadialSatelliteAlignment()

    if sample_name == 'sample_1':
        cens_occ_model.param_dict['logMmin'] = 12.54
        cens_occ_model.param_dict['sigma_logM'] = 0.26

        sats_occ_model.param_dict['alpha'] = 1.0
        sats_occ_model.param_dict['logM0'] = 12.68
        sats_occ_model.param_dict['logM1'] = 13.48

        central_orientation_model.param_dict[
            'central_alignment_strength'] = 0.755
        satellite_orientation_model.param_dict[
            'satellite_alignment_strength'] = 0.279
    elif sample_name == 'sample_2':
        cens_occ_model.param_dict['logMmin'] = 11.93
        cens_occ_model.param_dict['sigma_logM'] = 0.26

        sats_occ_model.param_dict['alpha'] = 1.0
        sats_occ_model.param_dict['logM0'] = 12.05
        sats_occ_model.param_dict['logM1'] = 12.85

        central_orientation_model.param_dict[
            'central_alignment_strength'] = 0.64
        satellite_orientation_model.param_dict[
            'satellite_alignment_strength'] = 0.084
    elif sample_name == 'sample_3':
        cens_occ_model.param_dict['logMmin'] = 11.61
        cens_occ_model.param_dict['sigma_logM'] = 0.26

        sats_occ_model.param_dict['alpha'] = 1.0
        sats_occ_model.param_dict['logM0'] = 11.8
        sats_occ_model.param_dict['logM1'] = 12.6

        central_orientation_model.param_dict[
            'central_alignment_strength'] = 0.57172919
        satellite_orientation_model.param_dict[
            'satellite_alignment_strength'] = 0.01995

    # combine model components
    model_instance = HodModelFactory(
        centrals_occupation=cens_occ_model,
        centrals_profile=cens_prof_model,
        satellites_occupation=sats_occ_model,
        satellites_profile=sats_prof_model,
        centrals_orientation=central_orientation_model,
        satellites_orientation=satellite_orientation_model,
        model_feature_calling_sequence=('centrals_occupation',
                                        'centrals_profile',
                                        'satellites_occupation',
                                        'satellites_profile',
                                        'centrals_orientation',
                                        'satellites_orientation'))

    from intrinsic_alignments.utils.jackknife_observables import jackknife_ed_3d
    from halotools.mock_observables.alignments import ed_3d

    rbins = np.logspace(-1, 1.5, 15)
    rbin_centers = (rbins[:-1] + rbins[1:]) / 2.0

    N = 10
    ed = np.zeros((N, len(rbins) - 1))
    for i in range(0, N):

        # populate mock catalog
        model_instance.populate_mock(halocat)
        print("number of galaxies: ", len(model_instance.mock.galaxy_table))

        mock = model_instance.mock.galaxy_table

        # galaxy coordinates and orientations
        coords = np.vstack((mock['x'], mock['y'], mock['z'])).T

        orientations = np.vstack(
            (mock['galaxy_axisA_x'], mock['galaxy_axisA_y'],
             mock['galaxy_axisA_z'])).T

        ed[i, :] = ed_3d(coords,
                         orientations,
                         coords,
                         rbins,
                         period=halocat.Lbox)

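    # collapse the N realizations: the scatter across them is the quoted error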
    err = np.std(ed, axis=0)
    ed = np.mean(ed, axis=0)

    # save measurements
    fpath = PROJECT_DIRECTORY + 'modelling_illustris/data/'
    fname = sim_name + '_' + str(snapnum) + '-' + sample_name + '_model_ed.dat'
    ascii.write([rbin_centers, ed, err],
                fpath + fname,
                names=['r', 'ed', 'err'],
                overwrite=True)
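
A quick way to inspect the saved measurement (a sketch; assumes matplotlib, plus fpath and fname as defined in main()):

import matplotlib.pyplot as plt
from astropy.io import ascii

meas = ascii.read(fpath + fname)
plt.errorbar(meas['r'], meas['ed'], yerr=meas['err'], fmt='o')
plt.xscale('log')
plt.xlabel('r')
plt.ylabel('ED(r)')
plt.show()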
Example #60
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
from astropy.table import Table

# fits() is assumed to be defined elsewhere in the module (this is an excerpt);
# judging by the calls below, it returns the sorted sample, ECDF values, the
# ECDF callable, a fitted curve, its derivative, and a fit statistic a2

N = 10000  # number of draws per sample
np.random.seed(123)

cos = np.array([np.random.normal(0.0, 0.3) for i in range(N)])
cos0 = [c for c in cos if 0.0 <= c <= 1.0]
cos = np.array([np.random.normal(1.0, 0.3) for i in range(N)])
cos1 = [c for c in cos if 0.0 <= c <= 1.0]

plt.hist(cos0, color='b', alpha=.8)  # truncated sample centered on 0
plt.hist(cos1, color='g', alpha=.8)  # truncated sample centered on 1
plt.show()

cos, y, ecdf, yfit, d_yfit, a2 = fits(cos0)
print('a2=', a2)
ascii.write(Table(np.column_stack([cos, ecdf(cos), y, yfit, d_yfit])),
            '../data/fits_ones',
            names=['cos', 'ecdf', 'y', 'yfit', 'd_yfit'],
            overwrite=True)
plt.step(cos, y, 'b-')
plt.plot(cos, yfit, 'k--')

cos, y, ecdf, yfit, d_yfit, a2 = fits(cos1)
print('a2=', a2)
ascii.write(Table(np.column_stack([cos, ecdf(cos), y, yfit, d_yfit])),
            '../data/fits_zeros',
            names=['cos', 'ecdf', 'y', 'yfit', 'd_yfit'],
            overwrite=True)
plt.step(cos, y, 'g-')
plt.plot(cos, yfit, 'k--')
plt.show()
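
The two samples above are rejection-filtered normals kept inside [0, 1]. As a sketch, the same construction can draw from the truncated distribution directly via scipy's truncated normal (scipy is an assumption here, not part of the original):

import numpy as np
from scipy.stats import truncnorm

def sample_truncated_normal(loc, scale, low=0.0, high=1.0, size=10000, seed=None):
    # truncnorm expects its bounds in standardized units
    a, b = (low - loc) / scale, (high - loc) / scale
    return truncnorm.rvs(a, b, loc=loc, scale=scale, size=size, random_state=seed)

cos0_alt = sample_truncated_normal(0.0, 0.3, seed=123)  # analogue of cos0
cos1_alt = sample_truncated_normal(1.0, 0.3, seed=123)  # analogue of cos1

Unlike the rejection loop, this returns exactly size draws, so the two samples come out with equal length.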