Example #1
def read_valentini():
    data1= asciitable.read(os.path.join(datadir,'arcs_a.dat'),
                           readme="../data/ReadMe",
                           Reader=asciitable.cds.Cds,
                           guess=False,
                           fill_values=[('', '-999')])
    data2= asciitable.read(os.path.join(datadir,'arcs_b.dat'),
                           readme="../data/ReadMe",
                           Reader=asciitable.cds.Cds,
                           guess=False,
                           fill_values=[('', '-999')])
    #Remove duplicates
    indxarray1= numpy.zeros(len(data1),dtype='bool')+True
    hipnums= []
    for ii in range(len(data1)):
        if data1['HIP'][ii] in hipnums:
            indxarray1[ii]= False
            continue
        hipnums.append(data1['HIP'][ii])
    data1= data1[indxarray1]
    indxarray2= numpy.zeros(len(data2),dtype='bool')+True
    hipnums= []
    for ii in range(len(data2)):
        if data2['HIP'][ii] in hipnums:
            indxarray2[ii]= False
            continue
        hipnums.append(data2['HIP'][ii])
    data2= data2[indxarray2]
    return data2
Example #2
def add_keys(file_list, keys=''):
    """Add keywords to a list of FITS files.

    `file_list` should have one file per line.

    `keys` can be in any format easily readable by
    asciitable. There needs to be a header line followed by, on each
    line, a FITS keyword and its value.

    All keywords in the `keys` file are added to all of the files
    in `file_list`.

    A sample `keys` file is:
        Keyword   Value
        OBJCTDEC '+49 49 14'
        OBJCTRA '09 02 21'

    """
    files = at.read(file_list)
    key_table = at.read(keys)
    for fil in files:
        fil_fits = pyfits.open(fil[0],mode='update')
        hdr = fil_fits[0].header
        for key, val in key_table:
            print key, val
            hdr.update(key, val)
        fil_fits.close()
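A minimal usage sketch, assuming `files.txt` lists one FITS file per line and `new_keys.txt` follows the keyword/value layout shown in the docstring (both file names are hypothetical):

# Hypothetical inputs; every keyword in new_keys.txt is written to every listed FITS header.
add_keys('files.txt', keys='new_keys.txt')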
Example #3
def fetch_12():

    base_path = '/vega/astro/users/sd2706/'
    if os.path.exists(base_path):
        my_bds = BDList('/vega/astro/users/sd2706/Ldwarf_sample2014.csv')
        # Use Burgasser+08's spectrum from the SpeX Prism Library
        oldfile = at.read('/vega/astro/users/sd2706/2MASSWJ0208+25_spex.dat')
    else:
        my_bds = BDList('/home/stephanie/ldwarfs/Ldwarf_sample2014.csv')
        # Use Burgasser+08's spectrum from the SpeX Prism Library
        oldfile = at.read('/home/stephanie/ldwarfs/summerAMNH/LdwarfSpectra/2MASSWJ0208+25_spex.dat')


    old_spectrum = {'wavelength':oldfile['col1']*u.um,
        'flux':oldfile['col2']*u.dimensionless_unscaled,
        'unc':oldfile['col3']*u.dimensionless_unscaled}
    my_bds.brown_dwarfs['U13079'].specs['low'] = old_spectrum

    # select appropriate spectrum for GD 165 B //Should no longer be needed
    #source_id = my_bds.brown_dwarfs['U40039'].sid
    #my_bds.brown_dwarfs['U40039'].specs['low'] = spectrum_query(source_id,
    #     '','',filename='spex_prism_G196-3B_U40039.fits')


    return my_bds
Example #4
def test_first_sosa_update():
    # for a plain update, sosa or not, nothing should be deleted
    # and new entries should be inserted
    db_loads = asciitable.read('t/pre_sosa_db_loads.txt')
    want_load_rdb = asciitable.read('t/first_sosa.rdb')
    want_loads = update_load_seg_db.rdb_to_db_schema(want_load_rdb)
    to_delete, to_insert = update_load_seg_db.find_load_seg_changes(want_loads, db_loads, exclude=['id']) 
    assert len(to_delete) == 0
    assert len(to_insert) == len(want_loads[want_loads['datestart'] >= '2011:335:13:44:41.368'])
    assert to_insert[0]['datestart'] == '2011:335:13:44:41.368'
Example #5
def test_load_update_truncate():
    # if an old entry is now truncated, it and all after should be replaced
    db_loads = asciitable.read('t/pre_sosa_db_loads.txt')
    want_load_rdb = asciitable.read('t/first_sosa.rdb')
    want_loads = update_load_seg_db.rdb_to_db_schema(want_load_rdb)
    want_loads[10]['datestop'] = '2011:332:00:00:00.000'
    to_delete, to_insert = update_load_seg_db.find_load_seg_changes(want_loads, db_loads, exclude=['id']) 
    assert len(to_delete) == len(db_loads[db_loads['datestart'] >= want_loads[10]['datestart']])
    assert len(to_insert) == len(want_loads[want_loads['datestart'] >= want_loads[10]['datestart']])
    assert to_delete[0]['datestart'] == want_loads[10]['datestart']
    assert to_insert[0]['datestart'] == want_loads[10]['datestart']
Example #6
def calc_final_eqws(file1,file2):
    e1 = at.read(file1)
    e2 = at.read(file2)

    eqws = (e1["eqw"] + e2["eqw"]) / 2.0

    diff = e1["eqw"] - e2["eqw"]

    u_eqws = np.sqrt(e2["u_eqw"]**2 + diff**2)

    return eqws,u_eqws
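A minimal usage sketch, assuming both input files carry `eqw` and `u_eqw` columns readable by asciitable (the file names are hypothetical):

# Hypothetical two-epoch measurement files with 'eqw' and 'u_eqw' columns.
eqws, u_eqws = calc_final_eqws('eqws_epoch1.dat', 'eqws_epoch2.dat')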
Example #7
def test_load_update_sosa_truncate():
    # if an old entry is now truncated, it and all after should be replaced
    # but the first entry of a sosa pair should not be removed if it is before the difference
    db_loads = asciitable.read('t/post_sosa_db_loads.txt')
    want_load_rdb = asciitable.read('t/second_sosa.rdb')
    want_loads = update_load_seg_db.rdb_to_db_schema(want_load_rdb)
    want_loads[12]['datestop'] = '2011:338:00:00:00.000'
    to_delete, to_insert = update_load_seg_db.find_load_seg_changes(want_loads, db_loads, exclude=['id']) 
    assert len(to_delete) == 11
    assert len(to_insert) == 11
    assert to_delete[0]['datestart'] == want_loads[12]['datestart']
    assert to_insert[0]['datestart'] == want_loads[12]['datestart']
Example #8
def test_load_update_weird():
    # for a difference in the past on any column, the entry
    # should be replaced
    db_loads = asciitable.read('t/pre_sosa_db_loads.txt')
    db_loads[5]['load_segment'] = 'CL324:0120'
    want_load_rdb = asciitable.read('t/first_sosa.rdb')
    want_loads = update_load_seg_db.rdb_to_db_schema(want_load_rdb)
    to_delete, to_insert = update_load_seg_db.find_load_seg_changes(want_loads, db_loads, exclude=['id']) 
    assert len(to_delete) == len(db_loads[db_loads['datestart'] >= '2011:324:01:05:40.930'])
    assert len(to_insert) == len(want_loads[want_loads['datestart'] >= '2011:324:01:05:40.930'])
    assert to_delete[0]['datestart'] == '2011:324:01:05:40.930'
    assert to_insert[0]['datestart'] == '2011:324:01:05:40.930'
Example #9
 def read_depthofcoverage(self, prefix, indir="."):
     """read depthofcoverage tables"""
     ## Read sample summary - skip last line since lacks columns
     infile = os.path.abspath(os.path.join(indir, prefix + "." + self.ext["sample_summary"]))
     with open(infile) as fp:
         self.data["sample_summary"] = asciitable.read(infile, delimiter="\t", guess=False, data_end=-1)
         
     ## Read rest of tables
     for k in self.ext_keys[0:(len(self.ext_keys) - 1)]:
         infile = os.path.abspath(os.path.join(indir, prefix + "." + self.ext[k]))
         tab = asciitable.read(self._sniff_table(infile), delimiter="\t", guess=False)
         self.data[k] = tab
Example #10
def convertCosmos(inName, outName):
    inFile = open(inName, "r")
    table = asciitable.read(inFile, Reader=asciitable.FixedWidthTwoLine, delimiter='|', header_start=0,
                            data_start=4, data_end=-1)

    schema = pyfits.ColDefs([column for column in MAPPING.values()])
    outHdu = pyfits.new_table(schema, nrows=len(table))
    outData = outHdu.data

    for name, column in MAPPING.items():
        outData.field(column.name)[:] = table.field(name)

    for f in FILTERS:
        mag = outData.field(f)
        err = outData.field(f + "_err")
        indices = numpy.where(numpy.logical_or(mag < 0, mag > 50))
        mag[indices] = numpy.NAN
        err[indices] = numpy.NAN

    outHdu.writeto(outName, clobber=True)
    print "Wrote %s" % outName
    print "To create an astrometry.net catalogue, execute:"
    outBase = outName.replace(".fits", "")
    print "build-index -i %s -o %s_and_0.fits -I 77770 -P0 -n 100 -S r -L 20 -E -M -j 0.4" % (inName, outBase)
    for i in range(1, 5):
        print "build-index -1 %s_and_0.fits -o %s_and_%d.fits -I 7777%d -P%d -n 100 -S r -L 10 -E -M -j 0.4 &" % (outBase, outBase, i, i, i)
Example #11
def read_table_rg(filename):
    # astropy.io.ascii.convert_numpy
    converters = { 'col1': [asciitable.convert_list(str)],
    'col2': [asciitable.convert_list(float)],
    'col3': [asciitable.convert_list(float)],
    'col4': [asciitable.convert_list(float)],
    'col5': [asciitable.convert_list(float)],
    'col6': [asciitable.convert_list(str)],
    'col7': [asciitable.convert_list(str)],
    'col8': [asciitable.convert_list(str)],
    'col9': [asciitable.convert_list(str)],
    'col10': [asciitable.convert_list(float)]}


    names = ('POINT_NAME', 'RA_PNT',  'DEC_PNT', 'BACK','NH', 'MOS1FILTER', 'MOS2FILTER', 'PNFILTER', 'EXPOTIME')

    data = asciitable.read(filename, delimiter='|', converters=converters)
    npoint = data.size

    name = data['col1'].tolist()

    back = data['col4']
    nh   = data['col5']

    m1filt = data['col6'].tolist()
    m2filt = data['col7'].tolist()
    pnfilt = data['col8'].tolist()

    timestamp = data['col9']
    area = data['col10']

    return (name, back, nh, m1filt, m2filt, pnfilt, timestamp, area, npoint)
Example #12
def readColHeight():
    minhgt= 2.5                         # Assumed minimum detection limit
    fname = 'column_heights.txt'        # File to read

    table = asciitable.read(fname)      # Read file
    date0 = table.col1                  # extract start date
    time0 = table.col2                  # extract start time
    date1 = table.col3                  # extract end date
    time1 = table.col4                  # extract end time
    hgt   = table.col5                  # extract height
    mass  = table.col6                  # extract mass release
    name  = table.col7                  # extract name/comment
    #TODO check if column 6 is missing, use power law to calc
    # mass if that is the case.
    for i in range(hgt.shape[0]):       # For each column height
    #for h in hgt:
        h=hgt[i]
        if h >=999:                     # Assume error
            msg="column height greater than 999 km found, assuming error code"
            warnings.warn(msg)
            print "Row: %i,     Height: %f",i,h
            raise Exception("NOT SUPPORTED YET, ABORTING")
    # Bundle date and time in same string
    datestr0 = [date0[i]+time0[i] for i in range(date0.shape[0])]
    datestr1 = [date1[i]+time1[i] for i in range(date1.shape[0])]
    # Convert string date/time to python date format
    date0_py  = [datetime.strptime(t,"%Y-%m-%d%H:%M:%S") for t in datestr0]
    date1_py  = [datetime.strptime(t,"%Y-%m-%d%H:%M:%S") for t in datestr1]
    # Make time relative to first start date:
    dateBase = date0_py[0]
    date0_py = [d-dateBase for d in date0_py]
    date1_py = [d-dateBase for d in date1_py]
    return date0_py,date1_py,hgt,mass,name
Example #13
 def print_table(self, data, title=None):
     print("")
     table = asciitable.read(data)
     if title:
         table.title = title
     print(table.table)
     print("")
Example #14
def get_ifot(event_type, start=None, stop=None, props=[], columns=[], timeout=TIMEOUT, types={}):
    start = DateTime('1998:001' if start is None else start)
    stop = DateTime(stop)
    event_props = '.'.join([event_type] + props)

    params = odict(r='home',
                   t='qserver',
                   format='tsv',
                   tstart=start.date,
                   tstop=stop.date,
                   e=event_props,
                   ul='7',
                   )
    if columns:
        params['columns'] = ','.join(columns)

    # Get the TSV data for the iFOT event table
    url = ROOTURL + URLS['ifot']
    response = requests.get(url, auth=get_auth(), params=params, timeout=timeout)

    text = response.text.encode('ascii', 'ignore')
    text = re.sub(r'\r\n', ' ', text)
    lines = [x for x in text.split('\t\n') if x.strip()]

    converters = {key: [asciitable.convert_numpy(getattr(np, type_))]
                  for key, type_ in types.items()}
    dat = asciitable.read(lines, Reader=asciitable.Tab, guess=False, converters=converters)
    return dat
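A minimal usage sketch (the event type, property, and dtype below are hypothetical; `types` maps column names to numpy dtype strings that get wrapped with asciitable.convert_numpy):

# Hypothetical iFOT query forcing the TSTART column to float64.
dat = get_ifot('LOADSEG', start='2012:001', stop='2012:030',
               props=['NAME'], types={'TSTART': 'float64'})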
Example #15
def texes2fits(file):

    detector_shape = (256,256)
    data = np.fromfile(file,dtype='>i4')
    header_raw = at.read(file+'.hd',delimiter='=')
        
    header_dict = {}
    for pair in header_raw:
        key = pair[0]
        try:
            value=float(pair[1])
        except:
            value=pair[1]
            
        header_dict[key] = value    
    
    nnods = header_dict['nnod']
    try:
        cube = np.reshape(-data,(nnods*2,detector_shape[0],detector_shape[1]))
    except ValueError:
        print "Failed reading ", file
        return

    hdu = pf.PrimaryHDU(cube)
    hdu.header.extend(header_dict.items())
#    for key in header_dict.keys():
#        hdu.header[key] = header_dict[key]
    
    hdu.writeto(file+'.fits',clobber=True)
    
    print "successfully read ", file
Example #16
def readRave():
    data= asciitable.read(_DATAFILE,
                          readme=_DATAREADME,
                          Reader=asciitable.cds.Cds,
                          guess=False,
                          fill_values=[('', '-999')])
    return data
Example #17
    def vivado_resources(self, report_file):
        with open(report_file, 'r') as fp:
            report_data = fp.read()
            report_data = report_data.split('\n\n')
            report = dict()
            section = None
            for d in report_data:
                match = re.search(r'\n-+$', d)
                if match is not None:
                    match = re.search(r'\n?[0-9\.]+ (.*)', d)
                    if match is not None:
                        section = match.groups()[0]
                if d.startswith('+--'):
                    if section is not None:
                        # cleanup the table
                        d = re.sub(r'\+-.*-\+\n', '', d)
                        d = re.sub(r'\+-.*-\+$', '', d)
                        d = re.sub(r'^\|\s+', '', d, flags=re.M)
                        d = re.sub(r'\s\|\n', '\n', d)

                        report[section.lower()] = asciitable.read(
                            d,
                            delimiter='|',
                            guess=False,
                            comment=r'(\+.*)|(\*.*)',
                            numpy=False)

        return report
Example #18
def check_pop(sample_list, user_choices):
    """
    Check the number of objects of each type in a given sample.

    input: sample_list, str
           list of objects raw data files.

           user_choices, dict
           output from read_user_input

    output: pop, dict
            keys are types and values are the number of objects in the sample
    """
    # read raw data files names 
    flist = asciitable.read(sample_list)

    # count types
    pop = {}
    for name in flist:
        user_choices['path_to_lc'] = [name[0]]
        raw = read_snana_lc(user_choices)
        if raw['SIM_NON1a:'][0] not in pop.keys():
            pop[raw['SIM_NON1a:'][0]] = 1
        else:
            pop[raw['SIM_NON1a:'][0]] = pop[raw['SIM_NON1a:'][0]] + 1

    return pop
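A minimal usage sketch, assuming `sample_list.txt` lists one raw SNANA light-curve file per line and that `read_user_input` takes the path to a user-input file (both names are hypothetical):

user_choices = read_user_input('user.input')       # hypothetical input file
pop = check_pop('sample_list.txt', user_choices)   # counts keyed by SIM_NON1a type code
print(pop)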
Example #19
def main(args=None):
    args = get_args()
    tank_limit = 93.0
    files = glob('*-*-{}.dat'.format(args.tank_start))
    print files

    dats = []
    for fn in files:
        dat = asciitable.read(fn,
                              guess=False,
                              Reader=asciitable.NoHeader,
                              names=['p0', 'p1', 'p2', 'p3', 't_max', 't_end'])
        dats.append(dat)

    colds = np.hstack([dat['p3'] for dat in dats])
    hots = np.hstack([dat['p0'] + dat['p2'] for dat in dats])
    t_maxes = np.hstack([dat['t_max'] for dat in dats])
    t_ends = np.hstack([dat['t_end'] for dat in dats])

    ok_colds = []
    ok_hots = []
    n_bin = 20
    bins = np.linspace(0., 1., n_bin + 1)
    t_50 = np.zeros((n_bin, n_bin), dtype=np.float) + 70.0
    t_90 = np.zeros((n_bin, n_bin), dtype=np.float) + 70.0
    frac_bad = np.zeros((n_bin, n_bin), dtype=np.float) - 0.5

    for x0, x1 in zip(bins[:-1], bins[1:]):
        ok_colds.append((colds > x0) & (colds <= x1))
        ok_hots.append((hots > x0) & (hots <= x1))

    for i in range(n_bin):
        for j in range(n_bin):
            ok = ok_colds[i] & ok_hots[j]  # select realizations within bin
            t_ends_ok = t_ends[ok]
            if len(t_ends_ok) > 3:
                t_50[i, j], t_90[i, j] = np.percentile(t_ends_ok, [50, 90])
            t_maxes_ok = t_maxes[ok]
            if len(t_maxes_ok) > 0:
                n_bad = np.sum(t_maxes_ok > tank_limit)
                frac_bad[i, j] = n_bad / len(t_maxes_ok)

    x = (bins[1:] + bins[:-1]) / 2.0
    y = x.copy()
    for perc, t, pref in ((50, t_50, 'Median'), (90, t_90, '90%')):
        title = '{} temp after 7 days, start={} F'.format(
            pref, args.tank_start)
        filename = None if args.no_save else 'tank_perc{}_start{}.png'.format(
            perc, args.tank_start)
        plot_t_img_contour(x, y, t, title, filename)

    title = 'Fraction exceeding 93F limit, start={}'.format(args.tank_start)
    filename = None if args.no_save else 'tank_bad_start{}.png'.format(
        args.tank_start)
    plot_frac_bad(x, y, frac_bad, title, filename)

    title = 'blah start={}'.format(args.tank_start)
    filename = None if args.no_save else 'tank_end_10_start{}.png'.format(
        args.tank_start)
    plot_t_50_at_frac_bad_10(args.tank_start, x, y, t_50, title, filename=None)
Example #20
def save_female_height_np():
    import asciitable
    x = asciitable.read(
        '/Users/pdh21/Documents/CFwork/Patient_link/Female_height_2_20_years.txt',
        guess=False,
        delimiter='\t',
        fill_values=[('', '-999')])
    age = []
    height = []
    for i in x:
        age.append(i[0])
        height_tmp = []
        for j in range(1, 12):
            height_tmp.append(i[j])

        height.append(height_tmp)
    age = np.array(age)
    height = np.array(height)

    age = np.append(age, np.arange(240, 960.0, 1))
    height_extra = np.empty((960.0 - 240.0, height.shape[1]))
    print height[-1, :], height.shape, height_extra.shape
    for i in np.arange(0, 960.0 - 240.0, 1):
        print i
        height_extra[i, :] = height[-1, :]
    height = np.concatenate((height, height_extra), axis=0)
    import pylab as plt
    for i in range(0, 11):
        plt.plot(age, height[:, i])
    plt.show()
    np.savez('Female_height', age, height)
Example #21
 def survey_file_import(self,filename):
     """
     A simple function to import the survey data (not a subsample)
     """
     datafile = open(filename,"r")
     self.survey_data = atab.read(datafile.read())
     datafile.close()
Example #22
def get_ECFSPR_data():
    import asciitable
    x = asciitable.read(
        '/Users/pdh21/Documents/CFwork/Patient_link/2008_2009/db0809_protected.txt',
        guess=False,
        delimiter='\t',
        fill_values=[('', '-999')])
    # x=asciitable.read('db0809_protected.txt', guess=False,delimiter='\t',fill_values=[('', '-999')])

    yy = map(int, x['birth_yy'])
    year = map(int, x['year'])
    gender = np.array(map(float, x['gender']))
    mm = map(float, x['birth_mm'])
    dd = map(float, x['birth_dd'])
    ID = map(float, x['ID'])
    bmi = map(float, x['bmiECFSPR'])
    hgt = map(float, x['hgt'])
    mut1 = np.array(x['mut1'])
    mut2 = np.array(x['mut2'])
    age_dia = np.array(x['age_dia'])
    ID = np.array(ID)
    BMI = np.array(bmi)
    hgt = np.array(hgt)
    year = np.array(year)
    dob_j = np.empty_like(yy)
    for i in range(0, len(yy)):
        dob_j[i] = ((yy[i] - 1900) * 12) + mm[i]
    # indices in dob_j for each year

    ind_2008, = np.nonzero(np.less(year, 2009))
    ind_2009, = np.nonzero(np.greater(year, 2008))
    return BMI, hgt, ID, ind_2008, ind_2009, dob_j, gender, np.array(
        [mut1, mut2]), age_dia
Example #23
def parse_header(header_lines):
    """Parse VOTS header fields from 'header_lines', which should be an
    iterable that returns lines of the VOTS table.  Returns a dict
    of header fields where all but 'description' are in turn a numpy
    recarray table."""
    header = {}
    keywords = ('DESCRIPTION::', 'COOSYS::', 'PARAM::', 'FIELD::')
    key = 'none'
    for line in header_lines:
        line = line.strip()
        if line in keywords:
            key = line[:-2].lower()
            continue
        if key not in header:
            header[key] = []
        header[key].append(line)

    for key, lines in header.items():
        # Flatten description key, otherwise parse lines as a table
        if key == 'description':
            header[key] = '\n'.join(lines)
        else:
            for quotechar in ['"', "'"]:
                try:
                    header[key] = asciitable.read(lines, quotechar=quotechar)
                    break
                except asciitable.InconsistentTableError, error:
                    pass
            else:
                raise asciitable.InconsistentTableError(error)

    return header
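A hedged sketch of the `header_lines` input this parser expects; the section keywords come from the code above, while the description text and field rows are made up:

# Hypothetical VOTS-style header block; each 'KEY::' line starts a new section.
header_lines = [
    'DESCRIPTION::',
    'Example VOTS table',
    'FIELD::',
    'name type unit',
    'ra float deg',
    'dec float deg',
]
header = parse_header(header_lines)  # header['field'] is parsed with asciitable.read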
Example #24
def texes2fits(file):

    detector_shape = (256, 256)
    data = np.fromfile(file, dtype='>i4')
    header_raw = at.read(file + '.hd', delimiter='=')

    header_dict = {}
    for pair in header_raw:
        key = pair[0]
        try:
            value = float(pair[1])
        except:
            value = pair[1]

        header_dict[key] = value

    nnods = header_dict['nnod']
    try:
        cube = np.reshape(-data,
                          (nnods * 2, detector_shape[0], detector_shape[1]))
    except ValueError:
        print("Failed reading ", file)
        return

    hdu = pf.PrimaryHDU(cube)
    hdu.header.extend(header_dict.items())
    #    for key in header_dict.keys():
    #        hdu.header[key] = header_dict[key]

    hdu.writeto(file + '.fits', clobber=True)

    print("successfully read ", file)
Example #25
def test_from_lines(numpy):
    f = 't/simple.txt'
    table = open(f).readlines()
    testfile = get_testfiles(f)
    data = asciitable.read(table, numpy=numpy, **testfile['opts'])
    assert_equal(data.dtype.names, testfile['cols'])
    assert_equal(len(data), testfile['nrows'])
Example #26
	def import_file(self, filename, camera_name):
		h = extract_headers(filename)
		T = asciitable.read(filename)
		table = convert_ascii(T)
		cam = Camera(camera_name, float(h['zp']), float(h['sky']), float(h['scale']), self.name) #create a camera from headers
		cam.add_data_table(table) #call add_table_data in camera
		self.camera_list.append(cam)
		self.ID = h['ID']
Example #27
def interpUVB(model):
    if model=='HM12':
        data=asciitable.read("FIXME")
    elif model=="OHL16": # this is our corrected model to match observations
        data=asciitable.read("FIXME")
    elif model=="P18":
        data=asciitable.read("data/TREECOOL_P18.txt")
    else:
        print('ERROR, model not defined: %s'%(model))
    lz = data['col1']
    fpiHI=spi.interp1d(lz,data['col2'],kind='linear')
    fpiHeI=spi.interp1d(lz,data['col3'],kind='linear')
    fpiHeII=spi.interp1d(lz,data['col4'],kind='linear')
    fphHI=spi.interp1d(lz,data['col5'],kind='linear')
    fphHeI=spi.interp1d(lz,data['col6'],kind='linear')
    fphHeII=spi.interp1d(lz,data['col7'],kind='linear')
    return [lz,fpiHI,fpiHeI,fpiHeII,fphHI,fphHeI,fphHeII]
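A minimal usage sketch (only the 'P18' branch has a real path above; evaluating at a tabulated grid point avoids assuming anything about the units of col1):

lz, fpiHI, fpiHeI, fpiHeII, fphHI, fphHeI, fphHeII = interpUVB('P18')
gammaHI = fpiHI(lz[0])   # HI photoionization rate at the first tabulated point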
Example #28
def test_set_converters(numpy):
    converters = {'zabs1.nh': [asciitable.convert_numpy('int32'),
                               asciitable.convert_numpy('float32')],
                  'p1.gamma': [asciitable.convert_numpy('str')]
                  }
    data = asciitable.read('t/test4.dat', converters=converters, numpy=numpy)
    assert_equal(str(data['zabs1.nh'].dtype), 'float32')
    assert_equal(data['p1.gamma'][0], '1.26764544642')
Example #29
def read_ECFSPR_all():
    import asciitable
    x = asciitable.read(
        '/Users/pdh21/Documents/CFwork/Patient_link/2008_2009/db0809_protected.txt',
        guess=False,
        delimiter='\t',
        fill_values=[('', '-999')])
    return x
Example #30
def test_fill_values_list(numpy):
    f = 't/fill_values.txt'
    testfile = get_testfiles(f)
    data = asciitable.read(f, numpy=numpy, fill_values=[('a','42'),('1','42','a')],
                           **testfile['opts'])
    if numpy:
        assert_true((data.data['a']==[42,42]).all())
    else:
        assert_equal(data['a'],[42,42])
Example #31
def filter_bad_times(msid_self, start=None, stop=None, table=None):
    """Filter out intervals of bad data in the MSID object.

    There are three usage options:

    - Supply no arguments.  This will use the global list of bad times read
      in with fetch.read_bad_times().
    - Supply both ``start`` and ``stop`` values where each is a single
      value in a valid DateTime format.
    - Supply a ``table`` parameter in the form of a 2-column table of
      start and stop dates (space-delimited) or the name of a file with
      data in the same format.

    The ``table`` parameter must be supplied as a table or the name of a
    table file, for example::

      bad_times = ['2008:292:00:00:00 2008:297:00:00:00',
                   '2008:305:00:12:00 2008:305:00:12:03',
                   '2010:101:00:01:12 2010:101:00:01:25']
      msid.filter_bad_times(table=bad_times)
      msid.filter_bad_times(table='msid_bad_times.dat')

    :param start: Start of time interval to exclude (any DateTime format)
    :param stop: End of time interval to exclude (any DateTime format)
    :param table: Two-column table (start, stop) of bad time intervals
    """
    if table is not None:
        bad_times = asciitable.read(table, Reader=asciitable.NoHeader,
                                    names=['start', 'stop'])
    elif start is None and stop is None:
        raise ValueError('filter_times requires 2 args ')
    elif start is None or stop is None:
        raise ValueError('filter_times requires either 2 args '
                         '(start, stop) or no args')
    else:
        bad_times = [(start, stop)]

    ok = np.ones(len(msid_self.times), dtype=bool)
    for start, stop in bad_times:
        tstart = DateTime(start).secs
        tstop = DateTime(stop).secs
        if tstart > tstop:
            raise ValueError("Start time %s must be less than stop time %s"
                             % (start, stop))

        if tstop < msid_self.times[0] or tstart > msid_self.times[-1]:
            continue

        i0, i1 = np.searchsorted(msid_self.times, [tstart, tstop])
        ok[i0:i1 + 1] = False

    colnames = (x for x in msid_self.colnames)
    for colname in colnames:
        attr = getattr(msid_self, colname)
        if isinstance(attr, np.ndarray):
            setattr(msid_self, colname, attr[ok])       
            
Example #32
def plotData():
	""" PURPOSE: Read in the text file and plot the median efficiency """
	medefffn="/Users/matt/projects/CHIRON/EFFICIENCY/data.txt"
	medeff = asciitable.read(medefffn, data_start=0, delimiter=" ")
	pylab.clf()
	pylab.xlabel('Wavelength [$\AA$]')
	pylab.ylabel('Efficiency (%)')
	pylab.plot(medeff.wav, medeff.eff*100.,'ko')
	pylab.savefig('fig_medeff.eps')
Example #33
def plotData():
    """ PURPOSE: Read in the text file and plot the median efficiency """
    medefffn = "/Users/matt/projects/CHIRON/EFFICIENCY/data.txt"
    medeff = asciitable.read(medefffn, data_start=0, delimiter=" ")
    pylab.clf()
    pylab.xlabel('Wavelength [$\AA$]')
    pylab.ylabel('Efficiency (%)')
    pylab.plot(medeff.wav, medeff.eff * 100., 'ko')
    pylab.savefig('fig_medeff.eps')
Example #34
def read_saga():
    """Read the SAGA data"""
    filename= os.path.join('..','data','star_prop_uvby.dat')
    readme= os.path.join('..','data','ReadMe.txt')
    sagadata= asciitable.read(filename,
                              readme=readme,
                              Reader=asciitable.cds.Cds,
                              guess=False,
                              fill_values=[('', '-999')])
    return sagadata
Example #35
 def ascii2pkl(name, phase):
     table = asciitable.read(name, delimiter="\s")
     points = numpy.zeros((len(table), 2))
     for i in xrange(len(table)):
         points[i] = (table.col1[i] - phase, table.col2[i])
     values = table.col3
     out = dict([('points', points), ('values', values)])
     f = open(name + '.pkl', 'wb')
     pickle.dump(out, f, pickle.HIGHEST_PROTOCOL)
     f.close()
Example #36
def plotLC(line, continuum):
    lData = asc.read('values' + line + '.dat', numpy=True)
    cData = asc.read('values' + continuum + '.dat', numpy=True)
    lDist, lFlux, lIvar = lData['dist'], lData['flux'], lData['ivar']
    cDist, cFlux, cIvar = cData['dist'], cData['flux'], cData['ivar']
    if (len(lDist) > len(cDist)):
        dist = lDist
        flux = lFlux - np.interp(dist, cDist, cFlux)
        err = np.sqrt(1.0 / lIvar) + np.sqrt(1.0 / np.interp(dist, cDist, cIvar))
    else:
        dist = cDist
        flux = np.interp(dist, lDist, lFlux) - cFlux
        err = np.sqrt(1.0 / np.interp(dist, lDist, lIvar)) + np.sqrt(1.0 / cIvar)
    plt.subplot(212)
    plt.xlabel("Distance along the slit (kpc)", fontsize=20)
    plt.ylabel("Flux Difference", fontsize=20)
    plt.tick_params(axis='both', which='major', labelsize=20)
    plt.axhline(0, color='k')
    plt.errorbar(dist, flux, err, marker='o')
Example #37
def plotDataShort():
	""" PURPOSE: Read in the text file with the removed "kinks"
	and replot """
	medefffn="/Users/matt/projects/CHIRON/EFFICIENCY/data_short.txt"
	#medeff = asciitable.read(medefffn,delimiter=" ", data_start=2)
	medeff = asciitable.read(medefffn, data_start=0, delimiter="\s")
	pylab.clf()
	pylab.xlabel('Wavelength [$\AA$]')
	pylab.ylabel('Efficiency (%)')
	pylab.plot(medeff.wav, medeff.eff*100.,'ko')
Example #38
def plotDataShort():
    """ PURPOSE: Read in the text file with the removed "kinks"
	and replot """
    medefffn = "/Users/matt/projects/CHIRON/EFFICIENCY/data_short.txt"
    #medeff = asciitable.read(medefffn,delimiter=" ", data_start=2)
    medeff = asciitable.read(medefffn, data_start=0, delimiter="\s")
    pylab.clf()
    pylab.xlabel('Wavelength [$\AA$]')
    pylab.ylabel('Efficiency (%)')
    pylab.plot(medeff.wav, medeff.eff * 100., 'ko')
Example #39
def asciiWrite(stackedFluxes, asciiName):
    flux = stackedFluxes[1] / stackedFluxes[2]
    flux = flux / np.max(flux)  # normalized meanFlux
    ivar = (stackedFluxes[2] ** 2) / (np.max(flux) ** 2)  # propagated inverse variance

    data = Table([stackedFluxes[0], flux, ivar], names=['dist', 'flux', 'ivar'])
    ascii.write(data, ('values' + asciiName + '.dat'))
    readData = asc.read('values' + asciiName + '.dat', numpy=True)
    dist, flux, ivar = readData['dist'], readData['flux'], readData['ivar']
    return dist, flux, ivar
Example #40
def ECFPR_data_BMI():
    import asciitable
    x = asciitable.read(
        '/Users/pdh21/Documents/CFwork/Patient_link/2008_2009/db0809_protected.txt',
        guess=False,
        delimiter='\t',
        fill_values=[('', '-999')])

    yy = map(int, x['birth_yy'])
    year = map(int, x['year'])
    gender = np.array(map(float, x['gender']))
    mm = map(float, x['birth_mm'])
    dd = map(float, x['birth_dd'])
    ID = map(float, x['ID'])
    bmi = map(float, x['bmiECFSPR'])
    hgt = map(float, x['hgt'])

    hgt = np.array(hgt)
    ID = np.array(ID)
    BMI = np.array(bmi)
    year = np.array(year)
    dob_j = np.empty_like(yy)
    for i in range(0, len(yy)):
        dob_j[i] = ((yy[i] - 1900) * 12) + mm[i]
    # indices in dob_j for each year

    # first select patients which have a BMI and height measure
    b = np.array([], dtype=int)
    for i in range(0, len(BMI) - 1):
        if BMI[i] >= 0:
            if hgt[i] > 0:
                b = np.append(b, [[i]])
    BMI = BMI[b]
    ID = ID[b]
    year = year[b]
    gender = gender[b]
    dob_j = dob_j[b]
    hgt = hgt[b]

    # next, select patients where there is a match in the database
    a = np.array([], dtype=int)
    for i in range(0, len(ID) - 1):
        if ID[i] == ID[i + 1]:
            a = np.append(a, [[i], [i + 1]])

    BMI = BMI[a]
    ID = ID[a]
    year = year[a]
    gender = gender[a]
    dob_j = dob_j[a]
    hgt = hgt[a]

    ind_2008, = np.nonzero(np.less(year, 2009))
    ind_2009, = np.nonzero(np.greater(year, 2008))
    return BMI, hgt, ID, ind_2008, ind_2009, dob_j, gender
Example #41
def test_masking_Cds(numpy):
    f = 't/cds.dat'
    testfile = get_testfiles(f)
    data = asciitable.read(f, numpy=numpy, 
                           **testfile['opts'])
    if numpy:
        assert_true(data['AK'].mask[0])
        assert_true(not data['Fit'].mask[0])
    else:
        assert_true(isnan(data['AK'][0]))
        assert_true(not isnan(data['Fit'][0]))
Example #42
 def _load(self):
     filename = 'sedov.csv'
     data = asciitable.read(filename)
     L = data['col1']
     R = data['col2']
     R[np.abs(R) < 1e-8] = 0
     V = data['col3']
     P = data['col4']
     self.R = interp1d(L, R)
     self.V = interp1d(L, V)
     self.P = interp1d(L, P)
Example #43
def load_sb_curve(fname):
    """
    Loads the surface brightness curve from file.

    Arguments:
    - `fname`: file name
    """
    # FIXME: refactor to have a unified reading based on topcatformat

    try:
        # old format: headerstart=9
        data = atab.read(table=fname,
                         data_start=0, data_end=None, header_start=9,
                         delimiter=' ', comment='#', quotechar='"')

    except Exception, e:
        # new format: headerstart=11
        data = atab.read(table=fname,
                         data_start=0, data_end=None, header_start=11,
                         delimiter=' ', comment='#', quotechar='"')
Example #44
def funReadBlast(sOutFileName,all_species,sARGOSID,nNumContig,nTotalAssemblySize,\
                 nN50,nLargestContig,lGC,lSize):
    import asciitable 
    tblBlast = asciitable.read(sOutFileName, Reader = asciitable.NoHeader, delimiter ='\t')
    
    """Get Name of Contigs""" 
    lContigName = list(set(tblBlast['col1']))
    
    tblComplete = [];
    for j in range(0,len(lContigName)):
        print('Processing BLAST Result for Contig')
        iContigIndices = [i for i,x in enumerate(list(tblBlast['col1'])) if x == lContigName[j]]
        lAccesions = list(set(tblBlast[iContigIndices]['col4']))
        
        temp = all_species[j]
    
        lTempRow = [sARGOSID] #0
        lTempRow.append(str(nNumContig)) #1
        lTempRow.append(str(nTotalAssemblySize)) #2
        lTempRow.append(str(nN50)) #3
        lTempRow.append(str(nLargestContig)) #4
        lTempRow.append(lContigName[j]) #5
        lTempRow.append(str(tblBlast[iContigIndices[0]][1])) #6
        lTempRow.append(str(lGC[lSize.index(tblBlast[iContigIndices[0]][1])])) #7
        lTempRow.append(temp[0:-3]) #8
        lTempRow.append('Hit Name') #9
        lTempRow.append('Hit ACC') #10
        lTempRow.append('Hit Score') #11
        lTempRow.append('Hit PIdent') #12
        lTempRow.append('Hit PCOV') #13
        lTempRow.append('Scientific_Names') #14

        for k in range(0,len(lAccesions)):
            iAccesionIndices = [i for i,x in enumerate(list(tblBlast['col4'])) if x == lAccesions[k]]
            iAccesionIndices = list(set(iAccesionIndices).intersection(iContigIndices))
            name = tblBlast[iAccesionIndices[0]]['col5']
            name = name.replace(",","_")
            lTempRow[9] = name
            lTempRow[10] = lAccesions[k]
            lTempRow[-2] = tblBlast[iAccesionIndices[0]]['col9']
            lTempRow[-1] = tblBlast[iAccesionIndices[0]]['col3']
            nPident = 0
            nAsize = 0
            nScore = 0 
            for l in range(0,len(iAccesionIndices)):
                nPident = nPident + tblBlast[iAccesionIndices[l]]['col8']*tblBlast[iAccesionIndices[l]]['col6']
                nAsize = nAsize + tblBlast[iAccesionIndices[l]]['col6']
                nScore = nScore + tblBlast[iAccesionIndices[l]]['col7']
                
            nPident = nPident/nAsize
            lTempRow[11] = nScore
            lTempRow[12] = nPident
            tblComplete.append(lTempRow.copy())
    return tblComplete 
Example #45
def read_multibench_results(read_path):
    sample_sizes = []
    samples_per_sample_size = []

    column_keys = [None, "runtime", "memory", "disk_read", "disk_write", None]

    file1 = open(read_path, 'r')
    lines = file1.readlines()

    table_ind = -1
    table_keys = []
    tables = []

    for line in lines:
        if (line[:2] == "->"):
            table_ind += 1

            table_keys.append(line[2:-1])
            tables.append([])
        else:
            tables[table_ind].append(line)

    multibench_results_per_key = []
    for table in tables:
        key_multibench_results = []
        raw_rows = asciitable.read(table,
                                   Reader=asciitable.FixedWidthTwoLine,
                                   bookend=True,
                                   delimiter="|",
                                   quotechar="'")
        raw_rows = list(raw_rows)
        for raw_row in raw_rows:
            multibench_result = {}
            for ind, column_key in enumerate(column_keys):
                if column_key is not None:
                    multibench_result[column_key] = raw_row[ind]
            sample_sizes.append(raw_row[0])
            samples_per_sample_size.append(
                list(map(lambda word: word.strip(), raw_row[5].split(" "))))
            key_multibench_results.append(multibench_result)

        multibench_results_per_key.append(key_multibench_results)

    multibench_results = []
    for multibench_result_ind in range(len(multibench_results_per_key[0])):
        multibench_result = {}
        for key_ind, key_multibench_results in enumerate(
                multibench_results_per_key):
            multibench_result[table_keys[key_ind]] = key_multibench_results[
                multibench_result_ind]
        multibench_results.append(multibench_result)

    return multibench_results, samples_per_sample_size
Example #46
def convert_extinction(inputfile,outputfile): # from palomar

	table = asciitable.read(inputfile)
	abswave = []
	abstemp = []
	for i in range(len(table)):
		abswave.append(table[i][0])
		abstemp.append(table[i][1])

	with archive.archive(outputfile,'a') as ar:
		ar['/sky/extinction/wave'] = numpy.array(abswave)
		ar['/sky/extinction/power'] = numpy.array(abstemp)
Example #47
def convert_sky_bkg(inputfile,outputfile): # from skybg_50_10

	table = asciitable.read(inputfile)
	wave = []
	power = []
	for i in range(len(table)):
		wave.append(table[i][0])
		power.append(table[i][1])

	with archive.archive(outputfile,'a') as ar:
		ar['/sky/background/wave'] = numpy.array(wave)
		ar['/sky/background/power'] = numpy.array(power)
Example #48
def read_lbt_spec(filename):
    d = asciitable.read(filename, names=('wave', 'flux', 'err'))
    waved = np.array(d['wave'])
    fluxd = np.array(d['flux'])
    g1 = ((waved > 7000) & (waved < 9000))
    waved = waved[g1]
    fluxd = fluxd[g1]
    z = np.polyfit(waved, fluxd, 4)
    f = np.poly1d(z)
    fluxd = fluxd / f(waved)
    #median(fluxd[g1])
    data = Spectrum(waved, fluxd)
    return data
Example #49
File: main.py Project: ngc436/PM
def main():
    data = asciitable.read('ringwaydata.txt', fill_values=('---', None))
    data = data.filled()
    data.dtype.names = ('year', 'mm', 'tmax_deg', 'tmin_deg', 'af_days',
                        'rain_mm', 'sun_hours')
    max_deg = list(data['tmax_deg'])
    max_deg = [float(x) for x in max_deg if x != 'N/A']
    random.seed(42)
    max_deg = [x + 0.01 * random.random() for x in max_deg]
    m = Distribution(max_deg)
    # m.show_histogram()
    # m.kernel_density(1)
    est = Estimator(max_deg)
    est.divide_subset(3)
Example #50
def f2c(res_tem="S", step_tem=100):  # the main program; everything runs in iraf
    # values for the templates
    if res_tem == "h" or res_tem == "H":
        lit = "@template"
        print("Working with template spectra in high resolution")
    else:
        lit = "@template"
        print("Working with template spectra in low resolution")

    # input parameters
    lio = "@objetos"
    lit = "@template"
    spa = "A"
    spb = "B"
    vo = 24.86
    q1 = 0.1
    q2 = 0.7
    dq = 0.05
    sam = "4120-4340,4360-4840,4880-5700"
    flist = "tmp/lit"

    # run fn2
    iraf.fn2(lio=lio,
             lit=lit,
             spa=spa,
             spb=spb,
             vo=vo,
             q1=q1,
             q2=q2,
             dq=dq,
             sam=sam,
             flist=flist)
    os.chdir("tmp")

    # retrieve the fn2 results
    archivos = glob.glob("datT*")  # output files from f2c
    temps = []
    pasos = []
    intensidad = []
    for archivo in archivos:
        temp = float(archivo[4:])
        data = asciitable.read(archivo)
        for paso, inte in zip(
                data["col1"],
                data["col2"]):  # pull the info I need from each file
            #print temp,paso,inte
            temps.append(temp)
            pasos.append(paso)
            intensidad.append(inte)
    return temps, pasos, intensidad
Example #51
def pull_data(sampletable):
    '''
    Function that pulls data from the BDNYC database. Run this in the same directory
    in which you have the Python Database stored!

    Input: 
    ASCII table with unum, type, resolution, instrument, date,
    and filter/order for each data set in the sample.

    Outputs:
    specData: list of arrays with wavelength in position 0, flux is position 1,
    and uncertainty in position 2 if included in database.
    targetinfo: list of the target instance for each object.
    '''
    f=open('BDNYCData.txt','rb')
    bdnyc=pickle.load(f)
    f.close()
    
    objects = asciitable.read(sampletable)

    specData = []
    targetinfo = []

    for x in range(len(objects)):
        target = bdnyc.match_unum(objects.unum[x])
        res = objects.res[x]
        instrument = objects.inst[x]
        filter = objects.filter[x]
        date = objects.date[x]
        if objects.type[x] == 'nir':
            data = bdnyc.targets[target].nir[res][instrument][date][filter]
        if objects.type[x] == 'mir':
            data = bdnyc.targets[target].mir[res][instrument][date][filter]
        if objects.type[x] == 'opt':
            data = bdnyc.targets[target].opt[res][instrument][date][filter]
        wl_array = data['wl']
        flux_array = data['flux']
        objData = [wl_array,flux_array]
        if len(data.keys()) >= 3:
            if data.keys()[2] == 'uncertainty':
                uncertainty_array = data['uncertainty']
                objData.append(uncertainty_array)
        
        targetinfo.append(bdnyc.targets[target])
        specData.append(objData)

    return [specData,targetinfo]
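A hedged sketch of the sample table `pull_data` expects; the column names are inferred from the attribute access above (unum, type, res, inst, date, filter) and every value is hypothetical:

unum    type  res   inst  date        filter
U10000  nir   low   SpeX  2008jan01   JHK
U20000  opt   high  Keck  2009mar15   order38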
Example #52
 def __init__(self, filter=None, sf10=True):
     """
     NAME:
        __init__
     PURPOSE:
        Initialize the Sale14 dust map
     INPUT:
        filter= filter to return the extinction in
        sf10= (True) if True, use the Schlafly & Finkbeiner calibrations
     OUTPUT:
        object
     HISTORY:
        2015-03-08 - Started - Bovy (IAS)
     """
     DustMap3D.__init__(self, filter=filter)
     self._sf10 = sf10
     #Read the maps
     sys.stdout.write('\r' + "Reading Sale et al. (2014) data file ...\r")
     sys.stdout.flush()
     self._saledata = asciitable.read(os.path.join(_saledir, 'Amap.dat'),
                                      readme=os.path.join(
                                          _saledir, 'ReadMe'),
                                      Reader=asciitable.cds.Cds,
                                      guess=False,
                                      fill_values=[('', '-999')])
     sys.stdout.write('\r' + _ERASESTR + '\r')
     sys.stdout.flush()
     # Some summaries
     self._dl = self._saledata['lmax'] - self._saledata['lmin']
     self._db = self._saledata['b_max'] - self._saledata['b_min']
     self._lmin = numpy.amin(self._saledata['lmin'])
     self._lmax = numpy.amax(self._saledata['lmax'])
     self._bmin = numpy.amin(self._saledata['b_min'])
     self._bmax = numpy.amax(self._saledata['b_max'])
     self._ndistbin = 150
     self._ds = numpy.linspace(0.05, 14.95, self._ndistbin)
     # For dust_vals
     self._sintheta = numpy.sin((90. - self._saledata['GLAT']) * _DEGTORAD)
     self._costheta = numpy.cos((90. - self._saledata['GLAT']) * _DEGTORAD)
     self._sinphi = numpy.sin(self._saledata['GLON'] * _DEGTORAD)
     self._cosphi = numpy.cos(self._saledata['GLON'] * _DEGTORAD)
     self._intps = numpy.zeros(
         len(self._saledata),
         dtype='object')  #array to cache interpolated extinctions
     return None
Example #53
	def __init__(self, filename):
		super(AsciiTable, self).__init__(filename, nommap=True)
		import asciitable
		table = asciitable.read(filename)
		logger.debug("done parsing ascii table")
		#import pdb
		#pdb.set_trace()
		#names = table.array.dtype.names
		names = table.dtype.names

		#data = table.array.data
		for i in range(len(table.dtype)):
			name = table.dtype.names[i]
			type = table.dtype[i]
			if type.kind in ["f", "i"]: # only store float and int
				#datagroup.create_dataset(name, data=table.array[name].astype(np.float64))
				#dataset.addMemoryColumn(name, table.array[name].astype(np.float64))
				self.addColumn(name, array=table[name])
Example #54
 def __init__(self, filter=None, sf10=True):
     """
     NAME:
        __init__
     PURPOSE:
        Initialize the Marshall06 dust map
     INPUT:
        filter= filter to return the extinction in
        sf10= (True) if True, use the Schlafly & Finkbeiner calibrations
     OUTPUT:
        object
     HISTORY:
        2013-11-24 - Started - Bovy (IAS)
     """
     DustMap3D.__init__(self, filter=filter)
     self._sf10 = sf10
     #Read the maps
     sys.stdout.write('\r' +
                      "Reading Marshall et al. (2006) data file ...\r")
     sys.stdout.flush()
     self._marshalldata = asciitable.read(
         os.path.join(_marshalldir, 'table1.dat'),
         readme=os.path.join(_marshalldir, 'ReadMe'),
         Reader=asciitable.cds.Cds,
         guess=False,
         fill_values=[('', '-999')])
     sys.stdout.write('\r' + _ERASESTR + '\r')
     sys.stdout.flush()
     #Sort the data on l and then b
     negIndx = self._marshalldata['GLON'] > 180.
     self._marshalldata['GLON'][
         negIndx] = self._marshalldata['GLON'][negIndx] - 360.
     sortIndx = numpy.arange(len(self._marshalldata))
     keyArray = (self._marshalldata['GLON'] +
                 self._marshalldata['GLAT'] / 100.).data
     sortIndx = sorted(sortIndx, key=lambda x: keyArray[x])
     self._marshalldata = self._marshalldata[sortIndx]
     self._dl = 0.25
     self._db = 0.25
     self._intps = numpy.zeros(
         len(self._marshalldata),
         dtype='object')  #array to cache interpolated extinctions
     return None
Example #55
def get_iss_photos():
    """
    Gets public photos from ISS missions and provide data input for tasks
    :arg string size: Size of the image from ISS mission
    :returns: A list of photos.
    :rtype: list
    http://eol.jsc.nasa.gov/sseop/images/ESC/small/ISS030/ISS030-E-67805.JPG
    """
    photos = []

    #asciitable.BaseReader.inconsistent_handler = skip_bad_lines
    lista = asciitable.read('atlasOfNight.csv', guess=False, delimiter=",")

    for i in lista:
        tmpMission = i['ISS-ID'].split('-E-')
        mission = tmpMission[0]
        idIss = tmpMission[1]

        pattern_s = "http://eol.jsc.nasa.gov/sseop/images/ESC/%s/%s/%s-E-%s.JPG" % (
            "small", mission, mission, idIss)
        pattern_b = "http://eol.jsc.nasa.gov/sseop/images/ESC/%s/%s/%s-E-%s.JPG" % (
            'large', mission, mission, idIss)

        linkData = "http://eol.jsc.nasa.gov/scripts/sseop/photo.pl?mission=%s&roll=E&frame=%s" % (
            mission, idIss)
        idISS = idIss

        citylon2 = str(i['nlon'])

        citylat2 = str(i['nlat'])

        f = '50'

        tmp = dict(link_small=pattern_s,
                   link_big=pattern_b,
                   linkData=linkData,
                   idISS=idISS,
                   citylon=citylon2,
                   citylat=citylat2,
                   focal=f)
        photos.append(tmp)
    return photos
Example #56
def pytest_generate_tests(metafunc):
	print metafunc.config.getoption('cmdopt')
	# if this is a linux test job, get the parametrize list from core_test.list
	if metafunc.config.getoption('cmdopt') != "adb":
		if 'testcase_dict' in metafunc.fixturenames:
			test_path = metafunc.config.getoption('targetdir')
			testcase_list=collections.OrderedDict()
			print(test_path)
			testlist=asciitable.read(test_path+'/jenkins/core_test.list')

			for rec in testlist:
				testcase_list[rec[0]]=rec[1]
			pprint.pprint(dict(testcase_list.items()))
			metafunc.parametrize("testcase_dict", testcase_list.keys())
			

	else:
		# If the test job is for Android, use android_testlist to call
		# testcases in testcasses.py
		metafunc.parametrize("testcase_dict", android_testlist)
		print(android_testlist)
Example #57
def save_BMI_data_Female():
    import asciitable

    x = asciitable.read(
        '/Users/pdh21/Documents/CFwork/Patient_link/BMI_Boelle2012.txt',
        guess=False,
        delimiter='\t',
        fill_values=[('', '-999')])
    age_tab = x['age_fe']
    BMI_tab = x['female']
    age = np.arange(3, 38, 1)
    ind_90 = range(0, 10)
    ind_75 = range(11, 24)
    ind_50 = range(25, 39)
    ind_25 = range(40, 51)
    ind_10 = range(52, 64)
    indices = [ind_10, ind_25, ind_50, ind_75, ind_90]
    BMI = np.empty((35.0, 7))
    ii = 0
    for i in indices:
        print age_tab[i], BMI_tab[i]
        f = interp1d(age_tab[i], BMI_tab[i], kind='slinear')
        BMI[:, ii + 1] = f(age)
        ii += 1
    BMI[:, 0] = BMI[:, 1] / 1.5
    BMI[:, -1] = BMI[:, -2] * 1.2

    age = np.append(age, np.arange(38, 80.0, 1))
    BMI_extra = np.empty((80 - 38, BMI.shape[1]))
    for i in np.arange(0, 80 - 38, 1):
        print i
        BMI_extra[i, :] = BMI[-1, :]
    BMI = np.concatenate((BMI, BMI_extra), axis=0)
    import pylab as plt
    for i in range(0, 7):
        plt.plot(age, BMI[:, i])
    plt.show()
    np.savez('Female_BMI', age, BMI)
Example #58
def take_data(Burst='130427A'):
    import asciitable
    GRB = 'GRBs/' + Burst + '.csv'
    burst = asciitable.read(GRB, delimiter=',')
    start_time = []
    stop_time = []
    Flux_erg = []
    for x_1 in burst['start time']:
        start_time.append(x_1)
    for x_2 in burst['stop time']:
        stop_time.append(x_2)
    for y in burst['Energy flux (erg/cm2/s)']:
        Flux_erg.append(y)
    time, time_err = zip(*[(((y - x) / 2) + x, (y - x) / 2)
                           for x, y in zip(start_time, stop_time)])
    Flux_GeV = [624.150934 * x for x in Flux_erg]
    fig2 = pyplot.figure(figsize=(16, 8))
    fig2ax1 = fig2.add_subplot(111)
    fig2ax1.set_ylabel(r'Differential Flux [cm$^{-2}$ s$^{-1}$ GeV$^{-1}$]')
    fig2ax1.set_xlabel('Time [s]')
    fig2ax1.errorbar(time, Flux_GeV, xerr=time_err, fmt='o')
    fig2ax1.loglog(time, Flux_GeV)
    pyplot.show()
Example #59
        ax.set_xlabel('t (Gyr)', fontsize=15)
        ax.legend(loc=4, fontsize=10, frameon=True, facecolor='w')
    ax1.set_ylim(2e5, 2e12)
    ax1.set_ylabel('M* (M$_\odot$)', fontsize=15)
    ax2.set_ylim(1e-3, 1e2)
    ax2.set_ylabel('SFR (M$_\odot$/yr)', fontsize=15)
plt.tight_layout()
plt.show()

# =============================================================================
#  Show Aldo and Ciesla in one Fig.
# =============================================================================
M_today = np.array([9.5, 10.0, 10.5, 10.75, 11.0])
Data_a = {}
for m in M_today.astype('str'):
    table = asc.read('Aldo/galaxies/gal_%s.dat' % m)
    t, z, lgMh, lgMs, SFR = table.col1, table.col2, table.col3, table.col4, table.col5
    data = pd.DataFrame({
        't': t,
        'z': z,
        'lgMh': lgMh,
        'lgMs': lgMs,
        'SFR': SFR
    })
    Data_a['%s' % m] = data

from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import matplotlib.transforms as mtransforms
from matplotlib.patches import FancyBboxPatch
Example #60
import os
import optparse

import asciitable

from context_def import ft, files


def get_options():
    parser = optparse.OptionParser()
    parser.add_option("--dry-run",
                      action="store_true",
                      help="Dry run (no actual file or database updatees)")
    return parser.parse_args()


opt, args = get_options()

filetypes = asciitable.read('Ska/engarchive/filetypes.dat')
if len(args) > 0:
    filetypes = filetypes[filetypes['content'] == args[0].upper()]

for filetype in filetypes:
    # Update attributes of global ContextValue "ft".  This is needed for
    # rendering of "files" ContextValue.
    print filetype.content

    ft['content'] = filetype['content'].lower()
    ft['msid'] = 'TIME'

    # archive files
    if not os.path.exists(files['oldmsid'].abs + '.bak'):
        print 'Skipping', ft[
            'content'], ' because there is no backup from fix_ingest_h5.py'