Code example #1
def VALD(elem=None, xmin=3200., xmax=4800., outputdir=None):
    """
    Request linelists from VALD for each ion separately within a specific
    wavelength range.

    If no elements are given, this function returns lines for all of them.

    @param elem: list of ions, e.g. ['CI','OII']
    @type elem: list of str
    @param xmin: lower boundary of the wavelength range in which the
    spectral lines are searched
    @type xmin: float
    @param xmax: upper boundary of the wavelength range
    @type xmax: float
    @param outputdir: output directory chosen by the user; if None, the
    selected lines are returned instead of written to file
    @type outputdir: str
    """
    if elem is None:
        files = sorted(config.glob('VALD_individual', 'VALD_*.lijnen'))
        elem = [
            os.path.splitext(os.path.basename(ff))[0].split('_')[1]
            for ff in files
        ]

    all_lines = []
    for i in range(len(elem)):
        print(elem[i])
        filename = config.get_datafile('VALD_individual',
                                       'VALD_' + elem[i] + '.lijnen')
        if not os.path.isfile(filename):
            logger.info('No data for element ' + str(elem[i]))
            return None

        newwav, newexc, newep, newgf = np.loadtxt(filename).T
        lines = np.rec.fromarrays([newwav, newexc, newep, newgf],
                                  names=['wavelength', 'ion', 'ep', 'gf'])
        keep = (xmin <= lines['wavelength']) & (lines['wavelength'] <= xmax)
        #-- 'keep' is a scalar when the file holds a single line; skip in that case
        if not hasattr(keep, '__iter__'):
            continue
        lines = lines[keep]
        if len(lines) and outputdir is not None:
            ascii.write_array(lines,
                              outputdir + 'VALD_' + str(elem[i]) + '_' +
                              str(xmin) + '_' + str(xmax) + '.dat',
                              auto_width=True,
                              header=True,
                              formats=['%.3f', '%.1f', '%.3f', '%.3f'])
        elif len(lines):
            all_lines.append(lines)
        else:
            logger.debug('No lines of ' + str(elem[i]) +
                         ' in given wavelength range')
    #-- np.hstack raises on an empty list, e.g. when everything was written to file
    if not all_lines:
        return None
    return np.hstack(all_lines)
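
Usage: a minimal sketch of calling the function above; the module path is an
assumption about the IvS Python Repository layout, not confirmed by the source.

from ivs.spectra import linelists  # assumed module path

# all CI and OII lines between 4000 and 4500 AA, returned as one record array
lines = linelists.VALD(elem=['CI', 'OII'], xmin=4000., xmax=4500.)
print(lines['wavelength'], lines['gf'])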
Code example #2
def update_info(zp=None):
    """
    Update information in zeropoint file, e.g. after calibration.
    
    Call first L{ivs.sed.model.calibrate} without arguments, and pass the output
    to this function.
    
    @param zp: updated contents from C{zeropoints.dat}
    @type zp: recarray
    """
    zp_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "zeropoints.dat")
    zp_, comms = ascii.read2recarray(zp_file, return_comments=True)
    existing = [str(i.strip()) for i in zp_["photband"]]
    resp_files = sorted(glob.glob(os.path.join(os.path.dirname(os.path.abspath(__file__)), "filters/*")))
    resp_files = [os.path.basename(ff) for ff in resp_files if not os.path.basename(ff) in existing]
    #-- exclude the human-eye response curves; guard the removal so that
    #   list.remove does not raise when they were already filtered out
    for human in ("HUMAN.EYE", "HUMAN.CONES", "CONES.EYE"):
        if human in resp_files:
            resp_files.remove(human)
    if zp is None:
        zp = zp_
        logger.info("No new calibrations; previous information on existing response curves is copied")
    else:
        logger.info("Received new calibrations contents of zeropoints.dat will be updated")

    # -- update info on previously non-existing response curves
    new_zp = np.zeros(len(resp_files), dtype=zp.dtype)
    logger.info("Found {} new response curves, adding them with default information".format(len(resp_files)))
    for i, respfile in enumerate(resp_files):
        new_zp[i]["photband"] = respfile
        new_zp[i]["eff_wave"] = float(eff_wave(respfile))
        new_zp[i]["type"] = "CCD"
        new_zp[i]["vegamag"] = np.nan
        new_zp[i]["ABmag"] = np.nan
        new_zp[i]["STmag"] = np.nan
        new_zp[i]["Flam0_units"] = "erg/s/cm2/AA"
        new_zp[i]["Fnu0_units"] = "erg/s/cm2/AA"
        new_zp[i]["source"] = "nan"
    zp = np.hstack([zp, new_zp])
    sa = np.argsort(zp["photband"])
    ascii.write_array(
        zp[sa],
        "zeropoints.dat",
        header=True,
        auto_width=True,
        comments=["#" + line for line in comms[:-2]],
        use_float="%g",
    )
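
Usage: the docstring says to call ivs.sed.model.calibrate first and pass its
output on; a minimal sketch, assuming update_info lives in ivs.sed.filters
(that module path is an assumption).

from ivs.sed import model, filters  # assumed module paths

zp = model.calibrate()      # recompute the zeropoints
filters.update_info(zp=zp)  # merge them into zeropoints.dat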
Code example #3
File: coralie.py  Project: stpstp/IvSPythonRepository
def search(ID, radius=1., filename=None):
    """
    Retrieve datafiles from the Coralie catalogue.

    We search on coordinates, pulled from SIMBAD. If the star ID is not
    recognised, a string search is performed to match the 'targ name' field
    in the FITS headers.

    Only the s1d_A data are searched.

    @param ID: ID of the star, understandable by SIMBAD
    @type ID: str
    @param radius: search radius around the coordinates (arcminutes)
    @type radius: float
    @param filename: write summary to outputfile if not None
    @type filename: str
    @return: record array with summary information on the observations, as
    well as their location (column 'filename')
    @rtype: numpy rec array
    """
    data = ascii.read2recarray(config.get_datafile(os.path.join('catalogs', 'coralie'),
                                                   'CoralieFullDataOverview.tsv'),
                               splitchar='\t')
    info = sesame.search(ID)
    if info:
        ra, dec = info['jradeg'], info['jdedeg']
        #-- flat-sky angular distance in degrees; radius is given in arcminutes
        keep = np.sqrt((data['ra'] - ra)**2 + (data['dec'] - dec)**2) < radius / 60.
    else:
        #-- no SIMBAD match: fall back to a regular-expression match on the object name
        keep = np.array([re.search(ID, objectn) is not None
                         for objectn in data['object']])

    data = data[keep]

    logger.info('Found %d spectra' % (len(data)))

    if filename is not None:
        ascii.write_array(data, filename, auto_width=True, header=True)
    else:
        return data
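
Usage: a minimal sketch; the target name is a placeholder and the import is
an assumption based on the file/project attribution above.

from ivs.catalogs import coralie  # assumed module path

# all Coralie s1d_A spectra within 5 arcminutes of the SIMBAD position
obs = coralie.search('HD170580', radius=5.)
print(obs['filename'])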
Code example #4
        syn = [calibrator[0]] + list(syn)
        obs = [calibrator[0]] + list(obs) + list(err)

        synthetic.append(tuple(syn))
        observed.append(tuple(obs))

    #-- store in easy to use recarrays
    dtype = [('name', 'a20')] + [(pb.split('.')[-1], 'f8') for pb in photbands]
    synthetic = np.array(synthetic, dtype=dtype)

    dtype = [('name', 'a20')] + [(pb.split('.')[-1] , 'f8') for pb in photbands] +\
          [('e_'+pb.split('.')[-1] , 'f8') for pb in photbands]
    observed = np.array(observed, dtype=dtype)

    #-- write results to file
    ascii.write_array(synthetic, synfile, sep=',')
    ascii.write_array(observed, obsfile, sep=',')

else:
    #-- load results from file
    dtype = [('name', 'a20')] + [(pb.split('.')[-1], 'f8') for pb in photbands]
    synthetic = ascii.read2recarray(synfile, splitchar=',', dtype=dtype)
    dtype = [('name', 'a20')] + [(pb.split('.')[-1] , 'f8') for pb in photbands] +\
          [('e_'+pb.split('.')[-1] , 'f8') for pb in photbands]
    observed = ascii.read2recarray(obsfile, splitchar=',', dtype=dtype)

#-- add some minimal error if none is given observationally
for band in ['e_' + pb.split('.')[-1] for pb in photbands]:
    observed[band] = np.where(observed[band] <= minerror, minerror,
                              observed[band])
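
The loop above floors every error column at minerror. A self-contained numpy
illustration of that clamping pattern (the column name and values are made up):

import numpy as np

minerror = 0.01
observed = np.rec.fromarrays([np.array([0.001, 0.05, 0.0])], names=['e_V'])
for band in ['e_V']:
    observed[band] = np.where(observed[band] <= minerror, minerror, observed[band])
print(observed['e_V'])  # [0.01 0.05 0.01]

np.maximum(observed[band], minerror) would achieve the same in a single call.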
Code example #5
def plot_add_burn(starl, x='q'):
    """
    Add shaded regions for the nuclear burning zones to the current plot.
    """
    burn = starl['eps_nuc']
    burntype = np.zeros(len(burn))
    burntype[(burn <= 1e0)] = 0
    burntype[(1e0 < burn) & (burn <= 1e3)] = 1
    burntype[(1e3 < burn) & (burn <= 1e7)] = 2
    burntype[(1e7 < burn)] = 3
    x = starl[x]
    #-- search for the limits of the burning regions
    limits = [[burntype[0], x[0]]]
    for i in range(1, len(x)):
        if burntype[i] != limits[-1][0]:
            limits[-1].append(x[i])
            limits.append([burntype[i], x[i]])
    limits[-1].append(x[-1])
    alphas = [1.0, 0.5, 0.5, 0.5, 0.5]
    colors = ['k', (1.00, 1.00, 0.0), (1.00, 0.50, 0.0), (1.00, 0.00, 0.0)]
    for limit in limits:
        if limit[0] == 0:
            continue
        index = int(limit[0])
        #-- index runs from 1 to 3, so shift by one for the three-element
        #   label list (the unshifted lookup raised an IndexError for index 3)
        logger.info('Burning between %g-%g (%s,color=%s)' % (limit[1], limit[2],
                    ['Low', 'Mid', 'High'][index - 1], colors[index]))
        out = pl.axvspan(limit[1], limit[2], color=colors[index], alpha=alphas[index])

#}
if __name__ == "__main__":
    data = read_agsm('tempmodel_frq.gsm')
    ascii.write_array(data, 'test.dat', header=True, auto_width=True)
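
Usage: a self-contained sketch that fabricates a stellar structure profile and
overlays the burning zones; the profile values are invented, and plot_add_burn
is assumed to be defined as above with its module-level np, pl and logger.

import numpy as np
import pylab as pl

# hypothetical profile: mass coordinate q, and a nuclear energy generation
# rate eps_nuc dropping from 1e8 in the core to ~0 at the surface
q = np.linspace(0., 1., 200)
eps_nuc = 10.0**np.linspace(8., -2., 200)
starl = np.rec.fromarrays([q, eps_nuc], names=['q', 'eps_nuc'])

pl.plot(q, np.log10(eps_nuc), 'k-')
plot_add_burn(starl, x='q')
pl.xlabel('q')
pl.ylabel('log eps_nuc')
pl.show()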
Code example #6
def update_grid(gridfile, responses, threads=10):
    """
    Add passbands to an existing grid.
    """
    shutil.copy(gridfile, gridfile + '.backup')
    hdulist = pyfits.open(gridfile, mode='update')
    existing_responses = set(list(hdulist[1].columns.names))
    responses = sorted(list(set(responses) - existing_responses))
    if not len(responses):
        hdulist.close()
        print "No new responses to do"
        return None
    law = hdulist[1].header['REDLAW']
    units = hdulist[1].header['FLUXTYPE']
    teffs = hdulist[1].data.field('teff')
    loggs = hdulist[1].data.field('logg')
    ebvs = hdulist[1].data.field('ebv')
    zs = hdulist[1].data.field('z')
    rvs = hdulist[1].data.field('rv')
    vrads = hdulist[1].data.field('vrad')
    names = hdulist[1].columns.names

    N = len(teffs)
    index = np.arange(N)

    output = np.zeros((len(responses), len(teffs)))
    print(N)

    #--- PARALLEL PROCESS
    def do_process(teffs, loggs, ebvs, zs, rvs, index, arr):
        output = np.zeros((len(responses) + 1, len(teffs)))
        c0 = time.time()
        N = len(teffs)
        for i, (teff, logg, ebv, z, rv,
                ind) in enumerate(zip(teffs, loggs, ebvs, zs, rvs, index)):
            if i % 100 == 0:
                dt = time.time() - c0
                print "ETA", index[0], (N - i) / 100. * dt / 3600., 'hr'
                c0 = time.time()
            #-- get model SED and absolute luminosity
            model.set_defaults(z=z)
            wave, flux = model.get_table(teff, logg)
            Labs = model.luminosity(wave, flux)
            flux_ = reddening.redden(flux,
                                     wave=wave,
                                     ebv=ebv,
                                     rtype='flux',
                                     law=law,
                                     Rv=rv)
            #-- calculate synthetic fluxes
            output[0, i] = ind
            output[1:, i] = model.synthetic_flux(wave,
                                                 flux_,
                                                 responses,
                                                 units=units)
        arr.append(output)

    #--- PARALLEL PROCESS
    c0 = time.time()

    manager = Manager()
    arr = manager.list([])

    all_processes = []
    for j in range(threads):
        all_processes.append(Process(target=do_process,
                                     args=(teffs[j::threads], loggs[j::threads],
                                           ebvs[j::threads], zs[j::threads],
                                           rvs[j::threads], index[j::threads],
                                           arr)))
        all_processes[-1].start()
    for p in all_processes:
        p.join()

    #-- reassemble the chunks, restore the original row order, and drop the
    #   bookkeeping index row
    output = np.hstack([res for res in arr])
    del arr
    sa = np.argsort(output[0])
    output = output[:, sa][1:]
    ascii.write_array(np.rec.fromarrays(output, names=responses),
                      'test.temp',
                      header=True)
    #-- copy old columns and append new ones
    cols = []
    for i, photband in enumerate(responses):
        cols.append(pyfits.Column(name=photband, format='E', array=output[i]))
    #-- create new table
    table = pyfits.new_table(pyfits.ColDefs(cols))
    table = pyfits.new_table(hdulist[1].columns + table.columns,
                             header=hdulist[1].header)
    hdulist[1] = table
    hdulist.close()
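
A minimal, hypothetical driver for the function above: the grid file name and
passband names are placeholders, and the call relies on the surrounding IvS
modules (model, reddening, ascii) being importable.

if __name__ == '__main__':
    # add two Johnson passbands to an existing grid, using 4 worker processes
    update_grid('sed_grid.fits', ['JOHNSON.V', 'JOHNSON.B'], threads=4)

The __main__ guard matters because multiprocessing re-imports the module in
the worker processes on spawn-based platforms.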