Example 1
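These snippets are excerpted from a larger module, so their imports are not shown. A plausible shared header is sketched below for reference; the internal module paths (ivs.*) are inferred from the docstrings and may differ in the actual source, and helpers such as search, eff_wave, helcorr and _timestamp2datetime are assumed to live elsewhere in the same package.

# Hypothetical import block for the snippets below; the ivs.* paths are
# assumptions based on the docstrings, not verified against the source tree.
import os
import re
import glob
import time
import shutil
import datetime
import logging

import numpy as np
import pyfits as pf
from multiprocessing import Manager, Process

from ivs.inout import ascii       # read2recarray / write_array helpers
from ivs import config            # datafile lookup (see C{config.py})
from ivs.catalogs import sesame   # SIMBAD name resolution
from ivs.sed import model, reddening

logger = logging.getLogger(__name__)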
def make_list_star(ID,direc=None):
    """
    Mimics HermesTool MakeListStar.

    This should work as input for HermesTool CCFList.py

    The result is a file in the current working directory with name C{ID.list}.
    If you have specified the C{direc} keyword, the file will be written inside
    that directory. Make sure you have write permission.

    The contents of the file are:

    unseq, date-avg, ID, bjd, bvcor, prog_id, exptime, airmass, pmtotal

    @param ID: name of the star, understandable by SIMBAD.
    @type ID: string
    @param direc: directory to write the file into (defaults to current working
    directory)
    @type direc: string
    """
    if direc is None:
        direc = os.getcwd()
    data = search(ID)
    fname = os.path.join(direc,'%s.list'%(ID))
    ascii.write_array([data['unseq'],data['date-avg'],[ID for i in data['unseq']],
                       data['bjd'],data['bvcor'],data['prog_id'],data['exptime'],
                       data['airmass'],data['pmtotal']],fname,axis0='cols')
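A minimal usage sketch, assuming the star is known to SIMBAD and the helpers above resolve (the identifier is purely illustrative):

# Hypothetical call: writes 'HD170580.list' into a scratch directory.
import tempfile

target_dir = tempfile.mkdtemp()
make_list_star('HD170580', direc=target_dir)   # identifier is illustrative
print(os.path.join(target_dir, 'HD170580.list'))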
Example 3
def update_info(zp=None):
    """
    Update information in zeropoint file, e.g. after calibration.
    
    Call first L{ivs.sed.model.calibrate} without arguments, and pass the output
    to this function.
    
    @param zp: updated contents from C{zeropoints.dat}
    @type zp: recarray
    """
    zp_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'zeropoints.dat')
    zp_, comms = ascii.read2recarray(zp_file, return_comments=True)
    existing = [str(i.strip()) for i in zp_['photband']]
    resp_files = sorted(
        glob.glob(
            os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         'filters/*')))
    resp_files = [
        os.path.basename(ff) for ff in resp_files
        if not os.path.basename(ff) in existing
    ]
    #-- skip the special HUMAN.* response curves; guard in case they are absent
    for human in ('HUMAN.EYE', 'HUMAN.CONES', 'CONES.EYE'):
        if human in resp_files:
            resp_files.remove(human)
    if zp is None:
        zp = zp_
        logger.info(
            'No new calibrations; previous information on existing response curves is copied'
        )
    else:
        logger.info(
            'Received new calibrations; contents of zeropoints.dat will be updated'
        )

    #-- update info on previously non-existing response curves
    new_zp = np.zeros(len(resp_files), dtype=zp.dtype)
    logger.info(
        'Found {} new response curves, adding them with default information'.
        format(len(resp_files)))
    for i, respfile in enumerate(resp_files):
        new_zp[i]['photband'] = respfile
        new_zp[i]['eff_wave'] = float(eff_wave(respfile))
        new_zp[i]['type'] = 'CCD'
        new_zp[i]['vegamag'] = np.nan
        new_zp[i]['ABmag'] = np.nan
        new_zp[i]['STmag'] = np.nan
        new_zp[i]['Flam0_units'] = 'erg/s/cm2/AA'
        new_zp[i]['Fnu0_units'] = 'erg/s/cm2/AA'
        new_zp[i]['source'] = 'nan'
    zp = np.hstack([zp, new_zp])
    sa = np.argsort(zp['photband'])
    ascii.write_array(zp[sa],
                      'zeropoints.dat',
                      header=True,
                      auto_width=True,
                      comments=['#' + line for line in comms[:-2]],
                      use_float='%g')
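A sketch of the round trip described in the docstring, assuming ivs.sed.model.calibrate returns the updated zeropoints recarray when called without arguments:

# Hypothetical workflow: recalibrate, then persist the result by rewriting
# zeropoints.dat next to this module.
zp = model.calibrate()   # assumed to return the updated recarray
update_info(zp=zp)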
Example 4
def VALD(elem=None, xmin=3200., xmax=4800., outputdir=None):
    """
  Request linelists from VALD for each ion seperately within a specific wavelength range.

  elem = an array of ions e.g. ['CI','OII'], xmin and xmax: wavelength range in which the spectral lines are searched, outputdir = output directory chosen by the user.

  If no elements are given, this function returns all of them.

  @param elem: list of ions
  @type elem: list of str

  Example usage:

  >>> x = VALD(elem=['CI','OII'],xmin=3000., xmax=4000.)
  CI
  OII
  """
    if elem is None:
        files = sorted(config.glob('VALD_individual', 'VALD_*.lijnen'))
        elem = [
            os.path.splitext(os.path.basename(ff))[0].split('_')[1]
            for ff in files
        ]

    all_lines = []
    for i in range(len(elem)):
        print(elem[i])
        filename = config.get_datafile('VALD_individual',
                                       'VALD_' + elem[i] + '.lijnen')
        if not os.path.isfile(filename):
            logger.info('No data for element ' + str(elem[i]))
            return None

        newwav, newexc, newep, newgf = np.loadtxt(filename).T
        lines = np.rec.fromarrays([newwav, newexc, newep, newgf],
                                  names=['wavelength', 'ion', 'ep', 'gf'])
        keep = (xmin <= lines['wavelength']) & (lines['wavelength'] <= xmax)
        if not hasattr(keep, '__iter__'):
            continue
        lines = lines[keep]
        if len(lines) and outputdir is not None:
            ascii.write_array(lines,
                              outputdir + 'VALD_' + str(elem[i]) + '_' +
                              str(xmin) + '_' + str(xmax) + '.dat',
                              auto_width=True,
                              header=True,
                              formats=['%.3f', '%.1f', '%.3f', '%.3f'])
        elif len(lines):
            all_lines.append(lines)
        else:
            logger.debug('No lines of ' + str(elem[i]) +
                         ' in given wavelength range')
    if not all_lines:
        return None
    return np.hstack(all_lines)
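Note that the output filename is built by plain string concatenation, so outputdir must end with a path separator (or be assembled with os.path.join by the caller). A hedged sketch:

# Hypothetical call: request CI and OII lines between 3000 and 4000 AA and
# write one .dat file per ion; the trailing os.sep is required because the
# filename is concatenated directly onto outputdir. The directory must exist.
VALD(elem=['CI', 'OII'], xmin=3000., xmax=4000.,
     outputdir='linelists' + os.sep)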
Example 5
def search(ID, radius=1., filename=None):
    """
    Retrieve datafiles from the Coralie catalogue.

    We search on coordinates, pulled from SIMBAD. If the star ID is not
    recognised, a string search is performed to match the 'targ name' field in the
    FITS headers.

    Only the s1d_A data are searched.

    @param ID: ID of the star, understandable by SIMBAD
    @type ID: str
    @param radius: search radius around the coordinates (arcminutes)
    @type radius: float
    @param filename: write summary to outputfile if not None
    @type filename: str
    @return: record array with summary information on the observations, as well
    as their location (column 'filename')
    @rtype: numpy rec array
    """
    data = ascii.read2recarray(config.get_datafile(
        os.path.join('catalogs', 'coralie'), 'CoralieFullDataOverview.tsv'),
                               splitchar='\t')
    info = sesame.search(ID)
    if info:
        ra, dec = info['jradeg'], info['jdedeg']
        keep = np.sqrt((data['ra'] - ra)**2 +
                       (data['dec'] - dec)**2) < radius / 60.
    else:
        keep = np.array([re.search(ID, objectn) is not None
                         for objectn in data['object']])

    data = data[keep]

    logger.info('Found %d spectra' % (len(data)))

    if filename is not None:
        ascii.write_array(data, filename, auto_width=True, header=True)
    else:
        return data
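A hedged usage sketch (the identifier is illustrative); the returned record array carries the absolute file locations in its 'filename' column:

# Hypothetical query: all Coralie s1d_A spectra within 2 arcminutes of HD50230.
data = search('HD50230', radius=2.)
if data is not None and len(data):
    for fname in data['filename']:
        print(fname)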
def update_grid(gridfile, responses, threads=10):
    """
    Add passbands to an existing grid.
    """
    shutil.copy(gridfile, gridfile + '.backup')
    hdulist = pf.open(gridfile, mode='update')
    existing_responses = set(list(hdulist[1].columns.names))
    responses = sorted(list(set(responses) - existing_responses))
    if not len(responses):
        hdulist.close()
        print("No new responses to do")
        return None
    law = hdulist[1].header['REDLAW']
    units = hdulist[1].header['FLUXTYPE']
    teffs = hdulist[1].data.field('teff')
    loggs = hdulist[1].data.field('logg')
    ebvs = hdulist[1].data.field('ebv')
    zs = hdulist[1].data.field('z')
    rvs = hdulist[1].data.field('rv')
    vrads = hdulist[1].data.field('vrad')
    names = hdulist[1].columns.names

    N = len(teffs)
    index = np.arange(N)

    print('Computing synthetic photometry for %d models' % N)

    #--- PARALLEL PROCESS
    def do_process(teffs, loggs, ebvs, zs, rvs, index, arr):
        output = np.zeros((len(responses) + 1, len(teffs)))
        c0 = time.time()
        N = len(teffs)
        for i, (teff, logg, ebv, z, rv,
                ind) in enumerate(zip(teffs, loggs, ebvs, zs, rvs, index)):
            if i % 100 == 0:
                dt = time.time() - c0
                print("ETA", index[0], (N - i) / 100. * dt / 3600., 'hr')
                c0 = time.time()
            #-- get model SED and absolute luminosity
            model.set_defaults(z=z)
            wave, flux = model.get_table(teff, logg)
            Labs = model.luminosity(wave, flux)
            flux_ = reddening.redden(flux,
                                     wave=wave,
                                     ebv=ebv,
                                     rtype='flux',
                                     law=law,
                                     Rv=rv)
            #-- calculate synthetic fluxes
            output[0, i] = ind
            output[1:, i] = model.synthetic_flux(wave,
                                                 flux_,
                                                 responses,
                                                 units=units)
        arr.append(output)

    #--- PARALLEL PROCESS
    c0 = time.time()

    manager = Manager()
    arr = manager.list([])

    all_processes = []
    for j in range(threads):
        all_processes.append(Process(target=do_process,args=(teffs[j::threads],\
                                                                loggs[j::threads],\
                                                                ebvs[j::threads],\
                                                                zs[j::threads],\
                                                                rvs[j::threads],\
                                                                index[j::threads],arr)))
        all_processes[-1].start()
    for p in all_processes:
        p.join()

    output = np.hstack([res for res in arr])
    del arr
    sa = np.argsort(output[0])
    output = output[:, sa][1:]
    ascii.write_array(np.rec.fromarrays(output, names=responses),
                      'test.temp',
                      header=True)
    #-- copy old columns and append new ones
    cols = []
    for i, photband in enumerate(responses):
        cols.append(pf.Column(name=photband, format='E', array=output[i]))
    #-- create new table
    table = pf.new_table(pf.ColDefs(cols))
    table = pf.new_table(hdulist[1].columns + table.columns,
                         header=hdulist[1].header)
    hdulist[1] = table
    hdulist.close()
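The parallel section splits the grid rows over the worker processes with stride slicing (teffs[j::threads]), tags every output column with its original row index, and restores the input order afterwards by sorting on that index row. A self-contained sketch of the same split/reassemble pattern, independent of the SED machinery:

# Minimal sketch of the stride-split pattern used above: each worker receives
# every `threads`-th row plus its original indices, and the parent re-sorts
# the concatenated results on the index row to recover the input order.
import numpy as np
from multiprocessing import Manager, Process

def _worker(values, index, results):
    out = np.zeros((2, len(values)))
    out[0] = index        # remember where each row came from
    out[1] = values**2    # stand-in for the synthetic-flux computation
    results.append(out)

if __name__ == '__main__':
    values = np.arange(10.)
    index = np.arange(len(values))
    threads = 3
    manager = Manager()
    results = manager.list()
    procs = [Process(target=_worker,
                     args=(values[j::threads], index[j::threads], results))
             for j in range(threads)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    merged = np.hstack(list(results))
    merged = merged[:, np.argsort(merged[0])]  # back in input order
    assert np.allclose(merged[1], values**2)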
Example 10
def search(ID=None,time_range=None,prog_ID=None,data_type='cosmicsremoved_log',
           radius=1.,filename=None):
    """
    Retrieve datafiles from the Hermes catalogue.

    B{If C{ID} is given}: A string search is performed to match the 'object'
    field in the FITS headers. The coordinates are pulled from SIMBAD. If the
    star ID is recognised by SIMBAD, an additional search is done based only on
    the coordinates. The union of both searches is the final result.

    B{If C{time_range} is given}: The search is confined within the defined
    range. If you only give one day, the search is confined to the observations
    made during the night starting at that day. If C{ID} is not given, all
    observations of the given datatype will be returned.

    B{If C{prog_ID} is given}: The search is performed to match the number of
    the program. Individual stars are not queried in SIMBAD, so any information
    that is missing in the header will not be corrected.

    If you don't give either ID or time_range, the info on all data will be
    returned. This is a huge amount of data, so it can take a while before it
    is returned. Remember that the header of each spectrum is read in and checked.

    Data type can be any of:
        1. cosmicsremoved_log: return log merged without cosmics
        2. cosmicsremoved_wavelength: return wavelength merged without cosmics
        3. ext_log: return log merged with cosmics
        4. ext_wavelength: return wavelength merged with cosmics
        5. raw: raw files (also TECH..., i.e. any file in the raw directory)

    This function needs a C{HermesFullDataOverview.tsv} file located in one
    of the datadirectories from C{config.py}, and subdirectory C{catalogs/hermes}.

    If this file does not exist, you can create it with L{make_data_overview}.

    If you want a summary file with the data you search for, you can give
    C{filename} as an extra keyword argument. The results will be saved to that
    file.

    The columns in the returned record array are listed in L{make_data_overview},
    but are repeated here (capital letters are directly retrieved from the
    fits header, small letters are calculated values. The real header strings
    are all small capitals):

        1.  UNSEQ
        2.  PROG_ID
        3.  OBSMODE
        4.  BVCOR
        5.  OBSERVER
        6.  OBJECT
        7.  RA
        8.  DEC
        9.  BJD
        10. EXPTIME
        11. PMTOTAL
        12. DATE-AVG
        13. OBJECT
        14. airmass
        15. filename

    The column C{filename} contains a string with the absolute location of the
    file. If you need any extra information from the header, you can easily
    retrieve it.

    If BVCOR or BJD are not available from the FITS header, this function will
    attempt to calculate it. It will not succeed if the object's name is not
    recognised by SIMBAD.

    Example usage: retrieve all data on HD50230

    >>> mydata = search('HD50230')

    Keep only those with a long enough exposure time:

    >>> myselection = mydata[mydata['exptime']>500]

    Look up the 'telalt' value in the FITS headers of all these files via a fast
    list comprehension:

    >>> telalts = [pf.getheader(fname)['telalt'] for fname in myselection['filename']]

    Search for all data of HD50230 taken in the night of 22 September 2009:

    >>> data = search('HD50230',time_range='2009-9-22')

    Or within an interval of a few days:

    >>> data = search('HD50230',time_range=('2009-9-22','2009-9-30'))

    Search for all data observed in a given night:

    >>> data = search(time_range='2009-9-22')

    B{Warning:} the heliocentric correction is not calculated when no ID is given,
    so make sure it is present in the header if you need it, or calculate it yourself.

    @param ID: ID of the star, understandable by SIMBAD
    @type ID: str
    @param time_range: range of dates to confine the search to
    @type time_range: tuple of strings of the form '2009-09-23T04:24:35.712556' or '2009-09-23'
    @param data_type: if None, all data will be returned. Otherwise, subset
    'cosmicsremoved', 'merged' or 'raw'
    @type data_type: str
    @param radius: search radius around the coordinates (arcminutes)
    @type radius: float
    @param filename: write summary to outputfile if not None
    @type filename: str
    @return: record array with summary information on the observations, as well
    as their location (column 'filename')
    @rtype: numpy rec array
    """
    #-- read in the data from the overview file, and get SIMBAD information
    #   of the star
    ctlFile = '/STER/mercator/hermes/HermesFullDataOverview.tsv'
    data = ascii.read2recarray(ctlFile, splitchar='\t')
    #data = ascii.read2recarray(config.get_datafile(os.path.join('catalogs','hermes'),'HermesFullDataOverview.tsv'),splitchar='\t')
    keep = np.ones(len(data), bool)
    #-- confined search within given time range
    if time_range is not None:
        if isinstance(time_range,str):
            time_range = _timestamp2datetime(time_range)
            time_range = (time_range,time_range+datetime.timedelta(days=1))
        else:
            time_range = (_timestamp2datetime(time_range[0]),_timestamp2datetime(time_range[1]))
        keep = keep & np.array([(time_range[0]<=_timestamp2datetime(i)<=time_range[1]) for i in data['date-avg']],bool)
        info = None


    #-- search on ID
    if ID is not None:
        info = sesame.search(ID)

        #-- first search on object name only
        ID = ID.replace(' ','').replace('.','').replace('+','').replace('-','').replace('*','')
        match_names = np.array([objectn.replace(' ','').replace('.','').replace('+','').replace('-','').replace('*','') for objectn in data['object']],str)
        keep_id = np.array([bool(len(objectn) and (ID in objectn or objectn in ID))
                            for objectn in match_names])
        #   if we found the star on SIMBAD, we use its RA and DEC to match the star
        if info:
            ra,dec = info['jradeg'],info['jdedeg']
            keep_id = keep_id | (np.sqrt((data['ra']-ra)**2 + (data['dec']-dec)**2) < radius/60.)
        keep = keep & keep_id

    if prog_ID is not None:
        keep = keep & (data['prog_id']==prog_ID)

    #-- if some data is found, we check if the C{data_type} string is contained
    #   in the file's name. If not, we remove it.
    if np.any(keep):
        data = data[keep]

        if data_type is not None:
            data_type = data_type.lower()
            #-- now derive the location of the 'data_type' types from the raw
            #   files
            if not data_type=='raw':
                data['filename'] = [_derive_filelocation_from_raw(ff,data_type) for ff in data['filename']]
                existing_files = np.array([ff!='naf' for ff in data['filename']],bool)
                data = data[existing_files]
        seqs = sorted(set(data['unseq']))
        logger.info('ID={}/prog_ID={}: Found {:d} spectra (data type={} with unique unseqs)'.format(ID,prog_ID,len(seqs),data_type))
    else:
        data = data[:0]
        logger.info('%s: Found no spectra'%(ID))

    #-- we now check if the barycentric correction was calculated properly.
    #   If not, we calculate it here, but only if the object was found in
    #   SIMBAD. Else, we have no information on the ra and dec (if bvcorr was
    #   not calculated, ra and dec are not in the header).
    for obs in data:
        if ID is not None and info:
            try:
                jd  = _timestamp2jd(obs['date-avg'])
            except ValueError:
                logger.info('Header probably corrupted for unseq {}: no info on time or barycentric correction'.format(obs['unseq']))
                jd = np.nan
            # the previous line is equivalent to:
            # day = dateutil.parser.parse(header['DATE-AVG'])
            # BJD = ephem.julian_date(day)
            bvcorr, hjd = helcorr(ra/360.*24, dec, jd)
        else:
            break
        if np.isnan(obs['bvcor']):
            logger.info("Corrected 'bvcor' for unseq {} (missing in header)".format(obs['unseq']))
            obs['bvcor'] = float(bvcorr)
        if np.isnan(obs['bjd']):
            logger.info("Corrected 'bjd' for unseq {} (missing in header)".format(obs['unseq']))
            obs['bjd'] = float(hjd)


    #-- do we need the information as a file, or as a numpy array?
    if filename is not None:
        ascii.write_array(data,filename,auto_width=True,header=True)
    else:
        return data
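A self-contained sketch of the single-night expansion described above: a bare date is widened to the 24-hour window starting on that day, mirroring the time_range handling at the top of the function (the parsing format is an assumption about _timestamp2datetime):

# Hypothetical illustration of the one-day time_range expansion.
import datetime

def expand_night(day_string):
    day = datetime.datetime.strptime(day_string, '%Y-%m-%d')
    return (day, day + datetime.timedelta(days=1))

start, end = expand_night('2009-09-22')   # night starting 22 September 2009
print(start, '->', end)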