def do_process(teffs, loggs, ebvs, zs, rvs, index, arr):
     output = np.zeros((len(responses) + 1, len(teffs)))
     c0 = time.time()
     N = len(teffs)
     for i, (teff, logg, ebv, z, rv,
             ind) in enumerate(zip(teffs, loggs, ebvs, zs, rvs, index)):
         if i % 100 == 0:
             dt = time.time() - c0
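             #-- dt is the wall time spent on the last 100 grid points; scale it
             #   to the remaining points and convert seconds to hours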
             print("ETA", index[0], (N - i) / 100. * dt / 3600., 'hr')
             c0 = time.time()
         #-- get model SED and absolute luminosity
         model.set_defaults(z=z)
         wave, flux = model.get_table(teff, logg)
         Labs = model.luminosity(wave, flux)
         flux_ = reddening.redden(flux,
                                  wave=wave,
                                  ebv=ebv,
                                  rtype='flux',
                                  law=law,
                                  Rv=rv)
         #-- calculate synthetic fluxes
         output[0, i] = ind
         output[1:, i] = model.synthetic_flux(wave,
                                              flux_,
                                              responses,
                                              units=units)
     arr.append(output)
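#-- A minimal dispatch sketch for do_process (an illustrative addition, not part
#   of the original module): split the grid points over a few worker processes
#   and gather the per-process output blocks through a shared Manager list,
#   mirroring the pattern used in calc_integrated_grid below. It relies on the
#   same module-level globals as do_process (responses, law, units); the helper
#   name and the nproc argument are hypothetical.
def run_do_process(teffs, loggs, ebvs, zs, rvs, nproc=4):
    from multiprocessing import Manager, Process
    manager = Manager()
    arr = manager.list([])
    index = np.arange(len(teffs))
    processes = []
    for j in range(nproc):
        #-- every worker gets an interleaved slice of the grid points
        processes.append(Process(target=do_process,
                                 args=(teffs[j::nproc], loggs[j::nproc],
                                       ebvs[j::nproc], zs[j::nproc],
                                       rvs[j::nproc], index[j::nproc], arr)))
        processes[-1].start()
    for p in processes:
        p.join()
    #-- stack the per-process blocks and restore the original point ordering
    output = np.hstack([block for block in arr])
    sa = np.argsort(output[0])
    return output[:, sa]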
 def setUpClass(ModelTestCase):
     """ Setup the tmap grid as it is smaller and thus faster as kurucz"""
     model.set_defaults(grid='kurucztest')
     grid1 = dict(grid='tmaptest')
     grid2 = dict(grid='tmaptest')
     model.set_defaults_multiple(grid1,grid2)
     model.copy2scratch(z='*', Rv='*')      
 def testiGrid_searchSingleHot(self):
     """ INTEGRATION igrid_search single star (tmap) """
     sed = builder.SED(ID='TEST', load_fits=False)
     np.random.seed(111)
     meas = self.measHot + np.random.uniform(0, 0.04, size=len(self.measHot)) * self.measHot
     emeas = meas / 100.0
     units = ['erg/s/cm2/AA' for i in meas]
     source = ['SYNTH' for i in meas]
     sed.add_photometry_fromarrays(meas, emeas, units, self.photbands, source)
     
     model.set_defaults(grid='tmaptest')
     model.copy2scratch(z='*', Rv='*')
     
     np.random.seed(111)
     sed.igrid_search(points=100000,teffrange=(25000, 35000),loggrange=(5.0, 6.0), 
                      ebvrange=(0.005, 0.015),zrange=(0,0),rvrange=(3.1,3.1),
                      vradrange=(0,0),df=None,CI_limit=0.95,set_model=True)
     
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff'], 30200, delta=250)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg'], 5.67, delta=0.1)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv'], 0.0078, delta=0.02)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff_l'], 29337, delta=250)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg_l'], 5.0, delta=0.1)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv_l'], 0.005, delta=0.02)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff_u'], 31623, delta=250)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg_u'], 6.0, delta=0.1)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv_u'], 0.015, delta=0.02)
     
     # check that the best model is stored
     self.assertTrue('model' in sed.results['igrid_search'])
     self.assertTrue('synflux' in sed.results['igrid_search'])
     self.assertTrue('chi2' in sed.results['igrid_search'])
     self.assertEqual(len(sed.results['igrid_search']['model']), 3, msg='stored model has wrong number of columns (should be 3)')
     self.assertEqual(len(sed.results['igrid_search']['synflux']), 3, msg='stored synflux has wrong number of columns (should be 3)')
 def testiMinimizeSingleCold(self):
     """ INTEGRATION iminimize single star (kurucz) """
     sed = builder.SED(ID='TEST', load_fits=False)
     np.random.seed(111)
     meas = self.measCold + np.random.uniform(0, 0.04, size=len(self.measCold)) * self.measCold
     emeas = meas / 100.0
     units = ['erg/s/cm2/AA' for i in meas]
     source = ['SYNTH' for i in meas]
     sed.add_photometry_fromarrays(meas, emeas, units, self.photbands, source)
     
     model.set_defaults(grid='kurucztest')
     model.copy2scratch(z='*', Rv='*')
     
     np.random.seed(111)        
     sed.iminimize(teff=6000, logg=4.0, ebv=0.007, z=-0.3, rv=2.4, vrad=0,
                   teffrange=(5000, 7000),loggrange=(3.5, 4.5),zrange=(-0.5,0.0),
                   ebvrange=(0.005, 0.015), rvrange=(2.1,3.1),vradrange=(0,0),
                   points=None,df=None,CI_limit=0.60,calc_ci=True, set_model=True)
     
     self.assertAlmostEqual(sed.results['iminimize']['CI']['teff'], 6036, delta=50)
     self.assertAlmostEqual(sed.results['iminimize']['CI']['logg'], 4.19, delta=0.1)
     self.assertAlmostEqual(sed.results['iminimize']['CI']['ebv'], 0.015, delta=0.02)
     self.assertAlmostEqual(sed.results['iminimize']['CI']['z'], -0.21, delta=0.1)
     self.assertAlmostEqual(sed.results['iminimize']['CI']['rv'], 2.1, delta=0.3)
     self.assertAlmostEqual(sed.results['iminimize']['CI']['scale'], 1, delta=0.5)
     
     self.assertAlmostEqual(sed.results['iminimize']['CI']['teff_l'], 6025, delta=50)
     self.assertAlmostEqual(sed.results['iminimize']['CI']['teff_u'], 6036, delta=50)
     self.assertAlmostEqual(sed.results['iminimize']['CI']['scale_l'], 1, delta=0.5)
     self.assertAlmostEqual(sed.results['iminimize']['CI']['scale_u'], 1, delta=0.5)
     
     self.assertEqual(sed.results['iminimize']['grid']['teffstart'][0], 6000)
     self.assertEqual(sed.results['iminimize']['grid']['loggstart'][0], 4.0)
     self.assertEqual(sed.results['iminimize']['grid']['ebvstart'][0], 0.007)
     self.assertEqual(sed.results['iminimize']['grid']['zstart'][0], -0.3)
     self.assertEqual(sed.results['iminimize']['grid']['rvstart'][0], 2.4)
     self.assertAlmostEqual(sed.results['iminimize']['grid']['chisq'][0], 3.9, delta=1)
     
     self.assertTrue('model' in sed.results['iminimize'])
     self.assertTrue('synflux' in sed.results['iminimize'])
     self.assertTrue('chi2' in sed.results['iminimize'])
     self.assertEqual(len(sed.results['iminimize']['model']), 3, msg='stored model has wrong number of columns (should be 3)')
     self.assertEqual(len(sed.results['iminimize']['synflux']), 3, msg='stored synflux has wrong number of columns (should be 3)')
 def setUpClass(cls):
     if not noMock:
         sesame.search = mock.Mock(return_value={'plx':(0.0,0.0)})
     if not noIntegration:
         # ==== COLD model ====
         model.set_defaults(grid='kurucztest')
         model.copy2scratch(z='*', Rv='*')
         measCold = model.get_itable_pix(photbands=cls.photbands, teff=array([6000]), \
                             logg=array([4.0]),ebv=array([0.01]), rv=array([2.8]), z=array([-0.25]))[0][:,0]
         
         np.random.seed(111)
         cls.measCold = measCold
         
         # ==== HOT model ====
         model.set_defaults(grid='tmaptest')
         model.copy2scratch(z='*', Rv='*')
         measHot = model.get_itable_pix(photbands=cls.photbands, teff=array([30000]), \
                             logg=array([5.5]),ebv=array([0.01]), rv=3.1, z=0.0)[0][:,0]
         
         np.random.seed(111)
         cls.measHot = measHot
         
         # ==== BINARY model ====
         grid1 = dict(grid='kurucztest')
         grid2 = dict(grid='tmaptest')
         model.set_defaults_multiple(grid1,grid2)
         model.clean_scratch(z='*', Rv='*')
         model.copy2scratch(z='*', Rv='*')
         
         G, Msol, Rsol = constants.GG_cgs, constants.Msol_cgs, constants.Rsol_cgs
         masses = [0.85, 0.50]
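         #-- the radii follow from the assumed masses and the surface gravities
         #   of the two components: g = G*M/R**2, so R = sqrt(G*M/g), with
         #   log(g) = 4.0 (cool) and 5.5 (hot) in cgs units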
         rad = array([np.sqrt(G*masses[0]*Msol/10**4.0)/Rsol])
         rad2 = array([np.sqrt(G*masses[1]*Msol/10**5.5)/Rsol])
         
         measBin = model.get_itable_pix(photbands=cls.photbands, teff=array([6000]), \
                             logg=array([4.0]),ebv=array([0.01]), teff2=array([30000]),
                             logg2=array([5.5]), ebv2=array([0.01]), rad=rad,
                             rad2=rad2)[0][:,0]
                             
         np.random.seed(111)
         cls.measBin = measBin
         cls.masses = masses
 def testiGrid_searchSingleCold(self):
     """ INTEGRATION igrid_search single star (kurucz)"""
     sed = builder.SED(ID='TEST', load_fits=False)
     np.random.seed(111)
     meas = self.measCold + np.random.uniform(0, 0.01, size=len(self.measCold)) * self.measCold
     emeas = meas / 100.0
     units = ['erg/s/cm2/AA' for i in meas]
     source = ['SYNTH' for i in meas]
     sed.add_photometry_fromarrays(meas, emeas, units, self.photbands, source)
     
     model.set_defaults(grid='kurucztest')
     model.copy2scratch(z='*', Rv='*')
     
     np.random.seed(111)
     sed.igrid_search(points=100000,teffrange=(5000, 7000),loggrange=(3.5, 4.5), 
                      ebvrange=(0.005, 0.015),zrange=(-0.5,0.0),rvrange=(2.1,3.1),
                      vradrange=(0,0),df=None,CI_limit=0.95,set_model=True)
     
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff'], 6000, delta=50)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg'], 3.98, delta=0.1)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv'], 0.011, delta=0.02)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['rv'], 2.13, delta=0.5)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['z'], -0.28, delta=0.1)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff_l'], 5949, delta=50)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg_l'], 3.62, delta=0.1)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv_l'], 0.005, delta=0.02)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['rv_l'], 2.1, delta=0.1)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['z_l'], -0.46, delta=0.1)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff_u'], 6060, delta=50)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg_u'], 4.5, delta=0.1)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv_u'], 0.015, delta=0.02)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['rv_u'], 3.1, delta=0.1)
     self.assertAlmostEqual(sed.results['igrid_search']['CI']['z_u'], -0.05, delta=0.1)
     
     # check that the best model is stored
     self.assertTrue('model' in sed.results['igrid_search'])
     self.assertTrue('synflux' in sed.results['igrid_search'])
     self.assertTrue('chi2' in sed.results['igrid_search'])
     self.assertEqual(len(sed.results['igrid_search']['model']), 3, msg='stored model has wrong number of columns (should be 3)')
     self.assertEqual(len(sed.results['igrid_search']['synflux']), 3, msg='stored synflux has wrong number of columns (should be 3)')
 def testiMinimizeSingleHot(self):
     """ INTEGRATION iminimize single star (tmap) """
     sed = builder.SED(ID='TEST', load_fits=False)
     np.random.seed(111)
     meas = self.measHot + np.random.uniform(0, 0.04, size=len(self.measHot)) * self.measHot
     emeas = meas / 100.0
     units = ['erg/s/cm2/AA' for i in meas]
     source = ['SYNTH' for i in meas]
     sed.add_photometry_fromarrays(meas, emeas, units, self.photbands, source)
     
     model.set_defaults(grid='tmaptest')
     model.copy2scratch(z='*', Rv='*')
     
     np.random.seed(111)
     sed.iminimize(teff=27000, logg=5.1, ebv=0.01, z=0, rv=3.1, vrad=0,
                   teffrange=(25000, 35000),loggrange=(5.0, 6.0), 
                   ebvrange=(0.005, 0.015),zrange=(0,0),rvrange=(3.1,3.1),
                   vradrange=(0,0),df=None,CI_limit=0.95,set_model=False)
     
     self.assertAlmostEqual(sed.results['iminimize']['CI']['teff'], 30250, delta=100)
     self.assertAlmostEqual(sed.results['iminimize']['CI']['logg'], 5.66, delta=0.1)
     self.assertAlmostEqual(sed.results['iminimize']['CI']['ebv'], 0.008, delta=0.02)
     self.assertAlmostEqual(sed.results['iminimize']['grid']['chisq'][0], 3.8, delta=1)
def calc_integrated_grid(threads=1,
                         ebvs=None,
                         law='fitzpatrick2004',
                         Rv=3.1,
                         units='Flambda',
                         responses=None,
                         update=False,
                         add_spectrophotometry=False,
                         **kwargs):
    """
    Integrate an entire SED grid over all passbands and save to a FITS file.

    The output file can be used to fit SEDs more efficiently, since integration
    over the passbands has already been carried out.

    WARNING: this function can take a loooooong time to compute!

    Extra keywords can be used to specify the grid.

    @param threads: number of threads
    @type threads: integer, 'max', 'half' or 'safe'
    @param ebvs: reddening parameters to include
    @type ebvs: numpy array
    @param law: interstellar reddening law to use
    @type law: string (valid law name, see C{reddening.py})
    @param Rv: Rv value for reddening law
    @type Rv: float
    @param units: choose to work in 'Flambda' or 'Fnu'
    @type units: str, one of 'Flambda','Fnu'
    @param responses: response curves to add (if None, add all)
    @type responses: list of strings
    @param update: if True, append to an existing FITS file; otherwise overwrite
    any existing file
    @type update: boolean
    @param add_spectrophotometry: if True, also include spectrophotometric
    responses in the integration
    @type add_spectrophotometry: boolean
    """
    if ebvs is None:
        ebvs = np.r_[0:4.01:0.01]

    #-- select number of threads
    if threads == 'max':
        threads = cpu_count()
    elif threads == 'half':
        threads = cpu_count() / 2
    elif threads == 'safe':
        threads = cpu_count() - 1
    threads = int(threads)
    if threads > len(ebvs):
        threads = len(ebvs)
    logger.info('Threads: %s' % (threads))

    #-- set the parameters for the SED grid
    model.set_defaults(**kwargs)
    #-- get the dimensions of the grid: both the grid points, but also
    #   the wavelength range
    teffs, loggs = model.get_grid_dimensions()
    wave, flux = model.get_table(teff=teffs[0], logg=loggs[0])
    #-- get the response functions covering the wavelength range of the models
    #   also get the information on those filters
    responses = get_responses(responses=responses,\
              add_spectrophotometry=add_spectrophotometry,wave=wave)

    #-- definition of one process:
    def do_ebv_process(ebvs, arr, responses):
        logger.debug('EBV: %s-->%s (%d)' % (ebvs[0], ebvs[-1], len(ebvs)))
        for ebv in ebvs:
            flux_ = reddening.redden(flux,
                                     wave=wave,
                                     ebv=ebv,
                                     rtype='flux',
                                     law=law,
                                     Rv=Rv)
            #-- calculate synthetic fluxes
            synflux = model.synthetic_flux(wave, flux_, responses, units=units)
            arr.append([np.concatenate(([ebv], synflux))])
        logger.debug("Finished EBV process (len(arr)=%d)" % (len(arr)))

    #-- do the calculations
    c0 = time.time()
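    #-- output columns: teff, logg, Labs, ebv, followed by one synthetic flux
    #   per response curve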
    output = np.zeros((len(teffs) * len(ebvs), 4 + len(responses)))
    start = 0
    logger.info('Total number of tables: %i' % (len(teffs)))
    exceptions = 0
    exceptions_logs = []
    for i, (teff, logg) in enumerate(zip(teffs, loggs)):
        if i > 0:
            logger.info('%s %s %s %s: ET %d seconds' %
                        (teff, logg, i, len(teffs),
                         (time.time() - c0) / i * (len(teffs) - i)))

        #-- get model SED and absolute luminosity
        wave, flux = model.get_table(teff=teff, logg=logg)
        Labs = model.luminosity(wave, flux)

        #-- threaded calculation over all E(B-V)s
        processes = []
        manager = Manager()
        arr = manager.list([])
        all_processes = []
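        #-- each process handles an interleaved subset of the E(B-V) values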
        for j in range(threads):
            all_processes.append(
                Process(target=do_ebv_process,
                        args=(ebvs[j::threads], arr, responses)))
            all_processes[-1].start()
        for p in all_processes:
            p.join()

        try:
            #-- collect the results and add them to 'output'
            arr = np.vstack([row for row in arr])
            sa = np.argsort(arr[:, 0])
            arr = arr[sa]
            output[start:start + arr.shape[0], :3] = teff, logg, Labs
            output[start:start + arr.shape[0], 3:] = arr
            start += arr.shape[0]
        except:
            logger.warning('Exception in calculating Teff=%f, logg=%f' %
                           (teff, logg))
            logger.debug('Exception: %s' % (sys.exc_info()[1]))
            exceptions = exceptions + 1
            exceptions_logs.append(sys.exc_info()[1])

    #-- make FITS columns
    gridfile = model.get_file()
    outfile = 'i{0}'.format(os.path.basename(gridfile))
    outfile = os.path.splitext(outfile)
    outfile = outfile[0] + '_law{0}_Rv{1:.2f}'.format(law, Rv) + outfile[1]
    logger.info('Precaution: making original grid backup at {0}.backup'.format(
        outfile))
    if os.path.isfile(outfile):
        shutil.copy(outfile, outfile + '.backup')
    output = output.T
    if not update or not os.path.isfile(outfile):
        cols = [
            pf.Column(name='teff', format='E', array=output[0]),
            pf.Column(name='logg', format='E', array=output[1]),
            pf.Column(name='ebv', format='E', array=output[3]),
            pf.Column(name='Labs', format='E', array=output[2])
        ]
        for i, photband in enumerate(responses):
            cols.append(
                pf.Column(name=photband, format='E', array=output[4 + i]))
    #-- make FITS columns but copy the existing ones
    else:
        hdulist = pf.open(outfile, mode='update')
        names = hdulist[1].columns.names
        cols = [
            pf.Column(name=name, format='E', array=hdulist[1].data.field(name))
            for name in names
        ]
        for i, photband in enumerate(responses):
            cols.append(
                pf.Column(name=photband, format='E', array=output[4 + i]))

    #-- make FITS extension and write grid/reddening specifications to header
    table = pf.new_table(pf.ColDefs(cols))
    table.header.update('gridfile', os.path.basename(gridfile))
    for key in sorted(model.defaults.keys()):
        key_ = (len(key) > 8) and 'HIERARCH ' + key or key
        table.header.update(key_, model.defaults[key])
    for key in sorted(kwargs.keys()):
        key_ = (len(key) > 8) and 'HIERARCH ' + key or key
        table.header.update(key_, kwargs[key])
    table.header.update('FLUXTYPE', units)
    table.header.update('REDLAW', law, 'interstellar reddening law')
    table.header.update('RV', Rv, 'interstellar reddening parameter')

    #-- make/update complete FITS file
    if not update or not os.path.isfile(outfile):
        if os.path.isfile(outfile):
            os.remove(outfile)
            logger.warning('Removed existing file: %s' % (outfile))
        hdulist = pf.HDUList([])
        hdulist.append(pf.PrimaryHDU(np.array([[0, 0]])))
        hdulist.append(table)
        hdulist.writeto(outfile)
        logger.info("Written output to %s" % (outfile))
    else:
        hdulist[1] = table
        hdulist.flush()
        hdulist.close()
        logger.info("Appended output to %s" % (outfile))

    logger.warning('Encountered %s exceptions!' % (exceptions))
    for i in exceptions_logs:
        print('ERROR')
        print(i)
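#-- A hedged usage sketch (an illustrative addition, not part of the original
#   module): integrate a grid over a coarse E(B-V) sampling with half of the
#   available cores, adding all available responses (responses=None). The grid
#   name 'kurucz' is an assumed example; extra keywords are passed on to
#   model.set_defaults to select the grid.
if __name__ == '__main__':
    calc_integrated_grid(threads='half', ebvs=np.r_[0:0.51:0.01],
                         law='fitzpatrick2004', Rv=3.1, units='Flambda',
                         responses=None, update=False, grid='kurucz')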