Example #1
from datetime import datetime

import numpy as np
from astropy.cosmology import Planck13


def redshift_vectorized(formation_z):
    t1 = datetime.now()

    print('Beginning redshift referencing')
    reference_z = np.arange(0, 100, 0.01)
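    # Planck13.age returns an astropy Quantity array (ages in Gyr)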
    reference_t = Planck13.age(reference_z)
    t2 = datetime.now()
    print('Redshift referencing time = ' + str(t2 - t1))

    formation_time = []

    t1 = datetime.now()
    # nearest-neighbour lookup of the reference age at each formation redshift
    for z in formation_z:
        idx = find_nearest(reference_z, z)
        formation_time.append(reference_t[idx])

    t2 = datetime.now()
    print('redshift_vectorized lookup time = ' + str(t2 - t1))

    return formation_time
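
The find_nearest helper these examples rely on is not shown; below is a minimal sketch of the usual numpy idiom (an assumption, not necessarily the project's actual implementation), plus a truly vectorized alternative to the per-element lookup loop above.

import numpy as np

def find_nearest(array, value):
    # index of the element of array closest to value
    return int(np.abs(array - value).argmin())

# because reference_z is sorted ascending, one np.searchsorted call can
# replace the whole lookup loop (it picks the nearest-from-above index
# rather than the strictly nearest one, a negligible difference at the
# 0.01 grid spacing used above)
reference_z = np.arange(0, 100, 0.01)
formation_z = np.array([0.5, 2.3, 7.7])
idx = np.clip(np.searchsorted(reference_z, formation_z), 0, len(reference_z) - 1)
print(idx)  # indices into reference_z / reference_t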
Example #2
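# assumed context (not shown in this excerpt): numpy as np, the project's
# cfg config module, a find_nearest helper, and hyperion's SphericalDust
# are imported in the enclosing module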
def active_dust_add(ds,m,grid_of_sizes,nsizes,dustdens,specific_energy,refined=[False]):
        #first, save grid_of_sizes to ds.parameters so we can carry it around
        ds.parameters['reg_grid_of_sizes'] = grid_of_sizes


        #for empty cells, use the median size distribution
        for isize in range(nsizes):
                wzero = np.where(grid_of_sizes[:,isize] == 0)[0]
                wnonzero = np.where(grid_of_sizes[:,isize] != 0)[0]
                
                grid_of_sizes[wzero,isize] = np.median(grid_of_sizes[wnonzero,isize])
                
                print('ratio of empty to non-empty cells in size bin '+str(isize)+': '+str(len(wzero)/len(wnonzero)))



        #now load the mapping between grain bin and filename for the lookup table
        data = np.load(cfg.par.pd_source_dir+'active_dust/dust_files/binned_dust_sizes.npz')
        grain_size_left_edge_array = data['grain_size_left_edge_array']
        grain_size_right_edge_array  = data['grain_size_right_edge_array']
        dust_filenames = data['outfile_filenames']

        nbins = len(grain_size_left_edge_array)




        #find which sizes in the hydro simulation correspond to the
        #pre-binned extinction law sizes from dust_file_writer.py

        dust_file_to_grain_size_mapping_idx = []
        x=np.linspace(cfg.par.otf_extinction_log_min_size,cfg.par.otf_extinction_log_max_size,nsizes)
        for i in range(nbins):
                dust_file_to_grain_size_mapping_idx.append(find_nearest(x,grain_size_left_edge_array[i]))


        #set up the per-cell grids: dsf_grid holds the size distribution
        #sampled at each dust file bin, and frac_grid holds the fractional
        #contribution of each dust file bin for every cell.

        dsf_grid = np.zeros([dustdens.shape[0],nbins])
        frac_grid = np.zeros([dustdens.shape[0],nbins])
        debug_nearest_extinction_curve = np.zeros([nbins])

        if cfg.par.OTF_EXTINCTION_MRN_FORCE:
                grid_sum = np.zeros(nbins)

                #how the DNSF was set up.  not needed other than for testing.
                #use a separate name so we don't clobber the x defined above,
                #which is saved to ds.parameters at the end of this function.
                x_mrn = np.linspace(-4, 0, 41)
                #load an example dust size function for testing against
                dsf = np.loadtxt(cfg.par.pd_source_dir+'active_dust/mrn_dn.txt')



                for i in range(nbins):
                        #find the index bounds in x_mrn that we want to interpolate between
                        idx0 = find_nearest(x_mrn,grain_size_left_edge_array[i])
                        if x_mrn[idx0] > grain_size_left_edge_array[i]: idx0 -= 1
                        idx1 = idx0+1

                        dsf_interp = np.interp(grain_size_left_edge_array[i],[x_mrn[idx0],x_mrn[idx1]],[dsf[idx0],dsf[idx1]])
                
                        #this sets the fraction of each bin size we need (for the
                        #entire grid!)
                        dsf_grid[:,i] = dsf_interp
                        grid_sum[i] = np.sum(dsf_grid[:,i])
                        debug_nearest_extinction_curve[i] = dsf_interp


                #set up the frac array that is nbins big.  this is the
                #fractional contribution of each dust file bin which is based
                #on the total number of grains in the grid in that bin.
                frac = grid_sum/np.sum(grid_sum)

                #now we need to set the localized extinction law. we do
                #this by comparing, fractionally, a given cell's number of
                #grains in that bin to the maximum number of grains that
                #the grid has in that bin.
                
                for i in range(nbins):
                        frac_grid[:,i] = dsf_grid[:,i]/np.max(dsf_grid[:,i])*frac[i]
            

                #------------------------    
        
        else:


                grid_sum = np.zeros(nbins)


                #sum the number of grains in each dust file bin over the
                #entire grid
                for i in range(nbins):
                        grid_sum[i] = np.sum(grid_of_sizes[:,dust_file_to_grain_size_mapping_idx[i]])


                #set up the frac array that is nbins big.  this is the
                #fractional contribution of each dust file bin which is based
                #on the total number of grains in the grid in that bin.
                frac = grid_sum/np.sum(grid_sum)

            
                #now we need to set the localized extinction law. we do
                #this by comparing, fractionally, a given cell's number of
                #grains in that bin to the maximum number of grains that
                #the grid has in that bin.
                
                #this block tests whether we're in an octree (i.e., we
                #could be in a voronoi mesh, in which case refined doesn't
                #mean anything).  this matters because for an octree we
                #only want to fill the leaf (refined == False) cells.
                if np.sum(refined) > 0:
                        wFalse = np.where(np.asarray(refined) == 0)[0]
                
                        for i in range(nbins):
                                frac_grid[wFalse,i] = grid_of_sizes[:,dust_file_to_grain_size_mapping_idx[i]]/np.max(grid_of_sizes[:,dust_file_to_grain_size_mapping_idx[i]])*frac[i]
                else:
                        #we take the fractional grain size distribution
                        #from each size bin, and multiply it by the
                        #cells in each grid (weighted by the ratio of
                        #the logarithm of the actual number of grains
                        #in that bin in that cell to the log of the
                        #cell with the most grains in that bin).
                        for i in range(nbins):
                                frac_grid[:,i] = np.log10(grid_of_sizes[:,dust_file_to_grain_size_mapping_idx[i]])/np.max(np.log10(grid_of_sizes[:,dust_file_to_grain_size_mapping_idx[i]]))*frac[i]


                #now add the dust grids to hyperion
                for bin in range(nbins):
                        file = dust_filenames[bin]
                        d = SphericalDust(cfg.par.pd_source_dir+'active_dust/'+file)
                        m.add_density_grid(dustdens*frac_grid[:,bin],d,specific_energy=specific_energy)
                        #m.add_density_grid(dustdens*frac[bin],d,specific_energy=specific_energy)

        

        #finally, save grid_of_sizes and the grain sizes to ds.parameters so we can carry them around
        ds.parameters['reg_grid_of_sizes'] = grid_of_sizes
        ds.parameters['grain_sizes_in_micron'] = 10.**x
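
As a sanity check on the weighting scheme above, here is a minimal self-contained sketch (toy arrays, not the real simulation inputs) of how the per-cell extinction-law weights are built: sum each size bin over the grid, normalize to a global per-bin fraction, then scale each cell by its share of the bin maximum.

import numpy as np

# toy stand-in: 4 cells x 3 grain-size bins
grid_of_sizes = np.array([[1.,  10., 100.],
                          [2.,  20., 200.],
                          [4.,  40., 400.],
                          [8.,  80., 800.]])

# global fractional contribution of each dust file bin
grid_sum = grid_of_sizes.sum(axis=0)
frac = grid_sum / grid_sum.sum()

# per-cell weight: the cell's share of the bin maximum times the global
# fraction (the linear analogue of the log-weighted octree branch above)
frac_grid = grid_of_sizes / grid_of_sizes.max(axis=0) * frac

print(frac)       # sums to 1 across bins
print(frac_grid)  # weights that would multiply dustdens for each dust file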
Example #3
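# assumed context (not shown in this excerpt): numpy as np, datetime,
# astropy's constants, the project's cfg module, and the helpers
# find_nearest, wavelength_compress, Sed_Bins, sg.allstars_sed_gen, and
# add_bulge_disk_stars are imported in the enclosing module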
def add_binned_seds(df_nu,stars_list,diskstars_list,bulgestars_list,cosmoflag,m,sp):
    
    # calculate max and min ages
    minimum_age = 15 #Gyr - deliberately too high a starting value
    maximum_age = 0 #Gyr

    # calculate the minimum and maximum stellar mass
    minimum_mass = 1e15*constants.M_sun.cgs.value #grams - some absurdly large value for a single stellar cluster
    maximum_mass = 0 #grams

    # calculate the minimum and maximum stellar metallicity
    minimum_metallicity = 1.e5 #some absurdly large metallicity
    maximum_metallicity = 0

    nstars = len(stars_list)
    for i in range(nstars):
        #if stars_list[i].metals[0] < minimum_metallicity: minimum_metallicity = stars_list[i].metals[0]
        #if stars_list[i].metals[0] > maximum_metallicity: maximum_metallicity = stars_list[i].metals[0]
        
        if stars_list[i].mass < minimum_mass: minimum_mass = stars_list[i].mass
        if stars_list[i].mass > maximum_mass: maximum_mass = stars_list[i].mass

        if stars_list[i].age < minimum_age: minimum_age = stars_list[i].age
        if stars_list[i].age > maximum_age: maximum_age = stars_list[i].age

    # If FORCE_BINNED is not set, we do not bin stars younger than cfg.par.max_age_direct
    if not cfg.par.FORCE_BINNED:
        if cfg.par.max_age_direct > minimum_age:
            minimum_age = cfg.par.max_age_direct + 0.001

    delta_age = (maximum_age-minimum_age)/cfg.par.N_STELLAR_AGE_BINS

    if delta_age <= 0:
        # if the max age for directly-added stars exceeds the max age of stars
        # in the galaxy, there are no stars left to bin, so exit the function
        return m

    # define the metallicity bins: we do this by saying that they are the number of metallicity bins in FSPS

    fsps_metals = np.array(sp.zlegend)
    N_METAL_BINS = len(fsps_metals)

    # note the bins are NOT metallicity, but rather the zmet keys in
    # fsps (i.e. the zmet column in Table 1 of the fsps manual)
    metal_bins = np.arange(N_METAL_BINS)+1

    # define the age bins in log space so that we maximise resolution around young stars
    age_bins = 10.**(np.linspace(np.log10(minimum_age),np.log10(maximum_age),cfg.par.N_STELLAR_AGE_BINS))

    #tack on the maximum age bin
    age_bins = np.append(age_bins,age_bins[-1]+delta_age)

   
    #define the mass bins (log)
    #note - for some codes, all star particles have the same mass.  in this case, we have to have a trap:
    if minimum_mass == maximum_mass or cfg.par.N_MASS_BINS == 0: 
        mass_bins = np.zeros(cfg.par.N_MASS_BINS+1)+minimum_mass
    else:
        delta_mass = (np.log10(maximum_mass)-np.log10(minimum_mass))/cfg.par.N_MASS_BINS
        mass_bins = np.arange(np.log10(minimum_mass),np.log10(maximum_mass),delta_mass)
        mass_bins = np.append(mass_bins,mass_bins[-1]+delta_mass)
        mass_bins = 10.**mass_bins
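        # worked example with assumed toy values: minimum_mass = 1e4,
        # maximum_mass = 1e6, N_MASS_BINS = 2 -> delta_mass = 1,
        # log bins [4, 5] -> append 6 -> mass_bins = [1e4, 1e5, 1e6]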
        
    print('mass_bins = ',mass_bins)
    print('metal_bins = ',metal_bins)
    print('age_bins = ',age_bins)

    #has_stellar_mass is a 3D boolean array that's [wz,wa,wm] big and
    #says whether or not that bin is being used downstream for
    #creating a point source collection (i.e. that it actually has at
    #least one star cluster that falls into it)
    has_stellar_mass = np.zeros([N_METAL_BINS,cfg.par.N_STELLAR_AGE_BINS+1,cfg.par.N_MASS_BINS+1],dtype=bool)


    #stars_in_bin is a dictionary that holds the list of star particles
    #that go in every [wz,wa,wm] group.  The keys are (wz,wa,wm) tuples
    #that we use later to speed up adding sources.
    stars_in_bin = {}
    
    for i in range(nstars):
        wz = find_nearest(metal_bins,stars_list[i].fsps_zmet)
        wa = find_nearest(age_bins,stars_list[i].age)
        wm = find_nearest(mass_bins,stars_list[i].mass)
        
        stars_list[i].sed_bin = [wz,wa,wm]
        has_stellar_mass[wz,wa,wm] = True

        if (wz,wa,wm) in stars_in_bin:
            stars_in_bin[(wz,wa,wm)].append(i)
        else:
            stars_in_bin[(wz,wa,wm)] = [i]


    print('assigning stars to SED bins')
    sed_bins_list=[]
    sed_bins_list_has_stellar_mass = []
       
    #we loop through the age and mass bins +1 because the max values were
    #tacked onto those bin lists.  the metal bins have no extra value, so
    #we don't loop the extra +1 there.
    for wz in range(N_METAL_BINS):
        for wa in range(cfg.par.N_STELLAR_AGE_BINS+1):
            for wm in range(cfg.par.N_MASS_BINS+1):
                sed_bins_list.append(Sed_Bins(mass_bins[wm],fsps_metals[wz],age_bins[wa],metal_bins[wz]))
                if has_stellar_mass[wz,wa,wm]:
                    stars_metals = []
                    for star in stars_in_bin[(wz,wa,wm)]:
                        stars_metals.append(stars_list[star].all_metals)
                    stars_metals = np.array(stars_metals)
                    stars_metals = np.mean(stars_metals,axis=0)
                    #print(stars_metals, metal_bins[wz], fsps_metals[wz])
                    sed_bins_list_has_stellar_mass.append(Sed_Bins(mass_bins[wm],fsps_metals[wz],age_bins[wa],metal_bins[wz],stars_metals))
   
    #sed_bins_list is a list of Sed_Bins objects that record which mass,
    #metal, and age bin they correspond to.  Recomputing the SED for every
    #one of these bins would be needlessly expensive, so we only
    #calculate the SED for the bins that contain actual stellar mass.
            
    print('Running SPS for Binned SEDs')
    print('calculating the SEDs for ',len(sed_bins_list_has_stellar_mass),' bins')
    
    binned_stellar_nu,binned_stellar_fnu_has_stellar_mass,disk_fnu,bulge_fnu,mfrac = sg.allstars_sed_gen(sed_bins_list_has_stellar_mass,cosmoflag,sp)

    #since binned_stellar_fnu_has_stellar_mass is only
    #[len(sed_bins_list_has_stellar_mass),nlam] big, we need to
    #scatter it back into the full-size array.  this is an ugly loop,
    #but it saves more than an order of magnitude in SED generation time.
    nlam = binned_stellar_nu.shape[0]
    binned_stellar_fnu = np.zeros([len(sed_bins_list),nlam])
    binned_mfrac = np.zeros([len(sed_bins_list)])

    counter = 0
    counter_has_stellar_mass = 0
    for wz in range(N_METAL_BINS):
        for wa in range(cfg.par.N_STELLAR_AGE_BINS+1):
            for wm in range(cfg.par.N_MASS_BINS+1):
                if has_stellar_mass[wz,wa,wm]:
                    binned_mfrac[counter] = mfrac[counter_has_stellar_mass]
                    binned_stellar_fnu[counter,:] = binned_stellar_fnu_has_stellar_mass[counter_has_stellar_mass,:]
                    counter_has_stellar_mass += 1 
                counter+=1

    print(f'after selecting for ones with stellar mass: {np.shape(binned_stellar_fnu)}')


    

    #now binned_stellar_nu and binned_stellar_fnu are the SEDs for the bins in order of wz, wa, wm 
    
    #create the point source collections: we loop through the bins and
    #see what star particles correspond to these.  if any do, then we
    #add them to a list, and create a point source collection out of
    #these


    print('adding point source collections')
    t1 = datetime.now()


    totallum = 0 
    totalmass = 0 
    counter=0
    for wz in range(N_METAL_BINS):
        for wa in range(cfg.par.N_STELLAR_AGE_BINS+1):
            for wm in range(cfg.par.N_MASS_BINS+1):
                
                if has_stellar_mass[wz,wa,wm]:
                
                    source = m.add_point_source_collection()
                    
                    
                    nu = binned_stellar_nu
                    fnu = binned_stellar_fnu[counter,:]
                    nu,fnu = wavelength_compress(nu,fnu,df_nu)
                    
                    #reverse for hyperion
                    nu = nu[::-1]
                    fnu = fnu[::-1]

                    
                    #source luminosities
                    #here, each (wz, wa, wm) bin will have an associated mfrac that corresponds to the fnu generated for this bin
                    #while each star particle in the bin has a distinct mass, they all share mfrac as this value depends only on the age and Z of the star
                    #thus, there are 'counter' number of binned_mfrac values (to match the number of fnu arrays)
                    lum = np.array([stars_list[i].mass/constants.M_sun.cgs.value*constants.L_sun.cgs.value/binned_mfrac[counter] for i in stars_in_bin[(wz,wa,wm)]])
                    lum *= np.absolute(np.trapz(fnu,x=nu))
                    source.luminosity = lum
                    


                    for i in stars_in_bin[(wz,wa,wm)]:  totalmass += stars_list[i].mass
                    
                    #source positions
                    pos = np.zeros([len(stars_in_bin[(wz,wa,wm)]),3])
                    for i in range(len(stars_in_bin[(wz,wa,wm)])):
                        pos[i,:] = stars_list[stars_in_bin[(wz,wa,wm)][i]].positions

                    source.position=pos

                    #source spectrum
                    source.spectrum = (nu,fnu)
                                    
                    totallum += np.sum(source.luminosity)

                    
                counter+=1

                
    if not cosmoflag: add_bulge_disk_stars(df_nu,binned_stellar_nu,binned_stellar_fnu,disk_fnu,bulge_fnu,stars_list,diskstars_list,bulgestars_list,m)

    m.set_sample_sources_evenly(True)

    t2 = datetime.now()
    print('[source_creation/add_binned_seds:] Execution time for point source collection adding = '+str(t2-t1))
    print('[source_creation/add_binned_seds:] Total Luminosity of point source collection is: ',totallum)


    return m
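
The core bookkeeping in add_binned_seds is nearest-bin assignment plus bucketing stars by their (wz,wa,wm) key. A minimal sketch of the same idiom with toy values, using the assumed find_nearest helper from Example #1; dict.setdefault gives the same append-or-create behavior as the if/else above.

import numpy as np

def find_nearest(array, value):
    return int(np.abs(array - value).argmin())

age_bins = np.array([0.01, 0.1, 1.0, 10.0])  # Gyr, toy values
ages = [0.02, 0.5, 9.0, 0.6]                 # toy star particle ages

stars_in_bin = {}
for i, age in enumerate(ages):
    wa = find_nearest(age_bins, age)
    stars_in_bin.setdefault(wa, []).append(i)

print(stars_in_bin)  # {0: [0], 1: [1], 3: [2], 2: [3]}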