Example #1
def enzo_m_gen(fname, field_add):

    reg, ds1 = enzo_grid_generate(fname, field_add)

    amr = AMRGrid.from_yt(
        ds1, quantity_mapping={'density': ('gas', 'dust_density')})
    '''
    levels = ds.index.max_level
    
    amr = AMRGrid()
    for ilevel in range(levels):
        level = amr.add_level()

        for igrid in ds.index.select_grids(ilevel):
            print(igrid)
            grid = level.add_grid()
            grid.xmin, grid.xmax = igrid.LeftEdge[0].in_units('cm'), igrid.RightEdge[0].in_units('cm')
            grid.ymin, grid.ymax = igrid.LeftEdge[1].in_units('cm'), igrid.RightEdge[1].in_units('cm')
            grid.zmin, grid.zmax = igrid.LeftEdge[2].in_units('cm'), igrid.RightEdge[2].in_units('cm')
            grid.quantities["density"] = np.transpose(np.array(igrid[("gas","metal_density")].in_units('g/cm**3')*cfg.par.dusttometals_ratio))
            grid.nx, grid.ny, grid.nz = igrid[("gas","metal_density")].shape
    '''

    m = Model()

    m.set_amr_grid(amr)

    #CMB DISABLED -- UNCOMMENT THIS TO FIX THIS.  The main issue is
    #that I'm not sure what shape to give to the np.repeat array of
    #energy_density_absorbed; I think it needs to be the AMR grid shape, but I'm
    #not quite sure if it needs to be an AMRGrid()
    #energy_density_absorbed=energy_density_absorbed_by_CMB()
    #energy_density_absorbed =np.repeat(energy_density_absorbed.value,reg.index.num_grids)#amr['density'].shape)

    d = SphericalDust(cfg.par.dustdir + cfg.par.dustfile)
    if cfg.par.SUBLIMATION == True:
        d.set_sublimation_temperature(
            'fast', temperature=cfg.par.SUBLIMATION_TEMPERATURE)

    m.add_density_grid(amr["density"], d)
    #uncomment when we're ready to put CMB in (and comment out previous line)
    #m.add_density_grid(amr['density'],d,specific_energy=energy_density_absorbed)
    #m.set_specific_energy_type('additional')
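    # Hedged sketch of the disabled CMB path above, mirroring the pattern used in the
    # voronoi/octree tributaries (Examples #5 and #6): build the absorbed CMB energy
    # density, repeat it over the grid, and pass it as specific_energy.  The repeat
    # count follows the commented line above (one value per yt grid); whether that is
    # the shape Hyperion's AMR grid actually expects is exactly the open question
    # flagged in the comments, so this block is kept inert.
    if False:  # flip once the shape question is settled
        energy_density_absorbed = energy_density_absorbed_by_CMB()
        specific_energy = np.repeat(energy_density_absorbed.value,
                                    reg.index.num_grids)
        m.add_density_grid(amr['density'], d, specific_energy=specific_energy)
        m.set_specific_energy_type('additional')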

    center = ds1.arr([cfg.model.x_cent, cfg.model.y_cent, cfg.model.z_cent],
                     'code_length')
    xcent, ycent, zcent = center.in_units('cm')  #boost needs to be in cm since that's what the

    boost = np.array([xcent, ycent, zcent])

    dx = ds1.domain_width[0].in_units('cm')
    dy = ds1.domain_width[1].in_units('cm')
    dz = ds1.domain_width[2].in_units('cm')

    return m, xcent, ycent, zcent, dx, dy, dz, reg, ds1, boost
Example #2
def enzo_m_gen(fname, field_add):

    reg, ds1 = enzo_grid_generate(fname, field_add)

    amr = yt_dataset_to_amr_grid_xyz(
        ds1, quantity_mapping={'density': ('gas', 'dust_density')})

    m = Model()

    #save in the m.__dict__ that we're in an amr geometry
    m.__dict__['grid_type'] = 'amr'

    m.set_amr_grid(amr)

    #CMB DISABLED -- UNCOMMENT THIS TO FIX THIS.  The main issue is
    #that I'm not sure what shape to give to the np.repeat array of
    #energy_density_absorbed; I think it needs to be the AMR grid shape, but I'm
    #not quite sure if it needs to be an AMRGrid()
    #energy_density_absorbed=energy_density_absorbed_by_CMB()
    #energy_density_absorbed =np.repeat(energy_density_absorbed.value,reg.index.num_grids)#amr['density'].shape)

    d = SphericalDust(cfg.par.dustdir + cfg.par.dustfile)
    if cfg.par.SUBLIMATION == True:
        d.set_sublimation_temperature(
            'fast', temperature=cfg.par.SUBLIMATION_TEMPERATURE)

    m.add_density_grid(amr["density"], d)
    #uncomment when we're ready to put CMB in (and comment out previous line)
    #m.add_density_grid(amr['density'],d,specific_energy=energy_density_absorbed)
    #m.set_specific_energy_type('additional')

    center = ds1.arr([cfg.model.x_cent, cfg.model.y_cent, cfg.model.z_cent],
                     'code_length')
    xcent, ycent, zcent = center.in_units('cm')  #boost needs to be in cm since that's what the

    boost = np.array([xcent, ycent, zcent])

    dx = ds1.domain_width[0].in_units('cm')
    dy = ds1.domain_width[1].in_units('cm')
    dz = ds1.domain_width[2].in_units('cm')

    return m, xcent, ycent, zcent, dx, dy, dz, reg, ds1, boost
Example #3
	def dust_gen(self,dustfile,dustfile_out='d03_5.5_3.0_A.hdf5'):
		### first, we need to load Tracy's dust files and manipulate them to feed to Hyperion
		### wavelength (microns), Cext, Csca, Kappa, g, pmax, theta (ignored)
		### albedo = Csca/Cext
		### opacity kappa is in cm^2/g; dust+gas extinction opacity (absorption+scattering) - assumes a gas-to-dust ratio of 100
		### see Whitney et al. 2003a
		
#		tracy_dust = np.loadtxt('Tracy_models/OH5.par')

#		### format for dust: d = HenyeyGreensteinDust(nu, albedo, chi, g, p_lin_max)
#		nu = const.c.value/ (tracy_dust[:,0]*1e-6)
#		albedo = tracy_dust[:,2]/tracy_dust[:,1]
#		chi = tracy_dust[:,3]
#		g = tracy_dust[:,4]
#		p_lin_max = tracy_dust[:,5]

#		### flip the table to have an increasing frequency
#		nu = nu[::-1]
#		albedo = albedo[::-1]
#		chi=chi[::-1]
#		g=g[::-1]
#		p_lin_max=p_lin_max[::-1]

#		### create the dust model
#		d = HenyeyGreensteinDust(nu, albedo, chi, g, p_lin_max)
#		d.optical_properties.extrapolate_wav(0.001,1.e7)
#		d.plot('OH5.png')
#		d.write('OH5.hdf5')
		
		self.d = SphericalDust()
		self.d.read(dustfile)
		self.d.plot(dustfile.rsplit('.', 1)[0] + '.png')  # plot named after the dust file, extension stripped
		self.d_out = SphericalDust()
		self.d_out.read(dustfile_out)
		#self.d_out.read(dustfile)
		self.d_out.plot(dustfile_out.rsplit('.', 1)[0] + '.png')
def setup_model(cli):

	#
	# Hyperion setup:
	#
	model = Model()


	if(cli.mode == "temperature"):
		#
		# Dust properties:
		#
		dust_properties = SphericalDust('dust_integrated_full_scattering.hdf5')


		#
		# Write dust properties:
		#
		dust_properties.write('dust_properties.hdf5')
		dust_properties.plot('dust_properties.png')

	
		#
		# Grid setup:
		#
		grid_wmin =  0
		grid_wmax =  5.0*pc # 4.0*pc
		grid_zmin =  0.0*pc
		grid_zmax = 10.0*pc
		grid_pmin =  0
		grid_pmax =  2*pi

		grid_dx = cli.resolution*pc
		grid_dw = grid_dx # uniform resolution
		grid_dz = grid_dx # uniform resolution
		grid_dp = grid_dx # resolution at filament location at r = 1 pc

		grid_Nw   = int((grid_wmax - grid_wmin) / grid_dw)
		grid_Nz   = int((grid_zmax - grid_zmin) / grid_dz)
		grid_Np   = int(2*pi * 1.0*pc / grid_dp)

		if(cli.verbose):
			print("Grid setup:")
			print(" Grid resolution =",cli.resolution, "pc.")
			print(" grid_Nw =",grid_Nw)
			print(" grid_Nz =",grid_Nz)
			print(" grid_Np =",grid_Np)

		#grid_w      = np.logspace(np.log10(grid_wmin), np.log10(grid_wmax), grid_Nw)
		#grid_w      = np.hstack([0., grid_w]) # add innermost cell interface at w=0
		grid_w    = np.linspace(grid_wmin, grid_wmax, grid_Nw+1)
		grid_z    = np.linspace(grid_zmin, grid_zmax, grid_Nz+1)
		grid_p    = np.linspace(grid_pmin, grid_pmax, grid_Np+1)

		model.set_cylindrical_polar_grid(grid_w, grid_z, grid_p)

		#
		# Dust density setup:
		#
		RC  = 0.1*pc
		nC  = 6.6580e+03       # in cm^-3
		nC *= cli.opticaldepth # the optical depth at 1 micron
		nC *= m_h              # in g cm^-3
		nC /= 100.0            # converts from gas to dust density
	
		rho = np.zeros(model.grid.shape)
	
		#
		# n(r) = nC / [ 1.0 + (r/RC)**2.0 ]
		# x = -sin(2.0×pi×t) pc, y = +cos(2.0×pi×t) pc, z = 10.0×t pc, t = [0.0, 1.0]
		#  => t = m.grid.gz / (10*pc)
		#  => phi(t) = mod(360*t+270, 360)
		#
		for k in range(0, grid_Np):
			for j in range(0, grid_Nz):
				for i in range(0, grid_Nw):
				
					t = model.grid.gz[k,j,i] / (10*pc)
				
					if(cli.filament == "linear"):
						filament_center_x  = 0
						filament_center_y  = 0
					elif(cli.filament == "spiraling"):
						filament_center_x  = - math.sin(2*pi*t)*pc
						filament_center_y  = + math.cos(2*pi*t)*pc
				
					spherical_grid_r   = model.grid.gw[k,j,i]
					spherical_grid_phi = model.grid.gp[k,j,i]
				
					cartesian_grid_x   = spherical_grid_r * math.cos(spherical_grid_phi)
					cartesian_grid_y   = spherical_grid_r * math.sin(spherical_grid_phi)
				
					rsquared = (
								(cartesian_grid_x - filament_center_x)**2
								+
								(cartesian_grid_y - filament_center_y)**2
								)
				
					rho[k,j,i] = nC / (1.0 + (rsquared / (RC*RC)))
				
					if rsquared**0.5 > 3*pc:
						rho[k,j,i] = 0

		rho[model.grid.gw > grid_wmax] = 0
		rho[model.grid.gz < grid_zmin] = 0
		rho[model.grid.gz > grid_zmax] = 0

		model.add_density_grid(rho, 'dust_properties.hdf5')


		#
		# Check optical depth through the filament:
		#
		#  (y,z = 0, 2.5 pc goes through the filament center in all setups)
		
		#
		# Determine index of closest grid cell to z = 2.5 pc:
		#
		dz_last = 2*abs(grid_zmax-grid_zmin)
		for j in range(0, grid_Nz):
			dz = abs(model.grid.gz[0,j,0] - 2.5*pc)
			if(dz > dz_last):
				j=j-1
				break
			else:
				dz_last = dz
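		# Equivalent, more compact lookup (a numpy sketch; j_closest is a new name and is
		# left unused so the loop above keeps determining j): index of the cell whose
		# centre is closest to z = 2.5 pc along the z axis of the grid.
		j_closest = int(np.argmin(np.abs(model.grid.gz[0, :, 0] - 2.5*pc)))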

		#
		# Opacity at 1.0 micron (per gram dust):
		#
		chi = dust_properties.optical_properties.interp_chi_wav(1.0)

		tau_max = 0
		for k in range(0, grid_Np):
			tau = 0
			for i in range(0, grid_Nw):
				dr = model.grid.widths[0,k,j,i]
				dtau = dr * rho[k,j,i] * chi
				tau += dtau
			tau_max = max(tau_max, tau)

		if(cli.filament == "linear"):
			tau_max *= 2

		dev = 100 * abs(cli.opticaldepth - tau_max) / cli.opticaldepth

		if(cli.verbose):
			print("Check:")
			print(" Numerical integration of the optical depth through the filament center yields tau = ", tau_max)
			print(" This corresponds to a deviation to the chosen setup value of", dev, "percent")


		#
		# Source:
		#
		if(cli.sources == "external"):
		
			nu, jnu            = np.loadtxt('bg_intensity_modified.txt', unpack=True)
			source_R           = 5*pc
			source             = model.add_external_spherical_source()
			source.peeloff     = False
			source.position    = (0, 0, 5.0*pc) # in a Cartesian frame
			source.radius      = source_R
			source.spectrum    = (nu, jnu)
			#source_MeanIntensity_J = <integrate bg_intensity.txt>
			#source_Area        = 4.0 * pi * source_R*source_R
			source.luminosity  = 8237.0*lsun #source_Area * pi * source_MeanIntensity_J
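			# Hedged sketch of the commented-out normalisation above: integrating the
			# tabulated background spectrum gives the mean intensity J, and the quoted
			# formula is L = (4 pi R^2) * pi * J.  Kept as a side computation so the
			# hard-coded 8237 lsun above stays in effect.
			source_MeanIntensity_J = np.trapz(jnu, x=nu)  # assumes nu is increasing
			source_Area = 4.0 * pi * source_R * source_R
			luminosity_from_spectrum = source_Area * pi * source_MeanIntensity_J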
		
		elif(cli.sources == "stellar"):

			source             = model.add_point_source()
			source.luminosity  = 3.839e35 # in ergs s^-1
			source.temperature = 10000.0 # in K
			if(cli.filament == "linear"):
				source.position    = (3.0*pc, 0, 5.0*pc)
			elif(cli.filament == "spiraling"):
				source.position    = (0     , 0, 3.0*pc)

		#
		# To compute total photon numbers:
		#
		grid_N = grid_Nw * grid_Nz * grid_Np
		if(cli.verbose):
			print("Radiation setup:")
			print(" photons_temperature / cell =", cli.photons_temperature)
			print(" photons_temperature total  =", grid_N * cli.photons_temperature)

		file = filename(cli, "temperature")
		file += ".rtin"

	else:
		file = filename(cli, "temperature")
		file += ".rtout"
	
		try:
			with open(file):
				if(cli.verbose):
					print("Using the specific energy distribution from file", file)
				model.use_geometry(file)
				model.use_quantities(file, only_initial=False, copy=False)
				model.use_sources(file)

		except IOError:
			print("ERROR: File '", file, "' cannot be found. \nERROR: This file, containing the specific energy density, has to be computed first via calling hyperion.")
			exit(2)

		#
		# To compute total photon numbers:
		#
		grid_Nw = len(model.grid.gw[0,0,:])
		grid_Nz = len(model.grid.gw[0,:,0])
		grid_Np = len(model.grid.gw[:,0,0])
		grid_N = grid_Nw * grid_Nz * grid_Np
		if(cli.verbose):
			print("Grid setup:")
			print(" grid_Nw =",grid_Nw)
			print(" grid_Nz =",grid_Nz)
			print(" grid_Np =",grid_Np)
			print("Radiation setup:")
			print(" photons_temperature / cell =", cli.photons_temperature)
			print(" photons_temperature total  =", grid_N * cli.photons_temperature)
			print(" photons_raytracing / cell  =", cli.photons_raytracing)
			print(" photons_raytracing total   =", grid_N * cli.photons_raytracing)
			print(" photons_imaging / cell     =", cli.photons_imaging)
			print(" photons_imaging total      =", grid_N * cli.photons_imaging)

		file = filename(cli, "")
		file += ".rtin"


	##
	## Temperature, Images, and SEDs:
	##
	if(cli.mode == "temperature"):

		model.set_raytracing(True)
		model.set_n_photons(
						initial            = grid_N * cli.photons_temperature,
						raytracing_sources = grid_N * cli.photons_raytracing,
						raytracing_dust    = grid_N * cli.photons_raytracing,
						imaging            = grid_N * cli.photons_imaging
						)
	
	elif(cli.mode == "images"):
	
		model.set_n_initial_iterations(0)
		model.set_raytracing(True)
		model.set_monochromatic(True, wavelengths=[100.0, 500.0, 0.55, 2.2])
		model.set_n_photons(
						raytracing_sources = grid_N * cli.photons_raytracing,
						raytracing_dust    = grid_N * cli.photons_raytracing,
						imaging_sources    = grid_N * cli.photons_imaging,
						imaging_dust       = grid_N * cli.photons_imaging
						)
	
		# group = 0
		image1x = model.add_peeled_images(sed=False, image=True)
		image1x.set_image_size(300, 300)
		image1x.set_image_limits(-5*pc, +5*pc, 0, 10*pc)
		image1x.set_viewing_angles([90],[0]) # along the x-direction
		image1x.set_uncertainties(True)
		image1x.set_output_bytes(8)
		image1x.set_track_origin('basic')
	
		# group = 1
		image1y = model.add_peeled_images(sed=False, image=True)
		image1y.set_image_size(300, 300)
		image1y.set_image_limits(-5*pc, +5*pc, 0, 10*pc)
		image1y.set_viewing_angles([90],[90]) # along the y-direction
		image1y.set_uncertainties(True)
		image1y.set_output_bytes(8)
		image1y.set_track_origin('basic')
	
		# group = 2
		image1z = model.add_peeled_images(sed=False, image=True)
		image1z.set_image_size(300, 300)
		image1z.set_image_limits(-5*pc, +5*pc, -5*pc, +5*pc)
		image1z.set_viewing_angles([0],[0]) # along the z-direction
		image1z.set_uncertainties(True)
		image1z.set_output_bytes(8)
		image1z.set_track_origin('basic')

	elif(cli.mode == "sed"):
	
		model.set_n_initial_iterations(0)
		model.set_raytracing(True)
		model.set_n_photons(
							raytracing_sources = grid_N * cli.photons_raytracing,
							raytracing_dust    = grid_N * cli.photons_raytracing,
							imaging            = grid_N * cli.photons_imaging
							)
	
		# group = 0
		sed1 = model.add_peeled_images(sed=True, image=False)
		sed1.set_wavelength_range(250, 0.01, 2000.0)
		sed1.set_viewing_angles([90],[0]) # along the x-direction
		sed1.set_peeloff_origin((0, 0, 2.5*pc))
		sed1.set_aperture_range(1, 0.3*pc, 0.3*pc)
		sed1.set_uncertainties(True)
		sed1.set_output_bytes(8)
		sed1.set_track_origin('basic')

		# group = 1
		sed2 = model.add_peeled_images(sed=True, image=False)
		sed2.set_wavelength_range(250, 0.01, 2000.0)
		sed2.set_viewing_angles([90],[0]) # along the x-direction
		sed2.set_peeloff_origin((0, 0, 5.0*pc))
		sed2.set_aperture_range(1, 0.3*pc, 0.3*pc)
		sed2.set_uncertainties(True)
		sed2.set_output_bytes(8)
		sed2.set_track_origin('basic')

		# group = 2
		sed3 = model.add_peeled_images(sed=True, image=False)
		sed3.set_wavelength_range(250, 0.01, 2000.0)
		sed3.set_viewing_angles([90],[0]) # along the x-direction
		sed3.set_peeloff_origin((0, 0, 7.5*pc))
		sed3.set_aperture_range(1, 0.3*pc, 0.3*pc)
		sed3.set_uncertainties(True)
		sed3.set_output_bytes(8)
		sed3.set_track_origin('basic')

	##
	## Write model for hyperion runs:
	##
	model.conf.output.output_density         = 'last'
	model.conf.output.output_specific_energy = 'last'
	model.conf.output.output_n_photons       = 'last'
	model.write(file)
	if(cli.verbose):
		print("The input file for hyperion was written to", file)
Example #5
def arepo_m_gen(fname, field_add):

    reg, ds, dustdens = arepo_vornoi_grid_generate(fname, field_add)

    xcent = ds.quan(cfg.model.x_cent, 'code_length').to('cm')  #proper cm
    ycent = ds.quan(cfg.model.y_cent, 'code_length').to('cm')
    zcent = ds.quan(cfg.model.z_cent, 'code_length').to('cm')

    boost = np.array([xcent, ycent, zcent])
    print('[arepo_tributary/vornoi_m_gen]:  boost = ', boost)

    #========================================================================
    #Initialize Hyperion Model
    #========================================================================

    m = Model()

    #because we boost the stars to a [0,0,0] coordinate center, we
    #want to make sure our Voronoi tessellation is created in the same manner.

    particle_x = reg["gascoordinates"][:, 0].to('cm')
    particle_y = reg["gascoordinates"][:, 1].to('cm')
    particle_z = reg["gascoordinates"][:, 2].to('cm')

    #just for the sake of symmetry, pass on a dx,dy,dz since it can be
    #used optionally downstream in other functions.
    dx = 2. * ds.quan(cfg.par.zoom_box_len, 'kpc').to('cm')
    dy = 2. * ds.quan(cfg.par.zoom_box_len, 'kpc').to('cm')
    dz = 2. * ds.quan(cfg.par.zoom_box_len, 'kpc').to('cm')

    print('[arepo_tributary] boost = ', boost)
    print('[arepo_tributary] xmin (pc)= ', (xcent - dx / 2.).to('pc'))
    print('[arepo_tributary] xmax (pc)= ', (xcent + dx / 2.).to('pc'))
    print('[arepo_tributary] ymin (pc)= ', (ycent - dy / 2.).to('pc'))
    print('[arepo_tributary] ymax (pc)= ', (ycent + dy / 2.).to('pc'))
    print('[arepo_tributary] zmin (pc)= ', (zcent - dz / 2.).to('pc'))
    print('[arepo_tributary] zmax (pc)= ', (zcent + dz / 2.).to('pc'))

    x_pos_boost = (particle_x - xcent).to('cm')
    y_pos_boost = (particle_y - ycent).to('cm')
    z_pos_boost = (particle_z - zcent).to('cm')

    m.set_voronoi_grid(x_pos_boost.value, y_pos_boost.value, z_pos_boost.value)

    #get CMB:

    energy_density_absorbed = energy_density_absorbed_by_CMB()
    specific_energy = np.repeat(energy_density_absorbed.value, dustdens.shape)

    if cfg.par.PAH == True:

        # load PAH fractions for usg, vsg, and big (grain sizes)
        frac = cfg.par.PAH_frac

        # Normalize to 1
        total = np.sum(list(frac.values()))
        frac = {k: v / total for k, v in frac.items()}

        for size in frac.keys():
            d = SphericalDust(cfg.par.dustdir + '%s.hdf5' % size)
            if cfg.par.SUBLIMATION == True:
                d.set_sublimation_temperature(
                    'fast', temperature=cfg.par.SUBLIMATION_TEMPERATURE)
            #m.add_density_grid(dustdens * frac[size], cfg.par.dustdir+'%s.hdf5' % size)
            m.add_density_grid(dustdens * frac[size],
                               d,
                               specific_energy=specific_energy)
        m.set_enforce_energy_range(cfg.par.enforce_energy_range)
    else:
        d = SphericalDust(cfg.par.dustdir + cfg.par.dustfile)
        if cfg.par.SUBLIMATION == True:
            d.set_sublimation_temperature(
                'fast', temperature=cfg.par.SUBLIMATION_TEMPERATURE)
        m.add_density_grid(dustdens, d, specific_energy=specific_energy)
        #m.add_density_grid(dustdens,cfg.par.dustdir+cfg.par.dustfile)
    m.set_specific_energy_type('additional')

    return m, xcent, ycent, zcent, dx.value, dy.value, dz.value, reg, ds, boost
Example #6
def sph_m_gen(fname,field_add):
    
    refined,dustdens,fc1,fw1,pf,ad = yt_octree_generate(fname,field_add)
    xmin = (fc1[:,0]-fw1[:,0]/2.).convert_to_units('cm') #in proper cm 
    xmax = (fc1[:,0]+fw1[:,0]/2.).convert_to_units('cm')
    ymin = (fc1[:,1]-fw1[:,1]/2.).convert_to_units('cm')
    ymax = (fc1[:,1]+fw1[:,1]/2.).convert_to_units('cm')
    zmin = (fc1[:,2]-fw1[:,2]/2.).convert_to_units('cm')
    zmax = (fc1[:,2]+fw1[:,2]/2.).convert_to_units('cm')
    

    #dx,dy,dz are the edges of the parent grid
    dx = (np.max(xmax)-np.min(xmin)).value
    dy = (np.max(ymax)-np.min(ymin)).value
    dz = (np.max(zmax)-np.min(zmin)).value


    xcent = np.mean([np.min(xmin),np.max(xmax)]) #in cm
    ycent = np.mean([np.min(ymin),np.max(ymax)])
    zcent = np.mean([np.min(zmin),np.max(zmax)])
    
    boost = np.array([xcent,ycent,zcent])
    print ('[pd_front end] boost = ',boost)

    
    #Tom Robitaille's conversion from z-first ordering (yt's default) to
    #x-first ordering (the script should work both ways)

    refined_array = np.array(refined)
    refined_array = np.squeeze(refined_array)
    
    order = find_order(refined_array)
    refined_reordered = []
    dustdens_reordered = np.zeros(len(order))
    
    
    
    for i in range(len(order)): 
        refined_reordered.append(refined[order[i]])
        dustdens_reordered[i] = dustdens[order[i]]


    refined = refined_reordered
    dustdens=dustdens_reordered

    #hyperion octree stats
    max_level = hos.hyperion_octree_stats(refined)


    pto.test_octree(refined,max_level)

    dump_cell_info(refined,fc1,fw1,xmin,xmax,ymin,ymax,zmin,zmax)
    np.save('refined.npy',refined)
    np.save('density.npy',dustdens)
    

    #========================================================================
    #Initialize Hyperion Model
    #========================================================================

    m = Model()
    
    if cfg.par.FORCE_RANDOM_SEED == True: m.set_seed(cfg.par.seed)

    print ('Setting Octree Grid with Parameters: ')



    #m.set_octree_grid(xcent,ycent,zcent,
    #                  dx,dy,dz,refined)
    m.set_octree_grid(0,0,0,dx/2,dy/2,dz/2,refined)    


    #get CMB:
    
    energy_density_absorbed=energy_density_absorbed_by_CMB()
    specific_energy = np.repeat(energy_density_absorbed.value,dustdens.shape)

    if cfg.par.PAH == True:
        
        # load PAH fractions for usg, vsg, and big (grain sizes)
        frac = cfg.par.PAH_frac

        # Normalize to 1
        total = np.sum(list(frac.values()))
        frac = {k: v / total for k, v in frac.items()}

        for size in frac.keys():
            d = SphericalDust(cfg.par.dustdir+'%s.hdf5'%size)
            if cfg.par.SUBLIMATION == True:
                d.set_sublimation_temperature('fast',temperature=cfg.par.SUBLIMATION_TEMPERATURE)
            #m.add_density_grid(dustdens * frac[size], cfg.par.dustdir+'%s.hdf5' % size)
            m.add_density_grid(dustdens*frac[size],d,specific_energy=specific_energy)
        m.set_enforce_energy_range(cfg.par.enforce_energy_range)
    else:
        d = SphericalDust(cfg.par.dustdir+cfg.par.dustfile)
        if cfg.par.SUBLIMATION == True:
            d.set_sublimation_temperature('fast',temperature=cfg.par.SUBLIMATION_TEMPERATURE)
        m.add_density_grid(dustdens,d,specific_energy=specific_energy)
        #m.add_density_grid(dustdens,cfg.par.dustdir+cfg.par.dustfile)  
    m.set_specific_energy_type('additional')

    return m,xcent,ycent,zcent,dx,dy,dz,pf,boost
Example #7
def extract_extinction_map(SO_cm, SO, dust):
    '''
    Extracting extinction map from radiative transfer calculation.
    
    Parameters
    ----------
    
    SO_cm : SyntheticImage 
        FluxCompensator object of the 1 cm observation.
    
    SO : SyntheticImage 
        FluxCompensator object of the synthetic observation to which field stars should be added.
    
    dust : str or dict
        Path and name of a dust file, or a dict with the tabulated properties
        under the keys 'kappa', 'chi' and 'wav'.
    
    
    Returns
    -------
    
    A_v : numpy.ndarray
        Optical extinction map.
    
    '''

    # SO_cm needs to be at 1 cm
    w0 = SO_cm.wav[0]
    if w0 > 10000. or w0 < 9999.:
        raise Exception('WARNING: Dust extinction image is not at 1cm.')

    # check if resolution is the same
    if SO.resolution['rad'] != SO_cm.resolution['rad']:
        raise Exception(
            'WARNING: Extinction image at 1cm and image from pipeline do not have the same resolution.'
        )

    # check if units are correct
    if SO_cm.units != 'ergs/cm^2/s/Hz':
        raise Exception('WARNING: Units of SO_cm need to be ergs/cm^2/s/Hz.')

    if isinstance(dust, str):
        # load dust properties from hyperion
        from hyperion.dust import SphericalDust
        d = SphericalDust(dust)
        kappa = d.optical_properties.kappa
        chi = d.optical_properties.chi
        wav = d.optical_properties.wav

    else:
        # load dust_kappa, dust_chi & dust_wav
        # from the dict dust = {'kappa': ..., 'chi': ..., 'wav': ...}
        kappa = dust['kappa']
        chi = dust['chi']
        wav = dust['wav']

        if wav[0] < wav[1]:
            kappa = kappa[::-1]
            chi = chi[::-1]
            wav = wav[::-1]

    # interpolate kappa at 1 cm and chi at all wavelengths of SO
    kappa_cm = np.interp(w0, wav[::-1], kappa[::-1])  # cm^2/g
    chi_wav = np.interp(SO.wav, wav[::-1], chi[::-1])  # cm^2/g

    # constants in cgs
    T = 10.
    pi = 3.141592653589793
    h = 6.626068e-27
    k = 1.3806503e-16
    c = 29979245800.0

    # Planck function and Jnu at 1 cm with T=10 K
    nu = c / (w0 * 1e-4)
    Bnu = 2 * h * nu**3 / c**2 * (np.exp(h * nu / k / T) - 1)**(-1)
    Jnu = Bnu * kappa_cm

    # surface density * chi = tau
    # surface density = surface brightness / Jnu
    sr = SO_cm.resolution['rad']**2
    tau = SO_cm.val / Jnu / sr * chi_wav

    # extinction at 1 cm
    A_lam = 1.086 * tau

    # convert to A_v
    print('CAUTION: Extinction law from Kim et al. is used.')
    wav_ext, k_lam = np.loadtxt(ROOT +
                                '../database/extinction/extinction_law.txt',
                                unpack=True)
    k_v = np.interp(0.550, wav_ext, k_lam)
    k = np.interp(1., wav_ext, k_lam)
    A_v = A_lam * (k_v / k)

    return A_v[:, :, 0]
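# Hedged usage sketch: besides a dust-file path, extract_extinction_map also accepts the
# tabulated properties directly (the non-str branch above).  The dust file name below is
# only an example, and `SO_cm` / `SO` stand for FluxCompensator SyntheticImage objects
# produced upstream, so the final call is left commented.
from hyperion.dust import SphericalDust
_d = SphericalDust('d03_5.5_3.0_A.hdf5')
dust_table = {'kappa': _d.optical_properties.kappa,
              'chi': _d.optical_properties.chi,
              'wav': _d.optical_properties.wav}
# A_v = extract_extinction_map(SO_cm, SO, dust_table)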
import numpy as np
from hyperion.model import Model
from hyperion.util.constants import au, lsun, rsun
from hyperion.dust import SphericalDust


# Model
m = Model()
dist = 20000 * au
x = np.linspace(-dist, dist, 101)
y = np.linspace(-dist, dist, 101)
z = np.linspace(-dist, dist, 101)
m.set_cartesian_grid(x, y, z)

# Dust
d = SphericalDust("kmh.hdf5")
d.set_sublimation_temperature("fast", temperature=1600.0)
m.add_density_grid(np.ones((100, 100, 100)) * 1.0e-18, d)  # use the configured dust object so the sublimation setting takes effect

# Alpha centauri
sourceA = m.add_spherical_source()
sourceA.luminosity = 1.519 * lsun
sourceA.radius = 1.227 * rsun
sourceA.temperature = 5790.0
sourceA.position = (0.0, 0.0, 0.0)

# Beta centauri
sourceB = m.add_spherical_source()
sourceB.luminosity = 0.5 * lsun
sourceB.radius = 0.865 * rsun
sourceB.temperature = 5260.0
Example #9
Herschel = ['H70','H160','H250','H350']
wlHerschel = [70,160,250,350]
uHerschel = ["e_"+col for col in Herschel]
labelSpitzer = 'Herschel'
sources = sourcetable.group_by('SOFIA_name')

# set up extinction
extinctions = range(30)
#d = SphericalDust()
#d.read('d03_5.5_3.0_A.hdf5')
#chi = d.optical_properties.chi
#chi = chi[::-1]
#wav = d.optical_properties.wav
#wav = wav[::-1]
#Chi = interp1d(wav,chi,kind='linear')
d = SphericalDust()
d.read('OH5.hdf5')
chi = d.optical_properties.chi#/100. # divide by 100 for the gas-to-dust ratio
chi = chi[::-1]
wav = d.optical_properties.wav
wav = wav[::-1]
Chi = interp1d(wav,chi,kind='linear')
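# Hedged sketch: one common use of the interpolated opacity above is to scale an
# extinction between bands, since for a fixed dust model A_lam/A_V = chi(lam)/chi(V).
# The function name is new here and the 0.55 micron V-band reference is an assumption.
def scale_extinction(A_v, wavelength_micron):
    # scale a V-band extinction to another wavelength via the opacity ratio
    return A_v * Chi(wavelength_micron) / Chi(0.55)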

#inclinations = [0.,10.,20.,30.,40.,50.,60.,70.,80.,90.]
angles=np.arccos(np.linspace(0,1.,20))*180./np.pi
inclinations=angles[::-1]

# set the wavelengths
names = TwoMASS+Spitzer+SOFIA+Herschel
wl=wlTwoMASS+wlSpitzer+wlSOFIA+wlHerschel
wl_table = Table(names = names,dtype=['f8' for col in wl])
Example #10
def setup_model(cli):
	
    lsun_TRUST = 3.839e33
        
    #
    # Hyperion setup:
    #
    model = Model()


    if(cli.mode == "temperature"):
        #
        # Dust properties:
        #
        dust_properties = SphericalDust('dust_integrated_full_scattering.hdf5')
            
            
        #
        # Write dust properties:
        #
        dust_properties.write('dust_properties.hdf5')
        dust_properties.plot('dust_properties.png')
        
        
        #
        # Specify galaxy setup:
        #
        hR                     =  4000.0*pc             # [cm]
        Rmax                   =     5.0*hR             # [cm]
        hz_oldstars            =   350.0*pc             # [cm]
        hz_youngstars          =   200.0*pc             # [cm]
        hz_dust                =   200.0*pc             # [cm]
        zmax_oldstars          =     5.0*hz_oldstars    # [cm]
        zmax_youngstars        =     5.0*hz_youngstars  # [cm]
        zmax_dust              =     5.0*hz_dust        # [cm]
        zmax                   =  zmax_oldstars         # [cm]
        reff                   =  1600.0*pc             # [cm]
        n                      =     3.0
        q                      =     0.6
        bn                     = 2.0*n - 1.0/3.0 + 4.0/405.0/n + 46.0/25515.0/n/n + 131.0/1148175.0/n/n/n
        temperature_oldstars   =  3500.0                # [K]
        temperature_youngstars = 10000.0                # [K]
        temperature_bulge      =  3500.0                # [K]
        luminosity_oldstars    =     4.0e+10*lsun_TRUST # [ergs/s]
        luminosity_youngstars  =     1.0e+10*lsun_TRUST # [ergs/s]
        luminosity_bulge       =     3.0e+10*lsun_TRUST # [ergs/s]
        
        w_oldstars             =     0.25
        w_youngstars           =     0.75
        w_dust                 =     0.75
        phi0_oldstars          =     0.0
        phi0_youngstars        =    20.0 * pi/180.0
        phi0_dust              =    20.0 * pi/180.0
        modes                  =     2
        pitchangle             =    20.0 * pi/180.0
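        
        # Quick sanity check on the Sersic b_n expansion above (a sketch; scipy is an
        # extra dependency not otherwise used here, hence the guarded import): the
        # exact b_n solves gammainc(2n, x) = 1/2, i.e. x = gammaincinv(2n, 0.5).
        try:
            from scipy.special import gammaincinv
            assert abs(bn - gammaincinv(2.0*n, 0.5)) < 1.0e-3
        except ImportError:
            pass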
        
        
        
        #
        # Grid setup:
        #
        grid_wmin =  0.0
        grid_wmax =  Rmax
        grid_zmin = -zmax
        grid_zmax = +zmax
        grid_pmin =  0.0
        grid_pmax =  2.0*pi
        
        grid_dx = cli.resolution*pc
        grid_dw = grid_dx # uniform resolution
        grid_dz = grid_dx # uniform resolution
        grid_dp = grid_dx # resolution at characteristic radial disk spatial scale hR = 4000.0 pc
        
        grid_Nw   = int((grid_wmax - grid_wmin) / grid_dw) + 1
        grid_Nz   = int((grid_zmax - grid_zmin) / grid_dz) + 1
        if(cli.case == 1):
            grid_Np = 1
        if(cli.case == 2):
            grid_Np = int((grid_pmax - grid_pmin) * hR / grid_dp)
        
        if(cli.verbose):
            print("Grid setup:")
            print(" Grid resolution =",cli.resolution, "pc.")
            print(" grid_Nw =",grid_Nw)
            print(" grid_Nz =",grid_Nz)
            print(" grid_Np =",grid_Np)
        
        #grid_w      = np.logspace(np.log10(grid_wmin), np.log10(grid_wmax), grid_Nw)
        #grid_w      = np.hstack([0., grid_w]) # add innermost cell interface at w=0
        grid_w    = np.linspace(grid_wmin, grid_wmax, grid_Nw+1)
        grid_z    = np.linspace(grid_zmin, grid_zmax, grid_Nz+1)
        grid_p    = np.linspace(grid_pmin, grid_pmax, grid_Np+1)
        
        model.set_cylindrical_polar_grid(grid_w, grid_z, grid_p)
        
        #
        # Dust density and sources setup:
        #
        rho_oldstars   = np.zeros(model.grid.shape)
        rho_youngstars = np.zeros(model.grid.shape)
        rho_bulge      = np.zeros(model.grid.shape)
        rho_dust       = np.zeros(model.grid.shape)
        
        for k in range(0, grid_Np):
            for j in range(0, grid_Nz):
                for i in range(0, grid_Nw):
                    
                    R = model.grid.gw[k,j,i]
                    z = model.grid.gz[k,j,i]
                    m = math.sqrt(R*R + z*z/q/q)
                    
                    rho_dust[k,j,i]       = math.exp(- R/hR -abs(z)/hz_dust      )
                    rho_oldstars[k,j,i]   = math.exp(- R/hR -abs(z)/hz_oldstars  )
                    rho_youngstars[k,j,i] = math.exp(- R/hR -abs(z)/hz_youngstars)
                    rho_bulge[k,j,i]      = math.pow(m/reff, 0.5/n - 1.0) * math.exp(- bn * math.pow(m/reff, 1.0/n))
                    
                    if(cli.case == 2):
                        phi = model.grid.gp[k,j,i]
                        perturb = math.sin(modes * (math.log(R/hR) / math.tan(pitchangle) - (phi - phi0_dust)))
                        rho_dust[k,j,i]       *= (1.0 + w_dust       * perturb)
                        perturb = math.sin(modes * (math.log(R/hR) / math.tan(pitchangle) - (phi - phi0_oldstars)))
                        rho_oldstars[k,j,i]   *= (1.0 + w_oldstars   * perturb)
                        perturb = math.sin(modes * (math.log(R/hR) / math.tan(pitchangle) - (phi - phi0_youngstars)))
                        rho_youngstars[k,j,i] *= (1.0 + w_youngstars * perturb)
        
        rho_dust[model.grid.gw > grid_wmax] = 0
        rho_dust[model.grid.gz < grid_zmin] = 0
        rho_dust[model.grid.gz > grid_zmax] = 0
        
        kappa_ref     = dust_properties.optical_properties.interp_chi_wav(0.55693)
        rho0          = cli.opticaldepth / (2.0 * hz_dust * kappa_ref)
        rho_dust[:]  *= rho0
        model.add_density_grid(rho_dust, 'dust_properties.hdf5')
        
        source_oldstars                = model.add_map_source()
        source_oldstars.luminosity     = luminosity_oldstars
        source_oldstars.temperature    = temperature_oldstars
        source_oldstars.map            = rho_oldstars
        
        source_youngstars              = model.add_map_source()
        source_youngstars.luminosity   = luminosity_youngstars
        source_youngstars.temperature  = temperature_youngstars
        source_youngstars.map          = rho_youngstars
        
        source_bulge                   = model.add_map_source()
        source_bulge.luminosity        = luminosity_bulge
        source_bulge.temperature       = temperature_bulge
        source_bulge.map               = rho_bulge
        
        
        #
        # Check face-on optical depth at 1.0 micron (per gram dust) through the dust disk:
        #
        tau   = 0
        
        k = 0
        i = 0
        for j in range(0, grid_Nz):
            #print(model.grid.gz[k,j,i]/pc, rho_dust[k,j,i])
            dz   = model.grid.widths[1,k,j,i]
            dtau = dz * rho_dust[k,j,i] * kappa_ref
            tau += dtau
        
        deviation = 100.0 * abs(cli.opticaldepth - tau) / cli.opticaldepth
        
        if(cli.verbose):
            print("Check optical depth of dust density setup:")
            print(" kappa(0.55693 micron) = ", kappa_ref, "cm^2 g^-1")
            print(" Numerical integration of the face-on optical depth at 0.55693 micron through the central dust disk yields tau = ", tau)
            print(" This corresponds to a deviation to the chosen setup value of", deviation, "percent")
    
        #
        # Check central dust density:
        #
        rho_max = np.max(rho_dust)
        if(cli.opticaldepth < 1.0):
            rho_setup = 1.04366e-4 * msun/pc/pc/pc
        elif(cli.opticaldepth < 3.0):
            rho_setup = 5.21829e-4 * msun/pc/pc/pc
        else:
            rho_setup = 2.60915e-3 * msun/pc/pc/pc

        deviation = 100.0 * abs(rho_setup - rho_max) / rho_setup

        if(cli.verbose):
            print("Check value of central dust density:")
            print(" rho_max = ", rho_max, "g cm^-3")
            print(" This corresponds to a deviation to the chosen setup value of", deviation, "percent")

        #
        # To compute total photon numbers:
        #
        grid_N = grid_Nw * grid_Nz * grid_Np
        if(cli.verbose):
            print("Radiation setup:")
            print(" photons_temperature / cell =", cli.photons_temperature)
            print(" photons_temperature total  =", grid_N * cli.photons_temperature)

        file = filename(cli, "temperature")
        file += ".rtin"
    
    
    else:
        file = filename(cli, "temperature")
        file += ".rtout"
        
        try:
            with open(file):
                if(cli.verbose):
                    print("Using the specific energy distribution from file", file)
                model.use_geometry(file)
                model.use_quantities(file, only_initial=False, copy=False)
                model.use_sources(file)
            
        except IOError:
            print("ERROR: File '", file, "' cannot be found. \nERROR: This file, containing the specific energy density, has to be computed first via calling hyperion.")
            exit(2)
        
        #
        # To compute total photon numbers:
        #
        grid_Nw = len(model.grid.gw[0,0,:])
        grid_Nz = len(model.grid.gw[0,:,0])
        grid_Np = len(model.grid.gw[:,0,0])
        grid_N = grid_Nw * grid_Nz * grid_Np
        if(cli.verbose):
            print("Grid setup:")
            print(" grid_Nw =",grid_Nw)
            print(" grid_Nz =",grid_Nz)
            print(" grid_Np =",grid_Np)
            print("Radiation setup:")
            print(" photons_temperature / cell =", cli.photons_temperature)
            print(" photons_temperature total  =", grid_N * cli.photons_temperature)
            print(" photons_raytracing / cell  =", cli.photons_raytracing)
            print(" photons_raytracing total   =", grid_N * cli.photons_raytracing)
            print(" photons_imaging / cell     =", cli.photons_imaging)
            print(" photons_imaging total      =", grid_N * cli.photons_imaging)
        
        file = filename(cli, "")
        file += ".rtin"


    ##
    ## Temperature, Images, and SEDs:
    ##
    if(cli.mode == "temperature"):
    
        model.set_raytracing(True)
        model.set_n_photons(
            initial            = grid_N * cli.photons_temperature,
            raytracing_sources = grid_N * cli.photons_raytracing,
            raytracing_dust    = grid_N * cli.photons_raytracing,
            imaging            = grid_N * cli.photons_imaging
        )
        
    elif(cli.mode == "images"):
        
        model.set_n_initial_iterations(0)
        model.set_raytracing(True)
        # old setup: model.set_monochromatic(True, wavelengths=[0.4, 1.0, 10.0, 100.0, 500.0])
        model.set_monochromatic(True, wavelengths=[0.45483, 1.2520, 26.114, 242.29])
        model.set_n_photons(
            raytracing_sources = grid_N * cli.photons_raytracing,
            raytracing_dust    = grid_N * cli.photons_raytracing,
            imaging_sources    = grid_N * cli.photons_imaging,
            imaging_dust       = grid_N * cli.photons_imaging
        )
    
        # group = 0
        image1 = model.add_peeled_images(sed=False, image=True)
        image1.set_image_size(501, 501)
        image1.set_image_limits(-12500.0*pc, +12500.0*pc, -12500.0*pc, +12500.0*pc)
        image1.set_viewing_angles([30],[0])
        image1.set_uncertainties(True)
        image1.set_output_bytes(8)
        image1.set_track_origin('basic')
    
        # group = 1
        image2 = model.add_peeled_images(sed=False, image=True)
        image2.set_image_size(501, 501)
        image2.set_image_limits(-12500.0*pc, +12500.0*pc, -12500.0*pc, +12500.0*pc)
        image2.set_viewing_angles([80],[90])
        image2.set_uncertainties(True)
        image2.set_output_bytes(8)
        image2.set_track_origin('basic')
    
        # group = 2
        image3 = model.add_peeled_images(sed=False, image=True)
        image3.set_image_size(501, 501)
        image3.set_image_limits(-12500.0*pc, +12500.0*pc, -12500.0*pc, +12500.0*pc)
        image3.set_viewing_angles([88],[0]) # mostly edge-on
        image3.set_uncertainties(True)
        image3.set_output_bytes(8)
        image3.set_track_origin('basic')

    elif(cli.mode == "seds"):
        
        model.set_n_initial_iterations(0)
        model.set_raytracing(True)
        model.set_n_photons(
            raytracing_sources = grid_N * cli.photons_raytracing,
            raytracing_dust    = grid_N * cli.photons_raytracing,
            imaging            = grid_N * cli.photons_imaging
        )
    
        # group = 0
        sed1 = model.add_peeled_images(sed=True, image=False)
        sed1.set_wavelength_range(47, 0.081333, 1106.56)
        sed1.set_viewing_angles([30],[0])
        sed1.set_peeloff_origin((0, 0, 0))
        sed1.set_aperture_range(1, 25000.0*pc, 25000.0*pc)
        sed1.set_uncertainties(True)
        sed1.set_output_bytes(8)
        sed1.set_track_origin('basic')
        
        # group = 1
        sed2 = model.add_peeled_images(sed=True, image=False)
        sed2.set_wavelength_range(47, 0.081333, 1106.56)
        sed2.set_viewing_angles([80],[0])
        sed2.set_peeloff_origin((0, 0, 0))
        sed2.set_aperture_range(1, 25000.0*pc, 25000.0*pc)
        sed2.set_uncertainties(True)
        sed2.set_output_bytes(8)
        sed2.set_track_origin('basic')
    
        # group = 2
        sed3 = model.add_peeled_images(sed=True, image=False)
        sed3.set_wavelength_range(47, 0.081333, 1106.56)
        sed3.set_viewing_angles([88],[0])
        sed3.set_peeloff_origin((0, 0, 0))
        sed3.set_aperture_range(1, 25000.0*pc, 25000.0*pc)
        sed3.set_uncertainties(True)
        sed3.set_output_bytes(8)
        sed3.set_track_origin('basic')

    ##
    ## Write model for hyperion runs:
    ##
    model.conf.output.output_density         = 'last'
    model.conf.output.output_specific_energy = 'last'
    model.conf.output.output_n_photons       = 'last'
    model.write(file)
    if(cli.verbose):
        print("The input file for hyperion was written to", file)
Example #11
from hyperion.dust import SphericalDust
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

from astropy import units as u
from astropy import constants as constants

dustfile = '/ufrc/narayanan/desika.narayanan/pd/hyperion-dust/dust_files/d03_3.1_6.0_A.hdf5'
dust = SphericalDust(dustfile)
mw_df_nu = dust.optical_properties.nu
mw_df_chi  = dust.optical_properties.chi

mw_df_lam = (constants.c/(mw_df_nu*u.Hz)).to(u.micron)
x = 1./mw_df_lam


fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x,mw_df_chi)
ax.set_xlim([1,10])
ax.set_xlabel('x')
ax.set_ylabel(r'$\chi$')
fig.savefig('extinction.png',dpi=300)
Example #12
import numpy as np
from hyperion.model import Model
from hyperion.util.constants import au, lsun, rsun
from hyperion.dust import SphericalDust


# Model
m = Model()
dist = 20000 * au
x = np.linspace(-dist, dist, 101)
y = np.linspace(-dist, dist, 101)
z = np.linspace(-dist, dist, 101)
m.set_cartesian_grid(x,y,z)

# Dust
d = SphericalDust('kmh.hdf5')
d.set_sublimation_temperature('fast', temperature=1600.)
m.add_density_grid(np.ones((100,100,100)) * 1.e-18, d)  # use the configured dust object so the sublimation setting takes effect

# Alpha centauri
sourceA = m.add_spherical_source()
sourceA.luminosity = 1.519 * lsun
sourceA.radius = 1.227 * rsun
sourceA.temperature = 5790.
sourceA.position = (0., 0., 0.)

# Beta centauri
sourceB = m.add_spherical_source()
sourceB.luminosity = 0.5 * lsun
sourceB.radius = 0.865 * rsun
sourceB.temperature = 5260.
Example #13
def arepo_m_gen(fname, field_add):

    reg, ds, dustdens = arepo_vornoi_grid_generate(fname, field_add)

    xcent = ds.quan(cfg.model.x_cent, 'code_length').to('cm')  #proper cm
    ycent = ds.quan(cfg.model.y_cent, 'code_length').to('cm')
    zcent = ds.quan(cfg.model.z_cent, 'code_length').to('cm')

    boost = np.array([xcent, ycent, zcent])
    print('[arepo_tributary/vornoi_m_gen]:  boost = ', boost)

    #========================================================================
    #Initialize Hyperion Model
    #========================================================================

    m = Model()

    #because we boost the stars to a [0,0,0] coordinate center, we
    #want to make sure our Voronoi tessellation is created in the same manner.

    particle_x = reg["gas", "coordinates"][:, 0].to('cm')
    particle_y = reg["gas", "coordinates"][:, 1].to('cm')
    particle_z = reg["gas", "coordinates"][:, 2].to('cm')

    #just for the sake of symmetry, pass on a dx,dy,dz since it can be
    #used optionally downstream in other functions.
    dx = 2. * ds.quan(cfg.par.zoom_box_len, 'kpc').to('cm')
    dy = 2. * ds.quan(cfg.par.zoom_box_len, 'kpc').to('cm')
    dz = 2. * ds.quan(cfg.par.zoom_box_len, 'kpc').to('cm')

    print('[arepo_tributary] boost = ', boost)
    print('[arepo_tributary] xmin (pc)= ', (xcent - dx / 2.).to('pc'))
    print('[arepo_tributary] xmax (pc)= ', (xcent + dx / 2.).to('pc'))
    print('[arepo_tributary] ymin (pc)= ', (ycent - dy / 2.).to('pc'))
    print('[arepo_tributary] ymax (pc)= ', (ycent + dy / 2.).to('pc'))
    print('[arepo_tributary] zmin (pc)= ', (zcent - dz / 2.).to('pc'))
    print('[arepo_tributary] zmax (pc)= ', (zcent + dz / 2.).to('pc'))

    x_pos_boost = (particle_x - xcent).to('cm')
    y_pos_boost = (particle_y - ycent).to('cm')
    z_pos_boost = (particle_z - zcent).to('cm')

    m.set_voronoi_grid(x_pos_boost.value, y_pos_boost.value, z_pos_boost.value)

    #get CMB:

    energy_density_absorbed = energy_density_absorbed_by_CMB()
    specific_energy = np.repeat(energy_density_absorbed.value, dustdens.shape)

    if cfg.par.otf_extinction == False:

        if cfg.par.PAH == True:

            # load PAH fractions for usg, vsg, and big (grain sizes)
            frac = cfg.par.PAH_frac

            # Normalize to 1
            total = np.sum(list(frac.values()))
            frac = {k: v / total for k, v in frac.items()}

            for size in frac.keys():
                d = SphericalDust(cfg.par.dustdir + '%s.hdf5' % size)
                if cfg.par.SUBLIMATION == True:
                    d.set_sublimation_temperature(
                        'fast', temperature=cfg.par.SUBLIMATION_TEMPERATURE)
                    #m.add_density_grid(dustdens * frac[size], cfg.par.dustdir+'%s.hdf5' % size)
                m.add_density_grid(dustdens * frac[size],
                                   d,
                                   specific_energy=specific_energy)
            m.set_enforce_energy_range(cfg.par.enforce_energy_range)
        else:
            d = SphericalDust(cfg.par.dustdir + cfg.par.dustfile)
            if cfg.par.SUBLIMATION == True:
                d.set_sublimation_temperature(
                    'fast', temperature=cfg.par.SUBLIMATION_TEMPERATURE)
            m.add_density_grid(dustdens, d, specific_energy=specific_energy)
        #m.add_density_grid(dustdens,cfg.par.dustdir+cfg.par.dustfile)

    else:  #instead of using a constant extinction law across the
        #entire galaxy, we'll compute it on a cell-by-cell basis by
        #using information about the grain size distribution from
        #the simulation itself.

        ad = ds.all_data()
        nsizes = reg['PartType0', 'NumGrains'].shape[1]
        try:
            assert (np.sum(ad['PartType0', 'NumGrains']) > 0)
        except AssertionError:
            raise AssertionError(
                "[arepo_tributary:] There are no dust grains in this simulation.  This can sometimes happen in an early snapshot of a simulation where the dust has not yet had time to form."
            )
        grid_of_sizes = reg['PartType0', 'NumGrains']
        active_dust_add(ds, m, grid_of_sizes, nsizes, dustdens,
                        specific_energy)

    m.set_specific_energy_type('additional')

    return m, xcent, ycent, zcent, dx.value, dy.value, dz.value, reg, ds, boost
import numpy as np
from astropy.table import Table
import pickle
from scipy.interpolate import interp1d
from hyperion.dust import SphericalDust
from hyperion.model import ModelOutput
from hyperion.util.constants import pc


target_list = ['IRAS20050.1','IRAS20050.2','IRAS20050.3','IRAS20050.4','IRAS20050.5']
dist = 700*pc


folder = ['Grid/']
name = ['model']

angles=np.arccos(np.linspace(0,1.,20))*180./np.pi
inclinations=angles[::-1]

d = SphericalDust()
d.read('d03_5.5_3.0_A.hdf5')
chi = d.optical_properties.chi
chi = chi[::-1]
wav = d.optical_properties.wav
wav = wav[::-1]
Chi = interp1d(wav,chi,kind='linear')

sorted_grid = pickle.load(open(folder[0]+name[0]+"_"+target+".grid.dat",'rb'))
best_model_fname = folder[0]+sorted_grid['name'][0]+'.rtout'
best_model = ModelOutput(best_model_fname)
inc = int(np.argwhere(inclinations==sorted_grid['inc'][0]))
sed = best_model.get_sed(aperture=-1, inclination=inc, distance=dist,units='Jy')
N = len(sed.wav)
vec = np.zeros((N,len(target_list)+1))
vec[:,0] = sed.wav
Example #15
def setup_model(parfile, output, imaging=True):

    # Read in model parameters
    par = read_parfile(parfile, nested=True)

    # Find all dust files
    dust_files = {}
    for par_name in par:
        if 'dust' in par[par_name]:
            dust_file = par[par_name]['dust']
            dust_files[dust_file] = SphericalDust(dust_file)

    # Find dimensionality of problem:
    if 'disk' in par:
        ndim = 2
        optimize = False
    elif 'cavity' in par:
        ndim = 2
        optimize = False
    elif 'envelope' in par and 'rc' in par['envelope']:
        ndim = 2
        optimize = False
    else:
        ndim = 1
        optimize = True

    # Set up model
    m = AnalyticalYSOModel(output)

    if not 'star' in par:
        raise Exception("Cannot compute a model without a central source")

    # Set radius and luminosity
    m.star.radius = par['star']['radius'] * rsun
    m.star.luminosity = 4. * pi * (par['star']['radius'] * rsun) ** 2. \
                        * sigma * par['star']['temperature'] ** 4.

    # Interpolate and set spectrum
    nu, fnu = interp_atmos(par['star']['temperature'])
    m.star.spectrum = (nu, fnu)

    subtract_from_ambient = []

    if 'disk' in par:

        # Add the flared disk component
        disk = m.add_flared_disk()

        # Basic parameters
        disk.mass = par['disk']['mass'] * msun
        disk.rmax = par['disk']['rmax'] * au
        disk.p = par['disk']['p']
        disk.beta = par['disk']['beta']
        disk.h_0 = par['disk']['h100'] * au
        disk.r_0 = 100. * au

        # Set inner and outer walls to be spherical
        disk.cylindrical_inner_rim = False
        disk.cylindrical_outer_rim = False

        # Set dust
        disk.dust = dust_files[par['disk']['dust']]

        # Inner radius
        if 'rmin' in par['disk']:
            disk.rmin = par['disk']['rmin'] * OptThinRadius(TSUB)
        else:
            disk.rmin = OptThinRadius(TSUB)

        # Settling
        if 'eta' in par['disk']:
            raise Exception("Dust settling not implemented")

        # Accretion luminosity
        if 'lacc' in par['disk']:
            raise Exception("Accretion luminosity not implemented")
            # m.setup_magnetospheric_accretion(par['disk']['lacc'] * lsun,
            #                                  par['disk']['rtrunc'],
            #                                  par['star']['fspot'], disk)

        subtract_from_ambient.append(disk)

    if 'envelope' in par:

        if 'rc' in par['envelope']:  # Ulrich envelope

            envelope = m.add_ulrich_envelope()
            envelope.rho_0 = par['envelope']['rho_0']
            envelope.rc = par['envelope']['rc'] * au

        elif 'power' in par['envelope']:  # Power-law envelope

            envelope = m.add_power_law_envelope()
            envelope.power = par['envelope']['power']
            envelope.rho_0 = par['envelope']['rho_0']
            envelope.r_0 = 1000. * au

        # Set dust
        envelope.dust = dust_files[par['envelope']['dust']]

        # Inner radius
        if 'rmin' in par['envelope']:
            envelope.rmin = par['envelope']['rmin'] * OptThinRadius(TSUB)
        else:
            envelope.rmin = OptThinRadius(TSUB)

        subtract_from_ambient.append(envelope)

    if 'cavity' in par:

        if not 'envelope' in par:
            raise Exception("Can't have a bipolar cavity without an envelope")

        # Add the bipolar cavity component
        cavity = envelope.add_bipolar_cavity()

        # Basic parameters
        cavity.power = par['cavity']['power']
        cavity.r_0 = 10000 * au
        cavity.theta_0 = par['cavity']['theta_0']
        cavity.rho_0 = par['cavity']['rho_0']
        cavity.rho_exp = 0.

        # Very important is that the cavity density should not be *larger* than
        # the envelope density.
        cavity.cap_to_envelope_density = True

        # Set dust
        cavity.dust = dust_files[par['cavity']['dust']]

        subtract_from_ambient.append(cavity)

    if 'ambient' in par:

        # Add the ambient medium contribution
        ambient = m.add_ambient_medium(subtract=subtract_from_ambient)

        # Set the density, temperature, and dust properties
        ambient.rho = par['ambient']['density']
        ambient.temperature = par['ambient']['temperature']
        ambient.dust = dust_files[par['ambient']['dust']]

        # If there is an envelope, set the outer radius to where the
        # optically thin temperature would transition to the ambient medium
        # temperature
        if 'envelope' in par:

            # Find radius where the optically thin temperature drops to the
            # ambient temperature. We can do this only if we've already set
            # up all the sources of emission beforehand (which we have)
            rmax_temp = OptThinRadius(ambient.temperature).evaluate(m.star, envelope.dust)

            # Find radius where the envelope density drops to the ambient density
            rmax_dens = envelope.outermost_radius(ambient.rho)

            # If disk radius is larger than this, use that instead
            if 'disk' in par:
                if disk.rmax > rmax_dens:
                    rmax_dens = disk.rmax

            # Pick the largest
            if rmax_temp < rmax_dens:
                print("Setting envelope outer radius to that where rho(r) = rho_amb")
                envelope.rmax = rmax_dens
            else:
                print("Setting envelope outer radius to that where T_thin(r) = T_amb")
                envelope.rmax = OptThinRadius(ambient.temperature)

            ambient.rmax = envelope.rmax

        else:

            if 'disk' in par:

                # Find radius where the optically thin temperature drops to the
                # ambient temperature. We can do this only if we've already set
                # up all the sources of emission beforehand (which we have)
                rmax_temp = OptThinRadius(ambient.temperature).evaluate(m.star, ambient.dust)

                # Find outer disk radius
                rmax_dens = disk.rmax

                # Pick the largest
                if rmax_temp < rmax_dens:
                    print("Setting ambient outer radius to outer disk radius")
                    ambient.rmax = rmax_dens
                else:
                    print("Setting ambient outer radius to that where T_thin(r) = T_amb")
                    ambient.rmax = OptThinRadius(ambient.temperature)

            else:

                ambient.rmax = OptThinRadius(ambient.temperature)

        # The inner radius for the ambient medium should be the largest of
        # the inner radii for the disk and envelope
        if 'envelope' in par and 'rmin' in par['envelope']:
            if 'disk' in par and 'rmin' in par['disk']:
                ambient.rmin = max(par['disk']['rmin'], \
                                   par['envelope']['rmin']) \
                               * OptThinRadius(TSUB)
            else:
                ambient.rmin = par['envelope']['rmin'] * OptThinRadius(TSUB)
        elif 'disk' in par and 'rmin' in par['disk']:
            ambient.rmin = par['disk']['rmin'] * OptThinRadius(TSUB)
        else:
            ambient.rmin = OptThinRadius(TSUB)

        # The ambient medium needs to go out to sqrt(2.) times the envelope
        # radius to make sure the slab is full (don't need to do sqrt(3)
        # because we only need a cylinder along line of sight)
        ambient.rmax *= np.sqrt(2.)

        # Make sure that the temperature in the model is always at least
        # the ambient temperature
        m.set_minimum_temperature(ambient.temperature)

    else:

        # Make sure that the temperature in the model is always at least
        # the CMB temperature
        m.set_minimum_temperature(2.725)

        if 'envelope' in par:
            raise Exception("Can't have an envelope without an ambient medium")

    # Use raytracing to improve s/n of thermal/source emission
    m.set_raytracing(True)

    # Use the modified random walk
    m.set_mrw(True, gamma=2.)

    # Use the partial diffusion approximation
    m.set_pda(True)

    # Improve s/n of scattering by forcing the first interaction
    m.set_forced_first_scattering(True)

    # Set up grid.
    if ndim == 1:
        m.set_spherical_polar_grid_auto(400, 1, 1)
    else:
        m.set_spherical_polar_grid_auto(400, 300, 1)

    # Find the range of radii spanned by the grid
    rmin, rmax = m.radial_range()

    # Set up SEDs
    image = m.add_peeled_images(sed=True, image=False)
    image.set_wavelength_range(200, 0.01, 5000.)

    if 'ambient' in par:
        image.set_aperture_range(20, rmin, rmax / np.sqrt(2.))
    else:
        image.set_aperture_range(20, rmin, rmax)

    image.set_output_bytes(8)
    image.set_track_origin(True)
    image.set_uncertainties(True)
    image.set_stokes(True)

    if ndim == 1:

        # Viewing angle does not matter
        image.set_viewing_angles([45.], [45.])

    else:

        # Use stratified random sampling to ensure that all models
        # contain a viewing angle in each bin, but also ensure we have a
        # continuum of viewing angles over all models
        xi = np.random.uniform(0., 90./float(NVIEW), NVIEW)
        theta = xi + np.linspace(0., 90. * (1. - 1./float(NVIEW)), NVIEW)
        image.set_viewing_angles(theta, np.repeat(45., NVIEW))

    if 'ambient' in par:  # take a slab to avoid spherical geometrical effects
        w = ambient.rmax / np.sqrt(2.)
        image.set_depth(-w, w)
    else:  # don't need to take a slab, as no ambient material or envelope
        image.set_depth(-np.inf, np.inf)

    # Set number of photons

    if imaging:
        n_imaging=1e6
        n_raytracing_sources=10000
        n_raytracing_dust=1e6
    else:
        n_imaging=0
        n_raytracing_sources=0
        n_raytracing_dust=0

    if ndim == 1:
        m.set_n_photons(initial=100, imaging=n_imaging,
                        raytracing_sources=n_raytracing_sources,
                        raytracing_dust=n_raytracing_dust)
    else:
        m.set_n_photons(initial=1000000, imaging=n_imaging,
                        raytracing_sources=n_raytracing_sources,
                        raytracing_dust=n_raytracing_dust)

    # Set physical array output to 32-bit
    m.set_output_bytes(4)

    # Set maximum of 10^8 interactions per photon
    m.set_max_interactions(1e8)

    # Only request certain arrays to be output
    m.conf.output.output_density = 'none'
    m.conf.output.output_specific_energy = 'last'
    m.conf.output.output_n_photons = 'none'
    m.conf.output.output_density_diff = 'last'

    # Set number of temperature iterations and convergence criterion
    m.set_n_initial_iterations(10)
    m.set_convergence(True, percentile=99.0, absolute=2.0, relative=1.1)

    # Don't copy the full input into the output files
    m.set_copy_input(False)

    # Check whether the model is very optically thick

    mf = m.to_model()

    if 'envelope' in par:

        from hyperion.model.helpers import tau_to_radius
        surface = tau_to_radius(mf, tau=1., wav=5e3)
        rtau = np.min(surface)
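        # rtau is the smallest radius, over all (theta, phi) directions, at
        # which the 5 mm optical depth reaches 1; if even that lies outside
        # the inner grid wall, the innermost envelope is optically thick at
        # 5 mm everywhere and its cells are emptied below.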

        if rtau > rmin and optimize:
            log.warn("tau_5mm > 1 for all (theta,phi) values - truncating "
                     "inner envelope from {0:.3f}au to {1:.3f}au".format(mf.grid.r_wall[1] / au, rtau / au))
            for item in mf.grid['density']:
                item.array[mf.grid.gr < rtau] = 0.

    # Write out file
    mf.write(copy=False, absolute_paths=False,
             physics_dtype=np.float32, wall_dtype=float)
Example #16
0
    # * system size = 10x10x10 pc
    # * system coordinates (x,y,z min/max) = -5 to +5 pc
    # * slab z extent = -5 to -2 pc
    # * slab xy extent = -5 pc to 5 pc
    # * z optical depth @0.55um in slab = 0.1, 1, 20
    # * optical depth outside slab = 0

    x = np.linspace(-5 * pc, 5 * pc, 100)
    y = np.linspace(-5 * pc, 5 * pc, 100)
    z = np.hstack([np.linspace(-5 * pc, -2 * pc, 100), 5 * pc])

    m.set_cartesian_grid(x, y, z)

    # Grain Properties:

    d = SphericalDust('integrated_hg_scattering.hdf5')
    chi_v = d.optical_properties.interp_chi_wav(0.55)

    # Determine density in slab
    rho0 = tau_v / (3 * pc * chi_v)
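    # this follows from tau_v = rho0 * chi_v * L with a slab thickness
    # L = 3 pc (z from -5 pc to -2 pc), so rho0 = tau_v / (chi_v * L)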

    # Set up density grid
    density = np.ones(m.grid.shape) * rho0
    density[-1,:,:] = 0.

    m.add_density_grid(density, d)

    # Set up illuminating source:
    s = m.add_point_source()
    s.position = (0., 0., 4 * pc)
    s.temperature = 10000.0
Example #17
0
def sph_m_gen(fname,field_add):

    refined,dustdens,fc1,fw1,reg,ds = yt_octree_generate(fname,field_add)
    
    if float(yt.__version__[0:3]) >= 4:
        xmin = (fc1[:,0]-fw1[:,0]/2.).to('cm') #in proper cm 
        xmax = (fc1[:,0]+fw1[:,0]/2.).to('cm')
        ymin = (fc1[:,1]-fw1[:,1]/2.).to('cm')
        ymax = (fc1[:,1]+fw1[:,1]/2.).to('cm')
        zmin = (fc1[:,2]-fw1[:,2]/2.).to('cm')
        zmax = (fc1[:,2]+fw1[:,2]/2.).to('cm')
    else:
        xmin = (fc1[:,0]-fw1[:,0]/2.).convert_to_units('cm') #in proper cm
        xmax = (fc1[:,0]+fw1[:,0]/2.).convert_to_units('cm')
        ymin = (fc1[:,1]-fw1[:,1]/2.).convert_to_units('cm')
        ymax = (fc1[:,1]+fw1[:,1]/2.).convert_to_units('cm')
        zmin = (fc1[:,2]-fw1[:,2]/2.).convert_to_units('cm')
        zmax = (fc1[:,2]+fw1[:,2]/2.).convert_to_units('cm')

    #dx,dy,dz are the edges of the parent grid
    dx = (np.max(xmax)-np.min(xmin)).value
    dy = (np.max(ymax)-np.min(ymin)).value
    dz = (np.max(zmax)-np.min(zmin)).value


    xcent = float(ds.quan(cfg.model.x_cent,"code_length").to('cm').value)
    ycent = float(ds.quan(cfg.model.y_cent,"code_length").to('cm').value)
    zcent = float(ds.quan(cfg.model.z_cent,"code_length").to('cm').value)

    boost = np.array([xcent,ycent,zcent])
    print ('[sph_tributary] boost = ',boost)
    print ('[sph_tributary] xmin (pc)= ',np.min(xmin.to('pc')))
    print ('[sph_tributary] xmax (pc)= ',np.max(xmax.to('pc')))
    print ('[sph_tributary] ymin (pc)= ',np.min(ymin.to('pc')))
    print ('[sph_tributary] ymax (pc)= ',np.max(ymax.to('pc')))
    print ('[sph_tributary] zmin (pc)= ',np.min(zmin.to('pc')))
    print ('[sph_tributary] zmax (pc)= ',np.max(zmax.to('pc')))
    #Tom Robitaille's conversion from z-first ordering (yt's default) to
    #x-first ordering (the script should work both ways)



    refined_array = np.array(refined)
    refined_array = np.squeeze(refined_array)
    
    order = find_order(refined_array)
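    #'order' is an index permutation; applying it to both the refinement
    #flags and the dust densities below keeps each density value attached
    #to its cell through the reordering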
    refined_reordered = []
    dustdens_reordered = np.zeros(len(order))
    


    
    for i in range(len(order)): 
        refined_reordered.append(refined[order[i]])
        dustdens_reordered[i] = dustdens[order[i]]


    refined = refined_reordered
    dustdens=dustdens_reordered

    #hyperion octree stats
    max_level = hos.hyperion_octree_stats(refined)


    pto.test_octree(refined,max_level)
    
    if float(yt.__version__[0:3]) >= 4:
        dump_cell_info(refined,fc1.to('cm'),fw1.to('cm'),xmin,xmax,ymin,ymax,zmin,zmax)
    else:
        dump_cell_info(refined,fc1.convert_to_units('cm'),fw1.convert_to_units('cm'),xmin,xmax,ymin,ymax,zmin,zmax)
    
    np.save('refined.npy',refined)
    np.save('density.npy',dustdens)
    

    #========================================================================
    #Initialize Hyperion Model
    #========================================================================

    m = Model()
    
    #save in the m__dict__ that we're in an oct geometry
    m.__dict__['grid_type']='oct'

    print ('Setting Octree Grid with Parameters: ')



    #m.set_octree_grid(xcent,ycent,zcent,
    #                  dx,dy,dz,refined)
    m.set_octree_grid(0,0,0,dx/2,dy/2,dz/2,refined)    

    #get CMB:
    
    energy_density_absorbed=energy_density_absorbed_by_CMB()
    specific_energy = np.repeat(energy_density_absorbed.value,dustdens.shape)
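    #this gives one value per cell, so specific_energy has the same length as
    #dustdens and can be passed alongside every density grid added below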


    if cfg.par.otf_extinction == False:
        
        if cfg.par.PAH == True:

            # load PAH fractions for usg, vsg, and big (grain sizes)
            frac = cfg.par.PAH_frac
            
            # Normalize to 1
            total = np.sum(list(frac.values()))
            frac = {k: v / total for k, v in frac.items()}
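            # e.g. with illustrative values frac = {'usg': 1., 'vsg': 2., 'big': 7.}
            # this would give {'usg': 0.1, 'vsg': 0.2, 'big': 0.7}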

            for size in frac.keys():
                d = SphericalDust(cfg.par.dustdir+'%s.hdf5'%size)
                if cfg.par.SUBLIMATION == True:
                    d.set_sublimation_temperature('fast',temperature=cfg.par.SUBLIMATION_TEMPERATURE)
                #m.add_density_grid(dustdens * frac[size], cfg.par.dustdir+'%s.hdf5' % size)
                m.add_density_grid(dustdens*frac[size],d,specific_energy=specific_energy)
            m.set_enforce_energy_range(cfg.par.enforce_energy_range)
        else:
            d = SphericalDust(cfg.par.dustdir+cfg.par.dustfile)
            if cfg.par.SUBLIMATION == True:
                d.set_sublimation_temperature('fast',temperature=cfg.par.SUBLIMATION_TEMPERATURE)
            m.add_density_grid(dustdens,d,specific_energy=specific_energy)
        #m.add_density_grid(dustdens,cfg.par.dustdir+cfg.par.dustfile)  


    else: #instead of using a constant extinction law across the
          #entire galaxy, we'll compute it on a cell-by-cell basis by
          #using information about the grain size distribution from
          #the simulation itself.


        print("==============================================\n")
        print("Entering OTF Extinction Calculation\n")
        print("Note: For very high-resolution grids, this may cause memory issues due to adding ncells dust grids")
        print("==============================================\n")
        
        ad = ds.all_data()
        nsizes = ad['PartType3','Dust_Size'].shape[1]
        ncells = reg.parameters["octree_of_sizes"].shape[0]
        #ensure that the grid has particles
        for isize in range(nsizes):
            try:
                assert (np.sum(reg.parameters["octree_of_sizes"][:,isize]) > 0)
            except AssertionError:
                raise AssertionError("[sph_tributary:] The grain size distribution smoothed onto the octree has deposited no particles.  Try either increasing your box size, or decreasing n_ref in parameters_master.  Alternatively, run the simulation with otf_extinction=False")

        #define the grid of sizes that will be used in tributary_dust_add
        grid_of_sizes = reg.parameters["octree_of_sizes"]
        
        active_dust_add(ds,m,grid_of_sizes,nsizes,dustdens,specific_energy,refined)
        

    m.set_specific_energy_type('additional')

    return m,xcent,ycent,zcent,dx,dy,dz,reg,ds,boost
Example #18
0
class YSOModelSim(object):
	
	def __init__(self,name,folder,T=9000,M_sun=5.6,L_sun=250,disk_mass=0.01,disk_rmax=100, 
		env=True,env_type='power',rc=400,mdot=1e-8,env_mass=0.1,env_rmin=30,env_rmax=5000,cav=True,cav_r0=500,cav_rho_0=8e-24,cav_theta=25,env_power=-1.5,
		Npix=149,angles=[20.,45.,60.,80],angles2=[60.,60.,60.,60.], amb_dens=8e-24, disk="Flared",disk_rmin=1., amb_rmin=1., amb_rmax=1000., innerdustfile='OH5.hdf5',
		outerdustfile='d03_5.5_3.0_A.hdf5',beta=1.1):
		self.name=name
		self.folder=folder
		self.T=T
		self.M_sun=M_sun*msun
		self.L_sun=L_sun*lsun
		self.disk_mass=disk_mass*msun
		self.disk_rmax=disk_rmax*au
		self.disk_rmin=disk_rmin*au
		self.disk_h_0 = OptThinRadius(1600)
		self.env=env
		self.disk=disk
		self.env_type=env_type
		self.env_mass=env_mass*msun
		self.env_rmin=env_rmin*au
		self.env_rmax=env_rmax*au
		self.mdot=mdot #*msun/yr*self.M_sun # disk accretion rate
		self.rc=rc*au
		self.cav=cav
		self.cav_rho_0=cav_rho_0
		self.cav_r0=cav_r0*au
		self.cav_theta=cav_theta
		self.Npix=Npix
		self.angles=angles
		self.angles2=angles2
		self.amb_dens=amb_dens
		self.amb_rmin=amb_rmin
		self.amb_rmax=amb_rmax*au
		self.env_power=env_power
		self.dustfile=innerdustfile
		self.dustfile_out=outerdustfile
		self.limval = max(self.env_rmax,1000*au)
		self.beta = beta

	def modelDump(self):
		sp.call('rm %s.mod ' % (self.folder+self.name),shell=True)
		pickle.dump(self,open(self.folder+self.name+'.mod','wb'))
		time.sleep(2)

	def modelPrint(self):
		#string= self.folder+ self.name+'\n'
		string="T="+str(self.T)+"K"+'\n'
		string+= "M="+str(self.M_sun/msun)+'Msun'+'\n'
		string+= "L="+str(self.L_sun/lsun)+'Lsun'+'\n'
		string+= "Disk="+str(self.disk)+'\n'
		string+= "Disk_mass="+str(self.disk_mass/msun)+'Msun'+'\n'
		string+= "Disk_rmax="+str(self.disk_rmax/au)+'AU'+'\n'
		string+= "Disk_rmin="+str(self.disk_rmin/au)+'AU'+'\n'
		string+= "env="+str(self.env)+'\n'
		string+= "env_type="+self.env_type+'\n'
		string+= "env_mass="+str(self.env_mass/msun)+'Msun'+'\n'
		string+= "env_rmax="+str(self.env_rmax/au)+'AU'+'\n'
		string+= "env_rmin="+str(self.env_rmin/au)+'AU'+'\n'
		if self.env_type == 'ulrich' and self.env==True:
			string+= "mass_ulrich="+str((8.*np.pi*self.env_rho_0*self.rc**3*pow(self.env_rmax/self.rc,1.5)/(3.*np.sqrt(2)))/msun)+'Msun'+'\n'
		string+= "mdot="+str(self.mdot)+'Msun/yr'+'\n' # (only if env_type="Ulrich")
		string+= "rc="+str(self.rc/au)+'AU'+'\n' # (only if env_type="Ulrich")
		string+= "cav="+str(self.cav)+'\n'
		string+= "cav_theta="+str(self.cav_theta)+'\n'
		string+= "cav_r0="+str(self.cav_r0/au)+'\n'
		string+= "env_power="+str(self.env_power)+'\n'
		string+= "disk_h_0="+str(self.disk_h_0)+'\n'
		string+= "dustfile="+self.dustfile+'\n'
		string+= "dustfile_out="+self.dustfile_out+'\n'
		string+= "amb_dens="+str(self.amb_dens)+'\n'
		string+= "amb_rmin="+str(self.amb_rmin)+'\n'
		string+= "amb_rmax="+str(self.amb_rmax/au)+'\n'
		string+= "angles="+str(self.angles)+'\n'
		print(string)
		return string

	def dust_gen(self,dustfile,dustfile_out='d03_5.5_3.0_A.hdf5'):
		### first, we need to load Tracy's dust files and manipulate them to feed to Hyperion
		### wavelength (microns),Cext,Csca,Kappa,g,pmax,theta (ignored)
		### albedo = Csca/Cext
		### opacity kappa is in cm^2/g, dust+gas extinction opacity (absorption+scattering) - assumes a gas-to-dust ratio of 100
		### see Whitney et al. 2003a
		
#		tracy_dust = np.loadtxt('Tracy_models/OH5.par')

#		### format for dust: d = HenyeyGreensteinDust(nu, albedo, chi, g, p_lin_max)
#		nu = const.c.value/ (tracy_dust[:,0]*1e-6)
#		albedo = tracy_dust[:,2]/tracy_dust[:,1]
#		chi = tracy_dust[:,3]
#		g = tracy_dust[:,4]
#		p_lin_max = tracy_dust[:,5]

#		### flip the table to have an increasing frequency
#		nu = nu[::-1]
#		albedo = albedo[::-1]
#		chi=chi[::-1]
#		g=g[::-1]
#		p_lin_max=p_lin_max[::-1]

#		### create the dust model
#		d = HenyeyGreensteinDust(nu, albedo, chi, g, p_lin_max)
#		d.optical_properties.extrapolate_wav(0.001,1.e7)
#		d.plot('OH5.png')
#		d.write('OH5.hdf5')
		
		self.d = SphericalDust()
		self.d.read(dustfile)
		self.d.plot(str(dustfile.split(',')[:-1])+'.png')
		self.d_out = SphericalDust()
		self.d_out.read(dustfile_out)
		#self.d_out.read(dustfile)
		self.d_out.plot(str(dustfile_out.split(',')[:-1])+'.png')

	def initModel(self):
		### Use Tracy parameter file to set up the model 
		self.dust_gen(self.dustfile,self.dustfile_out)
		mi = AnalyticalYSOModel()

		mi.star.temperature = self.T
		mi.star.mass = self.M_sun
		mi.star.luminosity = self.L_sun
		mi.star.radius=np.sqrt(mi.star.luminosity/(4.0*np.pi*sigma*mi.star.temperature**4))
		#m.star.luminosity = 4.0*np.pi*m.star.radius**2*sigma*m.star.temperature**4
		print(mi.star.luminosity/lsun)
		self.luminosity=mi.star.luminosity/lsun

		if self.disk=="Flared":
			print "Adding flared disk"
			disk = mi.add_flared_disk()
			disk.dust=self.d
			if self.dustfile == 'd03_5.5_3.0_A.hdf5':
				disk.mass=self.disk_mass/100.
			else: disk.mass=self.disk_mass
			disk.rmin=OptThinRadius(1600) #self.disk_rmin
			print "disk.rmin = ",disk.rmin,disk.rmin/au
			disk.rmax=self.disk_rmax
			disk.r_0 = self.disk_rmin
			disk.h_0 = disk.r_0/10. #self.disk_h_0*au
			disk.beta=self.beta
			disk.p = -1.
		elif self.disk=="Alpha":
			print "Adding alpha disk"
			disk = mi.add_alpha_disk()
			disk.dust=self.d
			if self.dustfile == 'd03_5.5_3.0_A.hdf5':
				disk.mass=self.disk_mass/100.
			else: disk.mass=self.disk_mass
			disk.rmin=OptThinRadius(1600)
			disk.rmax=self.disk_rmax
			disk.r_0 = self.disk_rmin
			disk.h_0 = disk.r_0/10. #self.disk_h_0*au
			disk.beta=1.1
			disk.p = -1
			disk.mdot=self.mdot
			disk.star = mi.star
			
		#print 'Disk density:',disk.rho_0

		
		if self.env==True and self.env_type=='power':
			envelope=mi.add_power_law_envelope()
			envelope.dust=self.d_out
			envelope.r_0=self.env_rmin
			#envelope.r_0 = OptThinRadius(1600)
			if self.dustfile_out == 'd03_5.5_3.0_A.hdf5':
				envelope.mass=self.env_mass/100.
			else: envelope.mass=self.env_mass
			envelope.rmin=self.env_rmin
			envelope.rmax=self.env_rmax
			envelope.power=self.env_power
			#print 'Envelope rho:',envelope.rho_0
		elif self.env==True and self.env_type=='ulrich':
			envelope=mi.add_ulrich_envelope()
			envelope.dust=self.d_out
			envelope.mdot=1e-6*msun/yr # has little impact on the fluxes, so fixed
			envelope.rc=self.rc
			envelope.rmin=self.env_rmin
			envelope.rmax=self.env_rmax
		if self.env==True:
			self.env_rho_0 = envelope.rho_0
			print('Envelope rho:',envelope.rho_0)

		#print "Rho_0 = ",envelope.rho_0
		if self.cav==True:
			cavity=envelope.add_bipolar_cavity()
			cavity.dust=self.d_out
			cavity.power=1.5
			cavity.cap_to_envelope_density=True ### prevents the cavity density from exceeding the envelope's density
			cavity.r_0=self.cav_r0
			cavity.theta_0=self.cav_theta
			cavity.rho_0=self.cav_rho_0 #in g/cm^3
			cavity.rho_exp=0.0
			
		
#		if self.env==True:
#			ambient=mi.add_ambient_medium(subtract=[envelope,disk])
#		if self.dustfile_out == 'd03_5.5_3.0_A.hdf5':
#			ambient.rho=self.amb_dens/100.
#		else: ambient.rho=self.amb_dens
#		ambient.rmin=OptThinRadius(1600.)
#		ambient.rmax=self.env_rmax
#		ambient.dust=self.d_out
		

		'''*** Grid parameters ***'''
		mi.set_spherical_polar_grid_auto(199,49,1)

		# Specify that the specific energy and density are needed
		mi.conf.output.output_specific_energy = 'last'
		mi.conf.output.output_density = 'last'


		'''**** Output Data ****'''
		image = mi.add_peeled_images(sed=True,image=False)
		image.set_wavelength_range(150,1,3000)
		#image.set_image_size(self.Npix,self.Npix)
		#image.set_image_limits(-self.limval,self.limval,-self.limval,self.limval)
		image.set_aperture_range(1,100000.*au,100000.*au)
		image.set_viewing_angles(self.angles,self.angles2)
		#image.set_track_origin('detailed')
		image.set_uncertainties(True)

		''' Use the modified random walk
		*** Advanced ***
		Controls whether to use the diffusion approximations (MRW/PDA)
		'''
		if self.env==True:
			#mi.set_pda(True)
			mi.set_mrw(True)
		else:
			mi.set_pda(False)
			mi.set_mrw(False)

		# Use raytracing to improve s/n of thermal/source emission
		mi.set_raytracing(True)


		'''**** Preliminaries ****'''
		mi.set_n_initial_iterations(5)
		mi.set_n_photons(initial=1e6,imaging=1e6,raytracing_sources=1e5,raytracing_dust=1e6)
		mi.set_convergence(True, percentile=99.0, absolute=2.0, relative=1.1)
		self.m = mi

	def runModel(self):
		self.initModel()
		self.m.write(self.folder+self.name+'.rtin')
		self.m.run(self.folder+self.name+'.rtout', mpi=True,n_processes=6)

	def plotData(self,ax,sourcename):
		if sourcename != 'None':
			folder_export="/n/a2/mrizzo/Dropbox/SOFIA/Processed_Data/"
			sourcetable = pickle.load(open(folder_export+"totsourcetable_fits.data","rb"))

			markers = ['v','p','D','^','h','o','*','x','d','<']
			TwoMASS = ['j','h','ks']
			uTwoMASS = ["e_"+col for col in TwoMASS]
			wlTwoMASS = [1.3,1.6,2.2]
			colTwoMASS = colors[0]
			markerTwoMASS = markers[0]
			labelTwoMASS = '2MASS'
			Spitzer = ['i1','i2','i3','i4','m1','m2']
			uSpitzer = ["e_"+col for col in Spitzer]
			wlSpitzer = [3.6,4.5,5.8,8.,24,70]
			colSpitzer = colors[1]
			markerSpitzer = markers[1]
			labelSpitzer = 'Spitzer'
			WISE = ['w1','w2','w3','w4']
			uWISE = ["e_"+col for col in WISE]
			wlWISE = [3.4,4.6,12,22]
			colWISE = colors[2]
			labelWISE = 'WISE'
			markerWISE = markers[2]
			SOFIA = ['F11','F19','F31','F37']
			uSOFIA = ["e_"+col for col in SOFIA]
			wlSOFIA = [11.1,19.7,31.5,37.1]
			colSOFIA = colors[3]
			markerSOFIA = markers[3]
			labelSOFIA = 'SOFIA'
			IRAS = ['Fnu_12','Fnu_25','Fnu_60','Fnu_100']
			uIRAS = ["e_"+col for col in IRAS]
			wlIRAS = [12,25,60,100]
			colIRAS = colors[4]
			markerIRAS = markers[4]
			labelIRAS = 'IRAS'
			AKARI = ['S65','S90','S140','S160']
			uAKARI = ["e_"+col for col in AKARI]
			wlAKARI = [65,90,140,160]
			colAKARI = colors[5]
			markerAKARI = markers[5]
			labelAKARI = 'AKARI'
			ENOCH = ['Fp']
			uENOCH = ["e_"+col for col in ENOCH]
			wlENOCH = [1300]
			colENOCH = colors[6]
			markerENOCH = markers[6]
			labelENOCH = 'ENOCH'
			HERSCHEL = ['H70','H160','H250','H350','H500']
			uHERSCHEL = ["e_"+col for col in HERSCHEL]
			wlHERSCHEL = [70,160,250,350,500]
			colHERSCHEL = colors[7]
			markerHERSCHEL = markers[7]
			labelHERSCHEL = 'HERSCHEL'
			SCUBA = ['S450','S850','S1300']
			uSCUBA = ["e_"+col for col in SCUBA]
			wlSCUBA = [450,850,1300]
			colSCUBA = colors[8]
			markerSCUBA = markers[8]
			labelSCUBA = 'SCUBA'
			alpha=1
			sources = sourcetable.group_by('SOFIA_name')
			for key,sourcetable in zip(sources.groups.keys,sources.groups):
				if sourcename == sourcetable['SOFIA_name'][0]:	
					#print sourcetable['SOFIA_name'][0]
					p.plotData(ax,sourcetable,markerTwoMASS,TwoMASS,uTwoMASS,wlTwoMASS,colTwoMASS,labelTwoMASS,alpha)
					p.plotData(ax,sourcetable,markerSpitzer,Spitzer,uSpitzer,wlSpitzer,colSpitzer,labelSpitzer,alpha)
					p.plotData(ax,sourcetable,markerWISE,WISE,uWISE,wlWISE,colWISE,labelWISE,alpha)
					p.plotData(ax,sourcetable,markerSOFIA,SOFIA,uSOFIA,wlSOFIA,colSOFIA,labelSOFIA,alpha)
					p.plotData(ax,sourcetable,markerIRAS,IRAS,uIRAS,wlIRAS,colIRAS,labelIRAS,alpha)
					p.plotData(ax,sourcetable,markerAKARI,AKARI,uAKARI,wlAKARI,colAKARI,labelAKARI,alpha)
					p.plotData(ax,sourcetable,markerENOCH,ENOCH,uENOCH,wlENOCH,colENOCH,labelENOCH,alpha)
					p.plotData(ax,sourcetable,markerHERSCHEL,HERSCHEL,uHERSCHEL,wlHERSCHEL,colHERSCHEL,labelHERSCHEL,alpha)
					p.plotData(ax,sourcetable,markerSCUBA,SCUBA,uSCUBA,wlSCUBA,colSCUBA,labelSCUBA,alpha)

	def calcChi2(self,dist_pc=140,extinction=0, sourcename='Oph.1'):
		self.dist=dist_pc*pc
		self.extinction=extinction
		chi = np.loadtxt('kmh94_3.1_full.chi')
		wav = np.loadtxt('kmh94_3.1_full.wav')
		Chi = interp1d(wav,chi,kind='linear')
		modelname = self.folder+self.name
		self.mo = ModelOutput(modelname+'.rtout')
		
		# get the sed of all inclination
		sed = self.mo.get_sed(aperture=-1, inclination='all', distance=self.dist,units='Jy')
				
		# calculate the optical depth at all wavelengths
		tau = self.extinction*Chi(sed.wav)/Chi(0.550)/1.086
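		# Chi(wav)/Chi(0.55) scales the V-band extinction A_V to wavelength
		# wav, and dividing by 1.086 converts magnitudes of extinction into
		# optical depth (A = 1.086 * tau)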
		
		# calculate extinction values
		ext = np.array([np.exp(-tau) for i in range(sed.val.shape[0])])
		
		# apply extinction to model
		extinct_values = np.log10(sed.val.transpose()*ext.T)
		
		# data points and errors
		folder_export="/n/a2/mrizzo/Dropbox/SOFIA/Processed_Data/"
		sourcetable = pickle.load(open(folder_export+"totsourcetable_fits.data","rb"))
		TwoMASS = ['j','h','ks']
		uTwoMASS = ["e_"+col for col in TwoMASS]
		wlTwoMASS = [1.3,1.6,2.2]
		labelTwoMASS = '2MASS'
		Spitzer = ['i1','i2','i3','i4']
		uSpitzer = ["e_"+col for col in Spitzer]
		wlSpitzer = [3.6,4.5,5.8,8.]
		labelSpitzer = 'Spitzer'
		SOFIA = ['F11','F19','F31','F37']
		uSOFIA = ["e_"+col for col in SOFIA]
		wlSOFIA = [11.1,19.7,31.5,37.1]
		labelSOFIA = 'SOFIA'
		sources = sourcetable.group_by('SOFIA_name')
		for key,source in zip(sources.groups.keys,sources.groups):
			if sourcename == source['SOFIA_name'][0]:	
				datapoints = source[TwoMASS+Spitzer+SOFIA]
				dataerrors = source[uTwoMASS+uSpitzer+uSOFIA]
				print(p.nptable(datapoints),p.nptable(dataerrors))
				
				# calculate log10 of quantities required for chi squared
				logFnu = np.log10(p.nptable(datapoints))-0.5*(1./np.log(10.))*p.nptable(dataerrors)**2/p.nptable(datapoints)**2
				varlogFnu = (1./np.log(10)/p.nptable(datapoints))**2*p.nptable(dataerrors)**2
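				# log10(Fnu) is bias-corrected to second order and the variance is
				# propagated as var(log10 Fnu) = (sigma_Fnu / (Fnu * ln 10))**2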
				print(extinct_values,extinct_values.shape)
				
				# for each inclination, calculate chi squared; need to interpolate to get model at required wavelengths
				Ninc = extinct_values.shape[1]
				chi2 = np.zeros(Ninc)
				wl=wlTwoMASS+wlSpitzer+wlSOFIA
				N = len(wl)
				for j in range(Ninc):
					interp_func = interp1d(sed.wav,extinct_values[:,j],kind='linear')
					interp_vals = interp_func(wl)
					chi2[j] = 1./N * np.sum((logFnu - interp_vals)**2/varlogFnu)
					
				print(chi2)

	def plotModel(self,dist_pc=140,inc=3,extinction=0,show=False,sourcename='Oph.1'):
		self.dist=dist_pc*pc
		self.inc=inc
		self.extinction=extinction
		modelname = self.folder+self.name
		self.mo = ModelOutput(modelname+'.rtout')

		#tracy_dust = np.loadtxt('Tracy_models/OH5.par')
		chi = np.loadtxt('kmh94_3.1_full.chi')
		wav = np.loadtxt('kmh94_3.1_full.wav')
		Chi = interp1d(wav,chi,kind='linear')



		fig = plt.figure(figsize=(20,14))
		ax=fig.add_subplot(2,3,1)
		sed = self.mo.get_sed(aperture=-1, inclination='all', distance=self.dist)
		#print tracy_dust[11,1],Cext(sed.wav[-1]),Cext(sed.wav[-1])/tracy_dust[11,1]
		tau = self.extinction*Chi(sed.wav)/Chi(0.550)/1.086
		#print Cext(sed.wav)/tracy_dust[11,1]
		ext = np.array([np.exp(-tau) for i in range(sed.val.shape[0])])
		#print tau,np.exp(-tau)
		ax.loglog(sed.wav, sed.val.transpose()*ext.T, color='black')
		ax.set_title(modelname+'_seds, Av='+str(self.extinction))
		ax.set_xlim(sed.wav.min(), 1300)
		ax.set_ylim(1e-13, 1e-7)
		ax.set_xlabel(r'$\lambda$ [$\mu$m]')
		ax.set_ylabel(r'$\lambda F_\lambda$ [ergs/cm$^2/s$]')
		self.plotData(ax,sourcename)
		ax.set_xscale('log')
		ax.set_yscale('log')

		#ax.set_ylabel(r'$F_{Jy}$ [Jy]')
		#plt.legend(loc=4)

		ax=fig.add_subplot(2,3,2)
		sed = self.mo.get_sed(aperture=-1, inclination=self.inc, distance=self.dist)
		ext=np.exp(-tau)
		ax.loglog(sed.wav, sed.val.transpose()*ext.T, lw=3,color='black',label='source_total')
		ax.set_xlim(sed.wav.min(), 1300)
		ax.set_ylim(1e-13, 1e-7)  ### for lamFlam
		sed = self.mo.get_sed(aperture=-1, inclination=self.inc, distance=self.dist,component='source_emit')
		ax.loglog(sed.wav, sed.val.transpose()*ext.T, color='blue',label='source_emit')
		sed = self.mo.get_sed(aperture=-1, inclination=self.inc, distance=self.dist,component='source_scat')
		ax.loglog(sed.wav, sed.val.transpose()*ext.T, color='teal',label='source_scat')
		sed = self.mo.get_sed(aperture=-1, inclination=self.inc, distance=self.dist,component='dust_emit')
		ax.loglog(sed.wav, sed.val.transpose()*ext.T, color='red',label='dust_emit')
		sed = self.mo.get_sed(aperture=-1, inclination=self.inc, distance=self.dist,component='dust_scat')
		ax.loglog(sed.wav, sed.val.transpose()*ext.T, color='orange',label='dust_scat')
		self.plotData(ax,sourcename)
		ax.set_xscale('log')
		ax.set_yscale('log')
		ax.set_title('seds_inc=inc')
		ax.set_xlabel(r'$\lambda$ [$\mu$m]')
		ax.set_ylabel(r'$\lambda F_\lambda$ [ergs/cm$^2/s$]')
		#ax.set_ylabel(r'$F_{Jy}$ [Jy]')
		leg = ax.legend(loc=4,fontsize='small')
		#leg = plt.gca().get_legend()
		#plt.setp(leg.get_text(),fontsize='small')
		# Extract the quantities
		g = self.mo.get_quantities()
		
		# Get the wall positions for r and theta
		rw, tw = g.r_wall / au, g.t_wall

		# Make a 2-d grid of the wall positions (used by pcolormesh)
		R, T = np.meshgrid(rw, tw)

		# Calculate the position of the cell walls in cartesian coordinates
		X, Z = R * np.sin(T), R * np.cos(T)

		# Make a plot in (x, z) space for different zooms
		from matplotlib.colors import LogNorm,PowerNorm
		# Make a plot in (r, theta) space
		ax = fig.add_subplot(2, 3, 3)
		if g.shape[-1]==2:
			c = ax.pcolormesh(X, Z, g['temperature'][0].array[0, :, :]+g['temperature'][1].array[0, :, :],norm=PowerNorm(gamma=0.5,vmin=1,vmax=500))
		else :
			c = ax.pcolormesh(X, Z, g['temperature'][0].array[0, :, :],norm=PowerNorm(gamma=0.5,vmin=1,vmax=500))
		#ax.set_xscale('log')
		#ax.set_yscale('log')
		ax.set_xlim(X.min(), X.max()/5.)
		ax.set_ylim(Z.min()/10., Z.max()/10.)
		ax.set_xlabel('x (au)')
		ax.set_ylabel('z (au)')
		#ax.set_yticks([np.pi, np.pi * 0.75, np.pi * 0.5, np.pi * 0.25, 0.])
		#ax.set_yticklabels([r'$\pi$', r'$3\pi/4$', r'$\pi/2$', r'$\pi/4$', r'$0$'])
		cb = fig.colorbar(c)
		ax.set_title('Temperature structure')
		cb.set_label('Temperature (K)')
		#fig.savefig(modelname+'_temperature_spherical_rt.png', bbox_inches='tight')


		ax = fig.add_subplot(2, 3, 4)
		if g.shape[-1]==2:
			c = ax.pcolormesh(X, Z, g['density'][0].array[0, :, :]+g['density'][1].array[0, :, :],norm=LogNorm(vmin=1e-22,vmax=g['density'][0].array[0, :, :].max()))
		else :
			c = ax.pcolormesh(X, Z, g['density'][0].array[0, :, :],norm=LogNorm(vmin=1e-22,vmax=g['density'][0].array[0, :, :].max()))
		#ax.set_xscale('log')
		#ax.set_yscale('log')
		ax.set_xlim(X.min(), X.max()/5.)
		ax.set_ylim(Z.min()/10., Z.max()/10.)
		ax.set_xlabel('x (au)')
		ax.set_ylabel('z (au)')
		ax.set_title('Density structure')
		cb = fig.colorbar(c)
		cb.set_label('Density (g/cm2)')

		### plot the convolved image with the 37 micron filter (manually set to slice 18 of the cube - this would change with wavelength coverage)
		ax = fig.add_subplot(2, 3, 5)
		self.image = self.mo.get_image(inclination=inc,distance=self.dist,units='Jy')
		fits.writeto(modelname+'_inc_'+str(inc)+'.fits',self.image.val.swapaxes(0,2).swapaxes(1,2),clobber=True)

		### need to convolve the image with a Gaussian PSF
		pix = 2.*self.limval/au/self.Npix # in AU/pix
		pix_asec = pix/(self.dist/pc) # in asec/pix
		airy_asec = 3.5 #asec
		airy_pix = airy_asec/pix_asec # in pix
		gauss_pix = airy_pix/2.35 # in Gaussian std 
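		# 2.35 ~ 2*sqrt(2*ln2): converts the beam FWHM (taken here to be the
		# Airy radius in pixels) into a Gaussian standard deviation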
		print "Gaussian std: ",gauss_pix

		from scipy.ndimage.filters import gaussian_filter as gauss
		#print [(i,sed.wav[i]) for i in range(len(sed.wav))]

		img37 = self.image.val[:,:,18]
		convol = gauss(img37,gauss_pix,mode='constant',cval=0.0)
		Nc = self.Npix//2
		hw = min(int(20./pix_asec),Nc) #(max is Nc)
		#ax.imshow(img37,norm=LogNorm(vmin=1e-20,vmax=img37.max()))
		#ax.imshow(img37,interpolation='nearest')
		#ax.imshow(convol,norm=LogNorm(vmin=1e-20,vmax=img37.max()))
		#ax.imshow(convol,interpolation='nearest',norm=LogNorm(vmin=1e-20,vmax=img37.max()))
		ax.imshow(convol[Nc-hw:Nc+hw,Nc-hw:Nc+hw],interpolation='nearest',origin='lower',cmap=plt.get_cmap('gray'))
		airy_disk = plt.Circle((airy_pix*1.3,airy_pix*1.3),airy_pix,color=colors[3])		
		ax.add_artist(airy_disk)
		ax.text(airy_pix*3,airy_pix*1.3/2.0,'SOFIA 37um Airy disk',color=colors[3])
		ax.set_title('Convolved image')
		fits.writeto(modelname+'_inc_'+str(inc)+'_convol37.fits',convol,clobber=True)

		### draw a cross-section of the image to show the spatial extension in linear scale, to compare with what we observe in the model.
		ax = fig.add_subplot(2, 3, 6)
		ax.plot(range(Nc-hw,Nc+hw),convol[Nc-hw:Nc+hw,Nc-1],label='cross-section 1')
		ax.plot(range(Nc-hw,Nc+hw),convol[Nc-1,Nc-hw:Nc+hw],label='cross-section 2')
		maxconvol = convol[Nc-hw:Nc+hw,Nc-1].max()
		gauss = np.exp( -(np.array(range(-hw,hw))**2 / (2. * gauss_pix**2)))
		gauss/= gauss.max()
		gauss*=maxconvol
		ax.plot(range(Nc-hw,Nc+hw),gauss,label='SOFIA beam')
		leg = ax.legend(loc=2,fontsize='small')
		#leg = plt.gca().get_legend()
		#plt.setp(leg.get_text(),fontsize='small')
		ax.set_title('Cross section at the center')

		string=self.modelPrint()
		fig.text(0.0,0.14,string+'Av='+str(self.extinction)+'\n'+'dist='+str(self.dist/pc)+'\n',color='r')
		fig.savefig(modelname+'.png', bbox_inches='tight',dpi=300)

		if show:
			plt.show()
			
	def plotSim(self,dist_pc=140,inc=3,extinction=0,show=False):
		self.dist=dist_pc*pc
		self.inc=inc
		self.extinction=extinction
		modelname = self.folder+self.name
		self.mo = ModelOutput(modelname+'.rtout')

		#tracy_dust = np.loadtxt('Tracy_models/OH5.par')
		#chi = np.loadtxt('kmh94_3.1_full.chi')
		#wav = np.loadtxt('kmh94_3.1_full.wav')
		#Chi = interp1d(wav,chi,kind='linear')



		fig = plt.figure(figsize=(20,14))
		ax=fig.add_subplot(1,3,1)
		sed = self.mo.get_sed(aperture=-1, inclination='all', distance=self.dist)
		#print tracy_dust[11,1],Cext(sed.wav[-1]),Cext(sed.wav[-1])/tracy_dust[11,1]
		#tau = self.extinction*Chi(sed.wav)/Chi(0.550)/1.086
		#print Cext(sed.wav)/tracy_dust[11,1]
		#ext = np.array([np.exp(-tau) for i in range(sed.val.shape[0])])
		#print tau,np.exp(-tau)
		ax.loglog(sed.wav, sed.val.transpose(), color='black')
		ax.set_title(modelname+'_seds, Av='+str(self.extinction))
		ax.set_xlim(sed.wav.min(), 1300)
		ax.set_ylim(1e-13, 1e-7)
		ax.set_xlabel(r'$\lambda$ [$\mu$m]')
		ax.set_ylabel(r'$\lambda F_\lambda$ [ergs/cm$^2/s$]')
		#self.plotData(ax,sourcename)
		ax.set_xscale('log')
		ax.set_yscale('log')

		#ax.set_ylabel(r'$F_{Jy}$ [Jy]')
		#plt.legend(loc=4)

#		ax=fig.add_subplot(2,3,2)
#		sed = self.mo.get_sed(aperture=-1, inclination=self.inc, distance=self.dist)
#		ext=np.exp(-tau)
#		ax.loglog(sed.wav, sed.val.transpose()*ext.T, lw=3,color='black',label='source_total')
#		ax.set_xlim(sed.wav.min(), 1300)
#		ax.set_ylim(1e-13, 1e-7)  ### for lamFlam
#		sed = self.mo.get_sed(aperture=-1, inclination=self.inc, distance=self.dist,component='source_emit')
#		ax.loglog(sed.wav, sed.val.transpose()*ext.T, color='blue',label='source_emit')
#		sed = self.mo.get_sed(aperture=-1, inclination=self.inc, distance=self.dist,component='source_scat')
#		ax.loglog(sed.wav, sed.val.transpose()*ext.T, color='teal',label='source_scat')
#		sed = self.mo.get_sed(aperture=-1, inclination=self.inc, distance=self.dist,component='dust_emit')
#		ax.loglog(sed.wav, sed.val.transpose()*ext.T, color='red',label='dust_emit')
#		sed = self.mo.get_sed(aperture=-1, inclination=self.inc, distance=self.dist,component='dust_scat')
#		ax.loglog(sed.wav, sed.val.transpose()*ext.T, color='orange',label='dust_scat')
#		#self.plotData(ax,sourcename)
#		ax.set_xscale('log')
#		ax.set_yscale('log')
#		ax.set_title('seds_inc=inc')
#		ax.set_xlabel(r'$\lambda$ [$\mu$m]')
#		ax.set_ylabel(r'$\lambda F_\lambda$ [ergs/cm$^2/s$]')
#		#ax.set_ylabel(r'$F_{Jy}$ [Jy]')
#		leg = ax.legend(loc=4,fontsize='small')
		#leg = plt.gca().get_legend()
		#plt.setp(leg.get_text(),fontsize='small')
		# Extract the quantities
		g = self.mo.get_quantities()
		
		# Get the wall positions for r and theta
		rw, tw = g.r_wall / au, g.t_wall

		# Make a 2-d grid of the wall positions (used by pcolormesh)
		R, T = np.meshgrid(rw, tw)

		# Calculate the position of the cell walls in cartesian coordinates
		X, Z = R * np.sin(T), R * np.cos(T)

		# Make a plot in (x, z) space for different zooms
		from matplotlib.colors import LogNorm,PowerNorm
		# Make a plot in (r, theta) space
		ax = fig.add_subplot(1, 3, 2)
		if g.shape[-1]==2:
			c = ax.pcolormesh(X, Z, g['temperature'][0].array[0, :, :]+g['temperature'][1].array[0, :, :],norm=PowerNorm(gamma=0.5,vmin=1,vmax=500))
		else :
			c = ax.pcolormesh(X, Z, g['temperature'][0].array[0, :, :],norm=PowerNorm(gamma=0.5,vmin=1,vmax=500))
		#ax.set_xscale('log')
		#ax.set_yscale('log')
		ax.set_xlim(X.min(), X.max())
		ax.set_ylim(Z.min(), Z.max())
		ax.set_xlabel('x (au)')
		ax.set_ylabel('z (au)')
		#ax.set_yticks([np.pi, np.pi * 0.75, np.pi * 0.5, np.pi * 0.25, 0.])
		#ax.set_yticklabels([r'$\pi$', r'$3\pi/4$', r'$\pi/2$', r'$\pi/4$', r'$0$'])
		cb = fig.colorbar(c)
		ax.set_title('Temperature structure')
		cb.set_label('Temperature (K)')
		#fig.savefig(modelname+'_temperature_spherical_rt.png', bbox_inches='tight')


		ax = fig.add_subplot(1, 3, 3)
		if g.shape[-1]==2:
			c = ax.pcolormesh(X, Z, g['density'][0].array[0, :, :]+g['density'][1].array[0, :, :],norm=LogNorm(vmin=1e-22,vmax=g['density'][0].array[0, :, :].max()))
		else :
			c = ax.pcolormesh(X, Z, g['density'][0].array[0, :, :],norm=LogNorm(vmin=1e-22,vmax=g['density'][0].array[0, :, :].max()))
		#ax.set_xscale('log')
		#ax.set_yscale('log')
		ax.set_xlim(X.min(), X.max())
		ax.set_ylim(Z.min(), Z.max())
		ax.set_xlabel('x (au)')
		ax.set_ylabel('z (au)')
		ax.set_title('Density structure')
		cb = fig.colorbar(c)
		cb.set_label('Density (g/cm2)')

#		### plot the convolved image with the 37 micron filter (manually set to slice 18 of the cube - this would change with wavelength coverage)
#		ax = fig.add_subplot(2, 3, 5)
#		self.image = self.mo.get_image(inclination=inc,distance=self.dist,units='Jy')
#		fits.writeto(modelname+'_inc_'+str(inc)+'.fits',self.image.val.swapaxes(0,2).swapaxes(1,2),clobber=True)

#		### need to convolve the image with a Gaussian PSF
#		pix = 2.*self.limval/au/self.Npix # in AU/pix
#		pix_asec = pix/(self.dist/pc) # in asec/pix
#		airy_asec = 3.5 #asec
#		airy_pix = airy_asec/pix_asec # in pix
#		gauss_pix = airy_pix/2.35 # in Gaussian std 
#		print "Gaussian std: ",gauss_pix

#		from scipy.ndimage.filters import gaussian_filter as gauss
#		#print [(i,sed.wav[i]) for i in range(len(sed.wav))]

#		img37 = self.image.val[:,:,18]
#		convol = gauss(img37,gauss_pix,mode='constant',cval=0.0)
#		Nc = self.Npix/2
#		hw = min(int(20./pix_asec),Nc) #(max is Nc)
#		#ax.imshow(img37,norm=LogNorm(vmin=1e-20,vmax=img37.max()))
#		#ax.imshow(img37,interpolation='nearest')
#		#ax.imshow(convol,norm=LogNorm(vmin=1e-20,vmax=img37.max()))
#		#ax.imshow(convol,interpolation='nearest',norm=LogNorm(vmin=1e-20,vmax=img37.max()))
#		ax.imshow(convol[Nc-hw:Nc+hw,Nc-hw:Nc+hw],interpolation='nearest',origin='lower',cmap=plt.get_cmap('gray'))
#		airy_disk = plt.Circle((airy_pix*1.3,airy_pix*1.3),airy_pix,color=colors[3])		
#		ax.add_artist(airy_disk)
#		ax.text(airy_pix*3,airy_pix*1.3/2.0,'SOFIA 37um Airy disk',color=colors[3])
#		ax.set_title('Convolved image')
#		fits.writeto(modelname+'_inc_'+str(inc)+'_convol37.fits',convol,clobber=True)

#		### draw a cross-section of the image to show the spatial extension in linear scale, to compare with what we observe in the model.
#		ax = fig.add_subplot(2, 3, 6)
#		ax.plot(range(Nc-hw,Nc+hw),convol[Nc-hw:Nc+hw,Nc-1],label='cross-section 1')
#		ax.plot(range(Nc-hw,Nc+hw),convol[Nc-1,Nc-hw:Nc+hw],label='cross-section 2')
#		maxconvol = convol[Nc-hw:Nc+hw,Nc-1].max()
#		gauss = np.exp( -(np.array(range(-hw,hw))**2 / (2. * gauss_pix**2)))
#		gauss/= gauss.max()
#		gauss*=maxconvol
#		ax.plot(range(Nc-hw,Nc+hw),gauss,label='SOFIA beam')
#		leg = ax.legend(loc=2,fontsize='small')
#		#leg = plt.gca().get_legend()
#		#plt.setp(leg.get_text(),fontsize='small')
#		ax.set_title('Cross section at the center')

		string=self.modelPrint()
		fig.text(0.0,0.14,string+'Av='+str(self.extinction)+'\n'+'dist='+str(self.dist/pc)+'\n',color='r')
		fig.savefig(modelname+'.png', bbox_inches='tight',dpi=300)

		if show:
			plt.show()
Example #19
0
def active_dust_add(ds,m,grid_of_sizes,nsizes,dustdens,specific_energy,refined=[False]):
        #first, save the grid_of_sizes to the ds.parameters so we can carry it around
        ds.parameters['reg_grid_of_sizes'] = grid_of_sizes #named 'reg_grid_of_sizes' 


        #for empty cells, use the median size distribution
        for isize in range(nsizes):
                wzero = np.where(grid_of_sizes[:,isize] == 0)[0]
                wnonzero = np.where(grid_of_sizes[:,isize] != 0)[0]
                
                grid_of_sizes[wzero,isize] = np.median(grid_of_sizes[wnonzero,isize])
                
                print(len(wzero)/len(wnonzero))
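                #the printed ratio is the number of empty cells relative to
                #the number of occupied cells for this size bin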



        #now load the mapping between grain bin and filename for the lookup table
        data = np.load(cfg.par.pd_source_dir+'active_dust/dust_files/binned_dust_sizes.npz')
        grain_size_left_edge_array = data['grain_size_left_edge_array']
        grain_size_right_edge_array  = data['grain_size_right_edge_array']
        dust_filenames = data['outfile_filenames']

        nbins = len(grain_size_left_edge_array)




        #find which sizes in the hydro simulation correspond to the
        #pre-binned extinction law sizes from dust_file_writer.py

        dust_file_to_grain_size_mapping_idx = []
        x=np.linspace(cfg.par.otf_extinction_log_min_size,cfg.par.otf_extinction_log_max_size,nsizes)
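        #x is the log10 grain-size grid of the hydro simulation; for each
        #pre-binned extinction-law bin we store the index of the nearest
        #simulated size so grid_of_sizes can be sampled at those bins below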
        for i in range(nbins):
                dust_file_to_grain_size_mapping_idx.append(find_nearest(x,grain_size_left_edge_array[i]))


        #set up the frac array that is nbins big.  this is the
        #fractional contribution of each dust file bin which is based
        #on the total number of grains in the grid in that bin.

        #frac =np.zeros([dustdens.shape[0],nbins])

        dsf_grid = np.zeros([dustdens.shape[0],nbins])
        frac_grid = np.zeros([dustdens.shape[0],nbins])
        debug_nearest_extinction_curve = np.zeros([nbins])

        if cfg.par.OTF_EXTINCTION_MRN_FORCE == True:
                grid_sum = np.zeros(nbins)

                #how DNSF was set up.  not needed other than for testing
                x=np.linspace(-4,0,41)
                #load an example dust size function for testing against
                dsf = np.loadtxt(cfg.par.pd_source_dir+'active_dust/mrn_dn.txt')#DNSF_example.txt')

                #nbins = len(grain_size_left_edge_array)


                for i in range(nbins):
                        #find the index bounds in x that we want to interpolate between
                        idx0 = find_nearest(x,grain_size_left_edge_array[i])
                        if x[idx0] > grain_size_left_edge_array[i]: idx0 -= 1
                        idx1 = idx0+1
                
                        dsf_interp = np.interp(grain_size_left_edge_array[i],[x[idx0],x[idx1]],[dsf[idx0],dsf[idx1]])
                
                        #this sets the fraction of each bin size we need (for the
                        #entire grid!)
                        dsf_grid[:,i] = dsf_interp
                        grid_sum[i] = np.sum(dsf_grid[:,i])
                        debug_nearest_extinction_curve[i] = dsf_interp


                #set up the frac array that is nbins big.  this is the
                #fractional contribution of each dust file bin which is based
                #on the total number of grains in the grid in that bin.
                frac = grid_sum/np.sum(grid_sum)

                #now we need to set the localized extinction law. we do
                #this by comparing, fractionally, a given cell's number of
                #grains in that bin to the maximum number of grains that
                #the grid has in that bin.
                
                for i in range(nbins):
                        frac_grid[:,i] = dsf_grid[:,i]/np.max(dsf_grid[:,i])*frac[i]
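                #since dsf_grid[:,i] is identical in every cell here, the
                #normalization above makes frac_grid[:,i] equal to frac[i]
                #everywhere -- the forced-MRN test imposes the same size
                #distribution in all cells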
            
                '''
                import matplotlib.pyplot as plt
                fig = plt.figure()
                ax = fig.add_subplot(111)
                ax.plot(x,dsf,label='dsf')
                ax.plot(grain_size_left_edge_array,frac_grid[0,:],label='frac_grid')
                ax.plot(grain_size_left_edge_array,grid_sum,label='grid_sum')
                ax.plot(grain_size_left_edge_array,debug_nearest_extinction_curve,label='d_n_e_c')
                ax.set_yscale('log')
                plt.legend()
                fig.savefig('junk.png',dpi=300)
                
                import pdb
                pdb.set_trace()
                '''

                #------------------------    
        
        else:


                grid_sum = np.zeros(nbins)


                #this sets the fraction of each bin size we need (for the
                #entire grid!)
                for i in range(nbins):
                        grid_sum[i] = np.sum(grid_of_sizes[:,dust_file_to_grain_size_mapping_idx[i]])


                #set up the frac array that is nbins big.  this is the
                #fractional contribution of each dust file bin which is based
                #on the total number of grains in the grid in that bin.
                frac = grid_sum/np.sum(grid_sum)

            
                #now we need to set the localized extinction law. we do
                #this by comparing, fractionally, a given cell's number of
                #grains in that bin to the maximum number of grains that
                #the grid has in that bin.
                
                #this block tests if we're in an octree or not (i.e., we
                #could be in a voronoi mesh, in which case refined doesn't
                #mean anything).  this is necessary since for an octree we
                #don't want to worry about the Trues
                if np.sum(refined) > 0:
                        wFalse = np.where(np.asarray(refined) == 0)[0]
                
                        for i in range(nbins):
                                frac_grid[wFalse,i] = grid_of_sizes[:,dust_file_to_grain_size_mapping_idx[i]]/np.max(grid_of_sizes[:,dust_file_to_grain_size_mapping_idx[i]])*frac[i]
                else:
                        #we take the fractional grain size distribution
                        #from each size bin and weight each cell by the
                        #ratio of the logarithm of the number of grains
                        #in that bin in that cell to the logarithm of
                        #the number in the cell with the most grains in
                        #that bin.
                        for i in range(nbins):
                                frac_grid[:,i] = np.log10(grid_of_sizes[:,dust_file_to_grain_size_mapping_idx[i]])/np.max(np.log10(grid_of_sizes[:,dust_file_to_grain_size_mapping_idx[i]]))*frac[i]


                #now add the dust grids to hyperion
                for bin in range(nbins):
                        file = dust_filenames[bin]
                        d = SphericalDust(cfg.par.pd_source_dir+'active_dust/'+file)
                        m.add_density_grid(dustdens*frac_grid[:,bin],d,specific_energy=specific_energy)
                        #m.add_density_grid(dustdens*frac[bin],d,specific_energy=specific_energy)

        

        #finally, save the grid_of_sizes and grain sizes to the ds.parameters so we can carry it around
        ds.parameters['reg_grid_of_sizes'] = grid_of_sizes #named 'reg_grid_of_sizes'
        ds.parameters['grain_sizes_in_micron '] = 10.**(x)
Example #20
0
def enzo_m_gen(fname,field_add):
    

    
    #add the fields in pd format
    pf = field_add(fname)
    ad = pf.all_data()
   
 

    #cutout
    center = pf.arr([cfg.model.x_cent,cfg.model.y_cent,cfg.model.z_cent],'code_length')
    
    box_len = pf.quan(cfg.par.zoom_box_len,'kpc').in_units('code_length')
   
    min_region = [center[0]-box_len,center[1]-box_len,center[2]-box_len]
    max_region = [center[0]+box_len,center[1]+box_len,center[2]+box_len]
    region = pf.region(center,min_region,max_region)
  
    pf = region.ds
  
    proj_plots(pf)
    #def. dust density
    def _dust_density(field, data):
        return data[('gas', 'metal_density')].in_units("g/cm**3")*cfg.par.dusttometals_ratio
    
    pf.add_field(('gas', 'dust_density'), function=_dust_density, units = 'g/cm**3')
       
    amr = AMRGrid.from_yt(pf, quantity_mapping={'density':('gas','dust_density')})
    


    '''
    levels = pf.index.max_level
    
    amr = AMRGrid()
    for ilevel in range(levels):
        level = amr.add_level()
        
    for igrid in pf.index.select_grids(ilevel):
        print igrid
        grid = level.add_grid()
        grid.xmin,grid.xmax = igrid.LeftEdge[0].in_units('cm'),igrid.RightEdge[0].in_units('cm')
        grid.ymin,grid.ymax = igrid.LeftEdge[1].in_units('cm'),igrid.RightEdge[1].in_units('cm')
        grid.zmin,grid.zmax = igrid.LeftEdge[2].in_units('cm'),igrid.RightEdge[2].in_units('cm')
        grid.quantities["density"] = np.transpose(np.array(igrid[("gas","metal_density")].in_units('g/cm**3')*cfg.par.dusttometals_ratio))
        grid.nx,grid.ny,grid.nz = igrid[("gas","metal_density")].shape
    '''


    m = Model()

    m.set_amr_grid(amr)

    energy_density_absorbed=energy_density_absorbed_by_CMB()
    energy_density_absorbed = np.repeat(energy_density_absorbed.value,amr['density'].shape)


    d = SphericalDust(cfg.par.dustdir+cfg.par.dustfile)
    if cfg.par.SUBLIMATION == True:
        d.set_sublimation_temperature('fast',temperature=cfg.par.SUBLIMATION_TEMPERATURE)
    m.add_density_grid(amr['density'],d,specific_energy=energy_density_absorbed)
    m.set_specific_energy_type('additional')
 #m.add_density_grid(amr['density'], cfg.par.dustdir+cfg.par.dustfile)
    

    #define the random things needed for parsing out the output args
    #center = pf.domain_center
    [xcent,ycent,zcent] = center
   
    boost = np.array([xcent,ycent,zcent])
    dx = pf.domain_width[0].in_units('cm')
    dy = pf.domain_width[1].in_units('cm')
    dz = pf.domain_width[2].in_units('cm')
    
    
    return m,xcent,ycent,zcent,dx,dy,dz,pf,boost