Example 1
def _get_halo_list(dataset,
                   halo_profiler_kwargs=None,
                   halo_profiler_actions=None,
                   halo_list='all'):
    "Load a list of halos for the dataset."

    if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
    if halo_profiler_actions is None: halo_profiler_actions = []

    hp = HaloProfiler(dataset, **halo_profiler_kwargs)
    for action in halo_profiler_actions:
        if 'args' not in action: action['args'] = ()
        if 'kwargs' not in action: action['kwargs'] = {}
        action['function'](hp, *action['args'], **action['kwargs'])

    if halo_list == 'all':
        return_list = copy.deepcopy(hp.all_halos)
    elif halo_list == 'filtered':
        return_list = copy.deepcopy(hp.filtered_halos)
    else:
        mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
        return_list = None

    del hp
    return return_list
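
The halo_profiler_actions machinery above is a generic action-dispatch pattern: each action is a dict holding a callable plus optional positional and keyword arguments. A minimal standalone sketch of the same pattern (every name below is hypothetical, not part of the original API):

def run_actions(target, actions=None):
    # Normalize the mutable default, as _get_halo_list does.
    if actions is None:
        actions = []
    for action in actions:
        # Missing 'args'/'kwargs' entries default to empty, then the
        # callable is invoked against the target object.
        action['function'](target, *action.get('args', ()),
                           **action.get('kwargs', {}))

def tag(obj, label, value=1):
    obj['label'] = (label, value)

halo = {}
run_actions(halo, [{'function': tag, 'args': ('massive',), 'kwargs': {'value': 2}}])
# halo is now {'label': ('massive', 2)}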
Example 2
 def _ensure_db_sync(self):
     # If the database becomes out of sync across tasks, ostensibly due to
     # parallel file system funniness, things will go bad very quickly.
     # Therefore, just to be very, very careful, we will ensure that the
     # md5 hash of the file is identical across all tasks before proceeding.
     for i in range(5):
         try:
             file = open(self.database)
         except IOError:
             # This is to give a little bit of time for the database creation
             # to replicate across the file system.
             time.sleep(5)
             file = open(self.database)
         hash = md5.md5(file.read()).hexdigest()
         file.close()
         ignore, hashes = self._mpi_info_dict(hash)
         hashes = set(hashes.values())
         if len(hashes) == 1:
             break
         else:
             # Wait a little bit for the file system to (hopefully) sync up.
             time.sleep(5)
     if len(hashes) == 1:
         return
     else:
         mylog.error("The file system is not properly synchronizing the database.")
         raise RuntimeError("Fatal error. Exiting.")
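
The md5 module used above was deprecated in Python 2.5 in favor of hashlib. A standalone sketch of the same file-hashing step with the modern module; gathering the hashes across tasks would still go through the original's _mpi_info_dict:

import hashlib

def file_md5(path):
    # Read the file and return its md5 hex digest, equivalent to
    # md5.md5(file.read()).hexdigest() in the method above.
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()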
Example 3
def deltaz_forward(cosmology, z, target_distance):
    "Calculate deltaz corresponding to moving a comoving distance starting from some redshift."

    d_Tolerance = 1e-4
    max_Iterations = 100

    # Calculate the delta z that corresponds to the target comoving distance
    # starting from the given redshift.  Use the secant method (a
    # finite-difference form of Newton's method) to find the solution.
    z1 = z
    z2 = z1 - 0.1  # just an initial guess
    distance1 = 0.0
    iteration = 1

    # Convert comoving radial distance into Mpc / h, since that's how box size is stored.
    distance2 = cosmology.ComovingRadialDistance(
        z2, z) * cosmology.HubbleConstantNow / 100.0

    while ((na.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
        m = (distance2 - distance1) / (z2 - z1)
        z1 = z2
        distance1 = distance2
        z2 = ((target_distance - distance2) / m) + z2
        distance2 = cosmology.ComovingRadialDistance(
            z2, z) * cosmology.HubbleConstantNow / 100.0
        iteration += 1
        if (iteration > max_Iterations):
            mylog.error(
                "deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)."
                % (z, na.fabs(z2 - z)))
            break
    return na.fabs(z2 - z)
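
The loop above is the secant method: the slope is a finite difference through the two most recent iterates rather than an analytic derivative. Writing D(z') for the comoving radial distance between z' and z in Mpc/h, each iteration computes

m_n = \frac{D(z_n) - D(z_{n-1})}{z_n - z_{n-1}}, \qquad
z_{n+1} = z_n + \frac{D_{\mathrm{target}} - D(z_n)}{m_n},

stopping when \left|D(z_n) - D_{\mathrm{target}}\right| / D(z_n) \le 10^{-4} or after max_Iterations steps. The same iteration reappears in Examples 10 and 13.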
Example 4
 def __check_directory(self, outputDir):
     if (os.path.exists(outputDir)):
         if not (os.path.isdir(outputDir)):
             mylog.error(
                 "Output directory exists, but is not a directory: %s." %
                 outputDir)
             raise IOError(outputDir)
     else:
         os.mkdir(outputDir)
Example 5
 def _open_database(self):
     # open the database. Check to make sure the database file exists.
     if not os.path.exists(self.database):
         mylog.error("The database file %s cannot be found. Exiting." % \
             self.database)
         return False
     self.conn = sql.connect(self.database)
     self.cursor = self.conn.cursor()
     return True
Example 6
def _compare_solutions(solution1, solution2):
    "Calculate common volume between two light cone solutions."

    if (len(solution1) != len(solution2)):
        mylog.error(
            "Cannot compare light cone solutions with unequal numbers of slices."
        )
        return -1

    commonVolume = 0.0
    totalVolume = 0.0

    # Check that solution volumes are the same.
    if ((solution1[0]['DepthBoxFraction'] *
         solution1[0]['WidthBoxFraction']**2) !=
        (solution2[0]['DepthBoxFraction'] *
         solution2[0]['WidthBoxFraction']**2)):
        mylog.error(
            "Light cone solutions do not have equal volumes, will use the smaller one."
        )

    for q in range(len(solution1)):
        cube1 = na.zeros(shape=(len(solution1[q]['ProjectionCenter']), 2))
        volume1 = 1.0
        for w in range(len(cube1)):
            if (w == solution1[q]['ProjectionAxis']):
                width = solution1[q]['DepthBoxFraction']
            else:
                width = solution1[q]['WidthBoxFraction']
            volume1 *= width
            cube1[w] = [
                solution1[q]['ProjectionCenter'][w] - 0.5 * width,
                solution1[q]['ProjectionCenter'][w] + 0.5 * width
            ]

        cube2 = na.zeros(shape=(len(solution2[q]['ProjectionCenter']), 2))
        volume2 = 1.0
        for w in range(len(cube2)):
            if (w == solution2[q]['ProjectionAxis']):
                width = solution2[q]['DepthBoxFraction']
            else:
                width = solution2[q]['WidthBoxFraction']
            volume2 *= width
            cube2[w] = [
                solution2[q]['ProjectionCenter'][w] - 0.5 * width,
                solution2[q]['ProjectionCenter'][w] + 0.5 * width
            ]

        totalVolume += min(volume1, volume2)
        commonVolume += commonNVolume(cube1,
                                      cube2,
                                      periodic=na.array([[0, 1], [0, 1],
                                                         [0, 1]]))

    return (commonVolume / totalVolume)
Example 7
 def query(self, string):
     # Query the database and return a list of tuples.
     if string is None:
         mylog.error("You must enter a SQL query.")
         return None
     items = []
     self.cursor.execute(string)
     results = self.cursor.fetchone()
     while results:
         items.append(results)
         results = self.cursor.fetchone()
     return items
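
The connect/cursor/fetchone calls in Examples 5 and 7 indicate that sql is Python's built-in sqlite3 module (presumably imported as sqlite3 as sql). A standalone sketch of the same open-and-query pattern; the file name, table, and query are illustrative only:

import sqlite3 as sql

conn = sql.connect('halos.db')        # hypothetical database file
cursor = conn.cursor()
cursor.execute("SELECT * FROM Halos")  # illustrative query
items = cursor.fetchall()             # one-shot equivalent of the fetchone() loop above
conn.close()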
Example 8
    def _write_filtered_halo_list(self, filename, format="%s"):
        """
        Write out list of filtered halos along with any quantities 
        picked up during the filtering process.
        """

        if len(self.filtered_halos) == 0:
            mylog.error("No halos in filtered list.")
            return

        filename = "%s/%s" % (self.pf.fullpath, filename)
        mylog.info("Writing filtered halo list to %s." % filename)
        file = open(filename, "w")
        fields = [field for field in sorted(self.filtered_halos[0])]
        halo_fields = []
        for halo_field in self.filter_quantities:
            if halo_field in fields:
                fields.remove(halo_field)
                halo_fields.append(halo_field)
        # Make it so number of fields in header is same as number of data columns.
        header_fields = []
        for halo_field in halo_fields:
            if isinstance(self.filtered_halos[0][halo_field], types.ListType):
                header_fields.extend([
                    "%s[%d]" % (halo_field, q)
                    for q in range(len(self.filtered_halos[0][halo_field]))
                ])
            else:
                header_fields.append(halo_field)
        file.write("# ")
        file.write("\t".join(header_fields + fields + ["\n"]))

        for halo in self.filtered_halos:
            for halo_field in halo_fields:
                if isinstance(halo[halo_field], types.ListType):
                    field_data = na.array(halo[halo_field])
                    field_data.tofile(file, sep="\t", format=format)
                else:
                    if halo_field == 'id':
                        file.write("%04d" % halo[halo_field])
                    else:
                        file.write("%s" % halo[halo_field])
                file.write("\t")
            field_data = na.array([halo[field] for field in fields])
            field_data.tofile(file, sep="\t", format=format)
            file.write("\n")
        file.close()
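
The writer above leans on ndarray.tofile with a sep argument, which switches numpy from raw binary output to delimited text. A minimal standalone illustration (the file name is hypothetical):

import numpy as np

row = np.array([1.0, 2.5, 3.75])
with open('row.txt', 'w') as f:
    # With sep given, tofile writes formatted text, one value per
    # element: 1.00<tab>2.50<tab>3.75
    row.tofile(f, sep='\t', format='%0.2f')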
Example 9
    def multiplicityfunction(self, sigma):
        """
        /* Multiplicity function - this is where the various fitting functions/analytic 
        theories are different.  The various places where I found these fitting functions
        are listed below.  */
        """

        nu = self.delta_c0 / sigma

        if self.fitting_function == 1:
            # Press-Schechter (This form from Jenkins et al. 2001, MNRAS 321, 372-384, eqtn. 5)
            thismult = math.sqrt(2.0 / math.pi) * nu * math.exp(-0.5 * nu * nu)

        elif self.fitting_function == 2:
            # Jenkins et al. 2001, MNRAS 321, 372-384, eqtn. 9
            thismult = 0.315 * math.exp(
                -1.0 * math.pow(abs(math.log(1.0 / sigma) + 0.61), 3.8))

        elif self.fitting_function == 3:
            # Sheth-Tormen 1999, eqtn 10, using expression from Jenkins et al. 2001, eqtn. 7
            A = 0.3222
            a = 0.707
            p = 0.3
            thismult = A*math.sqrt(2.0*a/math.pi)*(1.0+ math.pow( 1.0/(nu*nu*a), p) )*\
            nu * math.exp(-0.5*a*nu*nu)

        elif self.fitting_function == 4:
            # LANL fitting function - Warren et al. 2005, astro-ph/0506395, eqtn. 5
            A = 0.7234
            a = 1.625
            b = 0.2538
            c = 1.1982
            thismult = A * (math.pow(sigma, -1.0 * a) + b) * math.exp(
                -1.0 * c / sigma / sigma)

        else:
            mylog.error(
                "Don't understand this.  Fitting function requested is %d\n",
                self.fitting_function)
            return None

        return thismult
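
In LaTeX form, with \nu = \delta_{c0}/\sigma, the four fitting functions coded above are:

f_{\rm PS}(\sigma) = \sqrt{2/\pi}\,\nu\,e^{-\nu^2/2}

f_{\rm Jenkins}(\sigma) = 0.315\,\exp\left(-\left|\ln\sigma^{-1} + 0.61\right|^{3.8}\right)

f_{\rm ST}(\sigma) = A\sqrt{2a/\pi}\left[1 + (a\nu^2)^{-p}\right]\nu\,e^{-a\nu^2/2}, \quad A = 0.3222,\ a = 0.707,\ p = 0.3

f_{\rm Warren}(\sigma) = A\left(\sigma^{-a} + b\right)e^{-c/\sigma^2}, \quad A = 0.7234,\ a = 1.625,\ b = 0.2538,\ c = 1.1982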
Example 10
    def _calculate_deltaz_min(self, deltaz_min=0.0):
        "Calculate delta z that corresponds to a single top grid pixel going from z to (z - delta z)."

        d_Tolerance = 1e-4
        max_Iterations = 100

        targetDistance = self.enzoParameters[
            'CosmologyComovingBoxSize'] / self.enzoParameters[
                'TopGridDimensions'][0]

        for output in self.allOutputs:
            z = output['redshift']

            # Calculate the delta z that corresponds to the length of a top grid pixel at a given redshift.
            # Use the secant method (a finite-difference form of Newton's method) to find the solution.
            z1 = z
            z2 = z1 - 0.01  # just an initial guess
            distance1 = 0.0
            iteration = 1

            # Convert comoving radial distance into Mpc / h, since that's how box size is stored.
            distance2 = self.cosmology.ComovingRadialDistance(
                z2, z) * self.enzoParameters['CosmologyHubbleConstantNow']

            while ((na.fabs(distance2 - targetDistance) / distance2) >
                   d_Tolerance):
                m = (distance2 - distance1) / (z2 - z1)
                z1 = z2
                distance1 = distance2
                z2 = ((targetDistance - distance2) / m) + z2
                distance2 = self.cosmology.ComovingRadialDistance(
                    z2, z) * self.enzoParameters['CosmologyHubbleConstantNow']
                iteration += 1
                if (iteration > max_Iterations):
                    mylog.error(
                        "calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)."
                        % (z, na.fabs(z2 - z)))
                    break
            # Use this calculation or the absolute minimum specified by the user.
            output['deltazMin'] = max(na.fabs(z2 - z), deltaz_min)
Example 11
 def __init__(self,
              pf,
              data_source=None,
              star_mass=None,
              star_creation_time=None,
              volume=None,
              bins=300):
     self._pf = pf
     self._data_source = data_source
     self.star_mass = star_mass
     self.star_creation_time = star_creation_time
     self.volume = volume
     self.bin_count = bins
     # Check to make sure we have the right set of information.
     if data_source is None:
         if self.star_mass is None or self.star_creation_time is None or \
         self.volume is None:
             mylog.error("""
              If data_source is not provided, all of these parameters need to be set:
             star_mass (array, Msun),
             star_creation_time (array, code units),
             volume (float, Mpc**3).
             """)
             return None
         self.mode = 'provided'
     else:
         self.mode = 'data_source'
     # Set up for time conversion.
     self.cosm = lagos.EnzoCosmology(
         HubbleConstantNow=(100.0 * self._pf['CosmologyHubbleConstantNow']),
         OmegaMatterNow=self._pf['CosmologyOmegaMatterNow'],
         OmegaLambdaNow=self._pf['CosmologyOmegaLambdaNow'],
         InitialRedshift=self._pf['CosmologyInitialRedshift'])
     # Find the time right now.
     self.time_now = self.cosm.ComputeTimeFromRedshift(
         self._pf["CosmologyCurrentRedshift"])  # seconds
     # Build the distribution.
     self.build_dist()
Example 12
 def __init__(self, halos=None, database='halos.db',
         dotfile='MergerTree.gv', current_time=None, link_min=0.2):
     self.database = database
     self.link_min = link_min
     if halos is None:
         mylog.error("Please provide at least one halo to start the tree. Exiting.")
         return None
     result = self._open_database()
     if not result:
         return None
     if type(halos) == types.IntType:
         halos = [halos]
     if current_time is not None:
         halos = self._translate_haloIDs(halos, current_time)
     newhalos = set(halos)
     # A key is the GlobalHaloID for this halo, and the content is a
     # Node object.
     self.nodes = {}
     # A key is the GlobalHaloID for the parent in the relationship,
     # and the content is a Link object.
     self.links = defaultdict(Link)
     # Record which halos are at the same z level for convenience.
     # The key is a z value, and the content is a list of co-leveled halo IDs.
     self.levels = defaultdict(list)
     # For the first set of halos.
     self._add_nodes(newhalos)
     # Recurse over parents.
     while len(newhalos) > 0:
         mylog.info("Finding parents for %d children." % len(newhalos))
         newhalos = self._find_parents(newhalos)
         self._add_nodes(newhalos)
     mylog.info("Writing out to disk.")
     self._open_dot(dotfile)
     self._write_nodes()
     self._write_links()
     self._write_levels()
     self._close_dot()
     self._close_database()
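
The constructor's while loop is a level-by-level walk up the ancestry: find the parents of the current generation, add them as nodes, and repeat until no new halos turn up. A standalone sketch of that pattern, with find_parents as a hypothetical stand-in for the class's _find_parents:

def walk_ancestry(seed_ids, find_parents):
    # Visit each generation of parents exactly once.
    visited = set(seed_ids)
    frontier = set(seed_ids)
    while frontier:
        frontier = set(find_parents(frontier)) - visited
        visited |= frontier
    return visited

# Toy parent map: halo ID -> list of parent IDs.
parents = {4: [2, 3], 3: [1], 2: [], 1: []}
lookup = lambda ids: [p for i in ids for p in parents[i]]
print(walk_ancestry([4], lookup))   # the full ancestry: IDs 1 through 4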
Example 13
    def _calculate_deltaz_max(self):
        "Calculate delta z that corresponds to full box length going from z to (z - delta z)."

        d_Tolerance = 1e-4
        max_Iterations = 100

        targetDistance = self.enzoParameters['CosmologyComovingBoxSize']

        for output in self.allOutputs:
            z = output['redshift']

            # Calculate the delta z that corresponds to the length of the box at a given redshift.
            # Use the secant method (a finite-difference form of Newton's method) to find the solution.
            z1 = z
            z2 = z1 - 0.1  # just an initial guess
            distance1 = 0.0
            iteration = 1

            # Convert comoving radial distance into Mpc / h, since that's how box size is stored.
            distance2 = self.cosmology.ComovingRadialDistance(
                z2, z) * self.enzoParameters['CosmologyHubbleConstantNow']

            while ((na.fabs(distance2 - targetDistance) / distance2) >
                   d_Tolerance):
                m = (distance2 - distance1) / (z2 - z1)
                z1 = z2
                distance1 = distance2
                z2 = ((targetDistance - distance2) / m) + z2
                distance2 = self.cosmology.ComovingRadialDistance(
                    z2, z) * self.enzoParameters['CosmologyHubbleConstantNow']
                iteration += 1
                if (iteration > max_Iterations):
                    mylog.error(
                        "calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)."
                        % (z, na.fabs(z2 - z)))
                    break
            output['deltazMax'] = na.fabs(z2 - z)
Example 14
    def __init__(self, omega_matter, omega_baryon, omega_hdm, degen_hdm,
                 omega_lambda, hubble, redshift):
        """
        /* This routine takes cosmological parameters and a redshift and sets up
        all the internal scalar quantities needed to compute the transfer function. */
        /* INPUT: omega_matter -- Density of CDM, baryons, and massive neutrinos,
                        in units of the critical density. */
        /* 	  omega_baryon -- Density of baryons, in units of critical. */
        /* 	  omega_hdm    -- Density of massive neutrinos, in units of critical */
        /* 	  degen_hdm    -- (Int) Number of degenerate massive neutrino species */
        /*        omega_lambda -- Cosmological constant */
        /* 	  hubble       -- Hubble constant, in units of 100 km/s/Mpc */
        /*        redshift     -- The redshift at which to evaluate */
        /* OUTPUT: Returns 0 if all is well, 1 if a warning was issued.  Otherwise,
            sets many global variables for use in TFmdm_onek_mpc() */
        """
        self.qwarn = 0
        self.theta_cmb = 2.728 / 2.7  # Assuming T_cmb = 2.728 K

        # Look for strange input
        if (omega_baryon < 0.0):
            mylog.error(
                "TFmdm_set_cosm(): Negative omega_baryon set to trace amount.\n"
            )
            self.qwarn = 1
        if (omega_hdm < 0.0):
            mylog.error(
                "TFmdm_set_cosm(): Negative omega_hdm set to trace amount.\n")
            self.qwarn = 1
        if (hubble <= 0.0):
            mylog.error(
                "TFmdm_set_cosm(): Negative Hubble constant illegal.\n")
            return None
        elif (hubble > 2.0):
            mylog.error(
                "TFmdm_set_cosm(): Hubble constant should be in units of 100 km/s/Mpc.\n"
            )
            self.qwarn = 1
        if (redshift <= -1.0):
            mylog.error("TFmdm_set_cosm(): Redshift < -1 is illegal.\n")
            return None
        elif (redshift > 99.0):
            mylog.error(
                "TFmdm_set_cosm(): Large redshift entered.  TF may be inaccurate.\n"
            )
            self.qwarn = 1

        if (degen_hdm < 1): degen_hdm = 1
        self.num_degen_hdm = degen_hdm
        # Have to save this for TFmdm_onek_mpc()
        # This routine would crash if baryons or neutrinos were zero,
        # so don't allow that.
        if (omega_baryon <= 0): omega_baryon = 1e-5
        if (omega_hdm <= 0): omega_hdm = 1e-5

        self.omega_curv = 1.0 - omega_matter - omega_lambda
        self.omhh = omega_matter * SQR(hubble)
        self.obhh = omega_baryon * SQR(hubble)
        self.onhh = omega_hdm * SQR(hubble)
        self.f_baryon = omega_baryon / omega_matter
        self.f_hdm = omega_hdm / omega_matter
        self.f_cdm = 1.0 - self.f_baryon - self.f_hdm
        self.f_cb = self.f_cdm + self.f_baryon
        self.f_bnu = self.f_baryon + self.f_hdm

        # Compute the equality scale.
        self.z_equality = 25000.0 * self.omhh / SQR(SQR(
            self.theta_cmb))  # Actually 1+z_eq
        self.k_equality = 0.0746 * self.omhh / SQR(self.theta_cmb)

        # Compute the drag epoch and sound horizon
        z_drag_b1 = 0.313 * math.pow(
            self.omhh, -0.419) * (1 + 0.607 * math.pow(self.omhh, 0.674))
        z_drag_b2 = 0.238 * math.pow(self.omhh, 0.223)
        self.z_drag = 1291*math.pow(self.omhh,0.251)/(1.0+0.659*math.pow(self.omhh,0.828))* \
            (1.0+z_drag_b1*math.pow(self.obhh,z_drag_b2))
        self.y_drag = self.z_equality / (1.0 + self.z_drag)

        self.sound_horizon_fit = 44.5 * math.log(
            9.83 / self.omhh) / math.sqrt(1.0 +
                                          10.0 * math.pow(self.obhh, 0.75))

        # Set up for the free-streaming & infall growth function
        self.p_c = 0.25 * (5.0 - math.sqrt(1 + 24.0 * self.f_cdm))
        self.p_cb = 0.25 * (5.0 - math.sqrt(1 + 24.0 * self.f_cb))

        omega_denom = omega_lambda+SQR(1.0+redshift)*(self.omega_curv+\
                omega_matter*(1.0+redshift))
        self.omega_lambda_z = omega_lambda / omega_denom
        self.omega_matter_z = omega_matter * SQR(1.0 + redshift) * (
            1.0 + redshift) / omega_denom
        self.growth_k0 = self.z_equality/(1.0+redshift)*2.5*self.omega_matter_z/ \
            (math.pow(self.omega_matter_z,4.0/7.0)-self.omega_lambda_z+ \
            (1.0+self.omega_matter_z/2.0)*(1.0+self.omega_lambda_z/70.0))
        self.growth_to_z0 = self.z_equality*2.5*omega_matter/(math.pow(omega_matter,4.0/7.0) \
            -omega_lambda + (1.0+omega_matter/2.0)*(1.0+omega_lambda/70.0))
        self.growth_to_z0 = self.growth_k0 / self.growth_to_z0

        # Compute small-scale suppression
        self.alpha_nu = self.f_cdm/self.f_cb*(5.0-2.*(self.p_c+self.p_cb))/(5.-4.*self.p_cb)* \
        math.pow(1+self.y_drag,self.p_cb-self.p_c)* \
        (1+self.f_bnu*(-0.553+0.126*self.f_bnu*self.f_bnu))/ \
        (1-0.193*math.sqrt(self.f_hdm*self.num_degen_hdm)+0.169*self.f_hdm*math.pow(self.num_degen_hdm,0.2))* \
        (1+(self.p_c-self.p_cb)/2*(1+1/(3.-4.*self.p_c)/(7.-4.*self.p_cb))/(1+self.y_drag))
        self.alpha_gamma = math.sqrt(self.alpha_nu)
        self.beta_c = 1 / (1 - 0.949 * self.f_bnu)
        # Done setting scalar variables
        self.hhubble = hubble  # Need to pass Hubble constant to TFmdm_onek_hmpc()
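
The growth-factor block near the end is the Carroll, Press & Turner style fitting formula used by Eisenstein & Hu. With

\Omega_M(z) = \frac{\Omega_M (1+z)^3}{\Omega_\Lambda + (1+z)^2\left[\Omega_k + \Omega_M (1+z)\right]}, \qquad
\Omega_\Lambda(z) = \frac{\Omega_\Lambda}{\Omega_\Lambda + (1+z)^2\left[\Omega_k + \Omega_M (1+z)\right]},

the quantity growth_k0 is

D_1(z) = \frac{1+z_{\rm eq}}{1+z}\;\frac{\frac{5}{2}\,\Omega_M(z)}{\Omega_M(z)^{4/7} - \Omega_\Lambda(z) + \left[1 + \frac{\Omega_M(z)}{2}\right]\left[1 + \frac{\Omega_\Lambda(z)}{70}\right]},

and growth_to_z0 divides it by the same expression evaluated at z = 0 (recall that z_equality actually holds 1 + z_eq).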
Example 15
    def calculate_spectrum(self,
                           data_source=None,
                           star_mass=None,
                           star_creation_time=None,
                           star_metallicity_fraction=None,
                           star_metallicity_constant=None):
        """
        For the set of stars, calculate the collective spectrum.
        Attached to the output are several useful objects:
        final_spec: The collective spectrum in units of flux binned in wavelength.
        wavelength: The wavelength for the spectrum bins, in Angstroms.
        total_mass: Total mass of all the stars.
        avg_mass: Average mass of all the stars.
        avg_metal: Average metallicity of all the stars.
        :param data_source (object): A yt data_source that defines a portion
        of the volume from which to extract stars.
        :param star_mass (array, float): An array of star masses in Msun units.
        :param star_creation_time (array, float): An array of star creation
        times in code units.
        :param star_metallicity_fraction (array, float): An array of star
        metallicity fractions, in code units (which is not Z/Zsun).
        :param star_metallicity_constant (float): If desired, override the star
        metallicity fraction of all the stars to the given value.
        """
        # Initialize values
        self.final_spec = na.zeros(self.wavelength.size, dtype='float64')
        self._data_source = data_source
        self.star_mass = star_mass
        self.star_creation_time = star_creation_time
        self.star_metal = star_metallicity_fraction

        # Check to make sure we have the right set of data.
        if data_source is None:
            if self.star_mass is None or self.star_creation_time is None or \
            (star_metallicity_fraction is None and star_metallicity_constant is None):
                mylog.error("""
                If data_source is not provided, all of these parameters need to be set:
                star_mass (array, Msun),
                star_creation_time (array, code units),
                And one of:
                star_metallicity_fraction (array, code units).
                --OR--
                star_metallicity_constant (float, code units).
                """)
                return None
            if star_metallicity_constant is not None:
                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
                    star_metallicity_constant
        else:
            # Get the data we need.
            ct = self._data_source["creation_time"]
            self.star_creation_time = ct[ct > 0]
            self.star_mass = self._data_source["ParticleMassMsun"][ct > 0]
            if star_metallicity_constant is not None:
                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
                    star_metallicity_constant
            else:
                self.star_metal = self._data_source["metallicity_fraction"][
                    ct > 0]
        # Fix metallicity to units of Zsun.
        self.star_metal /= Zsun
        # Age of star in years.
        dt = (self.time_now -
              self.star_creation_time * self._pf['Time']) / YEAR
        # Figure out which METALS bin the star goes into.
        Mindex = na.digitize(dt, METALS)
        # Replace the indices with strings.
        Mname = MtoD[Mindex]
        # Figure out which age bin this star goes into.
        Aindex = na.digitize(dt, self.age)
        # Ratios used for the interpolation.
        ratio1 = (dt - self.age[Aindex - 1]) / (self.age[Aindex] -
                                                self.age[Aindex - 1])
        ratio2 = (self.age[Aindex] - dt) / (self.age[Aindex] -
                                            self.age[Aindex - 1])
        # Sort the stars by metallicity and then by age, which should reduce
        # memory access time by a little bit in the loop.
        sort = na.lexsort((Aindex, Mname))
        Mname = Mname[sort]
        Aindex = Aindex[sort]
        ratio1 = ratio1[sort]
        ratio2 = ratio2[sort]
        self.star_mass = self.star_mass[sort]
        self.star_creation_time = self.star_creation_time[sort]
        self.star_metal = self.star_metal[sort]

        # Interpolate the flux for each star, adding to the total by weight.
        for star in itertools.izip(Mname, Aindex, ratio1, ratio2,
                                   self.star_mass):
            # Pick the right age bin for the right flux array.
            flux = self.flux[star[0]][star[1], :]
            # Get the one just before the one above.
            flux_1 = self.flux[star[0]][star[1] - 1, :]
            # interpolate in log(flux), linear in time.
            int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
            # Add this flux to the total, weighted by mass.
            self.final_spec += na.power(10., int_flux) * star[4]
        # Normalize.
        self.total_mass = sum(self.star_mass)
        self.avg_mass = na.mean(self.star_mass)
        tot_metal = sum(self.star_metal * self.star_mass)
        self.avg_metal = math.log10(tot_metal / self.total_mass / Zsun)
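
The per-star interpolation is linear in age and logarithmic in flux: for a star of age t falling between tabulated ages t_1 = age[Aindex - 1] and t_2 = age[Aindex],

\log_{10} f(t) = \frac{t_2 - t}{t_2 - t_1}\,\log_{10} f(t_1) + \frac{t - t_1}{t_2 - t_1}\,\log_{10} f(t_2),

which is exactly ratio2 * log10(flux_1) + ratio1 * log10(flux) in the loop; the resulting flux is then added to the total spectrum weighted by the star's mass.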
Example 16
    def __init__(self,
                 EnzoParameterFile,
                 initial_time=None,
                 final_time=None,
                 initial_redshift=None,
                 final_redshift=None,
                 links=False,
                 enzo_parameters=None,
                 get_time_outputs=True,
                 get_redshift_outputs=True,
                 get_available_data=False):
        """
        Initialize an EnzoSimulation object.
        :param initial_time (float): the initial time in code units for the dataset list.  Default: None.
        :param final_time (float): the final time in code units for the dataset list.  Default: None.
        :param initial_redshift (float): the initial (highest) redshift for the dataset list.  Only for 
               cosmological simulations.  Default: None.
        :param final_redshift (float): the final (lowest) redshift for the dataset list.  Only for cosmological 
               simulations.  Default: None.
        :param links (bool): if True, each entry in the dataset list will contain entries, previous and next, that 
               point to the previous and next entries on the dataset list.  Default: False.
        :param enzo_parameters (dict): a dictionary specifying additional parameters to be retrieved from the 
               parameter file.  The format should be the name of the parameter as the key and the variable type as 
               the value.  For example, {'CosmologyComovingBoxSize':float}.  All parameter values will be stored in 
               the dictionary attribute, enzoParameters.  Default: None.
        :param get_time_outputs (bool): if False, the time datasets, specified in Enzo with the dtDataDump, will not 
               be added to the dataset list.  Default: True.
        :param get_redshift_outputs (bool): if False, the redshift datasets will not be added to the dataset list.  Default: True.
        :param get_available_data (bool): if True, only datasets that are found to exist at the file path are added 
               to the list.  Default: False.
        """
        self.EnzoParameterFile = EnzoParameterFile
        self.enzoParameters = {}
        self.redshiftOutputs = []
        self.timeOutputs = []
        self.allOutputs = []
        self.InitialTime = initial_time
        self.FinalTime = final_time
        self.InitialRedshift = initial_redshift
        self.FinalRedshift = final_redshift
        self.links = links
        self.get_time_outputs = get_time_outputs
        self.get_redshift_outputs = get_redshift_outputs
        self.get_available_data = get_available_data

        # Add any extra parameters to parameter dict.
        if enzo_parameters is None: enzo_parameters = {}
        EnzoParameterDict.update(enzo_parameters)

        # Set some parameter defaults.
        self._SetParameterDefaults()

        # Read parameters.
        self._ReadEnzoParameterFile()

        # Check for sufficient starting/ending parameters.
        if self.InitialTime is None and self.InitialRedshift is None:
            if self.enzoParameters['ComovingCoordinates'] and \
               'CosmologyInitialRedshift' in self.enzoParameters:
                self.InitialRedshift = self.enzoParameters[
                    'CosmologyInitialRedshift']
            elif 'InitialTime' in self.enzoParameters:
                self.InitialTime = self.enzoParameters['InitialTime']
            else:
                mylog.error(
                    "Couldn't find parameter for initial time or redshift from parameter file."
                )
                return None

        if self.FinalTime is None and self.FinalRedshift is None:
            if self.enzoParameters['ComovingCoordinates'] and \
               'CosmologyFinalRedshift' in self.enzoParameters:
                self.FinalRedshift = self.enzoParameters[
                    'CosmologyFinalRedshift']
            elif 'StopTime' in self.enzoParameters:
                self.FinalTime = self.enzoParameters['StopTime']
            else:
                mylog.error(
                    "Couldn't find parameter for final time or redshift from parameter file."
                )
                return None

        # Convert initial/final redshifts to times.
        if self.enzoParameters['ComovingCoordinates']:
            # Instantiate a cosmology calculator.
            self.cosmology = lagos.Cosmology(
                HubbleConstantNow=(
                    100.0 * self.enzoParameters['CosmologyHubbleConstantNow']),
                OmegaMatterNow=self.enzoParameters['CosmologyOmegaMatterNow'],
                OmegaLambdaNow=self.enzoParameters['CosmologyOmegaLambdaNow'])

            # Instantiate EnzoCosmology object for units and time conversions.
            self.enzo_cosmology = lagos.EnzoCosmology(
                HubbleConstantNow=(
                    100.0 * self.enzoParameters['CosmologyHubbleConstantNow']),
                OmegaMatterNow=self.enzoParameters['CosmologyOmegaMatterNow'],
                OmegaLambdaNow=self.enzoParameters['CosmologyOmegaLambdaNow'],
                InitialRedshift=self.enzoParameters['CosmologyInitialRedshift']
            )
            if self.InitialRedshift is not None:
                self.InitialTime = self.enzo_cosmology.ComputeTimeFromRedshift(self.InitialRedshift) / \
                    self.enzo_cosmology.TimeUnits
            if self.FinalRedshift is not None:
                self.FinalTime = self.enzo_cosmology.ComputeTimeFromRedshift(self.FinalRedshift) / \
                    self.enzo_cosmology.TimeUnits

        # Get initial time of simulation.
        if self.enzoParameters['ComovingCoordinates'] and \
                'CosmologyInitialRedshift' in self.enzoParameters:
            self.SimulationInitialTime = self.enzo_cosmology.InitialTime / self.enzo_cosmology.TimeUnits
        elif 'InitialTime' in self.enzoParameters:
            self.SimulationInitialTime = self.enzoParameters['InitialTime']
        else:
            self.SimulationInitialTime = 0.0

        # Combine all data dumps.
        self._CombineDataOutputs()
Example 17
    def make_projections(self,
                         axes=[0, 1, 2],
                         halo_list='filtered',
                         save_images=False,
                         save_cube=True,
                         **kwargs):
        "Make projections of all halos using specified fields."

        # Get list of halos for projecting.
        if halo_list == 'filtered':
            self._halo_projection_list = self.filtered_halos
        elif halo_list == 'all':
            self._halo_projection_list = self.all_halos
        elif isinstance(halo_list, types.StringType):
            self._halo_projection_list = self._read_halo_list(halo_list)
        elif isinstance(halo_list, types.ListType):
            self._halo_projection_list = halo_list
        else:
            mylog.error(
                "Keyword, halo_list', must be 'filtered', 'all', a filename, or an actual list."
            )
            return

        if len(self._halo_projection_list) == 0:
            mylog.error("Halo list for projections is empty.")
            return

        # Set resolution for fixed resolution output.
        if save_cube:
            if self.project_at_level == 'max':
                proj_level = self.pf.h.max_level
            else:
                proj_level = int(self.project_at_level)
            proj_dx = self.pf.units[self.projection_width_units] / self.pf.parameters['TopGridDimensions'][0] / \
                (self.pf.parameters['RefineBy']**proj_level)
            projectionResolution = int(self.projection_width / proj_dx)

        outputDir = "%s/%s" % (self.pf.fullpath, self.projection_output_dir)
        self.__check_directory(outputDir)

        center = [
            0.5 * (self.pf.parameters['DomainLeftEdge'][w] +
                   self.pf.parameters['DomainRightEdge'][w])
            for w in range(self.pf.parameters['TopGridRank'])
        ]

        # Create a plot collection.
        pc = raven.PlotCollection(self.pf, center=center)

        for halo in self._get_objs('_halo_projection_list', round_robin=True):
            if halo is None:
                continue
            # Check if region will overlap domain edge.
            # Using non-periodic regions is faster than using periodic ones.
            leftEdge = [(halo['center'][w] - 0.5 * self.projection_width /
                         self.pf.units[self.projection_width_units])
                        for w in range(len(halo['center']))]
            rightEdge = [(halo['center'][w] + 0.5 * self.projection_width /
                          self.pf.units[self.projection_width_units])
                         for w in range(len(halo['center']))]

            mylog.info(
                "Projecting halo %04d in region: [%f, %f, %f] to [%f, %f, %f]."
                % (halo['id'], leftEdge[0], leftEdge[1], leftEdge[2],
                   rightEdge[0], rightEdge[1], rightEdge[2]))

            need_per = False
            for w in range(len(halo['center'])):
                if ((leftEdge[w] < self.pf.parameters['DomainLeftEdge'][w]) or
                    (rightEdge[w] > self.pf.parameters['DomainRightEdge'][w])):
                    need_per = True
                    break

            if need_per:
                region = self.pf.h.periodic_region(halo['center'], leftEdge,
                                                   rightEdge)
            else:
                region = self.pf.h.region(halo['center'], leftEdge, rightEdge)

            # Make projections.
            if not isinstance(axes, types.ListType): axes = list([axes])
            for w in axes:
                # YT projections do not follow the right-hand rule.
                coords = range(3)
                del coords[w]
                x_axis = coords[0]
                y_axis = coords[1]

                for hp in self.projection_fields:
                    pc.add_projection(hp['field'],
                                      w,
                                      weight_field=hp['weight_field'],
                                      source=region,
                                      lazy_reader=False,
                                      serialize=False,
                                      **kwargs)

                # Set x and y limits, shift image if it overlaps domain boundary.
                if need_per:
                    pw = self.projection_width / self.pf.units[
                        self.projection_width_units]
                    shift_projections(self.pf, pc, halo['center'], center, w)
                    # Projection has now been shifted to center of box.
                    proj_left = [
                        center[x_axis] - 0.5 * pw, center[y_axis] - 0.5 * pw
                    ]
                    proj_right = [
                        center[x_axis] + 0.5 * pw, center[y_axis] + 0.5 * pw
                    ]
                else:
                    proj_left = [leftEdge[x_axis], leftEdge[y_axis]]
                    proj_right = [rightEdge[x_axis], rightEdge[y_axis]]

                pc.set_xlim(proj_left[0], proj_right[0])
                pc.set_ylim(proj_left[1], proj_right[1])

                # Save projection data to hdf5 file.
                if save_cube:
                    axis_labels = ['x', 'y', 'z']
                    dataFilename = "%s/Halo_%04d_%s_data.h5" % \
                            (outputDir, halo['id'], axis_labels[w])
                    mylog.info("Saving projection data to %s." % dataFilename)

                    output = h5py.File(dataFilename, "a")
                    # Create fixed resolution buffer for each projection and write them out.
                    for e, hp in enumerate(self.projection_fields):
                        frb = raven.FixedResolutionBuffer(
                            pc.plots[e].data, (proj_left[0], proj_right[0],
                                               proj_left[1], proj_right[1]),
                            (projectionResolution, projectionResolution),
                            antialias=False)
                        dataset_name = "%s_%s" % (hp['field'],
                                                  hp['weight_field'])
                        if dataset_name in output.listnames():
                            del output[dataset_name]
                        output.create_dataset(dataset_name,
                                              data=frb[hp['field']])
                    output.close()

                if save_images:
                    pc.save("%s/Halo_%04d" % (outputDir, halo['id']),
                            force_save=True)

                pc.clear_plots()
            del region
        del pc
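
The coords bookkeeping above selects the two remaining axes, in ascending order, as the image plane for a projection along axis w; this is why the comment notes that yt projections do not follow the right-hand rule, since axis 1 yields an (x, z) plane rather than the right-handed (z, x). A standalone illustration:

for w in range(3):
    coords = [0, 1, 2]
    coords.remove(w)          # drop the projection axis
    x_axis, y_axis = coords   # remaining axes, in ascending order
    print("axis %d -> image plane (%d, %d)" % (w, x_axis, y_axis))
# axis 0 -> image plane (1, 2)
# axis 1 -> image plane (0, 2)   (right-handed would be (2, 0))
# axis 2 -> image plane (0, 1)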
Example 18
    def __init__(self,
                 dataset,
                 halos='multiple',
                 halo_list_file='HopAnalysis.out',
                 halo_list_format='yt_hop',
                 halo_finder_function=HaloFinder,
                 halo_finder_args=None,
                 halo_finder_kwargs=None,
                 use_density_center=False,
                 density_center_exponent=1.0,
                 use_field_max_center=None,
                 halo_radius=0.1,
                 radius_units='1',
                 n_profile_bins=50,
                 profile_output_dir='radial_profiles',
                 projection_output_dir='projections',
                 projection_width=8.0,
                 projection_width_units='mpc',
                 project_at_level='max',
                 velocity_center=['bulk', 'halo'],
                 filter_quantities=['id', 'center']):
        """
        Initialize a HaloProfiler object.
        :param halos (str): "multiple" for profiling more than one halo.  In this mode halos are read in 
               from a list or identified with a halo finder.  In "single" mode, the one and only halo 
               center is identified automatically as the location of the peak in the density field.  
               Default: "multiple".
        :param halo_list_file (str): name of file containing the list of halos.  The HaloProfiler will 
               look for this file in the data directory.  Default: "HopAnalysis.out".
        :param halo_list_format (str or dict): the format of the halo list file.  "yt_hop" for the format 
               given by yt's halo finders.  "enzo_hop" for the format written by enzo_hop.  This keyword 
               can also be given in the form of a dictionary specifying the column in which various 
               properties can be found.  For example, {"id": 0, "center": [1, 2, 3], "mass": 4, "radius": 5}.  
               Default: "yt_hop".
        :param halo_finder_function (function): If halos is set to multiple and the file given by 
               halo_list_file does not exist, the halo finding function specified here will be called.  
               Default: HaloFinder (yt_hop).
        :param halo_finder_args (tuple): args given with call to halo finder function.  Default: None.
        :param halo_finder_kwargs (dict): kwargs given with call to halo finder function. Default: None.
        :param use_density_center (bool): re-center halos before performing profiles with a center of mass 
               weighted by overdensity.  This is generally not needed.  Default: False.
        :param density_center_exponent (float): when use_density_center set to True, this specifies the 
               exponent, alpha, such that the halo center calculation is weighted by overdensity^alpha.  
               Default: 1.0.
        :param use_field_max_center (str): another alternative for halo re-centering by selecting the 
               location of the maximum of the field given by this keyword.  This is generally not needed.  
               Default: None.
        :param halo_radius (float): if no halo radii are provided in the halo list file, this parameter is 
               used to specify the radius out to which radial profiles will be made.  This keyword is also 
               used when halos is set to single.  Default: 0.1.
        :param radius_units (str): the units of halo_radius.  Default: "1" (code units).
        :param n_profile_bins (int): the number of bins in the radial profiles.  Default: 50.
        :param profile_output_dir (str): the subdirectory, inside the data directory, in which radial profile 
               output files will be created.  The directory will be created if it does not exist.  
               Default: "radial_profiles".
        :param projection_output_dir (str): the subdirectory, inside the data directory, in which projection 
               output files will be created.  The directory will be created if it does not exist.  
               Default: "projections".
        :param projection_width (float): the width of halo projections.  Default: 8.0.
        :param projection_width_units (str): the units of projection_width. Default: "mpc".
        :param project_at_level (int or "max"): the maximum refinement level to be included in projections.  
               Default: "max" (maximum level within the dataset).
        :param velocity_center (list): the method by which the halo bulk velocity is calculated (used for 
               calculation of radial and tangential velocities).  Valid options are:
                  - ["bulk", "halo"] (default): the velocity provided in the halo list.
                  - ["bulk", "sphere"]: the bulk velocity of the sphere centered on the halo center.
                  - ["max", field]: the velocity of the cell that is the location of the maximum of the field 
                                    specified (used only when halos is set to "single").
        :param filter_quantities (list): quantities from the original halo list file to be written out in the 
               filtered list file.  Default: ['id','center'].
        """

        self.dataset = dataset

        self.profile_output_dir = profile_output_dir
        self.projection_output_dir = projection_output_dir
        self.n_profile_bins = n_profile_bins
        self.projection_width = projection_width
        self.projection_width_units = projection_width_units
        self.project_at_level = project_at_level
        self.filter_quantities = filter_quantities
        if self.filter_quantities is None: self.filter_quantities = []

        self.profile_fields = []
        self.projection_fields = []

        self._halo_filters = []
        self.all_halos = []
        self.filtered_halos = []
        self._projection_halo_list = []

        # Set halo finder function and parameters, if needed.
        self.halo_finder_function = halo_finder_function
        self.halo_finder_args = halo_finder_args
        if self.halo_finder_args is None: self.halo_finder_args = ()
        self.halo_finder_kwargs = halo_finder_kwargs
        if self.halo_finder_kwargs is None: self.halo_finder_kwargs = {}

        # Set option to get halos from hop or single halo at density maximum.
        # multiple: get halos from hop
        # single: get single halo from density maximum
        self.halos = halos
        if self.halos not in ('multiple', 'single'):
            mylog.error(
                "Keyword, halos, must be either 'single' or 'multiple'.")
            return None

        # Set halo list format.
        # 'yt_hop': yt hop output.
        # 'enzo_hop': enzo_hop output.
        # dictionary: a dictionary containing fields and their corresponding columns.
        self.halo_list_file = halo_list_file
        if halo_list_format == 'yt_hop':
            self.halo_list_format = {
                'id': 0,
                'mass': 1,
                'center': [7, 8, 9],
                'velocity': [10, 11, 12],
                'r_max': 13
            }
        elif halo_list_format == 'enzo_hop':
            self.halo_list_format = {'id': 0, 'center': [4, 5, 6]}
        elif isinstance(halo_list_format, types.DictType):
            self.halo_list_format = halo_list_format
        else:
            mylog.error(
                "Keyword, halo_list_format, must be 'yt_hop', 'enzo_hop', or a dictionary of custom settings."
            )
            return None

        # Option to recenter sphere on density center.
        self.use_density_center = use_density_center
        self.density_center_exponent = density_center_exponent
        if self.use_density_center:

            def _MatterDensityXTotalMass(field, data):
                return na.power(
                    (data['Matter_Density'] * data['TotalMassMsun']),
                    self.density_center_exponent)

            def _Convert_MatterDensityXTotalMass(data):
                return 1

            lagos.add_field("MatterDensityXTotalMass",
                            units=r"",
                            function=_MatterDensityXTotalMass,
                            convert_function=_Convert_MatterDensityXTotalMass)

        # Option to recenter sphere on the location of a field max.
        self.use_field_max_center = use_field_max_center
        if self.use_field_max_center is not None:
            self.use_density_center = False

        # Look for any field that might need to have the bulk velocity set.
        self._need_bulk_velocity = False
        for field in [hp['field'] for hp in self.profile_fields]:
            if 'Velocity' in field or 'Mach' in field:
                self._need_bulk_velocity = True
                break

        # Check validity for VelocityCenter parameter which toggles how the
        # velocity is zeroed out for radial velocity profiles.
        self.velocity_center = velocity_center[:]
        if self.velocity_center[0] == 'bulk':
            if self.velocity_center[1] == 'halo' and \
                    self.halos == 'single':
                mylog.error(
                    "Parameter, VelocityCenter, must be set to 'bulk sphere' or 'max <field>' with halos flag set to 'single'."
                )
                return None
            if self.velocity_center[1] == 'halo' and \
                    halo_list_format == 'enzo_hop':  # compare the original keyword; self.halo_list_format is now a dict
                mylog.error(
                    "Parameter, VelocityCenter, must be 'bulk sphere' for old style hop output files."
                )
                return None
            if not (self.velocity_center[1] == 'halo'
                    or self.velocity_center[1] == 'sphere'):
                mylog.error(
                    "Second value of VelocityCenter must be either 'halo' or 'sphere' if first value is 'bulk'."
                )
                return None
        elif self.velocity_center[0] == 'max':
            if self.halos == 'multiple':
                mylog.error(
                    "Getting velocity center from a max field value only works with halos='single'."
                )
                return None
        else:
            mylog.error(
                "First value of parameter, VelocityCenter, must be either 'bulk' or 'max'."
            )
            return None

        # Create dataset object.
        self.pf = lagos.EnzoStaticOutput(self.dataset)
        self.pf.h  # Instantiate the hierarchy.
        if self.halos == 'single' or 'r_max' not in self.halo_list_format:
            self.halo_radius = halo_radius / self.pf[radius_units]

        # Get halo(s).
        if self.halos == 'single':
            v, center = self.pf.h.find_max('Density')
            singleHalo = {}
            singleHalo['center'] = center
            singleHalo['r_max'] = self.halo_radius * self.pf.units['mpc']
            singleHalo['id'] = 0
            self.all_halos.append(singleHalo)
        elif self.halos == 'multiple':
            # Get hop data.
            self._load_halo_data()
            if len(self.all_halos) == 0:
                mylog.error("No halos loaded, there will be nothing to do.")
                return None
        else:
            mylog.error(
                "I don't know whether to get halos from hop or from density maximum.  This should not have happened."
            )
            return None
Example 19
    def _get_halo_profile(self,
                          halo,
                          filename,
                          virial_filter=True,
                          force_write=False):
        """
        Profile a single halo and write profile data to a file.
        If file already exists, read profile data from file.
        Return a dictionary of id, center, and virial quantities if virial_filter is True.
        """

        # Read profile from file if it already exists.
        # If not, profile will be None.
        profile = self._read_profile(filename)

        # Make profile if necessary.
        newProfile = profile is None
        if newProfile:

            r_min = 2 * self.pf.h.get_smallest_dx() * self.pf['mpc']
            if (halo['r_max'] / r_min < PROFILE_RADIUS_THRESHOLD):
                mylog.error("Skipping halo with r_max / r_min = %f." %
                            (halo['r_max'] / r_min))
                return None

            sphere = self.pf.h.sphere(halo['center'],
                                      halo['r_max'] / self.pf.units['mpc'])
            if len(sphere._grids) == 0: return None
            new_sphere = False

            if self.use_density_center:
                dc_x = sphere.quantities['WeightedAverageQuantity'](
                    'x', 'MatterDensityXTotalMass')
                dc_y = sphere.quantities['WeightedAverageQuantity'](
                    'y', 'MatterDensityXTotalMass')
                dc_z = sphere.quantities['WeightedAverageQuantity'](
                    'z', 'MatterDensityXTotalMass')
                mylog.info("Moving halo center from %s to %s." %
                           (halo['center'], [dc_x, dc_y, dc_z]))
                halo['center'] = [dc_x, dc_y, dc_z]
                new_sphere = True

            if self.use_field_max_center is not None:
                ma, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](
                    self.use_field_max_center)
                mylog.info("Moving halo center from %s to %s." %
                           (halo['center'], [mx, my, mz]))
                halo['center'] = [mx, my, mz]
                new_sphere = True

            if new_sphere:
                # Temporary solution to memory leak.
                for g in self.pf.h.grids:
                    g.clear_data()
                sphere.clear_data()
                del sphere
                sphere = self.pf.h.sphere(halo['center'],
                                          halo['r_max'] / self.pf.units['mpc'])

            if self._need_bulk_velocity:
                # Set bulk velocity to zero out radial velocity profiles.
                if self.velocity_center[0] == 'bulk':
                    if self.velocity_center[1] == 'halo':
                        sphere.set_field_parameter('bulk_velocity',
                                                   halo['velocity'])
                    elif self.velocity_center[1] == 'sphere':
                        sphere.set_field_parameter(
                            'bulk_velocity',
                            sphere.quantities['BulkVelocity']())
                    else:
                        mylog.error("Invalid parameter: VelocityCenter.")
                elif self.velocity_center[0] == 'max':
                    max_grid, max_cell, max_value, max_location = self.pf.h.find_max_cell_location(
                        self.velocity_center[1])
                    sphere.set_field_parameter('bulk_velocity', [
                        max_grid['x-velocity'][max_cell],
                        max_grid['y-velocity'][max_cell],
                        max_grid['z-velocity'][max_cell]
                    ])

            profile = lagos.BinnedProfile1D(sphere,
                                            self.n_profile_bins,
                                            "RadiusMpc",
                                            r_min,
                                            halo['r_max'],
                                            log_space=True,
                                            lazy_reader=False)
            for hp in self.profile_fields:
                profile.add_fields(hp['field'],
                                   weight=hp['weight_field'],
                                   accumulation=hp['accumulation'])

        if virial_filter:
            self._add_actual_overdensity(profile)

        if newProfile:
            mylog.info("Writing halo %d" % halo['id'])
            profile.write_out(filename, format='%0.6e')
        elif force_write:
            mylog.info("Re-writing halo %d" % halo['id'])
            self._write_profile(profile, filename, format='%0.6e')

        if newProfile:
            # Temporary solution to memory leak.
            for g in self.pf.h.grids:
                g.clear_data()
            sphere.clear_data()
            del sphere

        return profile
Example 20
    def __init__(self,
                 EnzoParameterFile,
                 initial_redshift=1.0,
                 final_redshift=0.0,
                 observer_redshift=0.0,
                 field_of_view_in_arcminutes=600.0,
                 image_resolution_in_arcseconds=60.0,
                 use_minimum_datasets=True,
                 deltaz_min=0.0,
                 minimum_coherent_box_fraction=0.0,
                 output_dir='LC',
                 output_prefix='LightCone'):
        """
        Initialize a LightCone object.
        :param EnzoParameterFile (str): the Enzo parameter file of the simulation from which datasets are taken.
        :param initial_redshift (float): the initial (highest) redshift for the light cone.  Default: 1.0.
        :param final_redshift (float): the final (lowest) redshift for the light cone.  Default: 0.0.
        :param observer_redshift (float): the redshift of the observer.  Default: 0.0.
        :param field_of_view_in_arcminutes (float): the field of view of the image in units of arcminutes.  
               Default: 600.0.
        :param image_resolution_in_arcseconds (float): the size of each image pixel in units of arcseconds.  
               Default: 60.0.
        :param use_minimum_datasets (bool): if True, the minimum number of datasets is used to connect the 
               initial and final redshift.  If False, the light cone solution will contain as many entries 
               as possible within the redshift interval.  Default: True.
        :param deltaz_min (float): specifies the minimum :math:`\Delta z` between consecutive datasets in 
               the returned list.  Default: 0.0.
        :param minimum_coherent_box_fraction (float): used with use_minimum_datasets set to False, this 
               parameter specifies the fraction of the total box size to be traversed before rerandomizing 
               the projection axis and center.  This was invented to allow light cones with thin slices to 
               sample coherent large scale structure, but in practice does not work so well.  Try setting 
               this parameter to 1 and see what happens.  Default: 0.0.
        :param output_dir (str): the directory in which images and data files will be written.  Default: 'LC'.
        :param output_prefix (str): the prefix of all images and data files.  Default: 'LightCone'.
        """

        self.initial_redshift = initial_redshift
        self.final_redshift = final_redshift
        self.observer_redshift = observer_redshift
        self.field_of_view_in_arcminutes = field_of_view_in_arcminutes
        self.image_resolution_in_arcseconds = image_resolution_in_arcseconds
        self.use_minimum_datasets = use_minimum_datasets
        self.deltaz_min = deltaz_min
        self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
        self.output_dir = output_dir
        self.output_prefix = output_prefix

        self.master_solution = []  # kept to compare with recycled solutions
        self.projection_stack = []
        self.projection_weight_field_stack = []
        self.halo_mask = []

        # Original random seed of the first solution.
        self.originalRandomSeed = 0

        # Parameters for recycling light cone solutions.
        self.recycleSolution = False
        self.recycleRandomSeed = 0

        # Initialize EnzoSimulation machinery for getting dataset list.
        EnzoSimulation.__init__(
            self,
            EnzoParameterFile,
            initial_redshift=self.initial_redshift,
            final_redshift=self.final_redshift,
            links=True,
            enzo_parameters={'CosmologyComovingBoxSize': float})

        # Calculate number of pixels.
        self.pixels = int(self.field_of_view_in_arcminutes * 60.0 / \
                          self.image_resolution_in_arcseconds)

        if ytcfg.getint("yt", "__parallel_rank") == 0:
            # Create output directory.
            if (os.path.exists(self.output_dir)):
                if not (os.path.isdir(self.output_dir)):
                    mylog.error(
                        "Output directory exists, but is not a directory: %s."
                        % self.output_dir)
                    self.output_dir = './'
            else:
                os.mkdir(self.output_dir)

        # Get list of datasets for light cone solution.
        self.light_cone_solution = self.create_cosmology_splice(
            minimal=self.use_minimum_datasets, deltaz_min=self.deltaz_min)
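
As a usage sketch (the parameter file path and redshifts are hypothetical), note that the pixel count follows directly from the field of view and resolution: 600.0 arcmin * 60.0 / 60.0 arcsec = 600 pixels per side.

# Hypothetical instantiation; 'simulation.par' is a placeholder path.
lc = LightCone('simulation.par',
               initial_redshift=0.4,
               final_redshift=0.0,
               field_of_view_in_arcminutes=600.0,
               image_resolution_in_arcseconds=60.0,
               output_dir='LC',
               output_prefix='LightCone')
# lc.pixels == int(600.0 * 60.0 / 60.0) == 600
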
Esempio n. 21
0
    def project_light_cone(self,
                           field,
                           weight_field=None,
                           apply_halo_mask=False,
                           node=None,
                           save_stack=True,
                           save_slice_images=False,
                           flatten_stack=False,
                           photon_field=False,
                           **kwargs):
        """
        Create projections for light cone, then add them together.
        :param weight_field (str): the weight field of the projection.  This has the same meaning as in standard 
               projections.  Default: None.
        :param apply_halo_mask (bool): if True, a boolean mask is apply to the light cone projection.  See below for a 
               description of halo masks.  Default: False.
        :param node (str): a prefix to be prepended to the node name under which the projection data is serialized.  
               Default: None.
        :param save_stack (bool): if True, the unflatted light cone data including each individual slice is written to 
               an hdf5 file.  Default: True.
        :param save_slice_images (bool): save images for each individual projection slice.  Default: False.
        :param flatten_stack (bool): if True, the light cone stack is continually flattened each time a slice is added 
               in order to save memory.  This is generally not necessary.  Default: False.
        :param photon_field (bool): if True, the projection data for each slice is decremented by 4 Pi R^2`, where R 
               is the luminosity distance between the observer and the slice redshift.  Default: False.
        """

        # Clear projection stack.
        self.projection_stack = []
        self.projection_weight_field_stack = []
        if ('object' in self.light_cone_solution[-1]):
            del self.light_cone_solution[-1]['object']

        if not (self.output_dir.endswith("/")):
            self.output_dir += "/"

        for q, output in enumerate(self.light_cone_solution):
            if node is None:
                name = "%s%s_%04d_%04d" % (self.output_dir, self.output_prefix,
                                           q, len(self.light_cone_solution))
            else:
                name = "%s%s_%s_%04d_%04d" % (self.output_dir,
                                              self.output_prefix, node, q,
                                              len(self.light_cone_solution))
            output['object'] = lagos.EnzoStaticOutput(output['filename'])
            frb = LightConeProjection(output,
                                      field,
                                      self.pixels,
                                      weight_field=weight_field,
                                      save_image=save_slice_images,
                                      name=name,
                                      node=node,
                                      **kwargs)
            if ytcfg.getint("yt", "__parallel_rank") == 0:
                if photon_field:
                    # Scale the flux by pixel area / (4 pi dL^2).  Assume the
                    # field in frb is in erg/s/cm^2/Hz.
                    co = lagos.Cosmology(
                        HubbleConstantNow=(
                            100.0 *
                            self.enzoParameters['CosmologyHubbleConstantNow']),
                        OmegaMatterNow=self.
                        enzoParameters['CosmologyOmegaMatterNow'],
                        OmegaLambdaNow=self.
                        enzoParameters['CosmologyOmegaLambdaNow'])
                    dL = co.LuminosityDistance(
                        self.observer_redshift, output['redshift'])  # in Mpc
                    boxSizeProper = self.enzoParameters[
                        'CosmologyComovingBoxSize'] / (
                            self.enzoParameters['CosmologyHubbleConstantNow'] *
                            (1.0 + output['redshift']))  # in proper Mpc
                    pixelarea = (boxSizeProper /
                                 self.pixels)**2  # in proper Mpc^2
                    factor = pixelarea / (4.0 * na.pi * dL**2)
                    mylog.info("Distance to slice = %e Mpc" % dL)
                    # Still in erg/s/cm^2/Hz, now on the observer's image plane.
                    frb[field] *= factor

            if ytcfg.getint("yt", "__parallel_rank") == 0:
                if weight_field is not None:
                    # Data come back normalized by the weight field.
                    # Undo that so it can be added up for the light cone.
                    self.projection_stack.append(frb[field] *
                                                 frb['weight_field'])
                    self.projection_weight_field_stack.append(
                        frb['weight_field'])
                else:
                    self.projection_stack.append(frb[field])

                # Delete the frb.  This saves a decent amount of ram.
                if (q < len(self.light_cone_solution) - 1):
                    del frb

                # Flatten stack to save memory.
                if flatten_stack and (len(self.projection_stack) > 1):
                    self.projection_stack = [sum(self.projection_stack)]
                    if weight_field is not None:
                        self.projection_weight_field_stack = [
                            sum(self.projection_weight_field_stack)
                        ]

            # Delete the plot collection now that the frb is deleted.
            del output['pc']

            # Unless this is the last slice, delete the dataset object.
            # The last one will be saved to make the plot collection.
            if (q < len(self.light_cone_solution) - 1):
                del output['object']

        if ytcfg.getint("yt", "__parallel_rank") == 0:
            # Add up slices to make light cone projection.
            if (weight_field is None):
                lightConeProjection = sum(self.projection_stack)
            else:
                lightConeProjection = sum(self.projection_stack) / sum(
                    self.projection_weight_field_stack)

            if node is None:
                filename = "%s%s" % (self.output_dir, self.output_prefix)
            else:
                filename = "%s%s_%s" % (self.output_dir, self.output_prefix,
                                        node)

            # Save the last fixed resolution buffer for the plot collection,
            # but replace the data with the full light cone projection data.
            frb.data[field] = lightConeProjection

            # Write stack to hdf5 file.
            if save_stack:
                self._save_light_cone_stack(field=field,
                                            weight_field=weight_field,
                                            filename=filename)

            # Apply halo mask.
            if apply_halo_mask:
                if len(self.halo_mask) > 0:
                    mylog.info("Applying halo mask.")
                    frb.data[field] *= self.halo_mask
                else:
                    mylog.error("No halo mask loaded, call get_halo_mask.")

            # Make a plot collection for the light cone projection.
            center = [
                0.5 *
                (self.light_cone_solution[-1]['object'].
                 parameters['DomainLeftEdge'][w] + self.light_cone_solution[-1]
                 ['object'].parameters['DomainRightEdge'][w])
                for w in range(self.light_cone_solution[-1]
                               ['object'].parameters['TopGridRank'])
            ]
            pc = raven.PlotCollection(self.light_cone_solution[-1]['object'],
                                      center=center)
            pc.add_fixed_resolution_plot(frb, field, **kwargs)
            pc.save(filename)

            # Return the plot collection so the user can remake the plot if they want.
            return pc
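
A hedged sketch of calling the method above on the hypothetical instance from earlier; 'Density' is an illustrative field name, not prescribed by this code.

# Project the light cone in an unweighted field and keep the
# per-slice stack on disk for later inspection.
pc = lc.project_light_cone('Density',
                           weight_field=None,
                           save_stack=True,
                           save_slice_images=False)
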
Esempio n. 22
0
def VirialFilter(profile,
                 overdensity_field='ActualOverdensity',
                 virial_overdensity=200.,
                 must_be_virialized=True,
                 virial_filters=[['TotalMassMsun', '>=', '1e14']],
                 virial_quantities=['TotalMassMsun', 'RadiusMpc'],
                 virial_index=None):
    """
    Filter halos by virial quantities.
    The return value is a two-element list: True or False indicating whether 
    the halo passed the filter, and a dictionary of virial quantities for the 
    fields specified in the virial_quantities keyword.  Thresholds for virial 
    quantities are given with the virial_filters keyword in the following way: 
    [field, condition, value].
    """

    fields = deepcopy(virial_quantities)
    if virial_filters is None: virial_filters = []
    for vfilter in virial_filters:
        if not vfilter[0] in fields:
            fields.append(vfilter[0])

    overDensity = []
    temp_profile = {}
    for field in fields:
        temp_profile[field] = []

    for q in range(len(profile[overdensity_field])):
        good = True
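        # NaN is the only value for which x != x, so the comparisons below
        # flag bins with undefined profile values.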
        if (profile[overdensity_field][q] != profile[overdensity_field][q]):
            good = False
            continue
        for field in fields:
            if (profile[field][q] != profile[field][q]):
                good = False
                break
        if good:
            overDensity.append(profile[overdensity_field][q])
            for field in fields:
                temp_profile[field].append(profile[field][q])

    virial = {}
    for field in fields:
        virial[field] = 0.0

    if (not (na.array(overDensity) >= virial_overdensity).any()) and \
            must_be_virialized:
        mylog.error("This halo is not virialized!")
        return [False, {}]

    if (len(overDensity) < 2):
        mylog.error("Skipping halo with no valid points in profile.")
        return [False, {}]

    if (overDensity[1] <= virial_overdensity):
        index = 0
    elif (overDensity[-1] >= virial_overdensity):
        index = -2
    else:
        for q in range(2, len(overDensity)):
            if (overDensity[q] < virial_overdensity):
                index = q - 1
                break

    if type(virial_index) is list:
        virial_index.append(index)

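    # Linearly interpolate each field across the two bins that bracket the
    # virial overdensity to get its value exactly at virial_overdensity.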
    for field in fields:
        if (overDensity[index + 1] - overDensity[index]) == 0:
            mylog.error("Overdensity profile has slope of zero.")
            return [False, {}]
        else:
            slope = (temp_profile[field][index+1] - temp_profile[field][index]) / \
                (overDensity[index+1] - overDensity[index])
            value = slope * (virial_overdensity - overDensity[index]) + \
                temp_profile[field][index]
            virial[field] = value

    for vfilter in virial_filters:
        if eval("%s %s %s" % (virial[vfilter[0]], vfilter[1], vfilter[2])):
            mylog.debug(
                "(%s %s %s) returned True for %s." %
                (vfilter[0], vfilter[1], vfilter[2], virial[vfilter[0]]))
            continue
        else:
            mylog.debug(
                "(%s %s %s) returned False for %s." %
                (vfilter[0], vfilter[1], vfilter[2], virial[vfilter[0]]))
            return [False, {}]

    return [True, dict((q, virial[q]) for q in virial_quantities)]
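
A hedged usage sketch; profile stands for a radial profile object like the ones built above, and the mass threshold is illustrative.

# Keep only halos whose interpolated virial mass at an overdensity
# of 200 is at least 1e14 Msun.
passed, quantities = VirialFilter(profile,
                                  virial_overdensity=200.,
                                  virial_filters=[['TotalMassMsun', '>=', '1e14']],
                                  virial_quantities=['TotalMassMsun', 'RadiusMpc'])
if passed:
    print quantities['TotalMassMsun'], quantities['RadiusMpc']
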
Esempio n. 23
0
def find_unique_solutions(lightcone1,
                          solutions=100,
                          seed=None,
                          max_overlap=0.25,
                          failures=10,
                          recycle=True,
                          filename='unique.dat'):
    "Find a set of random seeds that will give light cones will minimal volume overlap."

    lightcone2 = copy.deepcopy(lightcone1)
    lightcone1.calculate_light_cone_solution(seed=0)
    lightcone2.calculate_light_cone_solution(seed=0)

    uniqueSeeds = []
    if recycle:
        master = None
    newRecycleSeed = None
    fails = 0
    recycleFails = 0

    maxCommon = 0.0

    # Need to continually save and reset the state of the random number generator
    # since it is being reset by the light cone generator.
    if seed is None:
        state = None
    else:
        rand.seed(seed)
        state = rand.getstate()

    failDigits = str(int(na.log10(failures)) + 1)

    while (len(uniqueSeeds) < solutions):
        # Create new random seed.
        if (recycle and master is not None):
            newSeed = master
            if state is not None: rand.setstate(state)
            newRecycleSeed = rand.randint(1, 1e9)
            state = rand.getstate()
        else:
            if state is not None: rand.setstate(state)
            newSeed = rand.randint(1, 1e9)
            state = rand.getstate()
            if recycle:
                master = newSeed
                recycleFails = 0
            newRecycleSeed = None

        sys.stderr.write(("Unique solutions: %d, consecutive failures: %"+failDigits+"d, %"+failDigits+"d.\r") % \
                             (len(uniqueSeeds), fails, recycleFails))

        lightcone1.rerandomize_light_cone_solution(newSeed, recycle=False)
        if newRecycleSeed is not None:
            lightcone1.rerandomize_light_cone_solution(newRecycleSeed,
                                                       recycle=True)

        # Compare with all other seeds.
        testPass = True
        for uniqueSeed in uniqueSeeds:
            lightcone2.rerandomize_light_cone_solution(uniqueSeed['master'],
                                                       recycle=False)
            if uniqueSeed['recycle'] is not None:
                lightcone2.rerandomize_light_cone_solution(
                    uniqueSeed['recycle'], recycle=True)

            common = _compare_solutions(lightcone1.light_cone_solution,
                                        lightcone2.light_cone_solution)

            if (common > max_overlap):
                testPass = False
                break
            else:
                maxCommon = max(maxCommon, common)

        if testPass:
            uniqueSeeds.append({'master': newSeed, 'recycle': newRecycleSeed})
            fails = 0
            recycleFails = 0

        else:
            if recycle:
                recycleFails += 1
            else:
                fails += 1

            if (recycleFails >= failures):
                sys.stderr.write(("Unique solutions: %d, consecutive failures: %"+failDigits+"d, %"+failDigits+"d.\n") % \
                                     (len(uniqueSeeds), fails, recycleFails))
                fails += 1
                mylog.info(
                    "Max recycled failures reached with master seed %d." %
                    newSeed)
                master = None
            if (fails >= failures):
                sys.stderr.write(("Unique solutions: %d, consecutive failures: %"+failDigits+"d, %"+failDigits+"d.\n") % \
                                     (len(uniqueSeeds), fails, recycleFails))
                mylog.error("Max consecutive failures reached.")
                break

    mylog.info("Created %d unique solutions." % len(uniqueSeeds))
    mylog.info("Maximum common volume is %.2e." % maxCommon)
    _write_seed_file(uniqueSeeds, filename)
    return uniqueSeeds
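
A hedged sketch of driving this search, reusing the hypothetical LightCone instance from earlier; the seed value is arbitrary.

# Search for 100 solutions whose pairwise common volume stays below
# 25%, writing the accepted seeds to 'unique.dat'.
seeds = find_unique_solutions(lc,
                              solutions=100,
                              seed=123456789,
                              max_overlap=0.25,
                              recycle=True,
                              filename='unique.dat')
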
Esempio n. 24
0
    def create_cosmology_splice(self,
                                minimal=True,
                                deltaz_min=0.0,
                                initial_redshift=None,
                                final_redshift=None):
        """
        Create list of datasets to be used for LightCones or LightRays.
        :param minimal (bool): if True, the minimum number of datasets is used to connect the initial and final 
               redshift.  If False, the list will contain as many entries as possible within the redshift 
               interval.  Default: True.
        :param deltaz_min (float): specifies the minimum delta z between consecutive datasets in the returned 
               list.  Default: 0.0.
        :param initial_redshift (float): the initial (highest) redshift in the cosmology splice list.  If None 
               is given, the highest redshift dataset present will be used.  Default: None.
        :param final_redshift (float): the final (lowest) redshift in the cosmology splice list.  If None is 
               given, the lowest redshift dataset present will be used.  Default: None.
        """

        if initial_redshift is None: initial_redshift = self.InitialRedshift
        if final_redshift is None: final_redshift = self.FinalRedshift

        # Calculate maximum delta z for each data dump.
        self._calculate_deltaz_max()

        # Calculate minimum delta z for each data dump.
        self._calculate_deltaz_min(deltaz_min=deltaz_min)

        cosmology_splice = []

        # Use minimum number of datasets to go from z_i to z_f.
        if minimal:

            z_Tolerance = 1e-3
            z = initial_redshift

            # fill redshift space with datasets
            while ((z > final_redshift)
                   and (na.fabs(z - final_redshift) > z_Tolerance)):

                # For first data dump, choose closest to desired redshift.
                if (len(cosmology_splice) == 0):
                    # Sort data outputs by proximity to current redshift.
                    self.allOutputs.sort(
                        key=lambda obj: na.fabs(z - obj['redshift']))
                    cosmology_splice.append(self.allOutputs[0])

                # Move forward from last slice in stack until z > z_max.
                else:
                    current_slice = cosmology_splice[-1]
                    while current_slice['next'] is not None and \
                            (z < current_slice['next']['redshift'] or \
                                 na.abs(z - current_slice['next']['redshift']) < z_Tolerance):
                        current_slice = current_slice['next']

                    if current_slice is cosmology_splice[-1]:
                        final_redshift = cosmology_splice[-1][
                            'redshift'] - cosmology_splice[-1]['deltazMax']
                        mylog.error(
                            "Cosmology splice incomplete due to insufficient data outputs."
                        )
                        break
                    else:
                        cosmology_splice.append(current_slice)

                z = cosmology_splice[-1]['redshift'] - cosmology_splice[-1][
                    'deltazMax']

        # Make light ray using maximum number of datasets (minimum spacing).
        else:
            # Sort data outputs by proximity to current redshift.
            self.allOutputs.sort(
                key=lambda obj: na.fabs(initial_redshift - obj['redshift']))
            # For first data dump, choose closest to desired redshift.
            cosmology_splice.append(self.allOutputs[0])

            nextOutput = cosmology_splice[-1]['next']
            while (nextOutput is not None):
                if (nextOutput['redshift'] <= final_redshift):
                    break
                if ((cosmology_splice[-1]['redshift'] - nextOutput['redshift'])
                        > cosmology_splice[-1]['deltazMin']):
                    cosmology_splice.append(nextOutput)
                nextOutput = nextOutput['next']
            if (cosmology_splice[-1]['redshift'] -
                    cosmology_splice[-1]['deltazMax']) > final_redshift:
                mylog.error(
                    "Cosmology splice incomplete due to insufficient data outputs."
                )
                final_redshift = cosmology_splice[-1][
                    'redshift'] - cosmology_splice[-1]['deltazMax']

        mylog.info(
            "create_cosmology_splice: Used %d data dumps to get from z = %f to %f."
            % (len(cosmology_splice), initial_redshift, final_redshift))

        return cosmology_splice
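
A hedged usage sketch on an EnzoSimulation-derived object such as the LightCone above; the deltaz_min value is illustrative.

# Minimal splice: fewest datasets that still tile the redshift range.
minimal_splice = lc.create_cosmology_splice(minimal=True)
# Dense splice: every output separated by at least delta z = 0.05.
dense_splice = lc.create_cosmology_splice(minimal=False, deltaz_min=0.05)
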
Esempio n. 25
0
    def sigmaM(self):
        """
         Written by BWO, 2006 (updated 25 January 2007).
         Converted to Python by Stephen Skory December 2009.

         This routine takes in cosmological parameters and creates a file (array) with
         sigma(M) in it, which is necessary for various press-schechter type
         stuff.  In principle one can calculate it ahead of time, but it's far,
         far faster in the long run to calculate your sigma(M) ahead of time.
        
         Inputs: cosmology, user must set parameters
        
         Outputs: four columns of data containing the following information:

         1) log mass (Msolar)
         2) mass (Msolar/h)
         3) Radius (comoving Mpc/h)
         4) sigma (normalized) using Msun/h as the input
         
         The arrays output are used later.
        """

        # Set up the transfer function object.
        self.TF = TransferFunction(self.omega_matter0, self.omega_baryon0, 0.0,
                                   0, self.omega_lambda0, self.hubble0,
                                   self.this_redshift)

        if self.TF.qwarn:
            mylog.error("You should probably fix your cosmology parameters!")

        # output arrays
        # 1) radius (comoving Mpc/h) corresponding to each mass
        self.Rarray = na.empty(self.num_sigma_bins, dtype='float64')
        # 2) log10 of mass (Msolar, NOT Msolar/h)
        self.logmassarray = na.empty(self.num_sigma_bins, dtype='float64')
        # 3) mass (Msolar/h)
        self.massarray = na.empty(self.num_sigma_bins, dtype='float64')
        # 4) sigma(M, z=0, where mass is in Msun/h)
        self.sigmaarray = na.empty(self.num_sigma_bins, dtype='float64')

        # get sigma_8 normalization
        R = 8.0
        # in units of Mpc/h (comoving)

        sigma8_unnorm = math.sqrt(self.sigma_squared_of_R(R))
        sigma_normalization = self.sigma8input / sigma8_unnorm

        rho0 = self.omega_matter0 * 2.78e+11
        # in units of h^2 Msolar/Mpc^3

        # spacing in mass of our sigma calculation
        dm = (float(self.log_mass_max) -
              self.log_mass_min) / self.num_sigma_bins
        """
         loop over the total number of sigma_bins the user has requested. 
         For each bin, calculate mass and equivalent radius, and call
         sigma_squared_of_R to get the sigma(R) (equivalent to sigma(M)),
         normalize by user-specified sigma_8, and then write out.
        """
        for i in xrange(self.num_sigma_bins):

            # thislogmass is in units of Msolar, NOT Msolar/h
            thislogmass = self.log_mass_min + i * dm

            # mass in units of h^-1 Msolar
            thismass = math.pow(10.0, thislogmass) * self.hubble0

            # radius is in units of h^-1 Mpc (comoving)
            thisradius = math.pow(3.0 * thismass / 4.0 / math.pi / rho0,
                                  1.0 / 3.0)

            R = thisradius
            # h^-1 Mpc (comoving)

            self.Rarray[i] = thisradius
            # h^-1 Mpc (comoving)
            self.logmassarray[i] = thislogmass
            # Msun (NOT Msun/h)
            self.massarray[i] = thismass
            # Msun/h

            # get normalized sigma(R)
            self.sigmaarray[i] = math.sqrt(
                self.sigma_squared_of_R(R)) * sigma_normalization
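
The radius in the loop above inverts M = (4/3) pi rho0 R^3 with rho0 = omega_matter0 * 2.78e11 h^2 Msolar/Mpc^3. A self-contained spot check of that conversion, with illustrative values:

import math

# Mass -> comoving radius for a 1e12 Msun/h halo at omega_matter0 = 0.3.
rho0 = 0.3 * 2.78e+11  # h^2 Msolar/Mpc^3
thismass = 1.0e12  # Msolar/h
thisradius = math.pow(3.0 * thismass / 4.0 / math.pi / rho0, 1.0 / 3.0)
print "R = %.2f comoving Mpc/h" % thisradius  # ~1.42
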
Esempio n. 26
0
    def __init__(self,
                 pf,
                 halo_file=None,
                 omega_matter0=None,
                 omega_lambda0=None,
                 omega_baryon0=0.05,
                 hubble0=None,
                 sigma8input=0.86,
                 primordial_index=1.0,
                 this_redshift=None,
                 log_mass_min=None,
                 log_mass_max=None,
                 num_sigma_bins=360,
                 fitting_function=4,
                 mass_column=5):
        """
        Initalize a HaloMassFcn object to analyze the distribution of haloes
        as a function of mass.
        :param halo_file (str): The filename of the output of the Halo Profiler.
        Default=None.
        :param omega_matter0 (float): The fraction of the universe made up of
        matter (dark and baryonic). Default=None.
        :param omega_lambda0 (float): The fraction of the universe made up of
        dark energy. Default=None.
        :param omega_baryon0 (float): The fraction of the universe made up of
        ordinary baryonic matter. This should match the value
        used to create the initial conditions, using 'inits'. This is 
        *not* stored in the enzo datset so it must be checked by hand.
        Default=0.05.
        :param hubble0 (float): The expansion rate of the universe in units of
        100 km/s/Mpc. Default=None.
        :param sigma8input (float): The amplitude of the linear power
        spectrum at z=0 as specified by the rms amplitude of mass-fluctuations
        in a top-hat sphere of radius 8 Mpc/h. This should match the value
        used to create the initial conditions, using 'inits'. This is 
        *not* stored in the enzo datset so it must be checked by hand.
        Default=0.86.
        :param primoridal_index (float): This is the index of the mass power
        spectrum before modification by the transfer function. A value of 1
        corresponds to the scale-free primordial spectrum. This should match
        the value used to make the initial conditions using 'inits'. This is 
        *not* stored in the enzo datset so it must be checked by hand.
        Default=1.0.
        :param this_redshift (float): The current redshift. Default=None.
        :param log_mass_min (float): The log10 of the mass of the minimum of the
        halo mass range. Default=None.
        :param log_mass_max (float): The log10 of the mass of the maximum of the
        halo mass range. Default=None.
        :param num_sigma_bins (float): The number of bins (points) to use for
        the calculations and generated fit. Default=360.
        :param fitting_function (int): Which fitting function to use.
        1 = Press-schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren fit
        Default=4.
        :param mass_column (int): The column of halo_file that contains the
        masses of the haloes. Default=4.
        """
        self.pf = pf
        self.halo_file = halo_file
        self.omega_matter0 = omega_matter0
        self.omega_lambda0 = omega_lambda0
        self.omega_baryon0 = omega_baryon0
        self.hubble0 = hubble0
        self.sigma8input = sigma8input
        self.primordial_index = primordial_index
        self.this_redshift = this_redshift
        self.log_mass_min = log_mass_min
        self.log_mass_max = log_mass_max
        self.num_sigma_bins = num_sigma_bins
        self.fitting_function = fitting_function
        self.mass_column = mass_column

        # Determine the run mode.
        if halo_file is None:
            # We are hand-picking our various cosmological parameters
            self.mode = 'single'
        else:
            # Make the fit using the same cosmological parameters as the dataset.
            self.mode = 'haloes'
            self.omega_matter0 = self.pf['CosmologyOmegaMatterNow']
            self.omega_lambda0 = self.pf['CosmologyOmegaLambdaNow']
            self.hubble0 = self.pf['CosmologyHubbleConstantNow']
            self.this_redshift = self.pf['CosmologyCurrentRedshift']
            self.read_haloes()
            if self.log_mass_min is None:
                self.log_mass_min = math.log10(min(self.haloes))
            if self.log_mass_max is None:
                self.log_mass_max = math.log10(max(self.haloes))

        # Input error check.
        if self.mode == 'single':
            if omega_matter0 is None or omega_lambda0 is None or \
            hubble0 is None or this_redshift is None or log_mass_min is None or\
            log_mass_max is None:
                mylog.error("All of these parameters need to be set:")
                mylog.error("[omega_matter0, omega_lambda0, \
                hubble0, this_redshift, log_mass_min, log_mass_max]")
                mylog.error("[%s,%s,%s,%s,%s,%s]" % (omega_matter0,\
                omega_lambda0, hubble0, this_redshift,\
                log_mass_min, log_mass_max))
                return None

        # Poke the user to make sure they're doing it right.
        mylog.info("""
        Please make sure these are the correct values! They are
        not stored in enzo datasets, so must be entered by hand.
        sigma8input=%f primordial_index=%f omega_baryon0=%f
        """ % (self.sigma8input, self.primordial_index, self.omega_baryon0))
        time.sleep(1)

        # Do the calculations.
        self.sigmaM()
        self.dndm()

        if self.mode == 'haloes':
            self.bin_haloes()
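
A hedged sketch of constructing a HaloMassFcn in 'single' mode, where every cosmological parameter must be supplied by hand; pf is a previously loaded dataset and all values are illustrative.

# No halo_file, so the parameters below are used instead of values
# read from a halo list.
hmf = HaloMassFcn(pf,
                  omega_matter0=0.3,
                  omega_lambda0=0.7,
                  hubble0=0.7,
                  this_redshift=0.0,
                  log_mass_min=8.0,
                  log_mass_max=15.0,
                  fitting_function=4)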