Example #1
 def find_max_cell_location(self, field, finest_levels=True):
     """Return (grid, cell index, value, position) of the maximum of *field*."""
     if finest_levels is True:
         # Only search grids within NUMTOCHECK levels of the finest level.
         gi = (self.grid_levels >= self.max_level - NUMTOCHECK).ravel()
         source = self.grid_collection([0.0] * 3, self.grids[gi])
     else:
         source = self.all_data()
     mylog.debug("Searching %s grids for maximum value of %s",
                 len(source._grids), field)
     max_val, maxi, mx, my, mz, mg = \
         source.quantities["MaxLocation"](field, lazy_reader=True)
     max_grid = self.grids[mg]
     mc = na.unravel_index(maxi, max_grid.ActiveDimensions)
     mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s %s", \
           max_val, mx, my, mz, max_grid, max_grid.Level, mc)
     self.parameters["Max%sValue" % (field)] = max_val
     self.parameters["Max%sPos" % (field)] = "%s" % ((mx, my, mz), )
     return max_grid, mc, max_val, na.array((mx, my, mz), dtype='float64')
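A hedged usage sketch of the routine above: the parameter-file handle pf and its hierarchy attribute h are assumptions in the style of the surrounding API, and the field name is illustrative.

# Hypothetical usage: find the densest cell among the finest grids.
max_grid, max_cell, max_val, max_pos = pf.h.find_max_cell_location("Density")
print "Max Density %0.5e at %s in grid %s (level %s)" % \
    (max_val, max_pos, max_grid, max_grid.Level)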
Example #2
 def find_min(self, field):
     """
     Returns (value, center) of location of minimum for a given field
     """
     gI = na.where(self.grid_levels >= 0)  # Slow but pedantic
     minVal = 1e100
     for grid in self.grids[gI[0]]:
         mylog.debug("Checking %s (level %s)", grid.id, grid.Level)
         val, coord = grid.find_min(field)
         if val < minVal:
             minCoord = coord
             minVal = val
             minGrid = grid
     mc = na.array(minCoord)
     pos = minGrid.get_position(mc)
     mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s", \
           minVal, pos[0], pos[1], pos[2], minGrid, minGrid.Level)
     self.center = pos
     self.parameters["Min%sValue" % (field)] = minVal
     self.parameters["Min%sPos" % (field)] = "%s" % (pos)
     return minVal, pos
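A similar hedged sketch for the minimum finder; unlike the maximum routine it also recenters the hierarchy (again, pf.h is an assumed handle and the field name is illustrative).

# Hypothetical usage: locate the coldest cell; note the recentering side effect.
min_val, min_pos = pf.h.find_min("Temperature")
# After the call, pf.h.center has been set to min_pos by find_min itself.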
Example #3
    def _save_light_cone_stack(self,
                               field=None,
                               weight_field=None,
                               filename=None,
                               over_write=True):
        "Save the light cone projection stack as a 3d array in and hdf5 file."

        # Make list of redshifts to include as a dataset attribute.
        redshiftList = na.array(
            [slice['redshift'] for slice in self.light_cone_solution])

        field_node = "%s_%s" % (field, weight_field)
        weight_field_node = "weight_field_%s" % weight_field

        import h5py
        if (filename is None):
            filename = "%s/%s_data" % (self.output_dir, self.output_prefix)
        if not (filename.endswith('.h5')):
            filename += ".h5"

        if (len(self.projection_stack) == 0):
            mylog.debug("save_light_cone_stack: no projection data loaded.")
            return

        mylog.info("Writing light cone data to %s." % filename)

        output = h5py.File(filename, "a")

        node_exists = field_node in output.listnames()

        if node_exists:
            if over_write:
                mylog.info("Dataset, %s, already exists, overwriting." %
                           field_node)
                write_data = True
                del output[field_node]
            else:
                mylog.info("Dataset, %s, already exists in %s, not saving." %
                           (field_node, filename))
                write_data = False
        else:
            write_data = True

        if write_data:
            mylog.info("Saving %s to %s." % (field_node, filename))
            self.projection_stack = na.array(self.projection_stack)
            field_dataset = output.create_dataset(field_node,
                                                  data=self.projection_stack)
            field_dataset.attrs['redshifts'] = redshiftList
            field_dataset.attrs['observer_redshift'] = na.float(
                self.observer_redshift)
            field_dataset.attrs['field_of_view_in_arcminutes'] = na.float(
                self.field_of_view_in_arcminutes)
            field_dataset.attrs['image_resolution_in_arcseconds'] = na.float(
                self.image_resolution_in_arcseconds)

        if (len(self.projection_weight_field_stack) > 0):
            node_exists = weight_field_node in output.listnames()
            if node_exists:
                if over_write:
                    mylog.info("Dataset, %s, already exists, overwriting." %
                               weight_field_node)
                    write_data = True
                    del output[weight_field_node]
                else:
                    mylog.info(
                        "Dataset, %s, already exists in %s, not saving." %
                        (weight_field_node, filename))
                    write_data = False
            else:
                write_data = True

            if write_data:
                mylog.info("Saving %s to %s." % (weight_field_node, filename))
                self.projection_weight_field_stack = na.array(
                    self.projection_weight_field_stack)
                weight_field_dataset = output.create_dataset(
                    weight_field_node, data=self.projection_weight_field_stack)
                weight_field_dataset.attrs['redshifts'] = redshiftList
                weight_field_dataset.attrs['observer_redshift'] = na.float(
                    self.observer_redshift)
                weight_field_dataset.attrs[
                    'field_of_view_in_arcminutes'] = na.float(
                        self.field_of_view_in_arcminutes)
                weight_field_dataset.attrs[
                    'image_resolution_in_arcseconds'] = na.float(
                        self.image_resolution_in_arcseconds)

        output.close()
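As a rough sketch of consuming the file written above, the stack can be read back with h5py using the same node-naming convention; the filename, field name, and weight field below are illustrative.

import h5py
import numpy as na

# Hypothetical read-back of a saved light cone stack.
output = h5py.File("LightCone_data.h5", "r")
field_node = "%s_%s" % ("SZY", None)              # naming convention used above
stack = na.array(output[field_node])              # one 2D projection per slice
redshifts = output[field_node].attrs['redshifts']
fov = output[field_node].attrs['field_of_view_in_arcminutes']
output.close()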
Example #4
    def rerandomize_light_cone_solution(self,
                                        newSeed,
                                        recycle=True,
                                        filename=None):
        """
        When making a projection for a light cone, only randomizations along the line of sight make any
        given projection unique, since the lateral shifting and tiling are done after the projection is made.
        Therefore, multiple light cones can be made from a single set of projections by introducing different
        lateral random shifts and keeping all the original shifts along the line of sight.
        This routine takes a new random seed and rerandomizes the parts of the light cone that do not contribute
        to creating a unique projection object.  Additionally, if the same random seed is given for the
        rerandomizing, the solution will be identical to the original.

        This routine also serves as a general solution rescrambler: if the keyword recycle is set to
        True, the line-of-sight shifts from the original solution are kept; otherwise, a completely new
        solution is created.

        :param recycle (bool): if True, the new solution will have the same shift in the line of sight as the original
               solution.  Since the projections of each slice are serialized and stored for the entire width of the
               box (even if the width used is less than the total box), the projection data can be deserialized
               instead of being remade from scratch.  This can greatly speed up the creation of a large number of light
               cone projections.  Default: True.
        :param filename (str): if given, a text file detailing the solution will be written out.  Default: None.
        """

        # Get rid of old halo mask, if one was there.
        self.halo_mask = []

        # Clean pf objects out of light cone solution.
        for slice in self.light_cone_solution:
            if slice.has_key('object'):
                del slice['object']

        if recycle:
            mylog.debug("Recycling solution made with %s with new seed %s." %
                        (self.originalRandomSeed, newSeed))
            self.recycleRandomSeed = int(newSeed)
        else:
            mylog.debug("Creating new solution with random seed %s." % newSeed)
            self.originalRandomSeed = int(newSeed)
            self.recycleRandomSeed = 0

        self.recycleSolution = recycle

        # Keep track of fraction of volume in common between the original and recycled solution.
        commonVolume = 0.0
        totalVolume = 0.0

        # For box coherence, keep track of effective depth travelled.
        boxFractionUsed = 0.0

        # Seed random number generator with new seed.
        na.random.seed(int(newSeed))

        for q, output in enumerate(self.light_cone_solution):
            # It is necessary to make the same number of calls to the random number generator
            # so the original solution will be produced if the same seed is given.

            # Get projection axis and center.
            # If using box coherence, only get random axis and center if enough of the box has been used,
            # or if boxFractionUsed will be greater than 1 after this slice.
            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
                    (boxFractionUsed > self.minimum_coherent_box_fraction) or \
                    (boxFractionUsed + self.light_cone_solution[q]['DepthBoxFraction'] > 1.0):
                # Get random projection axis and center.
                # If recycling, axis will get thrown away since it is used in creating a unique projection object.
                newAxis = na.random.randint(0, 3)

                newCenter = [
                    na.random.random(),
                    na.random.random(),
                    na.random.random()
                ]
                boxFractionUsed = 0.0
            else:
                # Same axis and center as previous slice, but with depth center shifted.
                newAxis = self.light_cone_solution[q - 1]['ProjectionAxis']
                newCenter = copy.deepcopy(
                    self.light_cone_solution[q - 1]['ProjectionCenter'])
                newCenter[newAxis] += \
                    0.5 * (self.light_cone_solution[q]['DepthBoxFraction'] + self.light_cone_solution[q-1]['DepthBoxFraction'])
                if newCenter[newAxis] >= 1.0:
                    newCenter[newAxis] -= 1.0

            if recycle:
                output['ProjectionAxis'] = self.master_solution[q][
                    'ProjectionAxis']
            else:
                output['ProjectionAxis'] = newAxis

            boxFractionUsed += self.light_cone_solution[q]['DepthBoxFraction']

            # Make list of rectangle corners to calculate common volume.
            newCube = na.zeros(shape=(len(newCenter), 2))
            oldCube = na.zeros(shape=(len(newCenter), 2))
            for w in range(len(newCenter)):
                if (w == self.master_solution[q]['ProjectionAxis']):
                    oldCube[w] = [
                        self.master_solution[q]['ProjectionCenter'][w] -
                        0.5 * self.master_solution[q]['DepthBoxFraction'],
                        self.master_solution[q]['ProjectionCenter'][w] +
                        0.5 * self.master_solution[q]['DepthBoxFraction']
                    ]
                else:
                    oldCube[w] = [
                        self.master_solution[q]['ProjectionCenter'][w] -
                        0.5 * self.master_solution[q]['WidthBoxFraction'],
                        self.master_solution[q]['ProjectionCenter'][w] +
                        0.5 * self.master_solution[q]['WidthBoxFraction']
                    ]

                if (w == output['ProjectionAxis']):
                    if recycle:
                        newCube[w] = oldCube[w]
                    else:
                        newCube[w] = [
                            newCenter[w] -
                            0.5 * self.master_solution[q]['DepthBoxFraction'],
                            newCenter[w] +
                            0.5 * self.master_solution[q]['DepthBoxFraction']
                        ]
                else:
                    newCube[w] = [
                        newCenter[w] -
                        0.5 * self.master_solution[q]['WidthBoxFraction'],
                        newCenter[w] +
                        0.5 * self.master_solution[q]['WidthBoxFraction']
                    ]

            commonVolume += commonNVolume(oldCube,
                                          newCube,
                                          periodic=na.array([[0, 1], [0, 1],
                                                             [0, 1]]))
            totalVolume += output['DepthBoxFraction'] * output[
                'WidthBoxFraction']**2

            # Replace centers for every axis except the line of sight axis.
            for w in range(len(newCenter)):
                if not (recycle and
                        (w == self.light_cone_solution[q]['ProjectionAxis'])):
                    self.light_cone_solution[q]['ProjectionCenter'][
                        w] = newCenter[w]

        if recycle:
            mylog.debug(
                "Fractional common volume between master and recycled solution is %.2e"
                % (commonVolume / totalVolume))
        else:
            mylog.debug(
                "Fraction of total volume in common with old solution is %.2e."
                % (commonVolume / totalVolume))
            self.master_solution = [
                copy.deepcopy(q) for q in self.light_cone_solution
            ]

        # Write solution to a file.
        if filename is not None:
            self._save_light_cone_solution(filename=filename)
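A hedged sketch of the recycling workflow the docstring describes: one master solution, then several recycled variants that reuse the serialized line-of-sight projections. The light cone object lc and the seed values are assumptions.

# Hypothetical workflow using the two solution methods shown in these examples.
lc.calculate_light_cone_solution(seed=123456789, filename="master.txt")
for new_seed in (24, 42, 4242):
    lc.rerandomize_light_cone_solution(new_seed, recycle=True,
                                       filename="recycled_%d.txt" % new_seed)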
Example #5
    def calculate_light_cone_solution(self, seed=None, filename=None):
        """
        Create list of projections to be added together to make the light cone.
        :param seed (int): the seed for the random number generator.  Any light cone solution 
               can be reproduced by giving the same random seed.  Default: None (each solution 
               will be distinct).
        :param filename (str): if given, a text file detailing the solution will be written out.  Default: None.
        """

        # Don't use box coherence with maximum projection depths.
        if self.use_minimum_datasets and \
                self.minimum_coherent_box_fraction > 0:
            mylog.info(
                "Setting minimum_coherent_box_fraction to 0 with minimal light cone."
            )
            self.minimum_coherent_box_fraction = 0

        # Make sure recycling flag is off.
        self.recycleSolution = False

        # Get rid of old halo mask, if one was there.
        self.halo_mask = []

        if seed is not None:
            self.originalRandomSeed = int(seed)

        # Calculate projection sizes, and get random projection axes and centers.
        na.random.seed(self.originalRandomSeed)

        # For box coherence, keep track of effective depth travelled.
        boxFractionUsed = 0.0

        for q in range(len(self.light_cone_solution)):
            del self.light_cone_solution[q]['previous']
            del self.light_cone_solution[q]['next']
            if (q == len(self.light_cone_solution) - 1):
                z_next = self.final_redshift
            else:
                z_next = self.light_cone_solution[q + 1]['redshift']

            # Calculate fraction of box required for a depth of delta z
            self.light_cone_solution[q]['DepthBoxFraction'] = self.cosmology.ComovingRadialDistance(z_next, self.light_cone_solution[q]['redshift']) * \
                self.enzoParameters['CosmologyHubbleConstantNow'] / self.enzoParameters['CosmologyComovingBoxSize']

            # Simple error check to make sure more than 100% of box depth is never required.
            if (self.light_cone_solution[q]['DepthBoxFraction'] > 1.0):
                mylog.debug(
                    "Warning: box fraction required to go from z = %f to %f is %f"
                    % (self.light_cone_solution[q]['redshift'], z_next,
                       self.light_cone_solution[q]['DepthBoxFraction']))
                mylog.debug(
                    "Full box delta z is %f, but it is %f to the next data dump."
                    % (self.light_cone_solution[q]['deltazMax'],
                       self.light_cone_solution[q]['redshift'] - z_next))

            # Calculate fraction of box required for width corresponding to requested image size.
            scale = self.cosmology.AngularScale_1arcsec_kpc(
                self.observer_redshift,
                self.light_cone_solution[q]['redshift'])
            size = self.field_of_view_in_arcminutes * 60.0 * scale / 1000.0
            boxSizeProper = self.enzoParameters['CosmologyComovingBoxSize'] / (
                self.enzoParameters['CosmologyHubbleConstantNow'] *
                (1.0 + self.light_cone_solution[q]['redshift']))
            self.light_cone_solution[q][
                'WidthBoxFraction'] = size / boxSizeProper

            # Get projection axis and center.
            # If using box coherence, only get random axis and center if enough of the box has been used,
            # or if boxFractionUsed will be greater than 1 after this slice.
            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
                    (boxFractionUsed > self.minimum_coherent_box_fraction) or \
                    (boxFractionUsed + self.light_cone_solution[q]['DepthBoxFraction'] > 1.0):
                # Random axis and center.
                self.light_cone_solution[q][
                    'ProjectionAxis'] = na.random.randint(0, 3)
                self.light_cone_solution[q]['ProjectionCenter'] = [
                    na.random.random(),
                    na.random.random(),
                    na.random.random()
                ]
                boxFractionUsed = 0.0
            else:
                # Same axis and center as previous slice, but with depth center shifted.
                self.light_cone_solution[q][
                    'ProjectionAxis'] = self.light_cone_solution[
                        q - 1]['ProjectionAxis']
                self.light_cone_solution[q][
                    'ProjectionCenter'] = copy.deepcopy(
                        self.light_cone_solution[q - 1]['ProjectionCenter'])
                self.light_cone_solution[q]['ProjectionCenter'][self.light_cone_solution[q]['ProjectionAxis']] += \
                    0.5 * (self.light_cone_solution[q]['DepthBoxFraction'] + self.light_cone_solution[q-1]['DepthBoxFraction'])
                if self.light_cone_solution[q]['ProjectionCenter'][
                        self.light_cone_solution[q]['ProjectionAxis']] >= 1.0:
                    self.light_cone_solution[q]['ProjectionCenter'][
                        self.light_cone_solution[q]['ProjectionAxis']] -= 1.0

            boxFractionUsed += self.light_cone_solution[q]['DepthBoxFraction']

        # Store this as the master solution.
        self.master_solution = [
            copy.deepcopy(q) for q in self.light_cone_solution
        ]

        # Write solution to a file.
        if filename is not None:
            self._save_light_cone_solution(filename=filename)
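A standalone sketch of the two per-slice fractions computed above, with illustrative numbers substituted for the cosmology calls and parameter-file values (all of the constants below are assumptions).

# Illustrative arithmetic mirroring the per-slice geometry above.
h = 0.704                 # CosmologyHubbleConstantNow (assumed)
box_mpch = 128.0          # CosmologyComovingBoxSize in Mpc/h (assumed)
d_comoving = 35.0         # ComovingRadialDistance(z_next, z) in Mpc (assumed)

# Fraction of the box depth needed to span the redshift interval.
depth_box_fraction = d_comoving * h / box_mpch

# Fraction of the box width needed for the requested field of view.
scale = 4.0               # AngularScale_1arcsec_kpc(z_obs, z) in kpc/arcsec (assumed)
fov_arcmin = 30.0
z = 0.5                   # slice redshift (assumed)
size = fov_arcmin * 60.0 * scale / 1000.0         # proper Mpc
box_size_proper = box_mpch / (h * (1.0 + z))      # proper Mpc
width_box_fraction = size / box_size_proper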
Example #6
def VirialFilter(profile,
                 overdensity_field='ActualOverdensity',
                 virial_overdensity=200.,
                 must_be_virialized=True,
                 virial_filters=[['TotalMassMsun', '>=', '1e14']],
                 virial_quantities=['TotalMassMsun', 'RadiusMpc'],
                 virial_index=None):
    """
    Filter halos by virial quantities.
    Returns True or False depending on whether the halo passed the filter,
    along with a dictionary of virial quantities for the fields specified in
    the virial_quantities keyword.  Thresholds for virial quantities are
    given with the virial_filters keyword as a list of
    [field, condition, value] triplets.
    """

    fields = deepcopy(virial_quantities)
    if virial_filters is None: virial_filters = []
    for vfilter in virial_filters:
        if not vfilter[0] in fields:
            fields.append(vfilter[0])

    overDensity = []
    temp_profile = {}
    for field in fields:
        temp_profile[field] = []

    for q in range(len(profile[overdensity_field])):
        good = True
        # A value not equal to itself is NaN; skip such points entirely.
        if (profile[overdensity_field][q] != profile[overdensity_field][q]):
            good = False
            continue
        for field in fields:
            if (profile[field][q] != profile[field][q]):
                good = False
                break
        if good:
            overDensity.append(profile[overdensity_field][q])
            for field in fields:
                temp_profile[field].append(profile[field][q])

    virial = {}
    for field in fields:
        virial[field] = 0.0

    if (not (na.array(overDensity) >= virial_overdensity).any()) and \
            must_be_virialized:
        mylog.error("This halo is not virialized!")
        return [False, {}]

    if (len(overDensity) < 2):
        mylog.error("Skipping halo with no valid points in profile.")
        return [False, {}]

    # Find the index bracketing the point where the overdensity profile
    # crosses the virial overdensity (the profile decreases outward).
    if (overDensity[1] <= virial_overdensity):
        index = 0
    elif (overDensity[-1] >= virial_overdensity):
        index = -2
    else:
        for q in (na.arange(len(overDensity) - 2)) + 2:
            if (overDensity[q] < virial_overdensity):
                index = q - 1
                break

    if type(virial_index) is list:
        virial_index.append(index)

    # Linearly interpolate each quantity to its value at the virial overdensity.
    for field in fields:
        if (overDensity[index + 1] - overDensity[index]) == 0:
            mylog.error("Overdensity profile has slope of zero.")
            return [False, {}]
        else:
            slope = (temp_profile[field][index+1] - temp_profile[field][index]) / \
                (overDensity[index+1] - overDensity[index])
            value = slope * (virial_overdensity - overDensity[index]) + \
                temp_profile[field][index]
            virial[field] = value

    for vfilter in virial_filters:
        if eval("%s %s %s" % (virial[vfilter[0]], vfilter[1], vfilter[2])):
            mylog.debug(
                "(%s %s %s) returned True for %s." %
                (vfilter[0], vfilter[1], vfilter[2], virial[vfilter[0]]))
            continue
        else:
            mylog.debug(
                "(%s %s %s) returned False for %s." %
                (vfilter[0], vfilter[1], vfilter[2], virial[vfilter[0]]))
            return [False, {}]

    return [True, dict((q, virial[q]) for q in virial_quantities)]
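A direct-call sketch with a hand-made, monotonically decreasing overdensity profile; every number here is illustrative.

# Hypothetical profile: overdensity decreases outward, mass and radius increase.
profile = {'ActualOverdensity': [5000.0, 1200.0, 300.0, 80.0],
           'TotalMassMsun':     [1.0e13, 5.0e13, 2.0e14, 4.0e14],
           'RadiusMpc':         [0.1,    0.5,    1.2,    2.5]}
passed, virial = VirialFilter(profile,
                              virial_overdensity=200.,
                              virial_filters=[['TotalMassMsun', '>=', '1e14']],
                              virial_quantities=['TotalMassMsun', 'RadiusMpc'])
# The interpolated mass at an overdensity of 200 is roughly 2.9e14 Msun here,
# so the >= 1e14 filter passes and virial holds TotalMassMsun and RadiusMpc.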
Example #7
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

from yt.config import ytcfg
from yt.logger import lagosLogger as mylog

try:
    from pyhdf_np import SD  # NumPy
    import pyhdf_np.error  # NumPy
except:
    mylog.debug("No HDF4 support")

import warnings
try:
    import h5py
    if not hasattr(h5py.h5, "ArgsError"):
        h5py.h5.ArgsError = h5py.h5.H5Error
except ImportError:
    ytcfg["lagos", "serialize"] = "False"
    mylog.warning("No h5py. Data serialization disabled.")

from yt.arraytypes import *
import weakref
from new import classobj
from string import strip, rstrip
from math import ceil, floor, log10, pi