Code example #1
    def volume(self):
        """The abstract volume associated with this VolumeSource

        This object does the heavy lifting to access data in an efficient manner
        using a KDTree
        """
        if self._volume is None:
            mylog.info("Creating volume")
            volume = AMRKDTree(self.data_source.ds, data_source=self.data_source)
            self._volume = volume

        return self._volume
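
A minimal usage sketch of how this lazy property gets exercised, reusing only calls that appear in Code example #8; the dataset path and the 'density' field are illustrative:

import yt

ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')  # illustrative dataset
im, sc = yt.volume_render(ds, 'density', fname='v0.png')
source = sc.get_source(0)

# The property above lazily builds an AMRKDTree on first access and caches
# it in self._volume; the render has already triggered that here, so this
# simply retrieves the cached tree.
kd = source.volume
print(kd.count_cells())
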
Code example #2
def test_amr_kdtree_coverage():
    return  #TESTDISABLED
    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [
        ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75], {"density": (0.25, 100.0)})
    ]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({"density": data}, domain_dims, 1.0)
    ds = refine_amr(ug, rc, fo, 5)

    kd = AMRKDTree(ds)

    volume = kd.count_volume()
    yield assert_equal, volume, \
        np.prod(ds.domain_right_edge - ds.domain_left_edge)

    cells = kd.count_cells()
    true_cells = ds.all_data().quantities['TotalQuantity']('Ones')[0]
    yield assert_equal, cells, true_cells

    # This largely reproduces the AMRKDTree.tree.check_tree() functionality
    tree_ok = True
    for node in kd.tree.trunk.depth_traverse():
        if node.grid is None:
            continue
        grid = ds.index.grids[node.grid - kd._id_offset]
        dds = grid.dds
        gle = grid.LeftEdge
        nle = node.get_left_edge()
        nre = node.get_right_edge()
        li = np.rint((nle - gle) / dds).astype('int32')
        ri = np.rint((nre - gle) / dds).astype('int32')
        dims = (ri - li).astype('int32')
        tree_ok *= np.all(grid.LeftEdge <= nle)
        tree_ok *= np.all(grid.RightEdge >= nre)
        tree_ok *= np.all(dims > 0)

    yield assert_equal, True, tree_ok
Code example #3
def test_amr_kdtree_coverage():
    return #TESTDISABLED
    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({"density": data}, domain_dims, 1.0)
    ds = refine_amr(ug, rc, fo, 5)

    kd = AMRKDTree(ds)

    volume = kd.count_volume()
    yield assert_equal, volume, \
        np.prod(ds.domain_right_edge - ds.domain_left_edge)

    cells = kd.count_cells()
    true_cells = ds.all_data().quantities['TotalQuantity']('Ones')[0]
    yield assert_equal, cells, true_cells

    # This largely reproduces the AMRKDTree.tree.check_tree() functionality
    tree_ok = True
    for node in depth_traverse(kd.tree.trunk):
        if node.grid is None:
            continue
        grid = ds.index.grids[node.grid - kd._id_offset]
        dds = grid.dds
        gle = grid.LeftEdge
        nle = get_left_edge(node)
        nre = get_right_edge(node)
        li = np.rint((nle-gle)/dds).astype('int32')
        ri = np.rint((nre-gle)/dds).astype('int32')
        dims = (ri - li).astype('int32')
        tree_ok *= np.all(grid.LeftEdge <= nle)
        tree_ok *= np.all(grid.RightEdge >= nre)
        tree_ok *= np.all(dims > 0)

    yield assert_equal, True, tree_ok
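
The li/ri arithmetic in the tests above maps a kd-tree node onto a block of its host grid's cells. A hedged sketch of using those bounds to slice out the covered cells; the dataset path and the 'density' field are illustrative, and the traversal call follows the method form used in Code example #2:

import numpy as np
import yt
from yt.utilities.amr_kdtree.api import AMRKDTree

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative dataset
kd = AMRKDTree(ds)

# Take the first node that maps onto a grid and slice out the cells it
# covers, mirroring the index arithmetic in the tests above.
node = next(n for n in kd.tree.trunk.depth_traverse() if n.grid is not None)
grid = ds.index.grids[node.grid - kd._id_offset]
li = np.rint((node.get_left_edge() - grid.LeftEdge) / grid.dds).astype("int32")
ri = np.rint((node.get_right_edge() - grid.LeftEdge) / grid.dds).astype("int32")
brick = grid["density"][li[0]:ri[0], li[1]:ri[1], li[2]:ri[2]]
print(brick.shape)  # should equal tuple(ri - li)
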
Code example #4
 def __init__(self, ds, positions, xfield='velocity_x', yfield='velocity_x',
              zfield='velocity_x', volume=None,
              dx=None, length=None, direction=1,
              get_magnitude=False):
     ParallelAnalysisInterface.__init__(self)
     self.ds = ds
     self.start_positions = sanitize_length(positions, ds)
     self.N = self.start_positions.shape[0]
     # I need a data object to resolve the field names to field tuples
     # via _determine_fields()
     ad = self.ds.all_data()
     self.xfield = ad._determine_fields(xfield)[0]
     self.yfield = ad._determine_fields(yfield)[0]
     self.zfield = ad._determine_fields(zfield)[0]
     self.get_magnitude = get_magnitude
     self.direction = np.sign(direction)
     if volume is None:
         volume = AMRKDTree(self.ds)
         volume.set_fields([self.xfield,self.yfield,self.zfield],
                           [False,False,False],
                           False)
         volume.join_parallel_trees()
     self.volume = volume
     if dx is None:
         dx = self.ds.index.get_smallest_dx()
     self.dx = sanitize_length(dx, ds)
     if length is None:
         length = np.max(self.ds.domain_right_edge-self.ds.domain_left_edge)
     self.length = sanitize_length(length, ds)
     self.steps = int(self.length / self.dx) + 1
     # Fix up the dx.
     self.dx = 1.0*self.length/self.steps
     self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
     self.magnitudes = None
     if self.get_magnitude:
         self.magnitudes = np.zeros((self.N,self.steps), dtype='float64')
Code example #5
 def __init__(
     self,
     ds,
     positions,
     xfield="velocity_x",
     yfield="velocity_x",
     zfield="velocity_x",
     volume=None,
     dx=None,
     length=None,
     direction=1,
     get_magnitude=False,
 ):
     ParallelAnalysisInterface.__init__(self)
     self.ds = ds
     self.start_positions = sanitize_length(positions, ds)
     self.N = self.start_positions.shape[0]
     # I need a data object to resolve the field names to field tuples
     # via _determine_fields()
     ad = self.ds.all_data()
     self.xfield = ad._determine_fields(xfield)[0]
     self.yfield = ad._determine_fields(yfield)[0]
     self.zfield = ad._determine_fields(zfield)[0]
     self.get_magnitude = get_magnitude
     self.direction = np.sign(direction)
     if volume is None:
         volume = AMRKDTree(self.ds)
         volume.set_fields([self.xfield, self.yfield, self.zfield],
                           [False, False, False], False)
         volume.join_parallel_trees()
     self.volume = volume
     if dx is None:
         dx = self.ds.index.get_smallest_dx()
     self.dx = sanitize_length(dx, ds)
     if length is None:
         length = np.max(self.ds.domain_right_edge -
                         self.ds.domain_left_edge)
     self.length = sanitize_length(length, ds)
     self.steps = int(self.length / self.dx) + 1
     # Fix up the dx.
     self.dx = 1.0 * self.length / self.steps
     self.streamlines = np.zeros((self.N, self.steps, 3), dtype="float64")
     self.magnitudes = None
     if self.get_magnitude:
         self.magnitudes = np.zeros((self.N, self.steps), dtype="float64")
Code example #6
File: streamlines.py  Project: Xarthisius/yt-drone
 def __init__(self,
              ds,
              positions,
              xfield='velocity_x',
              yfield='velocity_x',
              zfield='velocity_x',
              volume=None,
              dx=None,
              length=None,
              direction=1,
              get_magnitude=False):
     ParallelAnalysisInterface.__init__(self)
     self.ds = ds
     self.start_positions = np.array(positions)
     self.N = self.start_positions.shape[0]
     self.xfield = xfield
     self.yfield = yfield
     self.zfield = zfield
     self.get_magnitude = get_magnitude
     self.direction = np.sign(direction)
     if volume is None:
         volume = AMRKDTree(self.ds)
         volume.set_fields([self.xfield, self.yfield, self.zfield],
                           [False, False, False], False)
         volume.join_parallel_trees()
     self.volume = volume
     if dx is None:
         dx = self.ds.index.get_smallest_dx()
     self.dx = dx
     if length is None:
         length = np.max(self.ds.domain_right_edge -
                         self.ds.domain_left_edge)
     self.length = length
     self.steps = int(length / dx) + 1
     # Fix up the dx.
     self.dx = 1.0 * self.length / self.steps
     self.streamlines = np.zeros((self.N, self.steps, 3), dtype='float64')
     self.magnitudes = None
     if self.get_magnitude:
         self.magnitudes = np.zeros((self.N, self.steps), dtype='float64')
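
A hedged usage sketch for the constructor shown in these three variants, loosely following the yt streamlines cookbook; the import path, the seed-position recipe, and integrate_through_volume() are assumptions about the surrounding API rather than part of the snippets above:

import numpy as np
import yt
from yt.visualization.api import Streamlines  # assumed import path

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative dataset

# Seed a handful of streamlines around the domain center.
N = 16
pos = ds.domain_center + 0.1 * ds.domain_width * (
    np.random.random((N, 3)) - 0.5)

streamlines = Streamlines(ds, pos,
                          "velocity_x", "velocity_y", "velocity_z",
                          get_magnitude=True)
# Assumed driver call: fills .streamlines (and .magnitudes) allocated above.
streamlines.integrate_through_volume()
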
Code example #7
# Using AMRKDTree Homogenized Volumes to examine large datasets
# at lower resolution.

# In this example we will show how to use the AMRKDTree to take a simulation
# with 8 levels of refinement and only use levels 0-3 to render the dataset.

# We begin by loading up yt, and importing the AMRKDTree
import numpy as np

import yt
from yt.utilities.amr_kdtree.api import AMRKDTree

# Load up a dataset and define the kdtree
ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
kd = AMRKDTree(ds)

# Print out specifics of KD Tree
print("Total volume of all bricks = %i" % kd.count_volume())
print("Total number of cells = %i" % kd.count_cells())

# Define a camera and take a volume rendering.
tf = yt.ColorTransferFunction((-30, -22))
cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
                tf, volume=kd)
tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
cam.snapshot("v1.png", clip_ratio=6.0)

# This rendering is okay, but let's say I'd like to improve it, and I don't
# want to spend the time rendering the high resolution data.  What we can do
# is generate a low resolution version of the AMRKDTree and pass that to the
# camera.  We do this by specifying a maximum refinement level of 6.
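
The snippet stops here; a hedged sketch of what that step could look like, borrowing the data_source approach from Code example #8 (the output filename is illustrative, and the exact way to cap the level may differ between yt versions):

# Cap the refinement level at 6 and rebuild the kd-tree.
low_res_source = ds.all_data()
low_res_source.max_level = 6
kd_low_res = AMRKDTree(ds, data_source=low_res_source)

# Pass the low resolution volume to a new camera and render again.
cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
                tf, volume=kd_low_res)
cam.snapshot("v2.png", clip_ratio=6.0)
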
Code example #8
import yt
from yt.utilities.amr_kdtree.api import AMRKDTree

# Load up a dataset and define the kdtree
ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
im, sc = yt.volume_render(ds, 'density', fname='v0.png')
sc.camera.set_width(ds.arr(100, 'kpc'))
render_source = sc.get_source(0)
kd = render_source.volume

# Print out specifics of KD Tree
print("Total volume of all bricks = %i" % kd.count_volume())
print("Total number of cells = %i" % kd.count_cells())

new_source = ds.all_data()
new_source.max_level = 3
kd_low_res = AMRKDTree(ds, data_source=new_source)
print(kd_low_res.count_volume())
print(kd_low_res.count_cells())

# Now we pass this in as the volume to our camera, and render the snapshot
# again.

render_source.set_volume(kd_low_res)
render_source.set_field('density')
sc.render()
sc.save("v1.png", sigma_clip=6.0)

# This operation was substantially faster.  Now let's modify the low resolution
# rendering until we find something we like.

tf = render_source.transfer_function
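
A hedged sketch of the kind of tweak the comment above is pointing toward, reusing the add_layers call from Code example #7; tf.clear() is assumed to be available on the transfer function, and the layer bounds, colormap, and output filename are illustrative:

tf.clear()  # assumed: reset the existing layers before adding new ones
tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
sc.render()
sc.save("v2.png", sigma_clip=6.0)
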
Code example #9
File: clump_validators.py  Project: cbrummelsmith/yt
def surfacePressureEnergy(clump):
    steps = np.array([[-1, -1, -1], [-1, -1, 0], [-1, -1, 1], [-1, 0, -1],
                      [-1, 0, 0], [-1, 0, 1], [-1, 1, -1], [-1, 1, 0],
                      [-1, 1, 1], [0, -1, -1], [0, -1, 0], [0, -1, 1],
                      [0, 0, -1], [0, 0, 1], [0, 1, -1], [0, 1, 0], [0, 1, 1],
                      [1, -1, -1], [1, -1, 0], [1, -1, 1], [1, 0, -1],
                      [1, 0, 0], [1, 0, 1], [1, 1, -1], [1, 1, 0], [1, 1, 1]])

    mag = np.sqrt((steps**2).sum(axis=1))
    mag = np.array([mag, mag, mag]).T.astype(float)
    normals = -steps / mag

    neighborsTrue = np.ones([3, 3, 3], bool)
    neighborsTrue[1, 1, 1] = False

    ds = clump.data.ds
    amrTree = clump.data.tiles
    cm = clump.quantities.center_of_mass()

    LE = clump.data.tiles.data_source.base_object.left_edge
    RE = clump.data.tiles.data_source.base_object.right_edge
    newLE = LE - (RE - LE) / 10
    newRE = RE + (RE - LE) / 10
    biggerBox = ds.box(newLE, newRE)
    amrTree = AMRKDTree(ds, data_source=biggerBox)

    surfPressureTerm = 0

    grid_mask_dict = dict((g, m) for g, m in clump.data.blocks)

    pad = 2

    # dictionary to keep track of surface cells that have been
    # visited and the corresponding grid they were visited from.
    allSurfCells = {'cell': [], 'grid': []}

    for grid, clumpMask in clump.data.blocks:
        print(grid)
        # Get surface cells using masks
        surfMask = np.zeros(np.array(clumpMask.shape) + 2 * pad, dtype=bool)
        L = surfMask.shape[0] - pad
        W = surfMask.shape[1] - pad
        H = surfMask.shape[2] - pad

        surfMask[pad:L, pad:W, pad:H] = clumpMask
        padClumpMask = surfMask.copy()
        for i in [-1, 0, 1]:
            for j in [-1, 0, 1]:
                for k in [-1, 0, 1]:
                    surfMask[pad + i:L + i, pad + j:W + j,
                             pad + k:H + k] |= clumpMask

        surfMask = np.logical_xor(surfMask, padClumpMask)

        surfCells_fromPad = np.argwhere(surfMask)
        surfCells = surfCells_fromPad - np.array([pad, pad, pad])
        surfInGrid = np.all((surfCells >= 0) * (surfCells < clumpMask.shape),
                            axis=1)
        surfOutGrid = np.logical_not(surfInGrid)

        # *** Think about looping over surfInGrid surfOutGrid separately *** #
        for cell in surfCells:
            surfNormals = []
            cell = np.array(cell)
            center_dds = grid.dds

            # get physical position of surface cell
            position = grid.LeftEdge + (np.array(cell) + 0.5) * grid.dds
            new_position = periodic_position(position, ds)
            r = position - cm

            # get the corresponding grid that the surface cell lives in, as
            # well as its indices relative to that grid.
            trueGrid = ds.index.grids[amrTree.locate_brick(new_position).grid
                                      - grid._id_offset]
            trueCell = ((new_position - trueGrid.LeftEdge) / trueGrid.dds).v
            trueCell = tuple(trueCell.astype(int))

            # if surface cell has already been visited in a different grid loop,
            # then we have already accounted for the surface pressure from that cell
            # and we can skip it.
            skip = False
            for g, c in zip(allSurfCells['grid'], allSurfCells['cell']):
                if (not (np.array(c) -
                         np.array(trueCell)).sum()) and grid.id != g:
                    skip = True
                    break
            if skip:
                continue

            allSurfCells['cell'].append(trueCell)
            allSurfCells['grid'].append(grid.id)

            #print("\tgrid, trueGrid", grid, trueGrid)
            #print("\tcell, trueCell", cell, trueCell)

            # if trueCell is part of the clump in trueGrid it is not actually
            # a surface cell and we can skip it. Also if trueGrid is not in
            # grid_mask_dict, that means trueCell is not in this clump and is
            # a true surface cell.
            possiblyInClump = False
            try:
                trueClumpMask = grid_mask_dict[trueGrid]
                possiblyInClump = True
            except KeyError:
                pass

            trueCellInClump = False
            if possiblyInClump:
                #print("\ttrue cell", trueCell, " in clump?")
                trueCellMask = np.zeros_like(trueClumpMask)
                trueCellMask[trueCell] = True
                trueCellInClump = np.logical_and(trueCellMask,
                                                 trueClumpMask).any()

            if trueCellInClump:
                # not a true surface cell, continue to next one
                #print("\t\tYES. Next surface cell")
                continue
            #print("\t\tNO. True surface cell")

            # if we made it here we have a true surface cell. We now need
            # the pressure and the surface normals of all the cells in the
            # clump it is touching.
            pressure = trueGrid['pressure'][trueCell]
            dS = grid.dds[0] * grid.dds[0]

            # find grid and cell indices of neighboring cells
            # which may be in separate grids
            cell = np.array(cell)
            center_dds = grid.dds
            grids = np.empty(26, dtype='object')
            neigborCells = np.empty([26, 3], dtype='int64')
            offs = 0.5 * (center_dds + amrTree.sdx)
            new_neigborCells = cell + steps

            # index mask for cells in this grid
            in_grid = np.all((new_neigborCells >= 0) *
                             (new_neigborCells < grid.ActiveDimensions),
                             axis=1)

            # index mask for cells in a neighboring grid
            not_in_grid = np.logical_not(in_grid)

            # physical positions of neighboring cells (assumes a periodic box)
            new_positions = position + steps * offs
            new_positions = [periodic_position(p, ds) for p in new_positions]

            grids[in_grid] = grid
            neigborCells[in_grid] = new_neigborCells[in_grid]

            # neighboring cell indices of cells that are in other grids
            get_them = np.argwhere(not_in_grid).ravel()

            if not_in_grid.any():
                # get grids containing cells outside current grids
                grids[get_them] = [
                    ds.index.grids[amrTree.locate_brick(new_positions[i]).grid
                                   - grid._id_offset]
                    for i in get_them]

                # get cell location indices in grids outside current grid
                neigborCells[not_in_grid] = [
                    (new_positions[i] - grids[i].LeftEdge) / grids[i].dds
                    for i in get_them]

                neigborCells = [tuple(_cs) for _cs in neigborCells]

            # find which neighbor cells are in the clump. We only care about
            # neighbor cells that are within the clump because we need to
            # know the normal vectors for those cells.
            #print("\t\tFind neigbor cells within clump")
            for nGrid, nCell, norm in zip(grids, neigborCells, normals):
                nCell = tuple(nCell)
                #print("\t\tnCell, nGrid", nCell, nGrid)
                try:
                    nClumpMask = grid_mask_dict[nGrid]
                except KeyError:
                    # if nGrid is not in dict of grids for this clump,
                    #  then nCell cannot be in this clump
                    continue

                # is nCell in nClump?
                nCellMask = np.zeros_like(nClumpMask)
                nCellMask[nCell] = True
                inClump = np.logical_and(nCellMask, nClumpMask).any()

                if inClump:
                    #print("\t\t\tIN clump")
                    surfNormals.append(norm)
                #else:
                #print("\t\t\tNOT in clump")

            surfNormals = np.array(surfNormals)

            rs = ds.arr(np.empty_like(surfNormals), r.units)
            rs[:] = r

            # surface pressure term = Integrate{ p r dot dS }
            r_dot_dS = (rs * surfNormals).sum(axis=1) * dS
            surfPressureTerm += (pressure * r_dot_dS).sum()
            #print(surfPressureTerm.to('erg'))

    return surfPressureTerm
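
For reference, the accumulated quantity corresponds to the "Integrate{ p r dot dS }" comment above; a hedged reading of the loops is

    E_surf = \oint_S p \, \mathbf{r} \cdot d\mathbf{S}
           \approx \sum_{\text{surface cells}} \sum_{\text{in-clump neighbors}}
             p \, (\mathbf{r} \cdot \hat{\mathbf{n}}) \, \Delta S

where \mathbf{r} is the surface-cell position relative to the clump center of mass, \hat{\mathbf{n}} are the unit normals built from the step offsets, and \Delta S = dds[0]^2 is the cell face area used as dS in the code.
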