Example #1
def test_save_load_octree():
    np.random.seed(int(0x4d3d3d3))
    pos = np.random.normal(0.5, scale=0.05,
                           size=(NPART, 3)) * (DRE - DLE) + DLE
    octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE)
    octree.n_ref = 32
    for i in range(3):
        np.clip(pos[:, i], DLE[i], DRE[i], out=pos[:, i])
    # Convert positions to integer cell indices (units of dx) for Morton ordering
    pos = np.floor((pos - DLE) / dx).astype("uint64")
    morton = get_morton_indices(pos)
    morton.sort()
    octree.add(morton)
    octree.finalize()
    saved = octree.save_octree()
    loaded = OctreeContainer.load_octree(saved)
    always = AlwaysSelector(None)
    ir1 = octree.ires(always)
    ir2 = loaded.ires(always)
    yield assert_equal, ir1, ir2

    fc1 = octree.fcoords(always)
    fc2 = loaded.fcoords(always)
    yield assert_equal, fc1, fc2

    fw1 = octree.fwidth(always)
    fw2 = loaded.fwidth(always)
    yield assert_equal, fw1, fw2
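
The test above relies on module-level setup from yt's particle-octree test
suite. A minimal sketch of that setup, with illustrative values (NPART, DLE,
DRE, ORDER_MAX, and dx are assumptions, not the original constants; import
paths follow yt's layout and may differ between versions):

import numpy as np
from numpy.testing import assert_equal
from yt.geometry.oct_container import OctreeContainer
from yt.geometry.particle_oct_container import ParticleOctreeContainer
from yt.geometry.selection_routines import AlwaysSelector
from yt.utilities.lib.geometry_utils import get_morton_indices

NPART = 32 ** 3                        # particle count (illustrative)
DLE = np.array([0.0, 0.0, 0.0])        # domain left edge
DRE = np.array([10.0, 10.0, 10.0])     # domain right edge
ORDER_MAX = 20                         # Morton curve depth (illustrative)
dx = (DRE - DLE) / (1 << ORDER_MAX)    # cell width at the deepest Morton level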
Example #2
    def _read_chunk_data(self, chunk, fields):
        # This reads the data from a single chunk, and is only used for
        # caching.
        f = self._handle
        rv = {}
        for g in chunk.objs:
            rv[g.id] = {}

        # Split into particles and non-particles
        fluid_fields, particle_fields = [], []
        for ftype, fname in fields:
            if ftype in self.ds.particle_types:
                particle_fields.append((ftype, fname))
            else:
                fluid_fields.append((ftype, fname))
        if len(particle_fields) > 0:
            selector = AlwaysSelector(self.ds)
            rv.update(
                self._read_particle_selection([chunk], selector,
                                              particle_fields))
        if len(fluid_fields) == 0: return rv
        for field in fluid_fields:
            ftype, fname = field
            ds = f["/%s" % fname]
            ind = 0
            for gs in grid_sequences(chunk.objs):
                start = gs[0].id - gs[0]._id_offset
                end = gs[-1].id - gs[-1]._id_offset + 1
                data = ds[start:end, :, :, :].transpose()
                for i, g in enumerate(gs):
                    rv[g.id][field] = np.asarray(data[..., i], "=f8")
        return rv
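
Examples #2-#4 all lean on a grid_sequences helper that batches grids with
consecutive ids, so each dataset is sliced once per run rather than once per
grid. A hedged sketch of such a helper (yt's actual implementation may
differ):

from itertools import groupby

def grid_sequences(grids):
    # Within a run of consecutive ids, (position - id) is constant, so
    # grouping on that key yields maximal consecutive-id batches.
    ordered = sorted(grids, key=lambda g: g.id)
    for _, run in groupby(enumerate(ordered), key=lambda p: p[0] - p[1].id):
        yield [g for _, g in run]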
Example #3
    def _read_chunk_data(self, chunk, fields):
        rv = {}
        if len(chunk.objs) == 0:
            return rv

        for g in chunk.objs:
            rv[g.id] = {}

        # Split into particles and non-particles
        fluid_fields, particle_fields = [], []
        for ftype, fname in fields:
            if ftype in self.ds.particle_types:
                particle_fields.append((ftype, fname))
            else:
                fluid_fields.append((ftype, fname))

        # particles
        if len(particle_fields) > 0:
            selector = AlwaysSelector(self.ds)
            rv.update(self._read_particle_selection(
                [chunk], selector, particle_fields))

        # fluid
        if len(fluid_fields) == 0:
            return rv

        for field in fluid_fields:
            ds = self._group_grid[field[1]]

            for gs in grid_sequences(chunk.objs):
                start = gs[0].id
                end = gs[-1].id + 1
                data = ds[start:end, :, :, :].transpose()
                for i, g in enumerate(gs):
                    rv[g.id][field] = np.asarray(data[..., i],
                                                 dtype=self._field_dtype)
        return rv
Example #4
File: io.py  Project: cevans216/yt
    def _read_chunk_data(self, chunk, fields):
        rv = {}
        if len(chunk.objs) == 0:
            return rv

        for g in chunk.objs:
            rv[g.id] = {}

        # Split into particles and non-particles
        fluid_fields, particle_fields = [], []
        for ftype, fname in fields:
            if ftype in self.ds.particle_types:
                particle_fields.append((ftype, fname))
            else:
                fluid_fields.append((ftype, fname))

        # particles
        if len(particle_fields) > 0:
            selector = AlwaysSelector(self.ds)
            rv.update(
                self._read_particle_selection([chunk], selector,
                                              particle_fields))

        # fluid
        if len(fluid_fields) == 0:
            return rv

        ps2 = self.patch_size
        ps1 = ps2 // 2

        for field in fluid_fields:
            ds = self._group_grid[field[1]]

            for gs in grid_sequences(chunk.objs):
                start = (gs[0].id) * self.pgroup
                end = (gs[-1].id + 1) * self.pgroup
                buf = ds[start:end, :, :, :]
                ngrid = len(gs)
                data = np.empty((ngrid, ps2, ps2, ps2),
                                dtype=self._field_dtype)

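                # Stitch the patch group back together: eight ps1^3 patches
                # fill the eight octants of one ps2^3 cube (GAMER's
                # patch-group layout; octant order as in the source).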
                for g in range(ngrid):
                    pid0 = g * self.pgroup
                    data[g, 0:ps1, 0:ps1, 0:ps1] = buf[pid0 + 0, :, :, :]
                    data[g, 0:ps1, 0:ps1, ps1:ps2] = buf[pid0 + 1, :, :, :]
                    data[g, 0:ps1, ps1:ps2, 0:ps1] = buf[pid0 + 2, :, :, :]
                    data[g, ps1:ps2, 0:ps1, 0:ps1] = buf[pid0 + 3, :, :, :]
                    data[g, 0:ps1, ps1:ps2, ps1:ps2] = buf[pid0 + 4, :, :, :]
                    data[g, ps1:ps2, ps1:ps2, 0:ps1] = buf[pid0 + 5, :, :, :]
                    data[g, ps1:ps2, 0:ps1, ps1:ps2] = buf[pid0 + 6, :, :, :]
                    data[g, ps1:ps2, ps1:ps2, ps1:ps2] = buf[pid0 + 7, :, :, :]

                data = data.transpose()

                for i, g in enumerate(gs):
                    rv[g.id][field] = data[..., i]
        return rv
Example #5
    def _read_chunk_data(self, chunk, fields):
        fid = fn = None
        rv = {}
        mylog.debug("Preloading fields %s", fields)
        # Split into particles and non-particles
        fluid_fields, particle_fields = [], []
        for ftype, fname in fields:
            if ftype in self.ds.particle_types:
                particle_fields.append((ftype, fname))
            else:
                fluid_fields.append((ftype, fname))
        if len(particle_fields) > 0:
            selector = AlwaysSelector(self.ds)
            rv.update(
                self._read_particle_selection([chunk], selector,
                                              particle_fields))
        if len(fluid_fields) == 0:
            return rv
        h5_type = self._field_dtype
        for g in chunk.objs:
            rv[g.id] = gf = {}
            if g.id in self._cached_fields:
                rv[g.id].update(self._cached_fields[g.id])
            if g.filename is None:
                continue
            elif g.filename != fn:
                if fid is not None:
                    fid.close()
                fid = None
            if fid is None:
                fid = h5py.h5f.open(b(g.filename), h5py.h5f.ACC_RDONLY)
                fn = g.filename
            data = np.empty(g.ActiveDimensions[::-1], dtype=h5_type)
            data_view = data.swapaxes(0, -1)
            for field in fluid_fields:
                if field in gf:
                    self._hits += 1
                    continue
                self._misses += 1
                ftype, fname = field
                try:
                    node = "/Grid%08i/%s" % (g.id, fname)
                    dg = h5py.h5d.open(fid, b(node))
                except KeyError:
                    if fname == "Dark_Matter_Density":
                        continue
                    raise
                dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
                gf[field] = data_view.copy()
        if fid:
            fid.close()
        if self._cache_on:
            for gid in rv:
                self._cached_fields.setdefault(gid, {})
                self._cached_fields[gid].update(rv[gid])
        return rv
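
A note on Example #5: h5py's low-level handles (h5py.h5f.open, h5py.h5d.open)
expect bytes paths, so b() is assumed to be a str-to-bytes helper along these
lines (the original presumably imports it from a compatibility module):

def b(s, encoding="latin-1"):
    # Encode str to bytes for h5py's low-level API; pass bytes through.
    return s.encode(encoding) if isinstance(s, str) else s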
Example #6
File: fubar.py  Project: rennehan/caesar
def fof(obj, positions, LL, group_type=None):
    """Friends of friends.

    Perform 3D friends of friends via yt's ParticleContourTree method.
    
    Parameters
    ----------
    obj : :class:`main.CAESAR`
        Object containing the yt_dataset parameter.
    positions : np.ndarray
        Nx3 position array of the particles to perform the FOF on.
    LL : float
        Linking length for the FOF procedure.
    group_type : str, optional
        Label for the kind of group being identified (e.g. 'halo');
        not used by the FOF computation itself.

    Returns
    -------
    group_tags : np.ndarray
        Returns an integer array containing the GroupID that each 
        particle belongs to.  GroupIDs of -1 mean the particle is 
        *not* grouped.

    """

    pct = ParticleContourTree(LL)

    pos = YTPositionArray(obj.yt_dataset.arr(positions, obj.units['length']))
    ot = pos.to_octree()

    group_tags = pct.identify_contours(
        ot, ot.domain_ind(AlwaysSelector(None)), positions,
        np.arange(0, len(positions), dtype=np.int64), 0, 0)

    return group_tags
    """  (PAY NO ATTENTION TO THE MAN BEHIND THE CURTAIN)
Example #7
def octree_zoom_bbox_filter(fname, ds, bbox0, field_add):

    ds.index
    ad = ds.all_data()

    print('\n\n')
    print('----------------------------')
    print("[octree zoom_bbox_filter:] Calculating Center of Mass")

    gas_com_x = np.sum(ad["gasdensity"] * ad["gascoordinates"][:, 0]) / np.sum(
        ad["gasdensity"])
    gas_com_y = np.sum(ad["gasdensity"] * ad["gascoordinates"][:, 1]) / np.sum(
        ad["gasdensity"])
    gas_com_z = np.sum(ad["gasdensity"] * ad["gascoordinates"][:, 2]) / np.sum(
        ad["gasdensity"])

    com = [gas_com_x, gas_com_y, gas_com_z]

    print(
        "[octree zoom_bbox_filter:] Center of Mass is at coordinates (kpc): ",
        com)

    center = [cfg.model.x_cent, cfg.model.y_cent, cfg.model.z_cent]
    print('[octree zoom_bbox_filter:] using center: ', center)

    box_len = cfg.par.zoom_box_len
    # Now begin converting box_len to physical units in case we're in a
    # cosmological simulation. We first give it units of proper kpc, then
    # convert to code length (which for Gadget is comoving kpc/h) for the
    # bbox calculation (dropping the units, of course). When we re-convert
    # to proper units, the box_len as input in parameters_master will be
    # in proper units. If a simulation isn't cosmological, the only
    # difference here will be a factor of 1/h.
    # yt 3.x
    box_len = ds.quan(box_len, 'kpc')
    #yt 4.x
    if yt.__version__ == '4.0.dev0':
        box_len = float(box_len.to('code_length').value)
        bbox_lim = box_len
    else:
        box_len = box_len.convert_to_units('code_length').value
        bbox_lim = box_len

    bbox1 = [[center[0] - bbox_lim, center[0] + bbox_lim],
             [center[1] - bbox_lim, center[1] + bbox_lim],
             [center[2] - bbox_lim, center[2] + bbox_lim]]
    print('[octree zoom] new zoomed bbox (comoving/h) in code units= ', bbox1)

    #yt 3.x
    #ds1 = yt.load(fname,bounding_box=bbox1,n_ref = cfg.par.n_ref,over_refine_factor=cfg.par.oref)

    # What follows is tricky. Broadly, the plan is to create a yt region
    # to cut the dataset down to our desired box size. In yt 4.x we then
    # pass around reg (which represents the cutout version of the ds) as
    # well as ds (the original dataset, which keeps all of its original
    # parameters). We pass the octree itself around in a newly created
    # dictionary, reg.parameters.

    if yt.__version__ == '4.0.dev0':

        # Reload the field names, but now with the bounding box set. This
        # lets us map the field names to those generated in the octree.
        # It is a significant inefficiency, as we have to load the entire
        # dataset a *second* time.
        ds = field_add(fname,
                       bounding_box=bbox1,
                       ds=ds,
                       add_smoothed_quantities=True)
        ds.periodicity = (False, False, False)
        reg = ds.region(center=center,
                        left_edge=np.asarray(center) - bbox_lim,
                        right_edge=np.asarray(center) + bbox_lim)

        #ds1 = reg.ds
        left = np.array([pos[0] for pos in bbox1])
        right = np.array([pos[1] for pos in bbox1])
        octree = ds.octree(left, right,
                           n_ref=cfg.par.n_ref)  #, force_build=True)

        reg.parameters = {}
        reg.parameters['octree'] = octree

    else:
        # Load a cutout ds1 with a bounding_box so we can generate the
        # octree on this dataset.
        ds1 = yt.load(fname,
                      bounding_box=bbox1,
                      n_ref=cfg.par.n_ref,
                      over_refine_factor=cfg.par.oref)
        ds1.periodicity = (False, False, False)
        # Now update the field names.
        ds1 = field_add(None,
                        bounding_box=bbox1,
                        ds=ds1,
                        add_smoothed_quantities=True)

        # Now create the region so that the smoothed properties are
        # correct downstream.
        reg = ds1.region(center=center,
                         left_edge=np.asarray(center) - bbox_lim,
                         right_edge=np.asarray(center) + bbox_lim)
        reg.parameters = {}

        saved = ds1.index.oct_handler.save_octree()
        always = AlwaysSelector(None)
        #ir1 = ds.index.oct_handler.ires(always)  # refinement levels
        reg.parameters["fc1"] = ds1.index.oct_handler.fcoords(
            always)  # coordinates in code_length
        reg.parameters["fw1"] = ds1.index.oct_handler.fwidth(
            always)  # width of cell in code_length
        reg.parameters["refined"] = saved['octree'].astype('bool')

        reg.parameters["n_ref"] = ds1.index.oct_handler.n_ref
        reg.parameters["max_level"] = ds1.index.oct_handler.max_level
        reg.parameters["nocts"] = ds1.index.oct_handler.nocts

    # Re-add the new powderday convention fields; this time we need to
    # make sure to do the ages calculation, since it hasn't been done
    # before.
    #ds1 = field_add(None,bounding_box = bbox1,ds=ds1,starages=True)

    return reg
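
The bbox construction above reduces to a symmetric box around a center point.
A hedged, standalone sketch of just that step (pure NumPy, illustrative
values):

import numpy as np

center = np.array([0.0, 0.0, 0.0])   # box center in code_length (illustrative)
bbox_lim = 0.05                      # half-width in code_length (illustrative)
bbox1 = [[c - bbox_lim, c + bbox_lim] for c in center]
left = np.array([edge[0] for edge in bbox1])   # left edges, as in the yt 4.x branch
right = np.array([edge[1] for edge in bbox1])  # right edges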
Example #8
def yt_octree_generate(fname, field_add):

    print('[grid_construction]: bbox_lim = ', cfg.par.bbox_lim)

    bbox = [[-2. * cfg.par.bbox_lim, 2. * cfg.par.bbox_lim],
            [-2. * cfg.par.bbox_lim, 2. * cfg.par.bbox_lim],
            [-2. * cfg.par.bbox_lim, 2. * cfg.par.bbox_lim]]

    # load the DS and add pd fields; no need to put in stellar ages yet
    # as this will happen downstream in zoom
    pf = field_add(fname, bounding_box=bbox)

    # zoom if necessary
    # if cfg.par.zoom == True:
    pf = octree_zoom_bbox_filter(fname, pf, bbox, field_add)

    pf.index
    ad = pf.all_data()

    # ---------------------------------------------------------------
    # PLOTTING DIAGNOSTIC PROJECTION PLOTS
    # ---------------------------------------------------------------

    # proj_plots(pf)

    from yt.data_objects.particle_unions import ParticleUnion
    pu = ParticleUnion("all", list(pf.particle_types_raw))

    saved = pf.index.oct_handler.save_octree()

    always = AlwaysSelector(None)
    ir1 = pf.index.oct_handler.ires(always)  # refinement levels
    fc1 = pf.index.oct_handler.fcoords(always)  # coordinates in code_length
    fw1 = pf.index.oct_handler.fwidth(always)  # width of cell in code_length

    # convert fc1 and fw1 to YTArrays
    fc1 = pf.arr(fc1, 'code_length')
    fw1 = pf.arr(fw1, 'code_length')

    print('----------------------------')
    print('yt Octree Construction Stats')
    print('----------------------------')
    print(' n_ref = ', pf.index.oct_handler.n_ref)
    print(' max_level = ', pf.index.oct_handler.max_level)
    print(' nocts = ', pf.index.oct_handler.nocts)
    print('----------------------------')

    gridstats(ir1, fc1, fw1)

    # ==================================

    refined = saved['octree'].astype('bool')

    try:
        # reshape() is used only as a shape-compatibility check; it raises
        # ValueError when the flat flags need expanding (oref > 0).
        refined.reshape(len(ir1))
    except ValueError:
        # Each unrefined oct flag expands into 2**(3*oref) False leaf flags.
        refinements = 2**(3 * cfg.par.oref)
        refined2 = []
        for r in refined:
            if r == 1:
                refined2.append(True)
            if r == 0:
                refined2.append(np.zeros(refinements).astype('bool'))
        refined = np.hstack(refined2)

    # smooth the data on to the octree

    volume = np.zeros(len(refined))

    if not cfg.par.CONSTANT_DUST_GRID:

        # Crash the code if the parameter choice for dust grid type isn't in
        # the hard-coded valid list below.
        dust_grid_type_list = ['dtm', 'rr', 'manual']
        try:
            dust_choice = dust_grid_type_list.index(cfg.par.dust_grid_type)
        except ValueError:
            print('You chose a dust_grid_type that isn\'t a valid selection '
                  'within the list: ' + str(dust_grid_type_list) +
                  '....crashing now!')
            sys.exit()

        if cfg.par.dust_grid_type == 'dtm':
            dust_smoothed_dtm = dtm_grid(pf, refined)
            dust_smoothed = dust_smoothed_dtm

        if cfg.par.dust_grid_type == 'rr':
            dust_smoothed_remy_ruyer = remy_ruyer(pf, refined)
            dust_smoothed = dust_smoothed_remy_ruyer

        if cfg.par.dust_grid_type == 'manual':
            dust_smoothed_manual = manual(pf, refined)
            dust_smoothed = dust_smoothed_manual

    else:
        print('cfg.par.CONSTANT_DUST_GRID=True')
        print('setting constant dust grid to 4.e-23')
        dust_smoothed = np.zeros(len(refined)) + 4.e-23

    # return refined,dust_smoothed,xmin,xmax,ymin,ymax,zmin,zmax,boost
    return refined, dust_smoothed, fc1, fw1, pf, ad
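
A hedged, standalone sketch of the refinement-flag expansion handled in the
try/except above (pure NumPy; it mirrors the excerpt rather than defining
powderday's API):

import numpy as np

def expand_refined(refined, oref=1):
    # Each refined oct keeps its single True flag; each unrefined oct
    # expands into 2**(3*oref) False leaf flags.
    leaves = 2 ** (3 * oref)
    out = [True if r else np.zeros(leaves, dtype=bool) for r in refined]
    return np.hstack(out)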